Merging r1616428 through r1616893 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-6584@1616897 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao, 11 years ago
parent
commit
9d5f8fa68d
100 changed files with 6588 additions and 3320 deletions
  1. BUILDING.txt (+1 -0)
  2. hadoop-common-project/hadoop-auth/pom.xml (+9 -0)
  3. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java (+10 -26)
  4. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java (+20 -1)
  5. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java (+20 -1)
  6. hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java (+8 -30)
  7. hadoop-common-project/hadoop-common/CHANGES.txt (+7 -3)
  8. hadoop-common-project/hadoop-common/pom.xml (+11 -0)
  9. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java (+2 -2)
  10. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java (+1 -15)
  11. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsCollectorImpl.java (+5 -1)
  12. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java (+8 -0)
  13. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java (+330 -0)
  14. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java (+102 -0)
  15. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java (+355 -0)
  16. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java (+250 -0)
  17. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenIdentifier.java (+10 -7)
  18. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenManager.java (+153 -0)
  19. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/KerberosDelegationTokenAuthenticationHandler.java (+54 -0)
  20. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/KerberosDelegationTokenAuthenticator.java (+46 -0)
  21. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/PseudoDelegationTokenAuthenticationHandler.java (+55 -0)
  22. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/PseudoDelegationTokenAuthenticator.java (+25 -18)
  23. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/ServletUtils.java (+59 -0)
  24. hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm (+16 -179)
  25. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java (+0 -24)
  26. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java (+326 -0)
  27. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenManager.java (+59 -0)
  28. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java (+727 -0)
  29. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java (+68 -59)
  30. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSKerberosAuthenticator.java (+0 -188)
  31. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java (+8 -7)
  32. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandler.java (+0 -230)
  33. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenManager.java (+0 -78)
  34. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenManagerException.java (+0 -51)
  35. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/DelegationTokenManagerService.java (+0 -242)
  36. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml (+0 -9)
  37. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java (+5 -1)
  38. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSCustomUserName.java (+0 -94)
  39. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSKerberosAuthenticationHandler.java (+0 -316)
  40. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java (+5 -4)
  41. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java (+3 -3)
  42. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestDelegationTokenManagerService.java (+0 -89)
  43. hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (+23 -0)
  44. hadoop-hdfs-project/hadoop-hdfs/pom.xml (+101 -16)
  45. hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt (+57 -21)
  46. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (+5 -3)
  47. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (+23 -922)
  48. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java (+1060 -0)
  49. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java (+12 -6)
  50. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (+20 -1)
  51. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java (+8 -0)
  52. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java (+1 -1)
  53. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java (+14 -1)
  54. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java (+12 -1)
  55. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (+144 -37)
  56. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java (+1 -1)
  57. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java (+6 -0)
  58. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java (+40 -29)
  59. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java (+58 -18)
  60. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java (+14 -8)
  61. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (+9 -1)
  62. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java (+7 -0)
  63. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java (+3 -0)
  64. hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/CMakeLists.txt (+4 -0)
  65. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.c (+271 -0)
  66. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.h (+161 -0)
  67. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c (+33 -32)
  68. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h (+6 -4)
  69. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c (+4 -4)
  70. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.h (+12 -0)
  71. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c (+194 -160)
  72. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c (+90 -175)
  73. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h (+0 -2)
  74. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c (+4 -2)
  75. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/mutexes.h (+55 -0)
  76. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/mutexes.c (+43 -0)
  77. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/platform.h (+34 -0)
  78. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread.c (+52 -0)
  79. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread_local_storage.c (+80 -0)
  80. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread.h (+54 -0)
  81. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread_local_storage.h (+75 -0)
  82. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/inttypes.h (+28 -0)
  83. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/mutexes.c (+52 -0)
  84. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/platform.h (+86 -0)
  85. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread.c (+66 -0)
  86. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread_local_storage.c (+164 -0)
  87. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/unistd.h (+29 -0)
  88. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c (+119 -113)
  89. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c (+11 -8)
  90. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c (+17 -12)
  91. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c (+11 -20)
  92. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c (+18 -21)
  93. hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_native_mini_dfs.c (+1 -1)
  94. hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html (+1 -1)
  95. hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js (+10 -1)
  96. hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSCommands.apt.vm (+408 -0)
  97. hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm (+14 -15)
  98. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (+3 -3)
  99. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java (+1 -1)
  100. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java (+1 -1)

+ 1 - 0
BUILDING.txt

@@ -189,6 +189,7 @@ Requirements:
 * Maven 3.0 or later
 * Findbugs 1.3.9 (if running findbugs)
 * ProtocolBuffer 2.5.0
+* CMake 2.6 or newer
 * Windows SDK or Visual Studio 2010 Professional
 * Unix command-line tools from GnuWin32 or Cygwin: sh, mkdir, rm, cp, tar, gzip
 * zlib headers (if building native code bindings for zlib)

+ 9 - 0
hadoop-common-project/hadoop-auth/pom.xml

@@ -144,6 +144,15 @@
         <artifactId>maven-jar-plugin</artifactId>
         <executions>
           <execution>
+            <id>prepare-jar</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>jar</goal>
+            </goals>
+          </execution>
+          <execution>
+            <id>prepare-test-jar</id>
+            <phase>prepare-package</phase>
             <goals>
               <goal>test-jar</goal>
             </goals>

+ 10 - 26
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java

@@ -120,32 +120,6 @@ public class AuthenticatedURL {
       return token;
     }
 
-    /**
-     * Return the hashcode for the token.
-     *
-     * @return the hashcode for the token.
-     */
-    @Override
-    public int hashCode() {
-      return (token != null) ? token.hashCode() : 0;
-    }
-
-    /**
-     * Return if two token instances are equal.
-     *
-     * @param o the other token instance.
-     *
-     * @return if this instance and the other instance are equal.
-     */
-    @Override
-    public boolean equals(Object o) {
-      boolean eq = false;
-      if (o instanceof Token) {
-        Token other = (Token) o;
-        eq = (token == null && other.token == null) || (token != null && this.token.equals(other.token));
-      }
-      return eq;
-    }
   }
 
   private static Class<? extends Authenticator> DEFAULT_AUTHENTICATOR = KerberosAuthenticator.class;
@@ -208,6 +182,16 @@ public class AuthenticatedURL {
     this.authenticator.setConnectionConfigurator(connConfigurator);
   }
 
+  /**
+   * Returns the {@link Authenticator} instance used by the
+   * <code>AuthenticatedURL</code>.
+   *
+   * @return the {@link Authenticator} instance
+   */
+  protected Authenticator getAuthenticator() {
+    return authenticator;
+  }
+
   /**
    * Returns an authenticated {@link HttpURLConnection}.
    *
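
The new protected getAuthenticator() accessor lets subclasses reach the configured Authenticator; the DelegationTokenAuthenticatedURL class added later in this commit relies on it. A minimal sketch of a subclass using it (the subclass name and logging are hypothetical):

    import java.io.IOException;
    import java.net.HttpURLConnection;
    import java.net.URL;

    import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
    import org.apache.hadoop.security.authentication.client.AuthenticationException;
    import org.apache.hadoop.security.authentication.client.Authenticator;

    // Hypothetical subclass: reports which Authenticator handles each connection.
    public class LoggingAuthenticatedURL extends AuthenticatedURL {
      public LoggingAuthenticatedURL(Authenticator authenticator) {
        super(authenticator);
      }

      public HttpURLConnection openLogged(URL url, Token token)
          throws IOException, AuthenticationException {
        // getAuthenticator() is the accessor introduced in this change.
        System.out.println("authenticating via "
            + getAuthenticator().getClass().getSimpleName());
        return openConnection(url, token);
      }
    }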

+ 20 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java

@@ -142,11 +142,30 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
    */
   public static final String NAME_RULES = TYPE + ".name.rules";
 
+  private String type;
   private String keytab;
   private GSSManager gssManager;
   private Subject serverSubject = new Subject();
   private List<LoginContext> loginContexts = new ArrayList<LoginContext>();
 
+  /**
+   * Creates a Kerberos SPNEGO authentication handler with the default
+   * auth-token type, <code>kerberos</code>.
+   */
+  public KerberosAuthenticationHandler() {
+    this(TYPE);
+  }
+
+  /**
+   * Creates a Kerberos SPNEGO authentication handler with a custom auth-token
+   * type.
+   *
+   * @param type auth-token type.
+   */
+  public KerberosAuthenticationHandler(String type) {
+    this.type = type;
+  }
+
   /**
    * Initializes the authentication handler instance.
    * <p/>
@@ -249,7 +268,7 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
    */
   @Override
   public String getType() {
-    return TYPE;
+    return type;
   }
 
   /**
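
Making the auth-token type injectable lets a wrapping handler reuse the SPNEGO mechanics under a different type name; the delegation-token handlers added later in this commit use exactly this hook (note the "-dt" TYPE_POSTFIX there). A minimal sketch with a hypothetical subclass and type name:

    import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;

    // Hypothetical handler whose getType() reports "kerberos-dt" instead of "kerberos".
    public class KerberosDtHandler extends KerberosAuthenticationHandler {
      public KerberosDtHandler() {
        super(KerberosAuthenticationHandler.TYPE + "-dt");
      }
    }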

+ 20 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java

@@ -55,6 +55,25 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
 
   private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
   private boolean acceptAnonymous;
+  private String type;
+
+  /**
+   * Creates a Hadoop pseudo authentication handler with the default auth-token
+   * type, <code>simple</code>.
+   */
+  public PseudoAuthenticationHandler() {
+    this(TYPE);
+  }
+
+  /**
+   * Creates a Hadoop pseudo authentication handler with a custom auth-token
+   * type.
+   *
+   * @param type auth-token type.
+   */
+  public PseudoAuthenticationHandler(String type) {
+    this.type = type;
+  }
 
   /**
    * Initializes the authentication handler instance.
@@ -96,7 +115,7 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
    */
   @Override
   public String getType() {
-    return TYPE;
+    return type;
   }
 
   /**
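
The same pattern applies on the simple-auth side; a minimal sketch (the type string is hypothetical):

    import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
    import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;

    public class PseudoTypeDemo {
      public static void main(String[] args) {
        // Hypothetical type name; getType() now reports it instead of "simple".
        AuthenticationHandler handler =
            new PseudoAuthenticationHandler(PseudoAuthenticationHandler.TYPE + "-dt");
        System.out.println(handler.getType()); // simple-dt
      }
    }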

+ 8 - 30
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java

@@ -33,36 +33,6 @@ public class TestAuthenticatedURL {
     token = new AuthenticatedURL.Token("foo");
     Assert.assertTrue(token.isSet());
     Assert.assertEquals("foo", token.toString());
-
-    AuthenticatedURL.Token token1 = new AuthenticatedURL.Token();
-    AuthenticatedURL.Token token2 = new AuthenticatedURL.Token();
-    Assert.assertEquals(token1.hashCode(), token2.hashCode());
-    Assert.assertTrue(token1.equals(token2));
-
-    token1 = new AuthenticatedURL.Token();
-    token2 = new AuthenticatedURL.Token("foo");
-    Assert.assertNotSame(token1.hashCode(), token2.hashCode());
-    Assert.assertFalse(token1.equals(token2));
-
-    token1 = new AuthenticatedURL.Token("foo");
-    token2 = new AuthenticatedURL.Token();
-    Assert.assertNotSame(token1.hashCode(), token2.hashCode());
-    Assert.assertFalse(token1.equals(token2));
-
-    token1 = new AuthenticatedURL.Token("foo");
-    token2 = new AuthenticatedURL.Token("foo");
-    Assert.assertEquals(token1.hashCode(), token2.hashCode());
-    Assert.assertTrue(token1.equals(token2));
-
-    token1 = new AuthenticatedURL.Token("bar");
-    token2 = new AuthenticatedURL.Token("foo");
-    Assert.assertNotSame(token1.hashCode(), token2.hashCode());
-    Assert.assertFalse(token1.equals(token2));
-
-    token1 = new AuthenticatedURL.Token("foo");
-    token2 = new AuthenticatedURL.Token("bar");
-    Assert.assertNotSame(token1.hashCode(), token2.hashCode());
-    Assert.assertFalse(token1.equals(token2));
   }
 
   @Test
@@ -137,4 +107,12 @@ public class TestAuthenticatedURL {
     Mockito.verify(connConf).configure(Mockito.<HttpURLConnection>any());
   }
 
+  @Test
+  public void testGetAuthenticator() throws Exception {
+    Authenticator authenticator = Mockito.mock(Authenticator.class);
+
+    AuthenticatedURL aURL = new AuthenticatedURL(authenticator);
+    Assert.assertEquals(authenticator, aURL.getAuthenticator());
+  }
+
 }

+ 7 - 3
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -490,6 +490,8 @@ Release 2.6.0 - UNRELEASED
     HADOOP-10791. AuthenticationFilter should support externalizing the 
     secret for signing and provide rotation support. (rkanter via tucu)
 
+    HADOOP-10771. Refactor HTTP delegation support out of httpfs to common, PART 1. (tucu)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -521,9 +523,6 @@ Release 2.6.0 - UNRELEASED
     HADOOP-10830. Missing lock in JavaKeyStoreProvider.createCredentialEntry.
     (Benoy Antony via umamahesh)
 
-    HADOOP-10876. The constructor of Path should not take an empty URL as a
-    parameter. (Zhihai Xu via wang)
-
     HADOOP-10928. Incorrect usage on `hadoop credential list`.
     (Josh Elser via wang)
 
@@ -543,6 +542,11 @@ Release 2.6.0 - UNRELEASED
     HADOOP-10905. LdapGroupsMapping Should use configuration.getPassword for SSL
     and LDAP Passwords. (lmccay via brandonli)
 
+    HADOOP-10931 compile error on tools/hadoop-openstack (xukun via stevel)
+
+    HADOOP-10929. Typo in Configuration.getPasswordFromCredentialProviders
+    (lmccay via brandonli)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 11 - 0
hadoop-common-project/hadoop-common/pom.xml

@@ -203,6 +203,17 @@
       <artifactId>hadoop-auth</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-auth</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minikdc</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>com.jcraft</groupId>
       <artifactId>jsch</artifactId>

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -1781,7 +1781,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   public char[] getPassword(String name) throws IOException {
     char[] pass = null;
 
-    pass = getPasswordFromCredenitalProviders(name);
+    pass = getPasswordFromCredentialProviders(name);
 
     if (pass == null) {
       pass = getPasswordFromConfig(name);
@@ -1797,7 +1797,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * @return password or null if not found
    * @throws IOException
    */
-  protected char[] getPasswordFromCredenitalProviders(String name)
+  protected char[] getPasswordFromCredentialProviders(String name)
       throws IOException {
     char[] pass = null;
     try {
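
For context, getPassword() (whose internals are renamed above) tries the configured credential providers first and falls back to the config value. A minimal caller sketch (the property key is hypothetical):

    import java.io.IOException;
    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;

    public class PasswordLookupDemo {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Resolved from credential providers first, then from the config itself.
        char[] pass = conf.getPassword("my.ssl.server.keystore.password");
        if (pass != null) {
          try {
            // ... use the password ...
          } finally {
            Arrays.fill(pass, '\0'); // clear the sensitive chars when done
          }
        }
      }
    }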

+ 1 - 15
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java

@@ -128,20 +128,7 @@ public class Path implements Comparable {
            "Can not create a Path from an empty string");
     }   
   }
-
-  /** check URI parameter of Path constructor. */
-  private void checkPathArg(URI aUri) throws IllegalArgumentException {
-    // disallow construction of a Path from an empty URI
-    if (aUri == null) {
-      throw new IllegalArgumentException(
-          "Can not create a Path from a null URI");
-    }
-    if (aUri.toString().isEmpty()) {
-      throw new IllegalArgumentException(
-          "Can not create a Path from an empty URI");
-    }
-  }
-
+  
   /** Construct a path from a String.  Path strings are URIs, but with
    * unescaped elements and some additional normalization. */
   public Path(String pathString) throws IllegalArgumentException {
@@ -189,7 +176,6 @@ public class Path implements Comparable {
    * Construct a path from a URI
    */
   public Path(URI aUri) {
-    checkPathArg(aUri);
     uri = aUri.normalize();
   }
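
With checkPathArg removed, the URI constructor only normalizes its argument; a minimal sketch:

    import java.net.URI;

    import org.apache.hadoop.fs.Path;

    public class PathFromUriDemo {
      public static void main(String[] args) {
        // The empty/null-URI guard removed above no longer applies here.
        Path p = new Path(URI.create("hdfs://nn1:8020/a/../b"));
        System.out.println(p); // hdfs://nn1:8020/b
      }
    }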
   

+ 5 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsCollectorImpl.java

@@ -21,14 +21,18 @@ package org.apache.hadoop.metrics2.impl;
 import java.util.Iterator;
 import java.util.List;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.metrics2.MetricsInfo;
 import org.apache.hadoop.metrics2.MetricsCollector;
 import org.apache.hadoop.metrics2.MetricsFilter;
 import static org.apache.hadoop.metrics2.lib.Interns.*;
 
-class MetricsCollectorImpl implements MetricsCollector,
+@InterfaceAudience.Private
+@VisibleForTesting
+public class MetricsCollectorImpl implements MetricsCollector,
     Iterable<MetricsRecordBuilderImpl> {
 
   private final List<MetricsRecordBuilderImpl> rbs = Lists.newArrayList();

+ 8 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java

@@ -89,6 +89,14 @@ public class MutableStat extends MutableMetric {
     this(name, description, sampleName, valueName, false);
   }
 
+  /**
+   * Set whether to display the extended stats (stdev, min/max etc.) or not
+   * @param extended enable/disable displaying extended stats
+   */
+  public synchronized void setExtended(boolean extended) {
+    this.extended = extended;
+  }
+
   /**
    * Add a number of samples and their sum to the running stat
    * @param numSamples  number of samples
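
A minimal sketch of toggling extended stats on a registered stat (the registry and metric names are hypothetical):

    import org.apache.hadoop.metrics2.lib.MetricsRegistry;
    import org.apache.hadoop.metrics2.lib.MutableStat;

    public class MutableStatDemo {
      public static void main(String[] args) {
        MetricsRegistry registry = new MetricsRegistry("demo");
        MutableStat stat = registry.newStat("rpcTime", "RPC time", "ops", "time");
        stat.setExtended(true); // also emit stdev, min/max, etc.
        stat.add(5, 100);       // 5 samples with a sum of 100
      }
    }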

+ 330 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java

@@ -0,0 +1,330 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
+import java.net.URL;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * The <code>DelegationTokenAuthenticatedURL</code> is an
+ * {@link AuthenticatedURL} subclass with built-in Hadoop Delegation Token
+ * functionality.
+ * <p/>
+ * The authentication mechanisms supported by default are Hadoop Simple
+ * authentication (also known as pseudo authentication) and Kerberos SPNEGO
+ * authentication.
+ * <p/>
+ * Additional authentication mechanisms can be supported via {@link
+ * DelegationTokenAuthenticator} implementations.
+ * <p/>
+ * The default {@link DelegationTokenAuthenticator} is the {@link
+ * KerberosDelegationTokenAuthenticator} class which supports
+ * automatic fallback from Kerberos SPNEGO to Hadoop Simple authentication via
+ * the {@link PseudoDelegationTokenAuthenticator} class.
+ * <p/>
+ * <code>AuthenticatedURL</code> instances are not thread-safe.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class DelegationTokenAuthenticatedURL extends AuthenticatedURL {
+
+  /**
+   * Client side authentication token that handles Delegation Tokens.
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public static class Token extends AuthenticatedURL.Token {
+    private
+    org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier>
+        delegationToken;
+
+    org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier>
+    getDelegationToken() {
+      return delegationToken;
+    }
+
+  }
+
+  private static Class<? extends DelegationTokenAuthenticator>
+      DEFAULT_AUTHENTICATOR = KerberosDelegationTokenAuthenticator.class;
+
+  /**
+   * Sets the default {@link DelegationTokenAuthenticator} class to use when a
+   * {@link DelegationTokenAuthenticatedURL} instance is created without
+   * specifying one.
+   *
+   * The default class is {@link KerberosDelegationTokenAuthenticator}
+   *
+   * @param authenticator the authenticator class to use as default.
+   */
+  public static void setDefaultDelegationTokenAuthenticator(
+      Class<? extends DelegationTokenAuthenticator> authenticator) {
+    DEFAULT_AUTHENTICATOR = authenticator;
+  }
+
+  /**
+   * Returns the default {@link DelegationTokenAuthenticator} class to use when
+   * a {@link DelegationTokenAuthenticatedURL} instance is created without
+   * specifying one.
+   * <p/>
+   * The default class is {@link KerberosDelegationTokenAuthenticator}
+   *
+   * @return the delegation token authenticator class to use as default.
+   */
+  public static Class<? extends DelegationTokenAuthenticator>
+      getDefaultDelegationTokenAuthenticator() {
+    return DEFAULT_AUTHENTICATOR;
+  }
+
+  private static DelegationTokenAuthenticator
+      obtainDelegationTokenAuthenticator(DelegationTokenAuthenticator dta) {
+    try {
+      return (dta != null) ? dta : DEFAULT_AUTHENTICATOR.newInstance();
+    } catch (Exception ex) {
+      throw new IllegalArgumentException(ex);
+    }
+  }
+
+  /**
+   * Creates a <code>DelegationTokenAuthenticatedURL</code>.
+   * <p/>
+   * An instance of the default {@link DelegationTokenAuthenticator} will be
+   * used.
+   */
+  public DelegationTokenAuthenticatedURL() {
+    this(null, null);
+  }
+
+  /**
+   * Creates a <code>DelegationTokenAuthenticatedURL</code>.
+   *
+   * @param authenticator the {@link DelegationTokenAuthenticator} instance to
+   * use, if <code>null</code> the default one will be used.
+   */
+  public DelegationTokenAuthenticatedURL(
+      DelegationTokenAuthenticator authenticator) {
+    this(authenticator, null);
+  }
+
+  /**
+   * Creates a <code>DelegationTokenAuthenticatedURL</code> using the default
+   * {@link DelegationTokenAuthenticator} class.
+   *
+   * @param connConfigurator a connection configurator.
+   */
+  public DelegationTokenAuthenticatedURL(
+      ConnectionConfigurator connConfigurator) {
+    this(null, connConfigurator);
+  }
+
+  /**
+   * Creates a <code>DelegationTokenAuthenticatedURL</code>.
+   *
+   * @param authenticator the {@link DelegationTokenAuthenticator} instance to
+   * use, if <code>null</code> the default one will be used.
+   * @param connConfigurator a connection configurator.
+   */
+  public DelegationTokenAuthenticatedURL(
+      DelegationTokenAuthenticator authenticator,
+      ConnectionConfigurator connConfigurator) {
+    super(obtainDelegationTokenAuthenticator(authenticator), connConfigurator);
+  }
+
+  /**
+   * Returns an authenticated {@link HttpURLConnection}. It uses a Delegation
+   * Token only if the given auth token is an instance of {@link Token} and
+   * contains a Delegation Token; otherwise it uses the configured
+   * {@link DelegationTokenAuthenticator} to authenticate the connection.
+   *
+   * @param url the URL to connect to. Only HTTP/S URLs are supported.
+   * @param token the authentication token being used for the user.
+   * @return an authenticated {@link HttpURLConnection}.
+   * @throws IOException if an IO error occurred.
+   * @throws AuthenticationException if an authentication exception occurred.
+   */
+  @Override
+  public HttpURLConnection openConnection(URL url, AuthenticatedURL.Token token)
+      throws IOException, AuthenticationException {
+    return (token instanceof Token) ? openConnection(url, (Token) token)
+                                    : super.openConnection(url, token);
+  }
+
+  /**
+   * Returns an authenticated {@link HttpURLConnection}. If the Delegation
+   * Token is present, it will be used taking precedence over the configured
+   * <code>Authenticator</code>.
+   *
+   * @param url the URL to connect to. Only HTTP/S URLs are supported.
+   * @param token the authentication token being used for the user.
+   * @return an authenticated {@link HttpURLConnection}.
+   * @throws IOException if an IO error occurred.
+   * @throws AuthenticationException if an authentication exception occurred.
+   */
+  public HttpURLConnection openConnection(URL url, Token token)
+      throws IOException, AuthenticationException {
+    return openConnection(url, token, null);
+  }
+
+  private URL augmentURL(URL url, Map<String, String> params)
+      throws IOException {
+    if (params != null && params.size() > 0) {
+      String urlStr = url.toExternalForm();
+      StringBuilder sb = new StringBuilder(urlStr);
+      String separator = (urlStr.contains("?")) ? "&" : "?";
+      for (Map.Entry<String, String> param : params.entrySet()) {
+        sb.append(separator).append(param.getKey()).append("=").append(
+            param.getValue());
+        separator = "&";
+      }
+      url = new URL(sb.toString());
+    }
+    return url;
+  }
+
+  /**
+   * Returns an authenticated {@link HttpURLConnection}. If the Delegation
+   * Token is present, it will be used taking precedence over the configured
+   * <code>Authenticator</code>. If the <code>doAs</code> parameter is not NULL,
+   * the request will be done on behalf of the specified <code>doAs</code> user.
+   *
+   * @param url the URL to connect to. Only HTTP/S URLs are supported.
+   * @param token the authentication token being used for the user.
+   * @param doAs user to do the request on behalf of; if NULL the request is
+   * done as self.
+   * @return an authenticated {@link HttpURLConnection}.
+   * @throws IOException if an IO error occurred.
+   * @throws AuthenticationException if an authentication exception occurred.
+   */
+  public HttpURLConnection openConnection(URL url, Token token, String doAs)
+      throws IOException, AuthenticationException {
+    Preconditions.checkNotNull(url, "url");
+    Preconditions.checkNotNull(token, "token");
+    Map<String, String> extraParams = new HashMap<String, String>();
+
+    // delegation token
+    Credentials creds = UserGroupInformation.getCurrentUser().getCredentials();
+    if (!creds.getAllTokens().isEmpty()) {
+      InetSocketAddress serviceAddr = new InetSocketAddress(url.getHost(),
+          url.getPort());
+      Text service = SecurityUtil.buildTokenService(serviceAddr);
+      org.apache.hadoop.security.token.Token<? extends TokenIdentifier> dt =
+          creds.getToken(service);
+      if (dt != null) {
+        extraParams.put(KerberosDelegationTokenAuthenticator.DELEGATION_PARAM,
+            dt.encodeToUrlString());
+      }
+    }
+
+    url = augmentURL(url, extraParams);
+    return super.openConnection(url, token);
+  }
+
+  /**
+   * Requests a delegation token using the configured <code>Authenticator</code>
+   * for authentication.
+   *
+   * @param url the URL to get the delegation token from. Only HTTP/S URLs are
+   * supported.
+   * @param token the authentication token being used for the user where the
+   * Delegation token will be stored.
+   * @param renewer the user allowed to renew the delegation token.
+   * @return a delegation token.
+   * @throws IOException if an IO error occurred.
+   * @throws AuthenticationException if an authentication exception occurred.
+   */
+  public org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier>
+      getDelegationToken(URL url, Token token, String renewer)
+          throws IOException, AuthenticationException {
+    Preconditions.checkNotNull(url, "url");
+    Preconditions.checkNotNull(token, "token");
+    try {
+      token.delegationToken =
+          ((KerberosDelegationTokenAuthenticator) getAuthenticator()).
+              getDelegationToken(url, token, renewer);
+      return token.delegationToken;
+    } catch (IOException ex) {
+      token.delegationToken = null;
+      throw ex;
+    }
+  }
+
+  /**
+   * Renews a delegation token from the server end-point using the
+   * configured <code>Authenticator</code> for authentication.
+   *
+   * @param url the URL to renew the delegation token from. Only HTTP/S URLs are
+   * supported.
+   * @param token the authentication token with the Delegation Token to renew.
+   * @throws IOException if an IO error occurred.
+   * @throws AuthenticationException if an authentication exception occurred.
+   */
+  public long renewDelegationToken(URL url, Token token)
+      throws IOException, AuthenticationException {
+    Preconditions.checkNotNull(url, "url");
+    Preconditions.checkNotNull(token, "token");
+    Preconditions.checkNotNull(token.delegationToken,
+        "No delegation token available");
+    try {
+      return ((KerberosDelegationTokenAuthenticator) getAuthenticator()).
+          renewDelegationToken(url, token, token.delegationToken);
+    } catch (IOException ex) {
+      token.delegationToken = null;
+      throw ex;
+    }
+  }
+
+  /**
+   * Cancels a delegation token from the server end-point. It does not require
+   * being authenticated by the configured <code>Authenticator</code>.
+   *
+   * @param url the URL to cancel the delegation token from. Only HTTP/S URLs
+   * are supported.
+   * @param token the authentication token with the Delegation Token to cancel.
+   * @throws IOException if an IO error occurred.
+   */
+  public void cancelDelegationToken(URL url, Token token)
+      throws IOException {
+    Preconditions.checkNotNull(url, "url");
+    Preconditions.checkNotNull(token, "token");
+    Preconditions.checkNotNull(token.delegationToken,
+        "No delegation token available");
+    try {
+      ((KerberosDelegationTokenAuthenticator) getAuthenticator()).
+          cancelDelegationToken(url, token, token.delegationToken);
+    } finally {
+      token.delegationToken = null;
+    }
+  }
+
+}
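
A minimal client-side sketch of the lifecycle this new class exposes, end to end (the endpoint URL and renewer name are hypothetical):

    import java.net.HttpURLConnection;
    import java.net.URL;

    import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
    import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;

    public class DelegationTokenClientDemo {
      public static void main(String[] args) throws Exception {
        DelegationTokenAuthenticatedURL aUrl = new DelegationTokenAuthenticatedURL();
        DelegationTokenAuthenticatedURL.Token token =
            new DelegationTokenAuthenticatedURL.Token();
        URL url = new URL("http://host:14000/service"); // hypothetical endpoint

        // Authenticates (Kerberos SPNEGO by default) and stores the DT in token.
        org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier> dt =
            aUrl.getDelegationToken(url, token, "renewer"); // hypothetical renewer

        // Later connections ride on the stored delegation token.
        HttpURLConnection conn = aUrl.openConnection(url, token);
        System.out.println(conn.getResponseCode());

        aUrl.renewDelegationToken(url, token);
        aUrl.cancelDelegationToken(url, token);
      }
    }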

+ 102 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java

@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletContext;
+import javax.servlet.ServletException;
+import java.util.Properties;
+
+/**
+ *  The <code>DelegationTokenAuthenticationFilter</code> filter is an
+ *  {@link AuthenticationFilter} with Hadoop Delegation Token support.
+ *  <p/>
+ *  By default it uses its own instance of the {@link
+ *  AbstractDelegationTokenSecretManager}. For situations where an external
+ *  <code>AbstractDelegationTokenSecretManager</code> is required (i.e. one that
+ *  shares the secret with an <code>AbstractDelegationTokenSecretManager</code>
+ *  instance running in other services), the external
+ *  <code>AbstractDelegationTokenSecretManager</code> must be set as an
+ *  attribute in the {@link ServletContext} of the web application using the
+ *  {@link #DELEGATION_TOKEN_SECRET_MANAGER_ATTR} attribute name (
+ *  'hadoop.http.delegation-token-secret-manager').
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class DelegationTokenAuthenticationFilter
+    extends AuthenticationFilter {
+
+  /**
+   * Sets an external <code>DelegationTokenSecretManager</code> instance to
+   * manage creation and verification of Delegation Tokens.
+   * <p/>
+   * This is useful for use cases where secrets must be shared across multiple
+   * services.
+   */
+
+  public static final String DELEGATION_TOKEN_SECRET_MANAGER_ATTR =
+      "hadoop.http.delegation-token-secret-manager";
+
+  /**
+   * It delegates to
+   * {@link AuthenticationFilter#getConfiguration(String, FilterConfig)} and
+   * then overrides the {@link AuthenticationHandler} to use if authentication
+   * type is set to <code>simple</code> or <code>kerberos</code> in order to use
+   * the corresponding implementation with delegation token support.
+   *
+   * @param configPrefix parameter not used.
+   * @param filterConfig parameter not used.
+   * @return hadoop-auth de-prefixed configuration for the filter and handler.
+   */
+  @Override
+  protected Properties getConfiguration(String configPrefix,
+      FilterConfig filterConfig) throws ServletException {
+    Properties props = super.getConfiguration(configPrefix, filterConfig);
+    String authType = props.getProperty(AUTH_TYPE);
+    if (authType.equals(PseudoAuthenticationHandler.TYPE)) {
+      props.setProperty(AUTH_TYPE,
+          PseudoDelegationTokenAuthenticationHandler.class.getName());
+    } else if (authType.equals(KerberosAuthenticationHandler.TYPE)) {
+      props.setProperty(AUTH_TYPE,
+          KerberosDelegationTokenAuthenticationHandler.class.getName());
+    }
+    return props;
+  }
+
+  @Override
+  public void init(FilterConfig filterConfig) throws ServletException {
+    super.init(filterConfig);
+    AbstractDelegationTokenSecretManager dtSecretManager =
+        (AbstractDelegationTokenSecretManager) filterConfig.getServletContext().
+            getAttribute(DELEGATION_TOKEN_SECRET_MANAGER_ATTR);
+    if (dtSecretManager != null && getAuthenticationHandler()
+        instanceof DelegationTokenAuthenticationHandler) {
+      DelegationTokenAuthenticationHandler handler =
+          (DelegationTokenAuthenticationHandler) getAuthenticationHandler();
+      handler.setExternalDelegationTokenSecretManager(dtSecretManager);
+    }
+  }
+}
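
A minimal sketch of handing an external secret manager to the filter through the servlet context, per the attribute contract above (the class and call site are hypothetical):

    import javax.servlet.ServletContext;

    import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
    import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;

    public class SecretManagerWiring {
      // Must run before the filter's init(), e.g. from a ServletContextListener.
      static void shareSecretManager(ServletContext ctx,
          AbstractDelegationTokenSecretManager secretManager) {
        ctx.setAttribute(
            DelegationTokenAuthenticationFilter.DELEGATION_TOKEN_SECRET_MANAGER_ATTR,
            secretManager);
      }
    }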

+ 355 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java

@@ -0,0 +1,355 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.AuthenticationToken;
+import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.MediaType;
+import java.io.IOException;
+import java.io.Writer;
+import java.text.MessageFormat;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+
+/**
+ * An {@link AuthenticationHandler} that implements the Kerberos SPNEGO mechanism
+ * for HTTP and supports Delegation Token functionality.
+ * <p/>
+ * In addition to the wrapped {@link AuthenticationHandler} configuration
+ * properties, this handler supports the following properties prefixed
+ * with the type of the wrapped <code>AuthenticationHandler</code>:
+ * <ul>
+ * <li>delegation-token.token-kind: the token kind for generated tokens
+ * (no default, required property).</li>
+ * <li>delegation-token.update-interval.sec: secret manager master key
+ * update interval in seconds (default 1 day).</li>
+ * <li>delegation-token.max-lifetime.sec: maximum life of a delegation
+ * token in seconds (default 7 days).</li>
+ * <li>delegation-token.renewal-interval.sec: renewal interval for
+ * delegation tokens in seconds (default 1 day).</li>
+ * <li>delegation-token.removal-scan-interval.sec: delegation tokens
+ * removal scan interval in seconds (default 1 hour).</li>
+ * </ul>
+ *
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public abstract class DelegationTokenAuthenticationHandler
+    implements AuthenticationHandler {
+
+  protected static final String TYPE_POSTFIX = "-dt";
+
+  public static final String PREFIX = "delegation-token.";
+
+  public static final String TOKEN_KIND = PREFIX + "token-kind.sec";
+
+  public static final String UPDATE_INTERVAL = PREFIX + "update-interval.sec";
+  public static final long UPDATE_INTERVAL_DEFAULT = 24 * 60 * 60;
+
+  public static final String MAX_LIFETIME = PREFIX + "max-lifetime.sec";
+  public static final long MAX_LIFETIME_DEFAULT = 7 * 24 * 60 * 60;
+
+  public static final String RENEW_INTERVAL = PREFIX + "renew-interval.sec";
+  public static final long RENEW_INTERVAL_DEFAULT = 24 * 60 * 60;
+
+  public static final String REMOVAL_SCAN_INTERVAL = PREFIX +
+      "removal-scan-interval.sec";
+  public static final long REMOVAL_SCAN_INTERVAL_DEFAULT = 60 * 60;
+
+  private static final Set<String> DELEGATION_TOKEN_OPS = new HashSet<String>();
+
+  static {
+    DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
+        DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
+    DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
+        DelegationTokenOperation.RENEWDELEGATIONTOKEN.toString());
+    DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
+        DelegationTokenOperation.CANCELDELEGATIONTOKEN.toString());
+  }
+
+  private AuthenticationHandler authHandler;
+  private DelegationTokenManager tokenManager;
+  private String authType;
+
+  public DelegationTokenAuthenticationHandler(AuthenticationHandler handler) {
+    authHandler = handler;
+    authType = handler.getType();
+  }
+
+  @VisibleForTesting
+  DelegationTokenManager getTokenManager() {
+    return tokenManager;
+  }
+
+  @Override
+  public void init(Properties config) throws ServletException {
+    authHandler.init(config);
+    initTokenManager(config);
+  }
+
+  /**
+   * Sets an external <code>DelegationTokenSecretManager</code> instance to
+   * manage creation and verification of Delegation Tokens.
+   * <p/>
+   * This is useful for use cases where secrets must be shared across multiple
+   * services.
+   *
+   * @param secretManager a <code>DelegationTokenSecretManager</code> instance
+   */
+  public void setExternalDelegationTokenSecretManager(
+      AbstractDelegationTokenSecretManager secretManager) {
+    tokenManager.setExternalDelegationTokenSecretManager(secretManager);
+  }
+
+  @VisibleForTesting
+  @SuppressWarnings("unchecked")
+  public void initTokenManager(Properties config) {
+    String configPrefix = authHandler.getType() + ".";
+    Configuration conf = new Configuration(false);
+    for (Map.Entry entry : config.entrySet()) {
+      conf.set((String) entry.getKey(), (String) entry.getValue());
+    }
+    String tokenKind = conf.get(TOKEN_KIND);
+    if (tokenKind == null) {
+      throw new IllegalArgumentException(
+          "The configuration does not define the token kind");
+    }
+    tokenKind = tokenKind.trim();
+    long updateInterval = conf.getLong(configPrefix + UPDATE_INTERVAL,
+        UPDATE_INTERVAL_DEFAULT);
+    long maxLifeTime = conf.getLong(configPrefix + MAX_LIFETIME,
+        MAX_LIFETIME_DEFAULT);
+    long renewInterval = conf.getLong(configPrefix + RENEW_INTERVAL,
+        RENEW_INTERVAL_DEFAULT);
+    long removalScanInterval = conf.getLong(
+        configPrefix + REMOVAL_SCAN_INTERVAL, REMOVAL_SCAN_INTERVAL_DEFAULT);
+    tokenManager = new DelegationTokenManager(new Text(tokenKind),
+        updateInterval * 1000, maxLifeTime * 1000, renewInterval * 1000,
+        removalScanInterval * 1000);
+    tokenManager.init();
+  }
+
+  @Override
+  public void destroy() {
+    tokenManager.destroy();
+    authHandler.destroy();
+  }
+
+  @Override
+  public String getType() {
+    return authType;
+  }
+
+  private static final String ENTER = System.getProperty("line.separator");
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public boolean managementOperation(AuthenticationToken token,
+      HttpServletRequest request, HttpServletResponse response)
+      throws IOException, AuthenticationException {
+    boolean requestContinues = true;
+    String op = ServletUtils.getParameter(request,
+        KerberosDelegationTokenAuthenticator.OP_PARAM);
+    op = (op != null) ? op.toUpperCase() : null;
+    if (DELEGATION_TOKEN_OPS.contains(op) &&
+        !request.getMethod().equals("OPTIONS")) {
+      KerberosDelegationTokenAuthenticator.DelegationTokenOperation dtOp =
+          KerberosDelegationTokenAuthenticator.
+              DelegationTokenOperation.valueOf(op);
+      if (dtOp.getHttpMethod().equals(request.getMethod())) {
+        boolean doManagement;
+        if (dtOp.requiresKerberosCredentials() && token == null) {
+          token = authenticate(request, response);
+          if (token == null) {
+            requestContinues = false;
+            doManagement = false;
+          } else {
+            doManagement = true;
+          }
+        } else {
+          doManagement = true;
+        }
+        if (doManagement) {
+          UserGroupInformation requestUgi = (token != null)
+              ? UserGroupInformation.createRemoteUser(token.getUserName())
+              : null;
+          Map map = null;
+          switch (dtOp) {
+            case GETDELEGATIONTOKEN:
+              if (requestUgi == null) {
+                throw new IllegalStateException("request UGI cannot be NULL");
+              }
+              String renewer = ServletUtils.getParameter(request,
+                  KerberosDelegationTokenAuthenticator.RENEWER_PARAM);
+              try {
+                Token<?> dToken = tokenManager.createToken(requestUgi, renewer);
+                map = delegationTokenToJSON(dToken);
+              } catch (IOException ex) {
+                throw new AuthenticationException(ex.toString(), ex);
+              }
+              break;
+            case RENEWDELEGATIONTOKEN:
+              if (requestUgi == null) {
+                throw new IllegalStateException("request UGI cannot be NULL");
+              }
+              String tokenToRenew = ServletUtils.getParameter(request,
+                  KerberosDelegationTokenAuthenticator.TOKEN_PARAM);
+              if (tokenToRenew == null) {
+                response.sendError(HttpServletResponse.SC_BAD_REQUEST,
+                    MessageFormat.format(
+                        "Operation [{0}] requires the parameter [{1}]", dtOp,
+                        KerberosDelegationTokenAuthenticator.TOKEN_PARAM)
+                );
+                requestContinues = false;
+              } else {
+                Token<DelegationTokenIdentifier> dt =
+                    new Token<DelegationTokenIdentifier>();
+                try {
+                  dt.decodeFromUrlString(tokenToRenew);
+                  long expirationTime = tokenManager.renewToken(dt,
+                      requestUgi.getShortUserName());
+                  map = new HashMap();
+                  map.put("long", expirationTime);
+                } catch (IOException ex) {
+                  throw new AuthenticationException(ex.toString(), ex);
+                }
+              }
+              break;
+            case CANCELDELEGATIONTOKEN:
+              String tokenToCancel = ServletUtils.getParameter(request,
+                  KerberosDelegationTokenAuthenticator.TOKEN_PARAM);
+              if (tokenToCancel == null) {
+                response.sendError(HttpServletResponse.SC_BAD_REQUEST,
+                    MessageFormat.format(
+                        "Operation [{0}] requires the parameter [{1}]", dtOp,
+                        KerberosDelegationTokenAuthenticator.TOKEN_PARAM)
+                );
+                requestContinues = false;
+              } else {
+                Token<DelegationTokenIdentifier> dt =
+                    new Token<DelegationTokenIdentifier>();
+                try {
+                  dt.decodeFromUrlString(tokenToCancel);
+                  tokenManager.cancelToken(dt, (requestUgi != null)
+                      ? requestUgi.getShortUserName() : null);
+                } catch (IOException ex) {
+                  response.sendError(HttpServletResponse.SC_NOT_FOUND,
+                      "Invalid delegation token, cannot cancel");
+                  requestContinues = false;
+                }
+              }
+              break;
+          }
+          if (requestContinues) {
+            response.setStatus(HttpServletResponse.SC_OK);
+            if (map != null) {
+              response.setContentType(MediaType.APPLICATION_JSON);
+              Writer writer = response.getWriter();
+              ObjectMapper jsonMapper = new ObjectMapper();
+              jsonMapper.writeValue(writer, map);
+              writer.write(ENTER);
+              writer.flush();
+            }
+            requestContinues = false;
+          }
+        }
+      } else {
+        response.sendError(HttpServletResponse.SC_BAD_REQUEST,
+            MessageFormat.format(
+                "Wrong HTTP method [{0}] for operation [{1}], it should be " +
+                    "[{2}]", request.getMethod(), dtOp, dtOp.getHttpMethod()));
+        requestContinues = false;
+      }
+    }
+    return requestContinues;
+  }
+
+  @SuppressWarnings("unchecked")
+  private static Map delegationTokenToJSON(Token token) throws IOException {
+    Map json = new LinkedHashMap();
+    json.put(
+        KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON,
+        token.encodeToUrlString());
+    Map response = new LinkedHashMap();
+    response.put(KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_JSON,
+        json);
+    return response;
+  }
+
+  /**
+   * Authenticates a request by looking for the <code>delegation</code>
+   * query-string parameter and verifying it is a valid token. If there is no
+   * <code>delegation</code> query-string parameter, it delegates the
+   * authentication to the {@link KerberosAuthenticationHandler} unless it is
+   * disabled.
+   *
+   * @param request the HTTP client request.
+   * @param response the HTTP client response.
+   * @return the authentication token for the authenticated request.
+   * @throws IOException thrown if an IO error occurred.
+   * @throws AuthenticationException thrown if the authentication failed.
+   */
+  @Override
+  public AuthenticationToken authenticate(HttpServletRequest request,
+      HttpServletResponse response)
+      throws IOException, AuthenticationException {
+    AuthenticationToken token;
+    String delegationParam = ServletUtils.getParameter(request,
+        KerberosDelegationTokenAuthenticator.DELEGATION_PARAM);
+    if (delegationParam != null) {
+      try {
+        Token<DelegationTokenIdentifier> dt =
+            new Token<DelegationTokenIdentifier>();
+        dt.decodeFromUrlString(delegationParam);
+        UserGroupInformation ugi = tokenManager.verifyToken(dt);
+        final String shortName = ugi.getShortUserName();
+
+        // creating an ephemeral token
+        token = new AuthenticationToken(shortName, ugi.getUserName(),
+            getType());
+        token.setExpires(0);
+      } catch (Throwable ex) {
+        throw new AuthenticationException("Could not verify DelegationToken, " +
+            ex.toString(), ex);
+      }
+    } else {
+      token = authHandler.authenticate(request, response);
+    }
+    return token;
+  }
+
+}
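
Following initTokenManager() above, the token kind is read without a prefix while the interval keys carry the wrapped handler's type as prefix. A minimal configuration sketch (the values and the "simple." prefix are hypothetical):

    import java.util.Properties;

    import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;

    public class HandlerConfigDemo {
      public static void main(String[] args) {
        Properties props = new Properties();
        // Required; read without a handler-type prefix in initTokenManager().
        props.setProperty(DelegationTokenAuthenticationHandler.TOKEN_KIND, "MY-KIND");
        // Interval keys are looked up as "<wrapped-type>." + key.
        props.setProperty("simple." + DelegationTokenAuthenticationHandler.MAX_LIFETIME,
            String.valueOf(7 * 24 * 60 * 60));
        props.setProperty("simple." + DelegationTokenAuthenticationHandler.RENEW_INTERVAL,
            String.valueOf(24 * 60 * 60));
        // A concrete subclass would then receive these via init(props).
      }
    }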

+ 250 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java

@@ -0,0 +1,250 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.Authenticator;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
+import java.net.URL;
+import java.net.URLEncoder;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * {@link Authenticator} wrapper that enhances an {@link Authenticator} with
+ * Delegation Token support.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class DelegationTokenAuthenticator implements Authenticator {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DelegationTokenAuthenticator.class);
+
+  private static final String CONTENT_TYPE = "Content-Type";
+  private static final String APPLICATION_JSON_MIME = "application/json";
+
+  private static final String HTTP_GET = "GET";
+  private static final String HTTP_PUT = "PUT";
+
+  public static final String OP_PARAM = "op";
+
+  public static final String DELEGATION_PARAM = "delegation";
+  public static final String TOKEN_PARAM = "token";
+  public static final String RENEWER_PARAM = "renewer";
+  public static final String DELEGATION_TOKEN_JSON = "Token";
+  public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString";
+  public static final String RENEW_DELEGATION_TOKEN_JSON = "long";
+
+  /**
+   * DelegationToken operations.
+   */
+  @InterfaceAudience.Private
+  public static enum DelegationTokenOperation {
+    GETDELEGATIONTOKEN(HTTP_GET, true),
+    RENEWDELEGATIONTOKEN(HTTP_PUT, true),
+    CANCELDELEGATIONTOKEN(HTTP_PUT, false);
+
+    private String httpMethod;
+    private boolean requiresKerberosCredentials;
+
+    private DelegationTokenOperation(String httpMethod,
+        boolean requiresKerberosCredentials) {
+      this.httpMethod = httpMethod;
+      this.requiresKerberosCredentials = requiresKerberosCredentials;
+    }
+
+    public String getHttpMethod() {
+      return httpMethod;
+    }
+
+    public boolean requiresKerberosCredentials() {
+      return requiresKerberosCredentials;
+    }
+  }
+
+  private Authenticator authenticator;
+
+  public DelegationTokenAuthenticator(Authenticator authenticator) {
+    this.authenticator = authenticator;
+  }
+
+  @Override
+  public void setConnectionConfigurator(ConnectionConfigurator configurator) {
+    authenticator.setConnectionConfigurator(configurator);
+  }
+
+  private boolean hasDelegationToken(URL url) {
+    String queryStr = url.getQuery();
+    return (queryStr != null) && queryStr.contains(DELEGATION_PARAM + "=");
+  }
+
+  @Override
+  public void authenticate(URL url, AuthenticatedURL.Token token)
+      throws IOException, AuthenticationException {
+    if (!hasDelegationToken(url)) {
+      authenticator.authenticate(url, token);
+    }
+  }
+
+  /**
+   * Requests a delegation token using the configured <code>Authenticator</code>
+   * for authentication.
+   *
+   * @param url the URL to get the delegation token from. Only HTTP/S URLs are
+   * supported.
+   * @param token the authentication token being used for the user where the
+   * Delegation token will be stored.
+   * @param renewer the renewer user.
+   * @return a delegation token.
+   * @throws IOException if an IO error occurred.
+   * @throws AuthenticationException if an authentication exception occurred.
+   */
+  public Token<AbstractDelegationTokenIdentifier> getDelegationToken(URL url,
+      AuthenticatedURL.Token token, String renewer)
+      throws IOException, AuthenticationException {
+    Map json = doDelegationTokenOperation(url, token,
+        DelegationTokenOperation.GETDELEGATIONTOKEN, renewer, null, true);
+    json = (Map) json.get(DELEGATION_TOKEN_JSON);
+    String tokenStr = (String) json.get(DELEGATION_TOKEN_URL_STRING_JSON);
+    Token<AbstractDelegationTokenIdentifier> dToken =
+        new Token<AbstractDelegationTokenIdentifier>();
+    dToken.decodeFromUrlString(tokenStr);
+    InetSocketAddress service = new InetSocketAddress(url.getHost(),
+        url.getPort());
+    SecurityUtil.setTokenService(dToken, service);
+    return dToken;
+  }
+
+  /**
+   * Renews a delegation token from the server end-point using the
+   * configured <code>Authenticator</code> for authentication.
+   *
+   * @param url the URL to renew the delegation token from. Only HTTP/S URLs are
+   * supported.
+   * @param token the authentication token used to connect to the end-point.
+   * @param dToken the delegation token to renew.
+   * @return the new expiration time of the delegation token.
+   * @throws IOException if an IO error occurred.
+   * @throws AuthenticationException if an authentication exception occurred.
+   */
+  public long renewDelegationToken(URL url,
+      AuthenticatedURL.Token token,
+      Token<AbstractDelegationTokenIdentifier> dToken)
+      throws IOException, AuthenticationException {
+    Map json = doDelegationTokenOperation(url, token,
+        DelegationTokenOperation.RENEWDELEGATIONTOKEN, null, dToken, true);
+    return (Long) json.get(RENEW_DELEGATION_TOKEN_JSON);
+  }
+
+  /**
+   * Cancels a delegation token from the server end-point. It does not require
+   * being authenticated by the configured <code>Authenticator</code>.
+   *
+   * @param url the URL to cancel the delegation token from. Only HTTP/S URLs
+   * are supported.
+   * @param token the authentication token used to connect to the end-point.
+   * @param dToken the delegation token to cancel.
+   * @throws IOException if an IO error occurred.
+   */
+  public void cancelDelegationToken(URL url,
+      AuthenticatedURL.Token token,
+      Token<AbstractDelegationTokenIdentifier> dToken)
+      throws IOException {
+    try {
+      doDelegationTokenOperation(url, token,
+          DelegationTokenOperation.CANCELDELEGATIONTOKEN, null, dToken, false);
+    } catch (AuthenticationException ex) {
+      throw new IOException("This should not happen: " + ex.getMessage(), ex);
+    }
+  }
+
+  private Map doDelegationTokenOperation(URL url,
+      AuthenticatedURL.Token token, DelegationTokenOperation operation,
+      String renewer, Token<?> dToken, boolean hasResponse)
+      throws IOException, AuthenticationException {
+    Map ret = null;
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, operation.toString());
+    if (renewer != null) {
+      params.put(RENEWER_PARAM, renewer);
+    }
+    if (dToken != null) {
+      params.put(TOKEN_PARAM, dToken.encodeToUrlString());
+    }
+    String urlStr = url.toExternalForm();
+    StringBuilder sb = new StringBuilder(urlStr);
+    String separator = (urlStr.contains("?")) ? "&" : "?";
+    for (Map.Entry<String, String> entry : params.entrySet()) {
+      sb.append(separator).append(entry.getKey()).append("=").
+          append(URLEncoder.encode(entry.getValue(), "UTF8"));
+      separator = "&";
+    }
+    url = new URL(sb.toString());
+    AuthenticatedURL aUrl = new AuthenticatedURL(this);
+    HttpURLConnection conn = aUrl.openConnection(url, token);
+    conn.setRequestMethod(operation.getHttpMethod());
+    validateResponse(conn, HttpURLConnection.HTTP_OK);
+    if (hasResponse) {
+      String contentType = conn.getHeaderField(CONTENT_TYPE);
+      contentType = (contentType != null) ? contentType.toLowerCase()
+                                          : null;
+      if (contentType != null &&
+          contentType.contains(APPLICATION_JSON_MIME)) {
+        try {
+          ObjectMapper mapper = new ObjectMapper();
+          ret = mapper.readValue(conn.getInputStream(), Map.class);
+        } catch (Exception ex) {
+          throw new AuthenticationException(String.format(
+              "'%s' did not handle the '%s' delegation token operation: %s",
+              url.getAuthority(), operation, ex.getMessage()), ex);
+        }
+      } else {
+        throw new AuthenticationException(String.format("'%s' did not " +
+                "respond with JSON to the '%s' delegation token operation",
+            url.getAuthority(), operation));
+      }
+    }
+    return ret;
+  }
+
+  @SuppressWarnings("unchecked")
+  private static void validateResponse(HttpURLConnection conn, int expected)
+      throws IOException {
+    int status = conn.getResponseCode();
+    if (status != expected) {
+      try {
+        conn.getInputStream().close();
+      } catch (IOException ex) {
+        //NOP
+      }
+      String msg = String.format("HTTP status, expected [%d], got [%d]: %s", 
+          expected, status, conn.getResponseMessage());
+      LOG.debug(msg);
+      throw new IOException(msg);
+    }
+  }
+
+}
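
The three public methods above map directly onto the HTTP calls made by doDelegationTokenOperation. A minimal sketch of driving them from a client follows; the endpoint URL and renewer name are hypothetical, and the GET/RENEW calls assume valid Kerberos credentials (e.g. a kinit-ed ticket cache), per the requiresKerberosCredentials flags of the enum.

import java.net.URL;

import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;

// A minimal sketch; endpoint and renewer are illustrative values.
public class DelegationTokenOpsSketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://host:14000/service");
    AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
    KerberosDelegationTokenAuthenticator auth =
        new KerberosDelegationTokenAuthenticator();

    // GETDELEGATIONTOKEN (HTTP GET, requires Kerberos credentials)
    Token<AbstractDelegationTokenIdentifier> dt =
        auth.getDelegationToken(url, authToken, "renewerUser");

    // RENEWDELEGATIONTOKEN (HTTP PUT, requires Kerberos credentials)
    long newExpiration = auth.renewDelegationToken(url, authToken, dt);
    System.out.println("renewed until: " + newExpiration);

    // CANCELDELEGATIONTOKEN (HTTP PUT, no Kerberos credentials required)
    auth.cancelDelegationToken(url, authToken, dt);
  }
}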

+ 10 - 7
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenIdentifier.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenIdentifier.java

@@ -15,21 +15,24 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.lib.service;
+package org.apache.hadoop.security.token.delegation.web;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 
 /**
- * HttpFS <code>DelegationTokenIdentifier</code> implementation.
+ * Concrete delegation token identifier used by {@link DelegationTokenManager},
+ * {@link KerberosDelegationTokenAuthenticationHandler} and
+ * {@link DelegationTokenAuthenticationFilter}.
  */
 @InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class DelegationTokenIdentifier
-  extends AbstractDelegationTokenIdentifier {
+    extends AbstractDelegationTokenIdentifier {
 
-  private Text kind = WebHdfsFileSystem.TOKEN_KIND;
+  private Text kind;
 
   public DelegationTokenIdentifier(Text kind) {
     this.kind = kind;
@@ -50,8 +53,8 @@ public class DelegationTokenIdentifier
   }
 
   /**
-   * Returns the kind, <code>TOKEN_KIND</code>.
-   * @return returns <code>TOKEN_KIND</code>.
+   * Returns the delegation token kind.
+   * @return the delegation token kind.
    */
   @Override
   public Text getKind() {

+ 153 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenManager.java

@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+
+/**
+ * Delegation Token Manager used by the
+ * {@link KerberosDelegationTokenAuthenticationHandler} and the other
+ * {@link DelegationTokenAuthenticationHandler} variants.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+class DelegationTokenManager {
+
+  private static class DelegationTokenSecretManager
+      extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
+
+    private Text tokenKind;
+
+    public DelegationTokenSecretManager(Text tokenKind,
+        long delegationKeyUpdateInterval,
+        long delegationTokenMaxLifetime,
+        long delegationTokenRenewInterval,
+        long delegationTokenRemoverScanInterval) {
+      super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
+          delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
+      this.tokenKind = tokenKind;
+    }
+
+    @Override
+    public DelegationTokenIdentifier createIdentifier() {
+      return new DelegationTokenIdentifier(tokenKind);
+    }
+
+  }
+
+  private AbstractDelegationTokenSecretManager secretManager = null;
+  private boolean managedSecretManager;
+  private Text tokenKind;
+
+  public DelegationTokenManager(Text tokenKind,
+      long delegationKeyUpdateInterval,
+      long delegationTokenMaxLifetime,
+      long delegationTokenRenewInterval,
+      long delegationTokenRemoverScanInterval) {
+    this.secretManager = new DelegationTokenSecretManager(tokenKind,
+        delegationKeyUpdateInterval, delegationTokenMaxLifetime,
+        delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
+    this.tokenKind = tokenKind;
+    managedSecretManager = true;
+  }
+
+  /**
+   * Sets an external <code>DelegationTokenSecretManager</code> instance to
+   * manage creation and verification of Delegation Tokens.
+   * <p/>
+   * This is useful for use cases where secrets must be shared across multiple
+   * services.
+   *
+   * @param secretManager a <code>DelegationTokenSecretManager</code> instance
+   */
+  public void setExternalDelegationTokenSecretManager(
+      AbstractDelegationTokenSecretManager secretManager) {
+    this.secretManager.stopThreads();
+    this.secretManager = secretManager;
+    this.tokenKind = secretManager.createIdentifier().getKind();
+    managedSecretManager = false;
+  }
+
+  public void init() {
+    if (managedSecretManager) {
+      try {
+        secretManager.startThreads();
+      } catch (IOException ex) {
+        throw new RuntimeException("Could not start " +
+            secretManager.getClass() + ": " + ex.toString(), ex);
+      }
+    }
+  }
+
+  public void destroy() {
+    if (managedSecretManager) {
+      secretManager.stopThreads();
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  public Token<DelegationTokenIdentifier> createToken(UserGroupInformation ugi,
+      String renewer) {
+    renewer = (renewer == null) ? ugi.getShortUserName() : renewer;
+    String user = ugi.getUserName();
+    Text owner = new Text(user);
+    Text realUser = null;
+    if (ugi.getRealUser() != null) {
+      realUser = new Text(ugi.getRealUser().getUserName());
+    }
+    DelegationTokenIdentifier tokenIdentifier = new DelegationTokenIdentifier(
+        tokenKind, owner, new Text(renewer), realUser);
+    return new Token<DelegationTokenIdentifier>(tokenIdentifier, secretManager);
+  }
+
+  @SuppressWarnings("unchecked")
+  public long renewToken(Token<DelegationTokenIdentifier> token, String renewer)
+      throws IOException {
+    return secretManager.renewToken(token, renewer);
+  }
+
+  @SuppressWarnings("unchecked")
+  public void cancelToken(Token<DelegationTokenIdentifier> token,
+      String canceler) throws IOException {
+    canceler = (canceler != null) ? canceler :
+               verifyToken(token).getShortUserName();
+    secretManager.cancelToken(token, canceler);
+  }
+
+  @SuppressWarnings("unchecked")
+  public UserGroupInformation verifyToken(Token<DelegationTokenIdentifier>
+      token) throws IOException {
+    ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
+    DataInputStream dis = new DataInputStream(buf);
+    DelegationTokenIdentifier id = new DelegationTokenIdentifier(tokenKind);
+    id.readFields(dis);
+    dis.close();
+    secretManager.verifyToken(id, token.getPassword());
+    return id.getUser();
+  }
+
+}
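
The manager owns the full token lifecycle, and setExternalDelegationTokenSecretManager lets several services share one secret manager so they can validate each other's tokens. A minimal lifecycle sketch follows; it assumes same-package access (the class is package-private), and the token kind and interval values are illustrative, mirroring the TestDelegationTokenManager unit test added in this change.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

// A minimal sketch, assuming same-package access; values are illustrative.
class DelegationTokenManagerSketch {
  public static void main(String[] args) throws Exception {
    long interval = 86400;
    DelegationTokenManager tm = new DelegationTokenManager(
        new Text("sketch-kind"), interval, interval, interval, interval);
    tm.init(); // starts the managed secret manager threads
    Token<DelegationTokenIdentifier> token =
        tm.createToken(UserGroupInformation.getCurrentUser(), "renewerUser");
    UserGroupInformation owner = tm.verifyToken(token); // decode + validate
    System.out.println("token owner: " + owner.getShortUserName());
    tm.cancelToken(token, owner.getShortUserName());
    tm.destroy(); // stops the managed secret manager threads
  }
}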

+ 54 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/KerberosDelegationTokenAuthenticationHandler.java

@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+
+/**
+ * An {@link AuthenticationHandler} that implements the Kerberos SPNEGO
+ * mechanism for HTTP and supports Delegation Token functionality.
+ * <p/>
+ * In addition to the {@link KerberosAuthenticationHandler} configuration
+ * properties, this handler supports:
+ * <ul>
+ * <li>kerberos.delegation-token.token-kind: the token kind for generated tokens
+ * (no default, required property).</li>
+ * <li>kerberos.delegation-token.update-interval.sec: secret manager master key
+ * update interval in seconds (default 1 day).</li>
+ * <li>kerberos.delegation-token.max-lifetime.sec: maximum life of a delegation
+ * token in seconds (default 7 days).</li>
+ * <li>kerberos.delegation-token.renewal-interval.sec: renewal interval for
+ * delegation tokens in seconds (default 1 day).</li>
+ * <li>kerberos.delegation-token.removal-scan-interval.sec: delegation tokens
+ * removal scan interval in seconds (default 1 hour).</li>
+ * </ul>
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class KerberosDelegationTokenAuthenticationHandler
+    extends DelegationTokenAuthenticationHandler {
+
+  public KerberosDelegationTokenAuthenticationHandler() {
+    super(new KerberosAuthenticationHandler(KerberosAuthenticationHandler.TYPE +
+        TYPE_POSTFIX));
+  }
+
+}
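
The properties in the Javadoc list above are plain AuthenticationHandler init properties. A minimal initialization sketch follows, mirroring the mock-based unit test added in this change; it assumes same-package access to initTokenManager(), and the token kind value is illustrative.

import java.util.Properties;

// A minimal sketch; a full init(conf) would additionally require the usual
// Kerberos principal/keytab properties of KerberosAuthenticationHandler.
class KerberosHandlerConfigSketch {
  public static void main(String[] args) throws Exception {
    Properties conf = new Properties();
    // TOKEN_KIND maps to the token-kind property described in the Javadoc list
    conf.put(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND,
        "SKETCH_KIND");
    KerberosDelegationTokenAuthenticationHandler handler =
        new KerberosDelegationTokenAuthenticationHandler();
    handler.initTokenManager(conf);
    // ... wire the handler into an authentication filter here ...
    handler.destroy();
  }
}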

+ 46 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/KerberosDelegationTokenAuthenticator.java

@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.security.authentication.client.Authenticator;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
+
+/**
+ * The <code>KerberosDelegationTokenAuthenticator</code> provides support for
+ * Kerberos SPNEGO authentication mechanism and support for Hadoop Delegation
+ * Token operations.
+ * <p/>
+ * It falls back to the {@link PseudoDelegationTokenAuthenticator} if the HTTP
+ * endpoint does not trigger a SPNEGO authentication.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class KerberosDelegationTokenAuthenticator
+    extends DelegationTokenAuthenticator {
+
+  public KerberosDelegationTokenAuthenticator() {
+    super(new KerberosAuthenticator() {
+      @Override
+      protected Authenticator getFallBackAuthenticator() {
+        return new PseudoDelegationTokenAuthenticator();
+      }
+    });
+  }
+}
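
A minimal sketch of issuing an authenticated request through this class follows; the endpoint URL is hypothetical. If the endpoint sends a SPNEGO challenge the Kerberos sequence runs; otherwise the pseudo fallback configured above takes over.

import java.net.HttpURLConnection;
import java.net.URL;

import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;

// A minimal sketch; valid Kerberos credentials (e.g. from kinit) are assumed
// for the SPNEGO path.
public class SpnegoRequestSketch {
  public static void main(String[] args) throws Exception {
    AuthenticatedURL.Token token = new AuthenticatedURL.Token();
    AuthenticatedURL aUrl =
        new AuthenticatedURL(new KerberosDelegationTokenAuthenticator());
    HttpURLConnection conn =
        aUrl.openConnection(new URL("http://host:14000/service"), token);
    System.out.println("HTTP status: " + conn.getResponseCode());
  }
}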

+ 55 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/PseudoDelegationTokenAuthenticationHandler.java

@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
+
+/**
+ * An {@link AuthenticationHandler} that implements Hadoop's pseudo/simple
+ * authentication mechanism for HTTP and supports Delegation Token
+ * functionality.
+ * <p/>
+ * In addition to the {@link PseudoAuthenticationHandler} configuration
+ * properties, this handler supports:
+ * <ul>
+ * <li>simple.delegation-token.token-kind: the token kind for generated tokens
+ * (no default, required property).</li>
+ * <li>simple.delegation-token.update-interval.sec: secret manager master key
+ * update interval in seconds (default 1 day).</li>
+ * <li>simple.delegation-token.max-lifetime.sec: maximum life of a delegation
+ * token in seconds (default 7 days).</li>
+ * <li>simple.delegation-token.renewal-interval.sec: renewal interval for
+ * delegation tokens in seconds (default 1 day).</li>
+ * <li>simple.delegation-token.removal-scan-interval.sec: delegation tokens
+ * removal scan interval in seconds (default 1 hour).</li>
+ * </ul>
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class PseudoDelegationTokenAuthenticationHandler
+    extends DelegationTokenAuthenticationHandler {
+
+  public PseudoDelegationTokenAuthenticationHandler() {
+    super(new PseudoAuthenticationHandler(PseudoAuthenticationHandler.TYPE +
+        TYPE_POSTFIX));
+  }
+
+}

+ 25 - 18
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSPseudoAuthenticator.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/PseudoDelegationTokenAuthenticator.java

@@ -15,33 +15,40 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
-package org.apache.hadoop.fs.http.client;
+package org.apache.hadoop.security.token.delegation.web;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
 
 import java.io.IOException;
 
 /**
- * A <code>PseudoAuthenticator</code> subclass that uses FileSystemAccess's
- * <code>UserGroupInformation</code> to obtain the client user name (the UGI's login user).
+ * The <code>PseudoDelegationTokenAuthenticator</code> provides support for
+ * Hadoop's pseudo authentication mechanism, which accepts the user name
+ * specified as a query-string parameter, and for Hadoop Delegation Token
+ * operations.
+ * <p/>
+ * This mimics the model of Hadoop Simple authentication trusting the
+ * {@link UserGroupInformation#getCurrentUser()} value.
  */
-@InterfaceAudience.Private
-public class HttpFSPseudoAuthenticator extends PseudoAuthenticator {
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class PseudoDelegationTokenAuthenticator
+    extends DelegationTokenAuthenticator {
 
-  /**
-   * Return the client user name.
-   *
-   * @return the client user name.
-   */
-  @Override
-  protected String getUserName() {
-    try {
-      return UserGroupInformation.getLoginUser().getUserName();
-    } catch (IOException ex) {
-      throw new SecurityException("Could not obtain current user, " + ex.getMessage(), ex);
-    }
+  public PseudoDelegationTokenAuthenticator() {
+    super(new PseudoAuthenticator() {
+      @Override
+      protected String getUserName() {
+        try {
+          return UserGroupInformation.getCurrentUser().getShortUserName();
+        } catch (IOException ex) {
+          throw new RuntimeException(ex);
+        }
+      }
+    });
   }
+
 }
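
Note the switch from getLoginUser() in the old HttpFS authenticator to getCurrentUser() here: the asserted identity now respects doAs() contexts. A minimal sketch follows; the endpoint URL and proxied user are hypothetical.

import java.net.URL;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticator;

// A minimal sketch: because the authenticator reads getCurrentUser(), a
// request issued inside doAs() asserts the proxied user, not the login user.
public class PseudoDoAsSketch {
  public static void main(String[] args) throws Exception {
    UserGroupInformation proxy = UserGroupInformation.createProxyUser(
        "sketchUser", UserGroupInformation.getLoginUser());
    proxy.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        AuthenticatedURL.Token token = new AuthenticatedURL.Token();
        new AuthenticatedURL(new PseudoDelegationTokenAuthenticator())
            .openConnection(new URL("http://host:14000/service"), token);
        return null;
      }
    });
  }
}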

+ 59 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/ServletUtils.java

@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.http.NameValuePair;
+import org.apache.http.client.utils.URLEncodedUtils;
+
+import javax.servlet.http.HttpServletRequest;
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.util.List;
+
+/**
+ * Servlet utility methods.
+ */
+@InterfaceAudience.Private
+class ServletUtils {
+  private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
+
+  /**
+   * Extracts a query-string parameter without triggering HTTP parameter
+   * processing by the servlet container.
+   *
+   * @param request the request.
+   * @param name the name of the parameter to return.
+   * @return the parameter value, or <code>null</code> if the parameter is not
+   * defined.
+   * @throws IOException thrown if there was an error parsing the query string.
+   */
+  public static String getParameter(HttpServletRequest request, String name)
+      throws IOException {
+    List<NameValuePair> list = URLEncodedUtils.parse(request.getQueryString(),
+        UTF8_CHARSET);
+    if (list != null) {
+      for (NameValuePair nv : list) {
+        if (name.equals(nv.getName())) {
+          return nv.getValue();
+        }
+      }
+    }
+    return null;
+  }
+}
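
Parsing the raw query string here is a deliberate design choice: request.getParameter() can force the container to parse a POST form body, consuming the request entity before the application sees it. A minimal usage sketch follows, assuming same-package access (ServletUtils is package-private).

import java.io.IOException;

import javax.servlet.http.HttpServletRequest;

// A minimal sketch: reads only the query string, leaving any POST body
// untouched for the application to consume.
class ServletUtilsSketch {
  static String readOp(HttpServletRequest request) throws IOException {
    return ServletUtils.getParameter(request, "op"); // null when absent
  }
}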

+ 16 - 179
hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm

@@ -114,57 +114,18 @@ User Commands
 
 * <<<fs>>>
 
-   Usage: <<<hadoop fs [GENERIC_OPTIONS] [COMMAND_OPTIONS]>>>
-
-   Deprecated, use <<<hdfs dfs>>> instead.
-
-   Runs a generic filesystem user client.
-
-   The various COMMAND_OPTIONS can be found at File System Shell Guide.
+   Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#dfs}<<<hdfs dfs>>>}}
+   instead.
 
 * <<<fsck>>>
 
-   Runs a HDFS filesystem checking utility.
-   See {{{../hadoop-hdfs/HdfsUserGuide.html#fsck}fsck}} for more info.
-
-   Usage: <<<hadoop fsck [GENERIC_OPTIONS] <path> [-move | -delete | -openforwrite] [-files [-blocks [-locations | -racks]]] [-showprogress]>>>
-
-*------------------+---------------------------------------------+
-||  COMMAND_OPTION || Description
-*------------------+---------------------------------------------+
-|   <path>         | Start checking from this path.
-*------------------+---------------------------------------------+
-|   -move          | Move corrupted files to /lost+found
-*------------------+---------------------------------------------+
-|   -delete        | Delete corrupted files.
-*------------------+---------------------------------------------+
-|   -openforwrite  | Print out files opened for write.
-*------------------+---------------------------------------------+
-|   -files         | Print out files being checked.
-*------------------+---------------------------------------------+
-|   -blocks        | Print out block report.
-*------------------+---------------------------------------------+
-|   -locations     | Print out locations for every block.
-*------------------+---------------------------------------------+
-|   -racks         | Print out network topology for data-node locations.
-*------------------+---------------------------------------------+
-|   -showprogress  | Print out show progress in output. Default is OFF (no progress).
-*------------------+---------------------------------------------+
+   Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#fsck}<<<hdfs fsck>>>}}
+   instead.
 
 * <<<fetchdt>>>
 
-   Gets Delegation Token from a NameNode.
-   See {{{../hadoop-hdfs/HdfsUserGuide.html#fetchdt}fetchdt}} for more info.
-
-   Usage: <<<hadoop fetchdt [GENERIC_OPTIONS] [--webservice <namenode_http_addr>] <path> >>>
-
-*------------------------------+---------------------------------------------+
-|| COMMAND_OPTION              || Description
-*------------------------------+---------------------------------------------+
-| <fileName>                   | File name to store the token into.
-*------------------------------+---------------------------------------------+
-| --webservice <https_address> | use http protocol instead of RPC
-*------------------------------+---------------------------------------------+
+   Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#fetchdt}
+   <<<hdfs fetchdt>>>}} instead.
 
 * <<<jar>>>
 
@@ -321,23 +282,8 @@ Administration Commands
 
 * <<<balancer>>>
 
-   Runs a cluster balancing utility. An administrator can simply press Ctrl-C
-   to stop the rebalancing process. See
-   {{{../hadoop-hdfs/HdfsUserGuide.html#Balancer}Balancer}} for more details.
-
-   Usage: <<<hadoop balancer [-threshold <threshold>] [-policy <policy>]>>>
-
-*------------------------+-----------------------------------------------------------+
-|| COMMAND_OPTION        | Description
-*------------------------+-----------------------------------------------------------+
-| -threshold <threshold> | Percentage of disk capacity. This overwrites the
-                         | default threshold.
-*------------------------+-----------------------------------------------------------+
-| -policy <policy>       | <<<datanode>>> (default): Cluster is balanced if each datanode is balanced. \
-                         | <<<blockpool>>>: Cluster is balanced if each block pool in each datanode is balanced.
-*------------------------+-----------------------------------------------------------+
-
-   Note that the <<<blockpool>>> policy is more strict than the <<<datanode>>> policy.
+   Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#balancer}
+   <<<hdfs balancer>>>}} instead.
 
 * <<<daemonlog>>>
 
@@ -360,84 +306,13 @@ Administration Commands
 
 * <<<datanode>>>
 
-   Runs a HDFS datanode.
-
-   Usage: <<<hadoop datanode [-rollback]>>>
-
-*-----------------+-----------------------------------------------------------+
-|| COMMAND_OPTION || Description
-*-----------------+-----------------------------------------------------------+
-| -rollback       | Rollsback the datanode to the previous version. This should
-                  | be used after stopping the datanode and distributing the old
-                  | hadoop version.
-*-----------------+-----------------------------------------------------------+
+   Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#datanode}
+   <<<hdfs datanode>>>}} instead.
 
 * <<<dfsadmin>>>
 
-   Runs a HDFS dfsadmin client.
-
-   Usage: <<<hadoop dfsadmin [GENERIC_OPTIONS] [-report] [-safemode enter | leave | get | wait] [-refreshNodes] [-finalizeUpgrade] [-upgradeProgress status | details | force] [-metasave filename] [-setQuota <quota> <dirname>...<dirname>] [-clrQuota <dirname>...<dirname>] [-restoreFailedStorage true|false|check] [-help [cmd]]>>>
-
-*-----------------+-----------------------------------------------------------+
-|| COMMAND_OPTION || Description
-*-----------------+-----------------------------------------------------------+
-| -report         | Reports basic filesystem information and statistics.
-*-----------------+-----------------------------------------------------------+
-| -safemode enter / leave / get / wait | Safe mode maintenance command. Safe
-                  | mode is a Namenode state in which it \
-                  | 1. does not accept changes to the name space (read-only) \
-                  | 2. does not replicate or delete blocks. \
-                  | Safe mode is entered automatically at Namenode startup, and
-                  | leaves safe mode automatically when the configured minimum
-                  | percentage of blocks satisfies the minimum replication
-                  | condition. Safe mode can also be entered manually, but then
-                  | it can only be turned off manually as well.
-*-----------------+-----------------------------------------------------------+
-| -refreshNodes   | Re-read the hosts and exclude files to update the set of
-                  | Datanodes that are allowed to connect to the Namenode and
-                  | those that should be decommissioned or recommissioned.
-*-----------------+-----------------------------------------------------------+
-| -finalizeUpgrade| Finalize upgrade of HDFS. Datanodes delete their previous
-                  | version working directories, followed by Namenode doing the
-                  | same. This completes the upgrade process.
-*-----------------+-----------------------------------------------------------+
-| -upgradeProgress status / details / force | Request current distributed
-                  | upgrade status, a detailed status or force the upgrade to
-                  | proceed.
-*-----------------+-----------------------------------------------------------+
-| -metasave filename | Save Namenode's primary data structures to <filename> in
-                  | the directory specified by hadoop.log.dir property.
-                  | <filename> is overwritten if it exists.
-                  | <filename> will contain one line for each of the following\
-                  | 1. Datanodes heart beating with Namenode\
-                  | 2. Blocks waiting to be replicated\
-                  | 3. Blocks currrently being replicated\
-                  | 4. Blocks waiting to be deleted\
-*-----------------+-----------------------------------------------------------+
-| -setQuota <quota> <dirname>...<dirname> | Set the quota <quota> for each
-                  | directory <dirname>. The directory quota is a long integer
-                  | that puts a hard limit on the number of names in the
-                  | directory tree.  Best effort for the directory, with faults
-                  | reported if \
-                  | 1. N is not a positive integer, or \
-                  | 2. user is not an administrator, or \
-                  | 3. the directory does not exist or is a file, or \
-                  | 4. the directory would immediately exceed the new quota. \
-*-----------------+-----------------------------------------------------------+
-| -clrQuota <dirname>...<dirname> | Clear the quota for each directory
-                  | <dirname>.  Best effort for the directory. with fault
-                  | reported if \
-                  | 1. the directory does not exist or is a file, or \
-                  | 2. user is not an administrator.  It does not fault if the
-                  | directory has no quota.
-*-----------------+-----------------------------------------------------------+
-| -restoreFailedStorage true / false / check | This option will turn on/off automatic attempt to restore failed storage replicas.
-                  | If a failed storage becomes available again the system will attempt to restore
-                  | edits and/or fsimage during checkpoint. 'check' option will return current setting.
-*-----------------+-----------------------------------------------------------+
-| -help [cmd]     | Displays help for the given command or all commands if none
-                  | is specified.
-*-----------------+-----------------------------------------------------------+
+   Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#dfsadmin}
+   <<<hdfs dfsadmin>>>}} instead.
 
 * <<<mradmin>>>
 
@@ -470,51 +345,13 @@ Administration Commands
 
 * <<<namenode>>>
 
-   Runs the namenode. More info about the upgrade, rollback and finalize is
-   at {{{../hadoop-hdfs/HdfsUserGuide.html#Upgrade_and_Rollback}Upgrade Rollback}}.
-
-   Usage: <<<hadoop namenode [-format] | [-upgrade] | [-rollback] | [-finalize] | [-importCheckpoint]>>>
-
-*--------------------+-----------------------------------------------------------+
-|| COMMAND_OPTION    || Description
-*--------------------+-----------------------------------------------------------+
-| -format            | Formats the namenode. It starts the namenode, formats
-                     | it and then shut it down.
-*--------------------+-----------------------------------------------------------+
-| -upgrade           | Namenode should be started with upgrade option after
-                     | the distribution of new hadoop version.
-*--------------------+-----------------------------------------------------------+
-| -rollback          | Rollsback the namenode to the previous version. This
-                     | should be used after stopping the cluster and
-                     | distributing the old hadoop version.
-*--------------------+-----------------------------------------------------------+
-| -finalize          | Finalize will remove the previous state of the files
-                     | system. Recent upgrade will become permanent.  Rollback
-                     | option will not be available anymore. After finalization
-                     | it shuts the namenode down.
-*--------------------+-----------------------------------------------------------+
-| -importCheckpoint  | Loads image from a checkpoint directory and save it
-                     | into the current one. Checkpoint dir is read from
-                     | property fs.checkpoint.dir
-*--------------------+-----------------------------------------------------------+
+   Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#namenode}
+   <<<hdfs namenode>>>}} instead.
 
 * <<<secondarynamenode>>>
 
-   Runs the HDFS secondary namenode.
-   See {{{../hadoop-hdfs/HdfsUserGuide.html#Secondary_NameNode}Secondary Namenode}}
-   for more info.
-
-   Usage: <<<hadoop secondarynamenode [-checkpoint [force]] | [-geteditsize]>>>
-
-*----------------------+-----------------------------------------------------------+
-|| COMMAND_OPTION      || Description
-*----------------------+-----------------------------------------------------------+
-| -checkpoint [-force] | Checkpoints the Secondary namenode if EditLog size
-                       | >= fs.checkpoint.size. If <<<-force>>> is used,
-                       | checkpoint irrespective of EditLog size.
-*----------------------+-----------------------------------------------------------+
-| -geteditsize         | Prints the EditLog size.
-*----------------------+-----------------------------------------------------------+
+   Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#secondarynamenode}
+   <<<hdfs secondarynamenode>>>}} instead.
 
 * <<<tasktracker>>>
 

+ 0 - 24
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java

@@ -26,13 +26,11 @@ import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.AvroTestUtil;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Shell;
 
 import com.google.common.base.Joiner;
 
 import junit.framework.TestCase;
-import static org.junit.Assert.fail;
 
 public class TestPath extends TestCase {
   /**
@@ -307,28 +305,6 @@ public class TestPath extends TestCase {
     // if the child uri is absolute path
     assertEquals("foo://bar/fud#boo", new Path(new Path(new URI(
         "foo://bar/baz#bud")), new Path(new URI("/fud#boo"))).toString());
-
-    // empty URI
-    URI uri3 = new URI("");
-    assertEquals("", uri3.toString());
-    try {
-      path = new Path(uri3);
-      fail("Expected exception for empty URI");
-    } catch (IllegalArgumentException e) {
-      // expect to receive an IllegalArgumentException
-      GenericTestUtils.assertExceptionContains("Can not create a Path"
-          + " from an empty URI", e);
-    }
-    // null URI
-    uri3 = null;
-    try {
-      path = new Path(uri3);
-      fail("Expected exception for null URI");
-    } catch (IllegalArgumentException e) {
-      // expect to receive an IllegalArgumentException
-      GenericTestUtils.assertExceptionContains("Can not create a Path"
-          + " from a null URI", e);
-    }
   }
 
   /** Test URIs created from Path objects */

+ 326 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java

@@ -0,0 +1,326 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
+import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.AuthenticationToken;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.Token;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.MediaType;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.Map;
+import java.util.Properties;
+
+public class TestDelegationTokenAuthenticationHandlerWithMocks {
+
+  public static class MockDelegationTokenAuthenticationHandler
+      extends DelegationTokenAuthenticationHandler {
+
+    public MockDelegationTokenAuthenticationHandler() {
+      super(new AuthenticationHandler() {
+        @Override
+        public String getType() {
+          return "T";
+        }
+
+        @Override
+        public void init(Properties config) throws ServletException {
+
+        }
+
+        @Override
+        public void destroy() {
+
+        }
+
+        @Override
+        public boolean managementOperation(AuthenticationToken token,
+            HttpServletRequest request, HttpServletResponse response)
+            throws IOException, AuthenticationException {
+          return false;
+        }
+
+        @Override
+        public AuthenticationToken authenticate(HttpServletRequest request,
+            HttpServletResponse response)
+            throws IOException, AuthenticationException {
+          response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
+          response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, "mock");
+          return null;
+        }
+      });
+    }
+
+  }
+
+  private DelegationTokenAuthenticationHandler handler;
+
+  @Before
+  public void setUp() throws Exception {
+    Properties conf = new Properties();
+
+    conf.put(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND, "foo");
+    handler = new MockDelegationTokenAuthenticationHandler();
+    handler.initTokenManager(conf);
+  }
+
+  @After
+  public void cleanUp() {
+    handler.destroy();
+  }
+
+  @Test
+  public void testManagementOperations() throws Exception {
+    testNonManagementOperation();
+    testManagementOperationErrors();
+    testGetToken(null, new Text("foo"));
+    testGetToken("bar", new Text("foo"));
+    testCancelToken();
+    testRenewToken();
+  }
+
+  private void testNonManagementOperation() throws Exception {
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(request.getParameter(
+        DelegationTokenAuthenticator.OP_PARAM)).thenReturn(null);
+    Assert.assertTrue(handler.managementOperation(null, request, null));
+    Mockito.when(request.getParameter(
+        DelegationTokenAuthenticator.OP_PARAM)).thenReturn("CREATE");
+    Assert.assertTrue(handler.managementOperation(null, request, null));
+  }
+
+  private void testManagementOperationErrors() throws Exception {
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+    Mockito.when(request.getQueryString()).thenReturn(
+        DelegationTokenAuthenticator.OP_PARAM + "=" +
+            DelegationTokenAuthenticator.DelegationTokenOperation.
+                GETDELEGATIONTOKEN.toString()
+    );
+    Mockito.when(request.getMethod()).thenReturn("FOO");
+    Assert.assertFalse(handler.managementOperation(null, request, response));
+    Mockito.verify(response).sendError(
+        Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
+        Mockito.startsWith("Wrong HTTP method"));
+
+    Mockito.reset(response);
+    Mockito.when(request.getMethod()).thenReturn(
+        DelegationTokenAuthenticator.DelegationTokenOperation.
+            GETDELEGATIONTOKEN.getHttpMethod()
+    );
+    Assert.assertFalse(handler.managementOperation(null, request, response));
+    Mockito.verify(response).setStatus(
+        Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED));
+    Mockito.verify(response).setHeader(
+        Mockito.eq(KerberosAuthenticator.WWW_AUTHENTICATE),
+        Mockito.eq("mock"));
+  }
+
+  private void testGetToken(String renewer, Text expectedTokenKind)
+      throws Exception {
+    DelegationTokenAuthenticator.DelegationTokenOperation op =
+        DelegationTokenAuthenticator.DelegationTokenOperation.
+            GETDELEGATIONTOKEN;
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+    Mockito.when(request.getQueryString()).
+        thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString());
+    Mockito.when(request.getMethod()).thenReturn(op.getHttpMethod());
+
+    AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
+    Mockito.when(token.getUserName()).thenReturn("user");
+    Mockito.when(response.getWriter()).thenReturn(new PrintWriter(
+        new StringWriter()));
+    Assert.assertFalse(handler.managementOperation(token, request, response));
+
+    Mockito.when(request.getQueryString()).
+        thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() +
+        "&" + DelegationTokenAuthenticator.RENEWER_PARAM + "=" + renewer);
+
+    Mockito.reset(response);
+    Mockito.reset(token);
+    Mockito.when(token.getUserName()).thenReturn("user");
+    StringWriter writer = new StringWriter();
+    PrintWriter pwriter = new PrintWriter(writer);
+    Mockito.when(response.getWriter()).thenReturn(pwriter);
+    Assert.assertFalse(handler.managementOperation(token, request, response));
+    // token.getUserName() is consulted whether or not an explicit renewer
+    // was supplied
+    Mockito.verify(token).getUserName();
+    Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
+    Mockito.verify(response).setContentType(MediaType.APPLICATION_JSON);
+    pwriter.close();
+    String responseOutput = writer.toString();
+    String tokenLabel = DelegationTokenAuthenticator.
+        DELEGATION_TOKEN_JSON;
+    Assert.assertTrue(responseOutput.contains(tokenLabel));
+    Assert.assertTrue(responseOutput.contains(
+        DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON));
+    ObjectMapper jsonMapper = new ObjectMapper();
+    Map json = jsonMapper.readValue(responseOutput, Map.class);
+    json = (Map) json.get(tokenLabel);
+    String tokenStr;
+    tokenStr = (String) json.get(DelegationTokenAuthenticator.
+        DELEGATION_TOKEN_URL_STRING_JSON);
+    Token<DelegationTokenIdentifier> dt = new Token<DelegationTokenIdentifier>();
+    dt.decodeFromUrlString(tokenStr);
+    handler.getTokenManager().verifyToken(dt);
+    Assert.assertEquals(expectedTokenKind, dt.getKind());
+  }
+
+  private void testCancelToken() throws Exception {
+    DelegationTokenAuthenticator.DelegationTokenOperation op =
+        DelegationTokenAuthenticator.DelegationTokenOperation.
+            CANCELDELEGATIONTOKEN;
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+    Mockito.when(request.getQueryString()).thenReturn(
+        DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString());
+    Mockito.when(request.getMethod()).
+        thenReturn(op.getHttpMethod());
+
+    Assert.assertFalse(handler.managementOperation(null, request, response));
+    Mockito.verify(response).sendError(
+        Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
+        Mockito.contains("requires the parameter [token]"));
+
+    Mockito.reset(response);
+    Token<DelegationTokenIdentifier> token =
+        handler.getTokenManager().createToken(
+            UserGroupInformation.getCurrentUser(), "foo");
+    Mockito.when(request.getQueryString()).thenReturn(
+        DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() + "&" +
+            DelegationTokenAuthenticator.TOKEN_PARAM + "=" +
+            token.encodeToUrlString());
+    Assert.assertFalse(handler.managementOperation(null, request, response));
+    Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
+    try {
+      handler.getTokenManager().verifyToken(token);
+      Assert.fail();
+    } catch (SecretManager.InvalidToken ex) {
+      //NOP
+    } catch (Throwable ex) {
+      Assert.fail();
+    }
+  }
+
+  private void testRenewToken() throws Exception {
+    DelegationTokenAuthenticator.DelegationTokenOperation op =
+        DelegationTokenAuthenticator.DelegationTokenOperation.
+            RENEWDELEGATIONTOKEN;
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+    Mockito.when(request.getQueryString()).
+        thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString());
+    Mockito.when(request.getMethod()).
+        thenReturn(op.getHttpMethod());
+
+    Assert.assertFalse(handler.managementOperation(null, request, response));
+    Mockito.verify(response).setStatus(
+        Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED));
+    Mockito.verify(response).setHeader(Mockito.eq(
+            KerberosAuthenticator.WWW_AUTHENTICATE),
+        Mockito.eq("mock")
+    );
+
+    Mockito.reset(response);
+    AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
+    Mockito.when(token.getUserName()).thenReturn("user");
+    Assert.assertFalse(handler.managementOperation(token, request, response));
+    Mockito.verify(response).sendError(
+        Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
+        Mockito.contains("requires the parameter [token]"));
+
+    Mockito.reset(response);
+    StringWriter writer = new StringWriter();
+    PrintWriter pwriter = new PrintWriter(writer);
+    Mockito.when(response.getWriter()).thenReturn(pwriter);
+    Token<DelegationTokenIdentifier> dToken =
+        handler.getTokenManager().createToken(
+            UserGroupInformation.getCurrentUser(), "user");
+    Mockito.when(request.getQueryString()).
+        thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() +
+        "&" + DelegationTokenAuthenticator.TOKEN_PARAM + "=" +
+        dToken.encodeToUrlString());
+    Assert.assertFalse(handler.managementOperation(token, request, response));
+    Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
+    pwriter.close();
+    Assert.assertTrue(writer.toString().contains("long"));
+    handler.getTokenManager().verifyToken(dToken);
+  }
+
+  @Test
+  public void testAuthenticate() throws Exception {
+    testValidDelegationToken();
+    testInvalidDelegationToken();
+  }
+
+  private void testValidDelegationToken() throws Exception {
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+    Token<DelegationTokenIdentifier> dToken =
+        handler.getTokenManager().createToken(
+            UserGroupInformation.getCurrentUser(), "user");
+    Mockito.when(request.getQueryString()).thenReturn(
+        DelegationTokenAuthenticator.DELEGATION_PARAM + "=" +
+        dToken.encodeToUrlString());
+
+    AuthenticationToken token = handler.authenticate(request, response);
+    Assert.assertEquals(UserGroupInformation.getCurrentUser().
+            getShortUserName(), token.getUserName());
+    Assert.assertEquals(0, token.getExpires());
+    Assert.assertEquals(handler.getType(),
+        token.getType());
+    Assert.assertTrue(token.isExpired());
+  }
+
+  private void testInvalidDelegationToken() throws Exception {
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+    Mockito.when(request.getQueryString()).thenReturn(
+        DelegationTokenAuthenticator.DELEGATION_PARAM + "=invalid");
+
+    try {
+      handler.authenticate(request, response);
+      Assert.fail();
+    } catch (AuthenticationException ex) {
+      //NOP
+    } catch (Exception ex) {
+      Assert.fail();
+    }
+  }
+
+}
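For reference, the RENEWDELEGATIONTOKEN management operation answers with a one-field JSON body of the form {"long": <newExpirationTime>}, which is what the assertion on the captured writer above checks. A minimal client-side sketch of reading that body — not part of the patch, and the helper name is hypothetical — assuming the caller supplies an already-opened, authenticated connection:

import org.codehaus.jackson.map.ObjectMapper;

import java.io.IOException;
import java.net.HttpURLConnection;
import java.util.Map;

public class RenewResponseSketch {
  // Parses the {"long": <expirationMillis>} body written by a completed
  // RENEWDELEGATIONTOKEN PUT request.
  static long readNewExpiration(HttpURLConnection conn) throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    Map<?, ?> map = mapper.readValue(conn.getInputStream(), Map.class);
    return ((Number) map.get("long")).longValue();
  }
}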

+ 59 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenManager.java

@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.util.Arrays;
+
+public class TestDelegationTokenManager {
+
+  private static final long DAY_IN_SECS = 86400;
+
+  @Test
+  public void testDTManager() throws Exception {
+    DelegationTokenManager tm = new DelegationTokenManager(new Text("foo"),
+        DAY_IN_SECS, DAY_IN_SECS, DAY_IN_SECS, DAY_IN_SECS);
+    tm.init();
+    Token<DelegationTokenIdentifier> token =
+        tm.createToken(UserGroupInformation.getCurrentUser(), "foo");
+    Assert.assertNotNull(token);
+    tm.verifyToken(token);
+    Assert.assertTrue(tm.renewToken(token, "foo") > System.currentTimeMillis());
+    tm.cancelToken(token, "foo");
+    try {
+      tm.verifyToken(token);
+      Assert.fail();
+    } catch (IOException ex) {
+      //NOP
+    } catch (Exception ex) {
+      Assert.fail();
+    }
+    tm.destroy();
+  }
+
+}

+ 727 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java

@@ -0,0 +1,727 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.AuthenticationToken;
+import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
+import org.apache.hadoop.security.authentication.util.KerberosUtil;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mortbay.jetty.Connector;
+import org.mortbay.jetty.Server;
+import org.mortbay.jetty.servlet.Context;
+import org.mortbay.jetty.servlet.FilterHolder;
+import org.mortbay.jetty.servlet.ServletHolder;
+
+import javax.security.auth.Subject;
+import javax.security.auth.kerberos.KerberosPrincipal;
+import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.Configuration;
+import javax.security.auth.login.LoginContext;
+import javax.servlet.Filter;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.File;
+import java.io.IOException;
+import java.io.Writer;
+import java.net.HttpURLConnection;
+import java.net.InetAddress;
+import java.net.ServerSocket;
+import java.net.URL;
+import java.security.Principal;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+
+public class TestWebDelegationToken {
+  private Server jetty;
+
+  public static class DummyAuthenticationHandler
+      implements AuthenticationHandler {
+    @Override
+    public String getType() {
+      return "dummy";
+    }
+
+    @Override
+    public void init(Properties config) throws ServletException {
+    }
+
+    @Override
+    public void destroy() {
+    }
+
+    @Override
+    public boolean managementOperation(AuthenticationToken token,
+        HttpServletRequest request, HttpServletResponse response)
+        throws IOException, AuthenticationException {
+      return false;
+    }
+
+    @Override
+    public AuthenticationToken authenticate(HttpServletRequest request,
+        HttpServletResponse response)
+        throws IOException, AuthenticationException {
+      AuthenticationToken token = null;
+      if (request.getParameter("authenticated") != null) {
+        token = new AuthenticationToken(request.getParameter("authenticated"),
+            "U", "test");
+      } else {
+        response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
+        response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, "dummy");
+      }
+      return token;
+    }
+  }
+
+  public static class DummyDelegationTokenAuthenticationHandler extends
+      DelegationTokenAuthenticationHandler {
+    public DummyDelegationTokenAuthenticationHandler() {
+      super(new DummyAuthenticationHandler());
+    }
+
+    @Override
+    public void init(Properties config) throws ServletException {
+      Properties conf = new Properties(config);
+      conf.setProperty(TOKEN_KIND, "token-kind");
+      initTokenManager(conf);
+    }
+  }
+
+  public static class AFilter extends DelegationTokenAuthenticationFilter {
+
+    @Override
+    protected Properties getConfiguration(String configPrefix,
+        FilterConfig filterConfig) {
+      Properties conf = new Properties();
+      conf.setProperty(AUTH_TYPE,
+          DummyDelegationTokenAuthenticationHandler.class.getName());
+      return conf;
+    }
+  }
+
+  public static class PingServlet extends HttpServlet {
+
+    @Override
+    protected void doGet(HttpServletRequest req, HttpServletResponse resp)
+        throws ServletException, IOException {
+      resp.setStatus(HttpServletResponse.SC_OK);
+      resp.getWriter().write("ping");
+    }
+
+    @Override
+    protected void doPost(HttpServletRequest req, HttpServletResponse resp)
+        throws ServletException, IOException {
+      Writer writer = resp.getWriter();
+      writer.write("ping: ");
+      IOUtils.copy(req.getReader(), writer);
+      resp.setStatus(HttpServletResponse.SC_OK);
+    }
+  }
+
+  protected Server createJettyServer() {
+    try {
+      InetAddress localhost = InetAddress.getLocalHost();
+      ServerSocket ss = new ServerSocket(0, 50, localhost);
+      int port = ss.getLocalPort();
+      ss.close();
+      jetty = new Server(0);
+      jetty.getConnectors()[0].setHost("localhost");
+      jetty.getConnectors()[0].setPort(port);
+      return jetty;
+    } catch (Exception ex) {
+      throw new RuntimeException("Could not setup Jetty: " + ex.getMessage(),
+          ex);
+    }
+  }
+
+  protected String getJettyURL() {
+    Connector c = jetty.getConnectors()[0];
+    return "http://" + c.getHost() + ":" + c.getPort();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    // resetting hadoop security to simple
+    org.apache.hadoop.conf.Configuration conf =
+        new org.apache.hadoop.conf.Configuration();
+    UserGroupInformation.setConfiguration(conf);
+
+    jetty = createJettyServer();
+  }
+
+  @After
+  public void cleanUp() throws Exception {
+    jetty.stop();
+
+    // resetting hadoop security to simple
+    org.apache.hadoop.conf.Configuration conf =
+        new org.apache.hadoop.conf.Configuration();
+    UserGroupInformation.setConfiguration(conf);
+  }
+
+  protected Server getJetty() {
+    return jetty;
+  }
+
+  @Test
+  public void testRawHttpCalls() throws Exception {
+    final Server jetty = createJettyServer();
+    Context context = new Context();
+    context.setContextPath("/foo");
+    jetty.setHandler(context);
+    context.addFilter(new FilterHolder(AFilter.class), "/*", 0);
+    context.addServlet(new ServletHolder(PingServlet.class), "/bar");
+    try {
+      jetty.start();
+      URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
+      URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
+
+      // unauthenticated access to URL
+      HttpURLConnection conn = (HttpURLConnection) nonAuthURL.openConnection();
+      Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
+          conn.getResponseCode());
+
+      // authenticated access to URL
+      conn = (HttpURLConnection) authURL.openConnection();
+      Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+
+      // unauthenticated access to get delegation token
+      URL url = new URL(nonAuthURL.toExternalForm() + "?op=GETDELEGATIONTOKEN");
+      conn = (HttpURLConnection) url.openConnection();
+      Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
+          conn.getResponseCode());
+
+      // authenticated access to get delegation token
+      url = new URL(authURL.toExternalForm() +
+          "&op=GETDELEGATIONTOKEN&renewer=foo");
+      conn = (HttpURLConnection) url.openConnection();
+      Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+      ObjectMapper mapper = new ObjectMapper();
+      Map map = mapper.readValue(conn.getInputStream(), Map.class);
+      String dt = (String) ((Map) map.get("Token")).get("urlString");
+      Assert.assertNotNull(dt);
+
+      // delegation token access to URL
+      url = new URL(nonAuthURL.toExternalForm() + "?delegation=" + dt);
+      conn = (HttpURLConnection) url.openConnection();
+      Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+
+      // delegation token and authenticated access to URL
+      url = new URL(authURL.toExternalForm() + "&delegation=" + dt);
+      conn = (HttpURLConnection) url.openConnection();
+      Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+
+      // renew delegation token, unauthenticated access to URL
+      url = new URL(nonAuthURL.toExternalForm() +
+          "?op=RENEWDELEGATIONTOKEN&token=" + dt);
+      conn = (HttpURLConnection) url.openConnection();
+      conn.setRequestMethod("PUT");
+      Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
+          conn.getResponseCode());
+
+      // renew delegation token, authenticated access to URL
+      url = new URL(authURL.toExternalForm() +
+          "&op=RENEWDELEGATIONTOKEN&token=" + dt);
+      conn = (HttpURLConnection) url.openConnection();
+      conn.setRequestMethod("PUT");
+      Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+
+      // renew delegation token, authenticated access to URL, not renewer
+      url = new URL(getJettyURL() +
+          "/foo/bar?authenticated=bar&op=RENEWDELEGATIONTOKEN&token=" + dt);
+      conn = (HttpURLConnection) url.openConnection();
+      conn.setRequestMethod("PUT");
+      Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
+          conn.getResponseCode());
+
+      // cancel delegation token, unauthenticated access to URL
+      url = new URL(nonAuthURL.toExternalForm() +
+          "?op=CANCELDELEGATIONTOKEN&token=" + dt);
+      conn = (HttpURLConnection) url.openConnection();
+      conn.setRequestMethod("PUT");
+      Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+
+      // cancel already-canceled delegation token, unauthenticated access to URL
+      url = new URL(nonAuthURL.toExternalForm() +
+          "?op=CANCELDELEGATIONTOKEN&token=" + dt);
+      conn = (HttpURLConnection) url.openConnection();
+      conn.setRequestMethod("PUT");
+      Assert.assertEquals(HttpURLConnection.HTTP_NOT_FOUND,
+          conn.getResponseCode());
+
+      // get new delegation token
+      url = new URL(authURL.toExternalForm() +
+          "&op=GETDELEGATIONTOKEN&renewer=foo");
+      conn = (HttpURLConnection) url.openConnection();
+      Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+      mapper = new ObjectMapper();
+      map = mapper.readValue(conn.getInputStream(), Map.class);
+      dt = (String) ((Map) map.get("Token")).get("urlString");
+      Assert.assertNotNull(dt);
+
+      // cancel delegation token, authenticated access to URL
+      url = new URL(authURL.toExternalForm() +
+          "&op=CANCELDELEGATIONTOKEN&token=" + dt);
+      conn = (HttpURLConnection) url.openConnection();
+      conn.setRequestMethod("PUT");
+      Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+    } finally {
+      jetty.stop();
+    }
+  }
+
+  @Test
+  public void testDelegationTokenAuthenticatorCalls() throws Exception {
+    final Server jetty = createJettyServer();
+    Context context = new Context();
+    context.setContextPath("/foo");
+    jetty.setHandler(context);
+    context.addFilter(new FilterHolder(AFilter.class), "/*", 0);
+    context.addServlet(new ServletHolder(PingServlet.class), "/bar");
+
+    try {
+      jetty.start();
+      URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
+      URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
+      URL authURL2 = new URL(getJettyURL() + "/foo/bar?authenticated=bar");
+
+      DelegationTokenAuthenticatedURL.Token token =
+          new DelegationTokenAuthenticatedURL.Token();
+      DelegationTokenAuthenticatedURL aUrl =
+          new DelegationTokenAuthenticatedURL();
+
+      try {
+        aUrl.getDelegationToken(nonAuthURL, token, "foo");
+        Assert.fail();
+      } catch (Exception ex) {
+        Assert.assertTrue(ex.getMessage().contains("401"));
+      }
+
+      aUrl.getDelegationToken(authURL, token, "foo");
+      Assert.assertNotNull(token.getDelegationToken());
+      Assert.assertEquals(new Text("token-kind"),
+          token.getDelegationToken().getKind());
+
+      aUrl.renewDelegationToken(authURL, token);
+
+      try {
+        aUrl.renewDelegationToken(nonAuthURL, token);
+        Assert.fail();
+      } catch (Exception ex) {
+        Assert.assertTrue(ex.getMessage().contains("401"));
+      }
+
+      aUrl.getDelegationToken(authURL, token, "foo");
+
+      try {
+        aUrl.renewDelegationToken(authURL2, token);
+        Assert.fail();
+      } catch (Exception ex) {
+        Assert.assertTrue(ex.getMessage().contains("403"));
+      }
+
+      aUrl.getDelegationToken(authURL, token, "foo");
+
+      aUrl.cancelDelegationToken(authURL, token);
+
+      aUrl.getDelegationToken(authURL, token, "foo");
+
+      aUrl.cancelDelegationToken(nonAuthURL, token);
+
+      aUrl.getDelegationToken(authURL, token, "foo");
+
+      try {
+        aUrl.renewDelegationToken(nonAuthURL, token);
+      } catch (Exception ex) {
+        Assert.assertTrue(ex.getMessage().contains("401"));
+      }
+
+    } finally {
+      jetty.stop();
+    }
+  }
+
+  private static class DummyDelegationTokenSecretManager
+      extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
+
+    public DummyDelegationTokenSecretManager() {
+      super(10000, 10000, 10000, 10000);
+    }
+
+    @Override
+    public DelegationTokenIdentifier createIdentifier() {
+      return new DelegationTokenIdentifier(new Text("fooKind"));
+    }
+
+  }
+
+  @Test
+  public void testExternalDelegationTokenSecretManager() throws Exception {
+    DummyDelegationTokenSecretManager secretMgr
+        = new DummyDelegationTokenSecretManager();
+    final Server jetty = createJettyServer();
+    Context context = new Context();
+    context.setContextPath("/foo");
+    jetty.setHandler(context);
+    context.addFilter(new FilterHolder(AFilter.class), "/*", 0);
+    context.addServlet(new ServletHolder(PingServlet.class), "/bar");
+    try {
+      secretMgr.startThreads();
+      context.setAttribute(DelegationTokenAuthenticationFilter.
+              DELEGATION_TOKEN_SECRET_MANAGER_ATTR, secretMgr);
+      jetty.start();
+      URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
+
+      DelegationTokenAuthenticatedURL.Token token =
+          new DelegationTokenAuthenticatedURL.Token();
+      DelegationTokenAuthenticatedURL aUrl =
+          new DelegationTokenAuthenticatedURL();
+
+      aUrl.getDelegationToken(authURL, token, "foo");
+      Assert.assertNotNull(token.getDelegationToken());
+      Assert.assertEquals(new Text("fooKind"),
+          token.getDelegationToken().getKind());
+
+    } finally {
+      jetty.stop();
+      secretMgr.stopThreads();
+    }
+  }
+
+  public static class NoDTFilter extends AuthenticationFilter {
+
+    @Override
+    protected Properties getConfiguration(String configPrefix,
+        FilterConfig filterConfig) {
+      Properties conf = new Properties();
+      conf.setProperty(AUTH_TYPE, PseudoAuthenticationHandler.TYPE);
+      return conf;
+    }
+  }
+
+
+  public static class NoDTHandlerDTAFilter
+      extends DelegationTokenAuthenticationFilter {
+
+    @Override
+    protected Properties getConfiguration(String configPrefix,
+        FilterConfig filterConfig) {
+      Properties conf = new Properties();
+      conf.setProperty(AUTH_TYPE, PseudoAuthenticationHandler.TYPE);
+      return conf;
+    }
+  }
+
+  public static class UserServlet extends HttpServlet {
+
+    @Override
+    protected void doGet(HttpServletRequest req, HttpServletResponse resp)
+        throws ServletException, IOException {
+      resp.setStatus(HttpServletResponse.SC_OK);
+      resp.getWriter().write(req.getUserPrincipal().getName());
+    }
+  }
+
+  @Test
+  public void testDelegationTokenAuthenticationURLWithNoDTFilter()
+    throws Exception {
+    testDelegationTokenAuthenticatedURLWithNoDT(NoDTFilter.class);
+  }
+
+  @Test
+  public void testDelegationTokenAuthenticationURLWithNoDTHandler()
+      throws Exception {
+    testDelegationTokenAuthenticatedURLWithNoDT(NoDTHandlerDTAFilter.class);
+  }
+
+  // we are also implicitly testing KerberosDelegationTokenAuthenticator
+  // fallback here
+  private void testDelegationTokenAuthenticatedURLWithNoDT(
+      Class<? extends Filter> filterClass)  throws Exception {
+    final Server jetty = createJettyServer();
+    Context context = new Context();
+    context.setContextPath("/foo");
+    jetty.setHandler(context);
+    context.addFilter(new FilterHolder(filterClass), "/*", 0);
+    context.addServlet(new ServletHolder(UserServlet.class), "/bar");
+
+    try {
+      jetty.start();
+      final URL url = new URL(getJettyURL() + "/foo/bar");
+
+      UserGroupInformation ugi = UserGroupInformation.createRemoteUser("foo");
+      ugi.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          DelegationTokenAuthenticatedURL.Token token =
+              new DelegationTokenAuthenticatedURL.Token();
+          DelegationTokenAuthenticatedURL aUrl =
+              new DelegationTokenAuthenticatedURL();
+          HttpURLConnection conn = aUrl.openConnection(url, token);
+          Assert.assertEquals(HttpURLConnection.HTTP_OK,
+              conn.getResponseCode());
+          List<String> ret = IOUtils.readLines(conn.getInputStream());
+          Assert.assertEquals(1, ret.size());
+          Assert.assertEquals("foo", ret.get(0));
+
+          try {
+            aUrl.getDelegationToken(url, token, "foo");
+            Assert.fail();
+          } catch (AuthenticationException ex) {
+            Assert.assertTrue(ex.getMessage().contains(
+                "delegation token operation"));
+          }
+          return null;
+        }
+      });
+    } finally {
+      jetty.stop();
+    }
+  }
+
+  public static class PseudoDTAFilter
+      extends DelegationTokenAuthenticationFilter {
+
+    @Override
+    protected Properties getConfiguration(String configPrefix,
+        FilterConfig filterConfig) {
+      Properties conf = new Properties();
+      conf.setProperty(AUTH_TYPE,
+          PseudoDelegationTokenAuthenticationHandler.class.getName());
+      conf.setProperty(DelegationTokenAuthenticationHandler.TOKEN_KIND,
+          "token-kind");
+      return conf;
+    }
+  }
+
+  @Test
+  public void testFallbackToPseudoDelegationTokenAuthenticator()
+      throws Exception {
+    final Server jetty = createJettyServer();
+    Context context = new Context();
+    context.setContextPath("/foo");
+    jetty.setHandler(context);
+    context.addFilter(new FilterHolder(PseudoDTAFilter.class), "/*", 0);
+    context.addServlet(new ServletHolder(UserServlet.class), "/bar");
+
+    try {
+      jetty.start();
+      final URL url = new URL(getJettyURL() + "/foo/bar");
+
+      UserGroupInformation ugi = UserGroupInformation.createRemoteUser("foo");
+      ugi.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          DelegationTokenAuthenticatedURL.Token token =
+              new DelegationTokenAuthenticatedURL.Token();
+          DelegationTokenAuthenticatedURL aUrl =
+              new DelegationTokenAuthenticatedURL();
+          HttpURLConnection conn = aUrl.openConnection(url, token);
+          Assert.assertEquals(HttpURLConnection.HTTP_OK,
+              conn.getResponseCode());
+          List<String> ret = IOUtils.readLines(conn.getInputStream());
+          Assert.assertEquals(1, ret.size());
+          Assert.assertEquals("foo", ret.get(0));
+
+          aUrl.getDelegationToken(url, token, "foo");
+          Assert.assertNotNull(token.getDelegationToken());
+          Assert.assertEquals(new Text("token-kind"),
+              token.getDelegationToken().getKind());
+          return null;
+        }
+      });
+    } finally {
+      jetty.stop();
+    }
+  }
+
+  public static class KDTAFilter extends DelegationTokenAuthenticationFilter {
+    static String keytabFile;
+
+    @Override
+    protected Properties getConfiguration(String configPrefix,
+        FilterConfig filterConfig) {
+      Properties conf = new Properties();
+      conf.setProperty(AUTH_TYPE,
+          KerberosDelegationTokenAuthenticationHandler.class.getName());
+      conf.setProperty(KerberosAuthenticationHandler.KEYTAB, keytabFile);
+      conf.setProperty(KerberosAuthenticationHandler.PRINCIPAL,
+          "HTTP/localhost");
+      conf.setProperty(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND,
+          "token-kind");
+      return conf;
+    }
+  }
+
+  private static class KerberosConfiguration extends Configuration {
+    private String principal;
+    private String keytab;
+
+    public KerberosConfiguration(String principal, String keytab) {
+      this.principal = principal;
+      this.keytab = keytab;
+    }
+
+    @Override
+    public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
+      Map<String, String> options = new HashMap<String, String>();
+      options.put("principal", principal);
+      options.put("keyTab", keytab);
+      options.put("useKeyTab", "true");
+      options.put("storeKey", "true");
+      options.put("doNotPrompt", "true");
+      options.put("useTicketCache", "true");
+      options.put("renewTGT", "true");
+      options.put("refreshKrb5Config", "true");
+      options.put("isInitiator", "true");
+      String ticketCache = System.getenv("KRB5CCNAME");
+      if (ticketCache != null) {
+        options.put("ticketCache", ticketCache);
+      }
+      options.put("debug", "true");
+
+      return new AppConfigurationEntry[]{
+          new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
+              AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
+              options),};
+    }
+  }
+
+  public static <T> T doAsKerberosUser(String principal, String keytab,
+      final Callable<T> callable) throws Exception {
+    LoginContext loginContext = null;
+    try {
+      Set<Principal> principals = new HashSet<Principal>();
+      principals.add(new KerberosPrincipal(principal));
+      Subject subject = new Subject(false, principals, new HashSet<Object>(),
+          new HashSet<Object>());
+      loginContext = new LoginContext("", subject, null,
+          new KerberosConfiguration(principal, keytab));
+      loginContext.login();
+      subject = loginContext.getSubject();
+      return Subject.doAs(subject, new PrivilegedExceptionAction<T>() {
+        @Override
+        public T run() throws Exception {
+          return callable.call();
+        }
+      });
+    } catch (PrivilegedActionException ex) {
+      throw ex.getException();
+    } finally {
+      if (loginContext != null) {
+        loginContext.logout();
+      }
+    }
+  }
+
+  @Test
+  public void testKerberosDelegationTokenAuthenticator() throws Exception {
+    // setting hadoop security to kerberos
+    org.apache.hadoop.conf.Configuration conf =
+        new org.apache.hadoop.conf.Configuration();
+    conf.set("hadoop.security.authentication", "kerberos");
+    UserGroupInformation.setConfiguration(conf);
+
+    File testDir = new File("target/" + UUID.randomUUID().toString());
+    Assert.assertTrue(testDir.mkdirs());
+    MiniKdc kdc = new MiniKdc(MiniKdc.createConf(), testDir);
+    final Server jetty = createJettyServer();
+    Context context = new Context();
+    context.setContextPath("/foo");
+    jetty.setHandler(context);
+    context.addFilter(new FilterHolder(KDTAFilter.class), "/*", 0);
+    context.addServlet(new ServletHolder(UserServlet.class), "/bar");
+    try {
+      kdc.start();
+      File keytabFile = new File(testDir, "test.keytab");
+      kdc.createPrincipal(keytabFile, "client", "HTTP/localhost");
+      KDTAFilter.keytabFile = keytabFile.getAbsolutePath();
+      jetty.start();
+
+      final DelegationTokenAuthenticatedURL.Token token =
+          new DelegationTokenAuthenticatedURL.Token();
+      final DelegationTokenAuthenticatedURL aUrl =
+          new DelegationTokenAuthenticatedURL();
+      final URL url = new URL(getJettyURL() + "/foo/bar");
+
+      try {
+        aUrl.getDelegationToken(url, token, "foo");
+        Assert.fail();
+      } catch (AuthenticationException ex) {
+        Assert.assertTrue(ex.getMessage().contains("GSSException"));
+      }
+
+      doAsKerberosUser("client", keytabFile.getAbsolutePath(),
+          new Callable<Void>() {
+            @Override
+            public Void call() throws Exception {
+              aUrl.getDelegationToken(url, token, "client");
+              Assert.assertNotNull(token.getDelegationToken());
+
+              aUrl.renewDelegationToken(url, token);
+              Assert.assertNotNull(token.getDelegationToken());
+
+              aUrl.getDelegationToken(url, token, "foo");
+              Assert.assertNotNull(token.getDelegationToken());
+
+              try {
+                aUrl.renewDelegationToken(url, token);
+                Assert.fail();
+              } catch (Exception ex) {
+                Assert.assertTrue(ex.getMessage().contains("403"));
+              }
+
+              aUrl.getDelegationToken(url, token, "foo");
+
+              aUrl.cancelDelegationToken(url, token);
+              Assert.assertNull(token.getDelegationToken());
+
+              return null;
+            }
+          });
+    } finally {
+      jetty.stop();
+      kdc.stop();
+    }
+  }
+
+}
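A condensed sketch of the client-side flow these tests exercise; the endpoint URL and renewer name are placeholders:

import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;

import java.net.HttpURLConnection;
import java.net.URL;

public class DelegationTokenClientSketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:8080/foo/bar"); // placeholder endpoint
    DelegationTokenAuthenticatedURL.Token token =
        new DelegationTokenAuthenticatedURL.Token();
    DelegationTokenAuthenticatedURL aUrl = new DelegationTokenAuthenticatedURL();

    // The first call authenticates (SPNEGO, or the configured fallback) and
    // caches the obtained delegation token inside 'token' for later reuse.
    aUrl.getDelegationToken(url, token, "renewer");
    HttpURLConnection conn = aUrl.openConnection(url, token);
    System.out.println(conn.getResponseCode());
    aUrl.renewDelegationToken(url, token);  // only the declared renewer may renew
    aUrl.cancelDelegationToken(url, token); // also clears the cached token
  }
}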

+ 68 - 59
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java

@@ -39,12 +39,14 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.lib.wsrs.EnumSetParam;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
-import org.apache.hadoop.security.authentication.client.Authenticator;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
+import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -67,7 +69,6 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.HttpURLConnection;
-import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
@@ -75,7 +76,6 @@ import java.security.PrivilegedExceptionAction;
 import java.text.MessageFormat;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.concurrent.Callable;
 
 /**
  * HttpFSServer implementation of the FileSystemAccess FileSystem.
@@ -217,34 +217,15 @@ public class HttpFSFileSystem extends FileSystem
 
   }
 
-
-  private AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
+  private DelegationTokenAuthenticatedURL authURL;
+  private DelegationTokenAuthenticatedURL.Token authToken =
+      new DelegationTokenAuthenticatedURL.Token();
   private URI uri;
-  private InetSocketAddress httpFSAddr;
   private Path workingDir;
   private UserGroupInformation realUser;
   private String doAs;
-  private Token<?> delegationToken;
 
-  //This method enables handling UGI doAs with SPNEGO, we have to
-  //fallback to the realuser who logged in with Kerberos credentials
-  private <T> T doAsRealUserIfNecessary(final Callable<T> callable)
-    throws IOException {
-    try {
-      if (realUser.getShortUserName().equals(doAs)) {
-        return callable.call();
-      } else {
-        return realUser.doAs(new PrivilegedExceptionAction<T>() {
-          @Override
-          public T run() throws Exception {
-            return callable.call();
-          }
-        });
-      }
-    } catch (Exception ex) {
-      throw new IOException(ex.toString(), ex);
-    }
-  }
+
 
   /**
    * Convenience method that creates a <code>HttpURLConnection</code> for the
@@ -291,20 +272,26 @@ public class HttpFSFileSystem extends FileSystem
   private HttpURLConnection getConnection(final String method,
       Map<String, String> params, Map<String, List<String>> multiValuedParams,
       Path path, boolean makeQualified) throws IOException {
-    if (!realUser.getShortUserName().equals(doAs)) {
-      params.put(DO_AS_PARAM, doAs);
-    }
-    HttpFSKerberosAuthenticator.injectDelegationToken(params, delegationToken);
     if (makeQualified) {
       path = makeQualified(path);
     }
     final URL url = HttpFSUtils.createURL(path, params, multiValuedParams);
-    return doAsRealUserIfNecessary(new Callable<HttpURLConnection>() {
-      @Override
-      public HttpURLConnection call() throws Exception {
-        return getConnection(url, method);
+    try {
+      return UserGroupInformation.getCurrentUser().doAs(
+          new PrivilegedExceptionAction<HttpURLConnection>() {
+            @Override
+            public HttpURLConnection run() throws Exception {
+              return getConnection(url, method);
+            }
+          }
+      );
+    } catch (Exception ex) {
+      if (ex instanceof IOException) {
+        throw (IOException) ex;
+      } else {
+        throw new IOException(ex);
       }
-    });
+    }
   }
 
   /**
@@ -321,12 +308,8 @@ public class HttpFSFileSystem extends FileSystem
   * @throws IOException thrown if an IO error occurs.
    */
   private HttpURLConnection getConnection(URL url, String method) throws IOException {
-    Class<? extends Authenticator> klass =
-      getConf().getClass("httpfs.authenticator.class",
-                         HttpFSKerberosAuthenticator.class, Authenticator.class);
-    Authenticator authenticator = ReflectionUtils.newInstance(klass, getConf());
     try {
-      HttpURLConnection conn = new AuthenticatedURL(authenticator).openConnection(url, authToken);
+      HttpURLConnection conn = authURL.openConnection(url, authToken);
       conn.setRequestMethod(method);
       if (method.equals(HTTP_POST) || method.equals(HTTP_PUT)) {
         conn.setDoOutput(true);
@@ -357,10 +340,17 @@ public class HttpFSFileSystem extends FileSystem
     super.initialize(name, conf);
     try {
       uri = new URI(name.getScheme() + "://" + name.getAuthority());
-      httpFSAddr = NetUtils.createSocketAddr(getCanonicalUri().toString());
     } catch (URISyntaxException ex) {
       throw new IOException(ex);
     }
+
+    Class<? extends DelegationTokenAuthenticator> klass =
+        getConf().getClass("httpfs.authenticator.class",
+            KerberosDelegationTokenAuthenticator.class,
+            DelegationTokenAuthenticator.class);
+    DelegationTokenAuthenticator authenticator =
+        ReflectionUtils.newInstance(klass, getConf());
+    authURL = new DelegationTokenAuthenticatedURL(authenticator);
   }
 
   @Override
@@ -1059,38 +1049,57 @@ public class HttpFSFileSystem extends FileSystem
   @Override
   public Token<?> getDelegationToken(final String renewer)
     throws IOException {
-    return doAsRealUserIfNecessary(new Callable<Token<?>>() {
-      @Override
-      public Token<?> call() throws Exception {
-        return HttpFSKerberosAuthenticator.
-          getDelegationToken(uri, httpFSAddr, authToken, renewer);
+    try {
+      return UserGroupInformation.getCurrentUser().doAs(
+          new PrivilegedExceptionAction<Token<?>>() {
+            @Override
+            public Token<?> run() throws Exception {
+              return authURL.getDelegationToken(uri.toURL(), authToken,
+                  renewer);
+            }
+          }
+      );
+    } catch (Exception ex) {
+      if (ex instanceof IOException) {
+        throw (IOException) ex;
+      } else {
+        throw new IOException(ex);
       }
-    });
+    }
   }
 
   public long renewDelegationToken(final Token<?> token) throws IOException {
-    return doAsRealUserIfNecessary(new Callable<Long>() {
-      @Override
-      public Long call() throws Exception {
-        return HttpFSKerberosAuthenticator.
-          renewDelegationToken(uri,  authToken, token);
+    try {
+      return UserGroupInformation.getCurrentUser().doAs(
+          new PrivilegedExceptionAction<Long>() {
+            @Override
+            public Long run() throws Exception {
+              return authURL.renewDelegationToken(uri.toURL(), authToken);
+            }
+          }
+      );
+    } catch (Exception ex) {
+      if (ex instanceof IOException) {
+        throw (IOException) ex;
+      } else {
+        throw new IOException(ex);
       }
-    });
+    }
   }
 
   public void cancelDelegationToken(final Token<?> token) throws IOException {
-    HttpFSKerberosAuthenticator.
-      cancelDelegationToken(uri, authToken, token);
+    authURL.cancelDelegationToken(uri.toURL(), authToken);
   }
 
   @Override
   public Token<?> getRenewToken() {
-    return delegationToken;
+    return null; // TODO: for renewer
   }
 
   @Override
+  @SuppressWarnings("unchecked")
   public <T extends TokenIdentifier> void setDelegationToken(Token<T> token) {
-    delegationToken = token;
+    // TODO: for renewer
   }
 
   @Override

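The initialize() change above makes the client-side authenticator pluggable through the httpfs.authenticator.class key; a minimal sketch of overriding the default (the pseudo authenticator shown is one of the classes this patch adds):

import org.apache.hadoop.conf.Configuration;

public class HttpFSAuthenticatorConfigSketch {
  public static Configuration pseudoAuthConf() {
    Configuration conf = new Configuration();
    // Defaults to KerberosDelegationTokenAuthenticator; any
    // DelegationTokenAuthenticator subclass may be substituted.
    conf.set("httpfs.authenticator.class",
        "org.apache.hadoop.security.token.delegation.web."
            + "PseudoDelegationTokenAuthenticator");
    return conf;
  }
}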
+ 0 - 188
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSKerberosAuthenticator.java

@@ -1,188 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.http.client;
-
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.security.authentication.client.Authenticator;
-import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
-import org.json.simple.JSONObject;
-
-import java.io.IOException;
-import java.net.HttpURLConnection;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.net.URL;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * A <code>KerberosAuthenticator</code> subclass that falls back to
- * {@link HttpFSPseudoAuthenticator}.
- */
-@InterfaceAudience.Private
-public class HttpFSKerberosAuthenticator extends KerberosAuthenticator {
-
-  /**
-   * Returns the fallback authenticator if the server does not use
-   * Kerberos SPNEGO HTTP authentication.
-   *
-   * @return a {@link HttpFSPseudoAuthenticator} instance.
-   */
-  @Override
-  protected Authenticator getFallBackAuthenticator() {
-    return new HttpFSPseudoAuthenticator();
-  }
-
-  private static final String HTTP_GET = "GET";
-  private static final String HTTP_PUT = "PUT";
-
-  public static final String DELEGATION_PARAM = "delegation";
-  public static final String TOKEN_PARAM = "token";
-  public static final String RENEWER_PARAM = "renewer";
-  public static final String DELEGATION_TOKEN_JSON = "Token";
-  public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString";
-  public static final String RENEW_DELEGATION_TOKEN_JSON = "long";
-
-  /**
-   * DelegationToken operations.
-   */
-  @InterfaceAudience.Private
-  public static enum DelegationTokenOperation {
-    GETDELEGATIONTOKEN(HTTP_GET, true),
-    RENEWDELEGATIONTOKEN(HTTP_PUT, true),
-    CANCELDELEGATIONTOKEN(HTTP_PUT, false);
-
-    private String httpMethod;
-    private boolean requiresKerberosCredentials;
-
-    private DelegationTokenOperation(String httpMethod,
-                                     boolean requiresKerberosCredentials) {
-      this.httpMethod = httpMethod;
-      this.requiresKerberosCredentials = requiresKerberosCredentials;
-    }
-
-    public String getHttpMethod() {
-      return httpMethod;
-    }
-
-    public boolean requiresKerberosCredentials() {
-      return requiresKerberosCredentials;
-    }
-
-  }
-
-  public static void injectDelegationToken(Map<String, String> params,
-                                          Token<?> dtToken)
-    throws IOException {
-    if (dtToken != null) {
-      params.put(DELEGATION_PARAM, dtToken.encodeToUrlString());
-    }
-  }
-
-  private boolean hasDelegationToken(URL url) {
-    return url.getQuery().contains(DELEGATION_PARAM + "=");
-  }
-
-  @Override
-  public void authenticate(URL url, AuthenticatedURL.Token token)
-    throws IOException, AuthenticationException {
-    if (!hasDelegationToken(url)) {
-      super.authenticate(url, token);
-    }
-  }
-
-  public static final String OP_PARAM = "op";
-
-  public static Token<?> getDelegationToken(URI fsURI,
-    InetSocketAddress httpFSAddr, AuthenticatedURL.Token token,
-    String renewer) throws IOException {
-    DelegationTokenOperation op = 
-      DelegationTokenOperation.GETDELEGATIONTOKEN;
-    Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, op.toString());
-    params.put(RENEWER_PARAM,renewer);
-    URL url = HttpFSUtils.createURL(new Path(fsURI), params);
-    AuthenticatedURL aUrl =
-      new AuthenticatedURL(new HttpFSKerberosAuthenticator());
-    try {
-      HttpURLConnection conn = aUrl.openConnection(url, token);
-      conn.setRequestMethod(op.getHttpMethod());
-      HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
-      JSONObject json = (JSONObject) ((JSONObject)
-        HttpFSUtils.jsonParse(conn)).get(DELEGATION_TOKEN_JSON);
-      String tokenStr = (String)
-        json.get(DELEGATION_TOKEN_URL_STRING_JSON);
-      Token<AbstractDelegationTokenIdentifier> dToken =
-        new Token<AbstractDelegationTokenIdentifier>();
-      dToken.decodeFromUrlString(tokenStr);
-      SecurityUtil.setTokenService(dToken, httpFSAddr);
-      return dToken;
-    } catch (AuthenticationException ex) {
-      throw new IOException(ex.toString(), ex);
-    }
-  }
-
-  public static long renewDelegationToken(URI fsURI,
-    AuthenticatedURL.Token token, Token<?> dToken) throws IOException {
-    Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM,
-               DelegationTokenOperation.RENEWDELEGATIONTOKEN.toString());
-    params.put(TOKEN_PARAM, dToken.encodeToUrlString());
-    URL url = HttpFSUtils.createURL(new Path(fsURI), params);
-    AuthenticatedURL aUrl =
-      new AuthenticatedURL(new HttpFSKerberosAuthenticator());
-    try {
-      HttpURLConnection conn = aUrl.openConnection(url, token);
-      conn.setRequestMethod(
-        DelegationTokenOperation.RENEWDELEGATIONTOKEN.getHttpMethod());
-      HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
-      JSONObject json = (JSONObject) ((JSONObject)
-        HttpFSUtils.jsonParse(conn)).get(DELEGATION_TOKEN_JSON);
-      return (Long)(json.get(RENEW_DELEGATION_TOKEN_JSON));
-    } catch (AuthenticationException ex) {
-      throw new IOException(ex.toString(), ex);
-    }
-  }
-
-  public static void cancelDelegationToken(URI fsURI,
-    AuthenticatedURL.Token token, Token<?> dToken) throws IOException {
-    Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM,
-               DelegationTokenOperation.CANCELDELEGATIONTOKEN.toString());
-    params.put(TOKEN_PARAM, dToken.encodeToUrlString());
-    URL url = HttpFSUtils.createURL(new Path(fsURI), params);
-    AuthenticatedURL aUrl =
-      new AuthenticatedURL(new HttpFSKerberosAuthenticator());
-    try {
-      HttpURLConnection conn = aUrl.openConnection(url, token);
-      conn.setRequestMethod(
-        DelegationTokenOperation.CANCELDELEGATIONTOKEN.getHttpMethod());
-      HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
-    } catch (AuthenticationException ex) {
-      throw new IOException(ex.toString(), ex);
-    }
-  }
-
-}

+ 8 - 7
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java

@@ -20,7 +20,10 @@ package org.apache.hadoop.fs.http.server;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;
+
 import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
 import java.io.FileReader;
 import java.io.IOException;
 import java.io.Reader;
@@ -32,7 +35,9 @@ import java.util.Properties;
  * from HttpFSServer's server configuration.
  */
 @InterfaceAudience.Private
-public class HttpFSAuthenticationFilter extends AuthenticationFilter {
+public class HttpFSAuthenticationFilter
+    extends DelegationTokenAuthenticationFilter {
+
   private static final String CONF_PREFIX = "httpfs.authentication.";
 
   private static final String SIGNATURE_SECRET_FILE = SIGNATURE_SECRET + ".file";
@@ -50,7 +55,8 @@ public class HttpFSAuthenticationFilter extends AuthenticationFilter {
    * @return hadoop-auth configuration read from HttpFSServer's configuration.
    */
   @Override
-  protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) {
+  protected Properties getConfiguration(String configPrefix,
+      FilterConfig filterConfig) throws ServletException {
     Properties props = new Properties();
     Configuration conf = HttpFSServerWebApp.get().getConfig();
 
@@ -64,11 +70,6 @@ public class HttpFSAuthenticationFilter extends AuthenticationFilter {
       }
     }
 
-    if (props.getProperty(AUTH_TYPE).equals("kerberos")) {
-      props.setProperty(AUTH_TYPE,
-                        HttpFSKerberosAuthenticationHandler.class.getName());
-    }
-
     String signatureSecretFile = props.getProperty(SIGNATURE_SECRET_FILE, null);
     if (signatureSecretFile == null) {
       throw new RuntimeException("Undefined property: " + SIGNATURE_SECRET_FILE);

+ 0 - 230
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandler.java

@@ -1,230 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.http.server;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
-import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
-import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator.DelegationTokenOperation;
-import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
-import org.apache.hadoop.lib.service.DelegationTokenManager;
-import org.apache.hadoop.lib.service.DelegationTokenManagerException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.security.authentication.server.AuthenticationToken;
-import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
-import org.apache.hadoop.security.token.Token;
-import org.json.simple.JSONObject;
-
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import javax.ws.rs.core.MediaType;
-import java.io.IOException;
-import java.io.Writer;
-import java.text.MessageFormat;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Server side <code>AuthenticationHandler</code> that authenticates requests
- * using the incoming delegation token as a 'delegation' query string parameter.
- * <p/>
- * If no delegation token is present in the request, it delegates to the
- * {@link KerberosAuthenticationHandler}
- */
-@InterfaceAudience.Private
-public class HttpFSKerberosAuthenticationHandler
-  extends KerberosAuthenticationHandler {
-
-  static final Set<String> DELEGATION_TOKEN_OPS =
-    new HashSet<String>();
-
-  static {
-    DELEGATION_TOKEN_OPS.add(
-      DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
-    DELEGATION_TOKEN_OPS.add(
-      DelegationTokenOperation.RENEWDELEGATIONTOKEN.toString());
-    DELEGATION_TOKEN_OPS.add(
-      DelegationTokenOperation.CANCELDELEGATIONTOKEN.toString());
-  }
-
-  public static final String TYPE = "kerberos-dt";
-
-  /**
-   * Returns authentication type of the handler.
-   *
-   * @return <code>kerberos-dt</code>
-   */
-  @Override
-  public String getType() {
-    return TYPE;
-  }
-
-  private static final String ENTER = System.getProperty("line.separator");
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public boolean managementOperation(AuthenticationToken token,
-      HttpServletRequest request, HttpServletResponse response)
-    throws IOException, AuthenticationException {
-    boolean requestContinues = true;
-    String op = request.getParameter(HttpFSFileSystem.OP_PARAM);
-    op = (op != null) ? op.toUpperCase() : null;
-    if (DELEGATION_TOKEN_OPS.contains(op) &&
-        !request.getMethod().equals("OPTIONS")) {
-      DelegationTokenOperation dtOp =
-        DelegationTokenOperation.valueOf(op);
-      if (dtOp.getHttpMethod().equals(request.getMethod())) {
-        if (dtOp.requiresKerberosCredentials() && token == null) {
-          response.sendError(HttpServletResponse.SC_UNAUTHORIZED,
-            MessageFormat.format(
-              "Operation [{0}] requires SPNEGO authentication established",
-              dtOp));
-          requestContinues = false;
-        } else {
-          DelegationTokenManager tokenManager =
-            HttpFSServerWebApp.get().get(DelegationTokenManager.class);
-          try {
-            Map map = null;
-            switch (dtOp) {
-              case GETDELEGATIONTOKEN:
-                String renewerParam =
-                  request.getParameter(HttpFSKerberosAuthenticator.RENEWER_PARAM);
-                if (renewerParam == null) {
-                  renewerParam = token.getUserName();
-                }
-                Token<?> dToken = tokenManager.createToken(
-                  UserGroupInformation.getCurrentUser(), renewerParam);
-                map = delegationTokenToJSON(dToken);
-                break;
-              case RENEWDELEGATIONTOKEN:
-              case CANCELDELEGATIONTOKEN:
-                String tokenParam =
-                  request.getParameter(HttpFSKerberosAuthenticator.TOKEN_PARAM);
-                if (tokenParam == null) {
-                  response.sendError(HttpServletResponse.SC_BAD_REQUEST,
-                    MessageFormat.format(
-                      "Operation [{0}] requires the parameter [{1}]",
-                      dtOp, HttpFSKerberosAuthenticator.TOKEN_PARAM));
-                  requestContinues = false;
-                } else {
-                  if (dtOp == DelegationTokenOperation.CANCELDELEGATIONTOKEN) {
-                    Token<DelegationTokenIdentifier> dt =
-                      new Token<DelegationTokenIdentifier>();
-                    dt.decodeFromUrlString(tokenParam);
-                    tokenManager.cancelToken(dt,
-                      UserGroupInformation.getCurrentUser().getUserName());
-                  } else {
-                    Token<DelegationTokenIdentifier> dt =
-                      new Token<DelegationTokenIdentifier>();
-                    dt.decodeFromUrlString(tokenParam);
-                    long expirationTime =
-                      tokenManager.renewToken(dt, token.getUserName());
-                    map = new HashMap();
-                    map.put("long", expirationTime);
-                  }
-                }
-                break;
-            }
-            if (requestContinues) {
-              response.setStatus(HttpServletResponse.SC_OK);
-              if (map != null) {
-                response.setContentType(MediaType.APPLICATION_JSON);
-                Writer writer = response.getWriter();
-                JSONObject.writeJSONString(map, writer);
-                writer.write(ENTER);
-                writer.flush();
-
-              }
-              requestContinues = false;
-            }
-          } catch (DelegationTokenManagerException ex) {
-            throw new AuthenticationException(ex.toString(), ex);
-          }
-        }
-      } else {
-        response.sendError(HttpServletResponse.SC_BAD_REQUEST,
-          MessageFormat.format(
-            "Wrong HTTP method [{0}] for operation [{1}], it should be [{2}]",
-            request.getMethod(), dtOp, dtOp.getHttpMethod()));
-        requestContinues = false;
-      }
-    }
-    return requestContinues;
-  }
-
-  @SuppressWarnings("unchecked")
-  private static Map delegationTokenToJSON(Token token) throws IOException {
-    Map json = new LinkedHashMap();
-    json.put(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON,
-             token.encodeToUrlString());
-    Map response = new LinkedHashMap();
-    response.put(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON, json);
-    return response;
-  }
-  
-  /**
-   * Authenticates a request looking for the <code>delegation</code>
-   * query-string parameter and verifying it is a valid token. If there is not
-   * <code>delegation</code> query-string parameter, it delegates the
-   * authentication to the {@link KerberosAuthenticationHandler} unless it is
-   * disabled.
-   *
-   * @param request the HTTP client request.
-   * @param response the HTTP client response.
-   *
-   * @return the authentication token for the authenticated request.
-   * @throws IOException thrown if an IO error occurred.
-   * @throws AuthenticationException thrown if the authentication failed.
-   */
-  @Override
-  public AuthenticationToken authenticate(HttpServletRequest request,
-                                          HttpServletResponse response)
-    throws IOException, AuthenticationException {
-    AuthenticationToken token;
-    String delegationParam =
-      request.getParameter(HttpFSKerberosAuthenticator.DELEGATION_PARAM);
-    if (delegationParam != null) {
-      try {
-        Token<DelegationTokenIdentifier> dt =
-          new Token<DelegationTokenIdentifier>();
-        dt.decodeFromUrlString(delegationParam);
-        DelegationTokenManager tokenManager =
-          HttpFSServerWebApp.get().get(DelegationTokenManager.class);
-        UserGroupInformation ugi = tokenManager.verifyToken(dt);
-        final String shortName = ugi.getShortUserName();
-
-        // creating an ephemeral token
-        token = new AuthenticationToken(shortName, ugi.getUserName(),
-                                        getType());
-        token.setExpires(0);
-      } catch (Throwable ex) {
-        throw new AuthenticationException("Could not verify DelegationToken, " +
-                                          ex.toString(), ex);
-      }
-    } else {
-      token = super.authenticate(request, response);
-    }
-    return token;
-  }
-
-
-}
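
For orientation, a minimal client-side sketch of consuming the JSON payload produced by delegationTokenToJSON() above (the parseToken helper and its parsed response argument are illustrative, not part of the removed code; the constants are the removed HttpFSKerberosAuthenticator labels exercised by the tests further down in this diff):

import java.io.IOException;

import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
import org.apache.hadoop.security.token.Token;
import org.json.simple.JSONObject;

// Decode a GETDELEGATIONTOKEN response; subsequent requests pass the encoded
// string back through the "delegation" query-string parameter handled by
// authenticate() above.
static Token<DelegationTokenIdentifier> parseToken(JSONObject response)
    throws IOException {
  JSONObject json = (JSONObject)
      response.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON);
  String encoded = (String)
      json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
  Token<DelegationTokenIdentifier> dt = new Token<DelegationTokenIdentifier>();
  dt.decodeFromUrlString(encoded);  // inverse of Token#encodeToUrlString()
  return dt;
}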

+ 0 - 78
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenManager.java

@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.lib.service;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-
-/**
- * Service interface to manage HttpFS delegation tokens.
- */
-@InterfaceAudience.Private
-public interface DelegationTokenManager {
-
-  /**
-   * Creates a delegation token.
-   *
-   * @param ugi UGI creating the token.
-   * @param renewer token renewer.
-   * @return new delegation token.
-   * @throws DelegationTokenManagerException thrown if the token could not be
-   * created.
-   */
-  public Token<DelegationTokenIdentifier> createToken(UserGroupInformation ugi,
-                                                      String renewer)
-    throws DelegationTokenManagerException;
-
-  /**
-   * Renews a delegation token.
-   *
-   * @param token delegation token to renew.
-   * @param renewer token renewer.
-   * @return epoch expiration time.
-   * @throws DelegationTokenManagerException thrown if the token could not be
-   * renewed.
-   */
-  public long renewToken(Token<DelegationTokenIdentifier> token, String renewer)
-    throws DelegationTokenManagerException;
-
-  /**
-   * Cancels a delegation token.
-   *
-   * @param token delegation token to cancel.
-   * @param canceler token canceler.
-   * @throws DelegationTokenManagerException thrown if the token could not be
-   * canceled.
-   */
-  public void cancelToken(Token<DelegationTokenIdentifier> token,
-                          String canceler)
-    throws DelegationTokenManagerException;
-
-  /**
-   * Verifies a delegation token.
-   *
-   * @param token delegation token to verify.
-   * @return the UGI for the token.
-   * @throws DelegationTokenManagerException thrown if the token could not be
-   * verified.
-   */
-  public UserGroupInformation verifyToken(Token<DelegationTokenIdentifier> token)
-    throws DelegationTokenManagerException;
-
-}
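
A lifecycle sketch against this interface, mirroring the TestDelegationTokenManagerService test removed further down in this diff (tm is assumed to come from server.get(DelegationTokenManager.class)):

import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

// create -> verify -> renew -> cancel; verifyToken() on a cancelled token
// throws DelegationTokenManagerException.
Token<DelegationTokenIdentifier> token =
    tm.createToken(UserGroupInformation.getCurrentUser(), "foo");
UserGroupInformation owner = tm.verifyToken(token);  // resolves the owning UGI
long expiry = tm.renewToken(token, "foo");           // epoch expiration time
tm.cancelToken(token, "foo");                        // verifyToken() now fails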

+ 0 - 51
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenManagerException.java

@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.lib.service;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.lib.lang.XException;
-
-/**
- * Exception thrown by the {@link DelegationTokenManager} service implementation.
- */
-@InterfaceAudience.Private
-public class DelegationTokenManagerException extends XException {
-
-  public enum ERROR implements XException.ERROR {
-    DT01("Could not verify delegation token, {0}"),
-    DT02("Could not renew delegation token, {0}"),
-    DT03("Could not cancel delegation token, {0}"),
-    DT04("Could not create delegation token, {0}");
-
-    private String template;
-
-    ERROR(String template) {
-      this.template = template;
-    }
-
-    @Override
-    public String getTemplate() {
-      return template;
-    }
-  }
-
-  public DelegationTokenManagerException(ERROR error, Object... params) {
-    super(error, params);
-  }
-
-}
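
Implementations filled the {0} placeholder of these templates with the underlying failure text, as in this fragment taken verbatim from the DelegationTokenManagerService removal below:

try {
  return secretManager.renewToken(token, renewer);
} catch (IOException ex) {
  throw new DelegationTokenManagerException(
    DelegationTokenManagerException.ERROR.DT02, ex.toString(), ex);
}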

+ 0 - 242
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/DelegationTokenManagerService.java

@@ -1,242 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.lib.service.security;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
-import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.lib.server.BaseService;
-import org.apache.hadoop.lib.server.ServerException;
-import org.apache.hadoop.lib.server.ServiceException;
-import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
-import org.apache.hadoop.lib.service.DelegationTokenManager;
-import org.apache.hadoop.lib.service.DelegationTokenManagerException;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-
-/**
- * DelegationTokenManager service implementation.
- */
-@InterfaceAudience.Private
-public class DelegationTokenManagerService extends BaseService
-  implements DelegationTokenManager {
-
-  private static final String PREFIX = "delegation.token.manager";
-
-  private static final String UPDATE_INTERVAL = "update.interval";
-
-  private static final String MAX_LIFETIME = "max.lifetime";
-
-  private static final String RENEW_INTERVAL = "renew.interval";
-
-  private static final long HOUR = 60 * 60 * 1000;
-  private static final long DAY = 24 * HOUR;
-
-  DelegationTokenSecretManager secretManager = null;
-
-  private Text tokenKind;
-
-  public DelegationTokenManagerService() {
-    super(PREFIX);
-  }
-
-  /**
-   * Initializes the service.
-   *
-   * @throws ServiceException thrown if the service could not be initialized.
-   */
-  @Override
-  protected void init() throws ServiceException {
-
-    long updateInterval = getServiceConfig().getLong(UPDATE_INTERVAL, DAY);
-    long maxLifetime = getServiceConfig().getLong(MAX_LIFETIME, 7 * DAY);
-    long renewInterval = getServiceConfig().getLong(RENEW_INTERVAL, DAY);
-    tokenKind = (HttpFSServerWebApp.get().isSslEnabled())
-                ? SWebHdfsFileSystem.TOKEN_KIND : WebHdfsFileSystem.TOKEN_KIND;
-    secretManager = new DelegationTokenSecretManager(tokenKind, updateInterval,
-                                                     maxLifetime,
-                                                     renewInterval, HOUR);
-    try {
-      secretManager.startThreads();
-    } catch (IOException ex) {
-      throw new ServiceException(ServiceException.ERROR.S12,
-                                 DelegationTokenManager.class.getSimpleName(),
-                                 ex.toString(), ex);
-    }
-  }
-
-  /**
-   * Destroys the service.
-   */
-  @Override
-  public void destroy() {
-    secretManager.stopThreads();
-    super.destroy();
-  }
-
-  /**
-   * Returns the service interface.
-   *
-   * @return the service interface.
-   */
-  @Override
-  public Class getInterface() {
-    return DelegationTokenManager.class;
-  }
-
-  /**
-   * Creates a delegation token.
-   *
-   * @param ugi UGI creating the token.
-   * @param renewer token renewer.
-   * @return new delegation token.
-   * @throws DelegationTokenManagerException thrown if the token could not be
-   * created.
-   */
-  @Override
-  public Token<DelegationTokenIdentifier> createToken(UserGroupInformation ugi,
-                                                      String renewer)
-    throws DelegationTokenManagerException {
-    renewer = (renewer == null) ? ugi.getShortUserName() : renewer;
-    String user = ugi.getUserName();
-    Text owner = new Text(user);
-    Text realUser = null;
-    if (ugi.getRealUser() != null) {
-      realUser = new Text(ugi.getRealUser().getUserName());
-    }
-    DelegationTokenIdentifier tokenIdentifier =
-      new DelegationTokenIdentifier(tokenKind, owner, new Text(renewer), realUser);
-    Token<DelegationTokenIdentifier> token =
-      new Token<DelegationTokenIdentifier>(tokenIdentifier, secretManager);
-    try {
-      SecurityUtil.setTokenService(token,
-                                   HttpFSServerWebApp.get().getAuthority());
-    } catch (ServerException ex) {
-      throw new DelegationTokenManagerException(
-        DelegationTokenManagerException.ERROR.DT04, ex.toString(), ex);
-    }
-    return token;
-  }
-
-  /**
-   * Renews a delegation token.
-   *
-   * @param token delegation token to renew.
-   * @param renewer token renewer.
-   * @return epoch expiration time.
-   * @throws DelegationTokenManagerException thrown if the token could not be
-   * renewed.
-   */
-  @Override
-  public long renewToken(Token<DelegationTokenIdentifier> token, String renewer)
-    throws DelegationTokenManagerException {
-    try {
-      return secretManager.renewToken(token, renewer);
-    } catch (IOException ex) {
-      throw new DelegationTokenManagerException(
-        DelegationTokenManagerException.ERROR.DT02, ex.toString(), ex);
-    }
-  }
-
-  /**
-   * Cancels a delegation token.
-   *
-   * @param token delegation token to cancel.
-   * @param canceler token canceler.
-   * @throws DelegationTokenManagerException thrown if the token could not be
-   * canceled.
-   */
-  @Override
-  public void cancelToken(Token<DelegationTokenIdentifier> token,
-                          String canceler)
-    throws DelegationTokenManagerException {
-    try {
-      secretManager.cancelToken(token, canceler);
-    } catch (IOException ex) {
-      throw new DelegationTokenManagerException(
-        DelegationTokenManagerException.ERROR.DT03, ex.toString(), ex);
-    }
-  }
-
-  /**
-   * Verifies a delegation token.
-   *
-   * @param token delegation token to verify.
-   * @return the UGI for the token.
-   * @throws DelegationTokenManagerException thrown if the token could not be
-   * verified.
-   */
-  @Override
-  public UserGroupInformation verifyToken(Token<DelegationTokenIdentifier> token)
-    throws DelegationTokenManagerException {
-    ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
-    DataInputStream dis = new DataInputStream(buf);
-    DelegationTokenIdentifier id = new DelegationTokenIdentifier(tokenKind);
-    try {
-      id.readFields(dis);
-      dis.close();
-      secretManager.verifyToken(id, token.getPassword());
-    } catch (Exception ex) {
-      throw new DelegationTokenManagerException(
-        DelegationTokenManagerException.ERROR.DT01, ex.toString(), ex);
-    }
-    return id.getUser();
-  }
-
-  private static class DelegationTokenSecretManager
-    extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
-
-    private Text tokenKind;
-
-    /**
-     * Create a secret manager.
-     *
-     * @param delegationKeyUpdateInterval the number of seconds for rolling new
-     * secret keys.
-     * @param delegationTokenMaxLifetime the maximum lifetime of the delegation
-     * tokens
-     * @param delegationTokenRenewInterval how often the tokens must be renewed
-     * @param delegationTokenRemoverScanInterval how often the tokens are
-     * scanned for expired tokens
-     */
-    public DelegationTokenSecretManager(Text tokenKind, long delegationKeyUpdateInterval,
-                                        long delegationTokenMaxLifetime,
-                                        long delegationTokenRenewInterval,
-                                        long delegationTokenRemoverScanInterval) {
-      super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
-            delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
-      this.tokenKind = tokenKind;
-    }
-
-    @Override
-    public DelegationTokenIdentifier createIdentifier() {
-      return new DelegationTokenIdentifier(tokenKind);
-    }
-
-  }
-
-}
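
For reference, wiring this service into an HttpFS server followed the pattern of the TestDelegationTokenManagerService#tokens test removed below (dir is an illustrative working directory):

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
import org.apache.hadoop.lib.service.DelegationTokenManager;
import org.apache.hadoop.util.StringUtils;

Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",",
    Arrays.asList(DelegationTokenManagerService.class.getName())));
HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, conf);
server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(), 14000));
server.init();
DelegationTokenManager tm = server.get(DelegationTokenManager.class);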

+ 0 - 9
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml

@@ -35,7 +35,6 @@
       org.apache.hadoop.lib.service.scheduler.SchedulerService,
       org.apache.hadoop.lib.service.security.GroupsService,
       org.apache.hadoop.lib.service.security.ProxyUserService,
-      org.apache.hadoop.lib.service.security.DelegationTokenManagerService,
       org.apache.hadoop.lib.service.hadoop.FileSystemAccessService
     </value>
     <description>
@@ -226,12 +225,4 @@
     </description>
   </property>
 
-  <property>
-    <name>httpfs.user.provider.user.pattern</name>
-    <value>^[A-Za-z_][A-Za-z0-9._-]*[$]?$</value>
-    <description>
-      Valid pattern for user and group names; it must be a valid Java regex.
-    </description>
-  </property>
-
 </configuration>

+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java

@@ -17,15 +17,19 @@
  */
 package org.apache.hadoop.fs.http.server;
 
+import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
+
 import javax.servlet.ServletException;
 import java.util.Properties;
 
 public class HttpFSKerberosAuthenticationHandlerForTesting
-  extends HttpFSKerberosAuthenticationHandler {
+  extends KerberosDelegationTokenAuthenticationHandler {
 
   @Override
   public void init(Properties config) throws ServletException {
     //NOP overwrite to avoid Kerberos initialization
+    config.setProperty(TOKEN_KIND, "t");
+    initTokenManager(config);
   }
 
   @Override

+ 0 - 94
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSCustomUserName.java

@@ -1,94 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.http.server;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
-import org.apache.hadoop.lib.server.Service;
-import org.apache.hadoop.lib.server.ServiceException;
-import org.apache.hadoop.lib.service.Groups;
-import org.apache.hadoop.lib.wsrs.UserProvider;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
-import org.apache.hadoop.security.authentication.server.AuthenticationToken;
-import org.apache.hadoop.security.authentication.util.Signer;
-import org.apache.hadoop.test.HFSTestCase;
-import org.apache.hadoop.test.HadoopUsersConfTestHelper;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.apache.hadoop.test.TestHdfs;
-import org.apache.hadoop.test.TestHdfsHelper;
-import org.apache.hadoop.test.TestJetty;
-import org.apache.hadoop.test.TestJettyHelper;
-import org.json.simple.JSONObject;
-import org.json.simple.parser.JSONParser;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mortbay.jetty.Server;
-import org.mortbay.jetty.webapp.WebAppContext;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.io.Writer;
-import java.net.HttpURLConnection;
-import java.net.URL;
-import java.text.MessageFormat;
-import java.util.Arrays;
-import java.util.List;
-
-public class TestHttpFSCustomUserName extends HFSTestCase {
-
-  @Test
-  @TestDir
-  @TestJetty
-  public void defaultUserName() throws Exception {
-    String dir = TestDirHelper.getTestDir().getAbsolutePath();
-
-    Configuration httpfsConf = new Configuration(false);
-    HttpFSServerWebApp server = 
-      new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
-    server.init();
-    Assert.assertEquals(UserProvider.USER_PATTERN_DEFAULT, 
-      UserProvider.getUserPattern().pattern());
-    server.destroy();
-  }
-
-  @Test
-  @TestDir
-  @TestJetty
-  public void customUserName() throws Exception {
-    String dir = TestDirHelper.getTestDir().getAbsolutePath();
-
-    Configuration httpfsConf = new Configuration(false);
-    httpfsConf.set(UserProvider.USER_PATTERN_KEY, "1");
-    HttpFSServerWebApp server =
-      new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
-    server.init();
-    Assert.assertEquals("1", UserProvider.getUserPattern().pattern());
-    server.destroy();
-  }
-
-}

+ 0 - 316
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSKerberosAuthenticationHandler.java

@@ -1,316 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.http.server;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
-import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
-import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator.DelegationTokenOperation;
-import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
-import org.apache.hadoop.lib.service.DelegationTokenManager;
-import org.apache.hadoop.lib.service.DelegationTokenManagerException;
-import org.apache.hadoop.lib.servlet.ServerWebApp;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
-import org.apache.hadoop.security.authentication.server.AuthenticationToken;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.test.HFSTestCase;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.json.simple.JSONObject;
-import org.json.simple.parser.JSONParser;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import javax.ws.rs.core.MediaType;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-
-public class TestHttpFSKerberosAuthenticationHandler extends HFSTestCase {
-
-  @Test
-  @TestDir
-  public void testManagementOperationsWebHdfsFileSystem() throws Exception {
-    testManagementOperations(WebHdfsFileSystem.TOKEN_KIND);
-  }
-
-  @Test
-  @TestDir
-  public void testManagementOperationsSWebHdfsFileSystem() throws Exception {
-    try {
-      System.setProperty(HttpFSServerWebApp.NAME +
-          ServerWebApp.SSL_ENABLED, "true");
-      testManagementOperations(SWebHdfsFileSystem.TOKEN_KIND);
-    } finally {
-      System.getProperties().remove(HttpFSServerWebApp.NAME +
-          ServerWebApp.SSL_ENABLED);
-    }
-  }
-
-  private void testManagementOperations(Text expectedTokenKind) throws Exception {
-    String dir = TestDirHelper.getTestDir().getAbsolutePath();
-
-    Configuration httpfsConf = new Configuration(false);
-    HttpFSServerWebApp server =
-      new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
-    server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(), 
-                                              14000));
-    AuthenticationHandler handler =
-      new HttpFSKerberosAuthenticationHandlerForTesting();
-    try {
-      server.init();
-      handler.init(null);
-
-      testNonManagementOperation(handler);
-      testManagementOperationErrors(handler);
-      testGetToken(handler, null, expectedTokenKind);
-      testGetToken(handler, "foo", expectedTokenKind);
-      testCancelToken(handler);
-      testRenewToken(handler);
-
-    } finally {
-      if (handler != null) {
-        handler.destroy();
-      }
-    server.destroy();
-    }
-  }
-
-  private void testNonManagementOperation(AuthenticationHandler handler)
-    throws Exception {
-    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
-    Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
-      thenReturn(null);
-    Assert.assertTrue(handler.managementOperation(null, request, null));
-    Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
-      thenReturn(HttpFSFileSystem.Operation.CREATE.toString());
-    Assert.assertTrue(handler.managementOperation(null, request, null));
-  }
-
-  private void testManagementOperationErrors(AuthenticationHandler handler)
-    throws Exception {
-    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
-    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
-    Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
-      thenReturn(DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
-    Mockito.when(request.getMethod()).thenReturn("FOO");
-    Assert.assertFalse(handler.managementOperation(null, request, response));
-    Mockito.verify(response).sendError(
-      Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
-      Mockito.startsWith("Wrong HTTP method"));
-
-    Mockito.reset(response);
-    Mockito.when(request.getMethod()).
-      thenReturn(DelegationTokenOperation.GETDELEGATIONTOKEN.getHttpMethod());
-    Assert.assertFalse(handler.managementOperation(null, request, response));
-    Mockito.verify(response).sendError(
-      Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED),
-      Mockito.contains("requires SPNEGO"));
-  }
-
-  private void testGetToken(AuthenticationHandler handler, String renewer,
-      Text expectedTokenKind) throws Exception {
-    DelegationTokenOperation op = DelegationTokenOperation.GETDELEGATIONTOKEN;
-    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
-    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
-    Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
-      thenReturn(op.toString());
-    Mockito.when(request.getMethod()).
-      thenReturn(op.getHttpMethod());
-
-    AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
-    Mockito.when(token.getUserName()).thenReturn("user");
-    Assert.assertFalse(handler.managementOperation(null, request, response));
-    Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.RENEWER_PARAM)).
-      thenReturn(renewer);
-
-    Mockito.reset(response);
-    StringWriter writer = new StringWriter();
-    PrintWriter pwriter = new PrintWriter(writer);
-    Mockito.when(response.getWriter()).thenReturn(pwriter);
-    Assert.assertFalse(handler.managementOperation(token, request, response));
-    if (renewer == null) {
-      Mockito.verify(token).getUserName();
-    } else {
-      Mockito.verify(token, Mockito.never()).getUserName();
-    }
-    Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
-    Mockito.verify(response).setContentType(MediaType.APPLICATION_JSON);
-    pwriter.close();
-    String responseOutput = writer.toString();
-    String tokenLabel = HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON;
-    Assert.assertTrue(responseOutput.contains(tokenLabel));
-    Assert.assertTrue(responseOutput.contains(
-      HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON));
-    JSONObject json = (JSONObject) new JSONParser().parse(responseOutput);
-    json = (JSONObject) json.get(tokenLabel);
-    String tokenStr;
-    tokenStr = (String)
-      json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
-    Token<DelegationTokenIdentifier> dt = new Token<DelegationTokenIdentifier>();
-    dt.decodeFromUrlString(tokenStr);
-    HttpFSServerWebApp.get().get(DelegationTokenManager.class).verifyToken(dt);
-    Assert.assertEquals(expectedTokenKind, dt.getKind());
-  }
-
-  private void testCancelToken(AuthenticationHandler handler)
-    throws Exception {
-    DelegationTokenOperation op =
-      DelegationTokenOperation.CANCELDELEGATIONTOKEN;
-    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
-    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
-    Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
-      thenReturn(op.toString());
-    Mockito.when(request.getMethod()).
-      thenReturn(op.getHttpMethod());
-
-    Assert.assertFalse(handler.managementOperation(null, request, response));
-    Mockito.verify(response).sendError(
-      Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
-      Mockito.contains("requires the parameter [token]"));
-
-    Mockito.reset(response);
-    Token<DelegationTokenIdentifier> token =
-      HttpFSServerWebApp.get().get(DelegationTokenManager.class).createToken(
-        UserGroupInformation.getCurrentUser(), "foo");
-    Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.TOKEN_PARAM)).
-      thenReturn(token.encodeToUrlString());
-    Assert.assertFalse(handler.managementOperation(null, request, response));
-    Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
-    try {
-      HttpFSServerWebApp.get().get(DelegationTokenManager.class).verifyToken(token);
-      Assert.fail();
-    }
-    catch (DelegationTokenManagerException ex) {
-      Assert.assertTrue(ex.toString().contains("DT01"));
-    }
-  }
-
-  private void testRenewToken(AuthenticationHandler handler)
-    throws Exception {
-    DelegationTokenOperation op =
-      DelegationTokenOperation.RENEWDELEGATIONTOKEN;
-    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
-    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
-    Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
-      thenReturn(op.toString());
-    Mockito.when(request.getMethod()).
-      thenReturn(op.getHttpMethod());
-
-    Assert.assertFalse(handler.managementOperation(null, request, response));
-    Mockito.verify(response).sendError(
-      Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED),
-      Mockito.contains("equires SPNEGO authentication established"));
-
-    Mockito.reset(response);
-    AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
-    Mockito.when(token.getUserName()).thenReturn("user");
-    Assert.assertFalse(handler.managementOperation(token, request, response));
-    Mockito.verify(response).sendError(
-      Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
-      Mockito.contains("requires the parameter [token]"));
-
-    Mockito.reset(response);
-    StringWriter writer = new StringWriter();
-    PrintWriter pwriter = new PrintWriter(writer);
-    Mockito.when(response.getWriter()).thenReturn(pwriter);
-    Token<DelegationTokenIdentifier> dToken =
-      HttpFSServerWebApp.get().get(DelegationTokenManager.class).createToken(
-        UserGroupInformation.getCurrentUser(), "user");
-    Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.TOKEN_PARAM)).
-      thenReturn(dToken.encodeToUrlString());
-    Assert.assertFalse(handler.managementOperation(token, request, response));
-    Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
-    pwriter.close();
-    Assert.assertTrue(writer.toString().contains("long"));
-    HttpFSServerWebApp.get().get(DelegationTokenManager.class).verifyToken(dToken);
-  }
-
-  @Test
-  @TestDir
-  public void testAuthenticate() throws Exception {
-    String dir = TestDirHelper.getTestDir().getAbsolutePath();
-
-    Configuration httpfsConf = new Configuration(false);
-    HttpFSServerWebApp server =
-      new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
-    server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(),
-                                              14000));
-    AuthenticationHandler handler =
-      new HttpFSKerberosAuthenticationHandlerForTesting();
-    try {
-      server.init();
-      handler.init(null);
-
-      testValidDelegationToken(handler);
-      testInvalidDelegationToken(handler);
-    } finally {
-      if (handler != null) {
-        handler.destroy();
-      }
-    server.destroy();
-    }
-  }
-
-  private void testValidDelegationToken(AuthenticationHandler handler)
-    throws Exception {
-    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
-    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
-    Token<DelegationTokenIdentifier> dToken =
-      HttpFSServerWebApp.get().get(DelegationTokenManager.class).createToken(
-        UserGroupInformation.getCurrentUser(), "user");
-    Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.DELEGATION_PARAM)).
-      thenReturn(dToken.encodeToUrlString());
-
-    AuthenticationToken token = handler.authenticate(request, response);
-    Assert.assertEquals(UserGroupInformation.getCurrentUser().getShortUserName(),
-                        token.getUserName());
-    Assert.assertEquals(0, token.getExpires());
-    Assert.assertEquals(HttpFSKerberosAuthenticationHandler.TYPE,
-                        token.getType());
-    Assert.assertTrue(token.isExpired());
-  }
-
-  private void testInvalidDelegationToken(AuthenticationHandler handler)
-    throws Exception {
-    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
-    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
-    Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.DELEGATION_PARAM)).
-      thenReturn("invalid");
-
-    try {
-      handler.authenticate(request, response);
-      Assert.fail();
-    } catch (AuthenticationException ex) {
-      //NOP
-    } catch (Exception ex) {
-      Assert.fail();
-    }
-  }
-
-}

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java

@@ -18,6 +18,8 @@
 package org.apache.hadoop.fs.http.server;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
+import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
 import org.json.simple.JSONArray;
 import org.junit.Assert;
 
@@ -43,7 +45,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttrCodec;
-import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
 import org.apache.hadoop.lib.server.Service;
 import org.apache.hadoop.lib.server.ServiceException;
 import org.apache.hadoop.lib.service.Groups;
@@ -682,7 +683,7 @@ public class TestHttpFSServer extends HFSTestCase {
 
     AuthenticationToken token =
       new AuthenticationToken("u", "p",
-        HttpFSKerberosAuthenticationHandlerForTesting.TYPE);
+          new KerberosDelegationTokenAuthenticationHandler().getType());
     token.setExpires(System.currentTimeMillis() + 100000000);
     Signer signer = new Signer(new StringSignerSecretProvider("secret"));
     String tokenSigned = signer.sign(token.toString());
@@ -706,9 +707,9 @@ public class TestHttpFSServer extends HFSTestCase {
     JSONObject json = (JSONObject)
       new JSONParser().parse(new InputStreamReader(conn.getInputStream()));
     json = (JSONObject)
-      json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON);
+      json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
     String tokenStr = (String)
-        json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
+        json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
 
     url = new URL(TestJettyHelper.getJettyURL(),
                   "/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java

@@ -23,11 +23,11 @@ import org.apache.hadoop.fs.DelegationTokenRenewer;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
-import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
 import org.apache.hadoop.test.HFSTestCase;
 import org.apache.hadoop.test.KerberosTestUtils;
 import org.apache.hadoop.test.TestDir;
@@ -166,9 +166,9 @@ public class TestHttpFSWithKerberos extends HFSTestCase {
           .parse(new InputStreamReader(conn.getInputStream()));
         json =
           (JSONObject) json
-            .get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON);
+            .get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
         String tokenStr = (String) json
-          .get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
+          .get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
 
         //access httpfs using the delegation token
         url = new URL(TestJettyHelper.getJettyURL(),

+ 0 - 89
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestDelegationTokenManagerService.java

@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.service.security;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
-import org.apache.hadoop.lib.server.Server;
-import org.apache.hadoop.lib.service.DelegationTokenManager;
-import org.apache.hadoop.lib.service.DelegationTokenManagerException;
-import org.apache.hadoop.lib.service.hadoop.FileSystemAccessService;
-import org.apache.hadoop.lib.service.instrumentation.InstrumentationService;
-import org.apache.hadoop.lib.service.scheduler.SchedulerService;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.test.HTestCase;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.apache.hadoop.util.StringUtils;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.util.Arrays;
-
-public class TestDelegationTokenManagerService extends HTestCase {
-
-  @Test
-  @TestDir
-  public void service() throws Exception {
-    String dir = TestDirHelper.getTestDir().getAbsolutePath();
-    Configuration conf = new Configuration(false);
-    conf.set("httpfs.services", StringUtils.join(",",
-      Arrays.asList(InstrumentationService.class.getName(),
-          SchedulerService.class.getName(),
-          FileSystemAccessService.class.getName(),
-          DelegationTokenManagerService.class.getName())));
-    Server server = new HttpFSServerWebApp(dir, dir, dir, dir, conf);
-    server.init();
-    DelegationTokenManager tm = server.get(DelegationTokenManager.class);
-    Assert.assertNotNull(tm);
-    server.destroy();
-  }
-
-  @Test
-  @TestDir
-  @SuppressWarnings("unchecked")
-  public void tokens() throws Exception {
-    String dir = TestDirHelper.getTestDir().getAbsolutePath();
-    Configuration conf = new Configuration(false);
-    conf.set("server.services", StringUtils.join(",",
-      Arrays.asList(DelegationTokenManagerService.class.getName())));
-    HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, conf);
-    server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(), 14000));
-    server.init();
-    DelegationTokenManager tm = server.get(DelegationTokenManager.class);
-    Token token = tm.createToken(UserGroupInformation.getCurrentUser(), "foo");
-    Assert.assertNotNull(token);
-    tm.verifyToken(token);
-    Assert.assertTrue(tm.renewToken(token, "foo") > System.currentTimeMillis());
-    tm.cancelToken(token, "foo");
-    try {
-      tm.verifyToken(token);
-      Assert.fail();
-    } catch (DelegationTokenManagerException ex) {
-      //NOP
-    } catch (Exception ex) {
-      Assert.fail();
-    }
-    server.destroy();
-  }
-
-}

+ 23 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -387,12 +387,35 @@ Release 2.6.0 - UNRELEASED
     HDFS-6812. Remove addBlock and replaceBlock from DatanodeDescriptor.
     (szetszwo)
 
+    HDFS-6781. Separate HDFS commands from CommandsManual.apt.vm. (Akira
+    Ajisaka via Arpit Agarwal)
+
+    HDFS-6728. Dynamically add new volumes to DataStorage, formatted if
+    necessary. (Lei Xu via atm)
+
+    HDFS-6740. Make FSDataset support adding data volumes dynamically. (Lei
+    Xu via atm)
+
+    HDFS-6722. Display readable last contact time for dead nodes on NN webUI.
+    (Ming Ma via wheat9)
+
+    HDFS-6772. Get DN storages out of blockContentsStale state faster after
+    NN restarts. (Ming Ma via Arpit Agarwal)
+
+    HDFS-573. Porting libhdfs to Windows. (cnauroth)
+
+    HDFS-6828. Separate block replica dispatching from Balancer. (szetszwo via
+    jing9)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
 
   BUG FIXES
 
+    HDFS-6823. dfs.web.authentication.kerberos.principal shows up in logs for 
+    insecure HDFS (Allen Wittenauer via raviprak)
+
     HDFS-6617. Flake TestDFSZKFailoverController.testManualFailoverWithDFSHAAdmin
     due to a long edit log sync op. (Liang Xie via cnauroth)
 

+ 101 - 16
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -360,16 +360,97 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   <profiles>
     <profile>
-      <id>windows</id>
+      <id>native-win</id>
       <activation>
         <activeByDefault>false</activeByDefault>
         <os>
           <family>windows</family>
         </os>
       </activation>
-      <properties>
-        <windows.build>true</windows.build>
-      </properties>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-enforcer-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>enforce-os</id>
+                <goals>
+                  <goal>enforce</goal>
+                </goals>
+                <configuration>
+                  <rules>
+                    <requireOS>
+                      <family>windows</family>
+                      <message>native-win build only supported on Windows</message>
+                    </requireOS>
+                  </rules>
+                  <fail>true</fail>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-antrun-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>make</id>
+                <phase>compile</phase>
+                <goals>
+                  <goal>run</goal>
+                </goals>
+                <configuration>
+                  <target>
+                    <mkdir dir="${project.build.directory}/native"/>
+                    <exec executable="cmake" dir="${project.build.directory}/native"
+                        failonerror="true">
+                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_LIBWEBHDFS=${require.libwebhdfs} -DREQUIRE_FUSE=${require.fuse} -G 'Visual Studio 10 Win64'"/>
+                    </exec>
+                    <exec executable="msbuild" dir="${project.build.directory}/native"
+                        failonerror="true">
+                      <arg line="ALL_BUILD.vcxproj /nologo /p:Configuration=Release"/>
+                    </exec>
+                    <!-- Copy for inclusion in distribution. -->
+                    <copy todir="${project.build.directory}/bin">
+                      <fileset dir="${project.build.directory}/native/target/bin/Release"/>
+                    </copy>
+                  </target>
+                </configuration>
+              </execution>
+              <execution>
+                <id>native_tests</id>
+                <phase>test</phase>
+                <goals><goal>run</goal></goals>
+                <configuration>
+                  <skip>${skipTests}</skip>
+                  <target>
+                    <property name="compile_classpath" refid="maven.compile.classpath"/>
+                    <property name="test_classpath" refid="maven.test.classpath"/>
+                    <macrodef name="run-test">
+                      <attribute name="test"/>
+                      <sequential>
+                        <echo message="Running @{test}"/>
+                        <exec executable="${project.build.directory}/native/Release/@{test}" failonerror="true" dir="${project.build.directory}/native/">
+                          <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
+                          <!-- HADOOP_HOME required to find winutils. -->
+                          <env key="HADOOP_HOME" value="${hadoop.common.build.dir}"/>
+                          <!-- Make sure hadoop.dll and jvm.dll are on PATH. -->
+                          <env key="PATH" value="${env.PATH};${hadoop.common.build.dir}/bin;${java.home}/jre/bin/server;${java.home}/bin/server"/>
+                        </exec>
+                        <echo message="Finished @{test}"/>
+                      </sequential>
+                    </macrodef>
+                    <run-test test="test_libhdfs_threaded"/>
+                    <echo message="Skipping test_libhdfs_zerocopy"/>
+                    <run-test test="test_native_mini_dfs"/>
+                  </target>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
     </profile>
     <profile>
       <id>native</id>
@@ -407,21 +488,25 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                 <phase>test</phase>
                 <goals><goal>run</goal></goals>
                 <configuration>
+                  <skip>${skipTests}</skip>
                   <target>
                     <property name="compile_classpath" refid="maven.compile.classpath"/>
                     <property name="test_classpath" refid="maven.test.classpath"/>
-                    <exec executable="sh" failonerror="true" dir="${project.build.directory}/native/">
-                      <arg value="-c"/>
-                      <arg value="[ x$SKIPTESTS = xtrue ] || ${project.build.directory}/native/test_libhdfs_threaded"/>
-                      <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
-                      <env key="SKIPTESTS" value="${skipTests}"/>
-                    </exec>
-                    <exec executable="sh" failonerror="true" dir="${project.build.directory}/native/">
-                        <arg value="-c"/>
-                        <arg value="[ x$SKIPTESTS = xtrue ] || ${project.build.directory}/native/test_native_mini_dfs"/>
-                      <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
-                      <env key="SKIPTESTS" value="${skipTests}"/>
-                    </exec>
+                    <macrodef name="run-test">
+                      <attribute name="test"/>
+                      <sequential>
+                        <echo message="Running @{test}"/>
+                        <exec executable="${project.build.directory}/native/@{test}" failonerror="true" dir="${project.build.directory}/native/">
+                          <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
+                          <!-- Make sure libhadoop.so is on LD_LIBRARY_PATH. -->
+                          <env key="LD_LIBRARY_PATH" value="${env.LD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib"/>
+                        </exec>
+                        <echo message="Finished @{test}"/>
+                      </sequential>
+                    </macrodef>
+                    <run-test test="test_libhdfs_threaded"/>
+                    <run-test test="test_libhdfs_zerocopy"/>
+                    <run-test test="test_native_mini_dfs"/>
                   </target>
                 </configuration>
               </execution>
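
Usage note: the new native-win profile activates automatically on a Windows host (an explicit mvn compile -Pnative-win should also work, inferred from the profile id above). It enforces that the OS is Windows, generates Visual Studio 10 Win64 projects with cmake, builds them with msbuild, and runs the native tests through the same run-test Ant macro as the POSIX native profile, with HADOOP_HOME and PATH set so that winutils, hadoop.dll and jvm.dll resolve at test time.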

+ 57 - 21
hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt

@@ -76,9 +76,39 @@ if (NOT GENERATED_JAVAH)
     MESSAGE(FATAL_ERROR "You must set the CMake variable GENERATED_JAVAH")
 endif (NOT GENERATED_JAVAH)
 
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE")
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64")
+if (WIN32)
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /O2")
+
+    # Set warning level 4.
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W4")
+
+    # Skip "unreferenced formal parameter".
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4100")
+
+    # Skip "conditional expression is constant".
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4127")
+
+    # Skip deprecated POSIX function warnings.
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_CRT_NONSTDC_NO_DEPRECATE")
+
+    # Skip CRT non-secure function warnings.  If we can convert usage of
+    # strerror, getenv and ctime to their secure CRT equivalents, then we can
+    # re-enable the CRT non-secure function warnings.
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_CRT_SECURE_NO_WARNINGS")
+
+    # Omit unneeded headers.
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DWIN32_LEAN_AND_MEAN")
+
+    set(OS_DIR main/native/libhdfs/os/windows)
+    set(OUT_DIR target/bin)
+else (WIN32)
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE")
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64")
+    set(OS_DIR main/native/libhdfs/os/posix)
+    set(OS_LINK_LIBRARIES pthread)
+    set(OUT_DIR target/usr/local/lib)
+endif (WIN32)
 
 include_directories(
     ${GENERATED_JAVAH}
@@ -87,6 +117,7 @@ include_directories(
     ${JNI_INCLUDE_DIRS}
     main/native
     main/native/libhdfs
+    ${OS_DIR}
 )
 
 set(_FUSE_DFS_VERSION 0.1.0)
@@ -96,6 +127,9 @@ add_dual_library(hdfs
     main/native/libhdfs/exception.c
     main/native/libhdfs/jni_helper.c
     main/native/libhdfs/hdfs.c
+    main/native/libhdfs/common/htable.c
+    ${OS_DIR}/mutexes.c
+    ${OS_DIR}/thread_local_storage.c
 )
 if (NEED_LINK_DL)
    set(LIB_DL dl)
@@ -104,17 +138,14 @@ endif(NEED_LINK_DL)
 target_link_dual_libraries(hdfs
     ${JAVA_JVM_LIBRARY}
     ${LIB_DL}
-    pthread
+    ${OS_LINK_LIBRARIES}
 )
-dual_output_directory(hdfs target/usr/local/lib)
+
+dual_output_directory(hdfs ${OUT_DIR})
 set(LIBHDFS_VERSION "0.0.0")
 set_target_properties(hdfs PROPERTIES
     SOVERSION ${LIBHDFS_VERSION})
 
-add_library(posix_util
-    main/native/util/posix_util.c
-)
-
 add_executable(test_libhdfs_ops
     main/native/libhdfs/test/test_libhdfs_ops.c
 )
@@ -156,11 +187,12 @@ target_link_libraries(test_native_mini_dfs
 add_executable(test_libhdfs_threaded
     main/native/libhdfs/expect.c
     main/native/libhdfs/test_libhdfs_threaded.c
+    ${OS_DIR}/thread.c
 )
 target_link_libraries(test_libhdfs_threaded
     hdfs
     native_mini_dfs
-    pthread
+    ${OS_LINK_LIBRARIES}
 )
 
 add_executable(test_libhdfs_zerocopy
@@ -170,17 +202,21 @@ add_executable(test_libhdfs_zerocopy
 target_link_libraries(test_libhdfs_zerocopy
     hdfs
     native_mini_dfs
-    pthread
-)
-
-add_executable(test_libhdfs_vecsum
-    main/native/libhdfs/test/vecsum.c
-)
-target_link_libraries(test_libhdfs_vecsum
-    hdfs
-    pthread
-    rt
-)
+    ${OS_LINK_LIBRARIES}
+)
+
+# Skip vecsum on Windows.  This could be made to work in the future by
+# introducing an abstraction layer over the sys/mman.h functions.
+if (NOT WIN32)
+    add_executable(test_libhdfs_vecsum
+        main/native/libhdfs/test/vecsum.c
+    )
+    target_link_libraries(test_libhdfs_vecsum
+        hdfs
+        pthread
+        rt
+    )
+endif(NOT WIN32)
 
 IF(REQUIRE_LIBWEBHDFS)
     add_subdirectory(contrib/libwebhdfs)

+ 5 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -1668,9 +1668,11 @@ public class DFSUtil {
         .setKeytabConfKey(getSpnegoKeytabKey(conf, spnegoKeytabFileKey));
 
     // initialize the webserver for uploading/downloading files.
-    LOG.info("Starting web server as: "
-        + SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey),
-            httpAddr.getHostName()));
+    if (UserGroupInformation.isSecurityEnabled()) {
+      LOG.info("Starting web server as: "
+          + SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey),
+              httpAddr.getHostName()));
+    }
 
     if (policy.isHttpEnabled()) {
       if (httpAddr.getPort() == 0) {

These changes were suppressed because the diff is too large
+ 23 - 922
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java


+ 1060 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java

@@ -0,0 +1,1060 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.balancer;
+
+import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.Socket;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.HostsFileReader;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
+
+import com.google.common.base.Preconditions;
+
+/** Dispatching block replica moves between datanodes. */
+@InterfaceAudience.Private
+public class Dispatcher {
+  static final Log LOG = LogFactory.getLog(Dispatcher.class);
+
+  private static final long GB = 1L << 30; // 1GB
+  private static final long MAX_BLOCKS_SIZE_TO_FETCH = 2 * GB;
+
+  private static final int MAX_NO_PENDING_MOVE_ITERATIONS = 5;
+  private static final long DELAY_AFTER_ERROR = 10 * 1000L; // 10 seconds
+  private static final int BLOCK_MOVE_READ_TIMEOUT = 20 * 60 * 1000; // 20
+                                                                     // minutes
+
+  private final NameNodeConnector nnc;
+  private final KeyManager keyManager;
+  private final SaslDataTransferClient saslClient;
+
+  /** Set of datanodes to be excluded. */
+  private final Set<String> excludedNodes;
+  /** Restrict to the following nodes. */
+  private final Set<String> includedNodes;
+
+  private final Collection<Source> sources = new HashSet<Source>();
+  private final Collection<BalancerDatanode.StorageGroup> targets
+      = new HashSet<BalancerDatanode.StorageGroup>();
+
+  private final GlobalBlockMap globalBlocks = new GlobalBlockMap();
+  private final MovedBlocks<BalancerDatanode.StorageGroup> movedBlocks;
+
+  /** Map (datanodeUuid,storageType -> StorageGroup) */
+  private final StorageGroupMap storageGroupMap = new StorageGroupMap();
+
+  private NetworkTopology cluster;
+
+  private final ExecutorService moveExecutor;
+  private final ExecutorService dispatchExecutor;
+  /** The maximum number of concurrent block moves at a datanode */
+  private final int maxConcurrentMovesPerNode;
+
+  private final AtomicLong bytesMoved = new AtomicLong();
+
+  private static class GlobalBlockMap {
+    private final Map<Block, DBlock> map = new HashMap<Block, DBlock>();
+
+    /**
+     * Get the block from the map;
+     * if the block is not found, create a new block and put it in the map.
+     */
+    private DBlock get(Block b) {
+      DBlock block = map.get(b);
+      if (block == null) {
+        block = new DBlock(b);
+        map.put(b, block);
+      }
+      return block;
+    }
+    
+    /** Remove all blocks except for the moved blocks. */
+    private void removeAllButRetain(
+        MovedBlocks<BalancerDatanode.StorageGroup> movedBlocks) {
+      for (Iterator<Block> i = map.keySet().iterator(); i.hasNext();) {
+        if (!movedBlocks.contains(i.next())) {
+          i.remove();
+        }
+      }
+    }
+  }
+
+  static class StorageGroupMap {
+    private static String toKey(String datanodeUuid, StorageType storageType) {
+      return datanodeUuid + ":" + storageType;
+    }
+
+    private final Map<String, BalancerDatanode.StorageGroup> map
+        = new HashMap<String, BalancerDatanode.StorageGroup>();
+
+    BalancerDatanode.StorageGroup get(String datanodeUuid,
+        StorageType storageType) {
+      return map.get(toKey(datanodeUuid, storageType));
+    }
+
+    void put(BalancerDatanode.StorageGroup g) {
+      final String key = toKey(g.getDatanode().getDatanodeUuid(), g.storageType);
+      final BalancerDatanode.StorageGroup existing = map.put(key, g);
+      Preconditions.checkState(existing == null);
+    }
+
+    int size() {
+      return map.size();
+    }
+
+    void clear() {
+      map.clear();
+    }
+  }
+
+  /** This class keeps track of a scheduled block move */
+  private class PendingMove {
+    private DBlock block;
+    private Source source;
+    private BalancerDatanode proxySource;
+    private BalancerDatanode.StorageGroup target;
+
+    private PendingMove() {
+    }
+
+    @Override
+    public String toString() {
+      final Block b = block.getBlock();
+      return b + " with size=" + b.getNumBytes() + " from "
+          + source.getDisplayName() + " to " + target.getDisplayName()
+          + " through " + proxySource.datanode;
+    }
+
+    /**
+     * Choose a block & a proxy source for this pendingMove whose source &
+     * target have already been chosen.
+     * 
+     * @return true if a block and its proxy are chosen; false otherwise
+     */
+    private boolean chooseBlockAndProxy() {
+      // iterate this source's blocks until we find a good one
+      for (Iterator<DBlock> i = source.getBlockIterator(); i.hasNext();) {
+        if (markMovedIfGoodBlock(i.next())) {
+          i.remove();
+          return true;
+        }
+      }
+      return false;
+    }
+
+    /**
+     * @return true if the given block is good for the tentative move.
+     */
+    private boolean markMovedIfGoodBlock(DBlock block) {
+      synchronized (block) {
+        synchronized (movedBlocks) {
+          if (isGoodBlockCandidate(source, target, block)) {
+            this.block = block;
+            if (chooseProxySource()) {
+              movedBlocks.put(block);
+              if (LOG.isDebugEnabled()) {
+                LOG.debug("Decided to move " + this);
+              }
+              return true;
+            }
+          }
+        }
+      }
+      return false;
+    }
+
+    /**
+     * Choose a proxy source.
+     * 
+     * @return true if a proxy is found; otherwise false
+     */
+    private boolean chooseProxySource() {
+      final DatanodeInfo targetDN = target.getDatanode();
+      // if node group is supported, first try nodes in the same node group
+      if (cluster.isNodeGroupAware()) {
+        for (BalancerDatanode.StorageGroup loc : block.getLocations()) {
+          if (cluster.isOnSameNodeGroup(loc.getDatanode(), targetDN)
+              && addTo(loc)) {
+            return true;
+          }
+        }
+      }
+      // check if there is a replica on the same rack as the target
+      for (BalancerDatanode.StorageGroup loc : block.getLocations()) {
+        if (cluster.isOnSameRack(loc.getDatanode(), targetDN) && addTo(loc)) {
+          return true;
+        }
+      }
+      // find a non-busy replica
+      for (BalancerDatanode.StorageGroup loc : block.getLocations()) {
+        if (addTo(loc)) {
+          return true;
+        }
+      }
+      return false;
+    }
+
+    /** Use the given location as the proxy source if its datanode can accept this move */
+    private boolean addTo(BalancerDatanode.StorageGroup g) {
+      final BalancerDatanode bdn = g.getBalancerDatanode();
+      if (bdn.addPendingBlock(this)) {
+        proxySource = bdn;
+        return true;
+      }
+      return false;
+    }
+
+    /** Dispatch the move to the proxy source & wait for the response. */
+    private void dispatch() {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Start moving " + this);
+      }
+
+      Socket sock = new Socket();
+      DataOutputStream out = null;
+      DataInputStream in = null;
+      try {
+        sock.connect(
+            NetUtils.createSocketAddr(target.getDatanode().getXferAddr()),
+            HdfsServerConstants.READ_TIMEOUT);
+        /*
+         * Unfortunately we don't have a good way to know if the Datanode is
+         * taking a really long time to move a block, OR something has gone
+         * wrong and it's never going to finish. To deal with this scenario, we
+         * set a long timeout (20 minutes) to avoid hanging the balancer
+         * indefinitely.
+         */
+        sock.setSoTimeout(BLOCK_MOVE_READ_TIMEOUT);
+
+        sock.setKeepAlive(true);
+
+        OutputStream unbufOut = sock.getOutputStream();
+        InputStream unbufIn = sock.getInputStream();
+        ExtendedBlock eb = new ExtendedBlock(nnc.getBlockpoolID(),
+            block.getBlock());
+        Token<BlockTokenIdentifier> accessToken = keyManager.getAccessToken(eb);
+        IOStreamPair saslStreams = saslClient.socketSend(sock, unbufOut,
+            unbufIn, keyManager, accessToken, target.getDatanode());
+        unbufOut = saslStreams.out;
+        unbufIn = saslStreams.in;
+        out = new DataOutputStream(new BufferedOutputStream(unbufOut,
+            HdfsConstants.IO_FILE_BUFFER_SIZE));
+        in = new DataInputStream(new BufferedInputStream(unbufIn,
+            HdfsConstants.IO_FILE_BUFFER_SIZE));
+
+        sendRequest(out, eb, accessToken);
+        receiveResponse(in);
+        bytesMoved.addAndGet(block.getNumBytes());
+        LOG.info("Successfully moved " + this);
+      } catch (IOException e) {
+        LOG.warn("Failed to move " + this + ": " + e.getMessage());
+        /*
+         * The proxy or target may have an issue; insert a small delay before
+         * using these nodes again. This avoids a potential storm of
+         * "threads quota exceeded" warnings when the balancer gets out of
+         * sync with the work going on in the datanode.
+         */
+        proxySource.activateDelay(DELAY_AFTER_ERROR);
+        target.getBalancerDatanode().activateDelay(DELAY_AFTER_ERROR);
+      } finally {
+        IOUtils.closeStream(out);
+        IOUtils.closeStream(in);
+        IOUtils.closeSocket(sock);
+
+        proxySource.removePendingBlock(this);
+        target.getBalancerDatanode().removePendingBlock(this);
+
+        synchronized (this) {
+          reset();
+        }
+        synchronized (Dispatcher.this) {
+          Dispatcher.this.notifyAll();
+        }
+      }
+    }
+
+    /** Send a block replace request to the output stream */
+    private void sendRequest(DataOutputStream out, ExtendedBlock eb,
+        Token<BlockTokenIdentifier> accessToken) throws IOException {
+      new Sender(out).replaceBlock(eb, target.storageType, accessToken, source
+          .getDatanode().getDatanodeUuid(), proxySource.datanode);
+    }
+
+    /** Receive a block copy response from the input stream */
+    private void receiveResponse(DataInputStream in) throws IOException {
+      BlockOpResponseProto response = BlockOpResponseProto
+          .parseFrom(vintPrefixed(in));
+      if (response.getStatus() != Status.SUCCESS) {
+        if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
+          throw new IOException("block move failed due to access token error");
+        }
+        throw new IOException("block move is failed: " + response.getMessage());
+      }
+    }
+
+    /** reset the object */
+    private void reset() {
+      block = null;
+      source = null;
+      proxySource = null;
+      target = null;
+    }
+  }
+
+  /** A class for keeping track of block locations in the dispatcher. */
+  private static class DBlock extends
+      MovedBlocks.Locations<BalancerDatanode.StorageGroup> {
+    DBlock(Block block) {
+      super(block);
+    }
+  }
+
+  /** The class represents a desired move. */
+  static class Task {
+    private final BalancerDatanode.StorageGroup target;
+    private long size; // bytes scheduled to move
+
+    Task(BalancerDatanode.StorageGroup target, long size) {
+      this.target = target;
+      this.size = size;
+    }
+
+    long getSize() {
+      return size;
+    }
+  }
+
+  /** A class that keeps track of a datanode. */
+  static class BalancerDatanode {
+
+    /** A group of storages in a datanode with the same storage type. */
+    class StorageGroup {
+      final StorageType storageType;
+      final double utilization;
+      final long maxSize2Move;
+      private long scheduledSize = 0L;
+
+      private StorageGroup(StorageType storageType, double utilization,
+          long maxSize2Move) {
+        this.storageType = storageType;
+        this.utilization = utilization;
+        this.maxSize2Move = maxSize2Move;
+      }
+
+      BalancerDatanode getBalancerDatanode() {
+        return BalancerDatanode.this;
+      }
+
+      DatanodeInfo getDatanode() {
+        return BalancerDatanode.this.datanode;
+      }
+
+      /** Decide if we still need to move more bytes */
+      synchronized boolean hasSpaceForScheduling() {
+        return availableSizeToMove() > 0L;
+      }
+
+      /** @return the number of bytes that can still be scheduled to move */
+      synchronized long availableSizeToMove() {
+        return maxSize2Move - scheduledSize;
+      }
+
+      /** increment scheduled size */
+      synchronized void incScheduledSize(long size) {
+        scheduledSize += size;
+      }
+
+      /** @return scheduled size */
+      synchronized long getScheduledSize() {
+        return scheduledSize;
+      }
+
+      /** Reset scheduled size to zero. */
+      synchronized void resetScheduledSize() {
+        scheduledSize = 0L;
+      }
+
+      /** @return the name for display */
+      String getDisplayName() {
+        return datanode + ":" + storageType;
+      }
+
+      @Override
+      public String toString() {
+        return "" + utilization;
+      }
+    }
+
+    final DatanodeInfo datanode;
+    final EnumMap<StorageType, StorageGroup> storageMap
+        = new EnumMap<StorageType, StorageGroup>(StorageType.class);
+    protected long delayUntil = 0L;
+    /** blocks being moved but not confirmed yet */
+    private final List<PendingMove> pendings;
+    private final int maxConcurrentMoves;
+
+    @Override
+    public String toString() {
+      return getClass().getSimpleName() + ":" + datanode + ":" + storageMap;
+    }
+
+    private BalancerDatanode(DatanodeStorageReport r, int maxConcurrentMoves) {
+      this.datanode = r.getDatanodeInfo();
+      this.maxConcurrentMoves = maxConcurrentMoves;
+      this.pendings = new ArrayList<PendingMove>(maxConcurrentMoves);
+    }
+
+    private void put(StorageType storageType, StorageGroup g) {
+      final StorageGroup existing = storageMap.put(storageType, g);
+      Preconditions.checkState(existing == null);
+    }
+
+    StorageGroup addStorageGroup(StorageType storageType, double utilization,
+        long maxSize2Move) {
+      final StorageGroup g = new StorageGroup(storageType, utilization,
+          maxSize2Move);
+      put(storageType, g);
+      return g;
+    }
+
+    Source addSource(StorageType storageType, double utilization,
+        long maxSize2Move, Dispatcher balancer) {
+      final Source s = balancer.new Source(storageType, utilization,
+          maxSize2Move, this);
+      put(storageType, s);
+      return s;
+    }
+
+    synchronized private void activateDelay(long delta) {
+      delayUntil = Time.monotonicNow() + delta;
+    }
+
+    synchronized private boolean isDelayActive() {
+      if (delayUntil == 0 || Time.monotonicNow() > delayUntil) {
+        delayUntil = 0;
+        return false;
+      }
+      return true;
+    }
+
+    /** Check if the node can schedule more blocks to move */
+    synchronized boolean isPendingQNotFull() {
+      return pendings.size() < maxConcurrentMoves;
+    }
+
+    /** Check if all the dispatched moves are done */
+    synchronized boolean isPendingQEmpty() {
+      return pendings.isEmpty();
+    }
+
+    /** Add a scheduled block move to the node */
+    synchronized boolean addPendingBlock(PendingMove pendingBlock) {
+      if (!isDelayActive() && isPendingQNotFull()) {
+        return pendings.add(pendingBlock);
+      }
+      return false;
+    }
+
+    /** Remove a scheduled block move from the node */
+    synchronized boolean removePendingBlock(PendingMove pendingBlock) {
+      return pendings.remove(pendingBlock);
+    }
+  }
+
+  /** A node that can be the source of a block move */
+  class Source extends BalancerDatanode.StorageGroup {
+
+    private final List<Task> tasks = new ArrayList<Task>(2);
+    private long blocksToReceive = 0L;
+    /**
+     * Source blocks point to the objects in {@link Dispatcher#globalBlocks}
+     * because we want to keep one copy of a block and be aware that the
+     * locations are changing over time.
+     */
+    private final List<DBlock> srcBlocks = new ArrayList<DBlock>();
+
+    private Source(StorageType storageType, double utilization,
+        long maxSize2Move, BalancerDatanode dn) {
+      dn.super(storageType, utilization, maxSize2Move);
+    }
+
+    /** Add a task */
+    void addTask(Task task) {
+      Preconditions.checkState(task.target != this,
+          "Source and target are the same storage group " + getDisplayName());
+      incScheduledSize(task.size);
+      tasks.add(task);
+    }
+
+    /** @return an iterator to this source's blocks */
+    Iterator<DBlock> getBlockIterator() {
+      return srcBlocks.iterator();
+    }
+
+    /**
+     * Fetch new blocks of this source from namenode and update this source's
+     * block list & {@link Dispatcher#globalBlocks}.
+     * 
+     * @return the total size of the received blocks, in bytes.
+     */
+    private long getBlockList() throws IOException {
+      final long size = Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive);
+      final BlocksWithLocations newBlocks = nnc.getBlocks(getDatanode(), size);
+
+      long bytesReceived = 0;
+      for (BlockWithLocations blk : newBlocks.getBlocks()) {
+        bytesReceived += blk.getBlock().getNumBytes();
+        synchronized (globalBlocks) {
+          final DBlock block = globalBlocks.get(blk.getBlock());
+          synchronized (block) {
+            block.clearLocations();
+
+            // update locations
+            final String[] datanodeUuids = blk.getDatanodeUuids();
+            final StorageType[] storageTypes = blk.getStorageTypes();
+            for (int i = 0; i < datanodeUuids.length; i++) {
+              final BalancerDatanode.StorageGroup g = storageGroupMap.get(
+                  datanodeUuids[i], storageTypes[i]);
+              if (g != null) { // not unknown
+                block.addLocation(g);
+              }
+            }
+          }
+          if (!srcBlocks.contains(block) && isGoodBlockCandidate(block)) {
+            // filter bad candidates
+            srcBlocks.add(block);
+          }
+        }
+      }
+      return bytesReceived;
+    }
+
+    /** Decide if the given block is a good candidate to move or not */
+    private boolean isGoodBlockCandidate(DBlock block) {
+      for (Task t : tasks) {
+        if (Dispatcher.this.isGoodBlockCandidate(this, t.target, block)) {
+          return true;
+        }
+      }
+      return false;
+    }
+
+    /**
+     * Choose a move for the source. The block's source, target, and proxy
+     * are determined as well. When choosing the proxy and target, source and
+     * target throttling is taken into account: they are chosen only when they
+     * have the capacity to support this block move. The block should be
+     * dispatched immediately after this method returns.
+     * 
+     * @return a move that's good for the source to dispatch immediately.
+     */
+    private PendingMove chooseNextMove() {
+      for (Iterator<Task> i = tasks.iterator(); i.hasNext();) {
+        final Task task = i.next();
+        final BalancerDatanode target = task.target.getBalancerDatanode();
+        PendingMove pendingBlock = new PendingMove();
+        if (target.addPendingBlock(pendingBlock)) {
+          // target is not busy, so do a tentative block allocation
+          pendingBlock.source = this;
+          pendingBlock.target = task.target;
+          if (pendingBlock.chooseBlockAndProxy()) {
+            long blockSize = pendingBlock.block.getNumBytes();
+            incScheduledSize(-blockSize);
+            task.size -= blockSize;
+            if (task.size == 0) {
+              i.remove();
+            }
+            return pendingBlock;
+          } else {
+            // cancel the tentative move
+            target.removePendingBlock(pendingBlock);
+          }
+        }
+      }
+      return null;
+    }
+
+    /** Iterate this source's blocks to remove the already-moved ones */
+    private void removeMovedBlocks() {
+      for (Iterator<DBlock> i = getBlockIterator(); i.hasNext();) {
+        if (movedBlocks.contains(i.next().getBlock())) {
+          i.remove();
+        }
+      }
+    }
+
+    private static final int SOURCE_BLOCKS_MIN_SIZE = 5;
+
+    /** @return whether we should fetch more blocks from the namenode */
+    private boolean shouldFetchMoreBlocks() {
+      return srcBlocks.size() < SOURCE_BLOCKS_MIN_SIZE && blocksToReceive > 0;
+    }
+
+    private static final long MAX_ITERATION_TIME = 20 * 60 * 1000L; // 20 mins
+
+    /**
+     * This method iteratively does the following: it first selects a block to
+     * move and sends a request to the proxy source to start the block move;
+     * when the source's block list falls below a threshold, it asks the
+     * namenode for more blocks. It terminates when it has dispatched enough
+     * block move tasks, has received enough blocks from the namenode, or the
+     * elapsed time of the iteration has exceeded the max time limit.
+     */
+    private void dispatchBlocks() {
+      final long startTime = Time.monotonicNow();
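+      // Ask the namenode for up to twice the scheduled bytes; fetched blocks
+      // that are not good candidates are filtered out in getBlockList().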
+      this.blocksToReceive = 2 * getScheduledSize();
+      boolean isTimeUp = false;
+      int noPendingBlockIteration = 0;
+      while (!isTimeUp && getScheduledSize() > 0
+          && (!srcBlocks.isEmpty() || blocksToReceive > 0)) {
+        final PendingMove p = chooseNextMove();
+        if (p != null) {
+          // move the block
+          moveExecutor.execute(new Runnable() {
+            @Override
+            public void run() {
+              p.dispatch();
+            }
+          });
+          continue;
+        }
+
+        // Since we cannot schedule any block to move, first remove any
+        // already-moved blocks from the source block list.
+        removeMovedBlocks();
+        // check if we should fetch more blocks from the namenode
+        if (shouldFetchMoreBlocks()) {
+          // fetch new blocks
+          try {
+            blocksToReceive -= getBlockList();
+            continue;
+          } catch (IOException e) {
+            LOG.warn("Exception while getting block list", e);
+            return;
+          }
+        } else {
+          // source node cannot find a pendingBlockToMove, iteration +1
+          noPendingBlockIteration++;
+          // in case no blocks can be moved for source node's task,
+          // jump out of while-loop after 5 iterations.
+          if (noPendingBlockIteration >= MAX_NO_PENDING_MOVE_ITERATIONS) {
+            resetScheduledSize();
+          }
+        }
+
+        // check if time is up or not
+        if (Time.monotonicNow() - startTime > MAX_ITERATION_TIME) {
+          isTimeUp = true;
+          continue;
+        }
+
+        // Now we can not schedule any block to move and there are
+        // no new blocks added to the source block list, so we wait.
+        try {
+          synchronized (Dispatcher.this) {
+            Dispatcher.this.wait(1000); // wait for targets/sources to be idle
+          }
+        } catch (InterruptedException ignored) {
+        }
+      }
+    }
+  }
+
+  Dispatcher(NameNodeConnector theblockpool, Set<String> includedNodes,
+      Set<String> excludedNodes, Configuration conf) {
+    this.nnc = theblockpool;
+    this.keyManager = nnc.getKeyManager();
+    this.excludedNodes = excludedNodes;
+    this.includedNodes = includedNodes;
+
+    final long movedWinWidth = conf.getLong(
+        DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY,
+        DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_DEFAULT);
+    movedBlocks = new MovedBlocks<BalancerDatanode.StorageGroup>(movedWinWidth);
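+    // movedWinWidth is the time window (in ms) for which a moved block is
+    // remembered in movedBlocks, so that it is not selected for another move.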
+
+    this.cluster = NetworkTopology.getInstance(conf);
+
+    this.moveExecutor = Executors.newFixedThreadPool(conf.getInt(
+        DFSConfigKeys.DFS_BALANCER_MOVERTHREADS_KEY,
+        DFSConfigKeys.DFS_BALANCER_MOVERTHREADS_DEFAULT));
+    this.dispatchExecutor = Executors.newFixedThreadPool(conf.getInt(
+        DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_KEY,
+        DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_DEFAULT));
+    this.maxConcurrentMovesPerNode = conf.getInt(
+        DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
+        DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
+
+    final boolean fallbackToSimpleAuthAllowed = conf.getBoolean(
+        CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
+        CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
+    this.saslClient = new SaslDataTransferClient(
+        DataTransferSaslUtil.getSaslPropertiesResolver(conf),
+        TrustedChannelResolver.getInstance(conf), fallbackToSimpleAuthAllowed);
+  }
+
+  StorageGroupMap getStorageGroupMap() {
+    return storageGroupMap;
+  }
+
+  NetworkTopology getCluster() {
+    return cluster;
+  }
+  
+  long getBytesMoved() {
+    return bytesMoved.get();
+  }
+
+  long bytesToMove() {
+    Preconditions.checkState(
+        storageGroupMap.size() >= sources.size() + targets.size(),
+        "Mismatched number of storage groups (" + storageGroupMap.size()
+            + " < " + sources.size() + " sources + " + targets.size()
+            + " targets)");
+
+    long b = 0L;
+    for (Source src : sources) {
+      b += src.getScheduledSize();
+    }
+    return b;
+  }
+
+  void add(Source source, BalancerDatanode.StorageGroup target) {
+    sources.add(source);
+    targets.add(target);
+  }
+
+  private boolean shouldIgnore(DatanodeInfo dn) {
+    // ignore decommissioned nodes
+    final boolean decommissioned = dn.isDecommissioned();
+    // ignore decommissioning nodes
+    final boolean decommissioning = dn.isDecommissionInProgress();
+    // ignore nodes in exclude list
+    final boolean excluded = Util.isExcluded(excludedNodes, dn);
+    // ignore nodes not in the include list (if include list is not empty)
+    final boolean notIncluded = !Util.isIncluded(includedNodes, dn);
+
+    if (decommissioned || decommissioning || excluded || notIncluded) {
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Excluding datanode " + dn + ": " + decommissioned + ", "
+            + decommissioning + ", " + excluded + ", " + notIncluded);
+      }
+      return true;
+    }
+    return false;
+  }
+
+  /** Get live datanode storage reports and then build the network topology. */
+  List<DatanodeStorageReport> init() throws IOException {
+    final DatanodeStorageReport[] reports = nnc.getLiveDatanodeStorageReport();
+    final List<DatanodeStorageReport> trimmed = new ArrayList<DatanodeStorageReport>(); 
+    // create network topology and classify utilization collections:
+    // over-utilized, above-average, below-average and under-utilized.
+    for (DatanodeStorageReport r : DFSUtil.shuffle(reports)) {
+      final DatanodeInfo datanode = r.getDatanodeInfo();
+      if (shouldIgnore(datanode)) {
+        continue;
+      }
+      trimmed.add(r);
+      cluster.add(datanode);
+    }
+    return trimmed;
+  }
+
+  public BalancerDatanode newDatanode(DatanodeStorageReport r) {
+    return new BalancerDatanode(r, maxConcurrentMovesPerNode);
+  }
+
+  public boolean dispatchAndCheckContinue() throws InterruptedException {
+    return nnc.shouldContinue(dispatchBlockMoves());
+  }
+
+  /**
+   * Dispatch block moves for each source. Each dispatcher thread selects
+   * blocks to move and sends a request to the proxy source to initiate the
+   * block move. The process is flow controlled: block selection is blocked
+   * if there are too many unconfirmed block moves.
+   * 
+   * @return the total number of bytes successfully moved in this iteration.
+   */
+  private long dispatchBlockMoves() throws InterruptedException {
+    final long bytesLastMoved = bytesMoved.get();
+    final Future<?>[] futures = new Future<?>[sources.size()];
+
+    final Iterator<Source> i = sources.iterator();
+    for (int j = 0; j < futures.length; j++) {
+      final Source s = i.next();
+      futures[j] = dispatchExecutor.submit(new Runnable() {
+        @Override
+        public void run() {
+          s.dispatchBlocks();
+        }
+      });
+    }
+
+    // wait for all dispatcher threads to finish
+    for (Future<?> future : futures) {
+      try {
+        future.get();
+      } catch (ExecutionException e) {
+        LOG.warn("Dispatcher thread failed", e.getCause());
+      }
+    }
+
+    // wait for all block moving to be done
+    waitForMoveCompletion();
+
+    return bytesMoved.get() - bytesLastMoved;
+  }
+
+  /** The sleep period between checks for block move completion */
+  static private long blockMoveWaitTime = 30000L;
+
+  /** set the sleeping period for block move completion check */
+  static void setBlockMoveWaitTime(long time) {
+    blockMoveWaitTime = time;
+  }
+
+  /** Wait for all block move confirmations. */
+  private void waitForMoveCompletion() {
+    for(;;) {
+      boolean empty = true;
+      for (BalancerDatanode.StorageGroup t : targets) {
+        if (!t.getBalancerDatanode().isPendingQEmpty()) {
+          empty = false;
+          break;
+        }
+      }
+      if (empty) {
+        return; //all pending queues are empty
+      }
+      try {
+        Thread.sleep(blockMoveWaitTime);
+      } catch (InterruptedException ignored) {
+      }
+    }
+  }
+
+  /**
+   * Decide if the block is a good candidate to be moved from source to target.
+   * A block is a good candidate if
+   * 1. the source and target have the same storage type;
+   * 2. the block is neither in the process of being moved nor already moved;
+   * 3. the block does not have a replica on the target;
+   * 4. doing the move does not reduce the number of racks that the block has.
+   */
+  private boolean isGoodBlockCandidate(Source source,
+      BalancerDatanode.StorageGroup target, DBlock block) {
+    if (source.storageType != target.storageType) {
+      return false;
+    }
+    // check if the block is moved or not
+    if (movedBlocks.contains(block.getBlock())) {
+      return false;
+    }
+    if (block.isLocatedOn(target)) {
+      return false;
+    }
+    if (cluster.isNodeGroupAware()
+        && isOnSameNodeGroupWithReplicas(target, block, source)) {
+      return false;
+    }
+    if (reduceNumOfRacks(source, target, block)) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * Determine whether moving the given block replica from source to target
+   * would reduce the number of racks of the block replicas.
+   */
+  private boolean reduceNumOfRacks(Source source,
+      BalancerDatanode.StorageGroup target, DBlock block) {
+    final DatanodeInfo sourceDn = source.getDatanode();
+    if (cluster.isOnSameRack(sourceDn, target.getDatanode())) {
+      // source and target are on the same rack
+      return false;
+    }
+    boolean notOnSameRack = true;
+    synchronized (block) {
+      for (BalancerDatanode.StorageGroup loc : block.getLocations()) {
+        if (cluster.isOnSameRack(loc.getDatanode(), target.getDatanode())) {
+          notOnSameRack = false;
+          break;
+        }
+      }
+    }
+    if (notOnSameRack) {
+      // target is not on the same rack as any replica
+      return false;
+    }
+    for (BalancerDatanode.StorageGroup g : block.getLocations()) {
+      if (g != source && cluster.isOnSameRack(g.getDatanode(), sourceDn)) {
+        // source is on the same rack of another replica
+        return false;
+      }
+    }
+    return true;
+  }
+
+  /**
+   * Check if there is any replica (other than source) on the same node group
+   * as the target. If so, the target is not a good candidate for placing
+   * this replica, as we do not want two replicas under the same node group.
+   * 
+   * @return true if there is any replica (other than source) on the same
+   *         node group as the target
+   */
+  private boolean isOnSameNodeGroupWithReplicas(
+      BalancerDatanode.StorageGroup target, DBlock block, Source source) {
+    final DatanodeInfo targetDn = target.getDatanode();
+    for (BalancerDatanode.StorageGroup g : block.getLocations()) {
+      if (g != source && cluster.isOnSameNodeGroup(g.getDatanode(), targetDn)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /** Reset all fields in order to prepare for the next iteration */
+  void reset(Configuration conf) {
+    cluster = NetworkTopology.getInstance(conf);
+    storageGroupMap.clear();
+    sources.clear();
+    targets.clear();
+    globalBlocks.removeAllButRetain(movedBlocks);
+    movedBlocks.cleanup();
+  }
+
+  /** shutdown thread pools */
+  void shutdownNow() {
+    dispatchExecutor.shutdownNow();
+    moveExecutor.shutdownNow();
+  }
+
+  static class Util {
+    /** @return true if data node is part of the excludedNodes. */
+    static boolean isExcluded(Set<String> excludedNodes, DatanodeInfo dn) {
+      return isIn(excludedNodes, dn);
+    }
+
+    /**
+     * @return true if includedNodes is empty or data node is part of the
+     *         includedNodes.
+     */
+    static boolean isIncluded(Set<String> includedNodes, DatanodeInfo dn) {
+      return (includedNodes.isEmpty() || isIn(includedNodes, dn));
+    }
+
+    /**
+     * The match is checked using the host name and IP address, each with and
+     * without the port number.
+     * 
+     * @return true if the datanode's transfer address matches the set of nodes.
+     */
+    private static boolean isIn(Set<String> datanodes, DatanodeInfo dn) {
+      return isIn(datanodes, dn.getPeerHostName(), dn.getXferPort())
+          || isIn(datanodes, dn.getIpAddr(), dn.getXferPort())
+          || isIn(datanodes, dn.getHostName(), dn.getXferPort());
+    }
+
+    /** @return true if nodes contains host or host:port */
+    private static boolean isIn(Set<String> nodes, String host, int port) {
+      if (host == null) {
+        return false;
+      }
+      return (nodes.contains(host) || nodes.contains(host + ":" + port));
+    }
+
+    /**
+     * Parse a comma-separated string to obtain a set of host names.
+     * 
+     * @return the set of host names
+     */
+    static Set<String> parseHostList(String string) {
+      String[] addrs = StringUtils.getTrimmedStrings(string);
+      return new HashSet<String>(Arrays.asList(addrs));
+    }
+
+    /**
+     * Read a set of host names from a file.
+     * 
+     * @return the set of host names
+     */
+    static Set<String> getHostListFromFile(String fileName, String type) {
+      Set<String> nodes = new HashSet<String>();
+      try {
+        HostsFileReader.readFileToSet(type, fileName, nodes);
+        return StringUtils.getTrimmedStrings(nodes);
+      } catch (IOException e) {
+        throw new IllegalArgumentException(
+            "Failed to read host list from file: " + fileName);
+      }
+    }
+  }
+}
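
A minimal usage sketch of the Dispatcher API above (illustrative only: it
assumes an already-constructed NameNodeConnector nnc and Configuration conf,
elides imports and exception handling, and skips the utilization
classification that the Balancer performs before calling add()):

    Set<String> included = Dispatcher.Util.parseHostList("dn1.example.com,dn2.example.com");
    Set<String> excluded = Collections.<String>emptySet();
    Dispatcher dispatcher = new Dispatcher(nnc, included, excluded, conf);
    try {
      for (DatanodeStorageReport report : dispatcher.init()) {
        BalancerDatanode dn = dispatcher.newDatanode(report);
        // ... build sources/targets from dn and call dispatcher.add(source, target) ...
      }
      while (dispatcher.dispatchAndCheckContinue()) {
        dispatcher.reset(conf); // prepare for the next iteration
      }
      System.out.println("Moved " + dispatcher.getBytesMoved() + " bytes in total");
    } finally {
      dispatcher.shutdownNow();
    }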

+ 12 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java

@@ -34,6 +34,10 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.io.IOUtils;
@@ -90,14 +94,16 @@ public class NameNodeConnector implements Closeable {
     return blockpoolID;
   }
 
-  /** @return the namenode proxy. */
-  public NamenodeProtocol getNamenode() {
-    return namenode;
+  /** @return blocks with locations. */
+  public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
+      throws IOException {
+    return namenode.getBlocks(datanode, size);
   }
 
-  /** @return the client proxy. */
-  public ClientProtocol getClient() {
-    return client;
+  /** @return live datanode storage reports. */
+  public DatanodeStorageReport[] getLiveDatanodeStorageReport()
+      throws IOException {
+    return client.getDatanodeStorageReport(DatanodeReportType.LIVE);
   }
 
   /** @return the key manager */
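
With this change the connector exposes only the two narrow calls the balancer
needs, instead of handing out the raw NamenodeProtocol and ClientProtocol
proxies. A short sketch, assuming an initialized NameNodeConnector nnc:

    DatanodeStorageReport[] live = nnc.getLiveDatanodeStorageReport();
    BlocksWithLocations blocks =
        nnc.getBlocks(live[0].getDatanodeInfo(), 2L * 1024 * 1024 * 1024);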

+ 20 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

@@ -135,7 +135,10 @@ public class DatanodeManager {
   
   /** The number of stale DataNodes */
   private volatile int numStaleNodes;
-  
+
+  /** The number of stale storages */
+  private volatile int numStaleStorages;
+
   /**
    * Whether or not this cluster has ever consisted of more than 1 rack,
    * according to the NetworkTopology.
@@ -1142,6 +1145,22 @@ public class DatanodeManager {
     return this.numStaleNodes;
   }
 
+  /**
+   * Get the number of content stale storages.
+   */
+  public int getNumStaleStorages() {
+    return numStaleStorages;
+  }
+
+  /**
+   * Set the number of content stale storages.
+   *
+   * @param numStaleStorages The number of content stale storages.
+   */
+  void setNumStaleStorages(int numStaleStorages) {
+    this.numStaleStorages = numStaleStorages;
+  }
+
   /** Fetch live and dead datanodes. */
   public void fetchDatanodes(final List<DatanodeDescriptor> live, 
       final List<DatanodeDescriptor> dead, final boolean removeDecommissionNode) {

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java

@@ -256,6 +256,7 @@ class HeartbeatManager implements DatanodeStatistics {
       DatanodeID dead = null;
       // check the number of stale nodes
       int numOfStaleNodes = 0;
+      int numOfStaleStorages = 0;
       synchronized(this) {
         for (DatanodeDescriptor d : datanodes) {
           if (dead == null && dm.isDatanodeDead(d)) {
@@ -265,10 +266,17 @@ class HeartbeatManager implements DatanodeStatistics {
           if (d.isStale(dm.getStaleInterval())) {
             numOfStaleNodes++;
           }
+          DatanodeStorageInfo[] storageInfos = d.getStorageInfos();
+          for(DatanodeStorageInfo storageInfo : storageInfos) {
+            if (storageInfo.areBlockContentsStale()) {
+              numOfStaleStorages++;
+            }
+          }
         }
         
         // Set the number of stale nodes in the DatanodeManager
         dm.setNumStaleNodes(numOfStaleNodes);
+        dm.setNumStaleStorages(numOfStaleStorages);
       }
 
       allAlive = dead == null;

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java

@@ -601,7 +601,7 @@ class BPOfferService {
       LOG.info("DatanodeCommand action : DNA_REGISTER from " + actor.nnAddr
           + " with " + actor.state + " state");
       actor.reRegister();
-      return true;
+      return false;
     }
     writeLock();
     try {

+ 14 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java

@@ -222,7 +222,19 @@ class BPServiceActor implements Runnable {
     // Second phase of the handshake with the NN.
     register();
   }
-  
+
+  // This is useful to make sure the NN gets a Heartbeat before a Blockreport
+  // upon NN restart while the DN keeps retrying. Otherwise:
+  // 1. NN restarts.
+  // 2. Heartbeat RPC will retry and succeed. NN asks DN to reregister.
+  // 3. After reregistration completes, DN will send Blockreport first.
+  // 4. Given NN receives Blockreport after Heartbeat, it won't mark
+  //    DatanodeStorageInfo#blockContentsStale to false until the next
+  //    Blockreport.
+  void scheduleHeartbeat() {
+    lastHeartbeat = 0;
+  }
+
   /**
    * This method arranges for the data node to send the block report at
    * the next heartbeat.
@@ -902,6 +914,7 @@ class BPServiceActor implements Runnable {
       retrieveNamespaceInfo();
       // and re-register
       register();
+      scheduleHeartbeat();
     }
   }
 

+ 12 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java

@@ -36,8 +36,10 @@ import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Properties;
+import java.util.Set;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -106,13 +108,22 @@ public class BlockPoolSliceStorage extends Storage {
   void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo,
       Collection<File> dataDirs, StartupOption startOpt) throws IOException {
     LOG.info("Analyzing storage directories for bpid " + nsInfo.getBlockPoolID());
+    Set<String> existingStorageDirs = new HashSet<String>();
+    for (int i = 0; i < getNumStorageDirs(); i++) {
+      existingStorageDirs.add(getStorageDir(i).getRoot().getAbsolutePath());
+    }
+
     // 1. For each BP data directory analyze the state and
     // check whether all is consistent before transitioning.
-    this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
     ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>(
         dataDirs.size());
     for (Iterator<File> it = dataDirs.iterator(); it.hasNext();) {
       File dataDir = it.next();
+      if (existingStorageDirs.contains(dataDir.getAbsolutePath())) {
+        LOG.info("Storage directory " + dataDir + " has already been used.");
+        it.remove();
+        continue;
+      }
       StorageDirectory sd = new StorageDirectory(dataDir, null, true);
       StorageState curState;
       try {

+ 144 - 37
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -55,6 +55,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -172,43 +173,99 @@ public class DataStorage extends Storage {
   }
   
   /**
-   * Analyze storage directories.
-   * Recover from previous transitions if required. 
-   * Perform fs state transition if necessary depending on the namespace info.
-   * Read storage info.
-   * <br>
-   * This method should be synchronized between multiple DN threads.  Only the 
-   * first DN thread does DN level storage dir recoverTransitionRead.
-   * 
+   * Write the storage properties of the given directories; a per-directory
+   * variant of {@link org.apache.hadoop.hdfs.server.common.Storage#writeAll()}.
+   */
+  private void writeAll(Collection<StorageDirectory> dirs) throws IOException {
+    this.layoutVersion = getServiceLayoutVersion();
+    for (StorageDirectory dir : dirs) {
+      writeProperties(dir);
+    }
+  }
+
+  /**
+   * Add a list of volumes to be managed by DataStorage. Each volume that is
+   * empty is formatted; otherwise it is recovered from previous transitions
+   * if required.
+   *
+   * @param datanode the reference to DataNode.
    * @param nsInfo namespace information
    * @param dataDirs array of data storage directories
    * @param startOpt startup option
    * @throws IOException
    */
-  synchronized void recoverTransitionRead(DataNode datanode,
+  synchronized void addStorageLocations(DataNode datanode,
       NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
       StartupOption startOpt)
       throws IOException {
-    if (initialized) {
-      // DN storage has been initialized, no need to do anything
-      return;
+    // Similar to recoverTransitionRead, it first ensures the datanode level
+    // format is completed.
+    List<StorageLocation> tmpDataDirs =
+        new ArrayList<StorageLocation>(dataDirs);
+    addStorageLocations(datanode, nsInfo, tmpDataDirs, startOpt, false, true);
+
+    Collection<File> bpDataDirs = new ArrayList<File>();
+    String bpid = nsInfo.getBlockPoolID();
+    for (StorageLocation dir : dataDirs) {
+      File dnRoot = dir.getFile();
+      File bpRoot = BlockPoolSliceStorage.getBpRoot(bpid, new File(dnRoot,
+          STORAGE_DIR_CURRENT));
+      bpDataDirs.add(bpRoot);
     }
-    LOG.info("Data-node version: " + HdfsConstants.DATANODE_LAYOUT_VERSION
-        + " and name-node layout version: " + nsInfo.getLayoutVersion());
-    
-    // 1. For each data directory calculate its state and 
-    // check whether all is consistent before transitioning.
-    // Format and recover.
-    this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
-    ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>(dataDirs.size());
+    // mkdir for the list of BlockPoolStorage
+    makeBlockPoolDataDir(bpDataDirs, null);
+    BlockPoolSliceStorage bpStorage = this.bpStorageMap.get(bpid);
+    if (bpStorage == null) {
+      bpStorage = new BlockPoolSliceStorage(
+          nsInfo.getNamespaceID(), bpid, nsInfo.getCTime(),
+          nsInfo.getClusterID());
+    }
+
+    bpStorage.recoverTransitionRead(datanode, nsInfo, bpDataDirs, startOpt);
+    addBlockPoolStorage(bpid, bpStorage);
+  }
+
+  /**
+   * Add a list of volumes to be managed by this DataStorage. Each volume
+   * that is empty is formatted; otherwise it is recovered from previous
+   * transitions if required.
+   *
+   * If isInitialize is false, only the directories that have finished the
+   * doTransition() process will be added into DataStorage.
+   *
+   * @param datanode the reference to DataNode.
+   * @param nsInfo namespace information
+   * @param dataDirs array of data storage directories
+   * @param startOpt startup option
+   * @param isInitialize whether it is called when the DataNode starts up.
+   * @param ignoreExistingDirs if true, do not fail when none of the given
+   *          directories is new (i.e., all have already been loaded).
+   * @throws IOException
+   */
+  private synchronized void addStorageLocations(DataNode datanode,
+      NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
+      StartupOption startOpt, boolean isInitialize, boolean ignoreExistingDirs)
+      throws IOException {
+    Set<String> existingStorageDirs = new HashSet<String>();
+    for (int i = 0; i < getNumStorageDirs(); i++) {
+      existingStorageDirs.add(getStorageDir(i).getRoot().getAbsolutePath());
+    }
+
+    // 1. For each data directory calculate its state and check whether all is
+    // consistent before transitioning. Format and recover.
+    ArrayList<StorageState> dataDirStates =
+        new ArrayList<StorageState>(dataDirs.size());
+    List<StorageDirectory> addedStorageDirectories =
+        new ArrayList<StorageDirectory>();
     for(Iterator<StorageLocation> it = dataDirs.iterator(); it.hasNext();) {
       File dataDir = it.next().getFile();
+      if (existingStorageDirs.contains(dataDir.getAbsolutePath())) {
+        LOG.info("Storage directory " + dataDir + " has already been used.");
+        it.remove();
+        continue;
+      }
       StorageDirectory sd = new StorageDirectory(dataDir);
       StorageState curState;
       try {
         curState = sd.analyzeStorage(startOpt, this);
         // sd is locked but not opened
-        switch(curState) {
+        switch (curState) {
         case NORMAL:
           break;
         case NON_EXISTENT:
@@ -217,7 +274,8 @@ public class DataStorage extends Storage {
           it.remove();
           continue;
         case NOT_FORMATTED: // format
-          LOG.info("Storage directory " + dataDir + " is not formatted");
+          LOG.info("Storage directory " + dataDir + " is not formatted for "
+            + nsInfo.getBlockPoolID());
           LOG.info("Formatting ...");
           format(sd, nsInfo, datanode.getDatanodeUuid());
           break;
@@ -231,33 +289,82 @@ public class DataStorage extends Storage {
         //continue with other good dirs
         continue;
       }
-      // add to the storage list
-      addStorageDir(sd);
+      if (isInitialize) {
+        addStorageDir(sd);
+      }
+      addedStorageDirectories.add(sd);
       dataDirStates.add(curState);
     }
 
-    if (dataDirs.size() == 0 || dataDirStates.size() == 0)  // none of the data dirs exist
+    if (dataDirs.size() == 0 || dataDirStates.size() == 0) {
+      // none of the data dirs exist
+      if (ignoreExistingDirs) {
+        return;
+      }
       throw new IOException(
           "All specified directories are not accessible or do not exist.");
+    }
 
     // 2. Do transitions
     // Each storage directory is treated individually.
-    // During startup some of them can upgrade or rollback 
-    // while others could be uptodate for the regular startup.
-    try {
-      for (int idx = 0; idx < getNumStorageDirs(); idx++) {
-        doTransition(datanode, getStorageDir(idx), nsInfo, startOpt);
-        createStorageID(getStorageDir(idx));
+    // During startup some of them can upgrade or rollback
+    // while others could be up-to-date for the regular startup.
+    for (Iterator<StorageDirectory> it = addedStorageDirectories.iterator();
+        it.hasNext(); ) {
+      StorageDirectory sd = it.next();
+      try {
+        doTransition(datanode, sd, nsInfo, startOpt);
+        createStorageID(sd);
+      } catch (IOException e) {
+        if (!isInitialize) {
+          sd.unlock();
+          it.remove();
+          continue;
+        }
+        unlockAll();
+        throw e;
       }
-    } catch (IOException e) {
-      unlockAll();
-      throw e;
     }
 
-    // 3. Update all storages. Some of them might have just been formatted.
-    this.writeAll();
+    // 3. Update all successfully loaded storages. Some of them might have just
+    // been formatted.
+    this.writeAll(addedStorageDirectories);
+
+    // 4. Make newly loaded storage directories visible for service.
+    if (!isInitialize) {
+      this.storageDirs.addAll(addedStorageDirectories);
+    }
+  }
+
+  /**
+   * Analyze storage directories.
+   * Recover from previous transitions if required.
+   * Perform fs state transition if necessary depending on the namespace info.
+   * Read storage info.
+   * <br>
+   * This method should be synchronized between multiple DN threads.  Only the
+   * first DN thread does DN level storage dir recoverTransitionRead.
+   *
+   * @param nsInfo namespace information
+   * @param dataDirs array of data storage directories
+   * @param startOpt startup option
+   * @throws IOException
+   */
+  synchronized void recoverTransitionRead(DataNode datanode,
+      NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
+      StartupOption startOpt)
+      throws IOException {
+    if (initialized) {
+      // DN storage has been initialized, no need to do anything
+      return;
+    }
+    LOG.info("DataNode version: " + HdfsConstants.DATANODE_LAYOUT_VERSION
+        + " and NameNode layout version: " + nsInfo.getLayoutVersion());
+
+    this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
+    addStorageLocations(datanode, nsInfo, dataDirs, startOpt, true, false);
     
-    // 4. mark DN storage is initialized
+    // mark DN storage is initialized
     this.initialized = true;
   }
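
After this refactoring there are two entry points into the same logic: startup
still goes through recoverTransitionRead (which calls the private
addStorageLocations overload with isInitialize=true), while volumes added
later use the package-private addStorageLocations. An illustrative sketch
(datanode, nsInfo, and startOpt as in the signatures above; startupDirs and
newDirs are hypothetical collections of StorageLocation):

    storage.recoverTransitionRead(datanode, nsInfo, startupDirs, startOpt);
    // later, when additional volumes are configured:
    storage.addStorageLocations(datanode, nsInfo, newDirs, startOpt);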
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java

@@ -78,7 +78,7 @@ public class StorageLocation {
    * @return A StorageLocation object if successfully parsed, null otherwise.
    *         Does not throw any exceptions.
    */
-  static StorageLocation parse(String rawLocation)
+  public static StorageLocation parse(String rawLocation)
       throws IOException, SecurityException {
     Matcher matcher = regex.matcher(rawLocation);
     StorageType storageType = StorageType.DEFAULT;

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java

@@ -22,6 +22,7 @@ import java.io.File;
 import java.io.FileDescriptor;
 import java.io.IOException;
 import java.io.InputStream;
+import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 
@@ -39,6 +40,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
@@ -91,6 +93,10 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
   /** @return a list of volumes. */
   public List<V> getVolumes();
 
+  /** Add a collection of StorageLocations to FsDataset. */
+  public void addVolumes(Collection<StorageLocation> volumes)
+      throws IOException;
+
   /** @return a storage with the given storage ID */
   public DatanodeStorage getStorage(final String storageUuid);
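
Together with StorageLocation.parse being made public (above), the new
addVolumes method gives callers a way to hand freshly parsed locations to the
dataset. A hedged sketch, assuming an FsDatasetSpi instance fsDataset and the
usual [STORAGETYPE]URI syntax for locations:

    Collection<StorageLocation> newVols = new ArrayList<StorageLocation>();
    newVols.add(StorageLocation.parse("[DISK]file:///data/new1"));
    newVols.add(StorageLocation.parse("file:///data/new2")); // DEFAULT type
    fsDataset.addVolumes(newVols);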
 

+ 40 - 29
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java

@@ -61,6 +61,7 @@ class FsDatasetAsyncDiskService {
   private static final long THREADS_KEEP_ALIVE_SECONDS = 60; 
   
   private final DataNode datanode;
+  private final ThreadGroup threadGroup;
   private Map<File, ThreadPoolExecutor> executors
       = new HashMap<File, ThreadPoolExecutor>();
   
@@ -70,42 +71,52 @@ class FsDatasetAsyncDiskService {
    * 
    * The AsyncDiskServices uses one ThreadPool per volume to do the async
    * disk operations.
-   * 
-   * @param volumes The roots of the data volumes.
    */
-  FsDatasetAsyncDiskService(DataNode datanode, File[] volumes) {
+  FsDatasetAsyncDiskService(DataNode datanode) {
     this.datanode = datanode;
+    this.threadGroup = new ThreadGroup(getClass().getSimpleName());
+  }
 
-    final ThreadGroup threadGroup = new ThreadGroup(getClass().getSimpleName());
-    // Create one ThreadPool per volume
-    for (int v = 0 ; v < volumes.length; v++) {
-      final File vol = volumes[v];
-      ThreadFactory threadFactory = new ThreadFactory() {
-          int counter = 0;
+  private void addExecutorForVolume(final File volume) {
+    ThreadFactory threadFactory = new ThreadFactory() {
+      int counter = 0;
 
-          @Override
-          public Thread newThread(Runnable r) {
-            int thisIndex;
-            synchronized (this) {
-              thisIndex = counter++;
-            }
-            Thread t = new Thread(threadGroup, r);
-            t.setName("Async disk worker #" + thisIndex +
-                      " for volume " + vol);
-            return t;
-          }
-        };
+      @Override
+      public Thread newThread(Runnable r) {
+        int thisIndex;
+        synchronized (this) {
+          thisIndex = counter++;
+        }
+        Thread t = new Thread(threadGroup, r);
+        t.setName("Async disk worker #" + thisIndex +
+            " for volume " + volume);
+        return t;
+      }
+    };
+
+    ThreadPoolExecutor executor = new ThreadPoolExecutor(
+        CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME,
+        THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS,
+        new LinkedBlockingQueue<Runnable>(), threadFactory);
 
-      ThreadPoolExecutor executor = new ThreadPoolExecutor(
-          CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME, 
-          THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS, 
-          new LinkedBlockingQueue<Runnable>(), threadFactory);
+    // This can reduce the number of running threads
+    executor.allowCoreThreadTimeOut(true);
+    executors.put(volume, executor);
+  }
 
-      // This can reduce the number of running threads
-      executor.allowCoreThreadTimeOut(true);
-      executors.put(vol, executor);
+  /**
+   * Starts AsyncDiskService for a new volume
+   * @param volume the root of the new data volume.
+   */
+  synchronized void addVolume(File volume) {
+    if (executors == null) {
+      throw new RuntimeException("AsyncDiskService is already shutdown");
     }
-    
+    ThreadPoolExecutor executor = executors.get(volume);
+    if (executor != null) {
+      throw new RuntimeException("Volume " + volume + " is already existed.");
+    }
+    addExecutorForVolume(volume);
   }
   
   synchronized long countPendingDeletions() {

+ 58 - 18
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -202,6 +202,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   final Map<String, DatanodeStorage> storageMap;
   final FsDatasetAsyncDiskService asyncDiskService;
   final FsDatasetCache cacheManager;
+  private final Configuration conf;
   private final int validVolsRequired;
 
   final ReplicaMap volumeMap;
@@ -216,6 +217,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       ) throws IOException {
     this.datanode = datanode;
     this.dataStorage = storage;
+    this.conf = conf;
     // The number of volumes required for operation is the total number 
     // of volumes minus the number of failed volumes we can tolerate.
     final int volFailuresTolerated =
@@ -242,38 +244,76 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     }
 
     storageMap = new HashMap<String, DatanodeStorage>();
-    final List<FsVolumeImpl> volArray = new ArrayList<FsVolumeImpl>(
-        storage.getNumStorageDirs());
-    for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
-      Storage.StorageDirectory sd = storage.getStorageDir(idx);
-      final File dir = sd.getCurrentDir();
-      final StorageType storageType = getStorageTypeFromLocations(dataLocations, sd.getRoot());
-      volArray.add(new FsVolumeImpl(this, sd.getStorageUuid(), dir, conf,
-          storageType));
-      LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
-      storageMap.put(sd.getStorageUuid(),
-          new DatanodeStorage(sd.getStorageUuid(), DatanodeStorage.State.NORMAL, storageType));
-    }
     volumeMap = new ReplicaMap(this);
-
     @SuppressWarnings("unchecked")
     final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl =
         ReflectionUtils.newInstance(conf.getClass(
             DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
             RoundRobinVolumeChoosingPolicy.class,
             VolumeChoosingPolicy.class), conf);
-    volumes = new FsVolumeList(volArray, volsFailed, blockChooserImpl);
-    volumes.initializeReplicaMaps(volumeMap);
+    volumes = new FsVolumeList(volsFailed, blockChooserImpl);
+    asyncDiskService = new FsDatasetAsyncDiskService(datanode);
 
-    File[] roots = new File[storage.getNumStorageDirs()];
     for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
-      roots[idx] = storage.getStorageDir(idx).getCurrentDir();
+      addVolume(dataLocations, storage.getStorageDir(idx));
     }
-    asyncDiskService = new FsDatasetAsyncDiskService(datanode, roots);
+
     cacheManager = new FsDatasetCache(this);
     registerMBean(datanode.getDatanodeUuid());
   }
 
+  private void addVolume(Collection<StorageLocation> dataLocations,
+      Storage.StorageDirectory sd) throws IOException {
+    final File dir = sd.getCurrentDir();
+    final StorageType storageType =
+        getStorageTypeFromLocations(dataLocations, sd.getRoot());
+
+    // If an IOException is thrown from FsVolumeImpl() or getVolumeMap(),
+    // nothing needs to be rolled back to keep the various data structures,
+    // e.g., storageMap and asyncDiskService, consistent.
+    FsVolumeImpl fsVolume = new FsVolumeImpl(
+        this, sd.getStorageUuid(), dir, this.conf, storageType);
+    fsVolume.getVolumeMap(volumeMap);
+
+    volumes.addVolume(fsVolume);
+    storageMap.put(sd.getStorageUuid(),
+        new DatanodeStorage(sd.getStorageUuid(),
+                            DatanodeStorage.State.NORMAL,
+                            storageType));
+    asyncDiskService.addVolume(sd.getCurrentDir());
+
+    LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
+  }
+
+  /**
+   * Add a collection of StorageLocations to FsDataset.
+   *
+   * @pre dataStorage must have these volumes.
+   * @param volumes the StorageLocations of the volumes to add.
+   * @throws IOException
+   */
+  @Override
+  public synchronized void addVolumes(Collection<StorageLocation> volumes)
+      throws IOException {
+    final Collection<StorageLocation> dataLocations =
+        DataNode.getStorageLocations(this.conf);
+    Map<String, Storage.StorageDirectory> allStorageDirs =
+        new HashMap<String, Storage.StorageDirectory>();
+    for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
+      Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
+      allStorageDirs.put(sd.getRoot().getAbsolutePath(), sd);
+    }
+
+    for (StorageLocation vol : volumes) {
+      String key = vol.getFile().getAbsolutePath();
+      if (!allStorageDirs.containsKey(key)) {
+        LOG.warn("Attempt to add an invalid volume: " + vol.getFile());
+      } else {
+        addVolume(dataLocations, allStorageDirs.get(key));
+      }
+    }
+  }
+
   private StorageType getStorageTypeFromLocations(
       Collection<StorageLocation> dataLocations, File dir) {
     for (StorageLocation dataLocation : dataLocations) {

+ 14 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java

@@ -40,9 +40,8 @@ class FsVolumeList {
   private final VolumeChoosingPolicy<FsVolumeImpl> blockChooser;
   private volatile int numFailedVolumes;
 
-  FsVolumeList(List<FsVolumeImpl> volumes, int failedVols,
+  FsVolumeList(int failedVols,
       VolumeChoosingPolicy<FsVolumeImpl> blockChooser) {
-    this.volumes = Collections.unmodifiableList(volumes);
     this.blockChooser = blockChooser;
     this.numFailedVolumes = failedVols;
   }
@@ -101,12 +100,6 @@ class FsVolumeList {
     }
     return remaining;
   }
-    
-  void initializeReplicaMaps(ReplicaMap globalReplicaMap) throws IOException {
-    for (FsVolumeImpl v : volumes) {
-      v.getVolumeMap(globalReplicaMap);
-    }
-  }
   
   void getAllVolumesMap(final String bpid, final ReplicaMap volumeMap) throws IOException {
     long totalStartTime = Time.monotonicNow();
@@ -205,6 +198,19 @@ class FsVolumeList {
     return volumes.toString();
   }
 
+  /**
+   * Dynamically add a new volume to the list of volumes that this DN manages.
+   * @param newVolume the new FsVolumeImpl instance.
+   */
+  synchronized void addVolume(FsVolumeImpl newVolume) {
+    // Make a copy of volumes to add new volumes.
+    final List<FsVolumeImpl> volumeList = volumes == null ?
+        new ArrayList<FsVolumeImpl>() :
+        new ArrayList<FsVolumeImpl>(volumes);
+    volumeList.add(newVolume);
+    volumes = Collections.unmodifiableList(volumeList);
+    FsDatasetImpl.LOG.info("Added new volume: " + newVolume.toString());
+  }
 
   void addBlockPool(final String bpid, final Configuration conf) throws IOException {
     long totalStartTime = Time.monotonicNow();

+ 9 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -6091,7 +6091,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       blockManager.shutdown();
     }
   }
-  
 
   @Override // FSNamesystemMBean
   public int getNumLiveDataNodes() {
@@ -6138,6 +6137,15 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     return getBlockManager().getDatanodeManager().getNumStaleNodes();
   }
 
+  /**
+   * Storages are marked as "content stale" after the NN restarts or fails
+   * over, until the NN receives the first Heartbeat followed by the first
+   * Blockreport.
+   */
+  @Override // FSNamesystemMBean
+  public int getNumStaleStorages() {
+    return getBlockManager().getDatanodeManager().getNumStaleStorages();
+  }
+
   /**
    * Sets the current generation stamp for legacy blocks
    */

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java

@@ -151,4 +151,11 @@ public interface FSNamesystemMBean {
    * @return number of blocks pending deletion
    */
   long getPendingDeletionBlocks();
+
+  /**
+   * Number of content stale storages.
+   * @return number of content stale storages
+   */
+  public int getNumStaleStorages();
+
 }

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java

@@ -22,6 +22,9 @@ import org.apache.hadoop.classification.InterfaceStability;
 
 /**
 * A RegisterCommand is an instruction to a datanode to register with the namenode.
+ * This command can't be combined with other commands in the same response.
+ * This is because after the datanode processes RegisterCommand, it will skip
+ * the rest of the DatanodeCommands in the same HeartbeatResponse.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/CMakeLists.txt

@@ -37,6 +37,10 @@ ELSE (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
 ENDIF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
 
 IF(FUSE_FOUND)
+    add_library(posix_util
+        ../util/posix_util.c
+    )
+
     add_executable(fuse_dfs
         fuse_dfs.c
         fuse_options.c 

+ 271 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.c

@@ -0,0 +1,271 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common/htable.h"
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+struct htable_pair {
+    void *key;
+    void *val;
+};
+
+/**
+ * A hash table which uses linear probing.
+ */
+struct htable {
+    uint32_t capacity;
+    uint32_t used;
+    htable_hash_fn_t hash_fun;
+    htable_eq_fn_t eq_fun;
+    struct htable_pair *elem;
+};
+
+/**
+ * An internal function for inserting a value into the hash table.
+ *
+ * Note: this function assumes that you have made enough space in the table.
+ *
+ * @param nelem         The hash table's element array to insert into.
+ * @param capacity      The capacity of the hash table.
+ * @param hash_fun      The hash function to use.
+ * @param key           The key to insert.
+ * @param val           The value to insert.
+ */
+static void htable_insert_internal(struct htable_pair *nelem, 
+        uint32_t capacity, htable_hash_fn_t hash_fun, void *key,
+        void *val)
+{
+    uint32_t i;
+
+    i = hash_fun(key, capacity);
+    while (1) {
+        if (!nelem[i].key) {
+            nelem[i].key = key;
+            nelem[i].val = val;
+            return;
+        }
+        i++;
+        if (i == capacity) {
+            i = 0;
+        }
+    }
+}
+
+static int htable_realloc(struct htable *htable, uint32_t new_capacity)
+{
+    struct htable_pair *nelem;
+    uint32_t i, old_capacity = htable->capacity;
+    htable_hash_fn_t hash_fun = htable->hash_fun;
+
+    nelem = calloc(new_capacity, sizeof(struct htable_pair));
+    if (!nelem) {
+        return ENOMEM;
+    }
+    for (i = 0; i < old_capacity; i++) {
+        struct htable_pair *pair = htable->elem + i;
+        htable_insert_internal(nelem, new_capacity, hash_fun,
+                               pair->key, pair->val);
+    }
+    free(htable->elem);
+    htable->elem = nelem;
+    htable->capacity = new_capacity;
+    return 0;
+}
+
+struct htable *htable_alloc(uint32_t size,
+                htable_hash_fn_t hash_fun, htable_eq_fn_t eq_fun)
+{
+    struct htable *htable;
+
+    htable = calloc(1, sizeof(*htable));
+    if (!htable) {
+        return NULL;
+    }
+    size = (size + 1) >> 1;
+    size = size << 1;
+    if (size < HTABLE_MIN_SIZE) {
+        size = HTABLE_MIN_SIZE;
+    }
+    htable->hash_fun = hash_fun;
+    htable->eq_fun = eq_fun;
+    htable->used = 0;
+    if (htable_realloc(htable, size)) {
+        free(htable);
+        return NULL;
+    }
+    return htable;
+}
+
+void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx)
+{
+    uint32_t i;
+
+    for (i = 0; i != htable->capacity; ++i) {
+        struct htable_pair *elem = htable->elem + i;
+        if (elem->key) {
+            fun(ctx, elem->key, elem->val);
+        }
+    }
+}
+
+void htable_free(struct htable *htable)
+{
+    if (htable) {
+        free(htable->elem);
+        free(htable);
+    }
+}
+
+int htable_put(struct htable *htable, void *key, void *val)
+{
+    int ret;
+    uint32_t nused;
+
+    // NULL is not a valid key value.
+    // This helps us implement htable_get_internal efficiently, since we know
+    // that we can stop when we encounter the first NULL key.
+    if (!key) {
+        return EINVAL;
+    }
+    // NULL is not a valid value.  Otherwise the results of htable_get would
+    // be confusing (does a NULL return mean entry not found, or that the
+    // entry was found and was NULL?) 
+    if (!val) {
+        return EINVAL;
+    }
+    // Re-hash if we have used more than half of the hash table
+    nused = htable->used + 1;
+    if (nused >= (htable->capacity / 2)) {
+        ret = htable_realloc(htable, htable->capacity * 2);
+        if (ret)
+            return ret;
+    }
+    htable_insert_internal(htable->elem, htable->capacity,
+                                htable->hash_fun, key, val);
+    htable->used++;
+    return 0;
+}
+
+static int htable_get_internal(const struct htable *htable,
+                               const void *key, uint32_t *out)
+{
+    uint32_t start_idx, idx;
+
+    start_idx = htable->hash_fun(key, htable->capacity);
+    idx = start_idx;
+    while (1) {
+        struct htable_pair *pair = htable->elem + idx;
+        if (!pair->key) {
+            // We always maintain the invariant that the entries corresponding
+            // to a given key are stored in a contiguous block, not separated
+            // by any NULLs.  So if we encounter a NULL, our search is over.
+            return ENOENT;
+        } else if (htable->eq_fun(pair->key, key)) {
+            *out = idx;
+            return 0;
+        }
+        idx++;
+        if (idx == htable->capacity) {
+            idx = 0;
+        }
+        if (idx == start_idx) {
+            return ENOENT;
+        }
+    }
+}
+
+void *htable_get(const struct htable *htable, const void *key)
+{
+    uint32_t idx;
+
+    if (htable_get_internal(htable, key, &idx)) {
+        return NULL;
+    }
+    return htable->elem[idx].val;
+}
+
+void htable_pop(struct htable *htable, const void *key,
+                void **found_key, void **found_val)
+{
+    uint32_t hole, i;
+    const void *nkey;
+
+    if (htable_get_internal(htable, key, &hole)) {
+        *found_key = NULL;
+        *found_val = NULL;
+        return;
+    }
+    i = hole;
+    htable->used--;
+    // We need to maintain the compactness invariant used in
+    // htable_get_internal.  This invariant specifies that the entries for any
+    // given key are never separated by NULLs (although they may be separated
+    // by entries for other keys.)
+    while (1) {
+        i++;
+        if (i == htable->capacity) {
+            i = 0;
+        }
+        nkey = htable->elem[i].key;
+        if (!nkey) {
+            *found_key = htable->elem[hole].key;
+            *found_val = htable->elem[hole].val;
+            htable->elem[hole].key = NULL;
+            htable->elem[hole].val = NULL;
+            return;
+        } else if (htable->eq_fun(key, nkey)) {
+            htable->elem[hole].key = htable->elem[i].key;
+            htable->elem[hole].val = htable->elem[i].val;
+            hole = i;
+        }
+    }
+}
+
+uint32_t htable_used(const struct htable *htable)
+{
+    return htable->used;
+}
+
+uint32_t htable_capacity(const struct htable *htable)
+{
+    return htable->capacity;
+}
+
+uint32_t ht_hash_string(const void *str, uint32_t max)
+{
+    const char *s = str;
+    uint32_t hash = 0;
+
+    while (*s) {
+        hash = (hash * 31) + *s;
+        s++;
+    }
+    return hash % max;
+}
+
+int ht_compare_string(const void *a, const void *b)
+{
+    return strcmp(a, b) == 0;
+}
+
+// vim: ts=4:sw=4:tw=79:et
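
The grow-at-half-full policy in htable_put above is easy to exercise directly. A minimal sketch, assuming it is compiled and linked together with htable.c from this changeset (the main function and the key/value literals are illustrative only):

```c
#include <assert.h>

#include "common/htable.h"

int main(void)
{
    // htable_alloc rounds the requested size up to an even number and
    // enforces HTABLE_MIN_SIZE, so this table starts with capacity 4.
    struct htable *ht = htable_alloc(4, ht_hash_string, ht_compare_string);
    assert(ht && htable_capacity(ht) == 4);

    assert(htable_put(ht, "k1", "v1") == 0);  // nused 1 < 4/2: no resize
    assert(htable_put(ht, "k2", "v2") == 0);  // nused 2 >= 4/2: grows to 8
    assert(htable_capacity(ht) == 8);
    assert(htable_used(ht) == 2);
    assert(htable_get(ht, "k2") != NULL);

    htable_free(ht);
    return 0;
}
```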

+ 161 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.h

@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HADOOP_CORE_COMMON_HASH_TABLE
+#define HADOOP_CORE_COMMON_HASH_TABLE
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdint.h>
+
+#define HTABLE_MIN_SIZE 4
+
+struct htable;
+
+/**
+ * An HTable hash function.
+ *
+ * @param key       The key.
+ * @param capacity  The total capacity.
+ *
+ * @return          The hash slot.  Must be less than the capacity.
+ */
+typedef uint32_t (*htable_hash_fn_t)(const void *key, uint32_t capacity);
+
+/**
+ * An HTable equality function.  Compares two keys.
+ *
+ * @param a         First key.
+ * @param b         Second key.
+ *
+ * @return          nonzero if the keys are equal.
+ */
+typedef int (*htable_eq_fn_t)(const void *a, const void *b);
+
+/**
+ * Allocate a new hash table.
+ *
+ * @param capacity  The minimum suggested starting capacity.
+ * @param hash_fun  The hash function to use in this hash table.
+ * @param eq_fun    The equals function to use in this hash table.
+ *
+ * @return          The new hash table on success; NULL on OOM.
+ */
+struct htable *htable_alloc(uint32_t capacity, htable_hash_fn_t hash_fun,
+                            htable_eq_fn_t eq_fun);
+
+typedef void (*visitor_fn_t)(void *ctx, void *key, void *val);
+
+/**
+ * Visit all of the entries in the hash table.
+ *
+ * @param htable    The hash table.
+ * @param fun       The callback function to invoke on each key and value.
+ * @param ctx       Context pointer to pass to the callback.
+ */
+void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx);
+
+/**
+ * Free the hash table.
+ *
+ * It is up to the calling code to ensure that the keys and values inside the
+ * table are de-allocated, if that is necessary.
+ *
+ * @param htable    The hash table.
+ */
+void htable_free(struct htable *htable);
+
+/**
+ * Add an entry to the hash table.
+ *
+ * @param htable    The hash table.
+ * @param key       The key to add.  This cannot be NULL.
+ * @param val       The value to add.  This cannot be NULL.
+ *
+ * @return          0 on success;
+ *                  EINVAL if the key or value is NULL;
+ *                  ENOMEM if there is not enough memory to add the element.
+ */
+int htable_put(struct htable *htable, void *key, void *val);
+
+/**
+ * Get an entry from the hash table.
+ *
+ * @param htable    The hash table.
+ * @param key       The key to find.
+ *
+ * @return          NULL if there is no such entry; the entry otherwise.
+ */
+void *htable_get(const struct htable *htable, const void *key);
+
+/**
+ * Get an entry from the hash table and remove it.
+ *
+ * @param htable    The hash table.
+ * @param key       The key for the entry to find and remove.
+ * @param found_key (out param) NULL if the entry was not found; the found key
+ *                      otherwise.
+ * @param found_val (out param) NULL if the entry was not found; the found
+ *                      value otherwise.
+ */
+void htable_pop(struct htable *htable, const void *key,
+                void **found_key, void **found_val);
+
+/**
+ * Get the number of entries used in the hash table.
+ *
+ * @param htable    The hash table.
+ *
+ * @return          The number of entries used in the hash table.
+ */
+uint32_t htable_used(const struct htable *htable);
+
+/**
+ * Get the capacity of the hash table.
+ *
+ * @param htable    The hash table.
+ *
+ * @return          The capacity of the hash table.
+ */
+uint32_t htable_capacity(const struct htable *htable);
+
+/**
+ * Hash a string.
+ *
+ * @param str       The string.
+ * @param max       Maximum hash value
+ *
+ * @return          A number less than max.
+ */
+uint32_t ht_hash_string(const void *str, uint32_t max);
+
+/**
+ * Compare two strings.
+ *
+ * @param a         The first string.
+ * @param b         The second string.
+ *
+ * @return          1 if the strings are identical; 0 otherwise.
+ */
+int ht_compare_string(const void *a, const void *b);
+
+#endif
+
+// vim: ts=4:sw=4:tw=79:et
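
To make the API above concrete, here is a hedged usage sketch, assuming it is linked against htable.c from this changeset (the print visitor and the sample configuration entries are illustrative, not part of libhdfs):

```c
#include <stdio.h>

#include "common/htable.h"

// Illustrative visitor matching visitor_fn_t; prints each key/value pair.
static void printEntry(void *ctx, void *key, void *val)
{
    (void)ctx;
    printf("%s -> %s\n", (const char *)key, (const char *)val);
}

int main(void)
{
    struct htable *ht = htable_alloc(16, ht_hash_string, ht_compare_string);
    void *poppedKey, *poppedVal;

    if (!ht) {
        return 1;  // htable_alloc returns NULL on OOM
    }
    if (htable_put(ht, "dfs.replication", "3") ||
        htable_put(ht, "dfs.blocksize", "134217728")) {
        htable_free(ht);
        return 1;
    }
    printf("blocksize = %s\n", (const char *)htable_get(ht, "dfs.blocksize"));
    htable_visit(ht, printEntry, NULL);

    // Remove one entry; poppedKey/poppedVal come back NULL if the key is absent.
    htable_pop(ht, "dfs.replication", &poppedKey, &poppedVal);
    htable_free(ht);
    return 0;
}
```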

+ 33 - 32
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c

@@ -19,8 +19,8 @@
 #include "exception.h"
 #include "hdfs.h"
 #include "jni_helper.h"
+#include "platform.h"
 
-#include <inttypes.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -35,54 +35,54 @@ struct ExceptionInfo {
 
 static const struct ExceptionInfo gExceptionInfo[] = {
     {
-        .name = "java.io.FileNotFoundException",
-        .noPrintFlag = NOPRINT_EXC_FILE_NOT_FOUND,
-        .excErrno = ENOENT,
+        "java.io.FileNotFoundException",
+        NOPRINT_EXC_FILE_NOT_FOUND,
+        ENOENT,
     },
     {
-        .name = "org.apache.hadoop.security.AccessControlException",
-        .noPrintFlag = NOPRINT_EXC_ACCESS_CONTROL,
-        .excErrno = EACCES,
+        "org.apache.hadoop.security.AccessControlException",
+        NOPRINT_EXC_ACCESS_CONTROL,
+        EACCES,
     },
     {
-        .name = "org.apache.hadoop.fs.UnresolvedLinkException",
-        .noPrintFlag = NOPRINT_EXC_UNRESOLVED_LINK,
-        .excErrno = ENOLINK,
+        "org.apache.hadoop.fs.UnresolvedLinkException",
+        NOPRINT_EXC_UNRESOLVED_LINK,
+        ENOLINK,
     },
     {
-        .name = "org.apache.hadoop.fs.ParentNotDirectoryException",
-        .noPrintFlag = NOPRINT_EXC_PARENT_NOT_DIRECTORY,
-        .excErrno = ENOTDIR,
+        "org.apache.hadoop.fs.ParentNotDirectoryException",
+        NOPRINT_EXC_PARENT_NOT_DIRECTORY,
+        ENOTDIR,
     },
     {
-        .name = "java.lang.IllegalArgumentException",
-        .noPrintFlag = NOPRINT_EXC_ILLEGAL_ARGUMENT,
-        .excErrno = EINVAL,
+        "java.lang.IllegalArgumentException",
+        NOPRINT_EXC_ILLEGAL_ARGUMENT,
+        EINVAL,
     },
     {
-        .name = "java.lang.OutOfMemoryError",
-        .noPrintFlag = 0,
-        .excErrno = ENOMEM,
+        "java.lang.OutOfMemoryError",
+        0,
+        ENOMEM,
     },
     {
-        .name = "org.apache.hadoop.hdfs.server.namenode.SafeModeException",
-        .noPrintFlag = 0,
-        .excErrno = EROFS,
+        "org.apache.hadoop.hdfs.server.namenode.SafeModeException",
+        0,
+        EROFS,
     },
     {
-        .name = "org.apache.hadoop.fs.FileAlreadyExistsException",
-        .noPrintFlag = 0,
-        .excErrno = EEXIST,
+        "org.apache.hadoop.fs.FileAlreadyExistsException",
+        0,
+        EEXIST,
     },
     {
-        .name = "org.apache.hadoop.hdfs.protocol.QuotaExceededException",
-        .noPrintFlag = 0,
-        .excErrno = EDQUOT,
+        "org.apache.hadoop.hdfs.protocol.QuotaExceededException",
+        0,
+        EDQUOT,
     },
     {
-        .name = "org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException",
-        .noPrintFlag = 0,
-        .excErrno = ESTALE,
+        "org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException",
+        0,
+        ESTALE,
     },
 };
 
@@ -113,6 +113,7 @@ int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
     jstring jStr = NULL;
     jvalue jVal;
     jthrowable jthr;
+    const char *stackTrace;
 
     jthr = classNameOfObject(exc, env, &className);
     if (jthr) {
@@ -148,7 +149,7 @@ int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
             destroyLocalReference(env, jthr);
         } else {
             jStr = jVal.l;
-            const char *stackTrace = (*env)->GetStringUTFChars(env, jStr, NULL);
+            stackTrace = (*env)->GetStringUTFChars(env, jStr, NULL);
             if (!stackTrace) {
                 fprintf(stderr, "(unable to get stack trace for %s exception: "
                         "GetStringUTFChars error.)\n", className);

+ 6 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h

@@ -34,13 +34,14 @@
  * usually not what you want.)
  */
 
+#include "platform.h"
+
 #include <jni.h>
 #include <stdio.h>
 
 #include <stdlib.h>
 #include <stdarg.h>
 #include <search.h>
-#include <pthread.h>
 #include <errno.h>
 
 /**
@@ -109,7 +110,7 @@ int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
  *                        object.
  */
 int printExceptionAndFree(JNIEnv *env, jthrowable exc, int noPrintFlags,
-        const char *fmt, ...) __attribute__((format(printf, 4, 5)));  
+        const char *fmt, ...) TYPE_CHECKED_PRINTF_FORMAT(4, 5);
 
 /**
  * Print out information about the pending exception and free it.
@@ -124,7 +125,7 @@ int printExceptionAndFree(JNIEnv *env, jthrowable exc, int noPrintFlags,
  *                        object.
  */
 int printPendingExceptionAndFree(JNIEnv *env, int noPrintFlags,
-        const char *fmt, ...) __attribute__((format(printf, 3, 4)));  
+        const char *fmt, ...) TYPE_CHECKED_PRINTF_FORMAT(3, 4);
 
 /**
  * Get a local reference to the pending exception and clear it.
@@ -150,6 +151,7 @@ jthrowable getPendingExceptionAndClear(JNIEnv *env);
  * @return                A local reference to a RuntimeError
  */
 jthrowable newRuntimeError(JNIEnv *env, const char *fmt, ...)
-        __attribute__((format(printf, 2, 3)));
+        TYPE_CHECKED_PRINTF_FORMAT(2, 3);
 
+#undef TYPE_CHECKED_PRINTF_FORMAT
 #endif
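
The point of routing these declarations through TYPE_CHECKED_PRINTF_FORMAT is to keep gcc's compile-time format checking on POSIX while compiling cleanly on Windows. A hedged sketch of what the macro buys (logMsg is a hypothetical helper, not part of libhdfs; the expansion comes from the posix platform.h added later in this changeset):

```c
#include <stdarg.h>
#include <stdio.h>

#include "platform.h"

// Hypothetical varargs logger.  On POSIX the macro expands to
// __attribute__((format(printf, 1, 2))), so gcc/clang check the argument
// types against the format string; on Windows it expands to nothing.
static void logMsg(const char *fmt, ...) TYPE_CHECKED_PRINTF_FORMAT(1, 2);

static void logMsg(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

int main(void)
{
    logMsg("read %d bytes\n", 128);
    // logMsg("read %d bytes\n", "oops");  // would draw a format warning
    return 0;
}
```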

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c

@@ -49,18 +49,18 @@ int expectFileStats(hdfsFile file,
             stats->totalShortCircuitBytesRead,
             stats->totalZeroCopyBytesRead);
     if (expectedTotalBytesRead != UINT64_MAX) {
-        EXPECT_INT64_EQ(expectedTotalBytesRead, stats->totalBytesRead);
+        EXPECT_UINT64_EQ(expectedTotalBytesRead, stats->totalBytesRead);
     }
     if (expectedTotalLocalBytesRead != UINT64_MAX) {
-        EXPECT_INT64_EQ(expectedTotalLocalBytesRead,
+        EXPECT_UINT64_EQ(expectedTotalLocalBytesRead,
                       stats->totalLocalBytesRead);
     }
     if (expectedTotalShortCircuitBytesRead != UINT64_MAX) {
-        EXPECT_INT64_EQ(expectedTotalShortCircuitBytesRead,
+        EXPECT_UINT64_EQ(expectedTotalShortCircuitBytesRead,
                       stats->totalShortCircuitBytesRead);
     }
     if (expectedTotalZeroCopyBytesRead != UINT64_MAX) {
-        EXPECT_INT64_EQ(expectedTotalZeroCopyBytesRead,
+        EXPECT_UINT64_EQ(expectedTotalZeroCopyBytesRead,
                       stats->totalZeroCopyBytesRead);
     }
     hdfsFileFreeReadStatistics(stats);

+ 12 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.h

@@ -126,6 +126,18 @@ struct hdfsFile_internal;
         } \
     } while (0);
 
+#define EXPECT_UINT64_EQ(x, y) \
+    do { \
+        uint64_t __my_ret__ = y; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ != (x)) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+              "value %"PRIu64" (errno: %d): expected %"PRIu64"\n", \
+               __FILE__, __LINE__, __my_ret__, __my_errno__, (x)); \
+            return -1; \
+        } \
+    } while (0);
+
 #define RETRY_ON_EINTR_GET_ERRNO(ret, expr) do { \
     ret = expr; \
     if (!ret) \

These changes are not shown because the diff is too large
+ 194 - 160
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c


+ 90 - 175
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c

@@ -19,20 +19,18 @@
 #include "config.h"
 #include "exception.h"
 #include "jni_helper.h"
+#include "platform.h"
+#include "common/htable.h"
+#include "os/mutexes.h"
+#include "os/thread_local_storage.h"
 
 #include <stdio.h> 
 #include <string.h> 
 
-static pthread_mutex_t hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
-static pthread_mutex_t jvmMutex = PTHREAD_MUTEX_INITIALIZER;
-static volatile int hashTableInited = 0;
-
-#define LOCK_HASH_TABLE() pthread_mutex_lock(&hdfsHashMutex)
-#define UNLOCK_HASH_TABLE() pthread_mutex_unlock(&hdfsHashMutex)
-
+static struct htable *gClassRefHTable = NULL;
 
 /** The Native return types that methods could return */
-#define VOID          'V'
+#define JVOID         'V'
 #define JOBJECT       'L'
 #define JARRAYOBJECT  '['
 #define JBOOLEAN      'Z'
@@ -51,40 +49,10 @@ static volatile int hashTableInited = 0;
  */
 #define MAX_HASH_TABLE_ELEM 4096
 
-/** Key that allows us to retrieve thread-local storage */
-static pthread_key_t gTlsKey;
-
-/** nonzero if we succeeded in initializing gTlsKey. Protected by the jvmMutex */
-static int gTlsKeyInitialized = 0;
-
-/** Pthreads thread-local storage for each library thread. */
-struct hdfsTls {
-    JNIEnv *env;
-};
-
 /**
- * The function that is called whenever a thread with libhdfs thread local data
- * is destroyed.
- *
- * @param v         The thread-local data
+ * Length of buffer for retrieving created JVMs.  (We only ever create one.)
  */
-static void hdfsThreadDestructor(void *v)
-{
-    struct hdfsTls *tls = v;
-    JavaVM *vm;
-    JNIEnv *env = tls->env;
-    jint ret;
-
-    ret = (*env)->GetJavaVM(env, &vm);
-    if (ret) {
-        fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed with "
-                "error %d\n", ret);
-        (*env)->ExceptionDescribe(env);
-    } else {
-        (*vm)->DetachCurrentThread(vm);
-    }
-    free(tls);
-}
+#define VM_BUF_LENGTH 1
 
 void destroyLocalReference(JNIEnv *env, jobject jObject)
 {
@@ -138,67 +106,6 @@ jthrowable newCStr(JNIEnv *env, jstring jstr, char **out)
     return NULL;
 }
 
-static int hashTableInit(void)
-{
-    if (!hashTableInited) {
-        LOCK_HASH_TABLE();
-        if (!hashTableInited) {
-            if (hcreate(MAX_HASH_TABLE_ELEM) == 0) {
-                fprintf(stderr, "error creating hashtable, <%d>: %s\n",
-                        errno, strerror(errno));
-                UNLOCK_HASH_TABLE();
-                return 0;
-            } 
-            hashTableInited = 1;
-        }
-        UNLOCK_HASH_TABLE();
-    }
-    return 1;
-}
-
-
-static int insertEntryIntoTable(const char *key, void *data)
-{
-    ENTRY e, *ep;
-    if (key == NULL || data == NULL) {
-        return 0;
-    }
-    if (! hashTableInit()) {
-      return -1;
-    }
-    e.data = data;
-    e.key = (char*)key;
-    LOCK_HASH_TABLE();
-    ep = hsearch(e, ENTER);
-    UNLOCK_HASH_TABLE();
-    if (ep == NULL) {
-        fprintf(stderr, "warn adding key (%s) to hash table, <%d>: %s\n",
-                key, errno, strerror(errno));
-    }  
-    return 0;
-}
-
-
-
-static void* searchEntryFromTable(const char *key)
-{
-    ENTRY e,*ep;
-    if (key == NULL) {
-        return NULL;
-    }
-    hashTableInit();
-    e.key = (char*)key;
-    LOCK_HASH_TABLE();
-    ep = hsearch(e, FIND);
-    UNLOCK_HASH_TABLE();
-    if (ep != NULL) {
-        return ep->data;
-    }
-    return NULL;
-}
-
-
-
 jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
                  jobject instObj, const char *className,
                  const char *methName, const char *methSignature, ...)
@@ -235,7 +142,7 @@ jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
         }
         retval->l = jobj;
     }
-    else if (returnType == VOID) {
+    else if (returnType == JVOID) {
         if (methType == STATIC) {
             (*env)->CallStaticVoidMethodV(env, cls, mid, args);
         }
@@ -325,11 +232,11 @@ jthrowable methodIdFromClass(const char *className, const char *methName,
 {
     jclass cls;
     jthrowable jthr;
+    jmethodID mid = 0;
 
     jthr = globalClassReference(className, env, &cls);
     if (jthr)
         return jthr;
-    jmethodID mid = 0;
     jthr = validateMethodType(env, methType);
     if (jthr)
         return jthr;
@@ -350,25 +257,50 @@ jthrowable methodIdFromClass(const char *className, const char *methName,
 
 jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out)
 {
-    jclass clsLocalRef;
-    jclass cls = searchEntryFromTable(className);
-    if (cls) {
-        *out = cls;
-        return NULL;
+    jthrowable jthr = NULL;
+    jclass local_clazz = NULL;
+    jclass clazz = NULL;
+    int ret;
+
+    mutexLock(&hdfsHashMutex);
+    if (!gClassRefHTable) {
+        gClassRefHTable = htable_alloc(MAX_HASH_TABLE_ELEM, ht_hash_string,
+            ht_compare_string);
+        if (!gClassRefHTable) {
+            jthr = newRuntimeError(env, "htable_alloc failed\n");
+            goto done;
+        }
     }
-    clsLocalRef = (*env)->FindClass(env,className);
-    if (clsLocalRef == NULL) {
-        return getPendingExceptionAndClear(env);
+    clazz = htable_get(gClassRefHTable, className);
+    if (clazz) {
+        *out = clazz;
+        goto done;
     }
-    cls = (*env)->NewGlobalRef(env, clsLocalRef);
-    if (cls == NULL) {
-        (*env)->DeleteLocalRef(env, clsLocalRef);
-        return getPendingExceptionAndClear(env);
+    local_clazz = (*env)->FindClass(env,className);
+    if (!local_clazz) {
+        jthr = getPendingExceptionAndClear(env);
+        goto done;
     }
-    (*env)->DeleteLocalRef(env, clsLocalRef);
-    insertEntryIntoTable(className, cls);
-    *out = cls;
-    return NULL;
+    clazz = (*env)->NewGlobalRef(env, local_clazz);
+    if (!clazz) {
+        jthr = getPendingExceptionAndClear(env);
+        goto done;
+    }
+    ret = htable_put(gClassRefHTable, (void*)className, clazz);
+    if (ret) {
+        jthr = newRuntimeError(env, "htable_put failed with error "
+                               "code %d\n", ret);
+        goto done;
+    }
+    *out = clazz;
+    jthr = NULL;
+done:
+    mutexUnlock(&hdfsHashMutex);
+    (*env)->DeleteLocalRef(env, local_clazz);
+    if (jthr && clazz) {
+        (*env)->DeleteGlobalRef(env, clazz);
+    }
+    return jthr;
 }
 
 jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name)
@@ -436,14 +368,24 @@ done:
  */
 static JNIEnv* getGlobalJNIEnv(void)
 {
-    const jsize vmBufLength = 1;
-    JavaVM* vmBuf[vmBufLength]; 
+    JavaVM* vmBuf[VM_BUF_LENGTH]; 
     JNIEnv *env;
     jint rv = 0; 
     jint noVMs = 0;
     jthrowable jthr;
+    char *hadoopClassPath;
+    const char *hadoopClassPathVMArg = "-Djava.class.path=";
+    size_t optHadoopClassPathLen;
+    char *optHadoopClassPath;
+    int noArgs = 1;
+    char *hadoopJvmArgs;
+    char jvmArgDelims[] = " ";
+    char *str, *token, *savePtr;
+    JavaVMInitArgs vm_args;
+    JavaVM *vm;
+    JavaVMOption *options;
 
-    rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), vmBufLength, &noVMs);
+    rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), VM_BUF_LENGTH, &noVMs);
     if (rv != 0) {
         fprintf(stderr, "JNI_GetCreatedJavaVMs failed with error: %d\n", rv);
         return NULL;
@@ -451,23 +393,19 @@ static JNIEnv* getGlobalJNIEnv(void)
 
     if (noVMs == 0) {
         //Get the environment variables for initializing the JVM
-        char *hadoopClassPath = getenv("CLASSPATH");
+        hadoopClassPath = getenv("CLASSPATH");
         if (hadoopClassPath == NULL) {
             fprintf(stderr, "Environment variable CLASSPATH not set!\n");
             return NULL;
         } 
-        char *hadoopClassPathVMArg = "-Djava.class.path=";
-        size_t optHadoopClassPathLen = strlen(hadoopClassPath) + 
+        optHadoopClassPathLen = strlen(hadoopClassPath) + 
           strlen(hadoopClassPathVMArg) + 1;
-        char *optHadoopClassPath = malloc(sizeof(char)*optHadoopClassPathLen);
+        optHadoopClassPath = malloc(sizeof(char)*optHadoopClassPathLen);
         snprintf(optHadoopClassPath, optHadoopClassPathLen,
                 "%s%s", hadoopClassPathVMArg, hadoopClassPath);
 
         // Determine the # of LIBHDFS_OPTS args
-        int noArgs = 1;
-        char *hadoopJvmArgs = getenv("LIBHDFS_OPTS");
-        char jvmArgDelims[] = " ";
-        char *str, *token, *savePtr;
+        hadoopJvmArgs = getenv("LIBHDFS_OPTS");
         if (hadoopJvmArgs != NULL)  {
           hadoopJvmArgs = strdup(hadoopJvmArgs);
           for (noArgs = 1, str = hadoopJvmArgs; ; noArgs++, str = NULL) {
@@ -480,7 +418,12 @@ static JNIEnv* getGlobalJNIEnv(void)
         }
 
         // Now that we know the # args, populate the options array
-        JavaVMOption options[noArgs];
+        options = calloc(noArgs, sizeof(JavaVMOption));
+        if (!options) {
+          fputs("Call to calloc failed\n", stderr);
+          free(optHadoopClassPath);
+          return NULL;
+        }
         options[0].optionString = optHadoopClassPath;
         hadoopJvmArgs = getenv("LIBHDFS_OPTS");
 	if (hadoopJvmArgs != NULL)  {
@@ -495,8 +438,6 @@ static JNIEnv* getGlobalJNIEnv(void)
         }
 
         //Create the VM
-        JavaVMInitArgs vm_args;
-        JavaVM *vm;
         vm_args.version = JNI_VERSION_1_2;
         vm_args.options = options;
         vm_args.nOptions = noArgs; 
@@ -508,6 +449,7 @@ static JNIEnv* getGlobalJNIEnv(void)
           free(hadoopJvmArgs);
         }
         free(optHadoopClassPath);
+        free(options);
 
         if (rv != 0) {
             fprintf(stderr, "Call to JNI_CreateJavaVM failed "
@@ -523,7 +465,7 @@ static JNIEnv* getGlobalJNIEnv(void)
     }
     else {
         //Attach this thread to the VM
-        JavaVM* vm = vmBuf[0];
+        vm = vmBuf[0];
         rv = (*vm)->AttachCurrentThread(vm, (void*)&env, 0);
         if (rv != 0) {
             fprintf(stderr, "Call to AttachCurrentThread "
@@ -557,54 +499,27 @@ static JNIEnv* getGlobalJNIEnv(void)
 JNIEnv* getJNIEnv(void)
 {
     JNIEnv *env;
-    struct hdfsTls *tls;
-    int ret;
-
-#ifdef HAVE_BETTER_TLS
-    static __thread struct hdfsTls *quickTls = NULL;
-    if (quickTls)
-        return quickTls->env;
-#endif
-    pthread_mutex_lock(&jvmMutex);
-    if (!gTlsKeyInitialized) {
-        ret = pthread_key_create(&gTlsKey, hdfsThreadDestructor);
-        if (ret) {
-            pthread_mutex_unlock(&jvmMutex);
-            fprintf(stderr, "getJNIEnv: pthread_key_create failed with "
-                "error %d\n", ret);
-            return NULL;
-        }
-        gTlsKeyInitialized = 1;
+    THREAD_LOCAL_STORAGE_GET_QUICK();
+    mutexLock(&jvmMutex);
+    if (threadLocalStorageGet(&env)) {
+      mutexUnlock(&jvmMutex);
+      return NULL;
     }
-    tls = pthread_getspecific(gTlsKey);
-    if (tls) {
-        pthread_mutex_unlock(&jvmMutex);
-        return tls->env;
+    if (env) {
+      mutexUnlock(&jvmMutex);
+      return env;
     }
 
     env = getGlobalJNIEnv();
-    pthread_mutex_unlock(&jvmMutex);
+    mutexUnlock(&jvmMutex);
     if (!env) {
-        fprintf(stderr, "getJNIEnv: getGlobalJNIEnv failed\n");
-        return NULL;
+      fprintf(stderr, "getJNIEnv: getGlobalJNIEnv failed\n");
+      return NULL;
     }
-    tls = calloc(1, sizeof(struct hdfsTls));
-    if (!tls) {
-        fprintf(stderr, "getJNIEnv: OOM allocating %zd bytes\n",
-                sizeof(struct hdfsTls));
-        return NULL;
-    }
-    tls->env = env;
-    ret = pthread_setspecific(gTlsKey, tls);
-    if (ret) {
-        fprintf(stderr, "getJNIEnv: pthread_setspecific failed with "
-            "error code %d\n", ret);
-        hdfsThreadDestructor(tls);
-        return NULL;
+    if (threadLocalStorageSet(env)) {
+      return NULL;
     }
-#ifdef HAVE_BETTER_TLS
-    quickTls = tls;
-#endif
+    THREAD_LOCAL_STORAGE_SET_QUICK(env);
     return env;
 }
 

+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h

@@ -24,8 +24,6 @@
 
 #include <stdlib.h>
 #include <stdarg.h>
-#include <search.h>
-#include <pthread.h>
 #include <errno.h>
 
 #define PATH_SEPARATOR ':'

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c

@@ -21,6 +21,7 @@
 #include "hdfs_test.h"
 #include "jni_helper.h"
 #include "native_mini_dfs.h"
+#include "platform.h"
 
 #include <errno.h>
 #include <jni.h>
@@ -347,10 +348,11 @@ error_dlr_nn:
 int nmdConfigureHdfsBuilder(struct NativeMiniDfsCluster *cl,
                             struct hdfsBuilder *bld)
 {
-    int port, ret;
+    int ret;
+    tPort port;
 
     hdfsBuilderSetNameNode(bld, "localhost");
-    port = nmdGetNameNodePort(cl);
+    port = (tPort)nmdGetNameNodePort(cl);
     if (port < 0) {
       fprintf(stderr, "nmdGetNameNodePort failed with error %d\n", -port);
       return EIO;

+ 55 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/mutexes.h

@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_MUTEXES_H
+#define LIBHDFS_MUTEXES_H
+
+/*
+ * Defines abstraction over platform-specific mutexes.  libhdfs has no formal
+ * initialization function that users would call from a single-threaded context
+ * to initialize the library.  This creates a challenge for bootstrapping the
+ * mutexes.  To address this, all required mutexes are pre-defined here with
+ * external storage.  Platform-specific implementations must guarantee that the
+ * mutexes are initialized via static initialization.
+ */
+
+#include "platform.h"
+
+/** Mutex protecting the class reference hash table. */
+extern mutex hdfsHashMutex;
+
+/** Mutex protecting singleton JVM instance. */
+extern mutex jvmMutex;
+
+/**
+ * Locks a mutex.
+ *
+ * @param m mutex
+ * @return 0 if successful, non-zero otherwise
+ */
+int mutexLock(mutex *m);
+
+/**
+ * Unlocks a mutex.
+ *
+ * @param m mutex
+ * @return 0 if successful, non-zero otherwise
+ */
+int mutexUnlock(mutex *m);
+
+#endif
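
A short sketch of the calling pattern this header supports, modeled on how jni_helper.c in this changeset guards gClassRefHTable. The counter is illustrative, and hdfsHashMutex is reused here only because the header deliberately exposes pre-initialized mutexes instead of a mutex-creation API:

```c
#include "os/mutexes.h"

// Illustrative shared state; in jni_helper.c the state guarded by
// hdfsHashMutex is the global class-reference hash table.
static int gLookupCount;

int recordLookup(void)
{
    int snapshot;

    if (mutexLock(&hdfsHashMutex)) {
        return -1;  // mutexLock has already printed the error
    }
    snapshot = ++gLookupCount;
    mutexUnlock(&hdfsHashMutex);
    return snapshot;
}
```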

+ 43 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/mutexes.c

@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/mutexes.h"
+
+#include <pthread.h>
+#include <stdio.h>
+
+mutex hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
+mutex jvmMutex = PTHREAD_MUTEX_INITIALIZER;
+
+int mutexLock(mutex *m) {
+  int ret = pthread_mutex_lock(m);
+  if (ret) {
+    fprintf(stderr, "mutexLock: pthread_mutex_lock failed with error %d\n",
+      ret);
+  }
+  return ret;
+}
+
+int mutexUnlock(mutex *m) {
+  int ret = pthread_mutex_unlock(m);
+  if (ret) {
+    fprintf(stderr, "mutexUnlock: pthread_mutex_unlock failed with error %d\n",
+      ret);
+  }
+  return ret;
+}

+ 34 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/platform.h

@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_PLATFORM_H
+#define LIBHDFS_PLATFORM_H
+
+#include <pthread.h>
+
+/* Use gcc type-checked format arguments. */
+#define TYPE_CHECKED_PRINTF_FORMAT(formatArg, varArgs) \
+  __attribute__((format(printf, formatArg, varArgs)))
+
+/*
+ * Mutex and thread data types defined by pthreads.
+ */
+typedef pthread_mutex_t mutex;
+typedef pthread_t threadId;
+
+#endif

+ 52 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread.c

@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/thread.h"
+
+#include <pthread.h>
+#include <stdio.h>
+
+/**
+ * Defines a helper function that adapts function pointer provided by caller to
+ * the type required by pthread_create.
+ *
+ * @param toRun thread to run
+ * @return void* result of running thread (always NULL)
+ */
+static void* runThread(void *toRun) {
+  const thread *t = toRun;
+  t->start(t->arg);
+  return NULL;
+}
+
+int threadCreate(thread *t) {
+  int ret;
+  ret = pthread_create(&t->id, NULL, runThread, t);
+  if (ret) {
+    fprintf(stderr, "threadCreate: pthread_create failed with error %d\n", ret);
+  }
+  return ret;
+}
+
+int threadJoin(const thread *t) {
+  int ret = pthread_join(t->id, NULL);
+  if (ret) {
+    fprintf(stderr, "threadJoin: pthread_join failed with error %d\n", ret);
+  }
+  return ret;
+}

+ 80 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread_local_storage.c

@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/thread_local_storage.h"
+
+#include <jni.h>
+#include <pthread.h>
+#include <stdio.h>
+
+/** Key that allows us to retrieve thread-local storage */
+static pthread_key_t gTlsKey;
+
+/** nonzero if we succeeded in initializing gTlsKey. Protected by the jvmMutex */
+static int gTlsKeyInitialized = 0;
+
+/**
+ * The function that is called whenever a thread with libhdfs thread local data
+ * is destroyed.
+ *
+ * @param v         The thread-local data
+ */
+static void hdfsThreadDestructor(void *v)
+{
+  JavaVM *vm;
+  JNIEnv *env = v;
+  jint ret;
+
+  ret = (*env)->GetJavaVM(env, &vm);
+  if (ret) {
+    fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed with error %d\n",
+      ret);
+    (*env)->ExceptionDescribe(env);
+  } else {
+    (*vm)->DetachCurrentThread(vm);
+  }
+}
+
+int threadLocalStorageGet(JNIEnv **env)
+{
+  int ret = 0;
+  if (!gTlsKeyInitialized) {
+    ret = pthread_key_create(&gTlsKey, hdfsThreadDestructor);
+    if (ret) {
+      fprintf(stderr,
+        "threadLocalStorageGet: pthread_key_create failed with error %d\n",
+        ret);
+      return ret;
+    }
+    gTlsKeyInitialized = 1;
+  }
+  *env = pthread_getspecific(gTlsKey);
+  return ret;
+}
+
+int threadLocalStorageSet(JNIEnv *env)
+{
+  int ret = pthread_setspecific(gTlsKey, env);
+  if (ret) {
+    fprintf(stderr,
+      "threadLocalStorageSet: pthread_setspecific failed with error %d\n",
+      ret);
+    hdfsThreadDestructor(env);
+  }
+  return ret;
+}

+ 54 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread.h

@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_THREAD_H
+#define LIBHDFS_THREAD_H
+
+/*
+ * Defines abstraction over platform-specific threads.
+ */
+
+#include "platform.h"
+
+/** Pointer to function to run in thread. */
+typedef void (*threadProcedure)(void *);
+
+/** Structure containing a thread's ID, starting address and argument. */
+typedef struct {
+  threadId id;
+  threadProcedure start;
+  void *arg;
+} thread;
+
+/**
+ * Creates and immediately starts a new thread.
+ *
+ * @param t thread to create
+ * @return 0 if successful, non-zero otherwise
+ */
+int threadCreate(thread *t);
+
+/**
+ * Joins to the given thread, blocking if necessary.
+ *
+ * @param t thread to join
+ * @return 0 if successful, non-zero otherwise
+ */
+int threadJoin(const thread *t);
+
+#endif
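
A minimal sketch of the create/join cycle these declarations imply, using the posix thread.c adapter above (the worker procedure and its payload are illustrative, not part of libhdfs):

```c
#include <stdio.h>

#include "os/thread.h"

// Illustrative worker matching the threadProcedure signature.
static void worker(void *arg)
{
    int *n = arg;
    printf("worker saw %d\n", *n);
}

int main(void)
{
    int value = 42;
    thread t;

    t.start = worker;  // starting address, invoked via runThread
    t.arg = &value;    // argument forwarded to worker
    if (threadCreate(&t)) {
        return 1;      // threadCreate has already printed the error
    }
    if (threadJoin(&t)) {
        return 1;
    }
    return 0;
}
```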

+ 75 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread_local_storage.h

@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_THREAD_LOCAL_STORAGE_H
+#define LIBHDFS_THREAD_LOCAL_STORAGE_H
+
+/*
+ * Defines abstraction over platform-specific thread-local storage.  libhdfs
+ * currently only needs thread-local storage for a single piece of data: the
+ * thread's JNIEnv.  For simplicity, this interface is defined in terms of
+ * JNIEnv, not general-purpose thread-local storage of any arbitrary data.
+ */
+
+#include <jni.h>
+
+/*
+ * Most operating systems support the more efficient __thread construct, which
+ * is initialized by the linker.  The following macros use this technique on the
+ * operating systems that support it.
+ */
+#ifdef HAVE_BETTER_TLS
+  #define THREAD_LOCAL_STORAGE_GET_QUICK() \
+    static __thread JNIEnv *quickTlsEnv = NULL; \
+    { \
+      if (quickTlsEnv) { \
+        return quickTlsEnv; \
+      } \
+    }
+
+  #define THREAD_LOCAL_STORAGE_SET_QUICK(env) \
+    { \
+      quickTlsEnv = (env); \
+    }
+#else
+  #define THREAD_LOCAL_STORAGE_GET_QUICK()
+  #define THREAD_LOCAL_STORAGE_SET_QUICK(env)
+#endif
+
+/**
+ * Gets the JNIEnv in thread-local storage for the current thread.  If the call
+ * succeeds, and there is a JNIEnv associated with this thread, then returns 0
+ * and populates env.  If the call succeeds, but there is no JNIEnv associated
+ * with this thread, then returns 0 and sets JNIEnv to NULL.  If the call fails,
+ * then returns non-zero.  Only one thread at a time may execute this function.
+ * The caller is responsible for enforcing mutual exclusion.
+ *
+ * @param env JNIEnv out parameter
+ * @return 0 if successful, non-zero otherwise
+ */
+int threadLocalStorageGet(JNIEnv **env);
+
+/**
+ * Sets the JNIEnv in thread-local storage for the current thread.
+ *
+ * @param env JNIEnv to set
+ * @return 0 if successful, non-zero otherwise
+ */
+int threadLocalStorageSet(JNIEnv *env);
+
+#endif
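
The contract spelled out above is driven by getJNIEnv in jni_helper.c; a condensed sketch of that calling pattern follows. attachCurrentThreadToJvm is a hypothetical stand-in for the static getGlobalJNIEnv helper, and the caller-enforced mutual exclusion uses jvmMutex as the comments require:

```c
#include <jni.h>

#include "os/mutexes.h"
#include "os/thread_local_storage.h"

// Hypothetical helper standing in for getGlobalJNIEnv(); assumed to attach
// the current thread to the singleton JVM and return its JNIEnv.
extern JNIEnv *attachCurrentThreadToJvm(void);

JNIEnv *lookupEnv(void)
{
    JNIEnv *env;
    THREAD_LOCAL_STORAGE_GET_QUICK();   // fast path on __thread platforms
    mutexLock(&jvmMutex);               // caller enforces mutual exclusion
    if (threadLocalStorageGet(&env)) {  // non-zero means the call failed
        mutexUnlock(&jvmMutex);
        return NULL;
    }
    if (env) {                          // already cached for this thread
        mutexUnlock(&jvmMutex);
        return env;
    }
    env = attachCurrentThreadToJvm();
    mutexUnlock(&jvmMutex);
    if (!env || threadLocalStorageSet(env)) {
        return NULL;
    }
    THREAD_LOCAL_STORAGE_SET_QUICK(env);
    return env;
}
```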

+ 28 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/inttypes.h

@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_INTTYPES_H
+#define LIBHDFS_INTTYPES_H
+
+/* On Windows, inttypes.h does not exist, so manually define what we need. */
+
+#define PRId64 "I64d"
+#define PRIu64 "I64u"
+typedef unsigned __int64 uint64_t;
+
+#endif

+ 52 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/mutexes.c

@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/mutexes.h"
+
+#include <windows.h>
+
+mutex hdfsHashMutex;
+mutex jvmMutex;
+
+/**
+ * Unfortunately, there is no simple static initializer for a critical section.
+ * Instead, the API requires calling InitializeCriticalSection.  Since libhdfs
+ * lacks an explicit initialization function, there is no obvious existing place
+ * for the InitializeCriticalSection calls.  To work around this, we define an
+ * initialization function and instruct the linker to set a pointer to that
+ * function as a user-defined global initializer.  See discussion of CRT
+ * Initialization:
+ * http://msdn.microsoft.com/en-us/library/bb918180.aspx
+ */
+static void __cdecl initializeMutexes(void) {
+  InitializeCriticalSection(&hdfsHashMutex);
+  InitializeCriticalSection(&jvmMutex);
+}
+#pragma section(".CRT$XCU", read)
+__declspec(allocate(".CRT$XCU"))
+const void (__cdecl *pInitialize)(void) = initializeMutexes;
+
+int mutexLock(mutex *m) {
+  EnterCriticalSection(m);
+  return 0;
+}
+
+int mutexUnlock(mutex *m) {
+  LeaveCriticalSection(m);
+  return 0;
+}

+ 86 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/platform.h

@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_PLATFORM_H
+#define LIBHDFS_PLATFORM_H
+
+#include <stdio.h>
+#include <windows.h>
+#include <winsock.h>
+
+/*
+ * O_ACCMODE defined to match Linux definition.
+ */
+#ifndef O_ACCMODE
+#define O_ACCMODE 0x0003
+#endif
+
+/*
+ * Windows has a different name for its maximum path length constant.
+ */
+#ifndef PATH_MAX
+#define PATH_MAX MAX_PATH
+#endif
+
+/*
+ * Windows does not define EDQUOT and ESTALE in errno.h.  The closest equivalents
+ * are these constants from winsock.h.
+ */
+#ifndef EDQUOT
+#define EDQUOT WSAEDQUOT
+#endif
+
+#ifndef ESTALE
+#define ESTALE WSAESTALE
+#endif
+
+/*
+ * gcc-style type-checked format arguments are not supported on Windows, so just
+ * stub this macro.
+ */
+#define TYPE_CHECKED_PRINTF_FORMAT(formatArg, varArgs)
+
+/*
+ * Define macros for various string formatting functions not defined on Windows.
+ * Where possible, we reroute to one of the secure CRT variants.  On Windows,
+ * the preprocessor does support variadic macros, even though they were not
+ * standardized until C99.
+ */
+#define snprintf(str, size, format, ...) \
+  _snprintf_s((str), (size), _TRUNCATE, (format), __VA_ARGS__)
+#define strncpy(dest, src, n) \
+  strncpy_s((dest), (n), (src), _TRUNCATE)
+#define strtok_r(str, delim, saveptr) \
+  strtok_s((str), (delim), (saveptr))
+#define vsnprintf(str, size, format, ...) \
+  vsnprintf_s((str), (size), _TRUNCATE, (format), __VA_ARGS__)
+
+/*
+ * Mutex data type defined as Windows CRITICAL_SECTION.  A critical section
+ * (not a Windows mutex) is used because libhdfs only needs synchronization of
+ * multiple threads within a single process, not synchronization across
+ * process boundaries.
+ */
+typedef CRITICAL_SECTION mutex;
+
+/*
+ * Thread data type defined as HANDLE to a Windows thread.
+ */
+typedef HANDLE threadId;
+
+#endif
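
An illustrative sketch showing that code written against the POSIX-style names compiles unchanged once this header is included; the printTokens helper is hypothetical:

#include "platform.h"   /* reroutes the calls below on Windows */
#include <stdio.h>
#include <string.h>

/* Hypothetical helper: prints comma-separated tokens from a writable line. */
static void printTokens(char *line)
{
  char msg[64];
  char *state = NULL;
  char *tok = strtok_r(line, ",", &state);  /* strtok_s on Windows */
  while (tok != NULL) {
    /* snprintf becomes _snprintf_s with _TRUNCATE on Windows. */
    snprintf(msg, sizeof(msg), "token: %s", tok);
    fprintf(stderr, "%s\n", msg);
    tok = strtok_r(NULL, ",", &state);
  }
}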

+ 66 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread.c

@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/thread.h"
+
+#include <stdio.h>
+#include <windows.h>
+
+/**
+ * Defines a helper function that adapts the function pointer provided by the
+ * caller to the type required by CreateThread.
+ *
+ * @param toRun thread to run
+ * @return DWORD result of running thread (always 0)
+ */
+static DWORD runThread(LPVOID toRun) {
+  const thread *t = toRun;
+  t->start(t->arg);
+  return 0;
+}
+
+int threadCreate(thread *t) {
+  DWORD ret = 0;
+  HANDLE h;
+  h = CreateThread(NULL, 0, runThread, t, 0, NULL);
+  if (h) {
+    t->id = h;
+  } else {
+    ret = GetLastError();
+    fprintf(stderr, "threadCreate: CreateThread failed with error %d\n", ret);
+  }
+  return ret;
+}
+
+int threadJoin(const thread *t) {
+  DWORD ret = WaitForSingleObject(t->id, INFINITE);
+  switch (ret) {
+  case WAIT_OBJECT_0:
+    break;
+  case WAIT_FAILED:
+    ret = GetLastError();
+    fprintf(stderr, "threadJoin: WaitForSingleObject failed with error %d\n",
+      ret);
+    break;
+  default:
+    fprintf(stderr, "threadJoin: WaitForSingleObject unexpected error %d\n",
+      ret);
+    break;
+  }
+  return ret;
+}
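
An illustrative sketch of this wrapper API, following the same pattern test_libhdfs_threaded.c uses later in this patch: populate the thread struct's start and arg fields, then create and join:

#include "os/thread.h"

#include <stdio.h>

static void worker(void *arg)
{
  fprintf(stderr, "worker received %d\n", *(int *)arg);
}

int main(void)
{
  thread t;
  int value = 42;
  t.start = worker;         /* routine the new thread runs */
  t.arg = &value;           /* argument handed to that routine */
  if (threadCreate(&t)) {   /* non-zero indicates failure */
    return 1;
  }
  return threadJoin(&t);    /* returns 0 once the thread finishes */
}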

+ 164 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread_local_storage.c

@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/thread_local_storage.h"
+
+#include <jni.h>
+#include <stdio.h>
+#include <windows.h>
+
+/** Key that allows us to retrieve thread-local storage */
+static DWORD gTlsIndex = TLS_OUT_OF_INDEXES;
+
+/**
+ * If the current thread has a JNIEnv in thread-local storage, then detaches the
+ * current thread from the JVM.
+ */
+static void detachCurrentThreadFromJvm(void)
+{
+  JNIEnv *env = NULL;
+  JavaVM *vm;
+  jint ret;
+  if (threadLocalStorageGet(&env) || !env) {
+    return;
+  }
+  ret = (*env)->GetJavaVM(env, &vm);
+  if (ret) {
+    fprintf(stderr,
+      "detachCurrentThreadFromJvm: GetJavaVM failed with error %d\n",
+      ret);
+    (*env)->ExceptionDescribe(env);
+  } else {
+    (*vm)->DetachCurrentThread(vm);
+  }
+}
+
+/**
+ * Unlike pthreads, the Windows API does not seem to provide a convenient way to
+ * hook a callback onto thread shutdown.  However, the Windows portable
+ * executable format does define a concept of thread-local storage callbacks.
+ * Here, we define a function and instruct the linker to set a pointer to that
+ * function in the segment for thread-local storage callbacks.  See page 85 of
+ * Microsoft Portable Executable and Common Object File Format Specification:
+ * http://msdn.microsoft.com/en-us/gg463119.aspx
+ * This technique only works for implicit linking (OS loads DLL on demand), not
+ * for explicit linking (user code calls LoadLibrary directly).  This effectively
+ * means that we have a known limitation: libhdfs may not work correctly if a
+ * Windows application attempts to use it via explicit linking.
+ *
+ * @param h module handle
+ * @param reason the reason for calling the callback
+ * @param pv reserved, unused
+ */
+static void NTAPI tlsCallback(PVOID h, DWORD reason, PVOID pv)
+{
+  DWORD tlsIndex;
+  switch (reason) {
+  case DLL_THREAD_DETACH:
+    detachCurrentThreadFromJvm();
+    break;
+  case DLL_PROCESS_DETACH:
+    detachCurrentThreadFromJvm();
+    tlsIndex = gTlsIndex;
+    gTlsIndex = TLS_OUT_OF_INDEXES;
+    if (!TlsFree(tlsIndex)) {
+      fprintf(stderr, "tlsCallback: TlsFree failed with error %d\n",
+        GetLastError());
+    }
+    break;
+  default:
+    break;
+  }
+}
+
+/*
+ * A variable named _tls_used contains the TLS directory, which contains a list
+ * of pointers to callback functions.  Normally, the linker won't retain this
+ * variable unless the executable has implicit thread-local variables, defined
+ * using the __declspec(thread) extended storage-class modifier.  libhdfs
+ * doesn't use __declspec(thread), and we have no guarantee that the executable
+ * linked to libhdfs will use __declspec(thread).  By forcing the linker to
+ * reference _tls_used, we guarantee that the binary retains the TLS directory.
+ * See Microsoft Visual Studio 10.0/VC/crt/src/tlssup.c .
+ */
+#pragma comment(linker, "/INCLUDE:_tls_used")
+
+/*
+ * We must retain a pointer to the callback function.  Force the linker to keep
+ * this symbol, even though it appears that nothing in our source code uses it.
+ */
+#pragma comment(linker, "/INCLUDE:pTlsCallback")
+
+/*
+ * Define constant pointer to our callback, and tell the linker to pin it into
+ * the TLS directory so that it receives thread callbacks.  Use external linkage
+ * to protect against the linker discarding the seemingly unused symbol.
+ */
+#pragma const_seg(".CRT$XLB")
+extern const PIMAGE_TLS_CALLBACK pTlsCallback;
+const PIMAGE_TLS_CALLBACK pTlsCallback = tlsCallback;
+#pragma const_seg()
+
+int threadLocalStorageGet(JNIEnv **env)
+{
+  LPVOID tls;
+  DWORD ret;
+  if (TLS_OUT_OF_INDEXES == gTlsIndex) {
+    gTlsIndex = TlsAlloc();
+    if (TLS_OUT_OF_INDEXES == gTlsIndex) {
+      fprintf(stderr,
+        "threadLocalStorageGet: TlsAlloc failed with error %d\n",
+        GetLastError());
+      return TLS_OUT_OF_INDEXES;
+    }
+  }
+  tls = TlsGetValue(gTlsIndex);
+  if (tls) {
+    *env = tls;
+    return 0;
+  } else {
+    ret = GetLastError();
+    if (ERROR_SUCCESS == ret) {
+      /* Thread-local storage contains NULL, because we haven't set it yet. */
+      *env = NULL;
+      return 0;
+    } else {
+      /*
+       * The API call failed.  According to documentation, TlsGetValue cannot
+       * fail as long as the index is a valid index from a successful TlsAlloc
+       * call.  This error handling is purely defensive.
+       */
+      fprintf(stderr,
+        "threadLocalStorageGet: TlsGetValue failed with error %d\n", ret);
+      return ret;
+    }
+  }
+}
+
+int threadLocalStorageSet(JNIEnv *env)
+{
+  DWORD ret = 0;
+  if (!TlsSetValue(gTlsIndex, (LPVOID)env)) {
+    ret = GetLastError();
+    fprintf(stderr,
+      "threadLocalStorageSet: TlsSetValue failed with error %d\n",
+      ret);
+    detachCurrentThreadFromJvm();
+  }
+  return ret;
+}

+ 29 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/unistd.h

@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_UNISTD_H
+#define LIBHDFS_UNISTD_H
+
+/* On Windows, unistd.h does not exist, so manually define what we need. */
+
+#include <process.h> /* Declares getpid(). */
+#include <windows.h>
+
+/* Re-route sleep to Sleep, converting units from seconds to milliseconds. */
+#define sleep(seconds) Sleep((seconds) * 1000)
+#endif

+ 119 - 113
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c

@@ -18,6 +18,7 @@
 
 #include "hdfs.h" 
 #include "hdfs_test.h" 
+#include "platform.h"
 
 #include <inttypes.h>
 #include <jni.h>
@@ -28,12 +29,13 @@
 #include <unistd.h>
 
 void permission_disp(short permissions, char *rtr) {
-  rtr[9] = '\0';
   int i;
+  short permissionsId;
+  char* perm;
+  rtr[9] = '\0';
   for(i=2;i>=0;i--)
     {
-      short permissionsId = permissions >> (i * 3) & (short)7;
-      char* perm;
+      permissionsId = permissions >> (i * 3) & (short)7;
       switch(permissionsId) {
       case 7:
         perm = "rwx"; break;
@@ -60,35 +62,56 @@ void permission_disp(short permissions, char *rtr) {
 } 
 
 int main(int argc, char **argv) {
-    char buffer[32];
-    tSize num_written_bytes;
-
-    hdfsFS fs = hdfsConnectNewInstance("default", 0);
+    const char *writePath = "/tmp/testfile.txt";
+    const char *fileContents = "Hello, World!";
+    const char *readPath = "/tmp/testfile.txt";
+    const char *srcPath = "/tmp/testfile.txt";
+    const char *dstPath = "/tmp/testfile2.txt";
+    const char *slashTmp = "/tmp";
+    const char *newDirectory = "/tmp/newdir";
+    const char *newOwner = "root";
+    const char *tuser = "nobody";
+    const char *appendPath = "/tmp/appends";
+    const char *userPath = "/tmp/usertestfile.txt";
+
+    char buffer[32], buffer2[256], rdbuffer[32];
+    tSize num_written_bytes, num_read_bytes;
+    hdfsFS fs, lfs;
+    hdfsFile writeFile, readFile, localFile, appendFile, userFile;
+    tOffset currentPos, seekPos;
+    int exists, totalResult, result, numEntries, i, j;
+    const char *resp;
+    hdfsFileInfo *fileInfo, *fileList, *finfo;
+    char *buffer3;
+    char permissions[10];
+    char ***hosts;
+    short newPerm = 0666;
+    tTime newMtime, newAtime;
+
+    fs = hdfsConnectNewInstance("default", 0);
     if(!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
         exit(-1);
     } 
  
-    hdfsFS lfs = hdfsConnectNewInstance(NULL, 0);
+    lfs = hdfsConnectNewInstance(NULL, 0);
     if(!lfs) {
         fprintf(stderr, "Oops! Failed to connect to 'local' hdfs!\n");
         exit(-1);
     } 
 
-    const char* writePath = "/tmp/testfile.txt";
-    const char* fileContents = "Hello, World!";
-
     {
         //Write tests
         
-        hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+        writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
         if(!writeFile) {
             fprintf(stderr, "Failed to open %s for writing!\n", writePath);
             exit(-1);
         }
         fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
         num_written_bytes =
-          hdfsWrite(fs, writeFile, (void*)fileContents, strlen(fileContents)+1);
+          hdfsWrite(fs, writeFile, (void*)fileContents,
+            (tSize)(strlen(fileContents)+1));
         if (num_written_bytes != strlen(fileContents) + 1) {
           fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
                   (int)(strlen(fileContents) + 1), (int)num_written_bytes);
@@ -96,7 +119,7 @@ int main(int argc, char **argv) {
         }
         fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
 
-        tOffset currentPos = -1;
+        currentPos = -1;
         if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
             fprintf(stderr, 
                     "Failed to get current file position correctly! Got %ld!\n",
@@ -123,15 +146,14 @@ int main(int argc, char **argv) {
     {
         //Read tests
         
-        const char* readPath = "/tmp/testfile.txt";
-        int exists = hdfsExists(fs, readPath);
+        exists = hdfsExists(fs, readPath);
 
         if (exists) {
           fprintf(stderr, "Failed to validate existence of %s\n", readPath);
           exit(-1);
         }
 
-        hdfsFile readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
+        readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
         if (!readFile) {
             fprintf(stderr, "Failed to open %s for reading!\n", readPath);
             exit(-1);
@@ -146,13 +168,13 @@ int main(int argc, char **argv) {
 
         fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));
 
-        tOffset seekPos = 1;
+        seekPos = 1;
         if(hdfsSeek(fs, readFile, seekPos)) {
             fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
             exit(-1);
         }
 
-        tOffset currentPos = -1;
+        currentPos = -1;
         if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
             fprintf(stderr, 
                     "Failed to get current file position correctly! Got %ld!\n", 
@@ -175,7 +197,7 @@ int main(int argc, char **argv) {
             exit(-1);
         }
         memset(buffer, 0, sizeof(buffer));
-        tSize num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
+        num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
                 sizeof(buffer));
         if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
             fprintf(stderr, "Failed to read (direct). Expected %s but got %s (%d bytes)\n",
@@ -208,14 +230,14 @@ int main(int argc, char **argv) {
         hdfsCloseFile(fs, readFile);
 
         // Test correct behaviour for unsupported filesystems
-        hdfsFile localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+        localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
         if(!localFile) {
             fprintf(stderr, "Failed to open %s for writing!\n", writePath);
             exit(-1);
         }
 
         num_written_bytes = hdfsWrite(lfs, localFile, (void*)fileContents,
-                                      strlen(fileContents) + 1);
+                                      (tSize)(strlen(fileContents) + 1));
 
         hdfsCloseFile(lfs, localFile);
         localFile = hdfsOpenFile(lfs, writePath, O_RDONLY, 0, 0, 0);
@@ -229,50 +251,43 @@ int main(int argc, char **argv) {
         hdfsCloseFile(lfs, localFile);
     }
 
-    int totalResult = 0;
-    int result = 0;
+    totalResult = 0;
+    result = 0;
     {
         //Generic file-system operations
 
-        const char* srcPath = "/tmp/testfile.txt";
-        const char* dstPath = "/tmp/testfile2.txt";
-
-        fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath, lfs, dstPath)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath, lfs, dstPath)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath, lfs, srcPath)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
-        fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
-        const char* slashTmp = "/tmp";
-        const char* newDirectory = "/tmp/newdir";
-        fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
-        fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 2)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 2)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
-        char buffer[256];
-        const char *resp;
-        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
         totalResult += (resp ? 0 : 1);
-        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
         totalResult += (resp ? 0 : 1);
 
         fprintf(stderr, "hdfsGetDefaultBlockSize: %ld\n", hdfsGetDefaultBlockSize(fs));
         fprintf(stderr, "hdfsGetCapacity: %ld\n", hdfsGetCapacity(fs));
         fprintf(stderr, "hdfsGetUsed: %ld\n", hdfsGetUsed(fs));
 
-        hdfsFileInfo *fileInfo = NULL;
+        fileInfo = NULL;
         if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
             fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
             fprintf(stderr, "Name: %s, ", fileInfo->mName);
@@ -283,7 +298,6 @@ int main(int argc, char **argv) {
             fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod)); 
             fprintf(stderr, "Owner: %s, ", fileInfo->mOwner);
             fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
-            char permissions[10];
             permission_disp(fileInfo->mPermissions, permissions);
             fprintf(stderr, "Permissions: %d (%s)\n", fileInfo->mPermissions, permissions);
             hdfsFreeFileInfo(fileInfo, 1);
@@ -292,10 +306,8 @@ int main(int argc, char **argv) {
             fprintf(stderr, "waah! hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
         }
 
-        hdfsFileInfo *fileList = 0;
-        int numEntries = 0;
+        fileList = 0;
         if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) {
-            int i = 0;
             for(i=0; i < numEntries; ++i) {
                 fprintf(stderr, "Name: %s, ", fileList[i].mName);
                 fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind);
@@ -305,7 +317,6 @@ int main(int argc, char **argv) {
                 fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
                 fprintf(stderr, "Owner: %s, ", fileList[i].mOwner);
                 fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
-                char permissions[10];
                 permission_disp(fileList[i].mPermissions, permissions);
                 fprintf(stderr, "Permissions: %d (%s)\n", fileList[i].mPermissions, permissions);
             }
@@ -319,12 +330,12 @@ int main(int argc, char **argv) {
             }
         }
 
-        char*** hosts = hdfsGetHosts(fs, srcPath, 0, 1);
+        hosts = hdfsGetHosts(fs, srcPath, 0, 1);
         if(hosts) {
             fprintf(stderr, "hdfsGetHosts - SUCCESS! ... \n");
-            int i=0; 
+            i=0; 
             while(hosts[i]) {
-                int j = 0;
+                j = 0;
                 while(hosts[i][j]) {
                     fprintf(stderr, 
                             "\thosts[%d][%d] - %s\n", i, j, hosts[i][j]);
@@ -337,131 +348,129 @@ int main(int argc, char **argv) {
             fprintf(stderr, "waah! hdfsGetHosts - FAILED!\n");
         }
        
-        char *newOwner = "root";
         // setting tmp dir to 777 so later when connectAsUser nobody, we can write to it
-        short newPerm = 0666;
 
         // chown write
-        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
         // chmod write
-        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
 
 
         sleep(2);
-        tTime newMtime = time(NULL);
-        tTime newAtime = time(NULL);
+        newMtime = time(NULL);
+        newAtime = time(NULL);
 
         // utime write
-        fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) != 0 ? "Failed!" : "Success!"));
 
         totalResult += result;
 
         // chown/chmod/utime read
-        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
+        finfo = hdfsGetPathInfo(fs, writePath);
 
-        fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner) != 0)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner))) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
-        fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
         // will later use /tmp/ as a different user so enable it
-        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
         fprintf(stderr,"newMTime=%ld\n",newMtime);
         fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod);
 
 
-        fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
         // No easy way to turn on access times from hdfs_test right now
-        //        fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result = (finfo->mLastAccess != newAtime)) ? "Failed!" : "Success!"));
+        //        fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result = (finfo->mLastAccess != newAtime)) != 0 ? "Failed!" : "Success!"));
         //        totalResult += result;
 
         hdfsFreeFileInfo(finfo, 1);
 
         // Clean up
-        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory, 1)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory, 1)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 1)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath, 1)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath, 1)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath, 1)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) ? "Success!" : "Failed!"));
+        fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) != 0 ? "Success!" : "Failed!"));
         totalResult += (result ? 0 : 1);
     }
 
     {
       // TEST APPENDS
-      const char *writePath = "/tmp/appends";
 
       // CREATE
-      hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY, 0, 0, 0);
-      if(!writeFile) {
-        fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+      appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0);
+      if(!appendFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
         exit(-1);
       }
-      fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+      fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
 
-      char* buffer = "Hello,";
-      tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer));
+      buffer3 = "Hello,";
+      num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
+        (tSize)strlen(buffer3));
       fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
 
-      if (hdfsFlush(fs, writeFile)) {
-        fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
+      if (hdfsFlush(fs, appendFile)) {
+        fprintf(stderr, "Failed to 'flush' %s\n", appendPath); 
         exit(-1);
         }
-      fprintf(stderr, "Flushed %s successfully!\n", writePath); 
+      fprintf(stderr, "Flushed %s successfully!\n", appendPath); 
 
-      hdfsCloseFile(fs, writeFile);
+      hdfsCloseFile(fs, appendFile);
 
       // RE-OPEN
-      writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_APPEND, 0, 0, 0);
-      if(!writeFile) {
-        fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+      appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY|O_APPEND, 0, 0, 0);
+      if(!appendFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
         exit(-1);
       }
-      fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+      fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
 
-      buffer = " World";
-      num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer) + 1);
+      buffer3 = " World";
+      num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
+        (tSize)(strlen(buffer3) + 1));
       fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
 
-      if (hdfsFlush(fs, writeFile)) {
-        fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
+      if (hdfsFlush(fs, appendFile)) {
+        fprintf(stderr, "Failed to 'flush' %s\n", appendPath); 
         exit(-1);
       }
-      fprintf(stderr, "Flushed %s successfully!\n", writePath); 
+      fprintf(stderr, "Flushed %s successfully!\n", appendPath); 
 
-      hdfsCloseFile(fs, writeFile);
+      hdfsCloseFile(fs, appendFile);
 
       // CHECK size
-      hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
-      fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == strlen("Hello, World") + 1)) ? "Success!" : "Failed!"));
+      finfo = hdfsGetPathInfo(fs, appendPath);
+      fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == (tOffset)(strlen("Hello, World") + 1))) == 1 ? "Success!" : "Failed!"));
       totalResult += (result ? 0 : 1);
 
       // READ and check data
-      hdfsFile readFile = hdfsOpenFile(fs, writePath, O_RDONLY, 0, 0, 0);
+      readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0);
       if (!readFile) {
-        fprintf(stderr, "Failed to open %s for reading!\n", writePath);
+        fprintf(stderr, "Failed to open %s for reading!\n", appendPath);
         exit(-1);
       }
 
-      char rdbuffer[32];
-      tSize num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
+      num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
       fprintf(stderr, "Read following %d bytes:\n%s\n", 
               num_read_bytes, rdbuffer);
 
-      fprintf(stderr, "read == Hello, World %s\n", (result = (strcmp(rdbuffer, "Hello, World") == 0)) ? "Success!" : "Failed!");
+      fprintf(stderr, "read == Hello, World %s\n", ((result = (strcmp(rdbuffer, "Hello, World"))) == 0 ? "Success!" : "Failed!"));
 
       hdfsCloseFile(fs, readFile);
 
@@ -478,36 +487,33 @@ int main(int argc, char **argv) {
       // the actual fs user capabilities. Thus just create a file and read
       // the owner is correct.
 
-      const char *tuser = "nobody";
-      const char* writePath = "/tmp/usertestfile.txt";
-
       fs = hdfsConnectAsUserNewInstance("default", 0, tuser);
       if(!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
         exit(-1);
       } 
 
-        hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
-        if(!writeFile) {
-            fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+        userFile = hdfsOpenFile(fs, userPath, O_WRONLY|O_CREAT, 0, 0, 0);
+        if(!userFile) {
+            fprintf(stderr, "Failed to open %s for writing!\n", userPath);
             exit(-1);
         }
-        fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+        fprintf(stderr, "Opened %s for writing successfully...\n", userPath);
 
-        char* buffer = "Hello, World!";
-        tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1);
+        num_written_bytes = hdfsWrite(fs, userFile, (void*)fileContents,
+          (tSize)(strlen(fileContents)+1));
         fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
 
-        if (hdfsFlush(fs, writeFile)) {
-            fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
+        if (hdfsFlush(fs, userFile)) {
+            fprintf(stderr, "Failed to 'flush' %s\n", userPath); 
             exit(-1);
         }
-        fprintf(stderr, "Flushed %s successfully!\n", writePath); 
+        fprintf(stderr, "Flushed %s successfully!\n", userPath); 
 
-        hdfsCloseFile(fs, writeFile);
+        hdfsCloseFile(fs, userFile);
 
-        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
-        fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser) != 0)) ? "Failed!" : "Success!"));
+        finfo = hdfsGetPathInfo(fs, userPath);
+        fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser))) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
     }
     

+ 11 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c

@@ -22,35 +22,38 @@
 #include <stdlib.h>
 
 int main(int argc, char **argv) {
+    hdfsFS fs;
+    const char *rfile;
+    tSize bufferSize;
+    hdfsFile readFile;
+    char* buffer;
+    tSize curSize;
 
     if (argc != 4) {
         fprintf(stderr, "Usage: hdfs_read <filename> <filesize> <buffersize>\n");
         exit(-1);
     }
     
-    hdfsFS fs = hdfsConnect("default", 0);
+    fs = hdfsConnect("default", 0);
     if (!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
         exit(-1);
     } 
- 
-    const char* rfile = argv[1];
-    tSize bufferSize = strtoul(argv[3], NULL, 10);
-   
-    hdfsFile readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
+
+    rfile = argv[1];
+    bufferSize = (tSize)strtoul(argv[3], NULL, 10);
+
+    readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
     if (!readFile) {
         fprintf(stderr, "Failed to open %s for writing!\n", rfile);
         exit(-2);
     }
 
     // data to be written to the file
-    char* buffer = malloc(sizeof(char) * bufferSize);
+    buffer = malloc(sizeof(char) * bufferSize);
     if(buffer == NULL) {
         return -2;
     }
     
     // read from the file
-    tSize curSize = bufferSize;
+    curSize = bufferSize;
     for (; curSize == bufferSize;) {
         curSize = hdfsRead(fs, readFile, (void*)buffer, curSize);
     }

+ 17 - 12
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c

@@ -21,23 +21,31 @@
 #include <limits.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <sys/types.h>
 
 int main(int argc, char **argv) {
+    hdfsFS fs;
+    const char *writeFileName;
+    off_t fileTotalSize;
+    long long tmpBufferSize;
+    tSize bufferSize;
+    hdfsFile writeFile;
+    char* buffer;
+    int i;
+    off_t nrRemaining;
+    tSize curSize;
+    tSize written;
 
     if (argc != 4) {
         fprintf(stderr, "Usage: hdfs_write <filename> <filesize> <buffersize>\n");
         exit(-1);
     }
     
-    hdfsFS fs = hdfsConnect("default", 0);
+    fs = hdfsConnect("default", 0);
     if (!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
         exit(-1);
     } 
+
+    writeFileName = argv[1];
+    fileTotalSize = strtoul(argv[2], NULL, 10);
+    tmpBufferSize = strtoul(argv[3], NULL, 10);
 
     // sanity check
     if(fileTotalSize == ULONG_MAX && errno == ERANGE) {
@@ -51,30 +59,27 @@ int main(int argc, char **argv) {
       exit(-3);
     }
 
-    tSize bufferSize = tmpBufferSize;
+    bufferSize = (tSize)tmpBufferSize;
 
-    hdfsFile writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 0, 0);
+    writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 0, 0);
     if (!writeFile) {
         fprintf(stderr, "Failed to open %s for writing!\n", writeFileName);
         exit(-2);
     }
 
     // data to be written to the file
-    char* buffer = malloc(sizeof(char) * bufferSize);
+    buffer = malloc(sizeof(char) * bufferSize);
     if(buffer == NULL) {
         fprintf(stderr, "Could not allocate buffer of size %d\n", bufferSize);
         return -2;
     }
-    int i = 0;
     for (i=0; i < bufferSize; ++i) {
         buffer[i] = 'a' + (i%26);
     }
 
     // write to the file
-    off_t nrRemaining;
     for (nrRemaining = fileTotalSize; nrRemaining > 0; nrRemaining -= bufferSize ) {
-      tSize curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining; 
-      tSize written;
+      curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining; 
       if ((written = hdfsWrite(fs, writeFile, (void*)buffer, curSize)) != curSize) {
         fprintf(stderr, "ERROR: hdfsWrite returned an error on write: %d\n", written);
         exit(-3);

+ 11 - 20
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c

@@ -19,12 +19,12 @@
 #include "expect.h"
 #include "hdfs.h"
 #include "native_mini_dfs.h"
+#include "platform.h"
 
 #include <errno.h>
 #include <inttypes.h>
-#include <semaphore.h>
-#include <pthread.h>
 #include <unistd.h>
+#include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -53,7 +53,7 @@ static uint8_t *getZeroCopyBlockData(int blockIdx)
         exit(1);
     }
     for (i = 0; i < TEST_ZEROCOPY_FULL_BLOCK_SIZE; i++) {
-      buf[i] = blockIdx + (i % 17);
+      buf[i] = (uint8_t)(blockIdx + (i % 17));
     }
     return buf;
 }
@@ -69,18 +69,6 @@ static int getZeroCopyBlockLen(int blockIdx)
     }
 }
 
-static void printBuf(const uint8_t *buf, size_t len) __attribute__((unused));
-
-static void printBuf(const uint8_t *buf, size_t len)
-{
-  size_t i;
-
-  for (i = 0; i < len; i++) {
-    fprintf(stderr, "%02x", buf[i]);
-  }
-  fprintf(stderr, "\n");
-}
-
 static int doTestZeroCopyReads(hdfsFS fs, const char *fileName)
 {
     hdfsFile file = NULL;
@@ -127,8 +115,9 @@ static int doTestZeroCopyReads(hdfsFS fs, const char *fileName)
     EXPECT_NONNULL(block);
     EXPECT_ZERO(memcmp(block, hadoopRzBufferGet(buffer), SMALL_READ_LEN));
     hadoopRzBufferFree(file, buffer);
-    EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
-                  hdfsTell(fs, file));
+    EXPECT_INT64_EQ(
+          (int64_t)TEST_ZEROCOPY_FULL_BLOCK_SIZE + (int64_t)SMALL_READ_LEN,
+          hdfsTell(fs, file));
     EXPECT_ZERO(expectFileStats(file,
           TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
           TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
@@ -165,7 +154,7 @@ static int doTestZeroCopyReads(hdfsFS fs, const char *fileName)
     free(block);
     block = getZeroCopyBlockData(2);
     EXPECT_NONNULL(block);
-    EXPECT_ZERO(memcmp(block, hadoopRzBufferGet(buffer) +
+    EXPECT_ZERO(memcmp(block, (uint8_t*)hadoopRzBufferGet(buffer) +
         (TEST_ZEROCOPY_FULL_BLOCK_SIZE - SMALL_READ_LEN), SMALL_READ_LEN));
     hadoopRzBufferFree(file, buffer);
 
@@ -219,8 +208,10 @@ int main(void)
 {
     int port;
     struct NativeMiniDfsConf conf = {
-        .doFormat = 1,
-        .configureShortCircuit = 1,
+        1, /* doFormat */
+        0, /* webhdfsEnabled */
+        0, /* namenodeHttpPort */
+        1, /* configureShortCircuit */
     };
     char testFileName[TEST_FILE_NAME_LENGTH];
     hdfsFS fs;

+ 18 - 21
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c

@@ -19,11 +19,11 @@
 #include "expect.h"
 #include "hdfs.h"
 #include "native_mini_dfs.h"
+#include "os/thread.h"
 
 #include <errno.h>
 #include <inttypes.h>
-#include <semaphore.h>
-#include <pthread.h>
+#include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -35,8 +35,6 @@
 
 #define TLH_DEFAULT_BLOCK_SIZE 134217728
 
-static sem_t tlhSem;
-
 static struct NativeMiniDfsCluster* tlhCluster;
 
 struct tlhThreadInfo {
@@ -44,18 +42,19 @@ struct tlhThreadInfo {
     int threadIdx;
     /** 0 = thread was successful; error code otherwise */
     int success;
-    /** pthread identifier */
-    pthread_t thread;
+    /** thread identifier */
+    thread theThread;
 };
 
 static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs,
                                      const char *username)
 {
-    int ret, port;
+    int ret;
+    tPort port;
     hdfsFS hdfs;
     struct hdfsBuilder *bld;
     
-    port = nmdGetNameNodePort(cl);
+    port = (tPort)nmdGetNameNodePort(cl);
     if (port < 0) {
         fprintf(stderr, "hdfsSingleNameNodeConnect: nmdGetNameNodePort "
                 "returned error %d\n", port);
@@ -164,7 +163,7 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs,
     EXPECT_NONNULL(file);
 
     /* TODO: implement writeFully and use it here */
-    expected = strlen(paths->prefix);
+    expected = (int)strlen(paths->prefix);
     ret = hdfsWrite(fs, file, paths->prefix, expected);
     if (ret < 0) {
         ret = errno;
@@ -186,9 +185,9 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs,
 
     EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
     errno = 0;
-    EXPECT_ZERO(readStats->totalBytesRead);
-    EXPECT_ZERO(readStats->totalLocalBytesRead);
-    EXPECT_ZERO(readStats->totalShortCircuitBytesRead);
+    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalBytesRead);
+    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalLocalBytesRead);
+    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalShortCircuitBytesRead);
     hdfsFileFreeReadStatistics(readStats);
     /* TODO: implement readFully and use it here */
     ret = hdfsRead(fs, file, tmp, sizeof(tmp));
@@ -204,7 +203,7 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs,
     }
     EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
     errno = 0;
-    EXPECT_INT_EQ(expected, readStats->totalBytesRead);
+    EXPECT_UINT64_EQ((uint64_t)expected, readStats->totalBytesRead);
     hdfsFileFreeReadStatistics(readStats);
     EXPECT_ZERO(memcmp(paths->prefix, tmp, expected));
     EXPECT_ZERO(hdfsCloseFile(fs, file));
@@ -262,12 +261,11 @@ static int testHdfsOperationsImpl(struct tlhThreadInfo *ti)
     return 0;
 }
 
-static void *testHdfsOperations(void *v)
+static void testHdfsOperations(void *v)
 {
     struct tlhThreadInfo *ti = (struct tlhThreadInfo*)v;
     int ret = testHdfsOperationsImpl(ti);
     ti->success = ret;
-    return NULL;
 }
 
 static int checkFailures(struct tlhThreadInfo *ti, int tlhNumThreads)
@@ -304,7 +302,7 @@ int main(void)
     const char *tlhNumThreadsStr;
     struct tlhThreadInfo ti[TLH_MAX_THREADS];
     struct NativeMiniDfsConf conf = {
-        .doFormat = 1,
+        1, /* doFormat */
     };
 
     tlhNumThreadsStr = getenv("TLH_NUM_THREADS");
@@ -323,21 +321,20 @@ int main(void)
         ti[i].threadIdx = i;
     }
 
-    EXPECT_ZERO(sem_init(&tlhSem, 0, tlhNumThreads));
     tlhCluster = nmdCreate(&conf);
     EXPECT_NONNULL(tlhCluster);
     EXPECT_ZERO(nmdWaitClusterUp(tlhCluster));
 
     for (i = 0; i < tlhNumThreads; i++) {
-        EXPECT_ZERO(pthread_create(&ti[i].thread, NULL,
-            testHdfsOperations, &ti[i]));
+        ti[i].theThread.start = testHdfsOperations;
+        ti[i].theThread.arg = &ti[i];
+        EXPECT_ZERO(threadCreate(&ti[i].theThread));
     }
     for (i = 0; i < tlhNumThreads; i++) {
-        EXPECT_ZERO(pthread_join(ti[i].thread, NULL));
+        EXPECT_ZERO(threadJoin(&ti[i].theThread));
     }
 
     EXPECT_ZERO(nmdShutdown(tlhCluster));
     nmdFree(tlhCluster);
-    EXPECT_ZERO(sem_destroy(&tlhSem));
     return checkFailures(ti, tlhNumThreads);
 }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_native_mini_dfs.c

@@ -22,7 +22,7 @@
 #include <errno.h>
 
 static struct NativeMiniDfsConf conf = {
-    .doFormat = 1,
+    1, /* doFormat */
 };
 
 /**

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html

@@ -281,7 +281,7 @@
   {#DeadNodes}
   <tr class="danger">
     <td>{name} ({xferaddr})</td>
-    <td>{lastContact}</td>
+    <td>{#helper_lastcontact_tostring value="{lastContact}"/}</td>
     <td>Dead{?decommissioned}, Decommissioned{/decommissioned}</td>
     <td>-</td>
     <td>-</td>

+ 10 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js

@@ -139,6 +139,14 @@
   }
 
   function load_datanode_info() {
+
+    var HELPERS = {
+      'helper_lastcontact_tostring' : function (chunk, ctx, bodies, params) {
+        var value = dust.helpers.tap(params.value, chunk, ctx);
+        return chunk.write('' + new Date(Date.now()-1000*Number(value)));
+      }
+    };
+
     function workaround(r) {
       function node_map_to_array(nodes) {
         var res = [];
@@ -160,7 +168,8 @@
       '/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo',
       guard_with_startup_progress(function (resp) {
         var data = workaround(resp.beans[0]);
-        dust.render('datanode-info', data, function(err, out) {
+        var base = dust.makeBase(HELPERS);
+        dust.render('datanode-info', base.push(data), function(err, out) {
           $('#tab-datanode').html(out);
           $('#ui-tabs a[href="#tab-datanode"]').tab('show');
         });

+ 408 - 0
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSCommands.apt.vm

@@ -0,0 +1,408 @@
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~   http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License. See accompanying LICENSE file.
+
+  ---
+  HDFS Commands Guide
+  ---
+  ---
+  ${maven.build.timestamp}
+
+HDFS Commands Guide
+
+%{toc|section=1|fromDepth=2|toDepth=4}
+
+* Overview
+
+   All HDFS commands are invoked by the <<<bin/hdfs>>> script. Running the
+   hdfs script without any arguments prints the description for all
+   commands.
+
+   Usage: <<<hdfs [--config confdir] [COMMAND] [GENERIC_OPTIONS]
+          [COMMAND_OPTIONS]>>>
+
+   Hadoop has an option parsing framework that handles both generic options
+   and running classes.
+
+*-----------------------+---------------+
+|| COMMAND_OPTION       || Description
+*-----------------------+---------------+
+| <<<--config confdir>>>| Overrides the default configuration directory.
+|                       | Default is <<<${HADOOP_HOME}/conf>>>.
+*-----------------------+---------------+
+| GENERIC_OPTIONS       | The common set of options supported by multiple
+|                       | commands. Full list is
+|                       | {{{../hadoop-common/CommandsManual.html#Generic_Options}here}}.
+*-----------------------+---------------+
+| COMMAND_OPTIONS       | Various commands with their options are described in
+|                       | the following sections. The commands have been
+|                       | grouped into {{{User Commands}}} and
+|                       | {{{Administration Commands}}}.
+*-----------------------+---------------+
+
+* User Commands
+
+   Commands useful for users of a Hadoop cluster.
+
+** <<<dfs>>>
+
+   Usage: <<<hdfs dfs [GENERIC_OPTIONS] [COMMAND_OPTIONS]>>>
+
+   Run a filesystem command on the file system supported in Hadoop.
+   The various COMMAND_OPTIONS can be found at
+   {{{../hadoop-common/FileSystemShell.html}File System Shell Guide}}.
+
+** <<<fetchdt>>>
+
+   Gets Delegation Token from a NameNode.
+   See {{{./HdfsUserGuide.html#fetchdt}fetchdt}} for more info.
+
+   Usage: <<<hdfs fetchdt [GENERIC_OPTIONS]
+          [--webservice <namenode_http_addr>] <path> >>>
+
+*------------------------------+---------------------------------------------+
+|| COMMAND_OPTION              || Description
+*------------------------------+---------------------------------------------+
+| <path>                       | File name to store the token into.
+*------------------------------+---------------------------------------------+
+| --webservice <namenode_http_addr> | Use HTTP protocol instead of RPC.
+*------------------------------+---------------------------------------------+
+
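+   Example (illustrative, with a hypothetical token file path): fetch a
+   delegation token and store it in a local file:
+
+   <<<hdfs fetchdt /tmp/my.token>>>
+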
+** <<<fsck>>>
+
+   Runs an HDFS filesystem checking utility.
+   See {{{./HdfsUserGuide.html#fsck}fsck}} for more info.
+
+   Usage: <<<hdfs fsck [GENERIC_OPTIONS] <path>
+          [-move | -delete | -openforwrite]
+          [-files [-blocks [-locations | -racks]]]
+          [-showprogress]>>>
+
+*------------------+---------------------------------------------+
+||  COMMAND_OPTION || Description
+*------------------+---------------------------------------------+
+|   <path>         | Start checking from this path.
+*------------------+---------------------------------------------+
+|   -move          | Move corrupted files to /lost+found.
+*------------------+---------------------------------------------+
+|   -delete        | Delete corrupted files.
+*------------------+---------------------------------------------+
+|   -openforwrite  | Print out files opened for write.
+*------------------+---------------------------------------------+
+|   -files         | Print out files being checked.
+*------------------+---------------------------------------------+
+|   -blocks        | Print out block report.
+*------------------+---------------------------------------------+
+|   -locations     | Print out locations for every block.
+*------------------+---------------------------------------------+
+|   -racks         | Print out network topology for data-node locations.
+*------------------+---------------------------------------------+
+|   -showprogress  | Print out dots for progress in output. Default is OFF
+|                  | (no progress).
+*------------------+---------------------------------------------+
+
+* Administration Commands
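+   Example (illustrative, with a hypothetical path): check a directory tree
+   and print the files and blocks visited:
+
+   <<<hdfs fsck /user/alice -files -blocks>>>
+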
+
+   Commands useful for administrators of a Hadoop cluster.
+
+** <<<balancer>>>
+
+   Runs a cluster balancing utility. An administrator can simply press Ctrl-C
+   to stop the rebalancing process. See
+   {{{./HdfsUserGuide.html#Balancer}Balancer}} for more details.
+
+   Usage: <<<hdfs balancer [-threshold <threshold>] [-policy <policy>]>>>
+
+*------------------------+----------------------------------------------------+
+|| COMMAND_OPTION        | Description
+*------------------------+----------------------------------------------------+
+| -threshold <threshold> | Percentage of disk capacity. This overrides the
+|                        | default threshold.
+*------------------------+----------------------------------------------------+
+| -policy <policy>       | <<<datanode>>> (default): Cluster is balanced if
+|                        | each datanode is balanced. \
+|                        | <<<blockpool>>>: Cluster is balanced if each block
+|                        | pool in each datanode is balanced.
+*------------------------+----------------------------------------------------+
+
+   Note that the <<<blockpool>>> policy is more strict than the <<<datanode>>>
+   policy.
+
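+   Example (illustrative): run the balancer with a 5% threshold:
+
+   <<<hdfs balancer -threshold 5>>>
+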
+** <<<datanode>>>
+
+   Runs an HDFS datanode.
+
+   Usage: <<<hdfs datanode [-regular | -rollback | -rollingupgrade rollback]>>>
+
+*-----------------+-----------------------------------------------------------+
+|| COMMAND_OPTION || Description
+*-----------------+-----------------------------------------------------------+
+| -regular        | Normal datanode startup (default).
+*-----------------+-----------------------------------------------------------+
+| -rollback       | Rolls back the datanode to the previous version. This
+|                 | should be used after stopping the datanode and
+|                 | distributing the old Hadoop version.
+*-----------------+-----------------------------------------------------------+
+| -rollingupgrade rollback | Rolls back a rolling upgrade operation.
+*-----------------+-----------------------------------------------------------+
+
+** <<<dfsadmin>>>
+
+   Runs an HDFS dfsadmin client.
+
+   Usage: <<<hdfs dfsadmin [GENERIC_OPTIONS]
+          [-report [-live] [-dead] [-decommissioning]]
+          [-safemode enter | leave | get | wait]
+          [-saveNamespace]
+          [-rollEdits]
+          [-restoreFailedStorage true|false|check]
+          [-refreshNodes]
+          [-setQuota <quota> <dirname>...<dirname>]
+          [-clrQuota <dirname>...<dirname>]
+          [-setSpaceQuota <quota> <dirname>...<dirname>]
+          [-clrSpaceQuota <dirname>...<dirname>]
+          [-finalizeUpgrade]
+          [-rollingUpgrade [<query>|<prepare>|<finalize>]]
+          [-metasave filename]
+          [-refreshServiceAcl]
+          [-refreshUserToGroupsMappings]
+          [-refreshSuperUserGroupsConfiguration]
+          [-refreshCallQueue]
+          [-refresh <host:ipc_port> <key> [arg1..argn]]
+          [-printTopology]
+          [-refreshNamenodes datanodehost:port]
+          [-deleteBlockPool datanode-host:port blockpoolId [force]]
+          [-setBalancerBandwidth <bandwidth in bytes per second>]
+          [-allowSnapshot <snapshotDir>]
+          [-disallowSnapshot <snapshotDir>]
+          [-fetchImage <local directory>]
+          [-shutdownDatanode <datanode_host:ipc_port> [upgrade]]
+          [-getDatanodeInfo <datanode_host:ipc_port>]
+          [-help [cmd]]>>>
+
+*-----------------+-----------------------------------------------------------+
+|| COMMAND_OPTION || Description
+*-----------------+-----------------------------------------------------------+
+| -report [-live] [-dead] [-decommissioning] | Reports basic filesystem
+                  | information and statistics. Optional flags may be used to
+                  | filter the list of displayed DataNodes.
+*-----------------+-----------------------------------------------------------+
+| -safemode enter\|leave\|get\|wait | Safe mode maintenance command. Safe
+                  | mode is a Namenode state in which it \
+                  | 1. does not accept changes to the name space (read-only) \
+                  | 2. does not replicate or delete blocks. \
+                  | Safe mode is entered automatically at Namenode startup, and
+                  | the Namenode leaves safe mode automatically when the
+                  | configured minimum percentage of blocks satisfies the
+                  | minimum replication condition. Safe mode can also be
+                  | entered manually, but then it can only be turned off
+                  | manually as well.
+*-----------------+-----------------------------------------------------------+
+| -saveNamespace  | Save current namespace into storage directories and reset
+                  | edits log. Requires safe mode.
+*-----------------+-----------------------------------------------------------+
+| -rollEdits      | Rolls the edit log on the active NameNode.
+*-----------------+-----------------------------------------------------------+
+| -restoreFailedStorage true\|false\|check | This option turns on/off the
+                  | automatic attempts to restore failed storage replicas.
+                  | If a failed storage becomes available again, the system
+                  | attempts to restore edits and/or the fsimage during a
+                  | checkpoint. The 'check' option returns the current setting.
+*-----------------+-----------------------------------------------------------+
+| -refreshNodes   | Re-read the hosts and exclude files to update the set of
+                  | Datanodes that are allowed to connect to the Namenode and
+                  | those that should be decommissioned or recommissioned.
+*-----------------+-----------------------------------------------------------+
+| -setQuota \<quota\> \<dirname\>...\<dirname\> | See
+                  | {{{../hadoop-hdfs/HdfsQuotaAdminGuide.html#Administrative_Commands}HDFS Quotas Guide}}
+                  | for details.
+*-----------------+-----------------------------------------------------------+
+| -clrQuota \<dirname\>...\<dirname\> | See
+                  | {{{../hadoop-hdfs/HdfsQuotaAdminGuide.html#Administrative_Commands}HDFS Quotas Guide}}
+                  | for details.
+*-----------------+-----------------------------------------------------------+
+| -setSpaceQuota \<quota\> \<dirname\>...\<dirname\> | See
+                  | {{{../hadoop-hdfs/HdfsQuotaAdminGuide.html#Administrative_Commands}HDFS Quotas Guide}}
+                  | for details.
+*-----------------+-----------------------------------------------------------+
+| -clrSpaceQuota \<dirname\>...\<dirname\> | See
+                  | {{{../hadoop-hdfs/HdfsQuotaAdminGuide.html#Administrative_Commands}HDFS Quotas Guide}}
+                  | for details.
+*-----------------+-----------------------------------------------------------+
+| -finalizeUpgrade| Finalize upgrade of HDFS. Datanodes delete their
+                  | previous-version working directories, followed by the
+                  | Namenode doing the same. This completes the upgrade
+                  | process.
+*-----------------+-----------------------------------------------------------+
+| -rollingUpgrade [query\|prepare\|finalize] | See
+                  | {{{../hadoop-hdfs/HdfsRollingUpgrade.html#dfsadmin_-rollingUpgrade}Rolling Upgrade document}}
+                  | for details.
+*-----------------+-----------------------------------------------------------+
+| -metasave filename | Save Namenode's primary data structures to <filename> in
+                  | the directory specified by the hadoop.log.dir property.
+                  | <filename> is overwritten if it exists.
+                  | <filename> will contain one line for each of the following:\
+                  | 1. Datanodes heartbeating with the Namenode\
+                  | 2. Blocks waiting to be replicated\
+                  | 3. Blocks currently being replicated\
+                  | 4. Blocks waiting to be deleted
+*-----------------+-----------------------------------------------------------+
+| -refreshServiceAcl | Reload the service-level authorization policy file.
+*-----------------+-----------------------------------------------------------+
+| -refreshUserToGroupsMappings | Refresh user-to-groups mappings.
+*-----------------+-----------------------------------------------------------+
+| -refreshSuperUserGroupsConfiguration | Refresh superuser proxy groups
+                  | mappings.
+*-----------------+-----------------------------------------------------------+
+| -refreshCallQueue | Reload the call queue from config.
+*-----------------+-----------------------------------------------------------+
+| -refresh \<host:ipc_port\> \<key\> [arg1..argn] | Triggers a runtime refresh
+                  | of the resource specified by \<key\> on \<host:ipc_port\>.
+                  | All other arguments after \<key\> are sent to the host.
+*-----------------+-----------------------------------------------------------+
+| -printTopology  | Print a tree of the racks and their nodes as reported by
+                  | the Namenode.
+*-----------------+-----------------------------------------------------------+
+| -refreshNamenodes datanodehost:port | For the given datanode, reloads the
+                  | configuration files, stops serving the removed block-pools
+                  | and starts serving new block-pools.
+*-----------------+-----------------------------------------------------------+
+| -deleteBlockPool datanode-host:port blockpoolId [force] | If force is passed,
+                  | block pool directory for the given blockpool id on the
+                  | given datanode is deleted along with its contents,
+                  | otherwise the directory is deleted only if it is empty.
+                  | The command will fail if the datanode is still serving the
+                  | block pool. Refer to refreshNamenodes to shut down a block
+                  | pool service on a datanode.
+*-----------------+-----------------------------------------------------------+
+| -setBalancerBandwidth \<bandwidth in bytes per second\> | Changes the network
+                  | bandwidth used by each datanode during HDFS block
+                  | balancing. \<bandwidth\> is the maximum number of bytes per
+                  | second that will be used by each datanode. This value
+                  | overrides the dfs.balance.bandwidthPerSec parameter.\
+                  | NOTE: The new value is not persistent on the DataNode.
+*-----------------+-----------------------------------------------------------+
+| -allowSnapshot \<snapshotDir\> | Allow snapshots of a directory to be
+                  | created. If the operation completes successfully, the
+                  | directory becomes snapshottable.
+*-----------------+-----------------------------------------------------------+
+| -disallowSnapshot \<snapshotDir\> | Disallow snapshots of a directory to
+                  | be created. All snapshots of the directory must be deleted
+                  | before disallowing snapshots.
+*-----------------+-----------------------------------------------------------+
+| -fetchImage \<local directory\> | Downloads the most recent fsimage from the
+                  | NameNode and saves it in the specified local directory.
+*-----------------+-----------------------------------------------------------+
+| -shutdownDatanode \<datanode_host:ipc_port\> [upgrade] | Submit a shutdown
+                  | request for the given datanode. See
+                  | {{{./HdfsRollingUpgrade.html#dfsadmin_-shutdownDatanode}Rolling Upgrade document}}
+                  | for details.
+*-----------------+-----------------------------------------------------------+
+| -getDatanodeInfo \<datanode_host:ipc_port\> | Get information about the
+                  | given datanode. See
+                  | {{{./HdfsRollingUpgrade.html#dfsadmin_-getDatanodeInfo}Rolling Upgrade document}}
+                  | for details.
+*-----------------+-----------------------------------------------------------+
+| -help [cmd]     | Displays help for the given command or all commands if none
+                  | is specified.
+*-----------------+-----------------------------------------------------------+
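+
+   For example, a sketch of a manual checkpoint sequence: enter safe mode
+   with <<<hdfs dfsadmin -safemode enter>>>, save the namespace with
+   <<<hdfs dfsadmin -saveNamespace>>> (which requires safe mode), and then
+   return to normal operation with <<<hdfs dfsadmin -safemode leave>>>.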
+
+** <<<namenode>>>
+
+   Runs the namenode. More information about the upgrade, rollback and
+   finalize options is available at
+   {{{./HdfsUserGuide.html#Upgrade_and_Rollback}Upgrade Rollback}}.
+
+   Usage: <<<hdfs namenode [-backup] |
+          [-checkpoint] |
+          [-format [-clusterid cid ] [-force] [-nonInteractive] ] |
+          [-upgrade [-clusterid cid] [-renameReserved<k-v pairs>] ] |
+          [-upgradeOnly [-clusterid cid] [-renameReserved<k-v pairs>] ] |
+          [-rollback] |
+          [-rollingUpgrade <downgrade|rollback|started> ] |
+          [-finalize] |
+          [-importCheckpoint] |
+          [-initializeSharedEdits] |
+          [-bootstrapStandby] |
+          [-recover [-force] ] |
+          [-metadataVersion ]>>>
+
+*--------------------+--------------------------------------------------------+
+|| COMMAND_OPTION    || Description
+*--------------------+--------------------------------------------------------+
+| -backup            | Start backup node.
+*--------------------+--------------------------------------------------------+
+| -checkpoint        | Start checkpoint node.
+*--------------------+--------------------------------------------------------+
+| -format [-clusterid cid] [-force] [-nonInteractive] | Formats the specified
+                     | NameNode. It starts the NameNode, formats it, and then
+                     | shuts it down. The -force option formats even if the
+                     | name directory exists. The -nonInteractive option
+                     | aborts if the name directory exists, unless -force is
+                     | specified.
+*--------------------+--------------------------------------------------------+
+| -upgrade [-clusterid cid] [-renameReserved\<k-v pairs\>] | The Namenode
+                     | should be started with the upgrade option after the
+                     | distribution of a new Hadoop version.
+*--------------------+--------------------------------------------------------+
+| -upgradeOnly [-clusterid cid] [-renameReserved\<k-v pairs\>] | Upgrade the
+                     | specified NameNode and then shut it down.
+*--------------------+--------------------------------------------------------+
+| -rollback          | Rolls back the NameNode to the previous version. This
+                     | should be used after stopping the cluster and
+                     | distributing the old Hadoop version.
+*--------------------+--------------------------------------------------------+
+| -rollingUpgrade \<downgrade\|rollback\|started\> | See
+                     | {{{./HdfsRollingUpgrade.html#NameNode_Startup_Options}Rolling Upgrade document}}
+                     | for details.
+*--------------------+--------------------------------------------------------+
+| -finalize          | Finalize removes the previous state of the file
+                     | system. The most recent upgrade becomes permanent, and
+                     | the rollback option is no longer available. After
+                     | finalization it shuts the NameNode down.
+*--------------------+--------------------------------------------------------+
+| -importCheckpoint  | Loads an image from a checkpoint directory and saves it
+                     | into the current one. The checkpoint directory is read
+                     | from the fs.checkpoint.dir property.
+*--------------------+--------------------------------------------------------+
+| -initializeSharedEdits | Format a new shared edits dir and copy in enough
+                     | edit log segments so that the standby NameNode can start
+                     | up.
+*--------------------+--------------------------------------------------------+
+| -bootstrapStandby  | Allows the standby NameNode's storage directories to be
+                     | bootstrapped by copying the latest namespace snapshot
+                     | from the active NameNode. This is used when first
+                     | configuring an HA cluster.
+*--------------------+--------------------------------------------------------+
+| -recover [-force]  | Recover lost metadata on a corrupt filesystem. See
+                     | {{{./HdfsUserGuide.html#Recovery_Mode}HDFS User Guide}}
+                     | for details.
+*--------------------+--------------------------------------------------------+
+| -metadataVersion   | Verify that configured directories exist, then print the
+                     | metadata versions of the software and the image.
+*--------------------+--------------------------------------------------------+
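+
+   For example, when first configuring an HA pair, the standby's storage
+   directories can be seeded from the already-formatted active NameNode (a
+   sketch; run on the standby host):
+
+   <<<hdfs namenode -bootstrapStandby>>>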
+
+** <<<secondarynamenode>>>
+
+   Runs the HDFS secondary namenode.
+   See {{{./HdfsUserGuide.html#Secondary_NameNode}Secondary Namenode}}
+   for more info.
+
+   Usage: <<<hdfs secondarynamenode [-checkpoint [force]] | [-format] |
+          [-geteditsize]>>>
+
+*----------------------+------------------------------------------------------+
+|| COMMAND_OPTION      || Description
+*----------------------+------------------------------------------------------+
+| -checkpoint [force]  | Checkpoints the SecondaryNameNode if EditLog size
+                       | >= fs.checkpoint.size. If <<<force>>> is used, a
+                       | checkpoint is performed irrespective of EditLog size.
+*----------------------+------------------------------------------------------+
+| -format              | Format the local storage during startup.
+*----------------------+------------------------------------------------------+
+| -geteditsize         | Prints the number of uncheckpointed transactions on
+                       | the NameNode.
+*----------------------+------------------------------------------------------+
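+
+   For example, <<<hdfs secondarynamenode -geteditsize>>> reports how many
+   uncheckpointed transactions have accumulated, and
+   <<<hdfs secondarynamenode -checkpoint force>>> forces a checkpoint
+   irrespective of the EditLog size.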

+ 14 - 15
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm

@@ -143,8 +143,8 @@ HDFS Users Guide
 
 **  DFSAdmin Command
 
-   The <<<bin/hadoop dfsadmin>>> command supports a few HDFS administration
-   related operations. The <<<bin/hadoop dfsadmin -help>>> command lists all the
+   The <<<bin/hdfs dfsadmin>>> command supports a few HDFS administration
+   related operations. The <<<bin/hdfs dfsadmin -help>>> command lists all the
    commands currently supported. For e.g.:
 
      * <<<-report>>>: reports basic statistics of HDFS. Some of this
@@ -172,7 +172,7 @@ HDFS Users Guide
       of racks and datanodes attached to the racks as viewed by the
        NameNode.
 
-   For command usage, see {{{../hadoop-common/CommandsManual.html#dfsadmin}dfsadmin}}.
+   For command usage, see {{{./HDFSCommands.html#dfsadmin}dfsadmin}}.
 
 * Secondary NameNode
 
@@ -207,7 +207,7 @@ HDFS Users Guide
    primary NameNode if necessary.
 
    For command usage,
-   see {{{../hadoop-common/CommandsManual.html#secondarynamenode}secondarynamenode}}.
+   see {{{./HDFSCommands.html#secondarynamenode}secondarynamenode}}.
 
 * Checkpoint Node
 
@@ -249,7 +249,7 @@ HDFS Users Guide
    Multiple checkpoint nodes may be specified in the cluster configuration
    file.
 
-   For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
+   For command usage, see {{{./HDFSCommands.html#namenode}namenode}}.
 
 * Backup Node
 
@@ -291,7 +291,7 @@ HDFS Users Guide
 
    For a complete discussion of the motivation behind the creation of the
    Backup node and Checkpoint node, see {{{https://issues.apache.org/jira/browse/HADOOP-4539}HADOOP-4539}}.
-   For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
+   For command usage, see {{{./HDFSCommands.html#namenode}namenode}}.
 
 * Import Checkpoint
 
@@ -314,7 +314,7 @@ HDFS Users Guide
    verifies that the image in <<<dfs.namenode.checkpoint.dir>>> is consistent,
    but does not modify it in any way.
 
-   For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
+   For command usage, see {{{./HDFSCommands.html#namenode}namenode}}.
 
 * Balancer
 
@@ -341,7 +341,7 @@ HDFS Users Guide
    A brief administrator's guide for balancer is available at 
    {{{https://issues.apache.org/jira/browse/HADOOP-1652}HADOOP-1652}}.
 
-   For command usage, see {{{../hadoop-common/CommandsManual.html#balancer}balancer}}.
+   For command usage, see {{{./HDFSCommands.html#balancer}balancer}}.
 
 * Rack Awareness
 
@@ -368,7 +368,7 @@ HDFS Users Guide
    allow any modifications to file system or blocks. Normally the NameNode
    leaves Safemode automatically after the DataNodes have reported that
    most file system blocks are available. If required, HDFS could be
-   placed in Safemode explicitly using <<<bin/hadoop dfsadmin -safemode>>>
+   placed in Safemode explicitly using <<<bin/hdfs dfsadmin -safemode>>>
    command. NameNode front page shows whether Safemode is on or off. A
    more detailed description and configuration is maintained as JavaDoc
    for <<<setSafeMode()>>>.
@@ -383,8 +383,8 @@ HDFS Users Guide
    most of the recoverable failures. By default fsck ignores open files
    but provides an option to select all files during reporting. The HDFS
    fsck command is not a Hadoop shell command. It can be run as
-   <<<bin/hadoop fsck>>>. For command usage, see 
-   {{{../hadoop-common/CommandsManual.html#fsck}fsck}}. fsck can be run on
+   <<<bin/hdfs fsck>>>. For command usage, see
+   {{{./HDFSCommands.html#fsck}fsck}}. fsck can be run on
    the whole file system or on a subset of files.
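+
+   For example, a typical health check over the whole namespace (a sketch;
+   the <<<-files>>> and <<<-blocks>>> options add per-file and per-block
+   detail to the report):
+
+   <<<bin/hdfs fsck / -files -blocks>>>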
 
 * fetchdt
@@ -395,11 +395,11 @@ HDFS Users Guide
    Utility uses either RPC or HTTPS (over Kerberos) to get the token, and
    thus requires kerberos tickets to be present before the run (run kinit
    to get the tickets). The HDFS fetchdt command is not a Hadoop shell
-   command. It can be run as <<<bin/hadoop fetchdt DTfile>>>. After you got
+   command. It can be run as <<<bin/hdfs fetchdt DTfile>>>. After you get
    the token you can run an HDFS command without having Kerberos tickets,
    by pointing <<<HADOOP_TOKEN_FILE_LOCATION>>> environmental variable to the
    delegation token file. For command usage, see
-   {{{../hadoop-common/CommandsManual.html#fetchdt}fetchdt}} command.
+   {{{./HDFSCommands.html#fetchdt}fetchdt}} command.
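+
+   For example (paths are illustrative): fetch a token with
+   <<<bin/hdfs fetchdt /tmp/my.token>>>, then point
+   <<<HADOOP_TOKEN_FILE_LOCATION>>> at <<</tmp/my.token>>> before running
+   HDFS commands without Kerberos tickets.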
 
 * Recovery Mode
 
@@ -533,5 +533,4 @@ HDFS Users Guide
      * Explore {{{./hdfs-default.xml}hdfs-default.xml}}. It includes
        brief description of most of the configuration variables available.
 
-     * {{{../hadoop-common/CommandsManual.html}Hadoop Commands Guide}}:
-       Hadoop commands usage.
+     * {{{./HDFSCommands.html}HDFS Commands Guide}}: HDFS commands usage.

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java

@@ -89,7 +89,7 @@ public class TestBalancer {
   private static final Random r = new Random();
 
   static {
-    Balancer.setBlockMoveWaitTime(1000L) ;
+    Dispatcher.setBlockMoveWaitTime(1000L) ;
   }
 
   static void initConf(Configuration conf) {
@@ -305,12 +305,12 @@ public class TestBalancer {
       for (DatanodeInfo datanode : datanodeReport) {
         double nodeUtilization = ((double)datanode.getDfsUsed())
             / datanode.getCapacity();
-        if (Balancer.Util.shouldBeExcluded(p.nodesToBeExcluded, datanode)) {
+        if (Dispatcher.Util.isExcluded(p.nodesToBeExcluded, datanode)) {
           assertTrue(nodeUtilization == 0);
           actualExcludedNodeCount++;
           continue;
         }
-        if (!Balancer.Util.shouldBeIncluded(p.nodesToBeIncluded, datanode)) {
+        if (!Dispatcher.Util.isIncluded(p.nodesToBeIncluded, datanode)) {
           assertTrue(nodeUtilization == 0);
           actualExcludedNodeCount++;
           continue;

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java

@@ -44,7 +44,7 @@ public class TestBalancerWithHANameNodes {
   ClientProtocol client;
 
   static {
-    Balancer.setBlockMoveWaitTime(1000L);
+    Dispatcher.setBlockMoveWaitTime(1000L);
   }
 
   /**

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java

@@ -73,7 +73,7 @@ public class TestBalancerWithMultipleNameNodes {
   private static final Random RANDOM = new Random();
 
   static {
-    Balancer.setBlockMoveWaitTime(1000L) ;
+    Dispatcher.setBlockMoveWaitTime(1000L) ;
   }
 
   /** Common objects used in various methods. */

Some files were not shown because too many files changed in this diff