
Merging trunk to branch-trunk-win

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-trunk-win@1420375 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 12 years ago
Parent
Commit
ff3d880ea7
100 changed files with 2786 additions and 926 deletions
  1. +1 -0    hadoop-common-project/hadoop-auth/pom.xml
  2. +150 -0  hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AltKerberosAuthenticationHandler.java
  3. +67 -0   hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm
  4. +5 -0    hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm
  5. +110 -0  hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAltKerberosAuthenticationHandler.java
  6. +26 -15  hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
  7. +26 -0   hadoop-common-project/hadoop-common/CHANGES.txt
  8. +3 -4    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
  9. +89 -8   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
  10. +17 -20 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
  11. +2 -2   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
  12. +78 -3  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java
  13. +95 -31 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
  14. +23 -10 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  15. +11 -0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java
  16. +46 -0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java
  17. +21 -3  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
  18. +14 -10 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
  19. +7 -0   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java
  20. +18 -0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java
  21. +21 -1  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
  22. +20 -0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
  23. +20 -2  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
  24. +16 -0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextSymlinkBaseTest.java
  25. +41 -22 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
  26. +10 -1  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java
  27. +39 -19 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
  28. +1 -2   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFSMainOperationsLocalFileSystem.java
  29. +11 -0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextDeleteOnExit.java
  30. +0 -1   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextSymlink.java
  31. +3 -0   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
  32. +0 -2   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFsFCStatistics.java
  33. +20 -15 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java
  34. +25 -18 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFs.java
  35. +1 -2   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java
  36. +1 -1   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java
  37. +1 -1   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java
  38. +1 -1   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAuthorityLocalFileSystem.java
  39. +6 -3   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
  40. +70 -49 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
  41. +6 -3   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java
  42. +51 -41 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
  43. +5 -3   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java
  44. +18 -2  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java
  45. +74 -1  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestUTF8.java
  46. +33 -0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
  47. +94 -0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
  48. +3 -26  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java
  49. +185 -3 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
  50. +2 -3   hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java
  51. +0 -13  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java
  52. +175 -111 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  53. +26 -44 hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
  54. +5 -4   hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java
  55. +21 -7  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java
  56. +42 -0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/VolumeId.java
  57. +2 -2   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java
  58. +2 -0   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  59. +42 -11 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
  60. +1 -1   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
  61. +1 -1   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
  62. +21 -0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
  63. +3 -2   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  64. +1 -1   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
  65. +5 -6   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  66. +3 -2   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
  67. +75 -192 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
  68. +24 -4  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
  69. +21 -0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
  70. +2 -1   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
  71. +1 -1   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
  72. +61 -0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AuditLogger.java
  73. +51 -8  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
  74. +53 -0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java
  75. +33 -21 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  76. +14 -0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
  77. +1 -7   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
  78. +1 -1   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
  79. +173 -115 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  80. +6 -0   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
  81. +18 -0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
  82. +25 -8  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
  83. +9 -6   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  84. +14 -2  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  85. +1 -1   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
  86. +4 -9   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java
  87. +1 -2   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
  88. +2 -3   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java
  89. +1 -0   hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
  90. +13 -0  hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
  91. +6 -1   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
  92. +5 -0   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java
  93. +15 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java
  94. +11 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java
  95. +6 -1   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
  96. +183 -0 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestVolumeId.java
  97. +6 -0   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
  98. +6 -1   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
  99. +6 -0   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
  100. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java

+ 1 - 0
hadoop-common-project/hadoop-auth/pom.xml

@@ -110,6 +110,7 @@
            <exclude>**/${test.exclude}.java</exclude>
            <exclude>${test.exclude.pattern}</exclude>
            <exclude>**/TestKerberosAuth*.java</exclude>
+            <exclude>**/TestAltKerberosAuth*.java</exclude>
            <exclude>**/Test*$*.java</exclude>
          </excludes>
        </configuration>

+ 150 - 0
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AltKerberosAuthenticationHandler.java

@@ -0,0 +1,150 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.server;
+
+import java.io.IOException;
+import java.util.Properties;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+
+ /**
+ * The {@link AltKerberosAuthenticationHandler} behaves exactly the same way as
+ * the {@link KerberosAuthenticationHandler}, except that it allows for an
+ * alternative form of authentication for browsers while still using Kerberos
+ * for Java access.  This is an abstract class that should be subclassed
+ * to allow a developer to implement their own custom authentication for browser
+ * access.  The alternateAuthenticate method will be called whenever a request
+ * comes from a browser.
+ * <p/>
+ */
+public abstract class AltKerberosAuthenticationHandler
+                        extends KerberosAuthenticationHandler {
+
+  /**
+   * Constant that identifies the authentication mechanism.
+   */
+  public static final String TYPE = "alt-kerberos";
+
+  /**
+   * Constant for the configuration property that indicates which user agents
+   * are not considered browsers (comma separated)
+   */
+  public static final String NON_BROWSER_USER_AGENTS =
+          TYPE + ".non-browser.user-agents";
+  private static final String NON_BROWSER_USER_AGENTS_DEFAULT =
+          "java,curl,wget,perl";
+
+  private String[] nonBrowserUserAgents;
+
+  /**
+   * Returns the authentication type of the authentication handler,
+   * 'alt-kerberos'.
+   * <p/>
+   *
+   * @return the authentication type of the authentication handler,
+   * 'alt-kerberos'.
+   */
+  @Override
+  public String getType() {
+    return TYPE;
+  }
+
+  @Override
+  public void init(Properties config) throws ServletException {
+    super.init(config);
+
+    nonBrowserUserAgents = config.getProperty(
+            NON_BROWSER_USER_AGENTS, NON_BROWSER_USER_AGENTS_DEFAULT)
+            .split("\\W*,\\W*");
+    for (int i = 0; i < nonBrowserUserAgents.length; i++) {
+        nonBrowserUserAgents[i] = nonBrowserUserAgents[i].toLowerCase();
+    }
+  }
+
+  /**
+   * It enforces the Kerberos SPNEGO authentication sequence, returning an
+   * {@link AuthenticationToken} only after the Kerberos SPNEGO sequence has
+   * completed successfully (in the case of Java access) and only after the
+   * custom authentication implemented by the subclass in alternateAuthenticate
+   * has completed successfully (in the case of browser access).
+   * <p/>
+   *
+   * @param request the HTTP client request.
+   * @param response the HTTP client response.
+   *
+   * @return an authentication token if the request is authorized or null
+   *
+   * @throws IOException thrown if an IO error occurred
+   * @throws AuthenticationException thrown if an authentication error occurred
+   */
+  @Override
+  public AuthenticationToken authenticate(HttpServletRequest request,
+      HttpServletResponse response)
+      throws IOException, AuthenticationException {
+    AuthenticationToken token;
+    if (isBrowser(request.getHeader("User-Agent"))) {
+      token = alternateAuthenticate(request, response);
+    }
+    else {
+      token = super.authenticate(request, response);
+    }
+    return token;
+  }
+
+  /**
+   * This method parses the User-Agent String and returns whether or not it
+   * refers to a browser.  If it's not a browser, then Kerberos authentication
+   * will be used; if it is a browser, alternateAuthenticate from the subclass
+   * will be used.
+   * <p/>
+   * A User-Agent String is considered to be a browser if it does not contain
+   * any of the values from alt-kerberos.non-browser.user-agents; the default
+   * behavior is to consider everything a browser unless it contains one of:
+   * "java", "curl", "wget", or "perl".  Subclasses can optionally override
+   * this method to use different behavior.
+   *
+   * @param userAgent The User-Agent String, or null if there isn't one
+   * @return true if the User-Agent String refers to a browser, false if not
+   */
+  protected boolean isBrowser(String userAgent) {
+    if (userAgent == null) {
+      return false;
+    }
+    userAgent = userAgent.toLowerCase();
+    boolean isBrowser = true;
+    for (String nonBrowserUserAgent : nonBrowserUserAgents) {
+        if (userAgent.contains(nonBrowserUserAgent)) {
+            isBrowser = false;
+            break;
+        }
+    }
+    return isBrowser;
+  }
+
+  /**
+   * Subclasses should implement this method to provide the custom
+   * authentication to be used for browsers.
+   *
+   * @param request the HTTP client request.
+   * @param response the HTTP client response.
+   * @return an authentication token if the request is authorized, or null
+   * @throws IOException thrown if an IO error occurs
+   * @throws AuthenticationException thrown if an authentication error occurs
+   */
+  public abstract AuthenticationToken alternateAuthenticate(
+      HttpServletRequest request, HttpServletResponse response)
+      throws IOException, AuthenticationException;
+}
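For orientation, here is a minimal sketch of the kind of subclass this handler expects. The class name and the "sso.user" cookie check are hypothetical illustrations, not part of this commit; a real implementation would validate an SSO token or similar rather than trusting a cookie value.

package org.my.subclass.of;

import java.io.IOException;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.server.AltKerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;

// Hypothetical example: browsers authenticate via a pre-issued "sso.user"
// cookie, while non-browser clients still go through Kerberos SPNEGO.
public class CookieAltKerberosAuthenticationHandler
    extends AltKerberosAuthenticationHandler {

  @Override
  public AuthenticationToken alternateAuthenticate(
      HttpServletRequest request, HttpServletResponse response)
      throws IOException, AuthenticationException {
    Cookie[] cookies = request.getCookies();
    if (cookies != null) {
      for (Cookie cookie : cookies) {
        if ("sso.user".equals(cookie.getName())) {
          String user = cookie.getValue();
          // The token type must match getType() ("alt-kerberos") or the
          // authentication filter will reject the token.
          return new AuthenticationToken(user, user, getType());
        }
      }
    }
    // No cookie: challenge the browser and return null (not authenticated).
    response.sendError(HttpServletResponse.SC_UNAUTHORIZED);
    return null;
  }
}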

+ 67 - 0
hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm

@@ -176,6 +176,73 @@ Configuration
 
     ...
 </web-app>
++---+
+
+** AltKerberos Configuration
+
+  <<IMPORTANT>>: A KDC must be configured and running.
+
+  The AltKerberos authentication mechanism is a partially implemented derivative
+  of the Kerberos SPNEGO authentication mechanism which allows a "mixed" form of
+  authentication where Kerberos SPNEGO is used by non-browsers while an
+  alternate form of authentication (to be implemented by the user) is used for
+  browsers.  To use AltKerberos as the authentication mechanism (besides
+  providing an implementation), the authentication filter must be configured
+  with the following init parameters, in addition to the previously mentioned
+  Kerberos SPNEGO ones:
+
+    * <<<[PREFIX.]type>>>: the full class name of the implementation of
+      AltKerberosAuthenticationHandler to use.
+
+    * <<<[PREFIX.]alt-kerberos.non-browser.user-agents>>>: a comma-separated
+      list of which user-agents should be considered non-browsers.
+
+  <<Example>>:
+
++---+
+<web-app version="2.5" xmlns="http://java.sun.com/xml/ns/javaee">
+    ...
+
+    <filter>
+        <filter-name>kerberosFilter</filter-name>
+        <filter-class>org.apache.hadoop.security.auth.server.AuthenticationFilter</filter-class>
+        <init-param>
+            <param-name>type</param-name>
+            <param-value>org.my.subclass.of.AltKerberosAuthenticationHandler</param-value>
+        </init-param>
+        <init-param>
+            <param-name>alt-kerberos.non-browser.user-agents</param-name>
+            <param-value>java,curl,wget,perl</param-value>
+        </init-param>
+        <init-param>
+            <param-name>token.validity</param-name>
+            <param-value>30</param-value>
+        </init-param>
+        <init-param>
+            <param-name>cookie.domain</param-name>
+            <param-value>.foo.com</param-value>
+        </init-param>
+        <init-param>
+            <param-name>cookie.path</param-name>
+            <param-value>/</param-value>
+        </init-param>
+        <init-param>
+            <param-name>kerberos.principal</param-name>
+            <param-value>HTTP/localhost@LOCALHOST</param-value>
+        </init-param>
+        <init-param>
+            <param-name>kerberos.keytab</param-name>
+            <param-value>/tmp/auth.keytab</param-value>
+        </init-param>
+    </filter>
+
+    <filter-mapping>
+        <filter-name>kerberosFilter</filter-name>
+        <url-pattern>/kerberos/*</url-pattern>
+    </filter-mapping>
+
+    ...
+</web-app>
 +---+
 
   \[ {{{./index.html}Go Back}} \]

+ 5 - 0
hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm

@@ -24,6 +24,11 @@ Hadoop Auth, Java HTTP SPNEGO ${project.version}
  Hadoop Auth also supports additional authentication mechanisms on the client
  and the server side via 2 simple interfaces.
 
+  Additionally, it provides a partially implemented derivative of the Kerberos
+  SPNEGO authentication to allow a "mixed" form of authentication where Kerberos
+  SPNEGO is used by non-browsers while an alternate form of authentication
+  (to be implemented by the user) is used for browsers.
+
 * License
 
   Hadoop Auth is distributed under {{{http://www.apache.org/licenses/}Apache

+ 110 - 0
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAltKerberosAuthenticationHandler.java

@@ -0,0 +1,110 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.server;
+
+import java.io.IOException;
+import java.util.Properties;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.mockito.Mockito;
+
+public class TestAltKerberosAuthenticationHandler
+    extends TestKerberosAuthenticationHandler {
+
+  @Override
+  protected KerberosAuthenticationHandler getNewAuthenticationHandler() {
+    // AltKerberosAuthenticationHandler is abstract; a subclass would normally
+    // perform some other authentication when alternateAuthenticate() is called.
+    // For the test, we'll just return an AuthenticationToken as the other
+    // authentication is left up to the developer of the subclass
+    return new AltKerberosAuthenticationHandler() {
+      @Override
+      public AuthenticationToken alternateAuthenticate(
+              HttpServletRequest request,
+              HttpServletResponse response)
+              throws IOException, AuthenticationException {
+        return new AuthenticationToken("A", "B", getType());
+      }
+    };
+  }
+
+  @Override
+  protected String getExpectedType() {
+    return AltKerberosAuthenticationHandler.TYPE;
+  }
+
+  public void testAlternateAuthenticationAsBrowser() throws Exception {
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+
+    // By default, a User-Agent without "java", "curl", "wget", or "perl" in it
+    // is considered a browser
+    Mockito.when(request.getHeader("User-Agent")).thenReturn("Some Browser");
+
+    AuthenticationToken token = handler.authenticate(request, response);
+    assertEquals("A", token.getUserName());
+    assertEquals("B", token.getName());
+    assertEquals(getExpectedType(), token.getType());
+  }
+
+  public void testNonDefaultNonBrowserUserAgentAsBrowser() throws Exception {
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+
+    if (handler != null) {
+      handler.destroy();
+      handler = null;
+    }
+    handler = getNewAuthenticationHandler();
+    Properties props = getDefaultProperties();
+    props.setProperty("alt-kerberos.non-browser.user-agents", "foo, bar");
+    try {
+      handler.init(props);
+    } catch (Exception ex) {
+      handler = null;
+      throw ex;
+    }
+
+    // Pretend we're something that will not match with "foo" (or "bar")
+    Mockito.when(request.getHeader("User-Agent")).thenReturn("blah");
+    // Should use alt authentication
+    AuthenticationToken token = handler.authenticate(request, response);
+    assertEquals("A", token.getUserName());
+    assertEquals("B", token.getName());
+    assertEquals(getExpectedType(), token.getType());
+  }
+
+  public void testNonDefaultNonBrowserUserAgentAsNonBrowser() throws Exception {
+    if (handler != null) {
+      handler.destroy();
+      handler = null;
+    }
+    handler = getNewAuthenticationHandler();
+    Properties props = getDefaultProperties();
+    props.setProperty("alt-kerberos.non-browser.user-agents", "foo, bar");
+    try {
+      handler.init(props);
+    } catch (Exception ex) {
+      handler = null;
+      throw ex;
+    }
+
+    // Run the kerberos tests again
+    testRequestWithoutAuthorization();
+    testRequestWithInvalidAuthorization();
+    testRequestWithAuthorization();
+    testRequestWithInvalidKerberosAuthorization();
+  }
+}

+ 26 - 15
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java

@@ -28,23 +28,37 @@ import org.ietf.jgss.Oid;
 
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
-import java.lang.reflect.Field;
 import java.util.Properties;
 import java.util.concurrent.Callable;
 
 public class TestKerberosAuthenticationHandler extends TestCase {
 
-  private KerberosAuthenticationHandler handler;
+  protected KerberosAuthenticationHandler handler;
+
+  protected KerberosAuthenticationHandler getNewAuthenticationHandler() {
+    return new KerberosAuthenticationHandler();
+  }
+
+  protected String getExpectedType() {
+    return KerberosAuthenticationHandler.TYPE;
+  }
+
+  protected Properties getDefaultProperties() {
+    Properties props = new Properties();
+    props.setProperty(KerberosAuthenticationHandler.PRINCIPAL,
+            KerberosTestUtils.getServerPrincipal());
+    props.setProperty(KerberosAuthenticationHandler.KEYTAB,
+            KerberosTestUtils.getKeytabFile());
+    props.setProperty(KerberosAuthenticationHandler.NAME_RULES,
+            "RULE:[1:$1@$0](.*@" + KerberosTestUtils.getRealm()+")s/@.*//\n");
+    return props;
+  }
 
   @Override
   protected void setUp() throws Exception {
     super.setUp();
-    handler = new KerberosAuthenticationHandler();
-    Properties props = new Properties();
-    props.setProperty(KerberosAuthenticationHandler.PRINCIPAL, KerberosTestUtils.getServerPrincipal());
-    props.setProperty(KerberosAuthenticationHandler.KEYTAB, KerberosTestUtils.getKeytabFile());
-    props.setProperty(KerberosAuthenticationHandler.NAME_RULES,
-                      "RULE:[1:$1@$0](.*@" + KerberosTestUtils.getRealm()+")s/@.*//\n");
+    handler = getNewAuthenticationHandler();
+    Properties props = getDefaultProperties();
    try {
       handler.init(props);
     } catch (Exception ex) {
@@ -71,10 +85,8 @@ public class TestKerberosAuthenticationHandler extends TestCase {
 
     KerberosName.setRules("RULE:[1:$1@$0](.*@FOO)s/@.*//\nDEFAULT");
     
-    handler = new KerberosAuthenticationHandler();
-    Properties props = new Properties();
-    props.setProperty(KerberosAuthenticationHandler.PRINCIPAL, KerberosTestUtils.getServerPrincipal());
-    props.setProperty(KerberosAuthenticationHandler.KEYTAB, KerberosTestUtils.getKeytabFile());
+    handler = getNewAuthenticationHandler();
+    Properties props = getDefaultProperties();
     props.setProperty(KerberosAuthenticationHandler.NAME_RULES, "RULE:[1:$1@$0](.*@BAR)s/@.*//\nDEFAULT");
     try {
       handler.init(props);
@@ -97,8 +109,7 @@ public class TestKerberosAuthenticationHandler extends TestCase {
   }
 
   public void testType() throws Exception {
-    KerberosAuthenticationHandler handler = new KerberosAuthenticationHandler();
-    assertEquals(KerberosAuthenticationHandler.TYPE, handler.getType());
+    assertEquals(getExpectedType(), handler.getType());
   }
 
   public void testRequestWithoutAuthorization() throws Exception {
@@ -182,7 +193,7 @@ public class TestKerberosAuthenticationHandler extends TestCase {
 
       assertEquals(KerberosTestUtils.getClientPrincipal(), authToken.getName());
       assertTrue(KerberosTestUtils.getClientPrincipal().startsWith(authToken.getUserName()));
-      assertEquals(KerberosAuthenticationHandler.TYPE, authToken.getType());
+      assertEquals(getExpectedType(), authToken.getType());
     } else {
       Mockito.verify(response).setHeader(Mockito.eq(KerberosAuthenticator.WWW_AUTHENTICATE),
                                          Mockito.matches(KerberosAuthenticator.NEGOTIATE + " .*"));

+ 26 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -143,6 +143,9 @@ Trunk (Unreleased)
 
   BUG FIXES
 
+    HADOOP-8418. Update UGI Principal classes name for running with
+    IBM JDK on 64 bits Windows.  (Yu Gao via eyang)
+
     HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName.
     (Devaraj K via umamahesh)
 
@@ -289,6 +292,12 @@ Trunk (Unreleased)
     HADOOP-9037. Bug in test-patch.sh and precommit build process (Kihwal Lee
     via jlowe)
 
+    HADOOP-9121. InodeTree.java has redundant check for vName while 
+    throwing exception. (Arup Malakar via suresh)
+
+    HADOOP-9131. Turn off TestLocalFileSystem#testListStatusWithColons on
+    Windows. (Chris Nauroth via suresh)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -306,6 +315,12 @@ Release 2.0.3-alpha - Unreleased
 
     HADOOP-9020. Add a SASL PLAIN server (daryn via bobby)
 
+    HADOOP-9090. Support on-demand publish of metrics. (Mostafa Elhemali via
+    suresh)
+
+    HADOOP-9054. Add AuthenticationHandler that uses Kerberos but allows for 
+    an alternate form of authentication for browsers. (rkanter via tucu)
+
   IMPROVEMENTS
 
     HADOOP-8789. Tests setLevel(Level.OFF) should be Level.ERROR.
@@ -456,6 +471,17 @@ Release 2.0.3-alpha - Unreleased
     HADOOP-8958. ViewFs:Non absolute mount name failures when running 
     multiple tests on Windows. (Chris Nauroth via suresh)
 
+    HADOOP-9103. UTF8 class does not properly decode Unicode characters
+    outside the basic multilingual plane. (todd)
+
+    HADOOP-9070. Kerberos SASL server cannot find kerberos key. (daryn via atm)
+
+    HADOOP-6762. Exception while doing RPC I/O closes channel
+    (Sam Rash and todd via todd)
+
+    HADOOP-9126. FormatZK and ZKFC startup can fail due to zkclient connection
+    establishment delay. (Rakesh R and todd via todd)
+
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES

+ 3 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java

@@ -118,8 +118,7 @@ abstract class InodeTree<T> {
      return result;
    }
    
-    INode<T> resolveInternal(final String pathComponent)
-        throws FileNotFoundException {
+    INode<T> resolveInternal(final String pathComponent) {
      return children.get(pathComponent);
    }
    
@@ -336,8 +335,8 @@ abstract class InodeTree<T> {
    }
    if (!gotMountTableEntry) {
      throw new IOException(
-          "ViewFs: Cannot initialize: Empty Mount table in config for " + 
-             vName == null ? "viewfs:///" : ("viewfs://" + vName + "/"));
+          "ViewFs: Cannot initialize: Empty Mount table in config for " +
+             "viewfs://" + vName + "/");
    }
  }
 

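The line removed in the second hunk above (HADOOP-9121) is a classic Java precedence pitfall worth spelling out: string concatenation binds tighter than both == and the conditional operator, so the old expression compared the already-concatenated message to null, and the "viewfs:///" branch was dead code. A standalone illustration of the pitfall follows; the class and variable names are illustrative, not part of the commit.

// Demonstrates the precedence bug fixed by HADOOP-9121.
public class TernaryPrecedenceDemo {
  public static void main(String[] args) {
    String vName = null;

    // Parsed as: ("Empty Mount table in config for " + vName) == null ? ... : ...
    // The concatenation is never null, so this always takes the second branch,
    // and the whole expression even discards the message prefix entirely.
    String buggy = "Empty Mount table in config for " +
        vName == null ? "viewfs:///" : ("viewfs://" + vName + "/");

    // Parenthesizing the conditional (or dropping the dead branch, as the
    // commit does) restores the intended message.
    String fixed = "Empty Mount table in config for " +
        (vName == null ? "viewfs:///" : ("viewfs://" + vName + "/"));

    System.out.println(buggy); // prints: viewfs://null/
    System.out.println(fixed); // prints: Empty Mount table in config for viewfs:///
  }
}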
+ 89 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

@@ -21,6 +21,8 @@ package org.apache.hadoop.ha;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
@@ -45,6 +47,7 @@ import org.apache.zookeeper.KeeperException.Code;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
 
 /**
  * 
@@ -205,7 +208,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       int zookeeperSessionTimeout, String parentZnodeName, List<ACL> acl,
       List<ZKAuthInfo> authInfo,
       ActiveStandbyElectorCallback app) throws IOException,
-      HadoopIllegalArgumentException {
+      HadoopIllegalArgumentException, KeeperException {
     if (app == null || acl == null || parentZnodeName == null
         || zookeeperHostPorts == null || zookeeperSessionTimeout <= 0) {
       throw new HadoopIllegalArgumentException("Invalid argument");
@@ -602,10 +605,24 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
    * 
    * @return new zookeeper client instance
    * @throws IOException
+   * @throws KeeperException zookeeper connectionloss exception
    */
-  protected synchronized ZooKeeper getNewZooKeeper() throws IOException {
-    ZooKeeper zk = new ZooKeeper(zkHostPort, zkSessionTimeout, null);
-    zk.register(new WatcherWithClientRef(zk));
+  protected synchronized ZooKeeper getNewZooKeeper() throws IOException,
+      KeeperException {
+    
+    // Unfortunately, the ZooKeeper constructor connects to ZooKeeper and
+    // may trigger the Connected event immediately. So, if we register the
+    // watcher after constructing ZooKeeper, we may miss that event. Instead,
+    // we construct the watcher first, and have it queue any events it receives
+    // before we can set its ZooKeeper reference.
+    WatcherWithClientRef watcher = new WatcherWithClientRef();
+    ZooKeeper zk = new ZooKeeper(zkHostPort, zkSessionTimeout, watcher);
+    watcher.setZooKeeperRef(zk);
+
+    // Wait for the asynchronous success/failure. This may throw an exception
+    // if we don't connect within the session timeout.
+    watcher.waitForZKConnectionEvent(zkSessionTimeout);
+    
     for (ZKAuthInfo auth : zkAuthInfo) {
       zk.addAuthInfo(auth.getScheme(), auth.getAuth());
     }
@@ -710,13 +727,16 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       } catch(IOException e) {
         LOG.warn(e);
         sleepFor(5000);
+      } catch(KeeperException e) {
+        LOG.warn(e);
+        sleepFor(5000);
       }
       ++connectionRetryCount;
     }
     return success;
   }
 
-  private void createConnection() throws IOException {
+  private void createConnection() throws IOException, KeeperException {
     if (zkClient != null) {
       try {
         zkClient.close();
@@ -973,14 +993,76 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
    * events.
    */
   private final class WatcherWithClientRef implements Watcher {
-    private final ZooKeeper zk;
+    private ZooKeeper zk;
+    
+    /**
+     * Latch fired whenever any event arrives. This is used in order
+     * to wait for the Connected event when the client is first created.
+     */
+    private CountDownLatch hasReceivedEvent = new CountDownLatch(1);
+
+    /**
+     * If any events arrive before the reference to ZooKeeper is set,
+     * they get queued up and later forwarded when the reference is
+     * available.
+     */
+    private final List<WatchedEvent> queuedEvents = Lists.newLinkedList();
+    
+    private WatcherWithClientRef() {
+    }
 
     private WatcherWithClientRef(ZooKeeper zk) {
       this.zk = zk;
     }
+    
+    /**
+     * Waits for the next event from ZooKeeper to arrive.
+     * 
+     * @param connectionTimeoutMs zookeeper connection timeout in milliseconds
+     * @throws KeeperException if the connection attempt times out. This will
+     * be a ZooKeeper ConnectionLoss exception code.
+     * @throws IOException if interrupted while connecting to ZooKeeper
+     */
+    private void waitForZKConnectionEvent(int connectionTimeoutMs)
+        throws KeeperException, IOException {
+      try {
+        if (!hasReceivedEvent.await(connectionTimeoutMs, TimeUnit.MILLISECONDS)) {
+          LOG.error("Connection timed out: couldn't connect to ZooKeeper in "
+              + connectionTimeoutMs + " milliseconds");
+          synchronized (this) {
+            zk.close();
+          }
+          throw KeeperException.create(Code.CONNECTIONLOSS);
+        }
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+        throw new IOException(
+            "Interrupted when connecting to zookeeper server", e);
+      }
+    }
+
+    private synchronized void setZooKeeperRef(ZooKeeper zk) {
+      Preconditions.checkState(this.zk == null,
+          "zk already set -- must be set exactly once");
+      this.zk = zk;
+      
+      for (WatchedEvent e : queuedEvents) {
+        forwardEvent(e);
+      }
+      queuedEvents.clear();
+    }
 
     @Override
-    public void process(WatchedEvent event) {
+    public synchronized void process(WatchedEvent event) {
+      if (zk != null) {
+        forwardEvent(event);
+      } else {
+        queuedEvents.add(event);
+      }
+    }
+    
+    private void forwardEvent(WatchedEvent event) {
+      hasReceivedEvent.countDown();
       try {
         ActiveStandbyElector.this.processWatchEvent(
             zk, event);
@@ -1024,5 +1106,4 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       ((appData == null) ? "null" : StringUtils.byteToHexString(appData)) + 
       " cb=" + appClient;
   }
-
 }
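The rework above closes a startup race: the ZooKeeper constructor may deliver the Connected event before the caller can register a watcher, so the watcher is now created first and buffers anything that arrives early. A self-contained sketch of that pattern follows, with plain Java types standing in for the ZooKeeper classes (QueueingWatcher and the String events are illustrative, not Hadoop or ZooKeeper APIs):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

class QueueingWatcher {
  private Object client;                          // set exactly once, later
  private final List<String> queued = new ArrayList<String>();
  private final CountDownLatch firstEvent = new CountDownLatch(1);

  synchronized void process(String event) {
    if (client != null) {
      forward(event);
    } else {
      queued.add(event);                          // arrived too early: buffer it
    }
  }

  synchronized void setClientRef(Object client) {
    this.client = client;
    for (String e : queued) {
      forward(e);                                 // replay in arrival order
    }
    queued.clear();
  }

  boolean awaitFirstEvent(long timeoutMs) throws InterruptedException {
    return firstEvent.await(timeoutMs, TimeUnit.MILLISECONDS);
  }

  private void forward(String event) {
    firstEvent.countDown();
    System.out.println("handling " + event + " with " + client);
  }
}

The latch gives the creator a bounded wait for the first (Connected) event, which is what waitForZKConnectionEvent does above with the session timeout.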

+ 17 - 20
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java

@@ -180,7 +180,15 @@ public abstract class ZKFailoverController {
 
   private int doRun(String[] args)
       throws HadoopIllegalArgumentException, IOException, InterruptedException {
-    initZK();
+    try {
+      initZK();
+    } catch (KeeperException ke) {
+      LOG.fatal("Unable to start failover controller. Unable to connect "
+          + "to ZooKeeper quorum at " + zkQuorum + ". Please check the "
+          + "configured value for " + ZK_QUORUM_KEY + " and ensure that "
+          + "ZooKeeper is running.");
+      return ERR_CODE_NO_ZK;
+    }
     if (args.length > 0) {
       if ("-formatZK".equals(args[0])) {
         boolean force = false;
@@ -199,24 +207,12 @@ public abstract class ZKFailoverController {
         badArg(args[0]);
       }
     }
-    
-    try {
-      if (!elector.parentZNodeExists()) {
-        LOG.fatal("Unable to start failover controller. " +
-            "Parent znode does not exist.\n" +
-            "Run with -formatZK flag to initialize ZooKeeper.");
-        return ERR_CODE_NO_PARENT_ZNODE;
-      }
-    } catch (IOException ioe) {
-      if (ioe.getCause() instanceof KeeperException.ConnectionLossException) {
-        LOG.fatal("Unable to start failover controller. Unable to connect " +
-            "to ZooKeeper quorum at " + zkQuorum + ". Please check the " +
-            "configured value for " + ZK_QUORUM_KEY + " and ensure that " +
-            "ZooKeeper is running.");
-        return ERR_CODE_NO_ZK;
-      } else {
-        throw ioe;
-      }
+
+    if (!elector.parentZNodeExists()) {
+      LOG.fatal("Unable to start failover controller. "
+          + "Parent znode does not exist.\n"
+          + "Run with -formatZK flag to initialize ZooKeeper.");
+      return ERR_CODE_NO_PARENT_ZNODE;
     }
 
     try {
@@ -310,7 +306,8 @@ public abstract class ZKFailoverController {
   }
 
 
-  private void initZK() throws HadoopIllegalArgumentException, IOException {
+  private void initZK() throws HadoopIllegalArgumentException, IOException,
+      KeeperException {
     zkQuorum = conf.get(ZK_QUORUM_KEY);
     int zkTimeout = conf.getInt(ZK_SESSION_TIMEOUT_KEY,
         ZK_SESSION_TIMEOUT_DEFAULT);

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java

@@ -1858,10 +1858,10 @@ public class SequenceFile {
         UTF8 className = new UTF8();
 
         className.readFields(in);
-        keyClassName = className.toString(); // key class name
+        keyClassName = className.toStringChecked(); // key class name
 
         className.readFields(in);
-        valClassName = className.toString(); // val class name
+        valClassName = className.toStringChecked(); // val class name
       } else {
         keyClassName = Text.readString(in);
         valClassName = Text.readString(in);

+ 78 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java

@@ -21,7 +21,9 @@ package org.apache.hadoop.io;
 import java.io.IOException;
 import java.io.DataInput;
 import java.io.DataOutput;
+import java.io.UTFDataFormatException;
 
+import org.apache.hadoop.util.StringUtils;
 
 import org.apache.commons.logging.*;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -31,6 +33,9 @@ import org.apache.hadoop.classification.InterfaceStability;
  * 
  * <p>Also includes utilities for efficiently reading and writing UTF-8.
  *
+ * Note that this decodes UTF-8 but actually encodes CESU-8, a variant of
+ * UTF-8: see http://en.wikipedia.org/wiki/CESU-8
+ *
  * @deprecated replaced by Text
  */
 @Deprecated
@@ -151,6 +156,21 @@ public class UTF8 implements WritableComparable<UTF8> {
     }
     return buffer.toString();
   }
+  
+  /**
+   * Convert to a string, checking for valid UTF8.
+   * @return the converted string
+   * @throws UTFDataFormatException if the underlying bytes contain invalid
+   * UTF8 data.
+   */
+  public String toStringChecked() throws IOException {
+    StringBuilder buffer = new StringBuilder(length);
+    synchronized (IBUF) {
+      IBUF.reset(bytes, length);
+      readChars(IBUF, buffer, length);
+    }
+    return buffer.toString();
+  }
 
   /** Returns true iff <code>o</code> is a UTF8 with the same contents.  */
   @Override
@@ -209,6 +229,19 @@ public class UTF8 implements WritableComparable<UTF8> {
     return result;
   }
 
+  /**
+   * Convert a UTF-8 encoded byte array back into a string.
+   *
+   * @throws IOException if the byte array is invalid UTF8
+   */
+  public static String fromBytes(byte[] bytes) throws IOException {
+    DataInputBuffer dbuf = new DataInputBuffer();
+    dbuf.reset(bytes, 0, bytes.length);
+    StringBuilder buf = new StringBuilder(bytes.length);
+    readChars(dbuf, buf, bytes.length);
+    return buf.toString();
+  }
+
   /** Read a UTF-8 encoded string.
    *
    * @see DataInput#readUTF()
@@ -221,7 +254,7 @@ public class UTF8 implements WritableComparable<UTF8> {
   }
 
   private static void readChars(DataInput in, StringBuilder buffer, int nBytes)
-    throws IOException {
+    throws UTFDataFormatException, IOException {
     DataOutputBuffer obuf = OBUF_FACTORY.get();
     obuf.reset();
     obuf.write(in, nBytes);
@@ -230,18 +263,60 @@ public class UTF8 implements WritableComparable<UTF8> {
     while (i < nBytes) {
       byte b = bytes[i++];
       if ((b & 0x80) == 0) {
+        // 0b0xxxxxxx: 1-byte sequence
         buffer.append((char)(b & 0x7F));
-      } else if ((b & 0xE0) != 0xE0) {
+      } else if ((b & 0xE0) == 0xC0) {
+        if (i >= nBytes) {
+          throw new UTFDataFormatException("Truncated UTF8 at " +
+              StringUtils.byteToHexString(bytes, i - 1, 1));
+        }
+        // 0b110xxxxx: 2-byte sequence
         buffer.append((char)(((b & 0x1F) << 6)
             | (bytes[i++] & 0x3F)));
-      } else {
+      } else if ((b & 0xF0) == 0xE0) {
+        // 0b1110xxxx: 3-byte sequence
+        if (i + 1 >= nBytes) {
+          throw new UTFDataFormatException("Truncated UTF8 at " +
+              StringUtils.byteToHexString(bytes, i - 1, 2));
+        }
         buffer.append((char)(((b & 0x0F) << 12)
             | ((bytes[i++] & 0x3F) << 6)
             |  (bytes[i++] & 0x3F)));
+      } else if ((b & 0xF8) == 0xF0) {
+        if (i + 2 >= nBytes) {
+          throw new UTFDataFormatException("Truncated UTF8 at " +
+              StringUtils.byteToHexString(bytes, i - 1, 3));
+        }
+        // 0b11110xxx: 4-byte sequence
+        int codepoint =
+            ((b & 0x07) << 18)
+          | ((bytes[i++] & 0x3F) <<  12)
+          | ((bytes[i++] & 0x3F) <<  6)
+          | ((bytes[i++] & 0x3F));
+        buffer.append(highSurrogate(codepoint))
+              .append(lowSurrogate(codepoint));
+      } else {
+        // The UTF8 standard describes 5-byte and 6-byte sequences, but
+        // these are no longer allowed as of 2003 (see RFC 3629)
+
+        // Only show the next 6 bytes max in the error code - in case the
+        // buffer is large, this will prevent an exceedingly large message.
+        int endForError = Math.min(i + 5, nBytes);
+        throw new UTFDataFormatException("Invalid UTF8 at " +
+            StringUtils.byteToHexString(bytes, i - 1, endForError));
       }
     }
   }
 
+  private static char highSurrogate(int codePoint) {
+    return (char) ((codePoint >>> 10)
+        + (Character.MIN_HIGH_SURROGATE - (Character.MIN_SUPPLEMENTARY_CODE_POINT >>> 10)));
+  }
+
+  private static char lowSurrogate(int codePoint) {
+    return (char) ((codePoint & 0x3ff) + Character.MIN_LOW_SURROGATE);
+  }
+
   /** Write a UTF-8 encoded string.
    *
    * @see DataOutput#writeUTF(String)
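As a sanity check on the surrogate arithmetic introduced above (standard Unicode math rather than anything Hadoop-specific), here is how the two new helpers map a supplementary code point such as U+1F600 to its UTF-16 pair. SurrogateMathDemo is an illustrative name, not part of the commit.

public class SurrogateMathDemo {
  public static void main(String[] args) {
    int codePoint = 0x1F600; // GRINNING FACE, outside the BMP

    // Same formulas as the patch's highSurrogate()/lowSurrogate() helpers.
    char high = (char) ((codePoint >>> 10)
        + (Character.MIN_HIGH_SURROGATE
           - (Character.MIN_SUPPLEMENTARY_CODE_POINT >>> 10)));
    char low = (char) ((codePoint & 0x3ff) + Character.MIN_LOW_SURROGATE);

    // 0x1F600 >>> 10 = 0x7D; 0xD800 - 0x40 + 0x7D = 0xD83D (high surrogate).
    // 0x1F600 & 0x3FF = 0x200; 0xDC00 + 0x200 = 0xDE00 (low surrogate).
    System.out.printf("U+%X -> %04X %04X%n", codePoint, (int) high, (int) low);

    // Agrees with the JDK's own conversion.
    char[] jdk = Character.toChars(codePoint);
    System.out.println(jdk[0] == high && jdk[1] == low); // true
  }
}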

+ 95 - 31
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -38,6 +38,11 @@ import java.util.Iterator;
 import java.util.Map.Entry;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
@@ -78,6 +83,8 @@ import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Time;
 
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
 /** A client for an IPC service.  IPC calls take a single {@link Writable} as a
  * parameter, and return a {@link Writable} as their value.  A service runs on
  * a port and is defined by a parameter class and a value class.
@@ -103,6 +110,19 @@ public class Client {
   
   final static int PING_CALL_ID = -1;
   
+  /**
+   * Executor on which IPC calls' parameters are sent. Deferring
+   * the sending of parameters to a separate thread isolates them
+   * from thread interruptions in the calling code.
+   */
+  private static final ExecutorService SEND_PARAMS_EXECUTOR = 
+    Executors.newCachedThreadPool(
+        new ThreadFactoryBuilder()
+        .setDaemon(true)
+        .setNameFormat("IPC Parameter Sending Thread #%d")
+        .build());
+
+  
   /**
    * set the ping interval value in configuration
    * 
@@ -245,6 +265,8 @@ public class Client {
     private AtomicLong lastActivity = new AtomicLong();// last I/O activity time
     private AtomicBoolean shouldCloseConnection = new AtomicBoolean();  // indicate if the connection is closed
     private IOException closeException; // close reason
+    
+    private final Object sendParamsLock = new Object();
 
     public Connection(ConnectionId remoteId) throws IOException {
       this.remoteId = remoteId;
@@ -831,43 +853,76 @@ public class Client {
      * Note: this is not called from the Connection thread, but by other
      * threads.
      */
-    public void sendParam(Call call) {
+    public void sendParam(final Call call)
+        throws InterruptedException, IOException {
       if (shouldCloseConnection.get()) {
         return;
       }
 
-      DataOutputBuffer d=null;
-      try {
-        synchronized (this.out) {
-          if (LOG.isDebugEnabled())
-            LOG.debug(getName() + " sending #" + call.id);
+      // Serialize the call to be sent. This is done from the actual
+      // caller thread, rather than the SEND_PARAMS_EXECUTOR thread,
+      // so that if the serialization throws an error, it is reported
+      // properly. This also parallelizes the serialization.
+      //
+      // Format of a call on the wire:
+      // 0) Length of rest below (1 + 2)
+      // 1) PayloadHeader  - is serialized Delimited hence contains length
+      // 2) the Payload - the RpcRequest
+      //
+      // Items '1' and '2' are prepared here. 
+      final DataOutputBuffer d = new DataOutputBuffer();
+      RpcPayloadHeaderProto header = ProtoUtil.makeRpcPayloadHeader(
+         call.rpcKind, RpcPayloadOperationProto.RPC_FINAL_PAYLOAD, call.id);
+      header.writeDelimitedTo(d);
+      call.rpcRequest.write(d);
+
+      synchronized (sendParamsLock) {
+        Future<?> senderFuture = SEND_PARAMS_EXECUTOR.submit(new Runnable() {
+          @Override
+          public void run() {
+            try {
+              synchronized (Connection.this.out) {
+                if (shouldCloseConnection.get()) {
+                  return;
+                }
+                
+                if (LOG.isDebugEnabled())
+                  LOG.debug(getName() + " sending #" + call.id);
+         
+                byte[] data = d.getData();
+                int totalLength = d.getLength();
+                out.writeInt(totalLength); // Total Length
+                out.write(data, 0, totalLength);//PayloadHeader + RpcRequest
+                out.flush();
+              }
+            } catch (IOException e) {
+              // exception at this point would leave the connection in an
+              // unrecoverable state (eg half a call left on the wire).
+              // So, close the connection, killing any outstanding calls
+              markClosed(e);
+            } finally {
+              //the buffer is just an in-memory buffer, but it is still polite to
+              // close early
+              IOUtils.closeStream(d);
+            }
+          }
+        });
+      
+        try {
+          senderFuture.get();
+        } catch (ExecutionException e) {
+          Throwable cause = e.getCause();
           
-          // Serializing the data to be written.
-          // Format:
-          // 0) Length of rest below (1 + 2)
-          // 1) PayloadHeader  - is serialized Delimited hence contains length
-          // 2) the Payload - the RpcRequest
-          //
-          d = new DataOutputBuffer();
-          RpcPayloadHeaderProto header = ProtoUtil.makeRpcPayloadHeader(
-             call.rpcKind, RpcPayloadOperationProto.RPC_FINAL_PAYLOAD, call.id);
-          header.writeDelimitedTo(d);
-          call.rpcRequest.write(d);
-          byte[] data = d.getData();
-   
-          int totalLength = d.getLength();
-          out.writeInt(totalLength); // Total Length
-          out.write(data, 0, totalLength);//PayloadHeader + RpcRequest
-          out.flush();
+          // cause should only be a RuntimeException as the Runnable above
+          // catches IOException
+          if (cause instanceof RuntimeException) {
+            throw (RuntimeException) cause;
+          } else {
+            throw new RuntimeException("unexpected checked exception", cause);
+          }
        }
-      } catch(IOException e) {
-        markClosed(e);
-      } finally {
-        //the buffer is just an in-memory buffer, but it is still polite to
-        // close early
-        IOUtils.closeStream(d);
      }
-    }  
+    }
 
     /* Receive a response.
      * Because only one receiver, so no synchronization on in.
@@ -1138,7 +1193,16 @@ public class Client {
       ConnectionId remoteId) throws InterruptedException, IOException {
     Call call = new Call(rpcKind, rpcRequest);
     Connection connection = getConnection(remoteId, call);
-    connection.sendParam(call);                 // send the parameter
+    try {
+      connection.sendParam(call);                 // send the parameter
+    } catch (RejectedExecutionException e) {
+      throw new IOException("connection has been closed", e);
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      LOG.warn("interrupted waiting to send params to server", e);
+      throw new IOException(e);
+    }
+
     boolean interrupted = false;
     synchronized (call) {
       while (!call.done) {
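
The pattern in this hunk is worth spelling out: the request is serialized on the caller's thread (so serialization errors surface to the caller), while the socket write runs on a shared daemon executor and the caller waits on the Future. A thread interrupt then lands in the interruptible wait rather than mid-write, so no half-written call is ever left on the wire. A minimal sketch of the idea, using hypothetical names rather than the Hadoop API:

    import java.io.IOException;
    import java.io.OutputStream;
    import java.io.UncheckedIOException;
    import java.util.concurrent.*;

    class InterruptSafeSender {
      // shared daemon pool, analogous to SEND_PARAMS_EXECUTOR above
      private static final ExecutorService SENDER = Executors.newCachedThreadPool(r -> {
        Thread t = new Thread(r, "param-sender");
        t.setDaemon(true);
        return t;
      });

      private final OutputStream out;

      InterruptSafeSender(OutputStream out) { this.out = out; }

      void send(byte[] frame) throws IOException, InterruptedException {
        Future<?> f = SENDER.submit(() -> {
          synchronized (out) {            // one frame on the wire at a time
            try {
              out.write(frame);
              out.flush();
            } catch (IOException e) {
              throw new UncheckedIOException(e);  // surfaced via the Future
            }
          }
        });
        try {
          f.get();  // interruptible wait; the write itself keeps running
        } catch (ExecutionException e) {
          if (e.getCause() instanceof UncheckedIOException) {
            throw ((UncheckedIOException) e.getCause()).getCause();
          }
          throw new RuntimeException("unexpected failure in sender", e.getCause());
        }
      }
    }
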

+ 23 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -199,7 +199,8 @@ public abstract class Server {
   //     in ObjectWritable to efficiently transmit arrays of primitives
   // 6 : Made RPC payload header explicit
   // 7 : Changed Ipc Connection Header to use Protocol buffers
-  public static final byte CURRENT_VERSION = 7;
+  // 8 : SASL server always sends a final response
+  public static final byte CURRENT_VERSION = 8;
 
   /**
    * Initial and max size of response buffer
@@ -1220,8 +1221,8 @@ public abstract class Server {
          AUDITLOG.warn(AUTH_FAILED_FOR + clientIP + ":" + attemptingUser);
          throw e;
        }
-        if (replyToken == null && authMethod == AuthMethod.PLAIN) {
-          // client needs at least response to know if it should use SIMPLE
+        if (saslServer.isComplete() && replyToken == null) {
+          // send final response for success
          replyToken = new byte[0];
        }
        if (replyToken != null) {
@@ -1392,7 +1393,7 @@ public abstract class Server {
    }
 
    private AuthMethod initializeAuthContext(AuthMethod authMethod)
-        throws IOException {
+        throws IOException, InterruptedException {
      try {
        if (enabledAuthMethods.contains(authMethod)) {
          saslServer = createSaslServer(authMethod);
@@ -1425,8 +1426,7 @@ public abstract class Server {
    }
 
    private SaslServer createSaslServer(AuthMethod authMethod)
-        throws IOException {
-      SaslServer saslServer = null;
+        throws IOException, InterruptedException {
      String hostname = null;
      String saslProtocol = null;
      CallbackHandler saslCallback = null;
@@ -1462,10 +1462,23 @@ public abstract class Server {
               "Server does not support SASL " + authMethod);
               "Server does not support SASL " + authMethod);
       }
       }
       
       
-      String mechanism = authMethod.getMechanismName();
-      saslServer = Sasl.createSaslServer(
-          mechanism, saslProtocol, hostname,
-          SaslRpcServer.SASL_PROPS, saslCallback);
+      return createSaslServer(authMethod.getMechanismName(), saslProtocol,
+                              hostname, saslCallback);                                    
+    }
+
+    private SaslServer createSaslServer(final String mechanism,
+                                        final String protocol,
+                                        final String hostname,
+                                        final CallbackHandler callback
+        ) throws IOException, InterruptedException {
+      SaslServer saslServer = UserGroupInformation.getCurrentUser().doAs(
+          new PrivilegedExceptionAction<SaslServer>() {
+            @Override
+            public SaslServer run() throws SaslException  {
+              return Sasl.createSaslServer(mechanism, protocol, hostname,
+                                           SaslRpcServer.SASL_PROPS, callback);
+            }
+          });
      if (saslServer == null) {
        throw new AccessControlException(
            "Unable to find SASL server implementation for " + mechanism);

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java

@@ -90,6 +90,17 @@ public abstract class MetricsSystem implements MetricsSystemMXBean {
   */
  public abstract void register(Callback callback);
 
+  /**
+   * Requests an immediate publish of all metrics from sources to sinks.
+   * 
+   * This is a "soft" request: the expectation is that a best effort will be
+   * done to synchronously snapshot the metrics from all the sources and put
+   * them in all the sinks (including flushing the sinks) before returning to
+   * the caller. If this can't be accomplished in reasonable time it's OK to
+   * return to the caller before everything is done. 
+   */
+  public abstract void publishMetricsNow();
+
  /**
   * Shutdown the metrics system completely (usually during server shutdown.)
   * The MetricsSystemMXBean will be unregistered.
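
A plausible use of the new hook, assuming the usual DefaultMetricsSystem entry point: push one final synchronous snapshot to the sinks before tearing the system down.

    import org.apache.hadoop.metrics2.MetricsSystem;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

    public class MetricsFlushOnShutdown {
      public static void main(String[] args) {
        MetricsSystem ms = DefaultMetricsSystem.initialize("example");
        // ... sources registered and metrics produced elsewhere ...
        ms.publishMetricsNow(); // best effort: snapshot sources, push and flush sinks
        ms.shutdown();
      }
    }
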

+ 46 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.metrics2.impl;
 
 import java.util.Random;
+import java.util.concurrent.*;
 
 import static com.google.common.base.Preconditions.*;
 
@@ -48,6 +49,7 @@ class MetricsSinkAdapter implements SinkQueue.Consumer<MetricsBuffer> {
   private volatile boolean stopping = false;
   private volatile boolean inError = false;
   private final int period, firstRetryDelay, retryCount;
+  private final long oobPutTimeout;
   private final float retryBackoff;
   private final MetricsRegistry registry = new MetricsRegistry("sinkadapter");
   private final MutableStat latency;
@@ -69,6 +71,8 @@ class MetricsSinkAdapter implements SinkQueue.Consumer<MetricsBuffer> {
     this.period = checkArg(period, period > 0, "period");
     firstRetryDelay = checkArg(retryDelay, retryDelay > 0, "retry delay");
     this.retryBackoff = checkArg(retryBackoff, retryBackoff>1, "retry backoff");
+    oobPutTimeout = (long)
+        (firstRetryDelay * Math.pow(retryBackoff, retryCount) * 1000);
     this.retryCount = retryCount;
     this.queue = new SinkQueue<MetricsBuffer>(checkArg(queueCapacity,
         queueCapacity > 0, "queue capacity"));
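
The timeout computed above is the worst case the retry loop can take: firstRetryDelay seconds, backed off retryCount times, converted to milliseconds. For example, with a 2 s first delay, a backoff factor of 2.0 and 3 retries, the immediate-put budget is 2 x 2^3 x 1000 = 16000 ms. The same arithmetic isolated as a sketch:

    final class OobTimeout {
      // mirrors the oobPutTimeout expression in the constructor above
      static long millis(int firstRetryDelaySecs, float retryBackoff, int retryCount) {
        return (long) (firstRetryDelaySecs * Math.pow(retryBackoff, retryCount) * 1000);
      }

      public static void main(String[] args) {
        System.out.println(millis(2, 2.0f, 3)); // prints 16000
      }
    }
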
@@ -95,6 +99,23 @@ class MetricsSinkAdapter implements SinkQueue.Consumer<MetricsBuffer> {
    }
    return true; // OK
  }
+  
+  public boolean putMetricsImmediate(MetricsBuffer buffer) {
+    WaitableMetricsBuffer waitableBuffer =
+        new WaitableMetricsBuffer(buffer);
+    if (!queue.enqueue(waitableBuffer)) {
+      LOG.warn(name + " has a full queue and can't consume the given metrics.");
+      dropped.incr();
+      return false;
+    }
+    if (!waitableBuffer.waitTillNotified(oobPutTimeout)) {
+      LOG.warn(name +
+          " couldn't fulfill an immediate putMetrics request in time." +
+          " Abandoning.");
+      return false;
+    }
+    return true;
+  }
 
   void publishMetricsFromQueue() {
     int retryDelay = firstRetryDelay;
@@ -158,6 +179,9 @@ class MetricsSinkAdapter implements SinkQueue.Consumer<MetricsBuffer> {
      sink.flush();
      latency.add(Time.now() - ts);
    }
+    if (buffer instanceof WaitableMetricsBuffer) {
+      ((WaitableMetricsBuffer)buffer).notifyAnyWaiters();
+    }
     LOG.debug("Done");
     LOG.debug("Done");
   }
   }
 
 
@@ -191,4 +215,26 @@ class MetricsSinkAdapter implements SinkQueue.Consumer<MetricsBuffer> {
  MetricsSink sink() {
    return sink;
  }
+
+  static class WaitableMetricsBuffer extends MetricsBuffer {
+    private final Semaphore notificationSemaphore =
+        new Semaphore(0);
+
+    public WaitableMetricsBuffer(MetricsBuffer metricsBuffer) {
+      super(metricsBuffer);
+    }
+
+    public boolean waitTillNotified(long millisecondsToWait) {
+      try {
+        return notificationSemaphore.tryAcquire(millisecondsToWait,
+            TimeUnit.MILLISECONDS);
+      } catch (InterruptedException e) {
+        return false;
+      }
+    }
+
+    public void notifyAnyWaiters() {
+      notificationSemaphore.release();
+    }
+  }
 }
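
WaitableMetricsBuffer reduces to a classic handshake: the producer enqueues a buffer and blocks with a deadline, and the consumer releases a semaphore once the sinks have been flushed. The core of that handshake, stripped of the metrics types (names here are illustrative):

    import java.util.concurrent.Semaphore;
    import java.util.concurrent.TimeUnit;

    class CompletionToken {
      private final Semaphore done = new Semaphore(0);

      // consumer side: signal that the attached work item was fully processed
      void markDone() {
        done.release();
      }

      // producer side: true if the consumer finished within the timeout
      boolean awaitDone(long timeoutMillis) {
        try {
          return done.tryAcquire(timeoutMillis, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          return false;
        }
      }
    }
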

+ 21 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java

@@ -344,9 +344,19 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
   synchronized void onTimerEvent() {
     logicalTime += period;
     if (sinks.size() > 0) {
-      publishMetrics(sampleMetrics());
+      publishMetrics(sampleMetrics(), false);
     }
   }
+  
+  /**
+   * Requests an immediate publish of all metrics from sources to sinks.
+   */
+  @Override
+  public void publishMetricsNow() {
+    if (sinks.size() > 0) {
+      publishMetrics(sampleMetrics(), true);
+    }    
+  }
 
   /**
    * Sample all the sources for a snapshot of metrics/tags
@@ -380,12 +390,20 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
   /**
    * Publish a metrics snapshot to all the sinks
    * @param buffer  the metrics snapshot to publish
+   * @param immediate  indicates that we should publish metrics immediately
+   *                   instead of using a separate thread.
    */
-  synchronized void publishMetrics(MetricsBuffer buffer) {
+  synchronized void publishMetrics(MetricsBuffer buffer, boolean immediate) {
     int dropped = 0;
     for (MetricsSinkAdapter sa : sinks.values()) {
       long startTime = Time.now();
-      dropped += sa.putMetrics(buffer, logicalTime) ? 0 : 1;
+      boolean result;
+      if (immediate) {
+        result = sa.putMetricsImmediate(buffer); 
+      } else {
+        result = sa.putMetrics(buffer, logicalTime);
+      }
+      dropped += result ? 0 : 1;
       publishStat.add(Time.now() - startTime);
     }
     droppedPubAll.incr(dropped);

+ 14 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -299,13 +299,17 @@ public class UserGroupInformation {
   
  private static String OS_LOGIN_MODULE_NAME;
  private static Class<? extends Principal> OS_PRINCIPAL_CLASS;
-  private static final boolean windows = 
-                           System.getProperty("os.name").startsWith("Windows");
+  private static final boolean windows =
+      System.getProperty("os.name").startsWith("Windows");
+  private static final boolean is64Bit =
+      System.getProperty("os.arch").contains("64");
   /* Return the OS login module class name */
   private static String getOSLoginModuleName() {
     if (System.getProperty("java.vendor").contains("IBM")) {
-      return windows ? "com.ibm.security.auth.module.NTLoginModule"
-       : "com.ibm.security.auth.module.LinuxLoginModule";
+      return windows ? (is64Bit
+          ? "com.ibm.security.auth.module.Win64LoginModule"
+          : "com.ibm.security.auth.module.NTLoginModule")
+        : "com.ibm.security.auth.module.LinuxLoginModule";
     } else {
       return windows ? "com.sun.security.auth.module.NTLoginModule"
         : "com.sun.security.auth.module.UnixLoginModule";
@@ -319,13 +323,13 @@ public class UserGroupInformation {
     try {
       if (System.getProperty("java.vendor").contains("IBM")) {
         if (windows) {
-          return (Class<? extends Principal>)
-            cl.loadClass("com.ibm.security.auth.UsernamePrincipal");
+          return (Class<? extends Principal>) (is64Bit
+            ? cl.loadClass("com.ibm.security.auth.UsernamePrincipal")
+            : cl.loadClass("com.ibm.security.auth.NTUserPrincipal"));
         } else {
-          return (Class<? extends Principal>)
-            (System.getProperty("os.arch").contains("64")
-             ? cl.loadClass("com.ibm.security.auth.UsernamePrincipal")
-             : cl.loadClass("com.ibm.security.auth.LinuxPrincipal"));
+          return (Class<? extends Principal>) (is64Bit
+            ? cl.loadClass("com.ibm.security.auth.UsernamePrincipal")
+            : cl.loadClass("com.ibm.security.auth.LinuxPrincipal"));
         }
       } else {
         return (Class<? extends Principal>) (windows
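
Collapsed into one method, the dispatch this hunk refines picks the JAAS login module from three system properties; the module names below are the ones visible in the diff.

    final class OsLoginModule {
      static String name() {
        boolean ibmJvm  = System.getProperty("java.vendor").contains("IBM");
        boolean windows = System.getProperty("os.name").startsWith("Windows");
        boolean is64Bit = System.getProperty("os.arch").contains("64");
        if (ibmJvm) {
          return windows
              ? (is64Bit ? "com.ibm.security.auth.module.Win64LoginModule"
                         : "com.ibm.security.auth.module.NTLoginModule")
              : "com.ibm.security.auth.module.LinuxLoginModule";
        }
        return windows ? "com.sun.security.auth.module.NTLoginModule"
                       : "com.sun.security.auth.module.UnixLoginModule";
      }
    }
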

+ 7 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java

@@ -41,6 +41,9 @@ public abstract class FCStatisticsBaseTest {
   
  //fc should be set appropriately by the deriving test.
  protected static FileContext fc = null;
+
+  private final FileContextTestHelper fileContextTestHelper =
+    new FileContextTestHelper();
   
  @Test
  public void testStatistics() throws IOException, URISyntaxException {
@@ -97,4 +100,8 @@ public abstract class FCStatisticsBaseTest {
    }
    return URI.create(SchemeAuthString);
  }
+
+  protected Path getTestRootPath(FileContext fc, String pathString){
+    return fileContextTestHelper.getTestRootPath(fc, pathString);
+  }
 }

+ 18 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java

@@ -65,6 +65,8 @@ public abstract class FSMainOperationsBaseTest  {
   
   
  protected static FileSystem fSys;
+
+  private final FileSystemTestHelper fileSystemTestHelper;
   
  final private static PathFilter DEFAULT_FILTER = new PathFilter() {
    @Override
@@ -73,6 +75,14 @@ public abstract class FSMainOperationsBaseTest  {
    }
  };
 
+  public FSMainOperationsBaseTest() {
+    this(new FileSystemTestHelper());
+  }
+
+  public FSMainOperationsBaseTest(FileSystemTestHelper fileSystemTestHelper) {
+    this.fileSystemTestHelper = fileSystemTestHelper;
+  }
+
   //A test filter with returns any path containing a "b" 
   final private static PathFilter TEST_X_FILTER = new PathFilter() {
     @Override
@@ -1134,4 +1144,12 @@ public abstract class FSMainOperationsBaseTest  {
       }
     return false;
  }
+  
+  protected Path getAbsoluteTestRootPath(FileSystem fSys) throws IOException {
+    return fileSystemTestHelper.getAbsoluteTestRootPath(fSys);
+  }
+
+  protected Path getTestRootPath(FileSystem fSys, String pathString) {
+    return fileSystemTestHelper.getTestRootPath(fSys, pathString);
+  }
 }
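
The two constructors added here turn the helper into an injection point: the no-arg form keeps the old behavior, while a subclass can hand in a differently configured helper. A hypothetical subclass (the class name is an assumption; the boolean flag comes from the FileSystemTestHelper changes later in this commit):

    import org.apache.hadoop.fs.FSMainOperationsBaseTest;
    import org.apache.hadoop.fs.FileSystemTestHelper;

    // Hypothetical: a platform-sensitive suite passing a helper that strips
    // Windows drive specifiers ("C:") from the shared test root.
    public class TestMyFSMainOperations extends FSMainOperationsBaseTest {
      public TestMyFSMainOperations() {
        super(new FileSystemTestHelper(true));
      }
    }
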

+ 21 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java

@@ -52,6 +52,8 @@ import org.apache.commons.logging.impl.Log4JLogger;
 public abstract class FileContextCreateMkdirBaseTest {
    
   protected static FileContext fc;
+
+  private final FileContextTestHelper fileContextTestHelper;
       
   {
     try {
@@ -63,6 +65,15 @@ public abstract class FileContextCreateMkdirBaseTest {
     }
   }
   
+  public FileContextCreateMkdirBaseTest() {
+    this(new FileContextTestHelper());
+  }
+
+  public FileContextCreateMkdirBaseTest(
+      FileContextTestHelper fileContextTestHelper) {
+
+    this.fileContextTestHelper = fileContextTestHelper;
+  }
 
 
   @Before
   @Before
   public void setUp() throws Exception {
   public void setUp() throws Exception {
@@ -127,7 +138,8 @@ public abstract class FileContextCreateMkdirBaseTest {
   @Test
   @Test
   public void testCreateNonRecursiveWithNonExistingDir() {
     try {
+      fileContextTestHelper.createFileNonRecursive(fc,
+        getTestRootPath(fc, "NonExisting/foo"));
       Assert.fail("Create with non existing parent dir should have failed");
       Assert.fail("Create with non existing parent dir should have failed");
     } catch (IOException e) {
       // As expected
@@ -149,4 +161,12 @@ public abstract class FileContextCreateMkdirBaseTest {
     createFile(fc, f);
     Assert.assertTrue(isFile(fc, f));
   }
+
+  private Path getTestRootPath(FileContext fc) {
+    return fileContextTestHelper.getTestRootPath(fc);
+  }
+
+  private Path getTestRootPath(FileContext fc, String pathString) {
+    return fileContextTestHelper.getTestRootPath(fc, pathString);
+  }
 }

+ 20 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java

@@ -73,6 +73,8 @@ public abstract class FileContextMainOperationsBaseTest  {
   }
   
   protected static FileContext fc;
+
+  private final FileContextTestHelper fileContextTestHelper;
   
   final private static PathFilter DEFAULT_FILTER = new PathFilter() {
     @Override
@@ -95,6 +97,16 @@ public abstract class FileContextMainOperationsBaseTest  {
   private static byte[] data = getFileData(numBlocks,
       getDefaultBlockSize());
   
+  public FileContextMainOperationsBaseTest() {
+    this(new FileContextTestHelper());
+  }
+  
+  public FileContextMainOperationsBaseTest(
+      FileContextTestHelper fileContextTestHelper) {
+
+    this.fileContextTestHelper = fileContextTestHelper;
+  }
+
   @Before
   public void setUp() throws Exception {
     fc.mkdir(getTestRootPath(fc, "test"), FileContext.DEFAULT_PERM, true);
@@ -1190,4 +1202,12 @@ public abstract class FileContextMainOperationsBaseTest  {
       }
     return false;
  }
+
+  protected Path getAbsoluteTestRootPath(FileContext fc) throws IOException {
+    return fileContextTestHelper.getAbsoluteTestRootPath(fc);
+  }
+
+  protected Path getTestRootPath(FileContext fc, String pathString) {
+    return fileContextTestHelper.getTestRootPath(fc, pathString);
+  }
 }

+ 20 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java

@@ -70,6 +70,16 @@ public abstract class FileContextPermissionBase {
   
   protected static FileContext fc;
 
+  private final FileContextTestHelper fileContextTestHelper;
+
+  public FileContextPermissionBase() {
+    this(new FileContextTestHelper());
+  }
+
+  public FileContextPermissionBase(FileContextTestHelper fileContextTestHelper) {
+    this.fileContextTestHelper = fileContextTestHelper;
+  }
+
   @Before
   public void setUp() throws Exception {
     fc.mkdir(getTestRootPath(fc), FileContext.DEFAULT_PERM, true);
@@ -94,7 +104,7 @@ public abstract class FileContextPermissionBase {
     }
     String filename = "foo";
     Path f = getTestRootPath(fc, filename);
-    createFile(fc, filename);
+    fileContextTestHelper.createFile(fc, filename);
     doFilePermissionCheck(FileContext.DEFAULT_PERM.applyUMask(fc.getUMask()),
                         fc.getFileStatus(f).getPermission());
   }
@@ -109,7 +119,7 @@ public abstract class FileContextPermissionBase {
 
     String filename = "foo";
     Path f = getTestRootPath(fc, filename);
-    createFile(fc, f);
+    fileContextTestHelper.createFile(fc, f);
 
     try {
       // create files and manipulate them.
@@ -211,4 +221,12 @@ public abstract class FileContextPermissionBase {
   FsPermission getFileMask() {
     return FILE_MASK_ZERO;
   }
+
+  private Path getTestRootPath(FileContext fc) {
+    return fileContextTestHelper.getTestRootPath(fc);
+  }
+
+  private Path getTestRootPath(FileContext fc, String pathString) {
+    return fileContextTestHelper.getTestRootPath(fc, pathString);
+  }
 }

+ 16 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextSymlinkBaseTest.java

@@ -45,6 +45,8 @@ public abstract class FileContextSymlinkBaseTest {
  
   protected static FileContext fc;
 
+  private final FileContextTestHelper fileContextTestHelper;
+
   abstract protected String getScheme();
   abstract protected String testBaseDir1() throws IOException;
   abstract protected String testBaseDir2() throws IOException;
@@ -79,6 +81,16 @@ public abstract class FileContextSymlinkBaseTest {
         CreateOpts.blockSize(blockSize));
   }
 
+  public FileContextSymlinkBaseTest() {
+    this(new FileContextTestHelper());
+  }
+
+  public FileContextSymlinkBaseTest(
+      FileContextTestHelper fileContextTestHelper) {
+
+    this.fileContextTestHelper = fileContextTestHelper;
+  }
+
   @Before
   public void setUp() throws Exception {
     fc.mkdir(new Path(testBaseDir1()), FileContext.DEFAULT_PERM, true);
@@ -1353,4 +1365,8 @@ public abstract class FileContextSymlinkBaseTest {
       assertEquals(2, fc.getFileStatus(file).getModificationTime());
     }
   }
+
+  protected String getAbsoluteTestRootDir(FileContext fc) throws IOException {
+    return fileContextTestHelper.getAbsoluteTestRootDir(fc);
+  }
 }

+ 41 - 22
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java

@@ -25,6 +25,7 @@ import java.util.EnumSet;
 import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.Options.CreateOpts.BlockSize;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.Shell;
 import org.junit.Assert;
 
 /**
@@ -32,15 +33,22 @@ import org.junit.Assert;
  */
 public final class FileContextTestHelper {
   // The test root is relative to the <wd>/build/test/data by default
-  public static final String TEST_ROOT_DIR = 
+  public static String TEST_ROOT_DIR = 
     System.getProperty("test.build.data", "build/test/data") + "/test";
     System.getProperty("test.build.data", "build/test/data") + "/test";
   private static final int DEFAULT_BLOCK_SIZE = 1024;
   private static final int DEFAULT_BLOCK_SIZE = 1024;
   private static final int DEFAULT_NUM_BLOCKS = 2;
   private static final int DEFAULT_NUM_BLOCKS = 2;
   private static String absTestRootDir = null;
   private static String absTestRootDir = null;
 
 
-  /** Hidden constructor */
-  private FileContextTestHelper() {}
+  private final boolean stripDriveSpec;
+
+  public FileContextTestHelper() {
+    this(false);
+  }
   
+  public FileContextTestHelper(boolean stripDriveSpec) {
+    this.stripDriveSpec = stripDriveSpec;
+  }
+
   public static int getDefaultBlockSize() {
     return DEFAULT_BLOCK_SIZE;
   }
@@ -52,38 +60,40 @@ public final class FileContextTestHelper {
     }
     return data;
   }
-  
-  public static Path getTestRootPath(FileContext fc) {
-    return fc.makeQualified(new Path(TEST_ROOT_DIR));
+
+  public Path getTestRootPath(FileContext fc) {
+    return fc.makeQualified(new Path(stripDriveSpec(TEST_ROOT_DIR,
+      stripDriveSpec)));
   }
 
-  public static Path getTestRootPath(FileContext fc, String pathString) {
-    return fc.makeQualified(new Path(TEST_ROOT_DIR, pathString));
+  public Path getTestRootPath(FileContext fc, String pathString) {
+    return fc.makeQualified(new Path(
+      stripDriveSpec(TEST_ROOT_DIR, stripDriveSpec), pathString));
   }
   
   
   // the getAbsolutexxx method is needed because the root test dir
   // can be messed up by changing the working dir.
 
-  public static String getAbsoluteTestRootDir(FileContext fc)
+  public String getAbsoluteTestRootDir(FileContext fc)
       throws IOException {
     if (absTestRootDir == null) {
-      if (new Path(TEST_ROOT_DIR).isAbsolute()) {
-        absTestRootDir = TEST_ROOT_DIR;
+      String testRootDir = stripDriveSpec(TEST_ROOT_DIR, stripDriveSpec);
+      if (new Path(testRootDir).isAbsolute()) {
+        absTestRootDir = testRootDir;
       } else {
         absTestRootDir = fc.getWorkingDirectory().toString() + "/"
-            + TEST_ROOT_DIR;
+            + testRootDir;
       }
     }
     return absTestRootDir;
   }
   
-  public static Path getAbsoluteTestRootPath(FileContext fc) throws IOException {
+  public Path getAbsoluteTestRootPath(FileContext fc) throws IOException {
     return fc.makeQualified(new Path(getAbsoluteTestRootDir(fc)));
   }
 
-  public static Path getDefaultWorkingDirectory(FileContext fc)
-      throws IOException {
+  public Path getDefaultWorkingDirectory(FileContext fc) throws IOException {
     return getTestRootPath(fc, "/user/" + System.getProperty("user.name"))
     return getTestRootPath(fc, "/user/" + System.getProperty("user.name"))
         .makeQualified(fc.getDefaultFileSystem().getUri(),
         .makeQualified(fc.getDefaultFileSystem().getUri(),
             fc.getWorkingDirectory());
             fc.getWorkingDirectory());
@@ -116,12 +126,12 @@ public final class FileContextTestHelper {
     return createFile(fc, path, DEFAULT_NUM_BLOCKS, CreateOpts.createParent());
   }
 
-  public static long createFile(FileContext fc, String name) throws IOException {
+  public long createFile(FileContext fc, String name) throws IOException {
     Path path = getTestRootPath(fc, name);
     return createFile(fc, path);
   }
   
-  public static long createFileNonRecursive(FileContext fc, String name)
+  public long createFileNonRecursive(FileContext fc, String name)
   throws IOException {
     Path path = getTestRootPath(fc, name);
     return createFileNonRecursive(fc, path);
@@ -190,14 +200,12 @@ public final class FileContextTestHelper {
     return buffer;
   }
 
-  public static FileStatus containsPath(FileContext fc, Path path,
-      FileStatus[] dirList)
+  public FileStatus containsPath(FileContext fc, Path path, FileStatus[] dirList)
     throws IOException {
     return containsPath(getTestRootPath(fc, path.toString()), dirList);
   }
   
-  public static FileStatus containsPath(Path path,
-      FileStatus[] dirList)
+  public FileStatus containsPath(Path path, FileStatus[] dirList)
     throws IOException {
     for(int i = 0; i < dirList.length; i ++) { 
       if (path.equals(dirList[i].getPath()))
@@ -206,7 +214,7 @@ public final class FileContextTestHelper {
     return null;
   }
   
-  public static FileStatus containsPath(FileContext fc, String path,
+  public FileStatus containsPath(FileContext fc, String path,
       FileStatus[] dirList)
      throws IOException {
     return containsPath(fc, new Path(path), dirList);
@@ -241,4 +249,15 @@ public final class FileContextTestHelper {
     }
     Assert.assertEquals(aFc.makeQualified(new Path(path)), s.getPath());
   }
+
+  private static String stripDriveSpec(String pathString, boolean strip) {
+    if (strip && Shell.WINDOWS && pathString.length() >= 2 &&
+        Character.toUpperCase(pathString.charAt(0)) >= 'A' &&
+        Character.toUpperCase(pathString.charAt(0)) <= 'Z' &&
+        pathString.charAt(1) == ':') {
+
+      return pathString.substring(2);
+    }
+    return pathString;
+  }
 }
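
stripDriveSpec only fires when asked to, only on Windows, and only for paths that start with a drive letter followed by a colon; everything else passes through untouched. The check in isolation, with Shell.WINDOWS replaced by a parameter so the sketch is platform-neutral:

    final class DriveSpec {
      // "C:/build/test/data" -> "/build/test/data" when strip && onWindows
      static String strip(String path, boolean strip, boolean onWindows) {
        if (strip && onWindows && path.length() >= 2
            && Character.toUpperCase(path.charAt(0)) >= 'A'
            && Character.toUpperCase(path.charAt(0)) <= 'Z'
            && path.charAt(1) == ':') {
          return path.substring(2);  // drop the leading "C:" specifier
        }
        return path;
      }

      public static void main(String[] args) {
        System.out.println(strip("C:/build/test/data", true, true)); // /build/test/data
        System.out.println(strip("/build/test/data", true, true));   // unchanged
      }
    }
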

+ 10 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java

@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.fs;
 
-import static org.apache.hadoop.fs.FileContextTestHelper.getTestRootPath;
 import static org.apache.hadoop.fs.FileContextTestHelper.readFile;
 import static org.apache.hadoop.fs.FileContextTestHelper.writeFile;
 import static org.junit.Assert.assertTrue;
@@ -45,6 +44,8 @@ import org.junit.Test;
  */
 public abstract class FileContextUtilBase {
   protected FileContext fc;
+  private final FileContextTestHelper fileContextTestHelper =
+    new FileContextTestHelper();
   
   {
     try {
@@ -105,4 +106,12 @@ public abstract class FileContextUtilBase {
     assertTrue("Copied files does not match ",Arrays.equals(ts.getBytes(),
     assertTrue("Copied files does not match ",Arrays.equals(ts.getBytes(),
         readFile(fc,file2,ts.getBytes().length)));
         readFile(fc,file2,ts.getBytes().length)));
   }
   }
+
+  private Path getTestRootPath(FileContext fc) {
+    return fileContextTestHelper.getTestRootPath(fc);
+  }
+
+  private Path getTestRootPath(FileContext fc, String pathString) {
+    return fileContextTestHelper.getTestRootPath(fc, pathString);
+  }
 }

+ 39 - 19
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java

@@ -25,6 +25,7 @@ import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.Shell;
 import org.junit.Assert;
 import static org.junit.Assert.*;
 import static org.mockito.Mockito.mock;
@@ -34,15 +35,23 @@ import static org.mockito.Mockito.mock;
  */
 public final class FileSystemTestHelper {
   // The test root is relative to the <wd>/build/test/data by default
-  public static final String TEST_ROOT_DIR = 
+  public static String TEST_ROOT_DIR = 
     System.getProperty("test.build.data", "target/test/data") + "/test";
   private static final int DEFAULT_BLOCK_SIZE = 1024;
   private static final int DEFAULT_NUM_BLOCKS = 2;
   private static final short DEFAULT_NUM_REPL = 1;
   private static String absTestRootDir = null;
 
+  private final boolean stripDriveSpec;
+
   /** Hidden constructor */
-  private FileSystemTestHelper() {}
+  public FileSystemTestHelper() {
+    this(false);
+  }
+
+  public FileSystemTestHelper(boolean stripDriveSpec) {
+    this.stripDriveSpec = stripDriveSpec;
+  }
   
   public static void addFileSystemForTesting(URI uri, Configuration conf,
       FileSystem fs) throws IOException {
@@ -65,15 +74,17 @@ public final class FileSystemTestHelper {
   /*
    * get testRootPath qualified for fSys
    */
-  public static Path getTestRootPath(FileSystem fSys) {
-    return fSys.makeQualified(new Path(TEST_ROOT_DIR));
+  public Path getTestRootPath(FileSystem fSys) {
+    return fSys.makeQualified(new Path(stripDriveSpec(TEST_ROOT_DIR,
+      this.stripDriveSpec)));
   }
 
   /*
    * get testRootPath + pathString qualified for fSys
    */
-  public static Path getTestRootPath(FileSystem fSys, String pathString) {
-    return fSys.makeQualified(new Path(TEST_ROOT_DIR, pathString));
+  public Path getTestRootPath(FileSystem fSys, String pathString) {
+    return fSys.makeQualified(new Path(
+      stripDriveSpec(TEST_ROOT_DIR, this.stripDriveSpec), pathString));
   }
   
   
@@ -82,26 +93,25 @@ public final class FileSystemTestHelper {
   // is often relative to the working directory of process
   // running the unit tests.
 
-  static String getAbsoluteTestRootDir(FileSystem fSys)
-      throws IOException {
+  String getAbsoluteTestRootDir(FileSystem fSys) throws IOException {
     // NOTE: can't cache because of different filesystems!
     //if (absTestRootDir == null) 
-      if (new Path(TEST_ROOT_DIR).isAbsolute()) {
-        absTestRootDir = TEST_ROOT_DIR;
+      String testRootDir = stripDriveSpec(TEST_ROOT_DIR, this.stripDriveSpec);
+      if (new Path(testRootDir).isAbsolute()) {
+        absTestRootDir = testRootDir;
       } else {
         absTestRootDir = fSys.getWorkingDirectory().toString() + "/"
-            + TEST_ROOT_DIR;
+            + testRootDir;
       }
     //}
     return absTestRootDir;
   }
   
-  public static Path getAbsoluteTestRootPath(FileSystem fSys) throws IOException {
+  public Path getAbsoluteTestRootPath(FileSystem fSys) throws IOException {
     return fSys.makeQualified(new Path(getAbsoluteTestRootDir(fSys)));
   }
 
-  public static Path getDefaultWorkingDirectory(FileSystem fSys)
-      throws IOException {
+  public Path getDefaultWorkingDirectory(FileSystem fSys) throws IOException {
     return getTestRootPath(fSys, "/user/" + System.getProperty("user.name"))
     return getTestRootPath(fSys, "/user/" + System.getProperty("user.name"))
         .makeQualified(fSys.getUri(),
         .makeQualified(fSys.getUri(),
             fSys.getWorkingDirectory());
             fSys.getWorkingDirectory());
@@ -136,7 +146,7 @@ public final class FileSystemTestHelper {
     return createFile(fSys, path, DEFAULT_NUM_BLOCKS, DEFAULT_BLOCK_SIZE, DEFAULT_NUM_REPL, true);
   }
 
-  public static long createFile(FileSystem fSys, String name) throws IOException {
+  public long createFile(FileSystem fSys, String name) throws IOException {
     Path path = getTestRootPath(fSys, name);
     return createFile(fSys, path);
   }
@@ -188,7 +198,7 @@ public final class FileSystemTestHelper {
     return s;
   }
 
-  public static FileStatus containsPath(FileSystem fSys, Path path,
+  public FileStatus containsPath(FileSystem fSys, Path path,
       FileStatus[] dirList)
     throws IOException {
     for(int i = 0; i < dirList.length; i ++) { 
@@ -199,8 +209,7 @@ public final class FileSystemTestHelper {
     return null;
   }
   
-  public static FileStatus containsPath(Path path,
-      FileStatus[] dirList)
+  public static FileStatus containsPath(Path path, FileStatus[] dirList)
     throws IOException {
     for(int i = 0; i < dirList.length; i ++) { 
       if (path.equals(dirList[i].getPath()))
@@ -210,7 +219,7 @@ public final class FileSystemTestHelper {
   }
   
   
-  public static FileStatus containsPath(FileSystem fSys, String path, FileStatus[] dirList)
+  public FileStatus containsPath(FileSystem fSys, String path, FileStatus[] dirList)
      throws IOException {
     return containsPath(fSys, new Path(path), dirList);
   }
@@ -264,4 +273,15 @@ public final class FileSystemTestHelper {
       return fs.getDelegationToken(renewer);
     }    
   }
+
+  private static String stripDriveSpec(String pathString, boolean strip) {
+    if (strip && Shell.WINDOWS && pathString.length() >= 2 &&
+        Character.toUpperCase(pathString.charAt(0)) >= 'A' &&
+        Character.toUpperCase(pathString.charAt(0)) <= 'Z' &&
+        pathString.charAt(1) == ':') {
+
+      return pathString.substring(2);
+    }
+    return pathString;
+  }
 }

+ 1 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFSMainOperationsLocalFileSystem.java

@@ -52,8 +52,7 @@ public class TestFSMainOperationsLocalFileSystem extends FSMainOperationsBaseTes
   @Test
   @Override
   public void testWDAbsolute() throws IOException {
-    Path absoluteDir = FileSystemTestHelper.getTestRootPath(fSys,
-        "test/existingDir");
+    Path absoluteDir = getTestRootPath(fSys, "test/existingDir");
     fSys.mkdirs(absoluteDir);
     fSys.setWorkingDirectory(absoluteDir);
     Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());

+ 11 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextDeleteOnExit.java

@@ -36,6 +36,9 @@ public class TestFileContextDeleteOnExit {
   private static int numBlocks = 2;
   
   private FileContext fc;
+
+  private final FileContextTestHelper fileContextTestHelper =
+    new FileContextTestHelper();
   
   @Before
   public void setup() throws IOException {
@@ -86,4 +89,12 @@ public class TestFileContextDeleteOnExit {
     Assert.assertFalse(exists(fc, file2));
     Assert.assertFalse(exists(fc, dir));
   }
+
+  private Path getTestRootPath(FileContext fc) {
+    return this.fileContextTestHelper.getTestRootPath(fc);
+  }
+
+  private Path getTestRootPath(FileContext fc, String pathString) {
+    return this.fileContextTestHelper.getTestRootPath(fc, pathString);
+  }
 }
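
These test changes all use the same delegation pattern: the old static-helper call sites keep compiling because a private method of the same name routes them to the new instance helper. A sketch of the shape (class name hypothetical, body abbreviated):

    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.FileContextTestHelper;
    import org.apache.hadoop.fs.Path;

    public class SomeFileContextTest {
      private final FileContextTestHelper helper = new FileContextTestHelper();

      // existing calls to getTestRootPath(fc, "x") resolve here unchanged
      private Path getTestRootPath(FileContext fc, String pathString) {
        return helper.getTestRootPath(fc, pathString);
      }
    }
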

+ 0 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextSymlink.java

@@ -25,7 +25,6 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileUtil;
-import static org.apache.hadoop.fs.FileContextTestHelper.*;
 import static org.junit.Assert.*;
 import org.junit.Test;
 import org.junit.Before;

+ 3 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java

@@ -19,12 +19,14 @@ package org.apache.hadoop.fs;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem.Statistics;
+import org.apache.hadoop.util.Shell;
 
 import static org.apache.hadoop.fs.FileSystemTestHelper.*;
 
 import java.io.*;
 
 import static org.junit.Assert.*;
+import static org.junit.Assume.assumeTrue;
 
 import org.junit.Before;
 import org.junit.Test;
@@ -262,6 +264,7 @@ public class TestLocalFileSystem {
 
   @Test
   public void testListStatusWithColons() throws IOException {
+    assumeTrue(!Shell.WINDOWS);
     Configuration conf = new Configuration();
     LocalFileSystem fs = FileSystem.getLocal(conf);
     File colonFile = new File(TEST_ROOT_DIR, "foo:bar");
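
assumeTrue makes JUnit report the test as skipped rather than failed when the precondition is false, which is the right tool for platform-specific tests like this one, since ':' is illegal in Windows file names. A minimal standalone example:

    import static org.junit.Assume.assumeTrue;
    import org.junit.Test;

    public class NonWindowsOnlyTest {
      @Test
      public void testColonsInFileNames() {
        // skipped (not failed) on Windows
        assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
        // ... body exercises file names containing ':' ...
      }
    }
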

+ 0 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFsFCStatistics.java

@@ -25,8 +25,6 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 
-import static org.apache.hadoop.fs.FileContextTestHelper.*;
-
 /**
  * <p>
  *    Tests the File Context Statistics for {@link LocalFileSystem}

+ 20 - 15
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java

@@ -41,12 +41,15 @@ public class TestChRootedFileSystem {
   FileSystem fSysTarget; //
   Path chrootedTo;
 
+  private final FileSystemTestHelper fileSystemTestHelper =
+      new FileSystemTestHelper();
+
   @Before
   public void setUp() throws Exception {
     // create the test root on local_fs
     Configuration conf = new Configuration();
     fSysTarget = FileSystem.getLocal(conf);
-    chrootedTo = FileSystemTestHelper.getAbsoluteTestRootPath(fSysTarget);
+    chrootedTo = fileSystemTestHelper.getAbsoluteTestRootPath(fSysTarget);
     // In case previous test was killed before cleanup
     fSysTarget.delete(chrootedTo, true);
     
@@ -107,12 +110,12 @@ public class TestChRootedFileSystem {
     
 
     // Create file 
-    FileSystemTestHelper.createFile(fSys, "/foo");
+    fileSystemTestHelper.createFile(fSys, "/foo");
     Assert.assertTrue(fSys.isFile(new Path("/foo")));
     Assert.assertTrue(fSys.isFile(new Path("/foo")));
     Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo, "foo")));
     Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo, "foo")));
     
     
     // Create file with recursive dir
-    FileSystemTestHelper.createFile(fSys, "/newDir/foo");
+    fileSystemTestHelper.createFile(fSys, "/newDir/foo");
     Assert.assertTrue(fSys.isFile(new Path("/newDir/foo")));
     Assert.assertTrue(fSys.isFile(new Path("/newDir/foo")));
     Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"newDir/foo")));
     Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"newDir/foo")));
     
     
@@ -122,7 +125,7 @@ public class TestChRootedFileSystem {
     Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo, "newDir/foo")));
     Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo, "newDir/foo")));
     
     
     // Create file with a 2 component dirs recursively
     // Create file with a 2 component dirs recursively
-    FileSystemTestHelper.createFile(fSys, "/newDir/newDir2/foo");
+    fileSystemTestHelper.createFile(fSys, "/newDir/newDir2/foo");
     Assert.assertTrue(fSys.isFile(new Path("/newDir/newDir2/foo")));
     Assert.assertTrue(fSys.isFile(new Path("/newDir/newDir2/foo")));
     Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"newDir/newDir2/foo")));
     Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"newDir/newDir2/foo")));
     
     
@@ -135,11 +138,11 @@ public class TestChRootedFileSystem {
   
   @Test
   public void testMkdirDelete() throws IOException {
-    fSys.mkdirs(FileSystemTestHelper.getTestRootPath(fSys, "/dirX"));
+    fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys, "/dirX"));
     Assert.assertTrue(fSys.isDirectory(new Path("/dirX")));
     Assert.assertTrue(fSys.isDirectory(new Path("/dirX")));
     Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"dirX")));
     Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"dirX")));
     
     
-    fSys.mkdirs(FileSystemTestHelper.getTestRootPath(fSys, "/dirX/dirY"));
+    fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys, "/dirX/dirY"));
     Assert.assertTrue(fSys.isDirectory(new Path("/dirX/dirY")));
     Assert.assertTrue(fSys.isDirectory(new Path("/dirX/dirY")));
     Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"dirX/dirY")));
     Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"dirX/dirY")));
     
     
@@ -157,11 +160,12 @@ public class TestChRootedFileSystem {
   @Test
   public void testRename() throws IOException {
     // Rename a file
-    FileSystemTestHelper.createFile(fSys, "/newDir/foo");
+    fileSystemTestHelper.createFile(fSys, "/newDir/foo");
     fSys.rename(new Path("/newDir/foo"), new Path("/newDir/fooBar"));
     fSys.rename(new Path("/newDir/foo"), new Path("/newDir/fooBar"));
     Assert.assertFalse(fSys.exists(new Path("/newDir/foo")));
     Assert.assertFalse(fSys.exists(new Path("/newDir/foo")));
     Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/foo")));
     Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/foo")));
-    Assert.assertTrue(fSys.isFile(FileSystemTestHelper.getTestRootPath(fSys,"/newDir/fooBar")));
+    Assert.assertTrue(fSys.isFile(fileSystemTestHelper.getTestRootPath(fSys,
+      "/newDir/fooBar")));
     Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"newDir/fooBar")));
     Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"newDir/fooBar")));
     
     
     
     
@@ -170,7 +174,8 @@ public class TestChRootedFileSystem {
     fSys.rename(new Path("/newDir/dirFoo"), new Path("/newDir/dirFooBar"));
     fSys.rename(new Path("/newDir/dirFoo"), new Path("/newDir/dirFooBar"));
     Assert.assertFalse(fSys.exists(new Path("/newDir/dirFoo")));
     Assert.assertFalse(fSys.exists(new Path("/newDir/dirFoo")));
     Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/dirFoo")));
     Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/dirFoo")));
-    Assert.assertTrue(fSys.isDirectory(FileSystemTestHelper.getTestRootPath(fSys,"/newDir/dirFooBar")));
+    Assert.assertTrue(fSys.isDirectory(fileSystemTestHelper.getTestRootPath(
+      fSys, "/newDir/dirFooBar")));
     Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"newDir/dirFooBar")));
     Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"newDir/dirFooBar")));
   }
   }
 
 
@@ -214,10 +219,10 @@ public class TestChRootedFileSystem {
     
     
 
-    FileSystemTestHelper.createFile(fSys, "/foo");
-    FileSystemTestHelper.createFile(fSys, "/bar");
+    fileSystemTestHelper.createFile(fSys, "/foo");
+    fileSystemTestHelper.createFile(fSys, "/bar");
     fSys.mkdirs(new Path("/dirX"));
     fSys.mkdirs(new Path("/dirX"));
-    fSys.mkdirs(FileSystemTestHelper.getTestRootPath(fSys, "/dirY"));
+    fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys, "/dirY"));
     fSys.mkdirs(new Path("/dirX/dirXX"));
     fSys.mkdirs(new Path("/dirX/dirXX"));
     
     
     dirPaths = fSys.listStatus(new Path("/"));
     dirPaths = fSys.listStatus(new Path("/"));
@@ -282,7 +287,7 @@ public class TestChRootedFileSystem {
 
     /* Filesystem impls (RawLocal and DistributedFileSystem do not check
      * for existing of working dir
-    absoluteDir = getTestRootPath(fSys, "nonexistingPath");
+    absoluteDir = fileSystemTestHelper.getTestRootPath(fSys, "nonexistingPath");
     try {
       fSys.setWorkingDirectory(absoluteDir);
       Assert.fail("cd to non existing dir should have failed");
@@ -307,7 +312,7 @@ public class TestChRootedFileSystem {
   @Test
   public void testResolvePath() throws IOException {
     Assert.assertEquals(chrootedTo, fSys.resolvePath(new Path("/"))); 
-    FileSystemTestHelper.createFile(fSys, "/foo");
+    fileSystemTestHelper.createFile(fSys, "/foo");
     Assert.assertEquals(new Path(chrootedTo, "foo"),
         fSys.resolvePath(new Path("/foo"))); 
   }
@@ -359,4 +364,4 @@ public class TestChRootedFileSystem {
     @Override
     public void initialize(URI name, Configuration conf) throws IOException {}
   }
-}
+}

+ 25 - 18
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFs.java

@@ -42,11 +42,14 @@ public class TestChRootedFs {
   FileContext fcTarget; // 
   Path chrootedTo;
 
+  private final FileContextTestHelper fileContextTestHelper =
+    new FileContextTestHelper();
+
   @Before
   public void setUp() throws Exception {
     // create the test root on local_fs
     fcTarget = FileContext.getLocalFSFileContext();
-    chrootedTo = FileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
+    chrootedTo = fileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
     // In case previous test was killed before cleanup
     fcTarget.delete(chrootedTo, true);
     
@@ -105,12 +108,12 @@ public class TestChRootedFs {
     
 
     // Create file 
-    FileContextTestHelper.createFileNonRecursive(fc, "/foo");
+    fileContextTestHelper.createFileNonRecursive(fc, "/foo");
     Assert.assertTrue(isFile(fc, new Path("/foo")));
     Assert.assertTrue(isFile(fcTarget, new Path(chrootedTo, "foo")));
     
     // Create file with recursive dir
-    FileContextTestHelper.createFile(fc, "/newDir/foo");
+    fileContextTestHelper.createFile(fc, "/newDir/foo");
     Assert.assertTrue(isFile(fc, new Path("/newDir/foo")));
     Assert.assertTrue(isFile(fcTarget, new Path(chrootedTo,"newDir/foo")));
     
@@ -120,7 +123,7 @@ public class TestChRootedFs {
     Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"newDir/foo")));
     
     // Create file with a 2 component dirs recursively
-    FileContextTestHelper.createFile(fc, "/newDir/newDir2/foo");
+    fileContextTestHelper.createFile(fc, "/newDir/newDir2/foo");
     Assert.assertTrue(isFile(fc, new Path("/newDir/newDir2/foo")));
     Assert.assertTrue(isFile(fcTarget, new Path(chrootedTo,"newDir/newDir2/foo")));
     
@@ -133,11 +136,13 @@ public class TestChRootedFs {
   
   @Test
   public void testMkdirDelete() throws IOException {
-    fc.mkdir(FileContextTestHelper.getTestRootPath(fc, "/dirX"), FileContext.DEFAULT_PERM, false);
+    fc.mkdir(fileContextTestHelper.getTestRootPath(fc, "/dirX"),
+      FileContext.DEFAULT_PERM, false);
     Assert.assertTrue(isDir(fc, new Path("/dirX")));
     Assert.assertTrue(isDir(fcTarget, new Path(chrootedTo,"dirX")));
     
-    fc.mkdir(FileContextTestHelper.getTestRootPath(fc, "/dirX/dirY"), FileContext.DEFAULT_PERM, false);
+    fc.mkdir(fileContextTestHelper.getTestRootPath(fc, "/dirX/dirY"),
+      FileContext.DEFAULT_PERM, false);
     Assert.assertTrue(isDir(fc, new Path("/dirX/dirY")));
     Assert.assertTrue(isDir(fcTarget, new Path(chrootedTo,"dirX/dirY")));
     
@@ -155,11 +160,12 @@ public class TestChRootedFs {
   @Test
   public void testRename() throws IOException {
     // Rename a file
-    FileContextTestHelper.createFile(fc, "/newDir/foo");
+    fileContextTestHelper.createFile(fc, "/newDir/foo");
     fc.rename(new Path("/newDir/foo"), new Path("/newDir/fooBar"));
     Assert.assertFalse(exists(fc, new Path("/newDir/foo")));
     Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"newDir/foo")));
-    Assert.assertTrue(isFile(fc, FileContextTestHelper.getTestRootPath(fc,"/newDir/fooBar")));
+    Assert.assertTrue(isFile(fc,
+      fileContextTestHelper.getTestRootPath(fc,"/newDir/fooBar")));
     Assert.assertTrue(isFile(fcTarget, new Path(chrootedTo,"newDir/fooBar")));
     
     
@@ -168,7 +174,8 @@ public class TestChRootedFs {
     fc.rename(new Path("/newDir/dirFoo"), new Path("/newDir/dirFooBar"));
     Assert.assertFalse(exists(fc, new Path("/newDir/dirFoo")));
     Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"newDir/dirFoo")));
-    Assert.assertTrue(isDir(fc, FileContextTestHelper.getTestRootPath(fc,"/newDir/dirFooBar")));
+    Assert.assertTrue(isDir(fc,
+      fileContextTestHelper.getTestRootPath(fc,"/newDir/dirFooBar")));
     Assert.assertTrue(isDir(fcTarget, new Path(chrootedTo,"newDir/dirFooBar")));
   }
   
@@ -202,10 +209,10 @@ public class TestChRootedFs {
     
     
 
-    FileContextTestHelper.createFileNonRecursive(fc, "/foo");
-    FileContextTestHelper.createFileNonRecursive(fc, "/bar");
+    fileContextTestHelper.createFileNonRecursive(fc, "/foo");
+    fileContextTestHelper.createFileNonRecursive(fc, "/bar");
     fc.mkdir(new Path("/dirX"), FileContext.DEFAULT_PERM, false);
-    fc.mkdir(FileContextTestHelper.getTestRootPath(fc, "/dirY"),
+    fc.mkdir(fileContextTestHelper.getTestRootPath(fc, "/dirY"),
         FileContext.DEFAULT_PERM, false);
     fc.mkdir(new Path("/dirX/dirXX"), FileContext.DEFAULT_PERM, false);
     
@@ -213,16 +220,16 @@ public class TestChRootedFs {
     Assert.assertEquals(4, dirPaths.length);
     
     // Note the the file status paths are the full paths on target
-    fs = FileContextTestHelper.containsPath(fcTarget, "foo", dirPaths);
+    fs = fileContextTestHelper.containsPath(fcTarget, "foo", dirPaths);
       Assert.assertNotNull(fs);
       Assert.assertTrue(fs.isFile());
-    fs = FileContextTestHelper.containsPath(fcTarget, "bar", dirPaths);
+    fs = fileContextTestHelper.containsPath(fcTarget, "bar", dirPaths);
       Assert.assertNotNull(fs);
       Assert.assertTrue(fs.isFile());
-    fs = FileContextTestHelper.containsPath(fcTarget, "dirX", dirPaths);
+    fs = fileContextTestHelper.containsPath(fcTarget, "dirX", dirPaths);
       Assert.assertNotNull(fs);
       Assert.assertTrue(fs.isDirectory());
-    fs = FileContextTestHelper.containsPath(fcTarget, "dirY", dirPaths);
+    fs = fileContextTestHelper.containsPath(fcTarget, "dirY", dirPaths);
       Assert.assertNotNull(fs);
       Assert.assertTrue(fs.isDirectory());
   }
@@ -273,7 +280,7 @@ public class TestChRootedFs {
     fc.mkdir(new Path("newDir"), FileContext.DEFAULT_PERM, true);
     Assert.assertTrue(isDir(fc, new Path(absoluteDir, "newDir")));
 
-    absoluteDir = getTestRootPath(fc, "nonexistingPath");
+    absoluteDir = fileContextTestHelper.getTestRootPath(fc, "nonexistingPath");
     try {
       fc.setWorkingDirectory(absoluteDir);
       Assert.fail("cd to non existing dir should have failed");
@@ -297,7 +304,7 @@ public class TestChRootedFs {
   @Test
   public void testResolvePath() throws IOException {
     Assert.assertEquals(chrootedTo, fc.getDefaultFileSystem().resolvePath(new Path("/"))); 
-    FileContextTestHelper.createFile(fc, "/foo");
+    fileContextTestHelper.createFile(fc, "/foo");
     Assert.assertEquals(new Path(chrootedTo, "foo"),
         fc.getDefaultFileSystem().resolvePath(new Path("/foo"))); 
   }

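The change repeated through these hunks swaps static FileSystemTestHelper/FileContextTestHelper calls for a helper object held by each test class. A minimal sketch of the idiom follows; the names TestHelperSketch and Helper are invented stand-ins for the real helper classes, and the per-instance test root is an assumed rationale for the refactoring, not something stated in the commit:

// Sketch only: "Helper" stands in for FileSystemTestHelper, not its
// actual implementation.
import java.util.UUID;

public class TestHelperSketch {

  // Before the change: a static method, hence one shared test root
  // for every caller in the JVM.
  static String staticTestRootPath(String base) {
    return base + "/test";
  }

  // After the change: each test class constructs its own helper, so
  // each one works under its own root directory.
  static final class Helper {
    private final String root = "/test-" + UUID.randomUUID();

    String testRootPath(String base) {
      return base + root;
    }
  }

  public static void main(String[] args) {
    Helper a = new Helper();
    Helper b = new Helper();
    System.out.println(staticTestRootPath("file:/tmp")); // same for all callers
    System.out.println(a.testRootPath("file:/tmp"));     // unique per instance
    System.out.println(b.testRootPath("file:/tmp"));
  }
}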
+ 1 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java

@@ -53,8 +53,7 @@ public class TestFSMainOperationsLocalFileSystem extends FSMainOperationsBaseTes
   @Test
   @Override
   public void testWDAbsolute() throws IOException {
-    Path absoluteDir = FileSystemTestHelper.getTestRootPath(fSys,
-        "test/existingDir");
+    Path absoluteDir = getTestRootPath(fSys, "test/existingDir");
     fSys.mkdirs(absoluteDir);
     fSys.setWorkingDirectory(absoluteDir);
     Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java

@@ -52,7 +52,7 @@ public class TestViewFileSystemDelegation { //extends ViewFileSystemTestSetup {
     conf.set("fs."+scheme+".impl", clazz.getName());
     FakeFileSystem fs = (FakeFileSystem)FileSystem.get(uri, conf);
     assertEquals(uri, fs.getUri());
-    Path targetPath = FileSystemTestHelper.getAbsoluteTestRootPath(fs);
+    Path targetPath = new FileSystemTestHelper().getAbsoluteTestRootPath(fs);
     ConfigUtil.addLink(conf, "/mounts/"+scheme, targetPath.toUri());
     return fs;
   }

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java

@@ -51,7 +51,7 @@ public class TestViewFileSystemLocalFileSystem extends ViewFileSystemBaseTest {
   @Override
   @After
   public void tearDown() throws Exception {
-    fsTarget.delete(FileSystemTestHelper.getTestRootPath(fsTarget), true);
+    fsTarget.delete(getTestRootPath(fsTarget), true);
     super.tearDown();
   }
 }

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAuthorityLocalFileSystem.java

@@ -59,7 +59,7 @@ public class TestViewFileSystemWithAuthorityLocalFileSystem extends ViewFileSyst
   @Override
   @After
   public void tearDown() throws Exception {
-    fsTarget.delete(FileSystemTestHelper.getTestRootPath(fsTarget), true);
+    fsTarget.delete(getTestRootPath(fsTarget), true);
     super.tearDown();
   }
  

+ 6 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java

@@ -36,6 +36,9 @@ public class TestViewFsTrash {
   FileSystem fsView;
   Configuration conf;
 
+  private final FileSystemTestHelper fileSystemTestHelper =
+    new FileSystemTestHelper();
+
   static class TestLFS extends LocalFileSystem {
     Path home;
     TestLFS() throws IOException {
@@ -54,8 +57,8 @@ public class TestViewFsTrash {
   @Before
   public void setUp() throws Exception {
     fsTarget = FileSystem.getLocal(new Configuration());
-    fsTarget.mkdirs(new Path(FileSystemTestHelper.
-        getTestRootPath(fsTarget), "dir1"));
+    fsTarget.mkdirs(new Path(fileSystemTestHelper.getTestRootPath(fsTarget),
+      "dir1"));
     conf = ViewFileSystemTestSetup.createConfig();
     fsView = ViewFileSystemTestSetup.setupForViewFileSystem(conf, fsTarget);
     conf.set("fs.defaultFS", FsConstants.VIEWFS_URI.toString());
@@ -70,7 +73,7 @@ public class TestViewFsTrash {
   
   @Test
   public void testTrash() throws IOException {
-    TestTrash.trashShell(conf, FileSystemTestHelper.getTestRootPath(fsView),
+    TestTrash.trashShell(conf, fileSystemTestHelper.getTestRootPath(fsView),
         fsTarget, new Path(fsTarget.getHomeDirectory(), ".Trash/Current"));
   }
   

+ 70 - 49
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java

@@ -69,6 +69,15 @@ public class ViewFileSystemBaseTest {
   Path targetTestRoot;
   Configuration conf;
 
+  private final FileSystemTestHelper fileSystemTestHelper;
+
+  public ViewFileSystemBaseTest() {
+    this(new FileSystemTestHelper());
+  }
+
+  public ViewFileSystemBaseTest(FileSystemTestHelper fileSystemTestHelper) {
+    this.fileSystemTestHelper = fileSystemTestHelper;
+  }
 
   @Before
   public void setUp() throws Exception {
@@ -94,11 +103,11 @@ public class ViewFileSystemBaseTest {
 
   @After
   public void tearDown() throws Exception {
-    fsTarget.delete(FileSystemTestHelper.getTestRootPath(fsTarget), true);
+    fsTarget.delete(getTestRootPath(fsTarget), true);
   }
   
   void initializeTargetTestRoot() throws IOException {
-    targetTestRoot = FileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
+    targetTestRoot = getAbsoluteTestRootPath(fsTarget);
     // In case previous test was killed before cleanup
     fsTarget.delete(targetTestRoot, true);
     
@@ -199,7 +208,7 @@ public class ViewFileSystemBaseTest {
   @Test
   public void testOperationsThroughMountLinks() throws IOException {
     // Create file 
-    FileSystemTestHelper.createFile(fsView, "/user/foo");
+    fileSystemTestHelper.createFile(fsView, "/user/foo");
     Assert.assertTrue("Created file should be type file",
         fsView.isFile(new Path("/user/foo")));
     Assert.assertTrue("Target of created file should be type file",
@@ -214,7 +223,7 @@ public class ViewFileSystemBaseTest {
         fsTarget.exists(new Path(targetTestRoot,"user/foo")));
     
     // Create file with a 2 component dirs
-    FileSystemTestHelper.createFile(fsView, "/internalDir/linkToDir2/foo");
+    fileSystemTestHelper.createFile(fsView, "/internalDir/linkToDir2/foo");
     Assert.assertTrue("Created file should be type file",
         fsView.isFile(new Path("/internalDir/linkToDir2/foo")));
     Assert.assertTrue("Target of created file should be type file",
@@ -230,14 +239,15 @@ public class ViewFileSystemBaseTest {
     
     
     // Create file with a 3 component dirs
-    FileSystemTestHelper.createFile(fsView, "/internalDir/internalDir2/linkToDir3/foo");
+    fileSystemTestHelper.createFile(fsView,
+      "/internalDir/internalDir2/linkToDir3/foo");
     Assert.assertTrue("Created file should be type file",
         fsView.isFile(new Path("/internalDir/internalDir2/linkToDir3/foo")));
     Assert.assertTrue("Target of created file should be type file",
         fsTarget.isFile(new Path(targetTestRoot,"dir3/foo")));
     
     // Recursive Create file with missing dirs
-    FileSystemTestHelper.createFile(fsView,
+    fileSystemTestHelper.createFile(fsView,
         "/internalDir/linkToDir2/missingDir/miss2/foo");
     Assert.assertTrue("Created file should be type file",
         fsView.isFile(new Path("/internalDir/linkToDir2/missingDir/miss2/foo")));
@@ -256,14 +266,13 @@ public class ViewFileSystemBaseTest {
     
       
     // mkdir
-    fsView.mkdirs(FileSystemTestHelper.getTestRootPath(fsView, "/user/dirX"));
+    fsView.mkdirs(getTestRootPath(fsView, "/user/dirX"));
     Assert.assertTrue("New dir should be type dir", 
         fsView.isDirectory(new Path("/user/dirX")));
     Assert.assertTrue("Target of new dir should be of type dir",
         fsTarget.isDirectory(new Path(targetTestRoot,"user/dirX")));
     
-    fsView.mkdirs(
-        FileSystemTestHelper.getTestRootPath(fsView, "/user/dirX/dirY"));
+    fsView.mkdirs(getTestRootPath(fsView, "/user/dirX/dirY"));
     Assert.assertTrue("New dir should be type dir", 
         fsView.isDirectory(new Path("/user/dirX/dirY")));
     Assert.assertTrue("Target of new dir should be of type dir",
@@ -285,14 +294,14 @@ public class ViewFileSystemBaseTest {
     Assert.assertFalse(fsTarget.exists(new Path(targetTestRoot,"user/dirX")));
     
     // Rename a file 
-    FileSystemTestHelper.createFile(fsView, "/user/foo");
+    fileSystemTestHelper.createFile(fsView, "/user/foo");
     fsView.rename(new Path("/user/foo"), new Path("/user/fooBar"));
     Assert.assertFalse("Renamed src should not exist", 
         fsView.exists(new Path("/user/foo")));
     Assert.assertFalse("Renamed src should not exist in target",
         fsTarget.exists(new Path(targetTestRoot,"user/foo")));
     Assert.assertTrue("Renamed dest should  exist as file",
-        fsView.isFile(FileSystemTestHelper.getTestRootPath(fsView,"/user/fooBar")));
+        fsView.isFile(getTestRootPath(fsView, "/user/fooBar")));
     Assert.assertTrue("Renamed dest should  exist as file in target",
         fsTarget.isFile(new Path(targetTestRoot,"user/fooBar")));
     
@@ -303,7 +312,7 @@ public class ViewFileSystemBaseTest {
     Assert.assertFalse("Renamed src should not exist in target",
         fsTarget.exists(new Path(targetTestRoot,"user/dirFoo")));
     Assert.assertTrue("Renamed dest should  exist as dir",
-        fsView.isDirectory(FileSystemTestHelper.getTestRootPath(fsView,"/user/dirFooBar")));
+        fsView.isDirectory(getTestRootPath(fsView, "/user/dirFooBar")));
     Assert.assertTrue("Renamed dest should  exist as dir in target",
         fsTarget.isDirectory(new Path(targetTestRoot,"user/dirFooBar")));
     
@@ -322,12 +331,12 @@ public class ViewFileSystemBaseTest {
   // rename across mount points that point to same target also fail 
   @Test(expected=IOException.class) 
   public void testRenameAcrossMounts1() throws IOException {
-    FileSystemTestHelper.createFile(fsView, "/user/foo");
+    fileSystemTestHelper.createFile(fsView, "/user/foo");
     fsView.rename(new Path("/user/foo"), new Path("/user2/fooBarBar"));
     /* - code if we had wanted this to suceed
     Assert.assertFalse(fSys.exists(new Path("/user/foo")));
     Assert.assertFalse(fSysLocal.exists(new Path(targetTestRoot,"user/foo")));
-    Assert.assertTrue(fSys.isFile(FileSystemTestHelper.getTestRootPath(fSys,"/user2/fooBarBar")));
+    Assert.assertTrue(fSys.isFile(getTestRootPath(fSys, "/user2/fooBarBar")));
     Assert.assertTrue(fSysLocal.isFile(new Path(targetTestRoot,"user/fooBarBar")));
     */
   }
@@ -338,7 +347,7 @@ public class ViewFileSystemBaseTest {
 
   @Test(expected=IOException.class) 
   public void testRenameAcrossMounts2() throws IOException {
-    FileSystemTestHelper.createFile(fsView, "/user/foo");
+    fileSystemTestHelper.createFile(fsView, "/user/foo");
     fsView.rename(new Path("/user/foo"), new Path("/data/fooBar"));
   }
   
@@ -347,8 +356,7 @@ public class ViewFileSystemBaseTest {
   @Test
   public void testGetBlockLocations() throws IOException {
     Path targetFilePath = new Path(targetTestRoot,"data/largeFile");
-    FileSystemTestHelper.createFile(fsTarget, 
-        targetFilePath, 10, 1024);
+    fileSystemTestHelper.createFile(fsTarget,  targetFilePath, 10, 1024);
     Path viewFilePath = new Path("/data/largeFile");
     Assert.assertTrue("Created File should be type File",
         fsView.isFile(viewFilePath));
@@ -394,19 +402,22 @@ public class ViewFileSystemBaseTest {
     FileStatus[] dirPaths = fsView.listStatus(new Path("/"));
     FileStatus fs;
     Assert.assertEquals(getExpectedDirPaths(), dirPaths.length);
-    fs = FileSystemTestHelper.containsPath(fsView, "/user", dirPaths);
+    fs = fileSystemTestHelper.containsPath(fsView, "/user", dirPaths);
       Assert.assertNotNull(fs);
       Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
-    fs = FileSystemTestHelper.containsPath(fsView, "/data", dirPaths);
+    fs = fileSystemTestHelper.containsPath(fsView, "/data", dirPaths);
       Assert.assertNotNull(fs);
       Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
-    fs = FileSystemTestHelper.containsPath(fsView, "/internalDir", dirPaths);
+    fs = fileSystemTestHelper.containsPath(fsView, "/internalDir",
+      dirPaths);
       Assert.assertNotNull(fs);
       Assert.assertTrue("A mount should appear as symlink", fs.isDirectory());
-    fs = FileSystemTestHelper.containsPath(fsView, "/danglingLink", dirPaths);
+    fs = fileSystemTestHelper.containsPath(fsView, "/danglingLink",
+      dirPaths);
       Assert.assertNotNull(fs);
       Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
-    fs = FileSystemTestHelper.containsPath(fsView, "/linkToAFile", dirPaths);
+    fs = fileSystemTestHelper.containsPath(fsView, "/linkToAFile",
+      dirPaths);
       Assert.assertNotNull(fs);
       Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
       
@@ -416,11 +427,12 @@ public class ViewFileSystemBaseTest {
       dirPaths = fsView.listStatus(new Path("/internalDir"));
       Assert.assertEquals(2, dirPaths.length);
 
-      fs = FileSystemTestHelper.containsPath(fsView, "/internalDir/internalDir2", dirPaths);
+      fs = fileSystemTestHelper.containsPath(fsView, "/internalDir/internalDir2",
+        dirPaths);
         Assert.assertNotNull(fs);
         Assert.assertTrue("A mount should appear as symlink", fs.isDirectory());
-      fs = FileSystemTestHelper.containsPath(fsView, "/internalDir/linkToDir2",
-          dirPaths);
+      fs = fileSystemTestHelper.containsPath(fsView, "/internalDir/linkToDir2",
+        dirPaths);
         Assert.assertNotNull(fs);
         Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
   }
@@ -436,22 +448,22 @@ public class ViewFileSystemBaseTest {
     Assert.assertEquals(0, dirPaths.length);
     
     // add a file
-    long len = FileSystemTestHelper.createFile(fsView, "/data/foo");
+    long len = fileSystemTestHelper.createFile(fsView, "/data/foo");
     dirPaths = fsView.listStatus(new Path("/data"));
     Assert.assertEquals(1, dirPaths.length);
-    fs = FileSystemTestHelper.containsPath(fsView, "/data/foo", dirPaths);
+    fs = fileSystemTestHelper.containsPath(fsView, "/data/foo", dirPaths);
     Assert.assertNotNull(fs);
     Assert.assertTrue("Created file shoudl appear as a file", fs.isFile());
     Assert.assertEquals(len, fs.getLen());
     
     // add a dir
-    fsView.mkdirs(FileSystemTestHelper.getTestRootPath(fsView, "/data/dirX"));
+    fsView.mkdirs(getTestRootPath(fsView, "/data/dirX"));
     dirPaths = fsView.listStatus(new Path("/data"));
     Assert.assertEquals(2, dirPaths.length);
-    fs = FileSystemTestHelper.containsPath(fsView, "/data/foo", dirPaths);
+    fs = fileSystemTestHelper.containsPath(fsView, "/data/foo", dirPaths);
     Assert.assertNotNull(fs);
     Assert.assertTrue("Created file shoudl appear as a file", fs.isFile());
-    fs = FileSystemTestHelper.containsPath(fsView, "/data/dirX", dirPaths);
+    fs = fileSystemTestHelper.containsPath(fsView, "/data/dirX", dirPaths);
     Assert.assertNotNull(fs);
     Assert.assertTrue("Created dir should appear as a dir", fs.isDirectory()); 
   }
@@ -504,18 +516,16 @@ public class ViewFileSystemBaseTest {
   
   @Test
   public void testResolvePathThroughMountPoints() throws IOException {
-    FileSystemTestHelper.createFile(fsView, "/user/foo");
+    fileSystemTestHelper.createFile(fsView, "/user/foo");
     Assert.assertEquals(new Path(targetTestRoot,"user/foo"),
                           fsView.resolvePath(new Path("/user/foo")));
     
-    fsView.mkdirs(
-        FileSystemTestHelper.getTestRootPath(fsView, "/user/dirX"));
+    fsView.mkdirs(getTestRootPath(fsView, "/user/dirX"));
     Assert.assertEquals(new Path(targetTestRoot,"user/dirX"),
         fsView.resolvePath(new Path("/user/dirX")));
 
     
-    fsView.mkdirs(
-        FileSystemTestHelper.getTestRootPath(fsView, "/user/dirX/dirY"));
+    fsView.mkdirs(getTestRootPath(fsView, "/user/dirX/dirY"));
     Assert.assertEquals(new Path(targetTestRoot,"user/dirX/dirY"),
         fsView.resolvePath(new Path("/user/dirX/dirY")));
   }
@@ -533,8 +543,7 @@ public class ViewFileSystemBaseTest {
 
   @Test(expected=FileNotFoundException.class) 
   public void testResolvePathMissingThroughMountPoints2() throws IOException {
-    fsView.mkdirs(
-        FileSystemTestHelper.getTestRootPath(fsView, "/user/dirX"));
+    fsView.mkdirs(getTestRootPath(fsView, "/user/dirX"));
     fsView.resolvePath(new Path("/user/dirX/nonExisting"));
   }
   
@@ -550,57 +559,55 @@ public class ViewFileSystemBaseTest {
   // Mkdir on existing internal mount table succeed except for /
   @Test(expected=AccessControlException.class) 
   public void testInternalMkdirSlash() throws IOException {
-    fsView.mkdirs(FileSystemTestHelper.getTestRootPath(fsView, "/"));
+    fsView.mkdirs(getTestRootPath(fsView, "/"));
   }
   
   public void testInternalMkdirExisting1() throws IOException {
     Assert.assertTrue("mkdir of existing dir should succeed", 
-        fsView.mkdirs(FileSystemTestHelper.getTestRootPath(fsView,
-        "/internalDir")));
+        fsView.mkdirs(getTestRootPath(fsView, "/internalDir")));
   }
 
   public void testInternalMkdirExisting2() throws IOException {
     Assert.assertTrue("mkdir of existing dir should succeed", 
-        fsView.mkdirs(FileSystemTestHelper.getTestRootPath(fsView,
-        "/internalDir/linkToDir2")));
+        fsView.mkdirs(getTestRootPath(fsView, "/internalDir/linkToDir2")));
   }
   
   // Mkdir for new internal mount table should fail
   @Test(expected=AccessControlException.class) 
   public void testInternalMkdirNew() throws IOException {
-    fsView.mkdirs(FileSystemTestHelper.getTestRootPath(fsView, "/dirNew"));
+    fsView.mkdirs(getTestRootPath(fsView, "/dirNew"));
   }
   @Test(expected=AccessControlException.class) 
   public void testInternalMkdirNew2() throws IOException {
-    fsView.mkdirs(FileSystemTestHelper.getTestRootPath(fsView, "/internalDir/dirNew"));
+    fsView.mkdirs(getTestRootPath(fsView, "/internalDir/dirNew"));
   }
   
   // Create File on internal mount table should fail
   
   @Test(expected=AccessControlException.class) 
   public void testInternalCreate1() throws IOException {
-    FileSystemTestHelper.createFile(fsView, "/foo"); // 1 component
+    fileSystemTestHelper.createFile(fsView, "/foo"); // 1 component
   }
   
   @Test(expected=AccessControlException.class) 
   public void testInternalCreate2() throws IOException {  // 2 component
-    FileSystemTestHelper.createFile(fsView, "/internalDir/foo");
+    fileSystemTestHelper.createFile(fsView, "/internalDir/foo");
   }
   
   @Test(expected=AccessControlException.class) 
   public void testInternalCreateMissingDir() throws IOException {
-    FileSystemTestHelper.createFile(fsView, "/missingDir/foo");
+    fileSystemTestHelper.createFile(fsView, "/missingDir/foo");
   }
   
   @Test(expected=AccessControlException.class) 
   public void testInternalCreateMissingDir2() throws IOException {
-    FileSystemTestHelper.createFile(fsView, "/missingDir/miss2/foo");
+    fileSystemTestHelper.createFile(fsView, "/missingDir/miss2/foo");
   }
   
   
   @Test(expected=AccessControlException.class) 
   public void testInternalCreateMissingDir3() throws IOException {
-    FileSystemTestHelper.createFile(fsView, "/internalDir/miss2/foo");
+    fileSystemTestHelper.createFile(fsView, "/internalDir/miss2/foo");
   }
   
   // Delete on internal mount table should fail
@@ -662,4 +669,18 @@ public class ViewFileSystemBaseTest {
   public void testInternalSetOwner() throws IOException {
     fsView.setOwner(new Path("/internalDir"), "foo", "bar");
   }
+
+  protected Path getAbsoluteTestRootPath(FileSystem fSys) throws IOException {
+    return fileSystemTestHelper.getAbsoluteTestRootPath(fSys);
+  }
+
+  protected Path getTestRootPath(FileSystem fSys) throws IOException {
+    return fileSystemTestHelper.getTestRootPath(fSys);
+  }
+
+  protected Path getTestRootPath(FileSystem fSys, String pathString)
+      throws IOException {
+
+    return fileSystemTestHelper.getTestRootPath(fSys, pathString);
+  }
 }

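The constructor pair added to ViewFileSystemBaseTest above lets a subclass inject its own helper, while the protected getTestRootPath/getAbsoluteTestRootPath delegates keep the field itself private. A hedged sketch of how a subclass might use it; the subclass name here is invented for illustration and is not part of this commit:

// Hypothetical subclass; only the super(...) constructor and the
// protected delegates are taken from the hunks above.
public class TestViewFileSystemOnSomeFs extends ViewFileSystemBaseTest {
  public TestViewFileSystemOnSomeFs() {
    // Supply a custom FileSystemTestHelper (e.g. one rooted elsewhere)
    // instead of the default created by the no-arg constructor.
    super(new FileSystemTestHelper());
  }
}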
+ 6 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java

@@ -50,6 +50,9 @@ public class ViewFileSystemTestSetup {
   
   static public String ViewFSTestDir = "/testDir";
 
+  private static final FileSystemTestHelper FILE_SYSTEM_TEST_HELPER =
+    new FileSystemTestHelper();
+
   /**
    * 
    * @param fsTarget - the target fs of the view fs.
@@ -60,14 +63,14 @@ public class ViewFileSystemTestSetup {
     /**
      * create the test root on local_fs - the  mount table will point here
      */
-    Path targetOfTests = FileSystemTestHelper.getTestRootPath(fsTarget);
+    Path targetOfTests = FILE_SYSTEM_TEST_HELPER.getTestRootPath(fsTarget);
     // In case previous test was killed before cleanup
     fsTarget.delete(targetOfTests, true);
     fsTarget.mkdirs(targetOfTests);
 
 
     // Set up viewfs link for test dir as described above
-    String testDir = FileSystemTestHelper.getTestRootPath(fsTarget).toUri()
+    String testDir = FILE_SYSTEM_TEST_HELPER.getTestRootPath(fsTarget).toUri()
         .getPath();
     linkUpFirstComponents(conf, testDir, fsTarget, "test dir");
     
@@ -93,7 +96,7 @@ public class ViewFileSystemTestSetup {
    * delete the test directory in the target  fs
    */
   static public void tearDown(FileSystem fsTarget) throws Exception {
-    Path targetOfTests = FileSystemTestHelper.getTestRootPath(fsTarget);
+    Path targetOfTests = FILE_SYSTEM_TEST_HELPER.getTestRootPath(fsTarget);
     fsTarget.delete(targetOfTests, true);
   }
   

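ViewFileSystemTestSetup keeps all of its methods static, so the hunk above gives it a single shared helper constant rather than per-instance state. A self-contained sketch of that variant; HelperStub is an invented stand-in for FileSystemTestHelper, not the real class:

// Sketch of the static-utility pattern above.
public class SetupSketch {

  // Invented stand-in for FileSystemTestHelper.
  static final class HelperStub {
    String getTestRootPath(String base) {
      return base + "/testDir";
    }
  }

  // Mirrors the FILE_SYSTEM_TEST_HELPER constant in the hunk: one shared
  // instance suffices because every method on this class is static.
  private static final HelperStub FILE_SYSTEM_TEST_HELPER = new HelperStub();

  static String targetOfTests(String base) {
    return FILE_SYSTEM_TEST_HELPER.getTestRootPath(base);
  }

  public static void main(String[] args) {
    System.out.println(targetOfTests("file:/tmp"));
  }
}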
+ 51 - 41
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java

@@ -76,6 +76,16 @@ public class ViewFsBaseTest {
   FileContext xfcViewWithAuthority; // same as fsView but with authority
   URI schemeWithAuthority;
 
+  private final FileContextTestHelper fileContextTestHelper;
+
+  public ViewFsBaseTest() {
+    this(new FileContextTestHelper());
+  }
+
+  public ViewFsBaseTest(FileContextTestHelper fileContextTestHelper) {
+    this.fileContextTestHelper = fileContextTestHelper;
+  }
+
   @Before
   public void setUp() throws Exception {
     initializeTargetTestRoot();
@@ -89,7 +99,7 @@ public class ViewFsBaseTest {
         FileContext.DEFAULT_PERM, true);
     fcTarget.mkdir(new Path(targetTestRoot,"dir3"),
         FileContext.DEFAULT_PERM, true);
-    FileContextTestHelper.createFile(fcTarget, new Path(targetTestRoot,"aFile"));
+    fileContextTestHelper.createFile(fcTarget, new Path(targetTestRoot,"aFile"));
     
     
     // Now we use the mount fs to set links to user and dir
@@ -118,7 +128,7 @@ public class ViewFsBaseTest {
   }
   
   void initializeTargetTestRoot() throws IOException {
-    targetTestRoot = FileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
+    targetTestRoot = fileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
     // In case previous test was killed before cleanup
     fcTarget.delete(targetTestRoot, true);
     
@@ -127,7 +137,7 @@ public class ViewFsBaseTest {
 
   @After
   public void tearDown() throws Exception {
-    fcTarget.delete(FileContextTestHelper.getTestRootPath(fcTarget), true);
+    fcTarget.delete(fileContextTestHelper.getTestRootPath(fcTarget), true);
   }
   
   @Test
@@ -181,7 +191,7 @@ public class ViewFsBaseTest {
   @Test
   public void testOperationsThroughMountLinks() throws IOException {
     // Create file 
-    FileContextTestHelper.createFileNonRecursive(fcView, "/user/foo");
+    fileContextTestHelper.createFileNonRecursive(fcView, "/user/foo");
     Assert.assertTrue("Create file should be file",
 		isFile(fcView, new Path("/user/foo")));
     Assert.assertTrue("Target of created file should be type file",
@@ -196,7 +206,7 @@ public class ViewFsBaseTest {
         exists(fcTarget, new Path(targetTestRoot,"user/foo")));
     
     // Create file with a 2 component dirs
-    FileContextTestHelper.createFileNonRecursive(fcView,
+    fileContextTestHelper.createFileNonRecursive(fcView,
         "/internalDir/linkToDir2/foo");
     Assert.assertTrue("Created file should be type file",
         isFile(fcView, new Path("/internalDir/linkToDir2/foo")));
@@ -213,7 +223,7 @@ public class ViewFsBaseTest {
     
     
     // Create file with a 3 component dirs
-    FileContextTestHelper.createFileNonRecursive(fcView,
+    fileContextTestHelper.createFileNonRecursive(fcView,
         "/internalDir/internalDir2/linkToDir3/foo");
     Assert.assertTrue("Created file should be of type file", 
         isFile(fcView, new Path("/internalDir/internalDir2/linkToDir3/foo")));
@@ -221,7 +231,7 @@ public class ViewFsBaseTest {
         isFile(fcTarget, new Path(targetTestRoot,"dir3/foo")));
     
     // Recursive Create file with missing dirs
-    FileContextTestHelper.createFile(fcView,
+    fileContextTestHelper.createFile(fcView,
         "/internalDir/linkToDir2/missingDir/miss2/foo");
     Assert.assertTrue("Created file should be of type file",
       isFile(fcView, new Path("/internalDir/linkToDir2/missingDir/miss2/foo")));
@@ -239,15 +249,15 @@ public class ViewFsBaseTest {
     
       
     // mkdir
-    fcView.mkdir(FileContextTestHelper.getTestRootPath(fcView, "/user/dirX"),
+    fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView, "/user/dirX"),
         FileContext.DEFAULT_PERM, false);
     Assert.assertTrue("New dir should be type dir", 
         isDir(fcView, new Path("/user/dirX")));
     Assert.assertTrue("Target of new dir should be of type dir",
         isDir(fcTarget, new Path(targetTestRoot,"user/dirX")));
     
-    fcView.mkdir(FileContextTestHelper.getTestRootPath(fcView, "/user/dirX/dirY"),
-        FileContext.DEFAULT_PERM, false);
+    fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView,
+        "/user/dirX/dirY"), FileContext.DEFAULT_PERM, false);
     Assert.assertTrue("New dir should be type dir", 
         isDir(fcView, new Path("/user/dirX/dirY")));
     Assert.assertTrue("Target of new dir should be of type dir",
@@ -270,13 +280,13 @@ public class ViewFsBaseTest {
         exists(fcTarget, new Path(targetTestRoot,"user/dirX")));
     
     // Rename a file 
-    FileContextTestHelper.createFile(fcView, "/user/foo");
+    fileContextTestHelper.createFile(fcView, "/user/foo");
     fcView.rename(new Path("/user/foo"), new Path("/user/fooBar"));
     Assert.assertFalse("Renamed src should not exist", 
         exists(fcView, new Path("/user/foo")));
     Assert.assertFalse(exists(fcTarget, new Path(targetTestRoot,"user/foo")));
     Assert.assertTrue(isFile(fcView,
-        FileContextTestHelper.getTestRootPath(fcView,"/user/fooBar")));
+        fileContextTestHelper.getTestRootPath(fcView,"/user/fooBar")));
     Assert.assertTrue(isFile(fcTarget, new Path(targetTestRoot,"user/fooBar")));
     
     fcView.mkdir(new Path("/user/dirFoo"), FileContext.DEFAULT_PERM, false);
@@ -287,7 +297,7 @@ public class ViewFsBaseTest {
         exists(fcTarget, new Path(targetTestRoot,"user/dirFoo")));
     Assert.assertTrue("Renamed dest should  exist as dir",
         isDir(fcView,
-        FileContextTestHelper.getTestRootPath(fcView,"/user/dirFooBar")));
+        fileContextTestHelper.getTestRootPath(fcView,"/user/dirFooBar")));
     Assert.assertTrue("Renamed dest should  exist as dir in target",
         isDir(fcTarget,new Path(targetTestRoot,"user/dirFooBar")));
     
@@ -309,13 +319,13 @@ public class ViewFsBaseTest {
   // rename across mount points that point to same target also fail 
   @Test(expected=IOException.class) 
   public void testRenameAcrossMounts1() throws IOException {
-    FileContextTestHelper.createFile(fcView, "/user/foo");
+    fileContextTestHelper.createFile(fcView, "/user/foo");
     fcView.rename(new Path("/user/foo"), new Path("/user2/fooBarBar"));
     /* - code if we had wanted this to succeed
     Assert.assertFalse(exists(fc, new Path("/user/foo")));
     Assert.assertFalse(exists(fclocal, new Path(targetTestRoot,"user/foo")));
     Assert.assertTrue(isFile(fc,
-       FileContextTestHelper.getTestRootPath(fc,"/user2/fooBarBar")));
+       fileContextTestHelper.getTestRootPath(fc,"/user2/fooBarBar")));
     Assert.assertTrue(isFile(fclocal,
         new Path(targetTestRoot,"user/fooBarBar")));
     */
@@ -327,7 +337,7 @@ public class ViewFsBaseTest {
 
   @Test(expected=IOException.class) 
   public void testRenameAcrossMounts2() throws IOException {
-    FileContextTestHelper.createFile(fcView, "/user/foo");
+    fileContextTestHelper.createFile(fcView, "/user/foo");
     fcView.rename(new Path("/user/foo"), new Path("/data/fooBar"));
   }
   
@@ -339,7 +349,7 @@ public class ViewFsBaseTest {
   @Test
   public void testGetBlockLocations() throws IOException {
     Path targetFilePath = new Path(targetTestRoot,"data/largeFile");
-    FileContextTestHelper.createFile(fcTarget, targetFilePath, 10, 1024);
+    fileContextTestHelper.createFile(fcTarget, targetFilePath, 10, 1024);
     Path viewFilePath = new Path("/data/largeFile");
     checkFileStatus(fcView, viewFilePath.toString(), fileType.isFile);
     BlockLocation[] viewBL = fcView.getFileBlockLocations(viewFilePath,
@@ -382,19 +392,19 @@ public class ViewFsBaseTest {
     FileStatus[] dirPaths = fcView.util().listStatus(new Path("/"));
     FileStatus fs;
     Assert.assertEquals(7, dirPaths.length);
-    fs = FileContextTestHelper.containsPath(fcView, "/user", dirPaths);
+    fs = fileContextTestHelper.containsPath(fcView, "/user", dirPaths);
       Assert.assertNotNull(fs);
       Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
-    fs = FileContextTestHelper.containsPath(fcView, "/data", dirPaths);
+    fs = fileContextTestHelper.containsPath(fcView, "/data", dirPaths);
       Assert.assertNotNull(fs);
       Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
-    fs = FileContextTestHelper.containsPath(fcView, "/internalDir", dirPaths);
+    fs = fileContextTestHelper.containsPath(fcView, "/internalDir", dirPaths);
       Assert.assertNotNull(fs);
       Assert.assertTrue("InternalDirs should appear as dir", fs.isDirectory());
-    fs = FileContextTestHelper.containsPath(fcView, "/danglingLink", dirPaths);
+    fs = fileContextTestHelper.containsPath(fcView, "/danglingLink", dirPaths);
       Assert.assertNotNull(fs);
       Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
-    fs = FileContextTestHelper.containsPath(fcView, "/linkToAFile", dirPaths);
+    fs = fileContextTestHelper.containsPath(fcView, "/linkToAFile", dirPaths);
       Assert.assertNotNull(fs);
       Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
       
@@ -404,12 +414,12 @@ public class ViewFsBaseTest {
       dirPaths = fcView.util().listStatus(new Path("/internalDir"));
       Assert.assertEquals(2, dirPaths.length);
 
-      fs = FileContextTestHelper.containsPath(fcView,
+      fs = fileContextTestHelper.containsPath(fcView,
           "/internalDir/internalDir2", dirPaths);
         Assert.assertNotNull(fs);
         Assert.assertTrue("InternalDirs should appear as dir",fs.isDirectory());
-      fs = FileContextTestHelper.containsPath(fcView,
-          "/internalDir/linkToDir2", dirPaths);
+      fs = fileContextTestHelper.containsPath(fcView, "/internalDir/linkToDir2",
+        dirPaths);
         Assert.assertNotNull(fs);
         Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
   }
@@ -534,19 +544,19 @@ public class ViewFsBaseTest {
   
   @Test
   public void testResolvePathThroughMountPoints() throws IOException {
-    FileContextTestHelper.createFile(fcView, "/user/foo");
+    fileContextTestHelper.createFile(fcView, "/user/foo");
     Assert.assertEquals(new Path(targetTestRoot,"user/foo"),
                           fcView.resolvePath(new Path("/user/foo")));
     
     fcView.mkdir(
-        FileContextTestHelper.getTestRootPath(fcView, "/user/dirX"),
+        fileContextTestHelper.getTestRootPath(fcView, "/user/dirX"),
         FileContext.DEFAULT_PERM, false);
     Assert.assertEquals(new Path(targetTestRoot,"user/dirX"),
         fcView.resolvePath(new Path("/user/dirX")));
 
     
     fcView.mkdir(
-        FileContextTestHelper.getTestRootPath(fcView, "/user/dirX/dirY"),
+        fileContextTestHelper.getTestRootPath(fcView, "/user/dirX/dirY"),
         FileContext.DEFAULT_PERM, false);
     Assert.assertEquals(new Path(targetTestRoot,"user/dirX/dirY"),
         fcView.resolvePath(new Path("/user/dirX/dirY")));
@@ -566,7 +576,7 @@ public class ViewFsBaseTest {
   @Test(expected=FileNotFoundException.class) 
   @Test(expected=FileNotFoundException.class) 
   public void testResolvePathMissingThroughMountPoints2() throws IOException {
   public void testResolvePathMissingThroughMountPoints2() throws IOException {
     fcView.mkdir(
     fcView.mkdir(
-        FileContextTestHelper.getTestRootPath(fcView, "/user/dirX"),
+        fileContextTestHelper.getTestRootPath(fcView, "/user/dirX"),
         FileContext.DEFAULT_PERM, false);
         FileContext.DEFAULT_PERM, false);
     fcView.resolvePath(new Path("/user/dirX/nonExisting"));
     fcView.resolvePath(new Path("/user/dirX/nonExisting"));
   }
   }
@@ -584,58 +594,58 @@ public class ViewFsBaseTest {
   // Mkdir on internal mount table should fail
   // Mkdir on internal mount table should fail
   @Test(expected=AccessControlException.class) 
   @Test(expected=AccessControlException.class) 
   public void testInternalMkdirSlash() throws IOException {
   public void testInternalMkdirSlash() throws IOException {
-    fcView.mkdir(FileContextTestHelper.getTestRootPath(fcView, "/"),
+    fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView, "/"),
         FileContext.DEFAULT_PERM, false);
         FileContext.DEFAULT_PERM, false);
   }
   }
   
   
   @Test(expected=AccessControlException.class) 
   @Test(expected=AccessControlException.class) 
   public void testInternalMkdirExisting1() throws IOException {
   public void testInternalMkdirExisting1() throws IOException {
-    fcView.mkdir(FileContextTestHelper.getTestRootPath(fcView, "/internalDir"),
-        FileContext.DEFAULT_PERM, false);
+    fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView, "/internalDir"),
+      FileContext.DEFAULT_PERM, false);
   }
   }
   @Test(expected=AccessControlException.class) 
   @Test(expected=AccessControlException.class) 
   public void testInternalMkdirExisting2() throws IOException {
   public void testInternalMkdirExisting2() throws IOException {
-    fcView.mkdir(FileContextTestHelper.getTestRootPath(fcView,
+    fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView,
         "/internalDir/linkToDir2"),
         "/internalDir/linkToDir2"),
         FileContext.DEFAULT_PERM, false);
         FileContext.DEFAULT_PERM, false);
   }
   }
   @Test(expected=AccessControlException.class) 
   @Test(expected=AccessControlException.class) 
   public void testInternalMkdirNew() throws IOException {
   public void testInternalMkdirNew() throws IOException {
-    fcView.mkdir(FileContextTestHelper.getTestRootPath(fcView, "/dirNew"),
+    fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView, "/dirNew"),
         FileContext.DEFAULT_PERM, false);
         FileContext.DEFAULT_PERM, false);
   }
   }
   @Test(expected=AccessControlException.class) 
   @Test(expected=AccessControlException.class) 
   public void testInternalMkdirNew2() throws IOException {
   public void testInternalMkdirNew2() throws IOException {
-    fcView.mkdir(FileContextTestHelper.getTestRootPath(fcView, "/internalDir/dirNew"),
-        FileContext.DEFAULT_PERM, false);
+    fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView,
+        "/internalDir/dirNew"), FileContext.DEFAULT_PERM, false);
   }
   }
   
   
   // Create on internal mount table should fail
   // Create on internal mount table should fail
   
   
   @Test(expected=AccessControlException.class) 
   @Test(expected=AccessControlException.class) 
   public void testInternalCreate1() throws IOException {
   public void testInternalCreate1() throws IOException {
-    FileContextTestHelper.createFileNonRecursive(fcView, "/foo"); // 1 component
+    fileContextTestHelper.createFileNonRecursive(fcView, "/foo"); // 1 component
   }
   }
   
   
   @Test(expected=AccessControlException.class) 
   @Test(expected=AccessControlException.class) 
   public void testInternalCreate2() throws IOException {  // 2 component
   public void testInternalCreate2() throws IOException {  // 2 component
-    FileContextTestHelper.createFileNonRecursive(fcView, "/internalDir/foo");
+    fileContextTestHelper.createFileNonRecursive(fcView, "/internalDir/foo");
   }
   }
   
   
   @Test(expected=AccessControlException.class) 
   @Test(expected=AccessControlException.class) 
   public void testInternalCreateMissingDir() throws IOException {
   public void testInternalCreateMissingDir() throws IOException {
-    FileContextTestHelper.createFile(fcView, "/missingDir/foo");
+    fileContextTestHelper.createFile(fcView, "/missingDir/foo");
   }
   }
   
   
   @Test(expected=AccessControlException.class) 
   @Test(expected=AccessControlException.class) 
   public void testInternalCreateMissingDir2() throws IOException {
   public void testInternalCreateMissingDir2() throws IOException {
-    FileContextTestHelper.createFile(fcView, "/missingDir/miss2/foo");
+    fileContextTestHelper.createFile(fcView, "/missingDir/miss2/foo");
   }
   }
   
   
   
   
   @Test(expected=AccessControlException.class) 
   @Test(expected=AccessControlException.class) 
   public void testInternalCreateMissingDir3() throws IOException {
   public void testInternalCreateMissingDir3() throws IOException {
-    FileContextTestHelper.createFile(fcView, "/internalDir/miss2/foo");
+    fileContextTestHelper.createFile(fcView, "/internalDir/miss2/foo");
   }
   }
   
   
   // Delete on internal mount table should fail
   // Delete on internal mount table should fail

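Note: the ViewFsBaseTest hunks above all make the same mechanical change, replacing static calls on FileContextTestHelper with calls through a fileContextTestHelper instance, so each test class can own an isolated test root. A minimal sketch of the resulting pattern; the field declaration is assumed, since it sits outside this excerpt:

    import java.io.IOException;

    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.FileContextTestHelper;
    import org.apache.hadoop.fs.Path;

    public class HelperInstanceSketch {
      // Assumed declaration; the actual field in ViewFsBaseTest is not
      // visible in this excerpt.
      private final FileContextTestHelper fileContextTestHelper =
          new FileContextTestHelper();

      void demo(FileContext fcView) throws IOException {
        // Same helper operations as before, now bound to this instance's
        // test root instead of shared static state.
        fileContextTestHelper.createFile(fcView, "/user/foo");
        Path dirX = fileContextTestHelper.getTestRootPath(fcView, "/user/dirX");
        fcView.mkdir(dirX, FileContext.DEFAULT_PERM, false);
      }
    }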
+ 5 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java

@@ -50,6 +50,8 @@ public class ViewFsTestSetup {
   
   
  static public String ViewFSTestDir = "/testDir";
 
+  private static final FileContextTestHelper FILE_CONTEXT_TEST_HELPER =
+    new FileContextTestHelper();
 
   /* 
   * return the ViewFS File context to be used for tests
@@ -59,7 +61,7 @@ public class ViewFsTestSetup {
     * create the test root on local_fs - the  mount table will point here
     */
    FileContext fsTarget = FileContext.getLocalFSFileContext();
-    Path targetOfTests = FileContextTestHelper.getTestRootPath(fsTarget);
+    Path targetOfTests = FILE_CONTEXT_TEST_HELPER.getTestRootPath(fsTarget);
    // In case previous test was killed before cleanup
    fsTarget.delete(targetOfTests, true);
    
@@ -67,7 +69,7 @@ public class ViewFsTestSetup {
    Configuration conf = new Configuration();
    
    // Set up viewfs link for test dir as described above
-    String testDir = FileContextTestHelper.getTestRootPath(fsTarget).toUri()
+    String testDir = FILE_CONTEXT_TEST_HELPER.getTestRootPath(fsTarget).toUri()
        .getPath();
    linkUpFirstComponents(conf, testDir, fsTarget, "test dir");
    
@@ -94,7 +96,7 @@ public class ViewFsTestSetup {
   */
  static public void tearDownForViewFsLocalFs() throws Exception {
    FileContext fclocal = FileContext.getLocalFSFileContext();
-    Path targetOfTests = FileContextTestHelper.getTestRootPath(fclocal);
+    Path targetOfTests = FILE_CONTEXT_TEST_HELPER.getTestRootPath(fclocal);
    fclocal.delete(targetOfTests, true);
  }
  

+ 18 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java

@@ -42,6 +42,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveNotFoundException;
 import org.apache.hadoop.ha.HAZKUtil.ZKAuthInfo;
+import org.apache.hadoop.test.GenericTestUtils;
 
 public class TestActiveStandbyElector {
 
@@ -56,7 +57,8 @@ public class TestActiveStandbyElector {
    private int sleptFor = 0;
    
    ActiveStandbyElectorTester(String hostPort, int timeout, String parent,
-        List<ACL> acl, ActiveStandbyElectorCallback app) throws IOException {
+        List<ACL> acl, ActiveStandbyElectorCallback app) throws IOException,
+        KeeperException {
      super(hostPort, timeout, parent, acl,
          Collections.<ZKAuthInfo>emptyList(), app);
    }
@@ -83,7 +85,7 @@ public class TestActiveStandbyElector {
      ActiveStandbyElector.BREADCRUMB_FILENAME;
 
  @Before
-  public void init() throws IOException {
+  public void init() throws IOException, KeeperException {
    count = 0;
    mockZK = Mockito.mock(ZooKeeper.class);
    mockApp = Mockito.mock(ActiveStandbyElectorCallback.class);
@@ -705,4 +707,18 @@ public class TestActiveStandbyElector {
        Mockito.eq(ZK_PARENT_NAME), Mockito.<byte[]>any(),
        Mockito.eq(Ids.OPEN_ACL_UNSAFE), Mockito.eq(CreateMode.PERSISTENT));
  }
+
+  /**
+   * verify the zookeeper connection establishment
+   */
+  @Test
+  public void testWithoutZKServer() throws Exception {
+    try {
+      new ActiveStandbyElector("127.0.0.1", 2000, ZK_PARENT_NAME,
+          Ids.OPEN_ACL_UNSAFE, Collections.<ZKAuthInfo> emptyList(), mockApp);
+      Assert.fail("Did not throw zookeeper connection loss exceptions!");
+    } catch (KeeperException ke) {
+      GenericTestUtils.assertExceptionContains( "ConnectionLoss", ke);
+    }
+  }
 }

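Note: init() and the tester constructor now declare KeeperException because ActiveStandbyElector contacts ZooKeeper during construction, and testWithoutZKServer relies on that by pointing at an address with no server listening. A hedged restatement of the failure mode; "/parent" stands in for ZK_PARENT_NAME:

    import java.util.Collections;

    import org.apache.hadoop.ha.ActiveStandbyElector;
    import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
    import org.apache.hadoop.ha.HAZKUtil.ZKAuthInfo;
    import org.apache.hadoop.test.GenericTestUtils;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooDefs.Ids;

    class ZkConnectionLossSketch {
      // Assumes nothing is listening on 127.0.0.1:2000, as in the test.
      void demo(ActiveStandbyElectorCallback app) throws Exception {
        try {
          new ActiveStandbyElector("127.0.0.1", 2000, "/parent",
              Ids.OPEN_ACL_UNSAFE, Collections.<ZKAuthInfo>emptyList(), app);
          throw new AssertionError("expected a connection-loss failure");
        } catch (KeeperException ke) {
          GenericTestUtils.assertExceptionContains("ConnectionLoss", ke);
        }
      }
    }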
+ 74 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestUTF8.java

@@ -19,8 +19,13 @@
 package org.apache.hadoop.io;
 
 import junit.framework.TestCase;
+import java.io.IOException;
+import java.io.UTFDataFormatException;
 import java.util.Random;
 
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.StringUtils;
+
 /** Unit tests for UTF8. */
 @SuppressWarnings("deprecation")
 public class TestUTF8 extends TestCase {
@@ -92,5 +97,73 @@ public class TestUTF8 extends TestCase {
 
    assertEquals(s, new String(dob.getData(), 2, dob.getLength()-2, "UTF-8"));
  }
-	
+
+  /**
+   * Test encoding and decoding of UTF8 outside the basic multilingual plane.
+   *
+   * This is a regression test for HADOOP-9103.
+   */
+  public void testNonBasicMultilingualPlane() throws Exception {
+    // Test using the "CAT FACE" character (U+1F431)
+    // See http://www.fileformat.info/info/unicode/char/1f431/index.htm
+    String catFace = "\uD83D\uDC31";
+
+    // This encodes to 4 bytes in UTF-8:
+    byte[] encoded = catFace.getBytes("UTF-8");
+    assertEquals(4, encoded.length);
+    assertEquals("f09f90b1", StringUtils.byteToHexString(encoded));
+
+    // Decode back to String using our own decoder
+    String roundTrip = UTF8.fromBytes(encoded);
+    assertEquals(catFace, roundTrip);
+  }
+
+  /**
+   * Test that decoding invalid UTF8 throws an appropriate error message.
+   */
+  public void testInvalidUTF8() throws Exception {
+    byte[] invalid = new byte[] {
+        0x01, 0x02, (byte)0xff, (byte)0xff, 0x01, 0x02, 0x03, 0x04, 0x05 };
+    try {
+      UTF8.fromBytes(invalid);
+      fail("did not throw an exception");
+    } catch (UTFDataFormatException utfde) {
+      GenericTestUtils.assertExceptionContains(
+          "Invalid UTF8 at ffff01020304", utfde);
+    }
+  }
+
+  /**
+   * Test for a 5-byte UTF8 sequence, which is now considered illegal.
+   */
+  public void test5ByteUtf8Sequence() throws Exception {
+    byte[] invalid = new byte[] {
+        0x01, 0x02, (byte)0xf8, (byte)0x88, (byte)0x80,
+        (byte)0x80, (byte)0x80, 0x04, 0x05 };
+    try {
+      UTF8.fromBytes(invalid);
+      fail("did not throw an exception");
+    } catch (UTFDataFormatException utfde) {
+      GenericTestUtils.assertExceptionContains(
+          "Invalid UTF8 at f88880808004", utfde);
+    }
+  }
+  
+  /**
+   * Test that decoding invalid UTF8 due to truncation yields the correct
+   * exception type.
+   */
+  public void testInvalidUTF8Truncated() throws Exception {
+    // Truncated CAT FACE character -- this is a 4-byte sequence, but we
+    // only have the first three bytes.
+    byte[] truncated = new byte[] {
+        (byte)0xF0, (byte)0x9F, (byte)0x90 };
+    try {
+      UTF8.fromBytes(truncated);
+      fail("did not throw an exception");
+    } catch (UTFDataFormatException utfde) {
+      GenericTestUtils.assertExceptionContains(
+          "Truncated UTF8 at f09f90", utfde);
+    }
+  }
 }

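Note: testNonBasicMultilingualPlane is worth a quick arithmetic check. U+1F431 needs 17 bits, so UTF-8 uses the 4-byte form 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx; packing 0x1F431 into the x bits gives exactly F0 9F 90 B1, the "f09f90b1" asserted above. A self-contained check:

    import java.nio.charset.StandardCharsets;

    public class CatFaceEncodingSketch {
      public static void main(String[] args) {
        String catFace = "\uD83D\uDC31";      // UTF-16 surrogate pair for U+1F431
        int cp = catFace.codePointAt(0);      // 0x1F431
        byte[] utf8 = catFace.getBytes(StandardCharsets.UTF_8);
        // Prints: U+1F431 -> F0 9F 90 B1
        System.out.printf("U+%X -> %02X %02X %02X %02X%n", cp,
            utf8[0] & 0xff, utf8[1] & 0xff, utf8[2] & 0xff, utf8[3] & 0xff);
      }
    }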
+ 33 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java

@@ -68,6 +68,7 @@ public class TestIPC {
   * of the various writables.
   **/
  static boolean WRITABLE_FAULTS_ENABLED = true;
+  static int WRITABLE_FAULTS_SLEEP = 0;
  
  static {
    Client.setPingInterval(conf, PING_INTERVAL);
@@ -206,16 +207,27 @@ public class TestIPC {
  
  static void maybeThrowIOE() throws IOException {
    if (WRITABLE_FAULTS_ENABLED) {
+      maybeSleep();
      throw new IOException("Injected fault");
    }
  }
 
  static void maybeThrowRTE() {
    if (WRITABLE_FAULTS_ENABLED) {
+      maybeSleep();
      throw new RuntimeException("Injected fault");
    }
  }
 
+  private static void maybeSleep() {
+    if (WRITABLE_FAULTS_SLEEP > 0) {
+      try {
+        Thread.sleep(WRITABLE_FAULTS_SLEEP);
+      } catch (InterruptedException ie) {
+      }
+    }
+  }
+
  @SuppressWarnings("unused")
  private static class IOEOnReadWritable extends LongWritable {
    public IOEOnReadWritable() {}
@@ -370,6 +382,27 @@ public class TestIPC {
        RTEOnReadWritable.class);
  }
  
+  /**
+   * Test case that fails a write, but only after taking enough time
+   * that a ping should have been sent. This is a reproducer for a
+   * deadlock seen in one iteration of HADOOP-6762.
+   */
+  @Test
+  public void testIOEOnWriteAfterPingClient() throws Exception {
+    // start server
+    Client.setPingInterval(conf, 100);
+
+    try {
+      WRITABLE_FAULTS_SLEEP = 1000;
+      doErrorTest(IOEOnWriteWritable.class,
+          LongWritable.class,
+          LongWritable.class,
+          LongWritable.class);
+    } finally {
+      WRITABLE_FAULTS_SLEEP = 0;
+    }
+  }
+  
   private static void assertExceptionContains(
  private static void assertExceptionContains(
      Throwable t, String substring) {
    String msg = StringUtils.stringifyException(t);
+ 94 - 0
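Note: testIOEOnWriteAfterPingClient shrinks the ping interval to 100 ms and injects a 1000 ms delay before the write fault, so the client's ping timer is guaranteed to fire while the request write is still in flight, which is the HADOOP-6762 deadlock scenario. A hedged sketch of the fault-knob pattern; the names here are illustrative, not Hadoop API:

    public class FaultKnobSketch {
      static volatile int faultSleepMs = 0;

      static void maybeSleep() {
        if (faultSleepMs > 0) {
          try {
            Thread.sleep(faultSleepMs);
          } catch (InterruptedException ie) {
            Thread.currentThread().interrupt(); // keep the interrupt visible
          }
        }
      }

      static void runWithInjectedDelay(Runnable test) {
        try {
          faultSleepMs = 1000;  // longer than the 100 ms ping interval
          test.run();
        } finally {
          faultSleepMs = 0;     // never leak the fault into later tests
        }
      }
    }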
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java

@@ -38,6 +38,10 @@ import java.net.ConnectException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.util.Arrays;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
 
 import javax.net.SocketFactory;
 
@@ -823,6 +827,96 @@ public class TestRPC {
    }
  }
  
+  @Test(timeout=90000)
+  public void testRPCInterruptedSimple() throws Exception {
+    final Configuration conf = new Configuration();
+    Server server = RPC.getServer(
+      TestProtocol.class, new TestImpl(), ADDRESS, 0, 5, true, conf, null
+    );
+    server.start();
+    InetSocketAddress addr = NetUtils.getConnectAddress(server);
+
+    final TestProtocol proxy = (TestProtocol) RPC.getProxy(
+        TestProtocol.class, TestProtocol.versionID, addr, conf);
+    // Connect to the server
+    proxy.ping();
+    // Interrupt self, try another call
+    Thread.currentThread().interrupt();
+    try {
+      proxy.ping();
+      fail("Interruption did not cause IPC to fail");
+    } catch (IOException ioe) {
+      if (!ioe.toString().contains("InterruptedException")) {
+        throw ioe;
+      }
+      // clear interrupt status for future tests
+      Thread.interrupted();
+    }
+  }
+  
+  @Test(timeout=30000)
+  public void testRPCInterrupted() throws IOException, InterruptedException {
+    final Configuration conf = new Configuration();
+    Server server = RPC.getServer(
+      TestProtocol.class, new TestImpl(), ADDRESS, 0, 5, true, conf, null
+    );
+
+    server.start();
+
+    int numConcurrentRPC = 200;
+    InetSocketAddress addr = NetUtils.getConnectAddress(server);
+    final CyclicBarrier barrier = new CyclicBarrier(numConcurrentRPC);
+    final CountDownLatch latch = new CountDownLatch(numConcurrentRPC);
+    final AtomicBoolean leaderRunning = new AtomicBoolean(true);
+    final AtomicReference<Throwable> error = new AtomicReference<Throwable>();
+    Thread leaderThread = null;
+    
+    for (int i = 0; i < numConcurrentRPC; i++) {
+      final int num = i;
+      final TestProtocol proxy = (TestProtocol) RPC.getProxy(
+      TestProtocol.class, TestProtocol.versionID, addr, conf);
+      Thread rpcThread = new Thread(new Runnable() {
+        @Override
+        public void run() {
+          try {
+            barrier.await();
+            while (num == 0 || leaderRunning.get()) {
+              proxy.slowPing(false);
+            }
+
+            proxy.slowPing(false);
+          } catch (Exception e) {
+            if (num == 0) {
+              leaderRunning.set(false);
+            } else {
+              error.set(e);
+            }
+
+            LOG.error(e);
+          } finally {
+            latch.countDown();
+          }
+        }
+      });
+      rpcThread.start();
+
+      if (leaderThread == null) {
+       leaderThread = rpcThread;
+      }
+    }
+    // let threads get past the barrier
+    Thread.sleep(1000);
+    // stop a single thread
+    while (leaderRunning.get()) {
+      leaderThread.interrupt();
+    }
+    
+    latch.await();
+    
+    // should not cause any other thread to get an error
+    assertTrue("rpc got exception " + error.get(), error.get() == null);
+  }
+
  public static void main(String[] args) throws Exception {
    new TestRPC().testCallsInternal(conf);
 

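Note: both new tests encode the same convention: an interrupt during an IPC call surfaces as an IOException whose text mentions InterruptedException, and the test must clear the thread's interrupt status afterwards. A minimal restatement; Pingable is a stand-in for the generated proxy:

    import java.io.IOException;

    public class InterruptedCallSketch {
      interface Pingable { void ping() throws IOException; }

      static void callWhileInterrupted(Pingable proxy) throws IOException {
        Thread.currentThread().interrupt();   // pending interrupt, as in the test
        try {
          proxy.ping();
          throw new AssertionError("Interruption did not cause IPC to fail");
        } catch (IOException ioe) {
          if (!ioe.toString().contains("InterruptedException")) {
            throw ioe;                        // unrelated failure, rethrow
          }
          Thread.interrupted();               // clear status for later tests
        }
      }
    }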
+ 3 - 26
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java

@@ -29,8 +29,6 @@ import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -115,31 +113,23 @@ public class TestGangliaMetrics {
    final int expectedCountFromGanglia30 = expectedMetrics.length;
    final int expectedCountFromGanglia31 = 2 * expectedMetrics.length;
 
-    // use latch to make sure we received required records before shutting
-    // down the MetricSystem
-    CountDownLatch latch = new CountDownLatch(
-        expectedCountFromGanglia30 + expectedCountFromGanglia31);
-
    // Setup test for GangliaSink30
    AbstractGangliaSink gsink30 = new GangliaSink30();
    gsink30.init(cb.subset("test"));
-    MockDatagramSocket mockds30 = new MockDatagramSocket(latch);
+    MockDatagramSocket mockds30 = new MockDatagramSocket();
    GangliaMetricsTestHelper.setDatagramSocket(gsink30, mockds30);
 
    // Setup test for GangliaSink31
    AbstractGangliaSink gsink31 = new GangliaSink31();
    gsink31.init(cb.subset("test"));
-    MockDatagramSocket mockds31 = new MockDatagramSocket(latch);
+    MockDatagramSocket mockds31 = new MockDatagramSocket();
    GangliaMetricsTestHelper.setDatagramSocket(gsink31, mockds31);
 
    // register the sinks
    ms.register("gsink30", "gsink30 desc", gsink30);
    ms.register("gsink31", "gsink31 desc", gsink31);
-    ms.onTimerEvent();  // trigger something interesting
+    ms.publishMetricsNow(); // publish the metrics
 
-    // wait for all records and the stop MetricSystem.  Without this
-    // sometime the ms gets shutdown before all the sinks have consumed
-    latch.await(200, TimeUnit.MILLISECONDS);
    ms.stop();
 
    // check GangliaSink30 data
@@ -198,7 +188,6 @@ public class TestGangliaMetrics {
   */
  private class MockDatagramSocket extends DatagramSocket {
    private ArrayList<byte[]> capture;
-    private CountDownLatch latch;
 
    /**
     * @throws SocketException
@@ -207,15 +196,6 @@ public class TestGangliaMetrics {
      capture = new  ArrayList<byte[]>();
    }
 
-    /**
-     * @param latch
-     * @throws SocketException
-     */
-    public MockDatagramSocket(CountDownLatch latch) throws SocketException {
-      this();
-      this.latch = latch;
-    }
-
    /* (non-Javadoc)
     * @see java.net.DatagramSocket#send(java.net.DatagramPacket)
     */
@@ -225,9 +205,6 @@ public class TestGangliaMetrics {
      byte[] bytes = new byte[p.getLength()];
      System.arraycopy(p.getData(), p.getOffset(), bytes, 0, p.getLength());
      capture.add(bytes);
-
-      // decrement the latch
-      latch.countDown();
    }
 
    /**

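Note: the latch is removable because publishMetricsNow(), introduced by this change, publishes a snapshot synchronously; by the time it returns, the registered sinks have consumed the records, so ms.stop() no longer races the sinks. A sketch of the simplified flow under that assumption:

    import org.apache.hadoop.metrics2.MetricsSink;
    import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;

    public class SyncPublishSketch {
      static void publishAndStop(MetricsSystemImpl ms, MetricsSink sink) {
        ms.registerSink("sink", "test sink", sink);
        ms.publishMetricsNow(); // returns after the sinks have been fed
        ms.stop();              // safe: nothing left in flight
      }
    }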
+ 185 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java

@@ -18,7 +18,11 @@
 
 
 package org.apache.hadoop.metrics2.impl;
 
-import java.util.List;
+import java.util.*;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.*;
+
+import javax.annotation.Nullable;
 
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -26,9 +30,11 @@ import org.junit.runner.RunWith;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Captor;
 import org.mockito.runners.MockitoJUnitRunner;
+
 import static org.junit.Assert.*;
 import static org.mockito.Mockito.*;
 
+import com.google.common.base.Predicate;
 import com.google.common.collect.Iterables;
 
 import org.apache.commons.configuration.SubsetConfiguration;
@@ -36,6 +42,8 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.metrics2.MetricsException;
 import static org.apache.hadoop.test.MoreAsserts.*;
+
+import org.apache.hadoop.metrics2.AbstractMetric;
 import org.apache.hadoop.metrics2.MetricsRecord;
 import org.apache.hadoop.metrics2.MetricsSink;
 import org.apache.hadoop.metrics2.MetricsSource;
@@ -47,6 +55,7 @@ import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
+import org.apache.hadoop.util.StringUtils;
 
 /**
  * Test the MetricsSystemImpl class
@@ -72,7 +81,7 @@ public class TestMetricsSystemImpl {
  }
 
  @Test public void testInitFirst() throws Exception {
-    ConfigBuilder cb = new ConfigBuilder().add("*.period", 8)
+    new ConfigBuilder().add("*.period", 8)
        //.add("test.sink.plugin.urls", getPluginUrlsAsString())
        .add("test.sink.test.class", TestSink.class.getName())
        .add("test.*.source.filter.exclude", "s0")
@@ -93,8 +102,9 @@ public class TestMetricsSystemImpl {
    MetricsSink sink2 = mock(MetricsSink.class);
    ms.registerSink("sink1", "sink1 desc", sink1);
    ms.registerSink("sink2", "sink2 desc", sink2);
-    ms.onTimerEvent();  // trigger something interesting
+    ms.publishMetricsNow(); // publish the metrics
    ms.stop();
+    ms.shutdown();
 
    verify(sink1, times(2)).putMetrics(r1.capture());
    List<MetricsRecord> mr1 = r1.getAllValues();
@@ -104,6 +114,177 @@ public class TestMetricsSystemImpl {
    assertEquals("output", mr1, mr2);
  }
 
+  @Test public void testMultiThreadedPublish() throws Exception {
+    new ConfigBuilder().add("*.period", 80)
+      .add("test.sink.Collector.queue.capacity", "20")
+      .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
+    final MetricsSystemImpl ms = new MetricsSystemImpl("Test");
+    ms.start();
+    final int numThreads = 10;
+    final CollectingSink sink = new CollectingSink(numThreads);
+    ms.registerSink("Collector",
+        "Collector of values from all threads.", sink);
+    final TestSource[] sources = new TestSource[numThreads];
+    final Thread[] threads = new Thread[numThreads];
+    final String[] results = new String[numThreads];
+    final CyclicBarrier barrier1 = new CyclicBarrier(numThreads),
+        barrier2 = new CyclicBarrier(numThreads);
+    for (int i = 0; i < numThreads; i++) {
+      sources[i] = ms.register("threadSource" + i,
+          "A source of my threaded goodness.",
+          new TestSource("threadSourceRec" + i));
+      threads[i] = new Thread(new Runnable() {
+        private boolean safeAwait(int mySource, CyclicBarrier barrier) {
+          try {
+            barrier.await(2, TimeUnit.SECONDS);
+          } catch (InterruptedException e) {
+            results[mySource] = "Interrupted";
+            return false;
+          } catch (BrokenBarrierException e) {
+            results[mySource] = "Broken Barrier";
+            return false;
+          } catch (TimeoutException e) {
+            results[mySource] = "Timed out on barrier";
+            return false;
+          }
+          return true;
+        }
+        
+        @Override
+        public void run() {
+          int mySource = Integer.parseInt(Thread.currentThread().getName());
+          if (sink.collected[mySource].get() != 0L) {
+            results[mySource] = "Someone else collected my metric!";
+            return;
+          }
+          // Wait for all the threads to come here so we can hammer
+          // the system at the same time
+          if (!safeAwait(mySource, barrier1)) return;
+          sources[mySource].g1.set(230);
+          ms.publishMetricsNow();
+          // Since some other thread may have snatched my metric,
+          // I need to wait for the threads to finish before checking.
+          if (!safeAwait(mySource, barrier2)) return;
+          if (sink.collected[mySource].get() != 230L) {
+            results[mySource] = "Metric not collected!";
+            return;
+          }
+          results[mySource] = "Passed";
+        }
+      }, "" + i);
+    }
+    for (Thread t : threads)
+      t.start();
+    for (Thread t : threads)
+      t.join();
+    assertEquals(0L, ms.droppedPubAll.value());
+    assertTrue(StringUtils.join("\n", Arrays.asList(results)),
+      Iterables.all(Arrays.asList(results), new Predicate<String>() {
+        @Override
+        public boolean apply(@Nullable String input) {
+          return input.equalsIgnoreCase("Passed");
+        }
+      }));
+    ms.stop();
+    ms.shutdown();
+  }
+
+  private static class CollectingSink implements MetricsSink {
+    private final AtomicLong[] collected;
+    
+    public CollectingSink(int capacity) {
+      collected = new AtomicLong[capacity];
+      for (int i = 0; i < capacity; i++) {
+        collected[i] = new AtomicLong();
+      }
+    }
+    
+    @Override
+    public void init(SubsetConfiguration conf) {
+    }
+
+    @Override
+    public void putMetrics(MetricsRecord record) {
+      final String prefix = "threadSourceRec";
+      if (record.name().startsWith(prefix)) {
+        final int recordNumber = Integer.parseInt(
+            record.name().substring(prefix.length()));
+        ArrayList<String> names = new ArrayList<String>();
+        for (AbstractMetric m : record.metrics()) {
+          if (m.name().equalsIgnoreCase("g1")) {
+            collected[recordNumber].set(m.value().longValue());
+            return;
+          }
+          names.add(m.name());
+        }
+      }
+    }
+
+    @Override
+    public void flush() {
+    }
+  }
+
+  @Test public void testHangingSink() {
+    new ConfigBuilder().add("*.period", 8)
+      .add("test.sink.test.class", TestSink.class.getName())
+      .add("test.sink.hanging.retry.delay", "1")
+      .add("test.sink.hanging.retry.backoff", "1.01")
+      .add("test.sink.hanging.retry.count", "0")
+      .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
+    MetricsSystemImpl ms = new MetricsSystemImpl("Test");
+    ms.start();
+    TestSource s = ms.register("s3", "s3 desc", new TestSource("s3rec"));
+    s.c1.incr();
+    HangingSink hanging = new HangingSink();
+    ms.registerSink("hanging", "Hang the sink!", hanging);
+    ms.publishMetricsNow();
+    assertEquals(1L, ms.droppedPubAll.value());
+    assertFalse(hanging.getInterrupted());
+    ms.stop();
+    ms.shutdown();
+    assertTrue(hanging.getInterrupted());
+    assertTrue("The sink didn't get called after its first hang " +
+               "for subsequent records.", hanging.getGotCalledSecondTime());
+  }
+
+  private static class HangingSink implements MetricsSink {
+    private volatile boolean interrupted;
+    private boolean gotCalledSecondTime;
+    private boolean firstTime = true;
+
+    public boolean getGotCalledSecondTime() {
+      return gotCalledSecondTime;
+    }
+
+    public boolean getInterrupted() {
+      return interrupted;
+    }
+
+    @Override
+    public void init(SubsetConfiguration conf) {
+    }
+
+    @Override
+    public void putMetrics(MetricsRecord record) {
+      // No need to hang every time, just the first record.
+      if (!firstTime) {
+        gotCalledSecondTime = true;
+        return;
+      }
+      firstTime = false;
+      try {
+        Thread.sleep(10 * 1000);
+      } catch (InterruptedException ex) {
+        interrupted = true;
+      }
+    }
+
+    @Override
+    public void flush() {
+    }
+  }
+
  @Test public void testRegisterDups() {
    MetricsSystem ms = new MetricsSystemImpl();
    TestSource ts1 = new TestSource("ts1");
@@ -116,6 +297,7 @@ public class TestMetricsSystemImpl {
    MetricsSource s2 = ms.getSource("ts1");
    assertNotNull(s2);
    assertNotSame(s1, s2);
+    ms.shutdown();
  }
 
  @Test(expected=MetricsException.class) public void testRegisterDupError() {

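Note: CollectingSink and HangingSink above show the full sink surface, namely init, putMetrics, and flush. For reference, a minimal sink that just prints each metric, following the same shape:

    import org.apache.commons.configuration.SubsetConfiguration;

    import org.apache.hadoop.metrics2.AbstractMetric;
    import org.apache.hadoop.metrics2.MetricsRecord;
    import org.apache.hadoop.metrics2.MetricsSink;

    public class PrintingSinkSketch implements MetricsSink {
      @Override
      public void init(SubsetConfiguration conf) {
        // sink-specific keys (e.g. the retry.* settings above) arrive here
      }

      @Override
      public void putMetrics(MetricsRecord record) {
        for (AbstractMetric m : record.metrics()) {
          System.out.println(record.name() + "." + m.name() + " = " + m.value());
        }
      }

      @Override
      public void flush() {
        // nothing buffered in this sketch
      }
    }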
+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java

@@ -53,10 +53,9 @@ public class UserProvider extends AbstractHttpContextInjectable<Principal> imple
    public String parseParam(String str) {
      if (str != null) {
        int len = str.length();
-        if (len < 1 || len > 31) {
+        if (len < 1) {
          throw new IllegalArgumentException(MessageFormat.format(
-            "Parameter [{0}], invalid value [{1}], it's length must be between 1 and 31",
-            getName(), str));
+            "Parameter [{0}], it's length must be at least 1", getName()));
        }
      }
      return super.parseParam(str);

+ 0 - 13
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java

@@ -108,13 +108,6 @@ public class TestUserProvider {
    userParam.parseParam("");
  }
 
-  @Test
-  @TestException(exception = IllegalArgumentException.class)
-  public void userNameTooLong() {
-    UserProvider.UserParam userParam = new UserProvider.UserParam("username");
-    userParam.parseParam("a123456789012345678901234567890x");
-  }
-
  @Test
  @TestException(exception = IllegalArgumentException.class)
  public void userNameInvalidStart() {
@@ -135,12 +128,6 @@ public class TestUserProvider {
    assertNotNull(userParam.parseParam("a"));
  }
 
-  @Test
-  public void userNameMaxLength() {
-    UserProvider.UserParam userParam = new UserProvider.UserParam("username");
-    assertNotNull(userParam.parseParam("a123456789012345678901234567890"));
-  }
-
  @Test
  public void userNameValidDollarSign() {
    UserProvider.UserParam userParam = new UserProvider.UserParam("username");

+ 175 - 111
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -92,17 +92,12 @@ Trunk (Unreleased)
    HDFS-3040. TestMulitipleNNDataBlockScanner is misspelled. (Madhukara Phatak
    via atm)
 
-    HDFS-3049. During the normal NN startup process, fall back on a different
-    edit log if we see one that is corrupt (Colin Patrick McCabe via todd)
-
    HDFS-3478. Test quotas with Long.Max_Value. (Sujay Rau via eli)
 
    HDFS-3498. Support replica removal in BlockPlacementPolicy and make
    BlockPlacementPolicyDefault extensible for reusing code in subclasses.
    (Junping Du via szetszwo)
 
-    HDFS-3571. Allow EditLogFileInputStream to read from a remote URL (todd)
-
    HDFS-3510.  Editlog pre-allocation is performed prior to writing edits
    to avoid partial edits case disk out of space.(Colin McCabe via suresh)
 
@@ -146,8 +141,6 @@ Trunk (Unreleased)
    HDFS-4052. BlockManager#invalidateWork should print log outside the lock.
    (Jing Zhao via suresh)
 
-    HDFS-4110. Refine a log printed in JNStorage. (Liang Xie via suresh)
-
    HDFS-4124. Refactor INodeDirectory#getExistingPathINodes() to enable 
    returning more than INode array. (Jing Zhao via suresh)
 
@@ -160,10 +153,6 @@ Trunk (Unreleased)
    HDFS-4152. Add a new class BlocksMapUpdateInfo for the parameter in
    INode.collectSubtreeBlocksAndClear(..). (Jing Zhao via szetszwo)
 
-    HDFS-4153. Add START_MSG/SHUTDOWN_MSG for JournalNode. (liang xie via atm)
-
-    HDFS-3935. Add JournalNode to the start/stop scripts (Andy Isaacson via todd)
-
    HDFS-4206. Change the fields in INode and its subclasses to private.
    (szetszwo)
 
@@ -176,6 +165,11 @@ Trunk (Unreleased)
    HDFS-4209. Clean up the addNode/addChild/addChildNoQuotaCheck methods in
    FSDirectory and INodeDirectory. (szetszwo)
 
+    HDFS-3358. Specify explicitly that the NN UI status total is talking
+    of persistent objects on heap. (harsh)
+
+    HDFS-4234. Use generic code for choosing datanode in Balancer.  (szetszwo)
+
  OPTIMIZATIONS
 
  BUG FIXES
@@ -268,107 +262,12 @@ Trunk (Unreleased)
    HDFS-4105. The SPNEGO user for secondary namenode should use the web 
    keytab. (Arpit Gupta via jitendra)
 
-  BREAKDOWN OF HDFS-3077 SUBTASKS
+    HDFS-4240. For nodegroup-aware block placement, when a node is excluded,
+    the nodes in the same nodegroup should also be excluded.  (Junping Du
+    via szetszwo)
 
-    HDFS-3077. Quorum-based protocol for reading and writing edit logs.
-    (todd, Brandon Li, and Hari Mankude via todd)
-    
-    HDFS-3694. Fix getEditLogManifest to fetch httpPort if necessary (todd)
-    
-    HDFS-3692. Support purgeEditLogs() call to remotely purge logs on JNs
-    (todd)
-    
-    HDFS-3693. JNStorage should read its storage info even before a writer
-    becomes active (todd)
-    
-    HDFS-3725. Fix QJM startup when individual JNs have gaps (todd)
-    
-    HDFS-3741. Exhaustive failure injection test for skipped RPCs (todd)
-    
-    HDFS-3773. TestNNWithQJM fails after HDFS-3741. (atm)
-    
-    HDFS-3793. Implement genericized format() in QJM (todd)
-    
-    HDFS-3795. QJM: validate journal dir at startup (todd)
-    
-    HDFS-3798. Avoid throwing NPE when finalizeSegment() is called on invalid
-    segment (todd)
-    
-    HDFS-3799. QJM: handle empty log segments during recovery (todd)
-    
-    HDFS-3797. QJM: add segment txid as a parameter to journal() RPC (todd)
-    
-    HDFS-3800. improvements to QJM fault testing (todd)
-    
-    HDFS-3823. QJM: TestQJMWithFaults fails occasionally because of missed
-    setting of HTTP port. (todd and atm)
-    
-    HDFS-3826. QJM: Some trivial logging / exception text improvements. (todd
-    and atm)
-    
-    HDFS-3839. QJM: hadoop-daemon.sh should be updated to accept "journalnode"
-    (eli)
-    
-    HDFS-3845. Fixes for edge cases in QJM recovery protocol (todd)
-    
-    HDFS-3877. QJM: Provide defaults for dfs.journalnode.*address (eli)
-    
-    HDFS-3863. Track last "committed" txid in QJM (todd)
-    
-    HDFS-3869. Expose non-file journal manager details in web UI (todd)
-    
-    HDFS-3884. Journal format() should reset cached values (todd)
-    
-    HDFS-3870. Add metrics to JournalNode (todd)
-    
-    HDFS-3891. Make selectInputStreams throw IOE instead of RTE (todd)
-    
-    HDFS-3726. If a logger misses an RPC, don't retry that logger until next
-    segment (todd)
-    
-    HDFS-3893. QJM: Make QJM work with security enabled. (atm)
-    
-    HDFS-3897. QJM: TestBlockToken fails after HDFS-3893. (atm)
-    
-    HDFS-3898. QJM: enable TCP_NODELAY for IPC (todd)
-    
-    HDFS-3885. QJM: optimize log sync when JN is lagging behind (todd)
-    
-    HDFS-3900. QJM: avoid validating log segments on log rolls (todd)
-    
-    HDFS-3901. QJM: send 'heartbeat' messages to JNs even when they are
-    out-of-sync (todd)
-    
-    HDFS-3899. QJM: Add client-side metrics (todd)
-    
-    HDFS-3914. QJM: acceptRecovery should abort current segment (todd)
-    
-    HDFS-3915. QJM: Failover fails with auth error in secure cluster (todd)
-    
-    HDFS-3906. QJM: quorum timeout on failover with large log segment (todd)
-    
-    HDFS-3840. JournalNodes log JournalNotFormattedException backtrace error
-    before being formatted (todd)
-    
-    HDFS-3894. QJM: testRecoverAfterDoubleFailures can be flaky due to IPC
-    client caching (todd)
-    
-    HDFS-3926. QJM: Add user documentation for QJM. (atm)
-    
-    HDFS-3943. QJM: remove currently-unused md5sum field (todd)
-    
-    HDFS-3950. QJM: misc TODO cleanup, improved log messages, etc. (todd)
-    
-    HDFS-3955. QJM: Make acceptRecovery() atomic. (todd)
-    
-    HDFS-3956. QJM: purge temporary files when no longer within retention
-    period (todd)
-    
-    HDFS-4004. TestJournalNode#testJournal fails because of test case execution
-    order (Chao Shi via todd)
-    
-    HDFS-4017. Unclosed FileInputStream in GetJournalEditServlet
-    (Chao Shi via todd)
+    HDFS-4260 Fix HDFS tests to set test dir to a valid HDFS path as opposed
+    to the local build path (Chris Nauroth via Sanjay)
 
 Release 2.0.3-alpha - Unreleased 
 
@@ -389,6 +288,9 @@ Release 2.0.3-alpha - Unreleased
 
    HDFS-4155. libhdfs implementation of hsync API (Liang Xie via todd)
 
+    HDFS-4213. Add an API to hsync for updating the last block length at the
+    namenode. (Jing Zhao via szetszwo)
+
  IMPROVEMENTS
  
    HDFS-3925. Prettify PipelineAck#toString() for printing to a log
@@ -490,6 +392,27 @@ Release 2.0.3-alpha - Unreleased
    HDFS-4214. OfflineEditsViewer should print out the offset at which it
    encountered an error. (Colin Patrick McCabe via atm)
 
+    HDFS-4199. Provide test for HdfsVolumeId. (Ivan A. Veselovsky via atm)
+
+    HDFS-3049. During the normal NN startup process, fall back on a different
+    edit log if we see one that is corrupt (Colin Patrick McCabe via todd)
+
+    HDFS-3571. Allow EditLogFileInputStream to read from a remote URL (todd)
+
+    HDFS-4110. Refine a log printed in JNStorage. (Liang Xie via suresh)
+
+    HDFS-4153. Add START_MSG/SHUTDOWN_MSG for JournalNode. (liang xie via atm)
+
+    HDFS-3935. Add JournalNode to the start/stop scripts (Andy Isaacson via todd)
+
+    HDFS-4268. Remove redundant enum NNHAStatusHeartbeat.State. (shv)
+
+    HDFS-3680. Allow customized audit logging in HDFS FSNamesystem. (Marcelo
+    Vanzin via atm)
+
+    HDFS-4130. BKJM: The reading for editlog at NN starting using bkjm is not efficient.
+    (Han Xiao via umamahesh)
+
  OPTIMIZATIONS
 
  BUG FIXES
@@ -643,6 +566,142 @@ Release 2.0.3-alpha - Unreleased
    of it is undefined after the iteration or modifications of the map.
    (szetszwo)
 
+    HDFS-4231. BackupNode: Introduce BackupState. (shv)
+
+    HDFS-4243. When replacing an INodeDirectory, the parent pointers of the
+    children of the child have to be updated to the new child.  (Jing Zhao
+    via szetszwo)
+
+    HDFS-4238. Standby namenode should not do purging of shared
+    storage edits. (todd)
+
+    HDFS-4282. TestEditLog.testFuzzSequences FAILED in all pre-commit test
+    (todd)
+
+    HDFS-4236. Remove artificial limit on username length introduced in
+    HDFS-4171. (tucu via suresh)
+
+    HDFS-4279. NameNode does not initialize generic conf keys when started
+    with -recover. (Colin Patrick McCabe via atm)
+
+    HDFS-4291. edit log unit tests leave stray test_edit_log_file around
+    (Colin Patrick McCabe via todd)
+
+    HDFS-4292. Sanity check not correct in RemoteBlockReader2.newBlockReader
+    (Binglin Chang via todd)
+
+    HDFS-4295. Using port 1023 should be valid when starting Secure DataNode
+    (Stephen Chu via todd)
+
+    HDFS-4294. Backwards compatibility is not maintained for TestVolumeId.
+    (Ivan A. Veselovsky and Robert Parker via atm)
+
+    HDFS-2264. NamenodeProtocol has the wrong value for clientPrincipal in
+    KerberosInfo annotation. (atm)
+
+  BREAKDOWN OF HDFS-3077 SUBTASKS
+
+    HDFS-3077. Quorum-based protocol for reading and writing edit logs.
+    (todd, Brandon Li, and Hari Mankude via todd)
+    
+    HDFS-3694. Fix getEditLogManifest to fetch httpPort if necessary (todd)
+    
+    HDFS-3692. Support purgeEditLogs() call to remotely purge logs on JNs
+    (todd)
+    
+    HDFS-3693. JNStorage should read its storage info even before a writer
+    becomes active (todd)
+    
+    HDFS-3725. Fix QJM startup when individual JNs have gaps (todd)
+    
+    HDFS-3741. Exhaustive failure injection test for skipped RPCs (todd)
+    
+    HDFS-3773. TestNNWithQJM fails after HDFS-3741. (atm)
+    
+    HDFS-3793. Implement genericized format() in QJM (todd)
+    
+    HDFS-3795. QJM: validate journal dir at startup (todd)
+    
+    HDFS-3798. Avoid throwing NPE when finalizeSegment() is called on invalid
+    segment (todd)
+    
+    HDFS-3799. QJM: handle empty log segments during recovery (todd)
+    
+    HDFS-3797. QJM: add segment txid as a parameter to journal() RPC (todd)
+    
+    HDFS-3800. improvements to QJM fault testing (todd)
+    
+    HDFS-3823. QJM: TestQJMWithFaults fails occasionally because of missed
+    setting of HTTP port. (todd and atm)
+    
+    HDFS-3826. QJM: Some trivial logging / exception text improvements. (todd
+    and atm)
+    
+    HDFS-3839. QJM: hadoop-daemon.sh should be updated to accept "journalnode"
+    (eli)
+    
+    HDFS-3845. Fixes for edge cases in QJM recovery protocol (todd)
+    
+    HDFS-3877. QJM: Provide defaults for dfs.journalnode.*address (eli)
+    
+    HDFS-3863. Track last "committed" txid in QJM (todd)
+    
+    HDFS-3869. Expose non-file journal manager details in web UI (todd)
+    
+    HDFS-3884. Journal format() should reset cached values (todd)
+    
+    HDFS-3870. Add metrics to JournalNode (todd)
+    
+    HDFS-3891. Make selectInputStreams throw IOE instead of RTE (todd)
+    
+    HDFS-3726. If a logger misses an RPC, don't retry that logger until next
+    segment (todd)
+    
+    HDFS-3893. QJM: Make QJM work with security enabled. (atm)
+    
+    HDFS-3897. QJM: TestBlockToken fails after HDFS-3893. (atm)
+    
+    HDFS-3898. QJM: enable TCP_NODELAY for IPC (todd)
+    
+    HDFS-3885. QJM: optimize log sync when JN is lagging behind (todd)
+    
+    HDFS-3900. QJM: avoid validating log segments on log rolls (todd)
+    
+    HDFS-3901. QJM: send 'heartbeat' messages to JNs even when they are
+    out-of-sync (todd)
+    
+    HDFS-3899. QJM: Add client-side metrics (todd)
+    
+    HDFS-3914. QJM: acceptRecovery should abort current segment (todd)
+    
+    HDFS-3915. QJM: Failover fails with auth error in secure cluster (todd)
+    
+    HDFS-3906. QJM: quorum timeout on failover with large log segment (todd)
+    
+    HDFS-3840. JournalNodes log JournalNotFormattedException backtrace error
+    before being formatted (todd)
+    
+    HDFS-3894. QJM: testRecoverAfterDoubleFailures can be flaky due to IPC
+    client caching (todd)
+    
+    HDFS-3926. QJM: Add user documentation for QJM. (atm)
+    
+    HDFS-3943. QJM: remove currently-unused md5sum field (todd)
+    
+    HDFS-3950. QJM: misc TODO cleanup, improved log messages, etc. (todd)
+    
+    HDFS-3955. QJM: Make acceptRecovery() atomic. (todd)
+    
+    HDFS-3956. QJM: purge temporary files when no longer within retention
+    period (todd)
+    
+    HDFS-4004. TestJournalNode#testJournal fails because of test case execution
+    order (Chao Shi via todd)
+    
+    HDFS-4017. Unclosed FileInputStream in GetJournalEditServlet
+    (Chao Shi via todd)
+
+
 Release 2.0.2-alpha - 2012-09-07 
 
  INCOMPATIBLE CHANGES
@@ -2032,6 +2091,11 @@ Release 0.23.6 - UNRELEASED
 
  BUG FIXES
 
+    HDFS-4247. saveNamespace should be tolerant of dangling lease (daryn)
+
+    HDFS-4248. Renaming directories may incorrectly remove the paths in leases
+    under the tree.  (daryn via szetszwo)
+
 Release 0.23.5 - UNRELEASED
 
  INCOMPATIBLE CHANGES

+ 26 - 44
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java

@@ -500,16 +500,18 @@ public class BookKeeperJournalManager implements JournalManager {
    } 
  }
 
-  EditLogInputStream getInputStream(long fromTxId, boolean inProgressOk)
-      throws IOException {
-    for (EditLogLedgerMetadata l : getLedgerList(inProgressOk)) {
-      long lastTxId = l.getLastTxId();
-      if (l.isInProgress()) {
-        lastTxId = recoverLastTxId(l, false);
-      }
-
-      if (fromTxId >= l.getFirstTxId() && fromTxId <= lastTxId) {
-        try {
+  @Override
+  public void selectInputStreams(Collection<EditLogInputStream> streams,
+      long fromTxId, boolean inProgressOk) throws IOException {
+    List<EditLogLedgerMetadata> currentLedgerList = getLedgerList(inProgressOk);
+    try {
+      BookKeeperEditLogInputStream elis = null;
+      for (EditLogLedgerMetadata l : currentLedgerList) {
+        long lastTxId = l.getLastTxId();
+        if (l.isInProgress()) {
+          lastTxId = recoverLastTxId(l, false);
+        }
+        if (fromTxId >= l.getFirstTxId() && fromTxId <= lastTxId) {
          LedgerHandle h;
          if (l.isInProgress()) { // we don't want to fence the current journal
            h = bkc.openLedgerNoRecovery(l.getLedgerId(),
@@ -518,42 +520,22 @@ public class BookKeeperJournalManager implements JournalManager {
            h = bkc.openLedger(l.getLedgerId(), BookKeeper.DigestType.MAC,
                digestpw.getBytes());
          }
-          BookKeeperEditLogInputStream s = new BookKeeperEditLogInputStream(h,
-              l);
-          s.skipTo(fromTxId);
-          return s;
-        } catch (BKException e) {
-          throw new IOException("Could not open ledger for " + fromTxId, e);
-        } catch (InterruptedException ie) {
-          Thread.currentThread().interrupt();
-          throw new IOException("Interrupted opening ledger for "
-                                         + fromTxId, ie);
+          elis = new BookKeeperEditLogInputStream(h, l);
+          elis.skipTo(fromTxId);
+        } else {
+          return;
        }
+        streams.add(elis);
+        if (elis.getLastTxId() == HdfsConstants.INVALID_TXID) {
+          return;
+        }
+        fromTxId = elis.getLastTxId() + 1;
      }
-    }
-    return null;
-  }
-
-  @Override
-  public void selectInputStreams(Collection<EditLogInputStream> streams,
-      long fromTxId, boolean inProgressOk) {
-    // NOTE: could probably be rewritten more efficiently
-    while (true) {
-      EditLogInputStream elis;
-      try {
-        elis = getInputStream(fromTxId, inProgressOk);
-      } catch (IOException e) {
-        LOG.error(e);
-        return;
-      }
-      if (elis == null) {
-        return;
-      }
-      streams.add(elis);
-      if (elis.getLastTxId() == HdfsConstants.INVALID_TXID) {
-        return;
-      }
-      fromTxId = elis.getLastTxId() + 1;
+    } catch (BKException e) {
+      throw new IOException("Could not open ledger for " + fromTxId, e);
+    } catch (InterruptedException ie) {
+      Thread.currentThread().interrupt();
+      throw new IOException("Interrupted opening ledger for " + fromTxId, ie);
    }
  }


+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java

@@ -28,6 +28,7 @@ import org.mockito.Mockito;

 import java.io.IOException;
 import java.net.URI;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.ArrayList;
 import java.util.Random;
@@ -315,13 +316,13 @@ public class TestBookKeeperJournalManager {
    out.close();
    bkjm.finalizeLogSegment(1, numTransactions);

-     
-    EditLogInputStream in = bkjm.getInputStream(1, true);
+    List<EditLogInputStream> in = new ArrayList<EditLogInputStream>();
+    bkjm.selectInputStreams(in, 1, true);
    try {
      assertEquals(numTransactions, 
-                   FSEditLogTestUtil.countTransactionsInStream(in));
+                   FSEditLogTestUtil.countTransactionsInStream(in.get(0)));
    } finally {
-      in.close();
+      in.get(0).close();
    }
  }


+ 21 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java

@@ -27,26 +27,38 @@ import org.apache.hadoop.classification.InterfaceStability;
 * HDFS-specific volume identifier which implements {@link VolumeId}. Can be
 * used to differentiate between the data directories on a single datanode. This
 * identifier is only unique on a per-datanode basis.
+ * 
+ * Note that invalid IDs are represented by {@link VolumeId#INVALID_VOLUME_ID}.
 */
@InterfaceStability.Unstable
@InterfaceAudience.Public
public class HdfsVolumeId implements VolumeId {
-
+  
  private final byte[] id;
-  private final boolean isValid;

-  public HdfsVolumeId(byte[] id, boolean isValid) {
+  public HdfsVolumeId(byte[] id) {
+    if (id == null) {
+      throw new NullPointerException("A valid Id can only be constructed " +
+      		"with a non-null byte array.");
+    }
    this.id = id;
-    this.isValid = isValid;
  }

  @Override
-  public boolean isValid() {
-    return isValid;
+  public final boolean isValid() {
+    return true;
  }

  @Override
  public int compareTo(VolumeId arg0) {
+    if (arg0 == null) {
+      return 1;
+    }
+    if (!arg0.isValid()) {
+      // any valid ID is greater 
+      // than any invalid ID: 
+      return 1;
+    }
    return hashCode() - arg0.hashCode();
  }

@@ -63,8 +75,10 @@ public class HdfsVolumeId implements VolumeId {
    if (obj == this) {
      return true;
    }
-
    HdfsVolumeId that = (HdfsVolumeId) obj;
+    // NB: if (!obj.isValid()) { return false; } check is not necessary
+    // because we have class identity checking above, and for this class
+    // isValid() is always true.
    return new EqualsBuilder().append(this.id, that.id).isEquals();
  }


+ 42 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/VolumeId.java

@@ -28,6 +28,48 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Public
public interface VolumeId extends Comparable<VolumeId> {

+  /**
+   * Represents an invalid Volume ID (ID for unknown content).
+   */
+  public static final VolumeId INVALID_VOLUME_ID = new VolumeId() {
+    
+    @Override
+    public int compareTo(VolumeId arg0) {
+      // This object is equal only to itself;
+      // It is greater than null, and
+      // is always less than any other VolumeId:
+      if (arg0 == null) {
+        return 1;
+      }
+      if (arg0 == this) {
+        return 0;
+      } else {
+        return -1;
+      }
+    }
+    
+    @Override
+    public boolean equals(Object obj) {
+      // this object is equal only to itself:
+      return (obj == this);
+    }
+    
+    @Override
+    public int hashCode() {
+      return Integer.MIN_VALUE;
+    }
+    
+    @Override
+    public boolean isValid() {
+      return false;
+    }
+    
+    @Override
+    public String toString() {
+      return "Invalid VolumeId";
+    }
+  };
+  
  /**
   * Indicates if the disk identifier is valid. Invalid identifiers indicate
   * that the block was not present, or the location could otherwise not be
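
The reworked identifiers compose as follows; a minimal usage sketch (hypothetical byte values, assuming the two classes above are on the classpath):

    VolumeId valid = new HdfsVolumeId(new byte[] { 0x01, 0x02 }); // non-null bytes required
    VolumeId unknown = VolumeId.INVALID_VOLUME_ID; // shared singleton, replaces new HdfsVolumeId(null, false)

    valid.isValid();            // true  - always, for HdfsVolumeId
    unknown.isValid();          // false
    valid.compareTo(unknown);   // 1: any valid ID sorts after any invalid ID
    unknown.compareTo(unknown); // 0: the invalid ID is equal only to itself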

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java

@@ -202,7 +202,7 @@ class BlockStorageLocationUtil {
      ArrayList<VolumeId> l = new ArrayList<VolumeId>(b.getLocations().length);
      // Start off all IDs as invalid, fill it in later with results from RPCs
      for (int i = 0; i < b.getLocations().length; i++) {
-        l.add(new HdfsVolumeId(null, false));
+        l.add(VolumeId.INVALID_VOLUME_ID);
      }
      blockVolumeIds.put(b, l);
    }
@@ -236,7 +236,7 @@ class BlockStorageLocationUtil {
        // Get the VolumeId by indexing into the list of VolumeIds
        // provided by the datanode
        byte[] volumeId = metaVolumeIds.get(volumeIndex);
-        HdfsVolumeId id = new HdfsVolumeId(volumeId, true);
+        HdfsVolumeId id = new HdfsVolumeId(volumeId);
        // Find out which index we are in the LocatedBlock's replicas
        LocatedBlock locBlock = extBlockToLocBlock.get(extBlock);
        DatanodeInfo[] dnInfos = locBlock.getLocations();

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -246,6 +246,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final String  DFS_HOSTS = "dfs.hosts";
  public static final String  DFS_HOSTS_EXCLUDE = "dfs.hosts.exclude";
  public static final String  DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces";
+  public static final String  DFS_NAMENODE_AUDIT_LOGGERS_KEY = "dfs.namenode.audit.loggers";
+  public static final String  DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME = "default";

  // Much code in hdfs is not yet updated to use these keys.
  public static final String  DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY = "dfs.client.block.write.locateFollowingBlock.retries";

+ 42 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -46,6 +46,7 @@ import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -1487,9 +1488,14 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
   */
  @Override
  public void hflush() throws IOException {
-    flushOrSync(false);
+    flushOrSync(false, EnumSet.noneOf(SyncFlag.class));
  }

+  @Override
+  public void hsync() throws IOException {
+    hsync(EnumSet.noneOf(SyncFlag.class));
+  }
+  
  /**
   * The expected semantics is all data have flushed out to all replicas 
   * and all replicas have done posix fsync equivalent - ie the OS has 
@@ -1498,17 +1504,35 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
   * Note that only the current block is flushed to the disk device.
   * To guarantee durable sync across block boundaries the stream should
   * be created with {@link CreateFlag#SYNC_BLOCK}.
+   * 
+   * @param syncFlags
+   *          Indicate the semantic of the sync. Currently used to specify
+   *          whether or not to update the block length in NameNode.
   */
-  @Override
-  public void hsync() throws IOException {
-    flushOrSync(true);
+  public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
+    flushOrSync(true, syncFlags);
  }

-  private void flushOrSync(boolean isSync) throws IOException {
+  /**
+   * Flush/Sync buffered data to DataNodes.
+   * 
+   * @param isSync
+   *          Whether or not to require all replicas to flush data to the disk
+   *          device
+   * @param syncFlags
+   *          Indicate extra detailed semantic of the flush/sync. Currently
+   *          mainly used to specify whether or not to update the file length in
+   *          the NameNode
+   * @throws IOException
+   */
+  private void flushOrSync(boolean isSync, EnumSet<SyncFlag> syncFlags)
+      throws IOException {
    dfsClient.checkOpen();
    isClosed();
    try {
      long toWaitFor;
+      long lastBlockLength = -1L;
+      boolean updateLength = syncFlags.contains(SyncFlag.UPDATE_LENGTH);
      synchronized (this) {
        /* Record current blockOffset. This might be changed inside
         * flushBuffer() where a partial checksum chunk might be flushed.
@@ -1572,13 +1596,20 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
      } // end synchronized

      waitForAckedSeqno(toWaitFor);
-
-      // If any new blocks were allocated since the last flush, 
-      // then persist block locations on namenode. 
-      //
-      if (persistBlocks.getAndSet(false)) {
+      
+      if (updateLength) {
+        synchronized (this) {
+          if (streamer != null && streamer.block != null) {
+            lastBlockLength = streamer.block.getNumBytes();
+          }
+        }
+      }
+      // If 1) any new blocks were allocated since the last flush, or 2) to
+      // update length in NN is requried, then persist block locations on
+      // namenode.
+      if (persistBlocks.getAndSet(false) || updateLength) {
        try {
-          dfsClient.namenode.fsync(src, dfsClient.clientName);
+          dfsClient.namenode.fsync(src, dfsClient.clientName, lastBlockLength);
        } catch (IOException ioe) {
          DFSClient.LOG.warn("Unable to persist blocks in hflush for " + src, ioe);
          // If we got an error here, it might be because some other thread called

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java

@@ -404,7 +404,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
    long firstChunkOffset = checksumInfo.getChunkOffset();
    
    if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
-        firstChunkOffset >= (startOffset + checksum.getBytesPerChecksum())) {
+        firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
      throw new IOException("BlockReader: error in first chunk offset (" +
                            firstChunkOffset + ") startOffset is " + 
                            startOffset + " for file " + file);
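
The corrected bound is worth spelling out: the first chunk the datanode returns must start at the checksum-chunk boundary at or before startOffset, so a valid reply satisfies startOffset - bytesPerChecksum < firstChunkOffset <= startOffset. A worked sketch with assumed values:

    // assumed: bytesPerChecksum = 512, startOffset = 700
    // valid firstChunkOffset is 512 (the chunk boundary containing offset 700):
    //   512 >= 0, 512 <= 700, and 512 > 700 - 512 = 188  -> accepted
    // a bogus reply of 100 is now rejected (100 <= 188), while the old test
    //   "firstChunkOffset >= startOffset + 512" could never fire, because any
    //   offset above startOffset was already rejected by the preceding clause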

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java

@@ -413,7 +413,7 @@ public class RemoteBlockReader2  implements BlockReader {
    long firstChunkOffset = checksumInfo.getChunkOffset();

    if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
-        firstChunkOffset >= (startOffset + checksum.getBytesPerChecksum())) {
+        firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
      throw new IOException("BlockReader: error in first chunk offset (" +
                            firstChunkOffset + ") startOffset is " +
                            startOffset + " for file " + file);

+ 21 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.client;

 import java.io.IOException;
+import java.util.EnumSet;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -56,4 +57,24 @@ public class HdfsDataOutputStream extends FSDataOutputStream {
  public synchronized int getCurrentBlockReplication() throws IOException {
    return ((DFSOutputStream)getWrappedStream()).getCurrentBlockReplication();
  }
+  
+  /**
+   * Sync buffered data to DataNodes (flush to disk devices).
+   * 
+   * @param syncFlags
+   *          Indicate the detailed semantic and actions of the hsync.
+   * @throws IOException
+   * @see FSDataOutputStream#hsync()
+   */
+  public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
+    ((DFSOutputStream) getWrappedStream()).hsync(syncFlags);
+  }
+  
+  public static enum SyncFlag {
+    /**
+     * When doing sync to DataNodes, also update the metadata (block
+     * length) in the NameNode
+     */
+    UPDATE_LENGTH;
+  }
}
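
A client-side usage sketch for the new flag (assumes the stream was opened on an HDFS cluster, so create() hands back an HdfsDataOutputStream; fs, path and payload are illustrative):

    import java.util.EnumSet;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

    FSDataOutputStream out = fs.create(path);
    out.write(payload);
    if (out instanceof HdfsDataOutputStream) {
      // flush replicas to disk and also publish the new length to the NameNode
      ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    } else {
      out.hsync(); // durability only; the visible file length may lag
    }
    out.close();

Without UPDATE_LENGTH the data is durable but the NameNode may still report the old length of the block under construction; with it, the fsync RPC below carries lastBlockLength.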

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -815,14 +815,15 @@ public interface ClientProtocol {
   * The file must be currently open for writing.
   * @param src The string representation of the path
   * @param client The string representation of the client
-   * 
+   * @param lastBlockLength The length of the last block (under construction) 
+   *                        to be reported to NameNode 
   * @throws AccessControlException permission denied
   * @throws FileNotFoundException file <code>src</code> is not found
   * @throws UnresolvedLinkException if <code>src</code> contains a symlink. 
   * @throws IOException If an I/O error occurred
   */
  @Idempotent
-  public void fsync(String src, String client) 
+  public void fsync(String src, String client, long lastBlockLength) 
      throws AccessControlException, FileNotFoundException, 
      UnresolvedLinkException, IOException;


+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -688,7 +688,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
  public FsyncResponseProto fsync(RpcController controller,
      FsyncRequestProto req) throws ServiceException {
    try {
-      server.fsync(req.getSrc(), req.getClient());
+      server.fsync(req.getSrc(), req.getClient(), req.getLastBlockLength());
      return VOID_FSYNC_RESPONSE;
    } catch (IOException e) {
      throw new ServiceException(e);

+ 5 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -659,12 +659,11 @@ public class ClientNamenodeProtocolTranslatorPB implements
  }

  @Override
-  public void fsync(String src, String client) throws AccessControlException,
-      FileNotFoundException, UnresolvedLinkException, IOException {
-    FsyncRequestProto req = FsyncRequestProto.newBuilder()
-        .setSrc(src)
-        .setClient(client)
-        .build();
+  public void fsync(String src, String client, long lastBlockLength)
+      throws AccessControlException, FileNotFoundException,
+      UnresolvedLinkException, IOException {
+    FsyncRequestProto req = FsyncRequestProto.newBuilder().setSrc(src)
+        .setClient(client).setLastBlockLength(lastBlockLength).build();
    try {
      rpcProxy.fsync(null, req);
    } catch (ServiceException e) {

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -1232,9 +1233,9 @@ public class PBHelper {
    if (s == null) return null;
    switch (s.getState()) {
    case ACTIVE:
-      return new NNHAStatusHeartbeat(NNHAStatusHeartbeat.State.ACTIVE, s.getTxid());
+      return new NNHAStatusHeartbeat(HAServiceState.ACTIVE, s.getTxid());
    case STANDBY:
-      return new NNHAStatusHeartbeat(NNHAStatusHeartbeat.State.STANDBY, s.getTxid());
+      return new NNHAStatusHeartbeat(HAServiceState.STANDBY, s.getTxid());
    default:
      throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" + s.getState());
    }

+ 75 - 192
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java

@@ -75,6 +75,7 @@ import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocat
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
@@ -557,7 +558,7 @@ public class Balancer {
    }
    
    /** Decide if still need to move more bytes */
-    protected boolean isMoveQuotaFull() {
+    protected boolean hasSpaceForScheduling() {
      return scheduledSize<maxSize2Move;
    }

@@ -922,23 +923,53 @@ public class Balancer {
    LOG.info(nodes.size() + " " + name + ": " + nodes);
  }

-  /* Decide all <source, target> pairs and
+  /** A matcher interface for matching nodes. */
+  private interface Matcher {
+    /** Given the cluster topology, does the left node match the right node? */
+    boolean match(NetworkTopology cluster, Node left,  Node right);
+  }
+
+  /** Match datanodes in the same node group. */
+  static final Matcher SAME_NODE_GROUP = new Matcher() {
+    @Override
+    public boolean match(NetworkTopology cluster, Node left, Node right) {
+      return cluster.isOnSameNodeGroup(left, right);
+    }
+  };
+
+  /** Match datanodes in the same rack. */
+  static final Matcher SAME_RACK = new Matcher() {
+    @Override
+    public boolean match(NetworkTopology cluster, Node left, Node right) {
+      return cluster.isOnSameRack(left, right);
+    }
+  };
+
+  /** Match any datanode with any other datanode. */
+  static final Matcher ANY_OTHER = new Matcher() {
+    @Override
+    public boolean match(NetworkTopology cluster, Node left, Node right) {
+      return left != right;
+    }
+  };
+
+  /**
+   * Decide all <source, target> pairs and
   * the number of bytes to move from a source to a target
   * Maximum bytes to be moved per node is
   * Min(1 Band worth of bytes,  MAX_SIZE_TO_MOVE).
   * Return total number of bytes to move in this iteration
   */
  private long chooseNodes() {
-    // First, match nodes on the same node group if cluster has nodegroup
-    // awareness
+    // First, match nodes on the same node group if cluster is node group aware
    if (cluster.isNodeGroupAware()) {
-      chooseNodesOnSameNodeGroup();
+      chooseNodes(SAME_NODE_GROUP);
    }
    
    // Then, match nodes on the same rack
-    chooseNodes(true);
-    // At last, match nodes on different racks
-    chooseNodes(false);
+    chooseNodes(SAME_RACK);
+    // At last, match all remaining nodes
+    chooseNodes(ANY_OTHER);
    
    assert (datanodes.size() >= sources.size()+targets.size())
      : "Mismatched number of datanodes (" +
@@ -952,57 +983,55 @@ public class Balancer {
    }
    return bytesToMove;
  }
-  
-  /**
-   * Decide all <source, target> pairs where source and target are 
-   * on the same NodeGroup
-   */
-  private void chooseNodesOnSameNodeGroup() {

+  /** Decide all <source, target> pairs according to the matcher. */
+  private void chooseNodes(final Matcher matcher) {
    /* first step: match each overUtilized datanode (source) to
-     * one or more underUtilized datanodes within same NodeGroup(targets).
+     * one or more underUtilized datanodes (targets).
     */
-    chooseOnSameNodeGroup(overUtilizedDatanodes, underUtilizedDatanodes);
-
-    /* match each remaining overutilized datanode (source) to below average 
-     * utilized datanodes within the same NodeGroup(targets).
+    chooseDatanodes(overUtilizedDatanodes, underUtilizedDatanodes, matcher);
+    
+    /* match each remaining overutilized datanode (source) to 
+     * below average utilized datanodes (targets).
     * Note only overutilized datanodes that haven't had that max bytes to move
     * satisfied in step 1 are selected
     */
-    chooseOnSameNodeGroup(overUtilizedDatanodes, belowAvgUtilizedDatanodes);
+    chooseDatanodes(overUtilizedDatanodes, belowAvgUtilizedDatanodes, matcher);

-    /* match each remaining underutilized datanode to above average utilized 
-     * datanodes within the same NodeGroup.
+    /* match each remaining underutilized datanode (target) to 
+     * above average utilized datanodes (source).
     * Note only underutilized datanodes that have not had that max bytes to
     * move satisfied in step 1 are selected.
     */
-    chooseOnSameNodeGroup(underUtilizedDatanodes, aboveAvgUtilizedDatanodes);
+    chooseDatanodes(underUtilizedDatanodes, aboveAvgUtilizedDatanodes, matcher);
  }
-  
+
  /**
-   * Match two sets of nodes within the same NodeGroup, one should be source
-   * nodes (utilization > Avg), and the other should be destination nodes 
-   * (utilization < Avg).
-   * @param datanodes
-   * @param candidates
+   * For each datanode, choose matching nodes from the candidates. Either the
+   * datanodes or the candidates are source nodes with (utilization > Avg), and
+   * the others are target nodes with (utilization < Avg).
   */
  private <D extends BalancerDatanode, C extends BalancerDatanode> void 
-      chooseOnSameNodeGroup(Collection<D> datanodes, Collection<C> candidates) {
+      chooseDatanodes(Collection<D> datanodes, Collection<C> candidates,
+          Matcher matcher) {
    for (Iterator<D> i = datanodes.iterator(); i.hasNext();) {
      final D datanode = i.next();
-      for(; chooseOnSameNodeGroup(datanode, candidates.iterator()); );
-      if (!datanode.isMoveQuotaFull()) {
+      for(; chooseForOneDatanode(datanode, candidates, matcher); );
+      if (!datanode.hasSpaceForScheduling()) {
        i.remove();
      }
    }
  }
-  
+
  /**
-   * Match one datanode with a set of candidates nodes within the same NodeGroup.
+   * For the given datanode, choose a candidate and then schedule it.
+   * @return true if a candidate is chosen; false if no candidates is chosen.
   */
-  private <T extends BalancerDatanode> boolean chooseOnSameNodeGroup(
-      BalancerDatanode dn, Iterator<T> candidates) {
-    final T chosen = chooseCandidateOnSameNodeGroup(dn, candidates);
+  private <C extends BalancerDatanode> boolean chooseForOneDatanode(
+      BalancerDatanode dn, Collection<C> candidates, Matcher matcher) {
+    final Iterator<C> i = candidates.iterator();
+    final C chosen = chooseCandidate(dn, i, matcher);
+
    if (chosen == null) {
      return false;
    }
@@ -1011,8 +1040,8 @@ public class Balancer {
    } else {
      matchSourceWithTargetToMove((Source)chosen, dn);
    }
-    if (!chosen.isMoveQuotaFull()) {
-      candidates.remove();
+    if (!chosen.hasSpaceForScheduling()) {
+      i.remove();
    }
    return true;
  }
@@ -1029,19 +1058,15 @@ public class Balancer {
        +source.datanode.getName() + " to " + target.datanode.getName());
  }
  
-  /** choose a datanode from <code>candidates</code> within the same NodeGroup 
-   * of <code>dn</code>.
-   */
-  private <T extends BalancerDatanode> T chooseCandidateOnSameNodeGroup(
-      BalancerDatanode dn, Iterator<T> candidates) {
-    if (dn.isMoveQuotaFull()) {
+  /** Choose a candidate for the given datanode. */
+  private <D extends BalancerDatanode, C extends BalancerDatanode>
+      C chooseCandidate(D dn, Iterator<C> candidates, Matcher matcher) {
+    if (dn.hasSpaceForScheduling()) {
      for(; candidates.hasNext(); ) {
-        final T c = candidates.next();
-        if (!c.isMoveQuotaFull()) {
+        final C c = candidates.next();
+        if (!c.hasSpaceForScheduling()) {
          candidates.remove();
-          continue;
-        }
-        if (cluster.isOnSameNodeGroup(dn.getDatanode(), c.getDatanode())) {
+        } else if (matcher.match(cluster, dn.getDatanode(), c.getDatanode())) {
          return c;
        }
      }
@@ -1049,148 +1074,6 @@ public class Balancer {
    return null;
  }

-  /* if onRack is true, decide all <source, target> pairs
-   * where source and target are on the same rack; Otherwise
-   * decide all <source, target> pairs where source and target are
-   * on different racks
-   */
-  private void chooseNodes(boolean onRack) {
-    /* first step: match each overUtilized datanode (source) to
-     * one or more underUtilized datanodes (targets).
-     */
-    chooseTargets(underUtilizedDatanodes, onRack);
-    
-    /* match each remaining overutilized datanode (source) to 
-     * below average utilized datanodes (targets).
-     * Note only overutilized datanodes that haven't had that max bytes to move
-     * satisfied in step 1 are selected
-     */
-    chooseTargets(belowAvgUtilizedDatanodes, onRack);
-
-    /* match each remaining underutilized datanode (target) to 
-     * above average utilized datanodes (source).
-     * Note only underutilized datanodes that have not had that max bytes to
-     * move satisfied in step 1 are selected.
-     */
-    chooseSources(aboveAvgUtilizedDatanodes, onRack);
-  }
-   
-  /* choose targets from the target candidate list for each over utilized
-   * source datanode. OnRackTarget determines if the chosen target 
-   * should be on the same rack as the source
-   */
-  private void chooseTargets(
-      Collection<BalancerDatanode> targetCandidates, boolean onRackTarget ) {
-    for (Iterator<Source> srcIterator = overUtilizedDatanodes.iterator();
-        srcIterator.hasNext();) {
-      Source source = srcIterator.next();
-      while (chooseTarget(source, targetCandidates.iterator(), onRackTarget)) {
-      }
-      if (!source.isMoveQuotaFull()) {
-        srcIterator.remove();
-      }
-    }
-    return;
-  }
-  
-  /* choose sources from the source candidate list for each under utilized
-   * target datanode. onRackSource determines if the chosen source 
-   * should be on the same rack as the target
-   */
-  private void chooseSources(
-      Collection<Source> sourceCandidates, boolean onRackSource) {
-    for (Iterator<BalancerDatanode> targetIterator = 
-      underUtilizedDatanodes.iterator(); targetIterator.hasNext();) {
-      BalancerDatanode target = targetIterator.next();
-      while (chooseSource(target, sourceCandidates.iterator(), onRackSource)) {
-      }
-      if (!target.isMoveQuotaFull()) {
-        targetIterator.remove();
-      }
-    }
-    return;
-  }
-
-  /* For the given source, choose targets from the target candidate list.
-   * OnRackTarget determines if the chosen target 
-   * should be on the same rack as the source
-   */
-  private boolean chooseTarget(Source source,
-      Iterator<BalancerDatanode> targetCandidates, boolean onRackTarget) {
-    if (!source.isMoveQuotaFull()) {
-      return false;
-    }
-    boolean foundTarget = false;
-    BalancerDatanode target = null;
-    while (!foundTarget && targetCandidates.hasNext()) {
-      target = targetCandidates.next();
-      if (!target.isMoveQuotaFull()) {
-        targetCandidates.remove();
-        continue;
-      }
-      if (onRackTarget) {
-        // choose from on-rack nodes
-        if (cluster.isOnSameRack(source.datanode, target.datanode)) {
-          foundTarget = true;
-        }
-      } else {
-        // choose from off-rack nodes
-        if (!cluster.isOnSameRack(source.datanode, target.datanode)) {
-          foundTarget = true;
-        }
-      }
-    }
-    if (foundTarget) {
-      assert(target != null):"Choose a null target";
-      matchSourceWithTargetToMove(source, target);
-      if (!target.isMoveQuotaFull()) {
-        targetCandidates.remove();
-      }
-      return true;
-    }
-    return false;
-  }
-
-  /* For the given target, choose sources from the source candidate list.
-   * OnRackSource determines if the chosen source 
-   * should be on the same rack as the target
-   */
-  private boolean chooseSource(BalancerDatanode target,
-      Iterator<Source> sourceCandidates, boolean onRackSource) {
-    if (!target.isMoveQuotaFull()) {
-      return false;
-    }
-    boolean foundSource = false;
-    Source source = null;
-    while (!foundSource && sourceCandidates.hasNext()) {
-      source = sourceCandidates.next();
-      if (!source.isMoveQuotaFull()) {
-        sourceCandidates.remove();
-        continue;
-      }
-      if (onRackSource) {
-        // choose from on-rack nodes
-        if ( cluster.isOnSameRack(source.getDatanode(), target.getDatanode())) {
-          foundSource = true;
-        }
-      } else {
-        // choose from off-rack nodes
-        if (!cluster.isOnSameRack(source.datanode, target.datanode)) {
-          foundSource = true;
-        }
-      }
-    }
-    if (foundSource) {
-      assert(source != null):"Choose a null source";
-      matchSourceWithTargetToMove(source, target);
-      if ( !source.isMoveQuotaFull()) {
-          sourceCandidates.remove();
-        }
-      return true;
-    }
-    return false;
-  }
-
   private static class BytesMoved {
  private static class BytesMoved {
    private long bytesMoved = 0L;;
    private synchronized void inc( long bytes ) {
+ 24 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

@@ -152,8 +152,9 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       
       
     List<DatanodeDescriptor> results = 
     List<DatanodeDescriptor> results = 
       new ArrayList<DatanodeDescriptor>(chosenNodes);
       new ArrayList<DatanodeDescriptor>(chosenNodes);
-    for (Node node:chosenNodes) {
-      excludedNodes.put(node, node);
+    for (DatanodeDescriptor node:chosenNodes) {
+      // add localMachine and related nodes to excludedNodes
+      addToExcludedNodes(node, excludedNodes);
       adjustExcludedNodes(excludedNodes, node);
       adjustExcludedNodes(excludedNodes, node);
    }
      
@@ -235,7 +236,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
               + totalReplicasExpected + "\n"
               + e.getMessage());
      if (avoidStaleNodes) {
-        // ecxludedNodes now has - initial excludedNodes, any nodes that were
+        // excludedNodes now has - initial excludedNodes, any nodes that were
        // chosen and nodes that were tried but were not chosen because they
        // were stale, decommissioned or for any other reason a node is not
        // chosen for write. Retry again now not avoiding stale node
@@ -273,6 +274,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
        if (isGoodTarget(localMachine, blocksize, maxNodesPerRack, false,
            results, avoidStaleNodes)) {
          results.add(localMachine);
+          // add localMachine and related nodes to excludedNode
+          addToExcludedNodes(localMachine, excludedNodes);
           return localMachine;
           return localMachine;
         }
         }
       } 
       } 
@@ -281,7 +284,19 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     return chooseLocalRack(localMachine, excludedNodes, blocksize,
     return chooseLocalRack(localMachine, excludedNodes, blocksize,
         maxNodesPerRack, results, avoidStaleNodes);
         maxNodesPerRack, results, avoidStaleNodes);
   }
   }
-    
+  
+  /**
+   * Add <i>localMachine</i> and related nodes to <i>excludedNodes</i>
+   * for next replica choosing. In sub class, we can add more nodes within
+   * the same failure domain of localMachine
+   * @return number of new excluded nodes
+   */
+  protected int addToExcludedNodes(DatanodeDescriptor localMachine,
+      HashMap<Node, Node> excludedNodes) {
+    Node node = excludedNodes.put(localMachine, localMachine);
+    return node == null?1:0;
+  }
+
   /* choose one node from the rack that <i>localMachine</i> is on.
   /* choose one node from the rack that <i>localMachine</i> is on.
    * if no such node is available, choose one node from the rack where
    * if no such node is available, choose one node from the rack where
    * a second replica is on.
    * a second replica is on.
@@ -392,6 +407,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
         if (isGoodTarget(chosenNode, blocksize, 
         if (isGoodTarget(chosenNode, blocksize, 
                 maxNodesPerRack, results, avoidStaleNodes)) {
                 maxNodesPerRack, results, avoidStaleNodes)) {
           results.add(chosenNode);
           results.add(chosenNode);
+          // add chosenNode and related nodes to excludedNode
+          addToExcludedNodes(chosenNode, excludedNodes);
           adjustExcludedNodes(excludedNodes, chosenNode);
           adjustExcludedNodes(excludedNodes, chosenNode);
           return chosenNode;
           return chosenNode;
         } else {
         } else {
@@ -441,6 +458,9 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
               maxNodesPerRack, results, avoidStaleNodes)) {
               maxNodesPerRack, results, avoidStaleNodes)) {
           numOfReplicas--;
           numOfReplicas--;
           results.add(chosenNode);
           results.add(chosenNode);
+          // add chosenNode and related nodes to excludedNode
+          int newExcludedNodes = addToExcludedNodes(chosenNode, excludedNodes);
+          numOfAvailableNodes -= newExcludedNodes;
           adjustExcludedNodes(excludedNodes, chosenNode);
           adjustExcludedNodes(excludedNodes, chosenNode);
         } else {
         } else {
           badTarget = true;
           badTarget = true;

+ 21 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java

@@ -240,6 +240,27 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
     String nodeGroupString = cur.getNetworkLocation();
     String nodeGroupString = cur.getNetworkLocation();
     return NetworkTopology.getFirstHalf(nodeGroupString);
     return NetworkTopology.getFirstHalf(nodeGroupString);
   }
   }
+  
+  /**
+   * Find other nodes in the same nodegroup of <i>localMachine</i> and add them
+   * into <i>excludeNodes</i> as replica should not be duplicated for nodes 
+   * within the same nodegroup
+   * @return number of new excluded nodes
+   */
+  protected int addToExcludedNodes(DatanodeDescriptor localMachine,
+      HashMap<Node, Node> excludedNodes) {
+    int countOfExcludedNodes = 0;
+    String nodeGroupScope = localMachine.getNetworkLocation();
+    List<Node> leafNodes = clusterMap.getLeaves(nodeGroupScope);
+    for (Node leafNode : leafNodes) {
+      Node node = excludedNodes.put(leafNode, leafNode);
+      if (node == null) {
+        // not a existing node in excludedNodes
+        countOfExcludedNodes++;
+      }
+    }
+    return countOfExcludedNodes;
+  }
 
 
   /**
   /**
    * Pick up replica node set for deleting replica as over-replicated. 
    * Pick up replica node set for deleting replica as over-replicated. 

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java

@@ -26,6 +26,7 @@ import java.util.concurrent.CopyOnWriteArrayList;

 import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -411,7 +412,7 @@ class BPOfferService {
    final long txid = nnHaState.getTxId();
    
    final boolean nnClaimsActive =
-      nnHaState.getState() == NNHAStatusHeartbeat.State.ACTIVE;
+      nnHaState.getState() == HAServiceState.ACTIVE;
    final boolean bposThinksActive = bpServiceToActive == actor;
    final boolean isMoreRecentClaim = txid > lastActiveClaimTxId; 
    

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java

@@ -140,7 +140,7 @@ public class SecureDataNodeStarter implements Daemon {
    System.err.println("Successfully obtained privileged resources (streaming port = "
        + ss + " ) (http listener port = " + listener.getConnection() +")");
    
-    if ((ss.getLocalPort() >= 1023 || listener.getPort() >= 1023) &&
+    if ((ss.getLocalPort() > 1023 || listener.getPort() > 1023) &&
        UserGroupInformation.isSecurityEnabled()) {
      throw new RuntimeException("Cannot start secure datanode with unprivileged ports");
    }
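
The boundary fix matters because privileged ports are 1-1023; the old test treated port 1023 itself as unprivileged. A quick check with assumed values:

    int port = 1023;                  // still a privileged port
    boolean oldReject = port >= 1023; // true  - 1023 wrongly rejected before
    boolean newReject = port > 1023;  // false - 1023 accepted after the fix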

+ 61 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AuditLogger.java

@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.net.InetAddress;
+import java.security.Principal;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+
+/**
+ * Interface defining an audit logger.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface AuditLogger {
+
+  /**
+   * Called during initialization of the logger.
+   *
+   * @param conf The configuration object.
+   */
+  void initialize(Configuration conf);
+
+  /**
+   * Called to log an audit event.
+   * <p>
+   * This method must return as quickly as possible, since it's called
+   * in a critical section of the NameNode's operation.
+   *
+   * @param succeeded Whether authorization succeeded.
+   * @param userName Name of the user executing the request.
+   * @param addr Remote address of the request.
+   * @param cmd The requested command.
+   * @param src Path of affected source file.
+   * @param dst Path of affected destination file (if any).
+   * @param stat File information for operations that change the file's
+   *             metadata (permissions, owner, times, etc).
+   */
+  void logAuditEvent(boolean succeeded, String userName,
+      InetAddress addr, String cmd, String src, String dst,
+      FileStatus stat);
+
+}
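
A sketch of what an implementation might look like; the class and its log destination are hypothetical, not part of this change. Implementations would be wired in through the dfs.namenode.audit.loggers key added to DFSConfigKeys above:

    package org.apache.hadoop.hdfs.server.namenode;

    import java.net.InetAddress;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;

    /** Hypothetical audit logger that forwards events to commons-logging. */
    public class LoggingAuditLogger implements AuditLogger {
      private static final Log LOG = LogFactory.getLog(LoggingAuditLogger.class);

      @Override
      public void initialize(Configuration conf) {
        // read any custom keys from conf here
      }

      @Override
      public void logAuditEvent(boolean succeeded, String userName,
          InetAddress addr, String cmd, String src, String dst,
          FileStatus stat) {
        // must stay cheap: called from a critical section of the NameNode
        if (LOG.isInfoEnabled()) {
          LOG.info("allowed=" + succeeded + " ugi=" + userName + " ip=" + addr
              + " cmd=" + cmd + " src=" + src + " dst=" + dst);
        }
      }
    }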

+ 51 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java

@@ -24,6 +24,7 @@ import java.net.SocketTimeoutException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.NameNodeProxies;
@@ -35,6 +36,7 @@ import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.protocol.FenceResponse;
 import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
 import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
@@ -414,14 +416,23 @@ public class BackupNode extends NameNode {
      + HdfsConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion();
    return nsInfo;
  }
-  
+
  @Override
+  protected String getNameServiceId(Configuration conf) {
+    return DFSUtil.getBackupNameServiceId(conf);
+  }
+
+  protected HAState createHAState() {
+    return new BackupState();
+  }
+
+  @Override // NameNode
  protected NameNodeHAContext createHAContext() {
    return new BNHAContext();
  }
-  
+
  private class BNHAContext extends NameNodeHAContext {
-    @Override // NameNode
+    @Override // NameNodeHAContext
    public void checkOperation(OperationCategory op)
        throws StandbyException {
      if (op == OperationCategory.UNCHECKED ||
@@ -435,10 +446,42 @@ public class BackupNode extends NameNode {
        throw new StandbyException(msg);
      }
    }
-  }
-  
-  @Override
-  protected String getNameServiceId(Configuration conf) {
-    return DFSUtil.getBackupNameServiceId(conf);
+
+    @Override // NameNodeHAContext
+    public void prepareToStopStandbyServices() throws ServiceFailedException {
+    }
+
+    /**
+     * Start services for BackupNode.
+     * <p>
+     * The following services should be muted
+     * (not run or not pass any control commands to DataNodes)
+     * on BackupNode:
+     * {@link LeaseManager.Monitor} protected by SafeMode.
+     * {@link BlockManager.ReplicationMonitor} protected by SafeMode.
+     * {@link HeartbeatManager.Monitor} protected by SafeMode.
+     * {@link DecommissionManager.Monitor} need to prohibit refreshNodes().
+     * {@link PendingReplicationBlocks.PendingReplicationMonitor} harmless,
+     * because ReplicationMonitor is muted.
+     */
+    @Override
+    public void startActiveServices() throws IOException {
+      try {
+        namesystem.startActiveServices();
+      } catch (Throwable t) {
+        doImmediateShutdown(t);
+      }
+    }
+
+    @Override
+    public void stopActiveServices() throws IOException {
+      try {
+        if (namesystem != null) {
+          namesystem.stopActiveServices();
+        }
+      } catch (Throwable t) {
+        doImmediateShutdown(t);
+      }
+    }
  }
}

+ 53 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java

@@ -0,0 +1,53 @@
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
+import org.apache.hadoop.ipc.StandbyException;
+
+@InterfaceAudience.Private
+public class BackupState extends HAState {
+
+  public BackupState() {
+    super(HAServiceState.STANDBY);
+  }
+
+  @Override // HAState
+  public void checkOperation(HAContext context, OperationCategory op)
+      throws StandbyException {
+    context.checkOperation(op);
+  }
+
+  @Override // HAState
+  public boolean shouldPopulateReplQueues() {
+    return false;
+  }
+
+  @Override // HAState
+  public void enterState(HAContext context) throws ServiceFailedException {
+    try {
+      context.startActiveServices();
+    } catch (IOException e) {
+      throw new ServiceFailedException("Failed to start backup services", e);
+    }
+  }
+
+  @Override // HAState
+  public void exitState(HAContext context) throws ServiceFailedException {
+    try {
+      context.stopActiveServices();
+    } catch (IOException e) {
+      throw new ServiceFailedException("Failed to stop backup services", e);
+    }
+  }
+
+  @Override // HAState
+  public void prepareToExitState(HAContext context) throws ServiceFailedException {
+    context.prepareToStopStandbyServices();
+  }
+}

+ 33 - 21
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -575,6 +575,8 @@ public class FSDirectory implements Closeable {
        // update modification time of dst and the parent of src
        srcInodes[srcInodes.length-2].setModificationTime(timestamp);
        dstInodes[dstInodes.length-2].setModificationTime(timestamp);
+        // update moved leases with new filename
+        getFSNamesystem().unprotectedChangeLease(src, dst);        
        return true;
      }
    } finally {
@@ -729,6 +731,8 @@ public class FSDirectory implements Closeable {
        }
        srcInodes[srcInodes.length - 2].setModificationTime(timestamp);
        dstInodes[dstInodes.length - 2].setModificationTime(timestamp);
+        // update moved lease with new filename
+        getFSNamesystem().unprotectedChangeLease(src, dst);

        // Collect the blocks and remove the lease for previous dst
        int filesDeleted = 0;
@@ -1071,31 +1075,39 @@ public class FSDirectory implements Closeable {
      throws IOException, UnresolvedLinkException {    
    writeLock();
    try {
-      //
-      // Remove the node from the namespace 
-      //
-      if (!oldnode.removeNode()) {
-        NameNode.stateChangeLog.warn("DIR* FSDirectory.replaceNode: " +
-                                     "failed to remove " + path);
-        throw new IOException("FSDirectory.replaceNode: " +
-                              "failed to remove " + path);
-      } 
-      
-      /* Currently oldnode and newnode are assumed to contain the same
-       * blocks. Otherwise, blocks need to be removed from the blocksMap.
-       */
-      rootDir.addINode(path, newnode); 
-
-      int index = 0;
-      for (BlockInfo b : newnode.getBlocks()) {
-        BlockInfo info = getBlockManager().addBlockCollection(b, newnode);
-        newnode.setBlock(index, info); // inode refers to the block in BlocksMap
-        index++;
-      }
+      unprotectedReplaceNode(path, oldnode, newnode);
    } finally {
      writeUnlock();
    }
  }
+  
+  void unprotectedReplaceNode(String path, INodeFile oldnode, INodeFile newnode)
+      throws IOException, UnresolvedLinkException {
+    assert hasWriteLock();
+    INodeDirectory parent = oldnode.parent;
+    // Remove the node from the namespace 
+    if (!oldnode.removeNode()) {
+      NameNode.stateChangeLog.warn("DIR* FSDirectory.replaceNode: " +
+                                   "failed to remove " + path);
+      throw new IOException("FSDirectory.replaceNode: " +
+                            "failed to remove " + path);
+    } 
+    
+    // Parent should be non-null, otherwise oldnode.removeNode() will return
+    // false
+    newnode.setLocalName(oldnode.getLocalNameBytes());
+    parent.addChild(newnode, true);
+    
+    /* Currently oldnode and newnode are assumed to contain the same
+     * blocks. Otherwise, blocks need to be removed from the blocksMap.
+     */
+    int index = 0;
+    for (BlockInfo b : newnode.getBlocks()) {
+      BlockInfo info = getBlockManager().addBlockCollection(b, newnode);
+      newnode.setBlock(index, info); // inode refers to the block in BlocksMap
+      index++;
+    }
+  }

  /**
   * Get a partial listing of the indicated directory

+ 14 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -878,6 +878,11 @@ public class FSEditLog implements LogsPurgeable {
    return journalSet;
  }
  
+  @VisibleForTesting
+  synchronized void setJournalSetForTesting(JournalSet js) {
+    this.journalSet = js;
+  }
+  
  /**
   * Used only by tests.
   */
@@ -1031,9 +1036,18 @@ public class FSEditLog implements LogsPurgeable {

  /**
   * Archive any log files that are older than the given txid.
+   * 
+   * If the edit log is not open for write, then this call returns with no
+   * effect.
   */
  @Override
  public synchronized void purgeLogsOlderThan(final long minTxIdToKeep) {
+    // Should not purge logs unless they are open for write.
+    // This prevents the SBN from purging logs on shared storage, for example.
+    if (!isOpenForWrite()) {
+      return;
+    }
+    
    assert curSegmentTxId == HdfsConstants.INVALID_TXID || // on format this is no-op
      minTxIdToKeep <= curSegmentTxId :
      "cannot purge logs older than txid " + minTxIdToKeep +

+ 1 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -31,7 +31,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
@@ -322,7 +321,7 @@ public class FSEditLogLoader {
         INodeFileUnderConstruction ucFile = (INodeFileUnderConstruction) oldFile;
         fsNamesys.leaseManager.removeLeaseWithPrefixPath(addCloseOp.path);
         INodeFile newFile = ucFile.convertToInodeFile();
-        fsDir.replaceNode(addCloseOp.path, ucFile, newFile);
+        fsDir.unprotectedReplaceNode(addCloseOp.path, ucFile, newFile);
       }
       break;
     }
@@ -360,10 +359,8 @@ public class FSEditLogLoader {
     }
     case OP_RENAME_OLD: {
       RenameOldOp renameOp = (RenameOldOp)op;
-      HdfsFileStatus dinfo = fsDir.getFileInfo(renameOp.dst, false);
       fsDir.unprotectedRenameTo(renameOp.src, renameOp.dst,
                                 renameOp.timestamp);
-      fsNamesys.unprotectedChangeLease(renameOp.src, renameOp.dst, dinfo);
       break;
     }
     case OP_DELETE: {
@@ -433,11 +430,8 @@ public class FSEditLogLoader {
     }
     case OP_RENAME: {
       RenameOp renameOp = (RenameOp)op;
-
-      HdfsFileStatus dinfo = fsDir.getFileInfo(renameOp.dst, false);
       fsDir.unprotectedRenameTo(renameOp.src, renameOp.dst,
                                 renameOp.timestamp, renameOp.options);
-      fsNamesys.unprotectedChangeLease(renameOp.src, renameOp.dst, dinfo);
       break;
     }
     case OP_GET_DELEGATION_TOKEN: {

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java

@@ -197,7 +197,7 @@ public class FSImageSerialization {
   public static String readString(DataInputStream in) throws IOException {
     DeprecatedUTF8 ustr = TL_DATA.get().U_STR;
     ustr.readFields(in);
-    return ustr.toString();
+    return ustr.toStringChecked();
   }
 
   static String readString_EmptyAsNull(DataInputStream in) throws IOException {
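The switch to toStringChecked() hardens fsimage loading: it is assumed to validate the stored bytes and throw an IOException on malformed UTF-8, where toString() would silently produce a mangled string. A hedged sketch of the stricter read path; the helper name is hypothetical:

    // Assumes DeprecatedUTF8 inherits toStringChecked() and that it throws
    // IOException on invalid byte sequences.
    static String readStringStrict(DataInputStream in) throws IOException {
      DeprecatedUTF8 ustr = new DeprecatedUTF8();
      ustr.readFields(in);
      return ustr.toStringChecked(); // fails fast on corrupt image data
    }
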

+ 173 - 115
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -34,6 +34,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT;
@@ -111,6 +113,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Options;
@@ -121,6 +124,7 @@ import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
@@ -163,12 +167,10 @@ import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
-import org.apache.hadoop.hdfs.server.namenode.ha.ActiveState;
 import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer;
-import org.apache.hadoop.hdfs.server.namenode.ha.StandbyState;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -246,32 +248,32 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       }
   };
 
-  private static final void logAuditEvent(UserGroupInformation ugi,
+  private boolean isAuditEnabled() {
+    return !isDefaultAuditLogger || auditLog.isInfoEnabled();
+  }
+
+  private void logAuditEvent(UserGroupInformation ugi,
       InetAddress addr, String cmd, String src, String dst,
       HdfsFileStatus stat) {
     logAuditEvent(true, ugi, addr, cmd, src, dst, stat);
   }
 
-  private static final void logAuditEvent(boolean succeeded,
+  private void logAuditEvent(boolean succeeded,
       UserGroupInformation ugi, InetAddress addr, String cmd, String src,
       String dst, HdfsFileStatus stat) {
-    final StringBuilder sb = auditBuffer.get();
-    sb.setLength(0);
-    sb.append("allowed=").append(succeeded).append("\t");
-    sb.append("ugi=").append(ugi).append("\t");
-    sb.append("ip=").append(addr).append("\t");
-    sb.append("cmd=").append(cmd).append("\t");
-    sb.append("src=").append(src).append("\t");
-    sb.append("dst=").append(dst).append("\t");
-    if (null == stat) {
-      sb.append("perm=null");
-    } else {
-      sb.append("perm=");
-      sb.append(stat.getOwner()).append(":");
-      sb.append(stat.getGroup()).append(":");
-      sb.append(stat.getPermission());
+    FileStatus status = null;
+    if (stat != null) {
+      Path symlink = stat.isSymlink() ? new Path(stat.getSymlink()) : null;
+      Path path = dst != null ? new Path(dst) : new Path(src);
+      status = new FileStatus(stat.getLen(), stat.isDir(),
+          stat.getReplication(), stat.getBlockSize(), stat.getModificationTime(),
+          stat.getAccessTime(), stat.getPermission(), stat.getOwner(),
+          stat.getGroup(), symlink, path);
+    }
+    for (AuditLogger logger : auditLoggers) {
+      logger.logAuditEvent(succeeded, ugi.toString(), addr,
+          cmd, src, dst, status);
     }
-    auditLog.info(sb);
   }
 
   /**
@@ -304,6 +306,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   final DelegationTokenSecretManager dtSecretManager;
   private final boolean alwaysUseDelegationTokensForTests;
   
+  // Tracks whether the default audit logger is the only configured audit
+  // logger; this allows isAuditEnabled() to return false in case the
+  // underlying logger is disabled, and avoid some unnecessary work.
+  private final boolean isDefaultAuditLogger;
+  private final List<AuditLogger> auditLoggers;

   /** The namespace tree. */
   FSDirectory dir;
@@ -536,14 +543,50 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       this.dtSecretManager = createDelegationTokenSecretManager(conf);
       this.dir = new FSDirectory(fsImage, this, conf);
       this.safeMode = new SafeModeInfo(conf);
-
+      this.auditLoggers = initAuditLoggers(conf);
+      this.isDefaultAuditLogger = auditLoggers.size() == 1 &&
+        auditLoggers.get(0) instanceof DefaultAuditLogger;
     } catch(IOException e) {
       LOG.error(getClass().getSimpleName() + " initialization failed.", e);
       close();
       throw e;
+    } catch (RuntimeException re) {
+      LOG.error(getClass().getSimpleName() + " initialization failed.", re);
+      close();
+      throw re;
     }
   }
 
+  private List<AuditLogger> initAuditLoggers(Configuration conf) {
+    // Initialize the custom access loggers if configured.
+    Collection<String> alClasses = conf.getStringCollection(DFS_NAMENODE_AUDIT_LOGGERS_KEY);
+    List<AuditLogger> auditLoggers = Lists.newArrayList();
+    if (alClasses != null && !alClasses.isEmpty()) {
+      for (String className : alClasses) {
+        try {
+          AuditLogger logger;
+          if (DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME.equals(className)) {
+            logger = new DefaultAuditLogger();
+          } else {
+            logger = (AuditLogger) Class.forName(className).newInstance();
+          }
+          logger.initialize(conf);
+          auditLoggers.add(logger);
+        } catch (RuntimeException re) {
+          throw re;
+        } catch (Exception e) {
+          throw new RuntimeException(e);
+        }
+      }
+    }
+
+    // Make sure there is at least one logger installed.
+    if (auditLoggers.isEmpty()) {
+      auditLoggers.add(new DefaultAuditLogger());
+    }
+    return auditLoggers;
+  }
+
   void loadFSImage(StartupOption startOpt, FSImage fsImage, boolean haEnabled)
       throws IOException {
     // format before starting up if requested
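initAuditLoggers() makes NameNode auditing pluggable: each class name listed under the new DFS_NAMENODE_AUDIT_LOGGERS_KEY is instantiated reflectively, initialized with the configuration, and invoked for every audit event; the built-in DefaultAuditLogger is installed when nothing is configured. A hedged example of a custom implementation against the AuditLogger interface as it is used in this diff; the package and class name are hypothetical, and the key's string value is assumed to be dfs.namenode.audit.loggers:

    import java.net.InetAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.hdfs.server.namenode.AuditLogger;

    // Hypothetical audit logger; presumably wired in with
    //   dfs.namenode.audit.loggers = org.example.StderrAuditLogger
    public class StderrAuditLogger implements AuditLogger {
      @Override
      public void initialize(Configuration conf) {
        // No custom settings needed for this sketch.
      }

      @Override
      public void logAuditEvent(boolean succeeded, String userName,
          InetAddress addr, String cmd, String src, String dst,
          FileStatus status) {
        System.err.println("allowed=" + succeeded + "\tugi=" + userName
            + "\tip=" + addr + "\tcmd=" + cmd + "\tsrc=" + src + "\tdst=" + dst);
      }
    }
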
@@ -1003,8 +1046,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       // start in active.
       return haEnabled;
     }
-  
-    return haContext.getState() instanceof StandbyState;
+
+    return HAServiceState.STANDBY == haContext.getState().getServiceState();
   }
 
   /**
@@ -1030,7 +1073,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     long totalInodes = this.dir.totalInodes();
     long totalBlocks = this.getBlocksTotal();
     out.println(totalInodes + " files and directories, " + totalBlocks
-        + " blocks = " + (totalInodes + totalBlocks) + " total");
+        + " blocks = " + (totalInodes + totalBlocks)
+        + " total filesystem objects");

     blockManager.metaSave(out);
   }
@@ -1076,7 +1120,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     try {
       setPermissionInt(src, permission);
     } catch (AccessControlException e) {
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         logAuditEvent(false, UserGroupInformation.getCurrentUser(),
                       getRemoteIp(),
                       "setPermission", src, null, null);
@@ -1098,14 +1142,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       }
       checkOwner(src);
       dir.setPermission(src, permission);
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         resultingStat = dir.getFileInfo(src, false);
       }
     } finally {
       writeUnlock();
     }
     getEditLog().logSync();
-    if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+    if (isAuditEnabled() && isExternalInvocation()) {
       logAuditEvent(UserGroupInformation.getCurrentUser(),
                     getRemoteIp(),
                     "setPermission", src, null, resultingStat);
@@ -1122,7 +1166,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     try {
       setOwnerInt(src, username, group);
     } catch (AccessControlException e) {
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         logAuditEvent(false, UserGroupInformation.getCurrentUser(),
                       getRemoteIp(),
                       "setOwner", src, null, null);
@@ -1153,14 +1197,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         }
       }
       dir.setOwner(src, username, group);
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         resultingStat = dir.getFileInfo(src, false);
       }
     } finally {
       writeUnlock();
     }
     getEditLog().logSync();
-    if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+    if (isAuditEnabled() && isExternalInvocation()) {
       logAuditEvent(UserGroupInformation.getCurrentUser(),
                     getRemoteIp(),
                     "setOwner", src, null, resultingStat);
@@ -1203,7 +1247,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       return getBlockLocationsInt(src, offset, length, doAccessTime,
                                   needBlockToken, checkSafeMode);
     } catch (AccessControlException e) {
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         logAuditEvent(false, UserGroupInformation.getCurrentUser(),
                       getRemoteIp(),
                       "open", src, null, null);
@@ -1229,7 +1273,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
     final LocatedBlocks ret = getBlockLocationsUpdateTimes(src,
         offset, length, doAccessTime, needBlockToken);  
-    if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+    if (isAuditEnabled() && isExternalInvocation()) {
       logAuditEvent(UserGroupInformation.getCurrentUser(),
                     getRemoteIp(),
                     "open", src, null, null);
@@ -1310,7 +1354,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     try {
       concatInt(target, srcs);
     } catch (AccessControlException e) {
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         logAuditEvent(false, UserGroupInformation.getLoginUser(),
                       getRemoteIp(),
                       "concat", Arrays.toString(srcs), target, null);
@@ -1353,14 +1397,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         throw new SafeModeException("Cannot concat " + target, safeMode);
       }
       concatInternal(target, srcs);
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         resultingStat = dir.getFileInfo(target, false);
       }
     } finally {
       writeUnlock();
     }
     getEditLog().logSync();
-    if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+    if (isAuditEnabled() && isExternalInvocation()) {
       logAuditEvent(UserGroupInformation.getLoginUser(),
                     getRemoteIp(),
                     "concat", Arrays.toString(srcs), target, resultingStat);
@@ -1481,7 +1525,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     try {
       setTimesInt(src, mtime, atime);
     } catch (AccessControlException e) {
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         logAuditEvent(false, UserGroupInformation.getCurrentUser(),
                       getRemoteIp(),
                       "setTimes", src, null, null);
@@ -1507,7 +1551,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       INode inode = dir.getINode(src);
       if (inode != null) {
         dir.setTimes(src, inode, mtime, atime, true);
-        if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+        if (isAuditEnabled() && isExternalInvocation()) {
           final HdfsFileStatus stat = dir.getFileInfo(src, false);
           logAuditEvent(UserGroupInformation.getCurrentUser(),
                         getRemoteIp(),
@@ -1530,7 +1574,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     try {
       createSymlinkInt(target, link, dirPerms, createParent);
     } catch (AccessControlException e) {
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         logAuditEvent(false, UserGroupInformation.getCurrentUser(),
                       getRemoteIp(),
                       "createSymlink", link, target, null);
@@ -1551,14 +1595,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         verifyParentDir(link);
       }
       createSymlinkInternal(target, link, dirPerms, createParent);
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         resultingStat = dir.getFileInfo(link, false);
       }
     } finally {
       writeUnlock();
     }
     getEditLog().logSync();
-    if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+    if (isAuditEnabled() && isExternalInvocation()) {
       logAuditEvent(UserGroupInformation.getCurrentUser(),
                     getRemoteIp(),
                     "createSymlink", link, target, resultingStat);
@@ -1614,7 +1658,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     try {
       return setReplicationInt(src, replication);
     } catch (AccessControlException e) {
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         logAuditEvent(false, UserGroupInformation.getCurrentUser(),
                       getRemoteIp(),
                       "setReplication", src, null, null);
@@ -1650,7 +1694,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
 
     getEditLog().logSync();
-    if (isFile && auditLog.isInfoEnabled() && isExternalInvocation()) {
+    if (isFile && isAuditEnabled() && isExternalInvocation()) {
       logAuditEvent(UserGroupInformation.getCurrentUser(),
                     getRemoteIp(),
                     "setReplication", src, null, null);
@@ -1706,7 +1750,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       startFileInt(src, permissions, holder, clientMachine, flag, createParent,
                    replication, blockSize);
     } catch (AccessControlException e) {
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         logAuditEvent(false, UserGroupInformation.getCurrentUser(),
                       getRemoteIp(),
                       "create", src, null, null);
@@ -1739,7 +1783,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       }
     } 
 
-    if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+    if (isAuditEnabled() && isExternalInvocation()) {
       final HdfsFileStatus stat = dir.getFileInfo(src, false);
       logAuditEvent(UserGroupInformation.getCurrentUser(),
                     getRemoteIp(),
@@ -2040,7 +2084,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     try {
       return appendFileInt(src, holder, clientMachine);
     } catch (AccessControlException e) {
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         logAuditEvent(false, UserGroupInformation.getCurrentUser(),
                       getRemoteIp(),
                       "append", src, null, null);
@@ -2086,7 +2130,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
             +" block size " + lb.getBlock().getNumBytes());
       }
     }
-    if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+    if (isAuditEnabled() && isExternalInvocation()) {
       logAuditEvent(UserGroupInformation.getCurrentUser(),
                     getRemoteIp(),
                     "append", src, null, null);
@@ -2532,7 +2576,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     try {
       return renameToInt(src, dst);
     } catch (AccessControlException e) {
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         logAuditEvent(false, UserGroupInformation.getCurrentUser(),
                       getRemoteIp(),
                       "rename", src, dst, null);
@@ -2554,14 +2598,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       checkOperation(OperationCategory.WRITE);
 
       status = renameToInternal(src, dst);
-      if (status && auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (status && isAuditEnabled() && isExternalInvocation()) {
         resultingStat = dir.getFileInfo(dst, false);
       }
     } finally {
       writeUnlock();
     }
     getEditLog().logSync();
-    if (status && auditLog.isInfoEnabled() && isExternalInvocation()) {
+    if (status && isAuditEnabled() && isExternalInvocation()) {
       logAuditEvent(UserGroupInformation.getCurrentUser(),
                     getRemoteIp(),
                     "rename", src, dst, resultingStat);
@@ -2583,15 +2627,15 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     if (isPermissionEnabled) {
       //We should not be doing this.  This is move() not renameTo().
       //but for now,
+      //NOTE: yes, this is bad!  it's assuming much lower level behavior
+      //      of rewriting the dst
       String actualdst = dir.isDir(dst)?
           dst + Path.SEPARATOR + new Path(src).getName(): dst;
       checkParentAccess(src, FsAction.WRITE);
       checkAncestorAccess(actualdst, FsAction.WRITE);
     }
 
-    HdfsFileStatus dinfo = dir.getFileInfo(dst, false);
     if (dir.renameTo(src, dst)) {
-      unprotectedChangeLease(src, dst, dinfo);     // update lease with new filename
       return true;
     }
     return false;
@@ -2611,14 +2655,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       checkOperation(OperationCategory.WRITE);
 
       renameToInternal(src, dst, options);
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         resultingStat = dir.getFileInfo(dst, false); 
       }
     } finally {
       writeUnlock();
     }
     getEditLog().logSync();
-    if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+    if (isAuditEnabled() && isExternalInvocation()) {
       StringBuilder cmd = new StringBuilder("rename options=");
       for (Rename option : options) {
         cmd.append(option.value()).append(" ");
@@ -2642,9 +2686,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       checkAncestorAccess(dst, FsAction.WRITE);
     }
 
-    HdfsFileStatus dinfo = dir.getFileInfo(dst, false);
     dir.renameTo(src, dst, options);
-    unprotectedChangeLease(src, dst, dinfo); // update lease with new filename
   }
   
   /**
@@ -2659,7 +2701,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     try {
       return deleteInt(src, recursive);
     } catch (AccessControlException e) {
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         logAuditEvent(false, UserGroupInformation.getCurrentUser(),
                       getRemoteIp(),
                       "delete", src, null, null);
@@ -2675,7 +2717,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
     }
     boolean status = deleteInternal(src, recursive, true);
-    if (status && auditLog.isInfoEnabled() && isExternalInvocation()) {
+    if (status && isAuditEnabled() && isExternalInvocation()) {
       logAuditEvent(UserGroupInformation.getCurrentUser(),
                     getRemoteIp(),
                     "delete", src, null, null);
@@ -2841,7 +2883,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       }
       stat = dir.getFileInfo(src, resolveLink);
     } catch (AccessControlException e) {
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         logAuditEvent(false, UserGroupInformation.getCurrentUser(),
                       getRemoteIp(),
                       "getfileinfo", src, null, null);
@@ -2850,7 +2892,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     } finally {
       readUnlock();
     }
-    if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+    if (isAuditEnabled() && isExternalInvocation()) {
       logAuditEvent(UserGroupInformation.getCurrentUser(),
                     getRemoteIp(),
                     "getfileinfo", src, null, null);
@@ -2866,7 +2908,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     try {
       return mkdirsInt(src, permissions, createParent);
     } catch (AccessControlException e) {
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         logAuditEvent(false, UserGroupInformation.getCurrentUser(),
                       getRemoteIp(),
                       "mkdirs", src, null, null);
@@ -2890,7 +2932,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       writeUnlock();
     }
     getEditLog().logSync();
-    if (status && auditLog.isInfoEnabled() && isExternalInvocation()) {
+    if (status && isAuditEnabled() && isExternalInvocation()) {
       final HdfsFileStatus stat = dir.getFileInfo(src, false);
       logAuditEvent(UserGroupInformation.getCurrentUser(),
                     getRemoteIp(),
@@ -2979,9 +3021,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   /** Persist all metadata about this file.
    * @param src The string representation of the path
    * @param clientName The string representation of the client
+   * @param lastBlockLength The length of the last block 
+   *                        under construction reported from client.
    * @throws IOException if path does not exist
    */
-  void fsync(String src, String clientName) 
+  void fsync(String src, String clientName, long lastBlockLength) 
       throws IOException, UnresolvedLinkException {
     NameNode.stateChangeLog.info("BLOCK* fsync: " + src + " for " + clientName);
     writeLock();
@@ -2991,6 +3035,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         throw new SafeModeException("Cannot fsync file " + src, safeMode);
       }
       INodeFileUnderConstruction pendingFile  = checkLease(src, clientName);
+      if (lastBlockLength > 0) {
+        pendingFile.updateLengthOfLastBlock(lastBlockLength);
+      }
       dir.persistBlocks(src, pendingFile);
     } finally {
       writeUnlock();
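fsync() now accepts the length of the block under construction as reported by the client, and updateLengthOfLastBlock() (added to INodeFileUnderConstruction later in this diff) applies it so that readers see the flushed bytes. A hedged sketch of the calling side, assuming the client-facing RPC was widened to the same three-argument shape; only the FSNamesystem side appears in this diff, so the protocol signature below is an assumption:

    // Hypothetical client-side flush: report how many bytes of the current
    // block have been written so the NameNode can update the visible length.
    void flushAndReport(ClientProtocol namenode, String src, String clientName,
        long bytesInLastBlock) throws IOException {
      namenode.fsync(src, clientName, bytesInLastBlock);
    }
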
@@ -3319,7 +3366,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     try {
       return getListingInt(src, startAfter, needLocation);
     } catch (AccessControlException e) {
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         logAuditEvent(false, UserGroupInformation.getCurrentUser(),
                       getRemoteIp(),
                       "listStatus", src, null, null);
@@ -3343,7 +3390,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           checkTraverse(src);
         }
       }
-      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+      if (isAuditEnabled() && isExternalInvocation()) {
         logAuditEvent(UserGroupInformation.getCurrentUser(),
                       getRemoteIp(),
                       "listStatus", src, null, null);
@@ -3433,15 +3480,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,

   private NNHAStatusHeartbeat createHaStatusHeartbeat() {
     HAState state = haContext.getState();
-    NNHAStatusHeartbeat.State hbState;
-    if (state instanceof ActiveState) {
-      hbState = NNHAStatusHeartbeat.State.ACTIVE;
-    } else if (state instanceof StandbyState) {
-      hbState = NNHAStatusHeartbeat.State.STANDBY;      
-    } else {
-      throw new AssertionError("Invalid state: " + state.getClass());
-    }
-    return new NNHAStatusHeartbeat(hbState,
+    return new NNHAStatusHeartbeat(state.getServiceState(),
         getFSImage().getLastAppliedOrWrittenTxId());
   }
 
@@ -3870,7 +3909,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     private synchronized void leave() {
       // if not done yet, initialize replication queues.
       // In the standby, do not populate repl queues
-      if (!isPopulatingReplQueues() && !isInStandbyState()) {
+      if (!isPopulatingReplQueues() && shouldPopulateReplQueues()) {
         initializeReplQueues();
       }
       long timeInSafemode = now() - startTime;
@@ -3913,7 +3952,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
      * initializing replication queues.
      */
     private synchronized boolean canInitializeReplQueues() {
-      return !isInStandbyState() && blockSafe >= blockReplQueueThreshold;
+      return shouldPopulateReplQueues()
+          && blockSafe >= blockReplQueueThreshold;
     }
       
     /** 
@@ -4253,7 +4293,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,

   @Override
   public boolean isPopulatingReplQueues() {
-    if (isInStandbyState()) {
+    if (!shouldPopulateReplQueues()) {
       return false;
     }
     // safeMode is volatile, and may be set to null at any time
@@ -4262,7 +4302,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       return true;
     return safeMode.isPopulatingReplQueues();
   }
-    
+
+  private boolean shouldPopulateReplQueues() {
+    if(haContext == null || haContext.getState() == null)
+      return false;
+    return haContext.getState().shouldPopulateReplQueues();
+  }
+
   @Override
   public void incrementSafeBlockCount(int replication) {
     // safeMode is volatile, and may be set to null at any time
@@ -4880,31 +4926,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,

   // rename was successful. If any part of the renamed subtree had
   // files that were being written to, update with new filename.
-  void unprotectedChangeLease(String src, String dst, HdfsFileStatus dinfo) {
-    String overwrite;
-    String replaceBy;
+  void unprotectedChangeLease(String src, String dst) {
     assert hasWriteLock();
-
-    boolean destinationExisted = true;
-    if (dinfo == null) {
-      destinationExisted = false;
-    }
-
-    if (destinationExisted && dinfo.isDir()) {
-      Path spath = new Path(src);
-      Path parent = spath.getParent();
-      if (parent.isRoot()) {
-        overwrite = parent.toString();
-      } else {
-        overwrite = parent.toString() + Path.SEPARATOR;
-      }
-      replaceBy = dst + Path.SEPARATOR;
-    } else {
-      overwrite = src;
-      replaceBy = dst;
-    }
-
-    leaseManager.changeLease(src, dst, overwrite, replaceBy);
+    leaseManager.changeLease(src, dst);
   }
 
   /**
@@ -4915,19 +4939,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     // lock on our behalf. If we took the read lock here, we could block
     // for fairness if a writer is waiting on the lock.
     synchronized (leaseManager) {
-      out.writeInt(leaseManager.countPath()); // write the size
-
-      for (Lease lease : leaseManager.getSortedLeases()) {
-        for(String path : lease.getPaths()) {
-          // verify that path exists in namespace
-          final INodeFileUnderConstruction cons;
-          try {
-            cons = INodeFileUnderConstruction.valueOf(dir.getINode(path), path);
-          } catch (UnresolvedLinkException e) {
-            throw new AssertionError("Lease files should reside on this FS");
-          }
-          FSImageSerialization.writeINodeUnderConstruction(out, cons, path);
-        }
+      Map<String, INodeFileUnderConstruction> nodes =
+          leaseManager.getINodesUnderConstruction();
+      out.writeInt(nodes.size()); // write the size    
+      for (Map.Entry<String, INodeFileUnderConstruction> entry
+           : nodes.entrySet()) {
+        FSImageSerialization.writeINodeUnderConstruction(
+            out, entry.getValue(), entry.getKey());
       }
     }
   }
@@ -5286,7 +5304,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    * Log fsck event in the audit log 
    */
   void logFsckEvent(String src, InetAddress remoteAddress) throws IOException {
-    if (auditLog.isInfoEnabled()) {
+    if (isAuditEnabled()) {
       logAuditEvent(UserGroupInformation.getCurrentUser(),
                     remoteAddress,
                     "fsck", src, null, null);
@@ -5541,4 +5559,44 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     return this.blockManager.getDatanodeManager()
         .isAvoidingStaleDataNodesForWrite();
   }
+
+  /**
+   * Default AuditLogger implementation; used when no access logger is
+   * defined in the config file. It can also be explicitly listed in the
+   * config file.
+   */
+  private static class DefaultAuditLogger implements AuditLogger {
+
+    @Override
+    public void initialize(Configuration conf) {
+      // Nothing to do.
+    }
+
+    @Override
+    public void logAuditEvent(boolean succeeded, String userName,
+        InetAddress addr, String cmd, String src, String dst,
+        FileStatus status) {
+      if (auditLog.isInfoEnabled()) {
+        final StringBuilder sb = auditBuffer.get();
+        sb.setLength(0);
+        sb.append("allowed=").append(succeeded).append("\t");
+        sb.append("ugi=").append(userName).append("\t");
+        sb.append("ip=").append(addr).append("\t");
+        sb.append("cmd=").append(cmd).append("\t");
+        sb.append("src=").append(src).append("\t");
+        sb.append("dst=").append(dst).append("\t");
+        if (null == status) {
+          sb.append("perm=null");
+        } else {
+          sb.append("perm=");
+          sb.append(status.getOwner()).append(":");
+          sb.append(status.getGroup()).append(":");
+          sb.append(status.getPermission());
+        }
+        auditLog.info(sb);
+      }
+    }
+
+  }
+
 }

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java

@@ -73,6 +73,11 @@ class INodeDirectory extends INode {
   INodeDirectory(INodeDirectory other) {
     super(other);
     this.children = other.children;
+    if (this.children != null) {
+      for (INode child : children) {
+        child.parent = this;
+      }
+    }
   }
   
   /** @return true unconditionally. */
@@ -106,6 +111,7 @@ class INodeDirectory extends INode {

     final int low = searchChildren(newChild);
     if (low>=0) { // an old child exists so replace by the newChild
+      children.get(low).parent = null;
       children.set(low, newChild);
     } else {
       throw new IllegalArgumentException("No child exists to be replaced");

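Both INodeDirectory fixes keep parent pointers consistent: the copy constructor re-points every child at the new directory object, and the replace path clears the stale parent of the node being swapped out. A hedged illustration of the invariant being protected; the accessor names are assumptions and the check is not part of this commit:

    // Hypothetical consistency check: every child of a directory must
    // name that directory as its parent.
    static boolean parentsConsistent(INodeDirectory dir) {
      for (INode child : dir.getChildren()) {
        if (child.getParent() != dir) {
          return false;
        }
      }
      return true;
    }
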
+ 18 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java

@@ -171,4 +171,22 @@ class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollec
     setBlock(numBlocks()-1, ucBlock);
     return ucBlock;
   }
+
+  /**
+   * Update the length for the last block
+   * 
+   * @param lastBlockLength
+   *          The length of the last block reported from client
+   * @throws IOException
+   */
+  void updateLengthOfLastBlock(long lastBlockLength) throws IOException {
+    BlockInfo lastBlock = this.getLastBlock();
+    assert (lastBlock != null) : "The last block for path "
+        + this.getFullPathName() + " is null when updating its length";
+    assert (lastBlock instanceof BlockInfoUnderConstruction) : "The last block for path "
+        + this.getFullPathName()
+        + " is not a BlockInfoUnderConstruction when updating its length";
+    lastBlock.setNumBytes(lastBlockLength);
+  }
+  
 }

+ 25 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java

@@ -331,22 +331,19 @@ public class LeaseManager {
     }
   }
 
-  synchronized void changeLease(String src, String dst,
-      String overwrite, String replaceBy) {
+  synchronized void changeLease(String src, String dst) {
     if (LOG.isDebugEnabled()) {
       LOG.debug(getClass().getSimpleName() + ".changelease: " +
-               " src=" + src + ", dest=" + dst + 
-               ", overwrite=" + overwrite +
-               ", replaceBy=" + replaceBy);
+               " src=" + src + ", dest=" + dst);
     }
 
-    final int len = overwrite.length();
+    final int len = src.length();
     for(Map.Entry<String, Lease> entry
         : findLeaseWithPrefixPath(src, sortedLeasesByPath).entrySet()) {
       final String oldpath = entry.getKey();
       final Lease lease = entry.getValue();
-      //overwrite must be a prefix of oldpath
-      final String newpath = replaceBy + oldpath.substring(len);
+      // replace stem of src with new destination
+      final String newpath = dst + oldpath.substring(len);
       if (LOG.isDebugEnabled()) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("changeLease: replacing " + oldpath + " with " + newpath);
         LOG.debug("changeLease: replacing " + oldpath + " with " + newpath);
       }
       }
@@ -429,6 +426,26 @@ public class LeaseManager {
    }
  }

+  /**
+   * Get the list of inodes corresponding to valid leases.
+   * @return list of inodes
+   * @throws UnresolvedLinkException
+   */
+  Map<String, INodeFileUnderConstruction> getINodesUnderConstruction() {
+    Map<String, INodeFileUnderConstruction> inodes =
+        new TreeMap<String, INodeFileUnderConstruction>();
+    for (String p : sortedLeasesByPath.keySet()) {
+      // verify that path exists in namespace
+      try {
+        INode node = fsnamesystem.dir.getINode(p);
+        inodes.put(p, INodeFileUnderConstruction.valueOf(node, p));
+      } catch (IOException ioe) {
+        LOG.error(ioe);
+      }
+    }
+    return inodes;
+  }
+  
  /** Check the leases beginning from the oldest.
   *  @return true is sync is needed.
   */
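
A worked example of the prefix replacement in changeLease() above. Previously the caller had to pass separate overwrite/replaceBy strings; now the stem to strip is derived from src itself:

    // Plain Java, no LeaseManager state needed:
    String src = "/user/alice/dir";            // rename source
    String dst = "/user/bob/dir";              // rename destination
    String oldpath = "/user/alice/dir/file1";  // a leased path under src
    String newpath = dst + oldpath.substring(src.length());
    // newpath is "/user/bob/dir/file1"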

+ 9 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -598,11 +598,7 @@ public class NameNode {
    String nsId = getNameServiceId(conf);
    String namenodeId = HAUtil.getNameNodeId(conf, nsId);
    this.haEnabled = HAUtil.isHAEnabled(conf, nsId);
-    if (!haEnabled) {
-      state = ACTIVE_STATE;
-    } else {
-      state = STANDBY_STATE;
-    }
+    state = createHAState();
    this.allowStaleStandbyReads = HAUtil.shouldAllowStandbyReads(conf);
    this.haContext = createHAContext();
    try {
@@ -619,6 +615,10 @@ public class NameNode {
    }
  }

+  protected HAState createHAState() {
+    return !haEnabled ? ACTIVE_STATE : STANDBY_STATE;
+  }
+
  protected HAContext createHAContext() {
    return new NameNodeHAContext();
  }
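
The createHAState() extraction above replaces a hard-coded branch with an overridable factory method. A hedged sketch of the kind of subclass this enables; ExampleNameNode and ExampleState are hypothetical names:

    // Hypothetical subclass -- shows why createHAState() is now protected.
    class ExampleNameNode extends NameNode {
      ExampleNameNode(Configuration conf) throws IOException {
        super(conf);
      }

      @Override
      protected HAState createHAState() {
        return new ExampleState(); // hypothetical HAState implementation
      }
    }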
@@ -1050,6 +1050,9 @@ public class NameNode {
 
 
  private static void doRecovery(StartupOption startOpt, Configuration conf)
      throws IOException {
+    String nsId = DFSUtil.getNamenodeNameServiceId(conf);
+    String namenodeId = HAUtil.getNameNodeId(conf, nsId);
+    initializeGenericKeys(conf, nsId, namenodeId);
    if (startOpt.getForce() < MetaRecoveryContext.FORCE_ALL) {
      if (!confirmPrompt("You have selected Metadata Recovery mode.  " +
          "This mode is intended to recover lost metadata on a corrupt " +
@@ -1298,7 +1301,7 @@ public class NameNode {
   *          before exit.
   * @throws ExitException thrown only for testing.
   */
-  private synchronized void doImmediateShutdown(Throwable t)
+  protected synchronized void doImmediateShutdown(Throwable t)
      throws ExitException {
    String message = "Error encountered requiring NN shutdown. " +
        "Shutting down immediately.";

+ 14 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -338,11 +338,13 @@ class NameNodeRpcServer implements NamenodeProtocols {
        "Unexpected not positive size: "+size);
    }
    namesystem.checkOperation(OperationCategory.READ);
+    namesystem.checkSuperuserPrivilege();
    return namesystem.getBlockManager().getBlocks(datanode, size); 
  }

  @Override // NamenodeProtocol
  public ExportedBlockKeys getBlockKeys() throws IOException {
+    namesystem.checkSuperuserPrivilege();
    return namesystem.getBlockManager().getBlockKeys();
  }

@@ -351,6 +353,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
                          int errorCode, 
                          String msg) throws IOException {
    namesystem.checkOperation(OperationCategory.UNCHECKED);
+    namesystem.checkSuperuserPrivilege();
    verifyRequest(registration);
    LOG.info("Error report from " + registration + ": " + msg);
    if (errorCode == FATAL) {
@@ -361,6 +364,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
  @Override // NamenodeProtocol
  public NamenodeRegistration register(NamenodeRegistration registration)
  throws IOException {
+    namesystem.checkSuperuserPrivilege();
    verifyLayoutVersion(registration.getVersion());
    NamenodeRegistration myRegistration = nn.setRegistration();
    namesystem.registerBackupNode(registration, myRegistration);
@@ -370,6 +374,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
  @Override // NamenodeProtocol
  public NamenodeCommand startCheckpoint(NamenodeRegistration registration)
  throws IOException {
+    namesystem.checkSuperuserPrivilege();
    verifyRequest(registration);
    if(!nn.isRole(NamenodeRole.NAMENODE))
      throw new IOException("Only an ACTIVE node can invoke startCheckpoint.");
@@ -379,6 +384,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
  @Override // NamenodeProtocol
  public void endCheckpoint(NamenodeRegistration registration,
                            CheckpointSignature sig) throws IOException {
+    namesystem.checkSuperuserPrivilege();
    namesystem.endCheckpoint(registration, sig);
  }

@@ -755,17 +761,20 @@ class NameNodeRpcServer implements NamenodeProtocols {
  @Override // NamenodeProtocol
  public long getTransactionID() throws IOException {
    namesystem.checkOperation(OperationCategory.UNCHECKED);
+    namesystem.checkSuperuserPrivilege();
    return namesystem.getFSImage().getLastAppliedOrWrittenTxId();
  }
  
  @Override // NamenodeProtocol
  public long getMostRecentCheckpointTxId() throws IOException {
    namesystem.checkOperation(OperationCategory.UNCHECKED);
+    namesystem.checkSuperuserPrivilege();
    return namesystem.getFSImage().getMostRecentCheckpointTxId();
  }
  
  @Override // NamenodeProtocol
  public CheckpointSignature rollEditLog() throws IOException {
+    namesystem.checkSuperuserPrivilege();
    return namesystem.rollEditLog();
  }
  
@@ -773,6 +782,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
  public RemoteEditLogManifest getEditLogManifest(long sinceTxId)
  throws IOException {
    namesystem.checkOperation(OperationCategory.READ);
+    namesystem.checkSuperuserPrivilege();
    return namesystem.getEditLog().getEditLogManifest(sinceTxId);
  }
    
@@ -825,8 +835,9 @@ class NameNodeRpcServer implements NamenodeProtocols {
  }
  
  @Override // ClientProtocol
-  public void fsync(String src, String clientName) throws IOException {
-    namesystem.fsync(src, clientName);
+  public void fsync(String src, String clientName, long lastBlockLength)
+      throws IOException {
+    namesystem.fsync(src, clientName, lastBlockLength);
  }

  @Override // ClientProtocol
@@ -948,6 +959,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
     
     
  @Override // DatanodeProtocol, NamenodeProtocol
  public NamespaceInfo versionRequest() throws IOException {
+    namesystem.checkSuperuserPrivilege();
    return namesystem.getNamespaceInfo();
  }


+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java

@@ -102,7 +102,7 @@ class NamenodeJspHelper {
    long usedNonHeap = (totalNonHeap * 100) / commitedNonHeap;

    String str = "<div>" + inodes + " files and directories, " + blocks + " blocks = "
-        + (inodes + blocks) + " total";
+        + (inodes + blocks) + " total filesystem objects";
    if (maxobjects != 0) {
      long pct = ((inodes + blocks) * 100) / maxobjects;
      str += " / " + maxobjects + " (" + pct + "%)";

+ 4 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java

@@ -19,31 +19,26 @@ package org.apache.hadoop.hdfs.server.protocol;
 
 
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

@InterfaceAudience.Private
@InterfaceStability.Evolving
public class NNHAStatusHeartbeat {

-  private State state;
+  private HAServiceState state;
  private long txid = HdfsConstants.INVALID_TXID;
  
-  public NNHAStatusHeartbeat(State state, long txid) {
+  public NNHAStatusHeartbeat(HAServiceState state, long txid) {
    this.state = state;
    this.txid = txid;
  }

-  public State getState() {
+  public HAServiceState getState() {
    return state;
  }
  
  public long getTxId() {
    return txid;
  }
-  
-  @InterfaceAudience.Private
-  public enum State {
-    ACTIVE,
-    STANDBY;
-  }
}

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java

@@ -32,8 +32,7 @@ import org.apache.hadoop.security.KerberosInfo;
 * It's used to get part of the name node state
 *****************************************************************************/
@KerberosInfo(
-    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
-    clientPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
+    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
@InterfaceAudience.Private
public interface NamenodeProtocol {
  /**

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java

@@ -38,10 +38,9 @@ public class UserParam extends StringParam {
        MessageFormat.format("Parameter [{0}], cannot be NULL", NAME));
    }
    int len = str.length();
-    if (len < 1 || len > 31) {
+    if (len < 1) {
      throw new IllegalArgumentException(MessageFormat.format(
-        "Parameter [{0}], invalid value [{1}], it's length must be between 1 and 31",
-        NAME, str));
+        "Parameter [{0}], it's length must be at least 1", NAME));
    }
    return str;
  }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto

@@ -357,6 +357,7 @@ message SetQuotaResponseProto { // void response
message FsyncRequestProto {
  required string src = 1;
  required string client = 2;
+  optional sint64 lastBlockLength = 3 [default = -1];
}

message FsyncResponseProto { // void response
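
Declaring lastBlockLength as optional with a default of -1 keeps the wire format backward compatible: requests serialized by clients built before this change still decode, with the field taking its default. A sketch of the server-side check, assuming the usual protobuf-generated accessors on FsyncRequestProto:

    long lastBlockLength = req.getLastBlockLength(); // -1 when an old client omits it
    if (lastBlockLength > 0) {
      // a newer client reported the durable length of the last block
    }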

+ 13 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -1184,4 +1184,17 @@
  </description>
</property>

+<property>
+  <name>dfs.namenode.audit.loggers</name>
+  <value>default</value>
+  <description>
+    List of classes implementing audit loggers that will receive audit events.
+    These should be implementations of org.apache.hadoop.hdfs.server.namenode.AuditLogger.
+    The special value "default" can be used to reference the default audit
+    logger, which uses the configured log system. Installing custom audit loggers
+    may affect the performance and stability of the NameNode. Refer to the custom
+    logger's documentation for more details.
+  </description>
+</property>
+
</configuration>
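
A hedged sketch of a custom logger wired in through this new key. The method signatures below are inferred from the description above; check org.apache.hadoop.hdfs.server.namenode.AuditLogger in this tree for the authoritative interface, and note the warning about NameNode performance:

    package example.audit; // hypothetical package

    import java.net.InetAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.hdfs.server.namenode.AuditLogger;

    // Prints one line per audit event; for illustration only.
    public class StdoutAuditLogger implements AuditLogger {
      @Override
      public void initialize(Configuration conf) {
        // read any custom configuration keys here
      }

      @Override
      public void logAuditEvent(boolean succeeded, String userName,
          InetAddress addr, String cmd, String src, String dst,
          FileStatus status) {
        System.out.println("audit: allowed=" + succeeded + " user=" + userName
            + " cmd=" + cmd + " src=" + src);
      }
    }

It would then be enabled by setting dfs.namenode.audit.loggers to "default,example.audit.StdoutAuditLogger".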

+ 6 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java

@@ -37,10 +37,15 @@ public class TestFcHdfsCreateMkdir extends
   
   
  private static MiniDFSCluster cluster;
  private static Path defaultWorkingDirectory;
-  
+
+  public TestFcHdfsCreateMkdir() {
+    super(new FileContextTestHelper(true));
+  }
+
  @BeforeClass
  public static void clusterSetupAtBegining()
                                    throws IOException, LoginException, URISyntaxException  {
+    FileContextTestHelper.TEST_ROOT_DIR = "/tmp/TestFcHdfsCreateMkdir";
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    fc = FileContext.getFileContext(cluster.getURI(0), conf);

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java

@@ -41,6 +41,7 @@ public class TestFcHdfsPermission extends FileContextPermissionBase {
  @BeforeClass
  public static void clusterSetupAtBegining()
                                    throws IOException, LoginException, URISyntaxException  {
+    FileContextTestHelper.TEST_ROOT_DIR = "/tmp/TestFcHdfsPermission";
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    fc = FileContext.getFileContext(cluster.getURI(0), conf);
@@ -55,6 +56,10 @@ public class TestFcHdfsPermission extends FileContextPermissionBase {
    cluster.shutdown();   
  }
  
+  public TestFcHdfsPermission() {
+    super(new FileContextTestHelper(true));
+  }
+
  @Override
  @Before
  public void setUp() throws Exception {

+ 15 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java

@@ -78,10 +78,14 @@ public class TestFcHdfsSetUMask {
 
 
  private static final FsPermission WIDE_OPEN_TEST_UMASK = FsPermission
      .createImmutable((short) (0777 ^ 0777));
-  
+
+  private final FileContextTestHelper fileContextTestHelper =
+    new FileContextTestHelper(true);
+
  @BeforeClass
  public static void clusterSetupAtBegining()
        throws IOException, LoginException, URISyntaxException  {
+    FileContextTestHelper.TEST_ROOT_DIR = "/tmp/TestFcHdfsSetUMask";
    Configuration conf = new HdfsConfiguration();
    // set permissions very restrictive
    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,  "077");
@@ -226,7 +230,7 @@ public class TestFcHdfsSetUMask {
      FsPermission expectedPerms) throws IOException {
    Path f = getTestRootPath(fc,"foo");
    fc.setUMask(umask);
-    createFile(fc, f);
+    fileContextTestHelper.createFile(fc, f);
    Assert.assertTrue(isFile(fc, f));
    Assert.assertEquals("permissions on file are wrong",  
        expectedPerms , fc.getFileStatus(f).getPermission());
@@ -240,12 +244,19 @@ public class TestFcHdfsSetUMask {
    Path fParent = getTestRootPath(fc, "NonExisting");
    Assert.assertFalse(exists(fc, fParent));
    fc.setUMask(umask);
-    createFile(fc, f);
+    fileContextTestHelper.createFile(fc, f);
    Assert.assertTrue(isFile(fc, f));
    Assert.assertEquals("permissions on file are wrong",  
        expectedFilePerms, fc.getFileStatus(f).getPermission());
    Assert.assertEquals("permissions on parent directory are wrong",  
        expectedDirPerms, fc.getFileStatus(fParent).getPermission());
  }
- 
+
+  private Path getTestRootPath(FileContext fc) {
+    return fileContextTestHelper.getTestRootPath(fc);
+  }
+
+  private Path getTestRootPath(FileContext fc, String pathString) {
+    return fileContextTestHelper.getTestRootPath(fc, pathString);
+  }
}

+ 11 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java

@@ -17,7 +17,6 @@
 */
package org.apache.hadoop.fs;

-import static org.apache.hadoop.fs.FileContextTestHelper.getAbsoluteTestRootDir;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

@@ -86,6 +85,7 @@ public class TestFcHdfsSymlink extends FileContextSymlinkBaseTest {
 
 
  @BeforeClass
  public static void testSetUp() throws Exception {
+    FileContextTestHelper.TEST_ROOT_DIR = "/tmp/TestFcHdfsSymlink";
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
    conf.set(FsPermission.UMASK_LABEL, "000");
@@ -100,11 +100,17 @@ public class TestFcHdfsSymlink extends FileContextSymlinkBaseTest {
    cluster.shutdown();
  }
     
+  public TestFcHdfsSymlink() {
+    super(new FileContextTestHelper(true));
+  }
+
  @Test
  /** Access a file using a link that spans Hdfs to LocalFs */
  public void testLinkAcrossFileSystems() throws IOException {
-    Path localDir  = new Path("file://"+getAbsoluteTestRootDir(fc)+"/test");
-    Path localFile = new Path("file://"+getAbsoluteTestRootDir(fc)+"/test/file");
+    Path localDir  = new Path("file", null, getAbsoluteTestRootDir(fc) +
+      "/test");
+    Path localFile = new Path("file", null, getAbsoluteTestRootDir(fc) +
+      "/test/file");
    Path link      = new Path(testBaseDir1(), "linkToFile");
    FileContext localFc = FileContext.getLocalFSFileContext();
    localFc.delete(localDir, true);
@@ -120,7 +126,8 @@ public class TestFcHdfsSymlink extends FileContextSymlinkBaseTest {
  @Test
  /** Test renaming a file across two file systems using a link */
  public void testRenameAcrossFileSystemsViaLink() throws IOException {
-    Path localDir    = new Path("file://"+getAbsoluteTestRootDir(fc)+"/test");
+    Path localDir    = new Path("file", null, getAbsoluteTestRootDir(fc) +
+      "/test");
    Path hdfsFile    = new Path(testBaseDir1(), "file");
    Path link        = new Path(testBaseDir1(), "link");
    Path hdfsFileNew = new Path(testBaseDir1(), "fileNew");

+ 6 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java

@@ -19,7 +19,6 @@
package org.apache.hadoop.fs;

import static org.apache.hadoop.fs.FileContextTestHelper.exists;
-import static org.apache.hadoop.fs.FileContextTestHelper.getTestRootPath;

import java.io.IOException;
import java.net.URISyntaxException;
@@ -49,6 +48,8 @@ public class TestHDFSFileContextMainOperations extends
  @BeforeClass
  public static void clusterSetupAtBegining() throws IOException,
      LoginException, URISyntaxException {
+    FileContextTestHelper.TEST_ROOT_DIR =
+      "/tmp/TestHDFSFileContextMainOperations";
    cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
    cluster.waitClusterUp();
    fc = FileContext.getFileContext(cluster.getURI(0), CONF);
@@ -76,6 +77,10 @@ public class TestHDFSFileContextMainOperations extends
    cluster.shutdown();   
  }
  
+  public TestHDFSFileContextMainOperations() {
+    super(new FileContextTestHelper(true));
+  }
+
  @Override
  @Before
  public void setUp() throws Exception {

+ 183 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestVolumeId.java

@@ -0,0 +1,183 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+public class TestVolumeId {
+
+  @Test
+  public void testEquality() {
+    final VolumeId id1 = new HdfsVolumeId(new byte[] { (byte)0, (byte)0 });
+    testEq(true, id1, id1);
+
+    final VolumeId id2 = new HdfsVolumeId(new byte[] { (byte)0, (byte)1 });
+    testEq(true, id2, id2);
+    testEq(false, id1, id2);
+
+    final VolumeId id3 = new HdfsVolumeId(new byte[] { (byte)1, (byte)0 });
+    testEq(true, id3, id3);
+    testEq(false, id1, id3);
+    
+    // same as 2, but "invalid":
+    final VolumeId id2copy1 = new HdfsVolumeId(new byte[] { (byte)0, (byte)1 });
+    
+    testEq(true, id2, id2copy1);
+
+    // same as 2copy1: 
+    final VolumeId id2copy2 = new HdfsVolumeId(new byte[] { (byte)0, (byte)1 });
+    
+    testEq(true, id2, id2copy2);
+    
+    testEqMany(true, new VolumeId[] { id2, id2copy1, id2copy2 });
+    
+    testEqMany(false, new VolumeId[] { id1, id2, id3 });
+  }
+  
+  @SuppressWarnings("unchecked")
+  private <T> void testEq(final boolean eq, Comparable<T> id1, Comparable<T> id2) {
+    final int h1 = id1.hashCode();
+    final int h2 = id2.hashCode();
+    
+    // eq reflectivity:
+    assertTrue(id1.equals(id1));
+    assertTrue(id2.equals(id2));
+    assertEquals(0, id1.compareTo((T)id1));
+    assertEquals(0, id2.compareTo((T)id2));
+
+    // eq symmetry:
+    assertEquals(eq, id1.equals(id2));
+    assertEquals(eq, id2.equals(id1));
+    
+    // null comparison:
+    assertFalse(id1.equals(null));
+    assertFalse(id2.equals(null));
+    
+    // compareTo:
+    assertEquals(eq, 0 == id1.compareTo((T)id2));
+    assertEquals(eq, 0 == id2.compareTo((T)id1));
+    // compareTo must be antisymmetric:
+    assertEquals(sign(id1.compareTo((T)id2)), -sign(id2.compareTo((T)id1)));
+    
+    // compare with null should never return 0 to be consistent with #equals(): 
+    assertTrue(id1.compareTo(null) != 0);
+    assertTrue(id2.compareTo(null) != 0);
+    
+    // check that hash codes did not change:
+    assertEquals(h1, id1.hashCode());
+    assertEquals(h2, id2.hashCode());
+    if (eq) {
+      // in this case the hash codes must be the same:
+      assertEquals(h1, h2);
+    }
+  }
+  
+  private static int sign(int x) {
+    if (x == 0) {
+      return 0;
+    } else if (x > 0) {
+      return 1;
+    } else {
+      return -1;
+    }
+  }
+  
+  @SuppressWarnings("unchecked")
+  private <T> void testEqMany(final boolean eq, Comparable<T>... volumeIds) {
+    Comparable<T> vidNext;
+    int sum = 0;
+    for (int i=0; i<volumeIds.length; i++) {
+      if (i == volumeIds.length - 1) {
+        vidNext = volumeIds[0];
+      } else {
+        vidNext = volumeIds[i + 1];
+      }
+      testEq(eq, volumeIds[i], vidNext);
+      sum += sign(volumeIds[i].compareTo((T)vidNext));
+    }
+    // the comparison relationship must always be acyclic:
+    assertTrue(sum < volumeIds.length);
+  }
+
+  /*
+   * Test HdfsVolumeId(new byte[0]) instances: show that we permit such
+   * objects, they are still valid, and obey the same equality
+   * rules other objects do. 
+   */
+  @Test
+  public void testIdEmptyBytes() {
+    final VolumeId idEmpty1   = new HdfsVolumeId(new byte[0]);
+    assertTrue(idEmpty1.isValid());
+    final VolumeId idEmpty2   = new HdfsVolumeId(new byte[0]);
+    assertTrue(idEmpty2.isValid());
+    final VolumeId idNotEmpty = new HdfsVolumeId(new byte[] { (byte)1 });
+    assertTrue(idNotEmpty.isValid());
+    
+    testEq(true, idEmpty1, idEmpty2);
+    testEq(false, idEmpty1, idNotEmpty);
+    testEq(false, idEmpty2, idNotEmpty);
+  }
+  
+  /*
+   * Test the VolumeId.INVALID_VOLUME_ID singleton.
+   */
+  @Test
+  public void testInvalidId() {
+    try {
+      new HdfsVolumeId(null);
+      assertTrue("NPE expected.", false);
+    } catch (NullPointerException npe) {
+      // okay
+    }
+    final VolumeId idEmpty   = new HdfsVolumeId(new byte[] {});
+    final VolumeId idNotEmpty = new HdfsVolumeId(new byte[] { (byte)1 });
+    
+    testEq(false, VolumeId.INVALID_VOLUME_ID, idNotEmpty);
+    testEq(false, VolumeId.INVALID_VOLUME_ID, idEmpty);
+    
+    testEqMany(true, 
+        new VolumeId[] { 
+          VolumeId.INVALID_VOLUME_ID, 
+          VolumeId.INVALID_VOLUME_ID, 
+          VolumeId.INVALID_VOLUME_ID } );
+    testEqMany(false, 
+        new VolumeId[] {
+          VolumeId.INVALID_VOLUME_ID, 
+          idEmpty, 
+          idNotEmpty });
+  }
+  
+  /*
+   * test #toString() for typical VolumeId equality classes
+   */
+  @Test
+  public void testToString() {
+    // The #toString() return value is only checked for != null.
+    // We cannot assert more.
+    String strInvalid = VolumeId.INVALID_VOLUME_ID.toString();
+    assertNotNull(strInvalid);
+    
+    String strEmpty = new HdfsVolumeId(new byte[] {}).toString();
+    assertNotNull(strEmpty);
+    
+    String strNotEmpty = new HdfsVolumeId(new byte[] { (byte)1 }).toString();
+    assertNotNull(strNotEmpty);
+  } 
+  
+}

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java

@@ -25,6 +25,7 @@ import javax.security.auth.login.LoginException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -45,6 +46,7 @@ public class TestViewFileSystemAtHdfsRoot extends ViewFileSystemBaseTest {
  @BeforeClass
  public static void clusterSetupAtBegining() throws IOException,
      LoginException, URISyntaxException {
+    FileSystemTestHelper.TEST_ROOT_DIR = "/tmp/TestViewFileSystemAtHdfsRoot";
    SupportsBlocks = true;
    CONF.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
@@ -64,6 +66,10 @@ public class TestViewFileSystemAtHdfsRoot extends ViewFileSystemBaseTest {
    }
  }

+  public TestViewFileSystemAtHdfsRoot() {
+    super(new FileSystemTestHelper(true));
+  }
+
  @Override
  @Before
  public void setUp() throws Exception {

+ 6 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java

@@ -51,6 +51,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
  @BeforeClass
  public static void clusterSetupAtBegining() throws IOException,
      LoginException, URISyntaxException {
+    FileSystemTestHelper.TEST_ROOT_DIR = "/tmp/TestViewFileSystemHdfs";
    SupportsBlocks = true;
    CONF.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
@@ -80,13 +81,17 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
    cluster.shutdown();   
  }

+  public TestViewFileSystemHdfs() {
+    super(new FileSystemTestHelper(true));
+  }
+
  @Override
  @Before
  public void setUp() throws Exception {
    // create the test root on local_fs
    fsTarget = fHdfs;
    fsTarget2 = fHdfs2;
-    targetTestRoot2 = FileSystemTestHelper.getAbsoluteTestRootPath(fsTarget2);
+    targetTestRoot2 = getAbsoluteTestRootPath(fsTarget2);
    super.setUp();
  }


+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java

@@ -23,6 +23,7 @@ import java.net.URISyntaxException;
import javax.security.auth.login.LoginException;

import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextTestHelper;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
@@ -46,6 +47,7 @@ public class TestViewFsAtHdfsRoot extends ViewFsBaseTest {
  @BeforeClass
  public static void clusterSetupAtBegining() throws IOException,
      LoginException, URISyntaxException {
+    FileContextTestHelper.TEST_ROOT_DIR = "/tmp/TestViewFsAtHdfsRoot";
    SupportsBlocks = true;
    CONF.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
@@ -61,6 +63,10 @@ public class TestViewFsAtHdfsRoot extends ViewFsBaseTest {
    cluster.shutdown();   
  }

+  public TestViewFsAtHdfsRoot() {
+    super(new FileContextTestHelper(true));
+  }
+
  @Override
  @Before
  public void setUp() throws Exception {

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java

@@ -81,7 +81,7 @@ public class TestViewFsDefaultValue {
    cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(DFS_REPLICATION_DEFAULT + 1).build();
    cluster.waitClusterUp();
    fHdfs = cluster.getFileSystem();
-    FileSystemTestHelper.createFile(fHdfs, testFileName);
+    new FileSystemTestHelper(true).createFile(fHdfs, testFileName);
    Configuration conf = ViewFileSystemTestSetup.createConfig();
    ConfigUtil.addLink(conf, "/tmp", new URI(fHdfs.getUri().toString() +
      "/tmp"));

Some files were not shown because too many files changed in this diff