
Rebasing branch-2.2 from branch-2.2.0

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2.2@1547143 13f79535-47bb-0310-9956-ffa450edef68
Arun Murthy 11 years ago
parent
commit
17bd88d1b0
100 changed files with 1058 additions and 3670 deletions
  1. + 0 - 1  .gitattributes
  2. + 2 - 2  hadoop-assemblies/pom.xml
  3. + 2 - 2  hadoop-client/pom.xml
  4. + 2 - 2  hadoop-common-project/hadoop-annotations/pom.xml
  5. + 2 - 2  hadoop-common-project/hadoop-auth-examples/pom.xml
  6. + 2 - 7  hadoop-common-project/hadoop-auth/pom.xml
  7. + 1 - 1  hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
  8. + 1 - 0  hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
  9. + 0 - 70  hadoop-common-project/hadoop-common/CHANGES.txt
  10. + 2 - 2  hadoop-common-project/hadoop-common/pom.xml
  11. + 12 - 12  hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd
  12. + 0 - 1  hadoop-common-project/hadoop-common/src/main/conf/ssl-server.xml.example
  13. + 88 - 220  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  14. + 0 - 8  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
  15. + 0 - 41  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DirectoryListingStartAfterNotFoundException.java
  16. + 0 - 5  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSLinkResolver.java
  17. + 4 - 7  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
  18. + 6 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  19. + 0 - 5  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemLinkResolver.java
  20. + 12 - 105  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
  21. + 35 - 8  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
  22. + 7 - 25  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
  23. + 3 - 6  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java
  24. + 28 - 98  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
  25. + 3 - 3  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
  26. + 2 - 7  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
  27. + 3 - 8  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
  28. + 3 - 3  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java
  29. + 0 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
  30. + 7 - 30  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
  31. + 0 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
  32. + 1 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
  33. + 0 - 41  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetriableException.java
  34. + 21 - 27  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  35. + 8 - 21  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
  36. + 5 - 10  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
  37. + 1 - 24  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
  38. + 7 - 17  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
  39. + 2 - 46  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
  40. + 1 - 1  hadoop-common-project/hadoop-common/src/main/native/native.vcxproj
  41. + 0 - 9  hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  42. + 1 - 1  hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm
  43. + 0 - 79  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
  44. + 0 - 1  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java
  45. + 0 - 6  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/SymlinkBaseTest.java
  46. + 0 - 4  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextResolveAfs.java
  47. + 16 - 83  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java
  48. + 5 - 176  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
  49. + 1 - 2  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFsFCStatistics.java
  50. + 0 - 91  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericOptionsParser.java
  51. + 5 - 0  hadoop-common-project/hadoop-common/src/test/resources/core-site.xml
  52. + 2 - 2  hadoop-common-project/hadoop-nfs/pom.xml
  53. + 5 - 5  hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountEntry.java
  54. + 2 - 2  hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java
  55. + 5 - 10  hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
  56. + 20 - 11  hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
  57. + 1 - 10  hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java
  58. + 2 - 2  hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java
  59. + 0 - 17  hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READ3Request.java
  60. + 0 - 6  hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/WRITE3Request.java
  61. + 1 - 9  hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java
  62. + 1 - 14  hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java
  63. + 12 - 13  hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
  64. + 3 - 12  hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
  65. + 3 - 12  hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java
  66. + 21 - 90  hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
  67. + 95 - 0  hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapInterface.java
  68. + 2 - 1  hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapRequest.java
  69. + 9 - 1  hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapResponse.java
  70. + 75 - 109  hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java
  71. + 2 - 11  hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java
  72. + 0 - 116  hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java
  73. + 2 - 2  hadoop-common-project/pom.xml
  74. + 2 - 2  hadoop-dist/pom.xml
  75. + 2 - 2  hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
  76. + 2 - 2  hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
  77. + 15 - 4  hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java
  78. + 13 - 13  hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
  79. + 0 - 2  hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
  80. + 4 - 95  hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
  81. + 12 - 19  hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
  82. + 0 - 14  hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
  83. + 68 - 318  hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
  84. + 0 - 270  hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
  85. + 64 - 127  hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
  86. + 151 - 163  hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
  87. + 7 - 7  hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
  88. + 1 - 5  hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
  89. + 141 - 0  hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestPortmapRegister.java
  90. + 0 - 196  hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestReaddir.java
  91. + 4 - 4  hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestDFSClientCache.java
  92. + 0 - 65  hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java
  93. + 0 - 141  hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOpenFileCtxCache.java
  94. + 2 - 277  hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
  95. + 0 - 128  hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  96. + 2 - 2  hadoop-hdfs-project/hadoop-hdfs/pom.xml
  97. + 1 - 10  hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
  98. + 2 - 2  hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
  99. + 1 - 18  hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
  100. + 0 - 5  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

+ 0 - 1
.gitattributes

@@ -15,6 +15,5 @@
 
 *.bat    text eol=crlf
 *.cmd    text eol=crlf
-*.vcxproj text merge=union eol=crlf
 *.csproj text merge=union eol=crlf
 *.sln    text merge=union eol=crlf

+ 2 - 2
hadoop-assemblies/pom.xml

@@ -23,12 +23,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>2.2.1-SNAPSHOT</version>
+    <version>2.2.0</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-assemblies</artifactId>
-  <version>2.2.1-SNAPSHOT</version>
+  <version>2.2.0</version>
   <name>Apache Hadoop Assemblies</name>
   <description>Apache Hadoop Assemblies</description>
 

+ 2 - 2
hadoop-client/pom.xml

@@ -18,12 +18,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>2.2.1-SNAPSHOT</version>
+    <version>2.2.0</version>
     <relativePath>../hadoop-project-dist</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-client</artifactId>
-  <version>2.2.1-SNAPSHOT</version>
+  <version>2.2.0</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Client</description>

+ 2 - 2
hadoop-common-project/hadoop-annotations/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>2.2.1-SNAPSHOT</version>
+    <version>2.2.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-annotations</artifactId>
-  <version>2.2.1-SNAPSHOT</version>
+  <version>2.2.0</version>
   <description>Apache Hadoop Annotations</description>
   <name>Apache Hadoop Annotations</name>
   <packaging>jar</packaging>

+ 2 - 2
hadoop-common-project/hadoop-auth-examples/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>2.2.1-SNAPSHOT</version>
+    <version>2.2.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-auth-examples</artifactId>
-  <version>2.2.1-SNAPSHOT</version>
+  <version>2.2.0</version>
   <packaging>war</packaging>
 
   <name>Apache Hadoop Auth Examples</name>

+ 2 - 7
hadoop-common-project/hadoop-auth/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>2.2.1-SNAPSHOT</version>
+    <version>2.2.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-auth</artifactId>
-  <version>2.2.1-SNAPSHOT</version>
+  <version>2.2.0</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop Auth</name>
@@ -53,11 +53,6 @@
       <artifactId>mockito-all</artifactId>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>org.mortbay.jetty</groupId>
-      <artifactId>jetty-util</artifactId>
-      <scope>test</scope>
-    </dependency>
     <dependency>
       <groupId>org.mortbay.jetty</groupId>
       <artifactId>jetty</artifactId>

+ 1 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java

@@ -185,7 +185,7 @@ public class KerberosAuthenticator implements Authenticator {
       conn.setRequestMethod(AUTH_HTTP_METHOD);
       conn.connect();
       
-      if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
+      if (conn.getRequestProperty(AUTHORIZATION) != null && conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
         LOG.debug("JDK performed authentication on our behalf.");
         // If the JDK already did the SPNEGO back-and-forth for
         // us, just pull out the token.

+ 1 - 0
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java

@@ -138,6 +138,7 @@ public abstract class AuthenticatorTestCase extends TestCase {
       TestConnectionConfigurator connConf = new TestConnectionConfigurator();
       AuthenticatedURL aUrl = new AuthenticatedURL(authenticator, connConf);
       HttpURLConnection conn = aUrl.openConnection(url, token);
+      Assert.assertTrue(token.isSet());
       Assert.assertTrue(connConf.invoked);
       String tokenStr = token.toString();
       if (doPost) {

+ 0 - 70
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -1,72 +1,5 @@
 Hadoop Change Log
 
-Release 2.2.1 - UNRELEASED
-
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-
-  IMPROVEMENTS
-
-    HADOOP-10046. Print a log message when SSL is enabled.
-    (David S. Wang via wang)
-
-    HADOOP-10079. log a warning message if group resolution takes too long.
-    (cmccabe)
-
-    HADOOP-9623. Update jets3t dependency to 0.9.0 (Amandeep Khurana via Colin
-    Patrick McCabe)
-
-    HADOOP-10132. RPC#stopProxy() should log the class of proxy when IllegalArgumentException 
-    is encountered (Ted yu via umamahesh)
-
-  OPTIMIZATIONS
-
-  BUG FIXES
-
-    HADOOP-10028. Malformed ssl-server.xml.example. (Haohui Mai via jing9)
-
-    HADOOP-10030. FsShell -put/copyFromLocal should support Windows local path.
-    (Chuan Liu via cnauroth)
-
-    HADOOP-10031. FsShell -get/copyToLocal/moveFromLocal should support Windows
-    local path. (Chuan Liu via cnauroth)
-
-    HADOOP-10039. Add Hive to the list of projects using 
-    AbstractDelegationTokenSecretManager. (Haohui Mai via jing9)
-
-    HADOOP-10040. hadoop.cmd in UNIX format and would not run by default on
-    Windows. (cnauroth)
-
-    HADOOP-10055. FileSystemShell.apt.vm doc has typo "numRepicas".
-    (Akira Ajisaka via cnauroth)
-
-    HADOOP-10052. Temporarily disable client-side symlink resolution
-    (branch-2.2 only change). (wang)
-
-    HADOOP-10072. TestNfsExports#testMultiMatchers fails due to non-deterministic
-    timing around cache expiry check. (cnauroth)
-
-    HADOOP-9898. Set SO_KEEPALIVE on all our sockets. (todd via wang)
-
-    HADOOP-9478. Fix race conditions during the initialization of Configuration
-    related to deprecatedKeyMap (cmccabe)
-
-    HADOOP-9660. [WINDOWS] Powershell / cmd parses -Dkey=value from command line
-    as [-Dkey, value] which breaks GenericsOptionParser.
-    (Enis Soztutar via cnauroth)
-
-    HADOOP-10078. KerberosAuthenticator always does SPNEGO. (rkanter via tucu)
-
-    HADOOP-10110. hadoop-auth has a build break due to missing dependency.
-    (Chuan Liu via arp)
-
-    HADOOP-9114. After defined the dfs.checksum.type as the NULL, write file and hflush will 
-    through java.lang.ArrayIndexOutOfBoundsException (Sathish via umamahesh)
-
-    HADOOP-10130. RawLocalFS::LocalFSFileInputStream.pread does not track
-    FS::Statistics (Binglin Chang via Colin Patrick McCabe)
-
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES
@@ -245,9 +178,6 @@ Release 2.1.1-beta - 2013-09-23
     HADOOP-9977. Hadoop services won't start with different keypass and
     keystorepass when https is enabled. (cnauroth)
 
-    HADOOP-10005. No need to check INFO severity level is enabled or not.
-    (Jackie Chang via suresh)
-
 Release 2.1.0-beta - 2013-08-22
 
   INCOMPATIBLE CHANGES

+ 2 - 2
hadoop-common-project/hadoop-common/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>2.2.1-SNAPSHOT</version>
+    <version>2.2.0</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-common</artifactId>
-  <version>2.2.1-SNAPSHOT</version>
+  <version>2.2.0</version>
   <description>Apache Hadoop Common</description>
   <name>Apache Hadoop Common</name>
   <packaging>jar</packaging>

+ 12 - 12
hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd

@@ -145,6 +145,18 @@ if exist %HADOOP_COMMON_HOME%\%HADOOP_COMMON_LIB_JARS_DIR% (
 
 set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR%\*
 
+@rem
+@rem add user-specified CLASSPATH last
+@rem
+
+if defined HADOOP_CLASSPATH (
+  if defined HADOOP_USER_CLASSPATH_FIRST (
+    set CLASSPATH=%HADOOP_CLASSPATH%;%CLASSPATH%;
+  ) else (
+    set CLASSPATH=%CLASSPATH%;%HADOOP_CLASSPATH%;
+  )
+)
+
 @rem
 @rem default log directory % file
 @rem
@@ -277,16 +289,4 @@ if not "%HADOOP_MAPRED_HOME%\%MAPRED_DIR%" == "%HADOOP_YARN_HOME%\%YARN_DIR%" (
   set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_DIR%\*
 )
 
-@rem
-@rem add user-specified CLASSPATH last
-@rem
-
-if defined HADOOP_CLASSPATH (
-  if defined HADOOP_USER_CLASSPATH_FIRST (
-    set CLASSPATH=%HADOOP_CLASSPATH%;%CLASSPATH%;
-  ) else (
-    set CLASSPATH=%CLASSPATH%;%HADOOP_CLASSPATH%;
-  )
-)
-
 :eof

+ 0 - 1
hadoop-common-project/hadoop-common/src/main/conf/ssl-server.xml.example

@@ -44,7 +44,6 @@
   <value>10000</value>
   <description>Truststore reload check interval, in milliseconds.
   Default value is 10000 (10 seconds).
-  </description>
 </property>
 
 <property>

+ 88 - 220
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -55,8 +55,6 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.regex.PatternSyntaxException;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
 
 import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.DocumentBuilderFactory;
@@ -67,7 +65,6 @@ import javax.xml.transform.TransformerFactory;
 import javax.xml.transform.dom.DOMSource;
 import javax.xml.transform.stream.StreamResult;
 
-import org.apache.commons.collections.map.UnmodifiableMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -90,7 +87,6 @@ import org.w3c.dom.Node;
 import org.w3c.dom.NodeList;
 import org.w3c.dom.Text;
 import org.xml.sax.SAXException;
-
 import com.google.common.base.Preconditions;
 
 /** 
@@ -258,13 +254,13 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * warning message which can be logged whenever the deprecated key is used.
    */
   private static class DeprecatedKeyInfo {
-    private final String[] newKeys;
-    private final String customMessage;
-    private final AtomicBoolean accessed = new AtomicBoolean(false);
-
+    private String[] newKeys;
+    private String customMessage;
+    private boolean accessed;
     DeprecatedKeyInfo(String[] newKeys, String customMessage) {
       this.newKeys = newKeys;
       this.customMessage = customMessage;
+      accessed = false;
     }
 
     /**
@@ -290,170 +286,26 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       else {
         warningMessage = customMessage;
       }
+      accessed = true;
       return warningMessage;
     }
-
-    boolean getAndSetAccessed() {
-      return accessed.getAndSet(true);
-    }
-
-    public void clearAccessed() {
-      accessed.set(false);
-    }
   }
   
   /**
-   * A pending addition to the global set of deprecated keys.
-   */
-  public static class DeprecationDelta {
-    private final String key;
-    private final String[] newKeys;
-    private final String customMessage;
-
-    DeprecationDelta(String key, String[] newKeys, String customMessage) {
-      Preconditions.checkNotNull(key);
-      Preconditions.checkNotNull(newKeys);
-      Preconditions.checkArgument(newKeys.length > 0);
-      this.key = key;
-      this.newKeys = newKeys;
-      this.customMessage = customMessage;
-    }
-
-    public DeprecationDelta(String key, String newKey, String customMessage) {
-      this(key, new String[] { newKey }, customMessage);
-    }
-
-    public DeprecationDelta(String key, String newKey) {
-      this(key, new String[] { newKey }, null);
-    }
-
-    public String getKey() {
-      return key;
-    }
-
-    public String[] getNewKeys() {
-      return newKeys;
-    }
-
-    public String getCustomMessage() {
-      return customMessage;
-    }
-  }
-
-  /**
-   * The set of all keys which are deprecated.
-   *
-   * DeprecationContext objects are immutable.
+   * Stores the deprecated keys, the new keys which replace the deprecated keys
+   * and custom message(if any provided).
    */
-  private static class DeprecationContext {
-    /**
-     * Stores the deprecated keys, the new keys which replace the deprecated keys
-     * and custom message(if any provided).
-     */
-    private final Map<String, DeprecatedKeyInfo> deprecatedKeyMap;
-
-    /**
-     * Stores a mapping from superseding keys to the keys which they deprecate.
-     */
-    private final Map<String, String> reverseDeprecatedKeyMap;
-
-    /**
-     * Create a new DeprecationContext by copying a previous DeprecationContext
-     * and adding some deltas.
-     *
-     * @param other   The previous deprecation context to copy, or null to start
-     *                from nothing.
-     * @param deltas  The deltas to apply.
-     */
-    @SuppressWarnings("unchecked")
-    DeprecationContext(DeprecationContext other, DeprecationDelta[] deltas) {
-      HashMap<String, DeprecatedKeyInfo> newDeprecatedKeyMap = 
-        new HashMap<String, DeprecatedKeyInfo>();
-      HashMap<String, String> newReverseDeprecatedKeyMap =
-        new HashMap<String, String>();
-      if (other != null) {
-        for (Entry<String, DeprecatedKeyInfo> entry :
-            other.deprecatedKeyMap.entrySet()) {
-          newDeprecatedKeyMap.put(entry.getKey(), entry.getValue());
-        }
-        for (Entry<String, String> entry :
-            other.reverseDeprecatedKeyMap.entrySet()) {
-          newReverseDeprecatedKeyMap.put(entry.getKey(), entry.getValue());
-        }
-      }
-      for (DeprecationDelta delta : deltas) {
-        if (!newDeprecatedKeyMap.containsKey(delta.getKey())) {
-          DeprecatedKeyInfo newKeyInfo =
-            new DeprecatedKeyInfo(delta.getNewKeys(), delta.getCustomMessage());
-          newDeprecatedKeyMap.put(delta.key, newKeyInfo);
-          for (String newKey : delta.getNewKeys()) {
-            newReverseDeprecatedKeyMap.put(newKey, delta.key);
-          }
-        }
-      }
-      this.deprecatedKeyMap =
-        UnmodifiableMap.decorate(newDeprecatedKeyMap);
-      this.reverseDeprecatedKeyMap =
-        UnmodifiableMap.decorate(newReverseDeprecatedKeyMap);
-    }
-
-    Map<String, DeprecatedKeyInfo> getDeprecatedKeyMap() {
-      return deprecatedKeyMap;
-    }
-
-    Map<String, String> getReverseDeprecatedKeyMap() {
-      return reverseDeprecatedKeyMap;
-    }
-  }
+  private static Map<String, DeprecatedKeyInfo> deprecatedKeyMap = 
+      new HashMap<String, DeprecatedKeyInfo>();
   
-  private static DeprecationDelta[] defaultDeprecations = 
-    new DeprecationDelta[] {
-      new DeprecationDelta("topology.script.file.name", 
-        CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY),
-      new DeprecationDelta("topology.script.number.args", 
-        CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY),
-      new DeprecationDelta("hadoop.configured.node.mapping", 
-        CommonConfigurationKeys.NET_TOPOLOGY_CONFIGURED_NODE_MAPPING_KEY),
-      new DeprecationDelta("topology.node.switch.mapping.impl", 
-        CommonConfigurationKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY),
-      new DeprecationDelta("dfs.df.interval", 
-        CommonConfigurationKeys.FS_DF_INTERVAL_KEY),
-      new DeprecationDelta("hadoop.native.lib", 
-        CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY),
-      new DeprecationDelta("fs.default.name", 
-        CommonConfigurationKeys.FS_DEFAULT_NAME_KEY),
-      new DeprecationDelta("dfs.umaskmode",
-        CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY)
-    };
-
-  /**
-   * The global DeprecationContext.
-   */
-  private static AtomicReference<DeprecationContext> deprecationContext =
-      new AtomicReference<DeprecationContext>(
-          new DeprecationContext(null, defaultDeprecations));
-
   /**
-   * Adds a set of deprecated keys to the global deprecations.
-   *
-   * This method is lockless.  It works by means of creating a new
-   * DeprecationContext based on the old one, and then atomically swapping in
-   * the new context.  If someone else updated the context in between us reading
-   * the old context and swapping in the new one, we try again until we win the
-   * race.
-   *
-   * @param deltas   The deprecations to add.
+   * Stores a mapping from superseding keys to the keys which they deprecate.
    */
-  public static void addDeprecations(DeprecationDelta[] deltas) {
-    DeprecationContext prev, next;
-    do {
-      prev = deprecationContext.get();
-      next = new DeprecationContext(prev, deltas);
-    } while (!deprecationContext.compareAndSet(prev, next));
-  }
+  private static Map<String, String> reverseDeprecatedKeyMap =
+      new HashMap<String, String>();
 
   /**
-   * Adds the deprecated key to the global deprecation map.
+   * Adds the deprecated key to the deprecation map.
    * It does not override any existing entries in the deprecation map.
    * This is to be used only by the developers in order to add deprecation of
    * keys, and attempts to call this method after loading resources once,
@@ -462,9 +314,6 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * If a key is deprecated in favor of multiple keys, they are all treated as 
    * aliases of each other, and setting any one of them resets all the others 
    * to the new value.
-   *
-   * If you have multiple deprecation entries to add, it is more efficient to
-   * use #addDeprecations(DeprecationDelta[] deltas) instead.
    * 
    * @param key
    * @param newKeys
@@ -473,35 +322,41 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       String customMessage)} instead
    */
   @Deprecated
-  public static void addDeprecation(String key, String[] newKeys,
+  public synchronized static void addDeprecation(String key, String[] newKeys,
       String customMessage) {
-    addDeprecations(new DeprecationDelta[] {
-      new DeprecationDelta(key, newKeys, customMessage)
-    });
+    if (key == null || key.length() == 0 ||
+        newKeys == null || newKeys.length == 0) {
+      throw new IllegalArgumentException();
+    }
+    if (!isDeprecated(key)) {
+      DeprecatedKeyInfo newKeyInfo;
+      newKeyInfo = new DeprecatedKeyInfo(newKeys, customMessage);
+      deprecatedKeyMap.put(key, newKeyInfo);
+      for (String newKey : newKeys) {
+        reverseDeprecatedKeyMap.put(newKey, key);
+      }
+    }
   }
-
+  
   /**
-   * Adds the deprecated key to the global deprecation map.
+   * Adds the deprecated key to the deprecation map.
    * It does not override any existing entries in the deprecation map.
    * This is to be used only by the developers in order to add deprecation of
    * keys, and attempts to call this method after loading resources once,
    * would lead to <tt>UnsupportedOperationException</tt>
    * 
-   * If you have multiple deprecation entries to add, it is more efficient to
-   * use #addDeprecations(DeprecationDelta[] deltas) instead.
-   *
    * @param key
    * @param newKey
    * @param customMessage
    */
-  public static void addDeprecation(String key, String newKey,
+  public synchronized static void addDeprecation(String key, String newKey,
 	      String customMessage) {
 	  addDeprecation(key, new String[] {newKey}, customMessage);
   }
 
   /**
-   * Adds the deprecated key to the global deprecation map when no custom
-   * message is provided.
+   * Adds the deprecated key to the deprecation map when no custom message
+   * is provided.
    * It does not override any existing entries in the deprecation map.
    * This is to be used only by the developers in order to add deprecation of
    * keys, and attempts to call this method after loading resources once,
@@ -511,34 +366,28 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * aliases of each other, and setting any one of them resets all the others 
    * to the new value.
    * 
-   * If you have multiple deprecation entries to add, it is more efficient to
-   * use #addDeprecations(DeprecationDelta[] deltas) instead.
-   *
    * @param key Key that is to be deprecated
    * @param newKeys list of keys that take up the values of deprecated key
    * @deprecated use {@link #addDeprecation(String key, String newKey)} instead
    */
   @Deprecated
-  public static void addDeprecation(String key, String[] newKeys) {
+  public synchronized static void addDeprecation(String key, String[] newKeys) {
     addDeprecation(key, newKeys, null);
   }
   
   /**
-   * Adds the deprecated key to the global deprecation map when no custom
-   * message is provided.
+   * Adds the deprecated key to the deprecation map when no custom message
+   * is provided.
    * It does not override any existing entries in the deprecation map.
    * This is to be used only by the developers in order to add deprecation of
    * keys, and attempts to call this method after loading resources once,
    * would lead to <tt>UnsupportedOperationException</tt>
    * 
-   * If you have multiple deprecation entries to add, it is more efficient to
-   * use #addDeprecations(DeprecationDelta[] deltas) instead.
-   *
    * @param key Key that is to be deprecated
    * @param newKey key that takes up the value of deprecated key
    */
-  public static void addDeprecation(String key, String newKey) {
-    addDeprecation(key, new String[] {newKey}, null);
+  public synchronized static void addDeprecation(String key, String newKey) {
+	addDeprecation(key, new String[] {newKey}, null);
   }
   
   /**
@@ -549,7 +398,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    *         <code>false</code> otherwise.
    */
   public static boolean isDeprecated(String key) {
-    return deprecationContext.get().getDeprecatedKeyMap().containsKey(key);
+    return deprecatedKeyMap.containsKey(key);
   }
 
   /**
@@ -561,14 +410,13 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    */
   private String[] getAlternateNames(String name) {
     String altNames[] = null;
-    DeprecationContext cur = deprecationContext.get();
-    DeprecatedKeyInfo keyInfo = cur.getDeprecatedKeyMap().get(name);
+    DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(name);
     if (keyInfo == null) {
-      altNames = (cur.getReverseDeprecatedKeyMap().get(name) != null ) ? 
-        new String [] {cur.getReverseDeprecatedKeyMap().get(name)} : null;
+      altNames = (reverseDeprecatedKeyMap.get(name) != null ) ? 
+        new String [] {reverseDeprecatedKeyMap.get(name)} : null;
       if(altNames != null && altNames.length > 0) {
     	//To help look for other new configs for this deprecated config
-    	keyInfo = cur.getDeprecatedKeyMap().get(altNames[0]);
+    	keyInfo = deprecatedKeyMap.get(altNames[0]);
       }      
     } 
     if(keyInfo != null && keyInfo.newKeys.length > 0) {
@@ -594,12 +442,11 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * @return the first property in the list of properties mapping
    *         the <code>name</code> or the <code>name</code> itself.
    */
-  private String[] handleDeprecation(DeprecationContext deprecations,
-      String name) {
+  private String[] handleDeprecation(String name) {
     ArrayList<String > names = new ArrayList<String>();
 	if (isDeprecated(name)) {
-      DeprecatedKeyInfo keyInfo = deprecations.getDeprecatedKeyMap().get(name);
-      warnOnceIfDeprecated(deprecations, name);
+      DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(name);
+      warnOnceIfDeprecated(name);
       for (String newKey : keyInfo.newKeys) {
         if(newKey != null) {
           names.add(newKey);
@@ -610,7 +457,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     	names.add(name);
     }
     for(String n : names) {
-	  String deprecatedKey = deprecations.getReverseDeprecatedKeyMap().get(n);
+	  String deprecatedKey = reverseDeprecatedKeyMap.get(n);
 	  if (deprecatedKey != null && !getOverlay().containsKey(n) &&
 	      getOverlay().containsKey(deprecatedKey)) {
 	    getProps().setProperty(n, getOverlay().getProperty(deprecatedKey));
@@ -622,12 +469,11 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
  
   private void handleDeprecation() {
     LOG.debug("Handling deprecation for all properties in config...");
-    DeprecationContext deprecations = deprecationContext.get();
     Set<Object> keys = new HashSet<Object>();
     keys.addAll(getProps().keySet());
     for (Object item: keys) {
       LOG.debug("Handling deprecation for " + (String)item);
-      handleDeprecation(deprecations, (String)item);
+      handleDeprecation((String)item);
     }
   }
  
@@ -646,6 +492,13 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     }
     addDefaultResource("core-default.xml");
     addDefaultResource("core-site.xml");
+    //Add code for managing deprecated key mapping
+    //for example
+    //addDeprecation("oldKey1",new String[]{"newkey1","newkey2"});
+    //adds deprecation for oldKey1 to two new keys(newkey1, newkey2).
+    //so get or set of oldKey1 will correctly populate/access values of 
+    //newkey1 and newkey2
+    addDeprecatedKeys();
   }
   
   private Properties properties;
@@ -862,7 +715,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    *         or null if no such property exists.
    */
   public String get(String name) {
-    String[] names = handleDeprecation(deprecationContext.get(), name);
+    String[] names = handleDeprecation(name);
     String result = null;
     for(String n : names) {
       result = substituteVars(getProps().getProperty(n));
@@ -919,7 +772,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    *         its replacing property and null if no such property exists.
    */
   public String getRaw(String name) {
-    String[] names = handleDeprecation(deprecationContext.get(), name);
+    String[] names = handleDeprecation(name);
     String result = null;
     for(String n : names) {
       result = getProps().getProperty(n);
@@ -957,8 +810,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     Preconditions.checkArgument(
         value != null,
         "Property value must not be null");
-    DeprecationContext deprecations = deprecationContext.get();
-    if (deprecations.getDeprecatedKeyMap().isEmpty()) {
+    if (deprecatedKeyMap.isEmpty()) {
       getProps();
     }
     getOverlay().setProperty(name, value);
@@ -979,12 +831,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
         }
       }
     }
-    warnOnceIfDeprecated(deprecations, name);
+    warnOnceIfDeprecated(name);
   }
 
-  private void warnOnceIfDeprecated(DeprecationContext deprecations, String name) {
-    DeprecatedKeyInfo keyInfo = deprecations.getDeprecatedKeyMap().get(name);
-    if (keyInfo != null && !keyInfo.getAndSetAccessed()) {
+  private void warnOnceIfDeprecated(String name) {
+    DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(name);
+    if (keyInfo != null && !keyInfo.accessed) {
       LOG_DEPRECATION.info(keyInfo.getWarningMessage(name));
     }
   }
@@ -1035,7 +887,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    *         doesn't exist.                    
    */
   public String get(String name, String defaultValue) {
-    String[] names = handleDeprecation(deprecationContext.get(), name);
+    String[] names = handleDeprecation(name);
     String result = null;
     for(String n : names) {
       result = substituteVars(getProps().getProperty(n, defaultValue));
@@ -2231,7 +2083,6 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       if (!"configuration".equals(root.getTagName()))
         LOG.fatal("bad conf file: top-level element not <configuration>");
       NodeList props = root.getChildNodes();
-      DeprecationContext deprecations = deprecationContext.get();
       for (int i = 0; i < props.getLength(); i++) {
         Node propNode = props.item(i);
         if (!(propNode instanceof Element))
@@ -2269,10 +2120,9 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
         
         // Ignore this parameter if it has already been marked as 'final'
         if (attr != null) {
-          if (deprecations.getDeprecatedKeyMap().containsKey(attr)) {
-            DeprecatedKeyInfo keyInfo =
-                deprecations.getDeprecatedKeyMap().get(attr);
-            keyInfo.clearAccessed();
+          if (deprecatedKeyMap.containsKey(attr)) {
+            DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(attr);
+            keyInfo.accessed = false;
             for (String key:keyInfo.newKeys) {
               // update new keys with deprecated key's value 
               loadProperty(toAddTo, name, key, value, finalParameter, 
@@ -2566,6 +2416,26 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     return result;
   }
 
+  //Load deprecated keys in common
+  private static void addDeprecatedKeys() {
+    Configuration.addDeprecation("topology.script.file.name", 
+               new String[]{CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY});
+    Configuration.addDeprecation("topology.script.number.args", 
+               new String[]{CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY});
+    Configuration.addDeprecation("hadoop.configured.node.mapping", 
+               new String[]{CommonConfigurationKeys.NET_TOPOLOGY_CONFIGURED_NODE_MAPPING_KEY});
+    Configuration.addDeprecation("topology.node.switch.mapping.impl", 
+               new String[]{CommonConfigurationKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY});
+    Configuration.addDeprecation("dfs.df.interval", 
+               new String[]{CommonConfigurationKeys.FS_DF_INTERVAL_KEY});
+    Configuration.addDeprecation("hadoop.native.lib", 
+               new String[]{CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY});
+    Configuration.addDeprecation("fs.default.name", 
+               new String[]{CommonConfigurationKeys.FS_DEFAULT_NAME_KEY});
+    Configuration.addDeprecation("dfs.umaskmode",
+        new String[]{CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY});
+  }
+  
   /**
    * A unique class which is used as a sentinel value in the caching
    * for getClassByName. {@see Configuration#getClassByNameOrNull(String)}
@@ -2573,14 +2443,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   private static abstract class NegativeCacheSentinel {}
 
   public static void dumpDeprecatedKeys() {
-    DeprecationContext deprecations = deprecationContext.get();
-    for (Map.Entry<String, DeprecatedKeyInfo> entry :
-        deprecations.getDeprecatedKeyMap().entrySet()) {
-      StringBuilder newKeys = new StringBuilder();
+    for (Map.Entry<String, DeprecatedKeyInfo> entry : deprecatedKeyMap.entrySet()) {
+      String newKeys = "";
       for (String newKey : entry.getValue().newKeys) {
-        newKeys.append(newKey).append("\t");
+        newKeys += newKey + "\t";
       }
-      System.out.println(entry.getKey() + "\t" + newKeys.toString());
+      System.out.println(entry.getKey() + "\t" + newKeys);
     }
   }
 }
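
For context: the Javadoc removed above describes the lock-free scheme this rebase reverts, in which deprecations live in an immutable DeprecationContext held in an AtomicReference and additions copy the old context and compare-and-swap the new one in; the hunk replaces it with synchronized static methods over plain HashMaps. A simplified sketch of the reverted pattern (class and method names invented, not the actual Hadoop code):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.atomic.AtomicReference;

    // Copy-on-write + compare-and-swap over an immutable snapshot, as in the
    // removed DeprecationContext; simplified to a single old-key -> new-key map.
    final class DeprecationRegistry {
      private static final AtomicReference<Map<String, String>> CONTEXT =
          new AtomicReference<Map<String, String>>(Collections.<String, String>emptyMap());

      static void addDeprecation(String oldKey, String newKey) {
        Map<String, String> prev;
        Map<String, String> next;
        do {
          prev = CONTEXT.get();
          HashMap<String, String> copy = new HashMap<String, String>(prev); // copy old snapshot
          if (!copy.containsKey(oldKey)) {        // never override an existing entry
            copy.put(oldKey, newKey);
          }
          next = Collections.unmodifiableMap(copy);
        } while (!CONTEXT.compareAndSet(prev, next)); // lost the race: retry against new snapshot
      }

      // Lock-free read: just dereference the current snapshot.
      static boolean isDeprecated(String key) {
        return CONTEXT.get().containsKey(key);
      }
    }
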

+ 0 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -235,14 +235,6 @@ public class CommonConfigurationKeysPublic {
   public static final String  HADOOP_SECURITY_GROUPS_CACHE_SECS =
     "hadoop.security.groups.cache.secs";
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
-  public static final long HADOOP_SECURITY_GROUPS_CACHE_SECS_DEFAULT =
-    300;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
-  public static final String HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS =
-    "hadoop.security.groups.cache.warn.after.ms";
-  public static final long HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS_DEFAULT =
-    5000;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  HADOOP_SECURITY_AUTHENTICATION =
     "hadoop.security.authentication";
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */

+ 0 - 41
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DirectoryListingStartAfterNotFoundException.java

@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/** 
- * Thrown when the startAfter can't be found when listing a directory.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS"})
-@InterfaceStability.Stable
-public class DirectoryListingStartAfterNotFoundException extends IOException {
-  private static final long serialVersionUID = 1L;
-
-  public DirectoryListingStartAfterNotFoundException() {
-    super();
-  }
-
-  public DirectoryListingStartAfterNotFoundException(String msg) {
-    super(msg);
-  }
-}

+ 0 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSLinkResolver.java

@@ -76,7 +76,6 @@ public abstract class FSLinkResolver<T> {
    * @return Generic type determined by the implementation of next.
    * @throws IOException
    */
-  @SuppressWarnings("deprecation")
   public T resolve(final FileContext fc, final Path path) throws IOException {
     int count = 0;
     T in = null;
@@ -96,10 +95,6 @@ public abstract class FSLinkResolver<T> {
               + " and symlink resolution is disabled ("
               + CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY + ").", e);
         }
-        if (!FileSystem.isSymlinksEnabled()) {
-          throw new IOException("Symlink resolution is disabled in"
-              + " this version of Hadoop.");
-        }
         if (count++ > FsConstants.MAX_PATH_LINKS) {
           throw new IOException("Possible cyclic loop while " +
                                 "following symbolic link " + path);

+ 4 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java

@@ -183,13 +183,10 @@ abstract public class FSOutputSummer extends OutputStream {
   }
 
   static byte[] int2byte(int integer, byte[] bytes) {
-    if (bytes.length != 0) {
-      bytes[0] = (byte) ((integer >>> 24) & 0xFF);
-      bytes[1] = (byte) ((integer >>> 16) & 0xFF);
-      bytes[2] = (byte) ((integer >>> 8) & 0xFF);
-      bytes[3] = (byte) ((integer >>> 0) & 0xFF);
-      return bytes;
-    }
+    bytes[0] = (byte)((integer >>> 24) & 0xFF);
+    bytes[1] = (byte)((integer >>> 16) & 0xFF);
+    bytes[2] = (byte)((integer >>>  8) & 0xFF);
+    bytes[3] = (byte)((integer >>>  0) & 0xFF);
     return bytes;
   }
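
The int2byte change reverts to the unconditional form that packs a 32-bit checksum into four bytes, most significant byte first. A stand-alone worked example of that shift-and-mask layout (demo class only, not part of the patch):

    // Big-endian packing as in FSOutputSummer.int2byte.
    public class Int2ByteDemo {
      static byte[] int2byte(int integer, byte[] bytes) {
        bytes[0] = (byte) ((integer >>> 24) & 0xFF);  // most significant byte first
        bytes[1] = (byte) ((integer >>> 16) & 0xFF);
        bytes[2] = (byte) ((integer >>>  8) & 0xFF);
        bytes[3] = (byte) ((integer >>>  0) & 0xFF);  // least significant byte last
        return bytes;
      }

      public static void main(String[] args) {
        byte[] out = int2byte(0x12345678, new byte[4]);
        for (byte b : out) {
          System.out.printf("%02x ", b);              // prints: 12 34 56 78
        }
        System.out.println();
      }
    }
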
 

+ 6 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -2808,12 +2808,17 @@ public abstract class FileSystem extends Configured implements Closeable {
     }
   }
   
-  // Symlinks are temporarily disabled - see HADOOP-10020 and HADOOP-10052
+  // Symlinks are temporarily disabled - see Hadoop-10020
   private static boolean symlinkEnabled = false;
+  private static Configuration conf = null;
   
   @Deprecated
   @VisibleForTesting
   public static boolean isSymlinksEnabled() {
+    if (conf == null) {
+      Configuration conf = new Configuration();
+      symlinkEnabled = conf.getBoolean("test.SymlinkEnabledForTesting", false); 
+    }
     return symlinkEnabled;
   }
   

+ 0 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemLinkResolver.java

@@ -68,7 +68,6 @@ public abstract class FileSystemLinkResolver<T> {
    * @return Generic type determined by implementation
    * @throws IOException
    */
-  @SuppressWarnings("deprecation")
   public T resolve(final FileSystem filesys, final Path path)
       throws IOException {
     int count = 0;
@@ -88,10 +87,6 @@ public abstract class FileSystemLinkResolver<T> {
               + CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY
               + ").", e);
         }
-        if (!FileSystem.isSymlinksEnabled()) {
-          throw new IOException("Symlink resolution is disabled in"
-              + " this version of Hadoop.");
-        }
         if (count++ > FsConstants.MAX_PATH_LINKS) {
           throw new IOException("Possible cyclic loop while " +
                                 "following symbolic link " + path);

+ 12 - 105
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java

@@ -273,17 +273,7 @@ public class HarFileSystem extends FileSystem {
   public Path getWorkingDirectory() {
     return new Path(uri.toString());
   }
-
-  @Override
-  public Path getInitialWorkingDirectory() {
-    return getWorkingDirectory();
-  }
-
-  @Override
-  public FsStatus getStatus(Path p) throws IOException {
-    return fs.getStatus(p);
-  }
-
+  
   /**
    * Create a har specific auth 
    * har-underlyingfs:port
@@ -306,18 +296,9 @@ public class HarFileSystem extends FileSystem {
     return auth;
   }
 
-  /**
-   * Used for delegation token related functionality. Must delegate to
-   * underlying file system.
-   */
   @Override
   protected URI getCanonicalUri() {
-    return fs.getCanonicalUri();
-  }
-
-  @Override
-  protected URI canonicalizeUri(URI uri) {
-    return fs.canonicalizeUri(uri);
+    return fs.canonicalizeUri(getUri());
   }
 
   /**
@@ -330,16 +311,6 @@ public class HarFileSystem extends FileSystem {
     return this.uri;
   }
   
-  @Override
-  protected void checkPath(Path path) {
-    fs.checkPath(path);
-  }
-
-  @Override
-  public Path resolvePath(Path p) throws IOException {
-    return fs.resolvePath(p);
-  }
-
   /**
    * this method returns the path 
    * inside the har filesystem.
@@ -704,31 +675,18 @@ public class HarFileSystem extends FileSystem {
         hstatus.getPartName()),
         hstatus.getStartIndex(), hstatus.getLength(), bufferSize);
   }
-
-  /**
-   * Used for delegation token related functionality. Must delegate to
-   * underlying file system.
-   */
-  @Override
-  public FileSystem[] getChildFileSystems() {
-    return new FileSystem[]{fs};
-  }
-
+ 
   @Override
-  public FSDataOutputStream create(Path f, FsPermission permission,
-      boolean overwrite, int bufferSize, short replication, long blockSize,
+  public FSDataOutputStream create(Path f,
+      FsPermission permission,
+      boolean overwrite,
+      int bufferSize,
+      short replication,
+      long blockSize,
       Progressable progress) throws IOException {
     throw new IOException("Har: create not allowed.");
   }
 
-  @SuppressWarnings("deprecation")
-  @Override
-  public FSDataOutputStream createNonRecursive(Path f, boolean overwrite,
-      int bufferSize, short replication, long blockSize, Progressable progress)
-      throws IOException {
-    throw new IOException("Har: create not allowed.");
-  }
-
   @Override
   public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException {
     throw new IOException("Har: append not allowed.");
@@ -736,7 +694,6 @@ public class HarFileSystem extends FileSystem {
 
   @Override
   public void close() throws IOException {
-    super.close();
     if (fs != null) {
       try {
         fs.close();
@@ -824,17 +781,11 @@ public class HarFileSystem extends FileSystem {
    * not implemented.
    */
   @Override
-  public void copyFromLocalFile(boolean delSrc, boolean overwrite,
-      Path src, Path dst) throws IOException {
+  public void copyFromLocalFile(boolean delSrc, Path src, Path dst) throws 
+        IOException {
     throw new IOException("Har: copyfromlocalfile not allowed");
   }
-
-  @Override
-  public void copyFromLocalFile(boolean delSrc, boolean overwrite,
-      Path[] srcs, Path dst) throws IOException {
-    throw new IOException("Har: copyfromlocalfile not allowed");
-  }
-
+  
   /**
    * copies the file in the har filesystem to a local file.
    */
@@ -871,11 +822,6 @@ public class HarFileSystem extends FileSystem {
     throw new IOException("Har: setowner not allowed");
   }
 
-  @Override
-  public void setTimes(Path p, long mtime, long atime) throws IOException {
-    throw new IOException("Har: setTimes not allowed");
-  }
-
   /**
    * Not implemented.
    */
@@ -1201,43 +1147,4 @@ public class HarFileSystem extends FileSystem {
         return size() > MAX_ENTRIES;
     }
   }
-
-  @SuppressWarnings("deprecation")
-  @Override
-  public FsServerDefaults getServerDefaults() throws IOException {
-    return fs.getServerDefaults();
-  }
-
-  @Override
-  public FsServerDefaults getServerDefaults(Path f) throws IOException {
-    return fs.getServerDefaults(f);
-  }
-
-  @Override
-  public long getUsed() throws IOException{
-    return fs.getUsed();
-  }
-
-  @SuppressWarnings("deprecation")
-  @Override
-  public long getDefaultBlockSize() {
-    return fs.getDefaultBlockSize();
-  }
-
-  @SuppressWarnings("deprecation")
-  @Override
-  public long getDefaultBlockSize(Path f) {
-    return fs.getDefaultBlockSize(f);
-  }
-
-  @SuppressWarnings("deprecation")
-  @Override
-  public short getDefaultReplication() {
-    return fs.getDefaultReplication();
-  }
-
-  @Override
-  public short getDefaultReplication(Path f) {
-    return fs.getDefaultReplication(f);
-  }
 }
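
Most of the overrides removed above follow one pattern, spelled out in the deleted Javadoc: HarFileSystem wraps an underlying FileSystem (the fs field) and must forward metadata and delegation-token queries to it. A generic sketch of that forwarding idea, using an invented interface rather than the real FileSystem API:

    // Illustration only: the FsMetadata interface is made up for this example;
    // the real HarFileSystem forwards to an org.apache.hadoop.fs.FileSystem.
    interface FsMetadata {
      long getDefaultBlockSize();
      short getDefaultReplication();
      long getUsed();
    }

    class ForwardingFsMetadata implements FsMetadata {
      private final FsMetadata underlying;

      ForwardingFsMetadata(FsMetadata underlying) {
        this.underlying = underlying;
      }

      // Every query is answered by the wrapped instance, so callers see the
      // underlying file system's values instead of generic defaults.
      @Override public long getDefaultBlockSize() { return underlying.getDefaultBlockSize(); }
      @Override public short getDefaultReplication() { return underlying.getDefaultReplication(); }
      @Override public long getUsed() { return underlying.getUsed(); }
    }
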

+ 35 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -82,6 +82,39 @@ public class RawLocalFileSystem extends FileSystem {
     setConf(conf);
   }
   
+  class TrackingFileInputStream extends FileInputStream {
+    public TrackingFileInputStream(File f) throws IOException {
+      super(f);
+    }
+    
+    @Override
+    public int read() throws IOException {
+      int result = super.read();
+      if (result != -1) {
+        statistics.incrementBytesRead(1);
+      }
+      return result;
+    }
+    
+    @Override
+    public int read(byte[] data) throws IOException {
+      int result = super.read(data);
+      if (result != -1) {
+        statistics.incrementBytesRead(result);
+      }
+      return result;
+    }
+    
+    @Override
+    public int read(byte[] data, int offset, int length) throws IOException {
+      int result = super.read(data, offset, length);
+      if (result != -1) {
+        statistics.incrementBytesRead(result);
+      }
+      return result;
+    }
+  }
+
   /*******************************************************
    * For open()'s FSInputStream.
    *******************************************************/
@@ -90,7 +123,7 @@ public class RawLocalFileSystem extends FileSystem {
     private long position;
 
     public LocalFSFileInputStream(Path f) throws IOException {
-      fis = new FileInputStream(pathToFile(f));
+      this.fis = new TrackingFileInputStream(pathToFile(f));
     }
     
     @Override
@@ -125,7 +158,6 @@ public class RawLocalFileSystem extends FileSystem {
         int value = fis.read();
         if (value >= 0) {
           this.position++;
-          statistics.incrementBytesRead(1);
         }
         return value;
       } catch (IOException e) {                 // unexpected exception
@@ -139,7 +171,6 @@ public class RawLocalFileSystem extends FileSystem {
         int value = fis.read(b, off, len);
         if (value > 0) {
           this.position += value;
-          statistics.incrementBytesRead(value);
         }
         return value;
       } catch (IOException e) {                 // unexpected exception
@@ -152,11 +183,7 @@ public class RawLocalFileSystem extends FileSystem {
       throws IOException {
       ByteBuffer bb = ByteBuffer.wrap(b, off, len);
       try {
-        int value = fis.getChannel().read(bb, position);
-        if (value > 0) {
-          statistics.incrementBytesRead(value);
-        }
-        return value;
+        return fis.getChannel().read(bb, position);
       } catch (IOException e) {
         throw new FSError(e);
       }
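
This hunk reinstates TrackingFileInputStream, which counts bytes inside the stream itself rather than at each call site in LocalFSFileInputStream. A self-contained sketch of that counting-decorator idea using only JDK classes (the real class feeds FileSystem.Statistics; a plain AtomicLong stands in for it here):

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.FileNotFoundException;
    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicLong;

    // Counts bytes as they are read, so every caller of this stream is tracked
    // without having to instrument each read path separately.
    class CountingFileInputStream extends FileInputStream {
      private final AtomicLong bytesRead;

      CountingFileInputStream(File f, AtomicLong bytesRead) throws FileNotFoundException {
        super(f);
        this.bytesRead = bytesRead;
      }

      @Override
      public int read() throws IOException {
        int result = super.read();
        if (result != -1) {
          bytesRead.incrementAndGet();   // one byte consumed
        }
        return result;
      }

      @Override
      public int read(byte[] data) throws IOException {
        int result = super.read(data);
        if (result > 0) {
          bytesRead.addAndGet(result);   // bulk read
        }
        return result;
      }

      @Override
      public int read(byte[] data, int off, int len) throws IOException {
        int result = super.read(data, off, len);
        if (result > 0) {
          bytesRead.addAndGet(result);
        }
        return result;
      }
    }
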

+ 7 - 25
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java

@@ -42,7 +42,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.s3.INode.FileType;
 import org.jets3t.service.S3Service;
 import org.jets3t.service.S3ServiceException;
-import org.jets3t.service.ServiceException;
 import org.jets3t.service.impl.rest.httpclient.RestS3Service;
 import org.jets3t.service.model.S3Bucket;
 import org.jets3t.service.model.S3Object;
@@ -61,8 +60,8 @@ class Jets3tFileSystemStore implements FileSystemStore {
   private static final String FILE_SYSTEM_VERSION_NAME = "fs-version";
   private static final String FILE_SYSTEM_VERSION_VALUE = "1";
   
-  private static final Map<String, Object> METADATA =
-    new HashMap<String, Object>();
+  private static final Map<String, String> METADATA =
+    new HashMap<String, String>();
   
   static {
     METADATA.put(FILE_SYSTEM_NAME, FILE_SYSTEM_VALUE);
@@ -160,7 +159,7 @@ class Jets3tFileSystemStore implements FileSystemStore {
       throws IOException {
     
     try {
-      S3Object object = s3Service.getObject(bucket.getName(), key);
+      S3Object object = s3Service.getObject(bucket, key);
       if (checkMetadata) {
         checkMetadata(object);
       }
@@ -173,9 +172,6 @@ class Jets3tFileSystemStore implements FileSystemStore {
         throw (IOException) e.getCause();
       }
       throw new S3Exception(e);
-    } catch (ServiceException e) {
-      handleServiceException(e);
-      return null;
     }
   }
 
@@ -192,9 +188,6 @@ class Jets3tFileSystemStore implements FileSystemStore {
         throw (IOException) e.getCause();
       }
       throw new S3Exception(e);
-    } catch (ServiceException e) {
-      handleServiceException(e);
-      return null;
     }
   }
 
@@ -271,7 +264,7 @@ class Jets3tFileSystemStore implements FileSystemStore {
       if (!prefix.endsWith(PATH_DELIMITER)) {
         prefix += PATH_DELIMITER;
       }
-      S3Object[] objects = s3Service.listObjects(bucket.getName(), prefix, PATH_DELIMITER);
+      S3Object[] objects = s3Service.listObjects(bucket, prefix, PATH_DELIMITER);
       Set<Path> prefixes = new TreeSet<Path>();
       for (int i = 0; i < objects.length; i++) {
         prefixes.add(keyToPath(objects[i].getKey()));
@@ -293,7 +286,7 @@ class Jets3tFileSystemStore implements FileSystemStore {
       if (!prefix.endsWith(PATH_DELIMITER)) {
         prefix += PATH_DELIMITER;
       }
-      S3Object[] objects = s3Service.listObjects(bucket.getName(), prefix, null);
+      S3Object[] objects = s3Service.listObjects(bucket, prefix, null);
       Set<Path> prefixes = new TreeSet<Path>();
       for (int i = 0; i < objects.length; i++) {
         prefixes.add(keyToPath(objects[i].getKey()));
@@ -376,7 +369,7 @@ class Jets3tFileSystemStore implements FileSystemStore {
   @Override
   public void purge() throws IOException {
     try {
-      S3Object[] objects = s3Service.listObjects(bucket.getName());
+      S3Object[] objects = s3Service.listObjects(bucket);
       for (int i = 0; i < objects.length; i++) {
         s3Service.deleteObject(bucket, objects[i].getKey());
       }
@@ -393,7 +386,7 @@ class Jets3tFileSystemStore implements FileSystemStore {
     StringBuilder sb = new StringBuilder("S3 Filesystem, ");
     sb.append(bucket.getName()).append("\n");
     try {
-      S3Object[] objects = s3Service.listObjects(bucket.getName(), PATH_DELIMITER, null);
+      S3Object[] objects = s3Service.listObjects(bucket, PATH_DELIMITER, null);
       for (int i = 0; i < objects.length; i++) {
         Path path = keyToPath(objects[i].getKey());
         sb.append(path).append("\n");
@@ -415,15 +408,4 @@ class Jets3tFileSystemStore implements FileSystemStore {
     System.out.println(sb);
   }
 
-  private void handleServiceException(ServiceException e) throws IOException {
-      if (e.getCause() instanceof IOException) {
-        throw (IOException) e.getCause();
-      }
-      else {
-        if(LOG.isDebugEnabled()) {
-          LOG.debug("Got ServiceException with Error code: " + e.getErrorCode() + ";and Error message: " + e.getErrorMessage());
-        }
-      }
-    }
-
 }

+ 3 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java

@@ -34,7 +34,6 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.jets3t.service.S3Service;
 import org.jets3t.service.S3ServiceException;
-import org.jets3t.service.ServiceException;
 import org.jets3t.service.impl.rest.httpclient.RestS3Service;
 import org.jets3t.service.model.S3Bucket;
 import org.jets3t.service.model.S3Object;
@@ -178,7 +177,7 @@ public class MigrationTool extends Configured implements Tool {
   
   private S3Object get(String key) {
     try {
-      return s3Service.getObject(bucket.getName(), key);
+      return s3Service.getObject(bucket, key);
     } catch (S3ServiceException e) {
       if ("NoSuchKey".equals(e.getS3ErrorCode())) {
         return null;
@@ -201,7 +200,7 @@ public class MigrationTool extends Configured implements Tool {
     public Set<Path> listAllPaths() throws IOException {
       try {
         String prefix = urlEncode(Path.SEPARATOR);
-        S3Object[] objects = s3Service.listObjects(bucket.getName(), prefix, null);
+        S3Object[] objects = s3Service.listObjects(bucket, prefix, null);
         Set<Path> prefixes = new TreeSet<Path>();
         for (int i = 0; i < objects.length; i++) {
           prefixes.add(keyToPath(objects[i].getKey()));
@@ -238,7 +237,7 @@ public class MigrationTool extends Configured implements Tool {
 
     private InputStream get(String key) throws IOException {
       try {
-        S3Object object = s3Service.getObject(bucket.getName(), key);
+        S3Object object = s3Service.getObject(bucket, key);
         return object.getDataInputStream();
       } catch (S3ServiceException e) {
         if ("NoSuchKey".equals(e.getS3ErrorCode())) {
@@ -248,8 +247,6 @@ public class MigrationTool extends Configured implements Tool {
           throw (IOException) e.getCause();
         }
         throw new S3Exception(e);
-      } catch (ServiceException e) {
-        return null;
       }
     }
     

+ 28 - 98
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java

@@ -29,21 +29,17 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.s3.S3Credentials;
 import org.apache.hadoop.fs.s3.S3Exception;
+import org.jets3t.service.S3ObjectsChunk;
 import org.jets3t.service.S3Service;
 import org.jets3t.service.S3ServiceException;
-import org.jets3t.service.ServiceException;
-import org.jets3t.service.StorageObjectsChunk;
 import org.jets3t.service.impl.rest.httpclient.RestS3Service;
 import org.jets3t.service.model.S3Bucket;
 import org.jets3t.service.model.S3Object;
-import org.jets3t.service.model.StorageObject;
 import org.jets3t.service.security.AWSCredentials;
 
 @InterfaceAudience.Private
@@ -52,9 +48,7 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
   
   private S3Service s3Service;
   private S3Bucket bucket;
-  public static final Log LOG =
-      LogFactory.getLog(Jets3tNativeFileSystemStore.class);
-
+  
   @Override
   public void initialize(URI uri, Configuration conf) throws IOException {
     S3Credentials s3Credentials = new S3Credentials();
@@ -65,7 +59,7 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
             s3Credentials.getSecretAccessKey());
       this.s3Service = new RestS3Service(awsCredentials);
     } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+      handleServiceException(e);
     }
     bucket = new S3Bucket(uri.getHost());
   }
@@ -86,7 +80,7 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
       }
       s3Service.putObject(bucket, object);
     } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+      handleServiceException(e);
     } finally {
       if (in != null) {
         try {
@@ -107,85 +101,53 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
       object.setContentLength(0);
       s3Service.putObject(bucket, object);
     } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+      handleServiceException(e);
     }
   }
   
   @Override
   public FileMetadata retrieveMetadata(String key) throws IOException {
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Getting metadata for key: " + key + " from bucket:" + bucket.getName());
-      }
-      S3Object object = s3Service.getObject(bucket.getName(), key);
+      S3Object object = s3Service.getObjectDetails(bucket, key);
       return new FileMetadata(key, object.getContentLength(),
           object.getLastModifiedDate().getTime());
     } catch (S3ServiceException e) {
       // Following is brittle. Is there a better way?
-      if (e.getS3ErrorCode().matches("NoSuchKey")) {
-        return null; //return null if key not found
+      if (e.getMessage().contains("ResponseCode=404")) {
+        return null;
       }
-      handleS3ServiceException(e);
+      handleServiceException(e);
       return null; //never returned - keep compiler happy
     }
   }
-
-  /**
-   * @param key
-   * The key is the object name that is being retrieved from the S3 bucket
-   * @return
-   * This method returns null if the key is not found
-   * @throws IOException
-   */
-
+  
   @Override
   public InputStream retrieve(String key) throws IOException {
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Getting key: " + key + " from bucket:" + bucket.getName());
-      }
-      S3Object object = s3Service.getObject(bucket.getName(), key);
+      S3Object object = s3Service.getObject(bucket, key);
       return object.getDataInputStream();
     } catch (S3ServiceException e) {
-      handleS3ServiceException(key, e);
+      handleServiceException(key, e);
       return null; //never returned - keep compiler happy
-    } catch (ServiceException e) {
-      handleServiceException(e);
-      return null; //return null if key not found
     }
   }
-
-  /**
-   *
-   * @param key
-   * The key is the object name that is being retrieved from the S3 bucket
-   * @return
-   * This method returns null if the key is not found
-   * @throws IOException
-   */
-
+  
   @Override
   public InputStream retrieve(String key, long byteRangeStart)
-          throws IOException {
+    throws IOException {
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Getting key: " + key + " from bucket:" + bucket.getName() + " with byteRangeStart: " + byteRangeStart);
-      }
       S3Object object = s3Service.getObject(bucket, key, null, null, null,
                                             null, byteRangeStart, null);
       return object.getDataInputStream();
     } catch (S3ServiceException e) {
-      handleS3ServiceException(key, e);
+      handleServiceException(key, e);
       return null; //never returned - keep compiler happy
-    } catch (ServiceException e) {
-      handleServiceException(e);
-      return null; //return null if key not found
     }
   }
 
   @Override
   public PartialListing list(String prefix, int maxListingLength)
-          throws IOException {
+    throws IOException {
     return list(prefix, maxListingLength, null, false);
   }
   
@@ -196,13 +158,6 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
     return list(prefix, recurse ? null : PATH_DELIMITER, maxListingLength, priorLastKey);
   }
 
-  /**
-   *
-   * @return
-   * This method returns null if the list could not be populated
-   * due to S3 giving ServiceException
-   * @throws IOException
-   */
 
   private PartialListing list(String prefix, String delimiter,
       int maxListingLength, String priorLastKey) throws IOException {
@@ -210,63 +165,52 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
       if (prefix.length() > 0 && !prefix.endsWith(PATH_DELIMITER)) {
         prefix += PATH_DELIMITER;
       }
-      StorageObjectsChunk chunk = s3Service.listObjectsChunked(bucket.getName(),
+      S3ObjectsChunk chunk = s3Service.listObjectsChunked(bucket.getName(),
           prefix, delimiter, maxListingLength, priorLastKey);
       
       FileMetadata[] fileMetadata =
         new FileMetadata[chunk.getObjects().length];
       for (int i = 0; i < fileMetadata.length; i++) {
-        StorageObject object = chunk.getObjects()[i];
+        S3Object object = chunk.getObjects()[i];
         fileMetadata[i] = new FileMetadata(object.getKey(),
             object.getContentLength(), object.getLastModifiedDate().getTime());
       }
       return new PartialListing(chunk.getPriorLastKey(), fileMetadata,
           chunk.getCommonPrefixes());
     } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
-      return null; //never returned - keep compiler happy
-    } catch (ServiceException e) {
       handleServiceException(e);
-      return null; //return null if list could not be populated
+      return null; //never returned - keep compiler happy
     }
   }
 
   @Override
   public void delete(String key) throws IOException {
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Deleting key:" + key + "from bucket" + bucket.getName());
-      }
       s3Service.deleteObject(bucket, key);
     } catch (S3ServiceException e) {
-      handleS3ServiceException(key, e);
+      handleServiceException(key, e);
     }
   }
   
   @Override
   public void copy(String srcKey, String dstKey) throws IOException {
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Copying srcKey: " + srcKey + "to dstKey: " + dstKey + "in bucket: " + bucket.getName());
-      }
       s3Service.copyObject(bucket.getName(), srcKey, bucket.getName(),
           new S3Object(dstKey), false);
     } catch (S3ServiceException e) {
-      handleS3ServiceException(srcKey, e);
-    } catch (ServiceException e) {
-      handleServiceException(e);
+      handleServiceException(srcKey, e);
     }
   }
 
   @Override
   public void purge(String prefix) throws IOException {
     try {
-      S3Object[] objects = s3Service.listObjects(bucket.getName(), prefix, null);
+      S3Object[] objects = s3Service.listObjects(bucket, prefix, null);
       for (S3Object object : objects) {
         s3Service.deleteObject(bucket, object.getKey());
       }
     } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+      handleServiceException(e);
     }
   }
 
@@ -275,44 +219,30 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
     StringBuilder sb = new StringBuilder("S3 Native Filesystem, ");
     sb.append(bucket.getName()).append("\n");
     try {
-      S3Object[] objects = s3Service.listObjects(bucket.getName());
+      S3Object[] objects = s3Service.listObjects(bucket);
       for (S3Object object : objects) {
         sb.append(object.getKey()).append("\n");
       }
     } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+      handleServiceException(e);
     }
     System.out.println(sb);
   }
 
-  private void handleS3ServiceException(String key, S3ServiceException e) throws IOException {
+  private void handleServiceException(String key, S3ServiceException e) throws IOException {
     if ("NoSuchKey".equals(e.getS3ErrorCode())) {
       throw new FileNotFoundException("Key '" + key + "' does not exist in S3");
     } else {
-      handleS3ServiceException(e);
+      handleServiceException(e);
     }
   }
 
-  private void handleS3ServiceException(S3ServiceException e) throws IOException {
+  private void handleServiceException(S3ServiceException e) throws IOException {
     if (e.getCause() instanceof IOException) {
       throw (IOException) e.getCause();
     }
     else {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("S3 Error code: " + e.getS3ErrorCode() + "; S3 Error message: " + e.getS3ErrorMessage());
-      }
       throw new S3Exception(e);
     }
   }
-
-  private void handleServiceException(ServiceException e) throws IOException {
-    if (e.getCause() instanceof IOException) {
-      throw (IOException) e.getCause();
-    }
-    else {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Got ServiceException with Error code: " + e.getErrorCode() + ";and Error message: " + e.getErrorMessage());
-      }
-    }
-  }
 }
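
The hunks above fold the error handling back into a single handleServiceException pair: the S3 "NoSuchKey" error code becomes FileNotFoundException, a wrapped IOException cause is rethrown directly, and anything else is wrapped. A rough sketch of that translation pattern follows, with a hypothetical StoreException standing in for jets3t's S3ServiceException and a plain IOException in place of Hadoop's S3Exception.

    import java.io.FileNotFoundException;
    import java.io.IOException;

    // Hypothetical checked exception standing in for S3ServiceException.
    class StoreException extends Exception {
      private final String errorCode;
      StoreException(String errorCode, Throwable cause) {
        super(cause);
        this.errorCode = errorCode;
      }
      String getErrorCode() { return errorCode; }
    }

    final class StoreErrors {
      private StoreErrors() {}

      // Translate a missing-key error into FileNotFoundException,
      // unwrap an IOException cause, and wrap everything else.
      static void rethrow(String key, StoreException e) throws IOException {
        if ("NoSuchKey".equals(e.getErrorCode())) {
          throw new FileNotFoundException("Key '" + key + "' does not exist in S3");
        }
        if (e.getCause() instanceof IOException) {
          throw (IOException) e.getCause();
        }
        throw new IOException("S3 error for key '" + key + "'", e);
      }
    }

A caller would invoke StoreErrors.rethrow(key, e) inside its catch block, which mirrors the handleServiceException(key, e) call sites in the diff.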

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java

@@ -273,7 +273,7 @@ public class NativeS3FileSystem extends FileSystem {
     setConf(conf);
     this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
     this.workingDir =
-      new Path("/user", System.getProperty("user.name")).makeQualified(this.uri, this.getWorkingDirectory());
+      new Path("/user", System.getProperty("user.name")).makeQualified(this);
   }
   
   private static NativeFileSystemStore createDefaultStore(Configuration conf) {
@@ -511,11 +511,11 @@ public class NativeS3FileSystem extends FileSystem {
   
   private FileStatus newFile(FileMetadata meta, Path path) {
     return new FileStatus(meta.getLength(), false, 1, getDefaultBlockSize(),
-        meta.getLastModified(), path.makeQualified(this.getUri(), this.getWorkingDirectory()));
+        meta.getLastModified(), path.makeQualified(this));
   }
   
   private FileStatus newDirectory(Path path) {
-    return new FileStatus(0, true, 1, 0, 0, path.makeQualified(this.getUri(), this.getWorkingDirectory()));
+    return new FileStatus(0, true, 1, 0, 0, path.makeQualified(this));
   }
 
   @Override

+ 2 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java

@@ -84,16 +84,11 @@ abstract class CommandWithDestination extends FsCommand {
    */
   protected void getLocalDestination(LinkedList<String> args)
   throws IOException {
-    String pathString = (args.size() < 2) ? Path.CUR_DIR : args.removeLast();
     try {
+      String pathString = (args.size() < 2) ? Path.CUR_DIR : args.removeLast();
       dst = new PathData(new URI(pathString), getConf());
     } catch (URISyntaxException e) {
-      if (Path.WINDOWS) {
-        // Unlike URI, PathData knows how to parse Windows drive-letter paths.
-        dst = new PathData(pathString, getConf());
-      } else {
-        throw new IOException("unexpected URISyntaxException", e);
-      }
+      throw new IOException("unexpected URISyntaxException", e);
     }
   }
 

+ 3 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java

@@ -204,18 +204,13 @@ class CopyCommands {
     // commands operating on local paths have no need for glob expansion
     @Override
     protected List<PathData> expandArgument(String arg) throws IOException {
-      List<PathData> items = new LinkedList<PathData>();
       try {
+        List<PathData> items = new LinkedList<PathData>();
         items.add(new PathData(new URI(arg), getConf()));
+        return items;
       } catch (URISyntaxException e) {
-        if (Path.WINDOWS) {
-          // Unlike URI, PathData knows how to parse Windows drive-letter paths.
-          items.add(new PathData(arg, getConf()));
-        } else {
-          throw new IOException("unexpected URISyntaxException", e);
-        }
+        throw new IOException("unexpected URISyntaxException", e);
       }
-      return items;
     }
 
     @Override

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java

@@ -68,7 +68,7 @@ class SnapshotCommands extends FsCommand {
         throw new IllegalArgumentException("<snapshotDir> is missing.");
       } 
       if (args.size() > 2) {
-        throw new IllegalArgumentException("Too many arguments.");
+        throw new IllegalArgumentException("Too many arguements.");
       }
       if (args.size() == 2) {
         snapshotName = args.removeLast();
@@ -110,7 +110,7 @@ class SnapshotCommands extends FsCommand {
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
       if (args.size() != 2) {
-        throw new IllegalArgumentException("Incorrect number of arguments.");
+        throw new IOException("args number not 2: " + args.size());
       }
       snapshotName = args.removeLast();
     }
@@ -150,7 +150,7 @@ class SnapshotCommands extends FsCommand {
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
       if (args.size() != 3) {
-        throw new IllegalArgumentException("Incorrect number of arguments.");
+        throw new IOException("args number not 3: " + args.size());
       }
       newName = args.removeLast();
       oldName = args.removeLast();

+ 0 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -233,7 +233,6 @@ public class HttpServer implements FilterContainer {
       }
       listener.setHost(bindAddress);
       listener.setPort(port);
-      LOG.info("SSL is enabled on " + toString());
     } else {
       listenerStartedExternally = true;
       listener = connector;

+ 7 - 30
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java

@@ -34,7 +34,6 @@ import java.util.concurrent.TimeUnit;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.ConnectTimeoutException;
 
@@ -532,15 +531,6 @@ public class RetryPolicies {
       this.maxDelayBase = maxDelayBase;
     }
 
-    /**
-     * @return 0 if this is our first failover/retry (i.e., retry immediately),
-     *         sleep exponentially otherwise
-     */
-    private long getFailoverOrRetrySleepTime(int times) {
-      return times == 0 ? 0 : 
-        calculateExponentialTime(delayMillis, times, maxDelayBase);
-    }
-    
     @Override
     public RetryAction shouldRetry(Exception e, int retries,
         int failovers, boolean isIdempotentOrAtMostOnce) throws Exception {
@@ -556,8 +546,11 @@ public class RetryPolicies {
           e instanceof StandbyException ||
           e instanceof ConnectTimeoutException ||
           isWrappedStandbyException(e)) {
-        return new RetryAction(RetryAction.RetryDecision.FAILOVER_AND_RETRY,
-            getFailoverOrRetrySleepTime(failovers));
+        return new RetryAction(
+            RetryAction.RetryDecision.FAILOVER_AND_RETRY,
+            // retry immediately if this is our first failover, sleep otherwise
+            failovers == 0 ? 0 :
+                calculateExponentialTime(delayMillis, failovers, maxDelayBase));
       } else if (e instanceof SocketException ||
                  (e instanceof IOException && !(e instanceof RemoteException))) {
         if (isIdempotentOrAtMostOnce) {
@@ -568,14 +561,8 @@ public class RetryPolicies {
               "whether it was invoked");
         }
       } else {
-        RetriableException re = getWrappedRetriableException(e);
-        if (re != null) {
-          return new RetryAction(RetryAction.RetryDecision.RETRY,
-              getFailoverOrRetrySleepTime(retries));
-        } else {
-          return fallbackPolicy.shouldRetry(e, retries, failovers,
-              isIdempotentOrAtMostOnce);
-        }
+        return fallbackPolicy.shouldRetry(e, retries, failovers,
+            isIdempotentOrAtMostOnce);
       }
     }
     
@@ -609,14 +596,4 @@ public class RetryPolicies {
         StandbyException.class);
     return unwrapped instanceof StandbyException;
   }
-  
-  private static RetriableException getWrappedRetriableException(Exception e) {
-    if (!(e instanceof RemoteException)) {
-      return null;
-    }
-    Exception unwrapped = ((RemoteException)e).unwrapRemoteException(
-        RetriableException.class);
-    return unwrapped instanceof RetriableException ? 
-        (RetriableException) unwrapped : null;
-  }
 }
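
The RetryPolicies hunk above inlines the failover sleep computation: retry immediately (0 ms) on the first failover, and back off exponentially afterwards via calculateExponentialTime(delayMillis, failovers, maxDelayBase). The sketch below shows one way such a schedule can be computed; the randomization and the use of maxDelayBase as a hard cap are assumptions for illustration, not Hadoop's exact formula.

    import java.util.Random;

    final class FailoverBackoff {
      private FailoverBackoff() {}

      private static final Random RANDOM = new Random();

      // 0 ms for the first failover; afterwards an exponentially growing,
      // randomized delay, capped here at maxDelayBase milliseconds (assumed).
      static long sleepMillis(long delayMillis, int failovers, long maxDelayBase) {
        if (failovers == 0) {
          return 0;                                  // fail over immediately the first time
        }
        // Exponential growth: delayMillis, 2x, 4x, ... (shift kept in a safe range).
        long exponential = delayMillis * (1L << Math.min(failovers, 30));
        // Randomize within [0.5x, 1.5x) so clients do not retry in lock step.
        long randomized = (long) (exponential * (0.5 + RANDOM.nextDouble()));
        return Math.min(randomized, maxDelayBase);
      }
    }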

+ 0 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -522,7 +522,6 @@ public class Client {
         try {
           this.socket = socketFactory.createSocket();
           this.socket.setTcpNoDelay(tcpNoDelay);
-          this.socket.setKeepAlive(true);
           
           /*
            * Bind the socket to the host specified in the principal name of the

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java

@@ -630,7 +630,7 @@ public class RPC {
     } catch (IOException e) {
       LOG.error("Closing proxy or invocation handler caused exception", e);
     } catch (IllegalArgumentException e) {
-      LOG.error("RPC.stopProxy called on non proxy: class=" + proxy.getClass().getName(), e);
+      LOG.error("RPC.stopProxy called on non proxy.", e);
     }
     
     // If you see this error on a mock object in a unit test you're

+ 0 - 41
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetriableException.java

@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ipc;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Exception thrown by a server typically to indicate that server is in a state
- * where request cannot be processed temporarily (such as still starting up).
- * Client may retry the request. If the service is up, the server may be able to
- * process a retried request.
- */
-@InterfaceStability.Evolving
-public class RetriableException extends IOException {
-  private static final long serialVersionUID = 1915561725516487301L;
-  
-  public RetriableException(Exception e) {
-    super(e);
-  }
-  
-  public RetriableException(String msg) {
-    super(msg);
-  }
-}

+ 21 - 27
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -759,7 +759,6 @@ public abstract class Server {
 
         channel.configureBlocking(false);
         channel.socket().setTcpNoDelay(tcpNoDelay);
-        channel.socket().setKeepAlive(true);
         
         Reader reader = getReader();
         try {
@@ -1293,29 +1292,6 @@ public abstract class Server {
       }
     }
 
-    private Throwable getCauseForInvalidToken(IOException e) {
-      Throwable cause = e;
-      while (cause != null) {
-        if (cause instanceof RetriableException) {
-          return (RetriableException) cause;
-        } else if (cause instanceof StandbyException) {
-          return (StandbyException) cause;
-        } else if (cause instanceof InvalidToken) {
-          // FIXME: hadoop method signatures are restricting the SASL
-          // callbacks to only returning InvalidToken, but some services
-          // need to throw other exceptions (ex. NN + StandyException),
-          // so for now we'll tunnel the real exceptions via an
-          // InvalidToken's cause which normally is not set 
-          if (cause.getCause() != null) {
-            cause = cause.getCause();
-          }
-          return cause;
-        }
-        cause = cause.getCause();
-      }
-      return e;
-    }
-    
     private void saslProcess(RpcSaslProto saslMessage)
         throws WrappedRpcServerException, IOException, InterruptedException {
       if (saslContextEstablished) {
@@ -1328,11 +1304,29 @@ public abstract class Server {
         try {
           saslResponse = processSaslMessage(saslMessage);
         } catch (IOException e) {
+          IOException sendToClient = e;
+          Throwable cause = e;
+          while (cause != null) {
+            if (cause instanceof InvalidToken) {
+              // FIXME: hadoop method signatures are restricting the SASL
+              // callbacks to only returning InvalidToken, but some services
+              // need to throw other exceptions (ex. NN + StandyException),
+              // so for now we'll tunnel the real exceptions via an
+              // InvalidToken's cause which normally is not set 
+              if (cause.getCause() != null) {
+                cause = cause.getCause();
+              }
+              sendToClient = (IOException) cause;
+              break;
+            }
+            cause = cause.getCause();
+          }
           rpcMetrics.incrAuthenticationFailures();
+          String clientIP = this.toString();
           // attempting user could be null
-          AUDITLOG.warn(AUTH_FAILED_FOR + this.toString() + ":"
-              + attemptingUser + " (" + e.getLocalizedMessage() + ")");
-          throw (IOException) getCauseForInvalidToken(e);
+          AUDITLOG.warn(AUTH_FAILED_FOR + clientIP + ":" + attemptingUser +
+            " (" + e.getLocalizedMessage() + ")");
+          throw sendToClient;
         }
         
         if (saslServer != null && saslServer.isComplete()) {
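
The Server.java hunk above restores the inline loop that walks the exception's cause chain looking for an InvalidToken, and prefers that token's own cause as the exception to send back to the client. Reduced to a generic helper, the unwrapping idiom looks like the sketch below; the class and method names are illustrative, not part of the Hadoop code.

    final class CauseChains {
      private CauseChains() {}

      // Walk e.getCause() links and return the first cause that is an
      // instance of target; fall back to e itself if none is found.
      static <T extends Throwable> Throwable findCause(Throwable e, Class<T> target) {
        Throwable cause = e;
        while (cause != null) {
          if (target.isInstance(cause)) {
            // If the matched cause carries a nested cause, prefer that,
            // mirroring how the SASL path tunnels the real exception
            // through an InvalidToken whose cause is normally unset.
            return cause.getCause() != null ? cause.getCause() : cause;
          }
          cause = cause.getCause();
        }
        return e;
      }
    }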

+ 8 - 21
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java

@@ -50,7 +50,6 @@ public class Groups {
   private final Map<String, CachedGroups> userToGroupsMap = 
     new ConcurrentHashMap<String, CachedGroups>();
   private final long cacheTimeout;
-  private final long warningDeltaMs;
 
   public Groups(Configuration conf) {
     impl = 
@@ -61,16 +60,11 @@ public class Groups {
           conf);
     
     cacheTimeout = 
-      conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 
-          CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS_DEFAULT) * 1000;
-    warningDeltaMs =
-      conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS,
-        CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS_DEFAULT);
+      conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 5*60) * 1000;
     
     if(LOG.isDebugEnabled())
       LOG.debug("Group mapping impl=" + impl.getClass().getName() + 
-          "; cacheTimeout=" + cacheTimeout + "; warningDeltaMs=" +
-          warningDeltaMs);
+          "; cacheTimeout=" + cacheTimeout);
   }
   
   /**
@@ -82,24 +76,17 @@ public class Groups {
   public List<String> getGroups(String user) throws IOException {
     // Return cached value if available
     CachedGroups groups = userToGroupsMap.get(user);
-    long startMs = Time.monotonicNow();
+    long now = Time.now();
     // if cache has a value and it hasn't expired
-    if (groups != null && (groups.getTimestamp() + cacheTimeout > startMs)) {
+    if (groups != null && (groups.getTimestamp() + cacheTimeout > now)) {
       if(LOG.isDebugEnabled()) {
         LOG.debug("Returning cached groups for '" + user + "'");
       }
       return groups.getGroups();
     }
-
+    
     // Create and cache user's groups
-    List<String> groupList = impl.getGroups(user);
-    long endMs = Time.monotonicNow();
-    long deltaMs = endMs - startMs ;
-    if (deltaMs > warningDeltaMs) {
-      LOG.warn("Potential performance problem: getGroups(user=" + user +") " +
-          "took " + deltaMs + " milliseconds.");
-    }
-    groups = new CachedGroups(groupList, endMs);
+    groups = new CachedGroups(impl.getGroups(user));
     if (groups.getGroups().isEmpty()) {
       throw new IOException("No groups found for user " + user);
     }
@@ -146,9 +133,9 @@ public class Groups {
     /**
      * Create and initialize group cache
      */
-    CachedGroups(List<String> groups, long timestamp) {
+    CachedGroups(List<String> groups) {
       this.groups = groups;
-      this.timestamp = timestamp;
+      this.timestamp = Time.now();
     }
 
     /**
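
The Groups hunk above reverts to a cache entry that records Time.now() when it is constructed and is treated as fresh while timestamp + cacheTimeout is still in the future (the slow-lookup warning and the monotonic clock are dropped). A compact sketch of that expiry check is below, using System.currentTimeMillis() in place of Hadoop's Time class and generic names.

    import java.util.List;

    // Sketch: a cache entry with a creation timestamp and a TTL check.
    class CachedEntry<V> {
      private final List<V> values;
      private final long timestampMillis;

      CachedEntry(List<V> values) {
        this.values = values;
        this.timestampMillis = System.currentTimeMillis();  // stamped at creation
      }

      // Fresh while (creation time + ttl) is still ahead of "now".
      boolean isFresh(long ttlMillis) {
        return timestampMillis + ttlMillis > System.currentTimeMillis();
      }

      List<V> values() {
        return values;
      }
    }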

+ 5 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java

@@ -45,13 +45,11 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.Server.Connection;
-import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.token.SecretManager;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 
 /**
  * A utility class for dealing with SASL on RPC server
@@ -269,15 +267,13 @@ public class SaslRpcServer {
       this.connection = connection;
     }
 
-    private char[] getPassword(TokenIdentifier tokenid) throws InvalidToken,
-        StandbyException, RetriableException, IOException {
-      return encodePassword(secretManager.retriableRetrievePassword(tokenid));
+    private char[] getPassword(TokenIdentifier tokenid) throws InvalidToken {
+      return encodePassword(secretManager.retrievePassword(tokenid));
     }
 
     @Override
     public void handle(Callback[] callbacks) throws InvalidToken,
-        UnsupportedCallbackException, StandbyException, RetriableException,
-        IOException {
+        UnsupportedCallbackException {
       NameCallback nc = null;
       PasswordCallback pc = null;
       AuthorizeCallback ac = null;
@@ -296,8 +292,7 @@ public class SaslRpcServer {
         }
       }
       if (pc != null) {
-        TokenIdentifier tokenIdentifier = getIdentifier(nc.getDefaultName(),
-            secretManager);
+        TokenIdentifier tokenIdentifier = getIdentifier(nc.getDefaultName(), secretManager);
         char[] password = getPassword(tokenIdentifier);
         UserGroupInformation user = null;
         user = tokenIdentifier.getUser(); // may throw exception

+ 1 - 24
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java

@@ -29,7 +29,6 @@ import javax.crypto.spec.SecretKeySpec;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.StandbyException;
 
 
@@ -67,29 +66,7 @@ public abstract class SecretManager<T extends TokenIdentifier> {
    * @return the password to use
    * @throws InvalidToken the token was invalid
    */
-  public abstract byte[] retrievePassword(T identifier)
-      throws InvalidToken;
-  
-  /**
-   * The same functionality with {@link #retrievePassword}, except that this 
-   * method can throw a {@link RetriableException} or a {@link StandbyException}
-   * to indicate that client can retry/failover the same operation because of 
-   * temporary issue on the server side.
-   * 
-   * @param identifier the identifier to validate
-   * @return the password to use
-   * @throws InvalidToken the token was invalid
-   * @throws StandbyException the server is in standby state, the client can
-   *         try other servers
-   * @throws RetriableException the token was invalid, and the server thinks 
-   *         this may be a temporary issue and suggests the client to retry
-   * @throws IOException to allow future exceptions to be added without breaking
-   *         compatibility        
-   */
-  public byte[] retriableRetrievePassword(T identifier)
-      throws InvalidToken, StandbyException, RetriableException, IOException {
-    return retrievePassword(identifier);
-  }
+  public abstract byte[] retrievePassword(T identifier) throws InvalidToken;
   
   /**
    * Create an empty token identifier.

+ 7 - 17
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java

@@ -45,7 +45,7 @@ import org.apache.hadoop.util.Time;
 
 import com.google.common.base.Preconditions;
 
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "Hive"})
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 public abstract 
 class AbstractDelegationTokenSecretManager<TokenIdent 
@@ -289,30 +289,20 @@ extends AbstractDelegationTokenIdentifier>
         + tokenRenewInterval, password, getTrackingIdIfEnabled(identifier)));
     return password;
   }
-  
-  /**
-   * Find the DelegationTokenInformation for the given token id, and verify that
-   * if the token is expired. Note that this method should be called with 
-   * acquiring the secret manager's monitor.
-   */
-  protected DelegationTokenInformation checkToken(TokenIdent identifier)
+
+  @Override
+  public synchronized byte[] retrievePassword(TokenIdent identifier)
       throws InvalidToken {
-    assert Thread.holdsLock(this);
     DelegationTokenInformation info = currentTokens.get(identifier);
     if (info == null) {
       throw new InvalidToken("token (" + identifier.toString()
           + ") can't be found in cache");
     }
-    if (info.getRenewDate() < Time.now()) {
+    long now = Time.now();
+    if (info.getRenewDate() < now) {
       throw new InvalidToken("token (" + identifier.toString() + ") is expired");
     }
-    return info;
-  }
-  
-  @Override
-  public synchronized byte[] retrievePassword(TokenIdent identifier)
-      throws InvalidToken {
-    return checkToken(identifier).getPassword();
+    return info.getPassword();
   }
 
   protected String getTrackingIdIfEnabled(TokenIdent ident) {

+ 2 - 46
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java

@@ -25,7 +25,6 @@ import java.net.URISyntaxException;
 import java.net.URL;
 import java.net.URLClassLoader;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 
 import org.apache.commons.cli.CommandLine;
@@ -414,50 +413,7 @@ public class GenericOptionsParser {
     }
     return StringUtils.arrayToString(finalArr);
   }
-
-  /**
-   * Windows powershell and cmd can parse key=value themselves, because
-   * /pkey=value is same as /pkey value under windows. However this is not
-   * compatible with how we get arbitrary key values in -Dkey=value format.
-   * Under windows -D key=value or -Dkey=value might be passed as
-   * [-Dkey, value] or [-D key, value]. This method does undo these and
-   * return a modified args list by manually changing [-D, key, value]
-   * into [-D, key=value]
-   *
-   * @param args command line arguments
-   * @return fixed command line arguments that GnuParser can parse
-   */
-  private String[] preProcessForWindows(String[] args) {
-    if (!Shell.WINDOWS) {
-      return args;
-    }
-    List<String> newArgs = new ArrayList<String>(args.length);
-    for (int i=0; i < args.length; i++) {
-      String prop = null;
-      if (args[i].equals("-D")) {
-        newArgs.add(args[i]);
-        if (i < args.length - 1) {
-          prop = args[++i];
-        }
-      } else if (args[i].startsWith("-D")) {
-        prop = args[i];
-      } else {
-        newArgs.add(args[i]);
-      }
-      if (prop != null) {
-        if (prop.contains("=")) {
-          // everything good
-        } else {
-          if (i < args.length - 1) {
-            prop += "=" + args[++i];
-          }
-        }
-        newArgs.add(prop);
-      }
-    }
-
-    return newArgs.toArray(new String[newArgs.size()]);
-  }
+  
 
   /**
    * Parse the user-specified options, get the generic options, and modify
@@ -471,7 +427,7 @@ public class GenericOptionsParser {
     opts = buildGeneralOptions(opts);
     CommandLineParser parser = new GnuParser();
     try {
-      commandLine = parser.parse(opts, preProcessForWindows(args), true);
+      commandLine = parser.parse(opts, args, true);
       processGeneralOptions(conf, commandLine);
     } catch(ParseException e) {
       LOG.warn("options parsing failed: "+e.getMessage());
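
The GenericOptionsParser hunk above removes preProcessForWindows, which rejoined -D arguments that Windows shells split around '=', turning [-D, key, value] back into [-D, key=value] before GnuParser sees them. The standalone sketch below restates that rewriting outside the parser; the class name is illustrative, and the real method additionally short-circuits on non-Windows platforms via Shell.WINDOWS.

    import java.util.ArrayList;
    import java.util.List;

    final class WindowsArgs {
      private WindowsArgs() {}

      // Rewrite [-D, key, value] and [-Dkey, value] into [-D, key=value] / [-Dkey=value].
      static String[] rejoinDefines(String[] args) {
        List<String> out = new ArrayList<String>(args.length);
        for (int i = 0; i < args.length; i++) {
          String prop = null;
          if (args[i].equals("-D")) {
            out.add(args[i]);
            if (i < args.length - 1) {
              prop = args[++i];              // the key; its value may follow
            }
          } else if (args[i].startsWith("-D")) {
            prop = args[i];                  // -Dkey or -Dkey=value
          } else {
            out.add(args[i]);
            continue;                        // not a -D argument, pass through
          }
          if (prop != null) {
            if (!prop.contains("=") && i < args.length - 1) {
              prop += "=" + args[++i];       // glue the split value back on
            }
            out.add(prop);
          }
        }
        return out.toArray(new String[out.size()]);
      }
    }

For example, rejoinDefines(new String[]{"-D", "fs.defaultFS", "hdfs://nn"}) yields ["-D", "fs.defaultFS=hdfs://nn"], which a GNU-style parser can then handle as a single property definition.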

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/native/native.vcxproj

@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="utf-8"?>
+<?xml version="1.0" encoding="utf-8"?>
 
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more

+ 0 - 9
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -105,15 +105,6 @@
   </description>
 </property>
 
-<property>
-  <name>hadoop.security.groups.cache.warn.after.ms</name>
-  <value>5000</value>
-  <description>
-    If looking up a single user to group takes longer than this amount of
-    milliseconds, we will log a warning message.
-  </description>
-</property>
-
 <property>
   <name>hadoop.security.group.mapping.ldap.url</name>
   <value></value>

+ 1 - 1
hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm

@@ -381,7 +381,7 @@ rmr
 
 setrep
 
-   Usage: <<<hdfs dfs -setrep [-R] [-w] <numReplicas> <path> >>>
+   Usage: <<<hdfs dfs -setrep [-R] [-w] <numRepicas> <path> >>>
 
    Changes the replication factor of a file. If <path> is a directory then
    the command recursively changes the replication factor of all files under

+ 0 - 79
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java

@@ -26,28 +26,13 @@ import java.io.BufferedWriter;
 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
 import java.util.Map;
-import java.util.Random;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.Future;
-import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import junit.framework.Assert;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.conf.Configuration.DeprecationDelta;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import com.google.common.util.concurrent.Uninterruptibles;
-
  
 public class TestConfigurationDeprecation {
   private Configuration conf;
@@ -335,68 +320,4 @@ public class TestConfigurationDeprecation {
     assertNull(conf.get("nK"));
   }
 
-  private static String getTestKeyName(int threadIndex, int testIndex) {
-    return "testConcurrentDeprecateAndManipulate.testKey." +
-                  threadIndex + "." + testIndex;
-  }
-  
-  /**
-   * Run a set of threads making changes to the deprecations
-   * concurrently with another set of threads calling get()
-   * and set() on Configuration objects.
-   */
-  @SuppressWarnings("deprecation")
-  @Test(timeout=60000)
-  public void testConcurrentDeprecateAndManipulate() throws Exception {
-    final int NUM_THREAD_IDS = 10;
-    final int NUM_KEYS_PER_THREAD = 1000;
-    ScheduledThreadPoolExecutor executor =
-      new ScheduledThreadPoolExecutor(2 * NUM_THREAD_IDS,
-      new ThreadFactoryBuilder().setDaemon(true).
-      setNameFormat("testConcurrentDeprecateAndManipulate modification thread %d").
-      build());
-    final CountDownLatch latch = new CountDownLatch(1);
-    final AtomicInteger highestModificationThreadId = new AtomicInteger(1);
-    List<Future<Void>> futures = new LinkedList<Future<Void>>();
-    for (int i = 0; i < NUM_THREAD_IDS; i++) {
-      futures.add(executor.schedule(new Callable<Void>() {
-        @Override
-        public Void call() throws Exception {
-          latch.await();
-          int threadIndex = highestModificationThreadId.addAndGet(1);
-          for (int i = 0; i < NUM_KEYS_PER_THREAD; i++) {
-            String testKey = getTestKeyName(threadIndex, i);
-            String testNewKey = testKey + ".new";
-            Configuration.addDeprecations(
-              new DeprecationDelta[] {
-                new DeprecationDelta(testKey, testNewKey)
-              });
-          }
-          return null;
-        }
-      }, 0, TimeUnit.SECONDS));
-    }
-    final AtomicInteger highestAccessThreadId = new AtomicInteger(1);
-    for (int i = 0; i < NUM_THREAD_IDS; i++) {
-      futures.add(executor.schedule(new Callable<Void>() {
-        @Override
-        public Void call() throws Exception {
-          Configuration conf = new Configuration();
-          latch.await();
-          int threadIndex = highestAccessThreadId.addAndGet(1);
-          for (int i = 0; i < NUM_KEYS_PER_THREAD; i++) {
-            String testNewKey = getTestKeyName(threadIndex, i) + ".new";
-            String value = "value." + threadIndex + "." + i;
-            conf.set(testNewKey, value);
-            Assert.assertEquals(value, conf.get(testNewKey));
-          }
-          return null;
-        }
-      }, 0, TimeUnit.SECONDS));
-    }
-    latch.countDown(); // allow all threads to proceed
-    for (Future<Void> future : futures) {
-      Uninterruptibles.getUninterruptibly(future);
-    }
-  }
 }

+ 0 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java

@@ -57,7 +57,6 @@ public abstract class FCStatisticsBaseTest {
     FSDataInputStream fstr = fc.open(filePath);
     byte[] buf = new byte[blockSize];
     int bytesRead = fstr.read(buf, 0, blockSize);
-    fstr.read(0, buf, 0, blockSize);
     Assert.assertEquals(blockSize, bytesRead);
     verifyReadBytes(stats);
     verifyWrittenBytes(stats);

+ 0 - 6
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/SymlinkBaseTest.java

@@ -28,7 +28,6 @@ import org.apache.hadoop.test.GenericTestUtils;
 
 import static org.junit.Assert.*;
 import static org.junit.Assume.assumeTrue;
-
 import org.junit.Test;
 import org.junit.Before;
 import org.junit.After;
@@ -36,12 +35,7 @@ import org.junit.After;
 /**
  * Base test for symbolic links
  */
-@SuppressWarnings("deprecation")
 public abstract class SymlinkBaseTest {
-  // Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
-  static {
-    FileSystem.enableSymlinks();
-  }
   static final long seed = 0xDEADBEEFL;
   static final int  blockSize =  8192;
   static final int  fileSize  = 16384;

+ 0 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextResolveAfs.java

@@ -31,10 +31,6 @@ import org.junit.Test;
  * Tests resolution of AbstractFileSystems for a given path with symlinks.
  */
 public class TestFileContextResolveAfs {
-  // Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
-  static {
-    FileSystem.enableSymlinks();
-  }
   
   private static String TEST_ROOT_DIR_LOCAL
     = System.getProperty("test.build.data","/tmp");

+ 16 - 83
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java

@@ -19,9 +19,7 @@
 package org.apache.hadoop.fs;
 
 import static org.junit.Assert.*;
-import static org.junit.Assume.assumeTrue;
 
-import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -108,7 +106,7 @@ public class TestFsShellCopy {
     Path targetDir = new Path(testRoot, "target");    
     Path filePath = new Path(testRoot, new Path("srcFile"));
     lfs.create(filePath).close();
-    checkPut(filePath, targetDir, false);
+    checkPut(filePath, targetDir);
   }
 
   @Test
@@ -121,42 +119,10 @@ public class TestFsShellCopy {
     Path dirPath = new Path(testRoot, new Path("srcDir"));
     lfs.mkdirs(dirPath);
     lfs.create(new Path(dirPath, "srcFile")).close();
-    checkPut(dirPath, targetDir, false);
+    checkPut(dirPath, targetDir);
   }
-
-  @Test
-  public void testCopyFileFromWindowsLocalPath() throws Exception {
-    assumeTrue(Path.WINDOWS);
-    String windowsTestRootPath = (new File(testRootDir.toUri().getPath()
-        .toString())).getAbsolutePath();
-    Path testRoot = new Path(windowsTestRootPath, "testPutFile");
-    lfs.delete(testRoot, true);
-    lfs.mkdirs(testRoot);
-
-    Path targetDir = new Path(testRoot, "target");
-    Path filePath = new Path(testRoot, new Path("srcFile"));
-    lfs.create(filePath).close();
-    checkPut(filePath, targetDir, true);
-  }
-
-  @Test
-  public void testCopyDirFromWindowsLocalPath() throws Exception {
-    assumeTrue(Path.WINDOWS);
-    String windowsTestRootPath = (new File(testRootDir.toUri().getPath()
-        .toString())).getAbsolutePath();
-    Path testRoot = new Path(windowsTestRootPath, "testPutDir");
-    lfs.delete(testRoot, true);
-    lfs.mkdirs(testRoot);
-
-    Path targetDir = new Path(testRoot, "target");
-    Path dirPath = new Path(testRoot, new Path("srcDir"));
-    lfs.mkdirs(dirPath);
-    lfs.create(new Path(dirPath, "srcFile")).close();
-    checkPut(dirPath, targetDir, true);
-  }
-
   
-  private void checkPut(Path srcPath, Path targetDir, boolean useWindowsPath)
+  private void checkPut(Path srcPath, Path targetDir)
   throws Exception {
     lfs.delete(targetDir, true);
     lfs.mkdirs(targetDir);    
@@ -168,37 +134,37 @@ public class TestFsShellCopy {
     
     // copy to new file, then again
     prepPut(dstPath, false, false);
-    checkPut(0, srcPath, dstPath, useWindowsPath);
+    checkPut(0, srcPath, dstPath);
     if (lfs.isFile(srcPath)) {
-      checkPut(1, srcPath, dstPath, useWindowsPath);
+      checkPut(1, srcPath, dstPath);
     } else { // directory works because it copies into the dir
       // clear contents so the check won't think there are extra paths
       prepPut(dstPath, true, true);
-      checkPut(0, srcPath, dstPath, useWindowsPath);
+      checkPut(0, srcPath, dstPath);
     }
 
     // copy to non-existent subdir
     prepPut(childPath, false, false);
-    checkPut(1, srcPath, dstPath, useWindowsPath);
+    checkPut(1, srcPath, dstPath);
 
     // copy into dir, then with another name
     prepPut(dstPath, true, true);
-    checkPut(0, srcPath, dstPath, useWindowsPath);
+    checkPut(0, srcPath, dstPath);
     prepPut(childPath, true, true);
-    checkPut(0, srcPath, childPath, useWindowsPath);
+    checkPut(0, srcPath, childPath);
 
     // try to put to pwd with existing dir
     prepPut(targetDir, true, true);
-    checkPut(0, srcPath, null, useWindowsPath);
+    checkPut(0, srcPath, null);
     prepPut(targetDir, true, true);
-    checkPut(0, srcPath, new Path("."), useWindowsPath);
+    checkPut(0, srcPath, new Path("."));
 
     // try to put to pwd with non-existent cwd
     prepPut(dstPath, false, true);
     lfs.setWorkingDirectory(dstPath);
-    checkPut(1, srcPath, null, useWindowsPath);
+    checkPut(1, srcPath, null);
     prepPut(dstPath, false, true);
-    checkPut(1, srcPath, new Path("."), useWindowsPath);
+    checkPut(1, srcPath, new Path("."));
   }
 
   private void prepPut(Path dst, boolean create,
@@ -217,17 +183,12 @@ public class TestFsShellCopy {
     }
   }
   
-  private void checkPut(int exitCode, Path src, Path dest,
-      boolean useWindowsPath) throws Exception {
+  private void checkPut(int exitCode, Path src, Path dest) throws Exception {
     String argv[] = null;
-    String srcPath = src.toString();
-    if (useWindowsPath) {
-      srcPath = (new File(srcPath)).getAbsolutePath();
-    }
     if (dest != null) {
-      argv = new String[]{ "-put", srcPath, pathAsString(dest) };
+      argv = new String[]{ "-put", src.toString(), pathAsString(dest) };
     } else {
-      argv = new String[]{ "-put", srcPath };
+      argv = new String[]{ "-put", src.toString() };
       dest = new Path(Path.CUR_DIR);
     }
     
@@ -457,34 +418,6 @@ public class TestFsShellCopy {
     assertTrue(lfs.exists(srcDir));
   }
   
-  @Test
-  public void testMoveFromWindowsLocalPath() throws Exception {
-    assumeTrue(Path.WINDOWS);
-    Path testRoot = new Path(testRootDir, "testPutFile");
-    lfs.delete(testRoot, true);
-    lfs.mkdirs(testRoot);
-
-    Path target = new Path(testRoot, "target");
-    Path srcFile = new Path(testRoot, new Path("srcFile"));
-    lfs.createNewFile(srcFile);
-
-    String winSrcFile = (new File(srcFile.toUri().getPath()
-        .toString())).getAbsolutePath();
-    shellRun(0, "-moveFromLocal", winSrcFile, target.toString());
-    assertFalse(lfs.exists(srcFile));
-    assertTrue(lfs.exists(target));
-    assertTrue(lfs.isFile(target));
-  }
-
-  @Test
-  public void testGetWindowsLocalPath() throws Exception {
-    assumeTrue(Path.WINDOWS);
-    String winDstFile = (new File(dstPath.toUri().getPath()
-        .toString())).getAbsolutePath();
-    shellRun(0, "-get", srcPath.toString(), winDstFile);
-    checkPath(dstPath, false);
-  }
-  
   private void createFile(Path ... paths) throws IOException {
     for (Path path : paths) {
       FSDataOutputStream out = lfs.create(path);

+ 5 - 176
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java

@@ -18,155 +18,14 @@
 
 package org.apache.hadoop.fs;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import java.io.IOException;
+
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.Progressable;
 import org.junit.Assert;
+import static org.junit.Assert.*;
 import org.junit.Test;
 
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.lang.reflect.Modifier;
-import java.util.EnumSet;
-import java.util.Iterator;
-
-import static org.apache.hadoop.fs.Options.ChecksumOpt;
-import static org.apache.hadoop.fs.Options.CreateOpts;
-import static org.apache.hadoop.fs.Options.Rename;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-@SuppressWarnings("deprecation")
 public class TestHarFileSystem {
-  public static final Log LOG = LogFactory.getLog(TestHarFileSystem.class);
-
-  /**
-   * FileSystem methods that must not be overwritten by
-   * {@link HarFileSystem}. Either because there is a default implementation
-   * already available or because it is not relevant.
-   */
-  @SuppressWarnings("deprecation")
-  private interface MustNotImplement {
-    public BlockLocation[] getFileBlockLocations(Path p, long start, long len);
-    public long getLength(Path f);
-    public FSDataOutputStream append(Path f, int bufferSize);
-    public void rename(Path src, Path dst, Rename... options);
-    public boolean exists(Path f);
-    public boolean isDirectory(Path f);
-    public boolean isFile(Path f);
-    public boolean createNewFile(Path f);
-
-    public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
-        boolean overwrite, int bufferSize, short replication, long blockSize,
-        Progressable progress) throws IOException;
-
-    public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
-        EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
-        Progressable progress) throws IOException;
-
-    public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
-        EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
-        Progressable progress, ChecksumOpt checksumOpt);
-
-    public boolean mkdirs(Path f);
-    public FSDataInputStream open(Path f);
-    public FSDataOutputStream create(Path f);
-    public FSDataOutputStream create(Path f, boolean overwrite);
-    public FSDataOutputStream create(Path f, Progressable progress);
-    public FSDataOutputStream create(Path f, short replication);
-    public FSDataOutputStream create(Path f, short replication,
-        Progressable progress);
-
-    public FSDataOutputStream create(Path f, boolean overwrite,
-        int bufferSize);
-
-    public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
-        Progressable progress);
-
-    public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
-        short replication, long blockSize);
-
-    public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
-        short replication, long blockSize, Progressable progress);
-
-    public FSDataOutputStream create(Path f, FsPermission permission,
-        EnumSet<CreateFlag> flags, int bufferSize, short replication,
-        long blockSize, Progressable progress) throws IOException;
-
-    public FSDataOutputStream create(Path f, FsPermission permission,
-        EnumSet<CreateFlag> flags, int bufferSize, short replication,
-        long blockSize, Progressable progress, ChecksumOpt checksumOpt)
-        throws IOException;
-
-    public String getName();
-    public boolean delete(Path f);
-    public short getReplication(Path src);
-    public void processDeleteOnExit();
-    public ContentSummary getContentSummary(Path f);
-    public FsStatus getStatus();
-    public FileStatus[] listStatus(Path f, PathFilter filter);
-    public FileStatus[] listStatus(Path[] files);
-    public FileStatus[] listStatus(Path[] files, PathFilter filter);
-    public FileStatus[] globStatus(Path pathPattern);
-    public FileStatus[] globStatus(Path pathPattern, PathFilter filter);
-
-    public Iterator<LocatedFileStatus> listFiles(Path path,
-        boolean isRecursive);
-
-    public Iterator<LocatedFileStatus> listLocatedStatus(Path f);
-    public Iterator<LocatedFileStatus> listLocatedStatus(Path f,
-        PathFilter filter);
-    public void copyFromLocalFile(Path src, Path dst);
-    public void moveFromLocalFile(Path[] srcs, Path dst);
-    public void moveFromLocalFile(Path src, Path dst);
-    public void copyToLocalFile(Path src, Path dst);
-    public void copyToLocalFile(boolean delSrc, Path src, Path dst,
-        boolean useRawLocalFileSystem);
-    public void moveToLocalFile(Path src, Path dst);
-    public long getBlockSize(Path f);
-    public FSDataOutputStream primitiveCreate(Path f,
-        EnumSet<CreateFlag> createFlag, CreateOpts... opts);
-    public void primitiveMkdir(Path f, FsPermission absolutePermission,
-        boolean createParent);
-    public int getDefaultPort();
-    public String getCanonicalServiceName();
-    public Token<?> getDelegationToken(String renewer) throws IOException;
-    public boolean deleteOnExit(Path f) throws IOException;
-    public boolean cancelDeleteOnExit(Path f) throws IOException;
-    public Token<?>[] addDelegationTokens(String renewer, Credentials creds)
-        throws IOException;
-    public Path fixRelativePart(Path p);
-    public void concat(Path trg, Path [] psrcs) throws IOException;
-    public FSDataOutputStream primitiveCreate(Path f,
-        FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
-        short replication, long blockSize, Progressable progress,
-        ChecksumOpt checksumOpt) throws IOException;
-    public boolean primitiveMkdir(Path f, FsPermission absolutePermission)
-        throws IOException;
-    public RemoteIterator<Path> listCorruptFileBlocks(Path path)
-        throws IOException;
-    public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
-        throws IOException;
-    public void createSymlink(Path target, Path link, boolean createParent)
-        throws IOException;
-    public FileStatus getFileLinkStatus(Path f) throws IOException;
-    public boolean supportsSymlinks();
-    public Path getLinkTarget(Path f) throws IOException;
-    public Path resolveLink(Path f) throws IOException;
-    public void setVerifyChecksum(boolean verifyChecksum);
-    public void setWriteChecksum(boolean writeChecksum);
-    public Path createSnapshot(Path path, String snapshotName) throws
-        IOException;
-    public void renameSnapshot(Path path, String snapshotOldName,
-        String snapshotNewName) throws IOException;
-    public void deleteSnapshot(Path path, String snapshotName)
-        throws IOException;
-  }
-
   @Test
   public void testHarUri() {
     final Configuration conf = new Configuration();
@@ -185,7 +44,8 @@ public class TestHarFileSystem {
       p.getFileSystem(conf);
       Assert.fail(p + " is an invalid path.");
     } catch (IOException e) {
-      // Expected
+      System.out.println("GOOD: Got an exception.");
+      e.printStackTrace(System.out);
     }
   }
 
@@ -273,37 +133,6 @@ public class TestHarFileSystem {
       assertEquals(b[1].getOffset(), 128);
       assertEquals(b[1].getLength(), 384);
     }
-  }
 
-  @Test
-  public void testInheritedMethodsImplemented() throws Exception {
-    int errors = 0;
-    for (Method m : FileSystem.class.getDeclaredMethods()) {
-      if (Modifier.isStatic(m.getModifiers()) ||
-          Modifier.isPrivate(m.getModifiers()) ||
-          Modifier.isFinal(m.getModifiers())) {
-        continue;
-      }
-
-      try {
-        MustNotImplement.class.getMethod(m.getName(), m.getParameterTypes());
-        try {
-          HarFileSystem.class.getDeclaredMethod(m.getName(), m.getParameterTypes());
-          LOG.error("HarFileSystem MUST not implement " + m);
-          errors++;
-        } catch (NoSuchMethodException ex) {
-          // Expected
-        }
-      } catch (NoSuchMethodException exc) {
-        try {
-          HarFileSystem.class.getDeclaredMethod(m.getName(), m.getParameterTypes());
-        } catch (NoSuchMethodException exc2) {
-          LOG.error("HarFileSystem MUST implement " + m);
-          errors++;
-        }
-      }
-    }
-    assertTrue((errors + " methods were not overridden correctly - see log"),
-        errors <= 0);
   }
 }
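
The testInheritedMethodsImplemented test removed above enforced, via reflection, that HarFileSystem overrides every overridable FileSystem method not listed in MustNotImplement. A minimal standalone sketch of that reflection pattern (illustrative only; the class name InheritedMethodCheck is hypothetical and the MustNotImplement whitelist is omitted):

// Illustrative sketch (not part of this commit): check that a FileSystem subclass
// overrides every non-static, non-private, non-final FileSystem method.
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.HarFileSystem;

public class InheritedMethodCheck {
  public static void main(String[] args) {
    int missing = 0;
    for (Method m : FileSystem.class.getDeclaredMethods()) {
      int mod = m.getModifiers();
      if (Modifier.isStatic(mod) || Modifier.isPrivate(mod) || Modifier.isFinal(mod)) {
        continue; // only overridable instance methods are interesting
      }
      try {
        // Does HarFileSystem declare its own version of this method?
        HarFileSystem.class.getDeclaredMethod(m.getName(), m.getParameterTypes());
      } catch (NoSuchMethodException e) {
        System.out.println("Not overridden by HarFileSystem: " + m);
        missing++;
      }
    }
    System.out.println(missing + " method(s) not overridden");
  }
}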

+ 1 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFsFCStatistics.java

@@ -47,8 +47,7 @@ public class TestLocalFsFCStatistics extends FCStatisticsBaseTest {
 
   @Override
   protected void verifyReadBytes(Statistics stats) {
-    // one blockSize for read, one for pread
-    Assert.assertEquals(2*blockSize, stats.getBytesRead());
+    Assert.assertEquals(blockSize, stats.getBytesRead());
   }
 
   @Override

+ 0 - 91
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericOptionsParser.java

@@ -21,8 +21,6 @@ import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
-import java.util.Arrays;
-import java.util.Map;
 
 import junit.framework.TestCase;
 
@@ -37,9 +35,6 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdenti
 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.OptionBuilder;
 import org.apache.commons.cli.Options;
-import org.junit.Assert;
-
-import com.google.common.collect.Maps;
 
 public class TestGenericOptionsParser extends TestCase {
   File testDir;
@@ -196,90 +191,4 @@ public class TestGenericOptionsParser extends TestCase {
     
     localFs.delete(new Path(testDir.getAbsolutePath()), true);
   }
-
-  /** Test -D parsing */
-  public void testDOptionParsing() throws Exception {
-    String[] args;
-    Map<String,String> expectedMap;
-    String[] expectedRemainingArgs;
-
-    args = new String[]{};
-    expectedRemainingArgs = new String[]{};
-    expectedMap = Maps.newHashMap();
-    assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
-
-    args = new String[]{"-Dkey1=value1"};
-    expectedRemainingArgs = new String[]{};
-    expectedMap = Maps.newHashMap();
-    expectedMap.put("key1", "value1");
-    assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
-
-    args = new String[]{"-fs", "hdfs://somefs/", "-Dkey1=value1", "arg1"};
-    expectedRemainingArgs = new String[]{"arg1"};
-    assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
-
-    args = new String[]{"-fs", "hdfs://somefs/", "-D", "key1=value1", "arg1"};
-    assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
-
-    if (Shell.WINDOWS) {
-      args = new String[]{"-fs", "hdfs://somefs/", "-D", "key1",
-        "value1", "arg1"};
-      assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
-
-      args = new String[]{"-fs", "hdfs://somefs/", "-Dkey1", "value1", "arg1"};
-      assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
-
-      args = new String[]{"-fs", "hdfs://somefs/", "-D", "key1", "value1",
-        "-fs", "someother", "-D", "key2", "value2", "arg1", "arg2"};
-      expectedRemainingArgs = new String[]{"arg1", "arg2"};
-      expectedMap = Maps.newHashMap();
-      expectedMap.put("key1", "value1");
-      expectedMap.put("key2", "value2");
-      assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
-
-      args = new String[]{"-fs", "hdfs://somefs/", "-D", "key1", "value1",
-        "-fs", "someother", "-D", "key2", "value2"};
-      expectedRemainingArgs = new String[]{};
-      assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
-
-      args = new String[]{"-fs", "hdfs://somefs/", "-D", "key1", "value1",
-        "-fs", "someother", "-D", "key2"};
-      expectedMap = Maps.newHashMap();
-      expectedMap.put("key1", "value1");
-      expectedMap.put("key2", null); // we expect key2 not set
-      assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
-    }
-
-    args = new String[]{"-fs", "hdfs://somefs/", "-D", "key1=value1",
-      "-fs", "someother", "-Dkey2"};
-    expectedRemainingArgs = new String[]{};
-    expectedMap = Maps.newHashMap();
-    expectedMap.put("key1", "value1");
-    expectedMap.put("key2", null); // we expect key2 not set
-    assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
-
-    args = new String[]{"-fs", "hdfs://somefs/", "-D"};
-    expectedMap = Maps.newHashMap();
-    assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
-  }
-
-  private void assertDOptionParsing(String[] args,
-      Map<String,String> expectedMap, String[] expectedRemainingArgs)
-      throws Exception {
-    for (Map.Entry<String, String> entry : expectedMap.entrySet()) {
-      assertNull(conf.get(entry.getKey()));
-    }
-
-    Configuration conf = new Configuration();
-    GenericOptionsParser parser = new GenericOptionsParser(conf, args);
-    String[] remainingArgs = parser.getRemainingArgs();
-
-    for (Map.Entry<String, String> entry : expectedMap.entrySet()) {
-      assertEquals(entry.getValue(), conf.get(entry.getKey()));
-    }
-
-    Assert.assertArrayEquals(
-      Arrays.toString(remainingArgs) + Arrays.toString(expectedRemainingArgs),
-      expectedRemainingArgs, remainingArgs);
-  }
 }
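
The removed testDOptionParsing covered GenericOptionsParser's handling of -D generic options. A minimal usage sketch of that behaviour (illustrative; the class name DOptionExample is hypothetical, while the parser constructor and getRemainingArgs() call mirror the deleted test above):

// Illustrative sketch of -D handling with GenericOptionsParser.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.GenericOptionsParser;

public class DOptionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String[] cliArgs = {"-fs", "hdfs://somefs/", "-Dkey1=value1", "arg1"};
    GenericOptionsParser parser = new GenericOptionsParser(conf, cliArgs);
    // Generic options are applied to conf; only application args remain.
    System.out.println(conf.get("key1"));                 // value1
    System.out.println(parser.getRemainingArgs().length); // 1 ("arg1")
  }
}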

+ 5 - 0
hadoop-common-project/hadoop-common/src/test/resources/core-site.xml

@@ -78,4 +78,9 @@
   <name>nfs3.mountd.port</name>
   <value>4272</value>
 </property>
+
+<property>
+  <name>test.SymlinkEnabledForTesting</name>
+  <value>true</value>
+</property>
 </configuration>

+ 2 - 2
hadoop-common-project/hadoop-nfs/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>2.2.1-SNAPSHOT</version>
+    <version>2.2.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-nfs</artifactId>
-  <version>2.2.1-SNAPSHOT</version>
+  <version>2.2.0</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop NFS</name>

+ 5 - 5
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountEntry.java

@@ -21,9 +21,9 @@ package org.apache.hadoop.mount;
  * Represents a mount entry.
  */
 public class MountEntry {
-  /** Host corresponding to the mount entry */
+  /** Host correspoinding to the mount entry */
   private final String host;
-  /** Path corresponding to the mount entry */
+  /** Path correspoinding to the mount entry */
   private final String path;
 
   public MountEntry(String host, String path) {
@@ -31,11 +31,11 @@ public class MountEntry {
     this.path = path;
   }
 
-  public String getHost() {
+  public String host() {
     return this.host;
   }
 
-  public String getPath() {
+  public String path() {
     return this.path;
   }
 
@@ -49,7 +49,7 @@ public class MountEntry {
     }
 
     MountEntry m = (MountEntry) o;
-    return getHost().equals(m.getHost()) && getPath().equals(m.getPath());
+    return host().equals(m.host()) && path().equals(m.path());
   }
 
   @Override

+ 2 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java

@@ -54,8 +54,8 @@ public class MountResponse {
     RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     for (MountEntry mountEntry : mounts) {
       xdr.writeBoolean(true); // Value follows yes
-      xdr.writeString(mountEntry.getHost());
-      xdr.writeString(mountEntry.getPath());
+      xdr.writeString(mountEntry.host());
+      xdr.writeString(mountEntry.path());
     }
     xdr.writeBoolean(false); // Value follows no
     return xdr;

+ 5 - 10
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.mount;
 
 import java.io.IOException;
+import java.util.List;
 
 import org.apache.hadoop.oncrpc.RpcProgram;
 import org.apache.hadoop.oncrpc.SimpleTcpServer;
@@ -33,8 +34,6 @@ import org.apache.hadoop.portmap.PortmapMapping;
  */
 abstract public class MountdBase {
   private final RpcProgram rpcProgram;
-  private int udpBoundPort; // Will set after server starts
-  private int tcpBoundPort; // Will set after server starts
 
   public RpcProgram getRpcProgram() {
     return rpcProgram;
@@ -42,10 +41,10 @@ abstract public class MountdBase {
   
   /**
    * Constructor
-   * @param program
+   * @param exports
    * @throws IOException 
    */
-  public MountdBase(RpcProgram program) throws IOException {
+  public MountdBase(List<String> exports, RpcProgram program) throws IOException {
     rpcProgram = program;
   }
 
@@ -53,26 +52,22 @@ abstract public class MountdBase {
   private void startUDPServer() {
     SimpleUdpServer udpServer = new SimpleUdpServer(rpcProgram.getPort(),
         rpcProgram, 1);
-    rpcProgram.startDaemons();
     udpServer.run();
-    udpBoundPort = udpServer.getBoundPort();
   }
 
   /* Start TCP server */
   private void startTCPServer() {
     SimpleTcpServer tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
         rpcProgram, 1);
-    rpcProgram.startDaemons();
     tcpServer.run();
-    tcpBoundPort = tcpServer.getBoundPort();
   }
 
   public void start(boolean register) {
     startUDPServer();
     startTCPServer();
     if (register) {
-      rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
-      rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
+      rpcProgram.register(PortmapMapping.TRANSPORT_UDP);
+      rpcProgram.register(PortmapMapping.TRANSPORT_TCP);
     }
   }
 }

+ 20 - 11
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mount.MountdBase;
 import org.apache.hadoop.oncrpc.RpcProgram;
 import org.apache.hadoop.oncrpc.SimpleTcpServer;
 import org.apache.hadoop.portmap.PortmapMapping;
@@ -31,34 +32,42 @@ import org.apache.hadoop.portmap.PortmapMapping;
  */
 public abstract class Nfs3Base {
   public static final Log LOG = LogFactory.getLog(Nfs3Base.class);
+  private final MountdBase mountd;
   private final RpcProgram rpcProgram;
   private final int nfsPort;
-  private int nfsBoundPort; // Will set after server starts
-    
+  
+  public MountdBase getMountBase() {
+    return mountd;
+  }
+  
   public RpcProgram getRpcProgram() {
     return rpcProgram;
   }
 
-  protected Nfs3Base(RpcProgram rpcProgram, Configuration conf) {
-    this.rpcProgram = rpcProgram;
-    this.nfsPort = conf.getInt(Nfs3Constant.NFS3_SERVER_PORT,
-        Nfs3Constant.NFS3_SERVER_PORT_DEFAULT);
-    LOG.info("NFS server port set to: " + nfsPort);
+  protected Nfs3Base(MountdBase mountd, RpcProgram program, Configuration conf) {
+    this.mountd = mountd;
+    this.rpcProgram = program;
+    this.nfsPort = conf.getInt("nfs3.server.port", Nfs3Constant.PORT);
+    LOG.info("NFS server port set to: "+nfsPort);
+  }
+
+  protected Nfs3Base(MountdBase mountd, RpcProgram program) {
+    this.mountd = mountd;
+    this.rpcProgram = program;
+    this.nfsPort = Nfs3Constant.PORT;
   }
 
   public void start(boolean register) {
+    mountd.start(register); // Start mountd
     startTCPServer(); // Start TCP server
-    
     if (register) {
-      rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
+      rpcProgram.register(PortmapMapping.TRANSPORT_TCP);
     }
   }
 
   private void startTCPServer() {
     SimpleTcpServer tcpServer = new SimpleTcpServer(nfsPort,
         rpcProgram, 0);
-    rpcProgram.startDaemons();
     tcpServer.run();
-    nfsBoundPort = tcpServer.getBoundPort();
   }
 }

+ 1 - 10
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java

@@ -26,8 +26,7 @@ public class Nfs3Constant {
   public final static int SUN_RPCBIND = 111;
 
   // The IP port number for NFS.
-  public final static String NFS3_SERVER_PORT = "nfs3.server.port";
-  public final static int NFS3_SERVER_PORT_DEFAULT = 2049;
+  public final static int PORT = 2049;
 
   // The RPC program number for NFS.
   public final static int PROGRAM = 100003;
@@ -206,15 +205,7 @@ public class Nfs3Constant {
   public static final String FILE_DUMP_DIR_DEFAULT = "/tmp/.hdfs-nfs";
   public static final String ENABLE_FILE_DUMP_KEY = "dfs.nfs3.enableDump";
   public static final boolean ENABLE_FILE_DUMP_DEFAULT = true;
-  public static final String MAX_OPEN_FILES = "dfs.nfs3.max.open.files";
-  public static final int MAX_OPEN_FILES_DEFAULT = 256;
-  public static final String OUTPUT_STREAM_TIMEOUT = "dfs.nfs3.stream.timeout";
-  public static final long OUTPUT_STREAM_TIMEOUT_DEFAULT = 10 * 60 * 1000; // 10 minutes
-  public static final long OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT = 10 * 1000; //10 seconds
   
   public final static String UNKNOWN_USER = "nobody";
   public final static String UNKNOWN_GROUP = "nobody";
-  
-  public final static String EXPORT_POINT = "dfs.nfs3.export.point";
-  public final static String EXPORT_POINT_DEFAULT = "/";
 }

+ 2 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java

@@ -97,6 +97,6 @@ public interface Nfs3Interface {
       InetAddress client);
 
   /** COMMIT: Commit cached data on a server to stable storage */
-  public NFS3Response commit(XDR xdr, Channel channel, int xid,
-      SecurityHandler securityHandler, InetAddress client);
+  public NFS3Response commit(XDR xdr, SecurityHandler securityHandler,
+      InetAddress client);
 }

+ 0 - 17
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READ3Request.java

@@ -19,11 +19,8 @@ package org.apache.hadoop.nfs.nfs3.request;
 
 import java.io.IOException;
 
-import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * READ3 Request
  */
@@ -37,13 +34,6 @@ public class READ3Request extends RequestWithHandle {
     count = xdr.readInt();
   }
 
-  @VisibleForTesting
-  public READ3Request(FileHandle handle, long offset, int count) {
-    super(handle);
-    this.offset = offset;
-    this.count = count;
-  }
-  
   public long getOffset() {
     return this.offset;
   }
@@ -51,11 +41,4 @@ public class READ3Request extends RequestWithHandle {
   public int getCount() {
     return this.count;
   }
-  
-  @Override
-  public void serialize(XDR xdr) {
-    handle.serialize(xdr);
-    xdr.writeLongAsHyper(offset);
-    xdr.writeInt(count);
-  }
 }

+ 0 - 6
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/WRITE3Request.java

@@ -83,10 +83,4 @@ public class WRITE3Request extends RequestWithHandle {
     xdr.writeInt(count);
     xdr.writeFixedOpaque(data.array(), count);
   }
-  
-  @Override
-  public String toString() {
-    return String.format("fileId: %d offset: %d count: %d stableHow: %s",
-        handle.getFileId(), offset, count, stableHow.name());
-  }
 }

+ 1 - 9
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java

@@ -26,8 +26,6 @@ import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.security.Verifier;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * READDIR3 Response
  */
@@ -51,8 +49,7 @@ public class READDIR3Response extends NFS3Response {
       return fileId;
     }
 
-    @VisibleForTesting
-    public String getName() {
+    String getName() {
       return name;
     }
 
@@ -69,11 +66,6 @@ public class READDIR3Response extends NFS3Response {
       this.entries = Collections.unmodifiableList(Arrays.asList(entries));
       this.eof = eof;
     }
-    
-    @VisibleForTesting
-    public List<Entry3> getEntries() {
-      return this.entries;
-    }
   }
 
   public READDIR3Response(int status) {

+ 1 - 14
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java

@@ -27,8 +27,6 @@ import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.security.Verifier;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * READDIRPLUS3 Response
  */
@@ -53,11 +51,6 @@ public class READDIRPLUS3Response  extends NFS3Response {
       this.objFileHandle = objFileHandle;
     }
 
-    @VisibleForTesting
-    public String getName() {
-      return name;
-    }
-    
     void seralize(XDR xdr) {
       xdr.writeLongAsHyper(fileId);
       xdr.writeString(name);
@@ -78,8 +71,7 @@ public class READDIRPLUS3Response  extends NFS3Response {
       this.eof = eof;
     }
 
-    @VisibleForTesting
-    public List<EntryPlus3> getEntries() {
+    List<EntryPlus3> getEntries() {
       return entries;
     }
     
@@ -88,11 +80,6 @@ public class READDIRPLUS3Response  extends NFS3Response {
     }
   }
 
-  @VisibleForTesting
-  public DirListPlus3 getDirListPlus() {
-    return dirListPlus;
-  }
-  
   public READDIRPLUS3Response(int status) {
     this(status, null, 0, null);
   }

+ 12 - 13
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java

@@ -40,7 +40,7 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
   public static final int RPCB_PORT = 111;
   private final String program;
   private final String host;
-  private int port; // Ephemeral port is chosen later
+  private final int port;
   private final int progNumber;
   private final int lowProgVersion;
   private final int highProgVersion;
@@ -68,20 +68,22 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
   /**
    * Register this program with the local portmapper.
    */
-  public void register(int transport, int boundPort) {
-    if (boundPort != port) {
-      LOG.info("The bound port is " + boundPort
-          + ", different with configured port " + port);
-      port = boundPort;
-    }
+  public void register(int transport) {
     // Register all the program versions with portmapper for a given transport
     for (int vers = lowProgVersion; vers <= highProgVersion; vers++) {
-      PortmapMapping mapEntry = new PortmapMapping(progNumber, vers, transport,
-          port);
-      register(mapEntry);
+      register(vers, transport);
     }
   }
   
+  /**
+   * Register this program with the local portmapper.
+   */
+  private void register(int progVersion, int transport) {
+    PortmapMapping mapEntry = new PortmapMapping(progNumber, progVersion,
+        transport, port);
+    register(mapEntry);
+  }
+  
   /**
    * Register the program with Portmap or Rpcbind
    */
@@ -98,9 +100,6 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
     }
   }
 
-  // Start extra daemons
-  public void startDaemons() {}
-  
   @Override
   public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
       throws Exception {

+ 3 - 12
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java

@@ -23,7 +23,6 @@ import java.util.concurrent.Executors;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.jboss.netty.bootstrap.ServerBootstrap;
-import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.ChannelFactory;
 import org.jboss.netty.channel.ChannelPipeline;
 import org.jboss.netty.channel.ChannelPipelineFactory;
@@ -37,7 +36,6 @@ import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
 public class SimpleTcpServer {
   public static final Log LOG = LogFactory.getLog(SimpleTcpServer.class);
   protected final int port;
-  protected int boundPort = -1; // Will be set after server starts
   protected final SimpleChannelUpstreamHandler rpcProgram;
   
   /** The maximum number of I/O worker threads */
@@ -81,16 +79,9 @@ public class SimpleTcpServer {
     bootstrap.setOption("child.keepAlive", true);
     
     // Listen to TCP port
-    Channel ch = bootstrap.bind(new InetSocketAddress(port));
-    InetSocketAddress socketAddr = (InetSocketAddress) ch.getLocalAddress();
-    boundPort = socketAddr.getPort();
-    
-    LOG.info("Started listening to TCP requests at port " + boundPort + " for "
+    bootstrap.bind(new InetSocketAddress(port));
+
+    LOG.info("Started listening to TCP requests at port " + port + " for "
         + rpcProgram + " with workerCount " + workerCount);
   }
-  
-  // boundPort will be set only after server starts
-  public int getBoundPort() {
-    return this.boundPort;
-  }
 }

+ 3 - 12
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java

@@ -23,7 +23,6 @@ import java.util.concurrent.Executors;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.jboss.netty.bootstrap.ConnectionlessBootstrap;
-import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.Channels;
 import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
 import org.jboss.netty.channel.socket.DatagramChannelFactory;
@@ -40,7 +39,6 @@ public class SimpleUdpServer {
   protected final int port;
   protected final SimpleChannelUpstreamHandler rpcProgram;
   protected final int workerCount;
-  protected int boundPort = -1; // Will be set after server starts
 
   public SimpleUdpServer(int port, SimpleChannelUpstreamHandler program, int workerCount) {
     this.port = port;
@@ -63,16 +61,9 @@ public class SimpleUdpServer {
     b.setOption("receiveBufferSize", RECEIVE_BUFFER_SIZE);
     
     // Listen to the UDP port
-    Channel ch = b.bind(new InetSocketAddress(port));
-    InetSocketAddress socketAddr = (InetSocketAddress) ch.getLocalAddress();
-    boundPort = socketAddr.getPort();
-    
-    LOG.info("Started listening to UDP requests at port " + boundPort + " for "
-        + rpcProgram + " with workerCount " + workerCount);
-  }
+    b.bind(new InetSocketAddress(port));
 
-  // boundPort will be set only after server starts
-  public int getBoundPort() {
-    return this.boundPort;
+    LOG.info("Started listening to UDP requests at port " + port + " for "
+        + rpcProgram + " with workerCount " + workerCount);
   }
 }
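
The boundPort bookkeeping removed from SimpleTcpServer and SimpleUdpServer above is what lets a caller bind to an ephemeral port (port 0) and then recover the port the OS actually assigned. A standalone Netty 3 sketch of that pattern (illustrative; the class name BoundPortExample and the no-op handler are placeholders, while the bind()/getLocalAddress() calls mirror the deleted code):

// Illustrative sketch: bind to an ephemeral port and read back the bound port.
import java.net.InetSocketAddress;
import java.util.concurrent.Executors;

import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;

public class BoundPortExample {
  public static void main(String[] args) {
    ServerBootstrap bootstrap = new ServerBootstrap(new NioServerSocketChannelFactory(
        Executors.newCachedThreadPool(), Executors.newCachedThreadPool()));
    bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
      @Override
      public ChannelPipeline getPipeline() {
        return Channels.pipeline(new SimpleChannelUpstreamHandler()); // no-op handler
      }
    });
    // Bind to port 0 so the OS picks a free port, then read it back from the channel.
    Channel ch = bootstrap.bind(new InetSocketAddress(0));
    int boundPort = ((InetSocketAddress) ch.getLocalAddress()).getPort();
    System.out.println("Bound to port " + boundPort);
    ch.close().awaitUninterruptibly();
    bootstrap.releaseExternalResources();
  }
}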

+ 21 - 90
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java

@@ -17,111 +17,42 @@
  */
 package org.apache.hadoop.portmap;
 
-import java.net.InetSocketAddress;
-import java.net.SocketAddress;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.oncrpc.RpcProgram;
-import org.apache.hadoop.oncrpc.RpcUtil;
+import org.apache.hadoop.oncrpc.SimpleTcpServer;
+import org.apache.hadoop.oncrpc.SimpleUdpServer;
 import org.apache.hadoop.util.StringUtils;
-import org.jboss.netty.bootstrap.ConnectionlessBootstrap;
-import org.jboss.netty.bootstrap.ServerBootstrap;
-import org.jboss.netty.channel.Channel;
-import org.jboss.netty.channel.ChannelPipeline;
-import org.jboss.netty.channel.ChannelPipelineFactory;
-import org.jboss.netty.channel.Channels;
-import org.jboss.netty.channel.group.ChannelGroup;
-import org.jboss.netty.channel.group.DefaultChannelGroup;
-import org.jboss.netty.channel.socket.nio.NioDatagramChannelFactory;
-import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
-import org.jboss.netty.handler.timeout.IdleStateHandler;
-import org.jboss.netty.util.HashedWheelTimer;
-
-import com.google.common.annotations.VisibleForTesting;
 
 /**
  * Portmap service for binding RPC protocols. See RFC 1833 for details.
  */
-final class Portmap {
-  private static final Log LOG = LogFactory.getLog(Portmap.class);
-  private static final int DEFAULT_IDLE_TIME_MILLISECONDS = 5000;
+public class Portmap {
+  public static final Log LOG = LogFactory.getLog(Portmap.class);
+
+  private static void startUDPServer(RpcProgramPortmap rpcProgram) {
+    rpcProgram.register(PortmapMapping.TRANSPORT_UDP);
+    SimpleUdpServer udpServer = new SimpleUdpServer(RpcProgram.RPCB_PORT,
+        rpcProgram, 1);
+    udpServer.run();
+  }
 
-  private ConnectionlessBootstrap udpServer;
-  private ServerBootstrap tcpServer;
-  private ChannelGroup allChannels = new DefaultChannelGroup();
-  private Channel udpChannel;
-  private Channel tcpChannel;
-  private final RpcProgramPortmap handler = new RpcProgramPortmap(allChannels);
+  private static void startTCPServer(final RpcProgramPortmap rpcProgram) {
+    rpcProgram.register(PortmapMapping.TRANSPORT_TCP);
+    SimpleTcpServer tcpServer = new SimpleTcpServer(RpcProgram.RPCB_PORT,
+        rpcProgram, 1);
+    tcpServer.run();
+  }
 
   public static void main(String[] args) {
     StringUtils.startupShutdownMessage(Portmap.class, args, LOG);
-
-    final int port = RpcProgram.RPCB_PORT;
-    Portmap pm = new Portmap();
+    RpcProgramPortmap program = new RpcProgramPortmap();
     try {
-      pm.start(DEFAULT_IDLE_TIME_MILLISECONDS,
-          new InetSocketAddress(port), new InetSocketAddress(port));
+      startUDPServer(program);
+      startTCPServer(program);
     } catch (Throwable e) {
-      LOG.fatal("Failed to start the server. Cause:" + e.getMessage());
-      pm.shutdown();
+      LOG.fatal("Start server failure");
       System.exit(-1);
     }
   }
-
-  void shutdown() {
-    allChannels.close().awaitUninterruptibly();
-    tcpServer.releaseExternalResources();
-    udpServer.releaseExternalResources();
-  }
-
-  @VisibleForTesting
-  SocketAddress getTcpServerLocalAddress() {
-    return tcpChannel.getLocalAddress();
-  }
-
-  @VisibleForTesting
-  SocketAddress getUdpServerLoAddress() {
-    return udpChannel.getLocalAddress();
-  }
-
-  @VisibleForTesting
-  RpcProgramPortmap getHandler() {
-    return handler;
-  }
-
-  void start(final int idleTimeMilliSeconds, final SocketAddress tcpAddress,
-      final SocketAddress udpAddress) {
-
-    tcpServer = new ServerBootstrap(new NioServerSocketChannelFactory(
-        Executors.newCachedThreadPool(), Executors.newCachedThreadPool()));
-    tcpServer.setPipelineFactory(new ChannelPipelineFactory() {
-      private final HashedWheelTimer timer = new HashedWheelTimer();
-      private final IdleStateHandler idleStateHandler = new IdleStateHandler(
-          timer, 0, 0, idleTimeMilliSeconds, TimeUnit.MILLISECONDS);
-
-      @Override
-      public ChannelPipeline getPipeline() throws Exception {
-        return Channels.pipeline(RpcUtil.constructRpcFrameDecoder(),
-            RpcUtil.STAGE_RPC_MESSAGE_PARSER, idleStateHandler, handler,
-            RpcUtil.STAGE_RPC_TCP_RESPONSE);
-      }
-    });
-
-    udpServer = new ConnectionlessBootstrap(new NioDatagramChannelFactory(
-        Executors.newCachedThreadPool()));
-
-    udpServer.setPipeline(Channels.pipeline(RpcUtil.STAGE_RPC_MESSAGE_PARSER,
-        handler, RpcUtil.STAGE_RPC_UDP_RESPONSE));
-
-    tcpChannel = tcpServer.bind(tcpAddress);
-    udpChannel = udpServer.bind(udpAddress);
-    allChannels.add(tcpChannel);
-    allChannels.add(udpChannel);
-
-    LOG.info("Portmap server started at tcp://" + tcpChannel.getLocalAddress()
-        + ", udp://" + udpChannel.getLocalAddress());
-  }
 }

+ 95 - 0
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapInterface.java

@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.portmap;
+
+import org.apache.hadoop.oncrpc.XDR;
+
+/**
+ * Methods that need to be implemented to provide Portmap RPC program.
+ * See RFC 1833 for details.
+ */
+public interface PortmapInterface {
+  public enum Procedure {
+    // the order of the values below is significant.

+    PMAPPROC_NULL,
+    PMAPPROC_SET,
+    PMAPPROC_UNSET,
+    PMAPPROC_GETPORT,
+    PMAPPROC_DUMP,
+    PMAPPROC_CALLIT,
+    PMAPPROC_GETTIME,
+    PMAPPROC_UADDR2TADDR,
+    PMAPPROC_TADDR2UADDR,
+    PMAPPROC_GETVERSADDR,
+    PMAPPROC_INDIRECT,
+    PMAPPROC_GETADDRLIST,
+    PMAPPROC_GETSTAT;
+    
+    public int getValue() {
+      return ordinal();
+    }
+    
+    public static Procedure fromValue(int value) {
+      if (value < 0 || value >= values().length) {
+        return null;
+      }
+      return values()[value];
+    }
+  }
+
+  /**
+   * This procedure does no work. By convention, procedure zero of any protocol
+   * takes no parameters and returns no results.
+   */
+  public XDR nullOp(int xidd, XDR in, XDR out);
+  
+  /**
+   * When a program first becomes available on a machine, it registers itself
+   * with the port mapper program on the same machine. The program passes its
+   * program number "prog", version number "vers", transport protocol number
+   * "prot", and the port "port" on which it awaits service request. The
+   * procedure returns a boolean reply whose value is "TRUE" if the procedure
+   * successfully established the mapping and "FALSE" otherwise. The procedure
+   * refuses to establish a mapping if one already exists for the tuple
+   * "(prog, vers, prot)".
+   */
+  public XDR set(int xid, XDR in, XDR out);
+  
+  /**
+   * When a program becomes unavailable, it should unregister itself with the
+   * port mapper program on the same machine. The parameters and results have
+   * meanings identical to those of "PMAPPROC_SET". The protocol and port number
+   * fields of the argument are ignored.
+   */
+  public XDR unset(int xid, XDR in, XDR out);
+  
+  /**
+   * Given a program number "prog", version number "vers", and transport
+   * protocol number "prot", this procedure returns the port number on which the
+   * program is awaiting call requests. A port value of zeros means the program
+   * has not been registered. The "port" field of the argument is ignored.
+   */
+  public XDR getport(int xid, XDR in, XDR out);
+  
+  /**
+   * This procedure enumerates all entries in the port mapper's database. The
+   * procedure takes no parameters and returns a list of program, version,
+   * protocol, and port values.
+   */
+  public XDR dump(int xid, XDR in, XDR out);
+}
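
Because the Procedure values above are declared in RFC 1833 order, getValue()/fromValue() reduce to an ordinal round-trip, with null returned for out-of-range wire values. A small usage sketch (illustrative; the class name ProcedureExample is hypothetical):

// Illustrative round-trip through the Procedure enum added above.
import org.apache.hadoop.portmap.PortmapInterface.Procedure;

public class ProcedureExample {
  public static void main(String[] args) {
    // PMAPPROC_GETPORT is declared fourth, so its wire value is 3.
    int wireValue = Procedure.PMAPPROC_GETPORT.getValue();
    System.out.println(wireValue);                       // 3
    System.out.println(Procedure.fromValue(wireValue));  // PMAPPROC_GETPORT
    System.out.println(Procedure.fromValue(99));         // null for unknown procedures
  }
}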

+ 2 - 1
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapRequest.java

@@ -22,6 +22,7 @@ import org.apache.hadoop.oncrpc.RpcUtil;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.security.CredentialsNone;
 import org.apache.hadoop.oncrpc.security.VerifierNone;
+import org.apache.hadoop.portmap.PortmapInterface.Procedure;
 
 /**
  * Helper utility for building portmap request
@@ -36,7 +37,7 @@ public class PortmapRequest {
     RpcCall call = RpcCall.getInstance(
         RpcUtil.getNewXid(String.valueOf(RpcProgramPortmap.PROGRAM)),
         RpcProgramPortmap.PROGRAM, RpcProgramPortmap.VERSION,
-        RpcProgramPortmap.PMAPPROC_SET, new CredentialsNone(),
+        Procedure.PMAPPROC_SET.getValue(), new CredentialsNone(),
         new VerifierNone());
     call.write(request);
     return mapping.serialize(request);

+ 9 - 1
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapResponse.java

@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.portmap;
 
+import java.util.Arrays;
+import java.util.Collection;
+
 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.security.VerifierNone;
@@ -42,13 +45,18 @@ public class PortmapResponse {
     return xdr;
   }
 
-  public static XDR pmapList(XDR xdr, int xid, PortmapMapping[] list) {
+  public static XDR pmapList(XDR xdr, int xid, Collection<PortmapMapping> list) {
     RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     for (PortmapMapping mapping : list) {
+      System.out.println(mapping);
       xdr.writeBoolean(true); // Value follows
       mapping.serialize(xdr);
     }
     xdr.writeBoolean(false); // No value follows
     return xdr;
   }
+  
+  public static XDR pmapList(XDR xdr, int xid, PortmapMapping[] list) {
+    return pmapList(xdr, xid, Arrays.asList(list));
+  }
 }

+ 75 - 109
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java

@@ -17,7 +17,9 @@
  */
 package org.apache.hadoop.portmap;
 
-import java.util.concurrent.ConcurrentHashMap;
+import java.util.HashMap;
+import java.util.Map.Entry;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -32,101 +34,75 @@ import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffers;
 import org.jboss.netty.channel.ChannelHandlerContext;
-import org.jboss.netty.channel.ChannelStateEvent;
-import org.jboss.netty.channel.ExceptionEvent;
-import org.jboss.netty.channel.MessageEvent;
-import org.jboss.netty.channel.group.ChannelGroup;
-import org.jboss.netty.handler.timeout.IdleState;
-import org.jboss.netty.handler.timeout.IdleStateAwareChannelUpstreamHandler;
-import org.jboss.netty.handler.timeout.IdleStateEvent;
-
-final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler {
-  static final int PROGRAM = 100000;
-  static final int VERSION = 2;
-
-  static final int PMAPPROC_NULL = 0;
-  static final int PMAPPROC_SET = 1;
-  static final int PMAPPROC_UNSET = 2;
-  static final int PMAPPROC_GETPORT = 3;
-  static final int PMAPPROC_DUMP = 4;
-  static final int PMAPPROC_GETVERSADDR = 9;
 
+/**
+ * An rpcbind request handler.
+ */
+public class RpcProgramPortmap extends RpcProgram implements PortmapInterface {
+  public static final int PROGRAM = 100000;
+  public static final int VERSION = 2;
+  
   private static final Log LOG = LogFactory.getLog(RpcProgramPortmap.class);
 
-  private final ConcurrentHashMap<String, PortmapMapping> map = new ConcurrentHashMap<String, PortmapMapping>();
+  /** Map synchronized using the monitor lock of this instance */
+  private final HashMap<String, PortmapMapping> map;
 
-  /** ChannelGroup that remembers all active channels for gracefully shutdown. */
-  private final ChannelGroup allChannels;
-
-  RpcProgramPortmap(ChannelGroup allChannels) {
-    this.allChannels = allChannels;
-    PortmapMapping m = new PortmapMapping(PROGRAM, VERSION,
-        PortmapMapping.TRANSPORT_TCP, RpcProgram.RPCB_PORT);
-    PortmapMapping m1 = new PortmapMapping(PROGRAM, VERSION,
-        PortmapMapping.TRANSPORT_UDP, RpcProgram.RPCB_PORT);
-    map.put(PortmapMapping.key(m), m);
-    map.put(PortmapMapping.key(m1), m1);
+  public RpcProgramPortmap() {
+    super("portmap", "localhost", RPCB_PORT, PROGRAM, VERSION, VERSION);
+    map = new HashMap<String, PortmapMapping>(256);
   }
 
-  /**
-   * This procedure does no work. By convention, procedure zero of any protocol
-   * takes no parameters and returns no results.
-   */
-  private XDR nullOp(int xid, XDR in, XDR out) {
+  /** Dump all the register RPC services */
+  private synchronized void dumpRpcServices() {
+    Set<Entry<String, PortmapMapping>> entrySet = map.entrySet();
+    for (Entry<String, PortmapMapping> entry : entrySet) {
+      LOG.info("Service: " + entry.getKey() + " portmapping: "
+          + entry.getValue());
+    }
+  }
+  
+  @Override
+  public XDR nullOp(int xid, XDR in, XDR out) {
     return PortmapResponse.voidReply(out, xid);
   }
 
-  /**
-   * When a program first becomes available on a machine, it registers itself
-   * with the port mapper program on the same machine. The program passes its
-   * program number "prog", version number "vers", transport protocol number
-   * "prot", and the port "port" on which it awaits service request. The
-   * procedure returns a boolean reply whose value is "TRUE" if the procedure
-   * successfully established the mapping and "FALSE" otherwise. The procedure
-   * refuses to establish a mapping if one already exists for the tuple
-   * "(prog, vers, prot)".
-   */
-  private XDR set(int xid, XDR in, XDR out) {
+  @Override
+  public XDR set(int xid, XDR in, XDR out) {
     PortmapMapping mapping = PortmapRequest.mapping(in);
     String key = PortmapMapping.key(mapping);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Portmap set key=" + key);
     }
 
-    map.put(key, mapping);
-    return PortmapResponse.intReply(out, xid, mapping.getPort());
+    PortmapMapping value = null;
+    synchronized(this) {
+      map.put(key, mapping);
+      dumpRpcServices();
+      value = map.get(key);
+    }  
+    return PortmapResponse.intReply(out, xid, value.getPort());
   }
 
-  /**
-   * When a program becomes unavailable, it should unregister itself with the
-   * port mapper program on the same machine. The parameters and results have
-   * meanings identical to those of "PMAPPROC_SET". The protocol and port number
-   * fields of the argument are ignored.
-   */
-  private XDR unset(int xid, XDR in, XDR out) {
+  @Override
+  public synchronized XDR unset(int xid, XDR in, XDR out) {
     PortmapMapping mapping = PortmapRequest.mapping(in);
-    String key = PortmapMapping.key(mapping);
-
-    if (LOG.isDebugEnabled())
-      LOG.debug("Portmap remove key=" + key);
-
-    map.remove(key);
+    synchronized(this) {
+      map.remove(PortmapMapping.key(mapping));
+    }
     return PortmapResponse.booleanReply(out, xid, true);
   }
 
-  /**
-   * Given a program number "prog", version number "vers", and transport
-   * protocol number "prot", this procedure returns the port number on which the
-   * program is awaiting call requests. A port value of zeros means the program
-   * has not been registered. The "port" field of the argument is ignored.
-   */
-  private XDR getport(int xid, XDR in, XDR out) {
+  @Override
+  public synchronized XDR getport(int xid, XDR in, XDR out) {
     PortmapMapping mapping = PortmapRequest.mapping(in);
     String key = PortmapMapping.key(mapping);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Portmap GETPORT key=" + key + " " + mapping);
     }
-    PortmapMapping value = map.get(key);
+    PortmapMapping value = null;
+    synchronized(this) {
+      value = map.get(key);
+    }
     int res = 0;
     if (value != null) {
       res = value.getPort();
@@ -139,39 +115,45 @@ final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler {
     return PortmapResponse.intReply(out, xid, res);
   }
 
-  /**
-   * This procedure enumerates all entries in the port mapper's database. The
-   * procedure takes no parameters and returns a list of program, version,
-   * protocol, and port values.
-   */
-  private XDR dump(int xid, XDR in, XDR out) {
-    PortmapMapping[] pmapList = map.values().toArray(new PortmapMapping[0]);
+  @Override
+  public synchronized XDR dump(int xid, XDR in, XDR out) {
+    PortmapMapping[] pmapList = null;
+    synchronized(this) {
+      pmapList = new PortmapMapping[map.values().size()];
+      map.values().toArray(pmapList);
+    }
     return PortmapResponse.pmapList(out, xid, pmapList);
   }
 
   @Override
-  public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
-      throws Exception {
+  public void register(PortmapMapping mapping) {
+    String key = PortmapMapping.key(mapping);
+    synchronized(this) {
+      map.put(key, mapping);
+    }
+  }
 
-    RpcInfo info = (RpcInfo) e.getMessage();
+  @Override
+  public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
     RpcCall rpcCall = (RpcCall) info.header();
-    final int portmapProc = rpcCall.getProcedure();
+    final Procedure portmapProc = Procedure.fromValue(rpcCall.getProcedure());
     int xid = rpcCall.getXid();
-    XDR in = new XDR(info.data().toByteBuffer().asReadOnlyBuffer(),
-        XDR.State.READING);
+    byte[] data = new byte[info.data().readableBytes()];
+    info.data().readBytes(data);
+    XDR in = new XDR(data);
     XDR out = new XDR();
 
-    if (portmapProc == PMAPPROC_NULL) {
+    if (portmapProc == Procedure.PMAPPROC_NULL) {
       out = nullOp(xid, in, out);
-    } else if (portmapProc == PMAPPROC_SET) {
+    } else if (portmapProc == Procedure.PMAPPROC_SET) {
       out = set(xid, in, out);
-    } else if (portmapProc == PMAPPROC_UNSET) {
+    } else if (portmapProc == Procedure.PMAPPROC_UNSET) {
       out = unset(xid, in, out);
-    } else if (portmapProc == PMAPPROC_DUMP) {
+    } else if (portmapProc == Procedure.PMAPPROC_DUMP) {
       out = dump(xid, in, out);
-    } else if (portmapProc == PMAPPROC_GETPORT) {
+    } else if (portmapProc == Procedure.PMAPPROC_GETPORT) {
       out = getport(xid, in, out);
-    } else if (portmapProc == PMAPPROC_GETVERSADDR) {
+    } else if (portmapProc == Procedure.PMAPPROC_GETVERSADDR) {
       out = getport(xid, in, out);
     } else {
       LOG.info("PortmapHandler unknown rpc procedure=" + portmapProc);
@@ -180,29 +162,13 @@ final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler {
       reply.write(out);
     }
 
-    ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
-        .buffer());
+    ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
     RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
     RpcUtil.sendRpcResponse(ctx, rsp);
   }
-
-  @Override
-  public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent e)
-      throws Exception {
-    allChannels.add(e.getChannel());
-  }
-
-  @Override
-  public void channelIdle(ChannelHandlerContext ctx, IdleStateEvent e)
-      throws Exception {
-    if (e.getState() == IdleState.ALL_IDLE) {
-      e.getChannel().close();
-    }
-  }
-
+  
   @Override
-  public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
-    LOG.warn("Encountered ", e.getCause());
-    e.getChannel().close();
+  protected boolean isIdempotent(RpcCall call) {
+    return false;
   }
 }

+ 2 - 11
hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java

@@ -35,7 +35,6 @@ public class TestNfsExports {
       Nfs3Constant.EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT * 1000 * 1000;
   
   private static final int CacheSize = Nfs3Constant.EXPORTS_CACHE_SIZE_DEFAULT;
-  private static final long NanosPerMillis = 1000000;
 
   @Test
   public void testWildcardRW() {
@@ -186,15 +185,7 @@ public class TestNfsExports {
     
     Thread.sleep(1000);
     // no cache for address2 now
-    AccessPrivilege ap;
-    long startNanos = System.nanoTime();
-    do {
-      ap = matcher.getAccessPrivilege(address2, address2);
-      if (ap == AccessPrivilege.NONE) {
-        break;
-      }
-      Thread.sleep(500);
-    } while ((System.nanoTime() - startNanos) / NanosPerMillis < 5000);
-    Assert.assertEquals(AccessPrivilege.NONE, ap);
+    Assert.assertEquals(AccessPrivilege.NONE,
+        matcher.getAccessPrivilege(address2, address2));
   }
 }

+ 0 - 116
hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java

@@ -1,116 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.portmap;
-
-import java.io.IOException;
-import java.net.DatagramPacket;
-import java.net.DatagramSocket;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.util.Map;
-
-import junit.framework.Assert;
-
-import org.apache.hadoop.oncrpc.RpcCall;
-import org.apache.hadoop.oncrpc.XDR;
-import org.apache.hadoop.oncrpc.security.CredentialsNone;
-import org.apache.hadoop.oncrpc.security.VerifierNone;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.internal.util.reflection.Whitebox;
-
-public class TestPortmap {
-  private static Portmap pm = new Portmap();
-  private static final int SHORT_TIMEOUT_MILLISECONDS = 10;
-  private static final int RETRY_TIMES = 5;
-  private int xid;
-
-  @BeforeClass
-  public static void setup() {
-    pm.start(SHORT_TIMEOUT_MILLISECONDS, new InetSocketAddress("localhost", 0),
-        new InetSocketAddress("localhost", 0));
-  }
-
-  @AfterClass
-  public static void tearDown() {
-    pm.shutdown();
-  }
-
-  @Test(timeout = 1000)
-  public void testIdle() throws InterruptedException, IOException {
-    Socket s = new Socket();
-    try {
-      s.connect(pm.getTcpServerLocalAddress());
-
-      int i = 0;
-      while (!s.isConnected() && i < RETRY_TIMES) {
-        ++i;
-        Thread.sleep(SHORT_TIMEOUT_MILLISECONDS);
-      }
-
-      Assert.assertTrue("Failed to connect to the server", s.isConnected()
-          && i < RETRY_TIMES);
-
-      int b = s.getInputStream().read();
-      Assert.assertTrue("The server failed to disconnect", b == -1);
-    } finally {
-      s.close();
-    }
-  }
-
-  @Test(timeout = 1000)
-  public void testRegistration() throws IOException, InterruptedException {
-    XDR req = new XDR();
-    RpcCall.getInstance(++xid, RpcProgramPortmap.PROGRAM,
-        RpcProgramPortmap.VERSION,
-        RpcProgramPortmap.PMAPPROC_SET,
-        new CredentialsNone(), new VerifierNone()).write(req);
-
-    PortmapMapping sent = new PortmapMapping(90000, 1,
-        PortmapMapping.TRANSPORT_TCP, 1234);
-    sent.serialize(req);
-
-    byte[] reqBuf = req.getBytes();
-    DatagramSocket s = new DatagramSocket();
-    DatagramPacket p = new DatagramPacket(reqBuf, reqBuf.length,
-        pm.getUdpServerLoAddress());
-    try {
-      s.send(p);
-    } finally {
-      s.close();
-    }
-
-    // Give the server a chance to process the request
-    Thread.sleep(100);
-    boolean found = false;
-    @SuppressWarnings("unchecked")
-    Map<String, PortmapMapping> map = (Map<String, PortmapMapping>) Whitebox
-        .getInternalState(pm.getHandler(), "map");
-
-    for (PortmapMapping m : map.values()) {
-      if (m.getPort() == sent.getPort()
-          && PortmapMapping.key(m).equals(PortmapMapping.key(sent))) {
-        found = true;
-        break;
-      }
-    }
-    Assert.assertTrue("Registration failed", found);
-  }
-}

+ 2 - 2
hadoop-common-project/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>2.2.1-SNAPSHOT</version>
+    <version>2.2.0</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-common-project</artifactId>
-  <version>2.2.1-SNAPSHOT</version>
+  <version>2.2.0</version>
   <description>Apache Hadoop Common Project</description>
   <name>Apache Hadoop Common Project</name>
   <packaging>pom</packaging>

+ 2 - 2
hadoop-dist/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>2.2.1-SNAPSHOT</version>
+    <version>2.2.0</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-dist</artifactId>
-  <version>2.2.1-SNAPSHOT</version>
+  <version>2.2.0</version>
   <description>Apache Hadoop Distribution</description>
   <name>Apache Hadoop Distribution</name>
   <packaging>jar</packaging>

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml

@@ -22,12 +22,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>2.2.1-SNAPSHOT</version>
+    <version>2.2.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-hdfs-httpfs</artifactId>
-  <version>2.2.1-SNAPSHOT</version>
+  <version>2.2.0</version>
   <packaging>war</packaging>
 
   <name>Apache Hadoop HttpFS</name>

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml

@@ -20,12 +20,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>2.2.1-SNAPSHOT</version>
+    <version>2.2.0</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-hdfs-nfs</artifactId>
-  <version>2.2.1-SNAPSHOT</version>
+  <version>2.2.0</version>
   <description>Apache Hadoop HDFS-NFS</description>
   <name>Apache Hadoop HDFS-NFS</name>
   <packaging>jar</packaging>

+ 15 - 4
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java

@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.nfs.mount;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mount.MountdBase;
@@ -30,14 +32,23 @@ import org.apache.hadoop.mount.MountdBase;
  * handle for requested directory and returns it to the client.
  */
 public class Mountd extends MountdBase {
+  /**
+   * Constructor
+   * @param exports
+   * @throws IOException 
+   */
+  public Mountd(List<String> exports) throws IOException {
+    super(exports, new RpcProgramMountd(exports));
+  }
 
-  public Mountd(Configuration config) throws IOException {
-    super(new RpcProgramMountd(config));
+  public Mountd(List<String> exports, Configuration config) throws IOException {
+    super(exports, new RpcProgramMountd(exports, config));
   }
   
   public static void main(String[] args) throws IOException {
-    Configuration config = new Configuration();
-    Mountd mountd = new Mountd(config);
+    List<String> exports = new ArrayList<String>();
+    exports.add("/");
+    Mountd mountd = new Mountd(exports);
     mountd.start(true);
   }
 }

+ 13 - 13
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java

@@ -36,7 +36,6 @@ import org.apache.hadoop.mount.MountResponse;
 import org.apache.hadoop.nfs.AccessPrivilege;
 import org.apache.hadoop.nfs.NfsExports;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
-import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.RpcCall;
@@ -50,8 +49,6 @@ import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffers;
 import org.jboss.netty.channel.ChannelHandlerContext;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * RPC program corresponding to mountd daemon. See {@link Mountd}.
  */
@@ -74,15 +71,23 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
   
   private final NfsExports hostsMatcher;
 
-  public RpcProgramMountd(Configuration config) throws IOException {
+  public RpcProgramMountd() throws IOException {
+    this(new ArrayList<String>(0));
+  }
+
+  public RpcProgramMountd(List<String> exports) throws IOException {
+    this(exports, new Configuration());
+  }
+
+  public RpcProgramMountd(List<String> exports, Configuration config)
+      throws IOException {
     // Note that RPC cache is not enabled
     super("mountd", "localhost", config.getInt("nfs3.mountd.port", PORT),
         PROGRAM, VERSION_1, VERSION_3);
-    exports = new ArrayList<String>();
-    exports.add(config.get(Nfs3Constant.EXPORT_POINT,
-        Nfs3Constant.EXPORT_POINT_DEFAULT));
+    
     this.hostsMatcher = NfsExports.getInstance(config);
     this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
+    this.exports = Collections.unmodifiableList(exports);
     this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
   }
   
@@ -195,7 +200,7 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
     } else if (mntproc == MNTPROC.UMNTALL) {
       umntall(out, xid, client);
     } else if (mntproc == MNTPROC.EXPORT) {
-      // Currently only support one NFS export 
+      // Currently only support one NFS export "/"
       List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
       hostsMatchers.add(hostsMatcher);
       out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
@@ -215,9 +220,4 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
     // Not required, because cache is turned off
     return false;
   }
-
-  @VisibleForTesting
-  public List<String> getExports() {
-    return this.exports;
-  }
 }

+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java

@@ -46,7 +46,6 @@ public class AsyncDataService {
 
   public AsyncDataService() {
     threadFactory = new ThreadFactory() {
-      @Override
       public Thread newThread(Runnable r) {
         return new Thread(threadGroup, r);
       }
@@ -130,7 +129,6 @@ public class AsyncDataService {
           + openFileCtx.getNextOffset();
     }
 
-    @Override
     public void run() {
       try {
         openFileCtx.executeWriteBack();

+ 4 - 95
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java

@@ -20,19 +20,15 @@ package org.apache.hadoop.hdfs.nfs.nfs3;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.security.UserGroupInformation;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Objects;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.CacheLoader;
 import com.google.common.cache.LoadingCache;
@@ -45,52 +41,15 @@ import com.google.common.cache.RemovalNotification;
 class DFSClientCache {
   private static final Log LOG = LogFactory.getLog(DFSClientCache.class);
   /**
-   * Cache that maps User id to the corresponding DFSClient.
+   * Cache that maps User id to corresponding DFSClient.
    */
   @VisibleForTesting
   final LoadingCache<String, DFSClient> clientCache;
 
   final static int DEFAULT_DFS_CLIENT_CACHE_SIZE = 256;
 
-  /**
-   * Cache that maps <DFSClient, inode path> to the corresponding
-   * FSDataInputStream.
-   */
-  final LoadingCache<DFSInputStreamCaheKey, FSDataInputStream> inputstreamCache;
-
-  /**
-   * Time to live for a DFSClient (in seconds)
-   */
-  final static int DEFAULT_DFS_INPUTSTREAM_CACHE_SIZE = 1024;
-  final static int DEFAULT_DFS_INPUTSTREAM_CACHE_TTL = 10 * 60;
-
   private final Configuration config;
 
-  private static class DFSInputStreamCaheKey {
-    final String userId;
-    final String inodePath;
-
-    private DFSInputStreamCaheKey(String userId, String inodePath) {
-      super();
-      this.userId = userId;
-      this.inodePath = inodePath;
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if (obj instanceof DFSInputStreamCaheKey) {
-        DFSInputStreamCaheKey k = (DFSInputStreamCaheKey) obj;
-        return userId.equals(k.userId) && inodePath.equals(k.inodePath);
-      }
-      return false;
-    }
-
-    @Override
-    public int hashCode() {
-      return Objects.hashCode(userId, inodePath);
-    }
-  }
-
   DFSClientCache(Configuration config) {
     this(config, DEFAULT_DFS_CLIENT_CACHE_SIZE);
   }
@@ -99,14 +58,8 @@ class DFSClientCache {
     this.config = config;
     this.clientCache = CacheBuilder.newBuilder()
         .maximumSize(clientCache)
-        .removalListener(clientRemovalListener())
+        .removalListener(clientRemovealListener())
         .build(clientLoader());
-
-    this.inputstreamCache = CacheBuilder.newBuilder()
-        .maximumSize(DEFAULT_DFS_INPUTSTREAM_CACHE_SIZE)
-        .expireAfterAccess(DEFAULT_DFS_INPUTSTREAM_CACHE_TTL, TimeUnit.SECONDS)
-        .removalListener(inputStreamRemovalListener())
-        .build(inputStreamLoader());
   }
 
   private CacheLoader<String, DFSClient> clientLoader() {
@@ -118,7 +71,6 @@ class DFSClientCache {
 
         // Guava requires CacheLoader never returns null.
         return ugi.doAs(new PrivilegedExceptionAction<DFSClient>() {
-          @Override
           public DFSClient run() throws IOException {
             return new DFSClient(NameNode.getAddress(config), config);
           }
@@ -127,7 +79,7 @@ class DFSClientCache {
     };
   }
 
-  private RemovalListener<String, DFSClient> clientRemovalListener() {
+  private RemovalListener<String, DFSClient> clientRemovealListener() {
     return new RemovalListener<String, DFSClient>() {
       @Override
       public void onRemoval(RemovalNotification<String, DFSClient> notification) {
@@ -143,33 +95,7 @@ class DFSClientCache {
     };
   }
 
-  private RemovalListener<DFSInputStreamCaheKey, FSDataInputStream> inputStreamRemovalListener() {
-    return new RemovalListener<DFSClientCache.DFSInputStreamCaheKey, FSDataInputStream>() {
-
-      @Override
-      public void onRemoval(
-          RemovalNotification<DFSInputStreamCaheKey, FSDataInputStream> notification) {
-        try {
-          notification.getValue().close();
-        } catch (IOException e) {
-        }
-      }
-    };
-  }
-
-  private CacheLoader<DFSInputStreamCaheKey, FSDataInputStream> inputStreamLoader() {
-    return new CacheLoader<DFSInputStreamCaheKey, FSDataInputStream>() {
-
-      @Override
-      public FSDataInputStream load(DFSInputStreamCaheKey key) throws Exception {
-        DFSClient client = getDfsClient(key.userId);
-        DFSInputStream dis = client.open(key.inodePath);
-        return new FSDataInputStream(dis);
-      }
-    };
-  }
-
-  DFSClient getDfsClient(String userName) {
+  DFSClient get(String userName) {
     DFSClient client = null;
     try {
       client = clientCache.get(userName);
@@ -179,21 +105,4 @@ class DFSClientCache {
     }
     return client;
   }
-
-  FSDataInputStream getDfsInputStream(String userName, String inodePath) {
-    DFSInputStreamCaheKey k = new DFSInputStreamCaheKey(userName, inodePath);
-    FSDataInputStream s = null;
-    try {
-      s = inputstreamCache.get(k);
-    } catch (ExecutionException e) {
-      LOG.warn("Failed to create DFSInputStream for user:" + userName
-          + " Cause:" + e);
-    }
-    return s;
-  }
-
-  public void invalidateDfsInputStream(String userName, String inodePath) {
-    DFSInputStreamCaheKey k = new DFSInputStreamCaheKey(userName, inodePath);
-    inputstreamCache.invalidate(k);
-  }
 }

+ 12 - 19
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java

@@ -18,45 +18,38 @@
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.nfs.mount.Mountd;
 import org.apache.hadoop.nfs.nfs3.Nfs3Base;
 import org.apache.hadoop.util.StringUtils;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * Nfs server. Supports NFS v3 using {@link RpcProgramNfs3}.
  * Currently Mountd program is also started inside this class.
  * Only TCP server is supported and UDP is not supported.
  */
 public class Nfs3 extends Nfs3Base {
-  private Mountd mountd;
-  
   static {
     Configuration.addDefaultResource("hdfs-default.xml");
     Configuration.addDefaultResource("hdfs-site.xml");
   }
   
-  public Nfs3(Configuration conf) throws IOException {
-    super(new RpcProgramNfs3(conf), conf);
-    mountd = new Mountd(conf);
+  public Nfs3(List<String> exports) throws IOException {
+    super(new Mountd(exports), new RpcProgramNfs3());
   }
 
-  public Mountd getMountd() {
-    return mountd;
-  }
-  
-  @VisibleForTesting
-  public void startServiceInternal(boolean register) throws IOException {
-    mountd.start(register); // Start mountd
-    start(register);
+  public Nfs3(List<String> exports, Configuration config) throws IOException {
+    super(new Mountd(exports, config), new RpcProgramNfs3(config), config);
   }
-  
+
   public static void main(String[] args) throws IOException {
-    StringUtils.startupShutdownMessage(Nfs3.class, args, LOG);    
-    final Nfs3 nfsServer = new Nfs3(new Configuration());
-    nfsServer.startServiceInternal(true);
+    StringUtils.startupShutdownMessage(Nfs3.class, args, LOG);
+    List<String> exports = new ArrayList<String>();
+    exports.add("/");
+    final Nfs3 nfsServer = new Nfs3(exports);
+    nfsServer.start(true);
   }
 }

+ 0 - 14
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java

@@ -109,26 +109,12 @@ public class Nfs3Utils {
    * Send a write response to the netty network socket channel
    */
   public static void writeChannel(Channel channel, XDR out, int xid) {
-    if (channel == null) {
-      RpcProgramNfs3.LOG
-          .info("Null channel should only happen in tests. Do nothing.");
-      return;
-    }
-    
     if (RpcProgramNfs3.LOG.isDebugEnabled()) {
       RpcProgramNfs3.LOG.debug(WRITE_RPC_END + xid);
     }
     ChannelBuffer outBuf = XDR.writeMessageTcp(out, true);
     channel.write(outBuf);
   }
-  
-  public static void writeChannelCommit(Channel channel, XDR out, int xid) {
-    if (RpcProgramNfs3.LOG.isDebugEnabled()) {
-      RpcProgramNfs3.LOG.debug("Commit done:" + xid);
-    }
-    ChannelBuffer outBuf = XDR.writeMessageTcp(out, true);
-    channel.write(outBuf);
-  }
 
   private static boolean isSet(int access, int bits) {
     return (access & bits) == bits;

+ 68 - 318
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java

@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
 import java.nio.channels.ClosedChannelException;
+import java.security.InvalidParameterException;
 import java.util.EnumSet;
 import java.util.Iterator;
 import java.util.Map.Entry;
@@ -47,7 +48,6 @@ import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
-import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response;
 import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
 import org.apache.hadoop.nfs.nfs3.response.WccAttr;
 import org.apache.hadoop.nfs.nfs3.response.WccData;
@@ -69,18 +69,12 @@ class OpenFileCtx {
   // Pending writes water mark for dump, 1MB
   private static long DUMP_WRITE_WATER_MARK = 1024 * 1024;
 
-  static enum COMMIT_STATUS {
-    COMMIT_FINISHED,
-    COMMIT_WAIT,
-    COMMIT_INACTIVE_CTX,
-    COMMIT_INACTIVE_WITH_PENDING_WRITE,
-    COMMIT_ERROR,
-    COMMIT_DO_SYNC;
-  }
+  public final static int COMMIT_FINISHED = 0;
+  public final static int COMMIT_WAIT = 1;
+  public final static int COMMIT_INACTIVE_CTX = 2;
+  public final static int COMMIT_INACTIVE_WITH_PENDING_WRITE = 3;
+  public final static int COMMIT_ERROR = 4;
 
-  private final DFSClient client;
-  private final IdUserGroup iug;
-  
   // The stream status. False means the stream is closed.
   private volatile boolean activeState;
   // The stream write-back status. True means one thread is doing write back.
@@ -93,57 +87,10 @@ class OpenFileCtx {
   private AtomicLong nextOffset;
   private final HdfsDataOutputStream fos;
   
-  // It's updated after each sync to HDFS
-  private Nfs3FileAttributes latestAttr;
-  
-  private final ConcurrentNavigableMap<OffsetRange, WriteCtx> pendingWrites;
-  
-  private final ConcurrentNavigableMap<Long, CommitCtx> pendingCommits;
-
-  static class CommitCtx {
-    private final long offset;
-    private final Channel channel;
-    private final int xid;
-    private final Nfs3FileAttributes preOpAttr;
-
-    // Remember time for debug purpose
-    private final long startTime;
-
-    long getOffset() {
-      return offset;
-    }
+  // TODO: make it mutable and update it after each writing back to HDFS
+  private final Nfs3FileAttributes latestAttr;
 
-    Channel getChannel() {
-      return channel;
-    }
-
-    int getXid() {
-      return xid;
-    }
-
-    Nfs3FileAttributes getPreOpAttr() {
-      return preOpAttr;
-    }
-
-    long getStartTime() {
-      return startTime;
-    }
-
-    CommitCtx(long offset, Channel channel, int xid,
-        Nfs3FileAttributes preOpAttr) {
-      this.offset = offset;
-      this.channel = channel;
-      this.xid = xid;
-      this.preOpAttr = preOpAttr;
-      this.startTime = System.currentTimeMillis();
-    }
-
-    @Override
-    public String toString() {
-      return String.format("offset: %d xid: %d startTime: %d", offset, xid,
-          startTime);
-    }
-  }
+  private final ConcurrentNavigableMap<OffsetRange, WriteCtx> pendingWrites;
   
   // The last write, commit request or write-back event. Updating time to keep
   // output steam alive.
@@ -164,22 +111,10 @@ class OpenFileCtx {
     return System.currentTimeMillis() - lastAccessTime > streamTimeout;
   }
   
-  long getLastAccessTime() {
-    return lastAccessTime;  
-  }
-  
   public long getNextOffset() {
     return nextOffset.get();
   }
   
-  boolean getActiveState() {
-    return this.activeState;
-  }
-  
-  boolean hasPendingWork() {
-    return (pendingWrites.size() != 0 || pendingCommits.size() != 0);
-  }
-  
   // Increase or decrease the memory occupation of non-sequential writes
   private long updateNonSequentialWriteInMemory(long count) {
     long newValue = nonSequentialWriteInMemory.addAndGet(count);
@@ -195,7 +130,7 @@ class OpenFileCtx {
   }
   
   OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr,
-      String dumpFilePath, DFSClient client, IdUserGroup iug) {
+      String dumpFilePath) {
     this.fos = fos;
     this.latestAttr = latestAttr;
     // We use the ReverseComparatorOnMin as the comparator of the map. In this
@@ -203,9 +138,6 @@ class OpenFileCtx {
     // retrieve the last element to write back to HDFS.
     pendingWrites = new ConcurrentSkipListMap<OffsetRange, WriteCtx>(
         OffsetRange.ReverseComparatorOnMin);
-    
-    pendingCommits = new ConcurrentSkipListMap<Long, CommitCtx>();
-    
     updateLastAccessTime();
     activeState = true;
     asyncStatus = false;
@@ -221,8 +153,6 @@ class OpenFileCtx {
       assert(nextOffset.get() == this.fos.getPos());
     } catch (IOException e) {}
     dumpThread = null;
-    this.client = client;
-    this.iug = iug;
   }
 
   public Nfs3FileAttributes getLatestAttr() {
@@ -617,23 +547,19 @@ class OpenFileCtx {
         // of reordered writes and won't send more writes until it gets
         // responses of the previous batch. So here send response immediately
         // for unstable non-sequential write
-        if (stableHow != WriteStableHow.UNSTABLE) {
-          LOG.info("Have to change stable write to unstable write:"
-              + request.getStableHow());
-          stableHow = WriteStableHow.UNSTABLE;
-        }
-
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("UNSTABLE write request, send response for offset: "
-              + writeCtx.getOffset());
+        if (request.getStableHow() == WriteStableHow.UNSTABLE) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("UNSTABLE write request, send response for offset: "
+                + writeCtx.getOffset());
+          }
+          WccData fileWcc = new WccData(preOpAttr, latestAttr);
+          WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
+              fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
+          Nfs3Utils
+              .writeChannel(channel, response.writeHeaderAndResponse(new XDR(),
+                  xid, new VerifierNone()), xid);
+          writeCtx.setReplied(true);
         }
-        WccData fileWcc = new WccData(preOpAttr, latestAttr);
-        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
-            fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
-        Nfs3Utils
-            .writeChannel(channel, response.writeHeaderAndResponse(new XDR(),
-                xid, new VerifierNone()), xid);
-        writeCtx.setReplied(true);
       }
     }
   }
@@ -710,65 +636,20 @@ class OpenFileCtx {
     }
     return response;
   }
-  
+
   /**
-   * Check the commit status with the given offset
-   * @param commitOffset the offset to commit
-   * @param channel the channel to return response
-   * @param xid the xid of the commit request
-   * @param preOpAttr the preOp attribute
-   * @param fromRead whether the commit is triggered from read request
-   * @return one commit status: COMMIT_FINISHED, COMMIT_WAIT,
-   * COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR
+   * return one commit status: COMMIT_FINISHED, COMMIT_WAIT,
+   * COMMIT_INACTIVE_CTX, COMMIT_ERROR
    */
-  public COMMIT_STATUS checkCommit(DFSClient dfsClient, long commitOffset,
-      Channel channel, int xid, Nfs3FileAttributes preOpAttr, boolean fromRead) {
-    if (!fromRead) {
-      Preconditions.checkState(channel != null && preOpAttr != null);
-      // Keep stream active
-      updateLastAccessTime();
-    }
-    Preconditions.checkState(commitOffset >= 0);
-
-    COMMIT_STATUS ret = checkCommitInternal(commitOffset, channel, xid,
-        preOpAttr, fromRead);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Got commit status: " + ret.name());
-    }
-    // Do the sync outside the lock
-    if (ret == COMMIT_STATUS.COMMIT_DO_SYNC
-        || ret == COMMIT_STATUS.COMMIT_FINISHED) {
-      try {
-        // Sync file data and length
-        fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
-        ret = COMMIT_STATUS.COMMIT_FINISHED; // Remove COMMIT_DO_SYNC status 
-        // Nothing to do for metadata since attr related change is pass-through
-      } catch (ClosedChannelException cce) {
-        if (pendingWrites.isEmpty()) {
-          ret = COMMIT_STATUS.COMMIT_FINISHED;
-        } else {
-          ret = COMMIT_STATUS.COMMIT_ERROR;
-        }
-      } catch (IOException e) {
-        LOG.error("Got stream error during data sync:" + e);
-        // Do nothing. Stream will be closed eventually by StreamMonitor.
-        // status = Nfs3Status.NFS3ERR_IO;
-        ret = COMMIT_STATUS.COMMIT_ERROR;
-      }
-    }
-    return ret;
+  public int checkCommit(long commitOffset) {
+    return activeState ? checkCommitInternal(commitOffset)
+        : COMMIT_INACTIVE_CTX;
   }
   
-  @VisibleForTesting
-  synchronized COMMIT_STATUS checkCommitInternal(long commitOffset,
-      Channel channel, int xid, Nfs3FileAttributes preOpAttr, boolean fromRead) {
-    if (!activeState) {
-      if (pendingWrites.isEmpty()) {
-        return COMMIT_STATUS.COMMIT_INACTIVE_CTX;
-      } else {
-        // TODO: return success if already committed
-        return COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE;
-      }
+  private int checkCommitInternal(long commitOffset) {
+    if (commitOffset == 0) {
+      // Commit whole file
+      commitOffset = nextOffset.get();
     }
 
     long flushed = 0;
@@ -776,42 +657,39 @@ class OpenFileCtx {
       flushed = getFlushedOffset();
     } catch (IOException e) {
       LOG.error("Can't get flushed offset, error:" + e);
-      return COMMIT_STATUS.COMMIT_ERROR;
+      return COMMIT_ERROR;
     }
     if (LOG.isDebugEnabled()) {
       LOG.debug("getFlushedOffset=" + flushed + " commitOffset=" + commitOffset);
     }
+    if (flushed < commitOffset) {
+      // Keep stream active
+      updateLastAccessTime();
+      return COMMIT_WAIT;
+    }
 
-    if (commitOffset > 0) {
-      if (commitOffset > flushed) {
-        if (!fromRead) {
-          CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid,
-              preOpAttr);
-          pendingCommits.put(commitOffset, commitCtx);
-        }
-        return COMMIT_STATUS.COMMIT_WAIT;
+    int ret = COMMIT_WAIT;
+    try {
+      // Sync file data and length
+      fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
+      // Nothing to do for metadata since attr related change is pass-through
+      ret = COMMIT_FINISHED;
+    } catch (ClosedChannelException cce) { 
+      ret = COMMIT_INACTIVE_CTX;
+      if (pendingWrites.isEmpty()) {
+        ret = COMMIT_INACTIVE_CTX;
       } else {
-        return COMMIT_STATUS.COMMIT_DO_SYNC;
+        ret = COMMIT_INACTIVE_WITH_PENDING_WRITE;
       }
+    } catch (IOException e) {
+      LOG.error("Got stream error during data sync:" + e);
+      // Do nothing. Stream will be closed eventually by StreamMonitor.
+      ret = COMMIT_ERROR;
     }
 
-    Entry<OffsetRange, WriteCtx> key = pendingWrites.firstEntry();
-
-    // Commit whole file, commitOffset == 0
-    if (pendingWrites.isEmpty()) {
-      // Note that, there is no guarantee data is synced. TODO: We could still
-      // do a sync here though the output stream might be closed.
-      return COMMIT_STATUS.COMMIT_FINISHED;
-    } else {
-      if (!fromRead) {
-        // Insert commit
-        long maxOffset = key.getKey().getMax() - 1;
-        Preconditions.checkState(maxOffset > 0);
-        CommitCtx commitCtx = new CommitCtx(maxOffset, channel, xid, preOpAttr);
-        pendingCommits.put(maxOffset, commitCtx);
-      }
-      return COMMIT_STATUS.COMMIT_WAIT;
-    }
+    // Keep stream active
+    updateLastAccessTime();
+    return ret;
   }
   
   private void addWrite(WriteCtx writeCtx) {
@@ -826,18 +704,19 @@ class OpenFileCtx {
    * @return true, remove stream; false, keep stream
    */
   public synchronized boolean streamCleanup(long fileId, long streamTimeout) {
-    Preconditions
-        .checkState(streamTimeout >= Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT);
-    if (!activeState) {
-      return true;
+    if (streamTimeout < WriteManager.MINIMIUM_STREAM_TIMEOUT) {
+      throw new InvalidParameterException("StreamTimeout" + streamTimeout
+          + "ms is less than MINIMIUM_STREAM_TIMEOUT "
+          + WriteManager.MINIMIUM_STREAM_TIMEOUT + "ms");
     }
     
     boolean flag = false;
     // Check the stream timeout
     if (checkStreamTimeout(streamTimeout)) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("stream can be closed for fileId:" + fileId);
+        LOG.debug("closing stream for fileId:" + fileId);
       }
+      cleanup();
       flag = true;
     }
     return flag;
@@ -855,18 +734,8 @@ class OpenFileCtx {
         LOG.debug("The asyn write task has no pending writes, fileId: "
             + latestAttr.getFileId());
       }
-      // process pending commit again to handle this race: a commit is added
-      // to pendingCommits map just after the last doSingleWrite returns.
-      // There is no pending write and the commit should be handled by the
-      // last doSingleWrite. Due to the race, the commit is left along and
-      // can't be processed until cleanup. Therefore, we should do another
-      // processCommits to fix the race issue.
-      processCommits(nextOffset.get()); // nextOffset has same value as
-                                        // flushedOffset
       this.asyncStatus = false;
-      return null;
-    } 
-    
+    } else {
       Entry<OffsetRange, WriteCtx> lastEntry = pendingWrites.lastEntry();
       OffsetRange range = lastEntry.getKey();
       WriteCtx toWrite = lastEntry.getValue();
@@ -881,7 +750,6 @@ class OpenFileCtx {
         if (LOG.isDebugEnabled()) {
           LOG.debug("The next sequencial write has not arrived yet");
         }
-        processCommits(nextOffset.get()); // handle race
         this.asyncStatus = false;
       } else if (range.getMin() < offset && range.getMax() > offset) {
         // shouldn't happen since we do sync for overlapped concurrent writers
@@ -889,7 +757,6 @@ class OpenFileCtx {
             + range.getMax() + "), nextOffset=" + offset
             + ". Silently drop it now");
         pendingWrites.remove(range);
-        processCommits(nextOffset.get()); // handle race
       } else {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Remove write(" + range.getMin() + "-" + range.getMax()
@@ -904,7 +771,7 @@ class OpenFileCtx {
         }
         return toWrite;
       }
-    
+    }
     return null;
   }
   
@@ -926,7 +793,7 @@ class OpenFileCtx {
       
       if (!activeState && LOG.isDebugEnabled()) {
         LOG.debug("The openFileCtx is not active anymore, fileId: "
-            + latestAttr.getFileId());
+            + +latestAttr.getFileId());
       }
     } finally {
       // make sure we reset asyncStatus to false
@@ -934,71 +801,6 @@ class OpenFileCtx {
     }
   }
 
-  private void processCommits(long offset) {
-    Preconditions.checkState(offset > 0);
-    long flushedOffset = 0;
-    Entry<Long, CommitCtx> entry = null;
-
-    int status = Nfs3Status.NFS3ERR_IO;
-    try {
-      flushedOffset = getFlushedOffset();
-      entry = pendingCommits.firstEntry();
-      if (entry == null || entry.getValue().offset > flushedOffset) {
-        return;
-      }
-
-      // Now do sync for the ready commits
-      // Sync file data and length
-      fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
-      status = Nfs3Status.NFS3_OK;
-    } catch (ClosedChannelException cce) {
-      if (!pendingWrites.isEmpty()) {
-        LOG.error("Can't sync for fileId: " + latestAttr.getFileId()
-            + ". Channel closed with writes pending");
-      }
-      status = Nfs3Status.NFS3ERR_IO;
-    } catch (IOException e) {
-      LOG.error("Got stream error during data sync:" + e);
-      // Do nothing. Stream will be closed eventually by StreamMonitor.
-      status = Nfs3Status.NFS3ERR_IO;
-    }
-
-    // Update latestAttr
-    try {
-      latestAttr = Nfs3Utils.getFileAttr(client,
-          Nfs3Utils.getFileIdPath(latestAttr.getFileId()), iug);
-    } catch (IOException e) {
-      LOG.error("Can't get new file attr for fileId: " + latestAttr.getFileId());
-      status = Nfs3Status.NFS3ERR_IO;
-    }
-
-    if (latestAttr.getSize() != offset) {
-      LOG.error("After sync, the expect file size: " + offset
-          + ", however actual file size is: " + latestAttr.getSize());
-      status = Nfs3Status.NFS3ERR_IO;
-    }
-    WccData wccData = new WccData(Nfs3Utils.getWccAttr(latestAttr), latestAttr);
-
-    // Send response for the ready commits
-    while (entry != null && entry.getValue().offset <= flushedOffset) {
-      pendingCommits.remove(entry.getKey());
-      CommitCtx commit = entry.getValue();
-
-      COMMIT3Response response = new COMMIT3Response(status, wccData,
-          Nfs3Constant.WRITE_COMMIT_VERF);
-      Nfs3Utils.writeChannelCommit(commit.getChannel(), response
-          .writeHeaderAndResponse(new XDR(), commit.getXid(),
-              new VerifierNone()), commit.getXid());
-      
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("FileId: " + latestAttr.getFileid() + " Service time:"
-            + (System.currentTimeMillis() - commit.getStartTime())
-            + "ms. Sent response for commit:" + commit);
-      }
-      entry = pendingCommits.firstEntry();
-    }
-  }
-  
   private void doSingleWrite(final WriteCtx writeCtx) {
     Channel channel = writeCtx.getChannel();
     int xid = writeCtx.getXid();
@@ -1010,7 +812,7 @@ class OpenFileCtx {
     FileHandle handle = writeCtx.getHandle();
     if (LOG.isDebugEnabled()) {
       LOG.debug("do write, fileId: " + handle.getFileId() + " offset: "
-          + offset + " length:" + count + " stableHow:" + stableHow.name());
+          + offset + " length:" + count + " stableHow:" + stableHow.getValue());
     }
 
     try {
@@ -1042,23 +844,6 @@ class OpenFileCtx {
       }
       
       if (!writeCtx.getReplied()) {
-        if (stableHow != WriteStableHow.UNSTABLE) {
-          LOG.info("Do sync for stable write:" + writeCtx);
-          try {
-            if (stableHow == WriteStableHow.DATA_SYNC) {
-              fos.hsync();
-            } else {
-              Preconditions.checkState(stableHow == WriteStableHow.FILE_SYNC,
-                  "Unknown WriteStableHow:" + stableHow);
-              // Sync file data and length
-              fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
-            }
-          } catch (IOException e) {
-            LOG.error("hsync failed with writeCtx:" + writeCtx + " error:" + e);
-            throw e;
-          }
-        }
-        
         WccAttr preOpAttr = latestAttr.getWccAttr();
         WccData fileWcc = new WccData(preOpAttr, latestAttr);
         if (writeCtx.getOriginalCount() != WriteCtx.INVALID_ORIGINAL_COUNT) {
@@ -1071,10 +856,6 @@ class OpenFileCtx {
         Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
             new XDR(), xid, new VerifierNone()), xid);
       }
-      
-      // Handle the waiting commits without holding any lock
-      processCommits(writeCtx.getOffset() + writeCtx.getCount());
-     
     } catch (IOException e) {
       LOG.error("Error writing to fileId " + handle.getFileId() + " at offset "
           + offset + " and length " + count, e);
@@ -1091,7 +872,7 @@ class OpenFileCtx {
     }
   }
 
-  synchronized void cleanup() {
+  private synchronized void cleanup() {
     if (!activeState) {
       LOG.info("Current OpenFileCtx is already inactive, no need to cleanup.");
       return;
@@ -1099,7 +880,7 @@ class OpenFileCtx {
     activeState = false;
 
     // stop the dump thread
-    if (dumpThread != null && dumpThread.isAlive()) {
+    if (dumpThread != null) {
       dumpThread.interrupt();
       try {
         dumpThread.join(3000);
@@ -1156,35 +937,4 @@ class OpenFileCtx {
       }
     }
   }
-  
-  @VisibleForTesting
-  ConcurrentNavigableMap<OffsetRange, WriteCtx> getPendingWritesForTest(){
-    return pendingWrites;
-  }
-  
-  @VisibleForTesting
-  ConcurrentNavigableMap<Long, CommitCtx> getPendingCommitsForTest(){
-    return pendingCommits;
-  }
-  
-  @VisibleForTesting
-  long getNextOffsetForTest() {
-    return nextOffset.get();
-  }
-  
-  @VisibleForTesting
-  void setNextOffsetForTest(long newValue) {
-    nextOffset.set(newValue);
-  }
-  
-  @VisibleForTesting
-  void setActiveStatusForTest(boolean activeState) {
-    this.activeState = activeState;
-  }
-  
-  @Override
-  public String toString() {
-    return String.format("activeState: %b asyncStatus: %b nextOffset: %d",
-        activeState, asyncStatus, nextOffset.get());
-  }
 }

+ 0 - 270
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java

@@ -1,270 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.nfs.nfs3;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.concurrent.ConcurrentMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.nfs.nfs3.FileHandle;
-import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
-import org.apache.hadoop.util.Daemon;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-
-/**
- * A cache saves OpenFileCtx objects for different users. Each cache entry is
- * used to maintain the writing context for a single file.
- */
-class OpenFileCtxCache {
-  private static final Log LOG = LogFactory.getLog(OpenFileCtxCache.class);
-  // Insert and delete with openFileMap are synced
-  private final ConcurrentMap<FileHandle, OpenFileCtx> openFileMap = Maps
-      .newConcurrentMap();
-
-  private final int maxStreams;
-  private final long streamTimeout;
-  private final StreamMonitor streamMonitor;
-
-  OpenFileCtxCache(Configuration config, long streamTimeout) {
-    maxStreams = config.getInt(Nfs3Constant.MAX_OPEN_FILES,
-        Nfs3Constant.MAX_OPEN_FILES_DEFAULT);
-    LOG.info("Maximum open streams is " + maxStreams);
-    this.streamTimeout = streamTimeout;
-    streamMonitor = new StreamMonitor();
-  }
-
-  /**
-   * The entry to be evicted is based on the following rules:<br>
-   * 1. if the OpenFileCtx has any pending task, it will not be chosen.<br>
-   * 2. if there is inactive OpenFileCtx, the first found one is to evict. <br>
-   * 3. For OpenFileCtx entries don't belong to group 1 or 2, the idlest one 
-   * is select. If it's idle longer than OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT, it
-   * will be evicted. Otherwise, the whole eviction request is failed.
-   */
-  @VisibleForTesting
-  Entry<FileHandle, OpenFileCtx> getEntryToEvict() {
-    Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
-        .iterator();
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("openFileMap size:" + openFileMap.size());
-    }
-
-    Entry<FileHandle, OpenFileCtx> idlest = null;
-    
-    while (it.hasNext()) {
-      Entry<FileHandle, OpenFileCtx> pairs = it.next();
-      OpenFileCtx ctx = pairs.getValue();
-      if (!ctx.getActiveState()) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Got one inactive stream: " + ctx);
-        }
-        return pairs;
-      }
-      if (ctx.hasPendingWork()) {
-        // Always skip files with pending work.
-        continue;
-      }
-      if (idlest == null) {
-        idlest = pairs;
-      } else {
-        if (ctx.getLastAccessTime() < idlest.getValue().getLastAccessTime()) {
-          idlest = pairs;
-        }
-      }
-    }
-
-    if (idlest == null) {
-      LOG.warn("No eviction candidate. All streams have pending work.");
-      return null;
-    } else {
-      long idleTime = System.currentTimeMillis()
-          - idlest.getValue().getLastAccessTime();
-      if (idleTime < Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("idlest stream's idle time:" + idleTime);
-        }
-        LOG.warn("All opened streams are busy, can't remove any from cache.");
-        return null;
-      } else {
-        return idlest;
-      }
-    }
-  }
-
-  boolean put(FileHandle h, OpenFileCtx context) {
-    OpenFileCtx toEvict = null;
-    synchronized (this) {
-      Preconditions.checkState(openFileMap.size() <= this.maxStreams,
-          "stream cache size " + openFileMap.size()
-              + "  is larger than maximum" + this.maxStreams);
-      if (openFileMap.size() == this.maxStreams) {
-        Entry<FileHandle, OpenFileCtx> pairs = getEntryToEvict();
-        if (pairs ==null) {
-          return false;
-        } else {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Evict stream ctx: " + pairs.getValue());
-          }
-          toEvict = openFileMap.remove(pairs.getKey());
-          Preconditions.checkState(toEvict == pairs.getValue(),
-              "The deleted entry is not the same as odlest found.");
-        }
-      }
-      openFileMap.put(h, context);
-    }
-    
-    // Cleanup the old stream outside the lock
-    if (toEvict != null) {
-      toEvict.cleanup();
-    }
-    return true;
-  }
-
-  @VisibleForTesting
-  void scan(long streamTimeout) {
-    ArrayList<OpenFileCtx> ctxToRemove = new ArrayList<OpenFileCtx>();
-    Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
-        .iterator();
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("openFileMap size:" + openFileMap.size());
-    }
-
-    while (it.hasNext()) {
-      Entry<FileHandle, OpenFileCtx> pairs = it.next();
-      FileHandle handle = pairs.getKey();
-      OpenFileCtx ctx = pairs.getValue();
-      if (!ctx.streamCleanup(handle.getFileId(), streamTimeout)) {
-        continue;
-      }
-
-      // Check it again inside lock before removing
-      synchronized (this) {
-        OpenFileCtx ctx2 = openFileMap.get(handle);
-        if (ctx2 != null) {
-          if (ctx2.streamCleanup(handle.getFileId(), streamTimeout)) {
-            openFileMap.remove(handle);
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("After remove stream " + handle.getFileId()
-                  + ", the stream number:" + openFileMap.size());
-            }
-            ctxToRemove.add(ctx2);
-          }
-        }
-      }
-    }
-
-    // Invoke the cleanup outside the lock
-    for (OpenFileCtx ofc : ctxToRemove) {
-      ofc.cleanup();
-    }
-  }
-
-  OpenFileCtx get(FileHandle key) {
-    return openFileMap.get(key);
-  }
-
-  int size() {
-    return openFileMap.size();
-  }
-
-  void start() {
-    streamMonitor.start();
-  }
-
-  // Evict all entries
-  void cleanAll() {
-    ArrayList<OpenFileCtx> cleanedContext = new ArrayList<OpenFileCtx>();
-    synchronized (this) {
-      Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
-          .iterator();
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("openFileMap size:" + openFileMap.size());
-      }
-
-      while (it.hasNext()) {
-        Entry<FileHandle, OpenFileCtx> pairs = it.next();
-        OpenFileCtx ctx = pairs.getValue();
-        it.remove();
-        cleanedContext.add(ctx);
-      }
-    }
-
-    // Invoke the cleanup outside the lock
-    for (OpenFileCtx ofc : cleanedContext) {
-      ofc.cleanup();
-    }
-  }
-
-  void shutdown() {
-    // stop the dump thread
-    if (streamMonitor != null && streamMonitor.isAlive()) {
-      streamMonitor.shouldRun(false);
-      streamMonitor.interrupt();
-      try {
-        streamMonitor.join(3000);
-      } catch (InterruptedException e) {
-      }
-    }
-    
-    cleanAll();
-  }
-
-  /**
-   * StreamMonitor wakes up periodically to find and closes idle streams.
-   */
-  class StreamMonitor extends Daemon {
-    private final static int rotation = 5 * 1000; // 5 seconds
-    private long lastWakeupTime = 0;
-    private boolean shouldRun = true;
-    
-    void shouldRun(boolean shouldRun) {
-      this.shouldRun = shouldRun;
-    }
-    
-    @Override
-    public void run() {
-      while (shouldRun) {
-        scan(streamTimeout);
-
-        // Check if it can sleep
-        try {
-          long workedTime = System.currentTimeMillis() - lastWakeupTime;
-          if (workedTime < rotation) {
-            if (LOG.isTraceEnabled()) {
-              LOG.trace("StreamMonitor can still have a sleep:"
-                  + ((rotation - workedTime) / 1000));
-            }
-            Thread.sleep(rotation - workedTime);
-          }
-          lastWakeupTime = System.currentTimeMillis();
-
-        } catch (InterruptedException e) {
-          LOG.info("StreamMonitor got interrupted");
-          return;
-        }
-      }
-    }
-  }
-}

+ 64 - 127
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java

@@ -30,7 +30,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.FileUtil;
@@ -39,12 +38,12 @@ import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.nfs.AccessPrivilege;
 import org.apache.hadoop.nfs.NfsExports;
 import org.apache.hadoop.nfs.NfsFileType;
@@ -126,8 +125,6 @@ import org.jboss.netty.buffer.ChannelBuffers;
 import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.ChannelHandlerContext;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * RPC program corresponding to nfs daemon. See {@link Nfs3}.
  */
@@ -163,9 +160,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   
   private final RpcCallCache rpcCallCache;
 
+  public RpcProgramNfs3() throws IOException {
+    this(new Configuration());
+  }
+
   public RpcProgramNfs3(Configuration config) throws IOException {
-    super("NFS3", "localhost", config.getInt(Nfs3Constant.NFS3_SERVER_PORT,
-        Nfs3Constant.NFS3_SERVER_PORT_DEFAULT), Nfs3Constant.PROGRAM,
+    super("NFS3", "localhost", Nfs3Constant.PORT, Nfs3Constant.PROGRAM,
         Nfs3Constant.VERSION, Nfs3Constant.VERSION);
    
     config.set(FsPermission.UMASK_LABEL, "000");
@@ -211,11 +211,6 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     }
   }
   
-  @Override
-  public void startDaemons() {
-     writeManager.startAsyncDataSerivce();
-  }
-  
   /******************************************************
    * RPC call handlers
    ******************************************************/
@@ -238,7 +233,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       return response;
     }
     
-    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
       return response;
@@ -313,7 +308,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   public SETATTR3Response setattr(XDR xdr, SecurityHandler securityHandler,
       InetAddress client) {
     SETATTR3Response response = new SETATTR3Response(Nfs3Status.NFS3_OK);
-    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
       return response;
@@ -395,7 +390,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       return response;
     }
     
-    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
       return response;
@@ -457,7 +452,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       return response;
     }
     
-    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
       return response;
@@ -505,7 +500,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       return response;
     }
 
-    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
       return response;
@@ -566,14 +561,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   public READ3Response read(XDR xdr, SecurityHandler securityHandler,
       InetAddress client) {
     READ3Response response = new READ3Response(Nfs3Status.NFS3_OK);
-    final String userName = securityHandler.getUser();
     
     if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
       response.setStatus(Nfs3Status.NFS3ERR_ACCES);
       return response;
     }
     
-    DFSClient dfsClient = clientCache.getDfsClient(userName);
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
       return response;
@@ -628,40 +622,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       }
     }
     
-    // In case there is buffered data for the same file, flush it. This can be
-    // optimized later by reading from the cache.
-    int ret = writeManager.commitBeforeRead(dfsClient, handle, offset + count);
-    if (ret != Nfs3Status.NFS3_OK) {
-      LOG.warn("commitBeforeRead didn't succeed with ret=" + ret
-          + ". Read may not get most recent data.");
-    }
-
     try {
       int buffSize = Math.min(MAX_READ_TRANSFER_SIZE, count);
       byte[] readbuffer = new byte[buffSize];
 
-      int readCount = 0;
-      /**
-       * Retry exactly once because the DFSInputStream can be stale.
-       */
-      for (int i = 0; i < 1; ++i) {
-        FSDataInputStream fis = clientCache.getDfsInputStream(userName,
-            Nfs3Utils.getFileIdPath(handle));
-
-        try {
-          readCount = fis.read(offset, readbuffer, 0, count);
-        } catch (IOException e) {
-          // TODO: A cleaner way is to throw a new type of exception
-          // which requires incompatible changes.
-          if (e.getMessage() == "Stream closed") {
-            clientCache.invalidateDfsInputStream(userName,
-                Nfs3Utils.getFileIdPath(handle));
-            continue;
-          } else {
-            throw e;
-          }
-        }
-      }
+      DFSInputStream is = dfsClient.open(Nfs3Utils.getFileIdPath(handle));
+      FSDataInputStream fis = new FSDataInputStream(is);
+      
+      int readCount = fis.read(offset, readbuffer, 0, count);
+      fis.close();
 
       attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
           iug);
@@ -689,7 +658,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       SecurityHandler securityHandler, InetAddress client) {
     WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK);
 
-    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
       return response;
@@ -764,7 +733,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   public CREATE3Response create(XDR xdr, SecurityHandler securityHandler,
       InetAddress client) {
     CREATE3Response response = new CREATE3Response(Nfs3Status.NFS3_OK);
-    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
       return response;
@@ -788,8 +757,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     int createMode = request.getMode();
     if ((createMode != Nfs3Constant.CREATE_EXCLUSIVE)
-        && request.getObjAttr().getUpdateFields().contains(SetAttrField.SIZE)
-        && request.getObjAttr().getSize() != 0) {
+        && request.getObjAttr().getUpdateFields().contains(SetAttrField.SIZE)) {
       LOG.error("Setting file size is not supported when creating file: "
           + fileName + " dir fileId:" + dirHandle.getFileId());
       return new CREATE3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -842,23 +810,6 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       postOpObjAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
       dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr),
           dfsClient, dirFileIdPath, iug);
-      
-      // Add open stream
-      OpenFileCtx openFileCtx = new OpenFileCtx(fos, postOpObjAttr,
-          writeDumpDir + "/" + postOpObjAttr.getFileId(), dfsClient, iug);
-      fileHandle = new FileHandle(postOpObjAttr.getFileId());
-      if (!writeManager.addOpenFileStream(fileHandle, openFileCtx)) {
-        LOG.warn("Can't add more stream, close it."
-            + " Future write will become append");
-        fos.close();
-        fos = null;
-      } else {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Opened stream for file:" + fileName + ", fileId:"
-              + fileHandle.getFileId());
-        }
-      }
-      
     } catch (IOException e) {
       LOG.error("Exception", e);
       if (fos != null) {
@@ -887,6 +838,16 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       }
     }
     
+    // Add open stream
+    OpenFileCtx openFileCtx = new OpenFileCtx(fos, postOpObjAttr, writeDumpDir
+        + "/" + postOpObjAttr.getFileId());
+    fileHandle = new FileHandle(postOpObjAttr.getFileId());
+    writeManager.addOpenFileStream(fileHandle, openFileCtx);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("open stream for file:" + fileName + ", fileId:"
+          + fileHandle.getFileId());
+    }
+    
     return new CREATE3Response(Nfs3Status.NFS3_OK, fileHandle, postOpObjAttr,
         dirWcc);
   }
@@ -895,7 +856,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   public MKDIR3Response mkdir(XDR xdr, SecurityHandler securityHandler,
       InetAddress client) {
     MKDIR3Response response = new MKDIR3Response(Nfs3Status.NFS3_OK);
-    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
       return response;
@@ -991,7 +952,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   public REMOVE3Response remove(XDR xdr,
       SecurityHandler securityHandler, InetAddress client) {
     REMOVE3Response response = new REMOVE3Response(Nfs3Status.NFS3_OK);
-    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
       return response;
@@ -1066,7 +1027,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   public RMDIR3Response rmdir(XDR xdr, SecurityHandler securityHandler,
       InetAddress client) {
     RMDIR3Response response = new RMDIR3Response(Nfs3Status.NFS3_OK);
-    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
       return response;
@@ -1148,7 +1109,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   public RENAME3Response rename(XDR xdr, SecurityHandler securityHandler,
       InetAddress client) {
     RENAME3Response response = new RENAME3Response(Nfs3Status.NFS3_OK);
-    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
       return response;
@@ -1242,7 +1203,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       return response;
     }
 
-    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
       return response;
@@ -1297,29 +1258,6 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     return new READDIR3Response(Nfs3Status.NFS3ERR_NOTSUPP);
   }
 
-  /**
-   * Used by readdir and readdirplus to get dirents. It retries the listing if
-   * the startAfter can't be found anymore.
-   */
-  private DirectoryListing listPaths(DFSClient dfsClient, String dirFileIdPath,
-      byte[] startAfter) throws IOException {
-    DirectoryListing dlisting = null;
-    try {
-      dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);
-    } catch (RemoteException e) {
-      IOException io = e.unwrapRemoteException();
-      if (!(io instanceof DirectoryListingStartAfterNotFoundException)) {
-        throw io;
-      }
-      // This happens when startAfter was just deleted
-      LOG.info("Cookie cound't be found: " + new String(startAfter)
-          + ", do listing from beginning");
-      dlisting = dfsClient
-          .listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME);
-    }
-    return dlisting;
-  }
-  
   @Override
   public READDIR3Response readdir(XDR xdr, SecurityHandler securityHandler,
       InetAddress client) {
@@ -1330,7 +1268,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       return response;
     }
     
-    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
       return response;
@@ -1360,7 +1298,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           + cookie + " count: " + count);
     }
 
-    HdfsFileStatus dirStatus = null;
+    HdfsFileStatus dirStatus;
     DirectoryListing dlisting = null;
     Nfs3FileAttributes postOpAttr = null;
     long dotdotFileId = 0;
@@ -1404,8 +1342,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
         startAfter = inodeIdPath.getBytes();
       }
-      
-      dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
+      dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);
+
       postOpAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (postOpAttr == null) {
         LOG.error("Can't get path for fileId:" + handle.getFileId());
@@ -1467,7 +1405,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_ACCES);
     }
     
-    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
     if (dfsClient == null) {
       return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_SERVERFAULT);
     }
@@ -1488,15 +1426,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     }
     long dirCount = request.getDirCount();
     if (dirCount <= 0) {
-      LOG.info("Nonpositive dircount in invalid READDIRPLUS request:" + dirCount);
-      return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
+      LOG.info("Nonpositive count in invalid READDIRPLUS request:" + dirCount);
+      return new READDIRPLUS3Response(Nfs3Status.NFS3_OK);
     }
     int maxCount = request.getMaxCount();
-    if (maxCount <= 0) {
-      LOG.info("Nonpositive maxcount in invalid READDIRPLUS request:" + maxCount);
-      return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
-    }
-    
+
     if (LOG.isDebugEnabled()) {
       LOG.debug("NFS READDIRPLUS fileId: " + handle.getFileId() + " cookie: "
           + cookie + " dirCount: " + dirCount + " maxCount: " + maxCount);
@@ -1546,8 +1480,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
         startAfter = inodeIdPath.getBytes();
       }
-      
-      dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
+      dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);
+
       postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (postOpDirAttr == null) {
         LOG.info("Can't get path for fileId:" + handle.getFileId());
@@ -1624,7 +1558,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       return response;
     }
     
-    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
       return response;
@@ -1682,7 +1616,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       return response;
     }
     
-    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
       return response;
@@ -1734,7 +1668,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       return response;
     }
     
-    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
       return response;
@@ -1772,10 +1706,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   }
 
   @Override
-  public COMMIT3Response commit(XDR xdr, Channel channel, int xid,
-      SecurityHandler securityHandler, InetAddress client) {
+  public COMMIT3Response commit(XDR xdr, SecurityHandler securityHandler,
+      InetAddress client) {
     COMMIT3Response response = new COMMIT3Response(Nfs3Status.NFS3_OK);
-    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
       return response;
@@ -1814,10 +1748,18 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       long commitOffset = (request.getCount() == 0) ? 0
           : (request.getOffset() + request.getCount());
       
-      // Insert commit as an async request
-      writeManager.handleCommit(dfsClient, handle, commitOffset, channel, xid,
-          preOpAttr);
-      return null;
+      int status;
+      if (writeManager.handleCommit(handle, commitOffset)) {
+        status = Nfs3Status.NFS3_OK;
+      } else {
+        status = Nfs3Status.NFS3ERR_IO;
+      }
+      Nfs3FileAttributes postOpAttr = writeManager.getFileAttr(dfsClient,
+          handle, iug);
+      WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
+      return new COMMIT3Response(status, fileWcc,
+          Nfs3Constant.WRITE_COMMIT_VERF);
+
     } catch (IOException e) {
       LOG.warn("Exception ", e);
       Nfs3FileAttributes postOpAttr = null;
@@ -1950,7 +1892,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     } else if (nfsproc3 == NFSPROC3.PATHCONF) {
       response = pathconf(xdr, securityHandler, client);
     } else if (nfsproc3 == NFSPROC3.COMMIT) {
-      response = commit(xdr, channel, xid, securityHandler, client);
+      response = commit(xdr, securityHandler, client);
     } else {
       // Invalid procedure
       RpcAcceptedReply.getInstance(xid,
@@ -1995,9 +1937,4 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     }
     return true;
   }
-  
-  @VisibleForTesting
-  WriteManager getWriteManager() {
-    return this.writeManager;
-  }
 }
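
The RpcProgramNfs3 hunks above drop the listPaths() wrapper that caught DirectoryListingStartAfterNotFoundException (thrown when the startAfter cookie entry has just been deleted) and call dfsClient.listPaths() directly, so the restart-from-beginning fallback disappears. For reference, that fallback boils down to the pattern sketched below; DirectoryLister and CookieFallbackLister are hypothetical stand-ins used only for illustration, not Hadoop classes.

    import java.util.List;
    import java.util.NoSuchElementException;

    // Hypothetical interface standing in for the DFSClient listing call.
    // list() is expected to throw NoSuchElementException when startAfter
    // no longer exists in the directory.
    interface DirectoryLister {
      List<String> list(String dir, String startAfter);
    }

    // Sketch of the removed fallback: if the cookie entry was deleted
    // between two READDIR calls, restart the listing from the beginning
    // instead of failing the request.
    final class CookieFallbackLister {
      private final DirectoryLister lister;

      CookieFallbackLister(DirectoryLister lister) {
        this.lister = lister;
      }

      List<String> listFrom(String dir, String startAfter) {
        try {
          return lister.list(dir, startAfter);
        } catch (NoSuchElementException e) {
          // Mirrors the removed restart with an empty start name.
          return lister.list(dir, "");
        }
      }
    }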

+ 151 - 163
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java

@@ -18,6 +18,9 @@
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
 import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -25,24 +28,22 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.nfs.NfsFileType;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.IdUserGroup;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
-import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response;
 import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
 import org.apache.hadoop.nfs.nfs3.response.WccData;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.security.VerifierNone;
+import org.apache.hadoop.util.Daemon;
 import org.jboss.netty.channel.Channel;
 
-import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Maps;
 
 /**
  * Manage the writes and responds asynchronously.
@@ -52,70 +53,69 @@ public class WriteManager {
 
   private final Configuration config;
   private final IdUserGroup iug;
- 
+  private final ConcurrentMap<FileHandle, OpenFileCtx> openFileMap = Maps
+      .newConcurrentMap();
+
   private AsyncDataService asyncDataService;
   private boolean asyncDataServiceStarted = false;
 
-  private final int maxStreams;
-
+  private final StreamMonitor streamMonitor;
+  
   /**
    * The time limit to wait for accumulating reordered sequential writes to the
    * same file before the write is considered done.
    */
   private long streamTimeout;
-
-  private final OpenFileCtxCache fileContextCache;
-
-  static public class MultipleCachedStreamException extends IOException {
-    private static final long serialVersionUID = 1L;
-
-    public MultipleCachedStreamException(String msg) {
-      super(msg);
+  
+  public static final long DEFAULT_STREAM_TIMEOUT = 10 * 60 * 1000; //10 minutes
+  public static final long MINIMIUM_STREAM_TIMEOUT = 10 * 1000; //10 seconds
+  
+  void addOpenFileStream(FileHandle h, OpenFileCtx ctx) {
+    openFileMap.put(h, ctx);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("After add the new stream " + h.getFileId()
+          + ", the stream number:" + openFileMap.size());
     }
   }
 
-  boolean addOpenFileStream(FileHandle h, OpenFileCtx ctx) {
-    return fileContextCache.put(h, ctx);
-  }
-  
   WriteManager(IdUserGroup iug, final Configuration config) {
     this.iug = iug;
     this.config = config;
-    streamTimeout = config.getLong(Nfs3Constant.OUTPUT_STREAM_TIMEOUT,
-        Nfs3Constant.OUTPUT_STREAM_TIMEOUT_DEFAULT);
+    
+    streamTimeout = config.getLong("dfs.nfs3.stream.timeout",
+        DEFAULT_STREAM_TIMEOUT);
     LOG.info("Stream timeout is " + streamTimeout + "ms.");
-    if (streamTimeout < Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT) {
+    if (streamTimeout < MINIMIUM_STREAM_TIMEOUT) {
       LOG.info("Reset stream timeout to minimum value "
-          + Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT + "ms.");
-      streamTimeout = Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT;
+          + MINIMIUM_STREAM_TIMEOUT + "ms.");
+      streamTimeout = MINIMIUM_STREAM_TIMEOUT;
     }
-    maxStreams = config.getInt(Nfs3Constant.MAX_OPEN_FILES,
-        Nfs3Constant.MAX_OPEN_FILES_DEFAULT);
-    LOG.info("Maximum open streams is "+ maxStreams);
-    this.fileContextCache = new OpenFileCtxCache(config, streamTimeout);
+    
+    this.streamMonitor = new StreamMonitor();
   }
 
-  void startAsyncDataSerivce() {
-    if (asyncDataServiceStarted) {
-      return;
-    }
-    fileContextCache.start();
+  private void startAsyncDataSerivce() {
+    streamMonitor.start();
     this.asyncDataService = new AsyncDataService();
     asyncDataServiceStarted = true;
   }
 
-  void shutdownAsyncDataService() {
-    if (!asyncDataServiceStarted) {
-      return;
-    }
-    asyncDataServiceStarted = false;
+  private void shutdownAsyncDataService() {
     asyncDataService.shutdown();
-    fileContextCache.shutdown();
+    asyncDataServiceStarted = false;
+    streamMonitor.interrupt();
   }
 
   void handleWrite(DFSClient dfsClient, WRITE3Request request, Channel channel,
       int xid, Nfs3FileAttributes preOpAttr) throws IOException {
+    // First write request starts the async data service
+    if (!asyncDataServiceStarted) {
+      startAsyncDataSerivce();
+    }
+
+    long offset = request.getOffset();
     int count = request.getCount();
+    WriteStableHow stableHow = request.getStableHow();
     byte[] data = request.getData().array();
     if (data.length < count) {
       WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -126,12 +126,13 @@ public class WriteManager {
 
     FileHandle handle = request.getHandle();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("handleWrite " + request);
+      LOG.debug("handleWrite fileId: " + handle.getFileId() + " offset: "
+          + offset + " length:" + count + " stableHow:" + stableHow.getValue());
     }
 
     // Check if there is a stream to write
     FileHandle fileHandle = request.getHandle();
-    OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
+    OpenFileCtx openFileCtx = openFileMap.get(fileHandle);
     if (openFileCtx == null) {
       LOG.info("No opened stream for fileId:" + fileHandle.getFileId());
 
@@ -146,15 +147,6 @@ public class WriteManager {
         fos = dfsClient.append(fileIdPath, bufferSize, null, null);
 
         latestAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
-      } catch (RemoteException e) {
-        IOException io = e.unwrapRemoteException();
-        if (io instanceof AlreadyBeingCreatedException) {
-          LOG.warn("Can't append file:" + fileIdPath
-              + ". Possibly the file is being closed. Drop the request:"
-              + request + ", wait for the client to retry...");
-          return;
-        }
-        throw e;
       } catch (IOException e) {
         LOG.error("Can't apapend to file:" + fileIdPath + ", error:" + e);
         if (fos != null) {
@@ -174,127 +166,81 @@ public class WriteManager {
       String writeDumpDir = config.get(Nfs3Constant.FILE_DUMP_DIR_KEY,
           Nfs3Constant.FILE_DUMP_DIR_DEFAULT);
       openFileCtx = new OpenFileCtx(fos, latestAttr, writeDumpDir + "/"
-          + fileHandle.getFileId(), dfsClient, iug);
-
-      if (!addOpenFileStream(fileHandle, openFileCtx)) {
-        LOG.info("Can't add new stream. Close it. Tell client to retry.");
-        try {
-          fos.close();
-        } catch (IOException e) {
-          LOG.error("Can't close stream for fileId:" + handle.getFileId());
-        }
-        // Notify client to retry
-        WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
-        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_JUKEBOX,
-            fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
-        Nfs3Utils.writeChannel(channel,
-            response.writeHeaderAndResponse(new XDR(), xid, new VerifierNone()),
-            xid);
-        return;
-      }
-
+          + fileHandle.getFileId());
+      addOpenFileStream(fileHandle, openFileCtx);
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Opened stream for appending file:" + fileHandle.getFileId());
+        LOG.debug("opened stream for file:" + fileHandle.getFileId());
       }
     }
 
     // Add write into the async job queue
     openFileCtx.receivedNewWrite(dfsClient, request, channel, xid,
         asyncDataService, iug);
-    return;
-  }
-
-  // Do a possible commit before read request in case there is buffered data
-  // inside DFSClient which has been flushed but not synced.
-  int commitBeforeRead(DFSClient dfsClient, FileHandle fileHandle,
-      long commitOffset) {
-    int status;
-    OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
-
-    if (openFileCtx == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("No opened stream for fileId:" + fileHandle.getFileId()
-            + " commitOffset=" + commitOffset
-            + ". Return success in this case.");
-      }
-      status = Nfs3Status.NFS3_OK;
-
-    } else {
-      COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset,
-          null, 0, null, true);
-      switch (ret) {
-      case COMMIT_FINISHED:
-      case COMMIT_INACTIVE_CTX:
-        status = Nfs3Status.NFS3_OK;
-        break;
-      case COMMIT_INACTIVE_WITH_PENDING_WRITE:
-      case COMMIT_ERROR:
-        status = Nfs3Status.NFS3ERR_IO;
-        break;
-      case COMMIT_WAIT:
-        /**
-         * This should happen rarely in some possible cases, such as read
-         * request arrives before DFSClient is able to quickly flush data to DN,
-         * or Prerequisite writes is not available. Won't wait since we don't
-         * want to block read.
-         */     
-        status = Nfs3Status.NFS3ERR_JUKEBOX;
-        break;
-      default:
-        LOG.error("Should not get commit return code:" + ret.name());
-        throw new RuntimeException("Should not get commit return code:"
-            + ret.name());
+    // Block stable write
+    if (request.getStableHow() != WriteStableHow.UNSTABLE) {
+      if (handleCommit(fileHandle, offset + count)) {
+        Nfs3FileAttributes postOpAttr = getFileAttr(dfsClient, handle, iug);
+        WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr),
+            postOpAttr);
+        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
+            fileWcc, count, request.getStableHow(),
+            Nfs3Constant.WRITE_COMMIT_VERF);
+        Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
+            new XDR(), xid, new VerifierNone()), xid);
+      } else {
+        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO);
+        Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
+            new XDR(), xid, new VerifierNone()), xid);
       }
     }
-    return status;
+
+    return;
   }
-  
-  void handleCommit(DFSClient dfsClient, FileHandle fileHandle,
-      long commitOffset, Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
-    int status;
-    OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
 
+  boolean handleCommit(FileHandle fileHandle, long commitOffset) {
+    OpenFileCtx openFileCtx = openFileMap.get(fileHandle);
     if (openFileCtx == null) {
       LOG.info("No opened stream for fileId:" + fileHandle.getFileId()
-          + " commitOffset=" + commitOffset + ". Return success in this case.");
-      status = Nfs3Status.NFS3_OK;
+          + " commitOffset=" + commitOffset);
+      return true;
+    }
+    long timeout = 30 * 1000; // 30 seconds
+    long startCommit = System.currentTimeMillis();
+    while (true) {
+      int ret = openFileCtx.checkCommit(commitOffset);
+      if (ret == OpenFileCtx.COMMIT_FINISHED) {
+        // Committed
+        return true;
+      } else if (ret == OpenFileCtx.COMMIT_INACTIVE_CTX) {
+        LOG.info("Inactive stream, fileId=" + fileHandle.getFileId()
+            + " commitOffset=" + commitOffset);
+        return true;
+      } else if (ret == OpenFileCtx.COMMIT_INACTIVE_WITH_PENDING_WRITE) {
+        LOG.info("Inactive stream with pending writes, fileId="
+            + fileHandle.getFileId() + " commitOffset=" + commitOffset);
+        return false;
+      }
+      assert (ret == OpenFileCtx.COMMIT_WAIT || ret == OpenFileCtx.COMMIT_ERROR);
+      if (ret == OpenFileCtx.COMMIT_ERROR) {
+        return false;
+      }
       
-    } else {
-      COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset,
-          channel, xid, preOpAttr, false);
-      switch (ret) {
-      case COMMIT_FINISHED:
-      case COMMIT_INACTIVE_CTX:
-        status = Nfs3Status.NFS3_OK;
-        break;
-      case COMMIT_INACTIVE_WITH_PENDING_WRITE:
-      case COMMIT_ERROR:
-        status = Nfs3Status.NFS3ERR_IO;
-        break;
-      case COMMIT_WAIT:
-        // Do nothing. Commit is async now.
-        return;
-      default:
-        LOG.error("Should not get commit return code:" + ret.name());
-        throw new RuntimeException("Should not get commit return code:"
-            + ret.name());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Not committed yet, wait., fileId=" + fileHandle.getFileId()
+            + " commitOffset=" + commitOffset);
       }
-    }
-    
-    // Send out the response
-    Nfs3FileAttributes postOpAttr = null;
-    try {
-      String fileIdPath = Nfs3Utils.getFileIdPath(preOpAttr.getFileid());
-      postOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
-    } catch (IOException e1) {
-      LOG.info("Can't get postOpAttr for fileId: " + preOpAttr.getFileid());
-    }
-    WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
-    COMMIT3Response response = new COMMIT3Response(status, fileWcc,
-        Nfs3Constant.WRITE_COMMIT_VERF);
-    Nfs3Utils.writeChannelCommit(channel,
-        response.writeHeaderAndResponse(new XDR(), xid, new VerifierNone()),
-        xid);
+      if (System.currentTimeMillis() - startCommit > timeout) {
+        // Commit took too long, return error
+        return false;
+      }
+      try {
+        Thread.sleep(100);
+      } catch (InterruptedException e) {
+        LOG.info("Commit is interrupted, fileId=" + fileHandle.getFileId()
+            + " commitOffset=" + commitOffset);
+        return false;
+      }
+    }// while
   }
 
   /**
@@ -305,7 +251,7 @@ public class WriteManager {
     String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle);
     Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug);
     if (attr != null) {
-      OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
+      OpenFileCtx openFileCtx = openFileMap.get(fileHandle);
       if (openFileCtx != null) {
         attr.setSize(openFileCtx.getNextOffset());
         attr.setUsed(openFileCtx.getNextOffset());
@@ -320,8 +266,8 @@ public class WriteManager {
     Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug);
 
     if ((attr != null) && (attr.getType() == NfsFileType.NFSREG.toValue())) {
-      OpenFileCtx openFileCtx = fileContextCache.get(new FileHandle(attr
-          .getFileId()));
+      OpenFileCtx openFileCtx = openFileMap
+          .get(new FileHandle(attr.getFileId()));
 
       if (openFileCtx != null) {
         attr.setSize(openFileCtx.getNextOffset());
@@ -330,9 +276,51 @@ public class WriteManager {
     }
     return attr;
   }
+  
+  /**
+   * StreamMonitor wakes up periodically to find and close idle streams.
+   */
+  class StreamMonitor extends Daemon {
+    private int rotation = 5 * 1000; // 5 seconds
+    private long lastWakeupTime = 0;
 
-  @VisibleForTesting
-  OpenFileCtxCache getOpenFileCtxCache() {
-    return this.fileContextCache;
+    @Override
+    public void run() {
+      while (true) {
+        Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
+            .iterator();
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("openFileMap size:" + openFileMap.size());
+        }
+        while (it.hasNext()) {
+          Entry<FileHandle, OpenFileCtx> pairs = it.next();
+          OpenFileCtx ctx = pairs.getValue();
+          if (ctx.streamCleanup((pairs.getKey()).getFileId(), streamTimeout)) {
+            it.remove();
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("After remove stream " + pairs.getKey().getFileId()
+                  + ", the stream number:" + openFileMap.size());
+            }
+          }
+        }
+
+        // Check if it can sleep
+        try {
+          long workedTime = System.currentTimeMillis() - lastWakeupTime;
+          if (workedTime < rotation) {
+            if (LOG.isTraceEnabled()) {
+              LOG.trace("StreamMonitor can still have a sleep:"
+                  + ((rotation - workedTime) / 1000));
+            }
+            Thread.sleep(rotation - workedTime);
+          }
+          lastWakeupTime = System.currentTimeMillis();
+
+        } catch (InterruptedException e) {
+          LOG.info("StreamMonitor got interrupted");
+          return;
+        }
+      }
+    }
   }
 }
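
The restored handleCommit() above replaces the asynchronous commit path with a bounded polling loop: it re-checks the open-file context roughly every 100 ms and gives up after 30 seconds. A minimal sketch of that polling pattern, with placeholder status constants instead of the OpenFileCtx codes:

    import java.util.function.IntSupplier;

    // Bounded polling: keep re-checking a status supplier until it reports
    // success or failure, sleeping briefly between checks and giving up
    // once the timeout elapses. The constants are illustrative only.
    final class CommitPoller {
      static final int FINISHED = 0;
      static final int WAIT = 1;
      static final int ERROR = 2;

      /** Returns true if the commit finished before the timeout elapsed. */
      static boolean waitForCommit(IntSupplier checkCommit, long timeoutMs) {
        long start = System.currentTimeMillis();
        while (true) {
          int status = checkCommit.getAsInt();
          if (status == FINISHED) {
            return true;
          }
          if (status == ERROR) {
            return false;
          }
          if (System.currentTimeMillis() - start > timeoutMs) {
            return false;          // commit took too long
          }
          try {
            Thread.sleep(100);     // back off briefly before re-checking
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return false;
          }
        }
      }
    }

The trade-off is visible in the diff itself: the restored code can tie up an NFS worker thread for up to 30 seconds per COMMIT, which is what the removed asynchronous handleCommit() (and the reverted HDFS-5281 "COMMIT request should not block" change in CHANGES.txt) was meant to avoid.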

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java

@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.nfs;
 
 import java.io.IOException;
 import java.net.InetAddress;
+import java.util.ArrayList;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -43,15 +45,13 @@ public class TestMountd {
         .build();
     cluster.waitActive();
     
-    // Use emphral port in case tests are running in parallel
-    config.setInt("nfs3.mountd.port", 0);
-    config.setInt("nfs3.server.port", 0);
-    
     // Start nfs
-    Nfs3 nfs3 = new Nfs3(config);
-    nfs3.startServiceInternal(false);
+    List<String> exports = new ArrayList<String>();
+    exports.add("/");
+    Nfs3 nfs3 = new Nfs3(exports, config);
+    nfs3.start(false);
 
-    RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountd()
+    RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountBase()
         .getRpcProgram();
     mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));
     

+ 1 - 5
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java

@@ -23,7 +23,6 @@ import java.util.Arrays;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Utils;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
@@ -136,7 +135,6 @@ public class TestOutOfOrderWrite {
     @Override
     protected ChannelPipelineFactory setPipelineFactory() {
       this.pipelineFactory = new ChannelPipelineFactory() {
-        @Override
         public ChannelPipeline getPipeline() {
           return Channels.pipeline(
               RpcUtil.constructRpcFrameDecoder(),
@@ -155,9 +153,7 @@ public class TestOutOfOrderWrite {
     Arrays.fill(data3, (byte) 9);
 
     // NFS3 Create request
-    Configuration conf = new Configuration();
-    WriteClient client = new WriteClient("localhost", conf.getInt(
-        Nfs3Constant.NFS3_SERVER_PORT, Nfs3Constant.NFS3_SERVER_PORT_DEFAULT),
+    WriteClient client = new WriteClient("localhost", Nfs3Constant.PORT,
         create(), false);
     client.run();
 

+ 141 - 0
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestPortmapRegister.java

@@ -0,0 +1,141 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.nfs;
+
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd;
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
+import org.apache.hadoop.oncrpc.RegistrationClient;
+import org.apache.hadoop.oncrpc.RpcCall;
+import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.CredentialsNone;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
+import org.apache.hadoop.portmap.PortmapMapping;
+import org.apache.hadoop.portmap.PortmapRequest;
+
+public class TestPortmapRegister {
+  
+  public static final Log LOG = LogFactory.getLog(TestPortmapRegister.class);
+  
+  static void testRequest(XDR request, XDR request2) {
+    RegistrationClient registrationClient = new RegistrationClient(
+        "localhost", Nfs3Constant.SUN_RPCBIND, request);
+    registrationClient.run();
+  }
+ 
+  public static void main(String[] args) throws InterruptedException {
+    PortmapMapping mapEntry = new PortmapMapping(RpcProgramMountd.PROGRAM,
+        RpcProgramMountd.VERSION_1, PortmapMapping.TRANSPORT_UDP,
+        RpcProgramMountd.PORT);
+    XDR mappingRequest = PortmapRequest.create(mapEntry);
+    RegistrationClient registrationClient = new RegistrationClient(
+        "localhost", Nfs3Constant.SUN_RPCBIND, mappingRequest);
+    registrationClient.run();
+        
+    Thread t1 = new Runtest1();
+    //Thread t2 = testa.new Runtest2();
+    t1.start();
+    //t2.start();
+    t1.join();
+    //t2.join();
+    //testDump();
+  }
+  
+  static class Runtest1 extends Thread {
+    @Override
+    public void run() {
+      //testGetportMount();
+      PortmapMapping mapEntry = new PortmapMapping(RpcProgramMountd.PROGRAM,
+          RpcProgramMountd.VERSION_1, PortmapMapping.TRANSPORT_UDP,
+          RpcProgramMountd.PORT);
+      XDR req = PortmapRequest.create(mapEntry);
+      testRequest(req, req);
+    }
+  }
+  
+  static class Runtest2 extends Thread {
+    @Override
+    public void run() {
+      testDump();
+    }
+  }
+  
+  static void createPortmapXDRheader(XDR xdr_out, int procedure) {
+    // TODO: Move this to RpcRequest
+    RpcCall.getInstance(0, 100000, 2, procedure, new CredentialsNone(),
+        new VerifierNone()).write(xdr_out);
+    
+    /*
+    xdr_out.putInt(1); //unix auth
+    xdr_out.putVariableOpaque(new byte[20]);
+    xdr_out.putInt(0);
+    xdr_out.putInt(0);
+*/
+  }
+ 
+  static void testGetportMount() {
+    XDR xdr_out = new XDR();
+
+    createPortmapXDRheader(xdr_out, 3);
+
+    xdr_out.writeInt(100005);
+    xdr_out.writeInt(1);
+    xdr_out.writeInt(6);
+    xdr_out.writeInt(0);
+
+    XDR request2 = new XDR();
+
+    createPortmapXDRheader(xdr_out, 3);
+    request2.writeInt(100005);
+    request2.writeInt(1);
+    request2.writeInt(6);
+    request2.writeInt(0);
+
+    testRequest(xdr_out, request2);
+  }
+  
+  static void testGetport() {
+    XDR xdr_out = new XDR();
+
+    createPortmapXDRheader(xdr_out, 3);
+
+    xdr_out.writeInt(100003);
+    xdr_out.writeInt(3);
+    xdr_out.writeInt(6);
+    xdr_out.writeInt(0);
+
+    XDR request2 = new XDR();
+
+    createPortmapXDRheader(xdr_out, 3);
+    request2.writeInt(100003);
+    request2.writeInt(3);
+    request2.writeInt(6);
+    request2.writeInt(0);
+
+    testRequest(xdr_out, request2);
+  }
+  
+  static void testDump() {
+    XDR xdr_out = new XDR();
+    createPortmapXDRheader(xdr_out, 4);
+    testRequest(xdr_out, xdr_out);
+  }
+}

+ 0 - 196
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestReaddir.java

@@ -1,196 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.nfs;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3;
-import org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.nfs.nfs3.FileHandle;
-import org.apache.hadoop.nfs.nfs3.response.READDIR3Response;
-import org.apache.hadoop.nfs.nfs3.response.READDIR3Response.Entry3;
-import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response;
-import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response.EntryPlus3;
-import org.apache.hadoop.oncrpc.XDR;
-import org.apache.hadoop.oncrpc.security.SecurityHandler;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-/**
- * Test READDIR and READDIRPLUS request with zero, nonzero cookies
- */
-public class TestReaddir {
-
-  static Configuration config = new Configuration();
-  static MiniDFSCluster cluster = null;
-  static DistributedFileSystem hdfs;
-  static NameNode nn;
-  static RpcProgramNfs3 nfsd;
-  static String testdir = "/tmp";
-  static SecurityHandler securityHandler;
-  
-  @BeforeClass
-  public static void setup() throws Exception {
-    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
-    cluster.waitActive();
-    hdfs = cluster.getFileSystem();
-    nn = cluster.getNameNode();
-
-    // Use emphral port in case tests are running in parallel
-    config.setInt("nfs3.mountd.port", 0);
-    config.setInt("nfs3.server.port", 0);
-    
-    // Start nfs
-    Nfs3 nfs3 = new Nfs3(config);
-    nfs3.startServiceInternal(false);
-
-    nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
-
-    securityHandler = Mockito.mock(SecurityHandler.class);
-    Mockito.when(securityHandler.getUser()).thenReturn(
-        System.getProperty("user.name"));
-  }
-
-  @AfterClass
-  public static void shutdown() throws Exception {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Before
-  public void createFiles() throws IllegalArgumentException, IOException {
-    hdfs.delete(new Path(testdir), true);
-    hdfs.mkdirs(new Path(testdir));
-    DFSTestUtil.createFile(hdfs, new Path(testdir + "/f1"), 0, (short) 1, 0);
-    DFSTestUtil.createFile(hdfs, new Path(testdir + "/f2"), 0, (short) 1, 0);
-    DFSTestUtil.createFile(hdfs, new Path(testdir + "/f3"), 0, (short) 1, 0);
-  }
-  
-  @Test
-  public void testReaddirBasic() throws IOException {
-    // Get inodeId of /tmp
-    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
-    long dirId = status.getFileId();
-
-    // Create related part of the XDR request
-    XDR xdr_req = new XDR();
-    FileHandle handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeLongAsHyper(0); // cookie
-    xdr_req.writeLongAsHyper(0); // verifier
-    xdr_req.writeInt(100); // count
-
-    READDIR3Response response = nfsd.readdir(xdr_req.asReadOnlyWrap(),
-        securityHandler, InetAddress.getLocalHost());
-    List<Entry3> dirents = response.getDirList().getEntries();
-    assertTrue(dirents.size() == 5); // inculding dot, dotdot
-
-    // Test start listing from f2
-    status = nn.getRpcServer().getFileInfo(testdir + "/f2");
-    long f2Id = status.getFileId();
-
-    // Create related part of the XDR request
-    xdr_req = new XDR();
-    handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeLongAsHyper(f2Id); // cookie
-    xdr_req.writeLongAsHyper(0); // verifier
-    xdr_req.writeInt(100); // count
-
-    response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler,
-        InetAddress.getLocalHost());
-    dirents = response.getDirList().getEntries();
-    assertTrue(dirents.size() == 1);
-    Entry3 entry = dirents.get(0);
-    assertTrue(entry.getName().equals("f3"));
-
-    // When the cookie is deleted, list starts over no including dot, dotdot
-    hdfs.delete(new Path(testdir + "/f2"), false);
-
-    response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler,
-        InetAddress.getLocalHost());
-    dirents = response.getDirList().getEntries();
-    assertTrue(dirents.size() == 2); // No dot, dotdot
-  }
-  
-  @Test
-  // Test readdirplus
-  public void testReaddirPlus() throws IOException {
-    // Get inodeId of /tmp
-    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
-    long dirId = status.getFileId();
-    
-    // Create related part of the XDR request
-    XDR xdr_req = new XDR();
-    FileHandle handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeLongAsHyper(0); // cookie
-    xdr_req.writeLongAsHyper(0); // verifier
-    xdr_req.writeInt(100); // dirCount
-    xdr_req.writeInt(1000); // maxCount
-
-    READDIRPLUS3Response responsePlus = nfsd.readdirplus(
-        xdr_req.asReadOnlyWrap(), securityHandler, InetAddress.getLocalHost());
-    List<EntryPlus3> direntPlus = responsePlus.getDirListPlus().getEntries();
-    assertTrue(direntPlus.size() == 5); // including dot, dotdot
-
-    // Test start listing from f2
-    status = nn.getRpcServer().getFileInfo(testdir + "/f2");
-    long f2Id = status.getFileId();
-
-    // Create related part of the XDR request
-    xdr_req = new XDR();
-    handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeLongAsHyper(f2Id); // cookie
-    xdr_req.writeLongAsHyper(0); // verifier
-    xdr_req.writeInt(100); // dirCount
-    xdr_req.writeInt(1000); // maxCount
-
-    responsePlus = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler,
-        InetAddress.getLocalHost());
-    direntPlus = responsePlus.getDirListPlus().getEntries();
-    assertTrue(direntPlus.size() == 1);
-    EntryPlus3 entryPlus = direntPlus.get(0);
-    assertTrue(entryPlus.getName().equals("f3"));
-
-    // When the cookie is deleted, list starts over no including dot, dotdot
-    hdfs.delete(new Path(testdir + "/f2"), false);
-
-    responsePlus = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler,
-        InetAddress.getLocalHost());
-    direntPlus = responsePlus.getDirListPlus().getEntries();
-    assertTrue(direntPlus.size() == 2); // No dot, dotdot
-  }
-}

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestDFSClientCache.java

@@ -39,12 +39,12 @@ public class TestDFSClientCache {
 
     DFSClientCache cache = new DFSClientCache(conf, MAX_CACHE_SIZE);
 
-    DFSClient c1 = cache.getDfsClient("test1");
-    assertTrue(cache.getDfsClient("test1").toString().contains("ugi=test1"));
-    assertEquals(c1, cache.getDfsClient("test1"));
+    DFSClient c1 = cache.get("test1");
+    assertTrue(cache.get("test1").toString().contains("ugi=test1"));
+    assertEquals(c1, cache.get("test1"));
     assertFalse(isDfsClientClose(c1));
 
-    cache.getDfsClient("test2");
+    cache.get("test2");
     assertTrue(isDfsClientClose(c1));
     assertEquals(MAX_CACHE_SIZE - 1, cache.clientCache.size());
   }
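
The assertions above depend on DFSClientCache evicting an older client and closing it once the cache is over capacity. A minimal sketch of that evict-and-close behaviour, assuming a plain access-ordered LinkedHashMap rather than the actual DFSClientCache internals:

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.LinkedHashMap;
    import java.util.Map;

    // Access-ordered map that closes the evicted value when the cache
    // exceeds maxSize; illustrative only, not the DFSClientCache code.
    final class ClosingLruCache<K, V extends Closeable> {
      private final LinkedHashMap<K, V> map;

      ClosingLruCache(final int maxSize) {
        this.map = new LinkedHashMap<K, V>(16, 0.75f, true) {
          @Override
          protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
            if (size() > maxSize) {
              try {
                eldest.getValue().close();  // release the evicted client
              } catch (IOException e) {
                // best effort; a real cache would log this
              }
              return true;
            }
            return false;
          }
        };
      }

      synchronized V get(K key) {
        return map.get(key);
      }

      synchronized void put(K key, V value) {
        map.put(key, value);
      }
    }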

+ 0 - 65
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java

@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.nfs.nfs3;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.nfs.mount.Mountd;
-import org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd;
-import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
-import org.junit.Test;
-
-public class TestExportsTable {
- 
-  @Test
-  public void testExportPoint() throws IOException {
-    Configuration config = new Configuration();
-    MiniDFSCluster cluster = null;
-
-    String exportPoint = "/myexport1";
-    config.setStrings(Nfs3Constant.EXPORT_POINT, exportPoint);
-    // Use emphral port in case tests are running in parallel
-    config.setInt("nfs3.mountd.port", 0);
-    config.setInt("nfs3.server.port", 0);
-    
-    try {
-      cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
-      cluster.waitActive();
-
-      // Start nfs
-      final Nfs3 nfsServer = new Nfs3(config);
-      nfsServer.startServiceInternal(false);
-
-      Mountd mountd = nfsServer.getMountd();
-      RpcProgramMountd rpcMount = (RpcProgramMountd) mountd.getRpcProgram();
-      assertTrue(rpcMount.getExports().size() == 1);
-
-      String exportInMountd = rpcMount.getExports().get(0);
-      assertTrue(exportInMountd.equals(exportPoint));
-
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
-  }
-}

+ 0 - 141
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOpenFileCtxCache.java

@@ -1,141 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.nfs.nfs3;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx;
-import org.apache.hadoop.nfs.nfs3.FileHandle;
-import org.apache.hadoop.nfs.nfs3.IdUserGroup;
-import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
-import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-public class TestOpenFileCtxCache {
-  static boolean cleaned = false;
-
-  @Test
-  public void testEviction() throws IOException, InterruptedException {
-    Configuration conf = new Configuration();
-
-    // Only two entries will be in the cache
-    conf.setInt(Nfs3Constant.MAX_OPEN_FILES, 2);
-
-    DFSClient dfsClient = Mockito.mock(DFSClient.class);
-    Nfs3FileAttributes attr = new Nfs3FileAttributes();
-    HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
-    Mockito.when(fos.getPos()).thenReturn((long) 0);
-
-    OpenFileCtx context1 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup());
-    OpenFileCtx context2 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup());
-    OpenFileCtx context3 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup());
-    OpenFileCtx context4 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup());
-    OpenFileCtx context5 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup());
-
-    OpenFileCtxCache cache = new OpenFileCtxCache(conf, 10 * 60 * 100);
-
-    boolean ret = cache.put(new FileHandle(1), context1);
-    assertTrue(ret);
-    Thread.sleep(1000);
-    ret = cache.put(new FileHandle(2), context2);
-    assertTrue(ret);
-    ret = cache.put(new FileHandle(3), context3);
-    assertFalse(ret);
-    assertTrue(cache.size() == 2);
-
-    // Wait for the oldest stream to be evict-able, insert again
-    Thread.sleep(Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT);
-    assertTrue(cache.size() == 2);
-
-    ret = cache.put(new FileHandle(3), context3);
-    assertTrue(ret);
-    assertTrue(cache.size() == 2);
-    assertTrue(cache.get(new FileHandle(1)) == null);
-
-    // Test inactive entry is evicted immediately
-    context3.setActiveStatusForTest(false);
-    ret = cache.put(new FileHandle(4), context4);
-    assertTrue(ret);
-
-    // Now the cache has context2 and context4
-    // Test eviction failure if all entries have pending work.
-    context2.getPendingWritesForTest().put(new OffsetRange(0, 100),
-        new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
-    context4.getPendingCommitsForTest().put(new Long(100),
-        new CommitCtx(0, null, 0, attr));
-    Thread.sleep(Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT);
-    ret = cache.put(new FileHandle(5), context5);
-    assertFalse(ret);
-  }
-
-  @Test
-  public void testScan() throws IOException, InterruptedException {
-    Configuration conf = new Configuration();
-
-    // Only two entries will be in the cache
-    conf.setInt(Nfs3Constant.MAX_OPEN_FILES, 2);
-
-    DFSClient dfsClient = Mockito.mock(DFSClient.class);
-    Nfs3FileAttributes attr = new Nfs3FileAttributes();
-    HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
-    Mockito.when(fos.getPos()).thenReturn((long) 0);
-
-    OpenFileCtx context1 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup());
-    OpenFileCtx context2 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup());
-    OpenFileCtx context3 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup());
-    OpenFileCtx context4 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup());
-
-    OpenFileCtxCache cache = new OpenFileCtxCache(conf, 10 * 60 * 100);
-
-    // Test cleaning expired entry
-    boolean ret = cache.put(new FileHandle(1), context1);
-    assertTrue(ret);
-    ret = cache.put(new FileHandle(2), context2);
-    assertTrue(ret);
-    Thread.sleep(Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT + 1);
-    cache.scan(Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT);
-    assertTrue(cache.size() == 0);
-
-    // Test cleaning inactive entry
-    ret = cache.put(new FileHandle(3), context3);
-    assertTrue(ret);
-    ret = cache.put(new FileHandle(4), context4);
-    assertTrue(ret);
-    context3.setActiveStatusForTest(false);
-    cache.scan(Nfs3Constant.OUTPUT_STREAM_TIMEOUT_DEFAULT);
-    assertTrue(cache.size() == 1);
-    assertTrue(cache.get(new FileHandle(3)) == null);
-    assertTrue(cache.get(new FileHandle(4)) != null);
-  }
-}

+ 2 - 277
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java

@@ -17,43 +17,15 @@
  */
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 import java.io.IOException;
-import java.net.InetAddress;
 import java.nio.ByteBuffer;
-import java.util.Arrays;
-import java.util.concurrent.ConcurrentNavigableMap;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS;
-import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import junit.framework.Assert;
+
 import org.apache.hadoop.nfs.nfs3.FileHandle;
-import org.apache.hadoop.nfs.nfs3.IdUserGroup;
-import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
-import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
-import org.apache.hadoop.nfs.nfs3.Nfs3Status;
-import org.apache.hadoop.nfs.nfs3.request.CREATE3Request;
-import org.apache.hadoop.nfs.nfs3.request.READ3Request;
-import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
 import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
-import org.apache.hadoop.nfs.nfs3.response.CREATE3Response;
-import org.apache.hadoop.nfs.nfs3.response.READ3Response;
-import org.apache.hadoop.oncrpc.XDR;
-import org.apache.hadoop.oncrpc.security.SecurityHandler;
-import org.jboss.netty.channel.Channel;
-import org.junit.Assert;
 import org.junit.Test;
-import org.mockito.Mockito;
 
 public class TestWrites {
   @Test
@@ -125,251 +97,4 @@ public class TestWrites {
     Assert.assertTrue(limit - position == 1);
     Assert.assertTrue(appendedData.get(position) == (byte) 19);
   }
-
-  @Test
-  // Validate all the commit check return codes OpenFileCtx.COMMIT_STATUS, which
-  // includes COMMIT_FINISHED, COMMIT_WAIT, COMMIT_INACTIVE_CTX,
-  // COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR, and COMMIT_DO_SYNC.
-  public void testCheckCommit() throws IOException {
-    DFSClient dfsClient = Mockito.mock(DFSClient.class);
-    Nfs3FileAttributes attr = new Nfs3FileAttributes();
-    HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
-    Mockito.when(fos.getPos()).thenReturn((long) 0);
-
-    OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
-        new IdUserGroup());
-
-    COMMIT_STATUS ret;
-
-    // Test inactive open file context
-    ctx.setActiveStatusForTest(false);
-    Channel ch = Mockito.mock(Channel.class);
-    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
-    Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_CTX);
-
-    ctx.getPendingWritesForTest().put(new OffsetRange(5, 10),
-        new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
-    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
-    Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE);
-
-    // Test request with non zero commit offset
-    ctx.setActiveStatusForTest(true);
-    Mockito.when(fos.getPos()).thenReturn((long) 10);
-    COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false);
-    Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
-    // Do_SYNC state will be updated to FINISHED after data sync
-    ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, false);
-    Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
-    
-    status = ctx.checkCommitInternal(10, ch, 1, attr, false);
-    Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
-    ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, false);
-    Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
-
-    ConcurrentNavigableMap<Long, CommitCtx> commits = ctx
-        .getPendingCommitsForTest();
-    Assert.assertTrue(commits.size() == 0);
-    ret = ctx.checkCommit(dfsClient, 11, ch, 1, attr, false);
-    Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT);
-    Assert.assertTrue(commits.size() == 1);
-    long key = commits.firstKey();
-    Assert.assertTrue(key == 11);
-
-    // Test request with zero commit offset
-    commits.remove(new Long(11));
-    // There is one pending write [5,10]
-    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
-    Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT);
-    Assert.assertTrue(commits.size() == 1);
-    key = commits.firstKey();
-    Assert.assertTrue(key == 9);
-
-    // Empty pending writes
-    ctx.getPendingWritesForTest().remove(new OffsetRange(5, 10));
-    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
-    Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
-  }
-
-  @Test
-  // Validate all the commit check return codes OpenFileCtx.COMMIT_STATUS, which
-  // includes COMMIT_FINISHED, COMMIT_WAIT, COMMIT_INACTIVE_CTX,
-  // COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR, and COMMIT_DO_SYNC.
-  public void testCheckCommitFromRead() throws IOException {
-    DFSClient dfsClient = Mockito.mock(DFSClient.class);
-    Nfs3FileAttributes attr = new Nfs3FileAttributes();
-    HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
-    Mockito.when(fos.getPos()).thenReturn((long) 0);
-
-    OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
-        new IdUserGroup());
-
-    FileHandle h = new FileHandle(1); // fake handle for "/dumpFilePath"
-    COMMIT_STATUS ret;
-    WriteManager wm = new WriteManager(new IdUserGroup(), new Configuration());
-    assertTrue(wm.addOpenFileStream(h, ctx));
-    
-    // Test inactive open file context
-    ctx.setActiveStatusForTest(false);
-    Channel ch = Mockito.mock(Channel.class);
-    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
-    assertEquals( COMMIT_STATUS.COMMIT_INACTIVE_CTX, ret);
-    assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0));
-    
-    ctx.getPendingWritesForTest().put(new OffsetRange(5, 10),
-        new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
-    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
-    assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE, ret);
-    assertEquals(Nfs3Status.NFS3ERR_IO, wm.commitBeforeRead(dfsClient, h, 0));
-    
-    // Test request with non zero commit offset
-    ctx.setActiveStatusForTest(true);
-    Mockito.when(fos.getPos()).thenReturn((long) 10);
-    COMMIT_STATUS status = ctx.checkCommitInternal(5, ch, 1, attr, false);
-    assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC, status);
-    // Do_SYNC state will be updated to FINISHED after data sync
-    ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, true);
-    assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
-    assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 5));
- 
-    status = ctx.checkCommitInternal(10, ch, 1, attr, true);
-    assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
-    ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, true);
-    assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
-    assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 10));
-
-    ConcurrentNavigableMap<Long, CommitCtx> commits = ctx
-        .getPendingCommitsForTest();
-    assertTrue(commits.size() == 0);
-    ret = ctx.checkCommit(dfsClient, 11, ch, 1, attr, true);
-    assertEquals(COMMIT_STATUS.COMMIT_WAIT, ret);
-    assertEquals(0, commits.size()); // commit triggered by read doesn't wait
-    assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 11));
-
-    // Test request with zero commit offset
-    // There is one pending write [5,10]
-    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
-    assertEquals(COMMIT_STATUS.COMMIT_WAIT, ret);
-    assertEquals(0, commits.size());
-    assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 0));
-
-    // Empty pending writes
-    ctx.getPendingWritesForTest().remove(new OffsetRange(5, 10));
-    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
-    assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
-    assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0));
-  }
-  
-  private void waitWrite(RpcProgramNfs3 nfsd, FileHandle handle, int maxWaitTime)
-      throws InterruptedException {
-    int waitedTime = 0;
-    OpenFileCtx ctx = nfsd.getWriteManager()
-        .getOpenFileCtxCache().get(handle);
-    assertTrue(ctx != null);
-    do {
-      Thread.sleep(3000);
-      waitedTime += 3000;
-      if (ctx.getPendingWritesForTest().size() == 0) {
-        return;
-      }
-    } while (waitedTime < maxWaitTime);
-
-    fail("Write can't finish.");
-  }
-
-  @Test
-  public void testWriteStableHow() throws IOException, InterruptedException {
-    HdfsConfiguration config = new HdfsConfiguration();
-    DFSClient client = null;
-    MiniDFSCluster cluster = null;
-    RpcProgramNfs3 nfsd;
-    SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
-    Mockito.when(securityHandler.getUser()).thenReturn(
-        System.getProperty("user.name"));
-
-    try {
-      cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
-      cluster.waitActive();
-      client = new DFSClient(NameNode.getAddress(config), config);
-
-      // Use emphral port in case tests are running in parallel
-      config.setInt("nfs3.mountd.port", 0);
-      config.setInt("nfs3.server.port", 0);
-      
-      // Start nfs
-      Nfs3 nfs3 = new Nfs3(config);
-      nfs3.startServiceInternal(false);
-      nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
-
-      HdfsFileStatus status = client.getFileInfo("/");
-      FileHandle rootHandle = new FileHandle(status.getFileId());
-      // Create file1
-      CREATE3Request createReq = new CREATE3Request(rootHandle, "file1",
-          Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
-      XDR createXdr = new XDR();
-      createReq.serialize(createXdr);
-      CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
-          securityHandler, InetAddress.getLocalHost());
-      FileHandle handle = createRsp.getObjHandle();
-
-      // Test DATA_SYNC
-      byte[] buffer = new byte[10];
-      for (int i = 0; i < 10; i++) {
-        buffer[i] = (byte) i;
-      }
-      WRITE3Request writeReq = new WRITE3Request(handle, 0, 10,
-          WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
-      XDR writeXdr = new XDR();
-      writeReq.serialize(writeXdr);
-      nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
-          InetAddress.getLocalHost());
-
-      waitWrite(nfsd, handle, 60000);
-
-      // Readback
-      READ3Request readReq = new READ3Request(handle, 0, 10);
-      XDR readXdr = new XDR();
-      readReq.serialize(readXdr);
-      READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
-          securityHandler, InetAddress.getLocalHost());
-
-      assertTrue(Arrays.equals(buffer, readRsp.getData().array()));
-
-      // Test FILE_SYNC
-
-      // Create file2
-      CREATE3Request createReq2 = new CREATE3Request(rootHandle, "file2",
-          Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
-      XDR createXdr2 = new XDR();
-      createReq2.serialize(createXdr2);
-      CREATE3Response createRsp2 = nfsd.create(createXdr2.asReadOnlyWrap(),
-          securityHandler, InetAddress.getLocalHost());
-      FileHandle handle2 = createRsp2.getObjHandle();
-
-      WRITE3Request writeReq2 = new WRITE3Request(handle2, 0, 10,
-          WriteStableHow.FILE_SYNC, ByteBuffer.wrap(buffer));
-      XDR writeXdr2 = new XDR();
-      writeReq2.serialize(writeXdr2);
-      nfsd.write(writeXdr2.asReadOnlyWrap(), null, 1, securityHandler,
-          InetAddress.getLocalHost());
-
-      waitWrite(nfsd, handle2, 60000);
-
-      // Readback
-      READ3Request readReq2 = new READ3Request(handle2, 0, 10);
-      XDR readXdr2 = new XDR();
-      readReq2.serialize(readXdr2);
-      READ3Response readRsp2 = nfsd.read(readXdr2.asReadOnlyWrap(),
-          securityHandler, InetAddress.getLocalHost());
-
-      assertTrue(Arrays.equals(buffer, readRsp2.getData().array()));
-      // FILE_SYNC should sync the file size
-      status = client.getFileInfo("/file2");
-      assertTrue(status.getLen() == 10);
-
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
-  }
 }
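
The deleted test blocks on a waitWrite(nfsd, handle, 60000) helper that is defined elsewhere in the test class and does not appear in this diff; it presumably waits for the NFS gateway to finish its pending writes for the file handle. A minimal, hypothetical sketch of such a timeout-based poll, using only standard Java and a caller-supplied condition (names and structure are assumed, not taken from the real helper):

    import java.util.concurrent.Callable;
    import java.util.concurrent.TimeoutException;

    // Hypothetical helper; illustrates the poll-until-timeout pattern the test relies on.
    final class WaitUtil {
      private WaitUtil() {}

      static void waitFor(Callable<Boolean> condition, long timeoutMs, long intervalMs)
          throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          if (condition.call()) {
            return;                   // condition satisfied, e.g. no pending writes left
          }
          Thread.sleep(intervalMs);   // back off before re-checking
        }
        throw new TimeoutException("Condition not met within " + timeoutMs + " ms");
      }
    }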

+ 0 - 128
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -1,133 +1,5 @@
-
 Hadoop HDFS Change Log
 
-Release 2.2.1 - UNRELEASED
-
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-
-  IMPROVEMENTS
-
-    HDFS-5360. Improvement of usage message of renameSnapshot and
-    deleteSnapshot. (Shinichi Yamashita via wang)
-
-    HDFS-5331. Make SnapshotDiff.java an o.a.h.util.Tool interface implementation.
-    (Vinayakumar B via umamahesh)
-
-    HDFS-4657.  Limit the number of blocks logged by the NN after a block
-    report to a configurable value.  (Aaron Twining Myers via Colin Patrick
-    McCabe)
-
-    HDFS-5344. Make LsSnapshottableDir a Tool interface implementation. (Sathish via umamahesh)
-
-    HDFS-5544. Add test case for checking dfs.checksum type as NULL value. (Sathish via umamahesh)
-
-    HDFS-5568. Support includeSnapshots option with Fsck command. (Vinayakumar B via umamahesh)
-
-  OPTIMIZATIONS
-
-  BUG FIXES
-
-    HDFS-5307. Support both HTTP and HTTPS in jsp pages (Haohui Mai via
-    brandonli)
-
-    HDFS-5291. Standby namenode after transition to active goes into safemode.
-    (jing9)
-
-    HDFS-5317. Go back to DFS Home link does not work on datanode webUI
-    (Haohui Mai via brandonli)
-
-    HDFS-5316. Namenode ignores the default https port (Haohui Mai via
-    brandonli)
-
-    HDFS-5281. COMMIT request should not block. (brandonli)
-
-    HDFS-5337. should do hsync for a commit request even when there are no
-    pending writes (brandonli)
-
-    HDFS-5335. Hive query failed with possible race in dfs output stream.
-    (Haohui Mai via suresh)
-
-    HDFS-5322. HDFS delegation token not found in cache errors seen on secure HA 
-    clusters. (jing9)
-
-    HDFS-5329. Update FSNamesystem#getListing() to handle inode path in startAfter
-    token. (brandonli)
-
-    HDFS-5330. fix readdir and readdirplus for large directories (brandonli)
-
-    HDFS-5370. Typo in Error Message: difference between range in condition
-    and range in error message. (Kousuke Saruta via suresh)
-
-    HDFS-5365. Fix libhdfs compile error on FreeBSD9. (Radim Kolar via cnauroth)
-    
-    HDFS-5347. Add HDFS NFS user guide. (brandonli)
-
-    HDFS-5403. WebHdfs client cannot communicate with older WebHdfs servers
-    post HDFS-5306. (atm)
-
-    HDFS-5171. NFS should create input stream for a file and try to share it
-    with multiple read requests. (Haohui Mai via brandonli)
-
-    HDFS-5413. hdfs.cmd does not support passthrough to any arbitrary class.
-    (cnauroth)
-
-    HDFS-5433. When reloading fsimage during checkpointing, we should clear
-    existing snapshottable directories. (Aaron T. Myers via wang)
-
-    HDFS-5432. TestDatanodeJsp fails on Windows due to assumption that loopback
-    address resolves to host name localhost. (cnauroth)
-
-    HDFS-5065. TestSymlinkHdfsDisable fails on Windows. (ivanmi)
-
-    HDFS-4633. TestDFSClientExcludedNodes fails sporadically if excluded nodes
-    cache expires too quickly. (Chris Nauroth via Sanjay)
-
-    HDFS-5037. Active NN should trigger its own edit log rolls. (wang)
-
-    HDFS-5035.  getFileLinkStatus and rename do not correctly check permissions
-    of symlinks.  (Andrew Wang via Colin Patrick McCabe)
-
-    HDFS-5456. NameNode startup progress creates new steps if caller attempts to
-    create a counter for a step that doesn't already exist.  (cnauroth)
-
-    HDFS-5458. Datanode failed volume threshold ignored if exception is thrown
-    in getDataDirsFromURIs. (Mike Mellenthin via wang)
-
-    HDFS-5252. Stable write is not handled correctly in someplace. (brandonli)
-
-    HDFS-5364. Add OpenFileCtx cache. (brandonli)
-
-    HDFS-5469. Add configuration property for the sub-directory export path
-    (brandonli)
-
-    HADOOP-10109.  Fix test failure in TestOfflineEditsViewer introduced by
-    HADOOP-10052 (cmccabe)
-
-    HDFS-5519. COMMIT handler should update the commit status after sync
-    (brandonli)
-
-    HDFS-5372. In FSNamesystem, hasReadLock() returns false if the current 
-    thread holds the write lock (Vinayakumar B via umamahesh)
-
-    HDFS-4516. Client crash after block allocation and NN switch before lease recovery for 
-    the same file can cause readers to fail forever (Vinayakumar B via umamahesh)
-
-    HDFS-5014. Process register commands without holding BPOfferService lock. 
-    (Vinayakumar B via umamahesh)
-
-    HDFS-5288. Close idle connections in portmap (Haohui Mai via brandonli)
-
-    HDFS-5407. Fix typos in DFSClientCache (Haohui Mai via brandonli)
-
-    HDFS-5548. Use ConcurrentHashMap in portmap (Haohui Mai via brandonli)
-
-    HDFS-5577. NFS user guide update (brandonli)
-
-    HDFS-5563. NFS gateway should commit the buffered data when a read request
-    comes after a write to the same file (brandonli)
-
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -20,12 +20,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>2.2.1-SNAPSHOT</version>
+    <version>2.2.0</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-hdfs</artifactId>
-  <version>2.2.1-SNAPSHOT</version>
+  <version>2.2.0</version>
   <description>Apache Hadoop HDFS</description>
   <name>Apache Hadoop HDFS</name>
   <packaging>jar</packaging>
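
With the rebase, the module version here drops back from 2.2.1-SNAPSHOT to 2.2.0. A quick way to confirm which Hadoop build actually ends up on a client's classpath is org.apache.hadoop.util.VersionInfo from hadoop-common; a minimal check, assuming a 2.2.0 hadoop-common jar is on the classpath (class name is illustrative):

    import org.apache.hadoop.util.VersionInfo;

    public class PrintHadoopVersion {
      public static void main(String[] args) {
        // Reports the version and source revision baked into the hadoop-common jar at build time.
        System.out.println("Hadoop version: " + VersionInfo.getVersion());
        System.out.println("Built from revision: " + VersionInfo.getRevision());
      }
    }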

+ 1 - 10
hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt

@@ -62,11 +62,6 @@ endfunction()
 INCLUDE(CheckCSourceCompiles)
 CHECK_C_SOURCE_COMPILES("int main(void) { static __thread int i = 0; return 0; }" HAVE_BETTER_TLS)
 
-# Check if we need to link dl library to get dlopen.
-# dlopen on Linux is in separate library but on FreeBSD its in libc
-INCLUDE(CheckLibraryExists)
-CHECK_LIBRARY_EXISTS(dl dlopen "" NEED_LINK_DL)
-
 find_package(JNI REQUIRED)
 if (NOT GENERATED_JAVAH)
     # Must identify where the generated headers have been placed
@@ -94,13 +89,9 @@ add_dual_library(hdfs
     main/native/libhdfs/jni_helper.c
     main/native/libhdfs/hdfs.c
 )
-if (NEED_LINK_DL)
-   set(LIB_DL dl)
-endif(NEED_LINK_DL)
-
 target_link_dual_libraries(hdfs
     ${JAVA_JVM_LIBRARY}
-    ${LIB_DL}
+    dl
     pthread
 )
 dual_output_directory(hdfs target/usr/local/lib)

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml

@@ -20,13 +20,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>2.2.1-SNAPSHOT</version>
+    <version>2.2.0</version>
     <relativePath>../../../../../hadoop-project</relativePath>
   </parent>
 
   <groupId>org.apache.hadoop.contrib</groupId>
   <artifactId>hadoop-hdfs-bkjournal</artifactId>
-  <version>2.2.1-SNAPSHOT</version>
+  <version>2.2.0</version>
   <description>Apache Hadoop HDFS BookKeeper Journal</description>
   <name>Apache Hadoop HDFS BookKeeper Journal</name>
   <packaging>jar</packaging>

+ 1 - 18
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd

@@ -47,17 +47,7 @@ if "%1" == "--config" (
       goto print_usage
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir
-  for %%i in ( %hdfscommands% ) do (
-    if %hdfs-command% == %%i set hdfscommand=true
-  )
-  if defined hdfscommand (
-    call :%hdfs-command%
-  ) else (
-    set CLASSPATH=%CLASSPATH%;%CD%
-    set CLASS=%hdfs-command%
-  )
-
+  call :%hdfs-command% %hdfs-command-arguments%
   set java_arguments=%JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% %CLASS% %hdfs-command-arguments%
   call %JAVA% %java_arguments%
 
@@ -68,11 +58,6 @@ goto :eof
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_NAMENODE_OPTS%
   goto :eof
 
-:journalnode
-  set CLASS=org.apache.hadoop.hdfs.qjournal.server.JournalNode
-  set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_JOURNALNODE_OPTS%
-  goto :eof
-
 :zkfc
   set CLASS=org.apache.hadoop.hdfs.tools.DFSZKFailoverController
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ZKFC_OPTS%
@@ -168,11 +153,9 @@ goto :eof
   @echo   namenode -format     format the DFS filesystem
   @echo   secondarynamenode    run the DFS secondary namenode
   @echo   namenode             run the DFS namenode
-  @echo   journalnode          run the DFS journalnode
   @echo   zkfc                 run the ZK Failover Controller daemon
   @echo   datanode             run a DFS datanode
   @echo   dfsadmin             run a DFS admin client
-  @echo   haadmin              run a DFS HA admin client
   @echo   fsck                 run a DFS filesystem checking utility
   @echo   balancer             run a cluster balancing utility
   @echo   jmxget               get JMX exported values from NameNode or DataNode.

+ 0 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -2286,11 +2286,6 @@ public class DFSClient implements java.io.Closeable {
       throw re.unwrapRemoteException(AccessControlException.class);
     }
   }
-
-  @VisibleForTesting
-  ExtendedBlock getPreviousBlock(String file) {
-    return filesBeingWritten.get(file).getBlock();
-  }
   
   /**
    * enable/disable restore failed storage.
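
The getPreviousBlock accessor removed above was a test-only hook guarded by Guava's @VisibleForTesting annotation. As a general, hypothetical illustration of that pattern (not DFSClient's actual internals):

    import com.google.common.annotations.VisibleForTesting;

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Hypothetical class; shows the pattern of a package-private, test-only accessor.
    public class CacheHolder {
      private final Map<String, String> cache = new ConcurrentHashMap<String, String>();

      public void put(String key, String value) {
        cache.put(key, value);
      }

      // Package-private and annotated so tooling can flag accidental production callers.
      @VisibleForTesting
      String peek(String key) {
        return cache.get(key);
      }
    }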

Some files were not shown because too many files changed in this diff