Browse source code

Branching for 0.23.2 release.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23.2@1293512 13f79535-47bb-0310-9956-ffa450edef68
Arun Murthy 13 years ago
parent
commit
050106df67
100 changed files with 1778 additions and 773 deletions
  1. 2 2
      hadoop-assemblies/pom.xml
  2. 2 2
      hadoop-client/pom.xml
  3. 2 2
      hadoop-common-project/hadoop-annotations/pom.xml
  4. 2 2
      hadoop-common-project/hadoop-auth-examples/pom.xml
  5. 2 2
      hadoop-common-project/hadoop-auth/pom.xml
  6. 80 6
      hadoop-common-project/hadoop-common/CHANGES.txt
  7. 3 3
      hadoop-common-project/hadoop-common/pom.xml
  8. 30 6
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  9. 20 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
  10. 9 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  11. 5 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
  12. 9 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
  13. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java
  14. 13 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
  15. 2 29
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
  16. 0 14
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java
  17. 9 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
  18. 7 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
  19. 41 22
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java
  20. 50 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/AbstractDNSToSwitchMapping.java
  21. 17 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java
  22. 17 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java
  23. 22 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
  24. 10 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
  25. 1 1
      hadoop-common-project/hadoop-common/src/main/packages/hadoop-setup-conf.sh
  26. 7 0
      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  27. 97 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java
  28. 3 3
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java
  29. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java
  30. 5 39
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
  31. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
  32. 13 23
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java
  33. 26 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/StaticMapping.java
  34. 171 34
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestStaticMapping.java
  35. 69 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSwitchMapping.java
  36. 77 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCredentials.java
  37. 2 2
      hadoop-common-project/pom.xml
  38. 2 2
      hadoop-dist/pom.xml
  39. 2 2
      hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
  40. 10 15
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/site.xml
  41. 84 5
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  42. 2 2
      hadoop-hdfs-project/hadoop-hdfs/pom.xml
  43. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
  44. 55 13
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
  45. 6 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  46. 3 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  47. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
  48. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
  49. 16 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
  50. 71 28
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  51. 15 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
  52. 25 16
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
  53. 7 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java
  54. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
  55. 41 87
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  56. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
  57. 39 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
  58. 28 16
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
  59. 134 155
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
  60. 67 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
  61. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
  62. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
  63. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
  64. 11 9
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
  65. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
  66. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
  67. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java
  68. 6 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
  69. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
  70. 39 20
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  71. 19 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
  72. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  73. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  74. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
  75. 14 14
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
  76. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
  77. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml
  78. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
  79. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java
  80. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
  81. 0 6
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java
  82. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
  83. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
  84. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
  85. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
  86. 7 7
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
  87. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
  88. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
  89. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
  90. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
  91. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
  92. 47 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
  93. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java
  94. 56 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java
  95. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
  96. 0 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
  97. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
  98. 68 48
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
  99. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
  100. 9 10
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java

+ 2 - 2
hadoop-assemblies/pom.xml

@@ -20,12 +20,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>0.23.1</version>
+    <version>0.23.2-SNAPSHOT</version>
    <relativePath>../hadoop-project</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-assemblies</artifactId>
-  <version>0.23.1</version>
+  <version>0.23.2-SNAPSHOT</version>
  <name>Apache Hadoop Assemblies</name>
  <description>Apache Hadoop Assemblies</description>

+ 2 - 2
hadoop-client/pom.xml

@@ -18,12 +18,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>0.23.1</version>
+    <version>0.23.2-SNAPSHOT</version>
    <relativePath>../hadoop-project</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-client</artifactId>
-  <version>0.23.1</version>
+  <version>0.23.2-SNAPSHOT</version>
  <packaging>jar</packaging>

  <description>Apache Hadoop Client</description>

+ 2 - 2
hadoop-common-project/hadoop-annotations/pom.xml

@@ -17,12 +17,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>0.23.1</version>
+    <version>0.23.2-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-annotations</artifactId>
-  <version>0.23.1</version>
+  <version>0.23.2-SNAPSHOT</version>
  <description>Apache Hadoop Annotations</description>
  <name>Apache Hadoop Annotations</name>
  <packaging>jar</packaging>

+ 2 - 2
hadoop-common-project/hadoop-auth-examples/pom.xml

@@ -17,12 +17,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>0.23.1</version>
+    <version>0.23.2-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-auth-examples</artifactId>
-  <version>0.23.1</version>
+  <version>0.23.2-SNAPSHOT</version>
  <packaging>war</packaging>

  <name>Apache Hadoop Auth Examples</name>

+ 2 - 2
hadoop-common-project/hadoop-auth/pom.xml

@@ -17,12 +17,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>0.23.1</version>
+    <version>0.23.2-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-auth</artifactId>
-  <version>0.23.1</version>
+  <version>0.23.2-SNAPSHOT</version>
  <packaging>jar</packaging>

  <name>Apache Hadoop Auth</name>

+ 80 - 6
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -1,5 +1,85 @@
 Hadoop Change Log

+Release 0.23.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+Release 0.23.2 - UNRELEASED
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+    HADOOP-8048. Allow merging of Credentials (Daryn Sharp via tgraves)
+ 
+    HADOOP-8032. mvn site:stage-deploy should be able to use the scp protocol
+    to stage documents (Ravi Prakash via tgraves)
+
+    HADOOP-7923. Automate the updating of version numbers in the doc system.
+    (szetszwo)
+
+  OPTIMIZATIONS
+
+    HADOOP-8071. Avoid an extra packet in client code when nagling is
+    disabled. (todd)
+
+    HADOOP-6502. Improve the performance of Configuration.getClassByName when
+    the class is not found by caching negative results.
+    (sharad, todd via todd)
+
+  BUG FIXES
+
+    HADOOP-7660. Maven generated .classpath doesnot includes 
+    "target/generated-test-source/java" as source directory.
+    (Laxman via bobby)
+
+    HADOOP-8042  When copying a file out of HDFS, modifying it, and uploading
+    it back into HDFS, the put fails due to a CRC mismatch
+    (Daryn Sharp via bobby)
+
+    HADOOP-8035 Hadoop Maven site is inefficient and runs phases redundantly
+    (abayer via tucu)
+
+    HADOOP-8051 HttpFS documentation it is not wired to the generated site (tucu)
+
+    HADOOP-8055. Hadoop tarball distribution lacks a core-site.xml (harsh)
+
+    HADOOP-8052. Hadoop Metrics2 should emit Float.MAX_VALUE (instead of 
+    Double.MAX_VALUE) to avoid making Ganglia's gmetad core. (Varun Kapoor
+    via mattf)
+
+    HADOOP-8074. Small bug in hadoop error message for unknown commands.
+    (Colin Patrick McCabe via eli)
+
+    HADOOP-8082 add hadoop-client and hadoop-minicluster to the 
+    dependency-management section. (tucu)
+
+    HADOOP-8066 The full docs build intermittently fails (abayer via tucu)
+
+    HADOOP-8083 javadoc generation for some modules is not done under target/ (tucu)
+
+    HADOOP-8036. TestViewFsTrash assumes the user's home directory is
+    2 levels deep. (Colin Patrick McCabe via eli)
+
+    HADOOP-8046 Revert StaticMapping semantics to the existing ones, add DNS
+    mapping diagnostics in progress (stevel)
+
+    HADOOP-8057 hadoop-setup-conf.sh not working because of some extra spaces.
+    (Vinayakumar B via stevel)
+
+    HADOOP-7680 TestHardLink fails on Mac OS X, when gnu stat is in path.
+    (Milind Bhandarkar via stevel)
+
+    HADOOP-8050. Deadlock in metrics. (Kihwal Lee via mattf)
+
 Release 0.23.1 - 2012-02-17 

   INCOMPATIBLE CHANGES
@@ -223,12 +303,6 @@ Release 0.23.1 - 2012-02-17
   HADOOP-8013. ViewFileSystem does not honor setVerifyChecksum
   (Daryn Sharp via bobby)

-   HADOOP-8055. Hadoop tarball distribution lacks a core-site.xml (harsh)
-
-   HADOOP-8052. Hadoop Metrics2 should emit Float.MAX_VALUE 
-   (instead of Double.MAX_VALUE) to avoid making Ganglia's gmetad core.
-   (Varun Kapoor via Matt)
-
   HADOOP-8054 NPE with FilterFileSystem (Daryn Sharp via bobby)

 Release 0.23.0 - 2011-11-01 

+ 3 - 3
hadoop-common-project/hadoop-common/pom.xml

@@ -17,12 +17,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project-dist</artifactId>
-    <version>0.23.1</version>
+    <version>0.23.2-SNAPSHOT</version>
    <relativePath>../../hadoop-project-dist</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-common</artifactId>
-  <version>0.23.1</version>
+  <version>0.23.2-SNAPSHOT</version>
  <description>Apache Hadoop Common</description>
  <name>Apache Hadoop Common</name>
  <packaging>jar</packaging>
@@ -454,7 +454,7 @@
          </execution>
          <execution>
            <id>add-test-source</id>
-            <phase>generate-test-sources</phase>
+            <phase>generate-sources</phase>
            <goals>
              <goal>add-test-source</goal>
            </goals>

+ 30 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -1146,6 +1146,22 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   * @throws ClassNotFoundException if the class is not found.
   */
  public Class<?> getClassByName(String name) throws ClassNotFoundException {
+    Class<?> ret = getClassByNameOrNull(name);
+    if (ret == null) {
+      throw new ClassNotFoundException("Class " + name + " not found");
+    }
+    return ret;
+  }
+  
+  /**
+   * Load a class by name, returning null rather than throwing an exception
+   * if it couldn't be loaded. This is to avoid the overhead of creating
+   * an exception.
+   * 
+   * @param name the class name
+   * @return the class object, or null if it could not be found.
+   */
+  public Class<?> getClassByNameOrNull(String name) {
    Map<String, Class<?>> map;

    synchronized (CACHE_CLASSES) {
@@ -1157,12 +1173,20 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
      }
    }

-    Class<?> clazz = map.get(name);
-    if (clazz == null) {
-      clazz = Class.forName(name, true, classLoader);
-      if (clazz != null) {
-        // two putters can race here, but they'll put the same class
-        map.put(name, clazz);
+    Class<?> clazz = null;
+    if (!map.containsKey(name)) {
+      try {
+        clazz = Class.forName(name, true, classLoader);
+      } catch (ClassNotFoundException e) {
+        map.put(name, null); //cache negative that class is not found
+        return null;
+      }
+      // two putters can race here, but they'll put the same class
+      map.put(name, clazz);
+    } else { // check already performed on this class name
+      clazz = map.get(name);
+      if (clazz == null) { // found the negative
+        return null;
      }
    }


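The HADOOP-6502 change above caches negative lookups, and getClassByNameOrNull() exposes that to callers. A minimal illustrative sketch (not part of this commit; the missing class name is made up) of probing for an optional class without paying for a new ClassNotFoundException on every miss:

    import org.apache.hadoop.conf.Configuration;

    public class OptionalClassProbe {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Repeated lookups of a missing class now hit the cached negative
        // result instead of building a fresh ClassNotFoundException each time.
        for (int i = 0; i < 3; i++) {
          Class<?> clazz = conf.getClassByNameOrNull("org.example.DoesNotExist");
          System.out.println("lookup " + i + ": "
              + (clazz == null ? "not on the classpath" : clazz.getName()));
        }
      }
    }
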
+ 20 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java

@@ -43,6 +43,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
  private static final byte[] CHECKSUM_VERSION = new byte[] {'c', 'r', 'c', 0};
  private int bytesPerChecksum = 512;
  private boolean verifyChecksum = true;
+  private boolean writeChecksum = true;

  public static double getApproxChkSumLength(long size) {
    return ChecksumFSOutputSummer.CHKSUM_AS_FRACTION * size;
@@ -67,6 +68,11 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
    this.verifyChecksum = verifyChecksum;
  }

+  @Override
+  public void setWriteChecksum(boolean writeChecksum) {
+    this.writeChecksum = writeChecksum;
+  }
+  
  /** get the raw file system */
  public FileSystem getRawFileSystem() {
    return fs;
@@ -428,9 +434,20 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
        throw new IOException("Mkdirs failed to create " + parent);
      }
    }
-    final FSDataOutputStream out = new FSDataOutputStream(
-        new ChecksumFSOutputSummer(this, f, overwrite, bufferSize, replication,
-            blockSize, progress), null);
+    final FSDataOutputStream out;
+    if (writeChecksum) {
+      out = new FSDataOutputStream(
+          new ChecksumFSOutputSummer(this, f, overwrite, bufferSize, replication,
+              blockSize, progress), null);
+    } else {
+      out = fs.create(f, permission, overwrite, bufferSize, replication,
+          blockSize, progress);
+      // remove the checksum file since we aren't writing one
+      Path checkFile = getChecksumFile(f);
+      if (fs.exists(checkFile)) {
+        fs.delete(checkFile, true);
+      }
+    }
    if (permission != null) {
      setPermission(f, permission);
    }

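The new writeChecksum flag decides whether create() goes through ChecksumFSOutputSummer or straight to the raw file system. A small illustrative sketch (not from this commit; the path is just an example) against the local file system, which extends ChecksumFileSystem:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocalFileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteChecksumDemo {
      public static void main(String[] args) throws Exception {
        LocalFileSystem lfs = FileSystem.getLocal(new Configuration());
        Path file = new Path("/tmp/writeChecksumDemo/data");  // example path

        // With writeChecksum disabled, create() should delegate to the raw
        // file system and remove any stale .crc side file for this path.
        lfs.setWriteChecksum(false);
        FSDataOutputStream out = lfs.create(file, true);
        out.writeBytes("no checksum expected");
        out.close();

        // Expected to print false, since no checksum file was written.
        System.out.println("crc file present: "
            + lfs.exists(lfs.getChecksumFile(file)));
      }
    }
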
+ 9 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -1936,6 +1936,15 @@ public abstract class FileSystem extends Configured implements Closeable {
    //doesn't do anything
  }

+  /**
+   * Set the write checksum flag. This is only applicable if the 
+   * corresponding FileSystem supports checksum. By default doesn't do anything.
+   * @param writeChecksum
+   */
+  public void setWriteChecksum(boolean writeChecksum) {
+    //doesn't do anything
+  }
+
  /**
   * Return a list of file status objects that corresponds to the list of paths
   * excluding those non-existent paths.

+ 5 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java

@@ -365,6 +365,11 @@ public class FilterFileSystem extends FileSystem {
  public void setVerifyChecksum(boolean verifyChecksum) {
    fs.setVerifyChecksum(verifyChecksum);
  }
+  
+  @Override
+  public void setWriteChecksum(boolean writeChecksum) {
+    fs.setVerifyChecksum(writeChecksum);
+  }
 
  @Override
  public Configuration getConf() {

+ 9 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java

@@ -269,7 +269,15 @@ public class FsShell extends Configured implements Tool {
   
  private void displayError(String cmd, String message) {
    for (String line : message.split("\n")) {
-      System.err.println(cmd.substring(1) + ": " + line);
+      System.err.println(cmd + ": " + line);
+      if (cmd.charAt(0) != '-') {
+        Command instance = null;
+        instance = commandFactory.getInstance("-" + cmd);
+        if (instance != null) {
+          System.err.println("Did you mean -" + cmd + "?  This command " +
+              "begins with a dash.");
+        }
+      }
    }
  }
  

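The FsShell change above only affects the error path: when a command is typed without its leading dash, the message now echoes the command as given and suggests the dashed form. A hypothetical driver (not part of the commit) that would exercise that path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    public class ShellTypoDemo {
      public static void main(String[] args) throws Exception {
        // Passing "ls" instead of "-ls": with this patch the shell is expected
        // to print the error prefixed with "ls:" followed by a hint like
        // "Did you mean -ls?  This command begins with a dash."
        int exitCode = ToolRunner.run(new FsShell(new Configuration()),
            new String[] { "ls", "/" });
        System.out.println("exit code: " + exitCode);
      }
    }
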
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java

@@ -64,7 +64,7 @@ public class HardLink {
      //override getLinkCountCommand for the particular Unix variant
      //Linux is already set as the default - {"stat","-c%h", null}
      if (osType == OSType.OS_TYPE_MAC) {
-        String[] linkCountCmdTemplate = {"stat","-f%l", null};
+        String[] linkCountCmdTemplate = {"/usr/bin/stat","-f%l", null};
        HardLinkCGUnix.setLinkCountCmdTemplate(linkCountCmdTemplate);
      } else if (osType == OSType.OS_TYPE_SOLARIS) {
        String[] linkCountCmdTemplate = {"ls","-l", null};

+ 13 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java

@@ -41,7 +41,9 @@ import org.apache.hadoop.io.IOUtils;
 */
abstract class CommandWithDestination extends FsCommand {  
  protected PathData dst;
-  protected boolean overwrite = false;
+  private boolean overwrite = false;
+  private boolean verifyChecksum = true;
+  private boolean writeChecksum = true;
   
  /**
   * 
@@ -53,6 +55,14 @@ abstract class CommandWithDestination extends FsCommand {
    overwrite = flag;
  }
  
+  protected void setVerifyChecksum(boolean flag) {
+    verifyChecksum = flag;
+  }
+  
+  protected void setWriteChecksum(boolean flag) {
+    writeChecksum = flag;
+  }
+  
  /**
   *  The last arg is expected to be a local path, if only one argument is
   *  given then the destination will be the current directory 
@@ -201,6 +211,7 @@ abstract class CommandWithDestination extends FsCommand {
   * @throws IOException if copy fails
   */ 
  protected void copyFileToTarget(PathData src, PathData target) throws IOException {
+    src.fs.setVerifyChecksum(verifyChecksum);
    copyStreamToTarget(src.fs.open(src.path), target);
  }
  
@@ -217,6 +228,7 @@ abstract class CommandWithDestination extends FsCommand {
    if (target.exists && (target.stat.isDirectory() || !overwrite)) {
      throw new PathExistsException(target.toString());
    }
+    target.fs.setWriteChecksum(writeChecksum);
    PathData tempFile = null;
    try {
      tempFile = target.createTempFile(target+"._COPYING_");

+ 2 - 29
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java

@@ -25,7 +25,6 @@ import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.ChecksumFileSystem;
 import org.apache.hadoop.fs.FileUtil;

 /** Various commands for copy files */
@@ -103,43 +102,17 @@
      "to the local name.  <src> is kept.  When copying multiple,\n" +
      "files, the destination must be a directory.";

-    /**
-     * The prefix for the tmp file used in copyToLocal.
-     * It must be at least three characters long, required by
-     * {@link java.io.File#createTempFile(String, String, File)}.
-     */
-    private boolean copyCrc;
-    private boolean verifyChecksum;
-
    @Override
    protected void processOptions(LinkedList<String> args)
    throws IOException {
      CommandFormat cf = new CommandFormat(
          1, Integer.MAX_VALUE, "crc", "ignoreCrc");
      cf.parse(args);
-      copyCrc = cf.getOpt("crc");
-      verifyChecksum = !cf.getOpt("ignoreCrc");
-      
+      setWriteChecksum(cf.getOpt("crc"));
+      setVerifyChecksum(!cf.getOpt("ignoreCrc"));
      setRecursive(true);
      getLocalDestination(args);
    }
-
-    @Override
-    protected void copyFileToTarget(PathData src, PathData target)
-    throws IOException {
-      src.fs.setVerifyChecksum(verifyChecksum);
-
-      if (copyCrc && !(src.fs instanceof ChecksumFileSystem)) {
-        displayWarning(src.fs + ": Does not support checksums");
-        copyCrc = false;
-      }      
-
-      super.copyFileToTarget(src, target);
-      if (copyCrc) {
-        // should we delete real file if crc copy fails?
-        super.copyFileToTarget(src.getChecksumFile(), target.getChecksumFile());
-      }
-    }
   }
  }

  /**
+ 0 - 14
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java

@@ -27,7 +27,6 @@ import java.net.URISyntaxException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ChecksumFileSystem;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
@@ -169,19 +168,6 @@ public class PathData {
    }
  }
  
-  /**
-   * Return the corresponding crc data for a file.  Avoids exposing the fs
-   * contortions to the caller.  
-   * @return PathData of the crc file
-   * @throws IOException is anything goes wrong
-   */
-  public PathData getChecksumFile() throws IOException {
-    checkIfExists(FileTypeRequirement.SHOULD_NOT_BE_DIRECTORY);
-    ChecksumFileSystem srcFs = (ChecksumFileSystem)fs;
-    Path srcPath = srcFs.getChecksumFile(path);
-    return new PathData(srcFs.getRawFileSystem(), srcPath.toString());
-  }
-
  /**
   * Returns a temporary file for this PathData with the given extension.
   * The file will be deleted on exit.

+ 9 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -470,6 +470,15 @@ public class ViewFileSystem extends FileSystem {
    }
  }
  
+  @Override
+  public void setWriteChecksum(final boolean writeChecksum) { 
+    List<InodeTree.MountPoint<FileSystem>> mountPoints = 
+        fsState.getMountPoints();
+    for (InodeTree.MountPoint<FileSystem> mount : mountPoints) {
+      mount.target.targetFileSystem.setWriteChecksum(writeChecksum);
+    }
+  }
+
  public MountPoint[] getMountPoints() {
    List<InodeTree.MountPoint<FileSystem>> mountPoints = 
                  fsState.getMountPoints();

+ 7 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -788,12 +788,16 @@ public class Client {
          //for serializing the
          //data to be written
          d = new DataOutputBuffer();
+          d.writeInt(0); // placeholder for data length
          d.writeInt(call.id);
          call.param.write(d);
          byte[] data = d.getData();
-          int dataLength = d.getLength();
-          out.writeInt(dataLength);      //first put the data length
-          out.write(data, 0, dataLength);//write the data
+          int dataLength = d.getLength() - 4;
+          data[0] = (byte)((dataLength >>> 24) & 0xff);
+          data[1] = (byte)((dataLength >>> 16) & 0xff);
+          data[2] = (byte)((dataLength >>> 8) & 0xff);
+          data[3] = (byte)(dataLength & 0xff);
+          out.write(data, 0, dataLength + 4);//write the data
          out.flush();
        }
      } catch(IOException e) {

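The HADOOP-8071 fix above writes the 4-byte length and the payload from one buffer so that, with Nagle's algorithm disabled, the request leaves in a single packet instead of two. A self-contained sketch of the same framing trick using plain JDK streams rather than Hadoop's DataOutputBuffer (illustrative only):

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class LengthPrefixedFrame {
      /** Build one buffer: 4-byte big-endian length followed by the payload. */
      static byte[] frame(byte[] payload) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(bos);
        dos.writeInt(0);            // placeholder for the length
        dos.write(payload);         // serialize the payload after it
        byte[] data = bos.toByteArray();
        int dataLength = data.length - 4;
        // Patch the placeholder in place instead of issuing a second write.
        data[0] = (byte) ((dataLength >>> 24) & 0xff);
        data[1] = (byte) ((dataLength >>> 16) & 0xff);
        data[2] = (byte) ((dataLength >>> 8) & 0xff);
        data[3] = (byte) (dataLength & 0xff);
        return data;                // caller sends this with a single write()
      }

      public static void main(String[] args) throws IOException {
        byte[] framed = frame("hello".getBytes("UTF-8"));
        System.out.println("total bytes: " + framed.length); // 4 + 5 = 9
      }
    }
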
+ 41 - 22
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java

@@ -94,17 +94,19 @@ class MetricsSourceAdapter implements DynamicMBean {
  }

  @Override
-  public synchronized Object getAttribute(String attribute)
+  public Object getAttribute(String attribute)
      throws AttributeNotFoundException, MBeanException, ReflectionException {
    updateJmxCache();
-    Attribute a = attrCache.get(attribute);
-    if (a == null) {
-      throw new AttributeNotFoundException(attribute +" not found");
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(attribute +": "+ a);
+    synchronized(this) {
+      Attribute a = attrCache.get(attribute);
+      if (a == null) {
+        throw new AttributeNotFoundException(attribute +" not found");
+      }
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(attribute +": "+ a);
+      }
+      return a.getValue();
    }
-    return a.getValue();
  }

  @Override
@@ -115,17 +117,19 @@ class MetricsSourceAdapter implements DynamicMBean {
  }

  @Override
-  public synchronized AttributeList getAttributes(String[] attributes) {
+  public AttributeList getAttributes(String[] attributes) {
    updateJmxCache();
-    AttributeList ret = new AttributeList();
-    for (String key : attributes) {
-      Attribute attr = attrCache.get(key);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(key +": "+ attr);
+    synchronized(this) {
+      AttributeList ret = new AttributeList();
+      for (String key : attributes) {
+        Attribute attr = attrCache.get(key);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(key +": "+ attr);
+        }
+        ret.add(attr);
       }
      }
+      return ret;
     }
    }
   }
  }

  @Override
   }
  }

  @Override
+  public MBeanInfo getMBeanInfo() {
     updateJmxCache();
    updateJmxCache();
    return infoCache;
  }

-    if (System.currentTimeMillis() - jmxCacheTS >= jmxCacheTTL) {
-      if (lastRecs == null) {
-        MetricsCollectorImpl builder = new MetricsCollectorImpl();
-        getMetrics(builder, true);
+  private void updateJmxCache() {
+    boolean getAllMetrics = false;
+    synchronized(this) {
+      if (System.currentTimeMillis() - jmxCacheTS >= jmxCacheTTL) {
+        // temporarilly advance the expiry while updating the cache
+        jmxCacheTS = System.currentTimeMillis() + jmxCacheTTL;
+        if (lastRecs == null) {
+          getAllMetrics = true;
+        }
+      }
+      else {
+        return;
      }
+    }
+
+    if (getAllMetrics) {
+      MetricsCollectorImpl builder = new MetricsCollectorImpl();
+      getMetrics(builder, true);
+    }
+
+    synchronized(this) {
      int oldCacheSize = attrCache.size();
      int newCacheSize = updateAttrCache();
      if (oldCacheSize < newCacheSize) {

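The HADOOP-8050 fix above narrows the synchronization: decide under the lock whether a refresh is due, run the potentially re-entrant getMetrics() call outside the lock, then re-acquire the lock to publish the result. A generic illustrative sketch of that pattern (plain Java, not the MetricsSourceAdapter code itself):

    public class RefreshableCache {
      private final Object lock = new Object();
      private long lastRefresh = 0;
      private String cached = "";

      public String get(long ttlMillis) {
        boolean refresh = false;
        synchronized (lock) {
          if (System.currentTimeMillis() - lastRefresh >= ttlMillis) {
            // Advance the timestamp so concurrent callers don't also refresh.
            lastRefresh = System.currentTimeMillis() + ttlMillis;
            refresh = true;
          }
        }

        String fresh = null;
        if (refresh) {
          // Expensive or re-entrant work runs outside the lock, so a callback
          // that needs this object's monitor cannot deadlock against us.
          fresh = loadValue();
        }

        synchronized (lock) {
          if (fresh != null) {
            cached = fresh;
            lastRefresh = System.currentTimeMillis();
          }
          return cached;
        }
      }

      private String loadValue() {
        return "value@" + System.currentTimeMillis();
      }

      public static void main(String[] args) {
        RefreshableCache c = new RefreshableCache();
        System.out.println(c.get(1000));
      }
    }
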
+ 50 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/AbstractDNSToSwitchMapping.java

@@ -22,6 +22,12 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
 
 /**
  * This is a base class for DNS to Switch mappings. <p/> It is not mandatory to
@@ -89,6 +95,49 @@ public abstract class AbstractDNSToSwitchMapping
    return false;
  }

+  /**
+   * Get a copy of the map (for diagnostics)
+   * @return a clone of the map or null for none known
+   */
+  public Map<String, String> getSwitchMap() {
+    return null;
+  }
+
+  /**
+   * Generate a string listing the switch mapping implementation,
+   * the mapping for every known node and the number of nodes and
+   * unique switches known about -each entry to a separate line.
+   * @return a string that can be presented to the ops team or used in
+   * debug messages.
+   */
+  public String dumpTopology() {
+    Map<String, String> rack = getSwitchMap();
+    StringBuilder builder = new StringBuilder();
+    builder.append("Mapping: ").append(toString()).append("\n");
+    if (rack != null) {
+      builder.append("Map:\n");
+      Set<String> switches = new HashSet<String>();
+      for (Map.Entry<String, String> entry : rack.entrySet()) {
+        builder.append("  ")
+            .append(entry.getKey())
+            .append(" -> ")
+            .append(entry.getValue())
+            .append("\n");
+        switches.add(entry.getValue());
+      }
+      builder.append("Nodes: ").append(rack.size()).append("\n");
+      builder.append("Switches: ").append(switches.size()).append("\n");
+    } else {
+      builder.append("No topology information");
+    }
+    return builder.toString();
+  }
+
+  protected boolean isSingleSwitchByScriptPolicy() {
+    return conf != null
+        && conf.get(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null;
+  }
+
  /**
   * Query for a {@link DNSToSwitchMapping} instance being on a single
   * switch.
@@ -100,7 +149,7 @@ public abstract class AbstractDNSToSwitchMapping
   * is not derived from this class.
   */
  public static boolean isMappingSingleSwitch(DNSToSwitchMapping mapping) {
-    return mapping instanceof AbstractDNSToSwitchMapping
+    return mapping != null && mapping instanceof AbstractDNSToSwitchMapping
        && ((AbstractDNSToSwitchMapping) mapping).isSingleSwitch();
  }


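getSwitchMap() and dumpTopology() above are purely diagnostic hooks. A hedged example (not from the commit; the host names are made up) that resolves a couple of nodes through a script-based mapping with no topology script configured and prints the topology dump:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.net.ScriptBasedMapping;

    public class TopologyDumpDemo {
      public static void main(String[] args) {
        // No topology script is configured, so the mapping behaves as a
        // single-switch mapping; the host names below are examples only.
        ScriptBasedMapping mapping = new ScriptBasedMapping();
        mapping.setConf(new Configuration());

        List<String> racks = mapping.resolve(
            Arrays.asList("node-1.example.com", "node-2.example.com"));
        System.out.println("racks: " + racks);

        // dumpTopology() lists the cached host -> rack entries plus node and
        // switch counts, which is what operators would use for diagnostics.
        System.out.println(mapping.dumpTopology());
      }
    }
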
+ 17 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.net;

 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -123,6 +124,22 @@ public class CachedDNSToSwitchMapping extends AbstractDNSToSwitchMapping {
 
  }

+  /**
+   * Get the (host x switch) map.
+   * @return a copy of the cached map of hosts to rack
+   */
+  @Override
+  public Map<String, String> getSwitchMap() {
+    Map<String, String > switchMap = new HashMap<String, String>(cache);
+    return switchMap;
+  }
+
+
+  @Override
+  public String toString() {
+    return "cached switch mapping relaying to " + rawMapping;
+  }
+
  /**
   * Delegate the switch topology query to the raw mapping, via
   * {@link AbstractDNSToSwitchMapping#isMappingSingleSwitch(DNSToSwitchMapping)}

+ 17 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java

@@ -66,9 +66,15 @@ public final class ScriptBasedMapping extends CachedDNSToSwitchMapping {
                     CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY ;
  /**
   * key to the argument count that the script supports
+   * {@value}
   */
  static final String SCRIPT_ARG_COUNT_KEY =
                     CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY ;
+  /**
+   * Text used in the {@link #toString()} method if there is no string
+   * {@value}
+   */
+  public static final String NO_SCRIPT = "no script";
 
  /**
   * Create an instance with the default configuration.
@@ -104,6 +110,11 @@ public final class ScriptBasedMapping extends CachedDNSToSwitchMapping {
    return getRawMapping().getConf();
  }

+  @Override
+  public String toString() {
+    return "script-based mapping with " + getRawMapping().toString();
+  }
+
  /**
   * {@inheritDoc}
   * <p/>
@@ -231,7 +242,7 @@ public final class ScriptBasedMapping extends CachedDNSToSwitchMapping {
          s.execute();
          allOutput.append(s.getOutput()).append(" ");
        } catch (Exception e) {
-          LOG.warn("Exception: ", e);
+          LOG.warn("Exception running " + s, e);
          return null;
        }
        loopCount++;
@@ -248,5 +259,10 @@ public final class ScriptBasedMapping extends CachedDNSToSwitchMapping {
    public boolean isSingleSwitch() {
      return scriptName == null;
    }
+
+    @Override
+    public String toString() {
+      return scriptName != null ? ("script " + scriptName) : NO_SCRIPT;
+    }
    }
 }

+ 22 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java

@@ -230,14 +230,34 @@ public class Credentials implements Writable {
  
  /**
   * Copy all of the credentials from one credential object into another.
+   * Existing secrets and tokens are overwritten.
   * @param other the credentials to copy
   */
  public void addAll(Credentials other) {
+    addAll(other, true);
+  }
+
+  /**
+   * Copy all of the credentials from one credential object into another.
+   * Existing secrets and tokens are not overwritten.
+   * @param other the credentials to copy
+   */
+  public void mergeAll(Credentials other) {
+    addAll(other, false);
+  }
+
+  private void addAll(Credentials other, boolean overwrite) {
    for(Map.Entry<Text, byte[]> secret: other.secretKeysMap.entrySet()) {
-      secretKeysMap.put(secret.getKey(), secret.getValue());
+      Text key = secret.getKey();
+      if (!secretKeysMap.containsKey(key) || overwrite) {
+        secretKeysMap.put(key, secret.getValue());
+      }
    }
    for(Map.Entry<Text, Token<?>> token: other.tokenMap.entrySet()){
-      tokenMap.put(token.getKey(), token.getValue());
+      Text key = token.getKey();
+      if (!tokenMap.containsKey(key) || overwrite) {
+        tokenMap.put(key, token.getValue());
+      }
    }
  }
 }

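The difference between addAll() and the new mergeAll() is simply whether existing entries win. A short illustrative sketch (the alias and values are made up):

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.Credentials;

    public class CredentialsMergeDemo {
      public static void main(String[] args) {
        Credentials mine = new Credentials();
        mine.addSecretKey(new Text("alias"), "original".getBytes());

        Credentials theirs = new Credentials();
        theirs.addSecretKey(new Text("alias"), "incoming".getBytes());

        // mergeAll() keeps the existing secret for "alias" ...
        Credentials merged = new Credentials();
        merged.addAll(mine);
        merged.mergeAll(theirs);
        System.out.println(new String(merged.getSecretKey(new Text("alias"))));

        // ... while addAll() overwrites it with the incoming value.
        merged.addAll(theirs);
        System.out.println(new String(merged.getSecretKey(new Text("alias"))));
      }
    }
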
+ 10 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java

@@ -86,17 +86,22 @@ public class ReflectionUtils {
    //invoke configure on theObject
    try {
      Class<?> jobConfClass = 
-        conf.getClassByName("org.apache.hadoop.mapred.JobConf");
+        conf.getClassByNameOrNull("org.apache.hadoop.mapred.JobConf");
+      if (jobConfClass == null) {
+        return;
+      }
+      
      Class<?> jobConfigurableClass = 
-        conf.getClassByName("org.apache.hadoop.mapred.JobConfigurable");
-       if (jobConfClass.isAssignableFrom(conf.getClass()) &&
+        conf.getClassByNameOrNull("org.apache.hadoop.mapred.JobConfigurable");
+      if (jobConfigurableClass == null) {
+        return;
+      }
+      if (jobConfClass.isAssignableFrom(conf.getClass()) &&
            jobConfigurableClass.isAssignableFrom(theObject.getClass())) {
        Method configureMethod = 
          jobConfigurableClass.getMethod("configure", jobConfClass);
        configureMethod.invoke(theObject, conf);
      }
-    } catch (ClassNotFoundException e) {
-      //JobConf/JobConfigurable not in classpath. no need to configure
    } catch (Exception e) {
      throw new RuntimeException("Error in configuring object", e);
    }

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/packages/hadoop-setup-conf.sh

@@ -246,7 +246,7 @@ OPTS=$(getopt \
  -l 'dfs-datanode-dir-perm:' \
  -l 'dfs-block-local-path-access-user:' \
  -l 'dfs-client-read-shortcircuit:' \
-  -l 'dfs-client-read-shortcircuit-skip-checksum:' \   
+  -l 'dfs-client-read-shortcircuit-skip-checksum:' \
  -o 'h' \
  -- "$@") 
  

+ 7 - 0
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -216,6 +216,13 @@
  determine the host, port, etc. for a filesystem.</description>
 </property>

+<property>
+  <name>fs.default.name</name>
+  <value>file:///</value>
+  <description>Deprecated. Use (fs.defaultFS) property
+  instead</description>
+</property>
+
 <property>
  <name>fs.trash.interval</name>
  <value>0</value>

+ 97 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java

@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestFsShellCopy {  
+  static Configuration conf;
+  static FsShell shell; 
+  static LocalFileSystem lfs;
+  static Path testRootDir, srcPath, dstPath;
+  
+  @BeforeClass
+  public static void setup() throws Exception {
+    conf = new Configuration();
+    shell = new FsShell(conf);
+    lfs = FileSystem.getLocal(conf);
+    testRootDir = new Path(
+        System.getProperty("test.build.data","test/build/data"), "testShellCopy");
+    lfs.mkdirs(testRootDir);    
+    srcPath = new Path(testRootDir, "srcFile");
+    dstPath = new Path(testRootDir, "dstFile");
+  }
+  
+  @Before
+  public void prepFiles() throws Exception {
+    lfs.setVerifyChecksum(true);
+    lfs.setWriteChecksum(true);
+    
+    lfs.delete(srcPath, true);
+    lfs.delete(dstPath, true);
+    FSDataOutputStream out = lfs.create(srcPath);
+    out.writeChars("hi");
+    out.close();
+    assertTrue(lfs.exists(lfs.getChecksumFile(srcPath)));
+  }
+
+  @Test
+  public void testCopyNoCrc() throws Exception {
+    shellRun(0, "-get", srcPath.toString(), dstPath.toString());
+    checkPath(dstPath, false);
+  }
+
+  @Test
+  public void testCopyCrc() throws Exception {
+    shellRun(0, "-get", "-crc", srcPath.toString(), dstPath.toString());
+    checkPath(dstPath, true);
+  }
+
+  
+  @Test
+  public void testCorruptedCopyCrc() throws Exception {
+    FSDataOutputStream out = lfs.getRawFileSystem().create(srcPath);
+    out.writeChars("bang");
+    out.close();
+    shellRun(1, "-get", srcPath.toString(), dstPath.toString());
+  }
+
+  @Test
+  public void testCorruptedCopyIgnoreCrc() throws Exception {
+    shellRun(0, "-get", "-ignoreCrc", srcPath.toString(), dstPath.toString());
+    checkPath(dstPath, false);
+  }
+
+  private void checkPath(Path p, boolean expectChecksum) throws IOException {
+    assertTrue(lfs.exists(p));
+    boolean hasChecksum = lfs.exists(lfs.getChecksumFile(p));
+    assertEquals(expectChecksum, hasChecksum);
+  }
+
+  private void shellRun(int n, String ... args) throws Exception {
+    assertEquals(n, shell.run(args));
+  }
+}

+ 3 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java

@@ -37,15 +37,15 @@ public class TestFSMainOperationsLocalFileSystem extends FSMainOperationsBaseTes
  public void setUp() throws Exception {
    Configuration conf = new Configuration();
    fcTarget = FileSystem.getLocal(conf);
-    fSys = ViewFileSystemTestSetup.setupForViewFs(
-        ViewFileSystemTestSetup.configWithViewfsScheme(), fcTarget);
+    fSys = ViewFileSystemTestSetup.setupForViewFileSystem(
+        ViewFileSystemTestSetup.createConfig(), fcTarget);
    super.setUp();
  }
  
  @After
  public void tearDown() throws Exception {
    super.tearDown();
-    ViewFileSystemTestSetup.tearDownForViewFs(fcTarget);
+    ViewFileSystemTestSetup.tearDown(fcTarget);
  }
  
  @Test

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java

@@ -40,12 +40,12 @@ public class TestViewFileSystemDelegation { //extends ViewFileSystemTestSetup {
 
  @BeforeClass
  public static void setup() throws Exception {
-    conf = ViewFileSystemTestSetup.configWithViewfsScheme();    
+    conf = ViewFileSystemTestSetup.createConfig();
    fs1 = setupFileSystem(new URI("fs1:/"), FakeFileSystem.class);
    fs2 = setupFileSystem(new URI("fs2:/"), FakeFileSystem.class);
    viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
  }
-  
+
  static FakeFileSystem setupFileSystem(URI uri, Class clazz)
      throws Exception {
    String scheme = uri.getScheme();

+ 5 - 39
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java

@@ -35,7 +35,6 @@ import org.mortbay.log.Log;
public class TestViewFsTrash {
  FileSystem fsTarget;  // the target file system - the mount will point here
  FileSystem fsView;
-  Path targetTestRoot;
  Configuration conf;

  static class TestLFS extends LocalFileSystem {
@@ -55,52 +54,19 @@ public class TestViewFsTrash {
  @Before
  public void setUp() throws Exception {
    fsTarget = FileSystem.getLocal(new Configuration());
-    targetTestRoot = FileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
-    // In case previous test was killed before cleanup
-    fsTarget.delete(targetTestRoot, true);
-    // cleanup trash from previous run if it stuck around
-    fsTarget.delete(new Path(fsTarget.getHomeDirectory(), ".Trash/Current"),
-        true);
-    
-    fsTarget.mkdirs(targetTestRoot);
-    fsTarget.mkdirs(new Path(targetTestRoot,"dir1"));
-    
-    
-    // Now we use the mount fs to set links to user and dir
-    // in the test root
-    
-    // Set up the defaultMT in the config with our mount point links
-
-
-    conf = ViewFileSystemTestSetup.configWithViewfsScheme();
-    
-    // create a link for home directory so that trash path works
-    // set up viewfs's home dir root to point to home dir root on target
-    // But home dir is different on linux, mac etc.
-    // Figure it out by calling home dir on target
-
-    String homeDirRoot = fsTarget.getHomeDirectory()
-        .getParent().toUri().getPath();
-    ConfigUtil.addLink(conf, homeDirRoot,
-        fsTarget.makeQualified(new Path(homeDirRoot)).toUri());
-    ConfigUtil.setHomeDirConf(conf, homeDirRoot);
-    Log.info("Home dir base " + homeDirRoot);
-
-    fsView = ViewFileSystemTestSetup.setupForViewFs(conf, fsTarget);
-
-    // set working dir so that relative paths
-    //fsView.setWorkingDirectory(new Path(fsTarget.getWorkingDirectory().toUri().getPath()));
+    fsTarget.mkdirs(new Path(FileSystemTestHelper.
+        getTestRootPath(fsTarget), "dir1"));
+    conf = ViewFileSystemTestSetup.createConfig();
+    fsView = ViewFileSystemTestSetup.setupForViewFileSystem(conf, fsTarget);
    conf.set("fs.defaultFS", FsConstants.VIEWFS_URI.toString());
  }
 
-
  @After
  public void tearDown() throws Exception {
-    fsTarget.delete(targetTestRoot, true);
+    ViewFileSystemTestSetup.tearDown(fsTarget);
    fsTarget.delete(new Path(fsTarget.getHomeDirectory(), ".Trash/Current"),
        true);
  }
-
  
  @Test
  public void testTrash() throws IOException {

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java

@@ -89,7 +89,7 @@ public class ViewFileSystemBaseTest {
    
    // Set up the defaultMT in the config with our mount point links
    //Configuration conf = new Configuration();
-    conf = ViewFileSystemTestSetup.configWithViewfsScheme();
+    conf = ViewFileSystemTestSetup.createConfig();
    setupMountPoints();
    fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
  }

+ 13 - 23
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java

@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.viewfs.ConfigUtil;
+import org.mortbay.log.Log;


 /**
@@ -46,32 +47,21 @@ public class ViewFileSystemTestSetup {
   * @return return the ViewFS File context to be used for tests
   * @throws Exception
   */
-  static public FileSystem setupForViewFs(Configuration conf, FileSystem fsTarget) throws Exception {
+  static public FileSystem setupForViewFileSystem(Configuration conf, FileSystem fsTarget) throws Exception {
    /**
     * create the test root on local_fs - the  mount table will point here
     */
-    Path targetOfTests = FileSystemTestHelper.getTestRootPath(fsTarget);
-    // In case previous test was killed before cleanup
-    fsTarget.delete(targetOfTests, true);
-    
-    fsTarget.mkdirs(targetOfTests);
-  
+    fsTarget.mkdirs(FileSystemTestHelper.getTestRootPath(fsTarget));
+
+    // viewFs://home => fsTarget://home
+    String homeDirRoot = fsTarget.getHomeDirectory()
+        .getParent().toUri().getPath();
+    ConfigUtil.addLink(conf, homeDirRoot,
+        fsTarget.makeQualified(new Path(homeDirRoot)).toUri());
+    ConfigUtil.setHomeDirConf(conf, homeDirRoot);
+    Log.info("Home dir base " + homeDirRoot);

-    // Now set up a link from viewfs to targetfs for the first component of
-    // path of testdir. For example, if testdir is /user/<userid>/xx then
-    // a link from /user to targetfs://user.
-    
-    String testDir = FileSystemTestHelper.getTestRootPath(fsTarget).toUri().getPath();
-    int indexOf2ndSlash = testDir.indexOf('/', 1);
-    String testDirFirstComponent = testDir.substring(0, indexOf2ndSlash);
-    
-    
-    ConfigUtil.addLink(conf, testDirFirstComponent,
-        fsTarget.makeQualified(new Path(testDirFirstComponent)).toUri()); 
-    
    FileSystem fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
-    //System.out.println("SRCOfTests = "+ getTestRootPath(fs, "test"));
-    //System.out.println("TargetOfTests = "+ targetOfTests.toUri());
    return fsView;
  }

@@ -79,12 +69,12 @@ public class ViewFileSystemTestSetup {
   * 
   * delete the test directory in the target  fs
   */
-  static public void tearDownForViewFs(FileSystem fsTarget) throws Exception {
+  static public void tearDown(FileSystem fsTarget) throws Exception {
    Path targetOfTests = FileSystemTestHelper.getTestRootPath(fsTarget);
    fsTarget.delete(targetOfTests, true);
  }
  
-  public static Configuration configWithViewfsScheme() {
+  public static Configuration createConfig() {
    Configuration conf = new Configuration();
    conf.set("fs.viewfs.impl", ViewFileSystem.class.getName());
    return conf; 
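A hedged sketch of how a test can consume the renamed helpers (createConfig, setupForViewFileSystem, tearDown); the test class name and the directory created under the home mount are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.viewfs.ViewFileSystemTestSetup;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;
    import static org.junit.Assert.assertTrue;

    public class ExampleViewFsTest {
      private FileSystem fsTarget;   // backing local file system
      private FileSystem fsView;     // viewfs mounted over the target

      @Before
      public void setUp() throws Exception {
        fsTarget = FileSystem.getLocal(new Configuration());
        Configuration conf = ViewFileSystemTestSetup.createConfig();
        fsView = ViewFileSystemTestSetup.setupForViewFileSystem(conf, fsTarget);
      }

      @Test
      public void testMkdirUnderHomeMount() throws Exception {
        // the home directory root is linked by the setup helper, so paths
        // under it resolve through the view file system
        Path home = fsView.getHomeDirectory();
        assertTrue(fsView.mkdirs(new Path(home, "exampleDir")));
      }

      @After
      public void tearDown() throws Exception {
        ViewFileSystemTestSetup.tearDown(fsTarget);
      }
    }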

+ 26 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/StaticMapping.java

@@ -21,8 +21,10 @@ import org.apache.hadoop.conf.Configuration;

 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;

 /**
  * Implements the {@link DNSToSwitchMapping} via static mappings. Used
@@ -34,6 +36,10 @@ import java.util.Map;
  * When an instance of the class has its {@link #setConf(Configuration)}
  * method called, nodes listed in the configuration will be added to the map.
  * These do not get removed when the instance is garbage collected.
+ *
+ * The switch mapping policy of this class is the same as for the
+ * {@link ScriptBasedMapping} - the presence of a non-empty topology script.
+ * The script itself is not used.
  */
 public class StaticMapping extends AbstractDNSToSwitchMapping  {

@@ -109,12 +115,30 @@ public class StaticMapping extends AbstractDNSToSwitchMapping  {
  }

  /**
-   * Declare that this mapping is always multi-switch
+   * The switch policy of this mapping is driven by the same policy
+   * as the Scripted mapping: the presence of the script name in
+   * the configuration file
   * @return false, always
   */
  @Override
  public boolean isSingleSwitch() {
-    return false;
+    return isSingleSwitchByScriptPolicy();
+  }
+
+  /**
+   * Get a copy of the map (for diagnostics)
+   * @return a clone of the map or null for none known
+   */
+  @Override
+  public Map<String, String> getSwitchMap() {
+    synchronized (nameToRackMap) {
+      return new HashMap<String, String>(nameToRackMap);
+    }
+  }
+
+  @Override
+  public String toString() {
+    return "static mapping with single switch = " + isSingleSwitch();
  }

  /**
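A short sketch of the new policy, assuming only the keys visible in this commit; StaticMapping is the test-side mapper and the script name is never executed, so both are purely illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;
    import org.apache.hadoop.net.StaticMapping;

    public class StaticMappingPolicyExample {
      public static void main(String[] args) {
        // No topology script configured: the mapping reports a single switch.
        StaticMapping noScript = new StaticMapping();
        noScript.setConf(new Configuration());
        System.out.println(noScript.isSingleSwitch());   // expected: true

        // A (never executed) script name flips the policy to multi-switch.
        Configuration conf = new Configuration();
        conf.set(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "topo.sh");
        StaticMapping withScript = new StaticMapping();
        withScript.setConf(conf);
        System.out.println(withScript.isSingleSwitch()); // expected: false
      }
    }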

+ 171 - 34
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestStaticMapping.java

@@ -18,22 +18,27 @@

 package org.apache.hadoop.net;

+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.junit.Assert;
 import org.junit.Test;

 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;

 /**
  * Test the static mapping class.
  * Because the map is actually static, this map needs to be reset for every test
  */
 public class TestStaticMapping extends Assert {
+  private static final Log LOG = LogFactory.getLog(TestStaticMapping.class);

  /**
   * Reset the map then create a new instance of the {@link StaticMapping}
-   * class
+   * class with a null configuration
   * @return a new instance
   */
  private StaticMapping newInstance() {
@@ -41,63 +46,195 @@ public class TestStaticMapping extends Assert {
    return new StaticMapping();
  }

-  @Test
-  public void testStaticIsSingleSwitch() throws Throwable {
+
+  /**
+   * Reset the map then create a new instance of the {@link StaticMapping}
+   * class with the topology script in the configuration set to
+   * the parameter
+   * @param script a (never executed) script, can be null
+   * @return a new instance
+   */
+  private StaticMapping newInstance(String script) {
    StaticMapping mapping = newInstance();
-    assertFalse("Empty maps should not be not single switch",
-                mapping.isSingleSwitch());
+    mapping.setConf(createConf(script));
+    return mapping;
  }

+  /**
+   * Create a configuration with a specific topology script
+   * @param script a (never executed) script, can be null
+   * @return a configuration
+   */
+  private Configuration createConf(String script) {
+    Configuration conf = new Configuration();
+    if (script != null) {
+      conf.set(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
+               script);
+    } else {
+      conf.unset(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY);
+    }
+    return conf;
+  }
+
+  private void assertSingleSwitch(DNSToSwitchMapping mapping) {
+    assertEquals("Expected a single switch mapping "
+                     + mapping,
+                 true,
+                 AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
+  }
+
+  private void assertMultiSwitch(DNSToSwitchMapping mapping) {
+    assertEquals("Expected a multi switch mapping "
+                     + mapping,
+                 false,
+                 AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
+  }
+
+  protected void assertMapSize(AbstractDNSToSwitchMapping switchMapping, int expectedSize) {
+    assertEquals(
+        "Expected two entries in the map " + switchMapping.dumpTopology(),
+        expectedSize, switchMapping.getSwitchMap().size());
+  }
+
+  private List<String> createQueryList() {
+    List<String> l1 = new ArrayList<String>(2);
+    l1.add("n1");
+    l1.add("unknown");
+    return l1;
+  }

  @Test
-  public void testCachingRelaysQueries() throws Throwable {
-    StaticMapping staticMapping = newInstance();
-    CachedDNSToSwitchMapping mapping =
-        new CachedDNSToSwitchMapping(staticMapping);
-    StaticMapping.addNodeToRack("n1", "r1");
-    assertFalse("Expected multi switch", mapping.isSingleSwitch());
+  public void testStaticIsSingleSwitchOnNullScript() throws Throwable {
+    StaticMapping mapping = newInstance(null);
+    mapping.setConf(createConf(null));
+    assertSingleSwitch(mapping);
+  }
+
+  @Test
+  public void testStaticIsMultiSwitchOnScript() throws Throwable {
+    StaticMapping mapping = newInstance("ls");
+    assertMultiSwitch(mapping);
  }

  @Test
  public void testAddResolveNodes() throws Throwable {
    StaticMapping mapping = newInstance();
-    StaticMapping.addNodeToRack("n1", "r1");
-    List<String> l1 = new ArrayList<String>(2);
-    l1.add("n1");
-    l1.add("unknown");
-    List<String> mappings = mapping.resolve(l1);
-    assertEquals(2, mappings.size());
-    assertEquals("r1", mappings.get(0));
-    assertEquals(NetworkTopology.DEFAULT_RACK, mappings.get(1));
-    assertFalse("Mapping is still single switch", mapping.isSingleSwitch());
+    StaticMapping.addNodeToRack("n1", "/r1");
+    List<String> queryList = createQueryList();
+    List<String> resolved = mapping.resolve(queryList);
+    assertEquals(2, resolved.size());
+    assertEquals("/r1", resolved.get(0));
+    assertEquals(NetworkTopology.DEFAULT_RACK, resolved.get(1));
+    // get the switch map and examine it
+    Map<String, String> switchMap = mapping.getSwitchMap();
+    String topology = mapping.dumpTopology();
+    LOG.info(topology);
+    assertEquals(topology, 1, switchMap.size());
+    assertEquals(topology, "/r1", switchMap.get("n1"));
  }

+  /**
+   * Verify that a configuration string builds a topology
+   */
  @Test
  public void testReadNodesFromConfig() throws Throwable {
    StaticMapping mapping = newInstance();
    Configuration conf = new Configuration();
-    conf.set(StaticMapping.KEY_HADOOP_CONFIGURED_NODE_MAPPING, "n1=r1,n2=r2");
+    conf.set(StaticMapping.KEY_HADOOP_CONFIGURED_NODE_MAPPING, "n1=/r1,n2=/r2");
    mapping.setConf(conf);
+    //even though we have inserted elements into the list, because 
+    //it is driven by the script key in the configuration, it still
+    //thinks that it is single rack
+    assertSingleSwitch(mapping);
    List<String> l1 = new ArrayList<String>(3);
    l1.add("n1");
    l1.add("unknown");
    l1.add("n2");
-    List<String> mappings = mapping.resolve(l1);
-    assertEquals(3, mappings.size());
-    assertEquals("r1", mappings.get(0));
-    assertEquals(NetworkTopology.DEFAULT_RACK, mappings.get(1));
-    assertEquals("r2", mappings.get(2));
-    assertFalse("Expected to be multi switch",
-                AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
+    List<String> resolved = mapping.resolve(l1);
+    assertEquals(3, resolved.size());
+    assertEquals("/r1", resolved.get(0));
+    assertEquals(NetworkTopology.DEFAULT_RACK, resolved.get(1));
+    assertEquals("/r2", resolved.get(2));
+
+    Map<String, String> switchMap = mapping.getSwitchMap();
+    String topology = mapping.dumpTopology();
+    LOG.info(topology);
+    assertEquals(topology, 2, switchMap.size());
+    assertEquals(topology, "/r1", switchMap.get("n1"));
+    assertNull(topology, switchMap.get("unknown"));
  }

+
+  /**
+   * Verify that if the inner mapping is single-switch, so is the cached one
+   * @throws Throwable on any problem
+   */
  @Test
-  public void testNullConfiguration() throws Throwable {
+  public void testCachingRelaysSingleSwitchQueries() throws Throwable {
+    //create a single switch map
+    StaticMapping staticMapping = newInstance(null);
+    assertSingleSwitch(staticMapping);
+    CachedDNSToSwitchMapping cachedMap =
+        new CachedDNSToSwitchMapping(staticMapping);
+    LOG.info("Mapping: " + cachedMap + "\n" + cachedMap.dumpTopology());
+    assertSingleSwitch(cachedMap);
+  }
+
+  /**
+   * Verify that if the inner mapping is multi-switch, so is the cached one
+   * @throws Throwable on any problem
+   */
+  @Test
+  public void testCachingRelaysMultiSwitchQueries() throws Throwable {
+    StaticMapping staticMapping = newInstance("top");
+    assertMultiSwitch(staticMapping);
+    CachedDNSToSwitchMapping cachedMap =
+        new CachedDNSToSwitchMapping(staticMapping);
+    LOG.info("Mapping: " + cachedMap + "\n" + cachedMap.dumpTopology());
+    assertMultiSwitch(cachedMap);
+  }
+
+
+  /**
+   * This test verifies that resolution queries get relayed to the inner mapping
+   * @throws Throwable on any problem
+   */
+  @Test
+  public void testCachingRelaysResolveQueries() throws Throwable {
    StaticMapping mapping = newInstance();
-    mapping.setConf(null);
-    assertFalse("Null maps are expected to be multi switch",
-                mapping.isSingleSwitch());
-    assertFalse("Expected to be multi switch",
-               AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
+    mapping.setConf(createConf("top"));
+    StaticMapping staticMapping = mapping;
+    CachedDNSToSwitchMapping cachedMap =
+        new CachedDNSToSwitchMapping(staticMapping);
+    assertMapSize(cachedMap, 0);
+    //add a node to the static map
+    StaticMapping.addNodeToRack("n1", "/r1");
+    //verify it is there
+    assertMapSize(staticMapping, 1);
+    //verify that the cache hasn't picked it up yet
+    assertMapSize(cachedMap, 0);
+    //now relay the query
+    cachedMap.resolve(createQueryList());
+    //and verify the cache is no longer empty
+    assertMapSize(cachedMap, 2);
+  }
+
+  /**
+   * This test verifies that negative (unresolved) results are cached by the outer map
+   * @throws Throwable on any problem
+   */
+  @Test
+  public void testCachingCachesNegativeEntries() throws Throwable {
+    StaticMapping staticMapping = newInstance();
+    CachedDNSToSwitchMapping cachedMap =
+        new CachedDNSToSwitchMapping(staticMapping);
+    assertMapSize(cachedMap, 0);
+    assertMapSize(staticMapping, 0);
+    List<String> resolved = cachedMap.resolve(createQueryList());
+    //and verify the cache is no longer empty while the static map is
+    assertMapSize(staticMapping, 0);
+    assertMapSize(cachedMap, 2);
  }
+
+
 }

+ 69 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSwitchMapping.java

@@ -18,6 +18,8 @@

 package org.apache.hadoop.net;

+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.junit.Assert;
 import org.junit.Test;

@@ -28,22 +30,87 @@ import java.util.List;
  */
 public class TestSwitchMapping extends Assert {

+
+  /**
+   * Verify the switch mapping query handles arbitrary DNSToSwitchMapping
+   * implementations
+   *
+   * @throws Throwable on any problem
+   */
   @Test
  @Test
  public void testStandaloneClassesAssumedMultiswitch() throws Throwable {
    DNSToSwitchMapping mapping = new StandaloneSwitchMapping();
-    assertFalse("Expected to be multi switch",
+    assertFalse("Expected to be multi switch " + mapping,
                AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
  }


+   * Verify the cached mapper delegates the switch mapping query to the inner
+   * mapping, which again handles arbitrary DNSToSwitchMapping implementations
+   *
+   * @throws Throwable on any problem
+   */
   @Test
  @Test
  public void testCachingRelays() throws Throwable {
    CachedDNSToSwitchMapping mapping =
        new CachedDNSToSwitchMapping(new StandaloneSwitchMapping());
-    assertFalse("Expected to be multi switch",
+    assertFalse("Expected to be multi switch " + mapping,
                mapping.isSingleSwitch());
  }

+  /**
+   * Verify the cached mapper delegates the switch mapping query to the inner
+   * mapping, which again handles arbitrary DNSToSwitchMapping implementations
+   *
+   * @throws Throwable on any problem
+   */
+  @Test
+  public void testCachingRelaysStringOperations() throws Throwable {
+    Configuration conf = new Configuration();
+    String scriptname = "mappingscript.sh";
+    conf.set(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
+             scriptname);
+    ScriptBasedMapping scriptMapping = new ScriptBasedMapping(conf);
+    assertTrue("Did not find " + scriptname + " in " + scriptMapping,
+               scriptMapping.toString().contains(scriptname));
+    CachedDNSToSwitchMapping mapping =
+        new CachedDNSToSwitchMapping(scriptMapping);
+    assertTrue("Did not find " + scriptname + " in " + mapping,
+               mapping.toString().contains(scriptname));
+  }
+
+  /**
+   * Verify the cached mapper delegates the switch mapping query to the inner
+   * mapping, which again handles arbitrary DNSToSwitchMapping implementations
+   *
+   * @throws Throwable on any problem
+   */
+  @Test
+  public void testCachingRelaysStringOperationsToNullScript() throws Throwable {
+    Configuration conf = new Configuration();
+    ScriptBasedMapping scriptMapping = new ScriptBasedMapping(conf);
+    assertTrue("Did not find " + ScriptBasedMapping.NO_SCRIPT
+                   + " in " + scriptMapping,
+               scriptMapping.toString().contains(ScriptBasedMapping.NO_SCRIPT));
+    CachedDNSToSwitchMapping mapping =
+        new CachedDNSToSwitchMapping(scriptMapping);
+    assertTrue("Did not find " + ScriptBasedMapping.NO_SCRIPT
+                   + " in " + mapping,
+               mapping.toString().contains(ScriptBasedMapping.NO_SCRIPT));
+  }
+
+  @Test
+  public void testNullMapping() {
+    assertFalse(AbstractDNSToSwitchMapping.isMappingSingleSwitch(null));
+  }
+
+  /**
+   * This class does not extend the abstract switch mapping, and verifies that
+   * the switch mapping logic assumes that this is multi switch
+   */
+
  private static class StandaloneSwitchMapping implements DNSToSwitchMapping {
    @Override
    public List<String> resolve(List<String> names) {

+ 77 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCredentials.java

@@ -137,4 +137,81 @@ public class TestCredentials {
    }
    tmpFileName.delete();
  }
+
+  static Text secret[] = {
+      new Text("secret1"),
+      new Text("secret2"),
+      new Text("secret3"),
+      new Text("secret4")
+  };
+  static Text service[] = {
+      new Text("service1"),
+      new Text("service2"),
+      new Text("service3"),
+      new Text("service4")
+  };
+  static Token<?> token[] = {
+      new Token<TokenIdentifier>(),
+      new Token<TokenIdentifier>(),
+      new Token<TokenIdentifier>(),
+      new Token<TokenIdentifier>()
+  };
+  
+  @Test
+  public void addAll() {
+    Credentials creds = new Credentials();
+    creds.addToken(service[0], token[0]);
+    creds.addToken(service[1], token[1]);
+    creds.addSecretKey(secret[0], secret[0].getBytes());
+    creds.addSecretKey(secret[1], secret[1].getBytes());
+
+    Credentials credsToAdd = new Credentials();
+    // one duplicate with different value, one new
+    credsToAdd.addToken(service[0], token[3]);
+    credsToAdd.addToken(service[2], token[2]);
+    credsToAdd.addSecretKey(secret[0], secret[3].getBytes());
+    credsToAdd.addSecretKey(secret[2], secret[2].getBytes());
+    
+    creds.addAll(credsToAdd);
+    assertEquals(3, creds.numberOfTokens());
+    assertEquals(3, creds.numberOfSecretKeys());
+    // existing token & secret should be overwritten
+    assertEquals(token[3], creds.getToken(service[0]));
+    assertEquals(secret[3], new Text(creds.getSecretKey(secret[0])));
+    // non-duplicate token & secret should be present
+    assertEquals(token[1], creds.getToken(service[1]));
+    assertEquals(secret[1], new Text(creds.getSecretKey(secret[1])));
+    // new token & secret should be added
+    assertEquals(token[2], creds.getToken(service[2]));
+    assertEquals(secret[2], new Text(creds.getSecretKey(secret[2])));
+  }
+
+  @Test
+  public void mergeAll() {
+    Credentials creds = new Credentials();
+    creds.addToken(service[0], token[0]);
+    creds.addToken(service[1], token[1]);
+    creds.addSecretKey(secret[0], secret[0].getBytes());
+    creds.addSecretKey(secret[1], secret[1].getBytes());
+    
+    Credentials credsToAdd = new Credentials();
+    // one duplicate with different value, one new
+    credsToAdd.addToken(service[0], token[3]);
+    credsToAdd.addToken(service[2], token[2]);
+    credsToAdd.addSecretKey(secret[0], secret[3].getBytes());
+    credsToAdd.addSecretKey(secret[2], secret[2].getBytes());
+    
+    creds.mergeAll(credsToAdd);
+    assertEquals(3, creds.numberOfTokens());
+    assertEquals(3, creds.numberOfSecretKeys());
+    // existing token & secret should not be overwritten
+    assertEquals(token[0], creds.getToken(service[0]));
+    assertEquals(secret[0], new Text(creds.getSecretKey(secret[0])));
+    // non-duplicate token & secret should be present
+    assertEquals(token[1], creds.getToken(service[1]));
+    assertEquals(secret[1], new Text(creds.getSecretKey(secret[1])));
+    // new token & secret should be added
+    assertEquals(token[2], creds.getToken(service[2]));
+    assertEquals(secret[2], new Text(creds.getSecretKey(secret[2])));
 }
+}
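A hedged sketch of the addAll/mergeAll contract the two tests above verify, using only secret keys to stay self-contained; the alias and values are illustrative:

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.Credentials;

    public class CredentialsMergeExample {
      public static void main(String[] args) {
        Credentials base = new Credentials();
        base.addSecretKey(new Text("alias"), "old".getBytes());

        Credentials incoming = new Credentials();
        incoming.addSecretKey(new Text("alias"), "new".getBytes());

        Credentials overwritten = new Credentials();
        overwritten.addAll(base);
        overwritten.addAll(incoming);   // addAll: a duplicate alias is replaced
        System.out.println(new String(overwritten.getSecretKey(new Text("alias")))); // "new"

        Credentials preserved = new Credentials();
        preserved.addAll(base);
        preserved.mergeAll(incoming);   // mergeAll: the existing alias is kept
        System.out.println(new String(preserved.getSecretKey(new Text("alias"))));   // "old"
      }
    }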

+ 2 - 2
hadoop-common-project/pom.xml

@@ -17,12 +17,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>0.23.1</version>
+    <version>0.23.2-SNAPSHOT</version>
    <relativePath>../hadoop-project</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-common-project</artifactId>
-  <version>0.23.1</version>
+  <version>0.23.2-SNAPSHOT</version>
  <description>Apache Hadoop Common Project</description>
  <name>Apache Hadoop Common Project</name>
  <packaging>pom</packaging>

+ 2 - 2
hadoop-dist/pom.xml

@@ -17,12 +17,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>0.23.1</version>
+    <version>0.23.2-SNAPSHOT</version>
    <relativePath>../hadoop-project</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-dist</artifactId>
-  <version>0.23.1</version>
+  <version>0.23.2-SNAPSHOT</version>
  <description>Apache Hadoop Distribution</description>
  <name>Apache Hadoop Distribution</name>
  <packaging>jar</packaging>

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml

@@ -19,12 +19,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>0.23.1</version>
+    <version>0.23.2-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-hdfs-httpfs</artifactId>
-  <version>0.23.1</version>
+  <version>0.23.2-SNAPSHOT</version>
  <packaging>war</packaging>

  <name>Apache Hadoop HttpFS</name>

+ 10 - 15
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/site.xml

@@ -14,21 +14,16 @@
-->
<project name="HttpFS">

-  <version position="right"/>
+    <skin>
+      <groupId>org.apache.maven.skins</groupId>
+      <artifactId>maven-stylus-skin</artifactId>
+      <version>1.2</version>
+    </skin>

-  <bannerLeft>
-    <name>&nbsp;</name>
-  </bannerLeft>
-
-  <skin>
-    <groupId>org.apache.maven.skins</groupId>
-    <artifactId>maven-stylus-skin</artifactId>
-    <version>1.2</version>
-  </skin>
-
-  <body>
-    <links>
-    </links>
-  </body>
+    <body>
+      <links>
+        <item name="Apache Hadoop" href="http://hadoop.apache.org/"/>
+      </links>
+    </body>

</project>

+ 84 - 5
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -1,3 +1,87 @@
+Hadoop HDFS Change Log
+
+Release 0.23.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+Release 0.23.2 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+    HDFS-2887. FSVolume, is a part of FSDatasetInterface implementation, should
+    not be referred outside FSDataset.  A new FSVolumeInterface is defined.
+    The BlockVolumeChoosingPolicy.chooseVolume(..) method signature is also
+    updated.  (szetszwo)
+
+  NEW FEATURES
+
+    HDFS-2943. Expose last checkpoint time and transaction stats as JMX
+    metrics. (atm)
+
+  IMPROVEMENTS
+
+    HDFS-2931. Switch DataNode's BlockVolumeChoosingPolicy to private-audience.
+    (harsh via szetszwo)
+
+    HDFS-2655. BlockReaderLocal#skip performs unnecessary IO. (Brandon Li 
+    via jitendra)
+
+    HDFS-2725. hdfs script usage information is missing the information 
+    about "dfs" command (Prashant Sharma via stevel)
+
+    HDFS-2907.  Add a conf property dfs.datanode.fsdataset.factory to make
+    FSDataset in Datanode pluggable.  (szetszwo)
+
+    HDFS-2985. Improve logging when replicas are marked as corrupt. (todd)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+    HDFS-2923. Namenode IPC handler count uses the wrong configuration key
+    (todd)
+
+    HDFS-2764. TestBackupNode is racy. (atm)
+
+    HDFS-2869. Fix an error in the webhdfs docs for the mkdir op (harsh)
+
+    HDFS-776.  Fix exception handling in Balancer.  (Uma Maheswara Rao G
+    via szetszwo)
+
+    HDFS-2815. Namenode sometimes does not come out of safemode during 
+    NN crash + restart. (Uma Maheswara Rao via suresh)
+
+    HDFS-2950. Secondary NN HTTPS address should be listed as a
+    NAMESERVICE_SPECIFIC_KEY. (todd)
+
+    HDFS-2525. Race between BlockPoolSliceScanner and append. (Brandon Li
+    via jitendra)
+
+    HDFS-2938. Recursive delete of a large directory make namenode
+    unresponsive. (Hari Mankude via suresh)
+
+    HDFS-2969. ExtendedBlock.equals is incorrectly implemented (todd)
+
+    HDFS-2944. Typo in hdfs-default.xml causes
+    dfs.client.block.write.replace-datanode-on-failure.enable to be mistakenly
+    disabled. (atm)
+
+    HDFS-2981. In hdfs-default.xml, the default value of
+    dfs.client.block.write.replace-datanode-on-failure.enable should be true.
+    (szetszwo)
+
+    HDFS-3008. Negative caching of local addrs doesn't work. (eli)
+
+    HDFS-3006. In WebHDFS, when the return body is empty, set the Content-Type
+    to application/octet-stream instead of application/json.  (szetszwo)
+
Release 0.23.1 - 2012-02-17 

  INCOMPATIBLE CHANGES
@@ -229,11 +313,6 @@ Release 0.23.1 - 2012-02-17
    HDFS-2893. The start/stop scripts don't start/stop the 2NN when
    using the default configuration. (eli)

-    HDFS-2923. Namenode IPC handler count uses the wrong configuration key.
-    (Todd Lipcon)
-
-    HDFS-2869. Fix an error in the webhdfs docs for the mkdir op (harsh)
-
Release 0.23.0 - 2011-11-01 

  INCOMPATIBLE CHANGES

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -17,12 +17,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project-dist</artifactId>
-    <version>0.23.1</version>
+    <version>0.23.2-SNAPSHOT</version>
    <relativePath>../../hadoop-project-dist</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-hdfs</artifactId>
-  <version>0.23.1</version>
+  <version>0.23.2-SNAPSHOT</version>
  <description>Apache Hadoop HDFS</description>
  <name>Apache Hadoop HDFS</name>
  <packaging>jar</packaging>

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs

@@ -26,6 +26,7 @@ HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
function print_usage(){
  echo "Usage: hdfs [--config confdir] COMMAND"
  echo "       where COMMAND is one of:"
+  echo "  dfs                  run a filesystem command on the file systems supported in Hadoop."
  echo "  namenode -format     format the DFS filesystem"
  echo "  secondarynamenode    run the DFS secondary namenode"
  echo "  namenode             run the DFS namenode"

+ 55 - 13
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java

@@ -369,26 +369,68 @@ class BlockReaderLocal implements BlockReader {
    if (LOG.isDebugEnabled()) {
      LOG.debug("skip " + n);
    }
+    if (n <= 0) {
+      return 0;
+    }
    if (!verifyChecksum) {
      return dataIn.skip(n);
    }
-    // Skip by reading the data so we stay in sync with checksums.
-    // This could be implemented more efficiently in the future to
-    // skip to the beginning of the appropriate checksum chunk
-    // and then only read to the middle of that chunk.
+  
+    // caller made sure newPosition is not beyond EOF.
+    int remaining = dataBuff.remaining();
+    int position = dataBuff.position();
+    int newPosition = position + (int)n;
+  
+    // if the new offset is already read into dataBuff, just reposition
+    if (n <= remaining) {
+      assert offsetFromChunkBoundary == 0;
+      dataBuff.position(newPosition);
+      return n;
+    }
+  
+    // for small gap, read through to keep the data/checksum in sync
+    if (n - remaining <= bytesPerChecksum) {
+      dataBuff.position(position + remaining);
+      if (skipBuf == null) {
+        skipBuf = new byte[bytesPerChecksum];
+      }
+      int ret = read(skipBuf, 0, (int)(n - remaining));
+      return ret;
+    }
+  
+    // optimize for big gap: discard the current buffer, skip to
+    // the beginning of the appropriate checksum chunk and then
+    // read to the middle of that chunk to be in sync with checksums.
+    this.offsetFromChunkBoundary = newPosition % bytesPerChecksum;
+    long toskip = n - remaining - this.offsetFromChunkBoundary;
+  
+    dataBuff.clear();
+    checksumBuff.clear();
+  
+    long dataSkipped = dataIn.skip(toskip);
+    if (dataSkipped != toskip) {
+      throw new IOException("skip error in data input stream");
+    }
+    long checkSumOffset = (dataSkipped / bytesPerChecksum) * checksumSize;
+    if (checkSumOffset > 0) {
+      long skipped = checksumIn.skip(checkSumOffset);
+      if (skipped != checkSumOffset) {
+        throw new IOException("skip error in checksum input stream");
+      }
+    }
+
+    // read into the middle of the chunk
    if (skipBuf == null) {
      skipBuf = new byte[bytesPerChecksum];
    }
-    long nSkipped = 0;
-    while ( nSkipped < n ) {
-      int toSkip = (int)Math.min(n-nSkipped, skipBuf.length);
-      int ret = read(skipBuf, 0, toSkip);
-      if ( ret <= 0 ) {
-        return nSkipped;
-      }
-      nSkipped += ret;
+    assert skipBuf.length == bytesPerChecksum;
+    assert this.offsetFromChunkBoundary < bytesPerChecksum;
+    int ret = read(skipBuf, 0, this.offsetFromChunkBoundary);
+    if (ret == -1) {  // EOS
+      return toskip;
+    } else {
+      return (toskip + ret);
    }
-    return nSkipped;
  }

  @Override
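A worked example of the offset arithmetic in the new skip() path, with illustrative numbers (512-byte checksum chunks, 4-byte CRCs); it only reproduces the calculations, not the reader itself:

    public class SkipMathExample {
      public static void main(String[] args) {
        int bytesPerChecksum = 512;   // data bytes covered by one stored checksum
        int checksumSize = 4;         // size of each stored checksum
        int position = 100;           // current read position inside the data buffer
        int remaining = 412;          // buffered bytes left (buffer ends on a chunk boundary)
        long n = 5000;                // bytes the caller asked to skip

        long newPosition = position + n;                                  // 5100
        long offsetFromChunkBoundary = newPosition % bytesPerChecksum;    // 492
        long toskip = n - remaining - offsetFromChunkBoundary;            // 4096 = 8 whole chunks
        long checkSumOffset = (toskip / bytesPerChecksum) * checksumSize; // 32 checksum bytes to skip

        System.out.println(offsetFromChunkBoundary + " " + toskip + " " + checkSumOffset);
        // The reader then re-reads offsetFromChunkBoundary bytes through the
        // checksummed path so the data and checksum streams stay in sync.
      }
    }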

+ 6 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -524,11 +524,12 @@ public class DFSClient implements java.io.Closeable {
   private static boolean isLocalAddress(InetSocketAddress targetAddr) {
  private static boolean isLocalAddress(InetSocketAddress targetAddr) {
    InetAddress addr = targetAddr.getAddress();
    Boolean cached = localAddrMap.get(addr.getHostAddress());
-    if (cached != null && cached) {
+    if (cached != null) {
      if (LOG.isTraceEnabled()) {
-        LOG.trace("Address " + targetAddr + " is local");
+        LOG.trace("Address " + targetAddr +
+                  (cached ? " is local" : " is not local"));
      }
-      return true;
+      return cached;
    }

    // Check if the address is any local or loop back
@@ -543,7 +544,8 @@
      }
    }
    if (LOG.isTraceEnabled()) {
-      LOG.trace("Address " + targetAddr + " is local");
+      LOG.trace("Address " + targetAddr +
+                (local ? " is local" : " is not local"));
    }
    localAddrMap.put(addr.getHostAddress(), local);
    return local;
+ 3 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -86,6 +86,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT = 0;
  public static final int     DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT = 0;
  public static final String  DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY = "dfs.namenode.secondary.http-address";
  public static final String  DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50090";
+  public static final String  DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY = "dfs.namenode.secondary.https-port";
+  public static final int     DFS_NAMENODE_SECONDARY_HTTPS_PORT_DEFAULT = 50490;
  public static final String  DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY = "dfs.namenode.checkpoint.check.period";
  public static final long    DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_DEFAULT = 60;
  public static final String  DFS_NAMENODE_CHECKPOINT_PERIOD_KEY = "dfs.namenode.checkpoint.period";
@@ -165,7 +167,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final String  DFS_CLIENT_RETRY_WINDOW_BASE= "dfs.client.retry.window.base";
  public static final String  DFS_METRICS_SESSION_ID_KEY = "dfs.metrics.session-id";
  public static final String  DFS_DATANODE_HOST_NAME_KEY = "dfs.datanode.hostname";
-  public static final String  DFS_DATANODE_STORAGEID_KEY = "dfs.datanode.StorageId";
  public static final String  DFS_NAMENODE_HOSTS_KEY = "dfs.namenode.hosts";
  public static final String  DFS_NAMENODE_HOSTS_EXCLUDE_KEY = "dfs.namenode.hosts.exclude";
  public static final String  DFS_CLIENT_SOCKET_TIMEOUT_KEY = "dfs.client.socket-timeout";
@@ -209,10 +210,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final int     DFS_DATANODE_NUMBLOCKS_DEFAULT = 64;
  public static final String  DFS_DATANODE_SCAN_PERIOD_HOURS_KEY = "dfs.datanode.scan.period.hours";
  public static final int     DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 0;
-  public static final String  DFS_DATANODE_SIMULATEDDATASTORAGE_KEY = "dfs.datanode.simulateddatastorage";
-  public static final boolean DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT = false;
-  public static final String  DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_KEY = "dfs.datanode.simulateddatastorage.capacity";
-  public static final long    DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_DEFAULT = 2L<<40;
  public static final String  DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = "dfs.datanode.transferTo.allowed";
  public static final boolean DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT = true;
  public static final String  DFS_DATANODE_BLOCKVOLUMECHOICEPOLICY = "dfs.datanode.block.volume.choice.policy";
@@ -280,6 +277,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {

  //Keys with no defaults
  public static final String  DFS_DATANODE_PLUGINS_KEY = "dfs.datanode.plugins";
+  public static final String  DFS_DATANODE_FSDATASET_FACTORY_KEY = "dfs.datanode.fsdataset.factory";
  public static final String  DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY = "dfs.datanode.socket.write.timeout";
  public static final String  DFS_DATANODE_STARTUP_KEY = "dfs.datanode.startup";
  public static final String  DFS_NAMENODE_PLUGINS_KEY = "dfs.namenode.plugins";
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java

@@ -81,13 +81,13 @@ public class HdfsConfiguration extends Configuration {
     deprecate("dfs.safemode.extension", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY);
    deprecate("dfs.safemode.extension", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY);
    deprecate("dfs.safemode.threshold.pct", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY);
    deprecate("dfs.secondary.http.address", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
+    deprecate("dfs.secondary.https.port", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY);
    deprecate("dfs.socket.timeout", DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY);
    deprecate("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
    deprecate("fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
    deprecate("fs.checkpoint.period", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY);
    deprecate("dfs.upgrade.permission", DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_KEY);
    deprecate("heartbeat.recheck.interval", DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY);
-    deprecate("StorageId", DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY);
    deprecate("dfs.https.client.keystore.resource", DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY);
    deprecate("dfs.https.need.client.auth", DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY);
    deprecate("slave.host.name", DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY);
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java

@@ -145,7 +145,7 @@ public class ExtendedBlock implements Writable {
       return false;
      return false;
    }
    ExtendedBlock b = (ExtendedBlock)o;
-    return b.block.equals(block) || b.poolId.equals(poolId);
+    return b.block.equals(block) && b.poolId.equals(poolId);
  }
  
  @Override // Object
+ 16 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java

@@ -122,6 +122,10 @@ class NameNodeConnector {
     if (!isBlockTokenEnabled) {
    if (!isBlockTokenEnabled) {
      return BlockTokenSecretManager.DUMMY_TOKEN;
    } else {
+      if (!shouldRun) {
+        throw new IOException(
+            "Can not get access token. BlockKeyUpdater is not running");
+      }
      return blockTokenSecretManager.generateToken(null, eb,
          EnumSet.of(BlockTokenSecretManager.AccessMode.REPLACE,
          BlockTokenSecretManager.AccessMode.COPY));
@@ -217,16 +221,20 @@
   */
  class BlockKeyUpdater implements Runnable {
    public void run() {
-      while (shouldRun) {
-        try {
-          blockTokenSecretManager.setKeys(namenode.getBlockKeys());
-        } catch (Exception e) {
-          LOG.error("Failed to set keys", e);
-        }
-        try {
+      try {
+        while (shouldRun) {
+          try {
+            blockTokenSecretManager.setKeys(namenode.getBlockKeys());
+          } catch (IOException e) {
+            LOG.error("Failed to set keys", e);
+          }
          Thread.sleep(keyUpdaterInterval);
-        } catch (InterruptedException ie) {
        }
+      } catch (InterruptedException e) {
+        LOG.info("InterruptedException in block key updater thread", e);
+      } catch (Throwable e) {
+        LOG.error("Exception in block key updater thread", e);
+        shouldRun = false;
      }
    }
  }
+ 71 - 28
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -805,9 +805,11 @@ public class BlockManager {
    * Mark the block belonging to datanode as corrupt
   * Mark the block belonging to datanode as corrupt
   * @param blk Block to be marked as corrupt
   * @param dn Datanode which holds the corrupt replica
+   * @param reason a textual reason why the block should be marked corrupt,
+   * for logging purposes
   */
  public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk,
-      final DatanodeInfo dn) throws IOException {
+      final DatanodeInfo dn, String reason) throws IOException {
    namesystem.writeLock();
    try {
      final BlockInfo storedBlock = getStoredBlock(blk.getLocalBlock());
@@ -820,14 +822,15 @@ public class BlockManager {
            + blk + " not found.");
        return;
      }
-      markBlockAsCorrupt(storedBlock, dn);
+      markBlockAsCorrupt(storedBlock, dn, reason);
    } finally {
      namesystem.writeUnlock();
    }
  }

  private void markBlockAsCorrupt(BlockInfo storedBlock,
-                                  DatanodeInfo dn) throws IOException {
+                                  DatanodeInfo dn,
+                                  String reason) throws IOException {
    assert storedBlock != null : "storedBlock should not be null";
    DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
    if (node == null) {
@@ -851,7 +854,7 @@ public class BlockManager {
    node.addBlock(storedBlock);

    // Add this replica to corruptReplicas Map
-    corruptReplicas.addToCorruptReplicasMap(storedBlock, node);
+    corruptReplicas.addToCorruptReplicasMap(storedBlock, node, reason);
    if (countNodes(storedBlock).liveReplicas() >= inode.getReplication()) {
      // the block is over-replicated so invalidate the replicas immediately
      invalidateBlock(storedBlock, node);
@@ -1313,6 +1316,21 @@ public class BlockManager {
      this.reportedState = reportedState;
    }
  }
+  /**
+   * BlockToMarkCorrupt is used to build the "toCorrupt" list, which is a
+   * list of blocks that should be considered corrupt due to a block report.
+   */
+  private static class BlockToMarkCorrupt {
+    final BlockInfo blockInfo;
+    final String reason;
+    
+    BlockToMarkCorrupt(BlockInfo blockInfo, String reason) {
+      super();
+      this.blockInfo = blockInfo;
+      this.reason = reason;
+    }
+  }
 
 
   /**
   /**
    * The given datanode is reporting all its blocks.
    * The given datanode is reporting all its blocks.
@@ -1367,7 +1385,7 @@ public class BlockManager {
     Collection<BlockInfo> toAdd = new LinkedList<BlockInfo>();
     Collection<BlockInfo> toAdd = new LinkedList<BlockInfo>();
     Collection<Block> toRemove = new LinkedList<Block>();
     Collection<Block> toRemove = new LinkedList<Block>();
     Collection<Block> toInvalidate = new LinkedList<Block>();
     Collection<Block> toInvalidate = new LinkedList<Block>();
-    Collection<BlockInfo> toCorrupt = new LinkedList<BlockInfo>();
+    Collection<BlockToMarkCorrupt> toCorrupt = new LinkedList<BlockToMarkCorrupt>();
     Collection<StatefulBlockInfo> toUC = new LinkedList<StatefulBlockInfo>();
     Collection<StatefulBlockInfo> toUC = new LinkedList<StatefulBlockInfo>();
     reportDiff(node, report, toAdd, toRemove, toInvalidate, toCorrupt, toUC);
     reportDiff(node, report, toAdd, toRemove, toInvalidate, toCorrupt, toUC);
 
 
@@ -1387,8 +1405,8 @@ public class BlockManager {
           + " does not belong to any file.");
           + " does not belong to any file.");
       addToInvalidates(b, node);
       addToInvalidates(b, node);
     }
     }
-    for (BlockInfo b : toCorrupt) {
-      markBlockAsCorrupt(b, node);
+    for (BlockToMarkCorrupt b : toCorrupt) {
+      markBlockAsCorrupt(b.blockInfo, node, b.reason);
     }
     }
   }
   }
 
 
@@ -1419,8 +1437,10 @@ public class BlockManager {
       
       
       // If block is corrupt, mark it and continue to next block.
       // If block is corrupt, mark it and continue to next block.
       BlockUCState ucState = storedBlock.getBlockUCState();
       BlockUCState ucState = storedBlock.getBlockUCState();
-      if (isReplicaCorrupt(iblk, reportedState, storedBlock, ucState, node)) {
-        markBlockAsCorrupt(storedBlock, node);
+      BlockToMarkCorrupt c = checkReplicaCorrupt(
+          iblk, reportedState, storedBlock, ucState, node);
+      if (c != null) {
+        markBlockAsCorrupt(c.blockInfo, node, c.reason);
         continue;
         continue;
       }
       }
       
       
@@ -1442,7 +1462,7 @@ public class BlockManager {
       Collection<BlockInfo> toAdd,              // add to DatanodeDescriptor
       Collection<BlockInfo> toAdd,              // add to DatanodeDescriptor
       Collection<Block> toRemove,           // remove from DatanodeDescriptor
       Collection<Block> toRemove,           // remove from DatanodeDescriptor
       Collection<Block> toInvalidate,       // should be removed from DN
       Collection<Block> toInvalidate,       // should be removed from DN
-      Collection<BlockInfo> toCorrupt,      // add to corrupt replicas list
+      Collection<BlockToMarkCorrupt> toCorrupt, // add to corrupt replicas list
       Collection<StatefulBlockInfo> toUC) { // add to under-construction list
       Collection<StatefulBlockInfo> toUC) { // add to under-construction list
     // place a delimiter in the list which separates blocks 
     // place a delimiter in the list which separates blocks 
     // that have been reported from those that have not
     // that have been reported from those that have not
@@ -1505,7 +1525,7 @@ public class BlockManager {
       final Block block, final ReplicaState reportedState, 
       final Block block, final ReplicaState reportedState, 
       final Collection<BlockInfo> toAdd, 
       final Collection<BlockInfo> toAdd, 
       final Collection<Block> toInvalidate, 
       final Collection<Block> toInvalidate, 
-      final Collection<BlockInfo> toCorrupt,
+      final Collection<BlockToMarkCorrupt> toCorrupt,
       final Collection<StatefulBlockInfo> toUC) {
       final Collection<StatefulBlockInfo> toUC) {
     
     
     if(LOG.isDebugEnabled()) {
     if(LOG.isDebugEnabled()) {
@@ -1536,8 +1556,10 @@ public class BlockManager {
       return storedBlock;
       return storedBlock;
     }
     }
 
 
-    if (isReplicaCorrupt(block, reportedState, storedBlock, ucState, dn)) {
-      toCorrupt.add(storedBlock);
+    BlockToMarkCorrupt c = checkReplicaCorrupt(
+        block, reportedState, storedBlock, ucState, dn);
+    if (c != null) {
+      toCorrupt.add(c);
       return storedBlock;
       return storedBlock;
     }
     }
 
 
@@ -1561,8 +1583,11 @@ public class BlockManager {
    * as switch statements, on the theory that it is easier to understand
    * as switch statements, on the theory that it is easier to understand
    * the combinatorics of reportedState and ucState that way.  It should be
    * the combinatorics of reportedState and ucState that way.  It should be
    * at least as efficient as boolean expressions.
    * at least as efficient as boolean expressions.
+   * 
+   * @return a BlockToMarkCorrupt object, or null if the replica is not corrupt
    */
    */
-  private boolean isReplicaCorrupt(Block iblk, ReplicaState reportedState, 
+  private BlockToMarkCorrupt checkReplicaCorrupt(
+      Block iblk, ReplicaState reportedState, 
       BlockInfo storedBlock, BlockUCState ucState, 
       BlockInfo storedBlock, BlockUCState ucState, 
       DatanodeDescriptor dn) {
       DatanodeDescriptor dn) {
     switch(reportedState) {
     switch(reportedState) {
@@ -1570,17 +1595,31 @@ public class BlockManager {
       switch(ucState) {
       switch(ucState) {
       case COMPLETE:
       case COMPLETE:
       case COMMITTED:
       case COMMITTED:
-        return (storedBlock.getGenerationStamp() != iblk.getGenerationStamp()
-            || storedBlock.getNumBytes() != iblk.getNumBytes());
+        if (storedBlock.getGenerationStamp() != iblk.getGenerationStamp()) {
+          return new BlockToMarkCorrupt(storedBlock,
+              "block is " + ucState + " and reported genstamp " +
+              iblk.getGenerationStamp() + " does not match " +
+              "genstamp in block map " + storedBlock.getGenerationStamp());
+        } else if (storedBlock.getNumBytes() != iblk.getNumBytes()) {
+          return new BlockToMarkCorrupt(storedBlock,
+              "block is " + ucState + " and reported length " +
+              iblk.getNumBytes() + " does not match " +
+              "length in block map " + storedBlock.getNumBytes());
+        } else {
+          return null; // not corrupt
+        }
       default:
       default:
-        return false;
+        return null;
       }
       }
     case RBW:
     case RBW:
     case RWR:
     case RWR:
       if (!storedBlock.isComplete()) {
       if (!storedBlock.isComplete()) {
-        return false;
+        return null; // not corrupt
       } else if (storedBlock.getGenerationStamp() != iblk.getGenerationStamp()) {
       } else if (storedBlock.getGenerationStamp() != iblk.getGenerationStamp()) {
-        return true;
+        return new BlockToMarkCorrupt(storedBlock,
+            "reported " + reportedState + " replica with genstamp " +
+            iblk.getGenerationStamp() + " does not match COMPLETE block's " +
+            "genstamp in block map " + storedBlock.getGenerationStamp());
       } else { // COMPLETE block, same genstamp
       } else { // COMPLETE block, same genstamp
         if (reportedState == ReplicaState.RBW) {
         if (reportedState == ReplicaState.RBW) {
           // If it's a RBW report for a COMPLETE block, it may just be that
           // If it's a RBW report for a COMPLETE block, it may just be that
@@ -1590,18 +1629,22 @@ public class BlockManager {
           LOG.info("Received an RBW replica for block " + storedBlock +
           LOG.info("Received an RBW replica for block " + storedBlock +
               " on " + dn.getName() + ": ignoring it, since the block is " +
               " on " + dn.getName() + ": ignoring it, since the block is " +
               "complete with the same generation stamp.");
               "complete with the same generation stamp.");
-          return false;
+          return null;
         } else {
         } else {
-          return true;
+          return new BlockToMarkCorrupt(storedBlock,
+              "reported replica has invalid state " + reportedState);
         }
         }
       }
       }
     case RUR:       // should not be reported
     case RUR:       // should not be reported
     case TEMPORARY: // should not be reported
     case TEMPORARY: // should not be reported
     default:
     default:
-      LOG.warn("Unexpected replica state " + reportedState
-          + " for block: " + storedBlock + 
-          " on " + dn.getName() + " size " + storedBlock.getNumBytes());
-      return true;
+      String msg = "Unexpected replica state " + reportedState
+      + " for block: " + storedBlock + 
+      " on " + dn.getName() + " size " + storedBlock.getNumBytes();
+      // log here at WARN level since this is really a broken HDFS
+      // invariant
+      LOG.warn(msg);
+      return new BlockToMarkCorrupt(storedBlock, msg);
     }
     }
   }
   }
 
 
@@ -2132,7 +2175,7 @@ public class BlockManager {
     // blockReceived reports a finalized block
     // blockReceived reports a finalized block
     Collection<BlockInfo> toAdd = new LinkedList<BlockInfo>();
     Collection<BlockInfo> toAdd = new LinkedList<BlockInfo>();
     Collection<Block> toInvalidate = new LinkedList<Block>();
     Collection<Block> toInvalidate = new LinkedList<Block>();
-    Collection<BlockInfo> toCorrupt = new LinkedList<BlockInfo>();
+    Collection<BlockToMarkCorrupt> toCorrupt = new LinkedList<BlockToMarkCorrupt>();
     Collection<StatefulBlockInfo> toUC = new LinkedList<StatefulBlockInfo>();
     Collection<StatefulBlockInfo> toUC = new LinkedList<StatefulBlockInfo>();
     processReportedBlock(node, block, ReplicaState.FINALIZED,
     processReportedBlock(node, block, ReplicaState.FINALIZED,
                               toAdd, toInvalidate, toCorrupt, toUC);
                               toAdd, toInvalidate, toCorrupt, toUC);
@@ -2153,8 +2196,8 @@ public class BlockManager {
           + " does not belong to any file.");
           + " does not belong to any file.");
       addToInvalidates(b, node);
       addToInvalidates(b, node);
     }
     }
-    for (BlockInfo b : toCorrupt) {
-      markBlockAsCorrupt(b, node);
+    for (BlockToMarkCorrupt b : toCorrupt) {
+      markBlockAsCorrupt(b.blockInfo, node, b.reason);
     }
     }
   }
   }
 
 

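The BlockManager change above replaces a boolean corruption check with checkReplicaCorrupt(), which returns a BlockToMarkCorrupt (or null), so the reason string travels with the block all the way into the corrupt-replicas log. A minimal standalone sketch of that pattern, using simplified stand-in types (Replica and ToMarkCorrupt are illustrative names, not the HDFS classes):

import java.util.ArrayList;
import java.util.List;

class CorruptionCheckSketch {
  // Hypothetical simplified replica description; the real code uses BlockInfo etc.
  static class Replica {
    final long genStamp;
    final long numBytes;
    Replica(long genStamp, long numBytes) { this.genStamp = genStamp; this.numBytes = numBytes; }
  }

  // Mirrors the spirit of BlockToMarkCorrupt: the flagged replica plus a reason.
  static class ToMarkCorrupt {
    final Replica replica;
    final String reason;
    ToMarkCorrupt(Replica replica, String reason) { this.replica = replica; this.reason = reason; }
  }

  // Returns null when the reported replica matches what the "block map" expects.
  static ToMarkCorrupt check(Replica stored, long reportedGenStamp, long reportedLen) {
    if (stored.genStamp != reportedGenStamp) {
      return new ToMarkCorrupt(stored, "reported genstamp " + reportedGenStamp
          + " does not match genstamp in block map " + stored.genStamp);
    }
    if (stored.numBytes != reportedLen) {
      return new ToMarkCorrupt(stored, "reported length " + reportedLen
          + " does not match length in block map " + stored.numBytes);
    }
    return null; // not corrupt
  }

  public static void main(String[] args) {
    List<ToMarkCorrupt> toCorrupt = new ArrayList<ToMarkCorrupt>();
    Replica stored = new Replica(1001L, 64L * 1024 * 1024);
    ToMarkCorrupt c = check(stored, 1000L, 64L * 1024 * 1024);
    if (c != null) {
      toCorrupt.add(c);
    }
    for (ToMarkCorrupt t : toCorrupt) {
      System.out.println("mark corrupt: " + t.reason);
    }
  }
}
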
+ 15 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java

@@ -44,25 +44,37 @@ public class CorruptReplicasMap{
   *
   * @param blk Block to be added to CorruptReplicasMap
   * @param dn DatanodeDescriptor which holds the corrupt replica
+   * @param reason a textual reason (for logging purposes)
   */
-  public void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn) {
+  public void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn,
+      String reason) {
    Collection<DatanodeDescriptor> nodes = getNodes(blk);
    if (nodes == null) {
      nodes = new TreeSet<DatanodeDescriptor>();
      corruptReplicasMap.put(blk, nodes);
    }
+    
+    String reasonText;
+    if (reason != null) {
+      reasonText = " because " + reason;
+    } else {
+      reasonText = "";
+    }
+    
    if (!nodes.contains(dn)) {
      nodes.add(dn);
      NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
                                   blk.getBlockName() +
                                   " added as corrupt on " + dn.getName() +
-                                   " by " + Server.getRemoteIp());
+                                   " by " + Server.getRemoteIp() +
+                                   reasonText);
    } else {
      NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
                                   "duplicate requested for " + 
                                   blk.getBlockName() + " to add as corrupt " +
                                   "on " + dn.getName() +
-                                   " by " + Server.getRemoteIp());
+                                   " by " + Server.getRemoteIp() +
+                                   reasonText);
    }
  }
 

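The CorruptReplicasMap change appends an optional reason to the state-change log line. A tiny self-contained sketch of that suffix pattern (class name and sample strings here are illustrative only, not the Hadoop code):

class ReasonSuffixSketch {
  // An optional reason becomes " because <reason>" or an empty string, so the log
  // line stays well-formed whether or not a reason was supplied.
  static String reasonText(String reason) {
    return (reason != null) ? " because " + reason : "";
  }

  public static void main(String[] args) {
    String base = "blk_123 added as corrupt on 10.0.0.1:50010 by /10.0.0.2";
    System.out.println(base + reasonText(null));
    System.out.println(base + reasonText("reported genstamp 7 does not match 9"));
  }
}
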
+ 25 - 16
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java

@@ -46,17 +46,13 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
 
 /**
- * Performs two types of scanning:
- * <li> Gets block files from the data directories and reconciles the
- * difference between the blocks on the disk and in memory in
- * {@link FSDataset}</li>
- * <li> Scans the data directories for block files under a block pool
- * and verifies that the files are not corrupt</li>
+ * Scans the block files under a block pool and verifies that the
+ * files are not corrupt.
  * This keeps track of blocks and their last verification times.
  * Currently it does not modify the metadata for block.
  */
@@ -78,7 +74,7 @@ class BlockPoolSliceScanner {
 
   private long scanPeriod = DEFAULT_SCAN_PERIOD_HOURS * 3600 * 1000;
   private DataNode datanode;
-  private FSDataset dataset;
+  private final FSDatasetInterface dataset;
   
   // sorted set
   private TreeSet<BlockScanInfo> blockInfoSet;
@@ -137,8 +133,8 @@ class BlockPoolSliceScanner {
     }
   }
   
-  BlockPoolSliceScanner(DataNode datanode, FSDataset dataset, Configuration conf,
-      String bpid) {
+  BlockPoolSliceScanner(DataNode datanode, FSDatasetInterface dataset,
+      Configuration conf, String bpid) {
     this.datanode = datanode;
     this.dataset = dataset;
     this.blockPoolId  = bpid;
@@ -220,16 +216,16 @@ class BlockPoolSliceScanner {
      * otherwise, pick the first directory.
      */
     File dir = null;
-    List<FSVolume> volumes = dataset.volumes.getVolumes();
-    for (FSDataset.FSVolume vol : dataset.volumes.getVolumes()) {
-      File bpDir = vol.getBlockPoolSlice(blockPoolId).getDirectory();
+    List<FSVolumeInterface> volumes = dataset.getVolumes();
+    for (FSVolumeInterface vol : volumes) {
+      File bpDir = vol.getDirectory(blockPoolId);
       if (LogFileHandler.isFilePresent(bpDir, verificationLogFile)) {
        dir = bpDir;
        break;
      }
    }
    if (dir == null) {
-      dir = volumes.get(0).getBlockPoolSlice(blockPoolId).getDirectory();
+      dir = volumes.get(0).getDirectory(blockPoolId);
    }
    
    try {
@@ -431,6 +427,19 @@ class BlockPoolSliceScanner {
           return;
         }
 
+        // If the block exists, the exception may due to a race with write:
+        // The BlockSender got an old block path in rbw. BlockReceiver removed
+        // the rbw block from rbw to finalized but BlockSender tried to open the
+        // file before BlockReceiver updated the VolumeMap. The state of the
+        // block can be changed again now, so ignore this error here. If there
+        // is a block really deleted by mistake, DirectoryScan should catch it.
+        if (e instanceof FileNotFoundException ) {
+          LOG.info("Verification failed for " + block +
+              ". It may be due to race with write.");
+          deleteBlock(block.getLocalBlock());
+          return;
+        }
+
         LOG.warn((second ? "Second " : "First ") + "Verification failed for "
             + block, e);
         
@@ -577,8 +586,8 @@ class BlockPoolSliceScanner {
     bytesLeft += len;
   }
 
-  static File getCurrentFile(FSVolume vol, String bpid) throws IOException {
-    return LogFileHandler.getCurrentFile(vol.getBlockPoolSlice(bpid).getDirectory(),
+  static File getCurrentFile(FSVolumeInterface vol, String bpid) throws IOException {
+    return LogFileHandler.getCurrentFile(vol.getDirectory(bpid),
         BlockPoolSliceScanner.verificationLogFile);
   }
   

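The BlockPoolSliceScanner change treats a FileNotFoundException during verification as a probable race with a concurrent write (the replica moved from rbw to finalized between path lookup and open) rather than as real damage. A standalone sketch of that handling pattern, with a hypothetical path and no DataNode types; it simply logs and skips instead of counting a verification failure:

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;

class VerificationRaceSketch {
  static void verify(File blockFile) {
    try (FileInputStream in = new FileInputStream(blockFile)) {
      // ... read and checksum the data here ...
    } catch (FileNotFoundException e) {
      // The file moved (e.g. rbw -> finalized) between listing and open; ignore it
      // here and let a later directory scan reconcile the state.
      System.out.println("Verification skipped for " + blockFile + ": likely race with write");
    } catch (IOException e) {
      System.out.println("Verification failed for " + blockFile + ": " + e);
    }
  }

  public static void main(String[] args) {
    verify(new File("/tmp/does-not-exist/blk_42"));
  }
}
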
+ 7 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java

@@ -22,16 +22,18 @@ import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 
 /**************************************************
  * BlockVolumeChoosingPolicy allows a DataNode to
  * specify what policy is to be used while choosing
  * a volume for a block request.
- * 
+ *
+ * Note: This is an evolving i/f and is only for
+ * advanced use.
+ *
 ***************************************************/
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
+@InterfaceAudience.Private
 public interface BlockVolumeChoosingPolicy {
 
   /**
@@ -46,7 +48,7 @@ public interface BlockVolumeChoosingPolicy {
    * @return the chosen volume to store the block.
    * @throws IOException when disks are unavailable or are full.
    */
-  public FSVolume chooseVolume(List<FSVolume> volumes, long blockSize)
+  public FSVolumeInterface chooseVolume(List<FSVolumeInterface> volumes, long blockSize)
    throws IOException;
 
 }

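BlockVolumeChoosingPolicy now works against FSVolumeInterface and is marked private. As a rough illustration of what an implementation of such a policy typically looks like, here is a self-contained round-robin sketch; Volume and RoundRobinChooser are simplified stand-ins, not the Hadoop interface:

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

class RoundRobinChooserSketch {
  /** Simplified stand-in for a data volume; only free space matters here. */
  interface Volume {
    long getAvailable() throws IOException;
  }

  /** Round-robin policy: start after the last chosen volume, skip full ones. */
  static class RoundRobinChooser {
    private int next = 0;

    synchronized Volume chooseVolume(List<Volume> volumes, long blockSize)
        throws IOException {
      if (volumes.isEmpty()) {
        throw new IOException("No volumes available");
      }
      for (int i = 0; i < volumes.size(); i++) {
        Volume v = volumes.get((next + i) % volumes.size());
        if (v.getAvailable() >= blockSize) {
          next = (next + i + 1) % volumes.size();
          return v;
        }
      }
      throw new IOException("All volumes are full");
    }
  }

  public static void main(String[] args) throws IOException {
    Volume big = () -> 10L << 30;   // ~10 GB free
    Volume small = () -> 1L << 20;  // ~1 MB free
    List<Volume> volumes = Arrays.asList(small, big);
    Volume chosen = new RoundRobinChooser().chooseVolume(volumes, 128L << 20);
    System.out.println("chose volume with " + chosen.getAvailable() + " bytes free");
  }
}
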
+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java

@@ -27,12 +27,12 @@ import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 
 /**
  * DataBlockScanner manages block scanning for all the block pools. For each
@@ -44,7 +44,7 @@ import org.apache.commons.logging.LogFactory;
 public class DataBlockScanner implements Runnable {
   public static final Log LOG = LogFactory.getLog(DataBlockScanner.class);
   private final DataNode datanode;
-  private final FSDataset dataset;
+  private final FSDatasetInterface dataset;
   private final Configuration conf;
   
   /**
@@ -55,7 +55,7 @@ public class DataBlockScanner implements Runnable {
     new TreeMap<String, BlockPoolSliceScanner>();
   Thread blockScannerThread = null;
   
-  DataBlockScanner(DataNode datanode, FSDataset dataset, Configuration conf) {
+  DataBlockScanner(DataNode datanode, FSDatasetInterface dataset, Configuration conf) {
     this.datanode = datanode;
     this.dataset = dataset;
     this.conf = conf;
@@ -135,7 +135,7 @@ public class DataBlockScanner implements Runnable {
               .iterator();
           while (bpidIterator.hasNext()) {
             String bpid = bpidIterator.next();
-            for (FSDataset.FSVolume vol : dataset.volumes.getVolumes()) {
+            for (FSDatasetInterface.FSVolumeInterface vol : dataset.getVolumes()) {
              try {
                File currFile = BlockPoolSliceScanner.getCurrentFile(vol, bpid);
                if (currFile.exists()) {

+ 41 - 87
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -43,10 +43,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SIMULATEDDATASTORAGE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICES;
 
@@ -117,7 +114,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.Util;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods;
@@ -153,13 +149,11 @@ import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.util.GenericOptionsParser;
-import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.VersionInfo;
 import org.mortbay.util.ajax.JSON;
 
-import com.google.common.base.Preconditions;
 
 
 /**********************************************************
@@ -428,13 +422,14 @@ public class DataNode extends Configured
     }
   }
 
-  private synchronized void setClusterId(String cid) throws IOException {
-    if(clusterId != null && !clusterId.equals(cid)) {
-      throw new IOException ("cluster id doesn't match. old cid=" + clusterId 
-          + " new cid="+ cid);
+  private synchronized void setClusterId(final String nsCid, final String bpid
+      ) throws IOException {
+    if(clusterId != null && !clusterId.equals(nsCid)) {
+      throw new IOException ("Cluster IDs not matched: dn cid=" + clusterId 
+          + " but ns cid="+ nsCid + "; bpid=" + bpid);
     }
     // else
-    clusterId = cid;    
+    clusterId = nsCid;
   }
 
   private static String getHostName(Configuration config)
@@ -556,11 +551,11 @@ public class DataNode extends Configured
     if (conf.getInt(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
                     DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT) < 0) {
       reason = "verification is turned off by configuration";
-    } else if (!(data instanceof FSDataset)) {
-      reason = "verifcation is supported only with FSDataset";
+    } else if ("SimulatedFSDataset".equals(data.getClass().getSimpleName())) {
+      reason = "verifcation is not supported by SimulatedFSDataset";
     } 
     if (reason == null) {
-      blockScanner = new DataBlockScanner(this, (FSDataset)data, conf);
+      blockScanner = new DataBlockScanner(this, data, conf);
       blockScanner.start();
     } else {
       LOG.info("Periodic Block Verification scan is disabled because " +
@@ -585,11 +580,11 @@ public class DataNode extends Configured
     if (conf.getInt(DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 
                     DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT) < 0) {
       reason = "verification is turned off by configuration";
-    } else if (!(data instanceof FSDataset)) {
-      reason = "verification is supported only with FSDataset";
+    } else if ("SimulatedFSDataset".equals(data.getClass().getSimpleName())) {
+      reason = "verifcation is not supported by SimulatedFSDataset";
     } 
     if (reason == null) {
-      directoryScanner = new DirectoryScanner(this, (FSDataset) data, conf);
+      directoryScanner = new DirectoryScanner(this, data, conf);
       directoryScanner.start();
     } else {
       LOG.info("Periodic Directory Tree Verification scan is disabled because " +
@@ -811,51 +806,22 @@ public class DataNode extends Configured
    */
   void initBlockPool(BPOfferService bpos) throws IOException {
     NamespaceInfo nsInfo = bpos.getNamespaceInfo();
-    Preconditions.checkState(nsInfo != null,
-        "Block pool " + bpos + " should have retrieved " +
-        "its namespace info before calling initBlockPool.");
+    if (nsInfo == null) {
+      throw new IOException("NamespaceInfo not found: Block pool " + bpos
+          + " should have retrieved namespace info before initBlockPool.");
+    }
     
-    String blockPoolId = nsInfo.getBlockPoolID();
-
     // Register the new block pool with the BP manager.
     blockPoolManager.addBlockPool(bpos);
 
-    synchronized (this) {
-      // we do not allow namenode from different cluster to register
-      if(clusterId != null && !clusterId.equals(nsInfo.clusterID)) {
-        throw new IOException(
-            "cannot register with the namenode because clusterid do not match:"
-            + " nn=" + nsInfo.getBlockPoolID() + "; nn cid=" + nsInfo.clusterID + 
-            ";dn cid=" + clusterId);
-      }
-
-      setClusterId(nsInfo.clusterID);
-    }
-    
-    StartupOption startOpt = getStartupOption(conf);
-    assert startOpt != null : "Startup option must be set.";
-
-    boolean simulatedFSDataset = conf.getBoolean(
-        DFS_DATANODE_SIMULATEDDATASTORAGE_KEY,
-        DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT);
-    
-    if (!simulatedFSDataset) {
-      // read storage info, lock data dirs and transition fs state if necessary          
-      storage.recoverTransitionRead(DataNode.this, blockPoolId, nsInfo,
-          dataDirs, startOpt);
-      StorageInfo bpStorage = storage.getBPStorage(blockPoolId);
-      LOG.info("setting up storage: nsid=" +
-          bpStorage.getNamespaceID() + ";bpid="
-          + blockPoolId + ";lv=" + storage.getLayoutVersion() +
-          ";nsInfo=" + nsInfo);
-    }
+    setClusterId(nsInfo.clusterID, nsInfo.getBlockPoolID());
     
     // In the case that this is the first block pool to connect, initialize
     // the dataset, block scanners, etc.
-    initFsDataSet();
+    initStorage(nsInfo);
     initPeriodicScanners(conf);
     
-    data.addBlockPool(blockPoolId, conf);
+    data.addBlockPool(nsInfo.getBlockPoolID(), conf);
   }
 
   /**
@@ -882,31 +848,28 @@ public class DataNode extends Configured
    * Initializes the {@link #data}. The initialization is done only once, when
    * handshake with the the first namenode is completed.
    */
-  private synchronized void initFsDataSet() throws IOException {
-    if (data != null) { // Already initialized
-      return;
-    }
-
-    // get version and id info from the name-node
-    boolean simulatedFSDataset = conf.getBoolean(
-        DFS_DATANODE_SIMULATEDDATASTORAGE_KEY,
-        DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT);
-
-    if (simulatedFSDataset) {
-      storage.createStorageID(getPort());
-      // it would have been better to pass storage as a parameter to
-      // constructor below - need to augment ReflectionUtils used below.
-      conf.set(DFS_DATANODE_STORAGEID_KEY, getStorageId());
-      try {
-        data = (FSDatasetInterface) ReflectionUtils.newInstance(
-            Class.forName(
-            "org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset"),
-            conf);
-      } catch (ClassNotFoundException e) {
-        throw new IOException(StringUtils.stringifyException(e));
+  private void initStorage(final NamespaceInfo nsInfo) throws IOException {
+    final FSDatasetInterface.Factory factory
+        = FSDatasetInterface.Factory.getFactory(conf);
+    
+    if (!factory.isSimulated()) {
+      final StartupOption startOpt = getStartupOption(conf);
+      if (startOpt == null) {
+        throw new IOException("Startup option not set.");
+      }
+      final String bpid = nsInfo.getBlockPoolID();
+      //read storage info, lock data dirs and transition fs state if necessary
+      storage.recoverTransitionRead(this, bpid, nsInfo, dataDirs, startOpt);
+      final StorageInfo bpStorage = storage.getBPStorage(bpid);
+      LOG.info("Setting up storage: nsid=" + bpStorage.getNamespaceID()
+          + ";bpid=" + bpid + ";lv=" + storage.getLayoutVersion()
+          + ";nsInfo=" + nsInfo);
+    }
+
+    synchronized(this)  {
+      if (data == null) {
+        data = factory.createFSDatasetInterface(this, storage, conf);
       }
-    } else {
-      data = new FSDataset(this, storage, conf);
     }
   }
 
@@ -2200,16 +2163,7 @@ public class DataNode extends Configured
    */
   @Override // DataNodeMXBean
   public String getVolumeInfo() {
-    final Map<String, Object> info = new HashMap<String, Object>();
-    Collection<VolumeInfo> volumes = ((FSDataset)this.data).getVolumeInfo();
-    for (VolumeInfo v : volumes) {
-      final Map<String, Object> innerInfo = new HashMap<String, Object>();
-      innerInfo.put("usedSpace", v.usedSpace);
-      innerInfo.put("freeSpace", v.freeSpace);
-      innerInfo.put("reservedSpace", v.reservedSpace);
-      info.put(v.directory, innerInfo);
-    }
-    return JSON.toString(info);
+    return JSON.toString(data.getVolumeInfoMap());
   }
   
   @Override // DataNodeMXBean

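The DataNode change drops the instanceof/reflection branches and obtains the dataset from a factory selected by configuration (FSDatasetInterface.Factory.getFactory(conf)), with isSimulated() deciding whether on-disk storage state must be recovered first. A simplified, self-contained sketch of that factory pattern; the class names and the configuration key below are hypothetical, not the Hadoop API:

import java.util.HashMap;
import java.util.Map;

class DatasetFactorySketch {
  interface Dataset { String describe(); }

  static abstract class Factory {
    abstract Dataset create();
    boolean isSimulated() { return false; }

    // Stand-in for "look up the factory class in the configuration".
    static Factory getFactory(Map<String, String> conf) {
      return "simulated".equals(conf.get("dataset.kind"))
          ? new SimulatedFactory() : new OnDiskFactory();
    }
  }

  static class OnDiskFactory extends Factory {
    Dataset create() { return () -> "on-disk dataset"; }
  }

  static class SimulatedFactory extends Factory {
    Dataset create() { return () -> "simulated dataset"; }
    boolean isSimulated() { return true; }
  }

  public static void main(String[] args) {
    Map<String, String> conf = new HashMap<>();
    conf.put("dataset.kind", "simulated");
    Factory f = Factory.getFactory(conf);
    if (!f.isSimulated()) {
      // only real storage would need to read/lock data directories first
      System.out.println("would recover storage state here");
    }
    System.out.println(f.create().describe());
  }
}
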
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -751,7 +751,7 @@ public class DataStorage extends Storage {
     Matcher matcher = PRE_GENSTAMP_META_FILE_PATTERN.matcher(oldFileName); 
     if (matcher.matches()) {
       //return the current metadata file name
-      return FSDataset.getMetaFileName(matcher.group(1),
+      return DatanodeUtil.getMetaFileName(matcher.group(1),
           GenerationStamp.GRANDFATHER_GENERATION_STAMP); 
     }
     return oldFileName;

+ 39 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java

@@ -18,7 +18,9 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.File;
+import java.io.FilenameFilter;
 import java.io.IOException;
+import java.util.Arrays;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -26,6 +28,10 @@ import org.apache.hadoop.hdfs.protocol.Block;
 /** Provide utility methods for Datanode. */
 @InterfaceAudience.Private
 class DatanodeUtil {
+  static final String METADATA_EXTENSION = ".meta";
+
+  static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
+
   private final static String DISK_ERROR = "Possible disk error on file creation: ";
 
   /** Get the cause of an I/O exception if caused by a possible disk error
@@ -64,4 +70,37 @@ class DatanodeUtil {
     }
     return f;
   }
+  
+  static String getMetaFileName(String blockFileName, long genStamp) {
+    return blockFileName + "_" + genStamp + METADATA_EXTENSION;
+  }
+  
+  static File getMetaFile(File f, long genStamp) {
+    return new File(getMetaFileName(f.getAbsolutePath(), genStamp));
+  }
+
+  /** Find the corresponding meta data file from a given block file */
+  static File findMetaFile(final File blockFile) throws IOException {
+    final String prefix = blockFile.getName() + "_";
+    final File parent = blockFile.getParentFile();
+    File[] matches = parent.listFiles(new FilenameFilter() {
+      public boolean accept(File dir, String name) {
+        return dir.equals(parent)
+            && name.startsWith(prefix) && name.endsWith(METADATA_EXTENSION);
+      }
+    });
+
+    if (matches == null || matches.length == 0) {
+      throw new IOException("Meta file not found, blockFile=" + blockFile);
+    }
+    else if (matches.length > 1) {
+      throw new IOException("Found more than one meta files: " 
+          + Arrays.asList(matches));
+    }
+    return matches[0];
+  }
+  
+  static File getUnlinkTmpFile(File f) {
+    return new File(f.getParentFile(), f.getName()+UNLINK_BLOCK_SUFFIX);
+  }
 }

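DatanodeUtil now owns the block-meta-file naming convention: <blockFile>_<genStamp>.meta, and findMetaFile() matches candidates on exactly that prefix/suffix pair. A quick standalone check of the convention (recomputed here rather than calling the class; the block name is just an example):

class MetaFileNameSketch {
  static final String METADATA_EXTENSION = ".meta";

  // Same formula as DatanodeUtil.getMetaFileName in the diff above.
  static String getMetaFileName(String blockFileName, long genStamp) {
    return blockFileName + "_" + genStamp + METADATA_EXTENSION;
  }

  public static void main(String[] args) {
    String name = getMetaFileName("blk_42", 1005L);
    System.out.println(name);                               // blk_42_1005.meta
    System.out.println(name.startsWith("blk_42_")
        && name.endsWith(METADATA_EXTENSION));              // true: the prefix/suffix findMetaFile matches on
  }
}
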
+ 28 - 16
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java

@@ -43,20 +43,19 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 import org.apache.hadoop.util.Daemon;
 
 /**
  * Periodically scans the data directories for block and block metadata files.
- * Reconciles the differences with block information maintained in
- * {@link FSDataset}
+ * Reconciles the differences with block information maintained in the dataset.
  */
 @InterfaceAudience.Private
 public class DirectoryScanner implements Runnable {
   private static final Log LOG = LogFactory.getLog(DirectoryScanner.class);
 
   private final DataNode datanode;
-  private final FSDataset dataset;
+  private final FSDatasetInterface dataset;
   private final ExecutorService reportCompileThreadPool;
   private final ScheduledExecutorService masterThread;
   private final long scanPeriodMsecs;
@@ -158,13 +157,13 @@ public class DirectoryScanner implements Runnable {
     private final long blockId;
     private final File metaFile;
     private final File blockFile;
-    private final FSVolume volume;
+    private final FSVolumeInterface volume;
 
     ScanInfo(long blockId) {
       this(blockId, null, null, null);
     }
 
-    ScanInfo(long blockId, File blockFile, File metaFile, FSVolume vol) {
+    ScanInfo(long blockId, File blockFile, File metaFile, FSVolumeInterface vol) {
       this.blockId = blockId;
       this.metaFile = metaFile;
       this.blockFile = blockFile;
@@ -183,7 +182,7 @@ public class DirectoryScanner implements Runnable {
       return blockId;
     }
 
-    FSVolume getVolume() {
+    FSVolumeInterface getVolume() {
       return volume;
     }
 
@@ -220,7 +219,7 @@ public class DirectoryScanner implements Runnable {
     }
   }
 
-  DirectoryScanner(DataNode dn, FSDataset dataset, Configuration conf) {
+  DirectoryScanner(DataNode dn, FSDatasetInterface dataset, Configuration conf) {
     this.datanode = dn;
     this.dataset = dataset;
     int interval = conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
@@ -269,7 +268,7 @@ public class DirectoryScanner implements Runnable {
         return;
       }
 
-      String[] bpids = dataset.getBPIdlist();
+      String[] bpids = dataset.getBlockPoolList();
       for(String bpid : bpids) {
         UpgradeManagerDatanode um = 
           datanode.getUpgradeManagerDatanode(bpid);
@@ -411,17 +410,29 @@ public class DirectoryScanner implements Runnable {
     diffRecord.add(new ScanInfo(blockId));
   }
 
+  /** Is the given volume still valid in the dataset? */
+  private static boolean isValid(final FSDatasetInterface dataset,
+      final FSVolumeInterface volume) {
+    for (FSVolumeInterface vol : dataset.getVolumes()) {
+      if (vol == volume) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   /** Get lists of blocks on the disk sorted by blockId, per blockpool */
   private Map<String, ScanInfo[]> getDiskReport() {
     // First get list of data directories
-    List<FSVolume> volumes = dataset.volumes.getVolumes();
+    final List<FSVolumeInterface> volumes = dataset.getVolumes();
     ArrayList<ScanInfoPerBlockPool> dirReports =
       new ArrayList<ScanInfoPerBlockPool>(volumes.size());
     
     Map<Integer, Future<ScanInfoPerBlockPool>> compilersInProgress =
       new HashMap<Integer, Future<ScanInfoPerBlockPool>>();
     for (int i = 0; i < volumes.size(); i++) {
-      if (!dataset.volumes.isValid(volumes.get(i))) { // volume is still valid
+      if (!isValid(dataset, volumes.get(i))) {
+        // volume is invalid
         dirReports.add(i, null);
       } else {
         ReportCompiler reportCompiler =
@@ -446,7 +457,8 @@ public class DirectoryScanner implements Runnable {
     // Compile consolidated report for all the volumes
     ScanInfoPerBlockPool list = new ScanInfoPerBlockPool();
     for (int i = 0; i < volumes.size(); i++) {
-      if (dataset.volumes.isValid(volumes.get(i))) { // volume is still valid
+      if (isValid(dataset, volumes.get(i))) {
+        // volume is still valid
         list.addAll(dirReports.get(i));
       }
     }
@@ -461,9 +473,9 @@ public class DirectoryScanner implements Runnable {
 
   private static class ReportCompiler 
   implements Callable<ScanInfoPerBlockPool> {
-    private FSVolume volume;
+    private FSVolumeInterface volume;
 
-    public ReportCompiler(FSVolume volume) {
+    public ReportCompiler(FSVolumeInterface volume) {
      this.volume = volume;
    }
 
@@ -473,14 +485,14 @@ public class DirectoryScanner implements Runnable {
      ScanInfoPerBlockPool result = new ScanInfoPerBlockPool(bpList.length);
      for (String bpid : bpList) {
        LinkedList<ScanInfo> report = new LinkedList<ScanInfo>();
-        File bpFinalizedDir = volume.getBlockPoolSlice(bpid).getFinalizedDir();
+        File bpFinalizedDir = volume.getFinalizedDir(bpid);
        result.put(bpid, compileReport(volume, bpFinalizedDir, report));
      }
      return result;
    }
 
    /** Compile list {@link ScanInfo} for the blocks in the directory <dir> */
-    private LinkedList<ScanInfo> compileReport(FSVolume vol, File dir,
+    private LinkedList<ScanInfo> compileReport(FSVolumeInterface vol, File dir,
        LinkedList<ScanInfo> report) {
      File[] files;
      try {

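The DirectoryScanner change replaces dataset.volumes.isValid() with a local isValid() that walks the dataset's current volume list and compares by object identity, so a volume that has been removed and re-added as a new object no longer counts as the same volume. A simplified, self-contained sketch of that identity check (generic type instead of FSVolumeInterface):

import java.util.Arrays;
import java.util.List;

class VolumeValiditySketch {
  static <V> boolean isValid(List<V> currentVolumes, V volume) {
    for (V v : currentVolumes) {
      if (v == volume) {     // identity, not equals(): a re-created volume does not match
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    String v1 = new String("/data/1"), v2 = new String("/data/2");
    List<String> volumes = Arrays.asList(v1, v2);
    System.out.println(isValid(volumes, v1));                   // true
    System.out.println(isValid(volumes, new String("/data/1"))); // false: equal but not the same object
  }
}
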
+ 134 - 155
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java

@@ -23,7 +23,6 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
-import java.io.FilenameFilter;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.RandomAccessFile;
@@ -77,19 +76,28 @@ import org.apache.hadoop.util.ReflectionUtils;
 ***************************************************/
 @InterfaceAudience.Private
 class FSDataset implements FSDatasetInterface {
+  /**
+   * A factory for creating FSDataset objects.
+   */
+  static class Factory extends FSDatasetInterface.Factory {
+    @Override
+    public FSDatasetInterface createFSDatasetInterface(DataNode datanode,
+        DataStorage storage, Configuration conf) throws IOException {
+      return new FSDataset(datanode, storage, conf);
+    }
+  }
 
   /**
    * A node type that can be built into a tree reflecting the
    * hierarchy of blocks on the local disk.
    */
-  class FSDir {
-    File dir;
+  private class FSDir {
+    final File dir;
     int numBlocks = 0;
     FSDir children[];
     int lastChildIdx = 0;
-    /**
-     */
-    public FSDir(File dir) 
+
+    private FSDir(File dir) 
       throws IOException {
       this.dir = dir;
       this.children = null;
@@ -114,7 +122,7 @@ class FSDataset implements FSDatasetInterface {
       }
     }
         
-    public File addBlock(Block b, File src) throws IOException {
+    private File addBlock(Block b, File src) throws IOException {
      //First try without creating subdirectories
      File file = addBlock(b, src, false, false);          
      return (file != null) ? file : addBlock(b, src, true, true);
@@ -162,7 +170,7 @@ class FSDataset implements FSDatasetInterface {
      return children[ lastChildIdx ].addBlock(b, src, true, false); 
    }
 
-    void getVolumeMap(String bpid, ReplicasMap volumeMap, FSVolume volume) 
+    private void getVolumeMap(String bpid, ReplicasMap volumeMap, FSVolume volume) 
    throws IOException {
      if (children != null) {
        for (int i = 0; i < children.length; i++) {
@@ -208,7 +216,7 @@ class FSDataset implements FSDatasetInterface {
     * check if a data diretory is healthy
     * @throws DiskErrorException
     */
-    public void checkDirTree() throws DiskErrorException {
+    private void checkDirTree() throws DiskErrorException {
      DiskChecker.checkDir(dir);
            
      if (children != null) {
@@ -218,7 +226,7 @@ class FSDataset implements FSDatasetInterface {
      }
    }
        
-    void clearPath(File f) {
+    private void clearPath(File f) {
      String root = dir.getAbsolutePath();
      String dir = f.getAbsolutePath();
      if (dir.startsWith(root)) {
@@ -271,7 +279,8 @@ class FSDataset implements FSDatasetInterface {
      }
      return false;
    }
-        
+
+    @Override
    public String toString() {
      return "FSDir{" +
        "dir=" + dir +
@@ -285,7 +294,7 @@ class FSDataset implements FSDatasetInterface {
   * Taken together, all BlockPoolSlices sharing a block pool ID across a 
   * cluster represent a single block pool.
   */
-  class BlockPoolSlice {
+  private class BlockPoolSlice {
    private final String bpid;
    private final FSVolume volume; // volume to which this BlockPool belongs to
    private final File currentDir; // StorageDirectory/current/bpid/current
@@ -343,11 +352,7 @@ class FSDataset implements FSDatasetInterface {
    File getDirectory() {
      return currentDir.getParentFile();
    }
-    
-    File getCurrentDir() {
-      return currentDir;
-    }
-    
+
    File getFinalizedDir() {
      return finalizedDir.dir;
    }
@@ -388,7 +393,7 @@ class FSDataset implements FSDatasetInterface {
 
    File addBlock(Block b, File f) throws IOException {
      File blockFile = finalizedDir.addBlock(b, f);
-      File metaFile = getMetaFile(blockFile , b.getGenerationStamp());
+      File metaFile = DatanodeUtil.getMetaFile(blockFile, b.getGenerationStamp());
      dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
      return blockFile;
    }
@@ -456,7 +461,7 @@ class FSDataset implements FSDatasetInterface {
      DataInputStream checksumIn = null;
      InputStream blockIn = null;
      try {
-        File metaFile = new File(getMetaFileName(blockFile.toString(), genStamp));
+        final File metaFile = DatanodeUtil.getMetaFile(blockFile, genStamp);
        long blockFileLen = blockFile.length();
        long metaFileLen = metaFile.length();
        int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
@@ -522,7 +527,7 @@ class FSDataset implements FSDatasetInterface {
    }
  }
  
-  class FSVolume {
+  class FSVolume implements FSVolumeInterface {
    private final Map<String, BlockPoolSlice> map = new HashMap<String, BlockPoolSlice>();
    private final File currentDir;    // <StorageDirectory>/current
    private final DF usage;           
@@ -535,11 +540,6 @@ class FSDataset implements FSDatasetInterface {
      File parent = currentDir.getParentFile();
      this.usage = new DF(parent, conf);
    }
-
-    /** Return storage directory corresponding to the volume */
-    File getDir() {
-      return currentDir.getParentFile();
-    }
    
    File getCurrentDir() {
      return currentDir;
@@ -584,8 +584,9 @@ class FSDataset implements FSDatasetInterface {
      long remaining = usage.getCapacity() - reserved;
      return remaining > 0 ? remaining : 0;
    }
-      
-    long getAvailable() throws IOException {
+
+    @Override
+    public long getAvailable() throws IOException {
      long remaining = getCapacity()-getDfsUsed();
      long available = usage.getAvailable();
      if (remaining>available) {
@@ -601,19 +602,30 @@ class FSDataset implements FSDatasetInterface {
    String getMount() throws IOException {
      return usage.getMount();
    }
-    
-    BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {
+
+    private BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {
      BlockPoolSlice bp = map.get(bpid);
      if (bp == null) {
        throw new IOException("block pool " + bpid + " is not found");
      }
      return bp;
    }
-    
+
+    @Override
+    public File getDirectory(String bpid) throws IOException {
+      return getBlockPoolSlice(bpid).getDirectory();
+    }
+
+    @Override
+    public File getFinalizedDir(String bpid) throws IOException {
+      return getBlockPoolSlice(bpid).getFinalizedDir();
+    }
+
    /**
     * Make a deep copy of the list of currently active BPIDs
     */
-    String[] getBlockPoolList() {
+    @Override
+    public String[] getBlockPoolList() {
      synchronized(FSDataset.this) {
        return map.keySet().toArray(new String[map.keySet().size()]);   
      }
@@ -682,7 +694,8 @@ class FSDataset implements FSDatasetInterface {
      BlockPoolSlice bp = getBlockPoolSlice(bpid);
      bp.clearPath(f);
    }
-      
+
+    @Override
    public String toString() {
      return currentDir.getAbsolutePath();
    }
@@ -774,21 +787,18 @@ class FSDataset implements FSDatasetInterface {
      * Read access to this unmodifiable list is not synchronized.
      * This list is replaced on modification holding "this" lock.
      */
-    private volatile List<FSVolume> volumes = null;
+    private volatile List<FSVolumeInterface> volumes = null;
+
     BlockVolumeChoosingPolicy blockChooser;
     int numFailedVolumes;
 
 
-    FSVolumeSet(FSVolume[] volumes, int failedVols, BlockVolumeChoosingPolicy blockChooser) {
-      List<FSVolume> list = Arrays.asList(volumes);
-      this.volumes = Collections.unmodifiableList(list);
+    FSVolumeSet(List<FSVolumeInterface> volumes, int failedVols,
+        BlockVolumeChoosingPolicy blockChooser) {
+      this.volumes = Collections.unmodifiableList(volumes);
       this.blockChooser = blockChooser;
       this.numFailedVolumes = failedVols;
     }
     
     
-    private int numberOfVolumes() {
-      return volumes.size();
-    }
-
     private int numberOfFailedVolumes() {
       return numFailedVolumes;
     }
@@ -801,36 +811,36 @@ class FSDataset implements FSDatasetInterface {
      * @return next volume to store the block in.
      * @return next volume to store the block in.
      */
      */
     synchronized FSVolume getNextVolume(long blockSize) throws IOException {
     synchronized FSVolume getNextVolume(long blockSize) throws IOException {
-      return blockChooser.chooseVolume(volumes, blockSize);
+      return (FSVolume)blockChooser.chooseVolume(volumes, blockSize);
     }
     }
       
       
     private long getDfsUsed() throws IOException {
     private long getDfsUsed() throws IOException {
       long dfsUsed = 0L;
       long dfsUsed = 0L;
-      for (FSVolume vol : volumes) {
-        dfsUsed += vol.getDfsUsed();
+      for (FSVolumeInterface v : volumes) {
+        dfsUsed += ((FSVolume)v).getDfsUsed();
       }
       }
       return dfsUsed;
       return dfsUsed;
     }
     }
 
 
     private long getBlockPoolUsed(String bpid) throws IOException {
     private long getBlockPoolUsed(String bpid) throws IOException {
       long dfsUsed = 0L;
       long dfsUsed = 0L;
-      for (FSVolume vol : volumes) {
-        dfsUsed += vol.getBlockPoolUsed(bpid);
+      for (FSVolumeInterface v : volumes) {
+        dfsUsed += ((FSVolume)v).getBlockPoolUsed(bpid);
       }
       }
       return dfsUsed;
       return dfsUsed;
     }
     }
 
 
     private long getCapacity() throws IOException {
     private long getCapacity() throws IOException {
       long capacity = 0L;
       long capacity = 0L;
-      for (FSVolume vol : volumes) {
-        capacity += vol.getCapacity();
+      for (FSVolumeInterface v : volumes) {
+        capacity += ((FSVolume)v).getCapacity();
       }
       }
       return capacity;
       return capacity;
     }
     }
       
       
     private long getRemaining() throws IOException {
     private long getRemaining() throws IOException {
       long remaining = 0L;
       long remaining = 0L;
-      for (FSVolume vol : volumes) {
+      for (FSVolumeInterface vol : volumes) {
         remaining += vol.getAvailable();
         remaining += vol.getAvailable();
       }
       }
       return remaining;
       return remaining;
@@ -838,15 +848,15 @@ class FSDataset implements FSDatasetInterface {
       
       
     private void getVolumeMap(ReplicasMap volumeMap)
     private void getVolumeMap(ReplicasMap volumeMap)
         throws IOException {
         throws IOException {
-      for (FSVolume vol : volumes) {
-        vol.getVolumeMap(volumeMap);
+      for (FSVolumeInterface v : volumes) {
+        ((FSVolume)v).getVolumeMap(volumeMap);
       }
       }
     }
     }
     
     
     private void getVolumeMap(String bpid, ReplicasMap volumeMap)
     private void getVolumeMap(String bpid, ReplicasMap volumeMap)
         throws IOException {
         throws IOException {
-      for (FSVolume vol : volumes) {
-        vol.getVolumeMap(bpid, volumeMap);
+      for (FSVolumeInterface v : volumes) {
+        ((FSVolume)v).getVolumeMap(bpid, volumeMap);
       }
       }
     }
     }
       
       
@@ -862,10 +872,10 @@ class FSDataset implements FSDatasetInterface {
       ArrayList<FSVolume> removedVols = null;
       ArrayList<FSVolume> removedVols = null;
       
       
       // Make a copy of volumes for performing modification 
       // Make a copy of volumes for performing modification 
-      List<FSVolume> volumeList = new ArrayList<FSVolume>(getVolumes());
+      final List<FSVolumeInterface> volumeList = new ArrayList<FSVolumeInterface>(volumes);
       
       
       for (int idx = 0; idx < volumeList.size(); idx++) {
       for (int idx = 0; idx < volumeList.size(); idx++) {
-        FSVolume fsv = volumeList.get(idx);
+        FSVolume fsv = (FSVolume)volumeList.get(idx);
         try {
         try {
           fsv.checkDirs();
           fsv.checkDirs();
         } catch (DiskErrorException e) {
         } catch (DiskErrorException e) {
@@ -882,8 +892,8 @@ class FSDataset implements FSDatasetInterface {
       
       
       // Remove null volumes from the volumes array
       // Remove null volumes from the volumes array
       if (removedVols != null && removedVols.size() > 0) {
       if (removedVols != null && removedVols.size() > 0) {
-        List<FSVolume> newVols = new ArrayList<FSVolume>();
-        for (FSVolume vol : volumeList) {
+        List<FSVolumeInterface> newVols = new ArrayList<FSVolumeInterface>();
+        for (FSVolumeInterface vol : volumeList) {
           if (vol != null) {
           if (vol != null) {
             newVols.add(vol);
             newVols.add(vol);
           }
           }
@@ -896,44 +906,30 @@ class FSDataset implements FSDatasetInterface {
 
 
       return removedVols;
       return removedVols;
     }
     }
-      
+
+    @Override
     public String toString() {
     public String toString() {
       return volumes.toString();
       return volumes.toString();
     }
     }
 
 
-    boolean isValid(FSVolume volume) {
-      for (FSVolume vol : volumes) {
-        if (vol == volume) {
-          return true;
-        }
-      }
-      return false;
-    }
 
 
     private void addBlockPool(String bpid, Configuration conf)
     private void addBlockPool(String bpid, Configuration conf)
         throws IOException {
         throws IOException {
-      for (FSVolume v : volumes) {
-        v.addBlockPool(bpid, conf);
+      for (FSVolumeInterface v : volumes) {
+        ((FSVolume)v).addBlockPool(bpid, conf);
       }
       }
     }
     }
     
     
     private void removeBlockPool(String bpid) {
     private void removeBlockPool(String bpid) {
-      for (FSVolume v : volumes) {
-        v.shutdownBlockPool(bpid);
+      for (FSVolumeInterface v : volumes) {
+        ((FSVolume)v).shutdownBlockPool(bpid);
       }
       }
     }
     }
-    
-    /**
-     * @return unmodifiable list of volumes
-     */
-    public List<FSVolume> getVolumes() {
-      return volumes;
-    }
 
 
     private void shutdown() {
     private void shutdown() {
-      for (FSVolume volume : volumes) {
+      for (FSVolumeInterface volume : volumes) {
         if(volume != null) {
         if(volume != null) {
-          volume.shutdown();
+          ((FSVolume)volume).shutdown();
         }
         }
       }
       }
     }
     }
@@ -945,35 +941,20 @@ class FSDataset implements FSDatasetInterface {
   //
   //
   //////////////////////////////////////////////////////
   //////////////////////////////////////////////////////
 
 
-  //Find better place?
-  static final String METADATA_EXTENSION = ".meta";
-  static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
-
   private static boolean isUnlinkTmpFile(File f) {
   private static boolean isUnlinkTmpFile(File f) {
     String name = f.getName();
     String name = f.getName();
-    return name.endsWith(UNLINK_BLOCK_SUFFIX);
-  }
-  
-  static File getUnlinkTmpFile(File f) {
-    return new File(f.getParentFile(), f.getName()+UNLINK_BLOCK_SUFFIX);
+    return name.endsWith(DatanodeUtil.UNLINK_BLOCK_SUFFIX);
   }
   }
   
   
   private static File getOrigFile(File unlinkTmpFile) {
   private static File getOrigFile(File unlinkTmpFile) {
     String fileName = unlinkTmpFile.getName();
     String fileName = unlinkTmpFile.getName();
     return new File(unlinkTmpFile.getParentFile(),
     return new File(unlinkTmpFile.getParentFile(),
-        fileName.substring(0, fileName.length()-UNLINK_BLOCK_SUFFIX.length()));
-  }
-  
-  static String getMetaFileName(String blockFileName, long genStamp) {
-    return blockFileName + "_" + genStamp + METADATA_EXTENSION;
-  }
-  
-  static File getMetaFile(File f , long genStamp) {
-    return new File(getMetaFileName(f.getAbsolutePath(), genStamp));
+        fileName.substring(0,
+            fileName.length() - DatanodeUtil.UNLINK_BLOCK_SUFFIX.length()));
   }
   }
   
   
   protected File getMetaFile(ExtendedBlock b) throws IOException {
   protected File getMetaFile(ExtendedBlock b) throws IOException {
-    return getMetaFile(getBlockFile(b), b.getGenerationStamp());
+    return DatanodeUtil.getMetaFile(getBlockFile(b), b.getGenerationStamp());
   }
   }
 
 
   /** Find the metadata file for the specified block file.
   /** Find the metadata file for the specified block file.
@@ -995,34 +976,13 @@ class FSDataset implements FSDatasetInterface {
                       " does not have a metafile!");
                       " does not have a metafile!");
     return GenerationStamp.GRANDFATHER_GENERATION_STAMP;
     return GenerationStamp.GRANDFATHER_GENERATION_STAMP;
   }
   }
-
-  /** Find the corresponding meta data file from a given block file */
-  private static File findMetaFile(final File blockFile) throws IOException {
-    final String prefix = blockFile.getName() + "_";
-    final File parent = blockFile.getParentFile();
-    File[] matches = parent.listFiles(new FilenameFilter() {
-      public boolean accept(File dir, String name) {
-        return dir.equals(parent)
-            && name.startsWith(prefix) && name.endsWith(METADATA_EXTENSION);
-      }
-    });
-
-    if (matches == null || matches.length == 0) {
-      throw new IOException("Meta file not found, blockFile=" + blockFile);
-    }
-    else if (matches.length > 1) {
-      throw new IOException("Found more than one meta files: " 
-          + Arrays.asList(matches));
-    }
-    return matches[0];
-  }
   
   
   /** Find the corresponding meta data file from a given block file */
   /** Find the corresponding meta data file from a given block file */
   private static long parseGenerationStamp(File blockFile, File metaFile
   private static long parseGenerationStamp(File blockFile, File metaFile
       ) throws IOException {
       ) throws IOException {
     String metaname = metaFile.getName();
     String metaname = metaFile.getName();
     String gs = metaname.substring(blockFile.getName().length() + 1,
     String gs = metaname.substring(blockFile.getName().length() + 1,
-        metaname.length() - METADATA_EXTENSION.length());
+        metaname.length() - DatanodeUtil.METADATA_EXTENSION.length());
     try {
     try {
       return Long.parseLong(gs);
       return Long.parseLong(gs);
     } catch(NumberFormatException nfe) {
     } catch(NumberFormatException nfe) {
@@ -1031,6 +991,11 @@ class FSDataset implements FSDatasetInterface {
     }
     }
   }
   }
 
 
+  @Override // FSDatasetInterface
+  public List<FSVolumeInterface> getVolumes() {
+    return volumes.volumes;
+  }
+
   @Override // FSDatasetInterface
   @Override // FSDatasetInterface
   public synchronized Block getStoredBlock(String bpid, long blkid)
   public synchronized Block getStoredBlock(String bpid, long blkid)
       throws IOException {
       throws IOException {
@@ -1038,7 +1003,7 @@ class FSDataset implements FSDatasetInterface {
     if (blockfile == null) {
     if (blockfile == null) {
       return null;
       return null;
     }
     }
-    File metafile = findMetaFile(blockfile);
+    final File metafile = DatanodeUtil.findMetaFile(blockfile);
     return new Block(blkid, blockfile.length(),
     return new Block(blkid, blockfile.length(),
         parseGenerationStamp(blockfile, metafile));
         parseGenerationStamp(blockfile, metafile));
   }
   }
@@ -1102,8 +1067,8 @@ class FSDataset implements FSDatasetInterface {
   /**
   /**
    * An FSDataset has a directory where it loads its data files.
    * An FSDataset has a directory where it loads its data files.
    */
    */
-  public FSDataset(DataNode datanode, DataStorage storage, Configuration conf)
-      throws IOException {
+  private FSDataset(DataNode datanode, DataStorage storage, Configuration conf
+      ) throws IOException {
     this.datanode = datanode;
     this.datanode = datanode;
     this.maxBlocksPerDir = 
     this.maxBlocksPerDir = 
       conf.getInt(DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY,
       conf.getInt(DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY,
@@ -1135,12 +1100,12 @@ class FSDataset implements FSDatasetInterface {
           + ", volume failures tolerated: " + volFailuresTolerated);
           + ", volume failures tolerated: " + volFailuresTolerated);
     }
     }
 
 
-    FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()];
+    final List<FSVolumeInterface> volArray = new ArrayList<FSVolumeInterface>(
+        storage.getNumStorageDirs());
     for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
     for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
-      volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(),
-          conf);
-      DataNode.LOG.info("FSDataset added volume - "
-          + storage.getStorageDir(idx).getCurrentDir());
+      final File dir = storage.getStorageDir(idx).getCurrentDir();
+      volArray.add(new FSVolume(dir, conf));
+      DataNode.LOG.info("FSDataset added volume - " + dir);
     }
     }
     volumeMap = new ReplicasMap(this);
     volumeMap = new ReplicasMap(this);
 
 
@@ -1186,7 +1151,7 @@ class FSDataset implements FSDatasetInterface {
    */
    */
   @Override // FSDatasetInterface
   @Override // FSDatasetInterface
   public boolean hasEnoughResource() {
   public boolean hasEnoughResource() {
-    return volumes.numberOfVolumes() >= validVolsRequired; 
+    return getVolumes().size() >= validVolsRequired; 
   }
   }
 
 
   /**
   /**
@@ -1369,8 +1334,8 @@ class FSDataset implements FSDatasetInterface {
   private static File moveBlockFiles(Block b, File srcfile, File destdir
   private static File moveBlockFiles(Block b, File srcfile, File destdir
       ) throws IOException {
       ) throws IOException {
     final File dstfile = new File(destdir, b.getBlockName());
     final File dstfile = new File(destdir, b.getBlockName());
-    final File srcmeta = getMetaFile(srcfile, b.getGenerationStamp());
-    final File dstmeta = getMetaFile(dstfile, b.getGenerationStamp());
+    final File srcmeta = DatanodeUtil.getMetaFile(srcfile, b.getGenerationStamp());
+    final File dstmeta = DatanodeUtil.getMetaFile(dstfile, b.getGenerationStamp());
     if (!srcmeta.renameTo(dstmeta)) {
     if (!srcmeta.renameTo(dstmeta)) {
       throw new IOException("Failed to move meta file for " + b
       throw new IOException("Failed to move meta file for " + b
           + " from " + srcmeta + " to " + dstmeta);
           + " from " + srcmeta + " to " + dstmeta);
@@ -1488,7 +1453,7 @@ class FSDataset implements FSDatasetInterface {
     
     
     // construct a RBW replica with the new GS
     // construct a RBW replica with the new GS
     File blkfile = replicaInfo.getBlockFile();
     File blkfile = replicaInfo.getBlockFile();
-    FSVolume v = replicaInfo.getVolume();
+    FSVolume v = (FSVolume)replicaInfo.getVolume();
     if (v.getAvailable() < estimateBlockLen - replicaInfo.getNumBytes()) {
     if (v.getAvailable() < estimateBlockLen - replicaInfo.getNumBytes()) {
       throw new DiskOutOfSpaceException("Insufficient space for appending to "
       throw new DiskOutOfSpaceException("Insufficient space for appending to "
           + replicaInfo);
           + replicaInfo);
@@ -1745,7 +1710,7 @@ class FSDataset implements FSDatasetInterface {
           + visible + ", temp=" + temp);
           + visible + ", temp=" + temp);
     }
     }
     // check volume
     // check volume
-    final FSVolume v = temp.getVolume();
+    final FSVolume v = (FSVolume)temp.getVolume();
     if (v == null) {
     if (v == null) {
       throw new IOException("r.getVolume() = null, temp="  + temp);
       throw new IOException("r.getVolume() = null, temp="  + temp);
     }
     }
@@ -1806,7 +1771,7 @@ class FSDataset implements FSDatasetInterface {
     if ( vol == null ) {
     if ( vol == null ) {
       ReplicaInfo replica = volumeMap.get(bpid, blk);
       ReplicaInfo replica = volumeMap.get(bpid, blk);
       if (replica != null) {
       if (replica != null) {
-        vol = volumeMap.get(bpid, blk).getVolume();
+        vol = (FSVolume)volumeMap.get(bpid, blk).getVolume();
       }
       }
       if ( vol == null ) {
       if ( vol == null ) {
         throw new IOException("Could not find volume for block " + blk);
         throw new IOException("Could not find volume for block " + blk);
@@ -1846,7 +1811,7 @@ class FSDataset implements FSDatasetInterface {
       newReplicaInfo = (FinalizedReplica)
       newReplicaInfo = (FinalizedReplica)
              ((ReplicaUnderRecovery)replicaInfo).getOriginalReplica();
              ((ReplicaUnderRecovery)replicaInfo).getOriginalReplica();
     } else {
     } else {
-      FSVolume v = replicaInfo.getVolume();
+      FSVolume v = (FSVolume)replicaInfo.getVolume();
       File f = replicaInfo.getBlockFile();
       File f = replicaInfo.getBlockFile();
       if (v == null) {
       if (v == null) {
         throw new IOException("No volume for temporary file " + f + 
         throw new IOException("No volume for temporary file " + f + 
@@ -1944,7 +1909,8 @@ class FSDataset implements FSDatasetInterface {
   /**
   /**
    * Get the list of finalized blocks from in-memory blockmap for a block pool.
    * Get the list of finalized blocks from in-memory blockmap for a block pool.
    */
    */
-  synchronized List<Block> getFinalizedBlocks(String bpid) {
+  @Override
+  public synchronized List<Block> getFinalizedBlocks(String bpid) {
     ArrayList<Block> finalized = new ArrayList<Block>(volumeMap.size(bpid));
     ArrayList<Block> finalized = new ArrayList<Block>(volumeMap.size(bpid));
     for (ReplicaInfo b : volumeMap.replicas(bpid)) {
     for (ReplicaInfo b : volumeMap.replicas(bpid)) {
       if(b.getState() == ReplicaState.FINALIZED) {
       if(b.getState() == ReplicaState.FINALIZED) {
@@ -2017,7 +1983,7 @@ class FSDataset implements FSDatasetInterface {
     }
     }
 
 
     //check replica's meta file
     //check replica's meta file
-    final File metafile = getMetaFile(f, r.getGenerationStamp());
+    final File metafile = DatanodeUtil.getMetaFile(f, r.getGenerationStamp());
     if (!metafile.exists()) {
     if (!metafile.exists()) {
       throw new IOException("Metafile " + metafile + " does not exist, r=" + r);
       throw new IOException("Metafile " + metafile + " does not exist, r=" + r);
     }
     }
@@ -2048,7 +2014,7 @@ class FSDataset implements FSDatasetInterface {
           error = true;
           error = true;
           continue;
           continue;
         }
         }
-        v = dinfo.getVolume();
+        v = (FSVolume)dinfo.getVolume();
         if (f == null) {
         if (f == null) {
           DataNode.LOG.warn("Unexpected error trying to delete block "
           DataNode.LOG.warn("Unexpected error trying to delete block "
                             + invalidBlks[i] + 
                             + invalidBlks[i] + 
@@ -2082,7 +2048,7 @@ class FSDataset implements FSDatasetInterface {
         }
         }
         volumeMap.remove(bpid, invalidBlks[i]);
         volumeMap.remove(bpid, invalidBlks[i]);
       }
       }
-      File metaFile = getMetaFile(f, invalidBlks[i].getGenerationStamp());
+      File metaFile = DatanodeUtil.getMetaFile(f, invalidBlks[i].getGenerationStamp());
       
       
       // Delete the block asynchronously to make sure we can do it fast enough
       // Delete the block asynchronously to make sure we can do it fast enough
       asyncDiskService.deleteAsync(v, bpid, f, metaFile,
       asyncDiskService.deleteAsync(v, bpid, f, metaFile,
@@ -2235,8 +2201,9 @@ class FSDataset implements FSDatasetInterface {
    * @param diskMetaFile Metadata file from on the disk
    * @param diskMetaFile Metadata file from on the disk
    * @param vol Volume of the block file
    * @param vol Volume of the block file
    */
    */
+  @Override
   public void checkAndUpdate(String bpid, long blockId, File diskFile,
   public void checkAndUpdate(String bpid, long blockId, File diskFile,
-      File diskMetaFile, FSVolume vol) {
+      File diskMetaFile, FSVolumeInterface vol) {
     Block corruptBlock = null;
     Block corruptBlock = null;
     ReplicaInfo memBlockInfo;
     ReplicaInfo memBlockInfo;
     synchronized (this) {
     synchronized (this) {
@@ -2324,7 +2291,7 @@ class FSDataset implements FSDatasetInterface {
 
 
       // Compare generation stamp
       // Compare generation stamp
       if (memBlockInfo.getGenerationStamp() != diskGS) {
       if (memBlockInfo.getGenerationStamp() != diskGS) {
-        File memMetaFile = getMetaFile(diskFile, 
+        File memMetaFile = DatanodeUtil.getMetaFile(diskFile, 
             memBlockInfo.getGenerationStamp());
             memBlockInfo.getGenerationStamp());
         if (memMetaFile.exists()) {
         if (memMetaFile.exists()) {
           if (memMetaFile.compareTo(diskMetaFile) != 0) {
           if (memMetaFile.compareTo(diskMetaFile) != 0) {
@@ -2559,18 +2526,15 @@ class FSDataset implements FSDatasetInterface {
     volumes.removeBlockPool(bpid);
     volumes.removeBlockPool(bpid);
   }
   }
   
   
-  /**
-   * get list of all bpids
-   * @return list of bpids
-   */
-  public String [] getBPIdlist() throws IOException {
+  @Override
+  public String[] getBlockPoolList() {
     return volumeMap.getBlockPoolList();
     return volumeMap.getBlockPoolList();
   }
   }
   
   
   /**
   /**
    * Class for representing the Datanode volume information
    * Class for representing the Datanode volume information
    */
    */
-  static class VolumeInfo {
+  private static class VolumeInfo {
     final String directory;
     final String directory;
     final long usedSpace;
     final long usedSpace;
     final long freeSpace;
     final long freeSpace;
@@ -2583,10 +2547,11 @@ class FSDataset implements FSDatasetInterface {
       this.reservedSpace = reservedSpace;
       this.reservedSpace = reservedSpace;
     }
     }
   }  
   }  
-  
-  Collection<VolumeInfo> getVolumeInfo() {
+
+  private Collection<VolumeInfo> getVolumeInfo() {
     Collection<VolumeInfo> info = new ArrayList<VolumeInfo>();
     Collection<VolumeInfo> info = new ArrayList<VolumeInfo>();
-    for (FSVolume volume : volumes.volumes) {
+    for (FSVolumeInterface v : volumes.volumes) {
+      final FSVolume volume = (FSVolume)v;
       long used = 0;
       long used = 0;
       long free = 0;
       long free = 0;
       try {
       try {
@@ -2603,13 +2568,27 @@ class FSDataset implements FSDatasetInterface {
     }
     }
     return info;
     return info;
   }
   }
-  
+
+  @Override
+  public Map<String, Object> getVolumeInfoMap() {
+    final Map<String, Object> info = new HashMap<String, Object>();
+    Collection<VolumeInfo> volumes = getVolumeInfo();
+    for (VolumeInfo v : volumes) {
+      final Map<String, Object> innerInfo = new HashMap<String, Object>();
+      innerInfo.put("usedSpace", v.usedSpace);
+      innerInfo.put("freeSpace", v.freeSpace);
+      innerInfo.put("reservedSpace", v.reservedSpace);
+      info.put(v.directory, innerInfo);
+    }
+    return info;
+  }
+
   @Override //FSDatasetInterface
   @Override //FSDatasetInterface
   public synchronized void deleteBlockPool(String bpid, boolean force)
   public synchronized void deleteBlockPool(String bpid, boolean force)
       throws IOException {
       throws IOException {
     if (!force) {
     if (!force) {
-      for (FSVolume volume : volumes.volumes) {
-        if (!volume.isBPDirEmpty(bpid)) {
+      for (FSVolumeInterface volume : volumes.volumes) {
+        if (!((FSVolume)volume).isBPDirEmpty(bpid)) {
           DataNode.LOG.warn(bpid
           DataNode.LOG.warn(bpid
               + " has some block files, cannot delete unless forced");
               + " has some block files, cannot delete unless forced");
           throw new IOException("Cannot delete block pool, "
           throw new IOException("Cannot delete block pool, "
@@ -2617,8 +2596,8 @@ class FSDataset implements FSDatasetInterface {
         }
         }
       }
       }
     }
     }
-    for (FSVolume volume : volumes.volumes) {
-      volume.deleteBPDirectories(bpid, force);
+    for (FSVolumeInterface volume : volumes.volumes) {
+      ((FSVolume)volume).deleteBPDirectories(bpid, force);
     }
     }
   }
   }
   
   
@@ -2626,7 +2605,7 @@ class FSDataset implements FSDatasetInterface {
   public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
   public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
       throws IOException {
       throws IOException {
     File datafile = getBlockFile(block);
     File datafile = getBlockFile(block);
-    File metafile = getMetaFile(datafile, block.getGenerationStamp());
+    File metafile = DatanodeUtil.getMetaFile(datafile, block.getGenerationStamp());
     BlockLocalPathInfo info = new BlockLocalPathInfo(block,
     BlockLocalPathInfo info = new BlockLocalPathInfo(block,
         datafile.getAbsolutePath(), metafile.getAbsolutePath());
         datafile.getAbsolutePath(), metafile.getAbsolutePath());
     return info;
     return info;
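
Note on the helpers removed above: the ".meta"/".unlinked" constants and the getMetaFile/getUnlinkTmpFile/findMetaFile utilities now live in DatanodeUtil, and FSDataset only calls them. A minimal standalone sketch of the naming convention those helpers encode; the class and method names below are local stand-ins, not the real DatanodeUtil signatures:

    import java.io.File;

    // Illustrative only: block file "blk_123" with generation stamp 7 pairs
    // with meta file "blk_123_7.meta".
    class MetaFileNamingSketch {
      static final String METADATA_EXTENSION = ".meta";

      static File metaFileFor(File blockFile, long genStamp) {
        return new File(blockFile.getParentFile(),
            blockFile.getName() + "_" + genStamp + METADATA_EXTENSION);
      }

      static long genStampOf(File blockFile, File metaFile) {
        String name = metaFile.getName();
        return Long.parseLong(name.substring(blockFile.getName().length() + 1,
            name.length() - METADATA_EXTENSION.length()));
      }

      public static void main(String[] args) {
        File block = new File("/data/current/blk_123");
        File meta = metaFileFor(block, 7);   // /data/current/blk_123_7.meta
        System.out.println(meta + " -> genStamp=" + genStampOf(block, meta));
      }
    }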

+ 67 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java

@@ -19,13 +19,17 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 
 
 
 import java.io.Closeable;
 import java.io.Closeable;
+import java.io.File;
 import java.io.FilterInputStream;
 import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.OutputStream;
+import java.util.List;
+import java.util.Map;
 
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
@@ -35,6 +39,7 @@ import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlo
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 
 
 /**
 /**
@@ -46,8 +51,68 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException;
  */
  */
 @InterfaceAudience.Private
 @InterfaceAudience.Private
 public interface FSDatasetInterface extends FSDatasetMBean {
 public interface FSDatasetInterface extends FSDatasetMBean {
-  
-  
+  /**
+   * A factory for creating FSDatasetInterface objects.
+   */
+  public abstract class Factory {
+    /** @return the configured factory. */
+    public static Factory getFactory(Configuration conf) {
+      final Class<? extends Factory> clazz = conf.getClass(
+          DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY,
+          FSDataset.Factory.class,
+          Factory.class);
+      return ReflectionUtils.newInstance(clazz, conf);
+    }
+
+    /** Create a FSDatasetInterface object. */
+    public abstract FSDatasetInterface createFSDatasetInterface(
+        DataNode datanode, DataStorage storage, Configuration conf
+        ) throws IOException;
+
+    /** Does the factory create simulated objects? */
+    public boolean isSimulated() {
+      return false;
+    }
+  }
+
+  /**
+   * This is an interface for the underlying volume.
+   * @see org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume
+   */
+  interface FSVolumeInterface {
+    /** @return a list of block pools. */
+    public String[] getBlockPoolList();
+
+    /** @return the available storage space in bytes. */
+    public long getAvailable() throws IOException;
+
+    /** @return the directory for the block pool. */
+    public File getDirectory(String bpid) throws IOException;
+
+    /** @return the directory for the finalized blocks in the block pool. */
+    public File getFinalizedDir(String bpid) throws IOException;
+  }
+
+  /** @return a list of volumes. */
+  public List<FSVolumeInterface> getVolumes();
+
+  /** @return a volume information map (name => info). */
+  public Map<String, Object> getVolumeInfoMap();
+
+  /** @return a list of block pools. */
+  public String[] getBlockPoolList();
+
+  /** @return a list of finalized blocks for the given block pool. */
+  public List<Block> getFinalizedBlocks(String bpid);
+
+  /**
+   * Check whether the in-memory block record matches the block on the disk,
+   * and, in case that they are not matched, update the record or mark it
+   * as corrupted.
+   */
+  public void checkAndUpdate(String bpid, long blockId, File diskFile,
+      File diskMetaFile, FSVolumeInterface vol);
+
   /**
   /**
    * Returns the length of the metadata file of the specified block
    * Returns the length of the metadata file of the specified block
    * @param b - the block for which the metadata length is desired
    * @param b - the block for which the metadata length is desired
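
The Factory added above makes the dataset implementation pluggable through the configuration. A rough usage sketch: only Factory.getFactory and Configuration.setClass are taken from the patch and the Hadoop API; the override shown in the comment uses a hypothetical SimulatedDatasetFactory class and is not part of this change.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;

    // Sketch: resolve the dataset implementation through the new factory hook.
    public class DatasetFactorySketch {
      static FSDatasetInterface.Factory resolve(Configuration conf) {
        // Optional override (hypothetical class name); without it the default
        // FSDataset.Factory is returned:
        //   conf.setClass(DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY,
        //       SimulatedDatasetFactory.class, FSDatasetInterface.Factory.class);
        return FSDatasetInterface.Factory.getFactory(conf);
      }
    }

The resolved factory would then build the dataset via createFSDatasetInterface(datanode, storage, conf), as declared above.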

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java

@@ -21,7 +21,7 @@ import java.io.File;
 
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 
 
 /**
 /**
  * This class describes a replica that has been finalized.
  * This class describes a replica that has been finalized.
@@ -38,7 +38,7 @@ class FinalizedReplica extends ReplicaInfo {
    * @param dir directory path where block and meta files are located
    * @param dir directory path where block and meta files are located
    */
    */
   FinalizedReplica(long blockId, long len, long genStamp,
   FinalizedReplica(long blockId, long len, long genStamp,
-      FSVolume vol, File dir) {
+      FSVolumeInterface vol, File dir) {
     super(blockId, len, genStamp, vol, dir);
     super(blockId, len, genStamp, vol, dir);
   }
   }
   
   
@@ -48,7 +48,7 @@ class FinalizedReplica extends ReplicaInfo {
    * @param vol volume where replica is located
    * @param vol volume where replica is located
    * @param dir directory path where block and meta files are located
    * @param dir directory path where block and meta files are located
    */
    */
-  FinalizedReplica(Block block, FSVolume vol, File dir) {
+  FinalizedReplica(Block block, FSVolumeInterface vol, File dir) {
     super(block, vol, dir);
     super(block, vol, dir);
   }
   }
 
 

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java

@@ -21,7 +21,7 @@ import java.io.File;
 
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 
 
 /** This class represents replicas being written. 
 /** This class represents replicas being written. 
  * Those are the replicas that
  * Those are the replicas that
@@ -36,7 +36,7 @@ class ReplicaBeingWritten extends ReplicaInPipeline {
    * @param dir directory path where block and meta files are located
    * @param dir directory path where block and meta files are located
    */
    */
   ReplicaBeingWritten(long blockId, long genStamp, 
   ReplicaBeingWritten(long blockId, long genStamp, 
-        FSVolume vol, File dir) {
+        FSVolumeInterface vol, File dir) {
     super( blockId, genStamp, vol, dir);
     super( blockId, genStamp, vol, dir);
   }
   }
   
   
@@ -48,7 +48,7 @@ class ReplicaBeingWritten extends ReplicaInPipeline {
    * @param writer a thread that is writing to this replica
    * @param writer a thread that is writing to this replica
    */
    */
   ReplicaBeingWritten(Block block, 
   ReplicaBeingWritten(Block block, 
-      FSVolume vol, File dir, Thread writer) {
+      FSVolumeInterface vol, File dir, Thread writer) {
     super( block, vol, dir, writer);
     super( block, vol, dir, writer);
   }
   }
 
 
@@ -62,7 +62,7 @@ class ReplicaBeingWritten extends ReplicaInPipeline {
    * @param writer a thread that is writing to this replica
    * @param writer a thread that is writing to this replica
    */
    */
   ReplicaBeingWritten(long blockId, long len, long genStamp,
   ReplicaBeingWritten(long blockId, long len, long genStamp,
-      FSVolume vol, File dir, Thread writer ) {
+      FSVolumeInterface vol, File dir, Thread writer ) {
     super( blockId, len, genStamp, vol, dir, writer);
     super( blockId, len, genStamp, vol, dir, writer);
   }
   }
 
 

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java

@@ -24,8 +24,8 @@ import java.io.RandomAccessFile;
 
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
 import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
 import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DataChecksum;
 
 
@@ -53,7 +53,7 @@ class ReplicaInPipeline extends ReplicaInfo
    * @param state replica state
    * @param state replica state
    */
    */
     ReplicaInPipeline(long blockId, long genStamp, 
     ReplicaInPipeline(long blockId, long genStamp, 
-        FSVolume vol, File dir) {
+        FSVolumeInterface vol, File dir) {
     this( blockId, 0L, genStamp, vol, dir, Thread.currentThread());
     this( blockId, 0L, genStamp, vol, dir, Thread.currentThread());
   }
   }
 
 
@@ -65,7 +65,7 @@ class ReplicaInPipeline extends ReplicaInfo
    * @param writer a thread that is writing to this replica
    * @param writer a thread that is writing to this replica
    */
    */
   ReplicaInPipeline(Block block, 
   ReplicaInPipeline(Block block, 
-      FSVolume vol, File dir, Thread writer) {
+      FSVolumeInterface vol, File dir, Thread writer) {
     this( block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
     this( block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
         vol, dir, writer);
         vol, dir, writer);
   }
   }
@@ -80,7 +80,7 @@ class ReplicaInPipeline extends ReplicaInfo
    * @param writer a thread that is writing to this replica
    * @param writer a thread that is writing to this replica
    */
    */
   ReplicaInPipeline(long blockId, long len, long genStamp,
   ReplicaInPipeline(long blockId, long len, long genStamp,
-      FSVolume vol, File dir, Thread writer ) {
+      FSVolumeInterface vol, File dir, Thread writer ) {
     super( blockId, len, genStamp, vol, dir);
     super( blockId, len, genStamp, vol, dir);
     this.bytesAcked = len;
     this.bytesAcked = len;
     this.bytesOnDisk = len;
     this.bytesOnDisk = len;

+ 11 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java

@@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 
 
 /**
 /**
@@ -35,8 +35,10 @@ import org.apache.hadoop.io.IOUtils;
  */
  */
 @InterfaceAudience.Private
 @InterfaceAudience.Private
 abstract public class ReplicaInfo extends Block implements Replica {
 abstract public class ReplicaInfo extends Block implements Replica {
-  private FSVolume volume;      // volume where the replica belongs
-  private File     dir;         // directory where block & meta files belong
+  /** volume where the replica belongs */
+  private FSVolumeInterface volume;
+  /** directory where block & meta files belong */
+  private File dir;
 
 
   /**
   /**
    * Constructor for a zero length replica
    * Constructor for a zero length replica
@@ -45,7 +47,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
    * @param vol volume where replica is located
    * @param vol volume where replica is located
    * @param dir directory path where block and meta files are located
    * @param dir directory path where block and meta files are located
    */
    */
-  ReplicaInfo(long blockId, long genStamp, FSVolume vol, File dir) {
+  ReplicaInfo(long blockId, long genStamp, FSVolumeInterface vol, File dir) {
     this( blockId, 0L, genStamp, vol, dir);
     this( blockId, 0L, genStamp, vol, dir);
   }
   }
   
   
@@ -55,7 +57,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
    * @param vol volume where replica is located
    * @param vol volume where replica is located
    * @param dir directory path where block and meta files are located
    * @param dir directory path where block and meta files are located
    */
    */
-  ReplicaInfo(Block block, FSVolume vol, File dir) {
+  ReplicaInfo(Block block, FSVolumeInterface vol, File dir) {
     this(block.getBlockId(), block.getNumBytes(), 
     this(block.getBlockId(), block.getNumBytes(), 
         block.getGenerationStamp(), vol, dir);
         block.getGenerationStamp(), vol, dir);
   }
   }
@@ -69,7 +71,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
    * @param dir directory path where block and meta files are located
    * @param dir directory path where block and meta files are located
    */
    */
   ReplicaInfo(long blockId, long len, long genStamp,
   ReplicaInfo(long blockId, long len, long genStamp,
-      FSVolume vol, File dir) {
+      FSVolumeInterface vol, File dir) {
     super(blockId, len, genStamp);
     super(blockId, len, genStamp);
     this.volume = vol;
     this.volume = vol;
     this.dir = dir;
     this.dir = dir;
@@ -111,14 +113,14 @@ abstract public class ReplicaInfo extends Block implements Replica {
    * Get the volume where this replica is located on disk
    * Get the volume where this replica is located on disk
    * @return the volume where this replica is located on disk
    * @return the volume where this replica is located on disk
    */
    */
-  FSVolume getVolume() {
+  FSVolumeInterface getVolume() {
     return volume;
     return volume;
   }
   }
   
   
   /**
   /**
    * Set the volume where this replica is located on disk
    * Set the volume where this replica is located on disk
    */
    */
-  void setVolume(FSVolume vol) {
+  void setVolume(FSVolumeInterface vol) {
     this.volume = vol;
     this.volume = vol;
   }
   }
   
   
@@ -162,7 +164,7 @@ abstract public class ReplicaInfo extends Block implements Replica {
    * be recovered (especially on Windows) on datanode restart.
    * be recovered (especially on Windows) on datanode restart.
    */
    */
   private void unlinkFile(File file, Block b) throws IOException {
   private void unlinkFile(File file, Block b) throws IOException {
-    File tmpFile = DatanodeUtil.createTmpFile(b, FSDataset.getUnlinkTmpFile(file));
+    File tmpFile = DatanodeUtil.createTmpFile(b, DatanodeUtil.getUnlinkTmpFile(file));
     try {
     try {
       FileInputStream in = new FileInputStream(file);
       FileInputStream in = new FileInputStream(file);
       try {
       try {

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java

@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
 import java.io.File;
 import java.io.File;
 
 
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 
 
 /**
 /**
@@ -145,7 +145,7 @@ class ReplicaUnderRecovery extends ReplicaInfo {
   }
   }
   
   
   @Override //ReplicaInfo
   @Override //ReplicaInfo
-  void setVolume(FSVolume vol) {
+  void setVolume(FSVolumeInterface vol) {
     super.setVolume(vol);
     super.setVolume(vol);
     original.setVolume(vol);
     original.setVolume(vol);
   }
   }

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java

@@ -21,7 +21,7 @@ import java.io.File;
 
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 
 
 /**
 /**
  * This class represents a replica that is waiting to be recovered.
  * This class represents a replica that is waiting to be recovered.
@@ -44,7 +44,7 @@ class ReplicaWaitingToBeRecovered extends ReplicaInfo {
    * @param dir directory path where block and meta files are located
    * @param dir directory path where block and meta files are located
    */
    */
   ReplicaWaitingToBeRecovered(long blockId, long len, long genStamp,
   ReplicaWaitingToBeRecovered(long blockId, long len, long genStamp,
-      FSVolume vol, File dir) {
+      FSVolumeInterface vol, File dir) {
     super(blockId, len, genStamp, vol, dir);
     super(blockId, len, genStamp, vol, dir);
   }
   }
   
   
@@ -54,7 +54,7 @@ class ReplicaWaitingToBeRecovered extends ReplicaInfo {
    * @param vol volume where replica is located
    * @param vol volume where replica is located
    * @param dir directory path where block and meta files are located
    * @param dir directory path where block and meta files are located
    */
    */
-  ReplicaWaitingToBeRecovered(Block block, FSVolume vol, File dir) {
+  ReplicaWaitingToBeRecovered(Block block, FSVolumeInterface vol, File dir) {
     super(block, vol, dir);
     super(block, vol, dir);
   }
   }
   
   

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java

@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
 import java.io.IOException;
 import java.io.IOException;
 import java.util.List;
 import java.util.List;
 
 
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 
 
 public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy {
 public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy {
@@ -28,8 +28,8 @@ public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy {
   private int curVolume = 0;
   private int curVolume = 0;
 
 
   @Override
   @Override
-  public synchronized FSVolume chooseVolume(List<FSVolume> volumes, long blockSize)
-      throws IOException {
+  public synchronized FSVolumeInterface chooseVolume(
+      List<FSVolumeInterface> volumes, long blockSize) throws IOException {
     if(volumes.size() < 1) {
     if(volumes.size() < 1) {
       throw new DiskOutOfSpaceException("No more available volumes");
       throw new DiskOutOfSpaceException("No more available volumes");
     }
     }
@@ -44,7 +44,7 @@ public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy {
     long maxAvailable = 0;
     long maxAvailable = 0;
     
     
     while (true) {
     while (true) {
-      FSVolume volume = volumes.get(curVolume);
+      FSVolumeInterface volume = volumes.get(curVolume);
       curVolume = (curVolume + 1) % volumes.size();
       curVolume = (curVolume + 1) % volumes.size();
       long availableVolumeSize = volume.getAvailable();
       long availableVolumeSize = volume.getAvailable();
       if (availableVolumeSize > blockSize) { return volume; }
       if (availableVolumeSize > blockSize) { return volume; }
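
With chooseVolume now typed against FSVolumeInterface, the policy can be exercised with any volume implementation, such as a stub like the one sketched earlier. A minimal caller, assuming the volumes list comes from FSDataset#getVolumes():

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
    import org.apache.hadoop.hdfs.server.datanode.RoundRobinVolumesPolicy;

    // Rough caller of the round-robin policy against the interface type.
    class ChooseVolumeSketch {
      static FSVolumeInterface pick(List<FSVolumeInterface> volumes, long blockSize)
          throws IOException {
        RoundRobinVolumesPolicy policy = new RoundRobinVolumesPolicy();
        return policy.chooseVolume(volumes, blockSize);   // throws if no volume has room
      }
    }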

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java

@@ -117,7 +117,7 @@ public class DatanodeWebHdfsMethods {
   @PUT
   @PUT
   @Path("/")
   @Path("/")
   @Consumes({"*/*"})
   @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON})
+  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
   public Response putRoot(
   public Response putRoot(
       final InputStream in,
       final InputStream in,
       @Context final UserGroupInformation ugi,
       @Context final UserGroupInformation ugi,
@@ -147,7 +147,7 @@ public class DatanodeWebHdfsMethods {
   @PUT
   @PUT
   @Path("{" + UriFsPathParam.NAME + ":.*}")
   @Path("{" + UriFsPathParam.NAME + ":.*}")
   @Consumes({"*/*"})
   @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON})
+  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
   public Response put(
   public Response put(
       final InputStream in,
       final InputStream in,
       @Context final UserGroupInformation ugi,
       @Context final UserGroupInformation ugi,
@@ -209,7 +209,7 @@ public class DatanodeWebHdfsMethods {
       final InetSocketAddress nnHttpAddr = NameNode.getHttpAddress(conf);
       final InetSocketAddress nnHttpAddr = NameNode.getHttpAddress(conf);
       final URI uri = new URI(WebHdfsFileSystem.SCHEME, null,
       final URI uri = new URI(WebHdfsFileSystem.SCHEME, null,
           nnHttpAddr.getHostName(), nnHttpAddr.getPort(), fullpath, null, null);
           nnHttpAddr.getHostName(), nnHttpAddr.getPort(), fullpath, null, null);
-      return Response.created(uri).type(MediaType.APPLICATION_JSON).build();
+      return Response.created(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     }
     default:
     default:
       throw new UnsupportedOperationException(op + " is not supported");
       throw new UnsupportedOperationException(op + " is not supported");
@@ -222,7 +222,7 @@ public class DatanodeWebHdfsMethods {
   @POST
   @POST
   @Path("/")
   @Path("/")
   @Consumes({"*/*"})
   @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON})
+  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
   public Response postRoot(
   public Response postRoot(
       final InputStream in,
       final InputStream in,
       @Context final UserGroupInformation ugi,
       @Context final UserGroupInformation ugi,
@@ -243,7 +243,7 @@ public class DatanodeWebHdfsMethods {
   @POST
   @POST
   @Path("{" + UriFsPathParam.NAME + ":.*}")
   @Path("{" + UriFsPathParam.NAME + ":.*}")
   @Consumes({"*/*"})
   @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON})
+  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
   public Response post(
   public Response post(
       final InputStream in,
       final InputStream in,
       @Context final UserGroupInformation ugi,
       @Context final UserGroupInformation ugi,
@@ -287,7 +287,7 @@ public class DatanodeWebHdfsMethods {
         IOUtils.cleanup(LOG, out);
         IOUtils.cleanup(LOG, out);
         IOUtils.cleanup(LOG, dfsclient);
         IOUtils.cleanup(LOG, dfsclient);
       }
       }
-      return Response.ok().type(MediaType.APPLICATION_JSON).build();
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     }
     default:
     default:
       throw new UnsupportedOperationException(op + " is not supported");
       throw new UnsupportedOperationException(op + " is not supported");
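
For context, the @Produces and type() changes above follow the usual JAX-RS pattern: declare every media type a handler may emit, then set the concrete one on the Response. A stripped-down illustration, not the real DatanodeWebHdfsMethods signature:

    import javax.ws.rs.Consumes;
    import javax.ws.rs.PUT;
    import javax.ws.rs.Path;
    import javax.ws.rs.Produces;
    import javax.ws.rs.core.MediaType;
    import javax.ws.rs.core.Response;

    // Minimal JAX-RS resource shape: may emit octet-stream or JSON, and
    // explicitly types each response it builds.
    @Path("/sketch")
    public class OctetStreamResourceSketch {
      @PUT
      @Consumes({"*/*"})
      @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
      public Response put() {
        return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
      }
    }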

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

@@ -724,7 +724,7 @@ public class FSImage implements Closeable {
     long txId = loader.getLoadedImageTxId();
     long txId = loader.getLoadedImageTxId();
     LOG.info("Loaded image for txid " + txId + " from " + curFile);
     LOG.info("Loaded image for txid " + txId + " from " + curFile);
     lastAppliedTxId = txId;
     lastAppliedTxId = txId;
-    storage.setMostRecentCheckpointTxId(txId);
+    storage.setMostRecentCheckpointInfo(txId, curFile.lastModified());
   }
   }
 
 
   /**
   /**
@@ -739,7 +739,7 @@ public class FSImage implements Closeable {
     saver.save(newFile, txid, getFSNamesystem(), compression);
     saver.save(newFile, txid, getFSNamesystem(), compression);
     
     
     MD5FileUtils.saveMD5File(dstFile, saver.getSavedDigest());
     MD5FileUtils.saveMD5File(dstFile, saver.getSavedDigest());
-    storage.setMostRecentCheckpointTxId(txid);
+    storage.setMostRecentCheckpointInfo(txid, Util.now());
   }
   }
 
 
   /**
   /**
@@ -997,7 +997,7 @@ public class FSImage implements Closeable {
     // advertise it as such to other checkpointers
     // advertise it as such to other checkpointers
     // from now on
     // from now on
     if (txid > storage.getMostRecentCheckpointTxId()) {
     if (txid > storage.getMostRecentCheckpointTxId()) {
-      storage.setMostRecentCheckpointTxId(txid);
+      storage.setMostRecentCheckpointInfo(txid, Util.now());
     }
     }
   }
   }
 
 

+ 39 - 20
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -1908,7 +1908,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       boolean enforcePermission)
       boolean enforcePermission)
       throws AccessControlException, SafeModeException, UnresolvedLinkException,
       throws AccessControlException, SafeModeException, UnresolvedLinkException,
              IOException {
              IOException {
-    boolean deleteNow = false;
     ArrayList<Block> collectedBlocks = new ArrayList<Block>();
     ArrayList<Block> collectedBlocks = new ArrayList<Block>();
 
 
     writeLock();
     writeLock();
@@ -1926,24 +1925,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       if (!dir.delete(src, collectedBlocks)) {
       if (!dir.delete(src, collectedBlocks)) {
         return false;
         return false;
       }
       }
-      deleteNow = collectedBlocks.size() <= BLOCK_DELETION_INCREMENT;
-      if (deleteNow) { // Perform small deletes right away
-        removeBlocks(collectedBlocks);
-      }
-    } finally {
-      writeUnlock();
-    }
-
-    getEditLog().logSync();
-
-    writeLock();
-    try {
-      if (!deleteNow) {
-        removeBlocks(collectedBlocks); // Incremental deletion of blocks
-      }
     } finally {
     } finally {
       writeUnlock();
       writeUnlock();
     }
     }
+    getEditLog().logSync(); 
+    removeBlocks(collectedBlocks); // Incremental deletion of blocks
     collectedBlocks.clear();
     collectedBlocks.clear();
     if (NameNode.stateChangeLog.isDebugEnabled()) {
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* Namesystem.delete: "
       NameNode.stateChangeLog.debug("DIR* Namesystem.delete: "
@@ -1952,16 +1938,24 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     return true;
     return true;
   }
   }
 
 
-  /** From the given list, incrementally remove the blocks from blockManager */
+  /** 
+   * From the given list, incrementally remove the blocks from blockManager
+   * Writelock is dropped and reacquired every BLOCK_DELETION_INCREMENT to
+   * ensure that other waiters on the lock can get in. See HDFS-2938
+   */
   private void removeBlocks(List<Block> blocks) {
   private void removeBlocks(List<Block> blocks) {
-    assert hasWriteLock();
     int start = 0;
     int start = 0;
     int end = 0;
     int end = 0;
     while (start < blocks.size()) {
     while (start < blocks.size()) {
       end = BLOCK_DELETION_INCREMENT + start;
       end = BLOCK_DELETION_INCREMENT + start;
       end = end > blocks.size() ? blocks.size() : end;
       end = end > blocks.size() ? blocks.size() : end;
-      for (int i=start; i<end; i++) {
-        blockManager.removeBlock(blocks.get(i));
+      writeLock();
+      try {
+        for (int i = start; i < end; i++) {
+          blockManager.removeBlock(blocks.get(i));
+        }
+      } finally {
+        writeUnlock();
       }
       }
       start = end;
       start = end;
     }
     }
@@ -2632,6 +2626,31 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   public int getExpiredHeartbeats() {
   public int getExpiredHeartbeats() {
     return datanodeStatistics.getExpiredHeartbeats();
     return datanodeStatistics.getExpiredHeartbeats();
   }
   }
+  
+  @Metric({"TransactionsSinceLastCheckpoint",
+      "Number of transactions since last checkpoint"})
+  public long getTransactionsSinceLastCheckpoint() {
+    return getEditLog().getLastWrittenTxId() -
+        getFSImage().getStorage().getMostRecentCheckpointTxId();
+  }
+  
+  @Metric({"TransactionsSinceLastLogRoll",
+      "Number of transactions since last edit log roll"})
+  public long getTransactionsSinceLastLogRoll() {
+    return (getEditLog().getLastWrittenTxId() -
+        getEditLog().getCurSegmentTxId()) + 1;
+  }
+  
+  @Metric({"LastWrittenTransactionId", "Transaction ID written to the edit log"})
+  public long getLastWrittenTransactionId() {
+    return getEditLog().getLastWrittenTxId();
+  }
+  
+  @Metric({"LastCheckpointTime",
+      "Time in milliseconds since the epoch of the last checkpoint"})
+  public long getLastCheckpointTime() {
+    return getFSImage().getStorage().getMostRecentCheckpointTime();
+  }
 
 
   /** @see ClientProtocol#getStats() */
   /** @see ClientProtocol#getStats() */
   long[] getStats() {
   long[] getStats() {
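
The reworked delete path above is a chunked-deletion pattern: sync the edit log once, then remove blocks in increments, taking and releasing the write lock per increment so other waiters are not starved (HDFS-2938). A generic sketch of that pattern; the lock object and increment size here are illustrative stand-ins for the namesystem's own:

    import java.util.List;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Process a list in fixed-size chunks, dropping the write lock between chunks.
    abstract class IncrementalRemovalSketch<T> {
      private static final int INCREMENT = 1000;   // stand-in for BLOCK_DELETION_INCREMENT
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      protected abstract void remove(T item);      // e.g. blockManager.removeBlock(block)

      void removeAll(List<T> items) {
        int start = 0;
        while (start < items.size()) {
          int end = Math.min(start + INCREMENT, items.size());
          lock.writeLock().lock();
          try {
            for (int i = start; i < end; i++) {
              remove(items.get(i));
            }
          } finally {
            lock.writeLock().unlock();   // let other waiters in before the next chunk
          }
          start = end;
        }
      }
    }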

+ 19 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java

@@ -125,6 +125,11 @@ public class NNStorage extends Storage implements Closeable {
   * that have since been written to the edit log.
   */
  protected long mostRecentCheckpointTxId = HdfsConstants.INVALID_TXID;
+  
+  /**
+   * Time of the last checkpoint, in milliseconds since the epoch.
+   */
+  private long mostRecentCheckpointTime = 0;

  /**
   * list of failed (and thus removed) storages
@@ -417,18 +422,29 @@ public class NNStorage extends Storage implements Closeable {
  }

  /**
-   * Set the transaction ID of the last checkpoint
+   * Set the transaction ID and time of the last checkpoint
+   * 
+   * @param txid transaction id of the last checkpoint
+   * @param time time of the last checkpoint, in millis since the epoch
   */
-  void setMostRecentCheckpointTxId(long txid) {
+  void setMostRecentCheckpointInfo(long txid, long time) {
    this.mostRecentCheckpointTxId = txid;
+    this.mostRecentCheckpointTime = time;
  }

  /**
-   * Return the transaction ID of the last checkpoint.
+   * @return the transaction ID of the last checkpoint.
   */
  long getMostRecentCheckpointTxId() {
    return mostRecentCheckpointTxId;
  }
+  
+  /**
+   * @return the time of the most recent checkpoint in millis since the epoch.
+   */
+  long getMostRecentCheckpointTime() {
+    return mostRecentCheckpointTime;
+  }

  /**
   * Write a small file in all available storage directories that

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -117,6 +117,7 @@ public class NameNode {
    DFS_NAMENODE_HTTPS_ADDRESS_KEY,
    DFS_NAMENODE_KEYTAB_FILE_KEY,
    DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+    DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY,
    DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
    DFS_NAMENODE_BACKUP_ADDRESS_KEY,
    DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -459,7 +459,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
      DatanodeInfo[] nodes = blocks[i].getLocations();
      for (int j = 0; j < nodes.length; j++) {
        DatanodeInfo dn = nodes[j];
-        namesystem.getBlockManager().findAndMarkBlockAsCorrupt(blk, dn);
+        namesystem.getBlockManager().findAndMarkBlockAsCorrupt(blk, dn,
+            "client machine reported it");
      }
    }
  }

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -263,7 +263,8 @@ public class SecondaryNameNode implements Runnable {
                Krb5AndCertsSslSocketConnector.KRB5_CIPHER_SUITES.get(0));
            InetSocketAddress secInfoSocAddr = 
              NetUtils.createSocketAddr(infoBindAddress + ":"+ conf.getInt(
-                "dfs.secondary.https.port", 443));
+                DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY,
+                DFS_NAMENODE_SECONDARY_HTTPS_PORT_DEFAULT));
            imagePort = secInfoSocAddr.getPort();
            infoServer.addSslListener(secInfoSocAddr, conf, false, true);
          }

+ 14 - 14
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java

@@ -215,7 +215,7 @@ public class NamenodeWebHdfsMethods {
  @PUT
  @Path("/")
  @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON})
+  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
  public Response putRoot(
      @Context final UserGroupInformation ugi,
      @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
@@ -263,7 +263,7 @@ public class NamenodeWebHdfsMethods {
  @PUT
  @Path("{" + UriFsPathParam.NAME + ":.*}")
  @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON})
+  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
  public Response put(
      @Context final UserGroupInformation ugi,
      @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
@@ -324,7 +324,7 @@ public class NamenodeWebHdfsMethods {
      final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
          fullpath, op.getValue(), -1L,
          permission, overwrite, bufferSize, replication, blockSize);
-      return Response.temporaryRedirect(uri).build();
+      return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
    } 
    case MKDIRS:
    {
@@ -336,7 +336,7 @@ public class NamenodeWebHdfsMethods {
    {
      np.createSymlink(destination.getValue(), fullpath,
          PermissionParam.getDefaultFsPermission(), createParent.getValue());
-      return Response.ok().type(MediaType.APPLICATION_JSON).build();
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case RENAME:
    {
@@ -348,7 +348,7 @@ public class NamenodeWebHdfsMethods {
      } else {
        np.rename2(fullpath, destination.getValue(),
            s.toArray(new Options.Rename[s.size()]));
-        return Response.ok().type(MediaType.APPLICATION_JSON).build();
+        return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
      }
    }
    case SETREPLICATION:
@@ -364,17 +364,17 @@ public class NamenodeWebHdfsMethods {
      }

      np.setOwner(fullpath, owner.getValue(), group.getValue());
-      return Response.ok().type(MediaType.APPLICATION_JSON).build();
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case SETPERMISSION:
    {
      np.setPermission(fullpath, permission.getFsPermission());
-      return Response.ok().type(MediaType.APPLICATION_JSON).build();
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case SETTIMES:
    {
      np.setTimes(fullpath, modificationTime.getValue(), accessTime.getValue());
-      return Response.ok().type(MediaType.APPLICATION_JSON).build();
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case RENEWDELEGATIONTOKEN:
    {
@@ -389,7 +389,7 @@ public class NamenodeWebHdfsMethods {
      final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
      token.decodeFromUrlString(delegationTokenArgument.getValue());
      np.cancelDelegationToken(token);
-      return Response.ok().type(MediaType.APPLICATION_JSON).build();
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    default:
      throw new UnsupportedOperationException(op + " is not supported");
@@ -406,7 +406,7 @@ public class NamenodeWebHdfsMethods {
  @POST
  @Path("/")
  @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON})
+  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
  public Response postRoot(
      @Context final UserGroupInformation ugi,
      @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
@@ -427,7 +427,7 @@ public class NamenodeWebHdfsMethods {
  @POST
  @Path("{" + UriFsPathParam.NAME + ":.*}")
  @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON})
+  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
  public Response post(
      @Context final UserGroupInformation ugi,
      @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
@@ -459,7 +459,7 @@ public class NamenodeWebHdfsMethods {
    {
      final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
          fullpath, op.getValue(), -1L, bufferSize);
-      return Response.temporaryRedirect(uri).build();
+      return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    default:
      throw new UnsupportedOperationException(op + " is not supported");
@@ -542,7 +542,7 @@ public class NamenodeWebHdfsMethods {
    {
      final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
          fullpath, op.getValue(), offset.getValue(), offset, length, bufferSize);
-      return Response.temporaryRedirect(uri).build();
+      return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case GET_BLOCK_LOCATIONS:
    {
@@ -578,7 +578,7 @@ public class NamenodeWebHdfsMethods {
    {
      final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
          fullpath, op.getValue(), -1L);
-      return Response.temporaryRedirect(uri).build();
+      return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case GETDELEGATIONTOKEN:
    {
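
Every handler in the file above widens its @Produces list from JSON only to octet-stream plus JSON, and every empty or redirect response now carries an explicit content type. A minimal JAX-RS sketch of the same idea, using a hypothetical resource rather than WebHDFS itself:

    import javax.ws.rs.PUT;
    import javax.ws.rs.Path;
    import javax.ws.rs.Produces;
    import javax.ws.rs.core.MediaType;
    import javax.ws.rs.core.Response;

    @Path("/example")
    public class ExampleResource {
      @PUT
      @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
      public Response put() {
        // A body-less response gets an explicit type instead of leaving it to the container.
        return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
      }
    }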

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -347,7 +347,7 @@ creations/deletions), or "all".</description>

<property>
  <name>dfs.client.block.write.replace-datanode-on-failure.enable</name>
-  <value>ture</value>
+  <value>true</value>
  <description>
    If there is a datanode/network failure in the write pipeline,
    DFSClient will try to remove the failed datanode from the pipeline
@@ -355,7 +355,7 @@ creations/deletions), or "all".</description>
    the number of datanodes in the pipeline is decreased.  The feature is
    to add new datanodes to the pipeline.

-    This is a site-wise property to enable/disable the feature.
+    This is a site-wide property to enable/disable the feature.

    See also dfs.client.block.write.replace-datanode-on-failure.policy
  </description>
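
The property block above fixes the default value ("ture" was not a valid boolean) and the "site-wise"/"site-wide" wording. If the feature needs to be toggled per client rather than site-wide, the usual Configuration API applies; a small sketch (property name taken from the block above, the false value is purely illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class ReplaceDatanodeConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Disable replace-datanode-on-failure for this client only.
        conf.setBoolean(
            "dfs.client.block.write.replace-datanode-on-failure.enable", false);
        System.out.println(conf.getBoolean(
            "dfs.client.block.write.replace-datanode-on-failure.enable", true));
      }
    }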

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/aop/build/aop.xml

@@ -21,7 +21,7 @@
  <property name="aspectversion" value="1.6.5"/>
  <!-- TODO this has to be changed synchronously with build.xml version prop.-->
  <!-- this workarounds of test-patch setting its own 'version' -->
-  <property name="project.version" value="0.23.1"/>
+  <property name="project.version" value="0.23.2-SNAPSHOT"/>

  <!-- Properties common for all fault injections -->
  <property name="build-fi.dir" value="${basedir}/build-fi"/>

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java

@@ -73,7 +73,7 @@ public class TestViewFsFileStatusHdfs {

   long len = FileSystemTestHelper.createFile(fHdfs, testfilename);

-    Configuration conf = ViewFileSystemTestSetup.configWithViewfsScheme();
+    Configuration conf = ViewFileSystemTestSetup.createConfig();
    ConfigUtil.addLink(conf, "/tmp", new URI(fHdfs.getUri().toString() + "/tmp"));
    FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
    assertEquals(ViewFileSystem.class, vfs.getClass());

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.namenode.CreateEditsLog;
import org.apache.hadoop.net.DNS;
@@ -122,10 +123,9 @@ public class DataNodeCluster {
        }
        dataNodeDirs = args[i];
      } else if (args[i].equals("-simulated")) {
-        conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+        SimulatedFSDataset.setFactory(conf);
      } else if (args[i].equals("-inject")) {
-        if (!conf.getBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED,
-                                                                false) ) {
+        if (!FSDatasetInterface.Factory.getFactory(conf).isSimulated()) {
          System.out.print("-inject is valid only for simulated");
          printUsageExit(); 
        }
@@ -158,7 +158,7 @@ public class DataNodeCluster {
      System.exit(-1);
    }
    boolean simulated = 
-      conf.getBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, false);
+        FSDatasetInterface.Factory.getFactory(conf).isSimulated();
    System.out.println("Starting " + numDataNodes + 
          (simulated ? " Simulated " : " ") +
          " Data Nodes that will connect to Name Node at " + nameNodeAdr);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -875,7 +875,7 @@ public class MiniDFSCluster {
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
      }
      if (simulatedCapacities != null) {
-        dnConf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+        SimulatedFSDataset.setFactory(dnConf);
        dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,
            simulatedCapacities[i-curDatanodesNum]);
      }

+ 0 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java

@@ -47,12 +47,6 @@ public class TestAppendDifferentChecksum {
  public static void setupCluster() throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
-    
-    // disable block scanner, since otherwise this test can trigger
-    // HDFS-2525, which is a different bug than we're trying to unit test
-    // here! When HDFS-2525 is fixed, this can be removed.
-    conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
-
    conf.set("fs.hdfs.impl.disable.cache", "true");
    cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java

@@ -107,7 +107,7 @@ public class TestFileAppend{
  public void testCopyOnWrite() throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
-      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      SimulatedFSDataset.setFactory(conf);
    }
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
@@ -178,7 +178,7 @@ public class TestFileAppend{
  public void testSimpleFlush() throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
-      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      SimulatedFSDataset.setFactory(conf);
    }
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
@@ -234,7 +234,7 @@ public class TestFileAppend{
  public void testComplexFlush() throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
-      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      SimulatedFSDataset.setFactory(conf);
    }
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
@@ -283,7 +283,7 @@ public class TestFileAppend{
  public void testFileNotFound() throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
-      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      SimulatedFSDataset.setFactory(conf);
    }
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java

@@ -82,7 +82,7 @@ public class TestFileAppend2 extends TestCase {
  public void testSimpleAppend() throws IOException {
    final Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
-      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      SimulatedFSDataset.setFactory(conf);
    }
    conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
    conf.setBoolean("dfs.support.append", true);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java

@@ -77,7 +77,7 @@ public class TestFileAppend4 {
  public void setUp() throws Exception {
    this.conf = new Configuration();
    if (simulatedStorage) {
-      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      SimulatedFSDataset.setFactory(conf);
    }
    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);


+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java

@@ -147,7 +147,7 @@ public class TestFileCorruption extends TestCase {
      DatanodeRegistration dnR = 
        DataNodeTestUtils.getDNRegistrationForBP(dataNode, blk.getBlockPoolId());
      cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(
-          blk, new DatanodeInfo(dnR));
+          blk, new DatanodeInfo(dnR), "TEST");
      
      // open the file
      fs.open(FILE_PATH);

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java

@@ -144,7 +144,7 @@ public class TestFileCreation extends junit.framework.TestCase {
  public void testFileCreation() throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
-      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      SimulatedFSDataset.setFactory(conf);
    }
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
@@ -223,7 +223,7 @@ public class TestFileCreation extends junit.framework.TestCase {
  public void testDeleteOnExit() throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
-      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      SimulatedFSDataset.setFactory(conf);
    }
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
@@ -287,7 +287,7 @@ public class TestFileCreation extends junit.framework.TestCase {
    conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
    if (simulatedStorage) {
-      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      SimulatedFSDataset.setFactory(conf);
    }
    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
@@ -361,7 +361,7 @@ public class TestFileCreation extends junit.framework.TestCase {
    conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
    if (simulatedStorage) {
-      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      SimulatedFSDataset.setFactory(conf);
    }
    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
@@ -460,7 +460,7 @@ public class TestFileCreation extends junit.framework.TestCase {
    conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
    if (simulatedStorage) {
-      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      SimulatedFSDataset.setFactory(conf);
    }

    // create cluster
@@ -599,7 +599,7 @@ public class TestFileCreation extends junit.framework.TestCase {
    Configuration conf = new HdfsConfiguration();
    System.out.println("Testing adbornal client death.");
    if (simulatedStorage) {
-      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      SimulatedFSDataset.setFactory(conf);
    }
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
@@ -634,7 +634,7 @@ public class TestFileCreation extends junit.framework.TestCase {
  public void testFileCreationNonRecursive() throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
-      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      SimulatedFSDataset.setFactory(conf);
    }
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java

@@ -136,7 +136,7 @@ public class TestInjectionForSimulatedStorage extends TestCase {
      Configuration conf = new HdfsConfiguration();
      conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, Integer.toString(numDataNodes));
      conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, checksumSize);
-      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      SimulatedFSDataset.setFactory(conf);
      //first time format
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
      cluster.waitActive();
@@ -159,7 +159,7 @@ public class TestInjectionForSimulatedStorage extends TestCase {
      
      LOG.info("Restarting minicluster");
      conf = new HdfsConfiguration();
-      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      SimulatedFSDataset.setFactory(conf);
      conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f"); 
      
      cluster = new MiniDFSCluster.Builder(conf)

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java

@@ -174,7 +174,7 @@ public class TestLargeBlock {

    Configuration conf = new Configuration();
    if (simulatedStorage) {
-      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      SimulatedFSDataset.setFactory(conf);
    }
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java

@@ -206,7 +206,7 @@ public class TestPread extends TestCase {
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
    conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
    if (simulatedStorage) {
-      conf.setBoolean("dfs.datanode.simulateddatastorage", true);
+      SimulatedFSDataset.setFactory(conf);
    }
    if (disableTransferTo) {
      conf.setBoolean("dfs.datanode.transferTo.allowed", false);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java

@@ -199,7 +199,7 @@ public class TestReplication extends TestCase {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
    if (simulated) {
-      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      SimulatedFSDataset.setFactory(conf);
    }
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                               .numDataNodes(numDatanodes)

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java

@@ -28,7 +28,7 @@ public class TestSetrepIncreasing extends TestCase {
  static void setrep(int fromREP, int toREP, boolean simulatedStorage) throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
-      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      SimulatedFSDataset.setFactory(conf);
    }
    conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "" + fromREP);
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);

+ 47 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java

@@ -124,7 +124,7 @@ public class TestShortCircuitLocalRead {
    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
        UserGroupInformation.getCurrentUser().getShortUserName());
    if (simulatedStorage) {
-      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      SimulatedFSDataset.setFactory(conf);
    }
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
        .format(true).build();
@@ -238,7 +238,53 @@ public class TestShortCircuitLocalRead {
      cluster.shutdown();
    }
  }
+
+  @Test
+  public void testSkipWithVerifyChecksum() throws IOException {
+    int size = blockSize;
+    Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
+    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
+        UserGroupInformation.getCurrentUser().getShortUserName());
+    if (simulatedStorage) {
+      SimulatedFSDataset.setFactory(conf);
+    }
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .format(true).build();
+    FileSystem fs = cluster.getFileSystem();
+    try {
+      // check that / exists
+      Path path = new Path("/");
+      assertTrue("/ should be a directory", fs.getFileStatus(path)
+          .isDirectory() == true);
+      
+      byte[] fileData = AppendTestUtil.randomBytes(seed, size*3);
+      // create a new file in home directory. Do not close it.
+      Path file1 = new Path("filelocal.dat");
+      FSDataOutputStream stm = createFile(fs, file1, 1);
   
+      // write to file
+      stm.write(fileData);
+      stm.close();
+      
+      // now test the skip function
+      FSDataInputStream instm = fs.open(file1);
+      byte[] actual = new byte[fileData.length];
+      // read something from the block first, otherwise BlockReaderLocal.skip()
+      // will not be invoked
+      int nread = instm.read(actual, 0, 3);
+      long skipped = 2*size+3;
+      instm.seek(skipped);
+      nread = instm.read(actual, (int)(skipped + nread), 3);
+      instm.close();
+        
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+     
  /**
   * Test to run benchmarks between shortcircuit read vs regular read with
   * specified number of threads simultaneously reading.

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java

@@ -92,7 +92,7 @@ public class TestSmallBlock extends TestCase {
  public void testSmallBlock() throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
-      conf.setBoolean("dfs.datanode.simulateddatastorage", true);
+      SimulatedFSDataset.setFactory(conf);
    }
    conf.set(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, "1");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

+ 56 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java

@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import static org.junit.Assert.*;
+
+import org.junit.Test;
+
+
+public class TestExtendedBlock {
+  static final String POOL_A = "blockpool-a";
+  static final String POOL_B = "blockpool-b";
+  static final Block BLOCK_1_GS1 = new Block(1L, 100L, 1L);
+  static final Block BLOCK_1_GS2 = new Block(1L, 100L, 2L);
+  static final Block BLOCK_2_GS1 = new Block(2L, 100L, 1L);
+  
+  @Test
+  public void testEquals() {
+    // Same block -> equal
+    assertEquals(
+        new ExtendedBlock(POOL_A, BLOCK_1_GS1),
+        new ExtendedBlock(POOL_A, BLOCK_1_GS1));
+    // Different pools, same block id -> not equal
+    assertNotEquals(
+        new ExtendedBlock(POOL_A, BLOCK_1_GS1),
+        new ExtendedBlock(POOL_B, BLOCK_1_GS1));
+    // Same pool, different block id -> not equal
+    assertNotEquals(
+        new ExtendedBlock(POOL_A, BLOCK_1_GS1),
+        new ExtendedBlock(POOL_A, BLOCK_2_GS1));
+    // Same block, different genstamps -> equal
+    assertEquals(
+        new ExtendedBlock(POOL_A, BLOCK_1_GS1),
+        new ExtendedBlock(POOL_A, BLOCK_1_GS2));
+  }
+
+  private static void assertNotEquals(Object a, Object b) {
+    assertFalse("expected not equal: '" + a + "' and '" + b + "'",
+        a.equals(b));
+  }
+}
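
The new test pins down the equality contract: two ExtendedBlocks are equal when block pool id and block id match, and the generation stamp is deliberately ignored. A minimal equals/hashCode that would satisfy those assertions (a sketch only, not necessarily the shipped ExtendedBlock code):

    public class SimpleExtendedBlock {
      private final String poolId;
      private final long blockId;
      private final long generationStamp; // intentionally excluded from equality

      public SimpleExtendedBlock(String poolId, long blockId, long generationStamp) {
        this.poolId = poolId;
        this.blockId = blockId;
        this.generationStamp = generationStamp;
      }

      @Override
      public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof SimpleExtendedBlock)) return false;
        SimpleExtendedBlock that = (SimpleExtendedBlock) o;
        return blockId == that.blockId && poolId.equals(that.poolId);
      }

      @Override
      public int hashCode() {
        // Must only combine the fields used by equals().
        return 31 * poolId.hashCode() + (int) (blockId ^ (blockId >>> 32));
      }
    }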

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java

@@ -77,7 +77,7 @@ public class TestBalancer extends TestCase {
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
-    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    SimulatedFSDataset.setFactory(conf);
    conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
  }


+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java

@@ -57,14 +57,12 @@ public class TestBalancerWithMultipleNameNodes {
    ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
-//    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.OFF);
  }

  
  private static final long CAPACITY = 500L;
  private static final String RACK0 = "/rack0";
  private static final String RACK1 = "/rack1";
-  private static final String RACK2 = "/rack2";

  private static final String FILE_NAME = "/tmp.txt";
  private static final Path FILE_PATH = new Path(FILE_NAME);

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java

@@ -83,14 +83,14 @@ public class TestCorruptReplicaInfo extends TestCase {
      DatanodeDescriptor dn1 = new DatanodeDescriptor();
      DatanodeDescriptor dn2 = new DatanodeDescriptor();
      
-      crm.addToCorruptReplicasMap(getBlock(0), dn1);
+      crm.addToCorruptReplicasMap(getBlock(0), dn1, "TEST");
      assertEquals("Number of corrupt blocks not returning correctly",
                   1, crm.size());
-      crm.addToCorruptReplicasMap(getBlock(1), dn1);
+      crm.addToCorruptReplicasMap(getBlock(1), dn1, "TEST");
      assertEquals("Number of corrupt blocks not returning correctly",
                   2, crm.size());
      
-      crm.addToCorruptReplicasMap(getBlock(1), dn2);
+      crm.addToCorruptReplicasMap(getBlock(1), dn2, "TEST");
      assertEquals("Number of corrupt blocks not returning correctly",
                   2, crm.size());
      
@@ -103,7 +103,7 @@ public class TestCorruptReplicaInfo extends TestCase {
                   0, crm.size());
      
      for (Long block_id: block_ids) {
-        crm.addToCorruptReplicasMap(getBlock(block_id), dn1);
+        crm.addToCorruptReplicasMap(getBlock(block_id), dn1, "TEST");
      }
            
      assertEquals("Number of corrupt blocks not returning correctly",

+ 68 - 48
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java

@@ -17,12 +17,13 @@
 */
package org.apache.hadoop.hdfs.server.datanode;

+import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import java.util.Random;

@@ -30,7 +31,6 @@ import javax.management.NotCompliantMBeanException;
import javax.management.ObjectName;
import javax.management.StandardMBean;

-import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
@@ -38,11 +38,10 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.BlockPoolSlice;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolumeSet;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
-import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -62,21 +61,33 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 * 
 * Note the synchronization is coarse grained - it is at each method. 
 */
+public class SimulatedFSDataset implements FSDatasetInterface {
+  static class Factory extends FSDatasetInterface.Factory {
+    @Override
+    public FSDatasetInterface createFSDatasetInterface(DataNode datanode,
+        DataStorage storage, Configuration conf) throws IOException {
+      return new SimulatedFSDataset(datanode, storage, conf);
+    }

-public class SimulatedFSDataset  implements FSDatasetInterface, Configurable{
+    @Override
+    public boolean isSimulated() {
+      return true;
+    }
+  }
+  
+  public static void setFactory(Configuration conf) {
+    conf.set(DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY,
+        Factory.class.getName());
+  }
  
-  public static final String CONFIG_PROPERTY_SIMULATED =
-                                    "dfs.datanode.simulateddatastorage";
  public static final String CONFIG_PROPERTY_CAPACITY =
-                            "dfs.datanode.simulateddatastorage.capacity";
+      "dfs.datanode.simulateddatastorage.capacity";
  
  public static final long DEFAULT_CAPACITY = 2L<<40; // 1 terabyte
-  public static final byte DEFAULT_DATABYTE = 9; // 1 terabyte
-  byte simulatedDataByte = DEFAULT_DATABYTE;
-  Configuration conf = null;
+  public static final byte DEFAULT_DATABYTE = 9;
  
-  static byte[] nullCrcFileData;
-  {
+  static final byte[] nullCrcFileData;
+  static {
    DataChecksum checksum = DataChecksum.newDataChecksum( DataChecksum.
                              CHECKSUM_NULL, 16*1024 );
    byte[] nullCrcHeader = checksum.getHeader();
@@ -359,31 +370,22 @@ public class SimulatedFSDataset  implements FSDatasetInterface, Configurable{
    }
  }
  
-  private Map<String, Map<Block, BInfo>> blockMap = null;
-  private SimulatedStorage storage = null;
-  private String storageId;
+  private final Map<String, Map<Block, BInfo>> blockMap
+      = new HashMap<String, Map<Block,BInfo>>();
+  private final SimulatedStorage storage;
+  private final String storageId;
  
-  public SimulatedFSDataset(Configuration conf) throws IOException {
-    setConf(conf);
-  }
-  
-  // Constructor used for constructing the object using reflection
-  @SuppressWarnings("unused")
-  private SimulatedFSDataset() { // real construction when setConf called..
-  }
-  
-  public Configuration getConf() {
-    return conf;
-  }
-
-  public void setConf(Configuration iconf)  {
-    conf = iconf;
-    storageId = conf.get(DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY, "unknownStorageId" +
-                                        new Random().nextInt());
+  public SimulatedFSDataset(DataNode datanode, DataStorage storage,
+      Configuration conf) {
+    if (storage != null) {
+      storage.createStorageID(datanode.getPort());
+      this.storageId = storage.getStorageID();
+    } else {
+      this.storageId = "unknownStorageId" + new Random().nextInt();
+    }
    registerMBean(storageId);
-    storage = new SimulatedStorage(
+    this.storage = new SimulatedStorage(
        conf.getLong(CONFIG_PROPERTY_CAPACITY, DEFAULT_CAPACITY));
-    blockMap = new HashMap<String, Map<Block,BInfo>>(); 
  }

  public synchronized void injectBlocks(String bpid,
@@ -440,23 +442,16 @@ public class SimulatedFSDataset  implements FSDatasetInterface, Configurable{

  @Override
  public synchronized BlockListAsLongs getBlockReport(String bpid) {
+    final List<Block> blocks = new ArrayList<Block>();
    final Map<Block, BInfo> map = blockMap.get(bpid);
-    Block[] blockTable = new Block[map.size()];
    if (map != null) {
-      int count = 0;
      for (BInfo b : map.values()) {
        if (b.isFinalized()) {
-          blockTable[count++] = b.theBlock;
+          blocks.add(b.theBlock);
        }
      }
-      if (count != blockTable.length) {
-        blockTable = Arrays.copyOf(blockTable, count);
-      }
-    } else {
-      blockTable = new Block[0];
     }
    }
-        new ArrayList<Block>(Arrays.asList(blockTable)), null);
+    return new BlockListAsLongs(blocks, null);
  }

  @Override // FSDatasetMBean
@@ -988,8 +983,33 @@ public class SimulatedFSDataset  implements FSDatasetInterface, Configurable{
  }

  @Override
-  public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b)
-      throws IOException {
-    throw new IOException("getBlockLocalPathInfo not supported.");
+  public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public String[] getBlockPoolList() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void checkAndUpdate(String bpid, long blockId, File diskFile,
+      File diskMetaFile, FSVolumeInterface vol) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<FSVolumeInterface> getVolumes() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<Block> getFinalizedBlocks(String bpid) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Map<String, Object> getVolumeInfoMap() {
+    throw new UnsupportedOperationException();
  }
}
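
The rewrite above replaces the old dfs.datanode.simulateddatastorage boolean with a pluggable dataset factory: tests call SimulatedFSDataset.setFactory(conf), which records the factory class under DFS_DATANODE_FSDATASET_FACTORY_KEY, and callers ask the factory whether it is simulated. A generic, self-contained sketch of that plug-in pattern, with a plain map standing in for Configuration (none of these names are the Hadoop API itself):

    import java.util.HashMap;
    import java.util.Map;

    public class FactoryPatternSketch {
      /** Base factory; a concrete dataset factory says whether it is simulated. */
      public static abstract class DatasetFactory {
        public abstract String createDataset();
        public boolean isSimulated() { return false; }
      }

      public static class SimulatedFactory extends DatasetFactory {
        @Override public String createDataset() { return "simulated dataset"; }
        @Override public boolean isSimulated() { return true; }
      }

      static final String FACTORY_KEY = "dataset.factory"; // stand-in for the real config key

      static DatasetFactory getFactory(Map<String, String> conf) throws Exception {
        // Load and instantiate whatever factory class the configuration names.
        String name = conf.get(FACTORY_KEY);
        return Class.forName(name).asSubclass(DatasetFactory.class).newInstance();
      }

      public static void main(String[] args) throws Exception {
        Map<String, String> conf = new HashMap<String, String>();
        // Equivalent of SimulatedFSDataset.setFactory(conf) in the tests above.
        conf.put(FACTORY_KEY, SimulatedFactory.class.getName());
        DatasetFactory f = getFactory(conf);
        System.out.println(f.createDataset() + ", simulated=" + f.isSimulated());
      }
    }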

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java

@@ -34,7 +34,7 @@ public class TestDataNodeMetrics extends TestCase {
  
  public void testDataNodeMetrics() throws Exception {
    Configuration conf = new HdfsConfiguration();
-    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    SimulatedFSDataset.setFactory(conf);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      FileSystem fs = cluster.getFileSystem();

+ 9 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java

@@ -23,7 +23,7 @@ import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;

import java.io.IOException;
-import java.util.Collection;
+import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -31,7 +31,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.junit.Assert;
@@ -81,11 +80,11 @@ public class TestDataNodeMultipleRegistrations {

      // check number of volumes in fsdataset
      DataNode dn = cluster.getDataNodes().get(0);
-      Collection<VolumeInfo> volInfos = ((FSDataset) dn.data).getVolumeInfo();
-      assertNotNull("No volumes in the fsdataset", volInfos);
+      final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
+      Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
      int i = 0;
-      for (VolumeInfo vi : volInfos) {
-        LOG.info("vol " + i++ + ";dir=" + vi.directory + ";fs= " + vi.freeSpace);
+      for (Map.Entry<String, Object> e : volInfos.entrySet()) {
+        LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
      }
      // number of volumes should be 2 - [data1, data2]
      assertEquals("number of volumes is wrong", 2, volInfos.size());
@@ -143,11 +142,11 @@ public class TestDataNodeMultipleRegistrations {

      // check number of vlumes in fsdataset
      DataNode dn = cluster.getDataNodes().get(0);
-      Collection<VolumeInfo> volInfos = ((FSDataset) dn.data).getVolumeInfo();
-      assertNotNull("No volumes in the fsdataset", volInfos);
+      final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
+      Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
      int i = 0;
-      for (VolumeInfo vi : volInfos) {
-        LOG.info("vol " + i++ + ";dir=" + vi.directory + ";fs= " + vi.freeSpace);
+      for (Map.Entry<String, Object> e : volInfos.entrySet()) {
+        LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
      }
      // number of volumes should be 2 - [data1, data2]
      assertEquals("number of volumes is wrong", 2, volInfos.size());

Some files were not shown because the diff is too large