
Merging trunk to branch-trunk-win branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-trunk-win@1407217 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 12 years ago
parent
commit
d928b498a8
100 changed files with 4246 additions and 2454 deletions
  1. 7 0
      hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
  2. 36 1
      hadoop-common-project/hadoop-common/CHANGES.txt
  3. 9 1
      hadoop-common-project/hadoop-common/src/CMakeLists.txt
  4. 7 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  5. 5 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
  6. 6 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java
  7. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
  8. 15 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java
  9. 0 6
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
  10. 3 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
  11. 3 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
  12. 11 13
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
  13. 95 68
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  14. 39 29
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
  15. 4 8
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
  16. 21 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
  17. 54 19
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
  18. 84 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringInterner.java
  19. 24 1
      hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
  20. 7 2
      hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
  21. 2 2
      hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/getGroup.c
  22. 3 1
      hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c
  23. 1 0
      hadoop-common-project/hadoop-common/src/main/proto/HAServiceProtocol.proto
  24. 1 0
      hadoop-common-project/hadoop-common/src/main/proto/IpcConnectionContext.proto
  25. 1 0
      hadoop-common-project/hadoop-common/src/main/proto/ProtocolInfo.proto
  26. 1 0
      hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto
  27. 1 0
      hadoop-common-project/hadoop-common/src/main/proto/ZKFCProtocol.proto
  28. 1 0
      hadoop-common-project/hadoop-common/src/main/proto/hadoop_rpc.proto
  29. 11 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
  30. 11 3
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java
  31. 14 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
  32. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java
  33. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFs.java
  34. 3 40
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcMainOperationsLocalFs.java
  35. 57 18
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java
  36. 65 23
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java
  37. 4 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
  38. 1 4
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
  39. 19 6
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
  40. 6 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java
  41. 175 112
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
  42. 15 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/SecurityUtilTestHelper.java
  43. 3 5
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java
  44. 52 4
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java
  45. 74 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithExternalKdc.java
  46. 1 3
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithSecurityOn.java
  47. 12 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
  48. 76 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringInterner.java
  49. 1 0
      hadoop-common-project/hadoop-common/src/test/proto/test.proto
  50. 1 0
      hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
  51. 2 2
      hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
  52. 103 13
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  53. 115 58
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
  54. 10 8
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java
  55. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/proto/bkjournal.proto
  56. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperConfiguration.java
  57. 99 0
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java
  58. 4 1
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestCurrentInprogress.java
  59. 318 181
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.c
  60. 230 44
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.h
  61. 333 185
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_query.c
  62. 215 16
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_query.h
  63. 478 267
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.c
  64. 141 21
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.h
  65. 379 230
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c
  66. 0 180
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_multi_write.c
  67. 290 243
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c
  68. 24 19
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c
  69. 55 33
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c
  70. 42 49
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c
  71. 0 111
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_read_bm.c
  72. 9 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  73. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
  74. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
  75. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
  76. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java
  77. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java
  78. 3 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
  79. 1 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
  80. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
  81. 31 33
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  82. 3 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
  83. 1 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
  84. 17 17
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
  85. 10 10
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
  86. 8 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
  87. 9 9
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
  88. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
  89. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
  90. 35 27
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
  91. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
  92. 11 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  93. 6 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
  94. 9 13
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
  95. 23 14
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
  96. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
  97. 8 9
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
  98. 2 16
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
  99. 142 196
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  100. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

+ 7 - 0
hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml

@@ -62,6 +62,13 @@
        <include>**/*</include>
      </includes>
    </fileSet>
+    <fileSet>
+      <directory>hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf</directory>
+      <outputDirectory>etc/hadoop</outputDirectory>
+      <includes>
+        <include>**/*</include>
+      </includes>
+    </fileSet>
    <fileSet>
      <directory>${basedir}</directory>
      <outputDirectory>/share/doc/hadoop/${hadoop.component}</outputDirectory>

+ 36 - 1
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -129,6 +129,9 @@ Trunk (Unreleased)
    HADOOP-8776. Provide an option in test-patch that can enable/disable
    compiling native code. (Chris Nauroth via suresh)

+    HADOOP-9004. Allow security unit tests to use external KDC. (Stephen Chu
+    via suresh)
+
  BUG FIXES

    HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName.
@@ -269,10 +272,13 @@ Trunk (Unreleased)
    HADOOP-8918. test-patch.sh is parsing modified files wrong.
    (Raja Aluri via suresh)

+    HADOOP-8589 ViewFs tests fail when tests and home dirs are nested (sanjay Radia)
+
  OPTIMIZATIONS

    HADOOP-7761. Improve the performance of raw comparisons. (todd)

+    HADOOP-8589 ViewFs tests fail when tests and home dirs are nested (sanjay Radia)
Release 2.0.3-alpha - Unreleased 

  INCOMPATIBLE CHANGES
@@ -330,6 +336,22 @@ Release 2.0.3-alpha - Unreleased
 
 
    HADOOP-8925. Remove the packaging. (eli)

+    HADOOP-8985. Add namespace declarations in .proto files for languages 
+    other than java. (Binglin Chan via suresh)
+
+    HADOOP-9009. Add SecurityUtil methods to get/set authentication method
+    (daryn via bobby)
+
+    HADOOP-9010. Map UGI authenticationMethod to RPC authMethod (daryn via
+    bobby)
+
+    HADOOP-9013. UGI should not hardcode loginUser's authenticationType (daryn
+    via bobby)
+
+    HADOOP-9014. Standardize creation of SaslRpcClients (daryn via bobby)
+
+    HADOOP-9015. Standardize creation of SaslRpcServers (daryn via bobby)
+
  OPTIMIZATIONS

    HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang
@@ -375,7 +397,7 @@ Release 2.0.3-alpha - Unreleased
    (rkanter via tucu)

    HADOOP-8900. BuiltInGzipDecompressor throws IOException - stored gzip size
-    doesn't match decompressed size. (Slavik Krassovsky via suresh)
+    doesn't match decompressed size. (Andy Isaacson via suresh)
 
 
    HADOOP-8948. TestFileUtil.testGetDU fails on Windows due to incorrect
    assumption of line separator. (Chris Nauroth via suresh)
@@ -383,6 +405,11 @@ Release 2.0.3-alpha - Unreleased
    HADOOP-8951. RunJar to fail with user-comprehensible error 
    message if jar missing. (stevel via suresh)

+    HADOOP-8713. TestRPCCompatibility fails intermittently with JDK7
+    (Trevor Robinson via tgraves)
+
+    HADOOP-9012. IPC Client sends wrong connection context (daryn via bobby)
+
Release 2.0.2-alpha - 2012-09-07 

  INCOMPATIBLE CHANGES
@@ -1094,6 +1121,14 @@ Release 0.23.5 - UNRELEASED
    HADOOP-8906. paths with multiple globs are unreliable. (Daryn Sharp via
    jlowe)

+    HADOOP-8811. Compile hadoop native library in FreeBSD (Radim Kolar via
+    bobby)
+
+    HADOOP-8962. RawLocalFileSystem.listStatus fails when a child filename
+    contains a colon (jlowe via bobby)
+
+    HADOOP-8986. Server$Call object is never released after it is sent (bobby)
+
Release 0.23.4 - UNRELEASED

  INCOMPATIBLE CHANGES

+ 9 - 1
hadoop-common-project/hadoop-common/src/CMakeLists.txt

@@ -67,6 +67,9 @@ macro(set_find_shared_library_version LVERS)
    IF(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
        # Mac OS uses .dylib
        SET(CMAKE_FIND_LIBRARY_SUFFIXES ".${LVERS}.dylib")
+    ELSEIF(${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD")
+        # FreeBSD has always .so installed.
+        SET(CMAKE_FIND_LIBRARY_SUFFIXES ".so")
    ELSEIF(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
        # Windows doesn't support finding shared libraries by version.
    ELSE()
@@ -95,8 +98,10 @@ GET_FILENAME_COMPONENT(HADOOP_ZLIB_LIBRARY ${ZLIB_LIBRARIES} NAME)
 
 
INCLUDE(CheckFunctionExists)
INCLUDE(CheckCSourceCompiles)
+INCLUDE(CheckLibraryExists)
CHECK_FUNCTION_EXISTS(sync_file_range HAVE_SYNC_FILE_RANGE)
CHECK_FUNCTION_EXISTS(posix_fadvise HAVE_POSIX_FADVISE)
+CHECK_LIBRARY_EXISTS(dl dlopen "" NEED_LINK_DL)
 
 
SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES CMAKE_FIND_LIBRARY_SUFFIXES)
set_find_shared_library_version("1")
@@ -159,6 +164,9 @@ add_dual_library(hadoop
    ${D}/util/NativeCrc32.c
    ${D}/util/bulk_crc32.c
)
+if (NEED_LINK_DL)
+   set(LIB_DL dl)
+endif (NEED_LINK_DL)
 
 
IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
    #
@@ -171,7 +179,7 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
ENDIF()

target_link_dual_libraries(hadoop
-    dl
+    ${LIB_DL}
    ${JAVA_JVM_LIBRARY}
)
SET(LIBHADOOP_VERSION "1.0.0")

+ 7 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -75,6 +75,7 @@ import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StringInterner;
import org.apache.hadoop.util.StringUtils;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonGenerator;
@@ -2002,13 +2003,16 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
            continue;
          Element field = (Element)fieldNode;
          if ("name".equals(field.getTagName()) && field.hasChildNodes())
-            attr = ((Text)field.getFirstChild()).getData().trim();
+            attr = StringInterner.weakIntern(
+                ((Text)field.getFirstChild()).getData().trim());
          if ("value".equals(field.getTagName()) && field.hasChildNodes())
-            value = ((Text)field.getFirstChild()).getData();
+            value = StringInterner.weakIntern(
+                ((Text)field.getFirstChild()).getData());
          if ("final".equals(field.getTagName()) && field.hasChildNodes())
            finalParameter = "true".equals(((Text)field.getFirstChild()).getData());
          if ("source".equals(field.getTagName()) && field.hasChildNodes())
-            source.add(((Text)field.getFirstChild()).getData());
+            source.add(StringInterner.weakIntern(
+                ((Text)field.getFirstChild()).getData()));
        }
        source.add(name);

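Editorial note: the StringInterner utility used above is added by this commit, but its body is not shown in this excerpt. Below is a minimal, hedged sketch of one plausible shape for such a helper, built on Guava's weak interner; the class name and structure simply mirror the call sites in Configuration.java and are otherwise assumptions, not the actual source.

import com.google.common.collect.Interner;
import com.google.common.collect.Interners;

// Hedged sketch only; the real org.apache.hadoop.util.StringInterner may differ.
public final class StringInternerSketch {
  private static final Interner<String> WEAK_POOL = Interners.newWeakInterner();

  // Returns a canonical copy of the string so repeated config names/values share storage.
  public static String weakIntern(String s) {
    return (s == null) ? null : WEAK_POOL.intern(s);
  }
}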
+ 5 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java

@@ -125,6 +125,11 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem {
  public FsServerDefaults getServerDefaults() throws IOException {
    return fsImpl.getServerDefaults();
  }
+  
+  @Override
+  public Path getHomeDirectory() {
+    return fsImpl.getHomeDirectory();
+  }
 
 
  @Override
  public int getUriDefaultPort() {

+ 6 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java

@@ -45,7 +45,8 @@ public class HardLink {
    OS_TYPE_UNIX,
    OS_TYPE_WIN,
    OS_TYPE_SOLARIS,
-    OS_TYPE_MAC
+    OS_TYPE_MAC,
+    OS_TYPE_FREEBSD
  }
  
  public static OSType osType;
@@ -65,7 +66,7 @@ public class HardLink {
      getHardLinkCommand = new HardLinkCGUnix();
      //override getLinkCountCommand for the particular Unix variant
      //Linux is already set as the default - {"stat","-c%h", null}
-      if (osType == OSType.OS_TYPE_MAC) {
+      if (osType == OSType.OS_TYPE_MAC || osType == OSType.OS_TYPE_FREEBSD) {
        String[] linkCountCmdTemplate = {"/usr/bin/stat","-f%l", null};
        HardLinkCGUnix.setLinkCountCmdTemplate(linkCountCmdTemplate);
      } else if (osType == OSType.OS_TYPE_SOLARIS) {
@@ -91,6 +92,9 @@ public class HardLink {
    else if (osName.contains("Mac")) {
       return OSType.OS_TYPE_MAC;
    }
+    else if (osName.contains("FreeBSD")) {
+       return OSType.OS_TYPE_FREEBSD;
+    }
    else {
      return OSType.OS_TYPE_UNIX;
    }
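A hedged usage sketch (mine, not part of the patch) of the public HardLink helpers touched above; the file paths are placeholders. On Linux the link count is read with stat -c%h, while on Mac OS and, after this change, FreeBSD it uses /usr/bin/stat -f%l.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.HardLink;

public class HardLinkCountExample {
  public static void main(String[] args) throws IOException {
    File original = new File("/tmp/data.bin");      // assumed to exist
    File link = new File("/tmp/data-link.bin");
    HardLink.createHardLink(original, link);        // create a second name for the same inode
    System.out.println("link count = " + HardLink.getLinkCount(original));  // expect 2
  }
}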

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -349,7 +349,7 @@ public class RawLocalFileSystem extends FileSystem {
        new RawLocalFileStatus(localf, getDefaultBlockSize(f), this) };
    }

-    String[] names = localf.list();
+    File[] names = localf.listFiles();
    if (names == null) {
      return null;
    }
@@ -357,7 +357,7 @@ public class RawLocalFileSystem extends FileSystem {
    int j = 0;
    for (int i = 0; i < names.length; i++) {
      try {
-        results[j] = getFileStatus(new Path(f, names[i]));
+        results[j] = getFileStatus(new Path(names[i].getAbsolutePath()));
        j++;
      } catch (FileNotFoundException e) {
        // ignore the files not found since the dir list may have have changed
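A hedged illustration (mine, not from the patch) of why the listStatus change above matters (HADOOP-8962): a bare child name containing a colon is parsed as a URI scheme, whereas the file's absolute path, where the colon follows a '/', is not. Paths are placeholders.

import org.apache.hadoop.fs.Path;

public class ColonFileNameExample {
  public static void main(String[] args) {
    // Fine: the colon comes after a '/', so no scheme is inferred.
    System.out.println(new Path("/tmp/dir/a:b"));
    try {
      // Roughly what the old code did per child name; "a" is taken as a scheme
      // and construction fails with an IllegalArgumentException.
      System.out.println(new Path(new Path("/tmp/dir"), "a:b"));
    } catch (IllegalArgumentException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}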

+ 15 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java

@@ -37,16 +37,21 @@ class Test extends FsCommand {
  }

  public static final String NAME = "test";
-  public static final String USAGE = "-[ezd] <path>";
+  public static final String USAGE = "-[defsz] <path>";
  public static final String DESCRIPTION =
-    "If file exists, has zero length, is a directory\n" +
-    "then return 0, else return 1.";
+    "Answer various questions about <path>, with result via exit status.\n" +
+    "  -d  return 0 if <path> is a directory.\n" +
+    "  -e  return 0 if <path> exists.\n" +
+    "  -f  return 0 if <path> is a file.\n" +
+    "  -s  return 0 if file <path> is greater than zero bytes in size.\n" +
+    "  -z  return 0 if file <path> is zero bytes in size.\n" +
+    "else, return 1.";
 
 
   private char flag;
   private char flag;
   
   
  @Override
  protected void processOptions(LinkedList<String> args) {
-    CommandFormat cf = new CommandFormat(1, 1, "e", "d", "z");
+    CommandFormat cf = new CommandFormat(1, 1, "e", "d", "f", "s", "z");
    cf.parse(args);
    
    String[] opts = cf.getOpts().toArray(new String[0]);
@@ -71,6 +76,12 @@ class Test extends FsCommand {
      case 'd':
        test = item.stat.isDirectory();
        break;
+      case 'f':
+        test = item.stat.isFile();
+        break;
+      case 's':
+        test = (item.stat.getLen() > 0);
+        break;
      case 'z':
        test = (item.stat.getLen() == 0);
        break;

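A hedged usage sketch (mine, not from the patch) exercising the new -f and -s test flags through FsShell; the path is a placeholder and the exit codes follow the description added above (0 on success, 1 otherwise).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class FsTestFlagsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // 0 if /tmp/sample.txt is a regular file
    int isFile = ToolRunner.run(conf, new FsShell(), new String[] {"-test", "-f", "/tmp/sample.txt"});
    // 0 if the file is non-empty
    int nonEmpty = ToolRunner.run(conf, new FsShell(), new String[] {"-test", "-s", "/tmp/sample.txt"});
    System.out.println("isFile=" + isFile + ", nonEmpty=" + nonEmpty);
  }
}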
+ 0 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java

@@ -153,12 +153,6 @@ class ChRootedFileSystem extends FilterFileSystem {
    return makeQualified(
        new Path(chRootPathPartString + f.toUri().toString()));
  }
-  
-  @Override
-  public Path getHomeDirectory() {
-    return  new Path("/user/"+System.getProperty("user.name")).makeQualified(
-          getUri(), null);
-  }
 
 
  @Override
  public Path getWorkingDirectory() {

+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -256,8 +256,9 @@ public class ViewFileSystem extends FileSystem {
      if (base == null) {
        base = "/user";
      }
-      homeDir = 
-        this.makeQualified(new Path(base + "/" + ugi.getShortUserName()));
+      homeDir = (base.equals("/") ? 
+          this.makeQualified(new Path(base + ugi.getShortUserName())):
+          this.makeQualified(new Path(base + "/" + ugi.getShortUserName())));
    }
    return homeDir;
  }

+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java

@@ -248,8 +248,9 @@ public class ViewFs extends AbstractFileSystem {
      if (base == null) {
        base = "/user";
      }
-      homeDir = 
-        this.makeQualified(new Path(base + "/" + ugi.getShortUserName()));
+      homeDir = (base.equals("/") ? 
+        this.makeQualified(new Path(base + ugi.getShortUserName())):
+        this.makeQualified(new Path(base + "/" + ugi.getShortUserName())));
    }
    return homeDir;
  }
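The same conditional is applied in both ViewFileSystem and ViewFs above. A hedged illustration (mine) of the edge case it handles: when the configured home-directory prefix of the mount table is "/", naive concatenation would produce a double slash.

public class HomeDirPrefixExample {
  static String homeDir(String base, String user) {
    // Mirrors the logic added in the patch.
    return base.equals("/") ? base + user : base + "/" + user;
  }
  public static void main(String[] args) {
    System.out.println(homeDir("/", "bob"));      // "/bob"  (old code: "//bob")
    System.out.println(homeDir("/user", "bob"));  // "/user/bob"
  }
}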

+ 11 - 13
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -69,6 +69,7 @@ import org.apache.hadoop.security.SaslRpcClient;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenInfo;
@@ -222,7 +223,6 @@ public class Client {
  private class Connection extends Thread {
    private InetSocketAddress server;             // server ip:port
    private String serverPrincipal;  // server's krb5 principal name
-    private IpcConnectionContextProto connectionContext;   // connection context
    private final ConnectionId remoteId;                // connection id
    private AuthMethod authMethod; // authentication method
    private Token<? extends TokenIdentifier> token;
@@ -295,16 +295,14 @@ public class Client {
      }
      
      if (token != null) {
-        authMethod = AuthMethod.DIGEST;
+        authMethod = AuthenticationMethod.TOKEN.getAuthMethod();
      } else if (UserGroupInformation.isSecurityEnabled()) {
+        // eventually just use the ticket's authMethod
        authMethod = AuthMethod.KERBEROS;
      } else {
        authMethod = AuthMethod.SIMPLE;
      }
      
-      connectionContext = ProtoUtil.makeIpcConnectionContext(
-          RPC.getProtocolName(protocol), ticket, authMethod);
-      
      if (LOG.isDebugEnabled())
        LOG.debug("Use " + authMethod + " authentication for protocol "
            + protocol.getSimpleName());
@@ -605,11 +603,6 @@ public class Client {
            } else {
              // fall back to simple auth because server told us so.
              authMethod = AuthMethod.SIMPLE;
-              // remake the connectionContext             
-              connectionContext = ProtoUtil.makeIpcConnectionContext(
-                  connectionContext.getProtocol(), 
-                  ProtoUtil.getUgi(connectionContext.getUserInfo()),
-                  authMethod);
            }
          }
        
@@ -620,7 +613,7 @@ public class Client {
            this.in = new DataInputStream(new BufferedInputStream(inStream));
          }
          this.out = new DataOutputStream(new BufferedOutputStream(outStream));
-          writeConnectionContext();
+          writeConnectionContext(remoteId, authMethod);
 
 
          // update last activity time
          touch();
@@ -742,10 +735,15 @@ public class Client {
    /* Write the connection context header for each connection
     * Out is not synchronized because only the first thread does this.
     */
-    private void writeConnectionContext() throws IOException {
+    private void writeConnectionContext(ConnectionId remoteId,
+                                        AuthMethod authMethod)
+                                            throws IOException {
      // Write out the ConnectionHeader
      DataOutputBuffer buf = new DataOutputBuffer();
-      connectionContext.writeTo(buf);
+      ProtoUtil.makeIpcConnectionContext(
+          RPC.getProtocolName(remoteId.getProtocol()),
+          remoteId.getTicket(),
+          authMethod).writeTo(buf);
       
       
      // Write out the payload length
      int bufLen = buf.getLength();

+ 95 - 68
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -57,6 +57,7 @@ import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;

+import javax.security.auth.callback.CallbackHandler;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;
@@ -87,6 +88,7 @@ import org.apache.hadoop.security.SaslRpcServer.SaslDigestCallbackHandler;
import org.apache.hadoop.security.SaslRpcServer.SaslGssCallbackHandler;
import org.apache.hadoop.security.SaslRpcServer.SaslStatus;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.ProxyUsers;
@@ -974,6 +976,8 @@ public abstract class Server {
            return true;
          }
          if (!call.rpcResponse.hasRemaining()) {
+            //Clear out the response buffer so it can be collected
+            call.rpcResponse = null;
            call.connection.decRpcCount();
            if (numElements == 1) {    // last call fully processes.
              done = true;             // no more data for this channel.
@@ -1076,7 +1080,6 @@ public abstract class Server {
     
     
    IpcConnectionContextProto connectionContext;
    String protocolName;
-    boolean useSasl;
    SaslServer saslServer;
    private AuthMethod authMethod;
    private boolean saslContextEstablished;
@@ -1192,49 +1195,6 @@ public abstract class Server {
      if (!saslContextEstablished) {
        byte[] replyToken = null;
        try {
-          if (saslServer == null) {
-            switch (authMethod) {
-            case DIGEST:
-              if (secretManager == null) {
-                throw new AccessControlException(
-                    "Server is not configured to do DIGEST authentication.");
-              }
-              secretManager.checkAvailableForRead();
-              saslServer = Sasl.createSaslServer(AuthMethod.DIGEST
-                  .getMechanismName(), null, SaslRpcServer.SASL_DEFAULT_REALM,
-                  SaslRpcServer.SASL_PROPS, new SaslDigestCallbackHandler(
-                      secretManager, this));
-              break;
-            default:
-              UserGroupInformation current = UserGroupInformation
-                  .getCurrentUser();
-              String fullName = current.getUserName();
-              if (LOG.isDebugEnabled())
-                LOG.debug("Kerberos principal name is " + fullName);
-              final String names[] = SaslRpcServer.splitKerberosName(fullName);
-              if (names.length != 3) {
-                throw new AccessControlException(
-                    "Kerberos principal name does NOT have the expected "
-                        + "hostname part: " + fullName);
-              }
-              current.doAs(new PrivilegedExceptionAction<Object>() {
-                @Override
-                public Object run() throws SaslException {
-                  saslServer = Sasl.createSaslServer(AuthMethod.KERBEROS
-                      .getMechanismName(), names[0], names[1],
-                      SaslRpcServer.SASL_PROPS, new SaslGssCallbackHandler());
-                  return null;
-                }
-              });
-            }
-            if (saslServer == null)
-              throw new AccessControlException(
-                  "Unable to find SASL server implementation for "
-                      + authMethod.getMechanismName());
-            if (LOG.isDebugEnabled())
-              LOG.debug("Created SASL server with mechanism = "
-                  + authMethod.getMechanismName());
-          }
          if (LOG.isDebugEnabled())
            LOG.debug("Have read input token of size " + saslToken.length
                + " for processing by saslServer.evaluateResponse()");
@@ -1373,38 +1333,27 @@ public abstract class Server {
          dataLengthBuffer.clear();
          if (authMethod == null) {
            throw new IOException("Unable to read authentication method");
-          }          
+          }
+          boolean useSaslServer = isSecurityEnabled;
          final boolean clientUsingSasl;
          switch (authMethod) {
            case SIMPLE: { // no sasl for simple
-              if (isSecurityEnabled) {
-                AccessControlException ae = new AccessControlException("Authorization ("
-                    + CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION
-                    + ") is enabled but authentication ("
-                    + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION
-                    + ") is configured as simple. Please configure another method "
-                    + "like kerberos or digest.");
-                setupResponse(authFailedResponse, authFailedCall, RpcStatusProto.FATAL,
-                    null, ae.getClass().getName(), ae.getMessage());
-                responder.doRespond(authFailedCall);
-                throw ae;
-              }
               clientUsingSasl = false;
               clientUsingSasl = false;
-              useSasl = false; 
               break;
               break;
             }
             }
-            case DIGEST: {
+            case DIGEST: { // always allow tokens if there's a secret manager
+              useSaslServer |= (secretManager != null);
               clientUsingSasl = true;
               clientUsingSasl = true;
-              useSasl = (secretManager != null);
               break;
               break;
             }
             }
             default: {
             default: {
               clientUsingSasl = true;
               clientUsingSasl = true;
-              useSasl = isSecurityEnabled; 
               break;
               break;
             }
             }
-          }          
-          if (clientUsingSasl && !useSasl) {
+          }
+          if (useSaslServer) {
+            saslServer = createSaslServer(authMethod);
+          } else if (clientUsingSasl) { // security is off
             doSaslReply(SaslStatus.SUCCESS, new IntWritable(
             doSaslReply(SaslStatus.SUCCESS, new IntWritable(
                 SaslRpcServer.SWITCH_TO_SIMPLE_AUTH), null, null);
                 SaslRpcServer.SWITCH_TO_SIMPLE_AUTH), null, null);
             authMethod = AuthMethod.SIMPLE;
             authMethod = AuthMethod.SIMPLE;
@@ -1446,7 +1395,7 @@ public abstract class Server {
             continue;
             continue;
           }
           }
           boolean isHeaderRead = connectionContextRead;
           boolean isHeaderRead = connectionContextRead;
-          if (useSasl) {
+          if (saslServer != null) {
             saslReadAndProcess(data.array());
             saslReadAndProcess(data.array());
           } else {
           } else {
             processOneRpc(data.array());
             processOneRpc(data.array());
@@ -1460,6 +1409,84 @@ public abstract class Server {
       }
       }
     }
     }
 
 
+    private SaslServer createSaslServer(AuthMethod authMethod)
+        throws IOException {
+      try {
+        return createSaslServerInternal(authMethod);
+      } catch (IOException ioe) {
+        final String ioeClass = ioe.getClass().getName();
+        final String ioeMessage  = ioe.getLocalizedMessage();
+        if (authMethod == AuthMethod.SIMPLE) {
+          setupResponse(authFailedResponse, authFailedCall,
+              RpcStatusProto.FATAL, null, ioeClass, ioeMessage);
+          responder.doRespond(authFailedCall);
+        } else {
+          doSaslReply(SaslStatus.ERROR, null, ioeClass, ioeMessage);
+        }
+        throw ioe;
+      }
+    }
+
+    private SaslServer createSaslServerInternal(AuthMethod authMethod)
+        throws IOException {
+      SaslServer saslServer = null;
+      String hostname = null;
+      String saslProtocol = null;
+      CallbackHandler saslCallback = null;
+      
+      switch (authMethod) {
+        case SIMPLE: {
+          throw new AccessControlException("Authorization ("
+              + CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION
+              + ") is enabled but authentication ("
+              + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION
+              + ") is configured as simple. Please configure another method "
+              + "like kerberos or digest.");
+        }
+        case DIGEST: {
+          if (secretManager == null) {
+            throw new AccessControlException(
+                "Server is not configured to do DIGEST authentication.");
+          }
+          secretManager.checkAvailableForRead();
+          hostname = SaslRpcServer.SASL_DEFAULT_REALM;
+          saslCallback = new SaslDigestCallbackHandler(secretManager, this);
+          break;
+        }
+        case KERBEROS: {
+          String fullName = UserGroupInformation.getCurrentUser().getUserName();
+          if (LOG.isDebugEnabled())
+            LOG.debug("Kerberos principal name is " + fullName);
+          KerberosName krbName = new KerberosName(fullName);
+          hostname = krbName.getHostName();
+          if (hostname == null) {
+            throw new AccessControlException(
+                "Kerberos principal name does NOT have the expected "
+                    + "hostname part: " + fullName);
+          }
+          saslProtocol = krbName.getServiceName();
+          saslCallback = new SaslGssCallbackHandler();
+          break;
+        }
+        default:
+          throw new AccessControlException(
+              "Server does not support SASL " + authMethod);
+      }
+      
+      String mechanism = authMethod.getMechanismName();
+      saslServer = Sasl.createSaslServer(
+          mechanism, saslProtocol, hostname,
+          SaslRpcServer.SASL_PROPS, saslCallback);
+      if (saslServer == null) {
+        throw new AccessControlException(
+            "Unable to find SASL server implementation for " + mechanism);
+      }
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Created SASL server with mechanism = " + mechanism);
+      }
+      return saslServer;
+    }
+    
     /**
     /**
      * Try to set up the response to indicate that the client version
      * Try to set up the response to indicate that the client version
      * is incompatible with the server. This can contain special-case
      * is incompatible with the server. This can contain special-case
@@ -1521,14 +1548,14 @@ public abstract class Server {
           .getProtocol() : null;
           .getProtocol() : null;
 
 
       UserGroupInformation protocolUser = ProtoUtil.getUgi(connectionContext);
       UserGroupInformation protocolUser = ProtoUtil.getUgi(connectionContext);
-      if (!useSasl) {
+      if (saslServer == null) {
         user = protocolUser;
         user = protocolUser;
         if (user != null) {
         if (user != null) {
-          user.setAuthenticationMethod(AuthMethod.SIMPLE.authenticationMethod);
+          user.setAuthenticationMethod(AuthMethod.SIMPLE);
         }
         }
       } else {
       } else {
         // user is authenticated
         // user is authenticated
-        user.setAuthenticationMethod(authMethod.authenticationMethod);
+        user.setAuthenticationMethod(authMethod);
         //Now we check if this is a proxy user case. If the protocol user is
         //Now we check if this is a proxy user case. If the protocol user is
         //different from the 'user', it is a proxy user scenario. However, 
         //different from the 'user', it is a proxy user scenario. However, 
         //this is not allowed if user authenticated with DIGEST.
         //this is not allowed if user authenticated with DIGEST.
@@ -1997,7 +2024,7 @@ public abstract class Server {
   
   
   private void wrapWithSasl(ByteArrayOutputStream response, Call call)
   private void wrapWithSasl(ByteArrayOutputStream response, Call call)
       throws IOException {
       throws IOException {
-    if (call.connection.useSasl) {
+    if (call.connection.saslServer != null) {
       byte[] token = response.toByteArray();
       byte[] token = response.toByteArray();
       // synchronization may be needed since there can be multiple Handler
       // synchronization may be needed since there can be multiple Handler
       // threads using saslServer to wrap responses.
       // threads using saslServer to wrap responses.

+ 39 - 29
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java

@@ -25,6 +25,7 @@ import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.util.Map;
 
 
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
@@ -45,6 +46,7 @@ import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.SaslRpcServer.SaslStatus;
+import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

@@ -69,40 +71,48 @@ public class SaslRpcClient {
  public SaslRpcClient(AuthMethod method,
      Token<? extends TokenIdentifier> token, String serverPrincipal)
      throws IOException {
+    String saslUser = null;
+    String saslProtocol = null;
+    String saslServerName = null;
+    Map<String, String> saslProperties = SaslRpcServer.SASL_PROPS;
+    CallbackHandler saslCallback = null;
+    
     switch (method) {
     switch (method) {
-    case DIGEST:
-      if (LOG.isDebugEnabled())
-        LOG.debug("Creating SASL " + AuthMethod.DIGEST.getMechanismName()
-            + " client to authenticate to service at " + token.getService());
-      saslClient = Sasl.createSaslClient(new String[] { AuthMethod.DIGEST
-          .getMechanismName() }, null, null, SaslRpcServer.SASL_DEFAULT_REALM,
-          SaslRpcServer.SASL_PROPS, new SaslClientCallbackHandler(token));
-      break;
-    case KERBEROS:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Creating SASL " + AuthMethod.KERBEROS.getMechanismName()
-            + " client. Server's Kerberos principal name is "
-            + serverPrincipal);
-      }
-      if (serverPrincipal == null || serverPrincipal.length() == 0) {
-        throw new IOException(
-            "Failed to specify server's Kerberos principal name");
+      case DIGEST: {
+        saslServerName = SaslRpcServer.SASL_DEFAULT_REALM;
+        saslCallback = new SaslClientCallbackHandler(token);
+        break;
       }
       }
-      String names[] = SaslRpcServer.splitKerberosName(serverPrincipal);
-      if (names.length != 3) {
-        throw new IOException(
-          "Kerberos principal name does NOT have the expected hostname part: "
-                + serverPrincipal);
+      case KERBEROS: {
+        if (serverPrincipal == null || serverPrincipal.isEmpty()) {
+          throw new IOException(
+              "Failed to specify server's Kerberos principal name");
+        }
+        KerberosName name = new KerberosName(serverPrincipal);
+        saslProtocol = name.getServiceName();
+        saslServerName = name.getHostName();
+        if (saslServerName == null) {
+          throw new IOException(
+              "Kerberos principal name does NOT have the expected hostname part: "
+                  + serverPrincipal);
+        }
+        break;
       }
       }
-      saslClient = Sasl.createSaslClient(new String[] { AuthMethod.KERBEROS
-          .getMechanismName() }, null, names[0], names[1],
-          SaslRpcServer.SASL_PROPS, null);
-      break;
-    default:
-      throw new IOException("Unknown authentication method " + method);
+      default:
+        throw new IOException("Unknown authentication method " + method);
+    }
+    
+    String mechanism = method.getMechanismName();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Creating SASL " + mechanism
+          + " client to authenticate to service at " + saslServerName);
     }
     }
-    if (saslClient == null)
+    saslClient = Sasl.createSaslClient(
+        new String[] { mechanism }, saslUser, saslProtocol, saslServerName,
+        saslProperties, saslCallback);
+    if (saslClient == null) {
       throw new IOException("Unable to find SASL client implementation");
       throw new IOException("Unable to find SASL client implementation");
+    }
   }
   }
 
 
   private static void readStatus(DataInputStream inStream) throws IOException {
   private static void readStatus(DataInputStream inStream) throws IOException {

+ 4 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java

@@ -42,7 +42,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -137,20 +136,17 @@ public class SaslRpcServer {
  /** Authentication method */
  @InterfaceStability.Evolving
  public static enum AuthMethod {
-    SIMPLE((byte) 80, "", AuthenticationMethod.SIMPLE),
-    KERBEROS((byte) 81, "GSSAPI", AuthenticationMethod.KERBEROS),
-    DIGEST((byte) 82, "DIGEST-MD5", AuthenticationMethod.TOKEN);
+    SIMPLE((byte) 80, ""),
+    KERBEROS((byte) 81, "GSSAPI"),
+    DIGEST((byte) 82, "DIGEST-MD5");
 
 
    /** The code for this method. */
    public final byte code;
    public final String mechanismName;
-    public final AuthenticationMethod authenticationMethod;
 
 
-    private AuthMethod(byte code, String mechanismName, 
-                       AuthenticationMethod authMethod) {
+    private AuthMethod(byte code, String mechanismName) { 
      this.code = code;
      this.mechanismName = mechanismName;
-      this.authenticationMethod = authMethod;
    }

    private static final int FIRST_CODE = values()[0].code;

+ 21 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java

@@ -16,6 +16,8 @@
 */
package org.apache.hadoop.security;

+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
@@ -44,6 +46,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.ssl.SSLFactory;
@@ -665,4 +668,22 @@ public class SecurityUtil {
    }
  }

+  public static AuthenticationMethod getAuthenticationMethod(Configuration conf) {
+    String value = conf.get(HADOOP_SECURITY_AUTHENTICATION, "simple");
+    try {
+      return Enum.valueOf(AuthenticationMethod.class, value.toUpperCase());
+    } catch (IllegalArgumentException iae) {
+      throw new IllegalArgumentException("Invalid attribute value for " +
+          HADOOP_SECURITY_AUTHENTICATION + " of " + value);
+    }
+  }
+
+  public static void setAuthenticationMethod(
+      AuthenticationMethod authenticationMethod, Configuration conf) {
+    if (authenticationMethod == null) {
+      authenticationMethod = AuthenticationMethod.SIMPLE;
+    }
+    conf.set(HADOOP_SECURITY_AUTHENTICATION,
+             authenticationMethod.toString().toLowerCase());
+  }
 }
 }

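The new helpers round-trip the hadoop.security.authentication key as an enum instead of a raw string. A minimal usage sketch (illustrative only, assuming the usual Configuration/UserGroupInformation imports):

    // Illustrative only: write kerberos auth into a conf and read it back.
    Configuration conf = new Configuration();
    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);  // stores "kerberos"
    AuthenticationMethod method = SecurityUtil.getAuthenticationMethod(conf);   // returns KERBEROS
    UserGroupInformation.setConfiguration(conf);  // the updated tests below follow this same pattern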
+ 54 - 19
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -59,6 +59,7 @@ import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MutableRate;
+import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.apache.hadoop.security.token.Token;
@@ -236,15 +237,18 @@ public class UserGroupInformation {
    * @param conf the configuration to use
    */
   private static synchronized void initUGI(Configuration conf) {
-    String value = conf.get(HADOOP_SECURITY_AUTHENTICATION);
-    if (value == null || "simple".equals(value)) {
-      useKerberos = false;
-    } else if ("kerberos".equals(value)) {
-      useKerberos = true;
-    } else {
-      throw new IllegalArgumentException("Invalid attribute value for " +
-                                         HADOOP_SECURITY_AUTHENTICATION + 
-                                         " of " + value);
+    AuthenticationMethod auth = SecurityUtil.getAuthenticationMethod(conf);
+    switch (auth) {
+      case SIMPLE:
+        useKerberos = false;
+        break;
+      case KERBEROS:
+        useKerberos = true;
+        break;
+      default:
+        throw new IllegalArgumentException("Invalid attribute value for " +
+                                           HADOOP_SECURITY_AUTHENTICATION + 
+                                           " of " + auth);
     }
     try {
         kerberosMinSecondsBeforeRelogin = 1000L * conf.getLong(
@@ -636,19 +640,20 @@
       try {
         Subject subject = new Subject();
         LoginContext login;
+        AuthenticationMethod authenticationMethod;
         if (isSecurityEnabled()) {
+          authenticationMethod = AuthenticationMethod.KERBEROS;
           login = newLoginContext(HadoopConfiguration.USER_KERBEROS_CONFIG_NAME,
               subject, new HadoopConfiguration());
         } else {
+          authenticationMethod = AuthenticationMethod.SIMPLE;
           login = newLoginContext(HadoopConfiguration.SIMPLE_CONFIG_NAME, 
               subject, new HadoopConfiguration());
         }
         login.login();
         loginUser = new UserGroupInformation(subject);
         loginUser.setLogin(login);
-        loginUser.setAuthenticationMethod(isSecurityEnabled() ?
-                                          AuthenticationMethod.KERBEROS :
-                                          AuthenticationMethod.SIMPLE);
+        loginUser.setAuthenticationMethod(authenticationMethod);
         loginUser = new UserGroupInformation(login.getSubject());
         String fileLocation = System.getenv(HADOOP_TOKEN_FILE_LOCATION);
         if (fileLocation != null) {
@@ -1019,13 +1024,34 @@
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
   public static enum AuthenticationMethod {
-    SIMPLE,
-    KERBEROS,
-    TOKEN,
-    CERTIFICATE,
-    KERBEROS_SSL,
-    PROXY;
-  }
+    // currently we support only one auth per method, but eventually a 
+    // subtype is needed to differentiate, ex. if digest is token or ldap
+    SIMPLE(AuthMethod.SIMPLE),
+    KERBEROS(AuthMethod.KERBEROS),
+    TOKEN(AuthMethod.DIGEST),
+    CERTIFICATE(null),
+    KERBEROS_SSL(null),
+    PROXY(null);
+    
+    private final AuthMethod authMethod;
+    private AuthenticationMethod(AuthMethod authMethod) {
+      this.authMethod = authMethod;
+    }
+    
+    public AuthMethod getAuthMethod() {
+      return authMethod;
+    }
+    
+    public static AuthenticationMethod valueOf(AuthMethod authMethod) {
+      for (AuthenticationMethod value : values()) {
+        if (value.getAuthMethod() == authMethod) {
+          return value;
+        }
+      }
+      throw new IllegalArgumentException(
+          "no authentication method for " + authMethod);
+    }
+  };
 
   /**
    * Create a proxy user using username of the effective user and the ugi of the
@@ -1290,6 +1316,15 @@
     user.setAuthenticationMethod(authMethod);
   }
 
+  /**
+   * Sets the authentication method in the subject
+   * 
+   * @param authMethod
+   */
+  public void setAuthenticationMethod(AuthMethod authMethod) {
+    user.setAuthenticationMethod(AuthenticationMethod.valueOf(authMethod));
+  }
+
   /**
    * Get the authentication method from the subject
    * 

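With the enum constructor above, the mapping between the RPC-layer AuthMethod and the UGI-layer AuthenticationMethod becomes two-way where a SASL counterpart exists. A short illustrative sketch (not part of the patch; assumes imports of SaslRpcServer.AuthMethod and UserGroupInformation.AuthenticationMethod):

    // Illustrative only: TOKEN is carried as DIGEST on the wire and mapped back.
    AuthMethod wire = AuthenticationMethod.TOKEN.getAuthMethod();   // AuthMethod.DIGEST
    AuthenticationMethod ugi = AuthenticationMethod.valueOf(wire);  // AuthenticationMethod.TOKEN
    // Methods with no SASL counterpart map to null and cannot be reversed:
    boolean noWireForm = (AuthenticationMethod.PROXY.getAuthMethod() == null);  // true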
+ 84 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringInterner.java

@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import com.google.common.collect.Interner;
+import com.google.common.collect.Interners;
+
+/**
+ * Provides equivalent behavior to String.intern() to optimize performance, 
+ * whereby does not consume memory in the permanent generation.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class StringInterner {
+  
+  /**
+   * Retains a strong reference to each string instance it has interned.
+   */
+  private final static Interner<String> strongInterner;
+  
+  /**
+   * Retains a weak reference to each string instance it has interned. 
+   */
+  private final static Interner<String> weakInterner;
+  
+  
+  
+  static {
+    strongInterner = Interners.newStrongInterner();
+    weakInterner = Interners.newWeakInterner();
+  }
+  
+  /**
+   * Interns and returns a reference to the representative instance 
+   * for any of a collection of string instances that are equal to each other.
+   * Retains strong reference to the instance, 
+   * thus preventing it from being garbage-collected. 
+   * 
+   * @param sample string instance to be interned
+   * @return strong reference to interned string instance
+   */
+  public static String strongIntern(String sample) {
+    if (sample == null) {
+      return null;
+    }
+    return strongInterner.intern(sample);
+  }
+  
+  /**
+   * Interns and returns a reference to the representative instance 
+   * for any of a collection of string instances that are equal to each other.
+   * Retains weak reference to the instance, 
+   * and so does not prevent it from being garbage-collected.
+   * 
+   * @param sample string instance to be interned
+   * @return weak reference to interned string instance
+   */
+  public static String weakIntern(String sample) {
+    if (sample == null) {
+      return null;
+    }
+    return weakInterner.intern(sample);
+  }
+
+}

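A minimal usage sketch of the new StringInterner (illustrative only; the strings used are arbitrary examples):

    // Canonicalize frequently repeated strings without using String.intern(),
    // which is backed by the permanent generation on older JVMs.
    String a = StringInterner.weakIntern(new String("mapreduce.job.name"));
    String b = StringInterner.weakIntern(new String("mapreduce.job.name"));
    boolean sameInstance = (a == b);                     // true while the value is reachable
    String pinned = StringInterner.strongIntern("status");  // held for the life of the JVM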
+ 24 - 1
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c

@@ -254,7 +254,11 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_posix_1fadvise(
 
 
   int err = 0;
   if ((err = posix_fadvise(fd, (off_t)offset, (off_t)len, flags))) {
+#ifdef __FreeBSD__
+    throw_ioe(env, errno);
+#else
     throw_ioe(env, err);
+#endif
   }
 #endif
 }
@@ -310,6 +314,22 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_sync_1file_1range(
 #endif
 }
 
+#ifdef __FreeBSD__
+static int toFreeBSDFlags(int flags)
+{
+  int rc = flags & 03;
+  if ( flags &  0100 ) rc |= O_CREAT;
+  if ( flags &  0200 ) rc |= O_EXCL;
+  if ( flags &  0400 ) rc |= O_NOCTTY;
+  if ( flags & 01000 ) rc |= O_TRUNC;
+  if ( flags & 02000 ) rc |= O_APPEND;
+  if ( flags & 04000 ) rc |= O_NONBLOCK;
+  if ( flags &010000 ) rc |= O_SYNC;
+  if ( flags &020000 ) rc |= O_ASYNC;
+  return rc;
+}
+#endif
+
 /*
  * public static native FileDescriptor open(String path, int flags, int mode);
  */
@@ -318,6 +338,9 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_open(
   JNIEnv *env, jclass clazz, jstring j_path,
   jint flags, jint mode)
 {
+#ifdef __FreeBSD__
+  flags = toFreeBSDFlags(flags);
+#endif
   jobject ret = NULL;
 
   const char *path = (*env)->GetStringUTFChars(env, j_path, NULL);
@@ -399,7 +422,7 @@ err:
  * Determine how big a buffer we need for reentrant getpwuid_r and getgrnam_r
  */
 ssize_t get_pw_buflen() {
-  size_t ret = 0;
+  long ret = 0;
   #ifdef _SC_GETPW_R_SIZE_MAX
   ret = sysconf(_SC_GETPW_R_SIZE_MAX);
   #endif

+ 7 - 2
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c

@@ -46,6 +46,7 @@ JNIEXPORT jobjectArray JNICALL
 Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNetgroupJNI
 (JNIEnv *env, jobject jobj, jstring jgroup) {
   UserList *userListHead = NULL;
+  UserList *current = NULL;
   int       userListSize = 0;
 
   // pointers to free at the end
@@ -72,8 +73,10 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
   // was successful or not (as long as it was called we need to call
   // endnetgrent)
   setnetgrentCalledFlag = 1;
+#ifndef __FreeBSD__
   if(setnetgrent(cgroup) == 1) {
-    UserList *current = NULL;
+#endif
+    current = NULL;
     // three pointers are for host, user, domain, we only care
     // about user now
     char *p[3];
@@ -87,7 +90,9 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
         userListSize++;
       }
     }
+#ifndef __FreeBSD__
   }
+#endif
 
   //--------------------------------------------------
   // build return data (java array)
@@ -101,7 +106,7 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
     goto END;
   }
 
-  UserList * current = NULL;
+  current = NULL;
 
   // note that the loop iterates over list but also over array (i)
   int i = 0;

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/getGroup.c

@@ -78,7 +78,7 @@ int getGroupIDList(const char *user, int *ngroups, gid_t **groups) {
  */
 int getGroupDetails(gid_t group, char **grpBuf) {
   struct group * grp = NULL;
-  size_t currBufferSize = sysconf(_SC_GETGR_R_SIZE_MAX);
+  long currBufferSize = sysconf(_SC_GETGR_R_SIZE_MAX);
   if (currBufferSize < 1024) {
     currBufferSize = 1024;
   }
@@ -123,7 +123,7 @@ int getGroupDetails(gid_t group, char **grpBuf) {
  */
 int getPW(const char *user, char **pwbuf) {
   struct passwd *pwbufp = NULL;
-  size_t currBufferSize = sysconf(_SC_GETPW_R_SIZE_MAX);
+  long currBufferSize = sysconf(_SC_GETPW_R_SIZE_MAX);
   if (currBufferSize < 1024) {
     currBufferSize = 1024;
   }

+ 3 - 1
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c

@@ -32,7 +32,9 @@
 #include "bulk_crc32.h"
 #include "gcc_optimizations.h"
 
+#ifndef __FreeBSD__
 #define USE_PIPELINED
+#endif
 
 #define CRC_INITIAL_VAL 0xffffffff
 
@@ -260,7 +262,7 @@ static uint32_t crc32_zlib_sb8(
 // Begin code for SSE4.2 specific hardware support of CRC32C
 ///////////////////////////////////////////////////////////////////////////
 
-#if (defined(__amd64__) || defined(__i386)) && defined(__GNUC__)
+#if (defined(__amd64__) || defined(__i386)) && defined(__GNUC__) && !defined(__FreeBSD__)
 #  define SSE42_FEATURE_BIT (1 << 20)
 #  define CPUID_FEATURES 1
 /**

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/proto/HAServiceProtocol.proto

@@ -20,6 +20,7 @@ option java_package = "org.apache.hadoop.ha.proto";
 option java_outer_classname = "HAServiceProtocolProtos";
 option java_outer_classname = "HAServiceProtocolProtos";
 option java_generic_services = true;
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 option java_generate_equals_and_hash = true;
+package hadoop.common;
 
 
 enum HAServiceStateProto {
 enum HAServiceStateProto {
   INITIALIZING = 0;
   INITIALIZING = 0;

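This and the following five .proto files gain a "package hadoop.common;" declaration. That changes the protobuf type namespace (descriptor full names such as hadoop.common.HAServiceStateProto), but the generated Java names are still controlled by java_package and java_outer_classname. A small illustrative sketch, not from the patch, using the declarations shown above:

    // Illustrative only: generated Java location is unchanged by the proto package.
    import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto;

    class ProtoPackageExample {
      static HAServiceStateProto initialState() {
        // Java-side name stays under org.apache.hadoop.ha.proto; only the
        // descriptor/wire-level type name picks up the hadoop.common prefix.
        return HAServiceStateProto.INITIALIZING;
      }
    }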
+ 1 - 0
hadoop-common-project/hadoop-common/src/main/proto/IpcConnectionContext.proto

@@ -18,6 +18,7 @@
 option java_package = "org.apache.hadoop.ipc.protobuf";
 option java_package = "org.apache.hadoop.ipc.protobuf";
 option java_outer_classname = "IpcConnectionContextProtos";
 option java_outer_classname = "IpcConnectionContextProtos";
 option java_generate_equals_and_hash = true;
 option java_generate_equals_and_hash = true;
+package hadoop.common;
 
 
 /**
 /**
  * Spec for UserInformationProto is specified in ProtoUtil#makeIpcConnectionContext
  * Spec for UserInformationProto is specified in ProtoUtil#makeIpcConnectionContext

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/proto/ProtocolInfo.proto

@@ -20,6 +20,7 @@ option java_package = "org.apache.hadoop.ipc.protobuf";
 option java_outer_classname = "ProtocolInfoProtos";
 option java_outer_classname = "ProtocolInfoProtos";
 option java_generic_services = true;
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 option java_generate_equals_and_hash = true;
+package hadoop.common;
 
 
 /**
 /**
  * Request to get protocol versions for all supported rpc kinds.
  * Request to get protocol versions for all supported rpc kinds.

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto

@@ -18,6 +18,7 @@
 option java_package = "org.apache.hadoop.ipc.protobuf";
 option java_package = "org.apache.hadoop.ipc.protobuf";
 option java_outer_classname = "RpcPayloadHeaderProtos";
 option java_outer_classname = "RpcPayloadHeaderProtos";
 option java_generate_equals_and_hash = true;
 option java_generate_equals_and_hash = true;
+package hadoop.common;
 
 
 /**
 /**
  * This is the rpc payload header. It is sent with every rpc call.
  * This is the rpc payload header. It is sent with every rpc call.

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/proto/ZKFCProtocol.proto

@@ -20,6 +20,7 @@ option java_package = "org.apache.hadoop.ha.proto";
 option java_outer_classname = "ZKFCProtocolProtos";
 option java_outer_classname = "ZKFCProtocolProtos";
 option java_generic_services = true;
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 option java_generate_equals_and_hash = true;
+package hadoop.common;
 
 
 message CedeActiveRequestProto {
 message CedeActiveRequestProto {
   required uint32 millisToCede = 1;
   required uint32 millisToCede = 1;

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/proto/hadoop_rpc.proto

@@ -23,6 +23,7 @@
 option java_package = "org.apache.hadoop.ipc.protobuf";
 option java_package = "org.apache.hadoop.ipc.protobuf";
 option java_outer_classname = "HadoopRpcProtos";
 option java_outer_classname = "HadoopRpcProtos";
 option java_generate_equals_and_hash = true;
 option java_generate_equals_and_hash = true;
+package hadoop.common;
 
 
 /**
 /**
  * This message is used for Protobuf Rpc Engine.
  * This message is used for Protobuf Rpc Engine.

+ 11 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java

@@ -61,19 +61,28 @@ public final class FileSystemTestHelper {
     return data;
   }
   
+  
+  /*
+   * get testRootPath qualified for fSys
+   */
   public static Path getTestRootPath(FileSystem fSys) {
     return fSys.makeQualified(new Path(TEST_ROOT_DIR));
   }
 
+  /*
+   * get testRootPath + pathString qualified for fSys
+   */
   public static Path getTestRootPath(FileSystem fSys, String pathString) {
     return fSys.makeQualified(new Path(TEST_ROOT_DIR, pathString));
   }
   
   
   // the getAbsolutexxx method is needed because the root test dir
-  // can be messed up by changing the working dir.
+  // can be messed up by changing the working dir since the TEST_ROOT_PATH
+  // is often relative to the working directory of process
+  // running the unit tests.
 
-  public static String getAbsoluteTestRootDir(FileSystem fSys)
+  static String getAbsoluteTestRootDir(FileSystem fSys)
       throws IOException {
     // NOTE: can't cache because of different filesystems!
     //if (absTestRootDir == null) 

+ 11 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java

@@ -350,8 +350,12 @@ public class TestHardLink {
     callCount = createHardLinkMult(src, fileNames, tgt_mult, maxLength);
     //check the request was completed in exactly two "chunks"
     assertEquals(2, callCount);
+    String[] tgt_multNames = tgt_mult.list();
+    //sort directory listings before comparsion
+    Arrays.sort(fileNames);
+    Arrays.sort(tgt_multNames);
     //and check the results were as expected in the dir tree
-    assertTrue(Arrays.deepEquals(fileNames, tgt_mult.list()));
+    assertArrayEquals(fileNames, tgt_multNames);
     
     //Test the case where maxlength is too small even for one filename.
     //It should go ahead and try the single files.
@@ -368,8 +372,12 @@ public class TestHardLink {
         maxLength);
     //should go ahead with each of the three single file names
     assertEquals(3, callCount);
-    //check the results were as expected in the dir tree
-    assertTrue(Arrays.deepEquals(fileNames, tgt_mult.list()));
+    tgt_multNames = tgt_mult.list();
+    //sort directory listings before comparsion
+    Arrays.sort(fileNames);
+    Arrays.sort(tgt_multNames);
+    //and check the results were as expected in the dir tree
+    assertArrayEquals(fileNames, tgt_multNames);
   }
   
   /*

+ 14 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java

@@ -249,6 +249,7 @@ public class TestLocalFileSystem {
     assertEquals(1, fileSchemeCount);
   }
 
+  @Test
   public void testHasFileDescriptor() throws IOException {
     Configuration conf = new Configuration();
     LocalFileSystem fs = FileSystem.getLocal(conf);
@@ -258,4 +259,17 @@ public class TestLocalFileSystem {
         new RawLocalFileSystem().new LocalFSFileInputStream(path), 1024);
     assertNotNull(bis.getFileDescriptor());
   }
+
+  @Test
+  public void testListStatusWithColons() throws IOException {
+    Configuration conf = new Configuration();
+    LocalFileSystem fs = FileSystem.getLocal(conf);
+    File colonFile = new File(TEST_ROOT_DIR, "foo:bar");
+    colonFile.mkdirs();
+    colonFile.createNewFile();
+    FileStatus[] stats = fs.listStatus(new Path(TEST_ROOT_DIR));
+    assertEquals("Unexpected number of stats", 1, stats.length);
+    assertEquals("Bad path from stat", colonFile.getAbsolutePath(),
+        stats[0].getPath().toUri().getPath());
+  }
 }

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java

@@ -73,10 +73,10 @@ public class TestChRootedFileSystem {
     URI uri = fSys.getUri();
     Assert.assertEquals(chrootedTo.toUri(), uri);
     Assert.assertEquals(fSys.makeQualified(
-        new Path("/user/" + System.getProperty("user.name"))),
+        new Path(System.getProperty("user.home"))),
         fSys.getWorkingDirectory());
     Assert.assertEquals(fSys.makeQualified(
-        new Path("/user/" + System.getProperty("user.name"))),
+        new Path(System.getProperty("user.home"))),
         fSys.getHomeDirectory());
     /*
      * ChRootedFs as its uri like file:///chrootRoot.

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFs.java

@@ -70,10 +70,10 @@ public class TestChRootedFs {
     URI uri = fc.getDefaultFileSystem().getUri();
     Assert.assertEquals(chrootedTo.toUri(), uri);
     Assert.assertEquals(fc.makeQualified(
-        new Path("/user/" + System.getProperty("user.name"))),
+        new Path(System.getProperty("user.home"))),
         fc.getWorkingDirectory());
     Assert.assertEquals(fc.makeQualified(
-        new Path("/user/" + System.getProperty("user.name"))),
+        new Path(System.getProperty("user.home"))),
         fc.getHomeDirectory());
     /*
      * ChRootedFs as its uri like file:///chrootRoot.

+ 3 - 40
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcMainOperationsLocalFs.java

@@ -39,44 +39,7 @@ public class TestFcMainOperationsLocalFs  extends
   @Override
   @Before
   public void setUp() throws Exception {
-    /**
-     * create the test root on local_fs - the  mount table will point here
-     */
-    fclocal = FileContext.getLocalFSFileContext();
-    targetOfTests = FileContextTestHelper.getTestRootPath(fclocal);
-    // In case previous test was killed before cleanup
-    fclocal.delete(targetOfTests, true);
-    
-    fclocal.mkdir(targetOfTests, FileContext.DEFAULT_PERM, true);
-
-    
-    
-    
-    // We create mount table so that the test root on the viewFs points to 
-    // to the test root on the target.
-    // DOing this helps verify the FileStatus.path.
-    //
-    // The test root by default when running eclipse 
-    // is a test dir below the working directory. 
-    // (see FileContextTestHelper).
-    // Since viewFs has no built-in wd, its wd is /user/<username>.
-    // If this test launched via ant (build.xml) the test root is absolute path
-    
-    String srcTestRoot;
-    if (FileContextTestHelper.TEST_ROOT_DIR.startsWith("/")) {
-      srcTestRoot = FileContextTestHelper.TEST_ROOT_DIR;
-    } else {
-      srcTestRoot = "/user/"  + System.getProperty("user.name") + "/" +
-      FileContextTestHelper.TEST_ROOT_DIR;
-    }
-
-    Configuration conf = new Configuration();
-    ConfigUtil.addLink(conf, srcTestRoot,
-        targetOfTests.toUri());
-    
-    fc = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
-    //System.out.println("SRCOfTests = "+ FileContextTestHelper.getTestRootPath(fc, "test"));
-    //System.out.println("TargetOfTests = "+ targetOfTests.toUri());
+    fc = ViewFsTestSetup.setupForViewFsLocalFs();
     super.setUp();
   }
   
@@ -84,6 +47,6 @@ public class TestFcMainOperationsLocalFs  extends
   @After
   public void tearDown() throws Exception {
     super.tearDown();
-    fclocal.delete(targetOfTests, true);
+    ViewFsTestSetup.tearDownForViewFsLocalFs();
   }
-}
+}

+ 57 - 18
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java

@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.fs.viewfs;
 
+import java.net.URI;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsConstants;
@@ -32,14 +35,19 @@ import org.mortbay.log.Log;
  * 
  * If tests launched via ant (build.xml) the test root is absolute path
  * If tests launched via eclipse, the test root is 
- * is a test dir below the working directory. (see FileSystemTestHelper).
- * Since viewFs has no built-in wd, its wd is /user/<username> 
- *          (or /User/<username> on mac)
+ * is a test dir below the working directory. (see FileContextTestHelper)
+ * 
+ * We set a viewFileSystems with 3 mount points: 
+ * 1) /<firstComponent>" of testdir  pointing to same in  target fs
+ * 2)   /<firstComponent>" of home  pointing to same in  target fs 
+ * 3)  /<firstComponent>" of wd  pointing to same in  target fs
+ * (note in many cases the link may be the same - viewFileSytem handles this)
  * 
- * We set a viewFileSystems with mount point for 
- * /<firstComponent>" pointing to the target fs's  testdir 
+ * We also set the view file system's wd to point to the wd. 
  */
 public class ViewFileSystemTestSetup {
+  
+  static public String ViewFSTestDir = "/testDir";
 
   /**
    * 
@@ -56,24 +64,26 @@ public class ViewFileSystemTestSetup {
     fsTarget.delete(targetOfTests, true);
     fsTarget.mkdirs(targetOfTests);
 
-    // Setup a link from viewfs to targetfs for the first component of
-    // path of testdir.
+
+    // Set up viewfs link for test dir as described above
     String testDir = FileSystemTestHelper.getTestRootPath(fsTarget).toUri()
         .getPath();
-    int indexOf2ndSlash = testDir.indexOf('/', 1);
-    String testDirFirstComponent = testDir.substring(0, indexOf2ndSlash);
-    ConfigUtil.addLink(conf, testDirFirstComponent, fsTarget.makeQualified(
-        new Path(testDirFirstComponent)).toUri());
+    linkUpFirstComponents(conf, testDir, fsTarget, "test dir");
+    
+    
+    // Set up viewfs link for home dir as described above
+    setUpHomeDir(conf, fsTarget);
+    
+    
+    // the test path may be relative to working dir - we need to make that work:
+    // Set up viewfs link for wd as described above
+    String wdDir = fsTarget.getWorkingDirectory().toUri().getPath();
+    linkUpFirstComponents(conf, wdDir, fsTarget, "working dir");
 
-    // viewFs://home => fsTarget://home
-    String homeDirRoot = fsTarget.getHomeDirectory()
-        .getParent().toUri().getPath();
-    ConfigUtil.addLink(conf, homeDirRoot,
-        fsTarget.makeQualified(new Path(homeDirRoot)).toUri());
-    ConfigUtil.setHomeDirConf(conf, homeDirRoot);
-    Log.info("Home dir base " + homeDirRoot);
 
     FileSystem fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
+    fsView.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd.
+    Log.info("Working dir is: " + fsView.getWorkingDirectory());
     return fsView;
   }
 
@@ -91,4 +101,33 @@ public class ViewFileSystemTestSetup {
     conf.set("fs.viewfs.impl", ViewFileSystem.class.getName());
     conf.set("fs.viewfs.impl", ViewFileSystem.class.getName());
     return conf; 
     return conf; 
   }
   }
+  
+  static void setUpHomeDir(Configuration conf, FileSystem fsTarget) {
+    String homeDir = fsTarget.getHomeDirectory().toUri().getPath();
+    int indexOf2ndSlash = homeDir.indexOf('/', 1);
+    if (indexOf2ndSlash >0) {
+      linkUpFirstComponents(conf, homeDir, fsTarget, "home dir");
+    } else { // home dir is at root. Just link the home dir itse
+      URI linkTarget = fsTarget.makeQualified(new Path(homeDir)).toUri();
+      ConfigUtil.addLink(conf, homeDir, linkTarget);
+      Log.info("Added link for home dir " + homeDir + "->" + linkTarget);
+    }
+    // Now set the root of the home dir for viewfs
+    String homeDirRoot = fsTarget.getHomeDirectory().getParent().toUri().getPath();
+    ConfigUtil.setHomeDirConf(conf, homeDirRoot);
+    Log.info("Home dir base for viewfs" + homeDirRoot);  
+  }
+  
+  /*
+   * Set up link in config for first component of path to the same
+   * in the target file system.
+   */
+  static void linkUpFirstComponents(Configuration conf, String path, FileSystem fsTarget, String info) {
+    int indexOf2ndSlash = path.indexOf('/', 1);
+    String firstComponent = path.substring(0, indexOf2ndSlash);
+    URI linkTarget = fsTarget.makeQualified(new Path(firstComponent)).toUri();
+    ConfigUtil.addLink(conf, firstComponent, linkTarget);
+    Log.info("Added link for " + info + " " 
+        + firstComponent + "->" + linkTarget);    
+  }
 }

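For readers following the new mount-table setup: linkUpFirstComponents links only the first path component, so one mount entry covers the whole subtree. A tiny illustrative sketch with a hypothetical path (not taken from the patch):

    // Illustrative only: for a target working dir of /home/alice/hadoop,
    // the helper adds a single mount /home -> file:/home and viewfs resolves
    // the rest of the path through that one link.
    String path = "/home/alice/hadoop";
    String firstComponent = path.substring(0, path.indexOf('/', 1));  // "/home"
    // ConfigUtil.addLink(conf, firstComponent,
    //     fsTarget.makeQualified(new Path(firstComponent)).toUri());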
+ 65 - 23
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java

@@ -17,12 +17,15 @@
  */
 package org.apache.hadoop.fs.viewfs;
 
+import java.net.URI;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileContextTestHelper;
 import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.viewfs.ConfigUtil;
+import org.mortbay.log.Log;
 
 
 
 
 /**
@@ -31,13 +34,20 @@ import org.apache.hadoop.fs.viewfs.ConfigUtil;
  * 
  * If tests launched via ant (build.xml) the test root is absolute path
  * If tests launched via eclipse, the test root is 
- * is a test dir below the working directory. (see FileContextTestHelper).
- * Since viewFs has no built-in wd, its wd is /user/<username>.
+ * is a test dir below the working directory. (see FileContextTestHelper)
+ * 
+ * We set a viewfs with 3 mount points: 
+ * 1) /<firstComponent>" of testdir  pointing to same in  target fs
+ * 2)   /<firstComponent>" of home  pointing to same in  target fs 
+ * 3)  /<firstComponent>" of wd  pointing to same in  target fs
+ * (note in many cases the link may be the same - viewfs handles this)
  * 
- * We set up fc to be the viewFs with mount point for 
- * /<firstComponent>" pointing to the local file system's testdir 
+ * We also set the view file system's wd to point to the wd.  
  */
+
 public class ViewFsTestSetup {
+  
+  static public String ViewFSTestDir = "/testDir";
 
 
 
 
    /* 
@@ -47,30 +57,31 @@ public class ViewFsTestSetup {
     /**
      * create the test root on local_fs - the  mount table will point here
      */
-    FileContext fclocal = FileContext.getLocalFSFileContext();
-    Path targetOfTests = FileContextTestHelper.getTestRootPath(fclocal);
+    FileContext fsTarget = FileContext.getLocalFSFileContext();
+    Path targetOfTests = FileContextTestHelper.getTestRootPath(fsTarget);
     // In case previous test was killed before cleanup
-    fclocal.delete(targetOfTests, true);
+    fsTarget.delete(targetOfTests, true);
     
     
-    fclocal.mkdir(targetOfTests, FileContext.DEFAULT_PERM, true);
-  
-    String srcTestFirstDir;
-    if (FileContextTestHelper.TEST_ROOT_DIR.startsWith("/")) {
-      int indexOf2ndSlash = FileContextTestHelper.TEST_ROOT_DIR.indexOf('/', 1);
-      srcTestFirstDir = FileContextTestHelper.TEST_ROOT_DIR.substring(0, indexOf2ndSlash);
-    } else {
-      srcTestFirstDir = "/user"; 
-  
-    }
-    //System.out.println("srcTestFirstDir=" + srcTestFirstDir);
-  
-    // Set up the defaultMT in the config with mount point links
-    // The test dir is root is below  /user/<userid>
+    fsTarget.mkdir(targetOfTests, FileContext.DEFAULT_PERM, true);
     Configuration conf = new Configuration();
-    ConfigUtil.addLink(conf, srcTestFirstDir,
-        targetOfTests.toUri());
+    
+    // Set up viewfs link for test dir as described above
+    String testDir = FileContextTestHelper.getTestRootPath(fsTarget).toUri()
+        .getPath();
+    linkUpFirstComponents(conf, testDir, fsTarget, "test dir");
+    
+    
+    // Set up viewfs link for home dir as described above
+    setUpHomeDir(conf, fsTarget);
+      
+    // the test path may be relative to working dir - we need to make that work:
+    // Set up viewfs link for wd as described above
+    String wdDir = fsTarget.getWorkingDirectory().toUri().getPath();
+    linkUpFirstComponents(conf, wdDir, fsTarget, "working dir");
     
     
     FileContext fc = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
+    fc.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd.
+    Log.info("Working dir is: " + fc.getWorkingDirectory());
     //System.out.println("SRCOfTests = "+ getTestRootPath(fc, "test"));
     //System.out.println("SRCOfTests = "+ getTestRootPath(fc, "test"));
     //System.out.println("TargetOfTests = "+ targetOfTests.toUri());
     //System.out.println("TargetOfTests = "+ targetOfTests.toUri());
     return fc;
     return fc;
@@ -85,5 +96,36 @@ public class ViewFsTestSetup {
     Path targetOfTests = FileContextTestHelper.getTestRootPath(fclocal);
     fclocal.delete(targetOfTests, true);
   }
+  
+  
+  static void setUpHomeDir(Configuration conf, FileContext fsTarget) {
+    String homeDir = fsTarget.getHomeDirectory().toUri().getPath();
+    int indexOf2ndSlash = homeDir.indexOf('/', 1);
+    if (indexOf2ndSlash >0) {
+      linkUpFirstComponents(conf, homeDir, fsTarget, "home dir");
+    } else { // home dir is at root. Just link the home dir itse
+      URI linkTarget = fsTarget.makeQualified(new Path(homeDir)).toUri();
+      ConfigUtil.addLink(conf, homeDir, linkTarget);
+      Log.info("Added link for home dir " + homeDir + "->" + linkTarget);
+    }
+    // Now set the root of the home dir for viewfs
+    String homeDirRoot = fsTarget.getHomeDirectory().getParent().toUri().getPath();
+    ConfigUtil.setHomeDirConf(conf, homeDirRoot);
+    Log.info("Home dir base for viewfs" + homeDirRoot);  
+  }
+  
+  /*
+   * Set up link in config for first component of path to the same
+   * in the target file system.
+   */
+  static void linkUpFirstComponents(Configuration conf, String path,
+      FileContext fsTarget, String info) {
+    int indexOf2ndSlash = path.indexOf('/', 1);
+    String firstComponent = path.substring(0, indexOf2ndSlash);
+    URI linkTarget = fsTarget.makeQualified(new Path(firstComponent)).toUri();
+    ConfigUtil.addLink(conf, firstComponent, linkTarget);
+    Log.info("Added link for " + info + " " 
+        + firstComponent + "->" + linkTarget);    
+  }
 
 
 }

+ 4 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java

@@ -224,7 +224,10 @@ public class TestNativeIO {
       // we should just skip the unit test on machines where we don't
       // have fadvise support
       assumeTrue(false);
-    } finally {
+    } catch (NativeIOException nioe) {
+      // ignore this error as FreeBSD returns EBADF even if length is zero
+    }
+      finally {
       fis.close();
     }
 

+ 1 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java

@@ -30,7 +30,6 @@ import junit.framework.Assert;
 
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.KerberosInfo;
@@ -380,9 +379,7 @@ public class MiniRPCBenchmark {
       elapsedTime = mb.runMiniBenchmarkWithDelegationToken(
                               conf, count, KEYTAB_FILE_KEY, USER_NAME_KEY);
     } else {
-      String auth = 
-        conf.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, 
-                        "simple");
+      String auth = SecurityUtil.getAuthenticationMethod(conf).toString();
       System.out.println(
           "Running MiniRPCBenchmark with " + auth + " authentication.");
       elapsedTime = mb.runMiniBenchmark(

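One side effect of the MiniRPCBenchmark change, as I read it (not stated in the patch): the old code printed the raw configuration value (for example "simple"), while the new code prints the enum's name, so the banner now shows the method in upper case. A tiny illustrative sketch:

    // Illustrative only: enum name vs. raw config value.
    Configuration conf = new Configuration();
    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.SIMPLE, conf);
    String auth = SecurityUtil.getAuthenticationMethod(conf).toString();  // "SIMPLE"
    System.out.println("Running MiniRPCBenchmark with " + auth + " authentication.");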
+ 19 - 6
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java

@@ -55,13 +55,16 @@ import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.Service;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.MockitoUtil;
+import org.junit.Before;
 import org.junit.Test;
 
 import com.google.protobuf.DescriptorProtos;
@@ -75,11 +78,14 @@ public class TestRPC {
   public static final Log LOG =
     LogFactory.getLog(TestRPC.class);
   
-  private static Configuration conf = new Configuration();
+  private static Configuration conf;
   
-  static {
+  @Before
+  public void setupConf() {
+    conf = new Configuration();
     conf.setClass("rpc.engine." + StoppedProtocol.class.getName(),
         StoppedRpcEngine.class, RpcEngine.class);
+    UserGroupInformation.setConfiguration(conf);
   }
 
   int datasize = 1024*100;
@@ -676,11 +682,17 @@ public class TestRPC {
   
   @Test
   public void testErrorMsgForInsecureClient() throws Exception {
-    final Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
+    Configuration serverConf = new Configuration(conf);
+    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,
+                                         serverConf);
+    UserGroupInformation.setConfiguration(serverConf);
+    
+    final Server server = new RPC.Builder(serverConf).setProtocol(TestProtocol.class)
        .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
        .setNumHandlers(5).setVerbose(true).build();
-    server.enableSecurity();
     server.start();
+
+    UserGroupInformation.setConfiguration(conf);
     boolean succeeded = false;
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
     TestProtocol proxy = null;
@@ -702,17 +714,18 @@ public class TestRPC {
 
     conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY, 2);
 
-    final Server multiServer = new RPC.Builder(conf)
+    UserGroupInformation.setConfiguration(serverConf);
+    final Server multiServer = new RPC.Builder(serverConf)
         .setProtocol(TestProtocol.class).setInstance(new TestImpl())
         .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
         .build();
-    multiServer.enableSecurity();
     multiServer.start();
     succeeded = false;
     final InetSocketAddress mulitServerAddr =
                       NetUtils.getConnectAddress(multiServer);
     proxy = null;
     try {
+      UserGroupInformation.setConfiguration(conf);
       proxy = (TestProtocol) RPC.getProxy(TestProtocol.class,
           TestProtocol.versionID, mulitServerAddr, conf);
       proxy.echo("");

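With Server#enableSecurity()/disableSecurity() gone, the test drives per-side security purely through configuration, switching the static UGI state before building each endpoint. A compressed sketch of that pattern, using the names from the hunk above (illustrative only):

    // Illustrative only: kerberized server conf vs. simple client conf.
    Configuration serverConf = new Configuration(conf);
    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, serverConf);
    UserGroupInformation.setConfiguration(serverConf);  // build and start the server under kerberos
    // ... RPC.Builder(serverConf) ... build() ... start() ...
    UserGroupInformation.setConfiguration(conf);        // revert to simple before creating the client proxy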
+ 6 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java

@@ -36,6 +36,7 @@ import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureRes
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolSignatureProto;
 import org.apache.hadoop.net.NetUtils;
 import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 
 /** Unit test for supporting method-name based compatible RPCs. */
@@ -114,6 +115,11 @@ public class TestRPCCompatibility {
     }
 
   }
+
+  @Before
+  public void setUp() {
+    ProtocolSignature.resetCache();
+  }
   
   @After
   public void tearDown() throws IOException {
@@ -219,7 +225,6 @@System.out.println("echo int is NOT supported");
   
   @Test // equal version client and server
   public void testVersion2ClientVersion2Server() throws Exception {
-    ProtocolSignature.resetCache();
     // create a server with two handlers
     TestImpl2 impl = new TestImpl2();
     server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)

+ 175 - 112
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java

@@ -18,8 +18,9 @@
 
 
 package org.apache.hadoop.ipc;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.*;
 import static org.junit.Assert.*;
+
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
@@ -28,6 +29,7 @@ import java.net.InetSocketAddress;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
 import java.util.Set;
+import java.util.regex.Pattern;
 
 import javax.security.sasl.Sasl;
 
@@ -41,15 +43,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.KerberosInfo;
-import org.apache.hadoop.security.SaslInputStream;
-import org.apache.hadoop.security.SaslRpcClient;
-import org.apache.hadoop.security.SaslRpcServer;
-import org.apache.hadoop.security.SecurityInfo;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.TestUserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.*;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.Token;
@@ -57,7 +51,10 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.TokenInfo;
 import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+
 import org.apache.log4j.Level;
+import org.apache.tools.ant.types.Assertions.EnabledAssertion;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -73,13 +70,21 @@ public class TestSaslRPC {
   static final String SERVER_KEYTAB_KEY = "test.ipc.server.keytab";
   static final String SERVER_KEYTAB_KEY = "test.ipc.server.keytab";
   static final String SERVER_PRINCIPAL_1 = "p1/foo@BAR";
   static final String SERVER_PRINCIPAL_1 = "p1/foo@BAR";
   static final String SERVER_PRINCIPAL_2 = "p2/foo@BAR";
   static final String SERVER_PRINCIPAL_2 = "p2/foo@BAR";
-  
   private static Configuration conf;
   private static Configuration conf;
+  static Boolean forceSecretManager = null;
+  
   @BeforeClass
   @BeforeClass
-  public static void setup() {
+  public static void setupKerb() {
+    System.setProperty("java.security.krb5.kdc", "");
+    System.setProperty("java.security.krb5.realm", "NONE"); 
+  }    
+
+  @Before
+  public void setup() {
     conf = new Configuration();
     conf = new Configuration();
-    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
     UserGroupInformation.setConfiguration(conf);
     UserGroupInformation.setConfiguration(conf);
+    forceSecretManager = null;
   }
   }
 
 
   static {
   static {
@@ -186,6 +191,7 @@ public class TestSaslRPC {
   @TokenInfo(TestTokenSelector.class)
   public interface TestSaslProtocol extends TestRPC.TestProtocol {
     public AuthenticationMethod getAuthMethod() throws IOException;
+    public String getAuthUser() throws IOException;
   }
   
   public static class TestSaslImpl extends TestRPC.TestImpl implements
@@ -194,6 +200,10 @@ public class TestSaslRPC {
     public AuthenticationMethod getAuthMethod() throws IOException {
       return UserGroupInformation.getCurrentUser().getAuthenticationMethod();
     }
+    @Override
+    public String getAuthUser() throws IOException {
+      return UserGroupInformation.getCurrentUser().getUserName();
+    }
   }
 
   public static class CustomSecurityInfo extends SecurityInfo {
@@ -258,16 +268,6 @@ public class TestSaslRPC {
     }
   }
 
-  @Test
-  public void testSecureToInsecureRpc() throws Exception {
-    Server server = new RPC.Builder(conf).setProtocol(TestSaslProtocol.class)
-        .setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0)
-        .setNumHandlers(5).setVerbose(true).build();
-    server.disableSecurity();
-    TestTokenSecretManager sm = new TestTokenSecretManager();
-    doDigestRpc(server, sm);
-  }
-  
   @Test
   public void testErrorMessage() throws Exception {
     BadTokenSecretManager sm = new BadTokenSecretManager();
@@ -345,7 +345,7 @@ public class TestSaslRPC {
           new InetSocketAddress(0), TestSaslProtocol.class, null, 0, newConf);
       assertEquals(SERVER_PRINCIPAL_1, remoteId.getServerPrincipal());
       // this following test needs security to be off
-      newConf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
+      SecurityUtil.setAuthenticationMethod(SIMPLE, newConf);
       UserGroupInformation.setConfiguration(newConf);
       remoteId = ConnectionId.getConnectionId(new InetSocketAddress(0),
           TestSaslProtocol.class, null, 0, newConf);
@@ -448,127 +448,176 @@ public class TestSaslRPC {
     System.out.println("Test is successful.");
     System.out.println("Test is successful.");
   }
   }
 
 
-  // insecure -> insecure
+  private static Pattern BadToken =
+      Pattern.compile(".*DIGEST-MD5: digest response format violation.*");
+  private static Pattern KrbFailed =
+      Pattern.compile(".*Failed on local exception:.* " +
+                      "Failed to specify server's Kerberos principal name.*");
+  private static Pattern Denied = 
+      Pattern.compile(".*Authorization .* is enabled .*");
+  private static Pattern NoDigest =
+      Pattern.compile(".*Server is not configured to do DIGEST auth.*");
+  
+  /*
+   *  simple server
+   */
   @Test
-  public void testInsecureClientInsecureServer() throws Exception {
-    assertEquals(AuthenticationMethod.SIMPLE,
-                 getAuthMethod(false, false, false));
+  public void testSimpleServer() throws Exception {
+    assertAuthEquals(SIMPLE,    getAuthMethod(SIMPLE,   SIMPLE));
+    // SASL methods are reverted to SIMPLE, but test setup fails
+    assertAuthEquals(KrbFailed, getAuthMethod(KERBEROS, SIMPLE));
   }

   @Test
-  public void testInsecureClientInsecureServerWithToken() throws Exception {
-    assertEquals(AuthenticationMethod.TOKEN,
-                 getAuthMethod(false, false, true));
+  public void testSimpleServerWithTokens() throws Exception {
+    // Tokens are ignored because client is reverted to simple
+    assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE,   SIMPLE, true));
+    assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, true));
+    forceSecretManager = true;
+    assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE,   SIMPLE, true));
+    assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, true));
   }
-
-  // insecure -> secure
+    
   @Test
-  public void testInsecureClientSecureServer() throws Exception {
-    RemoteException e = null;
-    try {
-      getAuthMethod(false, true, false);
-    } catch (RemoteException re) {
-      e = re;
-    }
-    assertNotNull(e);
-    assertEquals(AccessControlException.class.getName(), e.getClassName());
+  public void testSimpleServerWithInvalidTokens() throws Exception {
+    // Tokens are ignored because client is reverted to simple
+    assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE,   SIMPLE, false));
+    assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, false));
+    forceSecretManager = true;
+    assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE,   SIMPLE, false));
+    assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, false));
   }
-
+  
+  /*
+   * kerberos server
+   */
   @Test
-  public void testInsecureClientSecureServerWithToken() throws Exception {
-    assertEquals(AuthenticationMethod.TOKEN,
-                 getAuthMethod(false, true, true));
+  public void testKerberosServer() throws Exception {
+    assertAuthEquals(Denied,    getAuthMethod(SIMPLE,   KERBEROS));
+    assertAuthEquals(KrbFailed, getAuthMethod(KERBEROS, KERBEROS));    
   }

-  // secure -> secure
   @Test
-  public void testSecureClientSecureServer() throws Exception {
-    /* Should be this when multiple secure auths are supported and we can
-     * dummy one out:
-     *     assertEquals(AuthenticationMethod.SECURE_AUTH_METHOD,
-     *                  getAuthMethod(true, true, false));
-     */
-    try {
-      getAuthMethod(true, true, false);
-    } catch (IOException ioe) {
-      // can't actually test kerberos w/o kerberos...
-      String expectedError = "Failed to specify server's Kerberos principal";
-      String actualError = ioe.getMessage();
-      assertTrue("["+actualError+"] doesn't start with ["+expectedError+"]",
-          actualError.contains(expectedError));
-    }
+  public void testKerberosServerWithTokens() throws Exception {
+    // can use tokens regardless of auth
+    assertAuthEquals(TOKEN, getAuthMethod(SIMPLE,   KERBEROS, true));
+    assertAuthEquals(TOKEN, getAuthMethod(KERBEROS, KERBEROS, true));
+    // can't fallback to simple when using kerberos w/o tokens
+    forceSecretManager = false;
+    assertAuthEquals(NoDigest, getAuthMethod(SIMPLE,   KERBEROS, true));
+    assertAuthEquals(NoDigest, getAuthMethod(KERBEROS, KERBEROS, true));
   }

   @Test
-  public void testSecureClientSecureServerWithToken() throws Exception {
-    assertEquals(AuthenticationMethod.TOKEN,
-                 getAuthMethod(true, true, true));
+  public void testKerberosServerWithInvalidTokens() throws Exception {
+    assertAuthEquals(BadToken, getAuthMethod(SIMPLE,   KERBEROS, false));
+    assertAuthEquals(BadToken, getAuthMethod(KERBEROS, KERBEROS, false));
+    forceSecretManager = false;
+    assertAuthEquals(NoDigest, getAuthMethod(SIMPLE,   KERBEROS, true));
+    assertAuthEquals(NoDigest, getAuthMethod(KERBEROS, KERBEROS, true));
   }

-  // secure -> insecure
-  @Test
-  public void testSecureClientInsecureServerWithToken() throws Exception {
-    assertEquals(AuthenticationMethod.TOKEN,
-                 getAuthMethod(true, false, true));
-  }
 
-  @Test
-  public void testSecureClientInsecureServer() throws Exception {
-    /* Should be this when multiple secure auths are supported and we can
-     * dummy one out:
-     *     assertEquals(AuthenticationMethod.SIMPLE
-     *                  getAuthMethod(true, false, false));
-     */
+  // test helpers
+
+  private String getAuthMethod(
+      final AuthenticationMethod clientAuth,
+      final AuthenticationMethod serverAuth) throws Exception {
     try {
-      getAuthMethod(true, false, false);
-    } catch (IOException ioe) {
-      // can't actually test kerberos w/o kerberos...
-      String expectedError = "Failed to specify server's Kerberos principal";
-      String actualError = ioe.getMessage();
-      assertTrue("["+actualError+"] doesn't start with ["+expectedError+"]",
-          actualError.contains(expectedError));
+      return internalGetAuthMethod(clientAuth, serverAuth, false, false);
+    } catch (Exception e) {
+      return e.toString();
     }
   }

-
-  private AuthenticationMethod getAuthMethod(final boolean isSecureClient,
-                                             final boolean isSecureServer,
-                                             final boolean useToken
-                                             
-      ) throws Exception {
-    TestTokenSecretManager sm = new TestTokenSecretManager();
-    Server server = new RPC.Builder(conf).setProtocol(TestSaslProtocol.class)
-        .setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0)
-        .setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();      
-    if (isSecureServer) {
-      server.enableSecurity();
-    } else {
-      server.disableSecurity();
+  private String getAuthMethod(
+      final AuthenticationMethod clientAuth,
+      final AuthenticationMethod serverAuth,
+      final boolean useValidToken) throws Exception {
+    try {
+      return internalGetAuthMethod(clientAuth, serverAuth, true, useValidToken);
+    } catch (Exception e) {
+      return e.toString();
     }
-    server.start();
+  }
+  
+  private String internalGetAuthMethod(
+      final AuthenticationMethod clientAuth,
+      final AuthenticationMethod serverAuth,
+      final boolean useToken,
+      final boolean useValidToken) throws Exception {
+    
+    String currentUser = UserGroupInformation.getCurrentUser().getUserName();
+    
+    final Configuration serverConf = new Configuration(conf);
+    SecurityUtil.setAuthenticationMethod(serverAuth, serverConf);
+    UserGroupInformation.setConfiguration(serverConf);
+    
+    final UserGroupInformation serverUgi =
+        UserGroupInformation.createRemoteUser(currentUser + "-SERVER");
+    serverUgi.setAuthenticationMethod(serverAuth);
+
+    final TestTokenSecretManager sm = new TestTokenSecretManager();
+    boolean useSecretManager = (serverAuth != SIMPLE);
+    if (forceSecretManager != null) {
+      useSecretManager &= forceSecretManager.booleanValue();
+    }
+    final SecretManager<?> serverSm = useSecretManager ? sm : null;
+    
+    Server server = serverUgi.doAs(new PrivilegedExceptionAction<Server>() {
+      @Override
+      public Server run() throws IOException {
+        Server server = new RPC.Builder(serverConf)
+        .setProtocol(TestSaslProtocol.class)
+        .setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0)
+        .setNumHandlers(5).setVerbose(true)
+        .setSecretManager(serverSm)
+        .build();      
+        server.start();
+        return server;
+      }
+    });
+
+    final Configuration clientConf = new Configuration(conf);
+    SecurityUtil.setAuthenticationMethod(clientAuth, clientConf);
+    UserGroupInformation.setConfiguration(clientConf);
+    
+    final UserGroupInformation clientUgi =
+        UserGroupInformation.createRemoteUser(currentUser + "-CLIENT");
+    clientUgi.setAuthenticationMethod(clientAuth);    
 
-    final UserGroupInformation current = UserGroupInformation.getCurrentUser();
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
     if (useToken) {
       TestTokenIdentifier tokenId = new TestTokenIdentifier(
-          new Text(current.getUserName()));
-      Token<TestTokenIdentifier> token =
-          new Token<TestTokenIdentifier>(tokenId, sm);
+          new Text(clientUgi.getUserName()));
+      Token<TestTokenIdentifier> token = useValidToken
+          ? new Token<TestTokenIdentifier>(tokenId, sm)
+          : new Token<TestTokenIdentifier>(
+              tokenId.getBytes(), "bad-password!".getBytes(),
+              tokenId.getKind(), null);
+      
       SecurityUtil.setTokenService(token, addr);
-      current.addToken(token);
+      clientUgi.addToken(token);
     }

-    conf.set(HADOOP_SECURITY_AUTHENTICATION, isSecureClient ? "kerberos" : "simple");
-    UserGroupInformation.setConfiguration(conf);
     try {
-      return current.doAs(new PrivilegedExceptionAction<AuthenticationMethod>() {
+      return clientUgi.doAs(new PrivilegedExceptionAction<String>() {
         @Override
-        public AuthenticationMethod run() throws IOException {
+        public String run() throws IOException {
           TestSaslProtocol proxy = null;
           try {
             proxy = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class,
-                TestSaslProtocol.versionID, addr, conf);
-            return proxy.getAuthMethod();
+                TestSaslProtocol.versionID, addr, clientConf);
+            
+            proxy.ping();
+            // verify sasl completed
+            if (serverAuth != SIMPLE) {
+              assertEquals(SaslRpcServer.SASL_PROPS.get(Sasl.QOP), "auth");
+            }
+            
+            // make sure the other side thinks we are who we said we are!!!
+            assertEquals(clientUgi.getUserName(), proxy.getAuthUser());
+            return proxy.getAuthMethod().toString();
           } finally {
             if (proxy != null) {
               RPC.stopProxy(proxy);
@@ -580,7 +629,22 @@ public class TestSaslRPC {
       server.stop();
     }
   }
+
+  private static void assertAuthEquals(AuthenticationMethod expect,
+      String actual) {
+    assertEquals(expect.toString(), actual);
+  }
   
   
+  private static void assertAuthEquals(Pattern expect,
+      String actual) {
+    // this allows us to see the regexp and the value it didn't match
+    if (!expect.matcher(actual).matches()) {
+      assertEquals(expect, actual); // it failed
+    } else {
+      assertTrue(true); // it matched
+    }
+  }
+
   public static void main(String[] args) throws Exception {
     System.out.println("Testing Kerberos authentication over RPC");
     if (args.length != 2) {
@@ -593,5 +657,4 @@ public class TestSaslRPC {
     String keytab = args[1];
     testKerberosRpc(principal, keytab);
   }
-
 }

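Editor's note: the rewritten TestSaslRPC above drives every client/server authentication pairing through one helper and compares the outcome, either an AuthenticationMethod name or the text of the client-side exception, against an exact value or a regular expression. The following is a minimal, self-contained sketch of that expected-versus-actual matching; the class name AuthMatrixSketch and the check() helper are invented for illustration and are not part of the patch.

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.util.regex.Pattern;

// Illustrative sketch only: mirrors the matching performed by the patch's
// assertAuthEquals() overloads. check() stands in for those helpers.
public class AuthMatrixSketch {

  // Expected outcomes are either an exact enum name ("SIMPLE", "TOKEN", ...)
  // or a Pattern that the failure text must match.
  static void check(Object expected, String actual) {
    if (expected instanceof Pattern) {
      assertTrue("unexpected result: " + actual,
          ((Pattern) expected).matcher(actual).matches());
    } else {
      assertEquals(expected.toString(), actual);
    }
  }

  public static void main(String[] args) {
    check("SIMPLE", "SIMPLE");                        // simple client -> simple server
    check(Pattern.compile(".*Kerberos principal.*"),  // kerberos attempt with no KDC configured
        "java.io.IOException: Failed to specify server's Kerberos principal name");
  }
}
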
+ 15 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/SecurityUtilTestHelper.java

@@ -27,4 +27,19 @@ public class SecurityUtilTestHelper {
   public static void setTokenServiceUseIp(boolean flag) {
     SecurityUtil.setTokenServiceUseIp(flag);
   }
+
+  /**
+   * Return true if externalKdc=true and the location of the krb5.conf
+   * file has been specified, and false otherwise.
+   */
+  public static boolean isExternalKdcRunning() {
+    String externalKdc = System.getProperty("externalKdc");
+    String krb5Conf = System.getProperty("java.security.krb5.conf");
+    if(externalKdc == null || !externalKdc.equals("true") ||
+       krb5Conf == null) {
+      return false;
+    }
+    return true;
+  }
+
 }

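Editor's note: the new isExternalKdcRunning() helper above is intended to let tests skip themselves unless an external KDC was configured through system properties. Below is a hedged sketch of how a test might consume it; only SecurityUtilTestHelper.isExternalKdcRunning() comes from the patch, while the class and method names here are invented.

import org.apache.hadoop.security.SecurityUtilTestHelper;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;

// Illustrative only: the test is skipped unless -DexternalKdc=true and
// -Djava.security.krb5.conf=... were passed to the JVM.
public class ExternalKdcGuardSketch {
  @Before
  public void requireExternalKdc() {
    Assume.assumeTrue(SecurityUtilTestHelper.isExternalKdcRunning());
  }

  @Test
  public void somethingThatNeedsKerberos() {
    // real assertions would go here
  }
}
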
+ 3 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java

@@ -28,13 +28,13 @@ import java.util.Enumeration;
 import junit.framework.Assert;

 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenInfo;
@@ -416,8 +416,7 @@ public class TestDoAsEffectiveUser {
   public void testProxyWithToken() throws Exception {
     final Configuration conf = new Configuration(masterConf);
     TestTokenSecretManager sm = new TestTokenSecretManager();
-    conf
-        .set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
     UserGroupInformation.setConfiguration(conf);
     final Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
         .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
@@ -471,8 +470,7 @@ public class TestDoAsEffectiveUser {
   public void testTokenBySuperUser() throws Exception {
     TestTokenSecretManager sm = new TestTokenSecretManager();
     final Configuration newConf = new Configuration(masterConf);
-    newConf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
-        "kerberos");
+    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, newConf);
     UserGroupInformation.setConfiguration(newConf);
     final Server server = new RPC.Builder(newConf)
         .setProtocol(TestProtocol.class).setInstance(new TestImpl())

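Editor's note: the two hunks above that touch testProxyWithToken() and testTokenBySuperUser() replace the raw string write of HADOOP_SECURITY_AUTHENTICATION with the new typed SecurityUtil.setAuthenticationMethod(...) helper. The sketch below shows the old and new forms side by side; it is not taken from the patch, and it assumes the helper simply records the chosen method under the same configuration key.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;

// Illustrative sketch: the typed setter is expected to be equivalent to
// setting the "hadoop.security.authentication" key by hand.
public class AuthMethodConfigSketch {
  public static void main(String[] args) {
    Configuration byString = new Configuration();
    byString.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");

    Configuration byEnum = new Configuration();
    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, byEnum);

    // Both configurations should now select Kerberos authentication.
    System.out.println(byString.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION));
    System.out.println(byEnum.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION));

    // Whichever form is used, UGI still has to be re-initialized with it.
    UserGroupInformation.setConfiguration(byEnum);
  }
}
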
+ 52 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java

@@ -16,6 +16,8 @@
  */
 package org.apache.hadoop.security;

+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.*;
 import static org.junit.Assert.*;

 import java.io.IOException;
@@ -29,10 +31,19 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.mockito.Mockito;

 public class TestSecurityUtil {
+  @BeforeClass
+  public static void unsetKerberosRealm() {
+    // prevent failures if kinit-ed or on os x with no realm
+    System.setProperty("java.security.krb5.kdc", "");
+    System.setProperty("java.security.krb5.realm", "NONE");    
+  }
+
   @Test
   public void isOriginalTGTReturnsCorrectValues() {
     assertTrue(SecurityUtil.isTGSPrincipal
@@ -111,9 +122,7 @@ public class TestSecurityUtil {
   @Test
   public void testStartsWithIncorrectSettings() throws IOException {
     Configuration conf = new Configuration();
-    conf.set(
-        org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
-        "kerberos");
+    SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
     String keyTabKey="key";
     conf.set(keyTabKey, "");
     UserGroupInformation.setConfiguration(conf);
@@ -256,7 +265,7 @@ public class TestSecurityUtil {
     SecurityUtil.setTokenServiceUseIp(useIp);
     String serviceHost = useIp ? ip : host.toLowerCase();
     
-    Token token = new Token();
+    Token<?> token = new Token<TokenIdentifier>();
     Text service = new Text(serviceHost+":"+port);
     
     assertEquals(service, SecurityUtil.buildTokenService(addr));
@@ -345,4 +354,43 @@ public class TestSecurityUtil {
     NetUtils.addStaticResolution(staticHost, "255.255.255.255");
     verifyServiceAddr(staticHost, "255.255.255.255");
   }
+  
+  @Test
+  public void testGetAuthenticationMethod() {
+    Configuration conf = new Configuration();
+    // default is simple
+    conf.unset(HADOOP_SECURITY_AUTHENTICATION);
+    assertEquals(SIMPLE, SecurityUtil.getAuthenticationMethod(conf));
+    // simple
+    conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
+    assertEquals(SIMPLE, SecurityUtil.getAuthenticationMethod(conf));
+    // kerberos
+    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    assertEquals(KERBEROS, SecurityUtil.getAuthenticationMethod(conf));
+    // bad value
+    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kaboom");
+    String error = null;
+    try {
+      SecurityUtil.getAuthenticationMethod(conf);
+    } catch (Exception e) {
+      error = e.toString();
+    }
+    assertEquals("java.lang.IllegalArgumentException: " +
+                 "Invalid attribute value for " +
+                 HADOOP_SECURITY_AUTHENTICATION + " of kaboom", error);
+  }
+  
+  @Test
+  public void testSetAuthenticationMethod() {
+    Configuration conf = new Configuration();
+    // default
+    SecurityUtil.setAuthenticationMethod(null, conf);
+    assertEquals("simple", conf.get(HADOOP_SECURITY_AUTHENTICATION));
+    // simple
+    SecurityUtil.setAuthenticationMethod(SIMPLE, conf);
+    assertEquals("simple", conf.get(HADOOP_SECURITY_AUTHENTICATION));
+    // kerberos
+    SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
+    assertEquals("kerberos", conf.get(HADOOP_SECURITY_AUTHENTICATION));
+  }
 }

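Editor's note: the two new tests above pin down the behaviour of the SecurityUtil.getAuthenticationMethod/setAuthenticationMethod pair. The short sketch below exercises the same round trip outside JUnit; it is illustrative only, and the IllegalArgumentException branch relies on the message format asserted in testGetAuthenticationMethod above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;

// Illustrative round trip of the new typed accessors.
public class AuthMethodRoundTripSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // With a default Configuration this prints SIMPLE, matching the default
    // asserted in testGetAuthenticationMethod().
    System.out.println(SecurityUtil.getAuthenticationMethod(conf));

    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
    System.out.println(SecurityUtil.getAuthenticationMethod(conf)); // KERBEROS

    // A value the enum cannot parse surfaces as IllegalArgumentException.
    conf.set("hadoop.security.authentication", "kaboom");
    try {
      SecurityUtil.getAuthenticationMethod(conf);
    } catch (IllegalArgumentException iae) {
      System.out.println(iae.getMessage());
    }
  }
}
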
+ 74 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithExternalKdc.java

@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.security;
+
+import java.io.IOException;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import static org.apache.hadoop.security.SecurityUtilTestHelper.isExternalKdcRunning;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests kerberos keytab login using a user-specified external KDC
+ *
+ * To run, users must specify the following system properties:
+ *   externalKdc=true
+ *   java.security.krb5.conf
+ *   user.principal
+ *   user.keytab
+ */
+public class TestUGIWithExternalKdc {
+
+  @Before
+  public void testExternalKdcRunning() {
+    Assume.assumeTrue(isExternalKdcRunning());
+  }
+
+  @Test
+  public void testLogin() throws IOException {
+    String userPrincipal = System.getProperty("user.principal");
+    String userKeyTab = System.getProperty("user.keytab");
+    Assert.assertNotNull("User principal was not specified", userPrincipal);
+    Assert.assertNotNull("User keytab was not specified", userKeyTab);
+
+    Configuration conf = new Configuration();
+    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
+        "kerberos");
+    UserGroupInformation.setConfiguration(conf);
+
+    UserGroupInformation ugi = UserGroupInformation
+        .loginUserFromKeytabAndReturnUGI(userPrincipal, userKeyTab);
+
+    Assert.assertEquals(AuthenticationMethod.KERBEROS,
+        ugi.getAuthenticationMethod());
+    
+    try {
+      UserGroupInformation
+      .loginUserFromKeytabAndReturnUGI("bogus@EXAMPLE.COM", userKeyTab);
+      Assert.fail("Login should have failed");
+    } catch (Exception ex) {
+      ex.printStackTrace();
+    }
+  }
+
+}

+ 1 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithSecurityOn.java

@@ -21,7 +21,6 @@ import java.io.IOException;
 import junit.framework.Assert;

 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.junit.Assume;
 import org.junit.Before;
@@ -49,8 +48,7 @@ public class TestUGIWithSecurityOn {
     String user1keyTabFilepath = System.getProperty("kdc.resource.dir") 
         + "/keytabs/user1.keytab";
     Configuration conf = new Configuration();
-    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, 
-        "kerberos");
+    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
     UserGroupInformation.setConfiguration(conf);
     
     UserGroupInformation ugiNn = UserGroupInformation

+ 12 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java

@@ -324,7 +324,6 @@ public class TestUserGroupInformation {
     assertSame(secret, ugi.getCredentials().getSecretKey(secretKey));
   }

-  @SuppressWarnings("unchecked") // from Mockito mocks
   @Test
   public <T extends TokenIdentifier> void testGetCredsNotSame()
       throws Exception {
@@ -448,6 +447,18 @@ public class TestUserGroupInformation {
     assertEquals(2, otherSet.size());
   }

+  @Test
+  public void testTestAuthMethod() throws Exception {
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    // verify the reverse mappings works
+    for (AuthenticationMethod am : AuthenticationMethod.values()) {
+      if (am.getAuthMethod() != null) {
+        ugi.setAuthenticationMethod(am.getAuthMethod());
+        assertEquals(am, ugi.getAuthenticationMethod());
+      }
+    }
+  }
+  
   @Test
   public void testUGIAuthMethod() throws Exception {
     final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

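Editor's note: the new testTestAuthMethod() above exercises a reverse mapping on the UGI enum: every UserGroupInformation.AuthenticationMethod that is backed by a SASL-layer AuthMethod can be set on a UGI through that AuthMethod and read back. The sketch below repeats the same loop outside JUnit; it assumes, as the test does, that AuthenticationMethod.getAuthMethod() and the matching setAuthenticationMethod overload exist after this patch.

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;

// Illustrative sketch of the enum round trip verified by testTestAuthMethod().
public class AuthMethodMappingSketch {
  public static void main(String[] args) throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    for (AuthenticationMethod am : AuthenticationMethod.values()) {
      // Not every value is backed by a SASL-layer AuthMethod; skip the ones
      // that are not, exactly as the test does.
      if (am.getAuthMethod() != null) {
        ugi.setAuthenticationMethod(am.getAuthMethod());
        System.out.println(am + " <-> " + ugi.getAuthenticationMethod());
      }
    }
  }
}
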
+ 76 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringInterner.java

@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import static org.junit.Assert.*;
+import static org.apache.hadoop.util.StringInterner.*;
+
+import org.junit.Test;
+
+/**
+ * 
+ * Tests string interning {@link StringInterner}
+ */
+public class TestStringInterner {
+
+  /**
+   * Test different references are returned for any of string 
+   * instances that are equal to each other but not interned.
+   */
+  @Test
+  public void testNoIntern() {
+    String literalABC = "ABC";
+    String substringABC = "ABCDE".substring(0,3);
+    String heapABC = new String("ABC");
+    assertNotSame(literalABC, substringABC);
+    assertNotSame(literalABC, heapABC);
+    assertNotSame(substringABC, heapABC);
+  }
+  
+  
+  /**
+   * Test the same strong reference is returned for any 
+   * of string instances that are equal to each other.
+   */
+  @Test
+  public void testStrongIntern() {
+    String strongInternLiteralABC = strongIntern("ABC");
+    String strongInternSubstringABC = strongIntern("ABCDE".substring(0,3));
+    String strongInternHeapABC = strongIntern(new String("ABC"));
+    assertSame(strongInternLiteralABC, strongInternSubstringABC);
+    assertSame(strongInternLiteralABC, strongInternHeapABC);
+    assertSame(strongInternSubstringABC, strongInternHeapABC);
+  }
+  
+  
+  /**
+   * Test the same weak reference is returned for any 
+   * of string instances that are equal to each other.
+   */
+  @Test
+  public void testWeakIntern() {
+    String weakInternLiteralABC = weakIntern("ABC");
+    String weakInternSubstringABC = weakIntern("ABCDE".substring(0,3));
+    String weakInternHeapABC = weakIntern(new String("ABC"));
+    assertSame(weakInternLiteralABC, weakInternSubstringABC);
+    assertSame(weakInternLiteralABC, weakInternHeapABC);
+    assertSame(weakInternSubstringABC, weakInternHeapABC);
+  }
+
+}

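Editor's note: TestStringInterner above covers the StringInterner utility added to hadoop-common in this change. The sketch below shows the basic usage pattern, with equal strings collapsing to one canonical instance; the example strings are arbitrary and the class name is invented.

import org.apache.hadoop.util.StringInterner;

// Illustrative usage of the new interning helpers.
public class StringInternerSketch {
  public static void main(String[] args) {
    String a = StringInterner.weakIntern(new String("mapreduce.job.name"));
    String b = StringInterner.weakIntern("mapreduce.job" + ".name");
    System.out.println(a == b);   // true: same weakly-referenced canonical instance

    String c = StringInterner.strongIntern(new String("ACTIVE"));
    String d = StringInterner.strongIntern("ACTIVE");
    System.out.println(c == d);   // true: same canonical instance, kept via a strong reference
  }
}
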
+ 1 - 0
hadoop-common-project/hadoop-common/src/test/proto/test.proto

@@ -19,6 +19,7 @@
 option java_package = "org.apache.hadoop.ipc.protobuf";
 option java_outer_classname = "TestProtos";
 option java_generate_equals_and_hash = true;
+package hadoop.common;

 message EmptyRequestProto {
 }

+ 1 - 0
hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto

@@ -19,6 +19,7 @@ option java_package = "org.apache.hadoop.ipc.protobuf";
 option java_outer_classname = "TestRpcServiceProtos";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
+package hadoop.common;

 import "test.proto";


+ 2 - 2
hadoop-common-project/hadoop-common/src/test/resources/testConf.xml

@@ -591,11 +591,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-test -\[ezd\] &lt;path&gt;:\s+If file exists, has zero length, is a directory( )*</expected-output>
+          <expected-output>^-test -\[defsz\] &lt;path&gt;:\sAnswer various questions about &lt;path&gt;, with result via exit status.</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*then return 0, else return 1.( )*</expected-output>
+          <expected-output>^( |\t)*else, return 1.( )*</expected-output>
         </comparator>
       </comparators>
     </test>

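Editor's note: the testConf.xml expectations above track the reworked help text for `hadoop fs -test`, which now takes -d, -e, -f, -s and -z and reports its answer purely through the exit status. Below is a hedged sketch of driving the same command programmatically; the paths are made up, and FsShell/ToolRunner are standard Hadoop classes rather than part of this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

// Illustrative: exit code 0 means "yes", 1 means "no", matching the new
// "-test -[defsz] <path>" help text asserted in testConf.xml.
public class FsTestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    int isDir = ToolRunner.run(conf, new FsShell(),
        new String[] { "-test", "-d", "/tmp/some/dir" });
    System.out.println("directory? exit=" + isDir);

    int isZeroLen = ToolRunner.run(conf, new FsShell(),
        new String[] { "-test", "-z", "/tmp/some/file" });
    System.out.println("zero length? exit=" + isZeroLen);
  }
}
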
+ 103 - 13
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -103,18 +103,11 @@ Trunk (Unreleased)
     HDFS-3510.  Editlog pre-allocation is performed prior to writing edits
     HDFS-3510.  Editlog pre-allocation is performed prior to writing edits
     to avoid partial edits case disk out of space.(Colin McCabe via suresh)
     to avoid partial edits case disk out of space.(Colin McCabe via suresh)
 
 
-    HDFS-3573. Supply NamespaceInfo when instantiating JournalManagers (todd)
-
     HDFS-3630 Modify TestPersistBlocks to use both flush and hflush  (sanjay)
     HDFS-3630 Modify TestPersistBlocks to use both flush and hflush  (sanjay)
 
 
     HDFS-3768. Exception in TestJettyHelper is incorrect. 
     HDFS-3768. Exception in TestJettyHelper is incorrect. 
     (Eli Reisman via jghoman)
     (Eli Reisman via jghoman)
 
 
-    HDFS-3695. Genericize format() to non-file JournalManagers. (todd)
-
-    HDFS-3789. JournalManager#format() should be able to throw IOException
-    (Ivan Kelly via todd)
-
     HDFS-3723. Add support -h, -help to all the commands. (Jing Zhao via
     HDFS-3723. Add support -h, -help to all the commands. (Jing Zhao via
     suresh)
     suresh)
 
 
@@ -150,6 +143,22 @@ Trunk (Unreleased)
     HDFS-4052. BlockManager#invalidateWork should print log outside the lock.
     HDFS-4052. BlockManager#invalidateWork should print log outside the lock.
     (Jing Zhao via suresh)
     (Jing Zhao via suresh)
 
 
+    HDFS-4110. Refine a log printed in JNStorage. (Liang Xie via suresh)
+
+    HDFS-4124. Refactor INodeDirectory#getExistingPathINodes() to enable 
+    returning more than INode array. (Jing Zhao via suresh)
+
+    HDFS-4129. Add utility methods to dump NameNode in memory tree for 
+    testing. (szetszwo via suresh)
+
+    HDFS-4151. Change the methods in FSDirectory to pass INodesInPath instead
+    of INode[] as a parameter. (szetszwo)
+
+    HDFS-4152. Add a new class BlocksMapUpdateInfo for the parameter in
+    INode.collectSubtreeBlocksAndClear(..). (Jing Zhao via szetszwo)
+
+    HDFS-4153. Add START_MSG/SHUTDOWN_MSG for JournalNode. (liang xie via atm)
+
   OPTIMIZATIONS
   OPTIMIZATIONS
 
 
   BUG FIXES
   BUG FIXES
@@ -216,9 +225,6 @@ Trunk (Unreleased)
     HDFS-3614. Revert unused MiniDFSCluster constructor from HDFS-3049.
     HDFS-3614. Revert unused MiniDFSCluster constructor from HDFS-3049.
     (acmurthy via eli)
     (acmurthy via eli)
 
 
-    HDFS-3625. Fix TestBackupNode by properly initializing edit log during
-    startup. (Junping Du via todd)
-
     HDFS-3792. Fix two findbugs introduced by HDFS-3695 (todd)
     HDFS-3792. Fix two findbugs introduced by HDFS-3695 (todd)
 
 
     HDFS-3827. TestHASafeMode#assertSafemode method should be made static.
     HDFS-3827. TestHASafeMode#assertSafemode method should be made static.
@@ -230,6 +236,18 @@ Trunk (Unreleased)
     HADOOP-8158. Interrupting hadoop fs -put from the command line
     HADOOP-8158. Interrupting hadoop fs -put from the command line
     causes a LeaseExpiredException. (daryn via harsh)
     causes a LeaseExpiredException. (daryn via harsh)
 
 
+    HDFS-2434. TestNameNodeMetrics.testCorruptBlock fails intermittently.
+    (Jing Zhao via suresh)
+
+    HDFS-4067. TestUnderReplicatedBlocks intermittently fails due to 
+    ReplicaAlreadyExistsException. (Jing Zhao via suresh)
+
+    HDFS-4115. TestHDFSCLI.testAll fails one test due to number format.
+    (Trevor Robinson via suresh)
+
+    HDFS-4106. BPServiceActor#lastHeartbeat, lastBlockReport and
+    lastDeletedReport should be volatile. (Jing Zhao via suresh)
+
   BREAKDOWN OF HDFS-3077 SUBTASKS
   BREAKDOWN OF HDFS-3077 SUBTASKS
 
 
     HDFS-3077. Quorum-based protocol for reading and writing edit logs.
     HDFS-3077. Quorum-based protocol for reading and writing edit logs.
@@ -336,6 +354,9 @@ Release 2.0.3-alpha - Unreleased
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES
 
 
+    HDFS-4122. Cleanup HDFS logs and reduce the size of logged messages.
+    (suresh)
+
   NEW FEATURES
   NEW FEATURES
 
 
     HDFS-2656. Add libwebhdfs, a pure C client based on WebHDFS.
     HDFS-2656. Add libwebhdfs, a pure C client based on WebHDFS.
@@ -346,6 +367,8 @@ Release 2.0.3-alpha - Unreleased
 
 
     HDFS-4059. Add number of stale DataNodes to metrics. (Jing Zhao via suresh)
     HDFS-4059. Add number of stale DataNodes to metrics. (Jing Zhao via suresh)
 
 
+    HDFS-4155. libhdfs implementation of hsync API (Liang Xie via todd)
+
   IMPROVEMENTS
   IMPROVEMENTS
   
   
     HDFS-3925. Prettify PipelineAck#toString() for printing to a log
     HDFS-3925. Prettify PipelineAck#toString() for printing to a log
@@ -410,6 +433,31 @@ Release 2.0.3-alpha - Unreleased
 
 
     HDFS-4099. Clean up replication code and add more javadoc. (szetszwo)
     HDFS-4099. Clean up replication code and add more javadoc. (szetszwo)
 
 
+    HDFS-4107. Add utility methods for casting INode to INodeFile and
+    INodeFileUnderConstruction. (szetszwo)
+
+    HDFS-4112. A few improvements on INodeDirectory include adding a utility
+    method for casting; avoiding creation of new empty lists; cleaning up 
+    some code and rewriting some javadoc. (szetszwo)
+
+    HDFS-4121. Add namespace declarations in hdfs .proto files for languages 
+    other than java. (Binglin Chang via suresh)
+
+    HDFS-3573. Supply NamespaceInfo when instantiating JournalManagers (todd)
+
+    HDFS-3695. Genericize format() to non-file JournalManagers. (todd)
+
+    HDFS-3789. JournalManager#format() should be able to throw IOException
+    (Ivan Kelly via todd)
+
+    HDFS-3916. libwebhdfs testing code cleanup. (Jing Zhao via suresh)
+
+    HDFS-4143. Change blocks to private in INodeFile and renames isLink() to
+    isSymlink() in INode. (szetszwo)
+
+    HDFS-4046. Rename ChecksumTypeProto enum NULL since it is illegal in
+    C/C++. (Binglin Chang via suresh)
+
   OPTIMIZATIONS
   OPTIMIZATIONS
 
 
   BUG FIXES
   BUG FIXES
@@ -482,7 +530,43 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4072. On file deletion remove corresponding blocks pending
     HDFS-4072. On file deletion remove corresponding blocks pending
     replications. (Jing Zhao via suresh)
     replications. (Jing Zhao via suresh)
 
 
-    HDFS-4022. Replication not happening for appended block. (Vinay via umamahesh)
+    HDFS-4022. Replication not happening for appended block.
+    (Vinay via umamahesh)
+
+    HDFS-3948. Do not use hflush in TestWebHDFS.testNamenodeRestart() since the
+    out stream returned by WebHdfsFileSystem does not support it. (Jing Zhao
+    via szetszwo)
+
+    HDFS-3616. Fix a ConcurrentModificationException bug that BP actor threads
+    may not be shutdown properly in DataNode.  (Jing Zhao via szetszwo)
+
+    HDFS-4127. Log message is not correct in case of short of replica.
+    (Junping Du via suresh)
+
+    HADOOP-8994. TestDFSShell creates file named "noFileHere", making further
+    tests hard to understand (Andy Isaacson via daryn)
+
+    HDFS-3809. Make BKJM use protobufs for all serialization with ZK.
+    (Ivan Kelly via umamahesh)
+
+    HDFS-3804.  TestHftpFileSystem fails intermittently with JDK7
+    (Trevor Robinson via daryn)
+
+    HDFS-4132. When libwebhdfs is not enabled, nativeMiniDfsClient frees
+    uninitialized memory (Colin Patrick McCabe via todd)
+
+    HDFS-1331. dfs -test should work like /bin/test (Andy Isaacson via daryn)
+
+    HDFS-3979. For hsync, datanode should wait for the local sync to complete
+    before sending ack. (Lars Hofhansl via szetszwo)
+
+    HDFS-3625. Fix TestBackupNode by properly initializing edit log during
+    startup. (Junping Du via todd)
+
+    HDFS-4138. BackupNode startup fails due to uninitialized edit log.
+    (Kihwal Lee via shv)
+
+    HDFS-3810. Implement format() for BKJM (Ivan Kelly via umamahesh)
 
 
 Release 2.0.2-alpha - 2012-09-07 
 Release 2.0.2-alpha - 2012-09-07 
 
 
@@ -726,6 +810,9 @@ Release 2.0.2-alpha - 2012-09-07
     HDFS-3907. Allow multiple users for local block readers. (eli)
     HDFS-3907. Allow multiple users for local block readers. (eli)
 
 
     HDFS-3910. DFSTestUtil#waitReplication should timeout. (eli)
     HDFS-3910. DFSTestUtil#waitReplication should timeout. (eli)
+    
+    HDFS-3920. libwebdhfs string processing and using strerror consistently
+    to handle all errors. (Jing Zhao via suresh)
 
 
   OPTIMIZATIONS
   OPTIMIZATIONS
 
 
@@ -1007,8 +1094,6 @@ Release 2.0.2-alpha - 2012-09-07
     HDFS-3828. Block Scanner rescans blocks too frequently.
     HDFS-3828. Block Scanner rescans blocks too frequently.
     (Andy Isaacson via eli)
     (Andy Isaacson via eli)
 
 
-    HDFS-3809. Make BKJM use protobufs for all serialization with ZK.(Ivan Kelly via umamahesh)
-
     HDFS-3895. hadoop-client must include commons-cli (tucu)
     HDFS-3895. hadoop-client must include commons-cli (tucu)
 
 
     HDFS-2757. Cannot read a local block that's being written to when
     HDFS-2757. Cannot read a local block that's being written to when
@@ -1870,6 +1955,8 @@ Release 0.23.5 - UNRELEASED
 
 
   OPTIMIZATIONS
   OPTIMIZATIONS
 
 
+    HDFS-4075. Reduce recommissioning overhead (Kihwal Lee via daryn)
+
   BUG FIXES
   BUG FIXES
 
 
     HDFS-3829. TestHftpURLTimeouts fails intermittently with JDK7  (Trevor
     HDFS-3829. TestHftpURLTimeouts fails intermittently with JDK7  (Trevor
@@ -1881,6 +1968,9 @@ Release 0.23.5 - UNRELEASED
     HDFS-3224. Bug in check for DN re-registration with different storage ID
     HDFS-3224. Bug in check for DN re-registration with different storage ID
     (jlowe)
     (jlowe)
 
 
+    HDFS-4090. getFileChecksum() result incompatible when called against
+    zero-byte files. (Kihwal Lee via daryn)
+
 Release 0.23.4 - UNRELEASED
 Release 0.23.4 - UNRELEASED
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES

+ 115 - 58
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java

@@ -39,6 +39,7 @@ import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.AsyncCallback.StringCallback;
 import org.apache.zookeeper.AsyncCallback.StringCallback;
+import org.apache.zookeeper.ZKUtil;
 
 
 import java.util.Collection;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Collections;
@@ -46,6 +47,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.io.IOException;
 import java.io.IOException;
 
 
 import java.net.URI;
 import java.net.URI;
@@ -142,13 +144,16 @@ public class BookKeeperJournalManager implements JournalManager {
   private final Configuration conf;
   private final Configuration conf;
   private final BookKeeper bkc;
   private final BookKeeper bkc;
   private final CurrentInprogress ci;
   private final CurrentInprogress ci;
+  private final String basePath;
   private final String ledgerPath;
   private final String ledgerPath;
+  private final String versionPath;
   private final MaxTxId maxTxId;
   private final MaxTxId maxTxId;
   private final int ensembleSize;
   private final int ensembleSize;
   private final int quorumSize;
   private final int quorumSize;
   private final String digestpw;
   private final String digestpw;
   private final CountDownLatch zkConnectLatch;
   private final CountDownLatch zkConnectLatch;
   private final NamespaceInfo nsInfo;
   private final NamespaceInfo nsInfo;
+  private boolean initialized = false;
   private LedgerHandle currentLedger = null;
   private LedgerHandle currentLedger = null;
 
 
   /**
   /**
@@ -160,16 +165,16 @@ public class BookKeeperJournalManager implements JournalManager {
     this.nsInfo = nsInfo;
     this.nsInfo = nsInfo;
 
 
     String zkConnect = uri.getAuthority().replace(";", ",");
     String zkConnect = uri.getAuthority().replace(";", ",");
-    String zkPath = uri.getPath();
+    basePath = uri.getPath();
     ensembleSize = conf.getInt(BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
     ensembleSize = conf.getInt(BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
                                BKJM_BOOKKEEPER_ENSEMBLE_SIZE_DEFAULT);
                                BKJM_BOOKKEEPER_ENSEMBLE_SIZE_DEFAULT);
     quorumSize = conf.getInt(BKJM_BOOKKEEPER_QUORUM_SIZE,
     quorumSize = conf.getInt(BKJM_BOOKKEEPER_QUORUM_SIZE,
                              BKJM_BOOKKEEPER_QUORUM_SIZE_DEFAULT);
                              BKJM_BOOKKEEPER_QUORUM_SIZE_DEFAULT);
 
 
-    ledgerPath = zkPath + "/ledgers";
-    String maxTxIdPath = zkPath + "/maxtxid";
-    String currentInprogressNodePath = zkPath + "/CurrentInprogress";
-    String versionPath = zkPath + "/version";
+    ledgerPath = basePath + "/ledgers";
+    String maxTxIdPath = basePath + "/maxtxid";
+    String currentInprogressNodePath = basePath + "/CurrentInprogress";
+    versionPath = basePath + "/version";
     digestpw = conf.get(BKJM_BOOKKEEPER_DIGEST_PW,
     digestpw = conf.get(BKJM_BOOKKEEPER_DIGEST_PW,
                         BKJM_BOOKKEEPER_DIGEST_PW_DEFAULT);
                         BKJM_BOOKKEEPER_DIGEST_PW_DEFAULT);
 
 
@@ -180,47 +185,7 @@ public class BookKeeperJournalManager implements JournalManager {
       if (!zkConnectLatch.await(6000, TimeUnit.MILLISECONDS)) {
       if (!zkConnectLatch.await(6000, TimeUnit.MILLISECONDS)) {
         throw new IOException("Error connecting to zookeeper");
         throw new IOException("Error connecting to zookeeper");
       }
       }
-      if (zkc.exists(zkPath, false) == null) {
-        zkc.create(zkPath, new byte[] {'0'},
-            Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-      }
 
 
-      Stat versionStat = zkc.exists(versionPath, false);
-      if (versionStat != null) {
-        byte[] d = zkc.getData(versionPath, false, versionStat);
-        VersionProto.Builder builder = VersionProto.newBuilder();
-        TextFormat.merge(new String(d, UTF_8), builder);
-        if (!builder.isInitialized()) {
-          throw new IOException("Invalid/Incomplete data in znode");
-        }
-        VersionProto vp = builder.build();
-
-        // There's only one version at the moment
-        assert vp.getLayoutVersion() == BKJM_LAYOUT_VERSION;
-
-        NamespaceInfo readns = PBHelper.convert(vp.getNamespaceInfo());
-
-        if (nsInfo.getNamespaceID() != readns.getNamespaceID() ||
-            !nsInfo.clusterID.equals(readns.getClusterID()) ||
-            !nsInfo.getBlockPoolID().equals(readns.getBlockPoolID())) {
-          String err = String.format("Environment mismatch. Running process %s"
-                                     +", stored in ZK %s", nsInfo, readns);
-          LOG.error(err);
-          throw new IOException(err);
-        }
-      } else if (nsInfo.getNamespaceID() > 0) {
-        VersionProto.Builder builder = VersionProto.newBuilder();
-        builder.setNamespaceInfo(PBHelper.convert(nsInfo))
-          .setLayoutVersion(BKJM_LAYOUT_VERSION);
-        byte[] data = TextFormat.printToString(builder.build()).getBytes(UTF_8);
-        zkc.create(versionPath, data,
-                   Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-      }
-
-      if (zkc.exists(ledgerPath, false) == null) {
-        zkc.create(ledgerPath, new byte[] {'0'},
-            Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-      }
       prepareBookKeeperEnv();
       prepareBookKeeperEnv();
       bkc = new BookKeeper(new ClientConfiguration(), zkc);
       bkc = new BookKeeper(new ClientConfiguration(), zkc);
     } catch (KeeperException e) {
     } catch (KeeperException e) {
@@ -244,6 +209,7 @@ public class BookKeeperJournalManager implements JournalManager {
         BKJM_ZK_LEDGERS_AVAILABLE_PATH_DEFAULT);
         BKJM_ZK_LEDGERS_AVAILABLE_PATH_DEFAULT);
     final CountDownLatch zkPathLatch = new CountDownLatch(1);
     final CountDownLatch zkPathLatch = new CountDownLatch(1);
 
 
+    final AtomicBoolean success = new AtomicBoolean(false);
     StringCallback callback = new StringCallback() {
     StringCallback callback = new StringCallback() {
       @Override
       @Override
       public void processResult(int rc, String path, Object ctx, String name) {
       public void processResult(int rc, String path, Object ctx, String name) {
@@ -251,22 +217,23 @@ public class BookKeeperJournalManager implements JournalManager {
             || KeeperException.Code.NODEEXISTS.intValue() == rc) {
             || KeeperException.Code.NODEEXISTS.intValue() == rc) {
           LOG.info("Successfully created bookie available path : "
           LOG.info("Successfully created bookie available path : "
               + zkAvailablePath);
               + zkAvailablePath);
-          zkPathLatch.countDown();
+          success.set(true);
         } else {
         } else {
           KeeperException.Code code = KeeperException.Code.get(rc);
           KeeperException.Code code = KeeperException.Code.get(rc);
-          LOG
-              .error("Error : "
+          LOG.error("Error : "
                   + KeeperException.create(code, path).getMessage()
                   + KeeperException.create(code, path).getMessage()
                   + ", failed to create bookie available path : "
                   + ", failed to create bookie available path : "
                   + zkAvailablePath);
                   + zkAvailablePath);
         }
         }
+        zkPathLatch.countDown();
       }
       }
     };
     };
     ZkUtils.createFullPathOptimistic(zkc, zkAvailablePath, new byte[0],
     ZkUtils.createFullPathOptimistic(zkc, zkAvailablePath, new byte[0],
         Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, callback, null);
         Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, callback, null);
 
 
     try {
     try {
-      if (!zkPathLatch.await(zkc.getSessionTimeout(), TimeUnit.MILLISECONDS)) {
+      if (!zkPathLatch.await(zkc.getSessionTimeout(), TimeUnit.MILLISECONDS)
+          || !success.get()) {
         throw new IOException("Couldn't create bookie available path :"
         throw new IOException("Couldn't create bookie available path :"
             + zkAvailablePath + ", timed out " + zkc.getSessionTimeout()
             + zkAvailablePath + ", timed out " + zkc.getSessionTimeout()
             + " millis");
             + " millis");
@@ -281,19 +248,101 @@ public class BookKeeperJournalManager implements JournalManager {
 
 
   @Override
   @Override
   public void format(NamespaceInfo ns) throws IOException {
   public void format(NamespaceInfo ns) throws IOException {
-    // Currently, BKJM automatically formats itself when first accessed.
-    // TODO: change over to explicit formatting so that the admin can
-    // clear out the BK storage when reformatting a cluster.
-    LOG.info("Not formatting " + this + " - BKJM does not currently " +
-        "support reformatting. If it has not been used before, it will" +
-        "be formatted automatically upon first use.");
+    try {
+      // delete old info
+      Stat baseStat = null;
+      Stat ledgerStat = null;
+      if ((baseStat = zkc.exists(basePath, false)) != null) {
+        if ((ledgerStat = zkc.exists(ledgerPath, false)) != null) {
+          for (EditLogLedgerMetadata l : getLedgerList(true)) {
+            try {
+              bkc.deleteLedger(l.getLedgerId());
+            } catch (BKException.BKNoSuchLedgerExistsException bke) {
+              LOG.warn("Ledger " + l.getLedgerId() + " does not exist;"
+                       + " Cannot delete.");
+            }
+          }
+        }
+        ZKUtil.deleteRecursive(zkc, basePath);
+      }
+
+      // should be clean now.
+      zkc.create(basePath, new byte[] {'0'},
+          Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+
+      VersionProto.Builder builder = VersionProto.newBuilder();
+      builder.setNamespaceInfo(PBHelper.convert(ns))
+        .setLayoutVersion(BKJM_LAYOUT_VERSION);
+
+      byte[] data = TextFormat.printToString(builder.build()).getBytes(UTF_8);
+      zkc.create(versionPath, data,
+                 Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+
+      zkc.create(ledgerPath, new byte[] {'0'},
+                 Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+    } catch (KeeperException ke) {
+      LOG.error("Error accessing zookeeper to format", ke);
+      throw new IOException("Error accessing zookeeper to format", ke);
+    } catch (InterruptedException ie) {
+      Thread.currentThread().interrupt();
+      throw new IOException("Interrupted during format", ie);
+    } catch (BKException bke) {
+      throw new IOException("Error cleaning up ledgers during format", bke);
+    }
   }
   }
   
   
   @Override
   @Override
   public boolean hasSomeData() throws IOException {
   public boolean hasSomeData() throws IOException {
-    // Don't confirm format on BKJM, since format() is currently a
-    // no-op anyway
-    return false;
+    try {
+      return zkc.exists(basePath, false) != null;
+    } catch (KeeperException ke) {
+      throw new IOException("Couldn't contact zookeeper", ke);
+    } catch (InterruptedException ie) {
+      Thread.currentThread().interrupt();
+      throw new IOException("Interrupted while checking for data", ie);
+    }
+  }
+
+  synchronized private void checkEnv() throws IOException {
+    if (!initialized) {
+      try {
+        Stat versionStat = zkc.exists(versionPath, false);
+        if (versionStat == null) {
+          throw new IOException("Environment not initialized. "
+                                +"Have you forgotten to format?");
+        }
+        byte[] d = zkc.getData(versionPath, false, versionStat);
+
+        VersionProto.Builder builder = VersionProto.newBuilder();
+        TextFormat.merge(new String(d, UTF_8), builder);
+        if (!builder.isInitialized()) {
+          throw new IOException("Invalid/Incomplete data in znode");
+        }
+        VersionProto vp = builder.build();
+
+        // There's only one version at the moment
+        assert vp.getLayoutVersion() == BKJM_LAYOUT_VERSION;
+
+        NamespaceInfo readns = PBHelper.convert(vp.getNamespaceInfo());
+
+        if (nsInfo.getNamespaceID() != readns.getNamespaceID() ||
+            !nsInfo.clusterID.equals(readns.getClusterID()) ||
+            !nsInfo.getBlockPoolID().equals(readns.getBlockPoolID())) {
+          String err = String.format("Environment mismatch. Running process %s"
+                                     +", stored in ZK %s", nsInfo, readns);
+          LOG.error(err);
+          throw new IOException(err);
+        }
+
+        ci.init();
+        initialized = true;
+      } catch (KeeperException ke) {
+        throw new IOException("Cannot access ZooKeeper", ke);
+      } catch (InterruptedException ie) {
+        Thread.currentThread().interrupt();
+        throw new IOException("Interrupted while checking environment", ie);
+      }
+    }
   }
   }
 
 
   /**
   /**
@@ -307,6 +356,8 @@ public class BookKeeperJournalManager implements JournalManager {
    */
    */
   @Override
   @Override
   public EditLogOutputStream startLogSegment(long txId) throws IOException {
   public EditLogOutputStream startLogSegment(long txId) throws IOException {
+    checkEnv();
+
     if (txId <= maxTxId.get()) {
     if (txId <= maxTxId.get()) {
       throw new IOException("We've already seen " + txId
       throw new IOException("We've already seen " + txId
           + ". A new stream cannot be created with it");
           + ". A new stream cannot be created with it");
@@ -384,6 +435,8 @@ public class BookKeeperJournalManager implements JournalManager {
   @Override
   @Override
   public void finalizeLogSegment(long firstTxId, long lastTxId)
   public void finalizeLogSegment(long firstTxId, long lastTxId)
       throws IOException {
       throws IOException {
+    checkEnv();
+
     String inprogressPath = inprogressZNode(firstTxId);
     String inprogressPath = inprogressZNode(firstTxId);
     try {
     try {
       Stat inprogressStat = zkc.exists(inprogressPath, false);
       Stat inprogressStat = zkc.exists(inprogressPath, false);
@@ -537,6 +590,8 @@ public class BookKeeperJournalManager implements JournalManager {
 
 
   @Override
   @Override
   public void recoverUnfinalizedSegments() throws IOException {
   public void recoverUnfinalizedSegments() throws IOException {
+    checkEnv();
+
     synchronized (this) {
     synchronized (this) {
       try {
       try {
         List<String> children = zkc.getChildren(ledgerPath, false);
         List<String> children = zkc.getChildren(ledgerPath, false);
@@ -589,6 +644,8 @@ public class BookKeeperJournalManager implements JournalManager {
    @Override
    public void purgeLogsOlderThan(long minTxIdToKeep)
        throws IOException {
+    checkEnv();
+
      for (EditLogLedgerMetadata l : getLedgerList(false)) {
        if (l.getLastTxId() < minTxIdToKeep) {
          try {

+ 10 - 8
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java

@@ -56,6 +56,9 @@ class CurrentInprogress {
    CurrentInprogress(ZooKeeper zkc, String lockpath) throws IOException {
      this.currentInprogressNode = lockpath;
      this.zkc = zkc;
+  }
+
+  void init() throws IOException {
      try {
        Stat isCurrentInprogressNodeExists = zkc.exists(currentInprogressNode,
                                                        false);
@@ -96,15 +99,14 @@ class CurrentInprogress {
            this.versionNumberForPermission);
      } catch (KeeperException e) {
        throw new IOException("Exception when setting the data "
-          + "[layout version number,hostname,inprogressNode path]= [" + content
-          + "] to CurrentInprogress. ", e);
+          + "[" + content + "] to CurrentInprogress. ", e);
      } catch (InterruptedException e) {
        throw new IOException("Interrupted while setting the data "
-          + "[layout version number,hostname,inprogressNode path]= [" + content
-          + "] to CurrentInprogress", e);
+          + "[" + content + "] to CurrentInprogress", e);
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Updated data[" + content + "] to CurrentInprogress");
      }
-    LOG.info("Updated data[layout version number,hostname,inprogressNode path]"
-        + "= [" + content + "] to CurrentInprogress");
    }

    /**
@@ -136,7 +138,7 @@ class CurrentInprogress {
        }
        return builder.build().getPath();
      } else {
-      LOG.info("No data available in CurrentInprogress");
+      LOG.debug("No data available in CurrentInprogress");
      }
      return null;
    }
@@ -152,7 +154,7 @@ class CurrentInprogress {
        throw new IOException(
            "Interrupted when setting the data to CurrentInprogress node", e);
      }
-    LOG.info("Cleared the data from CurrentInprogress");
+    LOG.debug("Cleared the data from CurrentInprogress");
    }

  }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/proto/bkjournal.proto

@@ -22,6 +22,7 @@
  option java_package = "org.apache.hadoop.contrib.bkjournal";
  option java_outer_classname = "BKJournalProtos";
  option java_generate_equals_and_hash = true;
+package hadoop.hdfs;

  import "hdfs.proto";


+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperConfiguration.java

@@ -149,6 +149,7 @@ public class TestBookKeeperConfiguration {
      bkjm = new BookKeeperJournalManager(conf,
          URI.create("bookkeeper://" + HOSTPORT + "/hdfsjournal-WithBKPath"),
          nsi);
+    bkjm.format(nsi);
      Assert.assertNotNull("Bookie available path : " + bkAvailablePath
          + " doesn't exists", zkc.exists(bkAvailablePath, false));
    }
@@ -166,6 +167,7 @@ public class TestBookKeeperConfiguration {
      bkjm = new BookKeeperJournalManager(conf,
          URI.create("bookkeeper://" + HOSTPORT + "/hdfsjournal-DefaultBKPath"),
          nsi);
+    bkjm.format(nsi);
      Assert.assertNotNull("Bookie available path : " + BK_ROOT_PATH
          + " doesn't exists", zkc.exists(BK_ROOT_PATH, false));
    }

+ 99 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java

@@ -29,8 +29,16 @@ import org.mockito.Mockito;
  import java.io.IOException;
  import java.net.URI;
  import java.util.List;
+import java.util.ArrayList;
  import java.util.Random;

+import java.util.concurrent.Executors;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
  import org.apache.hadoop.conf.Configuration;

  import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
@@ -90,6 +98,7 @@ public class TestBookKeeperJournalManager {
      NamespaceInfo nsi = newNSInfo();
      BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
          BKJMUtil.createJournalURI("/hdfsjournal-simplewrite"), nsi);
+    bkjm.format(nsi);

      EditLogOutputStream out = bkjm.startLogSegment(1);
      for (long i = 1 ; i <= 100; i++) {
@@ -112,6 +121,8 @@ public class TestBookKeeperJournalManager {
 
 
      BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
          BKJMUtil.createJournalURI("/hdfsjournal-txncount"), nsi);
+    bkjm.format(nsi);
+
      EditLogOutputStream out = bkjm.startLogSegment(1);
      for (long i = 1 ; i <= 100; i++) {
        FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
@@ -130,6 +141,7 @@ public class TestBookKeeperJournalManager {
      NamespaceInfo nsi = newNSInfo();
      BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
          BKJMUtil.createJournalURI("/hdfsjournal-gaps"), nsi);
+    bkjm.format(nsi);

      long txid = 1;
      for (long i = 0; i < 3; i++) {
@@ -167,6 +179,7 @@ public class TestBookKeeperJournalManager {
      NamespaceInfo nsi = newNSInfo();
      BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
          BKJMUtil.createJournalURI("/hdfsjournal-inprogressAtEnd"), nsi);
+    bkjm.format(nsi);

      long txid = 1;
      for (long i = 0; i < 3; i++) {
@@ -208,6 +221,7 @@ public class TestBookKeeperJournalManager {
      NamespaceInfo nsi = newNSInfo();
      BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
          BKJMUtil.createJournalURI("/hdfsjournal-restartFrom1"), nsi);
+    bkjm.format(nsi);

      long txid = 1;
      long start = txid;
@@ -266,6 +280,7 @@ public class TestBookKeeperJournalManager {
 
 
      BookKeeperJournalManager bkjm1 = new BookKeeperJournalManager(conf,
          BKJMUtil.createJournalURI("/hdfsjournal-dualWriter"), nsi);
+    bkjm1.format(nsi);

      BookKeeperJournalManager bkjm2 = new BookKeeperJournalManager(conf,
          BKJMUtil.createJournalURI("/hdfsjournal-dualWriter"), nsi);
@@ -288,6 +303,7 @@ public class TestBookKeeperJournalManager {
      BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
          BKJMUtil.createJournalURI("/hdfsjournal-simpleread"),
          nsi);
+    bkjm.format(nsi);

      final long numTransactions = 10000;
      EditLogOutputStream out = bkjm.startLogSegment(1);
@@ -315,6 +331,7 @@ public class TestBookKeeperJournalManager {
      BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
          BKJMUtil.createJournalURI("/hdfsjournal-simplerecovery"),
          nsi);
+    bkjm.format(nsi);

      EditLogOutputStream out = bkjm.startLogSegment(1);
      for (long i = 1 ; i <= 100; i++) {
@@ -365,6 +382,7 @@ public class TestBookKeeperJournalManager {
        BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
            BKJMUtil.createJournalURI("/hdfsjournal-allbookiefailure"),
            nsi);
+      bkjm.format(nsi);
        EditLogOutputStream out = bkjm.startLogSegment(txid);

        for (long i = 1 ; i <= 3; i++) {
@@ -450,6 +468,7 @@ public class TestBookKeeperJournalManager {
        BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
            BKJMUtil.createJournalURI("/hdfsjournal-onebookiefailure"),
            nsi);
+      bkjm.format(nsi);

        EditLogOutputStream out = bkjm.startLogSegment(txid);
        for (long i = 1 ; i <= 3; i++) {
@@ -500,6 +519,7 @@ public class TestBookKeeperJournalManager {
      NamespaceInfo nsi = newNSInfo();
      BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
                                                                   nsi);
+    bkjm.format(nsi);

      EditLogOutputStream out = bkjm.startLogSegment(1);
      for (long i = 1; i <= 100; i++) {
@@ -541,6 +561,7 @@ public class TestBookKeeperJournalManager {
      NamespaceInfo nsi = newNSInfo();
      BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
                                                                   nsi);
+    bkjm.format(nsi);

      EditLogOutputStream out = bkjm.startLogSegment(1);
      for (long i = 1; i <= 100; i++) {
@@ -583,6 +604,7 @@ public class TestBookKeeperJournalManager {
      NamespaceInfo nsi = newNSInfo();
      BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
                                                                   nsi);
+    bkjm.format(nsi);

      EditLogOutputStream out = bkjm.startLogSegment(1);
      for (long i = 1; i <= 100; i++) {
@@ -622,6 +644,7 @@ public class TestBookKeeperJournalManager {
      NamespaceInfo nsi = newNSInfo();
      BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
                                                                   nsi);
+    bkjm.format(nsi);

      EditLogOutputStream out = bkjm.startLogSegment(1);
      for (long i = 1; i <= 100; i++) {
@@ -669,6 +692,7 @@ public class TestBookKeeperJournalManager {
      NamespaceInfo nsi = newNSInfo();
      BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
                                                                   nsi);
+    bkjm.format(nsi);

      try {
        // start new inprogress log segment with txid=1
@@ -697,6 +721,81 @@ public class TestBookKeeperJournalManager {
      }
    }

+  private enum ThreadStatus {
+    COMPLETED, GOODEXCEPTION, BADEXCEPTION;
+  };
+
+  /**
+   * Tests that concurrent calls to format will still allow one to succeed.
+   */
+  @Test
+  public void testConcurrentFormat() throws Exception {
+    final URI uri = BKJMUtil.createJournalURI("/hdfsjournal-concurrentformat");
+    final NamespaceInfo nsi = newNSInfo();
+
+    // populate with data first
+    BookKeeperJournalManager bkjm
+      = new BookKeeperJournalManager(conf, uri, nsi);
+    bkjm.format(nsi);
+    for (int i = 1; i < 100*2; i += 2) {
+      bkjm.startLogSegment(i);
+      bkjm.finalizeLogSegment(i, i+1);
+    }
+    bkjm.close();
+
+    final int numThreads = 40;
+    List<Callable<ThreadStatus>> threads
+      = new ArrayList<Callable<ThreadStatus>>();
+    final CyclicBarrier barrier = new CyclicBarrier(numThreads);
+
+    for (int i = 0; i < numThreads; i++) {
+      threads.add(new Callable<ThreadStatus>() {
+          public ThreadStatus call() {
+            BookKeeperJournalManager bkjm = null;
+            try {
+              bkjm = new BookKeeperJournalManager(conf, uri, nsi);
+              barrier.await();
+              bkjm.format(nsi);
+              return ThreadStatus.COMPLETED;
+            } catch (IOException ioe) {
+              LOG.info("Exception formatting ", ioe);
+              return ThreadStatus.GOODEXCEPTION;
+            } catch (InterruptedException ie) {
+              LOG.error("Interrupted. Something is broken", ie);
+              Thread.currentThread().interrupt();
+              return ThreadStatus.BADEXCEPTION;
+            } catch (Exception e) {
+              LOG.error("Some other bad exception", e);
+              return ThreadStatus.BADEXCEPTION;
+            } finally {
+              if (bkjm != null) {
+                try {
+                  bkjm.close();
+                } catch (IOException ioe) {
+                  LOG.error("Error closing journal manager", ioe);
+                }
+              }
+            }
+          }
+        });
+    }
+    ExecutorService service = Executors.newFixedThreadPool(numThreads);
+    List<Future<ThreadStatus>> statuses = service.invokeAll(threads, 60,
+                                                      TimeUnit.SECONDS);
+    int numCompleted = 0;
+    for (Future<ThreadStatus> s : statuses) {
+      assertTrue(s.isDone());
+      assertTrue("Thread threw invalid exception",
+          s.get() == ThreadStatus.COMPLETED
+          || s.get() == ThreadStatus.GOODEXCEPTION);
+      if (s.get() == ThreadStatus.COMPLETED) {
+        numCompleted++;
+      }
+    }
+    LOG.info("Completed " + numCompleted + " formats");
+    assertTrue("No thread managed to complete formatting", numCompleted > 0);
+  }
+
    private String startAndFinalizeLogSegment(BookKeeperJournalManager bkjm,
        int startTxid, int endTxid) throws IOException, KeeperException,
        InterruptedException {

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestCurrentInprogress.java

@@ -118,6 +118,7 @@ public class TestCurrentInprogress {
    public void testReadShouldReturnTheZnodePathAfterUpdate() throws Exception {
      String data = "inprogressNode";
      CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
+    ci.init();
      ci.update(data);
      String inprogressNodePath = ci.read();
      assertEquals("Not returning inprogressZnode", "inprogressNode",
@@ -131,6 +132,7 @@ public class TestCurrentInprogress {
    @Test
    public void testReadShouldReturnNullAfterClear() throws Exception {
      CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
+    ci.init();
      ci.update("myInprogressZnode");
      ci.read();
      ci.clear();
@@ -146,6 +148,7 @@ public class TestCurrentInprogress {
    public void testUpdateShouldFailWithIOEIfVersionNumberChangedAfterRead()
        throws Exception {
      CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
+    ci.init();
      ci.update("myInprogressZnode");
      assertEquals("Not returning myInprogressZnode", "myInprogressZnode", ci
          .read());
@@ -154,4 +157,4 @@ public class TestCurrentInprogress {
      ci.update("myInprogressZnode");
    }

-}
+}

+ 318 - 181
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.c

@@ -15,28 +15,43 @@
  * See the License for the specific language governing permissions and
  * See the License for the specific language governing permissions and
  * limitations under the License.
  * limitations under the License.
  */
  */
+
 #include <stdlib.h>
 #include <stdlib.h>
 #include <string.h>
 #include <string.h>
 #include <curl/curl.h>
 #include <curl/curl.h>
-#include <pthread.h>
+
 #include "hdfs_http_client.h"
 #include "hdfs_http_client.h"
 
 
 static pthread_mutex_t curlInitMutex = PTHREAD_MUTEX_INITIALIZER;
 static pthread_mutex_t curlInitMutex = PTHREAD_MUTEX_INITIALIZER;
 static volatile int curlGlobalInited = 0;
 static volatile int curlGlobalInited = 0;
 
 
-ResponseBuffer initResponseBuffer() {
-    ResponseBuffer info = (ResponseBuffer) calloc(1, sizeof(ResponseBufferInternal));
+const char *hdfs_strerror(int errnoval)
+{
+    const char *msg = NULL;
+    if (errnoval < 0 || errnoval >= sys_nerr) {
+        msg = "Invalid Error Code";
+    } else if (sys_errlist == NULL) {
+        msg = "Unknown Error";
+    } else {
+        msg = sys_errlist[errnoval];
+    }
+    return msg;
+}
+
+int initResponseBuffer(struct ResponseBuffer **buffer)
+{
+    struct ResponseBuffer *info = NULL;
+    int ret = 0;
+    info = calloc(1, sizeof(struct ResponseBuffer));
     if (!info) {
     if (!info) {
-        fprintf(stderr, "Cannot allocate memory for responseInfo\n");
-        return NULL;
+        ret = ENOMEM;
     }
     }
-    info->remaining = 0;
-    info->offset = 0;
-    info->content = NULL;
-    return info;
+    *buffer = info;
+    return ret;
 }
 }
 
 
-void freeResponseBuffer(ResponseBuffer buffer) {
+void freeResponseBuffer(struct ResponseBuffer *buffer)
+{
     if (buffer) {
     if (buffer) {
         if (buffer->content) {
         if (buffer->content) {
             free(buffer->content);
             free(buffer->content);
@@ -46,8 +61,9 @@ void freeResponseBuffer(ResponseBuffer buffer) {
     }
     }
 }
 }
 
 
-void freeResponse(Response resp)  {
-    if(resp) {
+void freeResponse(struct Response *resp)
+{
+    if (resp) {
         freeResponseBuffer(resp->body);
         freeResponseBuffer(resp->body);
         freeResponseBuffer(resp->header);
         freeResponseBuffer(resp->header);
         free(resp);
         free(resp);
@@ -55,21 +71,30 @@ void freeResponse(Response resp)  {
     }
     }
 }
 }
 
 
-/* Callback for allocating local buffer and reading data to local buffer */
-static size_t writefunc(void *ptr, size_t size, size_t nmemb, ResponseBuffer rbuffer) {
+/** 
+ * Callback used by libcurl for allocating local buffer and 
+ * reading data to local buffer
+ */
+static size_t writefunc(void *ptr, size_t size,
+                        size_t nmemb, struct ResponseBuffer *rbuffer)
+{
+    void *temp = NULL;
     if (size * nmemb < 1) {
     if (size * nmemb < 1) {
         return 0;
         return 0;
     }
     }
     if (!rbuffer) {
     if (!rbuffer) {
-        fprintf(stderr, "In writefunc, ResponseBuffer is NULL.\n");
-        return -1;
+        fprintf(stderr,
+                "ERROR: ResponseBuffer is NULL for the callback writefunc.\n");
+        return 0;
     }
     }
     
     
     if (rbuffer->remaining < size * nmemb) {
     if (rbuffer->remaining < size * nmemb) {
-        rbuffer->content = realloc(rbuffer->content, rbuffer->offset + size * nmemb + 1);
-        if (rbuffer->content == NULL) {
-            return -1;
+        temp = realloc(rbuffer->content, rbuffer->offset + size * nmemb + 1);
+        if (temp == NULL) {
+            fprintf(stderr, "ERROR: fail to realloc in callback writefunc.\n");
+            return 0;
         }
         }
+        rbuffer->content = temp;
         rbuffer->remaining = size * nmemb;
         rbuffer->remaining = size * nmemb;
     }
     }
     memcpy(rbuffer->content + rbuffer->offset, ptr, size * nmemb);
     memcpy(rbuffer->content + rbuffer->offset, ptr, size * nmemb);
@@ -80,67 +105,84 @@ static size_t writefunc(void *ptr, size_t size, size_t nmemb, ResponseBuffer rbu
 }
 }
 
 
 /**
 /**
- * Callback for reading data to buffer provided by user, 
+ * Callback used by libcurl for reading data into buffer provided by user,
  * thus no need to reallocate buffer.
  * thus no need to reallocate buffer.
  */
  */
-static size_t writefunc_withbuffer(void *ptr, size_t size, size_t nmemb, ResponseBuffer rbuffer) {
+static size_t writeFuncWithUserBuffer(void *ptr, size_t size,
+                                   size_t nmemb, struct ResponseBuffer *rbuffer)
+{
+    size_t toCopy = 0;
     if (size * nmemb < 1) {
     if (size * nmemb < 1) {
         return 0;
         return 0;
     }
     }
     if (!rbuffer || !rbuffer->content) {
     if (!rbuffer || !rbuffer->content) {
-        fprintf(stderr, "In writefunc_withbuffer, the buffer provided by user is NULL.\n");
+        fprintf(stderr,
+                "ERROR: buffer to read is NULL for the "
+                "callback writeFuncWithUserBuffer.\n");
         return 0;
         return 0;
     }
     }
     
     
-    size_t toCopy = rbuffer->remaining < (size * nmemb) ? rbuffer->remaining : (size * nmemb);
+    toCopy = rbuffer->remaining < (size * nmemb) ?
+                            rbuffer->remaining : (size * nmemb);
     memcpy(rbuffer->content + rbuffer->offset, ptr, toCopy);
     memcpy(rbuffer->content + rbuffer->offset, ptr, toCopy);
     rbuffer->offset += toCopy;
     rbuffer->offset += toCopy;
     rbuffer->remaining -= toCopy;
     rbuffer->remaining -= toCopy;
     return toCopy;
     return toCopy;
 }
 }
 
 
-//callback for writing data to remote peer
-static size_t readfunc(void *ptr, size_t size, size_t nmemb, void *stream) {
+/**
+ * Callback used by libcurl for writing data to remote peer
+ */
+static size_t readfunc(void *ptr, size_t size, size_t nmemb, void *stream)
+{
+    struct webhdfsBuffer *wbuffer = NULL;
     if (size * nmemb < 1) {
     if (size * nmemb < 1) {
-        fprintf(stderr, "In readfunc callback: size * nmemb == %ld\n", size * nmemb);
         return 0;
         return 0;
     }
     }
-    webhdfsBuffer *wbuffer = (webhdfsBuffer *) stream;
     
     
+    wbuffer = stream;
     pthread_mutex_lock(&wbuffer->writeMutex);
     pthread_mutex_lock(&wbuffer->writeMutex);
     while (wbuffer->remaining == 0) {
     while (wbuffer->remaining == 0) {
         /*
         /*
-         * the current remainning bytes to write is 0,
-         * check whether need to finish the transfer
+         * The current remainning bytes to write is 0,
+         * check closeFlag to see whether need to finish the transfer.
          * if yes, return 0; else, wait
          * if yes, return 0; else, wait
          */
          */
-        if (wbuffer->closeFlag) {
-            //we can close the transfer now
+        if (wbuffer->closeFlag) { // We can close the transfer now
+            //For debug
             fprintf(stderr, "CloseFlag is set, ready to close the transfer\n");
             fprintf(stderr, "CloseFlag is set, ready to close the transfer\n");
             pthread_mutex_unlock(&wbuffer->writeMutex);
             pthread_mutex_unlock(&wbuffer->writeMutex);
             return 0;
             return 0;
         } else {
         } else {
-            // len == 0 indicates that user's buffer has been transferred
+            // remaining == 0 but closeFlag is not set
+            // indicates that user's buffer has been transferred
             pthread_cond_signal(&wbuffer->transfer_finish);
             pthread_cond_signal(&wbuffer->transfer_finish);
-            pthread_cond_wait(&wbuffer->newwrite_or_close, &wbuffer->writeMutex);
+            pthread_cond_wait(&wbuffer->newwrite_or_close,
+                                    &wbuffer->writeMutex);
         }
         }
     }
     }
     
     
-    if(wbuffer->remaining > 0 && !wbuffer->closeFlag) {
-        size_t copySize = wbuffer->remaining < size * nmemb ? wbuffer->remaining : size * nmemb;
+    if (wbuffer->remaining > 0 && !wbuffer->closeFlag) {
+        size_t copySize = wbuffer->remaining < size * nmemb ?
+                                wbuffer->remaining : size * nmemb;
         memcpy(ptr, wbuffer->wbuffer + wbuffer->offset, copySize);
         memcpy(ptr, wbuffer->wbuffer + wbuffer->offset, copySize);
         wbuffer->offset += copySize;
         wbuffer->offset += copySize;
         wbuffer->remaining -= copySize;
         wbuffer->remaining -= copySize;
         pthread_mutex_unlock(&wbuffer->writeMutex);
         pthread_mutex_unlock(&wbuffer->writeMutex);
         return copySize;
         return copySize;
     } else {
     } else {
-        fprintf(stderr, "Webhdfs buffer is %ld, it should be a positive value!\n", wbuffer->remaining);
+        fprintf(stderr, "ERROR: webhdfsBuffer's remaining is %ld, "
+                "it should be a positive value!\n", wbuffer->remaining);
         pthread_mutex_unlock(&wbuffer->writeMutex);
         pthread_mutex_unlock(&wbuffer->writeMutex);
         return 0;
         return 0;
     }
     }
 }
 }
 
 
-static void initCurlGlobal() {
+/**
+ * Initialize the global libcurl environment
+ */
+static void initCurlGlobal()
+{
     if (!curlGlobalInited) {
     if (!curlGlobalInited) {
         pthread_mutex_lock(&curlInitMutex);
         pthread_mutex_lock(&curlInitMutex);
         if (!curlGlobalInited) {
         if (!curlGlobalInited) {
@@ -151,202 +193,297 @@ static void initCurlGlobal() {
     }
     }
 }
 }
 
 
-static Response launchCmd(char *url, enum HttpHeader method, enum Redirect followloc) {
-    CURL *curl;
-    CURLcode res;
-    Response resp;
+/**
+ * Launch simple commands (commands without file I/O) and return response
+ *
+ * @param url       Target URL
+ * @param method    HTTP method (GET/PUT/POST)
+ * @param followloc Whether or not need to set CURLOPT_FOLLOWLOCATION
+ * @param response  Response from remote service
+ * @return 0 for success and non-zero value to indicate error
+ */
+static int launchCmd(const char *url, enum HttpHeader method,
+                     enum Redirect followloc, struct Response **response)
+{
+    CURL *curl = NULL;
+    CURLcode curlCode;
+    int ret = 0;
+    struct Response *resp = NULL;
     
     
-    resp = (Response) calloc(1, sizeof(*resp));
+    resp = calloc(1, sizeof(struct Response));
     if (!resp) {
     if (!resp) {
-        return NULL;
+        return ENOMEM;
+    }
+    ret = initResponseBuffer(&(resp->body));
+    if (ret) {
+        goto done;
+    }
+    ret = initResponseBuffer(&(resp->header));
+    if (ret) {
+        goto done;
     }
     }
-    resp->body = initResponseBuffer();
-    resp->header = initResponseBuffer();
     initCurlGlobal();
     initCurlGlobal();
-    curl = curl_easy_init();                     /* get a curl handle */
-    if(curl) {
-        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writefunc);
-        curl_easy_setopt(curl, CURLOPT_WRITEDATA, resp->body);
-        curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, writefunc);
-        curl_easy_setopt(curl, CURLOPT_WRITEHEADER, resp->header);
-        curl_easy_setopt(curl, CURLOPT_URL, url);       /* specify target URL */
-        switch(method) {
-            case GET:
-                break;
-            case PUT:
-                curl_easy_setopt(curl,CURLOPT_CUSTOMREQUEST,"PUT");
-                break;
-            case POST:
-                curl_easy_setopt(curl,CURLOPT_CUSTOMREQUEST,"POST");
-                break;
-            case DELETE:
-                curl_easy_setopt(curl,CURLOPT_CUSTOMREQUEST,"DELETE");
-                break;
-            default:
-                fprintf(stderr, "\nHTTP method not defined\n");
-                exit(EXIT_FAILURE);
-        }
-        if(followloc == YES) {
-            curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
-        }
-        
-        res = curl_easy_perform(curl);                 /* Now run the curl handler */
-        if(res != CURLE_OK) {
-            fprintf(stderr, "preform the URL %s failed\n", url);
-            return NULL;
-        }
+    curl = curl_easy_init();
+    if (!curl) {
+        ret = ENOMEM;       // curl_easy_init does not return error code,
+                            // and most of its errors are caused by malloc()
+        fprintf(stderr, "ERROR in curl_easy_init.\n");
+        goto done;
+    }
+    /* Set callback function for reading data from remote service */
+    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writefunc);
+    curl_easy_setopt(curl, CURLOPT_WRITEDATA, resp->body);
+    curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, writefunc);
+    curl_easy_setopt(curl, CURLOPT_WRITEHEADER, resp->header);
+    curl_easy_setopt(curl, CURLOPT_URL, url);
+    switch(method) {
+        case GET:
+            break;
+        case PUT:
+            curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "PUT");
+            break;
+        case POST:
+            curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "POST");
+            break;
+        case DELETE:
+            curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "DELETE");
+            break;
+        default:
+            ret = EINVAL;
+            fprintf(stderr, "ERROR: Invalid HTTP method\n");
+            goto done;
+    }
+    if (followloc == YES) {
+        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
+    }
+    /* Now run the curl handler */
+    curlCode = curl_easy_perform(curl);
+    if (curlCode != CURLE_OK) {
+        ret = EIO;
+        fprintf(stderr, "ERROR: preform the URL %s failed, <%d>: %s\n",
+                url, curlCode, curl_easy_strerror(curlCode));
+    }
+done:
+    if (curl != NULL) {
         curl_easy_cleanup(curl);
         curl_easy_cleanup(curl);
     }
     }
-    return resp;
+    if (ret) {
+        free(resp);
+        resp = NULL;
+    }
+    *response = resp;
+    return ret;
 }
 }
 
 
-static Response launchRead_internal(char *url, enum HttpHeader method, enum Redirect followloc, Response resp) {
+/**
+ * Launch the read request. The request is sent to the NameNode and then 
+ * redirected to corresponding DataNode
+ *
+ * @param url   The URL for the read request
+ * @param resp  The response containing the buffer provided by user
+ * @return 0 for success and non-zero value to indicate error
+ */
+static int launchReadInternal(const char *url, struct Response* resp)
+{
+    CURL *curl;
+    CURLcode curlCode;
+    int ret = 0;
+    
     if (!resp || !resp->body || !resp->body->content) {
     if (!resp || !resp->body || !resp->body->content) {
-        fprintf(stderr, "The user provided buffer should not be NULL!\n");
-        return NULL;
+        fprintf(stderr,
+                "ERROR: invalid user-provided buffer!\n");
+        return EINVAL;
     }
     }
     
     
-    CURL *curl;
-    CURLcode res;
     initCurlGlobal();
     initCurlGlobal();
-    curl = curl_easy_init();                     /* get a curl handle */
-    if(curl) {
-        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writefunc_withbuffer);
-        curl_easy_setopt(curl, CURLOPT_WRITEDATA, resp->body);
-        curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, writefunc);
-        curl_easy_setopt(curl, CURLOPT_WRITEHEADER, resp->header);
-        curl_easy_setopt(curl, CURLOPT_URL, url);       /* specify target URL */
-        if(followloc == YES) {
-            curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
-        }
-        
-        res = curl_easy_perform(curl);                 /* Now run the curl handler */
-        if(res != CURLE_OK && res != CURLE_PARTIAL_FILE) {
-            fprintf(stderr, "preform the URL %s failed\n", url);
-            return NULL;
-        }
-        curl_easy_cleanup(curl);
+    /* get a curl handle */
+    curl = curl_easy_init();
+    if (!curl) {
+        fprintf(stderr, "ERROR in curl_easy_init.\n");
+        return ENOMEM;
     }
     }
-    return resp;
-
+    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writeFuncWithUserBuffer);
+    curl_easy_setopt(curl, CURLOPT_WRITEDATA, resp->body);
+    curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, writefunc);
+    curl_easy_setopt(curl, CURLOPT_WRITEHEADER, resp->header);
+    curl_easy_setopt(curl, CURLOPT_URL, url);
+    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
+    
+    curlCode = curl_easy_perform(curl);
+    if (curlCode != CURLE_OK && curlCode != CURLE_PARTIAL_FILE) {
+        ret = EIO;
+        fprintf(stderr, "ERROR: preform the URL %s failed, <%d>: %s\n",
+                url, curlCode, curl_easy_strerror(curlCode));
+    }
+    
+    curl_easy_cleanup(curl);
+    return ret;
 }
 }
 
 
-static Response launchWrite(const char *url, enum HttpHeader method, webhdfsBuffer *uploadBuffer) {
+/**
+ * The function does the write operation by connecting to a DataNode. 
+ * The function keeps the connection with the DataNode until 
+ * the closeFlag is set. Whenever the current data has been sent out, 
+ * the function blocks waiting for further input from user or close.
+ *
+ * @param url           URL of the remote DataNode
+ * @param method        PUT for create and POST for append
+ * @param uploadBuffer  Buffer storing user's data to write
+ * @param response      Response from remote service
+ * @return 0 for success and non-zero value to indicate error
+ */
+static int launchWrite(const char *url, enum HttpHeader method,
+                       struct webhdfsBuffer *uploadBuffer,
+                       struct Response **response)
+{
+    CURLcode curlCode;
+    struct Response* resp = NULL;
+    struct curl_slist *chunk = NULL;
+    CURL *curl = NULL;
+    int ret = 0;
+    
     if (!uploadBuffer) {
     if (!uploadBuffer) {
-        fprintf(stderr, "upload buffer is NULL!\n");
-        errno = EINVAL;
-        return NULL;
+        fprintf(stderr, "ERROR: upload buffer is NULL!\n");
+        return EINVAL;
     }
     }
+    
     initCurlGlobal();
     initCurlGlobal();
-    CURLcode res;
-    Response response = (Response) calloc(1, sizeof(*response));
-    if (!response) {
-        fprintf(stderr, "failed to allocate memory for response\n");
-        return NULL;
-    }
-    response->body = initResponseBuffer();
-    response->header = initResponseBuffer();
+    resp = calloc(1, sizeof(struct Response));
+    if (!resp) {
+        return ENOMEM;
+    }
+    ret = initResponseBuffer(&(resp->body));
+    if (ret) {
+        goto done;
+    }
+    ret = initResponseBuffer(&(resp->header));
+    if (ret) {
+        goto done;
+    }
     
     
-    //connect to the datanode in order to create the lease in the namenode
-    CURL *curl = curl_easy_init();
+    // Connect to the datanode in order to create the lease in the namenode
+    curl = curl_easy_init();
     if (!curl) {
     if (!curl) {
-        fprintf(stderr, "Failed to initialize the curl handle.\n");
-        return NULL;
+        fprintf(stderr, "ERROR: failed to initialize the curl handle.\n");
+        return ENOMEM;
     }
     }
     curl_easy_setopt(curl, CURLOPT_URL, url);
     curl_easy_setopt(curl, CURLOPT_URL, url);
     
     
-    if(curl) {
-        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writefunc);
-        curl_easy_setopt(curl, CURLOPT_WRITEDATA, response->body);
-        curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, writefunc);
-        curl_easy_setopt(curl, CURLOPT_WRITEHEADER, response->header);
-        curl_easy_setopt(curl, CURLOPT_READFUNCTION, readfunc);
-        curl_easy_setopt(curl, CURLOPT_READDATA, uploadBuffer);
-        curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);
-        curl_easy_setopt(curl, CURLOPT_VERBOSE, 1);
-        
-        struct curl_slist *chunk = NULL;
-        chunk = curl_slist_append(chunk, "Transfer-Encoding: chunked");
-        res = curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
-        chunk = curl_slist_append(chunk, "Expect:");
-        res = curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
-        
-        switch(method) {
-            case GET:
-                break;
-            case PUT:
-                curl_easy_setopt(curl,CURLOPT_CUSTOMREQUEST,"PUT");
-                break;
-            case POST:
-                curl_easy_setopt(curl,CURLOPT_CUSTOMREQUEST,"POST");
-                break;
-            case DELETE:
-                curl_easy_setopt(curl,CURLOPT_CUSTOMREQUEST,"DELETE");
-                break;
-            default:
-                fprintf(stderr, "\nHTTP method not defined\n");
-                exit(EXIT_FAILURE);
-        }
-        res = curl_easy_perform(curl);
+    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writefunc);
+    curl_easy_setopt(curl, CURLOPT_WRITEDATA, resp->body);
+    curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, writefunc);
+    curl_easy_setopt(curl, CURLOPT_WRITEHEADER, resp->header);
+    curl_easy_setopt(curl, CURLOPT_READFUNCTION, readfunc);
+    curl_easy_setopt(curl, CURLOPT_READDATA, uploadBuffer);
+    curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);
+    
+    chunk = curl_slist_append(chunk, "Transfer-Encoding: chunked");
+    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
+    chunk = curl_slist_append(chunk, "Expect:");
+    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
+    
+    switch(method) {
+        case PUT:
+            curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "PUT");
+            break;
+        case POST:
+            curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "POST");
+            break;
+        default:
+            ret = EINVAL;
+            fprintf(stderr, "ERROR: Invalid HTTP method\n");
+            goto done;
+    }
+    curlCode = curl_easy_perform(curl);
+    if (curlCode != CURLE_OK) {
+        ret = EIO;
+        fprintf(stderr, "ERROR: preform the URL %s failed, <%d>: %s\n",
+                url, curlCode, curl_easy_strerror(curlCode));
+    }
+    
+done:
+    if (chunk != NULL) {
         curl_slist_free_all(chunk);
         curl_slist_free_all(chunk);
+    }
+    if (curl != NULL) {
         curl_easy_cleanup(curl);
         curl_easy_cleanup(curl);
     }
     }
-    
-    return response;
+    if (ret) {
+        free(resp);
+        resp = NULL;
+    }
+    *response = resp;
+    return ret;
 }
 }
 
 
-Response launchMKDIR(char *url) {
-    return launchCmd(url, PUT, NO);
+int launchMKDIR(const char *url, struct Response **resp)
+{
+    return launchCmd(url, PUT, NO, resp);
 }
 }
 
 
-Response launchRENAME(char *url) {
-    return launchCmd(url, PUT, NO);
+int launchRENAME(const char *url, struct Response **resp)
+{
+    return launchCmd(url, PUT, NO, resp);
 }
 }
 
 
-Response launchGFS(char *url) {
-    return launchCmd(url, GET, NO);
+int launchGFS(const char *url, struct Response **resp)
+{
+    return launchCmd(url, GET, NO, resp);
 }
 }
 
 
-Response launchLS(char *url) {
-    return launchCmd(url, GET, NO);
+int launchLS(const char *url, struct Response **resp)
+{
+    return launchCmd(url, GET, NO, resp);
 }
 }
 
 
-Response launchCHMOD(char *url) {
-    return launchCmd(url, PUT, NO);
+int launchCHMOD(const char *url, struct Response **resp)
+{
+    return launchCmd(url, PUT, NO, resp);
 }
 }
 
 
-Response launchCHOWN(char *url) {
-    return launchCmd(url, PUT, NO);
+int launchCHOWN(const char *url, struct Response **resp)
+{
+    return launchCmd(url, PUT, NO, resp);
 }
 }
 
 
-Response launchDELETE(char *url) {
-    return launchCmd(url, DELETE, NO);
+int launchDELETE(const char *url, struct Response **resp)
+{
+    return launchCmd(url, DELETE, NO, resp);
 }
 }
 
 
-Response launchOPEN(char *url, Response resp) {
-    return launchRead_internal(url, GET, YES, resp);
+int launchOPEN(const char *url, struct Response* resp)
+{
+    return launchReadInternal(url, resp);
 }
 }
 
 
-Response launchUTIMES(char *url) {
-    return launchCmd(url, PUT, NO);
+int launchUTIMES(const char *url, struct Response **resp)
+{
+    return launchCmd(url, PUT, NO, resp);
 }
 }
 
 
-Response launchNnWRITE(char *url) {
-    return launchCmd(url, PUT, NO);
+int launchNnWRITE(const char *url, struct Response **resp)
+{
+    return launchCmd(url, PUT, NO, resp);
 }
 }
 
 
-Response launchNnAPPEND(char *url) {
-    return launchCmd(url, POST, NO);
+int launchNnAPPEND(const char *url, struct Response **resp)
+{
+    return launchCmd(url, POST, NO, resp);
 }
 }
 
 
-Response launchDnWRITE(const char *url, webhdfsBuffer *buffer) {
-    return launchWrite(url, PUT, buffer);
+int launchDnWRITE(const char *url, struct webhdfsBuffer *buffer,
+                               struct Response **resp)
+{
+    return launchWrite(url, PUT, buffer, resp);
 }
 }
 
 
-Response launchDnAPPEND(const char *url, webhdfsBuffer *buffer) {
-    return launchWrite(url, POST, buffer);
+int launchDnAPPEND(const char *url, struct webhdfsBuffer *buffer,
+                                struct Response **resp)
+{
+    return launchWrite(url, POST, buffer, resp);
 }
 }
 
 
-Response launchSETREPLICATION(char *url) {
-    return launchCmd(url, PUT, NO);
+int launchSETREPLICATION(const char *url, struct Response **resp)
+{
+    return launchCmd(url, PUT, NO, resp);
 }
 }

+ 230 - 44
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.h

@@ -26,6 +26,7 @@
  #include <pthread.h> /* for pthread_t */
  #include <unistd.h> /* for size_t */

+/** enum indicating the type of hdfs stream */
  enum hdfsStreamType
  {
      UNINITIALIZED = 0,
@@ -36,28 +37,39 @@ enum hdfsStreamType
  /**
   * webhdfsBuffer - used for hold the data for read/write from/to http connection
   */
-typedef struct {
-    const char *wbuffer;  // The user's buffer for uploading
-    size_t remaining;     // Length of content
-    size_t offset;        // offset for reading
-    int openFlag;         // Check whether the hdfsOpenFile has been called before
-    int closeFlag;        // Whether to close the http connection for writing
-    pthread_mutex_t writeMutex; // Synchronization between the curl and hdfsWrite threads
-    pthread_cond_t newwrite_or_close; // Transferring thread waits for this condition
-                                      // when there is no more content for transferring in the buffer
-    pthread_cond_t transfer_finish; // Condition used to indicate finishing transferring (one buffer)
-} webhdfsBuffer;
+struct webhdfsBuffer {
+    const char *wbuffer;  /* The user's buffer for uploading */
+    size_t remaining;     /* Length of content */
+    size_t offset;        /* offset for reading */
+    /* Check whether the hdfsOpenFile has been called before */
+    int openFlag;
+    /* Whether to close the http connection for writing */
+    int closeFlag;
+    /* Synchronization between the curl and hdfsWrite threads */
+    pthread_mutex_t writeMutex;
+    /* 
+     * Transferring thread waits for this condition
+     * when there is no more content for transferring in the buffer
+     */
+    pthread_cond_t newwrite_or_close;
+    /* Condition used to indicate finishing transferring (one buffer) */
+    pthread_cond_t transfer_finish;
+};
 
 
+/** File handle for webhdfs */
  struct webhdfsFileHandle {
-    char *absPath;
-    int bufferSize;
-    short replication;
-    tSize blockSize;
-    char *datanode;
-    webhdfsBuffer *uploadBuffer;
+    char *absPath;        /* Absolute path of file */
+    int bufferSize;       /* Size of buffer */
+    short replication;    /* Number of replication */
+    tSize blockSize;      /* Block size */
+    char *datanode;       /* URL of the DataNode */
+    /* webhdfsBuffer handle used to store the upload data */
+    struct webhdfsBuffer *uploadBuffer;
+    /* The thread used for data transferring */
      pthread_t connThread;
  };

+/** Type of http header */
  enum HttpHeader {
      GET,
      PUT,
@@ -65,44 +77,218 @@ enum HttpHeader {
      DELETE
  };

+/** Whether to redirect */
  enum Redirect {
      YES,
      NO
  };

-typedef struct {
+/** Buffer used for holding response */
+struct ResponseBuffer {
      char *content;
      size_t remaining;
      size_t offset;
-} ResponseBufferInternal;
-typedef ResponseBufferInternal *ResponseBuffer;
+};
 
 
  /**
   * The response got through webhdfs
   */
-typedef struct {
-    ResponseBuffer body;
-    ResponseBuffer header;
-}* Response;
-
-ResponseBuffer initResponseBuffer();
-void freeResponseBuffer(ResponseBuffer buffer);
-void freeResponse(Response resp);
-
-Response launchMKDIR(char *url);
-Response launchRENAME(char *url);
-Response launchCHMOD(char *url);
-Response launchGFS(char *url);
-Response launchLS(char *url);
-Response launchDELETE(char *url);
-Response launchCHOWN(char *url);
-Response launchOPEN(char *url, Response resp);
-Response launchUTIMES(char *url);
-Response launchNnWRITE(char *url);
-
-Response launchDnWRITE(const char *url, webhdfsBuffer *buffer);
-Response launchNnAPPEND(char *url);
-Response launchSETREPLICATION(char *url);
-Response launchDnAPPEND(const char *url, webhdfsBuffer *buffer);
+struct Response {
+    struct ResponseBuffer *body;
+    struct ResponseBuffer *header;
+};
+
+/**
+ * Create and initialize a ResponseBuffer
+ *
+ * @param buffer Pointer pointing to new created ResponseBuffer handle
+ * @return 0 for success, non-zero value to indicate error
+ */
+int initResponseBuffer(struct ResponseBuffer **buffer) __attribute__ ((warn_unused_result));
+
+/**
+ * Free the given ResponseBuffer
+ *
+ * @param buffer The ResponseBuffer to free
+ */
+void freeResponseBuffer(struct ResponseBuffer *buffer);
+
+/**
+ * Free the given Response
+ *
+ * @param resp The Response to free
+ */
+void freeResponse(struct Response *resp);
+
+/**
+ * Send the MKDIR request to NameNode using the given URL. 
+ * The NameNode will execute the operation and return the result as response.
+ *
+ * @param url The URL for MKDIR operation
+ * @param response Response handle to store response returned from the NameNode
+ * @return 0 for success, non-zero value to indicate error
+ */
+int launchMKDIR(const char *url,
+                struct Response **response) __attribute__ ((warn_unused_result));
+
+/**
+ * Send the RENAME request to NameNode using the given URL.
+ * The NameNode will execute the operation and return the result as response.
+ *
+ * @param url The URL for RENAME operation
+ * @param response Response handle to store response returned from the NameNode
+ * @return 0 for success, non-zero value to indicate error
+ */
+int launchRENAME(const char *url,
+                 struct Response **response) __attribute__ ((warn_unused_result));
+
+/**
+ * Send the CHMOD request to NameNode using the given URL.
+ * The NameNode will execute the operation and return the result as response.
+ *
+ * @param url The URL for CHMOD operation
+ * @param response Response handle to store response returned from the NameNode
+ * @return 0 for success, non-zero value to indicate error
+ */
+int launchCHMOD(const char *url,
+                struct Response **response) __attribute__ ((warn_unused_result));
+
+/**
+ * Send the GetFileStatus request to NameNode using the given URL.
+ * The NameNode will execute the operation and return the result as response.
+ *
+ * @param url The URL for GetFileStatus operation
+ * @param response Response handle to store response returned from the NameNode,
+ *                 containing either file status or exception information
+ * @return 0 for success, non-zero value to indicate error
+ */
+int launchGFS(const char *url,
+              struct Response **response) __attribute__ ((warn_unused_result));
+
+/**
+ * Send the LS (LISTSTATUS) request to NameNode using the given URL.
+ * The NameNode will execute the operation and return the result as response.
+ *
+ * @param url The URL for LISTSTATUS operation
+ * @param response Response handle to store response returned from the NameNode
+ * @return 0 for success, non-zero value to indicate error
+ */
+int launchLS(const char *url,
+             struct Response **response) __attribute__ ((warn_unused_result));
+
+/**
+ * Send the DELETE request to NameNode using the given URL.
+ * The NameNode will execute the operation and return the result as response.
+ *
+ * @param url The URL for DELETE operation
+ * @param response Response handle to store response returned from the NameNode
+ * @return 0 for success, non-zero value to indicate error
+ */
+int launchDELETE(const char *url,
+                 struct Response **response) __attribute__ ((warn_unused_result));
+
+/**
+ * Send the CHOWN request to NameNode using the given URL.
+ * The NameNode will execute the operation and return the result as response.
+ *
+ * @param url The URL for CHOWN operation
+ * @param response Response handle to store response returned from the NameNode
+ * @return 0 for success, non-zero value to indicate error
+ */
+int launchCHOWN(const char *url,
+                struct Response **response) __attribute__ ((warn_unused_result));
+
+/**
+ * Send the OPEN request to NameNode using the given URL, 
+ * asking for reading a file (within a range). 
+ * The NameNode first redirects the request to the datanode
+ * that holds the corresponding first block of the file (within a range),
+ * and the datanode returns the content of the file through the HTTP connection.
+ *
+ * @param url The URL for OPEN operation
+ * @param resp The response holding user's buffer. 
+               The file content will be written into the buffer.
+ * @return 0 for success, non-zero value to indicate error
+ */
+int launchOPEN(const char *url,
+               struct Response* resp) __attribute__ ((warn_unused_result));
+
+/**
+ * Send the SETTIMES request to NameNode using the given URL.
+ * The NameNode will execute the operation and return the result as response.
+ *
+ * @param url The URL for SETTIMES operation
+ * @param response Response handle to store response returned from the NameNode
+ * @return 0 for success, non-zero value to indicate error
+ */
+int launchUTIMES(const char *url,
+                 struct Response **response) __attribute__ ((warn_unused_result));
+
+/**
+ * Send the WRITE/CREATE request to NameNode using the given URL.
+ * The NameNode will choose the writing target datanodes 
+ * and return the first datanode in the pipeline as response
+ *
+ * @param url The URL for WRITE/CREATE operation connecting to NameNode
+ * @param response Response handle to store response returned from the NameNode
+ * @return 0 for success, non-zero value to indicate error
+ */
+int launchNnWRITE(const char *url,
+                  struct Response **response) __attribute__ ((warn_unused_result));
+
+/**
+ * Send the WRITE request along with to-write content to 
+ * the corresponding DataNode using the given URL. 
+ * The DataNode will write the data and return the response.
+ *
+ * @param url The URL for WRITE operation connecting to DataNode
+ * @param buffer The webhdfsBuffer containing data to be written to hdfs
+ * @param response Response handle to store response returned from the NameNode
+ * @return 0 for success, non-zero value to indicate error
+ */
+int launchDnWRITE(const char *url, struct webhdfsBuffer *buffer,
+                  struct Response **response) __attribute__ ((warn_unused_result));
+
+/**
+ * Send the WRITE (APPEND) request to NameNode using the given URL. 
+ * The NameNode determines the DataNode for appending and 
+ * sends its URL back as response.
+ *
+ * @param url The URL for APPEND operation
+ * @param response Response handle to store response returned from the NameNode
+ * @return 0 for success, non-zero value to indicate error
+ */
+int launchNnAPPEND(const char *url, struct Response **response) __attribute__ ((warn_unused_result));
+
+/**
+ * Send the SETREPLICATION request to NameNode using the given URL.
+ * The NameNode will execute the operation and return the result as response.
+ *
+ * @param url The URL for SETREPLICATION operation
+ * @param response Response handle to store response returned from the NameNode
+ * @return 0 for success, non-zero value to indicate error
+ */
+int launchSETREPLICATION(const char *url,
+                         struct Response **response) __attribute__ ((warn_unused_result));
+
+/**
+ * Send the APPEND request along with the content to DataNode.
+ * The DataNode will do the appending and return the result as response.
+ *
+ * @param url The URL for APPEND operation connecting to DataNode
+ * @param buffer The webhdfsBuffer containing data to be appended
+ * @param response Response handle to store response returned from the DataNode
+ * @return 0 for success, non-zero value to indicate error
+ */
+int launchDnAPPEND(const char *url, struct webhdfsBuffer *buffer,
+                   struct Response **response) __attribute__ ((warn_unused_result));
+
+/**
+ * Call sys_errlist to get the error message string for the given error code
+ *
+ * @param errnoval  The error code value
+ * @return          The error message string mapped to the given error code
+ */
+const char *hdfs_strerror(int errnoval);
 
 
 #endif //_HDFS_HTTP_CLIENT_H_

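Taken together, the declarations above form the low-level HTTP layer of libwebhdfs: each launch* call issues one WebHDFS HTTP request and hands back an opaque struct Response, and every return code must be checked because of the warn_unused_result attribute. The sketch below is not part of the patch; it only shows the intended calling pattern for one of these helpers. The URL literal is a placeholder, and the allocation and release of struct Response are assumed to be handled by helpers elsewhere in libwebhdfs that this hunk does not show.

#include <stdio.h>
#include "hdfs_http_client.h"

/* Hypothetical caller: issue a CHOWN request and report failures. */
static int chownExample(const char *url)
{
    struct Response *resp = NULL;
    int ret;

    ret = launchCHOWN(url, &resp);   /* return value must not be ignored */
    if (ret) {
        /* hdfs_strerror maps the non-zero error code to a message */
        fprintf(stderr, "CHOWN failed: %s\n", hdfs_strerror(ret));
        return ret;
    }
    /* On success, inspect the header/body stored in resp and release it
     * with the library's response cleanup routine (not shown in this diff). */
    return 0;
}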
+ 333 - 185
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_query.c

@@ -22,233 +22,381 @@
 #include <stdio.h>
 #include <errno.h>
 
 
-#define NUM_OF_PERMISSION_BITS 4
-#define NUM_OF_PORT_BITS 6
-#define NUM_OF_REPLICATION_BITS 6
-
-static char *prepareQUERY(const char *host, int nnPort, const char *srcpath, const char *OP, const char *user) {
-    size_t length;
-    char *url;
-    const char *const protocol = "http://";
-    const char *const prefix = "/webhdfs/v1";
-    char *temp;
-    char *port;
-    port= (char*) malloc(NUM_OF_PORT_BITS);
-    if (!port) {
-        return NULL;
-    }
-    sprintf(port,"%d",nnPort);
-    if (user != NULL) {
-        length = strlen(protocol) + strlen(host) + strlen(":") + strlen(port) + strlen(prefix) + strlen(srcpath) + strlen ("?op=") + strlen(OP) + strlen("&user.name=") + strlen(user);
-    } else {
-        length = strlen(protocol) + strlen(host) + strlen(":") + strlen(port) + strlen(prefix) + strlen(srcpath) +  strlen ("?op=") + strlen(OP);
-    }
-    
-    temp = (char*) malloc(length + 1);
-    if (!temp) {
-        return NULL;
-    }
-    strcpy(temp,protocol);
-    temp = strcat(temp,host);
-    temp = strcat(temp,":");
-    temp = strcat(temp,port);
-    temp = strcat(temp,prefix);
-    temp = strcat(temp,srcpath);
-    temp = strcat(temp,"?op=");
-    temp = strcat(temp,OP);
-    if (user) {
-        temp = strcat(temp,"&user.name=");
-        temp = strcat(temp,user);
-    }
-    url = temp;
-    return url;
-}
-
+#define PERM_STR_LEN 4  // "644" + one byte for NUL
+#define SHORT_STR_LEN 6 // 65535 + NUL
+#define LONG_STR_LEN 21 // 2^64-1 = 18446744073709551615 + NUL
 
 
-static int decToOctal(int decNo) {
-    int octNo=0;
-    int expo =0;
-    while (decNo != 0)  {
-        octNo = ((decNo % 8) * pow(10,expo)) + octNo;
-        decNo = decNo / 8;
-        expo++;
+/**
+ * Create query based on NameNode hostname,
+ * NameNode port, path, operation and other parameters
+ *
+ * @param host          NameNode hostName
+ * @param nnPort        Port of NameNode
+ * @param path          Absolute path for the corresponding file
+ * @param op            Operations
+ * @param paraNum       Number of remaining parameters
+ * @param paraNames     Names of remaining parameters
+ * @param paraValues    Values of remaining parameters
+ * @param queryUrl      Holding the created URL
+ * @return 0 on success and non-zero value to indicate error
+ */
+static int createQueryURL(const char *host, unsigned int nnPort,
+                          const char *path, const char *op, int paraNum,
+                          const char **paraNames, const char **paraValues,
+                          char **queryUrl)
+{
+    size_t length = 0;
+    int i = 0, offset = 0, ret = 0;
+    char *url = NULL;
+    const char *protocol = "http://";
+    const char *prefix = "/webhdfs/v1";
+    
+    if (!paraNames || !paraValues) {
+        return EINVAL;
+    }
+    length = strlen(protocol) + strlen(host) + strlen(":") +
+                SHORT_STR_LEN + strlen(prefix) + strlen(path) +
+                strlen ("?op=") + strlen(op);
+    for (i = 0; i < paraNum; i++) {
+        if (paraNames[i] && paraValues[i]) {
+            length += 2 + strlen(paraNames[i]) + strlen(paraValues[i]);
+        }
+    }
+    url = malloc(length);   // The '\0' has already been included
+                            // when using SHORT_STR_LEN
+    if (!url) {
+        return ENOMEM;
+    }
+    
+    offset = snprintf(url, length, "%s%s:%d%s%s?op=%s",
+                      protocol, host, nnPort, prefix, path, op);
+    if (offset >= length || offset < 0) {
+        ret = EIO;
+        goto done;
+    }
+    for (i = 0; i < paraNum; i++) {
+        if (!paraNames[i] || !paraValues[i] || paraNames[i][0] == '\0' ||
+            paraValues[i][0] == '\0') {
+            continue;
+        }
+        offset += snprintf(url + offset, length - offset,
+                           "&%s=%s", paraNames[i], paraValues[i]);
+        if (offset >= length || offset < 0) {
+            ret = EIO;
+            goto done;
+        }
     }
     }
-    return octNo;
+done:
+    if (ret) {
+        free(url);
+        return ret;
+    }
+    *queryUrl = url;
+    return 0;
 }
 }
 
 
-
-char *prepareMKDIR(const char *host, int nnPort, const char *dirsubpath, const char *user) {
-    return prepareQUERY(host, nnPort, dirsubpath, "MKDIRS", user);
+int createUrlForMKDIR(const char *host, int nnPort,
+                      const char *path, const char *user, char **url)
+{
+    const char *userPara = "user.name";
+    return createQueryURL(host, nnPort, path, "MKDIRS", 1,
+                          &userPara, &user, url);
 }
 }
 
 
-
-char *prepareMKDIRwithMode(const char *host, int nnPort, const char *dirsubpath, int mode, const char *user) {
-    char *url;
-    char *permission;
-    permission = (char*) malloc(NUM_OF_PERMISSION_BITS);
-    if (!permission) {
-        return NULL;
-    }
-    mode = decToOctal(mode);
-    sprintf(permission,"%d",mode);
-    url = prepareMKDIR(host, nnPort, dirsubpath, user);
-    url = realloc(url,(strlen(url) + strlen("&permission=") + strlen(permission) + 1));
-    if (!url) {
-        return NULL;
-    }
-    url = strcat(url,"&permission=");
-    url = strcat(url,permission);
-    return url;
+int createUrlForGetFileStatus(const char *host, int nnPort, const char *path,
+                              const char *user, char **url)
+{
+    const char *userPara = "user.name";
+    return createQueryURL(host, nnPort, path, "GETFILESTATUS", 1,
+                          &userPara, &user, url);
 }
 }
 
 
+int createUrlForLS(const char *host, int nnPort, const char *path,
+                   const char *user, char **url)
+{
+    const char *userPara = "user.name";
+    return createQueryURL(host, nnPort, path, "LISTSTATUS",
+                          1, &userPara, &user, url);
+}
 
 
-char *prepareRENAME(const char *host, int nnPort, const char *srcpath, const char *destpath, const char *user) {
-    char *url;
-    url = prepareQUERY(host, nnPort, srcpath, "RENAME", user);
-    url = realloc(url,(strlen(url) + strlen("&destination=") + strlen(destpath) + 1));
-    if (!url) {
-        return NULL;
-    }
-    url = strcat(url,"&destination=");
-    url = strcat(url,destpath);
-    return url;
+int createUrlForNnAPPEND(const char *host, int nnPort, const char *path,
+                         const char *user, char **url)
+{
+    const char *userPara = "user.name";
+    return createQueryURL(host, nnPort, path, "APPEND",
+                          1, &userPara, &user, url);
 }
 }
 
 
-char *prepareGFS(const char *host, int nnPort, const char *dirsubpath, const char *user) {
-    return (prepareQUERY(host, nnPort, dirsubpath, "GETFILESTATUS", user));
+int createUrlForMKDIRwithMode(const char *host, int nnPort, const char *path,
+                              int mode, const char *user, char **url)
+{
+    int strlength;
+    char permission[PERM_STR_LEN];
+    const char *paraNames[2], *paraValues[2];
+    
+    paraNames[0] = "permission";
+    paraNames[1] = "user.name";
+    memset(permission, 0, PERM_STR_LEN);
+    strlength = snprintf(permission, PERM_STR_LEN, "%o", mode);
+    if (strlength < 0 || strlength >= PERM_STR_LEN) {
+        return EIO;
+    }
+    paraValues[0] = permission;
+    paraValues[1] = user;
+    
+    return createQueryURL(host, nnPort, path, "MKDIRS", 2,
+                          paraNames, paraValues, url);
 }
 }
 
 
-char *prepareLS(const char *host, int nnPort, const char *dirsubpath, const char *user) {
-    return (prepareQUERY(host, nnPort, dirsubpath, "LISTSTATUS", user));
+int createUrlForRENAME(const char *host, int nnPort, const char *srcpath,
+                         const char *destpath, const char *user, char **url)
+{
+    const char *paraNames[2], *paraValues[2];
+    paraNames[0] = "destination";
+    paraNames[1] = "user.name";
+    paraValues[0] = destpath;
+    paraValues[1] = user;
+    
+    return createQueryURL(host, nnPort, srcpath,
+                          "RENAME", 2, paraNames, paraValues, url);
 }
 }
 
 
-char *prepareCHMOD(const char *host, int nnPort, const char *dirsubpath, int mode, const char *user) {
-    char *url;
-    char *permission;
-    permission = (char*) malloc(NUM_OF_PERMISSION_BITS);
-    if (!permission) {
-        return NULL;
-    }
-    mode &= 0x3FFF;
-    mode = decToOctal(mode);
-    sprintf(permission,"%d",mode);
-    url = prepareQUERY(host, nnPort, dirsubpath, "SETPERMISSION", user);
-    url = realloc(url,(strlen(url) + strlen("&permission=") + strlen(permission) + 1));
-    if (!url) {
-        return NULL;
+int createUrlForCHMOD(const char *host, int nnPort, const char *path,
+                      int mode, const char *user, char **url)
+{
+    int strlength;
+    char permission[PERM_STR_LEN];
+    const char *paraNames[2], *paraValues[2];
+    
+    paraNames[0] = "permission";
+    paraNames[1] = "user.name";
+    memset(permission, 0, PERM_STR_LEN);
+    strlength = snprintf(permission, PERM_STR_LEN, "%o", mode);
+    if (strlength < 0 || strlength >= PERM_STR_LEN) {
+        return EIO;
     }
     }
-    url = strcat(url,"&permission=");
-    url = strcat(url,permission);
-    return url;
+    paraValues[0] = permission;
+    paraValues[1] = user;
+    
+    return createQueryURL(host, nnPort, path, "SETPERMISSION",
+                          2, paraNames, paraValues, url);
 }
 }
 
 
-char *prepareDELETE(const char *host, int nnPort, const char *dirsubpath, int recursive, const char *user) {
-    char *url = (prepareQUERY(host, nnPort, dirsubpath, "DELETE", user));
-    char *recursiveFlag = (char *)malloc(6);
-    if (!recursive) {
-        strcpy(recursiveFlag, "false");
+int createUrlForDELETE(const char *host, int nnPort, const char *path,
+                       int recursive, const char *user, char **url)
+{
+    const char *paraNames[2], *paraValues[2];
+    paraNames[0] = "recursive";
+    paraNames[1] = "user.name";
+    if (recursive) {
+        paraValues[0] = "true";
     } else {
-        strcpy(recursiveFlag, "true");
-    }
-    url = (char *) realloc(url, strlen(url) + strlen("&recursive=") + strlen(recursiveFlag) + 1);
-    if (!url) {
-        return NULL;
+        paraValues[0] = "false";
     }
     }
+    paraValues[1] = user;
     
     
-    strcat(url, "&recursive=");
-    strcat(url, recursiveFlag);
-    return url;
+    return createQueryURL(host, nnPort, path, "DELETE",
+                          2, paraNames, paraValues, url);
 }
 }
 
 
-char *prepareCHOWN(const char *host, int nnPort, const char *dirsubpath, const char *owner, const char *group, const char *user) {
-    char *url;
-    url = prepareQUERY(host, nnPort, dirsubpath, "SETOWNER", user);
-    if (!url) {
-        return NULL;
-    }
-    if(owner != NULL) {
-        url = realloc(url,(strlen(url) + strlen("&owner=") + strlen(owner) + 1));
-        url = strcat(url,"&owner=");
-        url = strcat(url,owner);
-    }
-    if (group != NULL) {
-        url = realloc(url,(strlen(url) + strlen("&group=") + strlen(group) + 1));
-        url = strcat(url,"&group=");
-        url = strcat(url,group);
-    }
-    return url;
+int createUrlForCHOWN(const char *host, int nnPort, const char *path,
+                      const char *owner, const char *group,
+                      const char *user, char **url)
+{
+    const char *paraNames[3], *paraValues[3];
+    paraNames[0] = "owner";
+    paraNames[1] = "group";
+    paraNames[2] = "user.name";
+    paraValues[0] = owner;
+    paraValues[1] = group;
+    paraValues[2] = user;
+    
+    return createQueryURL(host, nnPort, path, "SETOWNER",
+                          3, paraNames, paraValues, url);
 }
 }
 
 
-char *prepareOPEN(const char *host, int nnPort, const char *dirsubpath, const char *user, size_t offset, size_t length) {
-    char *base_url = prepareQUERY(host, nnPort, dirsubpath, "OPEN", user);
-    char *url = (char *) malloc(strlen(base_url) + strlen("&offset=") + 15 + strlen("&length=") + 15);
-    if (!url) {
-        return NULL;
+int createUrlForOPEN(const char *host, int nnPort, const char *path,
+                     const char *user, size_t offset, size_t length, char **url)
+{
+    int strlength;
+    char offsetStr[LONG_STR_LEN], lengthStr[LONG_STR_LEN];
+    const char *paraNames[3], *paraValues[3];
+    
+    paraNames[0] = "offset";
+    paraNames[1] = "length";
+    paraNames[2] = "user.name";
+    memset(offsetStr, 0, LONG_STR_LEN);
+    memset(lengthStr, 0, LONG_STR_LEN);
+    strlength = snprintf(offsetStr, LONG_STR_LEN, "%lu", offset);
+    if (strlength < 0 || strlength >= LONG_STR_LEN) {
+        return EIO;
     }
     }
-    sprintf(url, "%s&offset=%ld&length=%ld", base_url, offset, length);
-    return url;
+    strlength = snprintf(lengthStr, LONG_STR_LEN, "%lu", length);
+    if (strlength < 0 || strlength >= LONG_STR_LEN) {
+        return EIO;
+    }
+    paraValues[0] = offsetStr;
+    paraValues[1] = lengthStr;
+    paraValues[2] = user;
+    
+    return createQueryURL(host, nnPort, path, "OPEN",
+                          3, paraNames, paraValues, url);
 }
 }
 
 
-char *prepareUTIMES(const char *host, int nnPort, const char *dirsubpath, long unsigned mTime, long unsigned aTime, const char *user) {
-    char *url;
-    char *modTime;
-    char *acsTime;
-    modTime = (char*) malloc(12);
-    acsTime = (char*) malloc(12);
-    url = prepareQUERY(host, nnPort, dirsubpath, "SETTIMES", user);
-    sprintf(modTime,"%lu",mTime);
-    sprintf(acsTime,"%lu",aTime);
-    url = realloc(url,(strlen(url) + strlen("&modificationtime=") + strlen(modTime) + strlen("&accesstime=") + strlen(acsTime) + 1));
-    if (!url) {
-        return NULL;
+int createUrlForUTIMES(const char *host, int nnPort, const char *path,
+                       long unsigned mTime, long unsigned aTime,
+                       const char *user, char **url)
+{
+    int strlength;
+    char modTime[LONG_STR_LEN], acsTime[LONG_STR_LEN];
+    const char *paraNames[3], *paraValues[3];
+    
+    memset(modTime, 0, LONG_STR_LEN);
+    memset(acsTime, 0, LONG_STR_LEN);
+    strlength = snprintf(modTime, LONG_STR_LEN, "%lu", mTime);
+    if (strlength < 0 || strlength >= LONG_STR_LEN) {
+        return EIO;
     }
     }
-    url = strcat(url, "&modificationtime=");
-    url = strcat(url, modTime);
-    url = strcat(url,"&accesstime=");
-    url = strcat(url, acsTime);
-    return url;
+    strlength = snprintf(acsTime, LONG_STR_LEN, "%lu", aTime);
+    if (strlength < 0 || strlength >= LONG_STR_LEN) {
+        return EIO;
+    }
+    paraNames[0] = "modificationtime";
+    paraNames[1] = "accesstime";
+    paraNames[2] = "user.name";
+    paraValues[0] = modTime;
+    paraValues[1] = acsTime;
+    paraValues[2] = user;
+    
+    return createQueryURL(host, nnPort, path, "SETTIMES",
+                          3, paraNames, paraValues, url);
 }
 }
 
 
-char *prepareNnWRITE(const char *host, int nnPort, const char *dirsubpath, const char *user, int16_t replication, size_t blockSize) {
-    char *url;
-    url = prepareQUERY(host, nnPort, dirsubpath, "CREATE", user);
-    url = realloc(url, (strlen(url) + strlen("&overwrite=true") + 1));
-    if (!url) {
-        return NULL;
-    }
-    url = strcat(url, "&overwrite=true");
+int createUrlForNnWRITE(const char *host, int nnPort,
+                        const char *path, const char *user,
+                        int16_t replication, size_t blockSize, char **url)
+{
+    int strlength;
+    char repStr[SHORT_STR_LEN], blockSizeStr[LONG_STR_LEN];
+    const char *paraNames[4], *paraValues[4];
+    
+    memset(repStr, 0, SHORT_STR_LEN);
+    memset(blockSizeStr, 0, LONG_STR_LEN);
     if (replication > 0) {
-        url = realloc(url, (strlen(url) + strlen("&replication=") + 6));
-        if (!url) {
-            return NULL;
+        strlength = snprintf(repStr, SHORT_STR_LEN, "%u", replication);
+        if (strlength < 0 || strlength >= SHORT_STR_LEN) {
+            return EIO;
         }
         }
-        sprintf(url, "%s&replication=%d", url, replication);
     }
     }
     if (blockSize > 0) {
-        url = realloc(url, (strlen(url) + strlen("&blocksize=") + 16));
-        if (!url) {
-            return NULL;
+        strlength = snprintf(blockSizeStr, LONG_STR_LEN, "%lu", blockSize);
+        if (strlength < 0 || strlength >= LONG_STR_LEN) {
+            return EIO;
+        }
+    }
+    paraNames[0] = "overwrite";
+    paraNames[1] = "replication";
+    paraNames[2] = "blocksize";
+    paraNames[3] = "user.name";
+    paraValues[0] = "true";
+    paraValues[1] = repStr;
+    paraValues[2] = blockSizeStr;
+    paraValues[3] = user;
+    
+    return createQueryURL(host, nnPort, path, "CREATE",
+                          4, paraNames, paraValues, url);
+}
+
+int createUrlForSETREPLICATION(const char *host, int nnPort,
+                               const char *path, int16_t replication,
+                               const char *user, char **url)
+{
+    char repStr[SHORT_STR_LEN];
+    const char *paraNames[2], *paraValues[2];
+    int strlength;
+
+    memset(repStr, 0, SHORT_STR_LEN);
+    if (replication > 0) {
+        strlength = snprintf(repStr, SHORT_STR_LEN, "%u", replication);
+        if (strlength < 0 || strlength >= SHORT_STR_LEN) {
+            return EIO;
         }
         }
-        sprintf(url, "%s&blocksize=%ld", url, blockSize);
     }
     }
-    return url;
+    paraNames[0] = "replication";
+    paraNames[1] = "user.name";
+    paraValues[0] = repStr;
+    paraValues[1] = user;
+    
+    return createQueryURL(host, nnPort, path, "SETREPLICATION",
+                          2, paraNames, paraValues, url);
 }
 }
 
 
-char *prepareNnAPPEND(const char *host, int nnPort, const char *dirsubpath, const char *user) {
-    return (prepareQUERY(host, nnPort, dirsubpath, "APPEND", user));
+int createUrlForGetBlockLocations(const char *host, int nnPort,
+                                  const char *path, size_t offset,
+                                  size_t length, const char *user, char **url)
+{
+    char offsetStr[LONG_STR_LEN], lengthStr[LONG_STR_LEN];
+    const char *paraNames[3], *paraValues[3];
+    int strlength;
+    
+    memset(offsetStr, 0, LONG_STR_LEN);
+    memset(lengthStr, 0, LONG_STR_LEN);
+    if (offset > 0) {
+        strlength = snprintf(offsetStr, LONG_STR_LEN, "%lu", offset);
+        if (strlength < 0 || strlength >= LONG_STR_LEN) {
+            return EIO;
+        }
+    }
+    if (length > 0) {
+        strlength = snprintf(lengthStr, LONG_STR_LEN, "%lu", length);
+        if (strlength < 0 || strlength >= LONG_STR_LEN) {
+            return EIO;
+        }
+    }
+    paraNames[0] = "offset";
+    paraNames[1] = "length";
+    paraNames[2] = "user.name";
+    paraValues[0] = offsetStr;
+    paraValues[1] = lengthStr;
+    paraValues[2] = user;
+    
+    return createQueryURL(host, nnPort, path, "GET_BLOCK_LOCATIONS",
+                          3, paraNames, paraValues, url);
 }
 }
 
 
-char *prepareSETREPLICATION(const char *host, int nnPort, const char *path, int16_t replication, const char *user)
+int createUrlForReadFromDatanode(const char *dnHost, int dnPort,
+                                 const char *path, size_t offset,
+                                 size_t length, const char *user,
+                                 const char *namenodeRpcAddr, char **url)
 {
 {
-    char *url = prepareQUERY(host, nnPort, path, "SETREPLICATION", user);
-    char *replicationNum = (char *) malloc(NUM_OF_REPLICATION_BITS);
-    sprintf(replicationNum, "%u", replication);
-    url = realloc(url, strlen(url) + strlen("&replication=") + strlen(replicationNum)+ 1);
-    if (!url) {
-        return NULL;
+    char offsetStr[LONG_STR_LEN], lengthStr[LONG_STR_LEN];
+    const char *paraNames[4], *paraValues[4];
+    int strlength;
+    
+    memset(offsetStr, 0, LONG_STR_LEN);
+    memset(lengthStr, 0, LONG_STR_LEN);
+    if (offset > 0) {
+        strlength = snprintf(offsetStr, LONG_STR_LEN, "%lu", offset);
+        if (strlength < 0 || strlength >= LONG_STR_LEN) {
+            return EIO;
+        }
     }
     }
+    if (length > 0) {
+        strlength = snprintf(lengthStr, LONG_STR_LEN, "%lu", length);
+        if (strlength < 0 || strlength >= LONG_STR_LEN) {
+            return EIO;
+        }
+    }
+    
+    paraNames[0] = "offset";
+    paraNames[1] = "length";
+    paraNames[2] = "user.name";
+    paraNames[3] = "namenoderpcaddress";
+    paraValues[0] = offsetStr;
+    paraValues[1] = lengthStr;
+    paraValues[2] = user;
+    paraValues[3] = namenodeRpcAddr;
     
     
-    url = strcat(url, "&replication=");
-    url = strcat(url, replicationNum);
-    return url;
+    return createQueryURL(dnHost, dnPort, path, "OPEN",
+                          4, paraNames, paraValues, url);
 }
 }

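The net effect of the rewrite above is that every helper now funnels through createQueryURL, which snprintf-formats http://<host>:<port>/webhdfs/v1<path>?op=<OP> and then appends &name=value for each non-empty parameter, returning 0 and a malloc'ed string through the out-parameter instead of a bare pointer. The following illustration is not from the patch; the host, port, path, and user are made-up values, used only to show the URL shape one of these helpers yields.

#include <stdio.h>
#include <stdlib.h>
#include "hdfs_http_query.h"

/* Hypothetical driver showing the URL produced by createUrlForOPEN, e.g.
 * http://nn.example.com:50070/webhdfs/v1/tmp/f.txt?op=OPEN
 *     &offset=0&length=1024&user.name=alice
 * (parameters whose values are empty strings are skipped by createQueryURL). */
static void printOpenUrl(void)
{
    char *url = NULL;
    int ret = createUrlForOPEN("nn.example.com", 50070, "/tmp/f.txt",
                               "alice", 0, 1024, &url);
    if (ret) {
        fprintf(stderr, "building OPEN url failed: %d\n", ret);
        return;
    }
    printf("%s\n", url);
    free(url);   /* the caller owns the returned string */
}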
+ 215 - 16
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_query.h

@@ -20,22 +20,221 @@
 #ifndef _HDFS_HTTP_QUERY_H_
 #define _HDFS_HTTP_QUERY_H_
 
 
-#include <stdint.h>
-#include <stdio.h>
-
-char *prepareMKDIR(const char *host, int nnPort, const char *dirsubpath, const char *user);
-char *prepareMKDIRwithMode(const char *host, int nnPort, const char *dirsubpath, int mode, const char *user);
-char *prepareRENAME(const char *host, int nnPort, const char *srcpath, const char *destpath, const char *user);
-char *prepareCHMOD(const char *host, int nnPort, const char *dirsubpath, int mode, const char *user);
-char *prepareGFS(const char *host, int nnPort, const char *dirsubpath, const char *user);
-char *prepareLS(const char *host, int nnPort, const char *dirsubpath, const char *user);
-char *prepareDELETE(const char *host, int nnPort, const char *dirsubpath, int recursive, const char *user);
-char *prepareCHOWN(const char *host, int nnPort, const char *dirsubpath, const char *owner, const char *group, const char *user);
-char *prepareOPEN(const char *host, int nnPort, const char *dirsubpath, const char *user, size_t offset, size_t length);
-char *prepareUTIMES(const char *host, int nnPort, const char *dirsubpath, long unsigned mTime, long unsigned aTime, const char *user);
-char *prepareNnWRITE(const char *host, int nnPort, const char *dirsubpath, const char *user, int16_t replication, size_t blockSize);
-char *prepareNnAPPEND(const char *host, int nnPort, const char *dirsubpath, const char *user);
-char *prepareSETREPLICATION(const char *host, int nnPort, const char *path, int16_t replication, const char *user);
+#include <unistd.h> /* for size_t */
+#include <inttypes.h> /* for int16_t */
+
+/**
+ * Create the URL for a MKDIR request
+ *
+ * @param host The hostname of the NameNode
+ * @param nnPort Port of the NameNode
+ * @param path Path of the dir to create
+ * @param user User name
+ * @param url Holding the generated URL for MKDIR request
+ * @return 0 on success and non-zero value on errors
+ */
+int createUrlForMKDIR(const char *host, int nnPort,
+                      const char *path, const char *user,
+                      char **url) __attribute__ ((warn_unused_result));
+
+/**
+ * Create the URL for a MKDIR (with mode) request
+ *
+ * @param host The hostname of the NameNode
+ * @param nnPort Port of the NameNode
+ * @param path Path of the dir to create
+ * @param mode Mode of MKDIR
+ * @param user User name
+ * @param url Holding the generated URL for MKDIR request
+ * @return 0 on success and non-zero value on errors
+ */
+int createUrlForMKDIRwithMode(const char *host, int nnPort, const char *path,
+                              int mode, const char *user,
+                              char **url) __attribute__ ((warn_unused_result));
+
+/**
+ * Create the URL for a RENAME request
+ *
+ * @param host The hostname of the NameNode
+ * @param nnPort Port of the NameNode
+ * @param srcpath Source path
+ * @param dstpath Destination path
+ * @param user User name
+ * @param url Holding the generated URL for RENAME request
+ * @return 0 on success and non-zero value on errors
+ */
+int createUrlForRENAME(const char *host, int nnPort, const char *srcpath,
+                       const char *dstpath, const char *user,
+                       char **url) __attribute__ ((warn_unused_result));
+
+/**
+ * Create the URL for a CHMOD request
+ *
+ * @param host The hostname of the NameNode
+ * @param nnPort Port of the NameNode
+ * @param path Target path
+ * @param mode New mode for the file
+ * @param user User name
+ * @param url Holding the generated URL for CHMOD request
+ * @return 0 on success and non-zero value on errors
+ */
+int createUrlForCHMOD(const char *host, int nnPort, const char *path,
+                      int mode, const char *user,
+                      char **url) __attribute__ ((warn_unused_result));
+
+/**
+ * Create the URL for a GETFILESTATUS request
+ *
+ * @param host The hostname of the NameNode
+ * @param nnPort Port of the NameNode
+ * @param path Path of the target file
+ * @param user User name
+ * @param url Holding the generated URL for GETFILESTATUS request
+ * @return 0 on success and non-zero value on errors
+ */
+int createUrlForGetFileStatus(const char *host, int nnPort,
+                              const char *path, const char *user,
+                              char **url) __attribute__ ((warn_unused_result));
+
+/**
+ * Create the URL for a LISTSTATUS request
+ *
+ * @param host The hostname of the NameNode
+ * @param nnPort Port of the NameNode
+ * @param path Path of the directory for listing
+ * @param user User name
+ * @param url Holding the generated URL for LISTSTATUS request
+ * @return 0 on success and non-zero value on errors
+ */
+int createUrlForLS(const char *host, int nnPort,
+                   const char *path, const char *user,
+                   char **url) __attribute__ ((warn_unused_result));
+
+/**
+ * Create the URL for a DELETE request
+ *
+ * @param host The hostname of the NameNode
+ * @param nnPort Port of the NameNode
+ * @param path Path of the file to be deleted
+ * @param recursive Whether or not to delete in a recursive way
+ * @param user User name
+ * @param url Holding the generated URL for DELETE request
+ * @return 0 on success and non-zero value on errors
+ */
+int createUrlForDELETE(const char *host, int nnPort, const char *path,
+                       int recursive, const char *user,
+                       char **url) __attribute__ ((warn_unused_result));
+
+/**
+ * Create the URL for a CHOWN request
+ *
+ * @param host The hostname of the NameNode
+ * @param nnPort Port of the NameNode
+ * @param path Path of the target
+ * @param owner New owner
+ * @param group New group
+ * @param user User name
+ * @param url Holding the generated URL for CHOWN request
+ * @return 0 on success and non-zero value on errors
+ */
+int createUrlForCHOWN(const char *host, int nnPort, const char *path,
+                      const char *owner, const char *group, const char *user,
+                      char **url) __attribute__ ((warn_unused_result));
+
+/**
+ * Create the URL for an OPEN/READ request
+ *
+ * @param host The hostname of the NameNode
+ * @param nnPort Port of the NameNode
+ * @param path Path of the file to read
+ * @param user User name
+ * @param offset Offset for reading (the start position for this read)
+ * @param length Length of the file to read
+ * @param url Holding the generated URL for OPEN/READ request
+ * @return 0 on success and non-zero value on errors
+ */
+int createUrlForOPEN(const char *host, int nnPort, const char *path,
+                     const char *user, size_t offset, size_t length,
+                     char **url) __attribute__ ((warn_unused_result));
+
+/**
+ * Create the URL for a UTIMES (update time) request
+ *
+ * @param host The hostname of the NameNode
+ * @param nnPort Port of the NameNode
+ * @param path Path of the file for updating time
+ * @param mTime Modified time to set
+ * @param aTime Access time to set
+ * @param user User name
+ * @param url Holding the generated URL for UTIMES request
+ * @return 0 on success and non-zero value on errors
+ */
+int createUrlForUTIMES(const char *host, int nnPort, const char *path,
+                       long unsigned mTime, long unsigned aTime,
+                       const char *user,
+                       char **url) __attribute__ ((warn_unused_result));
+
+/**
+ * Create the URL for a WRITE/CREATE request (sent to NameNode)
+ *
+ * @param host The hostname of the NameNode
+ * @param nnPort Port of the NameNode
+ * @param path Path of the dir to create
+ * @param user User name
+ * @param replication Number of replication of the file
+ * @param blockSize Size of the block for the file
+ * @param url Holding the generated URL for WRITE request
+ * @return 0 on success and non-zero value on errors
+ */
+int createUrlForNnWRITE(const char *host, int nnPort, const char *path,
+                        const char *user, int16_t replication, size_t blockSize,
+                        char **url) __attribute__ ((warn_unused_result));
+
+/**
+ * Create the URL for an APPEND request (sent to NameNode)
+ *
+ * @param host The hostname of the NameNode
+ * @param nnPort Port of the NameNode
+ * @param path Path of the file for appending
+ * @param user User name
+ * @param url Holding the generated URL for APPEND request
+ * @return 0 on success and non-zero value on errors
+ */
+int createUrlForNnAPPEND(const char *host, int nnPort,
+                         const char *path, const char *user,
+                         char **url) __attribute__ ((warn_unused_result));
+
+/**
+ * Create the URL for a SETREPLICATION request
+ *
+ * @param host The hostname of the NameNode
+ * @param nnPort Port of the NameNode
+ * @param path Path of the target file
+ * @param replication New replication number
+ * @param user User name
+ * @param url Holding the generated URL for SETREPLICATION request
+ * @return 0 on success and non-zero value on errors
+ */
+int createUrlForSETREPLICATION(const char *host, int nnPort, const char *path,
+                               int16_t replication, const char *user,
+                               char **url) __attribute__ ((warn_unused_result));
+
+/**
+ * Create the URL for a GET_BLOCK_LOCATIONS request
+ *
+ * @param host The hostname of the NameNode
+ * @param nnPort Port of the NameNode
+ * @param path Path of the target file
+ * @param offset The offset in the file
+ * @param length Length of the file content
+ * @param user User name
+ * @param url Holding the generated URL for GET_BLOCK_LOCATIONS request
+ * @return 0 on success and non-zero value on errors
+ */
+int createUrlForGetBlockLocations(const char *host, int nnPort,
+                            const char *path, size_t offset,
+                            size_t length, const char *user,
+                            char **url) __attribute__ ((warn_unused_result));
 
 
 
 
 #endif  //_HDFS_HTTP_QUERY_H_

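These declarations pair one-to-one with the launch* calls in hdfs_http_client.h and the parse* routines in hdfs_json_parser.c: build the URL, send the request, then parse the response. The sketch below is not part of the patch and only wires the three layers together for DELETE; how the response body string is extracted from struct Response is not visible in this diff, so that step is left as a comment, and releasing the response is assumed to be done by the library's own cleanup routine.

#include <stdio.h>
#include <stdlib.h>
#include "hdfs_http_client.h"
#include "hdfs_http_query.h"

/* Hypothetical end-to-end flow for a recursive DELETE. */
static int deleteExample(const char *host, int nnPort,
                         const char *path, const char *user)
{
    char *url = NULL;
    struct Response *resp = NULL;
    int ret;

    ret = createUrlForDELETE(host, nnPort, path, 1 /* recursive */, user, &url);
    if (ret) {
        return ret;
    }
    ret = launchDELETE(url, &resp);
    free(url);
    if (ret) {
        fprintf(stderr, "DELETE failed: %s\n", hdfs_strerror(ret));
        return ret;
    }
    /* ret = parseDELETE(<body string taken from resp>);
     * then release resp with the library's cleanup routine. */
    return ret;
}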
+ 478 - 267
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.c

@@ -25,6 +25,11 @@
 #include <ctype.h>
 #include <jansson.h>
 
 
+static const char * const temporaryRedirectCode = "307 TEMPORARY_REDIRECT";
+static const char * const twoHundredOKCode = "200 OK";
+static const char * const twoHundredOneCreatedCode = "201 Created";
+static const char * const httpHeaderString = "HTTP/1.1";
+
 /**
  * Exception information after calling JSON operations
  */
@@ -34,9 +39,6 @@ struct jsonException {
   const char *message;
 };
 
 
-static hdfsFileInfo *parseJsonGFS(json_t *jobj, hdfsFileInfo *fileStat,
-                           int *numEntries, const char *operation);
-
 static void dotsToSlashes(char *str)
 {
     for (; *str != '\0'; str++) {
@@ -45,8 +47,9 @@ static void dotsToSlashes(char *str)
     }
     }
 }
 }
 
 
-int printJsonExceptionV(struct jsonException *exc, int noPrintFlags,
-                        const char *fmt, va_list ap)
+/** Print out the JSON exception information */
+static int printJsonExceptionV(struct jsonException *exc, int noPrintFlags,
+                               const char *fmt, va_list ap)
 {
 {
     char *javaClassName = NULL;
     char *javaClassName = NULL;
     int excErrno = EINTERNAL, shouldPrint = 0;
     int excErrno = EINTERNAL, shouldPrint = 0;
@@ -74,11 +77,23 @@ int printJsonExceptionV(struct jsonException *exc, int noPrintFlags,
     return excErrno;
     return excErrno;
 }
 }
 
 
-int printJsonException(struct jsonException *exc, int noPrintFlags,
-                       const char *fmt, ...)
+/**
+ * Print out JSON exception information.
+ *
+ * @param exc             The exception information to print and free
+ * @param noPrintFlags    Flags which determine which exceptions we should NOT
+ *                        print.
+ * @param fmt             Printf-style format list
+ * @param ...             Printf-style varargs
+ *
+ * @return                The POSIX error number associated with the exception
+ *                        object.
+ */
+static int printJsonException(struct jsonException *exc, int noPrintFlags,
+                              const char *fmt, ...)
 {
 {
     va_list ap;
     va_list ap;
-    int ret;
+    int ret = 0;
     
     
     va_start(ap, fmt);
     va_start(ap, fmt);
     ret = printJsonExceptionV(exc, noPrintFlags, fmt, ap);
     ret = printJsonExceptionV(exc, noPrintFlags, fmt, ap);
@@ -86,81 +101,20 @@ int printJsonException(struct jsonException *exc, int noPrintFlags,
     return ret;
     return ret;
 }
 }
 
 
-static hdfsFileInfo *json_parse_array(json_t *jobj, char *key, hdfsFileInfo *fileStat, int *numEntries, const char *operation) {
-    int arraylen = json_array_size(jobj);                      //Getting the length of the array
-    *numEntries = arraylen;
-    if (!key) {
-        return NULL;
-    }
-    if(arraylen > 0) {
-        fileStat = (hdfsFileInfo *)realloc(fileStat,sizeof(hdfsFileInfo)*arraylen);
-    }
-    json_t *jvalue;
-    int i;
-    for (i=0; i< arraylen; i++) {
-        jvalue = json_array_get(jobj, i);            //Getting the array element at position i
-        if (json_is_array(jvalue)) {                 // array within an array - program should never come here for now
-            json_parse_array(jvalue, NULL, &fileStat[i], numEntries, operation);
-        }
-        else if (json_is_object(jvalue)) {           // program will definitely come over here
-            parseJsonGFS(jvalue, &fileStat[i], numEntries, operation);
-        }
-        else {
-            return NULL;                               // program will never come over here for now
-        }
-    }
-    *numEntries = arraylen;
-    return fileStat;
-}
-
-int parseBoolean(char *response) {
-    json_t *root;
-    json_error_t error;
-    size_t flags = 0;
-    int result = 0;
-    const char *key;
-    json_t *value;
-    root = json_loads(response, flags, &error);
-    void *iter = json_object_iter(root);
-    while(iter)  {
-        key = json_object_iter_key(iter);
-        value = json_object_iter_value(iter);
-        switch (json_typeof(value))  {
-            case JSON_TRUE:
-                result = 1;
-                break;
-            default:
-                result = 0;
-                break;
-        }
-        iter = json_object_iter_next(root, iter);
-    }
-    return result;
-}
-
-int parseMKDIR(char *response) {
-    return (parseBoolean(response));
-}
-
-int parseRENAME(char *response) {
-    return (parseBoolean(response));
-}
-
-int parseDELETE(char *response) {
-    return (parseBoolean(response));
-}
-
-struct jsonException *parseJsonException(json_t *jobj) {
-    const char *key;
-    json_t *value;
+/** Parse the exception information from JSON */
+static struct jsonException *parseJsonException(json_t *jobj)
+{
+    const char *key = NULL;
+    json_t *value = NULL;
     struct jsonException *exception = NULL;
     struct jsonException *exception = NULL;
+    void *iter = NULL;
     
     
     exception = calloc(1, sizeof(*exception));
     exception = calloc(1, sizeof(*exception));
     if (!exception) {
     if (!exception) {
         return NULL;
         return NULL;
     }
     }
     
     
-    void *iter = json_object_iter(jobj);
+    iter = json_object_iter(jobj);
     while (iter) {
     while (iter) {
         key = json_object_iter_key(iter);
         key = json_object_iter_key(iter);
         value = json_object_iter_value(iter);
         value = json_object_iter_value(iter);
@@ -175,23 +129,31 @@ struct jsonException *parseJsonException(json_t *jobj) {
         
         
         iter = json_object_iter_next(jobj, iter);
         iter = json_object_iter_next(jobj, iter);
     }
     }
-    
     return exception;
     return exception;
 }
 }
 
 
-struct jsonException *parseException(const char *content) {
-    if (!content) {
-        return NULL;
-    }
-    
+/** 
+ * Parse the exception information which is presented in JSON
+ * 
+ * @param content   Exception information in JSON
+ * @return          jsonException for printing out
+ */
+static struct jsonException *parseException(const char *content)
+{
     json_error_t error;
     json_error_t error;
     size_t flags = 0;
     size_t flags = 0;
-    const char *key;
+    const char *key = NULL;
     json_t *value;
     json_t *value;
-    json_t *jobj = json_loads(content, flags, &error);
+    json_t *jobj;
+    struct jsonException *exception = NULL;
     
     
+    if (!content) {
+        return NULL;
+    }
+    jobj = json_loads(content, flags, &error);
     if (!jobj) {
     if (!jobj) {
-        fprintf(stderr, "JSon parsing failed\n");
+        fprintf(stderr, "JSON parsing error on line %d: %s\n",
+                error.line, error.text);
         return NULL;
         return NULL;
     }
     }
     void *iter = json_object_iter(jobj);
     void *iter = json_object_iter(jobj);
@@ -199,254 +161,503 @@ struct jsonException *parseException(const char *content) {
         key = json_object_iter_key(iter);
         key = json_object_iter_key(iter);
         value = json_object_iter_value(iter);
         value = json_object_iter_value(iter);
         
         
-        if (!strcmp(key, "RemoteException") && json_typeof(value) == JSON_OBJECT) {
-            return parseJsonException(value);
+        if (!strcmp(key, "RemoteException") &&
+                    json_typeof(value) == JSON_OBJECT) {
+            exception = parseJsonException(value);
+            break;
         }
         }
         iter = json_object_iter_next(jobj, iter);
         iter = json_object_iter_next(jobj, iter);
     }
     }
-    return NULL;
+    
+    json_decref(jobj);
+    return exception;
 }
 }
 
 
-static hdfsFileInfo *parseJsonGFS(json_t *jobj, hdfsFileInfo *fileStat,
-                                  int *numEntries, const char *operation)
+/**
+ * Parse the response information which uses TRUE/FALSE 
+ * to indicate whether the operation succeeded
+ *
+ * @param response  Response information
+ * @return          0 to indicate success
+ */
+static int parseBoolean(const char *response)
 {
 {
-    const char *tempstr;
-    const char *key;
-    json_t *value;
-    void *iter = json_object_iter(jobj);
-    while(iter)  {
-        key = json_object_iter_key(iter);
-        value = json_object_iter_value(iter);
-        
-        switch (json_typeof(value)) {
-            case JSON_INTEGER:
-                if(!strcmp(key,"accessTime")) {
-                    fileStat->mLastAccess = (time_t)(json_integer_value(value)/1000);
-                } else if (!strcmp(key,"blockSize")) {
-                    fileStat->mBlockSize = (tOffset)json_integer_value(value);
-                } else if (!strcmp(key,"length")) {
-                    fileStat->mSize = (tOffset)json_integer_value(value);
-                } else if(!strcmp(key,"modificationTime")) {
-                    fileStat->mLastMod = (time_t)(json_integer_value(value)/1000);
-                } else if (!strcmp(key,"replication")) {
-                    fileStat->mReplication = (short)json_integer_value(value);
-                }
-                break;
-                
-            case JSON_STRING:
-                if(!strcmp(key,"group")) {
-                    fileStat->mGroup=(char *)json_string_value(value);
-                } else if (!strcmp(key,"owner")) {
-                    fileStat->mOwner=(char *)json_string_value(value);
-                } else if (!strcmp(key,"pathSuffix")) {
-                    fileStat->mName=(char *)json_string_value(value);
-                } else if (!strcmp(key,"permission")) {
-                    tempstr=(char *)json_string_value(value);
-                    fileStat->mPermissions = (short)strtol(tempstr,(char **)NULL,8);
-                } else if (!strcmp(key,"type")) {
-                    char *cvalue = (char *)json_string_value(value);
-                    if (!strcmp(cvalue, "DIRECTORY")) {
-                        fileStat->mKind = kObjectKindDirectory;
-                    } else {
-                        fileStat->mKind = kObjectKindFile;
-                    }
-                }
-                break;
-                
-            case JSON_OBJECT:
-                if(!strcmp(key,"FileStatus")) {
-                    parseJsonGFS(value, fileStat, numEntries, operation);
-                } else if (!strcmp(key,"FileStatuses")) {
-                    fileStat = parseJsonGFS(value, &fileStat[0], numEntries, operation);
-                } else if (!strcmp(key,"RemoteException")) {
-                    //Besides returning NULL, we also need to print the exception information
-                    struct jsonException *exception = parseJsonException(value);
-                    if (exception) {
-                        errno = printJsonException(exception, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
-                    }
-                    
-                    if(fileStat != NULL) {
-                        free(fileStat);
-                        fileStat = NULL;
-                    }
-                }
-                break;
-                
-            case JSON_ARRAY:
-                if (!strcmp(key,"FileStatus")) {
-                    fileStat = json_parse_array(value,(char *) key,fileStat,numEntries, operation);
-                }
-                break;
-                
-            default:
-                if(fileStat != NULL) {
-                    free(fileStat);
-                    fileStat = NULL;
-                }
-        }
-        iter = json_object_iter_next(jobj, iter);
+    json_t *root, *value;
+    json_error_t error;
+    size_t flags = 0;
+    int result = 0;
+    
+    root = json_loads(response, flags, &error);
+    if (!root) {
+        fprintf(stderr, "JSON parsing error on line %d: %s\n",
+                error.line, error.text);
+        return EIO;
+    }
+    void *iter = json_object_iter(root);
+    value = json_object_iter_value(iter);
+    if (json_typeof(value) == JSON_TRUE)  {
+        result = 0;
+    } else {
+        result = EIO;  // FALSE means error in remote NN/DN
     }
     }
-    return fileStat;
+    json_decref(root);
+    return result;
 }
 }
 
 
+int parseMKDIR(const char *response)
+{
+    return parseBoolean(response);
+}
+
+int parseRENAME(const char *response)
+{
+    return parseBoolean(response);
+}
+
+int parseDELETE(const char *response)
+{
+    return parseBoolean(response);
+}
 
 
-int checkHeader(char *header, const char *content, const char *operation) {
+int parseSETREPLICATION(const char *response)
+{
+    return parseBoolean(response);
+}
+
+/**
+ * Check the header of response to see if it's 200 OK
+ * 
+ * @param header    Header information for checking
+ * @param content   Stores exception information if there are errors
+ * @param operation Indicate the operation for exception printing
+ * @return 0 for success
+ */
+static int checkHeader(const char *header, const char *content,
+                       const char *operation)
+{
     char *result = NULL;
     char *result = NULL;
-    char delims[] = ":";
-    char *responseCode= "200 OK";
-    if(header == '\0' || strncmp(header, "HTTP/", strlen("HTTP/"))) {
-        return 0;
+    const char delims[] = ":";
+    char *savepter;
+    int ret = 0;
+    
+    if (!header || strncmp(header, "HTTP/", strlen("HTTP/"))) {
+        return EINVAL;
     }
     }
-    if(!(strstr(header, responseCode)) || !(header = strstr(header, "Content-Length"))) {
+    if (!(strstr(header, twoHundredOKCode)) ||
+       !(result = strstr(header, "Content-Length"))) {
         struct jsonException *exc = parseException(content);
         struct jsonException *exc = parseException(content);
         if (exc) {
         if (exc) {
-            errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
+            ret = printJsonException(exc, PRINT_EXC_ALL,
+                                       "Calling WEBHDFS (%s)", operation);
+        } else {
+            ret = EIO;
         }
         }
-        return 0;
+        return ret;
     }
     }
-    result = strtok(header, delims);
-    result = strtok(NULL,delims);
+    result = strtok_r(result, delims, &savepter);
+    result = strtok_r(NULL, delims, &savepter);
     while (isspace(*result)) {
     while (isspace(*result)) {
         result++;
         result++;
     }
     }
-    if(strcmp(result,"0")) {                 //Content-Length should be equal to 0
-        return 1;
-    } else {
-        return 0;
+    // Content-Length should be equal to 0,
+    // and the string should be "0\r\nServer"
+    if (strncmp(result, "0\r\n", 3)) {
+        ret = EIO;
     }
     }
+    return ret;
 }
 }
 
 
-int parseOPEN(const char *header, const char *content) {
-    const char *responseCode1 = "307 TEMPORARY_REDIRECT";
-    const char *responseCode2 = "200 OK";
-    if(header == '\0' || strncmp(header,"HTTP/",strlen("HTTP/"))) {
-        return -1;
-    }
-    if(!(strstr(header,responseCode1) && strstr(header, responseCode2))) {
-        struct jsonException *exc = parseException(content);
-        if (exc) {
-            //if the exception is an IOException and it is because the offset is out of the range
-            //do not print out the exception
-            if (!strcasecmp(exc->exception, "IOException") && strstr(exc->message, "out of the range")) {
-                return 0;
-            }
-            errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (OPEN)");
-        }
-        return -1;
-    }
-    
-    return 1;
-}
-
-int parseCHMOD(char *header, const char *content) {
+int parseCHMOD(const char *header, const char *content)
+{
     return checkHeader(header, content, "CHMOD");
     return checkHeader(header, content, "CHMOD");
 }
 }
 
 
-
-int parseCHOWN(char *header, const char *content) {
+int parseCHOWN(const char *header, const char *content)
+{
     return checkHeader(header, content, "CHOWN");
     return checkHeader(header, content, "CHOWN");
 }
 }
 
 
-int parseUTIMES(char *header, const char *content) {
-    return checkHeader(header, content, "UTIMES");
+int parseUTIMES(const char *header, const char *content)
+{
+    return checkHeader(header, content, "SETTIMES");
 }
 }
 
 
-
-int checkIfRedirect(const char *const headerstr, const char *content, const char *operation) {
-    char *responseCode = "307 TEMPORARY_REDIRECT";
-    char * locTag = "Location";
-    char * tempHeader;
-    if(headerstr == '\0' || strncmp(headerstr,"HTTP/", 5)) {
-        return 0;
-    }
-    if(!(tempHeader = strstr(headerstr,responseCode))) {
-        //process possible exception information
+/**
+ * Check if the header contains correct information
+ * ("307 TEMPORARY_REDIRECT" and "Location")
+ * 
+ * @param header    Header for parsing
+ * @param content   Contains exception information 
+ *                  if the remote operation failed
+ * @param operation Specify the remote operation when printing out exception
+ * @return 0 for success
+ */
+static int checkRedirect(const char *header,
+                         const char *content, const char *operation)
+{
+    const char *locTag = "Location";
+    int ret = 0, offset = 0;
+    
+    // The header must start with "HTTP/1.1"
+    if (!header || strncmp(header, httpHeaderString,
+                           strlen(httpHeaderString))) {
+        return EINVAL;
+    }
+    
+    offset += strlen(httpHeaderString);
+    while (isspace(header[offset])) {
+        offset++;
+    }
+    // Looking for "307 TEMPORARY_REDIRECT" in header
+    if (strncmp(header + offset, temporaryRedirectCode,
+                strlen(temporaryRedirectCode))) {
+        // Process possible exception information
         struct jsonException *exc = parseException(content);
         struct jsonException *exc = parseException(content);
         if (exc) {
         if (exc) {
-            errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
+            ret = printJsonException(exc, PRINT_EXC_ALL,
+                                     "Calling WEBHDFS (%s)", operation);
+        } else {
+            ret = EIO;
         }
         }
-        return 0;
+        return ret;
     }
     }
-    if(!(strstr(tempHeader,locTag))) {
-        return 0;
+    // Here we just simply check if header contains "Location" tag,
+    // detailed processing is in parseDnLoc
+    if (!(strstr(header, locTag))) {
+        ret = EIO;
     }
     }
-    return 1;
+    return ret;
 }
 }
 
 
-
-int parseNnWRITE(const char *header, const char *content) {
-    return checkIfRedirect(header, content, "Write(NameNode)");
+int parseNnWRITE(const char *header, const char *content)
+{
+    return checkRedirect(header, content, "Write(NameNode)");
 }
 }
 
 
+int parseNnAPPEND(const char *header, const char *content)
+{
+    return checkRedirect(header, content, "Append(NameNode)");
+}
 
 
-int parseNnAPPEND(const char *header, const char *content) {
-    return checkIfRedirect(header, content, "Append(NameNode)");
+/** 0 for success, -1 for out of range, other values for error */
+int parseOPEN(const char *header, const char *content)
+{
+    int ret = 0, offset = 0;
+    
+    if (!header || strncmp(header, httpHeaderString,
+                           strlen(httpHeaderString))) {
+        return EINVAL;
+    }
+    
+    offset += strlen(httpHeaderString);
+    while (isspace(header[offset])) {
+        offset++;
+    }
+    if (strncmp(header + offset, temporaryRedirectCode,
+                strlen(temporaryRedirectCode)) ||
+        !strstr(header, twoHundredOKCode)) {
+        struct jsonException *exc = parseException(content);
+        if (exc) {
+            // If the exception is an IOException and it is because
+            // the offset is out of the range, do not print out the exception
+            if (!strcasecmp(exc->exception, "IOException") &&
+                    strstr(exc->message, "out of the range")) {
+                ret = -1;
+            } else {
+                ret = printJsonException(exc, PRINT_EXC_ALL,
+                                       "Calling WEBHDFS (OPEN)");
+            }
+        } else {
+            ret = EIO;
+        }
+    }
+    return ret;
 }
 }
 
 
-char *parseDnLoc(char *content) {
-    char delims[] = "\r\n";
-    char *url = NULL;
-    char *DnLocation = NULL;
-    char *savepter;
-    DnLocation = strtok_r(content, delims, &savepter);
-    while (DnLocation && strncmp(DnLocation, "Location:", strlen("Location:"))) {
-        DnLocation = strtok_r(NULL, delims, &savepter);
+int parseDnLoc(char *content, char **dn)
+{
+    char *url = NULL, *dnLocation = NULL, *savepter, *tempContent;
+    const char *prefix = "Location: http://";
+    const char *prefixToRemove = "Location: ";
+    const char *delims = "\r\n";
+    
+    tempContent = strdup(content);
+    if (!tempContent) {
+        return ENOMEM;
     }
     }
-    if (!DnLocation) {
-        return NULL;
+    
+    dnLocation = strtok_r(tempContent, delims, &savepter);
+    while (dnLocation && strncmp(dnLocation, "Location:",
+                                 strlen("Location:"))) {
+        dnLocation = strtok_r(NULL, delims, &savepter);
     }
     }
-    DnLocation = strstr(DnLocation, "http");
-    if (!DnLocation) {
-        return NULL;
+    if (!dnLocation) {
+        return EIO;
     }
     }
-    url = malloc(strlen(DnLocation) + 1);
+    
+    while (isspace(*dnLocation)) {
+        dnLocation++;
+    }
+    if (strncmp(dnLocation, prefix, strlen(prefix))) {
+        return EIO;
+    }
+    url = strdup(dnLocation + strlen(prefixToRemove));
     if (!url) {
     if (!url) {
-        return NULL;
+        return ENOMEM;
     }
     }
-    strcpy(url, DnLocation);
-    return url;
+    *dn = url;
+    return 0;
 }
 }
 
 
-int parseDnWRITE(const char *header, const char *content) {
-    char *responseCode = "201 Created";
-    fprintf(stderr, "\nheaderstr is: %s\n", header);
-    if(header == '\0' || strncmp(header,"HTTP/",strlen("HTTP/"))) {
-        return 0;
+int parseDnWRITE(const char *header, const char *content)
+{
+    int ret = 0;
+    if (header == NULL || header[0] == '\0' ||
+                         strncmp(header, "HTTP/", strlen("HTTP/"))) {
+        return EINVAL;
     }
     }
-    if(!(strstr(header,responseCode))) {
+    if (!(strstr(header, twoHundredOneCreatedCode))) {
         struct jsonException *exc = parseException(content);
         struct jsonException *exc = parseException(content);
         if (exc) {
         if (exc) {
-            errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (WRITE(DataNode))");
+            ret = printJsonException(exc, PRINT_EXC_ALL,
+                                     "Calling WEBHDFS (WRITE(DataNode))");
+        } else {
+            ret = EIO;
         }
         }
-        return 0;
     }
     }
-    return 1;
+    return ret;
 }
 }
 
 
-int parseDnAPPEND(const char *header, const char *content) {
-    char *responseCode = "200 OK";
-    if(header == '\0' || strncmp(header, "HTTP/", strlen("HTTP/"))) {
-        return 0;
+int parseDnAPPEND(const char *header, const char *content)
+{
+    int ret = 0;
+    
+    if (header == NULL || header[0] == '\0' ||
+                         strncmp(header, "HTTP/", strlen("HTTP/"))) {
+        return EINVAL;
     }
     }
-    if(!(strstr(header, responseCode))) {
+    if (!(strstr(header, twoHundredOKCode))) {
         struct jsonException *exc = parseException(content);
         struct jsonException *exc = parseException(content);
         if (exc) {
         if (exc) {
-            errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (APPEND(DataNode))");
+            ret = printJsonException(exc, PRINT_EXC_ALL,
+                                     "Calling WEBHDFS (APPEND(DataNode))");
+        } else {
+            ret = EIO;
+        }
+    }
+    return ret;
+}
+
+/**
+ * Retrieve file status from the JSON object 
+ *
+ * @param jobj          JSON object for parsing, which contains 
+ *                      file status information
+ * @param fileStat      hdfsFileInfo handle to hold file status information
+ * @return 0 on success
+ */
+static int parseJsonForFileStatus(json_t *jobj, hdfsFileInfo *fileStat)
+{
+    const char *key, *tempstr;
+    json_t *value;
+    void *iter = NULL;
+    
+    iter = json_object_iter(jobj);
+    while (iter) {
+        key = json_object_iter_key(iter);
+        value = json_object_iter_value(iter);
+        
+        if (!strcmp(key, "accessTime")) {
+            // json field contains time in milliseconds,
+            // hdfsFileInfo is counted in seconds
+            fileStat->mLastAccess = json_integer_value(value) / 1000;
+        } else if (!strcmp(key, "blockSize")) {
+            fileStat->mBlockSize = json_integer_value(value);
+        } else if (!strcmp(key, "length")) {
+            fileStat->mSize = json_integer_value(value);
+        } else if (!strcmp(key, "modificationTime")) {
+            fileStat->mLastMod = json_integer_value(value) / 1000;
+        } else if (!strcmp(key, "replication")) {
+            fileStat->mReplication = json_integer_value(value);
+        } else if (!strcmp(key, "group")) {
+            fileStat->mGroup = strdup(json_string_value(value));
+            if (!fileStat->mGroup) {
+                return ENOMEM;
+            }
+        } else if (!strcmp(key, "owner")) {
+            fileStat->mOwner = strdup(json_string_value(value));
+            if (!fileStat->mOwner) {
+                return ENOMEM;
+            }
+        } else if (!strcmp(key, "pathSuffix")) {
+            fileStat->mName = strdup(json_string_value(value));
+            if (!fileStat->mName) {
+                return ENOMEM;
+            }
+        } else if (!strcmp(key, "permission")) {
+            tempstr = json_string_value(value);
+            fileStat->mPermissions = (short) strtol(tempstr, NULL, 8);
+        } else if (!strcmp(key, "type")) {
+            tempstr = json_string_value(value);
+            if (!strcmp(tempstr, "DIRECTORY")) {
+                fileStat->mKind = kObjectKindDirectory;
+            } else {
+                fileStat->mKind = kObjectKindFile;
+            }
         }
-        return 0;
+        // Go to the next key-value pair in the json object
+        iter = json_object_iter_next(jobj, iter);
     }
-    return 1;
+    return 0;
 }
 
 
-hdfsFileInfo *parseGFS(char *str, hdfsFileInfo *fileStat, int *numEntries) {
+int parseGFS(const char *response, hdfsFileInfo *fileStat, int printError)
+{
+    int ret = 0, printFlag;
     json_error_t error;
     size_t flags = 0;
-    json_t *jobj = json_loads(str, flags, &error);
-    fileStat = parseJsonGFS(jobj, fileStat, numEntries, "GETPATHSTATUS/LISTSTATUS");
-    return fileStat;
+    json_t *jobj, *value;
+    const char *key;
+    void *iter = NULL;
+    
+    if (!response || !fileStat) {
+        return EIO;
+    }
+    jobj = json_loads(response, flags, &error);
+    if (!jobj) {
+        fprintf(stderr, "error while parsing json: on line %d: %s\n",
+                error.line, error.text);
+        return EIO;
+    }
+    iter = json_object_iter(jobj);
+    key = json_object_iter_key(iter);
+    value = json_object_iter_value(iter);
+    if (json_typeof(value) == JSON_OBJECT) {
+        if (!strcmp(key, "RemoteException")) {
+            struct jsonException *exception = parseJsonException(value);
+            if (exception) {
+                if (printError) {
+                    printFlag = PRINT_EXC_ALL;
+                } else {
+                    printFlag = NOPRINT_EXC_FILE_NOT_FOUND |
+                                NOPRINT_EXC_ACCESS_CONTROL |
+                                NOPRINT_EXC_PARENT_NOT_DIRECTORY;
+                }
+                ret = printJsonException(exception, printFlag,
+                                         "Calling WEBHDFS GETFILESTATUS");
+            } else {
+                ret = EIO;
+            }
+        } else if (!strcmp(key, "FileStatus")) {
+            ret = parseJsonForFileStatus(value, fileStat);
+        } else {
+            ret = EIO;
+        }
+        
+    } else {
+        ret = EIO;
+    }
+    
+    json_decref(jobj);
+    return ret;
 }
 
 
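For reference, a standalone sketch of driving the new parseGFS() with a canned GETFILESTATUS body; the JSON below mirrors the example shape in the WebHDFS documentation and is illustrative, not captured from a running cluster:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "hdfs.h"               /* hdfsFileInfo */
#include "hdfs_json_parser.h"   /* parseGFS() */

int main(void)
{
    const char *resp =
        "{\"FileStatus\":{\"accessTime\":1320171722771,\"blockSize\":33554432,"
        "\"group\":\"supergroup\",\"length\":24930,"
        "\"modificationTime\":1320171722771,\"owner\":\"webuser\","
        "\"pathSuffix\":\"\",\"permission\":\"644\",\"replication\":1,"
        "\"type\":\"FILE\"}}";
    hdfsFileInfo info;
    memset(&info, 0, sizeof(info));

    if (parseGFS(resp, &info, 1)) {      /* 1 => print any RemoteException */
        fprintf(stderr, "parseGFS failed\n");
        return 1;
    }
    printf("size=%lld, perm=%o, replication=%d\n",
           (long long) info.mSize, (unsigned) info.mPermissions,
           (int) info.mReplication);
    free(info.mOwner);                   /* the parser strdup's the string fields */
    free(info.mGroup);
    free(info.mName);
    return 0;
}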
-int parseSETREPLICATION(char *response) {
-    return (parseBoolean(response));
+/**
+ * Parse the JSON array. Called to parse the result of 
+ * the LISTSTATUS operation. Thus each element of the JSON array is 
+ * a JSON object with the information of a file entry contained 
+ * in the folder.
+ *
+ * @param jobj          The JSON array to be parsed
+ * @param fileStat      The hdfsFileInfo handle used to 
+ *                      store a group of file information
+ * @param numEntries    Capture the number of files in the folder
+ * @return              0 for success
+ */
+static int parseJsonArrayForFileStatuses(json_t *jobj, hdfsFileInfo **fileStat,
+                                         int *numEntries)
+{
+    json_t *jvalue = NULL;
+    int i = 0, ret = 0, arraylen = 0;
+    hdfsFileInfo *fileInfo = NULL;
+    
+    arraylen = (int) json_array_size(jobj);
+    if (arraylen > 0) {
+        fileInfo = calloc(arraylen, sizeof(hdfsFileInfo));
+        if (!fileInfo) {
+            return ENOMEM;
+        }
+    }
+    for (i = 0; i < arraylen; i++) {
+        //Getting the array element at position i
+        jvalue = json_array_get(jobj, i);
+        if (json_is_object(jvalue)) {
+            ret = parseJsonForFileStatus(jvalue, &fileInfo[i]);
+            if (ret) {
+                goto done;
+            }
+        } else {
+            ret = EIO;
+            goto done;
+        }
+    }
+done:
+    if (ret) {
+        free(fileInfo);
+    } else {
+        *numEntries = arraylen;
+        *fileStat = fileInfo;
+    }
+    return ret;
 }
 
 
+int parseLS(const char *response, hdfsFileInfo **fileStats, int *numOfEntries)
+{
+    int ret = 0;
+    json_error_t error;
+    size_t flags = 0;
+    json_t *jobj, *value;
+    const char *key;
+    void *iter = NULL;
+    
+    if (!response || response[0] == '\0' || !fileStats) {
+        return EIO;
+    }
+    jobj = json_loads(response, flags, &error);
+    if (!jobj) {
+        fprintf(stderr, "error while parsing json: on line %d: %s\n",
+                error.line, error.text);
+        return EIO;
+    }
+    
+    iter = json_object_iter(jobj);
+    key = json_object_iter_key(iter);
+    value = json_object_iter_value(iter);
+    if (json_typeof(value) == JSON_OBJECT) {
+        if (!strcmp(key, "RemoteException")) {
+            struct jsonException *exception = parseJsonException(value);
+            if (exception) {
+                ret = printJsonException(exception, PRINT_EXC_ALL,
+                                         "Calling WEBHDFS GETFILESTATUS");
+            } else {
+                ret = EIO;
+            }
+        } else if (!strcmp(key, "FileStatuses")) {
+            iter = json_object_iter(value);
+            value = json_object_iter_value(iter);
+            if (json_is_array(value)) {
+                ret = parseJsonArrayForFileStatuses(value, fileStats,
+                                                    numOfEntries);
+            } else {
+                ret = EIO;
+            }
+        } else {
+            ret = EIO;
+        }
+    } else {
+        ret = EIO;
+    }
+    
+    json_decref(jobj);
+    return ret;
+}
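Similarly, a hypothetical caller of the new parseLS(); listResponse is assumed to hold the JSON body of a LISTSTATUS reply, and hdfsFreeFileInfo() from hdfs.h releases the returned array:

#include <stdio.h>
#include "hdfs.h"               /* hdfsFileInfo, hdfsFreeFileInfo() */
#include "hdfs_json_parser.h"   /* parseLS() */

static void printListing(const char *listResponse)
{
    hdfsFileInfo *entries = NULL;
    int numEntries = 0, i;

    if (parseLS(listResponse, &entries, &numEntries)) {
        fprintf(stderr, "parseLS failed\n");
        return;
    }
    for (i = 0; i < numEntries; i++) {
        printf("%s\t%lld bytes\n", entries[i].mName,
               (long long) entries[i].mSize);
    }
    hdfsFreeFileInfo(entries, numEntries);
}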

+ 141 - 21
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.h

@@ -18,41 +18,161 @@
 #ifndef _HDFS_JSON_PARSER_H_
 #define _HDFS_JSON_PARSER_H_
 
 
-struct jsonException;
+/**
+ * Parse the response for MKDIR request. The response uses TRUE/FALSE 
+ * to indicate whether the operation succeeded.
+ *
+ * @param response  The response information to parse.
+ * @return 0 for success
+ */
+int parseMKDIR(const char *response);
 
 
 /**
- * Print out JSON exception information.
+ * Parse the response for RENAME request. The response uses TRUE/FALSE
+ * to indicate whether the operation succeeded.
  *
- * @param exc             The exception information to print and free
- * @param noPrintFlags    Flags which determine which exceptions we should NOT
- *                        print.
- * @param fmt             Printf-style format list
- * @param ...             Printf-style varargs
+ * @param response  The response information to parse.
+ * @return 0 for success
+ */
+int parseRENAME(const char *response);
+
+/**
+ * Parse the response for DELETE request. The response uses TRUE/FALSE
+ * to indicate whether the operation succeeded.
  *
- * @return                The POSIX error number associated with the exception
- *                        object.
+ * @param response  The response information to parse.
+ * @return 0 for success
  */
-int printJsonException(struct jsonException *exc, int noPrintFlags,
-                       const char *fmt, ...);
+int parseDELETE(const char *response);
 
 
-int parseMKDIR(char *response);
-int parseRENAME(char *response);
-int parseDELETE (char *response);
-int parseSETREPLICATION(char *response);
+/**
+ * Parse the response for SETREPLICATION request. The response uses TRUE/FALSE
+ * to indicate whether the operation succeeded.
+ *
+ * @param response  The response information to parse.
+ * @return 0 for success
+ */
+int parseSETREPLICATION(const char *response);
 
 
+/**
+ * Parse the response for OPEN (read) request. A successful operation 
+ * will return "200 OK".
+ *
+ * @param response  The response information for parsing
+ * @return          0 for success, -1 for out of range, other values for error
+ */
 int parseOPEN(const char *header, const char *content);
 
 
+/**
+ * Parse the response for WRITE (from NameNode) request. 
+ * A successful operation should return "307 TEMPORARY_REDIRECT" in its header.
+ *
+ * @param header    The header of the http response
+ * @param content   If failing, the exception message 
+ *                  sent from NameNode is stored in content
+ * @return          0 for success
+ */
 int parseNnWRITE(const char *header, const char *content);
+
+/**
+ * Parse the response for WRITE (from DataNode) request. 
+ * A successful operation should return "201 Created" in its header.
+ * 
+ * @param header    The header of the http response
+ * @param content   If failing, the exception message
+ *                  sent from DataNode is stored in content
+ * @return          0 for success
+ */
 int parseDnWRITE(const char *header, const char *content);
+
+/**
+ * Parse the response for APPEND (sent from NameNode) request.
+ * A successful operation should return "307 TEMPORARY_REDIRECT" in its header.
+ *
+ * @param header    The header of the http response
+ * @param content   If failing, the exception message
+ *                  sent from NameNode is stored in content
+ * @return          0 for success
+ */
 int parseNnAPPEND(const char *header, const char *content);
+
+/**
+ * Parse the response for APPEND (from DataNode) request.
+ * A successful operation should return "200 OK" in its header.
+ *
+ * @param header    The header of the http response
+ * @param content   If failing, the exception message
+ *                  sent from DataNode is stored in content
+ * @return          0 for success
+ */
 int parseDnAPPEND(const char *header, const char *content);
 
 
-char* parseDnLoc(char *content);
+/**
+ * Parse the response (from NameNode) to get the location information 
+ * of the DataNode that should be contacted for the following write operation.
+ *
+ * @param content   Content of the http header
+ * @param dn        To store the location of the DataNode for writing
+ * @return          0 for success
+ */
+int parseDnLoc(char *content, char **dn) __attribute__ ((warn_unused_result));
+
+/**
+ * Parse the response for GETFILESTATUS operation.
+ *
+ * @param response      Response to parse. Its detailed format is specified in 
+ *            "http://hadoop.apache.org/docs/stable/webhdfs.html#GETFILESTATUS"
+ * @param fileStat      A hdfsFileInfo handle for holding file information
+ * @param printError    Whether or not print out exception 
+ *                      when file does not exist
+ * @return 0 for success, non-zero value to indicate error
+ */
+int parseGFS(const char *response, hdfsFileInfo *fileStat, int printError);
 
 
-hdfsFileInfo *parseGFS(char *response, hdfsFileInfo *fileStat, int *numEntries);
+/**
+ * Parse the response for LISTSTATUS operation.
+ *
+ * @param response      Response to parse. Its detailed format is specified in
+ *            "http://hadoop.apache.org/docs/r1.0.3/webhdfs.html#LISTSTATUS"
+ * @param fileStats     Pointer pointing to a list of hdfsFileInfo handles 
+ *                      holding file/dir information in the directory
+ * @param numEntries    After parsing, the value of this parameter indicates
+ *                      the number of file entries.
+ * @return 0 for success, non-zero value to indicate error
+ */
+int parseLS(const char *response, hdfsFileInfo **fileStats, int *numOfEntries);
 
 
-int parseCHOWN (char *header, const char *content);
-int parseCHMOD (char *header, const char *content);
-int parseUTIMES(char *header, const char *content);
+/**
+ * Parse the response for CHOWN request.
+ * A successful operation should contain "200 OK" in its header,
+ * and the Content-Length should be 0.
+ *
+ * @param header    The header of the http response
+ * @param content   If failing, the exception message is stored in content
+ * @return          0 for success
+ */
+int parseCHOWN(const char *header, const char *content);
+
+/**
+ * Parse the response for CHMOD request.
+ * A successful operation should contain "200 OK" in its header,
+ * and the Content-Length should be 0.
+ *
+ * @param header    The header of the http response
+ * @param content   If failing, the exception message is stored in content
+ * @return          0 for success
+ */
+int parseCHMOD(const char *header, const char *content);
+
+/**
+ * Parse the response for SETTIMES request.
+ * A successful operation should contain "200 OK" in its header,
+ * and the Content-Length should be 0.
+ *
+ * @param header    The header of the http response
+ * @param content   If failing, the exception message is stored in content
+ * @return          0 for success
+ */
+int parseUTIMES(const char *header, const char *content);
 
 
-#endif //_FUSE_JSON_PARSER_H
+#endif //_HDFS_JSON_PARSER_H_
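As the prototypes above show, the boolean-style operations (MKDIR/RENAME/DELETE/SETREPLICATION) now share one contract: pass the JSON body in, 0 means success. A tiny sketch, assuming the usual {"boolean":true|false} WebHDFS reply shape:

#include <stdio.h>
#include "hdfs_json_parser.h"

int main(void)
{
    const char *ok   = "{\"boolean\":true}";
    const char *fail = "{\"boolean\":false}";

    printf("parseMKDIR(ok)   -> %d (0 expected)\n", parseMKDIR(ok));
    printf("parseMKDIR(fail) -> %d (non-zero expected)\n", parseMKDIR(fail));
    return 0;
}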

The file diff has been suppressed because it is too large
+ 379 - 230
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c


+ 0 - 180
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_multi_write.c

@@ -1,180 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "expect.h"
-#include "hdfs.h"
-
-#include <errno.h>
-#include <semaphore.h>
-#include <pthread.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include "hdfs_http_client.h"
-#include "hdfs_http_query.h"
-#include "hdfs_json_parser.h"
-#include <unistd.h>
-#include <curl/curl.h>
-
-#define TLH_MAX_THREADS 100
-
-static sem_t *tlhSem;
-
-static const char *nn;
-static const char *user;
-static int port;
-
-static const char *fileName = "/tmp/tlhData";
-
-struct tlhThreadInfo {
-    /** Thread index */
-    int threadIdx;
-    /** 0 = thread was successful; error code otherwise */
-    int success;
-    /** pthread identifier */
-    pthread_t thread;
-};
-
-static int hdfsSingleNameNodeConnect(const char *nn, int port, const char *user, hdfsFS *fs)
-{
-    hdfsFS hdfs;
-    if (port < 0) {
-        fprintf(stderr, "hdfsSingleNameNodeConnect: nmdGetNameNodePort "
-                "returned error %d\n", port);
-        return port;
-    }
-    
-    hdfs = hdfsConnectAsUserNewInstance(nn, port, user);
-    if (!hdfs) {
-        return -errno;
-    }
-    *fs = hdfs;
-    return 0;
-}
-
-static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs)
-{
-    hdfsFile file;
-    int ret = 0;
-    char buffer[1024 * (ti->threadIdx + 1)];
-    memset(buffer, 'a', sizeof(buffer));
-
-    file = hdfsOpenFile(fs, "/tmp/thread_test.txt", O_WRONLY, 0, 0, 0);
-    sleep(1);
-    hdfsCloseFile(fs, file);
-    return ret;
-}
-
-static void *testHdfsOperations(void *v)
-{
-    struct tlhThreadInfo *ti = (struct tlhThreadInfo*)v;
-    hdfsFS fs = NULL;
-    int ret;
-    
-    fprintf(stderr, "testHdfsOperations(threadIdx=%d): starting\n",
-            ti->threadIdx);
-    ret = hdfsSingleNameNodeConnect(nn, port, user, &fs);
-    if (ret) {
-        fprintf(stderr, "testHdfsOperations(threadIdx=%d): "
-                "hdfsSingleNameNodeConnect failed with error %d.\n",
-                ti->threadIdx, ret);
-        ti->success = EIO;
-        return NULL;
-    }
-    ti->success = doTestHdfsOperations(ti, fs);
-    if (hdfsDisconnect(fs)) {
-        ret = errno;
-        fprintf(stderr, "hdfsDisconnect error %d\n", ret);
-        ti->success = ret;
-    }
-    return NULL;
-}
-
-static int checkFailures(struct tlhThreadInfo *ti, int tlhNumThreads)
-{
-    int i, threadsFailed = 0;
-    const char *sep = "";
-    
-    for (i = 0; i < tlhNumThreads; i++) {
-        if (ti[i].success != 0) {
-            threadsFailed = 1;
-        }
-    }
-    if (!threadsFailed) {
-        fprintf(stderr, "testLibHdfs: all threads succeeded.  SUCCESS.\n");
-        return EXIT_SUCCESS;
-    }
-    fprintf(stderr, "testLibHdfs: some threads failed: [");
-    for (i = 0; i < tlhNumThreads; i++) {
-        if (ti[i].success != 0) {
-            fprintf(stderr, "%s%d", sep, i);
-            sep = ", ";
-        }
-    }
-    fprintf(stderr, "].  FAILURE.\n");
-    return EXIT_FAILURE;
-}
-
-/**
- * Test that we can write a file with libhdfs and then read it back
- */
-int main(int argc, const char *args[])
-{
-    if (argc != 4) {
-        fprintf(stderr, "usage: test_libhdfs_threaded <namenode> <port> <username>");
-        return -1;
-    }
-    
-    nn = args[1];
-    port = atoi(args[2]);
-    user = args[3];
-    
-    int i, tlhNumThreads;
-    const char *tlhNumThreadsStr;
-    struct tlhThreadInfo ti[TLH_MAX_THREADS];
-    
-    tlhNumThreadsStr = getenv("TLH_NUM_THREADS");
-    if (!tlhNumThreadsStr) {
-        tlhNumThreadsStr = "3";
-    }
-    tlhNumThreads = atoi(tlhNumThreadsStr);
-    if ((tlhNumThreads <= 0) || (tlhNumThreads > TLH_MAX_THREADS)) {
-        fprintf(stderr, "testLibHdfs: must have a number of threads "
-                "between 1 and %d inclusive, not %d\n",
-                TLH_MAX_THREADS, tlhNumThreads);
-        return EXIT_FAILURE;
-    }
-    memset(&ti[0], 0, sizeof(ti));
-    for (i = 0; i < tlhNumThreads; i++) {
-        ti[i].threadIdx = i;
-    }
-    
-    tlhSem = sem_open("sem", O_CREAT, 0644, tlhNumThreads);
-    
-    for (i = 0; i < tlhNumThreads; i++) {
-        fprintf(stderr, "\ncreating thread %d\n", i);
-        EXPECT_ZERO(pthread_create(&ti[i].thread, NULL,
-                                   testHdfsOperations, &ti[i]));
-    }
-    for (i = 0; i < tlhNumThreads; i++) {
-        EXPECT_ZERO(pthread_join(ti[i].thread, NULL));
-    }
-    
-    EXPECT_ZERO(sem_close(tlhSem));
-    return checkFailures(ti, tlhNumThreads);
-}

+ 290 - 243
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c

@@ -17,6 +17,7 @@
  */
 
 
 #include "hdfs.h"
 #include "hdfs.h"
+#include "native_mini_dfs.h"
 
 
 #include <inttypes.h>
 #include <jni.h>
@@ -26,228 +27,254 @@
 #include <time.h>
 #include <unistd.h>
 
 
-void permission_disp(short permissions, char *rtr) {
+static struct NativeMiniDfsCluster *cluster;
+
+void permission_disp(short permissions, char *rtr)
+{
     rtr[9] = '\0';
     int i;
-    for(i=2;i>=0;i--)
+    short perm;
+    for(i = 2; i >= 0; i--)
     {
-        short permissionsId = permissions >> (i * 3) & (short)7;
-        char* perm;
-        switch(permissionsId) {
-            case 7:
-                perm = "rwx"; break;
-            case 6:
-                perm = "rw-"; break;
-            case 5:
-                perm = "r-x"; break;
-            case 4:
-                perm = "r--"; break;
-            case 3:
-                perm = "-wx"; break;
-            case 2:
-                perm = "-w-"; break;
-            case 1:
-                perm = "--x"; break;
-            case 0:
-                perm = "---"; break;
-            default:
-                perm = "???";
-        }
-        strncpy(rtr, perm, 3);
-        rtr+=3;
+        perm = permissions >> (i * 3);
+        rtr[0] = perm & 4 ? 'r' : '-';
+        rtr[1] = perm & 2 ? 'w' : '-';
+        rtr[2] = perm & 1 ? 'x' : '-';
+        rtr += 3;
     }
 }
 
 
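The rewritten permission_disp() above replaces the switch table with bit tests. For a quick desk check, here is a standalone paraphrase of the same loop (renamed perm_to_string to avoid clashing with the test file); it is a sketch, not code from this patch:

#include <stdio.h>

/* Each octal digit of the mode becomes an rwx triple, highest digit first. */
static void perm_to_string(short permissions, char *rtr)
{
    int i;
    short perm;
    rtr[9] = '\0';
    for (i = 2; i >= 0; i--) {
        perm = permissions >> (i * 3);
        rtr[0] = (perm & 4) ? 'r' : '-';
        rtr[1] = (perm & 2) ? 'w' : '-';
        rtr[2] = (perm & 1) ? 'x' : '-';
        rtr += 3;
    }
}

int main(void)
{
    char buf[10];
    perm_to_string(0754, buf);
    printf("%s\n", buf);   /* prints rwxr-xr-- */
    return 0;
}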
-int main(int argc, char **argv) {
+int main(int argc, char **argv)
+{
+    char buffer[32];
+    tSize num_written_bytes;
+    const char* slashTmp = "/tmp";
+    int nnPort;
+    char *rwTemplate, *rwTemplate2, *newDirTemplate,
+    *appendTemplate, *userTemplate, *rwPath = NULL;
+    const char* fileContents = "Hello, World!";
+    const char* nnHost = NULL;
+    
     if (argc != 2) {
         fprintf(stderr, "usage: test_libwebhdfs_ops <username>\n");
         fprintf(stderr, "usage: test_libwebhdfs_ops <username>\n");
-        return -1;
+        exit(1);
     }
     
     
-    char buffer[32];
-    tSize num_written_bytes;
+    struct NativeMiniDfsConf conf = {
+        .doFormat = 1, .webhdfsEnabled = 1, .namenodeHttpPort = 50070,
+    };
+    cluster = nmdCreate(&conf);
+    if (!cluster) {
+        fprintf(stderr, "Failed to create the NativeMiniDfsCluster.\n");
+        exit(1);
+    }
+    if (nmdWaitClusterUp(cluster)) {
+        fprintf(stderr, "Error when waiting for cluster to be ready.\n");
+        exit(1);
+    }
+    if (nmdGetNameNodeHttpAddress(cluster, &nnPort, &nnHost)) {
+        fprintf(stderr, "Error when retrieving namenode host address.\n");
+        exit(1);
+    }
     
     
-    hdfsFS fs = hdfsConnectAsUserNewInstance("default", 50070, argv[1]);
+    hdfsFS fs = hdfsConnectAsUserNewInstance(nnHost, nnPort, argv[1]);
     if(!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
         fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
         exit(-1);
     }
     
     
-    const char* writePath = "/tmp/testfile.txt";
-    const char* fileContents = "Hello, World!";
-    
     {
-        //Write tests
-        
-        hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+        // Write tests
+        rwTemplate = strdup("/tmp/helloWorldXXXXXX");
+        if (!rwTemplate) {
+            fprintf(stderr, "Failed to create rwTemplate!\n");
+            exit(1);
+        }
+        rwPath = mktemp(rwTemplate);
+        // hdfsOpenFile
+        hdfsFile writeFile = hdfsOpenFile(fs, rwPath,
+                                          O_WRONLY|O_CREAT, 0, 0, 0);
+
         if(!writeFile) {
-            fprintf(stderr, "Failed to open %s for writing!\n", writePath);
-            exit(-1);
+            fprintf(stderr, "Failed to open %s for writing!\n", rwPath);
+            exit(1);
         }
         }
-        fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
-        num_written_bytes = hdfsWrite(fs, writeFile, (void*)fileContents, strlen(fileContents) + 1);
+        fprintf(stderr, "Opened %s for writing successfully...\n", rwPath);
+        // hdfsWrite
+        num_written_bytes = hdfsWrite(fs, writeFile, (void*)fileContents,
+                                      (int) strlen(fileContents) + 1);
         if (num_written_bytes != strlen(fileContents) + 1) {
-            fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
-                    (int)(strlen(fileContents) + 1), (int)num_written_bytes);
-            exit(-1);
+            fprintf(stderr, "Failed to write correct number of bytes - "
+                    "expected %d, got %d\n",
+                    (int)(strlen(fileContents) + 1), (int) num_written_bytes);
+            exit(1);
         }
         }
         fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
         fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
         
         
+        // hdfsTell
         tOffset currentPos = -1;
         tOffset currentPos = -1;
         if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
         if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
             fprintf(stderr,
             fprintf(stderr,
-                    "Failed to get current file position correctly! Got %lld!\n",
-                    currentPos);
-            exit(-1);
-        }
-        fprintf(stderr, "Current position: %lld\n", currentPos);
-        
-        if (hdfsFlush(fs, writeFile)) {
-            fprintf(stderr, "Failed to 'flush' %s\n", writePath);
-            exit(-1);
-        }
-        fprintf(stderr, "Flushed %s successfully!\n", writePath);
-        
-        if (hdfsHFlush(fs, writeFile)) {
-            fprintf(stderr, "Failed to 'hflush' %s\n", writePath);
-            exit(-1);
+                    "Failed to get current file position correctly. Got %"
+                    PRId64 "!\n", currentPos);
+            exit(1);
         }
         }
-        fprintf(stderr, "HFlushed %s successfully!\n", writePath);
+        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
         
         
         hdfsCloseFile(fs, writeFile);
         hdfsCloseFile(fs, writeFile);
+        // Done test write
     }
     }
     
     
+    sleep(1);
+    
     {
     {
         //Read tests
         //Read tests
-        sleep(1);
-        const char* readPath = "/tmp/testfile.txt";
-        int exists = hdfsExists(fs, readPath);
+        int available = 0, exists = 0;
         
         
+        // hdfsExists
+        exists = hdfsExists(fs, rwPath);
         if (exists) {
         if (exists) {
-            fprintf(stderr, "Failed to validate existence of %s\n", readPath);
-            exists = hdfsExists(fs, readPath);
+            fprintf(stderr, "Failed to validate existence of %s\n", rwPath);
+            exists = hdfsExists(fs, rwPath);
             if (exists) {
             if (exists) {
-                fprintf(stderr, "Still failed to validate existence of %s\n", readPath);
-                exit(-1);
+                fprintf(stderr,
+                        "Still failed to validate existence of %s\n", rwPath);
+                exit(1);
             }
             }
         }
         }
         
         
-        hdfsFile readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
+        hdfsFile readFile = hdfsOpenFile(fs, rwPath, O_RDONLY, 0, 0, 0);
         if (!readFile) {
-            fprintf(stderr, "Failed to open %s for reading!\n", readPath);
-            exit(-1);
+            fprintf(stderr, "Failed to open %s for reading!\n", rwPath);
+            exit(1);
         }
         }
-        
         if (!hdfsFileIsOpenForRead(readFile)) {
         if (!hdfsFileIsOpenForRead(readFile)) {
             fprintf(stderr, "hdfsFileIsOpenForRead: we just opened a file "
             fprintf(stderr, "hdfsFileIsOpenForRead: we just opened a file "
                     "with O_RDONLY, and it did not show up as 'open for "
                     "with O_RDONLY, and it did not show up as 'open for "
                     "read'\n");
                     "read'\n");
-            exit(-1);
+            exit(1);
         }
         }
         
         
-        fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));
+        available = hdfsAvailable(fs, readFile);
+        fprintf(stderr, "hdfsAvailable: %d\n", available);
         
         
+        // hdfsSeek, hdfsTell
         tOffset seekPos = 1;
         tOffset seekPos = 1;
         if(hdfsSeek(fs, readFile, seekPos)) {
         if(hdfsSeek(fs, readFile, seekPos)) {
-            fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
-            exit(-1);
+            fprintf(stderr, "Failed to seek %s for reading!\n", rwPath);
+            exit(1);
         }
         }
         
         
         tOffset currentPos = -1;
         tOffset currentPos = -1;
         if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
         if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
             fprintf(stderr,
             fprintf(stderr,
-                    "Failed to get current file position correctly! Got %lld!\n",
-                    currentPos);
-            exit(-1);
-        }
-        fprintf(stderr, "Current position: %lld\n", currentPos);
-        
-        if (!hdfsFileUsesDirectRead(readFile)) {
-            fprintf(stderr, "Direct read support incorrectly not detected "
-                    "for HDFS filesystem\n");
-            exit(-1);
+                    "Failed to get current file position correctly! Got %"
+                    PRId64 "!\n", currentPos);
+
+            exit(1);
         }
         }
+        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
         
         
-        fprintf(stderr, "Direct read support detected for HDFS\n");
-        
-        // Test the direct read path
         if(hdfsSeek(fs, readFile, 0)) {
         if(hdfsSeek(fs, readFile, 0)) {
-            fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
-            exit(-1);
+            fprintf(stderr, "Failed to seek %s for reading!\n", rwPath);
+            exit(1);
         }
         }
+        
+        // hdfsRead
         memset(buffer, 0, sizeof(buffer));
         memset(buffer, 0, sizeof(buffer));
-        tSize num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
-                                        sizeof(buffer));
+        tSize num_read_bytes = hdfsRead(fs, readFile, buffer, sizeof(buffer));
         if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
         if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
-            fprintf(stderr, "Failed to read (direct). Expected %s but got %s (%d bytes)\n",
+            fprintf(stderr, "Failed to read (direct). "
+                    "Expected %s but got %s (%d bytes)\n",
                     fileContents, buffer, num_read_bytes);
                     fileContents, buffer, num_read_bytes);
-            exit(-1);
+            exit(1);
         }
         }
         fprintf(stderr, "Read following %d bytes:\n%s\n",
         fprintf(stderr, "Read following %d bytes:\n%s\n",
                 num_read_bytes, buffer);
                 num_read_bytes, buffer);
+        
         if (hdfsSeek(fs, readFile, 0L)) {
         if (hdfsSeek(fs, readFile, 0L)) {
             fprintf(stderr, "Failed to seek to file start!\n");
             fprintf(stderr, "Failed to seek to file start!\n");
-            exit(-1);
+            exit(1);
         }
         }
         
         
-        // Disable the direct read path so that we really go through the slow
-        // read path
-        hdfsFileDisableDirectRead(readFile);
-        
-        num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
-                                  sizeof(buffer));
-        fprintf(stderr, "Read following %d bytes:\n%s\n",
-                num_read_bytes, buffer);
-        
+        // hdfsPread
         memset(buffer, 0, strlen(fileContents + 1));
         memset(buffer, 0, strlen(fileContents + 1));
-        
-        num_read_bytes = hdfsPread(fs, readFile, 0, (void*)buffer,
-                                   sizeof(buffer));
+        num_read_bytes = hdfsPread(fs, readFile, 0, buffer, sizeof(buffer));
         fprintf(stderr, "Read following %d bytes:\n%s\n",
         fprintf(stderr, "Read following %d bytes:\n%s\n",
                 num_read_bytes, buffer);
                 num_read_bytes, buffer);
         
         
         hdfsCloseFile(fs, readFile);
         hdfsCloseFile(fs, readFile);
+        // Done test read
     }
     }
     
     
     int totalResult = 0;
     int totalResult = 0;
     int result = 0;
     int result = 0;
     {
     {
         //Generic file-system operations
         //Generic file-system operations
+        char *srcPath = rwPath;
+        char buffer[256];
+        const char *resp;
+        rwTemplate2 = strdup("/tmp/helloWorld2XXXXXX");
+        if (!rwTemplate2) {
+            fprintf(stderr, "Failed to create rwTemplate2!\n");
+            exit(1);
+        }
+        char *dstPath = mktemp(rwTemplate2);
+        newDirTemplate = strdup("/tmp/newdirXXXXXX");
+        if (!newDirTemplate) {
+            fprintf(stderr, "Failed to create newDirTemplate!\n");
+            exit(1);
+        }
+        char *newDirectory = mktemp(newDirTemplate);
         
         
-        const char* srcPath = "/tmp/testfile.txt";
-        const char* dstPath = "/tmp/testfile2.txt";
-        const char* copyPath = "/tmp/testfile_copy.txt";
-        const char* movePath = "/tmp/testfile_move.txt";
-        
-        fprintf(stderr, "hdfsCopy: %s\n", ((result = hdfsCopy(fs, srcPath, fs, copyPath)) ? "Failed!" : "Success!"));
-        totalResult += result;
-        fprintf(stderr, "hdfsMove: %s\n", ((result = hdfsMove(fs, copyPath, fs, movePath)) ? "Failed!" : "Success!"));
-        totalResult += result;
-        
-        fprintf(stderr, "hdfsGetDefaultBlockSize: %lld\n", hdfsGetDefaultBlockSize(fs));
-        
-        fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, srcPath, dstPath)) ? "Failed!" : "Success!"));
+        // hdfsRename
+        fprintf(stderr, "hdfsRename: %s\n",
+                ((result = hdfsRename(fs, rwPath, dstPath)) ?
+                 "Failed!" : "Success!"));
         totalResult += result;
         totalResult += result;
-        fprintf(stderr, "hdfsRename back: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsRename back: %s\n",
+                ((result = hdfsRename(fs, dstPath, srcPath)) ?
+                 "Failed!" : "Success!"));
         totalResult += result;
         totalResult += result;
         
         
-        const char* slashTmp = "/tmp";
-        const char* newDirectory = "/tmp/newdir";
-        fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) ? "Failed!" : "Success!"));
+        // hdfsCreateDirectory
+        fprintf(stderr, "hdfsCreateDirectory: %s\n",
+                ((result = hdfsCreateDirectory(fs, newDirectory)) ?
+                 "Failed!" : "Success!"));
         totalResult += result;
         totalResult += result;
         
         
-        fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 1)) ? "Failed!" : "Success!"));
+        // hdfsSetReplication
+        fprintf(stderr, "hdfsSetReplication: %s\n",
+                ((result = hdfsSetReplication(fs, srcPath, 1)) ?
+                 "Failed!" : "Success!"));
         totalResult += result;
         totalResult += result;
-        
-        char buffer[256];
-        const char *resp;
-        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
+
+        // hdfsGetWorkingDirectory, hdfsSetWorkingDirectory
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n",
+                ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ?
+                 buffer : "Failed!"));
         totalResult += (resp ? 0 : 1);
         totalResult += (resp ? 0 : 1);
-        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) ? "Failed!" : "Success!"));
+
+        const char* path[] = {"/foo", "/foo/bar", "foobar", "//foo/bar//foobar",
+                              "foo//bar", "foo/bar///", "/", "////"};
+        for (int i = 0; i < 8; i++) {
+            fprintf(stderr, "hdfsSetWorkingDirectory: %s, %s\n",
+                    ((result = hdfsSetWorkingDirectory(fs, path[i])) ?
+                     "Failed!" : "Success!"),
+                    hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer)));
+            totalResult += result;
+        }
+
+        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n",
+                ((result = hdfsSetWorkingDirectory(fs, slashTmp)) ?
+                 "Failed!" : "Success!"));
         totalResult += result;
         totalResult += result;
-        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n",
+                ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ?
+                 buffer : "Failed!"));
         totalResult += (resp ? 0 : 1);
         totalResult += (resp ? 0 : 1);
-        
+
+        // hdfsGetPathInfo
         hdfsFileInfo *fileInfo = NULL;
         hdfsFileInfo *fileInfo = NULL;
         if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
         if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
             fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
             fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
@@ -261,13 +288,15 @@ int main(int argc, char **argv) {
             fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
             fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
             char permissions[10];
             char permissions[10];
             permission_disp(fileInfo->mPermissions, permissions);
             permission_disp(fileInfo->mPermissions, permissions);
-            fprintf(stderr, "Permissions: %d (%s)\n", fileInfo->mPermissions, permissions);
+            fprintf(stderr, "Permissions: %d (%s)\n",
+                    fileInfo->mPermissions, permissions);
             hdfsFreeFileInfo(fileInfo, 1);
             hdfsFreeFileInfo(fileInfo, 1);
         } else {
         } else {
             totalResult++;
             totalResult++;
-            fprintf(stderr, "waah! hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
+            fprintf(stderr, "hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
         }
         }
         
         
+        // hdfsListDirectory
         hdfsFileInfo *fileList = 0;
         hdfsFileInfo *fileList = 0;
         int numEntries = 0;
         int numEntries = 0;
         if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) {
         if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) {
@@ -283,7 +312,8 @@ int main(int argc, char **argv) {
                 fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
                 fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
                 char permissions[10];
                 char permissions[10];
                 permission_disp(fileList[i].mPermissions, permissions);
                 permission_disp(fileList[i].mPermissions, permissions);
-                fprintf(stderr, "Permissions: %d (%s)\n", fileList[i].mPermissions, permissions);
+                fprintf(stderr, "Permissions: %d (%s)\n",
+                        fileList[i].mPermissions, permissions);
             }
             }
             hdfsFreeFileInfo(fileList, numEntries);
             hdfsFreeFileInfo(fileList, numEntries);
         } else {
         } else {
@@ -295,203 +325,220 @@ int main(int argc, char **argv) {
             }
             }
         }
         }
         
         
-        //        char*** hosts = hdfsGetHosts(fs, srcPath, 0, 1);
-        //        if(hosts) {
-        //            fprintf(stderr, "hdfsGetHosts - SUCCESS! ... \n");
-        //            int i=0;
-        //            while(hosts[i]) {
-        //                int j = 0;
-        //                while(hosts[i][j]) {
-        //                    fprintf(stderr,
-        //                            "\thosts[%d][%d] - %s\n", i, j, hosts[i][j]);
-        //                    ++j;
-        //                }
-        //                ++i;
-        //            }
-        //        } else {
-        //            totalResult++;
-        //            fprintf(stderr, "waah! hdfsGetHosts - FAILED!\n");
-        //        }
-        
         char *newOwner = "root";
         char *newOwner = "root";
-        // setting tmp dir to 777 so later when connectAsUser nobody, we can write to it
+        // Setting tmp dir to 777 so later when connectAsUser nobody,
+        // we can write to it
         short newPerm = 0666;
         short newPerm = 0666;
         
         
-        // chown write
-        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) ? "Failed!" : "Success!"));
+        // hdfsChown
+        fprintf(stderr, "hdfsChown: %s\n",
+                ((result = hdfsChown(fs, rwPath, NULL, "users")) ?
+                 "Failed!" : "Success!"));
         totalResult += result;
         totalResult += result;
-        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChown: %s\n",
+                ((result = hdfsChown(fs, rwPath, newOwner, NULL)) ?
+                 "Failed!" : "Success!"));
         totalResult += result;
         totalResult += result;
-        // chmod write
-        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) ? "Failed!" : "Success!"));
+        // hdfsChmod
+        fprintf(stderr, "hdfsChmod: %s\n",
+                ((result = hdfsChmod(fs, rwPath, newPerm)) ?
+                 "Failed!" : "Success!"));
         totalResult += result;
         totalResult += result;
         
         
-        
-        
         sleep(2);
         sleep(2);
         tTime newMtime = time(NULL);
         tTime newMtime = time(NULL);
         tTime newAtime = time(NULL);
         tTime newAtime = time(NULL);
         
         
         // utime write
         // utime write
-        fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) ? "Failed!" : "Success!"));
-        
+        fprintf(stderr, "hdfsUtime: %s\n",
+                ((result = hdfsUtime(fs, rwPath, newMtime, newAtime)) ?
+                 "Failed!" : "Success!"));        
         totalResult += result;
         totalResult += result;
         
         
         // chown/chmod/utime read
         // chown/chmod/utime read
-        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
+        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, rwPath);
         
         
-        fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner) != 0)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChown read: %s\n",
+                ((result = (strcmp(finfo->mOwner, newOwner) != 0)) ?
+                 "Failed!" : "Success!"));
         totalResult += result;
         totalResult += result;
         
         
-        fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChmod read: %s\n",
+                ((result = (finfo->mPermissions != newPerm)) ?
+                 "Failed!" : "Success!"));
         totalResult += result;
         totalResult += result;
         
         
         // will later use /tmp/ as a different user so enable it
         // will later use /tmp/ as a different user so enable it
-        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChmod: %s\n",
+                ((result = hdfsChmod(fs, slashTmp, 0777)) ?
+                 "Failed!" : "Success!"));
         totalResult += result;
         totalResult += result;
         
         
         fprintf(stderr,"newMTime=%ld\n",newMtime);
         fprintf(stderr,"newMTime=%ld\n",newMtime);
         fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod);
         fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod);
         
         
         
         
-        fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime / 1000)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsUtime read (mtime): %s\n",
+                ((result = (finfo->mLastMod != newMtime / 1000)) ?
+                 "Failed!" : "Success!"));
         totalResult += result;
         totalResult += result;
         
         
-        hdfsFreeFileInfo(finfo, 1);
-        
         // Clean up
         // Clean up
-        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory, 1)) ? "Failed!" : "Success!"));
+        hdfsFreeFileInfo(finfo, 1);
+        fprintf(stderr, "hdfsDelete: %s\n",
+                ((result = hdfsDelete(fs, newDirectory, 1)) ?
+                 "Failed!" : "Success!"));
         totalResult += result;
         totalResult += result;
-        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 1)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsDelete: %s\n",
+                ((result = hdfsDelete(fs, srcPath, 1)) ?
+                 "Failed!" : "Success!"));
         totalResult += result;
         totalResult += result;
-//        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, movePath, 1)) ? "Failed!" : "Success!"));
-//        totalResult += result;
-        fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) ? "Success!" : "Failed!"));
+        fprintf(stderr, "hdfsExists: %s\n",
+                ((result = hdfsExists(fs, newDirectory)) ?
+                 "Success!" : "Failed!"));
         totalResult += (result ? 0 : 1);
         totalResult += (result ? 0 : 1);
+        // Done test generic operations
     }
     }
     
     
     {
     {
-        // TEST APPENDS
-        const char *writePath = "/tmp/appends";
+        // Test Appends
+        appendTemplate = strdup("/tmp/appendsXXXXXX");
+        if (!appendTemplate) {
+            fprintf(stderr, "Failed to create appendTemplate!\n");
+            exit(1);
+        }
+        char *appendPath = mktemp(appendTemplate);
+        const char* helloBuffer = "Hello,";
+        hdfsFile writeFile = NULL;
         
         
-        // CREATE
-        hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY, 0, 0, 0);
+        // Create
+        writeFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0);
         if(!writeFile) {
         if(!writeFile) {
-            fprintf(stderr, "Failed to open %s for writing!\n", writePath);
-            exit(-1);
+            fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
+            exit(1);
         }
         }
-        fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+        fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
         
         
-        const char* buffer = "Hello,";
-        tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer));
+        num_written_bytes = hdfsWrite(fs, writeFile, helloBuffer,
+                                      (int) strlen(helloBuffer));
         fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
         fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
-        
-        if (hdfsFlush(fs, writeFile)) {
-            fprintf(stderr, "Failed to 'flush' %s\n", writePath);
-            exit(-1);
-        }
-        fprintf(stderr, "Flushed %s successfully!\n", writePath);
-        
         hdfsCloseFile(fs, writeFile);
         hdfsCloseFile(fs, writeFile);
         
         
-        fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, writePath, 1)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsSetReplication: %s\n",
+                ((result = hdfsSetReplication(fs, appendPath, 1)) ?
+                 "Failed!" : "Success!"));
         totalResult += result;
         totalResult += result;
         
         
-        // RE-OPEN
-        writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_APPEND, 0, 0, 0);
+        // Re-Open for Append
+        writeFile = hdfsOpenFile(fs, appendPath, O_WRONLY | O_APPEND, 0, 0, 0);
         if(!writeFile) {
         if(!writeFile) {
-            fprintf(stderr, "Failed to open %s for writing!\n", writePath);
-            exit(-1);
+            fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
+            exit(1);
         }
         }
-        fprintf(stderr, "Opened %s for appending successfully...\n", writePath);
+        fprintf(stderr, "Opened %s for appending successfully...\n",
+                appendPath);
         
         
-        buffer = " World";
-        num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer) + 1);
+        helloBuffer = " World";
+        num_written_bytes = hdfsWrite(fs, writeFile, helloBuffer,
+                                      (int)strlen(helloBuffer) + 1);
         fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
         fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
         
         
-        if (hdfsFlush(fs, writeFile)) {
-            fprintf(stderr, "Failed to 'flush' %s\n", writePath);
-            exit(-1);
-        }
-        fprintf(stderr, "Flushed %s successfully!\n", writePath);
-        
         hdfsCloseFile(fs, writeFile);
         hdfsCloseFile(fs, writeFile);
 
 
-        // CHECK size
-        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
-        fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == strlen("Hello, World") + 1)) ? "Success!" : "Failed!"));
+        // Check size
+        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, appendPath);
+        fprintf(stderr, "fileinfo->mSize: == total %s\n",
+                ((result = (finfo->mSize == strlen("Hello, World") + 1)) ?
+                 "Success!" : "Failed!"));
         totalResult += (result ? 0 : 1);
         totalResult += (result ? 0 : 1);
         
         
-        // READ and check data
-        hdfsFile readFile = hdfsOpenFile(fs, writePath, O_RDONLY, 0, 0, 0);
+        // Read and check data
+        hdfsFile readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0);
         if (!readFile) {
         if (!readFile) {
-            fprintf(stderr, "Failed to open %s for reading!\n", writePath);
-            exit(-1);
+            fprintf(stderr, "Failed to open %s for reading!\n", appendPath);
+            exit(1);
         }
         }
         
         
-        char rdbuffer[32];
-        tSize num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
+        tSize num_read_bytes = hdfsRead(fs, readFile, buffer, sizeof(buffer));
         fprintf(stderr, "Read following %d bytes:\n%s\n",
         fprintf(stderr, "Read following %d bytes:\n%s\n",
-                num_read_bytes, rdbuffer);
-        
-        fprintf(stderr, "read == Hello, World %s\n", (result = (strcmp(rdbuffer, "Hello, World") == 0)) ? "Success!" : "Failed!");
-        
+                num_read_bytes, buffer);
+        fprintf(stderr, "read == Hello, World %s\n",
+                (result = (strcmp(buffer, "Hello, World") == 0)) ?
+                "Success!" : "Failed!");
         hdfsCloseFile(fs, readFile);
         hdfsCloseFile(fs, readFile);
         
         
-        // DONE test appends
+        // Cleanup
+        fprintf(stderr, "hdfsDelete: %s\n",
+                ((result = hdfsDelete(fs, appendPath, 1)) ?
+                 "Failed!" : "Success!"));
+        totalResult += result;
+        // Done test appends
     }
     }
     
     
-    
     totalResult += (hdfsDisconnect(fs) != 0);
     totalResult += (hdfsDisconnect(fs) != 0);
     
     
     {
     {
         //
         //
         // Now test as connecting as a specific user
         // Now test as connecting as a specific user
-        // This is only meant to test that we connected as that user, not to test
+        // This only meant to test that we connected as that user, not to test
         // the actual fs user capabilities. Thus just create a file and read
         // the actual fs user capabilities. Thus just create a file and read
         // the owner is correct.
         // the owner is correct.
-        
         const char *tuser = "nobody";
         const char *tuser = "nobody";
-        const char* writePath = "/tmp/usertestfile.txt";
+        userTemplate = strdup("/tmp/usertestXXXXXX");
+        if (!userTemplate) {
+            fprintf(stderr, "Failed to create userTemplate!\n");
+            exit(1);
+        }
+        char* userWritePath = mktemp(userTemplate);
+        hdfsFile writeFile = NULL;
         
         
         fs = hdfsConnectAsUserNewInstance("default", 50070, tuser);
         fs = hdfsConnectAsUserNewInstance("default", 50070, tuser);
         if(!fs) {
         if(!fs) {
-            fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
-            exit(-1);
+            fprintf(stderr,
+                    "Oops! Failed to connect to hdfs as user %s!\n",tuser);
+            exit(1);
         }
         }
         
         
-        hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+        writeFile = hdfsOpenFile(fs, userWritePath, O_WRONLY|O_CREAT, 0, 0, 0);
         if(!writeFile) {
         if(!writeFile) {
-            fprintf(stderr, "Failed to open %s for writing!\n", writePath);
-            exit(-1);
+            fprintf(stderr, "Failed to open %s for writing!\n", userWritePath);
+            exit(1);
         }
         }
-        fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+        fprintf(stderr, "Opened %s for writing successfully...\n",
+                userWritePath);
         
         
-        char* buffer = "Hello, World!";
-        tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1);
+        num_written_bytes = hdfsWrite(fs, writeFile, fileContents,
+                                      (int)strlen(fileContents) + 1);
         fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
         fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
-        
-        if (hdfsFlush(fs, writeFile)) {
-            fprintf(stderr, "Failed to 'flush' %s\n", writePath);
-            exit(-1);
-        }
-        fprintf(stderr, "Flushed %s successfully!\n", writePath);
-        
         hdfsCloseFile(fs, writeFile);
         hdfsCloseFile(fs, writeFile);
         
         
-        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
+        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, userWritePath);
         if (finfo) {
         if (finfo) {
-            fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser) != 0)) ? "Failed!" : "Success!"));
+            fprintf(stderr, "hdfs new file user is correct: %s\n",
+                    ((result = (strcmp(finfo->mOwner, tuser) != 0)) ?
+                     "Failed!" : "Success!"));
         } else {
         } else {
-            fprintf(stderr, "hdfsFileInfo returned by hdfsGetPathInfo is NULL\n");
+            fprintf(stderr,
+                    "hdfsFileInfo returned by hdfsGetPathInfo is NULL\n");
             result = -1;
             result = -1;
         }
         }
         totalResult += result;
         totalResult += result;
+        
+        // Cleanup
+        fprintf(stderr, "hdfsDelete: %s\n",
+                ((result = hdfsDelete(fs, userWritePath, 1)) ?
+                 "Failed!" : "Success!"));
+        totalResult += result;
+        // Done test specific user
     }
     }
-    
+
     totalResult += (hdfsDisconnect(fs) != 0);
     totalResult += (hdfsDisconnect(fs) != 0);
-    fprintf(stderr, "totalResult == %d\n", totalResult);
     
     
+    // Shutdown the native minidfscluster
+    nmdShutdown(cluster);
+    nmdFree(cluster);
+    
+    fprintf(stderr, "totalResult == %d\n", totalResult);
     if (totalResult != 0) {
     if (totalResult != 0) {
         return -1;
         return -1;
     } else {
     } else {

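Note on the mktemp() idiom introduced above: mktemp(3) rewrites the trailing "XXXXXX" of its argument in place, so the template must live in writable memory, which is why the literal is first copied with strdup() rather than passed directly (modifying a string literal is undefined behaviour). A minimal standalone sketch of the same pattern, illustrative only and not part of the test:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void) {
        /* Copy the literal so mktemp() may modify it in place. */
        char *tmpl = strdup("/tmp/usertestXXXXXX");
        if (!tmpl) {
            perror("strdup");
            return 1;
        }
        /* mktemp() fills in XXXXXX; on failure it leaves an empty string. */
        char *path = mktemp(tmpl);
        if (!path || path[0] == '\0') {
            fprintf(stderr, "mktemp failed\n");
            free(tmpl);
            return 1;
        }
        printf("unique test path: %s\n", path);
        free(tmpl);
        return 0;
    }

(mkstemp(3) is usually preferred when a local file is actually created; here only a unique HDFS path name is wanted, so mktemp() suffices.)
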
+ 24 - 19
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c

@@ -22,43 +22,52 @@
 #include <stdlib.h>
 
 int main(int argc, char **argv) {
+
+    const char* rfile;
+    tSize fileTotalSize, bufferSize, curSize, totalReadSize;
+    hdfsFS fs;
+    hdfsFile readFile;
+    char *buffer = NULL;
     
     if (argc != 4) {
-        fprintf(stderr, "Usage: hdfs_read <filename> <filesize> <buffersize>\n");
-        exit(-1);
+        fprintf(stderr, "Usage: test_libwebhdfs_read"
+                " <filename> <filesize> <buffersize>\n");
+        exit(1);
     }
     
-    hdfsFS fs = hdfsConnect("0.0.0.0", 50070);
+    fs = hdfsConnect("localhost", 50070);
     if (!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
-        exit(-1);
+        exit(1);
     }
     
-    const char* rfile = argv[1];
-    tSize fileTotalSize = strtoul(argv[2], NULL, 10);
-    tSize bufferSize = strtoul(argv[3], NULL, 10);
+    rfile = argv[1];
+    fileTotalSize = strtoul(argv[2], NULL, 10);
+    bufferSize = strtoul(argv[3], NULL, 10);
     
-    hdfsFile readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
+    readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
     if (!readFile) {
         fprintf(stderr, "Failed to open %s for writing!\n", rfile);
-        exit(-2);
+        exit(1);
     }
     
     // data to be written to the file
-    char* buffer = malloc(sizeof(char) * bufferSize);
+    buffer = malloc(sizeof(char) * bufferSize);
     if(buffer == NULL) {
-        return -2;
+        fprintf(stderr, "Failed to allocate buffer.\n");
+        exit(1);
     }
     
     // read from the file
-    tSize curSize = bufferSize;
-    tSize totalReadSize = 0;
-    for (; (curSize = hdfsRead(fs, readFile, (void*)buffer, bufferSize)) == bufferSize ;) {
+    curSize = bufferSize;
+    totalReadSize = 0;
+    for (; (curSize = hdfsRead(fs, readFile, buffer, bufferSize)) == bufferSize; ) {
         totalReadSize += curSize;
     }
     totalReadSize += curSize;
     
-    fprintf(stderr, "size of the file: %d; reading size: %d\n", fileTotalSize, totalReadSize);
+    fprintf(stderr, "size of the file: %d; reading size: %d\n",
+            fileTotalSize, totalReadSize);
     
     free(buffer);
     hdfsCloseFile(fs, readFile);
@@ -67,7 +76,3 @@ int main(int argc, char **argv) {
     return 0;
 }
 
-/**
- * vim: ts=4: sw=4: et:
- */
-

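One detail of the read loop above worth spelling out: full buffers are summed inside the loop, and the final return value of hdfsRead() (a short count, or 0 at end of file) is added once after the loop, so totalReadSize ends up equal to the number of bytes actually read. A negative error return would be added the same way; a more defensive variant, shown here only as a sketch:

    #include "hdfs.h"

    /* Read a file in bufferSize chunks; returns total bytes read,
     * or -1 if hdfsRead reports an error. */
    static long readAll(hdfsFS fs, hdfsFile file, char *buf, tSize bufferSize)
    {
        long total = 0;
        tSize n;
        while ((n = hdfsRead(fs, file, buf, bufferSize)) > 0) {
            total += n;
        }
        return (n < 0) ? -1 : total;
    }
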
+ 55 - 33
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c

@@ -18,6 +18,7 @@
 
 #include "expect.h"
 #include "hdfs.h"
+#include "native_mini_dfs.h"
 
 #include <errno.h>
 #include <semaphore.h>
@@ -28,11 +29,9 @@
 
 #define TLH_MAX_THREADS 100
 
-static sem_t *tlhSem;
+static struct NativeMiniDfsCluster* cluster;
 
-static const char *nn;
 static const char *user;
-static int port;
 
 struct tlhThreadInfo {
     /** Thread index */
@@ -43,19 +42,24 @@ struct tlhThreadInfo {
     pthread_t thread;
 };
 
-static int hdfsSingleNameNodeConnect(const char *nn, int port, const char *user, hdfsFS *fs)
+static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cluster,
+                                     hdfsFS *fs)
 {
+    int nnPort;
+    const char *nnHost;
     hdfsFS hdfs;
-    if (port < 0) {
-        fprintf(stderr, "hdfsSingleNameNodeConnect: nmdGetNameNodePort "
-                "returned error %d\n", port);
-        return port;
+    
+    if (nmdGetNameNodeHttpAddress(cluster, &nnPort, &nnHost)) {
+        fprintf(stderr, "Error when retrieving namenode host address.\n");
+        return 1;
     }
     
-    hdfs = hdfsConnectAsUserNewInstance(nn, port, user);
-    if (!hdfs) {
-        return -errno;
+    hdfs = hdfsConnectAsUser(nnHost, nnPort, user);
+    if(!hdfs) {
+        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
+        return 1;
     }
+
     *fs = hdfs;
     return 0;
 }
@@ -65,6 +69,7 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs)
     char prefix[256], tmp[256];
     hdfsFile file;
     int ret, expected;
+    hdfsFileInfo *fileInfo;
     
     snprintf(prefix, sizeof(prefix), "/tlhData%04d", ti->threadIdx);
     
@@ -74,18 +79,13 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs)
     EXPECT_ZERO(hdfsCreateDirectory(fs, prefix));
     snprintf(tmp, sizeof(tmp), "%s/file", prefix);
     
-    /*
-     * Although there should not be any file to open for reading,
-     * the right now implementation only construct a local
-     * information struct when opening file
-     */
     EXPECT_NONNULL(hdfsOpenFile(fs, tmp, O_RDONLY, 0, 0, 0));
     
     file = hdfsOpenFile(fs, tmp, O_WRONLY, 0, 0, 0);
     EXPECT_NONNULL(file);
     
     /* TODO: implement writeFully and use it here */
-    expected = strlen(prefix);
+    expected = (int)strlen(prefix);
     ret = hdfsWrite(fs, file, prefix, expected);
     if (ret < 0) {
         ret = errno;
@@ -118,9 +118,28 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs)
     }
     EXPECT_ZERO(memcmp(prefix, tmp, expected));
     EXPECT_ZERO(hdfsCloseFile(fs, file));
-    
-    // TODO: Non-recursive delete should fail?
-    //EXPECT_NONZERO(hdfsDelete(fs, prefix, 0));
+        
+    snprintf(tmp, sizeof(tmp), "%s/file", prefix);
+    EXPECT_NONZERO(hdfsChown(fs, tmp, NULL, NULL));
+    EXPECT_ZERO(hdfsChown(fs, tmp, NULL, "doop"));
+    fileInfo = hdfsGetPathInfo(fs, tmp);
+    EXPECT_NONNULL(fileInfo);
+    EXPECT_ZERO(strcmp("doop", fileInfo->mGroup));
+    hdfsFreeFileInfo(fileInfo, 1);
+    
+    EXPECT_ZERO(hdfsChown(fs, tmp, "ha", "doop2"));
+    fileInfo = hdfsGetPathInfo(fs, tmp);
+    EXPECT_NONNULL(fileInfo);
+    EXPECT_ZERO(strcmp("ha", fileInfo->mOwner));
+    EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
+    hdfsFreeFileInfo(fileInfo, 1);
+    
+    EXPECT_ZERO(hdfsChown(fs, tmp, "ha2", NULL));
+    fileInfo = hdfsGetPathInfo(fs, tmp);
+    EXPECT_NONNULL(fileInfo);
+    EXPECT_ZERO(strcmp("ha2", fileInfo->mOwner));
+    EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
+    hdfsFreeFileInfo(fileInfo, 1);
     
     EXPECT_ZERO(hdfsDelete(fs, prefix, 1));
     return 0;
@@ -134,7 +153,7 @@ static void *testHdfsOperations(void *v)
     
     fprintf(stderr, "testHdfsOperations(threadIdx=%d): starting\n",
             ti->threadIdx);
-    ret = hdfsSingleNameNodeConnect(nn, port, user, &fs);
+    ret = hdfsSingleNameNodeConnect(cluster, &fs);
     if (ret) {
         fprintf(stderr, "testHdfsOperations(threadIdx=%d): "
                 "hdfsSingleNameNodeConnect failed with error %d.\n",
@@ -181,19 +200,23 @@ static int checkFailures(struct tlhThreadInfo *ti, int tlhNumThreads)
  */
 int main(int argc, const char *args[])
 {
-    if (argc != 4) {
-        fprintf(stderr, "usage: test_libhdfs_threaded <namenode> <port> <username>");
-        return -1;
-    }
-    
-    nn = args[1];
-    port = atoi(args[2]);
-    user = args[3];
-    
     int i, tlhNumThreads;
     const char *tlhNumThreadsStr;
     struct tlhThreadInfo ti[TLH_MAX_THREADS];
     
+    if (argc != 2) {
+        fprintf(stderr, "usage: test_libwebhdfs_threaded <username>\n");
+        exit(1);
+    }
+    user = args[1];
+    
+    struct NativeMiniDfsConf conf = {
+        .doFormat = 1, .webhdfsEnabled = 1, .namenodeHttpPort = 50070,
+    };
+    cluster = nmdCreate(&conf);
+    EXPECT_NONNULL(cluster);
+    EXPECT_ZERO(nmdWaitClusterUp(cluster));
+    
     tlhNumThreadsStr = getenv("TLH_NUM_THREADS");
     if (!tlhNumThreadsStr) {
         tlhNumThreadsStr = "3";
@@ -210,8 +233,6 @@ int main(int argc, const char *args[])
         ti[i].threadIdx = i;
     }
     
-//    tlhSem = sem_open("sem", O_CREAT, 0644, tlhNumThreads);
-    
     for (i = 0; i < tlhNumThreads; i++) {
         EXPECT_ZERO(pthread_create(&ti[i].thread, NULL,
                                    testHdfsOperations, &ti[i]));
@@ -220,6 +241,7 @@ int main(int argc, const char *args[])
         EXPECT_ZERO(pthread_join(ti[i].thread, NULL));
     }
     
-//    EXPECT_ZERO(sem_close(tlhSem));
+    EXPECT_ZERO(nmdShutdown(cluster));
+    nmdFree(cluster);
     return checkFailures(ti, tlhNumThreads);
 }

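The threaded test now starts its own native mini DFS cluster (with webhdfs enabled) instead of taking a namenode host and port on the command line. Condensed into one sketch, the cluster lifecycle used above looks roughly like this ("someuser" is a placeholder and error handling is trimmed):

    #include "hdfs.h"
    #include "native_mini_dfs.h"

    #include <stdio.h>

    int main(void)
    {
        struct NativeMiniDfsConf conf = {
            .doFormat = 1, .webhdfsEnabled = 1, .namenodeHttpPort = 50070,
        };
        struct NativeMiniDfsCluster *cluster = nmdCreate(&conf);
        if (!cluster || nmdWaitClusterUp(cluster)) {
            fprintf(stderr, "failed to start the mini DFS cluster\n");
            return 1;
        }

        int nnPort;
        const char *nnHost;
        if (nmdGetNameNodeHttpAddress(cluster, &nnPort, &nnHost)) {
            fprintf(stderr, "failed to get the namenode http address\n");
            return 1;
        }

        hdfsFS fs = hdfsConnectAsUser(nnHost, nnPort, "someuser");
        if (fs) {
            /* ... run operations against fs ... */
            hdfsDisconnect(fs);
        }

        nmdShutdown(cluster);
        nmdFree(cluster);
        return 0;
    }
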
+ 42 - 49
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c

@@ -22,97 +22,90 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <inttypes.h>
 
 int main(int argc, char **argv) {
+    hdfsFS fs;
+    const char* writeFileName;
+    off_t fileTotalSize;
+    long long tmpBufferSize;
+    tSize bufferSize = 0, totalWriteSize = 0, toWrite = 0, written = 0;
+    hdfsFile writeFile = NULL;
+    int append, i = 0;
+    char* buffer = NULL;
     
     if (argc != 6) {
-        fprintf(stderr, "Usage: hdfs_write <filename> <filesize> <buffersize> <username> <append>\n");
-        exit(-1);
+        fprintf(stderr, "Usage: test_libwebhdfs_write <filename> <filesize> "
+                "<buffersize> <username> <append>\n");
+        exit(1);
     }
     
-    hdfsFS fs = hdfsConnectAsUser("0.0.0.0", 50070, argv[4]);
+    fs = hdfsConnectAsUser("default", 50070, argv[4]);
     if (!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
-        exit(-1);
+        exit(1);
     }
     
-    const char* writeFileName = argv[1];
-    off_t fileTotalSize = strtoul(argv[2], NULL, 10);
-    long long tmpBufferSize = strtoul(argv[3], NULL, 10);
+    writeFileName = argv[1];
+    fileTotalSize = strtoul(argv[2], NULL, 10);
+    tmpBufferSize = strtoul(argv[3], NULL, 10);
     
     // sanity check
     if(fileTotalSize == ULONG_MAX && errno == ERANGE) {
-        fprintf(stderr, "invalid file size %s - must be <= %lu\n", argv[2], ULONG_MAX);
-        exit(-3);
+        fprintf(stderr, "invalid file size %s - must be <= %lu\n",
+                argv[2], ULONG_MAX);
+        exit(1);
     }
     
     // currently libhdfs writes are of tSize which is int32
     if(tmpBufferSize > INT_MAX) {
-        fprintf(stderr, "invalid buffer size libhdfs API write chunks must be <= %d\n",INT_MAX);
-        exit(-3);
+        fprintf(stderr,
+                "invalid buffer size libhdfs API write chunks must be <= %d\n",
+                INT_MAX);
+        exit(1);
     }
     
-    tSize bufferSize = tmpBufferSize;
-    
-    hdfsFile writeFile = NULL;
-    int append = atoi(argv[5]);
+    bufferSize = (tSize) tmpBufferSize;
+    append = atoi(argv[5]);
     if (!append) {
         writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 2, 0);
     } else {
-        writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY | O_APPEND, bufferSize, 2, 0);
+        writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY | O_APPEND,
+                                 bufferSize, 2, 0);
     }
     if (!writeFile) {
         fprintf(stderr, "Failed to open %s for writing!\n", writeFileName);
-        exit(-2);
+        exit(1);
     }
     
     // data to be written to the file
-    char* buffer = malloc(sizeof(char) * bufferSize + 1);
+    buffer = malloc(sizeof(char) * bufferSize + 1);
     if(buffer == NULL) {
         fprintf(stderr, "Could not allocate buffer of size %d\n", bufferSize);
-        return -2;
+        exit(1);
     }
-    int i = 0;
-    for (i=0; i < bufferSize; ++i) {
+    for (i = 0; i < bufferSize; ++i) {
         buffer[i] = 'a' + (i%26);
     }
     buffer[bufferSize] = '\0';
 
-    size_t totalWriteSize = 0;
+    // write to the file
+    totalWriteSize = 0;
     for (; totalWriteSize < fileTotalSize; ) {
-        tSize toWrite = bufferSize < (fileTotalSize - totalWriteSize) ? bufferSize : (fileTotalSize - totalWriteSize);
-        size_t written = hdfsWrite(fs, writeFile, (void*)buffer, toWrite);
-        fprintf(stderr, "written size %ld, to write size %d\n", written, toWrite);
+        toWrite = bufferSize < (fileTotalSize - totalWriteSize) ?
+                            bufferSize : (fileTotalSize - totalWriteSize);
+        written = hdfsWrite(fs, writeFile, (void*)buffer, toWrite);
+        fprintf(stderr, "written size %d, to write size %d\n",
+                written, toWrite);
         totalWriteSize += written;
-        //sleep(1);
     }
     
+    // cleanup
     free(buffer);
     hdfsCloseFile(fs, writeFile);
-    
-    fprintf(stderr, "file total size: %lld, total write size: %ld\n", fileTotalSize, totalWriteSize);
-    
-    hdfsFile readFile = hdfsOpenFile(fs, writeFileName, O_RDONLY, 0, 0, 0);
-    //sleep(1);
-    fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));
-    
-    hdfsFile writeFile2 = hdfsOpenFile(fs, writeFileName, O_WRONLY | O_APPEND, 0, 2, 0);
-    fprintf(stderr, "Opened %s for writing successfully...\n", writeFileName);
-    const char *content = "Hello, World!";
-    size_t num_written_bytes = hdfsWrite(fs, writeFile2, content, strlen(content) + 1);
-    if (num_written_bytes != strlen(content) + 1) {
-        fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
-                                    (int)(strlen(content) + 1), (int)num_written_bytes);
-        exit(-1);
-    }
-    fprintf(stderr, "Wrote %zd bytes\n", num_written_bytes);
-    
+    fprintf(stderr, "file total size: %" PRId64 ", total write size: %d\n",
+            fileTotalSize, totalWriteSize);
    hdfsDisconnect(fs);
     
     return 0;
 }
-
-/**
- * vim: ts=4: sw=4: et:
- */
-

+ 0 - 111
hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_read_bm.c

@@ -1,111 +0,0 @@
-#include "hdfs.h"
-
-#include <time.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/time.h>
-
-#ifdef __MACH__
-#include <mach/clock.h>
-#include <mach/mach.h>
-#endif
-
-void current_utc_time(struct timespec *ts) {
-#ifdef __MACH__ // OS X does not have clock_gettime, use clock_get_time
-    clock_serv_t cclock;
-    mach_timespec_t mts;
-    host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
-    clock_get_time(cclock, &mts);
-    mach_port_deallocate(mach_task_self(), cclock);
-    ts->tv_sec = mts.tv_sec;
-    ts->tv_nsec = mts.tv_nsec;
-#else
-    clock_gettime(CLOCK_REALTIME, ts);
-#endif
-    
-}
-
-long get_time() {
-    struct timespec tp;
-    current_utc_time(&tp);
-    return (long)((tp.tv_sec * 1000000000) + tp.tv_nsec);
-}
-
-#define SIZE 512*1024*1024
-#define READ_SIZE 512*1024*1024
-#define DISCARD_COUNT 5
-
-int main(int argc, char** argv) {
-    if (argc != 4) {
-        fprintf(stderr, "Usage: test_read_bm <namenode> <user_name> <iteration_number>\n");
-        exit(0);
-    }
-    
-    hdfsFS fs = hdfsConnectAsUser(argv[1], 50070, argv[2]);
-    
-    /* printf("File is null: %d\n", file == NULL ? 1 : 0); */
-    
-    char *buf = (char *) malloc(sizeof(unsigned char) * SIZE);
-    
-    printf("Read size: %d\n", READ_SIZE);
-    
-    int iterations = atoi(argv[3]);
-    
-    if (iterations <= DISCARD_COUNT) {
-        printf("Iterations should be at least %d\n", DISCARD_COUNT + 1);
-        exit(0);
-    }
-    
-    printf("Running %d iterations\n", iterations);
-    float time_total;
-    float max = 0.f;
-    float min = 999999999999999.f;
-    
-    printf("Start...\n");
-    int i;
-    for (i=0; i<iterations; ++i) {
-        long start = get_time();
-        hdfsFile file = hdfsOpenFile(fs, "/tmp/512_mb.txt", O_RDONLY, 0, 0, 0);
-        int n = 0;
-        
-        while (n < SIZE) {
-            int nread = hdfsRead(fs, file, buf + n, READ_SIZE);
-            if (nread <= 0) {
-                printf("EOF before finished, read %d bytes\n", n);
-                hdfsDisconnect(fs);
-                return 0;
-            }
-            n += nread;
-            printf("Read %d kilobytes\n", nread / 1024);
-        }
-        
-        long end = get_time();
-        printf("Read %d bytes, hoping for %d.\n", n, SIZE);
-        long elapsed = (end - start);
-        printf("Start: %lu, end: %lu\n", start, end);
-        float time = elapsed / (1000000000.0f);
-        printf ("Took %2.6fs\n", time);
-        printf("Throughput: %2.2fMB/s\n", SIZE * 1.0f / (1024 * 1024 * time));
-        if (i >= DISCARD_COUNT) {
-            time_total += time;
-            if (time < min) {
-                min = time;
-            }
-            if (time > max) {
-                max = time;
-            }
-        }
-    }
-    hdfsDisconnect(fs);
-    printf("------\n");
-    printf("Average time: %2.2fs\n", time_total / (iterations - DISCARD_COUNT));
-    printf("Max. time: %2.2f, min. time: %2.2f\n", max, min);
-    float maxt = SIZE * 1.f / (1024 * 1024 * max);
-    float mint = SIZE * 1.f / (1024 * 1024 * min);
-    printf("Average throughput: %2.2fMB/s\n", 1.f * SIZE * (iterations - DISCARD_COUNT) / (1024 * 1024 * time_total));
-    printf("Max. throughput: %2.2f, min. throughput: %2.2f\n", maxt, mint);
-    
-    //  printf("File contents: %d\n", buf[0]);
-    return 0;
-}
-

+ 9 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -652,7 +652,7 @@ public class DFSClient implements java.io.Closeable {
       // if there is no more clients under the renewer.
       getLeaseRenewer().closeClient(this);
     } catch (IOException ioe) {
-       LOG.info("Exception occurred while aborting the client. " + ioe);
+       LOG.info("Exception occurred while aborting the client " + ioe);
     }
     closeConnectionToNamenode();
   }
@@ -1769,6 +1769,13 @@ public class DFSClient implements java.io.Closeable {
         return new MD5MD5CRC32CastagnoliFileChecksum(bytesPerCRC,
             crcPerBlock, fileMD5);
       default:
+        // If there is no block allocated for the file,
+        // return one with the magic entry that matches what previous
+        // hdfs versions return.
+        if (locatedblocks.size() == 0) {
+          return new MD5MD5CRC32GzipFileChecksum(0, 0, fileMD5);
+        }
+
         // we should never get here since the validity was checked
         // when getCrcType() was called above.
         return null;
@@ -2104,7 +2111,7 @@ public class DFSClient implements java.io.Closeable {
       reportBadBlocks(lblocks);
     } catch (IOException ie) {
       LOG.info("Found corruption while reading " + file
-          + ".  Error repairing corrupt blocks.  Bad blocks remain.", ie);
+          + ". Error repairing corrupt blocks. Bad blocks remain.", ie);
     }
   }
 

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java

@@ -457,7 +457,7 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
             buffersize, verifyChecksum, dfsClient.clientName);
         if(connectFailedOnce) {
           DFSClient.LOG.info("Successfully connected to " + targetAddr +
-                             " for block " + blk.getBlockId());
+                             " for " + blk);
         }
         return chosenNode;
       } catch (IOException ex) {
@@ -736,9 +736,9 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
         }
         
         if (nodes == null || nodes.length == 0) {
-          DFSClient.LOG.info("No node available for block: " + blockInfo);
+          DFSClient.LOG.info("No node available for " + blockInfo);
         }
-        DFSClient.LOG.info("Could not obtain block " + block.getBlock()
+        DFSClient.LOG.info("Could not obtain " + block.getBlock()
             + " from any node: " + ie
             + ". Will get new block locations from namenode and retry...");
         try {

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -735,7 +735,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
     //
     private boolean processDatanodeError() throws IOException {
       if (response != null) {
-        DFSClient.LOG.info("Error Recovery for block " + block +
+        DFSClient.LOG.info("Error Recovery for " + block +
         " waiting for responder to exit. ");
         return true;
       }
@@ -1008,7 +1008,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
         success = createBlockOutputStream(nodes, 0L, false);
 
         if (!success) {
-          DFSClient.LOG.info("Abandoning block " + block);
+          DFSClient.LOG.info("Abandoning " + block);
           dfsClient.namenode.abandonBlock(block, src, dfsClient.clientName);
           block = null;
           DFSClient.LOG.info("Excluding datanode " + nodes[errorIndex]);
@@ -1773,7 +1773,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
         try {
           Thread.sleep(400);
           if (Time.now() - localstart > 5000) {
-            DFSClient.LOG.info("Could not complete file " + src + " retrying...");
+            DFSClient.LOG.info("Could not complete " + src + " retrying...");
           }
         } catch (InterruptedException ie) {
           DFSClient.LOG.warn("Caught exception ", ie);

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -702,7 +702,7 @@ public class DistributedFileSystem extends FileSystem {
     }
     DatanodeInfo[] dataNode = {dfsIn.getCurrentDatanode()}; 
     lblocks[0] = new LocatedBlock(dataBlock, dataNode);
-    LOG.info("Found checksum error in data stream at block="
+    LOG.info("Found checksum error in data stream at "
         + dataBlock + " on datanode="
         + dataNode[0]);
 
@@ -715,7 +715,7 @@ public class DistributedFileSystem extends FileSystem {
     }
     DatanodeInfo[] sumsNode = {dfsSums.getCurrentDatanode()}; 
     lblocks[1] = new LocatedBlock(sumsBlock, sumsNode);
-    LOG.info("Found checksum error in checksum stream at block="
+    LOG.info("Found checksum error in checksum stream at "
         + sumsBlock + " on datanode=" + sumsNode[0]);
 
     // Ask client to delete blocks.

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java

@@ -157,11 +157,11 @@ public abstract class HdfsProtoUtil {
   }
 
   public static DataChecksum.Type fromProto(HdfsProtos.ChecksumTypeProto type) {
-    return DataChecksum.Type.valueOf(type.name());
+    return DataChecksum.Type.valueOf(type.getNumber());
   }
 
   public static HdfsProtos.ChecksumTypeProto toProto(DataChecksum.Type type) {
-    return HdfsProtos.ChecksumTypeProto.valueOf(type.name());
+    return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
   }
 
   public static InputStream vintPrefixed(final InputStream input)

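The two conversions above now map between DataChecksum.Type and ChecksumTypeProto by numeric value (type.getNumber() on the protobuf side, type.id on the Java side) instead of by enum constant name. valueOf(String) only works while both enums spell their constants identically; mapping through the shared numeric id keeps the conversion stable even if, say, the protobuf enum carries a prefix on its constant names. (The naming difference itself is not visible in this diff; the id-based mapping is simply the more robust of the two.)
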
+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java

@@ -52,7 +52,7 @@ public abstract class DataTransferProtoUtil {
   }
 
   public static ChecksumProto toProto(DataChecksum checksum) {
-    ChecksumTypeProto type = ChecksumTypeProto.valueOf(checksum.getChecksumType().name());
+    ChecksumTypeProto type = HdfsProtoUtil.toProto(checksum.getChecksumType());
     if (type == null) {
       throw new IllegalArgumentException(
           "Can't convert checksum to protobuf: " + checksum);
@@ -68,7 +68,7 @@ public abstract class DataTransferProtoUtil {
     if (proto == null) return null;
 
     int bytesPerChecksum = proto.getBytesPerChecksum();
-    DataChecksum.Type type = DataChecksum.Type.valueOf(proto.getType().name());
+    DataChecksum.Type type = HdfsProtoUtil.fromProto(proto.getType());
     
     return DataChecksum.newDataChecksum(type, bytesPerChecksum);
   }

+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
@@ -67,7 +68,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
@@ -129,7 +129,6 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.security.token.Token;
 
 import com.google.protobuf.ByteString;
@@ -961,7 +960,7 @@ public class PBHelper {
         fs.getFileBufferSize(),
         fs.getEncryptDataTransfer(),
         fs.getTrashInterval(),
-        DataChecksum.Type.valueOf(fs.getChecksumType().name()));
+        HdfsProtoUtil.fromProto(fs.getChecksumType()));
   }
   
   public static FsServerDefaultsProto convert(FsServerDefaults fs) {
@@ -974,7 +973,7 @@ public class PBHelper {
       .setFileBufferSize(fs.getFileBufferSize())
       .setEncryptDataTransfer(fs.getEncryptDataTransfer())
       .setTrashInterval(fs.getTrashInterval())
-      .setChecksumType(ChecksumTypeProto.valueOf(fs.getChecksumType().name()))
+      .setChecksumType(HdfsProtoUtil.toProto(fs.getChecksumType()))
       .build();
   }
   

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java

@@ -171,8 +171,7 @@ class JNStorage extends Storage {
 
   void format(NamespaceInfo nsInfo) throws IOException {
     setStorageInfo(nsInfo);
-    LOG.info("Formatting journal storage directory " + 
-        sd + " with nsid: " + getNamespaceID());
+    LOG.info("Formatting journal " + sd + " with nsid: " + getNamespaceID());
     // Unlock the directory before formatting, because we will
     // re-analyze it after format(). The analyzeStorage() call
     // below is reponsible for re-locking it. This is a no-op

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java

@@ -35,6 +35,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -230,6 +231,7 @@ public class JournalNode implements Tool, Configurable {
   }
 
   public static void main(String[] args) throws Exception {
+    StringUtils.startupShutdownMessage(JournalNode.class, args, LOG);
     System.exit(ToolRunner.run(new JournalNode(), args));
   }
 }

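The added StringUtils.startupShutdownMessage() call gives the JournalNode the same STARTUP_MSG banner on launch and a shutdown hook that logs a SHUTDOWN_MSG, matching what the other HDFS daemons already print and making JournalNode logs easier to correlate with NameNode and DataNode logs.
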
+ 31 - 33
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -950,8 +950,8 @@ public class BlockManager {
       datanodes.append(node).append(" ");
     }
     if (datanodes.length() != 0) {
-      NameNode.stateChangeLog.info("BLOCK* addToInvalidates: "
-          + b + " to " + datanodes.toString());
+      NameNode.stateChangeLog.info("BLOCK* addToInvalidates: " + b + " "
+          + datanodes);
     }
   }
 
@@ -972,7 +972,7 @@
       // thread of Datanode reports bad block before Block reports are sent
       // by the Datanode on startup
       NameNode.stateChangeLog.info("BLOCK* findAndMarkBlockAsCorrupt: "
-          + blk + " not found.");
+          + blk + " not found");
       return;
     }
     markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock, reason), dn);
@@ -1026,7 +1026,7 @@
       NameNode.stateChangeLog.info("BLOCK* invalidateBlocks: postponing " +
           "invalidation of " + b + " on " + dn + " because " +
           nr.replicasOnStaleNodes() + " replica(s) are located on nodes " +
-          "with potentially out-of-date block reports.");
+          "with potentially out-of-date block reports");
       postponeBlock(b.corrupted);
 
     } else if (nr.liveReplicas() >= 1) {
@@ -1039,7 +1039,7 @@
       }
     } else {
       NameNode.stateChangeLog.info("BLOCK* invalidateBlocks: " + b
-          + " on " + dn + " is the only copy and was not deleted.");
+          + " on " + dn + " is the only copy and was not deleted");
     }
   }
 
@@ -1160,9 +1160,8 @@
                    (blockHasEnoughRacks(block)) ) {
                 neededReplications.remove(block, priority); // remove from neededReplications
                 neededReplications.decrementReplicationIndex(priority);
-                NameNode.stateChangeLog.info("BLOCK* "
-                    + "Removing block " + block
-                    + " from neededReplications as it has enough replicas.");
+                NameNode.stateChangeLog.info("BLOCK* Removing " + block
+                    + " from neededReplications as it has enough replicas");
                 continue;
               }
             }
@@ -1236,9 +1235,8 @@
               neededReplications.remove(block, priority); // remove from neededReplications
               neededReplications.decrementReplicationIndex(priority);
               rw.targets = null;
-              NameNode.stateChangeLog.info("BLOCK* "
-                  + "Removing block " + block
-                  + " from neededReplications as it has enough replicas.");
+              NameNode.stateChangeLog.info("BLOCK* Removing " + block
+                  + " from neededReplications as it has enough replicas");
               continue;
             }
           }
@@ -1290,10 +1288,8 @@
             targetList.append(' ');
             targetList.append(targets[k]);
           }
-          NameNode.stateChangeLog.info(
-                  "BLOCK* ask "
-                  + rw.srcNode + " to replicate "
-                  + rw.block + " to " + targetList);
+          NameNode.stateChangeLog.info("BLOCK* ask " + rw.srcNode
+              + " to replicate " + rw.block + " to " + targetList);
         }
       }
     }
@@ -1527,10 +1523,9 @@
       boolean staleBefore = node.areBlockContentsStale();
       node.receivedBlockReport();
       if (staleBefore && !node.areBlockContentsStale()) {
-        LOG.info("BLOCK* processReport: " +
-            "Received first block report from " + node +
-            " after becoming active. Its block contents are no longer" +
-            " considered stale.");
+        LOG.info("BLOCK* processReport: Received first block report from "
+            + node + " after becoming active. Its block contents are no longer"
+            + " considered stale");
         rescanPostponedMisreplicatedBlocks();
       }
       
@@ -1601,9 +1596,9 @@
       addStoredBlock(b, node, null, true);
     }
     for (Block b : toInvalidate) {
-      NameNode.stateChangeLog.info("BLOCK* processReport: block "
+      NameNode.stateChangeLog.info("BLOCK* processReport: "
           + b + " on " + node + " size " + b.getNumBytes()
-          + " does not belong to any file.");
+          + " does not belong to any file");
       addToInvalidates(b, node);
     }
     for (BlockToMarkCorrupt b : toCorrupt) {
@@ -1870,7 +1865,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     int count = pendingDNMessages.count();
     if (count > 0) {
       LOG.info("Processing " + count + " messages from DataNodes " +
-          "that were previously queued during standby state.");
+          "that were previously queued during standby state");
     }
     processQueuedMessages(pendingDNMessages.takeAll());
     assert pendingDNMessages.count() == 0;
@@ -1927,9 +1922,9 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
           // the block report got a little bit delayed after the pipeline
           // closed. So, ignore this report, assuming we will get a
           // FINALIZED replica later. See HDFS-2791
-          LOG.info("Received an RBW replica for block " + storedBlock +
-              " on " + dn + ": ignoring it, since the block is " +
-              "complete with the same generation stamp.");
+          LOG.info("Received an RBW replica for " + storedBlock +
+              " on " + dn + ": ignoring it, since it is " +
+              "complete with the same genstamp");
           return null;
         } else {
           return new BlockToMarkCorrupt(storedBlock,
@@ -2041,7 +2036,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
       // If this block does not belong to anyfile, then we are done.
       NameNode.stateChangeLog.info("BLOCK* addStoredBlock: " + block + " on "
           + node + " size " + block.getNumBytes()
-          + " but it does not belong to any file.");
+          + " but it does not belong to any file");
       // we could add this block to invalidate set of this datanode.
       // it will happen in next block report otherwise.
       return block;
@@ -2158,9 +2153,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
       try {
         invalidateBlock(new BlockToMarkCorrupt(blk, null), node);
       } catch (IOException e) {
-        NameNode.stateChangeLog.info("NameNode.invalidateCorruptReplicas " +
-                                      "error in deleting bad block " + blk +
-                                      " on " + node, e);
+        NameNode.stateChangeLog.info("invalidateCorruptReplicas "
+            + "error in deleting bad block " + blk + " on " + node, e);
         gotException = true;
       }
     }
@@ -2308,7 +2302,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
       DatanodeDescriptor cur = it.next();
       if (cur.areBlockContentsStale()) {
         LOG.info("BLOCK* processOverReplicatedBlock: " +
-            "Postponing processing of over-replicated block " +
+            "Postponing processing of over-replicated " +
             block + " since datanode " + cur + " does not yet have up-to-date " +
             "block information.");
         postponeBlock(block);
@@ -2398,7 +2392,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
       //
       addToInvalidates(b, cur);
       NameNode.stateChangeLog.info("BLOCK* chooseExcessReplicates: "
-                +"("+cur+", "+b+") is added to invalidated blocks set.");
+                +"("+cur+", "+b+") is added to invalidated blocks set");
     }
   }
 
@@ -2540,7 +2534,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     for (Block b : toInvalidate) {
       NameNode.stateChangeLog.info("BLOCK* addBlock: block "
           + b + " on " + node + " size " + b.getNumBytes()
-          + " does not belong to any file.");
+          + " does not belong to any file");
       addToInvalidates(b, node);
     }
     for (BlockToMarkCorrupt b : toCorrupt) {
@@ -2651,7 +2645,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
    * of live nodes.  If in startup safemode (or its 30-sec extension period),
    * then it gains speed by ignoring issues of excess replicas or nodes
    * that are decommissioned or in process of becoming decommissioned.
-   * If not in startup, then it calls {@link countNodes()} instead.
+   * If not in startup, then it calls {@link #countNodes(Block)} instead.
    * 
    * @param b - the block being tested
    * @return count of live nodes for this block
@@ -2702,6 +2696,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
   void processOverReplicatedBlocksOnReCommission(
       final DatanodeDescriptor srcNode) {
     final Iterator<? extends Block> it = srcNode.getBlockIterator();
+    int numOverReplicated = 0;
     while(it.hasNext()) {
       final Block block = it.next();
       BlockCollection bc = blocksMap.getBlockCollection(block);
@@ -2711,8 +2706,11 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
       if (numCurrentReplica > expectedReplication) {
         // over-replicated block 
         processOverReplicatedBlock(block, expectedReplication, null, null);
+        numOverReplicated++;
      }
     }
+    LOG.info("Invalidated " + numOverReplicated + " over-replicated blocks on " +
+        srcNode + " during recommissioning");
   }
 
   /**

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

@@ -185,7 +185,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) {
       return writer;
     }
-    int totalReplicasExpected = numOfReplicas;
+    int totalReplicasExpected = numOfReplicas + results.size();
       
     int numOfResults = results.size();
     boolean newBlock = (numOfResults==0);
@@ -231,7 +231,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
           maxNodesPerRack, results, avoidStaleNodes);
     } catch (NotEnoughReplicasException e) {
       LOG.warn("Not able to place enough replicas, still in need of "
-               + numOfReplicas + " to reach " + totalReplicasExpected + "\n"
+               + (totalReplicasExpected - results.size()) + " to reach "
+               + totalReplicasExpected + "\n"
               + e.getMessage());
       if (avoidStaleNodes) {
         // ecxludedNodes now has - initial excludedNodes, any nodes that were

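A quick worked example of the corrected warning above: with a target replication of 3 and one replica already chosen when chooseTarget() is entered, numOfReplicas is 2 and totalReplicasExpected is now 2 + 1 = 3. If one more replica gets placed before NotEnoughReplicasException is thrown (results.size() == 2 at that point), the message reads "still in need of 1 to reach 3"; the old code would have logged "still in need of 2 to reach 2", overstating the shortfall and understating the target.
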
+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java

@@ -362,8 +362,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   void addBlockToBeRecovered(BlockInfoUnderConstruction block) {
     if(recoverBlocks.contains(block)) {
       // this prevents adding the same block twice to the recovery queue
-      BlockManager.LOG.info("Block " + block +
-                            " is already in the recovery queue.");
+      BlockManager.LOG.info(block + " is already in the recovery queue");
       return;
     }
     recoverBlocks.offer(block);

+ 17 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

@@ -584,7 +584,7 @@ public class DatanodeManager {
     if (node.isDecommissionInProgress()) {
       if (!blockManager.isReplicationInProgress(node)) {
         node.setDecommissioned();
-        LOG.info("Decommission complete for node " + node);
+        LOG.info("Decommission complete for " + node);
       }
     }
     return node.isDecommissioned();
@@ -593,8 +593,8 @@
   /** Start decommissioning the specified datanode. */
   private void startDecommission(DatanodeDescriptor node) {
     if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
-      LOG.info("Start Decommissioning node " + node + " with " + 
-          node.numBlocks() +  " blocks.");
+      LOG.info("Start Decommissioning " + node + " with " + 
+          node.numBlocks() +  " blocks");
       heartbeatManager.startDecommission(node);
       node.decommissioningStatus.setStartTime(now());
       
@@ -606,9 +606,13 @@
   /** Stop decommissioning the specified datanodes. */
   void stopDecommission(DatanodeDescriptor node) {
     if (node.isDecommissionInProgress() || node.isDecommissioned()) {
-      LOG.info("Stop Decommissioning node " + node);
+      LOG.info("Stop Decommissioning " + node);
       heartbeatManager.stopDecommission(node);
-      blockManager.processOverReplicatedBlocksOnReCommission(node);
+      // Over-replicated blocks will be detected and processed when 
+      // the dead node comes back and send in its full block report.
+      if (node.isAlive) {
+        blockManager.processOverReplicatedBlocksOnReCommission(node);
+      }
     }
   }
 
@@ -658,17 +662,15 @@
       throw new DisallowedDatanodeException(nodeReg);
     }
       
-    NameNode.stateChangeLog.info("BLOCK* NameSystem.registerDatanode: "
-        + "node registration from " + nodeReg
-        + " storage " + nodeReg.getStorageID());
+    NameNode.stateChangeLog.info("BLOCK* registerDatanode: from "
+        + nodeReg + " storage " + nodeReg.getStorageID());
 
     DatanodeDescriptor nodeS = datanodeMap.get(nodeReg.getStorageID());
     DatanodeDescriptor nodeN = host2DatanodeMap.getDatanodeByXferAddr(
         nodeReg.getIpAddr(), nodeReg.getXferPort());
       
     if (nodeN != null && nodeN != nodeS) {
-      NameNode.LOG.info("BLOCK* NameSystem.registerDatanode: "
-                        + "node from name: " + nodeN);
+      NameNode.LOG.info("BLOCK* registerDatanode: " + nodeN);
       // nodeN previously served a different data storage, 
       // which is not served by anybody anymore.
       removeDatanode(nodeN);
@@ -683,8 +685,8 @@
         // storage. We do not need to remove old data blocks, the delta will
         // be calculated on the next block report from the datanode
         if(NameNode.stateChangeLog.isDebugEnabled()) {
-          NameNode.stateChangeLog.debug("BLOCK* NameSystem.registerDatanode: "
-                                        + "node restarted.");
+          NameNode.stateChangeLog.debug("BLOCK* registerDatanode: "
+              + "node restarted.");
         }
       } else {
         // nodeS is found
@@ -696,11 +698,9 @@
           value in "VERSION" file under the data directory of the datanode,
           but this is might not work if VERSION file format has changed 
        */        
-        NameNode.stateChangeLog.info( "BLOCK* NameSystem.registerDatanode: "
-                                      + "node " + nodeS
-                                      + " is replaced by " + nodeReg + 
-                                      " with the same storageID " +
-                                      nodeReg.getStorageID());
+        NameNode.stateChangeLog.info("BLOCK* registerDatanode: " + nodeS
+            + " is replaced by " + nodeReg + " with the same storageID "
+            + nodeReg.getStorageID());
       }
       // update cluster map
       getNetworkTopology().remove(nodeS);

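The stopDecommission change above skips the over-replication re-scan when the node is dead, because a dead node's replicas are reconciled anyway from its next full block report once it re-registers. A minimal, self-contained Java sketch of that guard follows; every class and method name in it is hypothetical, not Hadoop's.

    // Illustrative sketch only, assuming a simplified liveness/decommission model.
    class Node {
      volatile boolean alive;
      volatile boolean decommissionInProgress;
      volatile boolean decommissioned;
    }

    class BlockIndex {
      void processOverReplicatedBlocksOnReCommission(Node n) {
        // prune excess replicas contributed by the re-commissioned node
      }
    }

    class DecommissionManager {
      private final BlockIndex blocks = new BlockIndex();

      void stopDecommission(Node node) {
        if (node.decommissionInProgress || node.decommissioned) {
          node.decommissionInProgress = false;
          node.decommissioned = false;
          // A dead node's replicas are reconciled from its full block report
          // when it re-registers, so only live nodes are re-scanned here.
          if (node.alive) {
            blocks.processOverReplicatedBlocksOnReCommission(node);
          }
        }
      }
    }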
+ 10 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java

@@ -433,7 +433,7 @@ public abstract class Storage extends StorageInfo {
         if (!root.exists()) {
           // storage directory does not exist
           if (startOpt != StartupOption.FORMAT) {
-            LOG.info("Storage directory " + rootPath + " does not exist.");
+            LOG.info("Storage directory " + rootPath + " does not exist");
             return StorageState.NON_EXISTENT;
           }
           LOG.info(rootPath + " does not exist. Creating ...");
@@ -442,7 +442,7 @@ public abstract class Storage extends StorageInfo {
         }
         // or is inaccessible
         if (!root.isDirectory()) {
-          LOG.info(rootPath + "is not a directory.");
+          LOG.info(rootPath + "is not a directory");
           return StorageState.NON_EXISTENT;
         }
         if (!root.canWrite()) {
@@ -539,34 +539,34 @@ public abstract class Storage extends StorageInfo {
       switch(curState) {
       case COMPLETE_UPGRADE:  // mv previous.tmp -> previous
         LOG.info("Completing previous upgrade for storage directory " 
-                 + rootPath + ".");
+                 + rootPath);
         rename(getPreviousTmp(), getPreviousDir());
         return;
       case RECOVER_UPGRADE:   // mv previous.tmp -> current
         LOG.info("Recovering storage directory " + rootPath
-                 + " from previous upgrade.");
+                 + " from previous upgrade");
         if (curDir.exists())
           deleteDir(curDir);
         rename(getPreviousTmp(), curDir);
         return;
       case COMPLETE_ROLLBACK: // rm removed.tmp
         LOG.info("Completing previous rollback for storage directory "
-                 + rootPath + ".");
+                 + rootPath);
         deleteDir(getRemovedTmp());
         return;
       case RECOVER_ROLLBACK:  // mv removed.tmp -> current
         LOG.info("Recovering storage directory " + rootPath
-                 + " from previous rollback.");
+                 + " from previous rollback");
         rename(getRemovedTmp(), curDir);
         return;
       case COMPLETE_FINALIZE: // rm finalized.tmp
         LOG.info("Completing previous finalize for storage directory "
-                 + rootPath + ".");
+                 + rootPath);
         deleteDir(getFinalizedTmp());
         return;
       case COMPLETE_CHECKPOINT: // mv lastcheckpoint.tmp -> previous.checkpoint
         LOG.info("Completing previous checkpoint for storage directory " 
-                 + rootPath + ".");
+                 + rootPath);
         File prevCkptDir = getPreviousCheckpoint();
         if (prevCkptDir.exists())
           deleteDir(prevCkptDir);
@@ -574,7 +574,7 @@ public abstract class Storage extends StorageInfo {
         return;
       case RECOVER_CHECKPOINT:  // mv lastcheckpoint.tmp -> current
         LOG.info("Recovering storage directory " + rootPath
-                 + " from failed checkpoint.");
+                 + " from failed checkpoint");
         if (curDir.exists())
           deleteDir(curDir);
         rename(getLastCheckpointTmp(), curDir);
@@ -629,7 +629,7 @@ public abstract class Storage extends StorageInfo {
       FileLock newLock = tryLock();
       if (newLock == null) {
         String msg = "Cannot lock storage " + this.root 
-          + ". The directory is already locked.";
+          + ". The directory is already locked";
         LOG.info(msg);
         throw new IOException(msg);
       }

+ 8 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java

@@ -75,14 +75,18 @@ class BPServiceActor implements Runnable {

   BPOfferService bpos;

-  long lastBlockReport = 0;
-  long lastDeletedReport = 0;
+  // lastBlockReport, lastDeletedReport and lastHeartbeat may be assigned/read
+  // by testing threads (through BPServiceActor#triggerXXX), while also 
+  // assigned/read by the actor thread. Thus they should be declared as volatile
+  // to make sure the "happens-before" consistency.
+  volatile long lastBlockReport = 0;
+  volatile long lastDeletedReport = 0;

   boolean resetBlockReportTime = true;

   Thread bpThread;
   DatanodeProtocolClientSideTranslatorPB bpNamenode;
-  private long lastHeartbeat = 0;
+  private volatile long lastHeartbeat = 0;
   private volatile boolean initialized = false;

   /**
@@ -637,8 +641,7 @@ class BPServiceActor implements Runnable {
     try {
       Thread.sleep(millis);
     } catch (InterruptedException ie) {
-      LOG.info("BPOfferService " + this +
-          " interrupted while " + stateString);
+      LOG.info("BPOfferService " + this + " interrupted while " + stateString);
     }
   }


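The BPServiceActor change above makes lastBlockReport, lastDeletedReport and lastHeartbeat volatile because a test thread can reset them while the actor thread reads and updates them. A small, self-contained sketch of the same visibility pattern follows, using only JDK classes; the class and method names are hypothetical.

    // Illustrative sketch only: volatile gives the happens-before edge that
    // makes a reset from another thread visible to the actor loop.
    import java.util.concurrent.TimeUnit;

    class HeartbeatActor implements Runnable {
      // Written by the actor thread, reset by other threads via triggerHeartbeat().
      private volatile long lastHeartbeat = 0;
      private volatile boolean running = true;
      private final long intervalMs = 3000;

      @Override
      public void run() {
        while (running) {
          long now = System.currentTimeMillis();
          if (now - lastHeartbeat >= intervalMs) {
            System.out.println("heartbeat at " + now);
            lastHeartbeat = now;
          }
          try {
            TimeUnit.MILLISECONDS.sleep(50);
          } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            return;
          }
        }
      }

      /** Called from another thread (e.g. a test) to force the next heartbeat. */
      void triggerHeartbeat() {
        lastHeartbeat = 0;
      }

      void stop() {
        running = false;
      }
    }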
+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java

@@ -106,15 +106,15 @@ class BlockPoolManager {
     }
   }

-  void shutDownAll() throws InterruptedException {
-    BPOfferService[] bposArray = this.getAllNamenodeThreads();
-    
-    for (BPOfferService bpos : bposArray) {
-      bpos.stop(); //interrupts the threads
-    }
-    //now join
-    for (BPOfferService bpos : bposArray) {
-      bpos.join();
+  void shutDownAll(BPOfferService[] bposArray) throws InterruptedException {
+    if (bposArray != null) {
+      for (BPOfferService bpos : bposArray) {
+        bpos.stop(); //interrupts the threads
+      }
+      //now join
+      for (BPOfferService bpos : bposArray) {
+        bpos.join();
+      }
     }
   }


+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java

@@ -154,7 +154,7 @@ class BlockPoolSliceScanner {
     }
     this.scanPeriod = hours * 3600 * 1000;
     LOG.info("Periodic Block Verification Scanner initialized with interval "
-        + hours + " hours for block pool " + bpid + ".");
+        + hours + " hours for block pool " + bpid);

     // get the list of blocks and arrange them in random order
     List<Block> arr = dataset.getFinalizedBlocks(blockPoolId);
@@ -310,12 +310,12 @@ class BlockPoolSliceScanner {
   }

   private void handleScanFailure(ExtendedBlock block) {
-    LOG.info("Reporting bad block " + block);
+    LOG.info("Reporting bad " + block);
     try {
       datanode.reportBadBlocks(block);
     } catch (IOException ie) {
       // it is bad, but not bad enough to shutdown the scanner
-      LOG.warn("Cannot report bad block=" + block.getBlockId());
+      LOG.warn("Cannot report bad " + block.getBlockId());
     }
   }

@@ -411,7 +411,7 @@ class BlockPoolSliceScanner {

         // If the block does not exists anymore, then its not an error
         if (!dataset.contains(block)) {
-          LOG.info(block + " is no longer in the dataset.");
+          LOG.info(block + " is no longer in the dataset");
           deleteBlock(block.getLocalBlock());
           return;
         }
@@ -424,7 +424,7 @@ class BlockPoolSliceScanner {
         // is a block really deleted by mistake, DirectoryScan should catch it.
         if (e instanceof FileNotFoundException ) {
           LOG.info("Verification failed for " + block +
-              ". It may be due to race with write.");
+              " - may be due to race with write");
           deleteBlock(block.getLocalBlock());
           return;
         }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java

@@ -332,7 +332,7 @@ public class BlockPoolSliceStorage extends Storage {
     // 4.rename <SD>/curernt/<bpid>/previous.tmp to <SD>/curernt/<bpid>/previous
     rename(bpTmpDir, bpPrevDir);
     LOG.info("Upgrade of block pool " + blockpoolID + " at " + bpSd.getRoot()
-        + " is complete.");
+        + " is complete");
   }

   /**
@@ -409,7 +409,7 @@ public class BlockPoolSliceStorage extends Storage {

     // 3. delete removed.tmp dir
     deleteDir(tmpDir);
-    LOG.info("Rollback of " + bpSd.getRoot() + " is complete.");
+    LOG.info("Rollback of " + bpSd.getRoot() + " is complete");
   }

   /*

+ 35 - 27
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java

@@ -319,9 +319,6 @@ class BlockReceiver implements Closeable {
    * @throws IOException
    */
   void flushOrSync(boolean isSync) throws IOException {
-    if (isSync && (out != null || checksumOut != null)) {
-      datanode.metrics.incrFsyncCount();      
-    }
     long flushTotalNanos = 0;
     if (checksumOut != null) {
       long flushStartNanos = System.nanoTime();
@@ -347,6 +344,9 @@ class BlockReceiver implements Closeable {
     }
     if (checksumOut != null || out != null) {
       datanode.metrics.addFlushNanos(flushTotalNanos);
+      if (isSync) {
+    	  datanode.metrics.incrFsyncCount();      
+      }
     }
   }

@@ -357,7 +357,7 @@ class BlockReceiver implements Closeable {
   private void handleMirrorOutError(IOException ioe) throws IOException {
     String bpid = block.getBlockPoolId();
     LOG.info(datanode.getDNRegistrationForBP(bpid)
-        + ":Exception writing block " + block + " to mirror " + mirrorAddr, ioe);
+        + ":Exception writing " + block + " to mirror " + mirrorAddr, ioe);
     if (Thread.interrupted()) { // shut down if the thread is interrupted
       throw ioe;
     } else { // encounter an error while writing to mirror
@@ -379,16 +379,16 @@ class BlockReceiver implements Closeable {
       LOG.warn("Checksum error in block " + block + " from " + inAddr, ce);
       if (srcDataNode != null) {
         try {
-          LOG.info("report corrupt block " + block + " from datanode " +
+          LOG.info("report corrupt " + block + " from datanode " +
                     srcDataNode + " to namenode");
           datanode.reportRemoteBadBlock(srcDataNode, block);
         } catch (IOException e) {
-          LOG.warn("Failed to report bad block " + block + 
+          LOG.warn("Failed to report bad " + block + 
                     " from datanode " + srcDataNode + " to namenode");
         }
       }
-      throw new IOException("Unexpected checksum mismatch " + 
-                            "while writing " + block + " from " + inAddr);
+      throw new IOException("Unexpected checksum mismatch while writing "
+          + block + " from " + inAddr);
     }
   }

@@ -438,8 +438,10 @@ class BlockReceiver implements Closeable {
     int len = header.getDataLen();
     boolean syncBlock = header.getSyncBlock();

-    // make sure the block gets sync'ed upon close
-    this.syncOnClose |= syncBlock && lastPacketInBlock;
+    // avoid double sync'ing on close
+    if (syncBlock && lastPacketInBlock) {
+      this.syncOnClose = false;
+    }

     // update received bytes
     long firstByteInBlock = offsetInBlock;
@@ -448,11 +450,11 @@ class BlockReceiver implements Closeable {
       replicaInfo.setNumBytes(offsetInBlock);
     }

-    // put in queue for pending acks
-    if (responder != null) {
-      ((PacketResponder)responder.getRunnable()).enqueue(seqno,
-                                      lastPacketInBlock, offsetInBlock); 
-    }  
+    // put in queue for pending acks, unless sync was requested
+    if (responder != null && !syncBlock) {
+      ((PacketResponder) responder.getRunnable()).enqueue(seqno,
+          lastPacketInBlock, offsetInBlock);
+    }

     //First write the packet to the mirror:
     if (mirrorOut != null && !mirrorError) {
@@ -471,8 +473,8 @@ class BlockReceiver implements Closeable {
       if(LOG.isDebugEnabled()) {
         LOG.debug("Receiving an empty packet or the end of the block " + block);
       }
-      // flush unless close() would flush anyway
-      if (syncBlock && !lastPacketInBlock) {
+      // sync block if requested
+      if (syncBlock) {
         flushOrSync(true);
       }
     } else {
@@ -518,7 +520,7 @@ class BlockReceiver implements Closeable {
           // If this is a partial chunk, then read in pre-existing checksum
           if (firstByteInBlock % bytesPerChecksum != 0) {
             LOG.info("Packet starts at " + firstByteInBlock +
-                     " for block " + block +
+                     " for " + block +
                      " which is not a multiple of bytesPerChecksum " +
                      bytesPerChecksum);
             long offsetInChecksum = BlockMetadataHeader.getHeaderSize() +
@@ -563,8 +565,8 @@ class BlockReceiver implements Closeable {
                 checksumBuf.arrayOffset() + checksumBuf.position(),
                 checksumLen);
           }
-          /// flush entire packet, sync unless close() will sync
-          flushOrSync(syncBlock && !lastPacketInBlock);
+          /// flush entire packet, sync if requested
+          flushOrSync(syncBlock);

           replicaInfo.setLastChecksumAndDataLen(
             offsetInBlock, lastChunkChecksum
@@ -580,6 +582,13 @@ class BlockReceiver implements Closeable {
       }
     }

+    // if sync was requested, put in queue for pending acks here
+    // (after the fsync finished)
+    if (responder != null && syncBlock) {
+      ((PacketResponder) responder.getRunnable()).enqueue(seqno,
+          lastPacketInBlock, offsetInBlock);
+    }
+
     if (throttler != null) { // throttle I/O
       throttler.throttle(len);
     }
@@ -662,7 +671,7 @@ class BlockReceiver implements Closeable {
       }

     } catch (IOException ioe) {
-      LOG.info("Exception in receiveBlock for " + block, ioe);
+      LOG.info("Exception for " + block, ioe);
       throw ioe;
     } finally {
       if (!responderClosed) { // Abnormal termination of the flow above
@@ -733,10 +742,9 @@ class BlockReceiver implements Closeable {
     int checksumSize = diskChecksum.getChecksumSize();
     blkoff = blkoff - sizePartialChunk;
     LOG.info("computePartialChunkCrc sizePartialChunk " + 
-              sizePartialChunk +
-              " block " + block +
-              " offset in block " + blkoff +
-              " offset in metafile " + ckoff);
+              sizePartialChunk + " " + block +
+              " block offset " + blkoff +
+              " metafile offset " + ckoff);

     // create an input stream from the block file
     // and read in partial crc chunk into temporary buffer
@@ -758,7 +766,7 @@ class BlockReceiver implements Closeable {
     partialCrc = DataChecksum.newDataChecksum(
         diskChecksum.getChecksumType(), diskChecksum.getBytesPerChecksum());
     partialCrc.update(buf, 0, sizePartialChunk);
-    LOG.info("Read in partial CRC chunk from disk for block " + block);
+    LOG.info("Read in partial CRC chunk from disk for " + block);

     // paranoia! verify that the pre-computed crc matches what we
     // recalculated just now
@@ -973,7 +981,7 @@ class BlockReceiver implements Closeable {
                       "HDFS_WRITE", clientname, offset,
                       dnR.getStorageID(), block, endTime-startTime));
               } else {
-                LOG.info("Received block " + block + " of size "
+                LOG.info("Received " + block + " size "
                     + block.getNumBytes() + " from " + inAddr);
               }
             }

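The BlockReceiver change above reorders the packet path so that, when a packet requests sync, the fsync happens before the packet is queued for an ack; otherwise an ack could go out for data that is not yet durable. A stripped-down sketch of that ordering follows, using only JDK I/O; the class and method names are hypothetical.

    // Illustrative sketch only: ack a packet that asked for sync only after force().
    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    class PacketWriter implements AutoCloseable {
      private final FileChannel out;
      private final BlockingQueue<Long> pendingAcks = new LinkedBlockingQueue<>();

      PacketWriter(Path file) throws IOException {
        out = FileChannel.open(file, StandardOpenOption.CREATE, StandardOpenOption.WRITE);
      }

      void receivePacket(long seqno, byte[] data, boolean syncRequested) throws IOException {
        if (!syncRequested) {
          // No durability requirement: the ack may be queued before the write lands.
          pendingAcks.add(seqno);
        }
        out.write(ByteBuffer.wrap(data));
        if (syncRequested) {
          out.force(true);          // fsync first ...
          pendingAcks.add(seqno);   // ... then make the packet eligible for an ack
        }
      }

      @Override
      public void close() throws IOException {
        out.close();
      }
    }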
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java

@@ -503,7 +503,7 @@ class BlockSender implements java.io.Closeable {
          * part of a block and then decides not to read the rest (but leaves
          * the socket open).
          */
-          LOG.info("BlockSender.sendChunks() exception: ", e);
+          LOG.info("exception: ", e);
       } else {
         /* Exception while writing to the client. Connection closure from
          * the other end is mostly the case and we do not care much about

+ 11 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -481,8 +481,7 @@ public class DataNode extends Configured
       blockScanner = new DataBlockScanner(this, data, conf);
       blockScanner.start();
     } else {
-      LOG.info("Periodic Block Verification scan is disabled because " +
-               reason + ".");
+      LOG.info("Periodic Block Verification scan disabled because " + reason);
     }
   }

@@ -511,7 +510,7 @@ public class DataNode extends Configured
       directoryScanner.start();
     } else {
       LOG.info("Periodic Directory Tree Verification scan is disabled because " +
-               reason + ".");
+               reason);
     }
   }

@@ -1095,6 +1094,12 @@ public class DataNode extends Configured
       }
     }

+    // We need to make a copy of the original blockPoolManager#offerServices to
+    // make sure blockPoolManager#shutDownAll() can still access all the 
+    // BPOfferServices, since after setting DataNode#shouldRun to false the 
+    // offerServices may be modified.
+    BPOfferService[] bposArray = this.blockPoolManager == null ? null
+        : this.blockPoolManager.getAllNamenodeThreads();
     this.shouldRun = false;
     shutdownPeriodicScanners();

@@ -1141,7 +1146,7 @@ public class DataNode extends Configured

     if(blockPoolManager != null) {
       try {
-        this.blockPoolManager.shutDownAll();
+        this.blockPoolManager.shutDownAll(bposArray);
       } catch (InterruptedException ie) {
         LOG.warn("Received exception in BlockPoolManager#shutDownAll: ", ie);
       }
@@ -1256,7 +1261,7 @@ public class DataNode extends Configured
           xfersBuilder.append(xferTargets[i]);
           xfersBuilder.append(" ");
         }
-        LOG.info(bpReg + " Starting thread to transfer block " + 
+        LOG.info(bpReg + " Starting thread to transfer " + 
                  block + " to " + xfersBuilder);                       
       }

@@ -2043,7 +2048,7 @@ public class DataNode extends Configured
     ExtendedBlock block = rb.getBlock();
     DatanodeInfo[] targets = rb.getLocations();

-    LOG.info(who + " calls recoverBlock(block=" + block
+    LOG.info(who + " calls recoverBlock(" + block
         + ", targets=[" + Joiner.on(", ").join(targets) + "]"
         + ", newGenerationStamp=" + rb.getNewGenerationStamp() + ")");
   }

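The DataNode and BlockPoolManager changes above snapshot the BPOfferService array before shouldRun is flipped, so shutdown can still stop and join every actor even if the live list is modified afterwards. A minimal sketch of the snapshot-then-stop-then-join pattern follows, with hypothetical names and plain JDK threads.

    // Illustrative sketch only, assuming a simplified worker list in place of BPOfferServices.
    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;

    class Service {
      private final List<Thread> workers = new CopyOnWriteArrayList<>();
      private volatile boolean shouldRun = true;

      void shutdown() throws InterruptedException {
        // Snapshot first: later code may add or remove workers once shouldRun is false.
        Thread[] snapshot = workers.toArray(new Thread[0]);
        shouldRun = false;
        shutDownAll(snapshot);
      }

      private void shutDownAll(Thread[] snapshot) throws InterruptedException {
        if (snapshot != null) {
          for (Thread t : snapshot) {
            t.interrupt();          // ask every worker to stop first ...
          }
          for (Thread t : snapshot) {
            t.join();               // ... then wait for each one to exit
          }
        }
      }

      boolean isRunning() {
        return shouldRun;
      }
    }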
+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -155,11 +155,11 @@ public class DataStorage extends Storage {
           break;
         case NON_EXISTENT:
           // ignore this storage
-          LOG.info("Storage directory " + dataDir + " does not exist.");
+          LOG.info("Storage directory " + dataDir + " does not exist");
           it.remove();
           continue;
         case NOT_FORMATTED: // format
-          LOG.info("Storage directory " + dataDir + " is not formatted.");
+          LOG.info("Storage directory " + dataDir + " is not formatted");
           LOG.info("Formatting ...");
           format(sd, nsInfo);
           break;
@@ -482,7 +482,7 @@ public class DataStorage extends Storage {

     // 5. Rename <SD>/previous.tmp to <SD>/previous
     rename(tmpDir, prevDir);
-    LOG.info("Upgrade of " + sd.getRoot()+ " is complete.");
+    LOG.info("Upgrade of " + sd.getRoot()+ " is complete");
     addBlockPoolStorage(nsInfo.getBlockPoolID(), bpStorage);
   }

@@ -556,7 +556,7 @@ public class DataStorage extends Storage {
     rename(prevDir, curDir);
     // delete tmp dir
     deleteDir(tmpDir);
-    LOG.info("Rollback of " + sd.getRoot() + " is complete.");
+    LOG.info("Rollback of " + sd.getRoot() + " is complete");
   }

   /**
@@ -596,9 +596,9 @@ public class DataStorage extends Storage {
               deleteDir(bbwDir);
             }
           } catch(IOException ex) {
-            LOG.error("Finalize upgrade for " + dataDirPath + " failed.", ex);
+            LOG.error("Finalize upgrade for " + dataDirPath + " failed", ex);
           }
-          LOG.info("Finalize upgrade for " + dataDirPath + " is complete.");
+          LOG.info("Finalize upgrade for " + dataDirPath + " is complete");
         }
         @Override
         public String toString() { return "Finalize " + dataDirPath; }

+ 9 - 13
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -170,7 +170,7 @@ class DataXceiver extends Receiver implements Runnable {
         } catch (InvalidMagicNumberException imne) {
           LOG.info("Failed to read expected encryption handshake from client " +
               "at " + s.getInetAddress() + ". Perhaps the client is running an " +
-              "older version of Hadoop which does not support encryption.");
+              "older version of Hadoop which does not support encryption");
           return;
         }
         input = encryptedStreams.in;
@@ -367,9 +367,8 @@ class DataXceiver extends Receiver implements Runnable {
     // make a copy here.
     final ExtendedBlock originalBlock = new ExtendedBlock(block);
     block.setNumBytes(dataXceiverServer.estimateBlockSize);
-    LOG.info("Receiving block " + block + 
-             " src: " + remoteAddress +
-             " dest: " + localAddress);
+    LOG.info("Receiving " + block + " src: " + remoteAddress + " dest: "
+        + localAddress);

     // reply to upstream datanode or client 
     final DataOutputStream replyOut = new DataOutputStream(
@@ -478,9 +477,9 @@ class DataXceiver extends Receiver implements Runnable {
                       block + " to mirror " + mirrorNode + ": " + e);
             throw e;
           } else {
-            LOG.info(datanode + ":Exception transfering block " +
+            LOG.info(datanode + ":Exception transfering " +
                      block + " to mirror " + mirrorNode +
-                     ". continuing without the mirror.", e);
+                     "- continuing without the mirror", e);
           }
         }
       }
@@ -528,10 +527,8 @@ class DataXceiver extends Receiver implements Runnable {
       if (isDatanode ||
           stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
         datanode.closeBlock(block, DataNode.EMPTY_DEL_HINT);
-        LOG.info("Received block " + block + 
-                 " src: " + remoteAddress +
-                 " dest: " + localAddress +
-                 " of size " + block.getNumBytes());
+        LOG.info("Received " + block + " src: " + remoteAddress + " dest: "
+            + localAddress + " of size " + block.getNumBytes());
       }

@@ -674,7 +671,7 @@ class DataXceiver extends Receiver implements Runnable {
       datanode.metrics.incrBytesRead((int) read);
       datanode.metrics.incrBlocksRead();

-      LOG.info("Copied block " + block + " to " + s.getRemoteSocketAddress());
+      LOG.info("Copied " + block + " to " + s.getRemoteSocketAddress());
     } catch (IOException ioe) {
       isOpSuccess = false;
       LOG.info("opCopyBlock " + block + " received exception " + ioe);
@@ -797,8 +794,7 @@ class DataXceiver extends Receiver implements Runnable {
       // notify name node
       datanode.notifyNamenodeReceivedBlock(block, delHint);

-      LOG.info("Moved block " + block + 
-          " from " + s.getRemoteSocketAddress());
+      LOG.info("Moved " + block + " from " + s.getRemoteSocketAddress());

     } catch (IOException ioe) {
       opStatus = ERROR;

+ 23 - 14
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java

@@ -38,6 +38,8 @@ import org.mortbay.jetty.security.SslSocketConnector;

 import javax.net.ssl.SSLServerSocketFactory;

+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Utility class to start a datanode in a secure cluster, first obtaining 
  * privileged resources before main startup and handing them to the datanode.
@@ -73,6 +75,25 @@ public class SecureDataNodeStarter implements Daemon {
     // Stash command-line arguments for regular datanode
     args = context.getArguments();

+    sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
+    resources = getSecureResources(sslFactory, conf);
+  }
+
+  @Override
+  public void start() throws Exception {
+    System.err.println("Starting regular datanode initialization");
+    DataNode.secureMain(args, resources);
+  }
+  
+  @Override public void destroy() {
+    sslFactory.destroy();
+  }
+
+  @Override public void stop() throws Exception { /* Nothing to do */ }
+
+  @VisibleForTesting
+  public static SecureResources getSecureResources(final SSLFactory sslFactory,
+                                  Configuration conf) throws Exception {
     // Obtain secure port for data streaming to datanode
     InetSocketAddress streamingAddr  = DataNode.getStreamingAddr(conf);
     int socketWriteTimeout = conf.getInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
@@ -85,13 +106,12 @@ public class SecureDataNodeStarter implements Daemon {
     // Check that we got the port we need
     if (ss.getLocalPort() != streamingAddr.getPort()) {
       throw new RuntimeException("Unable to bind on specified streaming port in secure " +
-      		"context. Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort());
+          "context. Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort());
     }

     // Obtain secure listener for web server
     Connector listener;
     if (HttpConfig.isSecure()) {
-      sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
       try {
         sslFactory.init();
       } catch (GeneralSecurityException ex) {
@@ -126,18 +146,7 @@ public class SecureDataNodeStarter implements Daemon {
     }
     System.err.println("Opened streaming server at " + streamingAddr);
     System.err.println("Opened info server at " + infoSocAddr);
-    resources = new SecureResources(ss, listener);
+    return new SecureResources(ss, listener);
   }

-  @Override
-  public void start() throws Exception {
-    System.err.println("Starting regular datanode initialization");
-    DataNode.secureMain(args, resources);
-  }
-  
-  @Override public void destroy() {
-    sslFactory.destroy();
-  }
-
-  @Override public void stop() throws Exception { /* Nothing to do */ }
 }

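The SecureDataNodeStarter change above pulls privileged-resource acquisition out of init() into a static getSecureResources() so tests can obtain the same resources directly. A tiny sketch of that "static factory, visible for testing" shape follows, using only a plain ServerSocket; the names and the assumption of a fixed, non-zero port are hypothetical.

    // Illustrative sketch only: acquisition lives in a static factory the tests can call.
    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    class SecureStarter {
      private ServerSocket streaming;

      void init(int port) throws IOException {
        streaming = openStreamingSocket(port);
      }

      /** Visible for testing: can be called without going through init(). */
      static ServerSocket openStreamingSocket(int port) throws IOException {
        ServerSocket ss = new ServerSocket();
        ss.bind(new InetSocketAddress(port));
        if (ss.getLocalPort() != port) {
          throw new IOException("Requested port " + port + " but got " + ss.getLocalPort());
        }
        return ss;
      }
    }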
+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java

@@ -136,7 +136,7 @@ class FsDatasetAsyncDiskService {
     if (executors == null) {
       LOG.warn("AsyncDiskService has already shut down.");
     } else {
-      LOG.info("Shutting down all async disk service threads...");
+      LOG.info("Shutting down all async disk service threads");

       for (Map.Entry<File, ThreadPoolExecutor> e : executors.entrySet()) {
         e.getValue().shutdown();
@@ -144,7 +144,7 @@ class FsDatasetAsyncDiskService {
       // clear the executor map so that calling execute again will fail.
       executors = null;

-      LOG.info("All async disk service threads have been shut down.");
+      LOG.info("All async disk service threads have been shut down");
     }
   }

@@ -154,7 +154,7 @@ class FsDatasetAsyncDiskService {
    */
   void deleteAsync(FsVolumeImpl volume, File blockFile, File metaFile,
       ExtendedBlock block) {
-    LOG.info("Scheduling block " + block.getLocalBlock()
+    LOG.info("Scheduling " + block.getLocalBlock()
         + " file " + blockFile + " for deletion");
     ReplicaFileDeleteTask deletionTask = new ReplicaFileDeleteTask(
         volume, blockFile, metaFile, block);
@@ -198,8 +198,8 @@ class FsDatasetAsyncDiskService {
           datanode.notifyNamenodeDeletedBlock(block);
         }
         volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
-        LOG.info("Deleted block " + block.getBlockPoolId() + " "
-            + block.getLocalBlock() + " at file " + blockFile);
+        LOG.info("Deleted " + block.getBlockPoolId() + " "
+            + block.getLocalBlock() + " file " + blockFile);
       }
     }
   }

+ 8 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -425,7 +425,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       return;
     }
     if (newlen > oldlen) {
-      throw new IOException("Cannout truncate block to from oldlen (=" + oldlen
+      throw new IOException("Cannot truncate block to from oldlen (=" + oldlen
           + ") to newlen (=" + newlen + ")");
     }

@@ -481,7 +481,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
           " should be greater than the replica " + b + "'s generation stamp");
     }
     ReplicaInfo replicaInfo = getReplicaInfo(b);
-    LOG.info("Appending to replica " + replicaInfo);
+    LOG.info("Appending to " + replicaInfo);
     if (replicaInfo.getState() != ReplicaState.FINALIZED) {
       throw new ReplicaNotFoundException(
           ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
@@ -689,7 +689,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   public synchronized ReplicaInPipeline recoverRbw(ExtendedBlock b,
       long newGS, long minBytesRcvd, long maxBytesRcvd)
       throws IOException {
-    LOG.info("Recover the RBW replica " + b);
+    LOG.info("Recover RBW replica " + b);

     ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());

@@ -700,7 +700,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     }
     ReplicaBeingWritten rbw = (ReplicaBeingWritten)replicaInfo;

-    LOG.info("Recovering replica " + rbw);
+    LOG.info("Recovering " + rbw);

     // Stop the previous writer
     rbw.stopWriter();
@@ -736,8 +736,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     final long blockId = b.getBlockId();
     final long expectedGs = b.getGenerationStamp();
     final long visible = b.getNumBytes();
-    LOG.info("Convert replica " + b
-        + " from Temporary to RBW, visible length=" + visible);
+    LOG.info("Convert " + b + " from Temporary to RBW, visible length="
+        + visible);

     final ReplicaInPipeline temp;
     {
@@ -1415,8 +1415,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   static ReplicaRecoveryInfo initReplicaRecovery(String bpid,
       ReplicaMap map, Block block, long recoveryId) throws IOException {
     final ReplicaInfo replica = map.get(bpid, block.getBlockId());
-    LOG.info("initReplicaRecovery: block=" + block
-        + ", recoveryId=" + recoveryId
+    LOG.info("initReplicaRecovery: " + block + ", recoveryId=" + recoveryId
         + ", replica=" + replica);

     //check replica
@@ -1485,7 +1484,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     //get replica
     final String bpid = oldBlock.getBlockPoolId();
     final ReplicaInfo replica = volumeMap.get(bpid, oldBlock.getBlockId());
-    LOG.info("updateReplica: block=" + oldBlock
+    LOG.info("updateReplica: " + oldBlock
         + ", recoveryId=" + recoveryId
         + ", length=" + newlength
         + ", replica=" + replica);

+ 2 - 16
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java

@@ -78,10 +78,6 @@ public class BackupNode extends NameNode {
   String nnHttpAddress;
   /** Checkpoint manager */
   Checkpointer checkpointManager;
-  /** ClusterID to which BackupNode belongs to */
-  String clusterId;
-  /** Block pool Id of the peer namenode of this BackupNode */
-  String blockPoolId;

   BackupNode(Configuration conf, NamenodeRole role) throws IOException {
     super(conf, role);
@@ -145,6 +141,7 @@ public class BackupNode extends NameNode {
                  CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
     NamespaceInfo nsInfo = handshake(conf);
     super.initialize(conf);
+    namesystem.setBlockPoolId(nsInfo.getBlockPoolID());

     if (false == namesystem.isInSafeMode()) {
       namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
@@ -154,9 +151,6 @@ public class BackupNode extends NameNode {
     // therefore lease hard limit should never expire.
     namesystem.leaseManager.setLeasePeriod(
         HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE);
-    
-    clusterId = nsInfo.getClusterID();
-    blockPoolId = nsInfo.getBlockPoolID();

     // register with the active name-node 
     registerWith(nsInfo);
@@ -219,7 +213,7 @@ public class BackupNode extends NameNode {
   }

   /* @Override */// NameNode
-  public boolean setSafeMode(@SuppressWarnings("unused") SafeModeAction action)
+  public boolean setSafeMode(SafeModeAction action)
       throws IOException {
     throw new UnsupportedActionException("setSafeMode");
   }
@@ -415,14 +409,6 @@ public class BackupNode extends NameNode {
     return nsInfo;
   }

-  String getBlockPoolId() {
-    return blockPoolId;
-  }
-  
-  String getClusterId() {
-    return clusterId;
-  }
-  
   @Override
   protected NameNodeHAContext createHAContext() {
     return new BNHAContext();

+ 142 - 196
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -44,10 +44,10 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -57,6 +57,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
 import org.apache.hadoop.hdfs.util.ByteArray;
 import org.apache.hadoop.hdfs.util.ByteArray;
 
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Preconditions;
@@ -140,7 +142,7 @@ public class FSDirectory implements Closeable {
         DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
         DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
         DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT);
         DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT);
     NameNode.LOG.info("Caching file names occuring more than " + threshold
     NameNode.LOG.info("Caching file names occuring more than " + threshold
-        + " times ");
+        + " times");
     nameCache = new NameCache<ByteArray>(threshold);
     nameCache = new NameCache<ByteArray>(threshold);
     namesystem = ns;
     namesystem = ns;
   }
   }
@@ -253,15 +255,12 @@ public class FSDirectory implements Closeable {
       writeUnlock();
       writeUnlock();
     }
     }
     if (newNode == null) {
     if (newNode == null) {
-      NameNode.stateChangeLog.info("DIR* FSDirectory.addFile: "
-                                   +"failed to add "+path
-                                   +" to the file system");
+      NameNode.stateChangeLog.info("DIR* addFile: failed to add " + path);
       return null;
       return null;
     }
     }
 
 
     if(NameNode.stateChangeLog.isDebugEnabled()) {
     if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
-          +path+" is added to the file system");
+      NameNode.stateChangeLog.debug("DIR* addFile: " + path + " is added");
     }
     }
     return newNode;
     return newNode;
   }
   }
@@ -315,7 +314,7 @@ public class FSDirectory implements Closeable {
       }
       }
       if(newParent == null)
       if(newParent == null)
         return null;
         return null;
-      if(!newNode.isDirectory() && !newNode.isLink()) {
+      if(!newNode.isDirectory() && !newNode.isSymlink()) {
         // Add file->block mapping
         // Add file->block mapping
         INodeFile newF = (INodeFile)newNode;
         INodeFile newF = (INodeFile)newNode;
         BlockInfo[] blocks = newF.getBlocks();
         BlockInfo[] blocks = newF.getBlocks();
@@ -332,22 +331,18 @@ public class FSDirectory implements Closeable {
   /**
   /**
    * Add a block to the file. Returns a reference to the added block.
    * Add a block to the file. Returns a reference to the added block.
    */
    */
-  BlockInfo addBlock(String path,
-                     INode[] inodes,
-                     Block block,
-                     DatanodeDescriptor targets[]
-  ) throws QuotaExceededException {
+  BlockInfo addBlock(String path, INodesInPath inodesInPath, Block block,
+      DatanodeDescriptor targets[]) throws IOException {
     waitForReady();
     waitForReady();
 
 
     writeLock();
     writeLock();
     try {
     try {
-      assert inodes[inodes.length-1].isUnderConstruction() :
-        "INode should correspond to a file under construction";
-      INodeFileUnderConstruction fileINode = 
-        (INodeFileUnderConstruction)inodes[inodes.length-1];
+      final INode[] inodes = inodesInPath.getINodes();
+      final INodeFileUnderConstruction fileINode = 
+          INodeFileUnderConstruction.valueOf(inodes[inodes.length-1], path);
 
 
       // check quota limits and updated space consumed
       // check quota limits and updated space consumed
-      updateCount(inodes, inodes.length-1, 0,
+      updateCount(inodesInPath, inodes.length-1, 0,
           fileINode.getPreferredBlockSize()*fileINode.getBlockReplication(), true);
           fileINode.getPreferredBlockSize()*fileINode.getBlockReplication(), true);
 
 
       // associate new last block for the file
       // associate new last block for the file
@@ -443,8 +438,9 @@ public class FSDirectory implements Closeable {
     }
     }
 
 
     // update space consumed
     // update space consumed
-    INode[] pathINodes = getExistingPathINodes(path);
-    updateCount(pathINodes, pathINodes.length-1, 0,
+    final INodesInPath inodesInPath = rootDir.getExistingPathINodes(path, true);
+    final INode[] inodes = inodesInPath.getINodes();
+    updateCount(inodesInPath, inodes.length-1, 0,
         -fileNode.getPreferredBlockSize()*fileNode.getBlockReplication(), true);
         -fileNode.getPreferredBlockSize()*fileNode.getBlockReplication(), true);
   }
   }
 
 
@@ -512,7 +508,8 @@ public class FSDirectory implements Closeable {
     throws QuotaExceededException, UnresolvedLinkException, 
     throws QuotaExceededException, UnresolvedLinkException, 
     FileAlreadyExistsException {
     FileAlreadyExistsException {
     assert hasWriteLock();
     assert hasWriteLock();
-    INode[] srcInodes = rootDir.getExistingPathINodes(src, false);
+    INodesInPath srcInodesInPath = rootDir.getExistingPathINodes(src, false);
+    INode[] srcInodes = srcInodesInPath.getINodes();
     INode srcInode = srcInodes[srcInodes.length-1];
     INode srcInode = srcInodes[srcInodes.length-1];
     
     
     // check the validation of the source
     // check the validation of the source
@@ -535,7 +532,7 @@ public class FSDirectory implements Closeable {
     if (dst.equals(src)) {
     if (dst.equals(src)) {
       return true;
       return true;
     }
     }
-    if (srcInode.isLink() && 
+    if (srcInode.isSymlink() && 
         dst.equals(((INodeSymlink)srcInode).getLinkValue())) {
       throw new FileAlreadyExistsException(
           "Cannot rename symlink "+src+" to its target "+dst);
@@ -551,8 +548,9 @@ public class FSDirectory implements Closeable {
     }
     
     byte[][] dstComponents = INode.getPathComponents(dst);
-    INode[] dstInodes = new INode[dstComponents.length];
-    rootDir.getExistingPathINodes(dstComponents, dstInodes, false);
+    INodesInPath dstInodesInPath = rootDir.getExistingPathINodes(dstComponents,
+        dstComponents.length, false);
+    INode[] dstInodes = dstInodesInPath.getINodes();
     if (dstInodes[dstInodes.length-1] != null) {
       NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
                                    +"failed to rename "+src+" to "+dst+ 
@@ -567,14 +565,14 @@ public class FSDirectory implements Closeable {
     }
     
     // Ensure dst has quota to accommodate rename
-    verifyQuotaForRename(srcInodes,dstInodes);
+    verifyQuotaForRename(srcInodes, dstInodes);
     
     INode dstChild = null;
     INode srcChild = null;
     String srcChildName = null;
     try {
       // remove src
-      srcChild = removeChild(srcInodes, srcInodes.length-1);
+      srcChild = removeChild(srcInodesInPath, srcInodes.length-1);
       if (srcChild == null) {
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
             + "failed to rename " + src + " to " + dst
@@ -585,7 +583,7 @@ public class FSDirectory implements Closeable {
       srcChild.setLocalName(dstComponents[dstInodes.length-1]);
       
       // add src to the destination
-      dstChild = addChildNoQuotaCheck(dstInodes, dstInodes.length - 1,
+      dstChild = addChildNoQuotaCheck(dstInodesInPath, dstInodes.length-1,
           srcChild, UNKNOWN_DISK_SPACE);
       if (dstChild != null) {
         srcChild = null;
@@ -602,7 +600,7 @@ public class FSDirectory implements Closeable {
       if (dstChild == null && srcChild != null) {
         // put it back
         srcChild.setLocalName(srcChildName);
-        addChildNoQuotaCheck(srcInodes, srcInodes.length - 1, srcChild, 
+        addChildNoQuotaCheck(srcInodesInPath, srcInodes.length - 1, srcChild, 
             UNKNOWN_DISK_SPACE);
       }
     }
@@ -635,7 +633,8 @@ public class FSDirectory implements Closeable {
       }
     }
     String error = null;
-    final INode[] srcInodes = rootDir.getExistingPathINodes(src, false);
+    final INodesInPath srcInodesInPath = rootDir.getExistingPathINodes(src, false);
+    final INode[] srcInodes = srcInodesInPath.getINodes();
     final INode srcInode = srcInodes[srcInodes.length - 1];
     // validate source
     if (srcInode == null) {
@@ -656,7 +655,7 @@ public class FSDirectory implements Closeable {
       throw new FileAlreadyExistsException(
           "The source "+src+" and destination "+dst+" are the same");
     }
-    if (srcInode.isLink() && 
+    if (srcInode.isSymlink() && 
         dst.equals(((INodeSymlink)srcInode).getLinkValue())) {
       throw new FileAlreadyExistsException(
           "Cannot rename symlink "+src+" to its target "+dst);
@@ -671,8 +670,9 @@ public class FSDirectory implements Closeable {
       throw new IOException(error);
     }
     final byte[][] dstComponents = INode.getPathComponents(dst);
-    final INode[] dstInodes = new INode[dstComponents.length];
-    rootDir.getExistingPathINodes(dstComponents, dstInodes, false);
+    INodesInPath dstInodesInPath = rootDir.getExistingPathINodes(dstComponents,
+        dstComponents.length, false);
+    final INode[] dstInodes = dstInodesInPath.getINodes();
     INode dstInode = dstInodes[dstInodes.length - 1];
     if (dstInodes.length == 1) {
       error = "rename destination cannot be the root";
@@ -696,7 +696,7 @@ public class FSDirectory implements Closeable {
         throw new FileAlreadyExistsException(error);
       }
       List<INode> children = dstInode.isDirectory() ? 
-          ((INodeDirectory) dstInode).getChildrenRaw() : null;
+          ((INodeDirectory) dstInode).getChildren() : null;
       if (children != null && children.size() != 0) {
         error = "rename cannot overwrite non empty destination directory "
             + dst;
@@ -720,7 +720,7 @@ public class FSDirectory implements Closeable {
 
     // Ensure dst has quota to accommodate rename
     verifyQuotaForRename(srcInodes, dstInodes);
-    INode removedSrc = removeChild(srcInodes, srcInodes.length - 1);
+    INode removedSrc = removeChild(srcInodesInPath, srcInodes.length - 1);
     if (removedSrc == null) {
       error = "Failed to rename " + src + " to " + dst
           + " because the source can not be removed";
@@ -733,14 +733,14 @@ public class FSDirectory implements Closeable {
     INode removedDst = null;
     try {
       if (dstInode != null) { // dst exists remove it
-        removedDst = removeChild(dstInodes, dstInodes.length - 1);
+        removedDst = removeChild(dstInodesInPath, dstInodes.length - 1);
         dstChildName = removedDst.getLocalName();
       }
 
       INode dstChild = null;
       removedSrc.setLocalName(dstComponents[dstInodes.length - 1]);
       // add src as dst to complete rename
-      dstChild = addChildNoQuotaCheck(dstInodes, dstInodes.length - 1,
+      dstChild = addChildNoQuotaCheck(dstInodesInPath, dstInodes.length - 1,
           removedSrc, UNKNOWN_DISK_SPACE);
 
       int filesDeleted = 0;
@@ -758,7 +758,7 @@ public class FSDirectory implements Closeable {
         if (removedDst != null) {
           INode rmdst = removedDst;
           removedDst = null;
-          List<Block> collectedBlocks = new ArrayList<Block>();
+          BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
           filesDeleted = rmdst.collectSubtreeBlocksAndClear(collectedBlocks);
           getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
         }
@@ -768,13 +768,13 @@ public class FSDirectory implements Closeable {
       if (removedSrc != null) {
         // Rename failed - restore src
         removedSrc.setLocalName(srcChildName);
-        addChildNoQuotaCheck(srcInodes, srcInodes.length - 1, removedSrc, 
+        addChildNoQuotaCheck(srcInodesInPath, srcInodes.length - 1, removedSrc, 
             UNKNOWN_DISK_SPACE);
       }
       if (removedDst != null) {
         // Rename failed - restore dst
         removedDst.setLocalName(dstChildName);
-        addChildNoQuotaCheck(dstInodes, dstInodes.length - 1, removedDst, 
+        addChildNoQuotaCheck(dstInodesInPath, dstInodes.length - 1, removedDst, 
             UNKNOWN_DISK_SPACE);
       }
     }
@@ -814,12 +814,13 @@ public class FSDirectory implements Closeable {
                                     UnresolvedLinkException {
     assert hasWriteLock();
 
-    INode[] inodes = rootDir.getExistingPathINodes(src, true);
+    final INodesInPath inodesInPath = rootDir.getExistingPathINodes(src, true);
+    final INode[] inodes = inodesInPath.getINodes();
     INode inode = inodes[inodes.length - 1];
     if (inode == null) {
       return null;
     }
-    assert !inode.isLink();
+    assert !inode.isSymlink();
     if (inode.isDirectory()) {
       return null;
     }
@@ -828,7 +829,7 @@ public class FSDirectory implements Closeable {
 
     // check disk quota
     long dsDelta = (replication - oldRepl) * (fileNode.diskspaceConsumed()/oldRepl);
-    updateCount(inodes, inodes.length-1, 0, dsDelta, true);
+    updateCount(inodesInPath, inodes.length-1, 0, dsDelta, true);
 
     fileNode.setReplication(replication);
 
@@ -851,7 +852,7 @@ public class FSDirectory implements Closeable {
       if (inode == null) {
         throw new FileNotFoundException("File does not exist: " + filename);
       }
-      if (inode.isDirectory() || inode.isLink()) {
+      if (inode.isDirectory() || inode.isSymlink()) {
         throw new IOException("Getting block size of non-file: "+ filename); 
       }
       return ((INodeFile)inode).getPreferredBlockSize();
@@ -868,7 +869,7 @@ public class FSDirectory implements Closeable {
       if (inode == null) {
          return false;
       }
-      return inode.isDirectory() || inode.isLink() 
+      return inode.isDirectory() || inode.isSymlink() 
         ? true 
         : ((INodeFile)inode).getBlocks() != null;
     } finally {
@@ -958,7 +959,8 @@ public class FSDirectory implements Closeable {
     }
     // do the move
     
-    INode [] trgINodes =  getExistingPathINodes(target);
+    final INodesInPath trgINodesInPath = rootDir.getExistingPathINodes(target, true);
+    final INode[] trgINodes = trgINodesInPath.getINodes();
     INodeFile trgInode = (INodeFile) trgINodes[trgINodes.length-1];
     INodeDirectory trgParent = (INodeDirectory)trgINodes[trgINodes.length-2];
     
@@ -966,9 +968,9 @@ public class FSDirectory implements Closeable {
     int i = 0;
     int totalBlocks = 0;
     for(String src : srcs) {
-      INodeFile srcInode = getFileINode(src);
+      INodeFile srcInode = (INodeFile)getINode(src);
       allSrcInodes[i++] = srcInode;
-      totalBlocks += srcInode.blocks.length;  
+      totalBlocks += srcInode.numBlocks();  
     }
     trgInode.appendBlocks(allSrcInodes, totalBlocks); // copy the blocks
     
@@ -977,7 +979,7 @@ public class FSDirectory implements Closeable {
     for(INodeFile nodeToRemove: allSrcInodes) {
       if(nodeToRemove == null) continue;
       
-      nodeToRemove.blocks = null;
+      nodeToRemove.setBlocks(null);
       trgParent.removeChild(nodeToRemove);
       count++;
     }
@@ -985,7 +987,7 @@ public class FSDirectory implements Closeable {
     trgInode.setModificationTimeForce(timestamp);
     trgParent.setModificationTime(timestamp);
     // update quota on the parent directory ('count' files removed, 0 space)
-    unprotectedUpdateCount(trgINodes, trgINodes.length-1, - count, 0);
+    unprotectedUpdateCount(trgINodesInPath, trgINodes.length-1, -count, 0);
   }
 
   /**
@@ -995,7 +997,7 @@ public class FSDirectory implements Closeable {
    * @param collectedBlocks Blocks under the deleted directory
    * @return true on successful deletion; else false
    */
-  boolean delete(String src, List<Block>collectedBlocks) 
+  boolean delete(String src, BlocksMapUpdateInfo collectedBlocks) 
     throws UnresolvedLinkException {
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src);
@@ -1019,35 +1021,21 @@ public class FSDirectory implements Closeable {
     return true;
   }
   
-  /** Return if a directory is empty or not **/
-  boolean isDirEmpty(String src) throws UnresolvedLinkException {
-    boolean dirNotEmpty = true;
-    if (!isDir(src)) {
-      return true;
-    }
+  /**
+   * @return true if the path is a non-empty directory; otherwise, return false.
+   */
+  boolean isNonEmptyDirectory(String path) throws UnresolvedLinkException {
     readLock();
     try {
-      INode targetNode = rootDir.getNode(src, false);
-      assert targetNode != null : "should be taken care in isDir() above";
-      if (((INodeDirectory)targetNode).getChildren().size() != 0) {
-        dirNotEmpty = false;
+      final INode inode = rootDir.getNode(path, false);
+      if (inode == null || !inode.isDirectory()) {
+        //not found or not a directory
+        return false;
       }
+      return ((INodeDirectory)inode).getChildrenList().size() != 0;
     } finally {
       readUnlock();
     }
-    return dirNotEmpty;
-  }
-
-  boolean isEmpty() {
-    try {
-      return isDirEmpty("/");
-    } catch (UnresolvedLinkException e) {
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("/ cannot be a symlink");
-      }
-      assert false : "/ cannot be a symlink";
-      return true;
-    }
   }
 
   /**
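
The hunk above collapses isDirEmpty()/isEmpty() into a single isNonEmptyDirectory() helper with inverted polarity. A hypothetical caller-side sketch (illustrative only, not part of this patch; dir, src and recursive are assumed to be in scope):

    // Illustrative sketch -- not part of the patch.
    // With the new helper a non-recursive delete of a populated directory
    // can be rejected directly.
    if (!recursive && dir.isNonEmptyDirectory(src)) {
      throw new IOException(src + " is non empty");
    }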
@@ -1062,7 +1050,7 @@ public class FSDirectory implements Closeable {
   void unprotectedDelete(String src, long mtime) 
     throws UnresolvedLinkException {
     assert hasWriteLock();
-    List<Block> collectedBlocks = new ArrayList<Block>();
+    BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     int filesRemoved = unprotectedDelete(src, collectedBlocks, mtime);
     if (filesRemoved > 0) {
       getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
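
The delete path above now gathers blocks into a BlocksMapUpdateInfo rather than a raw List<Block>. A condensed sketch of the collect-then-clean flow, using only the calls visible in this hunk (illustrative only, not part of this patch):

    // Illustrative sketch -- not part of the patch.
    BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
    int filesRemoved = unprotectedDelete(src, collectedBlocks, mtime);
    if (filesRemoved > 0) {
      // Blocks leave the blocks map only after the namespace update succeeds.
      getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
    }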
@@ -1077,12 +1065,13 @@ public class FSDirectory implements Closeable {
    * @param mtime the time the inode is removed
    * @return the number of inodes deleted; 0 if no inodes are deleted.
    */ 
-  int unprotectedDelete(String src, List<Block> collectedBlocks, 
+  int unprotectedDelete(String src, BlocksMapUpdateInfo collectedBlocks, 
       long mtime) throws UnresolvedLinkException {
     assert hasWriteLock();
     src = normalizePath(src);
 
-    INode[] inodes =  rootDir.getExistingPathINodes(src, false);
+    final INodesInPath inodesInPath = rootDir.getExistingPathINodes(src, false);
+    final INode[] inodes = inodesInPath.getINodes();
     INode targetNode = inodes[inodes.length-1];
 
     if (targetNode == null) { // non-existent src
@@ -1100,7 +1089,7 @@ public class FSDirectory implements Closeable {
     }
     int pos = inodes.length - 1;
     // Remove the node from the namespace
-    targetNode = removeChild(inodes, pos);
+    targetNode = removeChild(inodesInPath, pos);
     if (targetNode == null) {
       return 0;
     }
@@ -1171,7 +1160,7 @@ public class FSDirectory implements Closeable {
                 targetNode, needLocation)}, 0);
       }
       INodeDirectory dirInode = (INodeDirectory)targetNode;
-      List<INode> contents = dirInode.getChildren();
+      List<INode> contents = dirInode.getChildrenList();
       int startChild = dirInode.nextChild(startAfter);
       int totalNumChildren = contents.size();
       int numOfListing = Math.min(totalNumChildren-startChild, this.lsLimit);
@@ -1222,7 +1211,7 @@ public class FSDirectory implements Closeable {
         return null;
       if (targetNode.isDirectory())
         return null;
-      if (targetNode.isLink()) 
+      if (targetNode.isSymlink()) 
         return null;
       return ((INodeFile)targetNode).getBlocks();
     } finally {
@@ -1230,47 +1219,13 @@ public class FSDirectory implements Closeable {
     }
   }
 
-  /**
-   * Get {@link INode} associated with the file.
-   */
-  INodeFile getFileINode(String src) throws UnresolvedLinkException {
-    INode inode = getINode(src);
-    if (inode == null || inode.isDirectory())
-      return null;
-    assert !inode.isLink();
-    return (INodeFile) inode;
-  }
-  
   /**
    * Get {@link INode} associated with the file / directory.
    */
   INode getINode(String src) throws UnresolvedLinkException {
     readLock();
     try {
-      INode iNode = rootDir.getNode(src, true);
-      return iNode;
-    } finally {
-      readUnlock();
-    }
-  }
-
-  /**
-   * Retrieve the existing INodes along the given path.
-   * 
-   * @param path the path to explore
-   * @return INodes array containing the existing INodes in the order they
-   *         appear when following the path from the root INode to the
-   *         deepest INodes. The array size will be the number of expected
-   *         components in the path, and non existing components will be
-   *         filled with null
-   *         
-   * @see INodeDirectory#getExistingPathINodes(byte[][], INode[])
-   */
-  INode[] getExistingPathINodes(String path) 
-    throws UnresolvedLinkException {
-    readLock();
-    try {
-      return rootDir.getExistingPathINodes(path, true);
+      return rootDir.getNode(src, true);
     } finally {
       readUnlock();
     }
@@ -1340,13 +1295,14 @@ public class FSDirectory implements Closeable {
                                                 UnresolvedLinkException {
     writeLock();
     try {
-      INode[] inodes = rootDir.getExistingPathINodes(path, false);
+      final INodesInPath inodesInPath = rootDir.getExistingPathINodes(path, false);
+      final INode[] inodes = inodesInPath.getINodes();
       int len = inodes.length;
       if (inodes[len - 1] == null) {
         throw new FileNotFoundException(path + 
                                         " does not exist under rootDir.");
       }
-      updateCount(inodes, len-1, nsDelta, dsDelta, true);
+      updateCount(inodesInPath, len-1, nsDelta, dsDelta, true);
     } finally {
       writeUnlock();
     }
@@ -1361,7 +1317,7 @@ public class FSDirectory implements Closeable {
    * @param checkQuota if true then check if quota is exceeded
    * @throws QuotaExceededException if the new count violates any quota limit
    */
-  private void updateCount(INode[] inodes, int numOfINodes, 
+  private void updateCount(INodesInPath inodesInPath, int numOfINodes, 
                            long nsDelta, long dsDelta, boolean checkQuota)
                            throws QuotaExceededException {
     assert hasWriteLock();
@@ -1369,29 +1325,25 @@ public class FSDirectory implements Closeable {
       //still initializing. do not check or update quotas.
       return;
     }
-    if (numOfINodes>inodes.length) {
+    final INode[] inodes = inodesInPath.getINodes();
+    if (numOfINodes > inodes.length) {
       numOfINodes = inodes.length;
     }
     if (checkQuota) {
       verifyQuota(inodes, numOfINodes, nsDelta, dsDelta, null);
     }
-    for(int i = 0; i < numOfINodes; i++) {
-      if (inodes[i].isQuotaSet()) { // a directory with quota
-        INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i]; 
-        node.updateNumItemsInTree(nsDelta, dsDelta);
-      }
-    }
+    unprotectedUpdateCount(inodesInPath, numOfINodes, nsDelta, dsDelta);
   }
   
   /** 
    * update quota of each inode and check to see if quota is exceeded. 
    * See {@link #updateCount(INode[], int, long, long, boolean)}
    */ 
-  private void updateCountNoQuotaCheck(INode[] inodes, int numOfINodes, 
-                           long nsDelta, long dsDelta) {
+  private void updateCountNoQuotaCheck(INodesInPath inodesInPath,
+      int numOfINodes, long nsDelta, long dsDelta) {
     assert hasWriteLock();
     try {
-      updateCount(inodes, numOfINodes, nsDelta, dsDelta, false);
+      updateCount(inodesInPath, numOfINodes, nsDelta, dsDelta, false);
     } catch (QuotaExceededException e) {
       NameNode.LOG.warn("FSDirectory.updateCountNoQuotaCheck - unexpected ", e);
     }
@@ -1405,9 +1357,10 @@ public class FSDirectory implements Closeable {
    * @param nsDelta
    * @param dsDelta
    */
-   void unprotectedUpdateCount(INode[] inodes, int numOfINodes, 
-                                      long nsDelta, long dsDelta) {
-     assert hasWriteLock();
+  private void unprotectedUpdateCount(INodesInPath inodesInPath,
+      int numOfINodes, long nsDelta, long dsDelta) {
+    assert hasWriteLock();
+    final INode[] inodes = inodesInPath.getINodes();
     for(int i=0; i < numOfINodes; i++) {
       if (inodes[i].isQuotaSet()) { // a directory with quota
         INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i]; 
@@ -1472,18 +1425,19 @@ public class FSDirectory implements Closeable {
     src = normalizePath(src);
     String[] names = INode.getPathNames(src);
     byte[][] components = INode.getPathComponents(names);
-    INode[] inodes = new INode[components.length];
-    final int lastInodeIndex = inodes.length - 1;
+    final int lastInodeIndex = components.length - 1;
 
     writeLock();
     try {
-      rootDir.getExistingPathINodes(components, inodes, false);
+      INodesInPath inodesInPath = rootDir.getExistingPathINodes(components,
+          components.length, false);
+      INode[] inodes = inodesInPath.getINodes();
 
       // find the index of the first null in inodes[]
       StringBuilder pathbuilder = new StringBuilder();
       int i = 1;
       for(; i < inodes.length && inodes[i] != null; i++) {
-        pathbuilder.append(Path.SEPARATOR + names[i]);
+        pathbuilder.append(Path.SEPARATOR).append(names[i]);
         if (!inodes[i].isDirectory()) {
           throw new FileAlreadyExistsException("Parent path is not a directory: "
               + pathbuilder+ " "+inodes[i].getLocalName());
@@ -1525,8 +1479,7 @@ public class FSDirectory implements Closeable {
       // create directories beginning from the first null index
       for(; i < inodes.length; i++) {
         pathbuilder.append(Path.SEPARATOR + names[i]);
-        String cur = pathbuilder.toString();
-        unprotectedMkdir(inodes, i, components[i],
+        unprotectedMkdir(inodesInPath, i, components[i],
             (i < lastInodeIndex) ? parentPermissions : permissions, now);
         if (inodes[i] == null) {
           return false;
@@ -1535,6 +1488,8 @@ public class FSDirectory implements Closeable {
         // to match count of FilesDeleted metric.
         if (getFSNamesystem() != null)
           NameNode.getNameNodeMetrics().incrFilesCreated();
+
+        final String cur = pathbuilder.toString();
         fsImage.getEditLog().logMkDir(cur, inodes[i]);
         if(NameNode.stateChangeLog.isDebugEnabled()) {
           NameNode.stateChangeLog.debug(
@@ -1547,49 +1502,48 @@ public class FSDirectory implements Closeable {
     return true;
   }
 
-  /**
-   */
   INode unprotectedMkdir(String src, PermissionStatus permissions,
                           long timestamp) throws QuotaExceededException,
                           UnresolvedLinkException {
     assert hasWriteLock();
     byte[][] components = INode.getPathComponents(src);
-    INode[] inodes = new INode[components.length];
-
-    rootDir.getExistingPathINodes(components, inodes, false);
-    unprotectedMkdir(inodes, inodes.length-1, components[inodes.length-1],
-        permissions, timestamp);
-    return inodes[inodes.length-1];
+    INodesInPath inodesInPath = rootDir.getExistingPathINodes(components,
+        components.length, false);
+    INode[] inodes = inodesInPath.getINodes();
+    final int pos = inodes.length - 1;
+    unprotectedMkdir(inodesInPath, pos, components[pos], permissions, timestamp);
+    return inodes[pos];
   }
 
   /** create a directory at index pos.
    * The parent path to the directory is at [0, pos-1].
    * All ancestors exist. Newly created one stored at index pos.
    */
-  private void unprotectedMkdir(INode[] inodes, int pos,
+  private void unprotectedMkdir(INodesInPath inodesInPath, int pos,
       byte[] name, PermissionStatus permission,
       long timestamp) throws QuotaExceededException {
     assert hasWriteLock();
-    inodes[pos] = addChild(inodes, pos, 
-        new INodeDirectory(name, permission, timestamp),
-        -1);
+    final INodeDirectory dir = new INodeDirectory(name, permission, timestamp);
+    final INode inode = addChild(inodesInPath, pos, dir, -1, true);
+    inodesInPath.setINode(pos, inode);
   }
   
   /** Add a node child to the namespace. The full path name of the node is src.
    * childDiskspace should be -1, if unknown. 
-   * QuotaExceededException is thrown if it violates quota limit */
-  private <T extends INode> T addNode(String src, T child, 
-        long childDiskspace) 
-  throws QuotaExceededException, UnresolvedLinkException {
+   * @throw QuotaExceededException is thrown if it violates quota limit
+   */
+  private <T extends INode> T addNode(String src, T child, long childDiskspace
+      ) throws QuotaExceededException, UnresolvedLinkException {
     byte[][] components = INode.getPathComponents(src);
     byte[] path = components[components.length-1];
     child.setLocalName(path);
     cacheName(child);
-    INode[] inodes = new INode[components.length];
     writeLock();
     try {
-      rootDir.getExistingPathINodes(components, inodes, false);
-      return addChild(inodes, inodes.length-1, child, childDiskspace);
+      INodesInPath inodesInPath = rootDir.getExistingPathINodes(components,
+          components.length, false);
+      return addChild(inodesInPath, inodesInPath.getINodes().length-1, child,
+          childDiskspace, true);
     } finally {
       writeUnlock();
     }
@@ -1695,7 +1649,7 @@ public class FSDirectory implements Closeable {
       }
       if (maxDirItems != 0) {
         INodeDirectory parent = (INodeDirectory)pathComponents[pos-1];
-        int count = parent.getChildren().size();
+        int count = parent.getChildrenList().size();
         if (count >= maxDirItems) {
           throw new MaxDirectoryItemsExceededException(maxDirItems, count);
         }
@@ -1714,19 +1668,22 @@ public class FSDirectory implements Closeable {
   }
   
   /** Add a node child to the inodes at index pos. 
-   * Its ancestors are stored at [0, pos-1]. 
-   * QuotaExceededException is thrown if it violates quota limit */
-  private <T extends INode> T addChild(INode[] pathComponents, int pos,
+   * Its ancestors are stored at [0, pos-1].
+   * @return the added node. 
+   * @throw QuotaExceededException is thrown if it violates quota limit
+   */
+  private <T extends INode> T addChild(INodesInPath inodesInPath, int pos,
       T child, long childDiskspace,
       boolean checkQuota) throws QuotaExceededException {
-	// The filesystem limits are not really quotas, so this check may appear
-	// odd.  It's because a rename operation deletes the src, tries to add
-	// to the dest, if that fails, re-adds the src from whence it came.
-	// The rename code disables the quota when it's restoring to the
-	// original location becase a quota violation would cause the the item
-	// to go "poof".  The fs limits must be bypassed for the same reason.
+    final INode[] inodes = inodesInPath.getINodes();
+    // The filesystem limits are not really quotas, so this check may appear
+    // odd. It's because a rename operation deletes the src, tries to add
+    // to the dest, if that fails, re-adds the src from whence it came.
+    // The rename code disables the quota when it's restoring to the
+    // original location becase a quota violation would cause the the item
+    // to go "poof".  The fs limits must be bypassed for the same reason.
     if (checkQuota) {
-      verifyFsLimits(pathComponents, pos, child);
+      verifyFsLimits(inodes, pos, child);
     }
     
     INode.DirCounts counts = new INode.DirCounts();
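
The comment block above explains why addChild() takes a checkQuota flag: a failed rename must be able to put the removed inode back even if that would now violate a quota or a directory-item limit. A condensed sketch of that escape hatch, mirroring the addChildNoQuotaCheck() body later in this diff (illustrative only, not part of this patch):

    // Illustrative sketch -- not part of the patch.
    // Restore path of a failed rename: re-add the child with quota and
    // fs-limit checks disabled, logging instead of failing.
    try {
      inode = addChild(inodesInPath, pos, child, childDiskspace, false);
    } catch (QuotaExceededException e) {
      NameNode.LOG.warn("FSDirectory.addChildNoQuotaCheck - unexpected", e);
    }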
@@ -1734,31 +1691,22 @@ public class FSDirectory implements Closeable {
     if (childDiskspace < 0) {
       childDiskspace = counts.getDsCount();
     }
-    updateCount(pathComponents, pos, counts.getNsCount(), childDiskspace,
-        checkQuota);
-    if (pathComponents[pos-1] == null) {
+    updateCount(inodesInPath, pos, counts.getNsCount(), childDiskspace, checkQuota);
+    if (inodes[pos-1] == null) {
       throw new NullPointerException("Panic: parent does not exist");
     }
-    T addedNode = ((INodeDirectory)pathComponents[pos-1]).addChild(
-        child, true);
+    final T addedNode = ((INodeDirectory)inodes[pos-1]).addChild(child, true);
     if (addedNode == null) {
-      updateCount(pathComponents, pos, -counts.getNsCount(), 
-          -childDiskspace, true);
+      updateCount(inodesInPath, pos, -counts.getNsCount(), -childDiskspace, true);
     }
     return addedNode;
   }
-
-  private <T extends INode> T addChild(INode[] pathComponents, int pos,
-      T child, long childDiskspace)
-      throws QuotaExceededException {
-    return addChild(pathComponents, pos, child, childDiskspace, true);
-  }
   
-  private <T extends INode> T addChildNoQuotaCheck(INode[] pathComponents,
+  private <T extends INode> T addChildNoQuotaCheck(INodesInPath inodesInPath,
       int pos, T child, long childDiskspace) {
     T inode = null;
     try {
-      inode = addChild(pathComponents, pos, child, childDiskspace, false);
+      inode = addChild(inodesInPath, pos, child, childDiskspace, false);
     } catch (QuotaExceededException e) {
       NameNode.LOG.warn("FSDirectory.addChildNoQuotaCheck - unexpected", e); 
     }
@@ -1770,13 +1718,13 @@ public class FSDirectory implements Closeable {
    * Count of each ancestor with quota is also updated.
    * Return the removed node; null if the removal fails.
    */
-  private INode removeChild(INode[] pathComponents, int pos) {
-    INode removedNode = 
-      ((INodeDirectory)pathComponents[pos-1]).removeChild(pathComponents[pos]);
+  private INode removeChild(final INodesInPath inodesInPath, int pos) {
+    final INode[] inodes = inodesInPath.getINodes();
+    INode removedNode = ((INodeDirectory)inodes[pos-1]).removeChild(inodes[pos]);
     if (removedNode != null) {
       INode.DirCounts counts = new INode.DirCounts();
       removedNode.spaceConsumedInTree(counts);
-      updateCountNoQuotaCheck(pathComponents, pos,
+      updateCountNoQuotaCheck(inodesInPath, pos,
                   -counts.getNsCount(), -counts.getDsCount());
     }
     return removedNode;
@@ -1844,11 +1792,11 @@ public class FSDirectory implements Closeable {
      * INode. using 'parent' is not currently recommended. */
     nodesInPath.add(dir);
 
-    for (INode child : dir.getChildren()) {
+    for (INode child : dir.getChildrenList()) {
       if (child.isDirectory()) {
         updateCountForINodeWithQuota((INodeDirectory)child, 
                                      counts, nodesInPath);
-      } else if (child.isLink()) {
+      } else if (child.isSymlink()) {
         counts.nsCount += 1;
       } else { // reduce recursive calls
         counts.nsCount += 1;
@@ -1911,7 +1859,8 @@ public class FSDirectory implements Closeable {
     
     String srcs = normalizePath(src);
 
-    INode[] inodes = rootDir.getExistingPathINodes(src, true);
+    final INodesInPath inodesInPath = rootDir.getExistingPathINodes(src, true);
+    final INode[] inodes = inodesInPath.getINodes();
     INode targetNode = inodes[inodes.length-1];
     if (targetNode == null) {
       throw new FileNotFoundException("Directory does not exist: " + srcs);
@@ -2077,7 +2026,7 @@ public class FSDirectory implements Closeable {
         node.getFsPermission(),
         node.getUserName(),
         node.getGroupName(),
-        node.isLink() ? ((INodeSymlink)node).getSymlink() : null,
+        node.isSymlink() ? ((INodeSymlink)node).getSymlink() : null,
         path);
   }
 
@@ -2113,7 +2062,7 @@ public class FSDirectory implements Closeable {
           node.getFsPermission(),
           node.getUserName(),
           node.getGroupName(),
-          node.isLink() ? ((INodeSymlink)node).getSymlink() : null,
+          node.isSymlink() ? ((INodeSymlink)node).getSymlink() : null,
           path,
           loc);
       }
@@ -2145,16 +2094,13 @@ public class FSDirectory implements Closeable {
       writeUnlock();
     }
     if (newNode == null) {
-      NameNode.stateChangeLog.info("DIR* FSDirectory.addSymlink: "
-                                   +"failed to add "+path
-                                   +" to the file system");
+      NameNode.stateChangeLog.info("DIR* addSymlink: failed to add " + path);
       return null;
     }
     fsImage.getEditLog().logSymlink(path, target, modTime, modTime, newNode);
     
     if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* FSDirectory.addSymlink: "
-          +path+" is added to the file system");
+      NameNode.stateChangeLog.debug("DIR* addSymlink: " + path + " is added");
     }
     return newNode;
   }
@@ -2187,7 +2133,7 @@ public class FSDirectory implements Closeable {
    */
   void cacheName(INode inode) {
     // Name is cached only for files
-    if (inode.isDirectory() || inode.isLink()) {
+    if (inode.isDirectory() || inode.isSymlink()) {
       return;
     }
     ByteArray name = new ByteArray(inode.getLocalNameBytes());

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -900,7 +900,7 @@ public class FSEditLog implements LogsPurgeable {
    * in the new log.
    */
   synchronized long rollEditLog() throws IOException {
-    LOG.info("Rolling edit logs.");
+    LOG.info("Rolling edit logs");
     endCurrentLogSegment(true);
     
     long nextTxId = getLastWrittenTxId() + 1;
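
rollEditLog() above finalizes the segment being written and then opens a new one at the next transaction id. A condensed sketch of that sequence (illustrative only, not part of this patch; the boolean passed to startLogSegment() and the return value are assumptions based on the signatures visible in this diff):

    // Illustrative sketch -- not part of the patch.
    endCurrentLogSegment(true);                // finalize the current segment
    long nextTxId = getLastWrittenTxId() + 1;
    startLogSegment(nextTxId, false);          // assumed flag value; see the overload below
    return nextTxId;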
@@ -915,7 +915,7 @@ public class FSEditLog implements LogsPurgeable {
    */
   public synchronized void startLogSegment(long txid, 
       boolean abortCurrentLogSegment) throws IOException {
-    LOG.info("Namenode started a new log segment at txid " + txid);
+    LOG.info("Started a new log segment at txid " + txid);
     if (isSegmentOpen()) {
       if (getLastWrittenTxId() == txid - 1) {
         //In sync with the NN, so end and finalize the current segment`

Some files were not shown because too many files changed in this diff