
Merge trunk into auto-failover branch.

Needs a few tweaks to fix compilation; those will come in a follow-up commit. This is just a straight merge.


git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3042@1324567 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon, 13 years ago
commit 2bf19979b3
100 changed files with 1577 additions and 2105 deletions
  1. 8 0
      hadoop-common-project/hadoop-common/CHANGES.txt
  2. 2 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
  3. 6 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
  4. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
  5. 39 0
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  6. 6 0
      hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
  7. 2 8
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
  8. 6 2
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java
  9. 0 63
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/build.xml
  10. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/build-contrib.xml
  11. 7 19
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/build.xml
  12. 161 0
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/pom.xml
  13. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/Makefile.am
  14. 26 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml
  15. 1 33
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java
  16. 0 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
  17. 0 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  18. 1 28
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java
  19. 2 36
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
  20. 0 57
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
  21. 1 57
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DirectoryListing.java
  22. 1 39
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
  23. 1 69
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
  24. 0 28
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
  25. 1 47
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
  26. 1 63
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
  27. 0 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
  28. 2 11
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  29. 0 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolClientSideTranslatorPB.java
  30. 11 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java
  31. 11 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java
  32. 0 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
  33. 0 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java
  34. 0 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java
  35. 0 110
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ProtocolSignatureWritable.java
  36. 0 44
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/overview.html
  37. 5 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  38. 0 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
  39. 43 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
  40. 3 26
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
  41. 1 39
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java
  42. 3 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
  43. 15 14
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
  44. 3 25
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java
  45. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
  46. 12 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
  47. 11 25
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
  48. 82 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java
  49. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
  50. 30 22
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
  51. 87 60
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
  52. 137 117
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
  53. 32 40
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
  54. 8 10
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
  55. 10 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  56. 4 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
  57. 0 56
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalStream.java
  58. 130 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java
  59. 2 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
  60. 96 39
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  61. 81 17
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
  62. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
  63. 0 39
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java
  64. 0 56
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java
  65. 0 65
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
  66. 2 56
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
  67. 0 30
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/CheckpointCommand.java
  68. 1 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java
  69. 1 45
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
  70. 0 26
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FinalizeCommand.java
  71. 1 38
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java
  72. 0 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
  73. 0 32
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java
  74. 1 25
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java
  75. 0 14
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeCommand.java
  76. 0 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
  77. 0 39
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
  78. 0 32
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
  79. 1 27
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java
  80. 0 22
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java
  81. 0 33
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java
  82. 1 26
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java
  83. 0 34
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java
  84. 0 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
  85. 3 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java
  86. 5 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
  87. 11 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
  88. 10 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
  89. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
  90. 4 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
  91. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
  92. 0 79
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestCorruptFileBlocks.java
  93. 4 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
  94. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
  95. 3 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
  96. 3 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
  97. 31 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
  98. 0 78
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestStorageInfo.java
  99. 8 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
  100. 384 17
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java

+ 8 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -335,6 +335,14 @@ Release 2.0.0 - UNRELEASED
     HADOOP-8249. invalid hadoop-auth cookies should trigger authentication 
     if info is avail before returning HTTP 401 (tucu)
 
+    HADOOP-8261. Har file system doesn't deal with FS URIs with a host but no
+    port. (atm)
+
+    HADOOP-8263. Stringification of IPC calls not useful (todd)
+
+    HADOOP-8264. Remove irritating double double quotes in front of hostname
+    (Bernd Fondermann via bobby)
+
   BREAKDOWN OF HADOOP-7454 SUBTASKS
 
     HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh)

+ 2 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java

@@ -202,7 +202,8 @@ public class HarFileSystem extends FilterFileSystem {
     final String underLyingHost = i == host.length()? null: host.substring(i);
     int underLyingPort = rawURI.getPort();
     String auth = (underLyingHost == null && underLyingPort == -1)?
-                  null:(underLyingHost+":"+underLyingPort);
+                  null:(underLyingHost+
+                      (underLyingPort == -1 ? "" : ":"+underLyingPort));
     URI tmp = null;
     if (rawURI.getQuery() != null) {
       // query component not allowed
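
Note on the HarFileSystem change above: when the underlying FS URI carries a host but no port, the patched expression now yields an authority of just the host rather than "host:-1". A minimal standalone sketch of that expression; the host name below is an invented example, not taken from this commit:

    public class HarAuthoritySketch {
      public static void main(String[] args) {
        String underLyingHost = "namenode.example.com"; // assumed example host
        int underLyingPort = -1;                        // what rawURI.getPort() returns when no port is given
        // Same ternary as the patched HarFileSystem line:
        String auth = (underLyingHost == null && underLyingPort == -1)
            ? null
            : (underLyingHost + (underLyingPort == -1 ? "" : ":" + underLyingPort));
        // Prints "namenode.example.com"; the pre-patch code produced "namenode.example.com:-1".
        System.out.println(auth);
      }
    }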

+ 6 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java

@@ -268,6 +268,12 @@ public class ProtobufRpcEngine implements RpcEngine {
       in.readFully(bytes);
       message = HadoopRpcRequestProto.parseFrom(bytes);
     }
+    
+    @Override
+    public String toString() {
+      return message.getDeclaringClassProtocolName() + "." +
+          message.getMethodName();
+    }
   }
 
   /**

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java

@@ -782,7 +782,7 @@ public class NetUtils {
     hostDetails.append("local host is: ")
         .append(quoteHost(localHost))
         .append("; ");
-    hostDetails.append("destination host is: \"").append(quoteHost(destHost))
+    hostDetails.append("destination host is: ").append(quoteHost(destHost))
         .append(":")
         .append(destPort).append("; ");
     return hostDetails.toString();

+ 39 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -117,6 +117,15 @@ Trunk (unreleased changes)
 
 
     HDFS-3121. Add HDFS tests for HADOOP-8014 change. (John George via
     suresh)
+
+    HDFS-3119. Overreplicated block is not deleted even after the replication 
+    factor is reduced after sync follwed by closing that file. (Ashish Singhi 
+    via umamahesh)
+
+    HDFS-3235. MiniDFSClusterManager doesn't correctly support -format option.
+    (Henry Robinson via atm)
+
+    HDFS-3243. TestParallelRead timing out on jenkins. (Henry Robinson via todd)
     
     
 Release 2.0.0 - UNRELEASED 
 
@@ -195,6 +204,8 @@ Release 2.0.0 - UNRELEASED
 
 
     HDFS-3102. Add CLI tool to initialize the shared-edits dir. (atm)
 
+    HDFS-3004. Implement Recovery Mode. (Colin Patrick McCabe via eli)
+
   IMPROVEMENTS
 
     HDFS-2018. Move all journal stream management code into one place.
@@ -341,6 +352,21 @@ Release 2.0.0 - UNRELEASED
     HDFS-3211. Add fence(..) and replace NamenodeRegistration with JournalInfo
     and epoch in JournalProtocol. (suresh via szetszwo)
 
+    HDFS-3240. Drop log level of "heartbeat: ..." in BPServiceActor to DEBUG
+    (todd)
+
+    HDFS-3238. ServerCommand and friends don't need to be writables. (eli)
+
+    HDFS-3094. add -nonInteractive and -force option to namenode -format
+    command (Arpit Gupta via todd)
+
+    HDFS-3244. Remove dead writable code from hdfs/protocol. (eli)
+
+    HDFS-3247. Improve bootstrapStandby behavior when original NN is not active
+    (todd)
+
+    HDFS-3249. Use ToolRunner.confirmPrompt in NameNode (todd)
+
   OPTIMIZATIONS
 
     HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@@ -453,6 +479,19 @@ Release 2.0.0 - UNRELEASED
     HDFS-3136. Remove SLF4J dependency as HDFS does not need it to fix
     unnecessary warnings. (Jason Lowe via suresh)
 
+    HDFS-3214. InterDatanodeProtocolServerSideTranslatorPB doesn't handle
+    null response from initReplicaRecovery (todd)
+
+    HDFS-3234. Accidentally left log message in GetConf after HDFS-3226 (todd)
+
+    HDFS-3236. NameNode does not initialize generic conf keys when started
+    with -initializeSharedEditsDir (atm)
+
+    HDFS-3248. bootstrapStandby repeated twice in hdfs namenode usage message
+    (Colin Patrick McCabe via todd)
+
+    HDFS-2696. Fix the fuse-fds build. (Bruno Mahé via eli)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml

@@ -267,4 +267,10 @@
        <Method name="doRefreshNamenodes" />
        <Bug category="PERFORMANCE" />
      </Match>
+     <!-- Don't complain about System.exit() being called from quit() -->
+     <Match>
+       <Class name="org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext" />
+       <Method name="quit" />
+       <Bug pattern="DM_EXIT" />
+     </Match>
  </FindBugsFilter>

+ 2 - 8
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java

@@ -94,8 +94,8 @@ class BookKeeperEditLogInputStream extends EditLogInputStream {
   }
 
   @Override
-  public FSEditLogOp readOp() throws IOException {
-    return reader.readOp();
+  protected FSEditLogOp nextOp() throws IOException {
+    return reader.readOp(false);
   }
 
   @Override
@@ -123,12 +123,6 @@ class BookKeeperEditLogInputStream extends EditLogInputStream {
         lh.toString(), firstTxId, lastTxId);
   }
 
-  @Override
-  public JournalType getType() {
-    assert (false);
-    return null;
-  }
-
   // TODO(HA): Test this.
   @Override
   public boolean isInProgress() {

+ 6 - 2
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java

@@ -18,13 +18,17 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 
 
 /**
  * Utilities for testing edit logs
  */
 public class FSEditLogTestUtil {
+  private static OpInstanceCache cache = new OpInstanceCache();
+
   public static FSEditLogOp getNoOpInstance() {
-    return FSEditLogOp.LogSegmentOp.getInstance(FSEditLogOpCodes.OP_END_LOG_SEGMENT);
+    return FSEditLogOp.LogSegmentOp.getInstance(cache,
+        FSEditLogOpCodes.OP_END_LOG_SEGMENT);
   }
 
   public static long countTransactionsInStream(EditLogInputStream in) 
@@ -32,4 +36,4 @@ public class FSEditLogTestUtil {
     FSEditLogLoader.EditLogValidation validation = FSEditLogLoader.validateEditLog(in);
     return validation.getNumTransactions();
   }
-}
+}

+ 0 - 63
hadoop-hdfs-project/hadoop-hdfs/src/contrib/build.xml

@@ -1,63 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<project name="hadoopcontrib" default="compile" basedir=".">
-  
-  <!-- In case one of the contrib subdirectories -->
-  <!-- fails the build or test targets and you cannot fix it: -->
-  <!-- Then add to fileset: excludes="badcontrib/build.xml" -->
-
-  <!-- ====================================================== -->
-  <!-- Compile contribs.                                      -->
-  <!-- ====================================================== -->
-  <target name="compile">
-    <subant target="compile">
-      <fileset dir="." includes="*/build.xml"/>
-    </subant>
-  </target>
-  
-  <!-- ====================================================== -->
-  <!-- Package contrib jars.                                  -->
-  <!-- ====================================================== -->
-  <target name="package">
-    <subant target="package">
-      <fileset dir="." includes="*/build.xml"/>
-    </subant>
-  </target>
-  
-  <!-- ====================================================== -->
-  <!-- Test all the contribs.                               -->
-  <!-- ====================================================== -->
-  <target name="test">
-    <subant target="test">
-      <fileset dir="." includes="fuse-dfs/build.xml"/>
-    </subant> 
-  </target>
-  
-  
-  <!-- ====================================================== -->
-  <!-- Clean all the contribs.                              -->
-  <!-- ====================================================== -->
-  <target name="clean">
-    <subant target="clean">
-      <fileset dir="." includes="*/build.xml"/>
-    </subant>
-  </target>
-
-</project>

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/contrib/build-contrib.xml → hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/build-contrib.xml

@@ -70,7 +70,7 @@
   <property name="ivy.dir" location="ivy" />
   <property name="ivysettings.xml" location="${hadoop.root}/ivy/ivysettings.xml"/>
   <loadproperties srcfile="${ivy.dir}/libraries.properties"/>
-  <loadproperties srcfile="${hadoop.root}/ivy/libraries.properties"/>
+  <loadproperties srcfile="ivy/libraries.properties"/>
   <property name="ivy.jar" location="${hadoop.root}/ivy/ivy-${ivy.version}.jar"/>
   <property name="ivy_repo_url" 
 	value="http://repo2.maven.org/maven2/org/apache/ivy/ivy/${ivy.version}/ivy-${ivy.version}.jar" />

+ 7 - 19
hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/build.xml

@@ -17,19 +17,19 @@
    limitations under the License.
 -->
 
-<project name="fuse-dfs" default="jar" xmlns:ivy="antlib:org.apache.ivy.ant">
+<project name="fuse-dfs" default="compile" xmlns:ivy="antlib:org.apache.ivy.ant">
 
 
-  <import file="../build-contrib.xml"/>
+  <import file="build-contrib.xml"/>
 
 
-  <target name="check-libhdfs-exists" if="fusedfs">
+  <target name="check-libhdfs-exists">
     <property name="libhdfs.lib" value="${build.c++.libhdfs}/libhdfs.so"/>
     <available file="${libhdfs.lib}" property="libhdfs-exists"/>
-    <fail message="libhdfs.so does not exist: ${libhdfs.lib}. Please check flags -Dlibhdfs=1 -Dfusedfs=1 are set or first try ant compile -Dcompile.c++=true -Dlibhdfs=true">
+    <fail message="libhdfs.so does not exist: ${libhdfs.lib}.">
       <condition><not><isset property="libhdfs-exists"/></not></condition>
     </fail>
   </target>
 
 
-  <target name="compile" if="fusedfs">
+  <target name="compile">
     <exec executable="autoreconf" dir="${basedir}" 
           searchpath="yes" failonerror="yes">
        <arg value="-if"/>
@@ -46,24 +46,12 @@
       <env key="PACKAGE_VERSION" value="0.1.0"/>
       <env key="BUILD_PLATFORM" value="${build.platform}" />
     </exec>
-
-    <mkdir dir="${build.dir}"/>
-    <mkdir dir="${build.dir}/test"/>
-
-    <!-- Use exec since the copy task doesn't preserve attrs -->
-    <exec executable="cp" failonerror="true">
-      <arg line="${hadoop.root}/src/contrib/fuse-dfs/src/fuse_dfs ${build.dir}"/>
-    </exec>
-
-    <exec executable="cp" failonerror="true">
-      <arg line="${hadoop.root}/src/contrib/fuse-dfs/src/fuse_dfs_wrapper.sh ${build.dir}"/>
-    </exec>
   </target>
 
 
   <target name="jar" />
   <target name="package" />
 
 
-  <target name="compile-test" depends="ivy-retrieve-common, check-libhdfs-exists" if="fusedfs">
+  <target name="compile-test" depends="ivy-retrieve-common, check-libhdfs-exists">
     <javac encoding="${build.encoding}"
 	   srcdir="${src.test}"
 	   includes="**/*.java"
@@ -73,7 +61,7 @@
     </javac>
   </target>
 
-  <target name="test" depends="compile-test,check-libhdfs-exists" if="fusedfs">
+  <target name="test" depends="compile-test,check-libhdfs-exists">
     <junit showoutput="${test.output}" fork="yes" printsummary="yes"
            errorProperty="tests.failed" haltonfailure="no" failureProperty="tests.failed">
       <classpath refid="test.classpath"/>

+ 161 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/pom.xml

@@ -0,0 +1,161 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+
+
+-->
+<project>
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>3.0.0-SNAPSHOT</version>
+    <relativePath>../../../../../hadoop-project</relativePath>
+  </parent>
+  <groupId>org.apache.hadoop.contrib</groupId>
+  <artifactId>hadoop-hdfs-fuse</artifactId>
+  <version>3.0.0-SNAPSHOT</version>
+  <packaging>pom</packaging>
+
+  <name>Apache Hadoop HDFS Fuse</name>
+  <description>Apache Hadoop HDFS Fuse</description>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <!-- workaround for filtered/unfiltered resources in same directory -->
+        <!-- remove when maven-eclipse-plugin 2.9 is available -->
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-eclipse-plugin</artifactId>
+        <version>2.6</version>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <threadCount>1</threadCount>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-javadoc-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>javadoc</goal>
+            </goals>
+            <phase>site</phase>
+            <configuration>
+              <linksource>true</linksource>
+              <quiet>true</quiet>
+              <verbose>false</verbose>
+              <source>${maven.compile.source}</source>
+              <charset>${maven.compile.encoding}</charset>
+              <groups>
+                <group>
+                  <title>HttpFs API</title>
+                  <packages>*</packages>
+                </group>
+              </groups>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-project-info-reports-plugin</artifactId>
+        <executions>
+          <execution>
+            <configuration>
+              <dependencyLocationsEnabled>false</dependencyLocationsEnabled>
+            </configuration>
+            <goals>
+              <goal>dependencies</goal>
+            </goals>
+            <phase>site</phase>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+          </excludes>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+
+  <profiles>
+    <profile>
+      <id>fuse</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-antrun-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>prepare-compile-native</id>
+                <phase>generate-sources</phase>
+                <goals>
+                  <goal>run</goal>
+                </goals>
+                <configuration>
+                  <target>
+                    <copy toDir="${project.build.directory}/fuse-dfs">
+                      <fileset dir="${basedir}"/>
+                    </copy>
+                  </target>
+                </configuration>
+              </execution>
+              <execution>
+                <id>compile-fuse</id>
+                <phase>compile</phase>
+                <goals>
+                  <goal>run</goal>
+                </goals>
+                <configuration>
+                  <target>
+                    <ant antfile="${project.build.directory}/fuse-dfs/build.xml"
+                         dir="${project.build.directory}/fuse-dfs">
+                      <target name="compile"/>
+                    </ant>
+                  </target>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+       </plugins>
+      </build>
+    </profile>
+  </profiles>
+</project>

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/Makefile.am

@@ -17,5 +17,5 @@
 bin_PROGRAMS = fuse_dfs
 fuse_dfs_SOURCES = fuse_dfs.c fuse_options.c fuse_trash.c fuse_stat_struct.c fuse_users.c fuse_init.c fuse_connect.c fuse_impls_access.c fuse_impls_chmod.c  fuse_impls_chown.c  fuse_impls_create.c  fuse_impls_flush.c fuse_impls_getattr.c  fuse_impls_mkdir.c  fuse_impls_mknod.c  fuse_impls_open.c fuse_impls_read.c fuse_impls_release.c fuse_impls_readdir.c fuse_impls_rename.c fuse_impls_rmdir.c fuse_impls_statfs.c fuse_impls_symlink.c fuse_impls_truncate.c fuse_impls_utimens.c  fuse_impls_unlink.c fuse_impls_write.c
 AM_CFLAGS= -Wall -g
-AM_CPPFLAGS= -DPERMS=$(PERMS) -D_FILE_OFFSET_BITS=64 -I$(JAVA_HOME)/include -I$(HADOOP_PREFIX)/src/c++/libhdfs -I$(JAVA_HOME)/include/linux -D_FUSE_DFS_VERSION=\"$(PACKAGE_VERSION)\" -DPROTECTED_PATHS=\"$(PROTECTED_PATHS)\" -I$(FUSE_HOME)/include
-AM_LDFLAGS= -L$(HADOOP_PREFIX)/build/c++/$(BUILD_PLATFORM)/lib -lhdfs -L$(FUSE_HOME)/lib -lfuse -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server -ljvm
+AM_CPPFLAGS= -DPERMS=$(PERMS) -D_FILE_OFFSET_BITS=64 -I$(JAVA_HOME)/include -I$(HADOOP_PREFIX)/../../src/main/native -I$(JAVA_HOME)/include/linux -D_FUSE_DFS_VERSION=\"$(PACKAGE_VERSION)\" -DPROTECTED_PATHS=\"$(PROTECTED_PATHS)\" -I$(FUSE_HOME)/include
+AM_LDFLAGS= -L$(HADOOP_PREFIX)/../../target/native/target/usr/local/lib -lhdfs -L$(FUSE_HOME)/lib -lfuse -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server -ljvm -lm

+ 26 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml

@@ -537,7 +537,32 @@
        For command usage, see <a href="http://hadoop.apache.org/common/docs/current/commands_manual.html#fetchdt"><code>fetchdt</code> command</a>. 
       </p>
              
-   </section><section> <title> Upgrade and Rollback </title>
+      </section>
+     <section> <title>Recovery Mode</title>
+       <p>Typically, you will configure multiple metadata storage locations.
+       Then, if one storage location is corrupt, you can read the
+       metadata from one of the other storage locations.</p>
+
+       <p>However, what can you do if the only storage locations available are
+       corrupt?  In this case, there is a special NameNode startup mode called
+       Recovery mode that may allow you to recover most of your data.</p>
+
+       <p>You can start the NameNode in recovery mode like so:
+        <code>namenode -recover</code></p>
+
+        <p>When in recovery mode, the NameNode will interactively prompt you at
+       the command line about possible courses of action you can take to
+       recover your data.</p>
+
+       <p>If you don't want to be prompted, you can give the
+       <code>-force</code> option.  This option will force
+       recovery mode to always select the first choice.  Normally, this
+       will be the most reasonable choice.</p>
+
+       <p>Because Recovery mode can cause you to lose data, you should always
+       back up your edit log and fsimage before using it.</p>
+     </section>
+      <section> <title> Upgrade and Rollback </title>
      <p>
       When Hadoop is upgraded on an existing cluster, as with any
       software upgrade, it is possible there are new bugs or
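
The Recovery Mode section added above boils down to a short command-line workflow. A minimal sketch, assuming the hdfs wrapper script and an illustrative metadata directory; the backup step follows the advice in the text and is not part of this commit:

    # Back up the edit log and fsimage first; recovery mode can lose data.
    cp -r /data/dfs/name /data/dfs/name.bak
    # Interactive recovery: the NameNode prompts at each decision point.
    hdfs namenode -recover
    # Non-interactive: -force always selects the first (default) choice.
    hdfs namenode -recover -force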

+ 1 - 33
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java

@@ -17,16 +17,8 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 
 /**
  * A block and the full path information to the block data file and
@@ -34,20 +26,11 @@ import org.apache.hadoop.io.WritableFactory;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class BlockLocalPathInfo implements Writable {
-  static final WritableFactory FACTORY = new WritableFactory() {
-    public Writable newInstance() { return new BlockLocalPathInfo(); }
-  };
-  static {                                      // register a ctor
-    WritableFactories.setFactory(BlockLocalPathInfo.class, FACTORY);
-  }
-
+public class BlockLocalPathInfo {
   private ExtendedBlock block;
   private String localBlockPath = "";  // local file storing the data
   private String localMetaPath = "";   // local file storing the checksum
 
-  public BlockLocalPathInfo() {}
-
   /**
    * Constructs BlockLocalPathInfo.
    * @param b The block corresponding to this lock path info. 
@@ -77,21 +60,6 @@ public class BlockLocalPathInfo implements Writable {
    */
   public String getMetaPath() {return localMetaPath;}
 
-  @Override
-  public void write(DataOutput out) throws IOException {
-    block.write(out);
-    Text.writeString(out, localBlockPath);
-    Text.writeString(out, localMetaPath);
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    block = new ExtendedBlock();
-    block.readFields(in);
-    localBlockPath = Text.readString(in);
-    localMetaPath = Text.readString(in);
-  }
-  
   /**
    * Get number of bytes in the block.
    * @return Number of bytes in the block.

+ 0 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java

@@ -24,7 +24,6 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector;
-import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenInfo;
@@ -42,9 +41,6 @@ public interface ClientDatanodeProtocol {
    * the client interface to the DN AND the RPC protocol used to 
    * communicate with the NN.
    * 
-   * Post version 10 (release 23 of Hadoop), the protocol is implemented in
-   * {@literal ../protocolR23Compatible/ClientDatanodeWireProtocol}
-   * 
    * This class is used by both the DFSClient and the 
    * DN server side to insulate from the protocol serialization.
    * 
@@ -60,7 +56,6 @@ public interface ClientDatanodeProtocol {
    * 
    * 9 is the last version id when this class was used for protocols
    *  serialization. DO not update this version any further. 
-   *  Changes are recorded in R23 classes.
    */
   public static final long versionID = 9L;
 

+ 0 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -66,9 +66,6 @@ public interface ClientProtocol {
    * the client interface to the NN AND the RPC protocol used to 
    * communicate with the NN.
    * 
-   * Post version 70 (release 23 of Hadoop), the protocol is implemented in
-   * {@literal ../protocolR23Compatible/ClientNamenodeWireProtocol}
-   * 
    * This class is used by both the DFSClient and the 
    * NN server side to insulate from the protocol serialization.
    * 
@@ -84,7 +81,6 @@ public interface ClientProtocol {
    * 
    * 69L is the last version id when this class was used for protocols
    *  serialization. DO not update this version any further. 
-   *  Changes are recorded in R23 classes.
    */
   public static final long versionID = 69L;
   

+ 1 - 28
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java

@@ -17,11 +17,6 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
 
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.Text;
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
 import java.util.Arrays;
 
 /**
@@ -29,7 +24,7 @@ import java.util.Arrays;
  * used for iterative calls to NameNode.listCorruptFileBlocks.
  *
  */
-public class CorruptFileBlocks implements Writable {
+public class CorruptFileBlocks {
   // used for hashCode
   private static final int PRIME = 16777619;
 
@@ -53,28 +48,6 @@ public class CorruptFileBlocks implements Writable {
     return cookie;
   }
 
-  
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    int fileCount = in.readInt();
-    files = new String[fileCount];
-    for (int i = 0; i < fileCount; i++) {
-      files[i] = Text.readString(in);
-    }
-    cookie = Text.readString(in);
-  }
-
-  
-  @Override
-  public void write(DataOutput out) throws IOException {
-    out.writeInt(files.length);
-    for (int i = 0; i < files.length; i++) {
-      Text.writeString(out, files[i]);
-    }
-    Text.writeString(out, cookie);
-  }
-
- 
   @Override
   public boolean equals(Object obj) {
     if (this == obj) {

+ 2 - 36
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java

@@ -18,15 +18,9 @@
 
 
 package org.apache.hadoop.hdfs.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableComparable;
 
 
 /**
  * This class represents the primary identifier for a Datanode.
@@ -41,8 +35,8 @@ import org.apache.hadoop.io.WritableComparable;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class DatanodeID implements WritableComparable<DatanodeID> {
-  public static final DatanodeID[] EMPTY_ARRAY = {}; 
+public class DatanodeID implements Comparable<DatanodeID> {
+  public static final DatanodeID[] EMPTY_ARRAY = {};
 
 
   protected String ipAddr;     // IP address
   protected String hostName;   // hostname
@@ -51,10 +45,6 @@ public class DatanodeID implements WritableComparable<DatanodeID> {
   protected int infoPort;      // info server port
   protected int ipcPort;       // IPC server port
 
-  public DatanodeID() {
-    this("", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
-  }
-
   public DatanodeID(String ipAddr, int xferPort) {
     this(ipAddr, "", "", xferPort,
         DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
@@ -234,28 +224,4 @@ public class DatanodeID implements WritableComparable<DatanodeID> {
   public int compareTo(DatanodeID that) {
     return getXferAddr().compareTo(that.getXferAddr());
   }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    Text.writeString(out, ipAddr);
-    Text.writeString(out, hostName);
-    Text.writeString(out, storageID);
-    out.writeShort(xferPort);
-    out.writeShort(infoPort);
-    out.writeShort(ipcPort);
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    ipAddr = Text.readString(in);
-    hostName = Text.readString(in);
-    storageID = Text.readString(in);
-    // The port read could be negative, if the port is a large number (more
-    // than 15 bits in storage size (but less than 16 bits).
-    // So chop off the first two bytes (and hence the signed bits) before 
-    // setting the field.
-    xferPort = in.readShort() & 0x0000ffff;
-    infoPort = in.readShort() & 0x0000ffff;
-    ipcPort = in.readShort() & 0x0000ffff;
-  }
 }

+ 0 - 57
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java

@@ -17,19 +17,11 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
 import java.util.Date;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
@@ -78,11 +70,6 @@ public class DatanodeInfo extends DatanodeID implements Node {
 
 
   protected AdminStates adminState;
 
-  public DatanodeInfo() {
-    super();
-    adminState = null;
-  }
-  
   public DatanodeInfo(DatanodeInfo from) {
     super(from);
     this.capacity = from.getCapacity();
@@ -356,50 +343,6 @@ public class DatanodeInfo extends DatanodeID implements Node {
   public int getLevel() { return level; }
   public void setLevel(int level) {this.level = level;}
 
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (DatanodeInfo.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new DatanodeInfo(); }
-       });
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    out.writeLong(capacity);
-    out.writeLong(dfsUsed);
-    out.writeLong(remaining);
-    out.writeLong(blockPoolUsed);
-    out.writeLong(lastUpdate);
-    out.writeInt(xceiverCount);
-    Text.writeString(out, location);
-    WritableUtils.writeEnum(out, getAdminState());
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    this.capacity = in.readLong();
-    this.dfsUsed = in.readLong();
-    this.remaining = in.readLong();
-    this.blockPoolUsed = in.readLong();
-    this.lastUpdate = in.readLong();
-    this.xceiverCount = in.readInt();
-    this.location = Text.readString(in);
-    setAdminState(WritableUtils.readEnum(in, AdminStates.class));
-  }
-
-  /** Read a DatanodeInfo */
-  public static DatanodeInfo read(DataInput in) throws IOException {
-    final DatanodeInfo d = new DatanodeInfo();
-    d.readFields(in);
-    return d;
-  }
-
   @Override
   public int hashCode() {
     // Super implementation is sufficient

+ 1 - 57
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DirectoryListing.java

@@ -16,15 +16,8 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 
 /**
  * This class defines a partial listing of a directory to support
@@ -32,24 +25,10 @@ import org.apache.hadoop.io.WritableFactory;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class DirectoryListing implements Writable {
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (DirectoryListing.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new DirectoryListing(); }
-       });
-  }
-
+public class DirectoryListing {
   private HdfsFileStatus[] partialListing;
   private int remainingEntries;
   
-  /**
-   * default constructor
-   */
-  public DirectoryListing() {
-  }
-  
   /**
    * constructor
    * @param partialListing a partial listing of a directory
@@ -103,39 +82,4 @@ public class DirectoryListing implements Writable {
     }
     return partialListing[partialListing.length-1].getLocalNameInBytes();
   }
-
-  // Writable interface
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    int numEntries = in.readInt();
-    partialListing = new HdfsFileStatus[numEntries];
-    if (numEntries !=0 ) {
-      boolean hasLocation = in.readBoolean();
-      for (int i=0; i<numEntries; i++) {
-        if (hasLocation) {
-          partialListing[i] = new HdfsLocatedFileStatus();
-        } else {
-          partialListing[i] = new HdfsFileStatus();
-        }
-        partialListing[i].readFields(in);
-      }
-    }
-    remainingEntries = in.readInt();
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    out.writeInt(partialListing.length);
-    if (partialListing.length != 0) { 
-       if (partialListing[0] instanceof HdfsLocatedFileStatus) {
-         out.writeBoolean(true);
-       } else {
-         out.writeBoolean(false);
-       }
-       for (HdfsFileStatus fileStatus : partialListing) {
-         fileStatus.write(out);
-       }
-    }
-    out.writeInt(remainingEntries);
-  }
 }

+ 1 - 39
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java

@@ -17,34 +17,18 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.DeprecatedUTF8;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 
 /**
  * Identifies a Block uniquely across the block pools
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class ExtendedBlock implements Writable {
+public class ExtendedBlock {
   private String poolId;
   private Block block;

-  static { // register a ctor
-    WritableFactories.setFactory(ExtendedBlock.class, new WritableFactory() {
-      public Writable newInstance() {
-        return new ExtendedBlock();
-      }
-    });
-  }
-
   public ExtendedBlock() {
     this(null, 0, 0, 0);
   }
@@ -68,28 +52,6 @@ public class ExtendedBlock implements Writable {
     block = new Block(blkid, len, genstamp);
   }

-  public void write(DataOutput out) throws IOException {
-    DeprecatedUTF8.writeString(out, poolId);
-    block.writeHelper(out);
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    this.poolId = DeprecatedUTF8.readString(in);
-    block.readHelper(in);
-  }
-
-  // Write only the identifier part of the block
-  public void writeId(DataOutput out) throws IOException {
-    DeprecatedUTF8.writeString(out, poolId);
-    block.writeId(out);
-  }
-
-  // Read only the identifier part of the block
-  public void readId(DataInput in) throws IOException {
-    this.poolId = DeprecatedUTF8.readString(in);
-    block.readId(in);
-  }
-  
   public String getBlockPoolId() {
     return poolId;
   }

+ 1 - 69
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java

@@ -17,32 +17,17 @@
  */
 package org.apache.hadoop.hdfs.protocol;

-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;

 /** Interface that represents the over the wire information for a file.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class HdfsFileStatus implements Writable {
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (HdfsFileStatus.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new HdfsFileStatus(); }
-       });
-  }
+public class HdfsFileStatus {

   private byte[] path;  // local name of the inode that's encoded in java UTF8
   private byte[] symlink; // symlink target encoded in java UTF8 or null
@@ -58,13 +43,6 @@ public class HdfsFileStatus implements Writable {
   
   public static final byte[] EMPTY_NAME = new byte[0];

-  /**
-   * default constructor
-   */
-  public HdfsFileStatus() { 
-    this(0, false, 0, 0, 0, 0, null, null, null, null, null); 
-  }
-  
   /**
    * Constructor
    * @param length the number of bytes the file has
@@ -242,50 +220,4 @@ public class HdfsFileStatus implements Writable {
   final public byte[] getSymlinkInBytes() {
     return symlink;
   }
-
-  //////////////////////////////////////////////////
-  // Writable
-  //////////////////////////////////////////////////
-  public void write(DataOutput out) throws IOException {
-    out.writeInt(path.length);
-    out.write(path);
-    out.writeLong(length);
-    out.writeBoolean(isdir);
-    out.writeShort(block_replication);
-    out.writeLong(blocksize);
-    out.writeLong(modification_time);
-    out.writeLong(access_time);
-    permission.write(out);
-    Text.writeString(out, owner);
-    Text.writeString(out, group);
-    out.writeBoolean(isSymlink());
-    if (isSymlink()) {
-      out.writeInt(symlink.length);
-      out.write(symlink);
-    }
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    int numOfBytes = in.readInt();
-    if (numOfBytes == 0) {
-      this.path = EMPTY_NAME;
-    } else {
-      this.path = new byte[numOfBytes];
-      in.readFully(path);
-    }
-    this.length = in.readLong();
-    this.isdir = in.readBoolean();
-    this.block_replication = in.readShort();
-    blocksize = in.readLong();
-    modification_time = in.readLong();
-    access_time = in.readLong();
-    permission.readFields(in);
-    owner = Text.readString(in);
-    group = Text.readString(in);
-    if (in.readBoolean()) {
-      numOfBytes = in.readInt();
-      this.symlink = new byte[numOfBytes];
-      in.readFully(symlink);
-    }
-  }
 }

+ 0 - 28
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java

@@ -17,10 +17,6 @@
  */
 package org.apache.hadoop.hdfs.protocol;

-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -34,12 +30,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
 public class HdfsLocatedFileStatus extends HdfsFileStatus {
   private LocatedBlocks locations;
   
-  /**
-   * Default constructor
-   */
-  public HdfsLocatedFileStatus() {
-  }
-  
   /**
    * Constructor
    * 
@@ -69,22 +59,4 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
 	public LocatedBlocks getBlockLocations() {
 		return locations;
 	}
-	
-  //////////////////////////////////////////////////
-  // Writable
-  //////////////////////////////////////////////////
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    if (!isDir() && !isSymlink()) {
-      locations.write(out);
-    }
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    if (!isDir() && !isSymlink()) {
-      locations = new LocatedBlocks();
-      locations.readFields(in);
-    }
-  }
 }

+ 1 - 47
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java

@@ -20,11 +20,8 @@ package org.apache.hadoop.hdfs.protocol;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.io.*;
 import org.apache.hadoop.security.token.Token;

-import java.io.*;
-
 /****************************************************
  * A LocatedBlock is a pair of Block, DatanodeInfo[]
  * objects.  It tells where to find a Block.
@@ -32,15 +29,7 @@ import java.io.*;
  ****************************************************/
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class LocatedBlock implements Writable {
-
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (LocatedBlock.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new LocatedBlock(); }
-       });
-  }
+public class LocatedBlock {

   private ExtendedBlock b;
   private long offset;  // offset of the first byte of the block in the file
@@ -124,41 +113,6 @@ public class LocatedBlock implements Writable {
     return this.corrupt;
   }

-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  public void write(DataOutput out) throws IOException {
-    blockToken.write(out);
-    out.writeBoolean(corrupt);
-    out.writeLong(offset);
-    b.write(out);
-    out.writeInt(locs.length);
-    for (int i = 0; i < locs.length; i++) {
-      locs[i].write(out);
-    }
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    blockToken.readFields(in);
-    this.corrupt = in.readBoolean();
-    offset = in.readLong();
-    this.b = new ExtendedBlock();
-    b.readFields(in);
-    int count = in.readInt();
-    this.locs = new DatanodeInfo[count];
-    for (int i = 0; i < locs.length; i++) {
-      locs[i] = new DatanodeInfo();
-      locs[i].readFields(in);
-    }
-  }
-
-  /** Read LocatedBlock from in. */
-  public static LocatedBlock read(DataInput in) throws IOException {
-    final LocatedBlock lb = new LocatedBlock();
-    lb.readFields(in);
-    return lb;
-  }
-
   @Override
   public String toString() {
     return getClass().getSimpleName() + "{" + b

+ 1 - 63
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java

@@ -17,26 +17,19 @@
  */
 package org.apache.hadoop.hdfs.protocol;

-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
 import java.util.List;
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;

 /**
  * Collection of blocks with their locations and the file length.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class LocatedBlocks implements Writable {
+public class LocatedBlocks {
   private long fileLength;
   private List<LocatedBlock> blocks; // array of blocks with prioritized locations
   private boolean underConstruction;
@@ -167,61 +160,6 @@ public class LocatedBlocks implements Writable {
     return binSearchResult >= 0 ? binSearchResult : -(binSearchResult+1);
   }

-  //////////////////////////////////////////////////
-  // Writable
-  //////////////////////////////////////////////////
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (LocatedBlocks.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new LocatedBlocks(); }
-       });
-  }
-
-  public void write(DataOutput out) throws IOException {
-    out.writeLong(this.fileLength);
-    out.writeBoolean(underConstruction);
-
-    //write the last located block
-    final boolean isNull = lastLocatedBlock == null;
-    out.writeBoolean(isNull);
-    if (!isNull) {
-      lastLocatedBlock.write(out);
-    }
-    out.writeBoolean(isLastBlockComplete);
-
-    // write located blocks
-    int nrBlocks = locatedBlockCount();
-    out.writeInt(nrBlocks);
-    if (nrBlocks == 0) {
-      return;
-    }
-    for (LocatedBlock blk : this.blocks) {
-      blk.write(out);
-    }
-  }
-  
-  public void readFields(DataInput in) throws IOException {
-    this.fileLength = in.readLong();
-    underConstruction = in.readBoolean();
-
-    //read the last located block
-    final boolean isNull = in.readBoolean();
-    if (!isNull) {
-      lastLocatedBlock = LocatedBlock.read(in);
-    }
-    isLastBlockComplete = in.readBoolean();
-
-    // read located blocks
-    int nrBlocks = in.readInt();
-    this.blocks = new ArrayList<LocatedBlock>(nrBlocks);
-    for (int idx = 0; idx < nrBlocks; idx++) {
-      LocatedBlock blk = new LocatedBlock();
-      blk.readFields(in);
-      this.blocks.add(blk);
-    }
-  }
-
   @Override
   public String toString() {
     final StringBuilder b = new StringBuilder(getClass().getSimpleName());

+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java

@@ -39,12 +39,10 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlo
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
-import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcClientUtil;

+ 2 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
@@ -102,24 +101,16 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;

 import com.google.protobuf.ByteString;
@@ -127,8 +118,8 @@ import com.google.protobuf.ServiceException;

 /**
  * This class forwards NN's ClientProtocol calls as RPC calls to the NN server
- * while translating from the parameter types used in ClientProtocol to those
- * used in protocolR23Compatile.*.
+ * while translating from the parameter types used in ClientProtocol to the
+ * new PB types.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Stable

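The import churn above reflects that ClientNamenodeProtocolTranslatorPB now talks protobuf directly instead of going through the protocolR23Compatible layer: each translator method builds a generated request proto, invokes the blocking stub, and unwraps ServiceException back into IOException via ProtobufHelper. A minimal, self-contained sketch of that error-translation idiom follows; the PbCallUtil class and PbCall interface are hypothetical helpers, not part of the patch.

    import java.io.IOException;

    import org.apache.hadoop.ipc.ProtobufHelper;

    import com.google.protobuf.ServiceException;

    // Illustrates the exception-translation idiom used by the PB translators:
    // generated stubs throw ServiceException, while ClientProtocol methods declare IOException.
    public final class PbCallUtil {
      public interface PbCall<T> {
        T call() throws ServiceException;
      }

      public static <T> T run(PbCall<T> call) throws IOException {
        try {
          return call.call();
        } catch (ServiceException e) {
          // ProtobufHelper unwraps the remote IOException wrapped inside, if any.
          throw ProtobufHelper.getRemoteException(e);
        }
      }

      private PbCallUtil() {}
    }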
+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolClientSideTranslatorPB.java

@@ -22,10 +22,8 @@ import java.io.Closeable;
 import java.io.IOException;
 import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserResponseProto;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
-import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;

+ 11 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java

@@ -56,9 +56,17 @@ public class InterDatanodeProtocolServerSideTranslatorPB implements
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    return InitReplicaRecoveryResponseProto.newBuilder()
-        .setBlock(PBHelper.convert(r))
-        .setState(PBHelper.convert(r.getOriginalReplicaState())).build();
+    
+    if (r == null) {
+      return InitReplicaRecoveryResponseProto.newBuilder()
+          .setReplicaFound(false)
+          .build();
+    } else {
+      return InitReplicaRecoveryResponseProto.newBuilder()
+          .setReplicaFound(true)
+          .setBlock(PBHelper.convert(r))
+          .setState(PBHelper.convert(r.getOriginalReplicaState())).build();
+    }
   }

   @Override

+ 11 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java

@@ -85,6 +85,17 @@ public class InterDatanodeProtocolTranslatorPB implements
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
+    if (!resp.getReplicaFound()) {
+      // No replica found on the remote node.
+      return null;
+    } else {
+      if (!resp.hasBlock() || !resp.hasState()) {
+        throw new IOException("Replica was found but missing fields. " +
+            "Req: " + req + "\n" +
+            "Resp: " + resp);
+      }
+    }
+    
     BlockProto b = resp.getBlock();
     return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(),
         b.getGenStamp(), PBHelper.convert(resp.getState()));

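The two hunks above are a matched pair: a null ReplicaRecoveryInfo now round-trips over the PB-based inter-datanode RPC as replicaFound=false with the block/state fields omitted, and the client maps that back to null while treating a "found but incomplete" response as an error. A self-contained sketch of the same presence-flag convention, using hypothetical types rather than the real proto messages:

    import java.io.IOException;

    // Hypothetical value/response types illustrating the presence-flag convention
    // used by InitReplicaRecoveryResponseProto above.
    final class LookupResponse {
      final boolean found;
      final String value;          // only meaningful when found == true

      private LookupResponse(boolean found, String value) {
        this.found = found;
        this.value = value;
      }

      // Server side: encode a nullable result explicitly instead of sending junk fields.
      static LookupResponse encode(String result) {
        return result == null
            ? new LookupResponse(false, null)
            : new LookupResponse(true, result);
      }

      // Client side: decode back to null, failing fast on an inconsistent response.
      static String decode(LookupResponse resp) throws IOException {
        if (!resp.found) {
          return null;                       // nothing on the remote side
        }
        if (resp.value == null) {
          throw new IOException("found=true but value is missing");
        }
        return resp.value;
      }
    }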
+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java

@@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransacti
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
@@ -46,7 +45,6 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
-import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;

+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java

@@ -22,10 +22,8 @@ import java.io.Closeable;
 import java.io.IOException;

 import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclRequestProto;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
-import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;

+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java

@@ -23,10 +23,8 @@ import java.io.IOException;

 import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshSuperUserGroupsConfigurationRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsRequestProto;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
-import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;

+ 0 - 110
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ProtocolSignatureWritable.java

@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.protocolR23Compatible;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class ProtocolSignatureWritable implements Writable {
-  static {               // register a ctor
-    WritableFactories.setFactory
-      (ProtocolSignatureWritable.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new ProtocolSignatureWritable(); }
-       });
-  }
-
-  private long version;
-  private int[] methods = null; // an array of method hash codes
-  
-  public static org.apache.hadoop.ipc.ProtocolSignature convert(
-      final ProtocolSignatureWritable ps) {
-    if (ps == null) return null;
-    return new org.apache.hadoop.ipc.ProtocolSignature(
-        ps.getVersion(), ps.getMethods());
-  }
-  
-  public static ProtocolSignatureWritable convert(
-      final org.apache.hadoop.ipc.ProtocolSignature ps) {
-    if (ps == null) return null;
-    return new ProtocolSignatureWritable(ps.getVersion(), ps.getMethods());
-  }
-  
-  /**
-   * default constructor
-   */
-  public ProtocolSignatureWritable() {
-  }
-  
-  /**
-   * Constructor
-   * 
-   * @param version server version
-   * @param methodHashcodes hash codes of the methods supported by server
-   */
-  public ProtocolSignatureWritable(long version, int[] methodHashcodes) {
-    this.version = version;
-    this.methods = methodHashcodes;
-  }
-  
-  public long getVersion() {
-    return version;
-  }
-  
-  public int[] getMethods() {
-    return methods;
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    version = in.readLong();
-    boolean hasMethods = in.readBoolean();
-    if (hasMethods) {
-      int numMethods = in.readInt();
-      methods = new int[numMethods];
-      for (int i=0; i<numMethods; i++) {
-        methods[i] = in.readInt();
-      }
-    }
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    out.writeLong(version);
-    if (methods == null) {
-      out.writeBoolean(false);
-    } else {
-      out.writeBoolean(true);
-      out.writeInt(methods.length);
-      for (int method : methods) {
-        out.writeInt(method);
-      }
-    }
-  }
-}
-

+ 0 - 44
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/overview.html

@@ -1,44 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
-<html>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<head>
-   <title>Namenode Client Protocols Compatible with the version
-    of Hadoop Release 23</title>
-</head>
-<body>
-<p>
-This package is for ALL versions of HDFS protocols that use writable data types
-and are compatible with the version of the protocol that was
- shipped with Release 23 of Hadoop.
-</p>
-
-Compatibility should be maintained:
-<ul>
-<li> Do NOT delete any methods </li>
-<li> Do NOT change the signatures of any method:
- do not  change parameters, parameter types
-or exceptions thrown by the method.</li>
-</ul>
-<p>
-You can add new methods and new types. If you need to  change a method's
-signature, please add a new method instead.
-When you add new methods and new types do NOT change the version number.
-<p> 
-Version number is changed ONLY when compatibility is broken (which
-should be very rare and a big deal).
-</p>

+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -2767,7 +2767,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     }
   }

-  public void checkReplication(Block block, int numExpectedReplicas) {
+  public void checkReplication(Block block, short numExpectedReplicas) {
     // filter out containingNodes that are marked for decommission.
     NumberReplicas number = countNodes(block);
     if (isNeededReplication(block, numExpectedReplicas, number.liveReplicas())) { 
@@ -2775,6 +2775,10 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
                              number.liveReplicas(),
                              number.decommissionedReplicas(),
                              numExpectedReplicas);
+      return;
+    }
+    if (number.liveReplicas() > numExpectedReplicas) {
+      processOverReplicatedBlock(block, numExpectedReplicas, null, null);
     }
   }


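With this hunk, checkReplication no longer assumes a block can only be under-replicated: it queues re-replication when live replicas fall short, and otherwise hands genuinely over-replicated blocks to processOverReplicatedBlock. A hedged, standalone sketch of just that decision (illustrative names, not the BlockManager API; the real method also filters out decommissioning nodes):

    // Illustrative decision logic only.
    enum ReplicationAction { UNDER_REPLICATED, OVER_REPLICATED, OK }

    final class ReplicationCheck {
      static ReplicationAction check(int liveReplicas, short expectedReplicas) {
        if (liveReplicas < expectedReplicas) {
          return ReplicationAction.UNDER_REPLICATED;  // queue for re-replication
        }
        if (liveReplicas > expectedReplicas) {
          return ReplicationAction.OVER_REPLICATED;   // schedule excess-replica removal
        }
        return ReplicationAction.OK;
      }
    }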
+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java

@@ -153,8 +153,6 @@ public class DatanodeDescriptor extends DatanodeInfo {
    */
   private boolean disallowed = false;

-  public DatanodeDescriptor() {}
-  
   /**
    * DatanodeDescriptor constructor
    * @param nodeID id of the data node

+ 43 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java

@@ -22,6 +22,7 @@ import java.io.DataOutput;
 import java.io.IOException;

 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext;

 /************************************
  * Some handy internal HDFS constants
@@ -54,13 +55,23 @@ public final class HdfsServerConstants {
     FINALIZE("-finalize"),
     IMPORT  ("-importCheckpoint"),
     BOOTSTRAPSTANDBY("-bootstrapStandby"),
-    INITIALIZESHAREDEDITS("-initializeSharedEdits");
+    INITIALIZESHAREDEDITS("-initializeSharedEdits"),
+    RECOVER  ("-recover"),
+    FORCE("-force"),
+    NONINTERACTIVE("-nonInteractive");
     
     private String name = null;
     
     // Used only with format and upgrade options
     private String clusterId = null;
     
+    // Used only with format option
+    private boolean isForceFormat = false;
+    private boolean isInteractiveFormat = true;
+    
+    // Used only with recovery option
+    private int force = 0;
+
     private StartupOption(String arg) {this.name = arg;}
     public String getName() {return name;}
     public NamenodeRole toNodeRole() {
@@ -77,10 +88,40 @@ public final class HdfsServerConstants {
     public void setClusterId(String cid) {
       clusterId = cid;
     }
-    
+
     public String getClusterId() {
       return clusterId;
     }
+
+    public MetaRecoveryContext createRecoveryContext() {
+      if (!name.equals(RECOVER.name))
+        return null;
+      return new MetaRecoveryContext(force);
+    }
+
+    public void setForce(int force) {
+      this.force = force;
+    }
+    
+    public int getForce() {
+      return this.force;
+    }
+    
+    public boolean getForceFormat() {
+      return isForceFormat;
+    }
+    
+    public void setForceFormat(boolean force) {
+      isForceFormat = force;
+    }
+    
+    public boolean getInteractiveFormat() {
+      return isInteractiveFormat;
+    }
+    
+    public void setInteractiveFormat(boolean interactive) {
+      isInteractiveFormat = interactive;
+    }
   }

   // Timeouts for communicating with DataNode for streaming writes/reads

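StartupOption now doubles as a small bag of per-invocation state: a clusterId, force/interactive flags for format, and a recovery force level that createRecoveryContext turns into a MetaRecoveryContext (returning null for any option other than RECOVER). A hedged sketch of how an argument parser might populate it; only the enum methods shown in the hunk are assumed, the parse loop itself is hypothetical (the real parsing lives in the NameNode's argument handling):

    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;

    // Hypothetical parsing sketch for "-recover [-force ...]".
    public final class RecoveryArgs {
      public static StartupOption parse(String[] args) {
        StartupOption opt = StartupOption.RECOVER;
        for (String a : args) {
          if (StartupOption.FORCE.getName().equalsIgnoreCase(a)) {
            // Each -force pre-answers one class of recovery prompt.
            opt.setForce(opt.getForce() + 1);
          }
        }
        return opt;
      }

      private RecoveryArgs() {}
    }

The selected option is then handed to createRecoveryContext(), which carries the accumulated force level into the recovery code path.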
+ 3 - 26
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java

@@ -17,13 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.common;

-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;

 import com.google.common.base.Joiner;

@@ -33,16 +27,16 @@ import com.google.common.base.Joiner;
  * TODO namespaceID should be long and computed as hash(address + port)
  */
 @InterfaceAudience.Private
-public class StorageInfo implements Writable {
+public class StorageInfo {
   public int   layoutVersion;   // layout version of the storage data
   public int   namespaceID;     // id of the file system
   public String clusterID;      // id of the cluster
   public long  cTime;           // creation time of the file system state
-  
+ 
   public StorageInfo () {
     this(0, 0, "", 0L);
   }
-  
+
   public StorageInfo(int layoutV, int nsID, String cid, long cT) {
     layoutVersion = layoutV;
     clusterID = cid;
@@ -83,23 +77,6 @@ public class StorageInfo implements Writable {
     namespaceID = from.namespaceID;
     cTime = from.cTime;
   }
-
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
-  public void write(DataOutput out) throws IOException {
-    out.writeInt(getLayoutVersion());
-    out.writeInt(getNamespaceID());
-    WritableUtils.writeString(out, clusterID);
-    out.writeLong(getCTime());
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    layoutVersion = in.readInt();
-    namespaceID = in.readInt();
-    clusterID = WritableUtils.readString(in);
-    cTime = in.readLong();
-  }
   
   public String toString() {
     StringBuilder sb = new StringBuilder();

+ 1 - 39
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java

@@ -17,14 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.common;

-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;

 /**
  * Base upgrade upgradeStatus class.
@@ -33,17 +26,11 @@ import org.apache.hadoop.io.WritableFactory;
  * Describes status of current upgrade.
  */
 @InterfaceAudience.Private
-public class UpgradeStatusReport implements Writable {
+public class UpgradeStatusReport {
   protected int version;
   protected short upgradeStatus;
   protected boolean finalized;

-  public UpgradeStatusReport() {
-    this.version = 0;
-    this.upgradeStatus = 0;
-    this.finalized = false;
-  }
-
   public UpgradeStatusReport(int version, short status, boolean isFinalized) {
     this.version = version;
     this.upgradeStatus = status;
@@ -98,29 +85,4 @@ public class UpgradeStatusReport implements Writable {
   public String toString() {
     return getStatusText(false);
   }
-
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (UpgradeStatusReport.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new UpgradeStatusReport(); }
-       });
-  }
-
-  /**
-   */
-  public void write(DataOutput out) throws IOException {
-    out.writeInt(this.version);
-    out.writeShort(this.upgradeStatus);
-  }
-
-  /**
-   */
-  public void readFields(DataInput in) throws IOException {
-    this.version = in.readInt();
-    this.upgradeStatus = in.readShort();
-  }
 }

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java

@@ -417,7 +417,9 @@ class BPServiceActor implements Runnable {
   
   
   HeartbeatResponse sendHeartBeat() throws IOException {
-    LOG.info("heartbeat: " + this);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Sending heartbeat from service actor: " + this);
+    }
     // reports number of failed volumes
     StorageReport[] report = { new StorageReport(bpRegistration.getStorageID(),
         false,

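The heartbeat log line moves from INFO to a guarded DEBUG call, which avoids both the log spam and the string concatenation when debug logging is off. The guard pattern in isolation (the class name here is just an illustration):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public final class HeartbeatLoggingExample {
      private static final Log LOG = LogFactory.getLog(HeartbeatLoggingExample.class);

      static void logHeartbeat(Object actor) {
        // isDebugEnabled() skips message construction entirely unless debug is on.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Sending heartbeat from service actor: " + actor);
        }
      }

      private HeartbeatLoggingExample() {}
    }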
+ 15 - 14
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java

@@ -213,19 +213,21 @@ public class BackupImage extends FSImage {
         LOG.debug("data:" + StringUtils.byteToHexString(data));
       }

-      FSEditLogLoader logLoader = new FSEditLogLoader(namesystem);
+      FSEditLogLoader logLoader =
+          new FSEditLogLoader(namesystem, lastAppliedTxId);
       int logVersion = storage.getLayoutVersion();
       backupInputStream.setBytes(data, logVersion);

-      long numLoaded = logLoader.loadEditRecords(logVersion, backupInputStream, 
-                                                true, lastAppliedTxId + 1);
-      if (numLoaded != numTxns) {
+      long numTxnsAdvanced = logLoader.loadEditRecords(logVersion, 
+          backupInputStream, true, lastAppliedTxId + 1, null);
+      if (numTxnsAdvanced != numTxns) {
         throw new IOException("Batch of txns starting at txnid " +
             firstTxId + " was supposed to contain " + numTxns +
-            " transactions but only was able to apply " + numLoaded);
+            " transactions, but we were only able to advance by " +
+            numTxnsAdvanced);
       }
-      lastAppliedTxId += numTxns;
-      
+      lastAppliedTxId = logLoader.getLastAppliedTxId();
+
       namesystem.dir.updateCountForINodeWithQuota(); // inefficient!
     } finally {
       backupInputStream.clear();
@@ -275,7 +277,7 @@ public class BackupImage extends FSImage {
           editStreams.add(s);
         }
       }
-      loadEdits(editStreams, namesystem);
+      loadEdits(editStreams, namesystem, null);
     }
     
     // now, need to load the in-progress file
@@ -309,12 +311,11 @@ public class BackupImage extends FSImage {
         LOG.info("Going to finish converging with remaining " + remainingTxns
             + " txns from in-progress stream " + stream);
         
-        FSEditLogLoader loader = new FSEditLogLoader(namesystem);
-        long numLoaded = loader.loadFSEdits(stream, lastAppliedTxId + 1);
-        lastAppliedTxId += numLoaded;
-        assert numLoaded == remainingTxns :
-          "expected to load " + remainingTxns + " but loaded " +
-          numLoaded + " from " + stream;
+        FSEditLogLoader loader =
+            new FSEditLogLoader(namesystem, lastAppliedTxId);
+        loader.loadFSEdits(stream, lastAppliedTxId + 1, null);
+        lastAppliedTxId = loader.getLastAppliedTxId();
+        assert lastAppliedTxId == getEditLog().getLastWrittenTxId();
       } finally {
         FSEditLog.closeAllStreams(editStreams);
       }

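The BackupImage hunks switch from "the caller adds up how many transactions were loaded" to "the loader itself tracks lastAppliedTxId and the caller reads it back", which removes error-prone arithmetic around partially applied batches. A minimal sketch of that ownership change with a hypothetical loader (not the FSEditLogLoader API; the real class takes the namesystem plus the starting txid and exposes getLastAppliedTxId()):

    // Hypothetical loader illustrating the counter-ownership pattern.
    final class TxLoader {
      private long lastAppliedTxId;

      TxLoader(long lastAppliedTxId) {
        this.lastAppliedTxId = lastAppliedTxId;
      }

      /** Applies txns at or past expectedStartTxId and returns how far we advanced. */
      long load(long[] txIds, long expectedStartTxId) {
        long advanced = 0;
        for (long txid : txIds) {
          if (txid < expectedStartTxId) {
            continue;                 // already applied, just skip
          }
          lastAppliedTxId = txid;     // the loader, not the caller, owns the counter
          advanced++;
        }
        return advanced;
      }

      long getLastAppliedTxId() {
        return lastAppliedTxId;
      }
    }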
+ 3 - 25
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java

@@ -17,15 +17,11 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;

-import java.io.DataInput;
-import java.io.DataOutput;
 import java.io.IOException;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.io.WritableUtils;

 import com.google.common.collect.ComparisonChain;

@@ -33,16 +29,15 @@ import com.google.common.collect.ComparisonChain;
  * A unique signature intended to identify checkpoint transactions.
  */
 @InterfaceAudience.Private
-public class CheckpointSignature extends StorageInfo 
-                      implements WritableComparable<CheckpointSignature> {
+public class CheckpointSignature extends StorageInfo
+    implements Comparable<CheckpointSignature> { 
+
   private static final String FIELD_SEPARATOR = ":";
   private static final int NUM_FIELDS = 7;
   String blockpoolID = "";
   long mostRecentCheckpointTxId;
   long curSegmentTxId;

-  public CheckpointSignature() {}
-
   CheckpointSignature(FSImage fsImage) {
     super(fsImage.getStorage());
     blockpoolID = fsImage.getBlockPoolID();
@@ -162,21 +157,4 @@ public class CheckpointSignature extends StorageInfo
             (int)(cTime ^ mostRecentCheckpointTxId ^ curSegmentTxId)
             ^ clusterID.hashCode() ^ blockpoolID.hashCode();
   }
-
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    WritableUtils.writeString(out, blockpoolID);
-    out.writeLong(mostRecentCheckpointTxId);
-    out.writeLong(curSegmentTxId);
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    blockpoolID = WritableUtils.readString(in);
-    mostRecentCheckpointTxId = in.readLong();
-    curSegmentTxId = in.readLong();
-  }
 }

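CheckpointSignature keeps its ordering but drops WritableComparable for plain java.lang.Comparable now that it no longer travels as a Writable. The Guava ComparisonChain idiom it relies on (already imported in the file) looks like this on a hypothetical two-field key; the real class compares more fields, but the shape is the same:

    import com.google.common.collect.ComparisonChain;

    // Hypothetical key class illustrating the compareTo idiom.
    final class SignatureKey implements Comparable<SignatureKey> {
      final String blockpoolId;
      final long curSegmentTxId;

      SignatureKey(String blockpoolId, long curSegmentTxId) {
        this.blockpoolId = blockpoolId;
        this.curSegmentTxId = curSegmentTxId;
      }

      @Override
      public int compareTo(SignatureKey other) {
        // Compares field by field, short-circuiting on the first difference.
        return ComparisonChain.start()
            .compare(blockpoolId, other.blockpoolId)
            .compare(curSegmentTxId, other.curSegmentTxId)
            .result();
      }
    }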
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java

@@ -292,6 +292,6 @@ class Checkpointer extends Daemon {
     }
     LOG.info("Checkpointer about to load edits from " +
         editsStreams.size() + " stream(s).");
-    dstImage.loadEdits(editsStreams, dstNamesystem);
+    dstImage.loadEdits(editsStreams, dstNamesystem, null);
   }
 }

+ 12 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java

@@ -70,21 +70,25 @@ class EditLogBackupInputStream extends EditLogInputStream {
     reader = null;
   }

-  @Override // JournalStream
+  @Override
   public String getName() {
     return address;
   }

-  @Override // JournalStream
-  public JournalType getType() {
-    return JournalType.BACKUP;
-  }
-
   @Override
-  public FSEditLogOp readOp() throws IOException {
+  protected FSEditLogOp nextOp() throws IOException {
     Preconditions.checkState(reader != null,
         "Must call setBytes() before readOp()");
-    return reader.readOp();
+    return reader.readOp(false);
+  }
+
+  @Override
+  protected FSEditLogOp nextValidOp() {
+    try {
+      return reader.readOp(true);
+    } catch (IOException e) {
+      throw new RuntimeException("got unexpected IOException " + e, e);
+    }
   }

   @Override

+ 11 - 25
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java

@@ -89,24 +89,6 @@ public class EditLogFileInputStream extends EditLogInputStream {
     this.isInProgress = isInProgress;
   }

-  /**
-   * Skip over a number of transactions. Subsequent calls to
-   * {@link EditLogFileInputStream#readOp()} will begin after these skipped
-   * transactions. If more transactions are requested to be skipped than remain
-   * in the edit log, all edit log ops in the log will be skipped and subsequent
-   * calls to {@link EditLogInputStream#readOp} will return null.
-   * 
-   * @param transactionsToSkip number of transactions to skip over.
-   * @throws IOException if there's an error while reading an operation
-   */
-  public void skipTransactions(long transactionsToSkip) throws IOException {
-    assert firstTxId != HdfsConstants.INVALID_TXID &&
-        lastTxId != HdfsConstants.INVALID_TXID;
-    for (long i = 0; i < transactionsToSkip; i++) {
-      reader.readOp();
-    }
-  }
-
   @Override
   public long getFirstTxId() throws IOException {
     return firstTxId;
@@ -117,19 +99,23 @@ public class EditLogFileInputStream extends EditLogInputStream {
     return lastTxId;
   }

-  @Override // JournalStream
+  @Override
   public String getName() {
     return file.getPath();
   }

-  @Override // JournalStream
-  public JournalType getType() {
-    return JournalType.FILE;
+  @Override
+  protected FSEditLogOp nextOp() throws IOException {
+    return reader.readOp(false);
   }
-
+  
   @Override
-  public FSEditLogOp readOp() throws IOException {
-    return reader.readOp();
+  protected FSEditLogOp nextValidOp() {
+    try {
+      return reader.readOp(true);
+    } catch (IOException e) {
+      return null;
+    }
   }

   @Override

+ 82 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java

@@ -34,7 +34,14 @@ import org.apache.hadoop.classification.InterfaceStability;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public abstract class EditLogInputStream implements JournalStream, Closeable {
+public abstract class EditLogInputStream implements Closeable {
+  private FSEditLogOp cachedOp = null; 
+  
+  /** 
+   * @return the name of the EditLogInputStream
+   */
+  public abstract String getName();
+  
   /** 
    * @return the first transaction which will be found in this stream
    */
@@ -57,8 +64,81 @@ public abstract class EditLogInputStream implements JournalStream, Closeable {
    * @return an operation from the stream or null if at end of stream
    * @throws IOException if there is an error reading from the stream
    */
-  public abstract FSEditLogOp readOp() throws IOException;
+  public FSEditLogOp readOp() throws IOException {
+    FSEditLogOp ret;
+    if (cachedOp != null) {
+      ret = cachedOp;
+      cachedOp = null;
+      return ret;
+    }
+    return nextOp();
+  }
+  /** 
+   * Position the stream so that a valid operation can be read from it with
+   * readOp().
+   * 
+   * This method can be used to skip over corrupted sections of edit logs.
+   */
+  public void resync() throws IOException {
+    if (cachedOp != null) {
+      return;
+    }
+    cachedOp = nextValidOp();
+  }
+  
+  /** 
+   * Get the next operation from the stream storage.
+   * 
+   * @return an operation from the stream or null if at end of stream
+   * @throws IOException if there is an error reading from the stream
+   */
+  protected abstract FSEditLogOp nextOp() throws IOException;
+  
+  /** 
+   * Get the next valid operation from the stream storage.
+   * 
+   * This is exactly like nextOp, except that we attempt to skip over damaged
+   * parts of the edit log
+   * 
+   * @return an operation from the stream or null if at end of stream
+   */
+  protected FSEditLogOp nextValidOp() {
+    // This is a trivial implementation which just assumes that any errors mean
+    // that there is nothing more of value in the log.  Subclasses that support
+    // error recovery will want to override this.
+    try {
+      return nextOp();
+    } catch (IOException e) {
+      return null;
+    }
+  }
+  
+  /** 
+   * Skip edit log operations up to a given transaction ID, or until the
+   * end of the edit log is reached.
+   *
+   * After this function returns, the next call to readOp will return either
+   * end-of-file (null) or a transaction with a txid equal to or higher than
+   * the one we asked for.
+   *
+   * @param txid    The transaction ID to read up until.
+   * @return        Returns true if we found a transaction ID greater than
+   *                or equal to 'txid' in the log.
+   */
+  public boolean skipUntil(long txid) throws IOException {
+    while (true) {
+      FSEditLogOp op = readOp();
+      if (op == null) {
+        return false;
+      }
+      if (op.getTransactionId() >= txid) {
+        cachedOp = op;
+        return true;
+      }
+    }
+  }
+  
   /** 
   /** 
    * Get the layout version of the data in the stream.
    * Get the layout version of the data in the stream.
    * @return the layout version of the ops in the stream.
    * @return the layout version of the ops in the stream.
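
A hedged usage sketch (not part of this patch) of the new caller-facing API above: skipUntil() positions the stream at the first transaction at or past a target txid, and resync() lets a caller continue past a damaged region instead of aborting. The replayFrom() name and the apply step are hypothetical; the loop mirrors what FSEditLogLoader does further down in this commit.

  static void replayFrom(EditLogInputStream stream, long startTxId)
      throws IOException {
    if (!stream.skipUntil(startTxId)) {
      return;                    // the log ends before startTxId
    }
    while (true) {
      FSEditLogOp op;
      try {
        op = stream.readOp();    // returns the op cached by skipUntil() first
      } catch (IOException e) {
        stream.resync();         // skip the damaged section and keep reading;
        continue;                // assumes nextValidOp() makes progress
      }
      if (op == null) {
        break;                   // clean end of stream
      }
      // applying 'op' to the namespace would happen here (outside this sketch)
    }
  }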

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 package org.apache.hadoop.hdfs.server.namenode;
 
 
 import java.io.IOException;
 import java.io.IOException;
+import java.io.Closeable;
 
 
 import static org.apache.hadoop.hdfs.server.common.Util.now;
 import static org.apache.hadoop.hdfs.server.common.Util.now;
 
 
@@ -30,7 +31,7 @@ import org.apache.hadoop.classification.InterfaceStability;
  */
  */
 @InterfaceAudience.Private
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 @InterfaceStability.Evolving
-public abstract class EditLogOutputStream {
+public abstract class EditLogOutputStream implements Closeable {
   // these are statistics counters
   // these are statistics counters
   private long numSync;        // number of sync(s) to disk
   private long numSync;        // number of sync(s) to disk
   private long totalTimeSync;  // total time to sync
   private long totalTimeSync;  // total time to sync
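
With EditLogOutputStream now implementing java.io.Closeable, generic cleanup code can close journals without knowing their concrete type. A small hedged sketch (not part of this patch) using the usual Hadoop IOUtils.cleanup idiom; the abandonStreams() helper is hypothetical:

  // assumes: import org.apache.commons.logging.Log;
  //          import org.apache.hadoop.io.IOUtils;
  static void abandonStreams(Log log, EditLogOutputStream... streams) {
    // Close every stream, logging rather than propagating IOExceptions.
    IOUtils.cleanup(log, streams);
  }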

+ 30 - 22
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -127,6 +127,14 @@ public class FSEditLog  {
   private Configuration conf;
   private Configuration conf;
   
   
   private List<URI> editsDirs;
   private List<URI> editsDirs;
+
+  private ThreadLocal<OpInstanceCache> cache =
+      new ThreadLocal<OpInstanceCache>() {
+    @Override
+    protected OpInstanceCache initialValue() {
+      return new OpInstanceCache();
+    }
+  };
   
   
   /**
   /**
    * The edit directories that are shared between primary and secondary.
    * The edit directories that are shared between primary and secondary.
@@ -596,7 +604,7 @@ public class FSEditLog  {
    * Records the block locations of the last block.
    * Records the block locations of the last block.
    */
    */
   public void logOpenFile(String path, INodeFileUnderConstruction newNode) {
   public void logOpenFile(String path, INodeFileUnderConstruction newNode) {
-    AddOp op = AddOp.getInstance()
+    AddOp op = AddOp.getInstance(cache.get())
       .setPath(path)
       .setPath(path)
       .setReplication(newNode.getReplication())
       .setReplication(newNode.getReplication())
       .setModificationTime(newNode.getModificationTime())
       .setModificationTime(newNode.getModificationTime())
@@ -614,7 +622,7 @@ public class FSEditLog  {
    * Add close lease record to edit log.
    * Add close lease record to edit log.
    */
    */
   public void logCloseFile(String path, INodeFile newNode) {
   public void logCloseFile(String path, INodeFile newNode) {
-    CloseOp op = CloseOp.getInstance()
+    CloseOp op = CloseOp.getInstance(cache.get())
       .setPath(path)
       .setPath(path)
       .setReplication(newNode.getReplication())
       .setReplication(newNode.getReplication())
       .setModificationTime(newNode.getModificationTime())
       .setModificationTime(newNode.getModificationTime())
@@ -627,7 +635,7 @@ public class FSEditLog  {
   }
   }
   
   
   public void logUpdateBlocks(String path, INodeFileUnderConstruction file) {
   public void logUpdateBlocks(String path, INodeFileUnderConstruction file) {
-    UpdateBlocksOp op = UpdateBlocksOp.getInstance()
+    UpdateBlocksOp op = UpdateBlocksOp.getInstance(cache.get())
       .setPath(path)
       .setPath(path)
       .setBlocks(file.getBlocks());
       .setBlocks(file.getBlocks());
     logEdit(op);
     logEdit(op);
@@ -637,7 +645,7 @@ public class FSEditLog  {
    * Add create directory record to edit log
    * Add create directory record to edit log
    */
    */
   public void logMkDir(String path, INode newNode) {
   public void logMkDir(String path, INode newNode) {
-    MkdirOp op = MkdirOp.getInstance()
+    MkdirOp op = MkdirOp.getInstance(cache.get())
       .setPath(path)
       .setPath(path)
       .setTimestamp(newNode.getModificationTime())
       .setTimestamp(newNode.getModificationTime())
       .setPermissionStatus(newNode.getPermissionStatus());
       .setPermissionStatus(newNode.getPermissionStatus());
@@ -649,7 +657,7 @@ public class FSEditLog  {
    * TODO: use String parameters until just before writing to disk
    * TODO: use String parameters until just before writing to disk
    */
    */
   void logRename(String src, String dst, long timestamp) {
   void logRename(String src, String dst, long timestamp) {
-    RenameOldOp op = RenameOldOp.getInstance()
+    RenameOldOp op = RenameOldOp.getInstance(cache.get())
       .setSource(src)
       .setSource(src)
       .setDestination(dst)
       .setDestination(dst)
       .setTimestamp(timestamp);
       .setTimestamp(timestamp);
@@ -660,7 +668,7 @@ public class FSEditLog  {
    * Add rename record to edit log
    * Add rename record to edit log
    */
    */
   void logRename(String src, String dst, long timestamp, Options.Rename... options) {
   void logRename(String src, String dst, long timestamp, Options.Rename... options) {
-    RenameOp op = RenameOp.getInstance()
+    RenameOp op = RenameOp.getInstance(cache.get())
       .setSource(src)
       .setSource(src)
       .setDestination(dst)
       .setDestination(dst)
       .setTimestamp(timestamp)
       .setTimestamp(timestamp)
@@ -672,7 +680,7 @@ public class FSEditLog  {
    * Add set replication record to edit log
    * Add set replication record to edit log
    */
    */
   void logSetReplication(String src, short replication) {
   void logSetReplication(String src, short replication) {
-    SetReplicationOp op = SetReplicationOp.getInstance()
+    SetReplicationOp op = SetReplicationOp.getInstance(cache.get())
       .setPath(src)
       .setPath(src)
       .setReplication(replication);
       .setReplication(replication);
     logEdit(op);
     logEdit(op);
@@ -684,7 +692,7 @@ public class FSEditLog  {
    * @param quota the directory size limit
    * @param quota the directory size limit
    */
    */
   void logSetQuota(String src, long nsQuota, long dsQuota) {
   void logSetQuota(String src, long nsQuota, long dsQuota) {
-    SetQuotaOp op = SetQuotaOp.getInstance()
+    SetQuotaOp op = SetQuotaOp.getInstance(cache.get())
       .setSource(src)
       .setSource(src)
       .setNSQuota(nsQuota)
       .setNSQuota(nsQuota)
       .setDSQuota(dsQuota);
       .setDSQuota(dsQuota);
@@ -693,7 +701,7 @@ public class FSEditLog  {
 
 
   /**  Add set permissions record to edit log */
   /**  Add set permissions record to edit log */
   void logSetPermissions(String src, FsPermission permissions) {
   void logSetPermissions(String src, FsPermission permissions) {
-    SetPermissionsOp op = SetPermissionsOp.getInstance()
+    SetPermissionsOp op = SetPermissionsOp.getInstance(cache.get())
       .setSource(src)
       .setSource(src)
       .setPermissions(permissions);
       .setPermissions(permissions);
     logEdit(op);
     logEdit(op);
@@ -701,7 +709,7 @@ public class FSEditLog  {
 
 
   /**  Add set owner record to edit log */
   /**  Add set owner record to edit log */
   void logSetOwner(String src, String username, String groupname) {
   void logSetOwner(String src, String username, String groupname) {
-    SetOwnerOp op = SetOwnerOp.getInstance()
+    SetOwnerOp op = SetOwnerOp.getInstance(cache.get())
       .setSource(src)
       .setSource(src)
       .setUser(username)
       .setUser(username)
       .setGroup(groupname);
       .setGroup(groupname);
@@ -712,7 +720,7 @@ public class FSEditLog  {
    * concat(trg,src..) log
    * concat(trg,src..) log
    */
    */
   void logConcat(String trg, String [] srcs, long timestamp) {
   void logConcat(String trg, String [] srcs, long timestamp) {
-    ConcatDeleteOp op = ConcatDeleteOp.getInstance()
+    ConcatDeleteOp op = ConcatDeleteOp.getInstance(cache.get())
       .setTarget(trg)
       .setTarget(trg)
       .setSources(srcs)
       .setSources(srcs)
       .setTimestamp(timestamp);
       .setTimestamp(timestamp);
@@ -723,7 +731,7 @@ public class FSEditLog  {
    * Add delete file record to edit log
    * Add delete file record to edit log
    */
    */
   void logDelete(String src, long timestamp) {
   void logDelete(String src, long timestamp) {
-    DeleteOp op = DeleteOp.getInstance()
+    DeleteOp op = DeleteOp.getInstance(cache.get())
       .setPath(src)
       .setPath(src)
       .setTimestamp(timestamp);
       .setTimestamp(timestamp);
     logEdit(op);
     logEdit(op);
@@ -733,7 +741,7 @@ public class FSEditLog  {
    * Add generation stamp record to edit log
    * Add generation stamp record to edit log
    */
    */
   void logGenerationStamp(long genstamp) {
   void logGenerationStamp(long genstamp) {
-    SetGenstampOp op = SetGenstampOp.getInstance()
+    SetGenstampOp op = SetGenstampOp.getInstance(cache.get())
       .setGenerationStamp(genstamp);
       .setGenerationStamp(genstamp);
     logEdit(op);
     logEdit(op);
   }
   }
@@ -742,7 +750,7 @@ public class FSEditLog  {
    * Add access time record to edit log
    * Add access time record to edit log
    */
    */
   void logTimes(String src, long mtime, long atime) {
   void logTimes(String src, long mtime, long atime) {
-    TimesOp op = TimesOp.getInstance()
+    TimesOp op = TimesOp.getInstance(cache.get())
       .setPath(src)
       .setPath(src)
       .setModificationTime(mtime)
       .setModificationTime(mtime)
       .setAccessTime(atime);
       .setAccessTime(atime);
@@ -754,7 +762,7 @@ public class FSEditLog  {
    */
    */
   void logSymlink(String path, String value, long mtime, 
   void logSymlink(String path, String value, long mtime, 
                   long atime, INodeSymlink node) {
                   long atime, INodeSymlink node) {
-    SymlinkOp op = SymlinkOp.getInstance()
+    SymlinkOp op = SymlinkOp.getInstance(cache.get())
       .setPath(path)
       .setPath(path)
       .setValue(value)
       .setValue(value)
       .setModificationTime(mtime)
       .setModificationTime(mtime)
@@ -770,7 +778,7 @@ public class FSEditLog  {
    */
    */
   void logGetDelegationToken(DelegationTokenIdentifier id,
   void logGetDelegationToken(DelegationTokenIdentifier id,
       long expiryTime) {
       long expiryTime) {
-    GetDelegationTokenOp op = GetDelegationTokenOp.getInstance()
+    GetDelegationTokenOp op = GetDelegationTokenOp.getInstance(cache.get())
       .setDelegationTokenIdentifier(id)
       .setDelegationTokenIdentifier(id)
       .setExpiryTime(expiryTime);
       .setExpiryTime(expiryTime);
     logEdit(op);
     logEdit(op);
@@ -778,26 +786,26 @@ public class FSEditLog  {
   
   
   void logRenewDelegationToken(DelegationTokenIdentifier id,
   void logRenewDelegationToken(DelegationTokenIdentifier id,
       long expiryTime) {
       long expiryTime) {
-    RenewDelegationTokenOp op = RenewDelegationTokenOp.getInstance()
+    RenewDelegationTokenOp op = RenewDelegationTokenOp.getInstance(cache.get())
       .setDelegationTokenIdentifier(id)
       .setDelegationTokenIdentifier(id)
       .setExpiryTime(expiryTime);
       .setExpiryTime(expiryTime);
     logEdit(op);
     logEdit(op);
   }
   }
   
   
   void logCancelDelegationToken(DelegationTokenIdentifier id) {
   void logCancelDelegationToken(DelegationTokenIdentifier id) {
-    CancelDelegationTokenOp op = CancelDelegationTokenOp.getInstance()
+    CancelDelegationTokenOp op = CancelDelegationTokenOp.getInstance(cache.get())
       .setDelegationTokenIdentifier(id);
       .setDelegationTokenIdentifier(id);
     logEdit(op);
     logEdit(op);
   }
   }
   
   
   void logUpdateMasterKey(DelegationKey key) {
   void logUpdateMasterKey(DelegationKey key) {
-    UpdateMasterKeyOp op = UpdateMasterKeyOp.getInstance()
+    UpdateMasterKeyOp op = UpdateMasterKeyOp.getInstance(cache.get())
       .setDelegationKey(key);
       .setDelegationKey(key);
     logEdit(op);
     logEdit(op);
   }
   }
 
 
   void logReassignLease(String leaseHolder, String src, String newHolder) {
   void logReassignLease(String leaseHolder, String src, String newHolder) {
-    ReassignLeaseOp op = ReassignLeaseOp.getInstance()
+    ReassignLeaseOp op = ReassignLeaseOp.getInstance(cache.get())
       .setLeaseHolder(leaseHolder)
       .setLeaseHolder(leaseHolder)
       .setPath(src)
       .setPath(src)
       .setNewHolder(newHolder);
       .setNewHolder(newHolder);
@@ -896,7 +904,7 @@ public class FSEditLog  {
     state = State.IN_SEGMENT;
     state = State.IN_SEGMENT;
 
 
     if (writeHeaderTxn) {
     if (writeHeaderTxn) {
-      logEdit(LogSegmentOp.getInstance(
+      logEdit(LogSegmentOp.getInstance(cache.get(),
           FSEditLogOpCodes.OP_START_LOG_SEGMENT));
           FSEditLogOpCodes.OP_START_LOG_SEGMENT));
       logSync();
       logSync();
     }
     }
@@ -912,7 +920,7 @@ public class FSEditLog  {
         "Bad state: %s", state);
         "Bad state: %s", state);
     
     
     if (writeEndTxn) {
     if (writeEndTxn) {
-      logEdit(LogSegmentOp.getInstance(
+      logEdit(LogSegmentOp.getInstance(cache.get(), 
           FSEditLogOpCodes.OP_END_LOG_SEGMENT));
           FSEditLogOpCodes.OP_END_LOG_SEGMENT));
       logSync();
       logSync();
     }
     }
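
The new ThreadLocal<OpInstanceCache> above means each logging thread reuses a single pre-allocated op object per opcode instead of allocating one per call, without sharing mutable instances across threads. A hedged illustration of the resulting reuse semantics (not part of this patch; the assert is only for demonstration):

  OpInstanceCache myCache = cache.get();           // this thread's private cache
  DeleteOp first  = DeleteOp.getInstance(myCache);
  DeleteOp second = DeleteOp.getInstance(myCache);
  assert first == second;  // same reusable object, so an op must be fully
                           // populated and handed to logEdit() before the
                           // thread builds its next operation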

+ 87 - 60
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -71,9 +71,11 @@ public class FSEditLogLoader {
   static final Log LOG = LogFactory.getLog(FSEditLogLoader.class.getName());
   static final Log LOG = LogFactory.getLog(FSEditLogLoader.class.getName());
   static long REPLAY_TRANSACTION_LOG_INTERVAL = 1000; // 1sec
   static long REPLAY_TRANSACTION_LOG_INTERVAL = 1000; // 1sec
   private final FSNamesystem fsNamesys;
   private final FSNamesystem fsNamesys;
-
-  public FSEditLogLoader(FSNamesystem fsNamesys) {
+  private long lastAppliedTxId;
+  
+  public FSEditLogLoader(FSNamesystem fsNamesys, long lastAppliedTxId) {
     this.fsNamesys = fsNamesys;
     this.fsNamesys = fsNamesys;
+    this.lastAppliedTxId = lastAppliedTxId;
   }
   }
   
   
   /**
   /**
@@ -81,32 +83,29 @@ public class FSEditLogLoader {
    * This is where we apply edits that we've been writing to disk all
    * This is where we apply edits that we've been writing to disk all
    * along.
    * along.
    */
    */
-  long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId)
-      throws IOException {
-    long numEdits = 0;
+  long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId,
+      MetaRecoveryContext recovery) throws IOException {
     int logVersion = edits.getVersion();
     int logVersion = edits.getVersion();
 
 
     fsNamesys.writeLock();
     fsNamesys.writeLock();
     try {
     try {
       long startTime = now();
       long startTime = now();
-      numEdits = loadEditRecords(logVersion, edits, false, 
-                                 expectedStartingTxId);
+      long numEdits = loadEditRecords(logVersion, edits, false, 
+                                 expectedStartingTxId, recovery);
       FSImage.LOG.info("Edits file " + edits.getName() 
       FSImage.LOG.info("Edits file " + edits.getName() 
           + " of size " + edits.length() + " edits # " + numEdits 
           + " of size " + edits.length() + " edits # " + numEdits 
           + " loaded in " + (now()-startTime)/1000 + " seconds.");
           + " loaded in " + (now()-startTime)/1000 + " seconds.");
+      return numEdits;
     } finally {
     } finally {
       edits.close();
       edits.close();
       fsNamesys.writeUnlock();
       fsNamesys.writeUnlock();
     }
     }
-    
-    return numEdits;
   }
   }
 
 
   long loadEditRecords(int logVersion, EditLogInputStream in, boolean closeOnExit,
   long loadEditRecords(int logVersion, EditLogInputStream in, boolean closeOnExit,
-                      long expectedStartingTxId)
-      throws IOException, EditLogInputException {
+                      long expectedStartingTxId, MetaRecoveryContext recovery)
+      throws IOException {
     FSDirectory fsDir = fsNamesys.dir;
     FSDirectory fsDir = fsNamesys.dir;
-    long numEdits = 0;
 
 
     EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts =
     EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts =
       new EnumMap<FSEditLogOpCodes, Holder<Integer>>(FSEditLogOpCodes.class);
       new EnumMap<FSEditLogOpCodes, Holder<Integer>>(FSEditLogOpCodes.class);
@@ -120,72 +119,99 @@ public class FSEditLogLoader {
 
 
     long recentOpcodeOffsets[] = new long[4];
     long recentOpcodeOffsets[] = new long[4];
     Arrays.fill(recentOpcodeOffsets, -1);
     Arrays.fill(recentOpcodeOffsets, -1);
-
-    long txId = expectedStartingTxId - 1;
+    
+    long expectedTxId = expectedStartingTxId;
+    long numEdits = 0;
     long lastTxId = in.getLastTxId();
     long lastTxId = in.getLastTxId();
     long numTxns = (lastTxId - expectedStartingTxId) + 1;
     long numTxns = (lastTxId - expectedStartingTxId) + 1;
-
     long lastLogTime = now();
     long lastLogTime = now();
 
 
     if (LOG.isDebugEnabled()) {
     if (LOG.isDebugEnabled()) {
       LOG.debug("edit log length: " + in.length() + ", start txid: "
       LOG.debug("edit log length: " + in.length() + ", start txid: "
           + expectedStartingTxId + ", last txid: " + lastTxId);
           + expectedStartingTxId + ", last txid: " + lastTxId);
     }
     }
-
     try {
     try {
-      try {
-        while (true) {
+      while (true) {
+        try {
           FSEditLogOp op;
           FSEditLogOp op;
           try {
           try {
-            if ((op = in.readOp()) == null) {
+            op = in.readOp();
+            if (op == null) {
               break;
               break;
             }
             }
-          } catch (IOException ioe) {
-            long badTxId = txId + 1; // because txId hasn't been incremented yet
-            String errorMessage = formatEditLogReplayError(in, recentOpcodeOffsets, badTxId);
+          } catch (Throwable e) {
+            // Handle a problem with our input
+            check203UpgradeFailure(logVersion, e);
+            String errorMessage =
+              formatEditLogReplayError(in, recentOpcodeOffsets, expectedTxId);
             FSImage.LOG.error(errorMessage);
             FSImage.LOG.error(errorMessage);
-            throw new EditLogInputException(errorMessage,
-                ioe, numEdits);
+            if (recovery == null) {
+               // We will only try to skip over problematic opcodes when in
+               // recovery mode.
+              throw new EditLogInputException(errorMessage, e, numEdits);
+            }
+            MetaRecoveryContext.editLogLoaderPrompt(
+                "We failed to read txId " + expectedTxId,
+                recovery, "skipping the bad section in the log");
+            in.resync();
+            continue;
           }
           }
           recentOpcodeOffsets[(int)(numEdits % recentOpcodeOffsets.length)] =
           recentOpcodeOffsets[(int)(numEdits % recentOpcodeOffsets.length)] =
             in.getPosition();
             in.getPosition();
           if (LayoutVersion.supports(Feature.STORED_TXIDS, logVersion)) {
           if (LayoutVersion.supports(Feature.STORED_TXIDS, logVersion)) {
-            long expectedTxId = txId + 1;
-            txId = op.txid;
-            if (txId != expectedTxId) {
-              throw new IOException("Expected transaction ID " +
-                  expectedTxId + " but got " + txId);
+            if (op.getTransactionId() > expectedTxId) { 
+              MetaRecoveryContext.editLogLoaderPrompt("There appears " +
+                  "to be a gap in the edit log.  We expected txid " +
+                  expectedTxId + ", but got txid " +
+                  op.getTransactionId() + ".", recovery, "ignoring missing " +
+                  " transaction IDs");
+            } else if (op.getTransactionId() < expectedTxId) { 
+              MetaRecoveryContext.editLogLoaderPrompt("There appears " +
+                  "to be an out-of-order edit in the edit log.  We " +
+                  "expected txid " + expectedTxId + ", but got txid " +
+                  op.getTransactionId() + ".", recovery,
+                  "skipping the out-of-order edit");
+              continue;
             }
             }
           }
           }
-
-          incrOpCount(op.opCode, opCounts);
           try {
           try {
             applyEditLogOp(op, fsDir, logVersion);
             applyEditLogOp(op, fsDir, logVersion);
-          } catch (Throwable t) {
-            // Catch Throwable because in the case of a truly corrupt edits log, any
-            // sort of error might be thrown (NumberFormat, NullPointer, EOF, etc.)
-            String errorMessage = formatEditLogReplayError(in, recentOpcodeOffsets, txId);
-            FSImage.LOG.error(errorMessage);
-            throw new IOException(errorMessage, t);
+          } catch (Throwable e) {
+            LOG.error("Encountered exception on operation " + op, e);
+            MetaRecoveryContext.editLogLoaderPrompt("Failed to " +
+             "apply edit log operation " + op + ": error " +
+             e.getMessage(), recovery, "applying edits");
+          }
+          // Now that the operation has been successfully decoded and
+          // applied, update our bookkeeping.
+          incrOpCount(op.opCode, opCounts);
+          if (op.hasTransactionId()) {
+            lastAppliedTxId = op.getTransactionId();
+            expectedTxId = lastAppliedTxId + 1;
+          } else {
+            expectedTxId = lastAppliedTxId = expectedStartingTxId;
           }
           }
-
           // log progress
           // log progress
-          if (now() - lastLogTime > REPLAY_TRANSACTION_LOG_INTERVAL) {
-            int percent = Math.round((float) txId / numTxns * 100);
-            LOG.info("replaying edit log: " + txId + "/" + numTxns
-                + " transactions completed. (" + percent + "%)");
-            lastLogTime = now();
+          if (LayoutVersion.supports(Feature.STORED_TXIDS, logVersion)) {
+            long now = now();
+            if (now - lastLogTime > REPLAY_TRANSACTION_LOG_INTERVAL) {
+              int percent = Math.round((float)lastAppliedTxId / numTxns * 100);
+              LOG.info("replaying edit log: " + lastAppliedTxId + "/" + numTxns
+                  + " transactions completed. (" + percent + "%)");
+              lastLogTime = now;
+            }
           }
           }
-
           numEdits++;
           numEdits++;
+        } catch (MetaRecoveryContext.RequestStopException e) {
+          MetaRecoveryContext.LOG.warn("Stopped reading edit log at " +
+              in.getPosition() + "/"  + in.length());
+          break;
         }
         }
-      } catch (IOException ex) {
-        check203UpgradeFailure(logVersion, ex);
-      } finally {
-        if(closeOnExit)
-          in.close();
       }
       }
     } finally {
     } finally {
+      if(closeOnExit) {
+        in.close();
+      }
       fsDir.writeUnlock();
       fsDir.writeUnlock();
       fsNamesys.writeUnlock();
       fsNamesys.writeUnlock();
 
 
@@ -472,7 +498,7 @@ public class FSEditLogLoader {
       long recentOpcodeOffsets[], long txid) {
       long recentOpcodeOffsets[], long txid) {
     StringBuilder sb = new StringBuilder();
     StringBuilder sb = new StringBuilder();
     sb.append("Error replaying edit log at offset " + in.getPosition());
     sb.append("Error replaying edit log at offset " + in.getPosition());
-    sb.append(" on transaction ID ").append(txid);
+    sb.append(".  Expected transaction ID was ").append(txid);
     if (recentOpcodeOffsets[0] != -1) {
     if (recentOpcodeOffsets[0] != -1) {
       Arrays.sort(recentOpcodeOffsets);
       Arrays.sort(recentOpcodeOffsets);
       sb.append("\nRecent opcode offsets:");
       sb.append("\nRecent opcode offsets:");
@@ -519,7 +545,7 @@ public class FSEditLogLoader {
       if (oldBlock.getBlockId() != newBlock.getBlockId() ||
       if (oldBlock.getBlockId() != newBlock.getBlockId() ||
           (oldBlock.getGenerationStamp() != newBlock.getGenerationStamp() && 
           (oldBlock.getGenerationStamp() != newBlock.getGenerationStamp() && 
               !(isGenStampUpdate && isLastBlock))) {
               !(isGenStampUpdate && isLastBlock))) {
-        throw new IOException("Mismatched block IDs or generation stamps, " + 
+        throw new IOException("Mismatched block IDs or generation stamps, " +
             "attempting to replace block " + oldBlock + " with " + newBlock +
             "attempting to replace block " + oldBlock + " with " + newBlock +
             " as block # " + i + "/" + newBlocks.length + " of " +
             " as block # " + i + "/" + newBlocks.length + " of " +
             path);
             path);
@@ -605,7 +631,7 @@ public class FSEditLogLoader {
    * Throw appropriate exception during upgrade from 203, when editlog loading
    * Throw appropriate exception during upgrade from 203, when editlog loading
    * could fail due to opcode conflicts.
    * could fail due to opcode conflicts.
    */
    */
-  private void check203UpgradeFailure(int logVersion, IOException ex)
+  private void check203UpgradeFailure(int logVersion, Throwable e)
       throws IOException {
       throws IOException {
     // 0.20.203 version version has conflicting opcodes with the later releases.
     // 0.20.203 version version has conflicting opcodes with the later releases.
     // The editlog must be emptied by restarting the namenode, before proceeding
     // The editlog must be emptied by restarting the namenode, before proceeding
@@ -616,9 +642,7 @@ public class FSEditLogLoader {
           + logVersion + " from release 0.20.203. Please go back to the old "
           + logVersion + " from release 0.20.203. Please go back to the old "
           + " release and restart the namenode. This empties the editlog "
           + " release and restart the namenode. This empties the editlog "
           + " and saves the namespace. Resume the upgrade after this step.";
           + " and saves the namespace. Resume the upgrade after this step.";
-      throw new IOException(msg, ex);
-    } else {
-      throw ex;
+      throw new IOException(msg, e);
     }
     }
   }
   }
   
   
@@ -643,14 +667,14 @@ public class FSEditLogLoader {
           break;
           break;
         }
         }
         if (firstTxId == HdfsConstants.INVALID_TXID) {
         if (firstTxId == HdfsConstants.INVALID_TXID) {
-          firstTxId = op.txid;
+          firstTxId = op.getTransactionId();
         }
         }
         if (lastTxId == HdfsConstants.INVALID_TXID
         if (lastTxId == HdfsConstants.INVALID_TXID
-            || op.txid == lastTxId + 1) {
-          lastTxId = op.txid;
+            || op.getTransactionId() == lastTxId + 1) {
+          lastTxId = op.getTransactionId();
         } else {
         } else {
-          FSImage.LOG.error("Out of order txid found. Found " + op.txid 
-                            + ", expected " + (lastTxId + 1));
+          FSImage.LOG.error("Out of order txid found. Found " +
+            op.getTransactionId() + ", expected " + (lastTxId + 1));
           break;
           break;
         }
         }
         numValid++;
         numValid++;
@@ -743,4 +767,7 @@ public class FSEditLogLoader {
     }
     }
   }
   }
 
 
+  public long getLastAppliedTxId() {
+    return lastAppliedTxId;
+  }
 }
 }
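
A hedged caller-side sketch (not part of this patch) showing how the loader is driven with its new signature; 'namesystem', 'stream' and 'recovery' are assumed to be supplied by the surrounding code, and passing null for 'recovery' keeps the old strict behaviour in which corruption raises EditLogInputException instead of being skipped:

  static long replay(FSNamesystem namesystem, EditLogInputStream stream,
      long lastAppliedTxId, MetaRecoveryContext recovery) throws IOException {
    FSEditLogLoader loader = new FSEditLogLoader(namesystem, lastAppliedTxId);
    // The return value is the number of edits applied from this stream.
    loader.loadFSEdits(stream, lastAppliedTxId + 1, recovery);
    // The loader, not the caller, now tracks how far replay actually got.
    return loader.getLastAppliedTxId();
  }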

+ 137 - 117
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java

@@ -33,6 +33,8 @@ import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.util.PureJavaCrc32;
 import org.apache.hadoop.util.PureJavaCrc32;
@@ -54,6 +56,8 @@ import org.xml.sax.ContentHandler;
 import org.xml.sax.SAXException;
 import org.xml.sax.SAXException;
 import org.xml.sax.helpers.AttributesImpl;
 import org.xml.sax.helpers.AttributesImpl;
 
 
+import com.google.common.base.Preconditions;
+
 import java.io.DataInput;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.DataOutput;
 import java.io.DataInputStream;
 import java.io.DataInputStream;
@@ -74,42 +78,44 @@ public abstract class FSEditLogOp {
 
 
 
 
   @SuppressWarnings("deprecation")
   @SuppressWarnings("deprecation")
-  private static ThreadLocal<EnumMap<FSEditLogOpCodes, FSEditLogOp>> opInstances =
-    new ThreadLocal<EnumMap<FSEditLogOpCodes, FSEditLogOp>>() {
-      @Override
-      protected EnumMap<FSEditLogOpCodes, FSEditLogOp> initialValue() {
-        EnumMap<FSEditLogOpCodes, FSEditLogOp> instances 
-          = new EnumMap<FSEditLogOpCodes, FSEditLogOp>(FSEditLogOpCodes.class);
-        instances.put(OP_ADD, new AddOp());
-        instances.put(OP_CLOSE, new CloseOp());
-        instances.put(OP_SET_REPLICATION, new SetReplicationOp());
-        instances.put(OP_CONCAT_DELETE, new ConcatDeleteOp());
-        instances.put(OP_RENAME_OLD, new RenameOldOp());
-        instances.put(OP_DELETE, new DeleteOp());
-        instances.put(OP_MKDIR, new MkdirOp());
-        instances.put(OP_SET_GENSTAMP, new SetGenstampOp());
-        instances.put(OP_SET_PERMISSIONS, new SetPermissionsOp());
-        instances.put(OP_SET_OWNER, new SetOwnerOp());
-        instances.put(OP_SET_NS_QUOTA, new SetNSQuotaOp());
-        instances.put(OP_CLEAR_NS_QUOTA, new ClearNSQuotaOp());
-        instances.put(OP_SET_QUOTA, new SetQuotaOp());
-        instances.put(OP_TIMES, new TimesOp());
-        instances.put(OP_SYMLINK, new SymlinkOp());
-        instances.put(OP_RENAME, new RenameOp());
-        instances.put(OP_REASSIGN_LEASE, new ReassignLeaseOp());
-        instances.put(OP_GET_DELEGATION_TOKEN, new GetDelegationTokenOp());
-        instances.put(OP_RENEW_DELEGATION_TOKEN, new RenewDelegationTokenOp());
-        instances.put(OP_CANCEL_DELEGATION_TOKEN, 
-                      new CancelDelegationTokenOp());
-        instances.put(OP_UPDATE_MASTER_KEY, new UpdateMasterKeyOp());
-        instances.put(OP_START_LOG_SEGMENT,
-                      new LogSegmentOp(OP_START_LOG_SEGMENT));
-        instances.put(OP_END_LOG_SEGMENT,
-                      new LogSegmentOp(OP_END_LOG_SEGMENT));
-        instances.put(OP_UPDATE_BLOCKS, new UpdateBlocksOp());
-        return instances;
-      }
-  };
+  final public static class OpInstanceCache {
+    private EnumMap<FSEditLogOpCodes, FSEditLogOp> inst = 
+        new EnumMap<FSEditLogOpCodes, FSEditLogOp>(FSEditLogOpCodes.class);
+    
+    public OpInstanceCache() {
+      inst.put(OP_ADD, new AddOp());
+      inst.put(OP_CLOSE, new CloseOp());
+      inst.put(OP_SET_REPLICATION, new SetReplicationOp());
+      inst.put(OP_CONCAT_DELETE, new ConcatDeleteOp());
+      inst.put(OP_RENAME_OLD, new RenameOldOp());
+      inst.put(OP_DELETE, new DeleteOp());
+      inst.put(OP_MKDIR, new MkdirOp());
+      inst.put(OP_SET_GENSTAMP, new SetGenstampOp());
+      inst.put(OP_SET_PERMISSIONS, new SetPermissionsOp());
+      inst.put(OP_SET_OWNER, new SetOwnerOp());
+      inst.put(OP_SET_NS_QUOTA, new SetNSQuotaOp());
+      inst.put(OP_CLEAR_NS_QUOTA, new ClearNSQuotaOp());
+      inst.put(OP_SET_QUOTA, new SetQuotaOp());
+      inst.put(OP_TIMES, new TimesOp());
+      inst.put(OP_SYMLINK, new SymlinkOp());
+      inst.put(OP_RENAME, new RenameOp());
+      inst.put(OP_REASSIGN_LEASE, new ReassignLeaseOp());
+      inst.put(OP_GET_DELEGATION_TOKEN, new GetDelegationTokenOp());
+      inst.put(OP_RENEW_DELEGATION_TOKEN, new RenewDelegationTokenOp());
+      inst.put(OP_CANCEL_DELEGATION_TOKEN, 
+                    new CancelDelegationTokenOp());
+      inst.put(OP_UPDATE_MASTER_KEY, new UpdateMasterKeyOp());
+      inst.put(OP_START_LOG_SEGMENT,
+                    new LogSegmentOp(OP_START_LOG_SEGMENT));
+      inst.put(OP_END_LOG_SEGMENT,
+                    new LogSegmentOp(OP_END_LOG_SEGMENT));
+      inst.put(OP_UPDATE_BLOCKS, new UpdateBlocksOp());
+    }
+    
+    public FSEditLogOp get(FSEditLogOpCodes opcode) {
+      return inst.get(opcode);
+    }
+  }
 
 
   /**
   /**
    * Constructor for an EditLog Op. EditLog ops cannot be constructed
    * Constructor for an EditLog Op. EditLog ops cannot be constructed
@@ -117,13 +123,22 @@ public abstract class FSEditLogOp {
    */
    */
   private FSEditLogOp(FSEditLogOpCodes opCode) {
   private FSEditLogOp(FSEditLogOpCodes opCode) {
     this.opCode = opCode;
     this.opCode = opCode;
-    this.txid = 0;
+    this.txid = HdfsConstants.INVALID_TXID;
   }
   }
 
 
   public long getTransactionId() {
   public long getTransactionId() {
+    Preconditions.checkState(txid != HdfsConstants.INVALID_TXID);
     return txid;
     return txid;
   }
   }
 
 
+  public String getTransactionIdStr() {
+    return (txid == HdfsConstants.INVALID_TXID) ? "(none)" : "" + txid;
+  }
+  
+  public boolean hasTransactionId() {
+    return (txid != HdfsConstants.INVALID_TXID);
+  }
+
   public void setTransactionId(long txid) {
   public void setTransactionId(long txid) {
     this.txid = txid;
     this.txid = txid;
   }
   }
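
Because the txid field now defaults to HdfsConstants.INVALID_TXID and getTransactionId() checks that precondition, callers that may see ops from pre-txid edit log layouts should guard with hasTransactionId(). A hedged sketch (not part of this patch; describe() is a hypothetical helper):

  static String describe(FSEditLogOp op) {
    if (op.hasTransactionId()) {
      return "op " + op.opCode + " at txid " + op.getTransactionId();
    }
    // Older layouts carry no txid; getTransactionId() would fail its
    // precondition here, so use the display form ("(none)") instead.
    return "op " + op.opCode + " at txid " + op.getTransactionIdStr();
  }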
@@ -373,8 +388,8 @@ public abstract class FSEditLogOp {
       super(OP_ADD);
       super(OP_ADD);
     }
     }
 
 
-    static AddOp getInstance() {
-      return (AddOp)opInstances.get().get(OP_ADD);
+    static AddOp getInstance(OpInstanceCache cache) {
+      return (AddOp)cache.get(OP_ADD);
     }
     }
 
 
     public boolean shouldCompleteLastBlock() {
     public boolean shouldCompleteLastBlock() {
@@ -395,8 +410,8 @@ public abstract class FSEditLogOp {
       super(OP_CLOSE);
       super(OP_CLOSE);
     }
     }
 
 
-    static CloseOp getInstance() {
-      return (CloseOp)opInstances.get().get(OP_CLOSE);
+    static CloseOp getInstance(OpInstanceCache cache) {
+      return (CloseOp)cache.get(OP_CLOSE);
     }
     }
 
 
     public boolean shouldCompleteLastBlock() {
     public boolean shouldCompleteLastBlock() {
@@ -420,9 +435,8 @@ public abstract class FSEditLogOp {
       super(OP_UPDATE_BLOCKS);
       super(OP_UPDATE_BLOCKS);
     }
     }
     
     
-    static UpdateBlocksOp getInstance() {
-      return (UpdateBlocksOp)opInstances.get()
-        .get(OP_UPDATE_BLOCKS);
+    static UpdateBlocksOp getInstance(OpInstanceCache cache) {
+      return (UpdateBlocksOp)cache.get(OP_UPDATE_BLOCKS);
     }
     }
     
     
     
     
@@ -500,9 +514,8 @@ public abstract class FSEditLogOp {
       super(OP_SET_REPLICATION);
       super(OP_SET_REPLICATION);
     }
     }
 
 
-    static SetReplicationOp getInstance() {
-      return (SetReplicationOp)opInstances.get()
-        .get(OP_SET_REPLICATION);
+    static SetReplicationOp getInstance(OpInstanceCache cache) {
+      return (SetReplicationOp)cache.get(OP_SET_REPLICATION);
     }
     }
 
 
     SetReplicationOp setPath(String path) {
     SetReplicationOp setPath(String path) {
@@ -571,9 +584,8 @@ public abstract class FSEditLogOp {
       super(OP_CONCAT_DELETE);
       super(OP_CONCAT_DELETE);
     }
     }
 
 
-    static ConcatDeleteOp getInstance() {
-      return (ConcatDeleteOp)opInstances.get()
-        .get(OP_CONCAT_DELETE);
+    static ConcatDeleteOp getInstance(OpInstanceCache cache) {
+      return (ConcatDeleteOp)cache.get(OP_CONCAT_DELETE);
     }
     }
 
 
     ConcatDeleteOp setTarget(String trg) {
     ConcatDeleteOp setTarget(String trg) {
@@ -697,9 +709,8 @@ public abstract class FSEditLogOp {
       super(OP_RENAME_OLD);
       super(OP_RENAME_OLD);
     }
     }
 
 
-    static RenameOldOp getInstance() {
-      return (RenameOldOp)opInstances.get()
-        .get(OP_RENAME_OLD);
+    static RenameOldOp getInstance(OpInstanceCache cache) {
+      return (RenameOldOp)cache.get(OP_RENAME_OLD);
     }
     }
 
 
     RenameOldOp setSource(String src) {
     RenameOldOp setSource(String src) {
@@ -790,9 +801,8 @@ public abstract class FSEditLogOp {
       super(OP_DELETE);
       super(OP_DELETE);
     }
     }
 
 
-    static DeleteOp getInstance() {
-      return (DeleteOp)opInstances.get()
-        .get(OP_DELETE);
+    static DeleteOp getInstance(OpInstanceCache cache) {
+      return (DeleteOp)cache.get(OP_DELETE);
     }
     }
 
 
     DeleteOp setPath(String path) {
     DeleteOp setPath(String path) {
@@ -872,9 +882,8 @@ public abstract class FSEditLogOp {
       super(OP_MKDIR);
       super(OP_MKDIR);
     }
     }
     
     
-    static MkdirOp getInstance() {
-      return (MkdirOp)opInstances.get()
-        .get(OP_MKDIR);
+    static MkdirOp getInstance(OpInstanceCache cache) {
+      return (MkdirOp)cache.get(OP_MKDIR);
     }
     }
 
 
     MkdirOp setPath(String path) {
     MkdirOp setPath(String path) {
@@ -977,9 +986,8 @@ public abstract class FSEditLogOp {
       super(OP_SET_GENSTAMP);
       super(OP_SET_GENSTAMP);
     }
     }
 
 
-    static SetGenstampOp getInstance() {
-      return (SetGenstampOp)opInstances.get()
-        .get(OP_SET_GENSTAMP);
+    static SetGenstampOp getInstance(OpInstanceCache cache) {
+      return (SetGenstampOp)cache.get(OP_SET_GENSTAMP);
     }
     }
 
 
     SetGenstampOp setGenerationStamp(long genStamp) {
     SetGenstampOp setGenerationStamp(long genStamp) {
@@ -1031,9 +1039,8 @@ public abstract class FSEditLogOp {
       super(OP_SET_PERMISSIONS);
       super(OP_SET_PERMISSIONS);
     }
     }
 
 
-    static SetPermissionsOp getInstance() {
-      return (SetPermissionsOp)opInstances.get()
-        .get(OP_SET_PERMISSIONS);
+    static SetPermissionsOp getInstance(OpInstanceCache cache) {
+      return (SetPermissionsOp)cache.get(OP_SET_PERMISSIONS);
     }
     }
 
 
     SetPermissionsOp setSource(String src) {
     SetPermissionsOp setSource(String src) {
@@ -1098,9 +1105,8 @@ public abstract class FSEditLogOp {
       super(OP_SET_OWNER);
       super(OP_SET_OWNER);
     }
     }
 
 
-    static SetOwnerOp getInstance() {
-      return (SetOwnerOp)opInstances.get()
-        .get(OP_SET_OWNER);
+    static SetOwnerOp getInstance(OpInstanceCache cache) {
+      return (SetOwnerOp)cache.get(OP_SET_OWNER);
     }
     }
 
 
     SetOwnerOp setSource(String src) {
     SetOwnerOp setSource(String src) {
@@ -1179,9 +1185,8 @@ public abstract class FSEditLogOp {
       super(OP_SET_NS_QUOTA);
       super(OP_SET_NS_QUOTA);
     }
     }
 
 
-    static SetNSQuotaOp getInstance() {
-      return (SetNSQuotaOp)opInstances.get()
-        .get(OP_SET_NS_QUOTA);
+    static SetNSQuotaOp getInstance(OpInstanceCache cache) {
+      return (SetNSQuotaOp)cache.get(OP_SET_NS_QUOTA);
     }
     }
 
 
     @Override
     @Override
@@ -1232,9 +1237,8 @@ public abstract class FSEditLogOp {
       super(OP_CLEAR_NS_QUOTA);
       super(OP_CLEAR_NS_QUOTA);
     }
     }
 
 
-    static ClearNSQuotaOp getInstance() {
-      return (ClearNSQuotaOp)opInstances.get()
-        .get(OP_CLEAR_NS_QUOTA);
+    static ClearNSQuotaOp getInstance(OpInstanceCache cache) {
+      return (ClearNSQuotaOp)cache.get(OP_CLEAR_NS_QUOTA);
     }
     }
 
 
     @Override
     @Override
@@ -1281,9 +1285,8 @@ public abstract class FSEditLogOp {
       super(OP_SET_QUOTA);
       super(OP_SET_QUOTA);
     }
     }
 
 
-    static SetQuotaOp getInstance() {
-      return (SetQuotaOp)opInstances.get()
-        .get(OP_SET_QUOTA);
+    static SetQuotaOp getInstance(OpInstanceCache cache) {
+      return (SetQuotaOp)cache.get(OP_SET_QUOTA);
     }
     }
 
 
     SetQuotaOp setSource(String src) {
     SetQuotaOp setSource(String src) {
@@ -1360,9 +1363,8 @@ public abstract class FSEditLogOp {
       super(OP_TIMES);
       super(OP_TIMES);
     }
     }
 
 
-    static TimesOp getInstance() {
-      return (TimesOp)opInstances.get()
-        .get(OP_TIMES);
+    static TimesOp getInstance(OpInstanceCache cache) {
+      return (TimesOp)cache.get(OP_TIMES);
     }
     }
 
 
     TimesOp setPath(String path) {
     TimesOp setPath(String path) {
@@ -1458,9 +1460,8 @@ public abstract class FSEditLogOp {
       super(OP_SYMLINK);
       super(OP_SYMLINK);
     }
     }
 
 
-    static SymlinkOp getInstance() {
-      return (SymlinkOp)opInstances.get()
-        .get(OP_SYMLINK);
+    static SymlinkOp getInstance(OpInstanceCache cache) {
+      return (SymlinkOp)cache.get(OP_SYMLINK);
     }
     }
 
 
     SymlinkOp setPath(String path) {
     SymlinkOp setPath(String path) {
@@ -1579,9 +1580,8 @@ public abstract class FSEditLogOp {
       super(OP_RENAME);
       super(OP_RENAME);
     }
     }
 
 
-    static RenameOp getInstance() {
-      return (RenameOp)opInstances.get()
-        .get(OP_RENAME);
+    static RenameOp getInstance(OpInstanceCache cache) {
+      return (RenameOp)cache.get(OP_RENAME);
     }
     }
 
 
     RenameOp setSource(String src) {
     RenameOp setSource(String src) {
@@ -1723,9 +1723,8 @@ public abstract class FSEditLogOp {
       super(OP_REASSIGN_LEASE);
       super(OP_REASSIGN_LEASE);
     }
     }
 
 
-    static ReassignLeaseOp getInstance() {
-      return (ReassignLeaseOp)opInstances.get()
-        .get(OP_REASSIGN_LEASE);
+    static ReassignLeaseOp getInstance(OpInstanceCache cache) {
+      return (ReassignLeaseOp)cache.get(OP_REASSIGN_LEASE);
     }
     }
 
 
     ReassignLeaseOp setLeaseHolder(String leaseHolder) {
     ReassignLeaseOp setLeaseHolder(String leaseHolder) {
@@ -1798,9 +1797,8 @@ public abstract class FSEditLogOp {
       super(OP_GET_DELEGATION_TOKEN);
       super(OP_GET_DELEGATION_TOKEN);
     }
     }
 
 
-    static GetDelegationTokenOp getInstance() {
-      return (GetDelegationTokenOp)opInstances.get()
-        .get(OP_GET_DELEGATION_TOKEN);
+    static GetDelegationTokenOp getInstance(OpInstanceCache cache) {
+      return (GetDelegationTokenOp)cache.get(OP_GET_DELEGATION_TOKEN);
     }
     }
 
 
     GetDelegationTokenOp setDelegationTokenIdentifier(
     GetDelegationTokenOp setDelegationTokenIdentifier(
@@ -1870,9 +1868,8 @@ public abstract class FSEditLogOp {
       super(OP_RENEW_DELEGATION_TOKEN);
       super(OP_RENEW_DELEGATION_TOKEN);
     }
     }
 
 
-    static RenewDelegationTokenOp getInstance() {
-      return (RenewDelegationTokenOp)opInstances.get()
-          .get(OP_RENEW_DELEGATION_TOKEN);
+    static RenewDelegationTokenOp getInstance(OpInstanceCache cache) {
+      return (RenewDelegationTokenOp)cache.get(OP_RENEW_DELEGATION_TOKEN);
     }
     }
 
 
     RenewDelegationTokenOp setDelegationTokenIdentifier(
     RenewDelegationTokenOp setDelegationTokenIdentifier(
@@ -1941,9 +1938,8 @@ public abstract class FSEditLogOp {
       super(OP_CANCEL_DELEGATION_TOKEN);
       super(OP_CANCEL_DELEGATION_TOKEN);
     }
     }
 
 
-    static CancelDelegationTokenOp getInstance() {
-      return (CancelDelegationTokenOp)opInstances.get()
-          .get(OP_CANCEL_DELEGATION_TOKEN);
+    static CancelDelegationTokenOp getInstance(OpInstanceCache cache) {
+      return (CancelDelegationTokenOp)cache.get(OP_CANCEL_DELEGATION_TOKEN);
     }
     }
 
 
     CancelDelegationTokenOp setDelegationTokenIdentifier(
     CancelDelegationTokenOp setDelegationTokenIdentifier(
@@ -1996,9 +1992,8 @@ public abstract class FSEditLogOp {
       super(OP_UPDATE_MASTER_KEY);
       super(OP_UPDATE_MASTER_KEY);
     }
     }
 
 
-    static UpdateMasterKeyOp getInstance() {
-      return (UpdateMasterKeyOp)opInstances.get()
-          .get(OP_UPDATE_MASTER_KEY);
+    static UpdateMasterKeyOp getInstance(OpInstanceCache cache) {
+      return (UpdateMasterKeyOp)cache.get(OP_UPDATE_MASTER_KEY);
     }
     }
 
 
     UpdateMasterKeyOp setDelegationKey(DelegationKey key) {
     UpdateMasterKeyOp setDelegationKey(DelegationKey key) {
@@ -2050,8 +2045,9 @@ public abstract class FSEditLogOp {
              code == OP_END_LOG_SEGMENT : "Bad op: " + code;
              code == OP_END_LOG_SEGMENT : "Bad op: " + code;
     }
     }
 
 
-    static LogSegmentOp getInstance(FSEditLogOpCodes code) {
-      return (LogSegmentOp)opInstances.get().get(code);
+    static LogSegmentOp getInstance(OpInstanceCache cache,
+        FSEditLogOpCodes code) {
+      return (LogSegmentOp)cache.get(code);
     }
     }
 
 
     public void readFields(DataInputStream in, int logVersion)
     public void readFields(DataInputStream in, int logVersion)
@@ -2091,8 +2087,8 @@ public abstract class FSEditLogOp {
       super(OP_INVALID);
       super(OP_INVALID);
     }
     }
 
 
-    static InvalidOp getInstance() {
-      return (InvalidOp)opInstances.get().get(OP_INVALID);
+    static InvalidOp getInstance(OpInstanceCache cache) {
+      return (InvalidOp)cache.get(OP_INVALID);
     }
     }
 
 
     @Override
     @Override
@@ -2207,6 +2203,7 @@ public abstract class FSEditLogOp {
     private final DataInputStream in;
     private final DataInputStream in;
     private final int logVersion;
     private final int logVersion;
     private final Checksum checksum;
     private final Checksum checksum;
+    private final OpInstanceCache cache;
 
 
     /**
     /**
      * Construct the reader
      * Construct the reader
@@ -2228,6 +2225,7 @@ public abstract class FSEditLogOp {
       } else {
       } else {
         this.in = in;
         this.in = in;
       }
       }
+      this.cache = new OpInstanceCache();
     }
     }
 
 
     /**
     /**
@@ -2236,16 +2234,42 @@ public abstract class FSEditLogOp {
      * Note that the objects returned from this method may be re-used by future
      * Note that the objects returned from this method may be re-used by future
      * calls to the same method.
      * calls to the same method.
      * 
      * 
+     * @param skipBrokenEdits    If true, attempt to skip over damaged parts of
+     * the input stream, rather than throwing an IOException
      * @return the operation read from the stream, or null at the end of the file
      * @return the operation read from the stream, or null at the end of the file
      * @throws IOException on error.
      * @throws IOException on error.
      */
      */
-    public FSEditLogOp readOp() throws IOException {
+    public FSEditLogOp readOp(boolean skipBrokenEdits) throws IOException {
+      FSEditLogOp op = null;
+      while (true) {
+        try {
+          in.mark(in.available());
+          try {
+            op = decodeOp();
+          } finally {
+            // If we encountered an exception or an end-of-file condition,
+            // do not advance the input stream.
+            if (op == null) {
+              in.reset();
+            }
+          }
+          return op;
+        } catch (IOException e) {
+          if (!skipBrokenEdits) {
+            throw e;
+          }
+          if (in.skip(1) < 1) {
+            return null;
+          }
+        }
+      }
+    }
+
+    private FSEditLogOp decodeOp() throws IOException {
       if (checksum != null) {
       if (checksum != null) {
         checksum.reset();
         checksum.reset();
       }
       }
 
 
-      in.mark(1);
-
       byte opCodeByte;
       byte opCodeByte;
       try {
       try {
         opCodeByte = in.readByte();
         opCodeByte = in.readByte();
@@ -2255,12 +2279,10 @@ public abstract class FSEditLogOp {
       }
       }
 
 
       FSEditLogOpCodes opCode = FSEditLogOpCodes.fromByte(opCodeByte);
       FSEditLogOpCodes opCode = FSEditLogOpCodes.fromByte(opCodeByte);
-      if (opCode == OP_INVALID) {
-        in.reset(); // reset back to end of file if somebody reads it again
+      if (opCode == OP_INVALID)
         return null;
         return null;
-      }
 
 
-      FSEditLogOp op = opInstances.get().get(opCode);
+      FSEditLogOp op = cache.get(opCode);
       if (op == null) {
       if (op == null) {
         throw new IOException("Read invalid opcode " + opCode);
         throw new IOException("Read invalid opcode " + opCode);
       }
       }
@@ -2268,6 +2290,8 @@ public abstract class FSEditLogOp {
       if (LayoutVersion.supports(Feature.STORED_TXIDS, logVersion)) {
       if (LayoutVersion.supports(Feature.STORED_TXIDS, logVersion)) {
         // Read the txid
         // Read the txid
         op.setTransactionId(in.readLong());
         op.setTransactionId(in.readLong());
+      } else {
+        op.setTransactionId(HdfsConstants.INVALID_TXID);
       }
       }
 
 
       op.readFields(in, logVersion);
       op.readFields(in, logVersion);
@@ -2426,8 +2450,4 @@ public abstract class FSEditLogOp {
     short mode = Short.valueOf(st.getValue("MODE"));
     short mode = Short.valueOf(st.getValue("MODE"));
     return new PermissionStatus(username, groupname, new FsPermission(mode));
     return new PermissionStatus(username, groupname, new FsPermission(mode));
   }
   }
-
-  public static FSEditLogOp getOpInstance(FSEditLogOpCodes opCode) {
-    return opInstances.get().get(opCode);
-  }
-}
+		}
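
The new Reader.readOp(skipBrokenEdits) above is what nextValidOp() implementations lean on: mark the stream before each decode attempt and, when decoding fails, give up one byte and try again until a valid opcode is found or the stream ends. A simplified hedged sketch of that skip-and-retry idea (not the patch's exact bookkeeping, which differs in where the skip resumes; decode() is a hypothetical strict decoder and the DataInputStream is assumed to support mark/reset):

  static FSEditLogOp readSkippingDamage(DataInputStream in) throws IOException {
    while (true) {
      in.mark(in.available());     // remember where this attempt started
      try {
        return decode(in);         // hypothetical strict decoder; null at EOF
      } catch (IOException e) {
        in.reset();                // rewind to the start of the bad attempt
        if (in.skip(1) < 1) {      // discard one byte and retry
          return null;             // nothing left to read
        }
      }
    }
  }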

+ 32 - 40
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

@@ -158,8 +158,8 @@ public class FSImage implements Closeable {
    * @throws IOException
    * @throws IOException
    * @return true if the image needs to be saved or false otherwise
    * @return true if the image needs to be saved or false otherwise
    */
    */
-  boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target)
-      throws IOException {
+  boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target,
+      MetaRecoveryContext recovery) throws IOException {
     assert startOpt != StartupOption.FORMAT : 
     assert startOpt != StartupOption.FORMAT : 
       "NameNode formatting should be performed before reading the image";
       "NameNode formatting should be performed before reading the image";
     
     
@@ -244,7 +244,7 @@ public class FSImage implements Closeable {
       // just load the image
       // just load the image
     }
     }
     
     
-    return loadFSImage(target);
+    return loadFSImage(target, recovery);
   }
   }
   
   
   /**
   /**
@@ -304,7 +304,7 @@ public class FSImage implements Closeable {
     if(storage.getDistributedUpgradeState()) {
     if(storage.getDistributedUpgradeState()) {
       // only distributed upgrade need to continue
       // only distributed upgrade need to continue
       // don't do version upgrade
       // don't do version upgrade
-      this.loadFSImage(target);
+      this.loadFSImage(target, null);
       storage.initializeDistributedUpgrade();
       storage.initializeDistributedUpgrade();
       return;
       return;
     }
     }
@@ -319,7 +319,7 @@ public class FSImage implements Closeable {
     }
     }
 
 
     // load the latest image
     // load the latest image
-    this.loadFSImage(target);
+    this.loadFSImage(target, null);
 
 
     // Do upgrade for each directory
     // Do upgrade for each directory
     long oldCTime = storage.getCTime();
     long oldCTime = storage.getCTime();
@@ -505,7 +505,7 @@ public class FSImage implements Closeable {
     target.dir.fsImage = ckptImage;
     target.dir.fsImage = ckptImage;
     // load from the checkpoint dirs
     // load from the checkpoint dirs
     try {
     try {
-      ckptImage.recoverTransitionRead(StartupOption.REGULAR, target);
+      ckptImage.recoverTransitionRead(StartupOption.REGULAR, target, null);
     } finally {
     } finally {
       ckptImage.close();
       ckptImage.close();
     }
     }
@@ -550,7 +550,7 @@ public class FSImage implements Closeable {
     target.dir.reset();
     target.dir.reset();
 
 
     LOG.debug("Reloading namespace from " + file);
     LOG.debug("Reloading namespace from " + file);
-    loadFSImage(file, target);
+    loadFSImage(file, target, null);
   }
   }
 
 
   /**
   /**
@@ -568,7 +568,8 @@ public class FSImage implements Closeable {
    * @return whether the image should be saved
    * @return whether the image should be saved
    * @throws IOException
    * @throws IOException
    */
    */
-  boolean loadFSImage(FSNamesystem target) throws IOException {
+  boolean loadFSImage(FSNamesystem target, MetaRecoveryContext recovery)
+      throws IOException {
     FSImageStorageInspector inspector = storage.readAndInspectDirs();
     FSImageStorageInspector inspector = storage.readAndInspectDirs();
     
     
     isUpgradeFinalized = inspector.isUpgradeFinalized();
     isUpgradeFinalized = inspector.isUpgradeFinalized();
@@ -583,7 +584,6 @@ public class FSImage implements Closeable {
       // We only want to recover streams if we're going into Active mode.
       // We only want to recover streams if we're going into Active mode.
       editLog.recoverUnclosedStreams();
       editLog.recoverUnclosedStreams();
     }
     }
-
     if (LayoutVersion.supports(Feature.TXID_BASED_LAYOUT, 
     if (LayoutVersion.supports(Feature.TXID_BASED_LAYOUT, 
                                getLayoutVersion())) {
                                getLayoutVersion())) {
       // If we're open for write, we're either non-HA or we're the active NN, so
       // If we're open for write, we're either non-HA or we're the active NN, so
@@ -610,7 +610,7 @@ public class FSImage implements Closeable {
                                  getLayoutVersion())) {
                                  getLayoutVersion())) {
         // For txid-based layout, we should have a .md5 file
         // For txid-based layout, we should have a .md5 file
         // next to the image file
         // next to the image file
-        loadFSImage(imageFile.getFile(), target);
+        loadFSImage(imageFile.getFile(), target, recovery);
       } else if (LayoutVersion.supports(Feature.FSIMAGE_CHECKSUM,
       } else if (LayoutVersion.supports(Feature.FSIMAGE_CHECKSUM,
                                         getLayoutVersion())) {
                                         getLayoutVersion())) {
         // In 0.22, we have the checksum stored in the VERSION file.
         // In 0.22, we have the checksum stored in the VERSION file.
@@ -622,22 +622,19 @@ public class FSImage implements Closeable {
               NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY +
               NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY +
               " not set for storage directory " + sdForProperties.getRoot());
               " not set for storage directory " + sdForProperties.getRoot());
         }
         }
-        loadFSImage(imageFile.getFile(), new MD5Hash(md5), target);
+        loadFSImage(imageFile.getFile(), new MD5Hash(md5), target, recovery);
       } else {
       } else {
         // We don't have any record of the md5sum
         // We don't have any record of the md5sum
-        loadFSImage(imageFile.getFile(), null, target);
+        loadFSImage(imageFile.getFile(), null, target, recovery);
       }
       }
     } catch (IOException ioe) {
     } catch (IOException ioe) {
       FSEditLog.closeAllStreams(editStreams);
       FSEditLog.closeAllStreams(editStreams);
       throw new IOException("Failed to load image from " + imageFile, ioe);
       throw new IOException("Failed to load image from " + imageFile, ioe);
     }
     }
-    
-    long numLoaded = loadEdits(editStreams, target);
+    long txnsAdvanced = loadEdits(editStreams, target, recovery);
     needToSave |= needsResaveBasedOnStaleCheckpoint(imageFile.getFile(),
     needToSave |= needsResaveBasedOnStaleCheckpoint(imageFile.getFile(),
-                                                    numLoaded);
-    
-    // update the txid for the edit log
-    editLog.setNextTxId(storage.getMostRecentCheckpointTxId() + numLoaded + 1);
+                                                    txnsAdvanced);
+    editLog.setNextTxId(lastAppliedTxId + 1);
     return needToSave;
     return needToSave;
   }
   }
 
 
@@ -664,33 +661,29 @@ public class FSImage implements Closeable {
   
   
   /**
    * Load the specified list of edit files into the image.
-   * @return the number of transactions loaded
    */
   public long loadEdits(Iterable<EditLogInputStream> editStreams,
-      FSNamesystem target) throws IOException, EditLogInputException {
+      FSNamesystem target, MetaRecoveryContext recovery) throws IOException {
     LOG.debug("About to load edits:\n  " + Joiner.on("\n  ").join(editStreams));
-
-    long startingTxId = getLastAppliedTxId() + 1;
-    long numLoaded = 0;
-
+    
+    long prevLastAppliedTxId = lastAppliedTxId;  
     try {    
-      FSEditLogLoader loader = new FSEditLogLoader(target);
+      FSEditLogLoader loader = new FSEditLogLoader(target, lastAppliedTxId);
       
       
       // Load latest edits
       for (EditLogInputStream editIn : editStreams) {
-        LOG.info("Reading " + editIn + " expecting start txid #" + startingTxId);
-        long thisNumLoaded = 0;
+        LOG.info("Reading " + editIn + " expecting start txid #" +
+              (lastAppliedTxId + 1));
         try {
-          thisNumLoaded = loader.loadFSEdits(editIn, startingTxId);
-        } catch (EditLogInputException elie) {
-          thisNumLoaded = elie.getNumEditsLoaded();
-          throw elie;
+          loader.loadFSEdits(editIn, lastAppliedTxId + 1, recovery);
         } finally {
           // Update lastAppliedTxId even in case of error, since some ops may
           // have been successfully applied before the error.
-          lastAppliedTxId = startingTxId + thisNumLoaded - 1;
-          startingTxId += thisNumLoaded;
-          numLoaded += thisNumLoaded;
+          lastAppliedTxId = loader.getLastAppliedTxId();
+        }
+        // If we are in recovery mode, we may have skipped over some txids.
+        if (editIn.getLastTxId() != HdfsConstants.INVALID_TXID) {
+          lastAppliedTxId = editIn.getLastTxId();
          }
        }
      } finally {
@@ -698,8 +691,7 @@ public class FSImage implements Closeable {
       // update the counts
       target.dir.updateCountForINodeWithQuota();   
     }
-    
-    return numLoaded;
+    return lastAppliedTxId - prevLastAppliedTxId;
   }
 
 
 
 
@@ -707,14 +699,14 @@ public class FSImage implements Closeable {
    * Load the image namespace from the given image file, verifying
    * it against the MD5 sum stored in its associated .md5 file.
    */
-  private void loadFSImage(File imageFile, FSNamesystem target)
-      throws IOException {
+  private void loadFSImage(File imageFile, FSNamesystem target,
+      MetaRecoveryContext recovery) throws IOException {
     MD5Hash expectedMD5 = MD5FileUtils.readStoredMd5ForFile(imageFile);
     if (expectedMD5 == null) {
       throw new IOException("No MD5 file found corresponding to image file "
           + imageFile);
     }
-    loadFSImage(imageFile, expectedMD5, target);
+    loadFSImage(imageFile, expectedMD5, target, recovery);
   }
   
   /**
@@ -722,7 +714,7 @@ public class FSImage implements Closeable {
    * filenames and blocks.
    */
   private void loadFSImage(File curFile, MD5Hash expectedMd5,
-      FSNamesystem target) throws IOException {
+      FSNamesystem target, MetaRecoveryContext recovery) throws IOException {
     FSImageFormat.Loader loader = new FSImageFormat.Loader(
         conf, target);
     loader.load(curFile);

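
Note on the FSImage.java change above: loadEdits() no longer sums per-stream edit counts; it tracks the highest applied transaction ID and returns how far that value advanced, and the edit log's next txid becomes simply lastAppliedTxId + 1. The following is a minimal, self-contained sketch of that bookkeeping, not the real FSImage API (EditStream, opCount and the main() driver are illustrative stand-ins):

    import java.util.Arrays;
    import java.util.List;

    class TxIdBookkeepingSketch {
      static final long INVALID_TXID = -1;        // stand-in for HdfsConstants.INVALID_TXID

      /** Simplified stand-in for an edit log input stream. */
      static class EditStream {
        final long lastTxId;                      // INVALID_TXID for an in-progress segment
        final long opCount;                       // ops this stream actually applies
        EditStream(long lastTxId, long opCount) { this.lastTxId = lastTxId; this.opCount = opCount; }
      }

      long lastAppliedTxId = 100;                 // txid covered by the loaded fsimage

      /** Mirrors the new contract: return how far lastAppliedTxId advanced. */
      long loadEdits(List<EditStream> streams) {
        long prevLastAppliedTxId = lastAppliedTxId;
        for (EditStream s : streams) {
          lastAppliedTxId += s.opCount;           // ops successfully applied from this stream
          if (s.lastTxId != INVALID_TXID) {
            lastAppliedTxId = s.lastTxId;         // recovery may have skipped txids in between
          }
        }
        return lastAppliedTxId - prevLastAppliedTxId;
      }

      public static void main(String[] args) {
        TxIdBookkeepingSketch image = new TxIdBookkeepingSketch();
        long advanced = image.loadEdits(Arrays.asList(
            new EditStream(150, 50), new EditStream(180, 30)));
        System.out.println("txns advanced = " + advanced);                // 80
        System.out.println("next txid = " + (image.lastAppliedTxId + 1)); // 181
      }
    }
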
+ 8 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java

@@ -56,7 +56,14 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
       return;
     }
     
-    maxSeenTxId = Math.max(maxSeenTxId, NNStorage.readTransactionIdFile(sd));
+    // Check for a seen_txid file, which marks a minimum transaction ID that
+    // must be included in our load plan.
+    try {
+      maxSeenTxId = Math.max(maxSeenTxId, NNStorage.readTransactionIdFile(sd));
+    } catch (IOException ioe) {
+      LOG.warn("Unable to determine the max transaction ID seen by " + sd, ioe);
+      return;
+    }
 
 
     File currentDir = sd.getCurrentDir();
     File filesInStorage[];
@@ -91,15 +98,6 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
       }
     }
     
-
-    // Check for a seen_txid file, which marks a minimum transaction ID that
-    // must be included in our load plan.
-    try {
-      maxSeenTxId = Math.max(maxSeenTxId, NNStorage.readTransactionIdFile(sd));
-    } catch (IOException ioe) {
-      LOG.warn("Unable to determine the max transaction ID seen by " + sd, ioe);
-    }
-    
     // set finalized flag
     isUpgradeFinalized = isUpgradeFinalized && !sd.getPreviousDir().exists();
   }

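
The inspector change above reads seen_txid first and, if the read fails, warns and skips the whole storage directory rather than building a load plan with an unknown minimum txid. A rough stand-alone illustration of that defensive pattern, assuming a plain text file holding one decimal txid (the helper names and file layout here are simplifications, not the real NNStorage API):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    class SeenTxIdSketch {
      long maxSeenTxId = 0;

      /** Reads a single decimal txid from a seen_txid-style file (simplified). */
      static long readTransactionIdFile(Path dir) throws IOException {
        String text = Files.readString(dir.resolve("seen_txid")).trim();
        try {
          return Long.parseLong(text);
        } catch (NumberFormatException e) {
          throw new IOException("Corrupt seen_txid in " + dir, e);
        }
      }

      void inspectDirectory(Path dir) {
        try {
          maxSeenTxId = Math.max(maxSeenTxId, readTransactionIdFile(dir));
        } catch (IOException ioe) {
          // Mirrors the patched behavior: warn and skip this directory entirely.
          System.err.println("Unable to determine the max transaction ID seen by " + dir + ": " + ioe);
          return;
        }
        // ... continue inspecting fsimage_* and edits_* files under dir/current here ...
      }
    }
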
+ 10 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -380,9 +380,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
 
     FSImage fsImage = new FSImage(conf, namespaceDirs, namespaceEditsDirs);
     FSNamesystem namesystem = new FSNamesystem(conf, fsImage);
+    StartupOption startOpt = NameNode.getStartupOption(conf);
+    if (startOpt == StartupOption.RECOVER) {
+      namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    }
 
 
     long loadStart = now();
-    StartupOption startOpt = NameNode.getStartupOption(conf);
     String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
     namesystem.loadFSImage(startOpt, fsImage,
       HAUtil.isHAEnabled(conf, nameserviceId));
@@ -491,7 +494,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     writeLock();
     try {
       // We shouldn't be calling saveNamespace if we've come up in standby state.
-      if (fsImage.recoverTransitionRead(startOpt, this) && !haEnabled) {
+      MetaRecoveryContext recovery = startOpt.createRecoveryContext();
+      if (fsImage.recoverTransitionRead(startOpt, this, recovery) && !haEnabled) {
         fsImage.saveNamespace(this);
       }
       // This will start a new log segment and write to the seen_txid file, so
@@ -2120,10 +2124,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
 
   /** 
    * Check all blocks of a file. If any blocks are lower than their intended
-   * replication factor, then insert them into neededReplication
+   * replication factor, then insert them into neededReplication; if any 
+   * blocks have more replicas than the intended replication factor, insert 
+   * them into invalidateBlocks.
    */
   private void checkReplicationFactor(INodeFile file) {
-    int numExpectedReplicas = file.getReplication();
+    short numExpectedReplicas = file.getReplication();
     Block[] pendingBlocks = file.getBlocks();
     int nrBlocks = pendingBlocks.length;
     for (int i = 0; i < nrBlocks; i++) {

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java

@@ -232,7 +232,10 @@ class FileJournalManager implements JournalManager {
           LOG.info(String.format("Log begins at txid %d, but requested start "
               + "txid is %d. Skipping %d edits.", elf.getFirstTxId(), fromTxId,
               transactionsToSkip));
-          elfis.skipTransactions(transactionsToSkip);
+        }
+        if (elfis.skipUntil(fromTxId) == false) {
+          throw new IOException("failed to advance input stream to txid " +
+              fromTxId);
         }
         return elfis;
       }

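
The FileJournalManager change replaces skipTransactions(count) with skipUntil(fromTxId): the stream is advanced until the next op carries the requested txid, and failure is reported instead of silently under- or over-shooting. A hedged sketch of such a skip loop over a generic op list (the types below are illustrative, not the HDFS stream classes):

    import java.util.List;

    class SkipUntilSketch {
      /** Minimal stand-in for an edit log operation carrying a transaction id. */
      static class Op {
        final long txId;
        Op(long txId) { this.txId = txId; }
      }

      private final List<Op> ops;
      private int pos = 0;            // index of the next op to be returned

      SkipUntilSketch(List<Op> ops) { this.ops = ops; }

      /** Advance past every op with txid < fromTxId; false if the log ends first. */
      boolean skipUntil(long fromTxId) {
        while (pos < ops.size() && ops.get(pos).txId < fromTxId) {
          pos++;
        }
        return pos < ops.size();
      }
    }

In the patched caller, a false return is turned into an IOException ("failed to advance input stream to txid ..."), rather than trusting a pre-computed skip count as the old code did.
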
+ 0 - 56
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalStream.java

@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-/**
- * A generic interface for journal input and output streams.
- */
-interface JournalStream {
-  /**
-   * Type of the underlying persistent storage type the stream is based upon.
-   * <ul>
-   * <li>{@link JournalType#FILE} - streams edits into a local file, see
-   * {@link FSEditLog.EditLogFileOutputStream} and 
-   * {@link FSEditLog.EditLogFileInputStream}</li>
-   * <li>{@link JournalType#BACKUP} - streams edits to a backup node, see
-   * {@link EditLogBackupOutputStream} and {@link EditLogBackupInputStream}</li>
-   * </ul>
-   */
-  static enum JournalType {
-    FILE,
-    BACKUP;
-    boolean isOfType(JournalType other) {
-      return other == null || this == other;
-    }
-  };
-
-  /**
-   * Get this stream name.
-   * 
-   * @return name of the stream
-   */
-  String getName();
-
-  /**
-   * Get the type of the stream.
-   * Determines the underlying persistent storage type.
-   * @see JournalType
-   * @return type
-   */
-  JournalType getType();
-}

+ 130 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java

@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/** Context data for an ongoing NameNode metadata recovery process. */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class MetaRecoveryContext  {
+  public static final Log LOG = LogFactory.getLog(MetaRecoveryContext.class.getName());
+  public final static int FORCE_NONE = 0;
+  public final static int FORCE_FIRST_CHOICE = 1;
+  public final static int FORCE_ALL = 2;
+  private int force;
+  
+  /** Exception thrown when the user has requested processing to stop. */
+  static public class RequestStopException extends IOException {
+    private static final long serialVersionUID = 1L;
+    public RequestStopException(String msg) {
+      super(msg);
+    }
+  }
+  
+  public MetaRecoveryContext(int force) {
+    this.force = force;
+  }
+
+  /**
+   * Display a prompt to the user and get his or her choice.
+   *  
+   * @param prompt      The prompt to display
+   * @param firstChoice First choice (taken automatically if the force level
+   *                    is above FORCE_NONE)
+   * @param choices     Other choices
+   *
+   * @return            The choice that was taken
+   * @throws IOException
+   */
+  public String ask(String prompt, String firstChoice, String... choices) 
+      throws IOException {
+    while (true) {
+      LOG.info(prompt);
+      if (force > FORCE_NONE) {
+        LOG.info("automatically choosing " + firstChoice);
+        return firstChoice;
+      }
+      StringBuilder responseBuilder = new StringBuilder();
+      while (true) {
+        int c = System.in.read();
+        if (c == -1 || c == '\r' || c == '\n') {
+          break;
+        }
+        responseBuilder.append((char)c);
+      }
+      String response = responseBuilder.toString();
+      if (response.equalsIgnoreCase(firstChoice))
+        return firstChoice;
+      for (String c : choices) {
+        if (response.equalsIgnoreCase(c)) {
+          return c;
+        }
+      }
+      LOG.error("I'm sorry, I cannot understand your response.\n");
+    }
+  }
+
+  public static void editLogLoaderPrompt(String prompt,
+        MetaRecoveryContext recovery, String contStr)
+        throws IOException, RequestStopException
+  {
+    if (recovery == null) {
+      throw new IOException(prompt);
+    }
+    LOG.error(prompt);
+    String answer = recovery.ask("\nEnter 'c' to continue, " + contStr + "\n" +
+      "Enter 's' to stop reading the edit log here, abandoning any later " +
+        "edits\n" +
+      "Enter 'q' to quit without saving\n" +
+      "Enter 'a' to always select the first choice in the future " +
+      "without prompting. " + 
+      "(c/s/q/a)\n", "c", "s", "q", "a");
+    if (answer.equals("c")) {
+      LOG.info("Continuing.");
+      return;
+    } else if (answer.equals("s")) {
+      throw new RequestStopException("user requested stop");
+    } else if (answer.equals("q")) {
+      recovery.quit();
+    } else {
+      recovery.setForce(FORCE_FIRST_CHOICE);
+      return;
+    }
+  }
+
+  /** Log a message and quit */
+  public void quit() {
+    LOG.error("Exiting on user request.");
+    System.exit(0);
+  }
+
+  public int getForce() {
+    return this.force;
+  }
+
+  public void setForce(int force) {
+    this.force = force;
+  }
+}

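
To see how the new class is meant to be driven, here is a hedged sketch of the calling pattern an edit log loader would use: with no MetaRecoveryContext (normal startup or the standby tailer) a problem stays fatal; with one (namenode -recover) the operator is prompted and may continue, stop, or quit. The RecoveryDriverSketch class, the handleBadEdit helper, and the error text are illustrative only; only MetaRecoveryContext itself comes from the diff above:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext;

    class RecoveryDriverSketch {
      /**
       * Called for an edit that fails to apply. With recovery == null,
       * editLogLoaderPrompt() rethrows as an IOException; otherwise it asks the
       * operator whether to continue past the bad op.
       */
      static void handleBadEdit(long txId, IOException cause,
          MetaRecoveryContext recovery) throws IOException {
        MetaRecoveryContext.editLogLoaderPrompt(
            "Failed to apply edit log operation at txid " + txId + ": " + cause,
            recovery, "skipping the bad operation");
        // Reaching this point means the operator chose to continue (or -force
        // auto-selected the first choice); the caller then skips the op.
      }

      public static void main(String[] args) throws IOException {
        // FORCE_FIRST_CHOICE makes every prompt auto-select "continue",
        // which is what `namenode -recover -force` arranges.
        MetaRecoveryContext recovery =
            new MetaRecoveryContext(MetaRecoveryContext.FORCE_FIRST_CHOICE);
        handleBadEdit(42, new IOException("corrupt op"), recovery);
      }
    }
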
+ 2 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java

@@ -49,7 +49,6 @@ import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.UpgradeManager;
 import org.apache.hadoop.hdfs.server.common.Util;
-import org.apache.hadoop.hdfs.server.namenode.JournalStream.JournalType;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.util.AtomicFileOutputStream;
 
@@ -299,8 +298,7 @@ public class NNStorage extends Storage implements Closeable {
                           NameNodeDirType.IMAGE;
       // Add to the list of storage directories, only if the
       // URI is of type file://
-      if(dirName.getScheme().compareTo(JournalType.FILE.name().toLowerCase())
-          == 0){
+      if(dirName.getScheme().compareTo("file") == 0) {
         this.addStorageDir(new StorageDirectory(new File(dirName.getPath()),
             dirType,
             !sharedEditsDirs.contains(dirName))); // Don't lock the dir if it's shared.
@@ -312,8 +310,7 @@ public class NNStorage extends Storage implements Closeable {
       checkSchemeConsistency(dirName);
       // Add to the list of storage directories, only if the
       // URI is of type file://
-      if(dirName.getScheme().compareTo(JournalType.FILE.name().toLowerCase())
-          == 0)
+      if(dirName.getScheme().compareTo("file") == 0)
         this.addStorageDir(new StorageDirectory(new File(dirName.getPath()),
                     NameNodeDirType.EDITS, !sharedEditsDirs.contains(dirName)));
     }

+ 96 - 39
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -73,6 +73,7 @@ import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
+import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
 
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
@@ -233,7 +234,7 @@ public class NameNode {
   /** Format a new filesystem.  Destroys any filesystem that may already
    * exist at this location.  **/
   public static void format(Configuration conf) throws IOException {
-    format(conf, true);
+    format(conf, true, true);
   }
 
   static NameNodeMetrics metrics;
@@ -532,6 +533,8 @@ public class NameNode {
    * <li>{@link StartupOption#CHECKPOINT CHECKPOINT} - start checkpoint node</li>
    * <li>{@link StartupOption#UPGRADE UPGRADE} - start the cluster  
    * upgrade and create a snapshot of the current file system state</li> 
+   * <li>{@link StartupOption#RECOVERY RECOVERY} - recover name node
+   * metadata</li>
    * <li>{@link StartupOption#ROLLBACK ROLLBACK} - roll the  
    *            cluster back to the previous state</li>
    * <li>{@link StartupOption#FINALIZE FINALIZE} - finalize 
@@ -674,9 +677,8 @@ public class NameNode {
    * @return true if formatting was aborted, false otherwise
    * @throws IOException
    */
-  private static boolean format(Configuration conf,
-                                boolean force)
-      throws IOException {
+  private static boolean format(Configuration conf, boolean force,
+      boolean isInteractive) throws IOException {
     String nsId = DFSUtil.getNamenodeNameServiceId(conf);
     String namenodeId = HAUtil.getNameNodeId(conf, nsId);
     initializeGenericKeys(conf, nsId, namenodeId);
@@ -685,7 +687,7 @@ public class NameNode {
     Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
     List<URI> editDirsToFormat = 
                  FSNamesystem.getNamespaceEditsDirs(conf);
-    if (!confirmFormat(dirsToFormat, force, true)) {
+    if (!confirmFormat(dirsToFormat, force, isInteractive)) {
       return true; // aborted
     }
 
@@ -776,6 +778,9 @@ public class NameNode {
    */
   private static boolean initializeSharedEdits(Configuration conf,
       boolean force, boolean interactive) {
+    String nsId = DFSUtil.getNamenodeNameServiceId(conf);
+    String namenodeId = HAUtil.getNameNodeId(conf, nsId);
+    initializeGenericKeys(conf, nsId, namenodeId);
     NNStorage existingStorage = null;
     try {
       FSNamesystem fsns = FSNamesystem.loadFromDisk(conf,
@@ -843,14 +848,17 @@ public class NameNode {
       "Usage: java NameNode [" +
       "Usage: java NameNode [" +
       StartupOption.BACKUP.getName() + "] | [" +
       StartupOption.BACKUP.getName() + "] | [" +
       StartupOption.CHECKPOINT.getName() + "] | [" +
       StartupOption.CHECKPOINT.getName() + "] | [" +
-      StartupOption.FORMAT.getName() + "[" + StartupOption.CLUSTERID.getName() +  
-      " cid ]] | [" +
+      StartupOption.FORMAT.getName() + " [" + StartupOption.CLUSTERID.getName() +  
+      " cid ] [" + StartupOption.FORCE.getName() + "] [" +
+      StartupOption.NONINTERACTIVE.getName() + "] ] | [" +
       StartupOption.UPGRADE.getName() + "] | [" +
       StartupOption.ROLLBACK.getName() + "] | [" +
       StartupOption.FINALIZE.getName() + "] | [" +
       StartupOption.IMPORT.getName() + "] | [" +
-      StartupOption.BOOTSTRAPSTANDBY.getName() + "] | [" +
-      StartupOption.INITIALIZESHAREDEDITS.getName() + "]");
+      StartupOption.INITIALIZESHAREDEDITS.getName() + "] | [" +
+      StartupOption.BOOTSTRAPSTANDBY.getName() + "] | [" + 
+      StartupOption.RECOVER.getName() + " [ " +
+        StartupOption.FORCE.getName() + " ] ]");
   }
 
   private static StartupOption parseArguments(String args[]) {
@@ -860,11 +868,35 @@ public class NameNode {
       String cmd = args[i];
       if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.FORMAT;
-        // might be followed by two args
-        if (i + 2 < argsLen
-            && args[i + 1].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
-          i += 2;
-          startOpt.setClusterId(args[i]);
+        for (i = i + 1; i < argsLen; i++) {
+          if (args[i].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
+            i++;
+            if (i >= argsLen) {
+              // if no cluster id specified, return null
+              LOG.fatal("Must specify a valid cluster ID after the "
+                  + StartupOption.CLUSTERID.getName() + " flag");
+              return null;
+            }
+            String clusterId = args[i];
+            // Make sure an id is specified and not another flag
+            if (clusterId.isEmpty() ||
+                clusterId.equalsIgnoreCase(StartupOption.FORCE.getName()) ||
+                clusterId.equalsIgnoreCase(
+                    StartupOption.NONINTERACTIVE.getName())) {
+              LOG.fatal("Must specify a valid cluster ID after the "
+                  + StartupOption.CLUSTERID.getName() + " flag");
+              return null;
+            }
+            startOpt.setClusterId(clusterId);
+          }
+
+          if (args[i].equalsIgnoreCase(StartupOption.FORCE.getName())) {
+            startOpt.setForceFormat(true);
+          }
+
+          if (args[i].equalsIgnoreCase(StartupOption.NONINTERACTIVE.getName())) {
+            startOpt.setInteractiveFormat(false);
+          }
         }
       } else if (StartupOption.GENCLUSTERID.getName().equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.GENCLUSTERID;
@@ -894,6 +926,21 @@ public class NameNode {
       } else if (StartupOption.INITIALIZESHAREDEDITS.getName().equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.INITIALIZESHAREDEDITS;
         return startOpt;
+      } else if (StartupOption.RECOVER.getName().equalsIgnoreCase(cmd)) {
+        if (startOpt != StartupOption.REGULAR) {
+          throw new RuntimeException("Can't combine -recover with " +
+              "other startup options.");
+        }
+        startOpt = StartupOption.RECOVER;
+        while (++i < argsLen) {
+          if (args[i].equalsIgnoreCase(
+                StartupOption.FORCE.getName())) {
+            startOpt.setForce(MetaRecoveryContext.FORCE_FIRST_CHOICE);
+          } else {
+            throw new RuntimeException("Error parsing recovery options: " + 
+              "can't understand option \"" + args[i] + "\"");
+          }
+        }
       } else {
         return null;
       }
@@ -910,31 +957,36 @@ public class NameNode {
                                           StartupOption.REGULAR.toString()));
   }
 
-  /**
-   * Print out a prompt to the user, and return true if the user
-   * responds with "Y" or "yes".
-   */
-  static boolean confirmPrompt(String prompt) throws IOException {
-    while (true) {
-      System.err.print(prompt + " (Y or N) ");
-      StringBuilder responseBuilder = new StringBuilder();
-      while (true) {
-        int c = System.in.read();
-        if (c == -1 || c == '\r' || c == '\n') {
-          break;
-        }
-        responseBuilder.append((char)c);
-      }
-  
-      String response = responseBuilder.toString();
-      if (response.equalsIgnoreCase("y") ||
-          response.equalsIgnoreCase("yes")) {
-        return true;
-      } else if (response.equalsIgnoreCase("n") ||
-          response.equalsIgnoreCase("no")) {
-        return false;
+  private static void doRecovery(StartupOption startOpt, Configuration conf)
+      throws IOException {
+    if (startOpt.getForce() < MetaRecoveryContext.FORCE_ALL) {
+      if (!confirmPrompt("You have selected Metadata Recovery mode.  " +
+          "This mode is intended to recover lost metadata on a corrupt " +
+          "filesystem.  Metadata recovery mode often permanently deletes " +
+          "data from your HDFS filesystem.  Please back up your edit log " +
+          "and fsimage before trying this!\n\n" +
+          "Are you ready to proceed? (Y/N)\n")) {
+        System.err.println("Recovery aborted at user request.\n");
+        return;
       }
-      // else ask them again
+    }
+    MetaRecoveryContext.LOG.info("starting recovery...");
+    UserGroupInformation.setConfiguration(conf);
+    NameNode.initMetrics(conf, startOpt.toNodeRole());
+    FSNamesystem fsn = null;
+    try {
+      fsn = FSNamesystem.loadFromDisk(conf);
+      fsn.saveNamespace();
+      MetaRecoveryContext.LOG.info("RECOVERY COMPLETE");
+    } catch (IOException e) {
+      MetaRecoveryContext.LOG.info("RECOVERY FAILED: caught exception", e);
+      throw e;
+    } catch (RuntimeException e) {
+      MetaRecoveryContext.LOG.info("RECOVERY FAILED: caught exception", e);
+      throw e;
+    } finally {
+      if (fsn != null)
+        fsn.close();
     }
   }
 
 
@@ -959,7 +1011,8 @@ public class NameNode {
 
 
     switch (startOpt) {
       case FORMAT: {
-        boolean aborted = format(conf, false);
+        boolean aborted = format(conf, startOpt.getForceFormat(),
+            startOpt.getInteractiveFormat());
         System.exit(aborted ? 1 : 0);
         return null; // avoid javac warning
       }
@@ -991,6 +1044,10 @@ public class NameNode {
         DefaultMetricsSystem.initialize(role.toString().replace(" ", ""));
         return new BackupNode(conf, role);
       }
+      case RECOVER: {
+        NameNode.doRecovery(startOpt, conf);
+        return null;
+      }
       default:
         DefaultMetricsSystem.initialize("NameNode");
         return new NameNode(conf);

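
Putting the NameNode.java changes together: -recover is parsed into StartupOption.RECOVER (with -force mapping to MetaRecoveryContext.FORCE_FIRST_CHOICE), createNameNode() dispatches RECOVER to the new doRecovery(), and doRecovery() confirms with the operator, loads the namesystem with a recovery context, and saves the repaired namespace. A rough sketch of the equivalent argument handling, assuming the literal flag spellings implied by the option names (the Options holder and class name are hypothetical, not the real StartupOption enum):

    class StartupArgSketch {
      // Simplified stand-in for the relevant StartupOption state.
      static class Options {
        String startOpt = "REGULAR";
        String clusterId;
        boolean forceFormat;
        boolean interactiveFormat = true;
        int recoveryForce;            // 0 = prompt at every choice, 1 = auto-pick first choice
      }

      /** Mirrors the shape of the new parseArguments() handling shown above. */
      static Options parse(String[] args) {
        Options o = new Options();
        for (int i = 0; i < args.length; i++) {
          if ("-format".equalsIgnoreCase(args[i])) {
            o.startOpt = "FORMAT";
            for (i = i + 1; i < args.length; i++) {
              if ("-clusterid".equalsIgnoreCase(args[i])) {
                if (++i >= args.length) throw new IllegalArgumentException("missing cluster ID");
                o.clusterId = args[i];
              } else if ("-force".equalsIgnoreCase(args[i])) {
                o.forceFormat = true;
              } else if ("-nonInteractive".equalsIgnoreCase(args[i])) {
                o.interactiveFormat = false;
              }
            }
          } else if ("-recover".equalsIgnoreCase(args[i])) {
            o.startOpt = "RECOVER";
            while (++i < args.length) {
              if ("-force".equalsIgnoreCase(args[i])) {
                o.recoveryForce = 1;  // auto-select the first choice at every prompt
              } else {
                throw new IllegalArgumentException("unknown recovery option " + args[i]);
              }
            }
          }
        }
        return o;
      }

      public static void main(String[] args) {
        Options o = parse(new String[] {"-recover", "-force"});
        System.out.println(o.startOpt + " force=" + o.recoveryForce);   // RECOVER force=1
      }
    }

Assuming the standard launcher script, the operator-facing invocation would then look like `hdfs namenode -recover [-force]`, with doRecovery() saving the repaired namespace and exiting.
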
+ 81 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java

@@ -33,10 +33,14 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.ha.HAServiceStatus;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
@@ -47,8 +51,10 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.tools.NNHAServiceTarget;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Tool;
@@ -65,7 +71,7 @@ import com.google.common.collect.Sets;
  */
 @InterfaceAudience.Private
 public class BootstrapStandby implements Tool, Configurable {
-  private static final Log LOG = LogFactory.getLog(BootstrapStandby.class); 
+  private static final Log LOG = LogFactory.getLog(BootstrapStandby.class);
   private String nsId;
   private String nnId;
   private String otherNNId;
@@ -79,7 +85,13 @@ public class BootstrapStandby implements Tool, Configurable {
   
   
   private boolean force = false;
   private boolean interactive = true;
-  
+
+  // Exit/return codes.
+  static final int ERR_CODE_FAILED_CONNECT = 2;
+  static final int ERR_CODE_INVALID_VERSION = 3;
+  static final int ERR_CODE_OTHER_NN_NOT_ACTIVE = 4;
+  static final int ERR_CODE_ALREADY_FORMATTED = 5;
+  static final int ERR_CODE_LOGS_UNAVAILABLE = 6; 
 
 
   public int run(String[] args) throws Exception {
     SecurityUtil.initKrb5CipherSuites();
@@ -121,24 +133,43 @@ public class BootstrapStandby implements Tool, Configurable {
     System.err.println("Usage: " + this.getClass().getSimpleName() +
         "[-force] [-nonInteractive]");
   }
+  
+  private NamenodeProtocol createNNProtocolProxy()
+      throws IOException {
+    return NameNodeProxies.createNonHAProxy(getConf(),
+        otherIpcAddr, NamenodeProtocol.class,
+        UserGroupInformation.getLoginUser(), true)
+        .getProxy();
+  }
+  
+  private HAServiceProtocol createHAProtocolProxy()
+      throws IOException {
+    return new NNHAServiceTarget(new HdfsConfiguration(conf),
+        nsId, otherNNId).getProxy(conf, 15000);
+  }
 
 
   private int doRun() throws IOException {
-    ProxyAndInfo<NamenodeProtocol> proxyAndInfo = NameNodeProxies.createNonHAProxy(getConf(),
-      otherIpcAddr, NamenodeProtocol.class,
-      UserGroupInformation.getLoginUser(), true);
-    NamenodeProtocol proxy = proxyAndInfo.getProxy();
+
+    NamenodeProtocol proxy = createNNProtocolProxy();
     NamespaceInfo nsInfo;
     try {
       nsInfo = proxy.versionRequest();
-      checkLayoutVersion(nsInfo);
     } catch (IOException ioe) {
       LOG.fatal("Unable to fetch namespace information from active NN at " +
           otherIpcAddr + ": " + ioe.getMessage());
       if (LOG.isDebugEnabled()) {
         LOG.debug("Full exception trace", ioe);
       }
-      return 1;
+      return ERR_CODE_FAILED_CONNECT;
     }
+
+    if (!checkLayoutVersion(nsInfo)) {
+      LOG.fatal("Layout version on remote node (" +
+          nsInfo.getLayoutVersion() + ") does not match " +
+          "this node's layout version (" + HdfsConstants.LAYOUT_VERSION + ")");
+      return ERR_CODE_INVALID_VERSION;
+    }
+
     
     
     System.out.println(
         "=====================================================\n" +
@@ -153,12 +184,35 @@ public class BootstrapStandby implements Tool, Configurable {
         "           Layout version: " + nsInfo.getLayoutVersion() + "\n" +
         "           Layout version: " + nsInfo.getLayoutVersion() + "\n" +
         "=====================================================");
         "=====================================================");
 
 
+    // Ensure the other NN is active - we can't force it to roll edit logs
+    // below if it's not active.
+    if (!isOtherNNActive()) {
+      String err = "NameNode " + nsId + "." + nnId + " at " + otherIpcAddr +
+          " is not currently in ACTIVE state.";
+      if (!interactive) {
+        LOG.fatal(err + " Please transition it to " +
+            "active before attempting to bootstrap a standby node.");
+        return ERR_CODE_OTHER_NN_NOT_ACTIVE;
+      }
+      
+      System.err.println(err);
+      if (ToolRunner.confirmPrompt(
+            "Do you want to automatically transition it to active now?")) {
+        transitionOtherNNActive();
+      } else {
+        LOG.fatal("User aborted. Exiting without bootstrapping standby.");
+        return ERR_CODE_OTHER_NN_NOT_ACTIVE;
+      }
+    }
+    
+
+    
     // Check with the user before blowing away data.
     if (!NameNode.confirmFormat(
             Sets.union(Sets.newHashSet(dirsToFormat),
                 Sets.newHashSet(editUrisToFormat)),
             force, interactive)) {
-      return 1;
+      return ERR_CODE_ALREADY_FORMATTED;
     }
 
     // Force the active to roll its log
@@ -180,7 +234,7 @@ public class BootstrapStandby implements Tool, Configurable {
     // Ensure that we have enough edits already in the shared directory to
     // start up from the last checkpoint on the active.
     if (!checkLogsAvailableForRead(image, imageTxId, rollTxId)) {
-      return 1;
+      return ERR_CODE_LOGS_UNAVAILABLE;
     }
     
     image.getStorage().writeTransactionIdFileToStorage(rollTxId);
@@ -193,6 +247,14 @@ public class BootstrapStandby implements Tool, Configurable {
     return 0;
   }
 
+  
+  private void transitionOtherNNActive()
+      throws AccessControlException, ServiceFailedException, IOException {
+    LOG.info("Transitioning the running namenode to active...");
+    createHAProtocolProxy().transitionToActive();    
+    LOG.info("Successful");
+  }
+
   private boolean checkLogsAvailableForRead(FSImage image, long imageTxId,
       long rollTxId) {
     
@@ -225,12 +287,14 @@ public class BootstrapStandby implements Tool, Configurable {
     }
   }
 
-  private void checkLayoutVersion(NamespaceInfo nsInfo) throws IOException {
-    if (nsInfo.getLayoutVersion() != HdfsConstants.LAYOUT_VERSION) {
-      throw new IOException("Layout version on remote node (" +
-          nsInfo.getLayoutVersion() + ") does not match " +
-          "this node's layout version (" + HdfsConstants.LAYOUT_VERSION + ")");
-    }
+  private boolean checkLayoutVersion(NamespaceInfo nsInfo) throws IOException {
+    return (nsInfo.getLayoutVersion() == HdfsConstants.LAYOUT_VERSION);
+  }
+  
+  private boolean isOtherNNActive()
+      throws AccessControlException, IOException {
+    HAServiceStatus status = createHAProtocolProxy().getServiceStatus();
+    return status.getState() == HAServiceState.ACTIVE;
   }
 
   private void parseConfAndFindOtherNN() throws IOException {

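
The BootstrapStandby changes replace the single generic failure code with named exit codes, turn the layout check into a boolean, and add an interactive offer to transition the other NN to active before bootstrapping. A compact sketch of that decision flow (the exit-code values are copied from the diff; OtherNameNode, promptYes and the class itself are stand-ins, not the real proxies):

    class BootstrapDecisionSketch {
      // Exit/return codes, as introduced in the patch above.
      static final int ERR_CODE_INVALID_VERSION = 3;
      static final int ERR_CODE_OTHER_NN_NOT_ACTIVE = 4;

      interface OtherNameNode {
        int layoutVersion();
        boolean isActive();
        void transitionToActive();
      }

      /** Returns 0 to proceed with bootstrapping, or a named error code. */
      static int checkOtherNameNode(OtherNameNode other, int myLayoutVersion,
          boolean interactive, boolean promptYes) {
        if (other.layoutVersion() != myLayoutVersion) {
          return ERR_CODE_INVALID_VERSION;        // was thrown as an IOException before the patch
        }
        if (!other.isActive()) {
          if (!interactive) {
            return ERR_CODE_OTHER_NN_NOT_ACTIVE;  // cannot force it to roll its edit log
          }
          if (promptYes) {
            other.transitionToActive();           // operator agreed to flip it to active
          } else {
            return ERR_CODE_OTHER_NN_NOT_ACTIVE;  // operator aborted
          }
        }
        return 0;
      }
    }
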
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java

@@ -219,7 +219,7 @@ public class EditLogTailer {
       // disk are ignored.
       long editsLoaded = 0;
       try {
-        editsLoaded = image.loadEdits(streams, namesystem);
+        editsLoaded = image.loadEdits(streams, namesystem, null);
       } catch (EditLogInputException elie) {
         editsLoaded = elie.getNumEditsLoaded();
         throw elie;

+ 0 - 39
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java

@@ -25,14 +25,6 @@ package org.apache.hadoop.hdfs.server.protocol;
  * each datanode.
  */
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-
 /**
  * Balancer bandwidth command instructs each datanode to change its value for
  * the max amount of network bandwidth it may use during the block balancing
@@ -71,35 +63,4 @@ public class BalancerBandwidthCommand extends DatanodeCommand {
   public long getBalancerBandwidthValue() {
     return this.bandwidth;
   }
-
-  // ///////////////////////////////////////////////
-  // Writable
-  // ///////////////////////////////////////////////
-  static { // register a ctor
-    WritableFactories.setFactory(BalancerBandwidthCommand.class, new WritableFactory() {
-      public Writable newInstance() {
-        return new BalancerBandwidthCommand();
-      }
-    });
-  }
-
-  /**
-   * Writes the bandwidth payload to the Balancer Bandwidth Command packet.
-   * @param out DataOutput stream used for writing commands to the datanode.
-   * @throws IOException
-   */
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    out.writeLong(this.bandwidth);
-  }
-
-  /**
-   * Reads the bandwidth payload from the Balancer Bandwidth Command packet.
-   * @param in DataInput stream used for reading commands to the datanode.
-   * @throws IOException
-   */
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    this.bandwidth = in.readLong();
-  }
 }

+ 0 - 56
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java

@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -27,11 +24,6 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-
 
 
 /****************************************************
  * A BlockCommand is an instruction to a datanode 
@@ -58,8 +50,6 @@ public class BlockCommand extends DatanodeCommand {
   Block blocks[];
   DatanodeInfo targets[][];
 
-  public BlockCommand() {}
-
   /**
    * Create BlockCommand for transferring blocks to another datanode
    * @param blocktargetlist    blocks to be transferred 
@@ -110,50 +100,4 @@ public class BlockCommand extends DatanodeCommand {
   public DatanodeInfo[][] getTargets() {
     return targets;
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (BlockCommand.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new BlockCommand(); }
-       });
-  }
-
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    Text.writeString(out, poolId);
-    out.writeInt(blocks.length);
-    for (int i = 0; i < blocks.length; i++) {
-      blocks[i].write(out);
-    }
-    out.writeInt(targets.length);
-    for (int i = 0; i < targets.length; i++) {
-      out.writeInt(targets[i].length);
-      for (int j = 0; j < targets[i].length; j++) {
-        targets[i][j].write(out);
-      }
-    }
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    this.poolId = Text.readString(in);
-    this.blocks = new Block[in.readInt()];
-    for (int i = 0; i < blocks.length; i++) {
-      blocks[i] = new Block();
-      blocks[i].readFields(in);
-    }
-
-    this.targets = new DatanodeInfo[in.readInt()][];
-    for (int i = 0; i < targets.length; i++) {
-      this.targets[i] = new DatanodeInfo[in.readInt()];
-      for (int j = 0; j < targets[i].length; j++) {
-        targets[i][j] = new DatanodeInfo();
-        targets[i][j].readFields(in);
-      }
-    }
-  }
 }

+ 0 - 65
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java

@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
 import java.util.Collection;
 import java.util.ArrayList;
 
@@ -28,9 +25,6 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 
 import com.google.common.base.Joiner;
 
 
@@ -61,14 +55,6 @@ public class BlockRecoveryCommand extends DatanodeCommand {
   public static class RecoveringBlock extends LocatedBlock {
     private long newGenerationStamp;
 
-    /**
-     * Create empty RecoveringBlock.
-     */
-    public RecoveringBlock() {
-      super();
-      newGenerationStamp = -1L;
-    }
-
     /**
      * Create RecoveringBlock.
      */
@@ -84,27 +70,6 @@ public class BlockRecoveryCommand extends DatanodeCommand {
     public long getNewGenerationStamp() {
       return newGenerationStamp;
     }
-
-    ///////////////////////////////////////////
-    // Writable
-    ///////////////////////////////////////////
-    static {                                      // register a ctor
-      WritableFactories.setFactory
-        (RecoveringBlock.class,
-         new WritableFactory() {
-           public Writable newInstance() { return new RecoveringBlock(); }
-         });
-    }
-
-    public void write(DataOutput out) throws IOException {
-      super.write(out);
-      out.writeLong(newGenerationStamp);
-    }
-
-    public void readFields(DataInput in) throws IOException {
-      super.readFields(in);
-      newGenerationStamp = in.readLong();
-    }
   }
 
   /**
@@ -149,34 +114,4 @@ public class BlockRecoveryCommand extends DatanodeCommand {
     sb.append("\n)");
     return sb.toString();
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (BlockRecoveryCommand.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new BlockRecoveryCommand(); }
-       });
-  }
-
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    out.writeInt(recoveringBlocks.size());
-    for(RecoveringBlock block : recoveringBlocks) {
-      block.write(out);
-    }
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    int numBlocks = in.readInt();
-    recoveringBlocks = new ArrayList<RecoveringBlock>(numBlocks);
-    for(int i = 0; i < numBlocks; i++) {
-      RecoveringBlock b = new RecoveringBlock();
-      b.readFields(in);
-      add(b);
-    }
-  }
 }

+ 2 - 56
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java

@@ -17,16 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
 
 
 /** A class to implement an array of BlockLocations
  *  It provide efficient customized serialization/deserialization methods
@@ -34,23 +27,17 @@ import org.apache.hadoop.io.WritableUtils;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class BlocksWithLocations implements Writable {
+public class BlocksWithLocations {
 
 
   /**
    * A class to keep track of a block and its locations
    */
   @InterfaceAudience.Private
   @InterfaceStability.Evolving
-  public static class BlockWithLocations  implements Writable {
+  public static class BlockWithLocations {
     Block block;
     String datanodeIDs[];
     
-    /** default constructor */
-    public BlockWithLocations() {
-      block = new Block();
-      datanodeIDs = null;
-    }
-    
     /** constructor */
     public BlockWithLocations(Block b, String[] datanodes) {
       block = b;
@@ -66,33 +53,10 @@ public class BlocksWithLocations implements Writable {
     public String[] getDatanodes() {
       return datanodeIDs;
     }
-    
-    /** deserialization method */
-    public void readFields(DataInput in) throws IOException {
-      block.readFields(in);
-      int len = WritableUtils.readVInt(in); // variable length integer
-      datanodeIDs = new String[len];
-      for(int i=0; i<len; i++) {
-        datanodeIDs[i] = Text.readString(in);
-      }
-    }
-    
-    /** serialization method */
-    public void write(DataOutput out) throws IOException {
-      block.write(out);
-      WritableUtils.writeVInt(out, datanodeIDs.length); // variable length int
-      for(String id:datanodeIDs) {
-        Text.writeString(out, id);
-      }
-    }
   }
   }
 
   private BlockWithLocations[] blocks;
 
-  /** default constructor */
-  BlocksWithLocations() {
-  }
-
   /** Constructor with one parameter */
   /** Constructor with one parameter */
   public BlocksWithLocations( BlockWithLocations[] blocks ) {
     this.blocks = blocks;
   public BlockWithLocations[] getBlocks() {
   public BlockWithLocations[] getBlocks() {
     return blocks;
   }
-  /** serialization method */
-  public void write( DataOutput out ) throws IOException {
-    WritableUtils.writeVInt(out, blocks.length);
-    for(int i=0; i<blocks.length; i++) {
-      blocks[i].write(out);
-    }
-  }
-
-  /** deserialization method */
-  public void readFields(DataInput in) throws IOException {
-    int len = WritableUtils.readVInt(in);
-    blocks = new BlockWithLocations[len];
-    for(int i=0; i<len; i++) {
-      blocks[i] = new BlockWithLocations();
-      blocks[i].readFields(in);
-    }
-  }
 }

+ 0 - 30
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/CheckpointCommand.java

@@ -17,13 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
@@ -77,27 +70,4 @@ public class CheckpointCommand extends NamenodeCommand {
   public boolean needToReturnImage() {
   public boolean needToReturnImage() {
     return needToReturnImage;
     return needToReturnImage;
   }
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  static {
-    WritableFactories.setFactory(CheckpointCommand.class,
-        new WritableFactory() {
-          public Writable newInstance() {return new CheckpointCommand();}
-        });
-  }
-
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    cSig.write(out);
-    out.writeBoolean(needToReturnImage);
-  }
-  
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    cSig = new CheckpointSignature();
-    cSig.readFields(in);
-    needToReturnImage = in.readBoolean();
-  }
 }
 }
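
The static initializers deleted here and in the commands below registered a WritableFactory so the RPC layer could reflectively create an empty instance before calling readFields(); that is also why each class carried a public no-arg constructor. Below is a minimal, hypothetical stand-in for that registry idiom (CommandRegistrySketch and its names are not Hadoop APIs), showing the pattern that goes away with generated protobuf messages.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Supplier;

    // Hypothetical, simplified stand-in for the factory registry populated by the
    // deleted static blocks: the deserializer looks up a factory by class, creates
    // an empty instance, then fills it from the wire.
    public class CommandRegistrySketch {
      private static final Map<Class<?>, Supplier<?>> FACTORIES = new HashMap<>();

      static <T> void register(Class<T> type, Supplier<T> factory) {
        FACTORIES.put(type, factory);
      }

      @SuppressWarnings("unchecked")
      static <T> T newInstance(Class<T> type) {
        return (T) FACTORIES.get(type).get();  // relies on a no-arg creation path
      }

      public static void main(String[] args) {
        register(StringBuilder.class, StringBuilder::new);
        StringBuilder empty = newInstance(StringBuilder.class);
        System.out.println(empty.length());    // prints 0
      }
    }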

+ 1 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java

@@ -27,10 +27,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public abstract class DatanodeCommand extends ServerCommand {
-  public DatanodeCommand() {
-    super();
-  }
-  
+
   DatanodeCommand(int action) {
     super(action);
   }

+ 1 - 45
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java

@@ -18,20 +18,12 @@
 
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 /** 
  * DatanodeRegistration class contains all information the name-node needs
@@ -41,23 +33,11 @@ import org.apache.hadoop.io.WritableFactory;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class DatanodeRegistration extends DatanodeID
-implements Writable, NodeRegistration {
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (DatanodeRegistration.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new DatanodeRegistration(); }
-       });
-  }
+    implements NodeRegistration {
 
   private StorageInfo storageInfo;
   private ExportedBlockKeys exportedKeys;
 
-  public DatanodeRegistration() {
-    this("", DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
-        new StorageInfo(), new ExportedBlockKeys());
-  }
-  
   public DatanodeRegistration(DatanodeID dn, StorageInfo info,
       ExportedBlockKeys keys) {
     super(dn);
@@ -118,30 +98,6 @@ implements Writable, NodeRegistration {
       + ")";
   }
 
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
-  @Override
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-
-    //TODO: move it to DatanodeID once HADOOP-2797 has been committed
-    out.writeShort(ipcPort);
-
-    storageInfo.write(out);
-    exportedKeys.write(out);
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-
-    //TODO: move it to DatanodeID once HADOOP-2797 has been committed
-    this.ipcPort = in.readShort() & 0x0000ffff;
-
-    storageInfo.readFields(in);
-    exportedKeys.readFields(in);
-  }
   @Override
   public boolean equals(Object to) {
     return super.equals(to);

+ 0 - 26
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FinalizeCommand.java

@@ -17,16 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-import org.apache.hadoop.io.WritableUtils;
 
 /**
  * A BlockCommand is an instruction to a datanode to register with the namenode.
@@ -34,17 +26,6 @@ import org.apache.hadoop.io.WritableUtils;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class FinalizeCommand extends DatanodeCommand {
-  // /////////////////////////////////////////
-  // Writable
-  // /////////////////////////////////////////
-  static { // register a ctor
-    WritableFactories.setFactory(FinalizeCommand.class, new WritableFactory() {
-      public Writable newInstance() {
-        return new FinalizeCommand();
-      }
-    });
-  }
-  
   String blockPoolId;
   private FinalizeCommand() {
     super(DatanodeProtocol.DNA_FINALIZE);
@@ -58,11 +39,4 @@ public class FinalizeCommand extends DatanodeCommand {
   public String getBlockPoolId() {
     return blockPoolId;
   }
-  
-  public void readFields(DataInput in) throws IOException {
-    blockPoolId = WritableUtils.readString(in);
-  }
-  public void write(DataOutput out) throws IOException {
-    WritableUtils.writeString(out, blockPoolId);
-  }
 }

+ 1 - 38
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java

@@ -17,31 +17,21 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.ObjectWritable;
-import org.apache.hadoop.io.Writable;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 /**
  * Response to {@link DatanodeProtocol#sendHeartbeat}
  */
-public class HeartbeatResponse implements Writable {
+public class HeartbeatResponse {
   /** Commands returned from the namenode to the datanode */
   private DatanodeCommand[] commands;
   
   /** Information about the current HA-related state of the NN */
   private NNHAStatusHeartbeat haStatus;
   
-  public HeartbeatResponse() {
-    // Empty constructor required for Writable
-  }
-  
   public HeartbeatResponse(DatanodeCommand[] cmds,
       NNHAStatusHeartbeat haStatus) {
     commands = cmds;
@@ -55,31 +45,4 @@ public class HeartbeatResponse implements Writable {
   public NNHAStatusHeartbeat getNameNodeHaState() {
     return haStatus;
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  @Override
-  public void write(DataOutput out) throws IOException {
-    int length = commands == null ? 0 : commands.length;
-    out.writeInt(length);
-    for (int i = 0; i < length; i++) {
-      ObjectWritable.writeObject(out, commands[i], commands[i].getClass(),
-                                 null, true);
-    }
-    haStatus.write(out);
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    int length = in.readInt();
-    commands = new DatanodeCommand[length];
-    ObjectWritable objectWritable = new ObjectWritable();
-    for (int i = 0; i < length; i++) {
-      commands[i] = (DatanodeCommand) ObjectWritable.readObject(in,
-          objectWritable, null);
-    }
-    haStatus = new NNHAStatusHeartbeat();
-    haStatus.readFields(in);
-  }
 }

+ 0 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java

@@ -42,9 +42,6 @@ public interface InterDatanodeProtocol {
    * the interface to the DN AND the RPC protocol used to communicate with the 
    * DN.
    * 
-   * Post version 6L (release 23 of Hadoop), the protocol is implemented in
-   * {@literal ../protocolR23Compatible/InterDatanodeWireProtocol}
-   * 
    * This class is used by both the DN to insulate from the protocol 
    * serialization.
    * 

+ 0 - 32
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java

@@ -17,16 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -45,29 +38,4 @@ public class KeyUpdateCommand extends DatanodeCommand {
   public ExportedBlockKeys getExportedKeys() {
     return this.keys;
   }
-
-  // ///////////////////////////////////////////////
-  // Writable
-  // ///////////////////////////////////////////////
-  static { // register a ctor
-    WritableFactories.setFactory(KeyUpdateCommand.class, new WritableFactory() {
-      public Writable newInstance() {
-        return new KeyUpdateCommand();
-      }
-    });
-  }
-
-  /**
-   */
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    keys.write(out);
-  }
-
-  /**
-   */
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    keys.readFields(in);
-  }
 }

+ 1 - 25
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java

@@ -17,26 +17,17 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class NNHAStatusHeartbeat implements Writable {
+public class NNHAStatusHeartbeat {
 
   private State state;
   private long txid = HdfsConstants.INVALID_TXID;
   
-  public NNHAStatusHeartbeat() {
-  }
-  
   public NNHAStatusHeartbeat(State state, long txid) {
     this.state = state;
     this.txid = txid;
@@ -50,21 +41,6 @@ public class NNHAStatusHeartbeat implements Writable {
     return txid;
   }
   
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  @Override
-  public void write(DataOutput out) throws IOException {
-    WritableUtils.writeEnum(out, state);
-    out.writeLong(txid);
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    state = WritableUtils.readEnum(in, State.class);
-    txid = in.readLong();
-  }
-
   @InterfaceAudience.Private
   public enum State {
     ACTIVE,

+ 0 - 14
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeCommand.java

@@ -19,9 +19,6 @@ package org.apache.hadoop.hdfs.server.protocol;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 /**
  * Base class for name-node command.
@@ -30,17 +27,6 @@ import org.apache.hadoop.io.WritableFactory;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class NamenodeCommand extends ServerCommand {
-  static {
-    WritableFactories.setFactory(NamenodeCommand.class,
-        new WritableFactory() {
-          public Writable newInstance() {return new NamenodeCommand();}
-        });
-  }
-
-  public NamenodeCommand() {
-    super();
-  }
-
   public NamenodeCommand(int action) {
     super(action);
   }

+ 0 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java

@@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
-import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.KerberosInfo;
 
 /*****************************************************************************
@@ -42,9 +41,6 @@ public interface NamenodeProtocol {
    * the client interface to the NN AND the RPC protocol used to 
    * communicate with the NN.
    * 
-   * Post version 70 (release 23 of Hadoop), the protocol is implemented in
-   * {@literal ../protocolR23Compatible/ClientNamenodeWireProtocol}
-   * 
    * This class is used by both the DFSClient and the 
    * NN server side to insulate from the protocol serialization.
    * 

+ 0 - 39
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java

@@ -18,14 +18,6 @@
 
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.common.Storage;
@@ -44,10 +36,6 @@ implements NodeRegistration {
   String httpAddress;         // HTTP address of the node
   NamenodeRole role;          // node role
 
-  public NamenodeRegistration() {
-    super();
-  }
-
   public NamenodeRegistration(String address,
                               String httpAddress,
                               StorageInfo storageInfo,
@@ -95,31 +83,4 @@ implements NodeRegistration {
   public boolean isRole(NamenodeRole that) {
     return role.equals(that);
   }
-
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
-  static {
-    WritableFactories.setFactory
-      (NamenodeRegistration.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new NamenodeRegistration(); }
-       });
-  }
-
-  @Override // Writable
-  public void write(DataOutput out) throws IOException {
-    Text.writeString(out, rpcAddress);
-    Text.writeString(out, httpAddress);
-    Text.writeString(out, role.name());
-    super.write(out);
-  }
-
-  @Override // Writable
-  public void readFields(DataInput in) throws IOException {
-    rpcAddress = Text.readString(in);
-    httpAddress = Text.readString(in);
-    role = NamenodeRole.valueOf(Text.readString(in));
-    super.readFields(in);
-  }
 }

+ 0 - 32
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java

@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -28,11 +26,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
-import org.apache.hadoop.hdfs.DeprecatedUTF8;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-import org.apache.hadoop.io.WritableUtils;
 
 /**
  * NamespaceInfo is returned by the name-node in reply 
@@ -76,31 +69,6 @@ public class NamespaceInfo extends StorageInfo {
     return blockPoolID;
   }
 
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (NamespaceInfo.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new NamespaceInfo(); }
-       });
-  }
-
-  public void write(DataOutput out) throws IOException {
-    DeprecatedUTF8.writeString(out, getBuildVersion());
-    super.write(out);
-    out.writeInt(getDistributedUpgradeVersion());
-    WritableUtils.writeString(out, blockPoolID);
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    buildVersion = DeprecatedUTF8.readString(in);
-    super.readFields(in);
-    distributedUpgradeVersion = in.readInt();
-    blockPoolID = WritableUtils.readString(in);
-  }
-  
   public String toString(){
     return super.toString() + ";bpid=" + blockPoolID;
   }

+ 1 - 27
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java

@@ -18,19 +18,12 @@
 
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
 
 /**
  * A data structure to store the blocks in an incremental block report. 
  */
-public class ReceivedDeletedBlockInfo implements Writable {
+public class ReceivedDeletedBlockInfo {
   Block block;
   BlockStatus status;
   String delHints;
@@ -113,25 +106,6 @@ public class ReceivedDeletedBlockInfo implements Writable {
     return status == BlockStatus.DELETED_BLOCK;
   }
 
-  @Override
-  public void write(DataOutput out) throws IOException {
-    this.block.write(out);
-    WritableUtils.writeVInt(out, this.status.code);
-    if (this.status == BlockStatus.DELETED_BLOCK) {
-      Text.writeString(out, this.delHints);
-    }
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    this.block = new Block();
-    this.block.readFields(in);
-    this.status = BlockStatus.fromCode(WritableUtils.readVInt(in));
-    if (this.status == BlockStatus.DELETED_BLOCK) {
-      this.delHints = Text.readString(in);
-    }
-  }
-
   public String toString() {
     return block.toString() + ", status: " + status +
       ", delHint: " + delHints;

+ 0 - 22
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java

@@ -17,14 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 /**
  * A BlockCommand is an instruction to a datanode to register with the namenode.
@@ -32,26 +26,10 @@ import org.apache.hadoop.io.WritableFactory;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class RegisterCommand extends DatanodeCommand {
-  // /////////////////////////////////////////
-  // Writable
-  // /////////////////////////////////////////
-  static { // register a ctor
-    WritableFactories.setFactory(RegisterCommand.class, new WritableFactory() {
-      public Writable newInstance() {
-        return new RegisterCommand();
-      }
-    });
-  }
   
   public static final DatanodeCommand REGISTER = new RegisterCommand();
 
   public RegisterCommand() {
     super(DatanodeProtocol.DNA_REGISTER);
   }
-
-  @Override
-  public void readFields(DataInput in) { }
- 
-  @Override
-  public void write(DataOutput out) { }
 }

+ 0 - 33
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java

@@ -18,17 +18,10 @@
 
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 /**
  * Replica recovery information.
@@ -38,9 +31,6 @@ import org.apache.hadoop.io.WritableFactory;
 public class ReplicaRecoveryInfo extends Block {
   private ReplicaState originalState;
 
-  public ReplicaRecoveryInfo() {
-  }
-
   public ReplicaRecoveryInfo(long blockId, long diskLen, long gs, ReplicaState rState) {
     set(blockId, diskLen, gs);
     originalState = rState;
@@ -59,27 +49,4 @@ public class ReplicaRecoveryInfo extends Block {
   public int hashCode() {
     return super.hashCode();
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (ReplicaRecoveryInfo.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new ReplicaRecoveryInfo(); }
-       });
-  }
-
- @Override
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    originalState = ReplicaState.read(in); 
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    originalState.write(out);
-  }
 }

+ 1 - 26
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java

@@ -17,11 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.*;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
 
 /**
  * Base class for a server command.
@@ -33,20 +30,9 @@ import org.apache.hadoop.io.Writable;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public abstract class ServerCommand implements Writable {
+public abstract class ServerCommand {
   private int action;
 
-  /**
-   * Unknown server command constructor.
-   * Creates a command with action 0.
-   * 
-   * @see NamenodeProtocol#ACT_UNKNOWN
-   * @see DatanodeProtocol#DNA_UNKNOWN
-   */
-  public ServerCommand() {
-    this(0);
-  }
-
   /**
    * Create a command for the specified action.
    * Actions are protocol specific.
@@ -66,15 +52,4 @@ public abstract class ServerCommand implements Writable {
   public int getAction() {
     return this.action;
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  public void write(DataOutput out) throws IOException {
-    out.writeInt(this.action);
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    this.action = in.readInt();
-  }
 }

+ 0 - 34
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java

@@ -17,15 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
 
 /**
  * This as a generic distributed upgrade command.
@@ -68,31 +61,4 @@ public class UpgradeCommand extends DatanodeCommand {
   public short getCurrentStatus() {
     return this.upgradeStatus;
   }
-
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (UpgradeCommand.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new UpgradeCommand(); }
-       });
-  }
-
-  /**
-   */
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    out.writeInt(this.version);
-    out.writeShort(this.upgradeStatus);
-  }
-
-  /**
-   */
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    this.version = in.readInt();
-    this.upgradeStatus = in.readShort();
-  }
 }

+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java

@@ -248,7 +248,6 @@ public class GetConf extends Configured implements Tool {
     @Override
     int doWorkInternal(GetConf tool, String[] args) throws Exception {
       this.key = args[0];
-      System.err.println("key: " + key);
       return super.doWorkInternal(tool, args);
     }
   }

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 
 import org.apache.hadoop.hdfs.util.XMLUtils.Stanza;
 import org.xml.sax.Attributes;
@@ -54,6 +55,7 @@ class OfflineEditsXmlLoader
   private FSEditLogOpCodes opCode;
   private StringBuffer cbuf;
   private long nextTxId;
+  private final OpInstanceCache opCache = new OpInstanceCache();
   
   static enum ParseState {
     EXPECT_EDITS_TAG,
@@ -207,7 +209,7 @@ class OfflineEditsXmlLoader
         throw new InvalidXmlException("expected </DATA>");
       }
       state = ParseState.EXPECT_RECORD;
-        FSEditLogOp op = FSEditLogOp.getOpInstance(opCode);
+        FSEditLogOp op = opCache.get(opCode);
       opCode = null;
       try {
         op.decodeXml(stanza);
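
The loader now draws its FSEditLogOp instances from a per-loader OpInstanceCache instead of the static FSEditLogOp.getOpInstance(opCode), so reused op objects are scoped to one loader rather than shared globally. Below is a generic sketch of such a per-owner, enum-keyed instance cache; the names (OpCode, InstanceCacheSketch) are illustrative, not the actual Hadoop classes.

    import java.util.EnumMap;
    import java.util.function.Supplier;

    // Hypothetical per-owner cache: one reusable instance per enum key, created
    // lazily and never shared with other cache owners.
    public class InstanceCacheSketch {
      enum OpCode { OP_ADD, OP_DELETE, OP_RENAME }

      static final class InstanceCache<V> {
        private final EnumMap<OpCode, V> cache = new EnumMap<>(OpCode.class);
        private final Supplier<V> factory;

        InstanceCache(Supplier<V> factory) { this.factory = factory; }

        V get(OpCode code) {
          return cache.computeIfAbsent(code, c -> factory.get());
        }
      }

      public static void main(String[] args) {
        InstanceCache<StringBuilder> cache = new InstanceCache<>(StringBuilder::new);
        // The same key always yields the same reusable instance for this owner.
        System.out.println(cache.get(OpCode.OP_ADD) == cache.get(OpCode.OP_ADD));    // true
        System.out.println(cache.get(OpCode.OP_ADD) == cache.get(OpCode.OP_DELETE)); // false
      }
    }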

+ 5 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto

@@ -38,8 +38,11 @@ message InitReplicaRecoveryRequestProto {
  * Repica recovery information
  */
 message InitReplicaRecoveryResponseProto {
-  required ReplicaStateProto state = 1; // State of the replica
-  required BlockProto block = 2;   // block information
+  required bool replicaFound = 1;
+
+  // The following entries are not set if there was no replica found.
+  optional ReplicaStateProto state = 2; // State of the replica
+  optional BlockProto block = 3;   // block information
 }
 
 /**
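
With replicaFound as the only required field, the datanode can report "no replica" without fabricating state or block data, and the Java side can surface that case to callers as a null recovery result (consistent with the assertNull added to TestInterDatanodeProtocol further down). A hand-written sketch of that decoding convention, using illustrative types rather than the generated InitReplicaRecoveryResponseProto:

    // Illustrative only: a hand-written mirror of the new response shape.
    // The real code would use the protobuf-generated response class.
    public class ReplicaRecoverySketch {
      static final class Response {
        final boolean replicaFound;
        final String state;   // stand-in for ReplicaStateProto
        final Long blockId;   // stand-in for BlockProto

        Response(boolean replicaFound, String state, Long blockId) {
          this.replicaFound = replicaFound;
          this.state = state;
          this.blockId = blockId;
        }
      }

      // Translate the wire response into the caller-facing convention:
      // a null result when no replica was found on the datanode.
      static String decode(Response resp) {
        if (!resp.replicaFound) {
          return null;
        }
        return resp.state + "/" + resp.blockId;
      }

      public static void main(String[] args) {
        System.out.println(decode(new Response(true, "RWR", 42L)));  // RWR/42
        System.out.println(decode(new Response(false, null, null))); // null
      }
    }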

+ 11 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -57,6 +57,7 @@ import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -705,4 +706,14 @@ public class DFSTestUtil {
     conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, Joiner.on(",")
         .join(nameservices));
   }
+  
+  public static DatanodeDescriptor getLocalDatanodeDescriptor() {
+    return new DatanodeDescriptor(
+        new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
+  }
+
+  public static DatanodeInfo getLocalDatanodeInfo() {
+    return new DatanodeInfo(
+        new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
+  }
 }
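
These helpers replace the no-arg DatanodeInfo()/DatanodeDescriptor() constructors that the tests below previously relied on. A short usage sketch, assuming the Hadoop test classpath and that DatanodeDescriptor still lives under server.blockmanagement (both assumptions, not shown in this diff):

    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

    // Example usage mirroring the test changes further down (TestDFSUtil,
    // TestBlockInfo, TestPBHelper): build a synthetic local datanode identity
    // instead of calling the removed no-arg constructors.
    public class LocalDatanodeExample {
      public static void main(String[] args) {
        DatanodeInfo info = DFSTestUtil.getLocalDatanodeInfo();
        DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
        System.out.println(info + " / " + dd);
      }
    }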

+ 10 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -583,6 +583,10 @@ public class MiniDFSCluster {
       }
     }
     
+    if (operation == StartupOption.RECOVER) {
+      return;
+    }
+
     // Start the DataNodes
     startDataNodes(conf, numDataNodes, manageDataDfsDirs, operation, racks,
         hosts, simulatedCapacities, setupHostsFile);
@@ -783,6 +787,9 @@ public class MiniDFSCluster {
                      operation == StartupOption.REGULAR) ?
       new String[] {} : new String[] {operation.getName()};
     NameNode nn =  NameNode.createNameNode(args, conf);
+    if (operation == StartupOption.RECOVER) {
+      return;
+    }
     
     // After the NN has started, set back the bound ports into
     // the conf
@@ -958,6 +965,9 @@ public class MiniDFSCluster {
                              long[] simulatedCapacities,
                              boolean setupHostsFile,
                              boolean checkDataNodeAddrConfig) throws IOException {
+    if (operation == StartupOption.RECOVER) {
+      return;
+    }
    conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");

    int curDatanodesNum = dataNodes.size();

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java

@@ -62,7 +62,7 @@ public class TestDFSUtil {
    */
   @Test
   public void testLocatedBlocks2Locations() {
-    DatanodeInfo d = new DatanodeInfo();
+    DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
     DatanodeInfo[] ds = new DatanodeInfo[1];
     ds[0] = d;


+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java

@@ -121,7 +121,9 @@ public class TestGetBlocks extends TestCase {
       getBlocksWithException(namenode, dataNodes[0], -1);
 
       // get blocks of size BlockSize from a non-existent datanode
-      getBlocksWithException(namenode, new DatanodeInfo(), 2);
+      DatanodeInfo info = DFSTestUtil.getLocalDatanodeInfo();
+      info.setIpAddr("1.2.3.4");
+      getBlocksWithException(namenode, info, 2);
     } finally {
       cluster.shutdown();
     }
@@ -132,7 +134,7 @@
                                       long size) throws IOException {
     boolean getException = false;
     try {
-        namenode.getBlocks(new DatanodeInfo(), 2);
+        namenode.getBlocks(DFSTestUtil.getLocalDatanodeInfo(), 2);
     } catch(RemoteException e) {
       getException = true;
       assertTrue(e.getClassName().contains("HadoopIllegalArgumentException"));

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java

@@ -179,7 +179,7 @@ public class TestParallelReadUtil {
    */
   static class ReadWorker extends Thread {
 
-    static public final int N_ITERATIONS = 1024 * 4;
+    static public final int N_ITERATIONS = 1024;
 
     private static final double PROPORTION_NON_POSITIONAL_READ = 0.10;
 
 

+ 0 - 79
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestCorruptFileBlocks.java

@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.protocol;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-
-import static org.junit.Assert.assertTrue;
-import org.junit.Test;
-
-import org.apache.hadoop.io.DataOutputBuffer;
-
-public class TestCorruptFileBlocks {
-
-  /**
-   * Serialize the cfb given, deserialize and return the result.
-   */
-  static CorruptFileBlocks serializeAndDeserialize(CorruptFileBlocks cfb) 
-    throws IOException {
-    DataOutputBuffer buf = new DataOutputBuffer();
-    cfb.write(buf);
-
-    byte[] data = buf.getData();
-    DataInputStream input = new DataInputStream(new ByteArrayInputStream(data));
-
-    CorruptFileBlocks result = new CorruptFileBlocks();
-    result.readFields(input);
-
-    return result;
-  }
-
-  /**
-   * Check whether cfb is unchanged after serialization and deserialization.
-   */
-  static boolean checkSerialize(CorruptFileBlocks cfb)
-    throws IOException {
-    return cfb.equals(serializeAndDeserialize(cfb));
-  }
-
-  /**
-   * Test serialization and deserializaton of CorruptFileBlocks.
-   */
-  @Test
-  public void testSerialization() throws IOException {
-    {
-      CorruptFileBlocks cfb = new CorruptFileBlocks();
-      assertTrue("cannot serialize empty CFB", checkSerialize(cfb));
-    }
-
-    {
-      String[] files = new String[0];
-      CorruptFileBlocks cfb = new CorruptFileBlocks(files, "");
-      assertTrue("cannot serialize CFB with empty cookie", checkSerialize(cfb));
-    }
-
-    {
-      String[] files = { "a", "bb", "ccc" };
-      CorruptFileBlocks cfb = new CorruptFileBlocks(files, "test");
-      assertTrue("cannot serialize CFB", checkSerialize(cfb));
-    }
-  }
-}

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java

@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -441,9 +442,9 @@ public class TestPBHelper {
     Block[] blocks = new Block[] { new Block(21), new Block(22) };
     DatanodeInfo[][] dnInfos = new DatanodeInfo[][] { new DatanodeInfo[1],
         new DatanodeInfo[2] };
-    dnInfos[0][0] = new DatanodeInfo();
-    dnInfos[1][0] = new DatanodeInfo();
-    dnInfos[1][1] = new DatanodeInfo();
+    dnInfos[0][0] = DFSTestUtil.getLocalDatanodeInfo();
+    dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
+    dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
     BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1",
         blocks, dnInfos);
     BlockCommandProto bcProto = PBHelper.convert(bc);

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java

@@ -26,6 +26,7 @@ import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -47,7 +48,7 @@ public class TestBlockInfo {
 
     final int MAX_BLOCKS = 10;
 
-    DatanodeDescriptor dd = new DatanodeDescriptor();
+    DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
     ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
     ArrayList<BlockInfo> blockInfoList = new ArrayList<BlockInfo>();
     int headIndex;

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java

@@ -28,6 +28,7 @@ import junit.framework.TestCase;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 
 
@@ -80,8 +81,8 @@ public class TestCorruptReplicaInfo extends TestCase {
         block_ids.add((long)i);
       }
       
-      DatanodeDescriptor dn1 = new DatanodeDescriptor();
-      DatanodeDescriptor dn2 = new DatanodeDescriptor();
+      DatanodeDescriptor dn1 = DFSTestUtil.getLocalDatanodeDescriptor();
+      DatanodeDescriptor dn2 = DFSTestUtil.getLocalDatanodeDescriptor();
      
      crm.addToCorruptReplicasMap(getBlock(0), dn1, "TEST");
      assertEquals("Number of corrupt blocks not returning correctly",

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
 
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 
@@ -36,7 +37,7 @@ public class TestDatanodeDescriptor extends TestCase {
     final int REMAINING_BLOCKS = 2;
     final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;
     
-    DatanodeDescriptor dd = new DatanodeDescriptor();
+    DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
     ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
     for (int i=0; i<MAX_BLOCKS; i++) {
       blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
@@ -49,7 +50,7 @@
   }
   
   public void testBlocksCounter() throws Exception {
-    DatanodeDescriptor dd = new DatanodeDescriptor();
+    DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
     assertEquals(0, dd.numBlocks());
     BlockInfo blk = new BlockInfo(new Block(1L), 1);
     BlockInfo blk1 = new BlockInfo(new Block(2L), 2);

+ 31 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java

@@ -17,12 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.junit.Assert.*;
 import java.io.File;
 import java.io.IOException;
 
-import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -36,13 +37,15 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.junit.Test;
 
-public class TestOverReplicatedBlocks extends TestCase {
+public class TestOverReplicatedBlocks {
   /** Test processOverReplicatedBlock can handle corrupt replicas fine.
    * It make sure that it won't treat corrupt replicas as valid ones 
    * thus prevents NN deleting valid replicas but keeping
    * corrupt ones.
    */
+  @Test
   public void testProcesOverReplicateBlock() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
@@ -113,4 +116,30 @@
       cluster.shutdown();
     }
   }
+  /**
+   * Test over replicated block should get invalidated when decreasing the
+   * replication for a partial block.
+   */
+  @Test
+  public void testInvalidateOverReplicatedBlock() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
+        .build();
+    try {
+      final FSNamesystem namesystem = cluster.getNamesystem();
+      final BlockManager bm = namesystem.getBlockManager();
+      FileSystem fs = cluster.getFileSystem();
+      Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
+      FSDataOutputStream out = fs.create(p, (short) 2);
+      out.writeBytes("HDFS-3119: " + p);
+      out.hsync();
+      fs.setReplication(p, (short) 1);
+      out.close();
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
+      assertEquals("Expected only one live replica for the block", 1, bm
+          .countNodes(block.getLocalBlock()).liveReplicas());
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }

+ 0 - 78
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestStorageInfo.java

@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.common;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutput;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import junit.framework.Assert;
-import junit.framework.TestCase;
-
-/**
- * This is a unit test, which tests {@link Util#stringAsURI(String)}
- * for IDs being used in HDFS, e.g. ClusterID and BlockPoolID.
- */
-public class TestStorageInfo extends TestCase {
-
-  /**
-   * Test write() / readFieds() of StroageInfo.  Write StorageInfo into a buffer
-   * then read it back and the result should be the same with the original one.
-   * @throws IOException 
-   */
-  public void testStorageInfo() throws IOException {
-    
-    int nsID = 123;
-    String cid = "cid-test";
-    int layoutV = 234;
-    long cT = 0L;
-    
-    StorageInfo sinfo = new StorageInfo(layoutV, nsID, cid,  cT);
-    
-    Assert.assertNotNull(sinfo);
-
-    ByteArrayOutputStream bos = new ByteArrayOutputStream();
-    DataOutput output = new DataOutputStream(bos);
-
-    try {
-        // we need to first create an DataOutputStream for sinfo to write into
-        sinfo.write(output);
-        //remember to close the DataOutputStream 
-        //to make sure the data has been written
-        bos.close();
-        
-        // convert ByteArrayInputStream to ByteArrayOutputStream
-        ByteArrayInputStream bis = new ByteArrayInputStream(bos.toByteArray());
-        DataInputStream dataInputStream = new DataInputStream(bis);
-
-        StorageInfo secondsinfo = new StorageInfo();
-        secondsinfo.readFields(dataInputStream);
-        
-        // compare
-        Assert.assertEquals(sinfo.getClusterID(), secondsinfo.getClusterID());
-        Assert.assertEquals(sinfo.getNamespaceID(), secondsinfo.getNamespaceID());
-        Assert.assertEquals(sinfo.getLayoutVersion(), secondsinfo.getLayoutVersion());
-        Assert.assertEquals(sinfo.getCTime(), secondsinfo.getCTime());
-    }catch (IOException e) {
-      e.getMessage();
-    }
-  }
-}
-

+ 8 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java

@@ -17,8 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -172,6 +171,13 @@ public class TestInterDatanodeProtocol {
           b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
       idp.updateReplicaUnderRecovery(b, recoveryId, newblock.getNumBytes());
       checkMetaInfo(newblock, datanode);
+      
+      // Verify correct null response trying to init recovery for a missing block
+      ExtendedBlock badBlock = new ExtendedBlock("fake-pool",
+          b.getBlockId(), 0, 0);
+      assertNull(idp.initReplicaRecovery(
+          new RecoveringBlock(badBlock,
+              locatedblock.getLocations(), recoveryId)));
     }
     finally {
       if (cluster != null) {cluster.shutdown();}

+ 384 - 17
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java

@@ -18,12 +18,19 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.PrintStream;
 import java.net.URI;
-import java.util.ArrayList;
+import java.security.Permission;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
@@ -40,11 +47,11 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-
 public class TestClusterId {
   private static final Log LOG = LogFactory.getLog(TestClusterId.class);
   File hdfsDir;
-  
+  Configuration config;
+
   private String getClusterId(Configuration config) throws IOException {
     // see if cluster id not empty.
     Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(config);
@@ -59,33 +66,41 @@ public class TestClusterId {
     LOG.info("successfully formated : sd="+sd.getCurrentDir() + ";cid="+cid);
     return cid;
   }
-  
+
   @Before
   public void setUp() throws IOException {
+    System.setSecurityManager(new NoExitSecurityManager());
+
     String baseDir = System.getProperty("test.build.data", "build/test/data");
 
-    hdfsDir = new File(baseDir, "dfs");
-    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
-      throw new IOException("Could not delete test directory '" + 
-          hdfsDir + "'");
+    hdfsDir = new File(baseDir, "dfs/name");
+    if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
+      throw new IOException("Could not delete test directory '" + hdfsDir + "'");
     }
     LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());
+
+    // as some tests might change these values we reset them to defaults before
+    // every test
+    StartupOption.FORMAT.setForceFormat(false);
+    StartupOption.FORMAT.setInteractiveFormat(true);
+    
+    config = new Configuration();
+    config.set(DFS_NAMENODE_NAME_DIR_KEY, hdfsDir.getPath());
   }
-  
+
   @After
   public void tearDown() throws IOException {
-    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
-      throw new IOException("Could not tearDown test directory '" +
-          hdfsDir + "'");
+    System.setSecurityManager(null);
+
+    if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
+      throw new IOException("Could not tearDown test directory '" + hdfsDir
+          + "'");
     }
   }
-  
+
   @Test
   public void testFormatClusterIdOption() throws IOException {
-    Configuration config = new Configuration();
     
-    config.set(DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name").getPath());
-
     // 1. should format without cluster id
     //StartupOption.FORMAT.setClusterId("");
     NameNode.format(config);
@@ -107,4 +122,356 @@ public class TestClusterId {
     String newCid = getClusterId(config);
     assertFalse("ClusterId should not be the same", newCid.equals(cid));
   }
-}
+
+  /**
+   * Test namenode format with -format option. Format should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormat() throws IOException {
+    String[] argv = { "-format" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format option when an empty name directory
+   * exists. Format should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithEmptyDir() throws IOException {
+
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    String[] argv = { "-format" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format -force options when name directory
+   * exists. Format should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithForce() throws IOException {
+
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    String[] argv = { "-format", "-force" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format -force -clusterid option when name
+   * directory exists. Format should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithForceAndClusterId() throws IOException {
+
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    String myId = "testFormatWithForceAndClusterId";
+    String[] argv = { "-format", "-force", "-clusterid", myId };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cId = getClusterId(config);
+    assertEquals("ClusterIds do not match", myId, cId);
+  }
+
+  /**
+   * Test namenode format with -clusterid -force option. Format command should
+   * fail as no cluster id was provided.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithInvalidClusterIdOption() throws IOException {
+
+    String[] argv = { "-format", "-clusterid", "-force" };
+    PrintStream origErr = System.err;
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream stdErr = new PrintStream(baos);
+    System.setErr(stdErr);
+
+    NameNode.createNameNode(argv, config);
+
+    // Check if usage is printed
+    assertTrue(baos.toString("UTF-8").contains("Usage: java NameNode"));
+    System.setErr(origErr);
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Check version should not exist", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -clusterid options. Format should fail
+   * as no clusterid was provided.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithNoClusterIdOption() throws IOException {
+
+    String[] argv = { "-format", "-clusterid" };
+    PrintStream origErr = System.err;
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream stdErr = new PrintStream(baos);
+    System.setErr(stdErr);
+
+    NameNode.createNameNode(argv, config);
+
+    // Check if usage is printed
+    assertTrue(baos.toString("UTF-8").contains("Usage: java NameNode"));
+    System.setErr(origErr);
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Check version should not exist", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -clusterid and empty clusterid. Format
+   * should fail as no valid id was provided.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithEmptyClusterIdOption() throws IOException {
+
+    String[] argv = { "-format", "-clusterid", "" };
+
+    PrintStream origErr = System.err;
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream stdErr = new PrintStream(baos);
+    System.setErr(stdErr);
+
+    NameNode.createNameNode(argv, config);
+
+    // Check if usage is printed
+    assertTrue(baos.toString("UTF-8").contains("Usage: java NameNode"));
+    System.setErr(origErr);
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Check version should not exist", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -nonInteractive options when a non empty
+   * name directory exists. Format should not succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithNonInteractive() throws IOException {
+
+    // we check for a non empty dir, so create a child path
+    File data = new File(hdfsDir, "file");
+    if (!data.mkdirs()) {
+      fail("Failed to create dir " + data.getPath());
+    }
+
+    String[] argv = { "-format", "-nonInteractive" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have been aborted with exit code 1", 1,
+          e.status);
+    }
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Check version should not exist", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -nonInteractive options when name
+   * directory does not exist. Format should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithNonInteractiveNameDirDoesNotExit()
+      throws IOException {
+
+    String[] argv = { "-format", "-nonInteractive" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format -nonInteractive -force options. Format
+   * should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithNonInteractiveAndForce() throws IOException {
+
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    String[] argv = { "-format", "-nonInteractive", "-force" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format option when a non empty name directory
+   * exists. Enter Y when prompted and the format should succeed.
+   * 
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  @Test
+  public void testFormatWithoutForceEnterYes() throws IOException,
+      InterruptedException {
+
+    // we check for a non empty dir, so create a child path
+    File data = new File(hdfsDir, "file");
+    if (!data.mkdirs()) {
+      fail("Failed to create dir " + data.getPath());
+    }
+
+    // capture the input stream
+    InputStream origIn = System.in;
+    ByteArrayInputStream bins = new ByteArrayInputStream("Y\n".getBytes());
+    System.setIn(bins);
+
+    String[] argv = { "-format" };
+
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    System.setIn(origIn);
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format option when a non empty name directory
+   * exists. Enter N when prompted and format should be aborted.
+   * 
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  @Test
+  public void testFormatWithoutForceEnterNo() throws IOException,
+      InterruptedException {
+
+    // we check for a non empty dir, so create a child path
+    File data = new File(hdfsDir, "file");
+    if (!data.mkdirs()) {
+      fail("Failed to create dir " + data.getPath());
+    }
+
+    // capture the input stream
+    InputStream origIn = System.in;
+    ByteArrayInputStream bins = new ByteArrayInputStream("N\n".getBytes());
+    System.setIn(bins);
+
+    String[] argv = { "-format" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should not have succeeded", 1, e.status);
+    }
+
+    System.setIn(origIn);
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Check version should not exist", version.exists());
+  }
+
+  private static class ExitException extends SecurityException {
+    private static final long serialVersionUID = 1L;
+    public final int status;
+
+    public ExitException(int status) {
+      super("There is no escape!");
+      this.status = status;
+    }
+  }
+
+  private static class NoExitSecurityManager extends SecurityManager {
+    @Override
+    public void checkPermission(Permission perm) {
+      // allow anything.
+    }
+
+    @Override
+    public void checkPermission(Permission perm, Object context) {
+      // allow anything.
+    }
+
+    @Override
+    public void checkExit(int status) {
+      super.checkExit(status);
+      throw new ExitException(status);
+    }
+  }
+}
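
The new TestClusterId tests above drive NameNode.createNameNode() through paths that call System.exit(), so they install a SecurityManager that turns the exit into a catchable ExitException carrying the status code, and remove it again in tearDown(). A standalone sketch of that trap-and-restore pattern (class and method names here are illustrative; SecurityManager has since been deprecated in newer JDKs, but it was the standard mechanism at the time of this change):

    import java.security.Permission;

    public class ExitTrapSketch {

      // Thrown in place of an actual JVM exit so the caller can assert on the status.
      static class ExitException extends SecurityException {
        final int status;
        ExitException(int status) {
          super("intercepted System.exit(" + status + ")");
          this.status = status;
        }
      }

      // Permits everything except terminating the JVM.
      static class NoExitSecurityManager extends SecurityManager {
        @Override public void checkPermission(Permission perm) { /* allow */ }
        @Override public void checkPermission(Permission perm, Object ctx) { /* allow */ }
        @Override public void checkExit(int status) { throw new ExitException(status); }
      }

      public static void main(String[] args) {
        SecurityManager previous = System.getSecurityManager();
        System.setSecurityManager(new NoExitSecurityManager());
        try {
          System.exit(42);   // without the manager installed, the JVM would terminate here
        } catch (ExitException e) {
          System.out.println("trapped exit status " + e.status);   // prints 42
        } finally {
          System.setSecurityManager(previous);   // always restore, as tearDown() does
        }
      }
    }

Restoring the previous manager in a finally block mirrors tearDown(): leaving the trap installed would break any later code that legitimately needs to exit.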

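The invalid and missing -clusterid cases above assert on the printed usage text rather than on an exit status, by temporarily pointing System.err at an in-memory buffer and restoring it afterwards. A standalone sketch of that capture-and-restore idiom (the usage line printed here is only a stand-in for whatever the code under test writes to stderr):

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.PrintStream;

    public class StderrCaptureSketch {
      public static void main(String[] args) throws IOException {
        PrintStream origErr = System.err;
        ByteArrayOutputStream captured = new ByteArrayOutputStream();
        System.setErr(new PrintStream(captured, true, "UTF-8"));
        try {
          // Anything the code under test prints to stderr lands in 'captured'.
          System.err.println("Usage: java NameNode [-format] ...");   // stand-in output
        } finally {
          System.setErr(origErr);   // restore the real stderr, as the tests do
        }

        String output = captured.toString("UTF-8");
        if (!output.contains("Usage: java NameNode")) {
          throw new AssertionError("expected a usage message, got: " + output);
        }
        System.out.println("captured: " + output.trim());
      }
    }
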
Some files were not shown because too many files changed in this diff