
HDFS-3214. InterDatanodeProtocolServerSideTranslatorPB doesn't handle null response from initReplicaRecovery. Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1311124 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon, 13 years ago
commit 78b3f4493a

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -336,6 +336,9 @@ Release 2.0.0 - UNRELEASED
     HDFS-3136. Remove SLF4J dependency as HDFS does not need it to fix
     unnecessary warnings. (Jason Lowe via suresh)
 
+    HDFS-3214. InterDatanodeProtocolServerSideTranslatorPB doesn't handle
+    null response from initReplicaRecovery (todd)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)

+ 11 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java

@@ -56,9 +56,17 @@ public class InterDatanodeProtocolServerSideTranslatorPB implements
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    return InitReplicaRecoveryResponseProto.newBuilder()
-        .setBlock(PBHelper.convert(r))
-        .setState(PBHelper.convert(r.getOriginalReplicaState())).build();
+    
+    if (r == null) {
+      return InitReplicaRecoveryResponseProto.newBuilder()
+          .setReplicaFound(false)
+          .build();
+    } else {
+      return InitReplicaRecoveryResponseProto.newBuilder()
+          .setReplicaFound(true)
+          .setBlock(PBHelper.convert(r))
+          .setState(PBHelper.convert(r.getOriginalReplicaState())).build();
+    }
   }
 
   @Override
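
For context, the server-side change above maps a null return from the datanode's initReplicaRecovery into a response whose replicaFound flag is false, instead of passing null into PBHelper.convert and failing with a NullPointerException while building the response. Below is a minimal, self-contained Java sketch of the same encode pattern; ReplicaInfo and RecoveryResponse are hypothetical stand-ins for illustration, not the generated Hadoop protobuf classes.

// Hypothetical stand-ins for the generated protobuf types; illustration only.
class ReplicaInfo {
  final long blockId;
  final long numBytes;
  ReplicaInfo(long blockId, long numBytes) {
    this.blockId = blockId;
    this.numBytes = numBytes;
  }
}

class RecoveryResponse {
  final boolean replicaFound;
  final ReplicaInfo replica;   // left null when replicaFound is false

  RecoveryResponse(boolean replicaFound, ReplicaInfo replica) {
    this.replicaFound = replicaFound;
    this.replica = replica;
  }

  // Mirrors the fixed translator: a missing replica is encoded explicitly
  // rather than dereferenced while building the response.
  static RecoveryResponse of(ReplicaInfo r) {
    if (r == null) {
      return new RecoveryResponse(false, null);
    }
    return new RecoveryResponse(true, r);
  }
}

In the real code the optional block and state fields are simply left unset on the protobuf builder in the not-found case, which plays the same role as the null field here.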

+ 11 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java

@@ -85,6 +85,17 @@ public class InterDatanodeProtocolTranslatorPB implements
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
+    if (!resp.getReplicaFound()) {
+      // No replica found on the remote node.
+      return null;
+    } else {
+      if (!resp.hasBlock() || !resp.hasState()) {
+        throw new IOException("Replica was found but missing fields. " +
+            "Req: " + req + "\n" +
+            "Resp: " + resp);
+      }
+    }
+    
     BlockProto b = resp.getBlock();
     return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(),
         b.getGenStamp(), PBHelper.convert(resp.getState()));
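
On the caller's side, initReplicaRecovery can now legitimately return null, meaning the remote datanode has no replica of the block; recovery code should skip that node rather than treat the call as a failure. A short, self-contained sketch of that handling, using a hypothetical RecoverySource interface in place of the real InterDatanodeProtocol:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

class NullAwareCallerSketch {
  // Hypothetical stand-ins; the real types are InterDatanodeProtocol,
  // RecoveringBlock and ReplicaRecoveryInfo.
  interface RecoverySource {
    Long initReplicaRecovery(long blockId) throws IOException;
  }

  // Collects replica lengths from each source, skipping datanodes that
  // report no replica (a null return) instead of failing the recovery.
  static List<Long> collectReplicaLengths(List<RecoverySource> sources,
      long blockId) throws IOException {
    List<Long> lengths = new ArrayList<Long>();
    for (RecoverySource src : sources) {
      Long numBytes = src.initReplicaRecovery(blockId);
      if (numBytes == null) {
        continue;  // no replica of this block on that datanode
      }
      lengths.add(numBytes);
    }
    return lengths;
  }
}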

+ 5 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto

@@ -38,8 +38,11 @@ message InitReplicaRecoveryRequestProto {
 * Replica recovery information
  */
 message InitReplicaRecoveryResponseProto {
-  required ReplicaStateProto state = 1; // State of the replica
-  required BlockProto block = 2;   // block information
+  required bool replicaFound = 1;
+
+  // The following entries are not set if there was no replica found.
+  optional ReplicaStateProto state = 2; // State of the replica
+  optional BlockProto block = 3;   // block information
 }
 
 /**
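
With proto2 semantics (required/optional fields), the now-optional state and block fields come with generated hasState()/hasBlock() presence accessors, which is what lets the client-side translator above distinguish "replica not found" from a malformed "found but incomplete" response. A minimal sketch of that decode rule with hypothetical stand-in types, not the generated classes:

import java.io.IOException;

class DecodeSketch {
  // Hypothetical stand-in for the generated response message; nulls model
  // unset optional fields, mirroring proto2 hasX() presence checks.
  static class Response {
    boolean replicaFound;
    Long blockId;
    String state;
    boolean hasBlock() { return blockId != null; }
    boolean hasState() { return state != null; }
  }

  // Absence of a replica is a normal outcome (null result); a response that
  // claims a replica was found but omits its fields is a protocol error.
  static Long decode(Response resp) throws IOException {
    if (!resp.replicaFound) {
      return null;
    }
    if (!resp.hasBlock() || !resp.hasState()) {
      throw new IOException("Replica was found but block/state are missing");
    }
    return resp.blockId;
  }
}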

+ 8 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java

@@ -17,8 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -172,6 +171,13 @@ public class TestInterDatanodeProtocol {
           b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
       idp.updateReplicaUnderRecovery(b, recoveryId, newblock.getNumBytes());
       checkMetaInfo(newblock, datanode);
+      
+      // Verify correct null response trying to init recovery for a missing block
+      ExtendedBlock badBlock = new ExtendedBlock("fake-pool",
+          b.getBlockId(), 0, 0);
+      assertNull(idp.initReplicaRecovery(
+          new RecoveringBlock(badBlock,
+              locatedblock.getLocations(), recoveryId)));
     }
     finally {
       if (cluster != null) {cluster.shutdown();}