HDFS-2968. Protocol translator for BlockRecoveryCommand broken when multiple blocks need recovery. Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23-PB@1245833 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 13 years ago
commit 3baf787dbc

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -119,6 +119,9 @@ Release 0.23-PB - Unreleased
     HDFS-2768. BackupNode stop can not close proxy connections because
     it is not a proxy instance. (Uma Maheswara Rao G via eli)

+    HDFS-2968. Protocol translator for BlockRecoveryCommand broken when
+    multiple blocks need recovery. (todd)
+
 Release 0.23.2 - UNRELEASED

   INCOMPATIBLE CHANGES

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -767,8 +767,9 @@ public class PBHelper {
     List<RecoveringBlockProto> list = recoveryCmd.getBlocksList();
     List<RecoveringBlock> recoveringBlocks = new ArrayList<RecoveringBlock>(
         list.size());
-    for (int i = 0; i < list.size(); i++) {
-      recoveringBlocks.add(PBHelper.convert(list.get(0)));
+    
+    for (RecoveringBlockProto rbp : list) {
+      recoveringBlocks.add(PBHelper.convert(rbp));
     }
     return new BlockRecoveryCommand(recoveringBlocks);
   }
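The fix above is essentially a one-token change: the old loop advanced an index i but always converted list.get(0), so every entry of the reconstructed command was a copy of the first block and any command carrying more than one block was translated wrongly. A minimal, self-contained sketch of the failure mode, with plain strings standing in for RecoveringBlockProto (the class and variable names here are illustrative, not Hadoop code):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class GetZeroBugSketch {
  public static void main(String[] args) {
    List<String> protos = Arrays.asList("blk_1", "blk_2", "blk_3");

    // Buggy pattern: the index i is incremented but never used in the body.
    List<String> buggy = new ArrayList<String>(protos.size());
    for (int i = 0; i < protos.size(); i++) {
      buggy.add(protos.get(0)); // always converts the first element
    }
    System.out.println(buggy); // [blk_1, blk_1, blk_1]

    // Fixed pattern, matching the new for-each loop in PBHelper.
    List<String> fixed = new ArrayList<String>(protos.size());
    for (String rbp : protos) {
      fixed.add(rbp);
    }
    System.out.println(fixed); // [blk_1, blk_2, blk_3]
  }
}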

+ 11 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java

@@ -32,6 +32,8 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;

+import com.google.common.base.Joiner;
+
 /**
  * BlockRecoveryCommand is an instruction to a data-node to recover
  * the specified blocks.
@@ -138,6 +140,15 @@ public class BlockRecoveryCommand extends DatanodeCommand {
   public void add(RecoveringBlock block) {
     recoveringBlocks.add(block);
   }
+  
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("BlockRecoveryCommand(\n  ");
+    Joiner.on("\n  ").appendTo(sb, recoveringBlocks);
+    sb.append("\n)");
+    return sb.toString();
+  }

   ///////////////////////////////////////////
   // Writable
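The new toString() renders one RecoveringBlock per indented line using Guava's Joiner, presumably to make multi-block recovery commands readable in logs. A standalone sketch of the same pattern, with plain strings standing in for the blocks' string forms (an assumption for illustration):

import java.util.Arrays;
import java.util.List;

import com.google.common.base.Joiner;

public class JoinerSketch {
  public static void main(String[] args) {
    // Stand-ins for RecoveringBlock.toString() output.
    List<String> blocks = Arrays.asList("blk_1 recovering", "blk_2 recovering");

    // Same shape as BlockRecoveryCommand.toString(): header, one entry
    // per indented line, closing parenthesis on its own line.
    StringBuilder sb = new StringBuilder();
    sb.append("BlockRecoveryCommand(\n  ");
    Joiner.on("\n  ").appendTo(sb, blocks);
    sb.append("\n)");
    System.out.println(sb);
    // BlockRecoveryCommand(
    //   blk_1 recovering
    //   blk_2 recovering
    // )
  }
}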

+ 36 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java

@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
@@ -58,6 +59,7 @@ import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
@@ -68,6 +70,10 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.token.Token;
 import org.junit.Test;

+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+
 /**
  * Tests for {@link PBHelper}
  */
@@ -265,9 +271,12 @@ public class TestPBHelper {
       compare(logs.get(i), logs1.get(i));
     }
   }
-  
   public ExtendedBlock getExtendedBlock() {
-    return new ExtendedBlock("bpid", 1, 100, 2);
+    return getExtendedBlock(1);
+  }
+  
+  public ExtendedBlock getExtendedBlock(long blkid) {
+    return new ExtendedBlock("bpid", blkid, 100, 2);
   }

   public DatanodeInfo getDNInfo() {
@@ -318,6 +327,31 @@ public class TestPBHelper {
     }
   }

+  @Test
+  public void testConvertBlockRecoveryCommand() {
+    DatanodeInfo[] dnInfo = new DatanodeInfo[] { getDNInfo(), getDNInfo() };
+
+    List<RecoveringBlock> blks = ImmutableList.of(
+      new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
+      new RecoveringBlock(getExtendedBlock(2), dnInfo, 3)
+    );
+    
+    BlockRecoveryCommand cmd = new BlockRecoveryCommand(blks);
+    BlockRecoveryCommandProto proto = PBHelper.convert(cmd);
+    assertEquals(1, proto.getBlocks(0).getBlock().getB().getBlockId());
+    assertEquals(2, proto.getBlocks(1).getBlock().getB().getBlockId());
+    
+    BlockRecoveryCommand cmd2 = PBHelper.convert(proto);
+    
+    List<RecoveringBlock> cmd2Blks = Lists.newArrayList(
+        cmd2.getRecoveringBlocks());
+    assertEquals(blks.get(0).getBlock(), cmd2Blks.get(0).getBlock());
+    assertEquals(blks.get(1).getBlock(), cmd2Blks.get(1).getBlock());
+    assertEquals(Joiner.on(",").join(blks), Joiner.on(",").join(cmd2Blks));
+    assertEquals(cmd.toString(), cmd2.toString());
+  }
+  
+  
   @Test
   public void testConvertText() {
     Text t = new Text("abc".getBytes());