HDFS Federation: TestFileAppend2, TestFileAppend3 and TestBlockTokenWithDFS failing.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/branches/HDFS-1052@1076505 13f79535-47bb-0310-9956-ffa450edef68
Jitendra Nath Pandey, 14 years ago
parent
commit bbd535262a

+ 3 - 0
CHANGES.txt

@@ -159,6 +159,9 @@ Trunk (unreleased changes)
     HDFS-1702. Federation: fix TestBackupNode and TestRefreshNamenodes
     failures. (suresh)
 
+    HDFS-1706. Federation: TestFileAppend2, TestFileAppend3 and 
+    TestBlockTokenWithDFS failing. (jitendra)
+
   IMPROVEMENTS
 
     HDFS-1510. Added test-patch.properties required by test-patch.sh (nigel)

+ 1 - 1
src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java

@@ -50,7 +50,7 @@ public class ExtendedBlock implements Writable {
   }
 
   public ExtendedBlock(final ExtendedBlock b) {
-    this(b.poolId, b.block);
+    this(b.poolId, new Block(b.block));
   }
   
   public ExtendedBlock(final String poolId, final long blockId) {

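The one-line change above replaces a shallow copy with a deep copy: the old constructor handed the new ExtendedBlock a reference to the same underlying Block, so mutating the copy silently mutated the original. A minimal standalone sketch of that aliasing bug, using simplified stand-in classes (MutableBlock, Wrapper) rather than the real Hadoop types:

// MutableBlock stands in for Block and Wrapper for ExtendedBlock; the
// real classes carry more state, but the aliasing mechanics are the same.
class MutableBlock {
    long generationStamp;

    MutableBlock(long generationStamp) {
        this.generationStamp = generationStamp;
    }

    MutableBlock(MutableBlock b) {
        this(b.generationStamp);   // deep copy: fresh instance, same values
    }
}

class Wrapper {
    final String poolId;
    final MutableBlock block;

    Wrapper(String poolId, MutableBlock block) {
        this.poolId = poolId;
        this.block = block;
    }

    // Fixed version. Before the fix this read this(b.poolId, b.block),
    // so both wrappers shared one MutableBlock instance.
    Wrapper(Wrapper b) {
        this(b.poolId, new MutableBlock(b.block));
    }
}

public class AliasingDemo {
    public static void main(String[] args) {
        Wrapper original = new Wrapper("pool-1", new MutableBlock(1));
        Wrapper copy = new Wrapper(original);

        copy.block.generationStamp = 42;

        // Prints 1 with the deep copy; with the shallow copy it printed 42.
        System.out.println(original.block.generationStamp);
    }
}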
+ 6 - 1
src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -218,6 +218,11 @@ class DataXceiver extends DataTransferProtocol.Receiver
                 " tcp no delay " + s.getTcpNoDelay());
     }
 
+    // We later mutate block's generation stamp and length, but we need to
+    // forward the original version of the block to downstream mirrors, so
+    // make a copy here.
+    final ExtendedBlock originalBlock = new ExtendedBlock(block);
+
     block.setNumBytes(dataXceiverServer.estimateBlockSize);
     LOG.info("Receiving block " + block + 
              " src: " + remoteAddress +
@@ -294,7 +299,7 @@ class DataXceiver extends DataTransferProtocol.Receiver
           mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));
 
           // Write header: Copied from DFSClient.java!
-          DataTransferProtocol.Sender.opWriteBlock(mirrorOut, block,
+          DataTransferProtocol.Sender.opWriteBlock(mirrorOut, originalBlock,
               pipelineSize, stage, newGs, minBytesRcvd, maxBytesRcvd, client,
               srcDataNode, targets, blockToken);
 

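The new comment spells out the ordering constraint this hunk fixes: the receiving datanode overwrites the block's length (and later its generation stamp) for local bookkeeping, but the downstream mirror must see the block exactly as the upstream sender described it, so the copy has to be taken before the first mutation and forwarded in place of the working object. A hedged sketch of that snapshot-then-mutate pattern; BlockSketch, PipelineStage, and forwardToMirror are illustrative names, not Hadoop APIs:

// Stand-in for ExtendedBlock: mutable length plus generation stamp, with
// the deep copy constructor from the ExtendedBlock fix above.
class BlockSketch {
    long numBytes;
    long generationStamp;

    BlockSketch(long numBytes, long generationStamp) {
        this.numBytes = numBytes;
        this.generationStamp = generationStamp;
    }

    BlockSketch(BlockSketch b) {
        this(b.numBytes, b.generationStamp);
    }
}

public class PipelineStage {
    public void receiveBlock(BlockSketch block, long estimatedBlockSize) {
        // Snapshot first. This is only safe because the copy constructor
        // deep-copies; with the old aliasing constructor the mutation
        // below would have changed originalBlock as well.
        BlockSketch originalBlock = new BlockSketch(block);

        // Local bookkeeping mutates the working copy...
        block.numBytes = estimatedBlockSize;

        // ...while the mirror receives the sender's original view.
        forwardToMirror(originalBlock);
    }

    private void forwardToMirror(BlockSketch b) {
        System.out.println("forwarding len=" + b.numBytes
            + " gs=" + b.generationStamp);
    }

    public static void main(String[] args) {
        new PipelineStage().receiveBlock(new BlockSketch(512, 1), 134217728);
    }
}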
+ 2 - 1
src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -975,7 +975,8 @@ public class MiniDFSCluster {
       System.out.println("Shutting down the namenode");
       nn.stop();
       nn.join();
-      nameNodes[nnIndex] = null;
+      Configuration conf = nameNodes[nnIndex].conf;
+      nameNodes[nnIndex] = new NameNodeInfo(null, conf);
     }
   }
 

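Previously the shutdown path nulled the whole nameNodes slot, discarding the namenode's Configuration along with its handle, so a later restart of the same index had nothing to start from; the fix keeps a placeholder entry that retains only the conf. A simplified sketch of the pattern; NameNodeSlot, ClusterSketch, and Properties (standing in for Hadoop's Configuration) are illustrative, not MiniDFSCluster's real internals:

import java.util.Properties;

class NameNodeSlot {
    final Object nameNode;   // null once the namenode is stopped
    final Properties conf;   // preserved so a restart can reuse it

    NameNodeSlot(Object nameNode, Properties conf) {
        this.nameNode = nameNode;
        this.conf = conf;
    }
}

public class ClusterSketch {
    private final NameNodeSlot[] nameNodes = new NameNodeSlot[1];

    public ClusterSketch(Properties conf) {
        nameNodes[0] = new NameNodeSlot(new Object(), conf);
    }

    public void shutdownNameNode(int nnIndex) {
        // Old behavior: nameNodes[nnIndex] = null, which also threw away
        // the conf. A placeholder keeps the conf for a later restart.
        Properties conf = nameNodes[nnIndex].conf;
        nameNodes[nnIndex] = new NameNodeSlot(null, conf);
    }

    public boolean isNameNodeUp(int nnIndex) {
        return nameNodes[nnIndex].nameNode != null;
    }
}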
+ 9 - 6
src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java

@@ -104,12 +104,15 @@ public class TestPipelines {
       filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();
 
     String bpid = cluster.getNamesystem().getBlockPoolId();
-    Replica r = DataNodeAdapter.fetchReplicaInfo(cluster.getDataNodes().get(1),
-        bpid, lb.get(0).getBlock().getBlockId());
-    assertTrue("Replica shouldn'e be null", r != null);
-    assertEquals(
-      "Should be RBW replica after sequence of calls append()/write()/hflush()",
-      HdfsConstants.ReplicaState.RBW, r.getState());
+    for (DataNode dn : cluster.getDataNodes()) {
+      Replica r = DataNodeAdapter.fetchReplicaInfo(dn, bpid, lb.get(0)
+          .getBlock().getBlockId());
+
+      assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
+      assertEquals("Should be RBW replica on " + dn
+          + " after sequence of calls " + "append()/write()/hflush()",
+          HdfsConstants.ReplicaState.RBW, r.getState());
+    }
     ofs.close();
   }
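The reworked assertion loops over every datanode in the pipeline rather than inspecting only the second one, so a replica left in the wrong state on any mirror now fails the test instead of slipping past it.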