
HDFS-581. Merge -r 809440:810333 from trunk to the append branch.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/branches/HDFS-265@810341 13f79535-47bb-0310-9956-ffa450edef68
Konstantin Shvachko 15 years ago
parent
commit
0809d3a98b

+ 40 - 28
CHANGES.txt

@@ -1,5 +1,37 @@
 Hadoop HDFS Change Log
 
+Append branch (unreleased changes)
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+    HDFS-536. Support hflush at DFSClient. (hairong)
+
+    HDFS-517. Introduce BlockInfoUnderConstruction to reflect block replica
+    states while writing. (shv)
+
+    HDFS-544. Add a "rbw" subdir to DataNode data directory. (hairong)
+
+    HDFS-565. Introduce block committing logic during new block allocation
+    and file close. (shv)
+
+  IMPROVEMENTS
+
+    HDFS-509. Redesign DataNode volumeMap to include all types of Replicas.
+    (hairong)
+
+    HDFS-562. Add a test for NameNode.getBlockLocations(..) to check read from
+    un-closed file.  (szetszwo)
+
+    HDFS-543. Break FSDatasetInterface#writeToBlock() into writeToTemporary,
+    writeToRBW, and append. (hairong)
+
+  BUG FIXES
+
+    HDFS-547. TestHDFSFileSystemContract#testOutputStreamClosedTwice
+    sometimes fails with CloseByInterruptException. (hairong)
+
 Trunk (unreleased changes)
 
   INCOMPATIBLE CHANGES
@@ -19,16 +51,6 @@ Trunk (unreleased changes)
 
     HDFS-461. Tool to analyze file size distribution in HDFS. (shv)
 
-    HDFS-536. Support hflush at DFSClient. (hairong)
-
-    HDFS-517. Introduce BlockInfoUnderConstruction to reflect block replica
-    states while writing. (shv)
-
-    HDFS-544. Add a "rbw" subdir to DataNode data directory. (hairong)
-
-    HDFS-565. Introduce block committing logic during new block allocation
-    and file close. (shv)
-
     HDFS-492. Add two JSON JSP pages to the Namenode for providing corrupt
     blocks/replicas information.  (Bill Zeller via szetszwo)
 
@@ -108,9 +130,10 @@ Trunk (unreleased changes)
 
     HDFS-451. Add fault injection tests for DataTransferProtocol.  (szetszwo)
 
-    HDFS-509. Redesign DataNode volumeMap to include all types of Replicas.
-    (hairong)
-    
+    HDFS-409. Add more access token tests.  (Kan Zhang via szetszwo)
+
+    HDFS-546. DatanodeDescriptor iterates blocks as BlockInfo. (shv)
+
     HDFS-457. Do not shutdown datanode if some, but not all, volumes fail.
     (Boris Shkolnik via szetszwo)
 
@@ -122,22 +145,11 @@ Trunk (unreleased changes)
     HDFS-552. Change TestFiDataTransferProtocol to junit 4 and add a few new
     tests.  (szetszwo)
 
-<<<<<<< .working
-    HDFS-562. Add a test for NameNode.getBlockLocations(..) to check read from
-    un-closed file.  (szetszwo)
-
-    HDFS-543. Break FSDatasetInterface#writeToBlock() into writeToTemporary,
-    writeToRBW, and append. (hairong)
-
-    HDFS-549. Allow a non-fault-inject test, which is specified by -Dtestcase,
-    to be executed by the run-test-hdfs-fault-inject target.  (Konstantin
-    Boudnik via szetszwo)
-
-=======
->>>>>>> .merge-right.r809439
     HDFS-563. Simplify the codes in FSNamesystem.getBlockLocations(..).
     (szetszwo)
 
+    HDFS-581. Introduce an iterator over blocks in the block report array. (shv)
+
   BUG FIXES
 
     HDFS-76. Better error message to users when commands fail because of 
@@ -191,8 +203,8 @@ Trunk (unreleased changes)
 
     HDFS-534. Include avro in ivy.  (szetszwo)
 
-    HDFS-547. TestHDFSFileSystemContract#testOutputStreamClosedTwice
-    sometimes fails with CloseByInterruptException. (hairong)
+    HDFS-532. Allow applications to know that a read request failed 
+    because block is missing. (dhruba)
 
     HDFS-561. Fix write pipeline READ_TIMEOUT in DataTransferProtocol.
     (Kan Zhang via szetszwo)
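
The HDFS-536 entry above ("Support hflush at DFSClient") is the user-visible heart of the append branch. As a hedged illustration of the intended semantics only, a minimal sketch assuming the hflush() entry point later exposed on FSDataOutputStream (the exact client-side API on this branch may have differed; the file path and record data are hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HflushSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Hypothetical demo path.
        FSDataOutputStream out = fs.create(new Path("/tmp/hflush-demo.txt"));
        out.writeBytes("record 1\n");
        // hflush pushes the client's buffered bytes through the datanode
        // pipeline so new readers can see them, without closing the file.
        out.hflush();
        out.writeBytes("record 2\n");
        out.close();
      }
    }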

+ 50 - 7
src/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java

@@ -17,14 +17,16 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import java.util.Iterator;
+
 /**
 * This class provides an interface for accessing a list of blocks that
  * has been implemented as long[].
- * This class is usefull for block report. Rather than send block reports
+ * This class is useful for block reports. Rather than send block reports
 * as a Block[] we can send them as a long[].
  *
  */
-public class BlockListAsLongs {
+public class BlockListAsLongs implements Iterable<Block> {
   /**
    * A block as 3 longs
    *   block-id and block length and generation stamp
@@ -48,7 +50,6 @@ public class BlockListAsLongs {
    * @param blockArray - the input array block[]
    * @return the output array of long[]
    */
-  
   public static long[] convertToArrayLongs(final Block[] blockArray) {
     long[] blocksAsLongs = new long[blockArray.length * LONGS_PER_BLOCK];
 
@@ -61,6 +62,10 @@ public class BlockListAsLongs {
     return blocksAsLongs;
   }
 
+  public BlockListAsLongs() {
+    this(null);
+  }
+
   /**
    * Constructor
   * @param iBlockList - BlockListAsLongs created from this long[] parameter
@@ -77,7 +82,43 @@ public class BlockListAsLongs {
     }
   }
 
-  
+  /**
+   * Iterates over blocks in the block report.
+   * Avoids object allocation on each iteration.
+   */
+  private class BlockReportIterator implements Iterator<Block> {
+    private int currentBlockIndex;
+    private Block block;
+
+    BlockReportIterator() {
+      this.currentBlockIndex = 0;
+      this.block = new Block();
+    }
+
+    public boolean hasNext() {
+      return currentBlockIndex < getNumberOfBlocks();
+    }
+
+    public Block next() {
+      block.set(blockList[index2BlockId(currentBlockIndex)],
+                blockList[index2BlockLen(currentBlockIndex)],
+                blockList[index2BlockGenStamp(currentBlockIndex)]);
+      currentBlockIndex++;
+      return block;
+    }
+
+    public void remove()  {
+      throw new UnsupportedOperationException("Sorry. can't remove.");
+    }
+  }
+
+  /**
+   * Returns an iterator over blocks in the block report. 
+   */
+  public Iterator<Block> iterator() {
+    return new BlockReportIterator();
+  }
+
   /**
    * The number of blocks
    * @return - the number of blocks
@@ -85,13 +126,13 @@ public class BlockListAsLongs {
   public int getNumberOfBlocks() {
     return blockList.length/LONGS_PER_BLOCK;
   }
-  
-  
+
   /**
    * The block-id of the indexTh block
    * @param index - the block whose block-id is desired
    * @return the block-id
    */
+  @Deprecated
   public long getBlockId(final int index)  {
     return blockList[index2BlockId(index)];
   }
@@ -101,6 +142,7 @@ public class BlockListAsLongs {
    * @param index - the block whose block-len is desired
    * @return - the block-len
    */
+  @Deprecated
   public long getBlockLen(final int index)  {
     return blockList[index2BlockLen(index)];
   }
@@ -110,6 +152,7 @@ public class BlockListAsLongs {
    * @param index - the block whose block-len is desired
    * @return - the generation stamp
    */
+  @Deprecated
   public long getBlockGenStamp(final int index)  {
     return blockList[index2BlockGenStamp(index)];
   }
@@ -119,7 +162,7 @@ public class BlockListAsLongs {
    * @param index - the index of the block to set
   * @param b - the block whose values are copied to the indexTh block
    */
-  void setBlock(final int index, final Block b) {
+  private void setBlock(final int index, final Block b) {
     blockList[index2BlockId(index)] = b.getBlockId();
     blockList[index2BlockLen(index)] = b.getNumBytes();
     blockList[index2BlockGenStamp(index)] = b.getGenerationStamp();
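
For reference, a minimal usage sketch of the new Iterable API, assuming only the members shown in the diff above (convertToArrayLongs, the long[] constructor, and for-each iteration); the block ids, lengths, and generation stamps are made-up demo values:

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;

    public class BlockReportDemo {
      public static void main(String[] args) {
        Block[] blocks = {
            new Block(1L, 1024L, 100L),   // id, length, generation stamp
            new Block(2L, 2048L, 101L)
        };
        // Pack the report as long[3 * N]: {id, len, genstamp} per block.
        BlockListAsLongs report =
            new BlockListAsLongs(BlockListAsLongs.convertToArrayLongs(blocks));
        for (Block b : report) {          // no Block allocated per element
          System.out.println(b.getBlockId() + " len=" + b.getNumBytes());
        }
      }
    }

Note the design choice in BlockReportIterator: next() mutates and returns one shared Block rather than allocating a fresh object per element, which is exactly what makes large block reports cheap to scan.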

+ 3 - 6
src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java

@@ -381,20 +381,17 @@ public class DatanodeDescriptor extends DatanodeInfo {
                   Collection<Block> toAdd,    // add to DatanodeDescriptor
                   Collection<Block> toRemove, // remove from DatanodeDescriptor
                   Collection<Block> toInvalidate) { // should be removed from DN
-    // place a deilimiter in the list which separates blocks 
+    // place a delimiter in the list which separates blocks 
     // that have been reported from those that have not
     BlockInfo delimiter = new BlockInfo(new Block(), 1);
     boolean added = this.addBlock(delimiter);
     assert added : "Delimiting block cannot be present in the node";
     if(newReport == null)
-      newReport = new BlockListAsLongs( new long[0]);
+      newReport = new BlockListAsLongs();
     // scan the report and collect newly reported blocks
     // Note we are taking special precaution to limit tmp blocks allocated
    // as part of this block report - which is why the block list is stored as longs
-    Block iblk = new Block(); // a fixed new'ed block to be reused with index i
-    for (int i = 0; i < newReport.getNumberOfBlocks(); ++i) {
-      iblk.set(newReport.getBlockId(i), newReport.getBlockLen(i), 
-               newReport.getBlockGenStamp(i));
+    for (Block iblk : newReport) {
       BlockInfo storedBlock = blocksMap.getStoredBlock(iblk);
       if(storedBlock == null) {
         // If block is not in blocksMap it does not belong to any file
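
One caveat follows from that design: the for-each loop in reportDiff() sees the same mutable Block instance on every iteration, so any block a caller wants to retain past the current step must be copied first. A hedged sketch of the safe pattern, assuming Block's copy constructor (the helper name and the non-empty filter are hypothetical):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;

    public class ReportScanSketch {
      // Collect the non-empty blocks of a report (hypothetical helper).
      static List<Block> nonEmptyBlocks(BlockListAsLongs report) {
        List<Block> result = new ArrayList<Block>();
        for (Block b : report) {       // b is one shared Block, mutated per step
          if (b.getNumBytes() > 0) {
            result.add(new Block(b));  // copy; adding b itself would alias one object
          }
        }
        return result;
      }
    }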