
HDFS-517. Introduce BlockInfoUnderConstruction to reflect block replica states while writing. Contributed by Konstantin Shvachko.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/branches/HDFS-265@806409 13f79535-47bb-0310-9956-ffa450edef68
Konstantin Shvachko, 16 years ago
commit 53ec155301

+ 3 - 0
CHANGES.txt

@@ -15,6 +15,9 @@ Trunk (unreleased changes)
 
     HDFS-536. Support hflush at DFSClient. (hairong)
 
+    HDFS-517. Introduce BlockInfoUnderConstruction to reflect block replica
+    states while writing. (shv)
+
   IMPROVEMENTS
 
     HDFS-381. Remove blocks from DataNode maps when corresponding file

+ 44 - 7
src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java

@@ -79,16 +79,53 @@ public interface HdfsConstants {
       return description;
     }
   }
-  
+
   /**
-   * Define Replica Type
+   * Block replica states, which a replica can go through while being constructed.
    */
   static public enum ReplicaState {
-    FINALIZED,  // finalized replica
-    RBW,        // replica being written
-    RWR,        // replica waiting to be recovered
-    RUR,        // replica under recovery
-    TEMPORARY   // temporary replica
+    /** Replica is finalized. The state when replica is not modified. */
+    FINALIZED,
+    /** Replica is being written to. */
+    RBW,
+    /** Replica is waiting to be recovered. */
+    RWR,
+    /** Replica is under recovery. */
+    RUR,
+    /** Temporary replica: created for replication and relocation only. */
+    TEMPORARY;
+  }
+
+  /**
+   * States that a block can go through while it is under construction.
+   */
+  static public enum BlockUCState {
+    /**
+     * Block construction completed.<br>
+     * The block has at least one {@link ReplicaState#FINALIZED} replica,
+     * and is not going to be modified.
+     */
+    COMPLETE,
+    /**
+     * The block is under construction.<br>
+     * It has been recently allocated for write or append.
+     */
+    UNDER_CONSTRUCTION,
+    /**
+     * The block is under recovery.<br>
+     * When a file lease expires its last block may not be {@link #COMPLETE}
+     * and needs to go through a recovery procedure,
+     * which synchronizes the existing replicas' contents.
+     */
+    UNDER_RECOVERY,
+    /**
+     * The block is committed.<br>
+     * The client reported that all bytes are written to data-nodes
+     * with the given generation stamp and block length, but no 
+     * {@link ReplicaState#FINALIZED} 
+     * replicas have yet been reported by data-nodes themselves.
+     */
+    COMMITTED;
   }
 }
 
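Taken together, the two enums model the write path: a replica is RBW while a client streams data to it and FINALIZED once the data-node closes it, while the block as a whole moves UNDER_CONSTRUCTION to COMMITTED to COMPLETE, with UNDER_RECOVERY as a detour taken when a lease expires early. A minimal stand-alone sketch of that lifecycle (not part of this patch; the transition set is inferred from the javadoc above):

    public class BlockStateSketch {
      enum BlockUCState { UNDER_CONSTRUCTION, UNDER_RECOVERY, COMMITTED, COMPLETE }

      // A block normally moves UNDER_CONSTRUCTION -> COMMITTED -> COMPLETE;
      // UNDER_RECOVERY is entered only from UNDER_CONSTRUCTION.
      static boolean isLegalTransition(BlockUCState from, BlockUCState to) {
        switch (from) {
          case UNDER_CONSTRUCTION: return to == BlockUCState.COMMITTED
                                       || to == BlockUCState.UNDER_RECOVERY;
          case UNDER_RECOVERY:     return to == BlockUCState.COMMITTED;
          case COMMITTED:          return to == BlockUCState.COMPLETE;
          default:                 return false; // COMPLETE is terminal
        }
      }

      public static void main(String[] args) {
        System.out.println(isLegalTransition(
            BlockUCState.COMMITTED, BlockUCState.COMPLETE)); // true
      }
    }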

+ 40 - 2
src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfo.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 
 /**
  * Internal class for block metadata.
@@ -35,12 +36,22 @@ class BlockInfo extends Block {
    */
   private Object[] triplets;
 
-  BlockInfo(Block blk, int replication) {
+  protected BlockInfo(Block blk, int replication) {
     super(blk);
     this.triplets = new Object[3*replication];
     this.inode = null;
   }
 
+  /**
+   * Copy constructor: used to convert a BlockInfoUnderConstruction
+   * into a plain BlockInfo when the block becomes complete.
+   * @param from BlockInfo to copy from.
+   */
+  protected BlockInfo(BlockInfo from) {
+    this(from, from.inode.getReplication());
+    this.inode = from.inode;
+  }
+
   INodeFile getINode() {
     return inode;
   }
@@ -64,7 +75,7 @@ class BlockInfo extends Block {
     assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound";
     BlockInfo info = (BlockInfo)triplets[index*3+1];
     assert info == null || 
-        BlockInfo.class.getName().equals(info.getClass().getName()) : 
+        info.getClass().getName().startsWith(BlockInfo.class.getName()) : 
               "BlockInfo is expected at " + index*3;
     return info;
   }
@@ -262,6 +273,33 @@ class BlockInfo extends Block {
     return true;
   }
 
+  /**
+   * BlockInfo represents a block that is not being constructed.
+   * In order to start modifying the block, the BlockInfo should be converted
+   * to {@link BlockInfoUnderConstruction}.
+   * @return {@link BlockUCState#COMPLETE}
+   */
+  BlockUCState getBlockUCState() {
+    return BlockUCState.COMPLETE;
+  }
+
+  /**
+   * Is this block being constructed?
+   */
+  boolean isUnderConstruction() {
+    return !getBlockUCState().equals(BlockUCState.COMPLETE);
+  }
+
+  /**
+   * Convert a complete block to an under construction block.
+   * 
+   * @return BlockInfoUnderConstruction - an under-construction block.
+   */
+  BlockInfoUnderConstruction convertToBlockUnderConstruction() {
+    assert !isUnderConstruction() : "the block is already under construction";
+    return new BlockInfoUnderConstruction(this, getINode().getReplication());
+  }
+
   @Override
   public int hashCode() {
     // Super implementation is sufficient

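The relaxed assertion in the triplets accessor (a name-prefix check instead of an exact class-name match) is what lets BlockInfoUnderConstruction objects live in the same linked block lists as plain BlockInfo. A stand-alone illustration of the difference, using hypothetical stand-in classes rather than the real ones:

    class StartsWithDemo {
      static class BlockInfo {}
      static class BlockInfoUnderConstruction extends BlockInfo {}

      public static void main(String[] args) {
        BlockInfo info = new BlockInfoUnderConstruction();
        // exact class-name match: rejects the new subclass
        System.out.println(
            BlockInfo.class.getName().equals(info.getClass().getName()));     // false
        // prefix match: accepts BlockInfo and BlockInfoUnderConstruction alike
        System.out.println(
            info.getClass().getName().startsWith(BlockInfo.class.getName())); // true
      }
    }

A plain instanceof would accept any subclass; the name-prefix check additionally rejects unrelated subclasses, at the price of relying on the naming convention.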
+ 175 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfoUnderConstruction.java

@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+
+/**
+ * Represents a block that is currently being constructed.<br>
+ * This is usually the last block of a file opened for write or append.
+ */
+class BlockInfoUnderConstruction extends BlockInfo {
+  /** Block state. See {@link BlockUCState} */
+  private BlockUCState blockUCState;
+
+  /**
+   * Block replica locations as assigned when the block was allocated.
+   * This defines the pipeline order.
+   * It is not guaranteed that data-nodes actually have corresponding replicas.
+   */
+  private DatanodeDescriptor[] locations;
+
+  /**
+   * Block replica states.
+   * Replica at locations[i] has state replicaStates[i].
+   */
+  private ReplicaState[] replicaStates;
+
+  /** A data-node responsible for block recovery. */
+  private int primaryNodeIndex = -1;
+
+  /** The last time the block was recovered. */
+  private long lastRecoveryTime = 0;
+
+  /**
+   * Create block and set its state to
+   * {@link BlockUCState#UNDER_CONSTRUCTION}.
+   */
+  BlockInfoUnderConstruction(Block blk, int replication) {
+    this(blk, replication, BlockUCState.UNDER_CONSTRUCTION, null);
+  }
+
+  BlockInfoUnderConstruction(Block blk, int replication,
+                             BlockUCState state,
+                             DatanodeDescriptor[] targets) {
+    super(blk, replication);
+    this.blockUCState = state;
+    assert getBlockUCState() != BlockUCState.COMPLETE :
+      "BlockInfoUnderConstruction cannot be in COMPLETE state";
+    setLocations(targets);
+  }
+
+  /**
+   * Convert an under construction block to a complete block.
+   * 
+   * @return BlockInfo - a complete block.
+   * @throws IOException if the state of the block 
+   * (the generation stamp and the length) has not been committed by 
+   * the client or it does not have at least a minimal number of replicas 
+   * reported from data-nodes. 
+   */
+  BlockInfo convertToCompleteBlock() throws IOException {
+    assert getBlockUCState() != BlockUCState.COMPLETE :
+      "Trying to convert a COMPLETE block";
+    if(getBlockUCState() != BlockUCState.COMMITTED)
+      throw new IOException(
+          "Cannot complete block: block has not been COMMITTED by the client");
+    return new BlockInfo(this);
+  }
+
+  void setLocations(DatanodeDescriptor[] targets) {
+    this.locations = targets;
+    int numLocations = targets == null ? 0 : targets.length;
+    replicaStates = new ReplicaState[numLocations];
+    for(int i = 0; i < numLocations; i++)
+      replicaStates[i] = ReplicaState.RBW;
+  }
+
+  DatanodeDescriptor[] getLocations() {
+    return locations;
+  }
+
+  int getNumLocations() {
+    return locations == null ? 0 : locations.length;
+  }
+
+  /**
+   * Return the state of the block under construction.
+   * @see BlockUCState
+   */
+  @Override // BlockInfo
+  BlockUCState getBlockUCState() {
+    return blockUCState;
+  }
+
+  /**
+   * Commit block's length and generation stamp as reported by the client.
+   * Set block state to {@link BlockUCState#COMMITTED}.
+   * @param block - contains client-reported block length and generation stamp
+   * @throws IOException if block ids are inconsistent.
+   */
+  void commitBlock(Block block) throws IOException {
+    if(getBlockId() != block.getBlockId())
+      throw new IOException("Trying to commit inconsistent block: id = "
+          + block.getBlockId() + ", expected id = " + getBlockId());
+    blockUCState = BlockUCState.COMMITTED;
+    this.set(getBlockId(), block.getNumBytes(), block.getGenerationStamp());
+  }
+
+  /**
+   * Initialize lease recovery for this block.
+   * Find the first alive data-node starting from the previous primary.
+   */
+  void assignPrimaryDatanode() {
+    if (locations.length == 0) {
+      NameNode.stateChangeLog.warn("BLOCK*"
+        + " INodeFileUnderConstruction.initLeaseRecovery:"
+        + " No blocks found, lease removed.");
+    }
+
+    int previous = primaryNodeIndex;
+    for(int i = 1; i <= locations.length; i++) {
+      int j = (previous + i)%locations.length;
+      if (locations[j].isAlive) {
+        DatanodeDescriptor primary = locations[primaryNodeIndex = j]; 
+        primary.addBlockToBeRecovered(this, locations);
+        NameNode.stateChangeLog.info("BLOCK* " + this
+          + " recovery started, primary=" + primary);
+        return;
+      }
+    }
+  }
+
+  /**
+   * Update lastRecoveryTime if expired.
+   * @return true if lastRecoveryTime is updated.
+   */
+  boolean setLastRecoveryTime(long now) {
+    boolean expired = now - lastRecoveryTime > NameNode.LEASE_RECOVER_PERIOD;
+    if (expired) {
+      lastRecoveryTime = now;
+    }
+    return expired;
+  }
+
+  @Override // BlockInfo
+  // BlockInfoUnderConstruction participates in maps the same way as BlockInfo
+  public int hashCode() {
+    return super.hashCode();
+  }
+
+  @Override // BlockInfo
+  public boolean equals(Object obj) {
+    // Sufficient to rely on super's implementation
+    return (this == obj) || super.equals(obj);
+  }
+}

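assignPrimaryDatanode() rotates the primary across the pipeline so that repeated recovery attempts do not keep landing on the same dead node: it scans from the slot after the previous primary and picks the first replica whose data-node is still alive. A stand-alone sketch of that selection (simplified types; not HDFS code):

    class PrimarySelectionSketch {
      // Returns the index of the next primary, or -1 if no replica is alive.
      static int choosePrimary(boolean[] isAlive, int previousPrimary) {
        for (int i = 1; i <= isAlive.length; i++) {
          int j = (previousPrimary + i) % isAlive.length;  // wrap around
          if (isAlive[j]) return j;
        }
        return -1;  // no live replica; recovery cannot start
      }

      public static void main(String[] args) {
        // previous primary was node 1; node 2 is dead, so node 0 is chosen next
        System.out.println(choosePrimary(new boolean[]{true, true, false}, 1)); // 0
      }
    }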
+ 59 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java

@@ -230,6 +230,64 @@ public class BlockManager {
     return (blocksMap.numNodes(block) >= minReplication);
   }
 
+  /**
+   * Commit the last block of the file and complete the penultimate block.
+   * 
+   * @param fileINode file inode
+   * @param commitBlock - contains client-reported block length and generation stamp
+   * @throws IOException if the block does not have at least a minimal number
+   * of replicas reported from data-nodes.
+   */
+  void commitLastBlock(INodeFileUnderConstruction fileINode, 
+                       Block commitBlock) throws IOException {
+    BlockInfoUnderConstruction lastBlock = fileINode.getLastBlock();
+    if(lastBlock == null)
+      return;
+    lastBlock.commitBlock(commitBlock);
+
+    // complete the penultimate block
+    completeBlock(fileINode, fileINode.numBlocks()-2);
+  }
+
+  /**
+   * Convert a specified block of the file to a complete block.
+   * @param fileINode file
+   * @param blkIndex  block index in the file
+   * @throws IOException if the block does not have at least a minimal number
+   * of replicas reported from data-nodes.
+   */
+  void completeBlock(INodeFile fileINode, int blkIndex) throws IOException {
+    if(blkIndex < 0)
+      return;
+    BlockInfo curBlock = fileINode.getBlocks()[blkIndex];
+    if(!curBlock.isUnderConstruction())
+      return;
+    BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)curBlock;
+    if(ucBlock.numNodes() < minReplication)
+      throw new IOException("Cannot complete block: " +
+          "block does not satisfy minimal replication requirement.");
+    BlockInfo completeBlock = ucBlock.convertToCompleteBlock();
+    // replace penultimate block in file
+    fileINode.setBlock(blkIndex, completeBlock);
+    // replace block in the blocksMap
+    blocksMap.replaceBlock(completeBlock);
+  }
+
+  /**
+   * Convert the last block of the file to an under-construction block.
+   * @param fileINode file
+   */
+  void convertLastBlockToUnderConstruction(INodeFile fileINode)
+  throws IOException {
+    BlockInfo oldBlock = fileINode.getLastBlock();
+    if(oldBlock == null || oldBlock.isUnderConstruction())
+      return;
+    BlockInfoUnderConstruction ucBlock =
+      oldBlock.convertToBlockUnderConstruction();
+    fileINode.setBlock(fileINode.numBlocks()-1, ucBlock);
+    blocksMap.replaceBlock(ucBlock);
+  }
+
   /**
    * Get all valid locations of the block
    */
@@ -1390,7 +1448,7 @@ public class BlockManager {
     return Math.max(missingBlocksInPrevIter, missingBlocksInCurIter);
   }
 
-  BlockInfo addINode(Block block, INodeFile iNode) {
+  BlockInfo addINode(BlockInfo block, INodeFile iNode) {
     return blocksMap.addINode(block, iNode);
   }
 

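commitLastBlock() and completeBlock() together keep an open file in a canonical shape: every block except possibly the last two is COMPLETE, the penultimate block is completed as soon as the next one is allocated, and only the last block is still UNDER_CONSTRUCTION (or COMMITTED once the client reports its final length). A stand-alone check of that invariant, mirroring what the new TestBlockUnderConstruction below verifies (simplified; not HDFS code):

    import java.util.Arrays;
    import java.util.List;

    class OpenFileShapeSketch {
      enum S { COMPLETE, COMMITTED, UNDER_CONSTRUCTION }

      // Only the last block may be UNDER_CONSTRUCTION, and only the last
      // two may be non-COMPLETE, while the file is open.
      static boolean isCanonical(List<S> blocks) {
        for (int i = 0; i < blocks.size(); i++) {
          boolean last = (i == blocks.size() - 1);
          boolean penultimate = (i == blocks.size() - 2);
          if (!last && !penultimate && blocks.get(i) != S.COMPLETE) return false;
          if (penultimate && blocks.get(i) == S.UNDER_CONSTRUCTION) return false;
        }
        return true;
      }

      public static void main(String[] args) {
        System.out.println(isCanonical(Arrays.asList(
            S.COMPLETE, S.COMMITTED, S.UNDER_CONSTRUCTION))); // true
        System.out.println(isCanonical(Arrays.asList(
            S.UNDER_CONSTRUCTION, S.COMPLETE)));              // false
      }
    }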
+ 22 - 4
src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java

@@ -75,11 +75,10 @@ class BlocksMap {
   /**
    * Add block b belonging to the specified file inode to the map.
    */
-  BlockInfo addINode(Block b, INodeFile iNode) {
-    int replication = iNode.getReplication();
+  BlockInfo addINode(BlockInfo b, INodeFile iNode) {
     BlockInfo info = map.get(b);
-    if (info == null) {
-      info = new BlockInfo(b, replication);
+    if (info != b) {
+      info = b;
       map.put(info, info);
     }
     info.setINode(iNode);
@@ -191,4 +190,23 @@ class BlocksMap {
   float getLoadFactor() {
     return loadFactor;
   }
+
+  /**
+   * Replace a block in the block map by a new block.
+   * The new block and the old one have the same key.
+   * @param newBlock - block for replacement
+   * @return new block
+   */
+  BlockInfo replaceBlock(BlockInfo newBlock) {
+    BlockInfo currentBlock = map.get(newBlock);
+    assert currentBlock != null : "the block if not in blocksMap";
+    // replace block in data-node lists
+    for(int idx = currentBlock.numNodes()-1; idx >= 0; idx--) {
+      DatanodeDescriptor dn = currentBlock.getDatanode(idx);
+      dn.replaceBlock(currentBlock, newBlock);
+    }
+    // replace block in the map itself
+    map.put(newBlock, newBlock);
+    return newBlock;
+  }
 }

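replaceBlock() ends with map.put(newBlock, newBlock) even though the old and new blocks compare equal (same block id). Storing the block as both key and value matters because of a java.util.HashMap subtlety: put() on an equal key keeps the old key object and only swaps the value, so lookups must go through get() to observe the replacement. A stand-alone demonstration, assuming the map behaves like java.util.HashMap (which BlocksMap's map does at this point in the code base):

    import java.util.HashMap;
    import java.util.Map;

    class EqualKeyPutDemo {
      public static void main(String[] args) {
        Map<String, String> map = new HashMap<String, String>();
        String oldKey = new String("blk_42");   // distinct but equal objects
        String newKey = new String("blk_42");
        map.put(oldKey, oldKey);
        map.put(newKey, newKey);                // value replaced, key object kept
        System.out.println(map.get("blk_42") == newKey);              // true
        System.out.println(map.keySet().iterator().next() == oldKey); // true
      }
    }

Reading through get() therefore always yields the current BlockInfo object, which is exactly what addINode() and replaceBlock() rely on.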
+ 15 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java

@@ -201,6 +201,21 @@ public class DatanodeDescriptor extends DatanodeInfo {
     blockList = b.listInsert(blockList, this);
   }
 
+  /**
+   * Replace specified old block with a new one in the DataNodeDescriptor.
+   * 
+   * @param oldBlock - block to be replaced
+   * @param newBlock - a replacement block
+   * @return the new block
+   */
+  BlockInfo replaceBlock(BlockInfo oldBlock, BlockInfo newBlock) {
+    boolean done = removeBlock(oldBlock);
+    assert done : "Old block should belong to the data-node when replacing";
+    done = addBlock(newBlock);
+    assert done : "New block should not belong to the data-node when replacing";
+    return newBlock;
+  }
+
   void resetBlocks() {
     this.capacity = 0;
     this.remaining = 0;

+ 31 - 13
src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.metrics.MetricsContext;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 
 /*************************************************
@@ -184,7 +185,7 @@ class FSDirectory implements Closeable {
    */
   INode unprotectedAddFile( String path, 
                             PermissionStatus permissions,
-                            Block[] blocks, 
+                            BlockInfo[] blocks, 
                             short replication,
                             long modificationTime,
                             long atime,
@@ -254,7 +255,8 @@ class FSDirectory implements Closeable {
         // Add file->block mapping
         INodeFile newF = (INodeFile)newNode;
         for (int i = 0; i < nrBlocks; i++) {
-          newF.setBlock(i, getBlockManager().addINode(blocks[i], newF));
+          BlockInfo blockInfo = new BlockInfo(blocks[i], newF.getReplication());
+          newF.setBlock(i, getBlockManager().addINode(blockInfo, newF));
         }
       }
     }
@@ -264,27 +266,43 @@ class FSDirectory implements Closeable {
   /**
    * Add a block to the file. Returns a reference to the added block.
    */
-  Block addBlock(String path, INode[] inodes, Block block
-      ) throws QuotaExceededException  {
+  BlockInfo addBlock(String path,
+                     INode[] inodes,
+                     Block block,
+                     DatanodeDescriptor targets[]
+  ) throws QuotaExceededException, IOException  {
     waitForReady();
 
     synchronized (rootDir) {
-      INodeFile fileNode = (INodeFile) inodes[inodes.length-1];
+      assert inodes[inodes.length-1].isUnderConstruction() :
+        "INode should correspond to a file under construction";
+      INodeFileUnderConstruction fileINode = 
+        (INodeFileUnderConstruction)inodes[inodes.length-1];
+
+      // commit the last block and complete the penultimate block
+      // SHV !!! second parameter should be a block reported by client
+      getBlockManager().commitLastBlock(fileINode, fileINode.getLastBlock());
 
       // check quota limits and updated space consumed
       updateCount(inodes, inodes.length-1, 0, 
-                  fileNode.getPreferredBlockSize()*fileNode.getReplication());
-      
-      // associate the new list of blocks with this file
-      BlockInfo blockInfo = getBlockManager().addINode(block, fileNode);
-      fileNode.addBlock(blockInfo);
+                  fileINode.getPreferredBlockSize()*fileINode.getReplication());
+
+      // associate new last block for the file
+      BlockInfoUnderConstruction blockInfo =
+        new BlockInfoUnderConstruction(
+            block,
+            fileINode.getReplication(),
+            BlockUCState.UNDER_CONSTRUCTION,
+            targets);
+      getBlockManager().addINode(blockInfo, fileINode);
+      fileINode.addBlock(blockInfo);
 
       NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
                                     + path + " with " + block
                                     + " block is added to the in-memory "
                                     + "file system");
+      return blockInfo;
     }
-    return block;
   }
 
   /**
@@ -328,7 +346,7 @@ class FSDirectory implements Closeable {
 
     synchronized (rootDir) {
       // modify file-> block and blocksMap
-      fileNode.removeBlock(block);
+      fileNode.removeLastBlock(block);
       getBlockManager().removeBlockFromMap(block);
       // If block is removed from blocksMap remove it from corruptReplicasMap
       getBlockManager().removeFromCorruptReplicasMap(block);
@@ -699,7 +717,7 @@ class FSDirectory implements Closeable {
       }
       
       int index = 0;
-      for (Block b : newnode.getBlocks()) {
+      for (BlockInfo b : newnode.getBlocks()) {
         BlockInfo info = getBlockManager().addINode(b, newnode);
         newnode.setBlock(index, info); // inode refers to the block in BlocksMap
         index++;

+ 22 - 18
src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -461,19 +461,9 @@ public class FSEditLog {
             blockSize = readLong(in);
           }
           // get blocks
-          Block blocks[] = null;
-          if (logVersion <= -14) {
-            blocks = readBlocks(in);
-          } else {
-            BlockTwo oldblk = new BlockTwo();
-            int num = in.readInt();
-            blocks = new Block[num];
-            for (int i = 0; i < num; i++) {
-              oldblk.readFields(in);
-              blocks[i] = new Block(oldblk.blkid, oldblk.len, 
-                                    Block.GRANDFATHER_GENERATION_STAMP);
-            }
-          }
+          boolean isFileUnderConstruction = (opcode == OP_ADD);
+          BlockInfo blocks[] = 
+            readBlocks(in, logVersion, isFileUnderConstruction, replication);
 
           // Older versions of HDFS does not store the block size in inode.
           // If the file has more than one block, use the size of the
@@ -521,7 +511,7 @@ public class FSEditLog {
                                                     path, permissions,
                                                     blocks, replication, 
                                                     mtime, atime, blockSize);
-          if (opcode == OP_ADD) {
+          if (isFileUnderConstruction) {
             numOpAdd++;
             //
             // Replace current node with a INodeUnderConstruction.
@@ -1247,12 +1237,26 @@ public class FSEditLog {
     return Long.parseLong(FSImage.readString(in));
   }
 
-  static private Block[] readBlocks(DataInputStream in) throws IOException {
+  static private BlockInfo[] readBlocks(
+      DataInputStream in,
+      int logVersion,
+      boolean isFileUnderConstruction,
+      short replication) throws IOException {
     int numBlocks = in.readInt();
-    Block[] blocks = new Block[numBlocks];
+    BlockInfo[] blocks = new BlockInfo[numBlocks];
+    Block blk = new Block();
+    BlockTwo oldblk = new BlockTwo();
     for (int i = 0; i < numBlocks; i++) {
-      blocks[i] = new Block();
-      blocks[i].readFields(in);
+      if (logVersion <= -14) {
+        blk.readFields(in);
+      } else {
+        oldblk.readFields(in);
+        blk.set(oldblk.blkid, oldblk.len, Block.GRANDFATHER_GENERATION_STAMP);
+      }
+      if(isFileUnderConstruction && i == numBlocks-1)
+        blocks[i] = new BlockInfoUnderConstruction(blk, replication);
+      else
+        blocks[i] = new BlockInfo(blk, replication);
     }
     return blocks;
   }

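The restructured readBlocks() folds the old version branch into the per-block loop and adds one rule: when replaying an OP_ADD the file is still open, so its last block is materialized as an under-construction block while all earlier blocks stay plain BlockInfo. A stand-alone sketch of that decision (stand-in types; not HDFS code):

    class ReadBlocksSketch {
      // Which concrete class the i-th of n decoded blocks becomes when
      // replaying an edit for a file that may still be open.
      static String blockClassFor(int i, int n, boolean isFileUnderConstruction) {
        return (isFileUnderConstruction && i == n - 1)
            ? "BlockInfoUnderConstruction"
            : "BlockInfo";
      }

      public static void main(String[] args) {
        System.out.println(blockClassFor(2, 3, true));  // BlockInfoUnderConstruction
        System.out.println(blockClassFor(2, 3, false)); // BlockInfo
      }
    }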
+ 10 - 2
src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.UpgradeManager;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
@@ -1419,10 +1420,17 @@ public class FSImage extends Storage {
     int numBlocks = in.readInt();
     BlockInfo[] blocks = new BlockInfo[numBlocks];
     Block blk = new Block();
-    for (int i = 0; i < numBlocks; i++) {
+    int i = 0;
+    for (; i < numBlocks-1; i++) {
       blk.readFields(in);
       blocks[i] = new BlockInfo(blk, blockReplication);
     }
+    // last block is UNDER_CONSTRUCTION
+    if(numBlocks > 0) {
+      blk.readFields(in);
+      blocks[i] = new BlockInfoUnderConstruction(
+        blk, blockReplication, BlockUCState.UNDER_CONSTRUCTION, null);
+    }
     PermissionStatus perm = PermissionStatus.read(in);
     String clientName = readString(in);
     String clientMachine = readString(in);
@@ -1430,7 +1438,7 @@ public class FSImage extends Storage {
     // These locations are not used at all
     int numLocs = in.readInt();
     DatanodeDescriptor[] locations = new DatanodeDescriptor[numLocs];
-    for (int i = 0; i < numLocs; i++) {
+    for (i = 0; i < numLocs; i++) {
       locations[i] = new DatanodeDescriptor();
       locations[i].readFields(in);
     }

+ 63 - 51
src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -552,13 +552,18 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
     }
     List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
     long totalSize = 0;
+    BlockInfo curBlock;
     while(totalSize<size && iter.hasNext()) {
-      totalSize += addBlock(iter.next(), results);
+      curBlock = iter.next();
+      if(curBlock.isUnderConstruction())  continue;
+      totalSize += addBlock(curBlock, results);
     }
     if(totalSize<size) {
       iter = node.getBlockIterator(); // start from the beginning
       for(int i=0; i<startBlock&&totalSize<size; i++) {
-        totalSize += addBlock(iter.next(), results);
+        curBlock = iter.next();
+        if(curBlock.isUnderConstruction())  continue;
+        totalSize += addBlock(curBlock, results);
       }
     }
 
@@ -968,6 +973,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
         // Recreate in-memory lease record.
         //
         INodeFile node = (INodeFile) myFile;
+        blockManager.convertLastBlockToUnderConstruction(node);
         INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
                                         node.getLocalNameBytes(),
                                         node.getReplication(),
@@ -1029,40 +1035,36 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
     LocatedBlock lb = null;
     synchronized (this) {
       INodeFileUnderConstruction file = (INodeFileUnderConstruction)dir.getFileINode(src);
-
-      BlockInfo[] blocks = file.getBlocks();
-      if (blocks != null && blocks.length > 0) {
-        BlockInfo last = blocks[blocks.length-1];
-        // this is a redundant search in blocksMap
-        // should be resolved by the new implementation of append
-        BlockInfo storedBlock = blockManager.getStoredBlock(last);
-        assert last == storedBlock : "last block should be in the blocksMap";
-        if (file.getPreferredBlockSize() > storedBlock.getNumBytes()) {
+      BlockInfoUnderConstruction lastBlock = file.getLastBlock();
+      if (lastBlock != null) {
+        assert lastBlock == blockManager.getStoredBlock(lastBlock) :
+          "last block of the file is not in blocksMap";
+        if (file.getPreferredBlockSize() > lastBlock.getNumBytes()) {
           long fileLength = file.computeContentSummary().getLength();
-          DatanodeDescriptor[] targets = blockManager.getNodes(storedBlock);
+          DatanodeDescriptor[] targets = blockManager.getNodes(lastBlock);
           // remove the replica locations of this block from the node
           for (int i = 0; i < targets.length; i++) {
-            targets[i].removeBlock(storedBlock);
+            targets[i].removeBlock(lastBlock);
           }
-          // set the locations of the last block in the lease record
-          file.setLastBlock(storedBlock, targets);
+          // convert last block to under-construction and set its locations
+          file.setLastBlock(lastBlock, targets);
 
-          lb = new LocatedBlock(last, targets, 
-                                fileLength-storedBlock.getNumBytes());
+          lb = new LocatedBlock(lastBlock, targets, 
+                                fileLength-lastBlock.getNumBytes());
           if (isAccessTokenEnabled) {
             lb.setAccessToken(accessTokenHandler.generateToken(lb.getBlock()
                 .getBlockId(), EnumSet.of(AccessTokenHandler.AccessMode.WRITE)));
           }
 
           // Remove block from replication queue.
-          blockManager.updateNeededReplications(last, 0, 0);
+          blockManager.updateNeededReplications(lastBlock, 0, 0);
 
           // remove this block from the list of pending blocks to be deleted. 
           // This reduces the possibility of triggering HADOOP-1349.
           //
           for (DatanodeDescriptor dd : targets) {
             String datanodeId = dd.getStorageID();
-            blockManager.removeFromInvalidates(datanodeId, last);
+            blockManager.removeFromInvalidates(datanodeId, lastBlock);
           }
         }
       }
@@ -1150,8 +1152,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
       }
 
       // allocate new block record block locations in INode.
-      newBlock = allocateBlock(src, pathINodes);
-      pendingFile.setTargets(targets);
+      newBlock = allocateBlock(src, pathINodes, targets);
       
       for (DatanodeDescriptor dn : targets) {
         dn.incBlocksScheduled();
@@ -1293,13 +1294,15 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
    * @param inodes INode representing each of the components of src. 
    *        <code>inodes[inodes.length-1]</code> is the INode for the file.
    */
-  private Block allocateBlock(String src, INode[] inodes) throws IOException {
+  private Block allocateBlock(String src,
+                              INode[] inodes,
+                              DatanodeDescriptor targets[]) throws IOException {
     Block b = new Block(FSNamesystem.randBlockId.nextLong(), 0, 0); 
     while(isValidBlock(b)) {
       b.setBlockId(FSNamesystem.randBlockId.nextLong());
     }
     b.setGenerationStamp(getGenerationStamp());
-    b = dir.addBlock(src, inodes, b);
+    b = dir.addBlock(src, inodes, b, targets);
     NameNode.stateChangeLog.info("BLOCK* NameSystem.allocateBlock: "
                                  +src+ ". "+b);
     return b;
@@ -1310,12 +1313,12 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
    * replicated.  If not, return false. If checkall is true, then check
    * all blocks, otherwise check only penultimate block.
    */
-  synchronized boolean checkFileProgress(INodeFile v, boolean checkall) {
+  synchronized boolean checkFileProgress(INodeFile v, boolean checkall) throws IOException {
     if (checkall) {
       //
       // check all blocks of the file.
       //
-      for (Block block: v.getBlocks()) {
+      for (BlockInfo block: v.getBlocks()) {
         if (!blockManager.checkMinReplication(block)) {
           return false;
         }
@@ -1324,7 +1327,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
       //
       // check the penultimate block of this file
       //
-      Block b = v.getPenultimateBlock();
+      BlockInfo b = v.getPenultimateBlock();
       if (b != null && !blockManager.checkMinReplication(b)) {
         return false;
       }
@@ -1567,27 +1570,28 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
     }
 
     INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction) iFile;
+    BlockInfoUnderConstruction lastBlock = pendingFile.getLastBlock();
 
     // Initialize lease recovery for pendingFile. If there are no blocks 
     // associated with this file, then reap lease immediately. Otherwise 
     // renew the lease and trigger lease recovery.
-    if (pendingFile.getTargets() == null ||
-        pendingFile.getTargets().length == 0) {
-      if (pendingFile.getBlocks().length == 0) {
-        finalizeINodeFileUnderConstruction(src, pendingFile);
-        NameNode.stateChangeLog.warn("BLOCK*"
-          + " internalReleaseLease: No blocks found, lease removed.");
-        return;
-      }
-      // setup the Inode.targets for the last block from the blockManager
-      //
-      BlockInfo[] blocks = pendingFile.getBlocks();
-      BlockInfo last = blocks[blocks.length-1];
-      DatanodeDescriptor[] targets = blockManager.getNodes(last);
-      pendingFile.setTargets(targets);
+    if (lastBlock == null) {
+      assert pendingFile.getBlocks().length == 0 :
+        "file is not empty but the last block does not exist";
+      finalizeINodeFileUnderConstruction(src, pendingFile);
+      NameNode.stateChangeLog.warn("BLOCK*"
+        + " internalReleaseLease: No blocks found, lease removed.");
+      return;
     }
+
+    // setup the last block locations from the blockManager if not known
+    if(lastBlock.getNumLocations() == 0) {
+      DatanodeDescriptor targets[] = blockManager.getNodes(lastBlock);
+      lastBlock.setLocations(targets);
+    }
+
     // start lease recovery of the last block for this file.
-    pendingFile.assignPrimaryDatanode();
+    lastBlock.assignPrimaryDatanode();
     leaseManager.renewLease(lease);
   }
 
@@ -1595,11 +1599,17 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
       INodeFileUnderConstruction pendingFile) throws IOException {
     leaseManager.removeLease(pendingFile.clientName, src);
 
+    // commit the last block and complete the penultimate block
+    // SHV !!! second parameter should be a block reported by client
+    blockManager.commitLastBlock(pendingFile, pendingFile.getLastBlock());
+
     // The file is no longer pending.
-    // Create permanent INode, update blockmap
+    // Create permanent INode, update blocks
     INodeFile newFile = pendingFile.convertToInodeFile();
     dir.replaceNode(src, pendingFile, newFile);
 
+    // complete last block of the file
+    blockManager.completeBlock(newFile, newFile.numBlocks()-1);
     // close file and persist block allocations for this file
     dir.closeFile(src, newFile);
 
@@ -1635,12 +1645,15 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
     blockManager.removeBlockFromMap(oldblockinfo);
 
     if (deleteblock) {
-      pendingFile.removeBlock(lastblock);
+      pendingFile.removeLastBlock(lastblock);
     }
     else {
       // update last block, construct newblockinfo and add it to the blocks map
       lastblock.set(lastblock.getBlockId(), newlength, newgenerationstamp);
-      final BlockInfo newblockinfo = blockManager.addINode(lastblock, pendingFile);
+      BlockInfoUnderConstruction newblockinfo = 
+        new BlockInfoUnderConstruction(
+            lastblock, pendingFile.getReplication());
+      blockManager.addINode(newblockinfo, pendingFile);
 
       // find the DatanodeDescriptor objects
       // There should be no locations in the blockManager till now because the
@@ -1659,11 +1672,9 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
         for (int i = 0; i < descriptors.length; i++) {
           descriptors[i].addBlock(newblockinfo);
         }
-        pendingFile.setLastBlock(newblockinfo, null);
-      } else {
-        // add locations into the INodeUnderConstruction
-        pendingFile.setLastBlock(newblockinfo, descriptors);
       }
+      // add locations into the INodeUnderConstruction
+      pendingFile.setLastBlock(newblockinfo, descriptors);
     }
 
     // If this commit does not want to close the file, persist
@@ -3624,12 +3635,13 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
       throw new IOException(msg);
     }
     INodeFile fileINode = storedBlock.getINode();
-    if (!fileINode.isUnderConstruction()) {
-      String msg = block + " is already commited, !fileINode.isUnderConstruction().";
+    if(!fileINode.isUnderConstruction() || !storedBlock.isUnderConstruction()) {
+      String msg = block + 
+            " is already commited, file or block is not under construction().";
       LOG.info(msg);
       throw new IOException(msg);
     }
-    if (!((INodeFileUnderConstruction)fileINode).setLastRecoveryTime(now())) {
+    if(!((BlockInfoUnderConstruction)storedBlock).setLastRecoveryTime(now())) {
       String msg = block + " is beening recovered, ignoring this request.";
       LOG.info(msg);
       throw new IOException(msg);

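allocateBlock() picks a fresh random id by redrawing until the candidate is unused (the while(isValidBlock(b)) loop above), relying on the 64-bit id space to make collisions vanishingly rare. A stand-alone sketch of the loop, with a Set standing in for isValidBlock() (not HDFS code):

    import java.util.HashSet;
    import java.util.Random;
    import java.util.Set;

    class BlockIdAllocSketch {
      // Redraw random ids until one is unused; with 64-bit ids the expected
      // number of redraws is essentially zero.
      static long allocateId(Set<Long> inUse, Random rand) {
        long id = rand.nextLong();
        while (inUse.contains(id)) {
          id = rand.nextLong();
        }
        return id;
      }

      public static void main(String[] args) {
        Set<Long> inUse = new HashSet<Long>();
        inUse.add(7L);
        System.out.println(inUse.contains(allocateId(inUse, new Random()))); // false
      }
    }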
+ 25 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java

@@ -172,13 +172,14 @@ class INodeFile extends INode {
   /**
    * Return the penultimate allocated block for this file.
    */
-  Block getPenultimateBlock() {
+  BlockInfo getPenultimateBlock() {
     if (blocks == null || blocks.length <= 1) {
       return null;
     }
     return blocks[blocks.length - 2];
   }
 
+  // SHV !!! this is not used anywhere - remove
   INodeFileUnderConstruction toINodeFileUnderConstruction(
       String clientName, String clientMachine, DatanodeDescriptor clientNode
       ) throws IOException {
@@ -190,4 +191,27 @@ class INodeFile extends INode {
         blocks, getPermissionStatus(),
         clientName, clientMachine, clientNode);
   }
+
+  /**
+   * Get the last block of the file.
+   * Make sure it has the right type.
+   */
+  <T extends BlockInfo> T getLastBlock() throws IOException {
+    if (blocks == null || blocks.length == 0)
+      return null;
+    T returnBlock = null;
+    try {
+      @SuppressWarnings("unchecked")  // ClassCastException is caught below
+      T tBlock = (T)blocks[blocks.length - 1];
+      returnBlock = tBlock;
+    } catch(ClassCastException cce) {
+      throw new IOException("Unexpected last block type: " 
+          + blocks[blocks.length - 1].getClass().getSimpleName());
+    }
+    return returnBlock;
+  }
+
+  int numBlocks() {
+    return blocks == null ? 0 : blocks.length;
+  }
 }

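One caveat on the generic getLastBlock() above: because Java generics are erased, the cast to T inside the method is really a cast to the bound (BlockInfo) and never throws for a BlockInfo element; the ClassCastException the catch block anticipates is actually raised at the caller's assignment, outside the try. A stand-alone demonstration of the pitfall (hypothetical Base/Derived types; not HDFS code):

    class ErasureDemo {
      static class Base {}
      static class Derived extends Base {}

      @SuppressWarnings("unchecked")
      static <T extends Base> T last(Base b) {
        try {
          return (T) b;  // erased to (Base) b: cannot throw here
        } catch (ClassCastException cce) {
          throw new AssertionError("unreachable: erasure defers the check");
        }
      }

      public static void main(String[] args) {
        Derived d = last(new Base());  // ClassCastException is thrown HERE
      }
    }

In getLastBlock() this means a wrong-typed last block escapes as an uncaught ClassCastException at the call site rather than the intended IOException.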
+ 17 - 62
src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java

@@ -27,10 +27,6 @@ class INodeFileUnderConstruction extends INodeFile {
   final String clientName;         // lease holder
   private final String clientMachine;
   private final DatanodeDescriptor clientNode; // if client is a cluster node too.
-
-  private int primaryNodeIndex = -1; //the node working on lease recovery
-  private DatanodeDescriptor[] targets = null;   //locations for last block
-  private long lastRecoveryTime = 0;
   
   INodeFileUnderConstruction(PermissionStatus permissions,
                              short replication,
@@ -83,15 +79,6 @@ class INodeFileUnderConstruction extends INodeFile {
     return true;
   }
 
-  DatanodeDescriptor[] getTargets() {
-    return targets;
-  }
-
-  void setTargets(DatanodeDescriptor[] targets) {
-    this.targets = targets;
-    this.primaryNodeIndex = -1;
-  }
-
   //
   // converts a INodeFileUnderConstruction into a INodeFile
   // use the modification time as the access time
@@ -108,10 +95,10 @@ class INodeFileUnderConstruction extends INodeFile {
   }
 
   /**
-   * remove a block from the block list. This block should be
+   * Remove a block from the block list. This block should be
    * the last one on the list.
    */
-  void removeBlock(Block oldblock) throws IOException {
+  void removeLastBlock(Block oldblock) throws IOException {
     if (blocks == null) {
       throw new IOException("Trying to delete non-existant block " + oldblock);
     }
@@ -124,57 +111,25 @@ class INodeFileUnderConstruction extends INodeFile {
     BlockInfo[] newlist = new BlockInfo[size_1];
     System.arraycopy(blocks, 0, newlist, 0, size_1);
     blocks = newlist;
-    
-    // Remove the block locations for the last block.
-    targets = null;
-  }
-
-  synchronized void setLastBlock(BlockInfo newblock, DatanodeDescriptor[] newtargets
-      ) throws IOException {
-    if (blocks == null) {
-      throw new IOException("Trying to update non-existant block (newblock="
-          + newblock + ")");
-    }
-    blocks[blocks.length - 1] = newblock;
-    setTargets(newtargets);
-    lastRecoveryTime = 0;
   }
 
   /**
-   * Initialize lease recovery for this object
-   */
-  void assignPrimaryDatanode() {
-    //assign the first alive datanode as the primary datanode
-
-    if (targets.length == 0) {
-      NameNode.stateChangeLog.warn("BLOCK*"
-        + " INodeFileUnderConstruction.initLeaseRecovery:"
-        + " No blocks found, lease removed.");
-    }
-
-    int previous = primaryNodeIndex;
-    //find an alive datanode beginning from previous
-    for(int i = 1; i <= targets.length; i++) {
-      int j = (previous + i)%targets.length;
-      if (targets[j].isAlive) {
-        DatanodeDescriptor primary = targets[primaryNodeIndex = j]; 
-        primary.addBlockToBeRecovered(blocks[blocks.length - 1], targets);
-        NameNode.stateChangeLog.info("BLOCK* " + blocks[blocks.length - 1]
-          + " recovery started, primary=" + primary);
-        return;
-      }
-    }
-  }
-  
-  /**
-   * Update lastRecoveryTime if expired.
-   * @return true if lastRecoveryTimeis updated. 
+   * Convert the last block of the file to an under-construction block.
+   * Set its locations.
    */
-  synchronized boolean setLastRecoveryTime(long now) {
-    boolean expired = now - lastRecoveryTime > NameNode.LEASE_RECOVER_PERIOD;
-    if (expired) {
-      lastRecoveryTime = now;
+  void setLastBlock(BlockInfo lastBlock, DatanodeDescriptor[] targets)
+  throws IOException {
+    if (blocks == null || blocks.length == 0) {
+      throw new IOException("Trying to update non-existant block. " +
+      		"File is empty.");
     }
-    return expired;
+    BlockInfoUnderConstruction ucBlock;
+    if(lastBlock.isUnderConstruction())
+      ucBlock = (BlockInfoUnderConstruction)lastBlock;
+    else
+      ucBlock = new BlockInfoUnderConstruction(lastBlock, getReplication());
+    ucBlock.setLocations(targets);
+    ucBlock.setLastRecoveryTime(0);
+    blocks[blocks.length - 1] = ucBlock;
   }
 }

+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java

@@ -73,7 +73,7 @@ public class TestFileCreation extends junit.framework.TestCase {
   // the datanodes.
 
   // creates a file but does not close it
-  static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
+  public static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
     throws IOException {
     System.out.println("createFile: Created " + name + " with " + repl + " replica.");
     FSDataOutputStream stm = fileSys.create(name, true,
@@ -92,7 +92,7 @@ public class TestFileCreation extends junit.framework.TestCase {
   //
   // writes specified bytes to file.
   //
-  static void writeFile(FSDataOutputStream stm, int size) throws IOException {
+  public static void writeFile(FSDataOutputStream stm, int size) throws IOException {
     byte[] buffer = AppendTestUtil.randomBytes(seed, size);
     stm.write(buffer, 0, size);
   }

+ 135 - 0
src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java

@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.TestFileCreation;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
+
+import junit.framework.TestCase;
+
+public class TestBlockUnderConstruction extends TestCase {
+  static final String BASE_DIR = "/test/TestBlockUnderConstruction";
+  static final int BLOCK_SIZE = 8192; // same as TestFileCreation.blocksize
+  static final int NUM_BLOCKS = 5;  // number of blocks to write
+
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem hdfs;
+
+  protected void setUp() throws Exception {
+    super.setUp();
+    Configuration conf = new Configuration();
+    cluster = new MiniDFSCluster(conf, 3, true, null);
+    cluster.waitActive();
+    hdfs = (DistributedFileSystem)cluster.getFileSystem();
+  }
+
+  protected void tearDown() throws Exception {
+    if(hdfs != null) hdfs.close();
+    if(cluster != null) cluster.shutdown();
+    super.tearDown();
+  }
+
+  void writeFile(Path file, FSDataOutputStream stm, int size)
+  throws IOException {
+    long blocksBefore = stm.getPos() / BLOCK_SIZE;
+    
+    TestFileCreation.writeFile(stm, size);
+    int blocksAfter = 0;
+    // wait until the block is allocated by DataStreamer
+    BlockLocation[] locatedBlocks;
+    while(blocksAfter <= blocksBefore) {
+      locatedBlocks = hdfs.getClient().getBlockLocations(
+          file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
+      blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
+    }
+  }
+
+  private void verifyFileBlocks(String file,
+                                boolean isFileOpen) throws IOException {
+    FSNamesystem ns = cluster.getNamesystem();
+    INodeFile inode = ns.dir.getFileINode(file);
+    assertTrue("File does not exist: " + inode.toString(), inode != null);
+    assertTrue("File " + inode.toString() +
+        " isUnderConstruction = " + inode.isUnderConstruction() +
+        " expected to be " + isFileOpen,
+        inode.isUnderConstruction() == isFileOpen);
+    BlockInfo[] blocks = inode.getBlocks();
+    assertTrue("File does not have blocks: " + inode.toString(),
+        blocks != null && blocks.length > 0);
+    
+    int idx = 0;
+    BlockInfo curBlock;
+    // all blocks but the last two should be regular blocks
+    for(; idx < blocks.length - 2; idx++) {
+      curBlock = blocks[idx];
+      assertFalse("Block is not under construction: " + curBlock,
+          curBlock.isUnderConstruction());
+      assertTrue("Block is not in BlocksMap: " + curBlock,
+          ns.blockManager.getStoredBlock(curBlock) == curBlock);
+    }
+
+    // the penultimate block is either complete or
+    // committed if the file is not closed
+    if(blocks.length > 1) {
+      curBlock = blocks[blocks.length-2]; // penultimate block
+      assertTrue("Block " + curBlock +
+          " isUnderConstruction = " + inode.isUnderConstruction() +
+          " expected to be " + isFileOpen,
+          (isFileOpen && !curBlock.isUnderConstruction()) ||
+          (!isFileOpen && curBlock.isUnderConstruction() == 
+            (curBlock.getBlockUCState() ==
+              BlockUCState.COMMITTED)));
+      assertTrue("Block is not in BlocksMap: " + curBlock,
+          ns.blockManager.getStoredBlock(curBlock) == curBlock);
+    }
+
+    // the last block is under construction if the file is not closed
+    curBlock = blocks[blocks.length-1]; // last block
+    assertTrue("Block " + curBlock +
+        " isUnderConstruction = " + inode.isUnderConstruction() +
+        " expected to be " + isFileOpen,
+        curBlock.isUnderConstruction() == isFileOpen);
+    assertTrue("Block is not in BlocksMap: " + curBlock,
+        ns.blockManager.getStoredBlock(curBlock) == curBlock);
+  }
+
+  public void testBlockCreation() throws IOException {
+    Path file1 = new Path(BASE_DIR, "file1.dat");
+    FSDataOutputStream out = TestFileCreation.createFile(hdfs, file1, 3);
+
+    for(int idx = 0; idx < NUM_BLOCKS; idx++) {
+      // write one block
+      writeFile(file1, out, BLOCK_SIZE);
+      // verify consistency
+      verifyFileBlocks(file1.toString(), true);
+    }
+
+    // close file
+    out.close();
+    // verify consistency
+    verifyFileBlocks(file1.toString(), false);
+  }
+}