
HADOOP-4734. Block and meta data validation codes in HADOOP-1700 should be committed to 0.18. (szetszwo)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/branches/branch-0.18@723202 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze, 16 years ago
Commit 7df7796428

+ 4 - 1
CHANGES.txt

@@ -55,7 +55,7 @@ Release 0.18.3 - Unreleased
     HADOOP-4713. Fix librecordio to handle records larger than 64k. (Christian
     Kunz via cdouglas)
    
-   HADOOP-4635. Fix a memory leak in fuse dfs. (pete wyckoff via mahadev)
+    HADOOP-4635. Fix a memory leak in fuse dfs. (pete wyckoff via mahadev)
 
     HADOOP-4714. Report status between merges and make the number of records
     between progress reports configurable. (Jothi Padmanabhan via cdouglas)
@@ -66,6 +66,9 @@ Release 0.18.3 - Unreleased
     HADOOP-4679. Datanode prints tons of log messages: waiting for threadgroup
     to exit, active threads is XX. (hairong)
 
+    HADOOP-4734. Block and meta data validation codes in HADOOP-1700 should be
+    committed to 0.18. (szetszwo)
+
 Release 0.18.2 - 2008-11-03
 
   BUG FIXES

+ 16 - 2
src/hdfs/org/apache/hadoop/dfs/DataNode.java

@@ -3150,8 +3150,22 @@ public class DataNode extends Configured
       LOG.debug("block=" + block);
     }
     Block stored = data.getStoredBlock(block.blkid);
-    return stored == null?
-        null: new BlockMetaDataInfo(stored, blockScanner.getLastScanTime(stored));
+
+    if (stored == null) {
+      return null;
+    }
+    BlockMetaDataInfo info = new BlockMetaDataInfo(stored,
+                                 blockScanner.getLastScanTime(stored));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("getBlockMetaDataInfo successful block=" + stored +
+                " length " + stored.getNumBytes() +
+                " genstamp " + stored.getGenerationStamp());
+    }
+
+    // paranoia! verify that the contents of the stored block
+    // match the block file on disk.
+    data.validateBlockMetadata(stored);
+    return info;
   }
 
   Daemon recoverBlocks(final Block[] blocks, final DatanodeInfo[][] targets) {
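
Note on this hunk: the rewrite keeps the old contract for a missing replica (return null) but adds a stricter failure mode, since an on-disk replica whose length or generation stamp has drifted now surfaces as an IOException from validateBlockMetadata instead of being handed back as metadata. A minimal caller-side sketch of the two outcomes follows; MetaDataFetcher and its use are illustrative stand-ins, not the actual 0.18 RPC surface.

import java.io.IOException;

public class RecoverySketch {

  /** Hypothetical stand-in for DataNode.getBlockMetaDataInfo(Block). */
  interface MetaDataFetcher {
    Object getBlockMetaDataInfo(long blockId) throws IOException;
  }

  /** Returns true only if this replica can participate in recovery. */
  static boolean usableForRecovery(MetaDataFetcher datanode, long blockId) {
    try {
      // null: the datanode has no record of this block.
      return datanode.getBlockMetaDataInfo(blockId) != null;
    } catch (IOException e) {
      // Thrown (among other reasons) when validateBlockMetadata finds the
      // on-disk replica inconsistent with its metadata; skip this replica.
      return false;
    }
  }
}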

+ 49 - 0
src/hdfs/org/apache/hadoop/dfs/FSDataset.java

@@ -862,6 +862,10 @@ class FSDataset implements FSConstants, FSDatasetInterface {
 
     updateBlockMap(ongoingCreates, oldblock, newblock);
     updateBlockMap(volumeMap, oldblock, newblock);
+
+    // paranoia! verify that the contents of the stored block
+    // match the block file on disk.
+    validateBlockMetadata(newblock);
     return null;
   }
 
@@ -1125,6 +1129,51 @@ class FSDataset implements FSConstants, FSDatasetInterface {
     return null;
   }
 
+  /** {@inheritDoc} */
+  public void validateBlockMetadata(Block b) throws IOException {
+    DatanodeBlockInfo info = volumeMap.get(b);
+    if (info == null) {
+      throw new IOException("Block " + b + " does not exist in volumeMap.");
+    }
+    FSVolume v = info.getVolume();
+    File tmp = v.getTmpFile(b);
+    File f = getFile(b);
+    if (f == null) {
+      f = tmp;
+    }
+    if (f == null) {
+      throw new IOException("Block " + b + " does not exist on disk.");
+    }
+    if (!f.exists()) {
+      throw new IOException("Block " + b + 
+                            " block file " + f +
+                            " does not exist on disk.");
+    }
+    if (b.getNumBytes() != f.length()) {
+      throw new IOException("Block " + b + 
+                            " length is " + b.getNumBytes()  +
+                            " does not match block file length " +
+                            f.length());
+    }
+    File meta = getMetaFile(f, b);
+    if (meta == null) {
+      throw new IOException("Block " + b + 
+                            " metafile does not exist.");
+    }
+    if (!meta.exists()) {
+      throw new IOException("Block " + b + 
+                            " metafile " + meta +
+                            " does not exist on disk.");
+    }
+    long stamp = parseGenerationStamp(f, meta);
+    if (stamp != b.getGenerationStamp()) {
+      throw new IOException("Block " + b + 
+                            " genstamp is " + b.getGenerationStamp()  +
+                            " does not match meta file stamp " +
+                            stamp);
+    }
+  }
+
   /**
    * We're informed that a block is no longer valid.  We
    * could lazily garbage-collect the block, but why bother?

+ 7 - 0
src/hdfs/org/apache/hadoop/dfs/FSDatasetInterface.java

@@ -221,4 +221,11 @@ public interface FSDatasetInterface extends FSDatasetMBean {
   public void setChannelPosition(Block b, BlockWriteStreams stream, long dataOffset,
                                  long ckOffset) throws IOException;
 
+  /**
+   * Validate that the contents of the Block match
+   * the file on disk. Throws an IOException if they do not.
+   * @param b The block to be verified.
+   * @throws IOException
+   */
+  public void validateBlockMetadata(Block b) throws IOException;
 }
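
The interface method returns void and reports problems only via IOException, which leaves room for implementations that have nothing to check. A hedged sketch of both compliant shapes, using a hypothetical mini-interface rather than the real FSDatasetInterface:

import java.io.File;
import java.io.IOException;

interface DatasetSketch {
  void validateBlockMetadata(long blockId) throws IOException;
}

/** File-backed datasets cross-check on-disk state and throw on mismatch. */
class OnDiskDatasetSketch implements DatasetSketch {
  public void validateBlockMetadata(long blockId) throws IOException {
    File blockFile = new File("blk_" + blockId);  // assumed naming, for illustration
    if (!blockFile.exists()) {
      throw new IOException("block " + blockId + " missing on disk");
    }
    // ...length and generation-stamp checks as in FSDataset above...
  }
}

/** Purely in-memory datasets have no on-disk state; a no-op is compliant. */
class InMemoryDatasetSketch implements DatasetSketch {
  public void validateBlockMetadata(long blockId) {
  }
}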

+ 4 - 0
src/test/org/apache/hadoop/dfs/SimulatedFSDataset.java

@@ -399,6 +399,10 @@ public class SimulatedFSDataset  implements FSConstants, FSDatasetInterface, Con
     return result;
   }
 
+  /** No-op */
+  public void validateBlockMetadata(Block b) {
+  }
+
   /**
    * Returns metaData of block b as an input stream
    * @param b - the block for which the metadata is desired
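
The empty body is the in-memory case from the interface sketch above: SimulatedFSDataset fakes block storage for tests rather than writing real block and meta files, so there is no on-disk state whose length or generation stamp could drift, and a no-op satisfies the contract.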