
HDFS-660. Remove deprecated methods from InterDatanodeProtocol. Contributed by Konstantin Shvachko.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@822080 13f79535-47bb-0310-9956-ffa450edef68
Konstantin Shvachko 16 years ago
parent
commit
1882dba508

+ 2 - 0
CHANGES.txt

@@ -41,6 +41,8 @@ Release 0.21.0 - Unreleased
 
     HDFS-658. Block recovery for primary data-node. (shv)
 
+    HDFS-660. Remove deprecated methods from InterDatanodeProtocol. (shv)
+
   NEW FEATURES
 
     HDFS-436. Introduce AspectJ framework for HDFS code and tests.

+ 1 - 43
src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -68,7 +68,6 @@ import org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.StreamFile;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
-import org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -1508,32 +1507,6 @@ public class DataNode extends Configured
     }
   }
 
-  // InterDataNodeProtocol implementation
-  /** {@inheritDoc} */
-  public BlockMetaDataInfo getBlockMetaDataInfo(Block block
-      ) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("block=" + block);
-    }
-    Block stored = data.getStoredBlock(block.getBlockId());
-
-    if (stored == null) {
-      return null;
-    }
-    BlockMetaDataInfo info = new BlockMetaDataInfo(stored,
-                                 blockScanner.getLastScanTime(stored));
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("getBlockMetaDataInfo successful block=" + stored +
-                " length " + stored.getNumBytes() +
-                " genstamp " + stored.getGenerationStamp());
-    }
-
-    // paranoia! verify that the contents of the stored block
-    // matches the block file on disk.
-    data.validateBlockMetadata(stored);
-    return info;
-  }
-
   public Daemon recoverBlocks(final Collection<RecoveringBlock> blocks) {
     Daemon d = new Daemon(threadGroup, new Runnable() {
       /** Recover a list of blocks. It is run by the primary datanode. */
@@ -1552,22 +1525,7 @@ public class DataNode extends Configured
     return d;
   }
 
-  /** {@inheritDoc} */
-  public void updateBlock(Block oldblock, Block newblock, boolean finalize) throws IOException {
-    LOG.info("oldblock=" + oldblock + "(length=" + oldblock.getNumBytes()
-        + "), newblock=" + newblock + "(length=" + newblock.getNumBytes()
-        + "), datanode=" + dnRegistration.getName());
-    data.updateBlock(oldblock, newblock);
-    if (finalize) {
-      data.finalizeBlock(newblock);
-      myMetrics.blocksWritten.inc(); 
-      notifyNamenodeReceivedBlock(newblock, EMPTY_DEL_HINT);
-      LOG.info("Received block " + newblock +
-                " of size " + newblock.getNumBytes() +
-                " as part of lease recovery.");
-    }
-  }
-
+  // InterDataNodeProtocol implementation
   @Override // InterDatanodeProtocol
   public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
   throws IOException {
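
Note: the two methods deleted above were the pre-HDFS-658 recovery path; after this change, inter-datanode recovery goes solely through initReplicaRecovery()/updateReplicaUnderRecovery() (both added in protocol version 4, per the InterDatanodeProtocol diff below). A minimal sketch of how a primary datanode might gather recovery info over the surviving method — the loop, error handling, and the BlockRecoveryCommand.RecoveringBlock import path are illustrative assumptions, not the committed code:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
    import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
    import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;

    class ReplicaRecoverySketch {
      /** Ask each datanode for its replica's recovery info; tolerate failures. */
      static List<ReplicaRecoveryInfo> initRecovery(
          List<InterDatanodeProtocol> datanodes, RecoveringBlock rBlock) {
        List<ReplicaRecoveryInfo> replicas = new ArrayList<ReplicaRecoveryInfo>();
        for (InterDatanodeProtocol dn : datanodes) {
          try {
            ReplicaRecoveryInfo info = dn.initReplicaRecovery(rBlock);
            if (info != null) {          // treat null as "no replica here" (assumption)
              replicas.add(info);
            }
          } catch (IOException e) {
            // an unreachable datanode simply contributes no replica
            InterDatanodeProtocol.LOG.warn("initReplicaRecovery failed for " + rBlock, e);
          }
        }
        return replicas;
      }
    }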

+ 0 - 65
src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java

@@ -960,66 +960,6 @@ public class FSDataset implements FSConstants, FSDatasetInterface {
    return info.unlinkBlock(numLinks);
   }
 
-  /** {@inheritDoc} */
-  public void updateBlock(Block oldblock, Block newblock) throws IOException {
-    if (oldblock.getBlockId() != newblock.getBlockId()) {
-      throw new IOException("Cannot update oldblock (=" + oldblock
-          + ") to newblock (=" + newblock + ").");
-    }
-    
-    final ReplicaInfo replicaInfo = volumeMap.get(oldblock.getBlockId());
-    File blockFile = replicaInfo==null?null:replicaInfo.getBlockFile();
-    if (blockFile == null) {
-      throw new IOException("Block " + oldblock + " does not exist.");
-    }
-
-    //check write threads
-    if (replicaInfo instanceof ReplicaInPipeline) {
-      ((ReplicaInPipeline)replicaInfo).stopWriter();
-    }
-
-    //No ongoing create threads is alive.  Update block.
-    File oldMetaFile = replicaInfo.getMetaFile();
-    long oldgs = replicaInfo.getGenerationStamp();
-    
-    //rename meta file to a tmp file
-    File tmpMetaFile = new File(oldMetaFile.getParent(),
-        oldMetaFile.getName()+"_tmp" + newblock.getGenerationStamp());
-    if (!oldMetaFile.renameTo(tmpMetaFile)){
-      throw new IOException("Cannot rename block meta file to " + tmpMetaFile);
-    }
-
-    //update generation stamp
-    if (oldgs >= newblock.getGenerationStamp()) {
-      throw new IOException("Cannot update block (id=" + newblock.getBlockId()
-          + ") generation stamp from " + oldgs
-          + " to " + newblock.getGenerationStamp());
-    }
-    
-    //update length
-    if (newblock.getNumBytes() > oldblock.getNumBytes()) {
-      throw new IOException("Cannot update block file (=" + blockFile
-          + ") length from " + oldblock.getNumBytes() + " to " + newblock.getNumBytes());
-    }
-    if (newblock.getNumBytes() < oldblock.getNumBytes()) {
-      truncateBlock(blockFile, tmpMetaFile, oldblock.getNumBytes(), newblock.getNumBytes());
-    }
-
-    // update replicaInfo
-    replicaInfo.setGenerationStamp(newblock.getGenerationStamp());
-    replicaInfo.setNumBytes(newblock.getNumBytes());
-    
-    //rename the tmp file to the new meta file (with new generation stamp)
-    File newMetaFile = replicaInfo.getMetaFile();
-    if (!tmpMetaFile.renameTo(newMetaFile)) {
-      throw new IOException("Cannot rename tmp meta file to " + newMetaFile);
-    }
-
-    // paranoia! verify that the contents of the stored block 
-    // matches the block file on disk.
-    validateBlockMetadata(newblock);
-  }
-
   static private void truncateBlock(File blockFile, File metaFile,
       long oldlen, long newlen) throws IOException {
     DataNode.LOG.info("truncateBlock: blockFile=" + blockFile
@@ -1597,11 +1537,6 @@ public class FSDataset implements FSConstants, FSDatasetInterface {
     return null;
   }
 
-  /** {@inheritDoc} */
-  public void validateBlockMetadata(Block b) throws IOException {
-    checkReplicaFiles(getReplicaInfo(b));
-  }
-
   /** Check the files of a replica. */
   static void checkReplicaFiles(final ReplicaInfo r) throws IOException {
     final File f = r.getBlockFile();
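
Note: the deleted updateBlock() enforced two invariants before touching anything on disk: a replica's generation stamp may only move forward, and its length may only shrink (via truncateBlock). A standalone sketch of just those checks, assuming plain long arguments rather than Block objects:

    import java.io.IOException;

    class UpdateInvariantsSketch {
      /** The two checks the removed updateBlock() made before modifying files. */
      static void checkUpdateInvariants(long oldGS, long newGS,
                                        long oldLen, long newLen)
          throws IOException {
        if (oldGS >= newGS) {   // generation stamps only move forward
          throw new IOException("Cannot update generation stamp from "
              + oldGS + " to " + newGS);
        }
        if (newLen > oldLen) {  // recovery may truncate a block, never extend it
          throw new IOException("Cannot update block length from "
              + oldLen + " to " + newLen);
        }
        // when newLen < oldLen, the caller truncates both block and meta file
      }
    }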

+ 0 - 13
src/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java

@@ -251,11 +251,6 @@ public interface FSDatasetInterface extends FSDatasetMBean {
   public void recoverClose(Block b,
       long newGS, long expectedBlockLen) throws IOException;
   
-  /**
-   * Update the block to the new generation stamp and length.  
-   */
-  public void updateBlock(Block oldblock, Block newblock) throws IOException;
-  
   /**
    * Finalizes the block previously opened for writing using writeToBlock.
    * The block size is what is in the parameter b and it must match the amount
@@ -332,14 +327,6 @@ public interface FSDatasetInterface extends FSDatasetMBean {
   public void setChannelPosition(Block b, BlockWriteStreams stream, long dataOffset,
                                  long ckOffset) throws IOException;
 
-  /**
-   * Validate that the contents in the Block matches
-   * the file on disk. Returns true if everything is fine.
-   * @param b The block to be verified.
-   * @throws IOException
-   */
-  public void validateBlockMetadata(Block b) throws IOException;
-
   /**
    * checks how many valid storage volumes are there in the DataNode
    * @return true if more then minimum valid volumes left in the FSDataSet

+ 0 - 66
src/java/org/apache/hadoop/hdfs/server/protocol/BlockMetaDataInfo.java

@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.protocol;
-
-import java.io.*;
-
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.io.*;
-
-/**
- * Meta data information for a block
- */
-public class BlockMetaDataInfo extends Block {
-  static final WritableFactory FACTORY = new WritableFactory() {
-    public Writable newInstance() { return new BlockMetaDataInfo(); }
-  };
-  static {                                      // register a ctor
-    WritableFactories.setFactory(BlockMetaDataInfo.class, FACTORY);
-  }
-
-  private long lastScanTime;
-
-  public BlockMetaDataInfo() {}
-
-  public BlockMetaDataInfo(Block b, long lastScanTime) {
-    super(b);
-    this.lastScanTime = lastScanTime;
-  }
-
-  public long getLastScanTime() {return lastScanTime;}
-
-  /** {@inheritDoc} */
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    out.writeLong(lastScanTime);
-  }
-
-  /** {@inheritDoc} */
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    lastScanTime = in.readLong();
-  }
-  @Override
-  public boolean equals(Object o) {
-    return super.equals(o);
-  }
-  @Override
-  public int hashCode() {
-    return super.hashCode();
-  }
-}
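
Note: the deleted class followed Hadoop's Writable pattern: register a factory so instances can be constructed reflectively, then keep write() and readFields() serializing fields in the same order, superclass fields first. A generic, JDK-only illustration of that ordering contract (not Hadoop code; the field names are stand-ins):

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    class TimestampedRecord {
      private long payload;       // stands in for the inherited Block fields
      private long lastScanTime;  // the extra field BlockMetaDataInfo carried

      void write(DataOutput out) throws IOException {
        out.writeLong(payload);       // "super.write(out)" in the real class
        out.writeLong(lastScanTime);  // subclass field comes last
      }

      void readFields(DataInput in) throws IOException {
        payload = in.readLong();      // must read in exactly the write() order
        lastScanTime = in.readLong();
      }
    }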

+ 2 - 15
src/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java

@@ -32,22 +32,9 @@ public interface InterDatanodeProtocol extends VersionedProtocol {
   public static final Log LOG = LogFactory.getLog(InterDatanodeProtocol.class);
 
   /**
-   * 4: initReplicaRecovery(), updateReplicaUnderRecovery() added.
+   * 5: getBlockMetaDataInfo(), updateBlock() removed.
    */
-  public static final long versionID = 4L;
-
-  /** @return the BlockMetaDataInfo of a block;
-   *  null if the block is not found 
-   */
-  @Deprecated
-  BlockMetaDataInfo getBlockMetaDataInfo(Block block) throws IOException;
-
-  /**
-   * Update the block to the new generation stamp and length.  
-   */
-  @Deprecated
-  void updateBlock(Block oldblock, Block newblock, boolean finalize)
-  throws IOException;
+  public static final long versionID = 5L;
 
   /**
    * Initialize a replica recovery.
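
Note: deleting RPC methods is an incompatible wire change, hence the versionID bump from 4 to 5. Hadoop RPC compares the client's compiled-in versionID against the server's and rejects mismatches instead of dispatching calls that no longer exist. A schematic sketch of that style of check (illustrative, not the actual RPC machinery):

    import java.io.IOException;

    class VersionCheckSketch {
      /** The kind of check an RPC stack makes before dispatching calls. */
      static void checkProtocolVersion(long clientVersion, long serverVersion)
          throws IOException {
        if (clientVersion != serverVersion) {
          // a client compiled against versionID 4 still expects
          // getBlockMetaDataInfo()/updateBlock(); fail fast instead
          throw new IOException("Protocol version mismatch: client expected "
              + clientVersion + " but server is " + serverVersion);
        }
      }
    }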

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java

@@ -93,7 +93,7 @@ public class TestLeaseRecovery extends junit.framework.TestCase {
         assertTrue(datanodes[i] != null);
       }
       
-      //verify BlockMetaDataInfo
+      //verify Block Info
       Block lastblock = locatedblock.getBlock();
       DataNode.LOG.info("newblocks=" + lastblock);
       for(int i = 0; i < REPLICATION_NUM; i++) {

+ 0 - 19
src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java

@@ -115,12 +115,6 @@ public class SimulatedFSDataset  implements FSConstants, FSDatasetInterface, Con
       return theBlock.getGenerationStamp();
     }
 
-    synchronized void updateBlock(Block b) {
-      theBlock.setGenerationStamp(b.getGenerationStamp());
-      setNumBytes(b.getNumBytes());
-      setBytesOnDisk(b.getNumBytes());
-    }
-    
     synchronized public long getNumBytes() {
       if (!finalized) {
          return bytesRcvd;
@@ -415,15 +409,6 @@ public class SimulatedFSDataset  implements FSConstants, FSDatasetInterface, Con
     return b;
   }
 
-  /** {@inheritDoc} */
-  public void updateBlock(Block oldblock, Block newblock) throws IOException {
-    BInfo binfo = blockMap.get(newblock);
-    if (binfo == null) {
-      throw new IOException("BInfo not found, b=" + newblock);
-    }
-    binfo.updateBlock(newblock);
-  }
-
   public synchronized void invalidate(Block[] invalidBlks) throws IOException {
     boolean error = false;
     if (invalidBlks == null) {
@@ -578,10 +563,6 @@ public class SimulatedFSDataset  implements FSConstants, FSDatasetInterface, Con
     throw new IOException("Not supported");
   }
 
-  /** No-op */
-  public void validateBlockMetadata(Block b) {
-  }
-
   /**
    * Returns metaData of block b as an input stream
    * @param b - the block for which the metadata is desired