
HDFS-3082. Clean up FSDatasetInterface and change DataNode.data to package private.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1300392 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 13 years ago
parent
commit
3e582c690c
13 changed files with 40 additions and 136 deletions
  1. + 3 - 0    hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  2. + 1 - 17   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
  3. + 4 - 3    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
  4. + 1 - 1    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  5. + 5 - 26   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
  6. + 6 - 37   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
  7. + 1 - 1    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
  8. + 2 - 2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
  9. + 2 - 1    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
  10. + 3 - 2   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
  11. + 3 - 1   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
  12. + 8 - 44  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
  13. + 1 - 1   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -225,6 +225,9 @@ Release 0.23.3 - UNRELEASED
     HDFS-2731. Add command to bootstrap the Standby Node's name directories
     from the Active NameNode. (todd)
 
+    HDFS-3082. Clean up FSDatasetInterface and change DataNode.data to package
+    private.  (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-3024. Improve performance of stringification in addStoredBlock (todd)

+ 1 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java

@@ -29,21 +29,13 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
-import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
-import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
 import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
@@ -51,16 +43,8 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
-import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
-import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
@@ -578,7 +562,7 @@ class BPOfferService {
           dn.blockScanner.deleteBlocks(bcmd.getBlockPoolId(), toDelete);
         }
         // using global fsdataset
-        dn.data.invalidate(bcmd.getBlockPoolId(), toDelete);
+        dn.getFSDataset().invalidate(bcmd.getBlockPoolId(), toDelete);
       } catch(IOException e) {
         dn.checkDiskError();
         throw e;
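
The hunk above swaps direct access to the dataset field for the DataNode accessor. A minimal sketch of what getFSDataset() presumably does, assuming it simply exposes the field declared in the DataNode.java hunk below (the exact signature is an assumption):

    // Sketch only: accessor for the now package-private dataset field.
    FSDatasetInterface<? extends FSDatasetInterface.FSVolumeInterface> getFSDataset() {
      return data;
    }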

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java

@@ -219,9 +219,10 @@ class BlockSender implements java.io.Closeable {
         (!is32Bit || length <= Integer.MAX_VALUE);
 
       DataChecksum csum;
-      if (!corruptChecksumOk || datanode.data.metaFileExists(block)) {
-        checksumIn = new DataInputStream(new BufferedInputStream(datanode.data
-            .getMetaDataInputStream(block), HdfsConstants.IO_FILE_BUFFER_SIZE));
+      final InputStream metaIn = datanode.data.getMetaDataInputStream(block);
+      if (!corruptChecksumOk || metaIn != null) {
+        checksumIn = new DataInputStream(
+            new BufferedInputStream(metaIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
 
         // read and handle the common header here. For now just a version
         BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
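
With this change getMetaDataInputStream(block) returns null when the block has no meta file, so the separate metaFileExists() probe becomes unnecessary. A hedged sketch of the resulting call pattern, not the BlockSender code itself (the exception message is illustrative):

    final MetaDataInputStream metaIn = dataset.getMetaDataInputStream(block);
    if (metaIn == null) {
      // No meta file on disk; fatal unless corrupt checksums are tolerated.
      throw new FileNotFoundException("Meta data not found for " + block);
    }
    final DataInputStream checksumIn = new DataInputStream(
        new BufferedInputStream(metaIn, HdfsConstants.IO_FILE_BUFFER_SIZE));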

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -231,7 +231,7 @@ public class DataNode extends Configured
   
   volatile boolean shouldRun = true;
   private BlockPoolManager blockPoolManager;
-  public volatile FSDatasetInterface<? extends FSVolumeInterface> data = null;
+  volatile FSDatasetInterface<? extends FSVolumeInterface> data = null;
   private String clusterId = null;
 
   public final static String EMPTY_DEL_HINT = "";

+ 5 - 26
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java

@@ -1036,23 +1036,14 @@ class FSDataset implements FSDatasetInterface<FSDataset.FSVolume> {
     return null;
   }
 
-  @Override // FSDatasetInterface
-  public boolean metaFileExists(ExtendedBlock b) throws IOException {
-    return getMetaFile(b).exists();
-  }
-  
-  @Override // FSDatasetInterface
-  public long getMetaDataLength(ExtendedBlock b) throws IOException {
-    File checksumFile = getMetaFile(b);
-    return checksumFile.length();
-  }
-
   @Override // FSDatasetInterface
   public MetaDataInputStream getMetaDataInputStream(ExtendedBlock b)
       throws IOException {
-    File checksumFile = getMetaFile(b);
-    return new MetaDataInputStream(new FileInputStream(checksumFile),
-                                                    checksumFile.length());
+    final File meta = getMetaFile(b);
+    if (meta == null || !meta.exists()) {
+      return null;
+    }
+    return new MetaDataInputStream(new FileInputStream(meta), meta.length());
   }
     
   private final DataNode datanode;
@@ -1213,18 +1204,6 @@ class FSDataset implements FSDatasetInterface<FSDataset.FSVolume> {
     return f;
   }
   
-  @Override // FSDatasetInterface
-  public InputStream getBlockInputStream(ExtendedBlock b)
-      throws IOException {
-    File f = getBlockFileNoExistsCheck(b);
-    try {
-      return new FileInputStream(f);
-    } catch (FileNotFoundException fnfe) {
-      throw new IOException("Block " + b + " is not valid. " +
-          "Expected block file at " + f + " does not exist.");
-    }
-  }
-  
   /**
    * Return the File associated with a block, without first
    * checking that it exists. This should be used when the

+ 6 - 37
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java

@@ -39,8 +39,8 @@ import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlo
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.ReflectionUtils;
 
 /**
  * This is an interface for the underlying storage that stores blocks for
@@ -123,14 +123,6 @@ public interface FSDatasetInterface<V extends FSDatasetInterface.FSVolumeInterfa
   public void checkAndUpdate(String bpid, long blockId, File diskFile,
       File diskMetaFile, FSVolumeInterface vol);
 
-  /**
-   * Returns the length of the metadata file of the specified block
-   * @param b - the block for which the metadata length is desired
-   * @return the length of the metadata file for the specified block.
-   * @throws IOException
-   */
-  public long getMetaDataLength(ExtendedBlock b) throws IOException;
-  
   /**
    * This class provides the input stream and length of the metadata
    * of a block
@@ -149,22 +141,13 @@ public interface FSDatasetInterface<V extends FSDatasetInterface.FSVolumeInterfa
   }
   
   /**
-   * Returns metaData of block b as an input stream (and its length)
-   * @param b - the block
-   * @return the metadata input stream; 
-   * @throws IOException
-   */
-  public MetaDataInputStream getMetaDataInputStream(ExtendedBlock b)
-        throws IOException;
-  
-  /**
-   * Does the meta file exist for this block?
    * @param b - the block
-   * @return true of the metafile for specified block exits
+   * @return a stream if the meta-data of the block exists;
+   *         otherwise, return null.
    * @throws IOException
    */
-  public boolean metaFileExists(ExtendedBlock b) throws IOException;
-
+  public MetaDataInputStream getMetaDataInputStream(ExtendedBlock b
+      ) throws IOException;
 
   /**
    * Returns the specified block's on-disk length (excluding metadata)
@@ -191,16 +174,7 @@ public interface FSDatasetInterface<V extends FSDatasetInterface.FSVolumeInterfa
   /**
    * @return the generation stamp stored with the block.
    */
-  public Block getStoredBlock(String bpid, long blkid)
-      throws IOException;
-
-  /**
-   * Returns an input stream to read the contents of the specified block
-   * @param b
-   * @return an input stream to read the contents of the specified block
-   * @throws IOException
-   */
-  public InputStream getBlockInputStream(ExtendedBlock b) throws IOException;
+  public Block getStoredBlock(String bpid, long blkid) throws IOException;
   
   /**
    * Returns an input stream at specified offset of the specified block
@@ -408,11 +382,6 @@ public interface FSDatasetInterface<V extends FSDatasetInterface.FSVolumeInterfa
      */
   public void checkDataDir() throws DiskErrorException;
       
-    /**
-     * Stringifies the name of the storage
-     */
-  public String toString();
-  
   /**
    * Shutdown the FSDataset
    */
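
The interface's metadata surface now reduces to the single getMetaDataInputStream() call: MetaDataInputStream carries both the stream and its length, which is what makes getMetaDataLength() and metaFileExists() redundant. A sketch of how a caller recovers the removed operations, assuming MetaDataInputStream.getLength() reports the metadata size (the -1 sentinel is an illustrative choice):

    final MetaDataInputStream in = dataset.getMetaDataInputStream(b);
    final boolean metaExists = (in != null);                   // old metaFileExists(b)
    final long metaLength = metaExists ? in.getLength() : -1;  // old getMetaDataLength(b)
    if (in != null) {
      in.close();  // probing only; release the underlying stream
    }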

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -1744,7 +1744,7 @@ public class MiniDFSCluster {
     
     // If datanode dataset is not initialized then wait
     for (DataNodeProperties dn : dataNodes) {
-      if (dn.datanode.data == null) {
+      if (DataNodeTestUtils.getFSDataset(dn.datanode) == null) {
         return true;
       }
     }
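
Since DataNode.data is now package-private, tests outside org.apache.hadoop.hdfs.server.datanode reach the dataset through DataNodeTestUtils, which lives in the datanode package. A plausible sketch of that helper (the exact signature is an assumption):

    package org.apache.hadoop.hdfs.server.datanode;

    // Hypothetical: a same-package utility can legally read the
    // package-private field and hand it to tests in other packages.
    public class DataNodeTestUtils {
      public static FSDatasetInterface<?> getFSDataset(DataNode dn) {
        return dn.data;
      }
    }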

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java

@@ -272,8 +272,8 @@ public class TestFileAppend3 extends junit.framework.TestCase {
       }
       for(DatanodeInfo datanodeinfo : lb.getLocations()) {
         final DataNode dn = cluster.getDataNode(datanodeinfo.getIpcPort());
-        final Block metainfo = dn.data.getStoredBlock(blk.getBlockPoolId(), 
-            blk.getBlockId());
+        final Block metainfo = DataNodeTestUtils.getFSDataset(dn).getStoredBlock(
+            blk.getBlockPoolId(), blk.getBlockId());
         assertEquals(size, metainfo.getNumBytes());
       }
     }

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java

@@ -846,7 +846,8 @@ public class TestFileCreation extends junit.framework.TestCase {
       for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
         DataNode datanode = cluster.getDataNode(datanodeinfo.ipcPort);
         ExtendedBlock blk = locatedblock.getBlock();
-        Block b = datanode.data.getStoredBlock(blk.getBlockPoolId(), blk.getBlockId());
+        Block b = DataNodeTestUtils.getFSDataset(datanode).getStoredBlock(
+            blk.getBlockPoolId(), blk.getBlockId());
         final File blockfile = DataNodeTestUtils.getFile(datanode,
             blk.getBlockPoolId(), b.getBlockId());
         System.out.println("blockfile=" + blockfile);

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.TestInterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -117,8 +118,8 @@ public class TestLeaseRecovery extends junit.framework.TestCase {
           dfs.dfs.getNamenode(), filestr).getBlock();
       long currentGS = lastblock.getGenerationStamp();
       for(int i = 0; i < REPLICATION_NUM; i++) {
-        updatedmetainfo[i] = datanodes[i].data.getStoredBlock(lastblock
-            .getBlockPoolId(), lastblock.getBlockId());
+        updatedmetainfo[i] = DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(
+            lastblock.getBlockPoolId(), lastblock.getBlockId());
         assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
         assertEquals(oldSize, updatedmetainfo[i].getNumBytes());
         assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java

@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -211,7 +212,8 @@ public class TestShortCircuitLocalRead {
       
       //This should succeed
       BlockLocalPathInfo blpi = proxy.getBlockLocalPathInfo(blk, token);
-      Assert.assertEquals(dn.data.getBlockLocalPathInfo(blk).getBlockPath(),
+      Assert.assertEquals(
+          DataNodeTestUtils.getFSDataset(dn).getBlockLocalPathInfo(blk).getBlockPath(),
           blpi.getBlockPath());
 
       // Now try with a not allowed user.

+ 8 - 44
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java

@@ -667,9 +667,8 @@ public class SimulatedFSDataset
     return binfo;
   }
 
-  @Override // FSDatasetInterface
-  public synchronized InputStream getBlockInputStream(ExtendedBlock b)
-      throws IOException {
+  synchronized InputStream getBlockInputStream(ExtendedBlock b
+      ) throws IOException {
     final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
     BInfo binfo = map.get(b.getLocalBlock());
     if (binfo == null) {
@@ -694,15 +693,9 @@ public class SimulatedFSDataset
     throw new IOException("Not supported");
   }
 
-  /**
-   * Returns metaData of block b as an input stream
-   * @param b - the block for which the metadata is desired
-   * @return metaData of block b as an input stream
-   * @throws IOException - block does not exist or problems accessing
-   *  the meta file
-   */
-  private synchronized InputStream getMetaDataInStream(ExtendedBlock b)
-                                              throws IOException {
+  @Override // FSDatasetInterface
+  public synchronized MetaDataInputStream getMetaDataInputStream(ExtendedBlock b
+      ) throws IOException {
     final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
     BInfo binfo = map.get(b.getLocalBlock());
     if (binfo == null) {
@@ -712,40 +705,11 @@ public class SimulatedFSDataset
       throw new IOException("Block " + b + 
           " is being written, its meta cannot be read");
     }
-    return binfo.getMetaIStream();
-  }
- 
-  @Override // FSDatasetInterface
-  public synchronized long getMetaDataLength(ExtendedBlock b)
-      throws IOException {
-    final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
-    BInfo binfo = map.get(b.getLocalBlock());
-    if (binfo == null) {
-      throw new IOException("No such Block " + b );  
-    }
-    if (!binfo.finalized) {
-      throw new IOException("Block " + b +
-          " is being written, its metalength cannot be read");
-    }
-    return binfo.getMetaIStream().getLength();
-  }
-  
-  @Override // FSDatasetInterface
-  public MetaDataInputStream getMetaDataInputStream(ExtendedBlock b)
-      throws IOException {
-     return new MetaDataInputStream(getMetaDataInStream(b), 
-                                    getMetaDataLength(b));
-  }
-
-  @Override // FSDatasetInterface
-  public synchronized boolean metaFileExists(ExtendedBlock b) throws IOException {
-    if (!isValidBlock(b)) {
-          throw new IOException("Block " + b +
-              " is valid, and cannot be written to.");
-      }
-    return true; // crc exists for all valid blocks
+    final SimulatedInputStream sin = binfo.getMetaIStream();
+    return new MetaDataInputStream(sin, sin.getLength());
   }
 
+  @Override
   public void checkDataDir() throws DiskErrorException {
     // nothing to check for simulated data set
   }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java

@@ -102,7 +102,7 @@ public class TestSimulatedFSDataset extends TestCase {
     final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
     ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
     try {
-      assertFalse(fsdataset.metaFileExists(b));
+      assertTrue(fsdataset.getMetaDataInputStream(b) == null);
       assertTrue("Expected an IO exception", false);
     } catch (IOException e) {
       // ok - as expected
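
Note the asymmetry the last two hunks leave in place: FSDataset now returns null for a missing meta file, but SimulatedFSDataset still throws for a block it has never seen, so the assertion above is expected to be bypassed by the exception. A sketch restating what the test exercises:

    // For a block the simulated dataset does not know, the call throws
    // before any null check can run (mirrors the test above).
    try {
      fsdataset.getMetaDataInputStream(b);  // b was never added
      fail("Expected an IOException for an unknown block");
    } catch (IOException e) {
      // expected: "No such Block ..." per the SimulatedFSDataset hunk
    }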