
HDFS-8585. Erasure Coding: Remove dataBlockNum and parityBlockNum from StripedBlockProto. Contributed by Yi Liu.

Jing Zhao 10 years ago
parent
commit
683332b36d
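
In short, striped blocks are now persisted like ordinary blocks: they go into the INodeFile's generic block list as plain BlockProto entries, an emptied StripedBlocksFeature message merely marks a file as striped, and the data/parity block counts are recovered from the file's ECSchema at load time instead of being stored per block. A minimal sketch of the resulting round trip, assembled from the diffs below (`stripedBlock` and `schema` are placeholders, not identifiers from this commit):

```java
// Save: BlockInfoStriped is a Block, so the generic converter applies
// and dataBlockNum/parityBlockNum are simply no longer written out.
BlockProto onDisk = PBHelper.convert(stripedBlock);

// Load: the ECSchema, not the proto, supplies the data/parity counts
// when the in-memory striped block is rebuilt.
BlockInfoStriped restored =
    new BlockInfoStriped(PBHelper.convert(onDisk), schema);
```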

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt

@@ -296,3 +296,6 @@
 
     HDFS-8450. Erasure Coding: Consolidate erasure coding zone related
     implementation into a single class (Rakesh R via vinayakumarb)
+
+    HDFS-8585. Erasure Coding: Remove dataBlockNum and parityBlockNum from
+    StripedBlockProto. (Yi Liu via jing9)

+ 0 - 16
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -183,7 +183,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StripedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto;
@@ -195,7 +194,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
@@ -444,20 +442,6 @@ public class PBHelper {
     return new Block(b.getBlockId(), b.getNumBytes(), b.getGenStamp());
   }
 
-  public static BlockInfoStriped convert(StripedBlockProto p, ECSchema schema) {
-    return new BlockInfoStriped(convert(p.getBlock()), schema);
-  }
-
-  public static StripedBlockProto convert(BlockInfoStriped blk) {
-    BlockProto bp = BlockProto.newBuilder().setBlockId(blk.getBlockId())
-        .setGenStamp(blk.getGenerationStamp()).setNumBytes(blk.getNumBytes())
-        .build();
-    return StripedBlockProto.newBuilder()
-        .setDataBlockNum(blk.getDataBlockNum())
-        .setParityBlockNum(blk.getParityBlockNum())
-        .setBlock(bp).build();
-  }
-
   public static BlockWithLocationsProto convert(BlockWithLocations blk) {
     BlockWithLocationsProto.Builder builder = BlockWithLocationsProto
         .newBuilder().setBlock(convert(blk.getBlock()))
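
With the dedicated StripedBlockProto converters deleted, the call sites changed below fall back to the generic pair that stays in PBHelper. For reference, the deserializing half is the method whose tail appears in the unchanged context above; its signature is assumed from the surrounding (unchanged) code:

```java
// Generic converter retained in PBHelper; only its last line is
// visible in the hunk context above.
public static Block convert(BlockProto b) {
  return new Block(b.getBlockId(), b.getNumBytes(), b.getGenStamp());
}
```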

+ 12 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java

@@ -25,7 +25,6 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -42,7 +41,6 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StripedBlockProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
@@ -330,10 +328,14 @@ public final class FSImageFormatPBINode {
       short replication = (short) f.getReplication();
       LoaderContext state = parent.getLoaderContext();
 
-      BlockInfoContiguous[] blocks = new BlockInfoContiguous[bp.size()];
-      for (int i = 0, e = bp.size(); i < e; ++i) {
-        blocks[i] = new BlockInfoContiguous(PBHelper.convert(bp.get(i)), replication);
+      BlockInfoContiguous[] blocks = null;
+      if (!f.hasStripedBlocks()) {
+        blocks = new BlockInfoContiguous[bp.size()];
+        for (int i = 0, e = bp.size(); i < e; ++i) {
+          blocks[i] = new BlockInfoContiguous(PBHelper.convert(bp.get(i)), replication);
+        }
       }
+
       final PermissionStatus permissions = loadPermission(f.getPermission(),
           parent.getLoaderContext().getStringTable());
 
@@ -357,10 +359,9 @@ public final class FSImageFormatPBINode {
       if (f.hasStripedBlocks()) {
         // TODO: HDFS-7859
         ECSchema schema = ErasureCodingSchemaManager.getSystemDefaultSchema();
-        StripedBlocksFeature sb = f.getStripedBlocks();
         stripeFeature = file.addStripedBlocksFeature();
-        for (StripedBlockProto sp : sb.getBlocksList()) {
-          stripeFeature.addBlock(PBHelper.convert(sp, schema));
+        for (BlockProto b : bp) {
+          stripeFeature.addBlock(new BlockInfoStriped(PBHelper.convert(b), schema));
         }
       }
 
@@ -658,14 +659,14 @@ public final class FSImageFormatPBINode {
 
       FileWithStripedBlocksFeature sb = n.getStripedBlocksFeature();
       if (sb != null) {
-        StripedBlocksFeature.Builder builder =
-            StripedBlocksFeature.newBuilder();
         BlockInfoStriped[] sblocks = sb.getBlocks();
         if (sblocks != null) {
           for (BlockInfoStriped sblk : sblocks) {
-            builder.addBlocks(PBHelper.convert(sblk));
+            b.addBlocks(PBHelper.convert(sblk));
           }
         }
+        StripedBlocksFeature.Builder builder =
+            StripedBlocksFeature.newBuilder();
         b.setStripedBlocks(builder.build());
       }
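
Taken together, the saver side condenses to the following shape (a sketch assembled from the hunk above, with the builder inlined and the null check on `sb.getBlocks()` omitted for brevity; `b` is the INodeFile builder from the surrounding method):

```java
// Striped blocks are written into the generic block list...
for (BlockInfoStriped sblk : sb.getBlocks()) {
  b.addBlocks(PBHelper.convert(sblk));   // plain BlockProto entries
}
// ...while an empty StripedBlocksFeature is set purely as a marker.
b.setStripedBlocks(StripedBlocksFeature.newBuilder().build());
```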
 

+ 2 - 18
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java

@@ -41,15 +41,12 @@ import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StripedBlockProto;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf;
 import org.apache.hadoop.hdfs.server.namenode.FSImageUtil;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
-import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.LimitInputStream;
@@ -485,21 +482,8 @@ class FSImageLoader {
 
   static long getFileSize(FsImageProto.INodeSection.INodeFile f) {
     long size = 0;
-    if (f.hasStripedBlocks()) {
-      List<StripedBlockProto> blocksList = f.getStripedBlocks().getBlocksList();
-      // Get total of actual data block size
-      for (StripedBlockProto p : blocksList) {
-        // Total usage by this striped blocks should be the total of data
-        // blocks and parity blocks
-        size += StripedBlockUtil.spaceConsumedByStripedBlock(p.getBlock()
-            .getNumBytes(), p.getDataBlockNum(), p.getParityBlockNum(),
-            HdfsConstants.BLOCK_STRIPED_CELL_SIZE);
-      }
-    } else {
-      for (HdfsProtos.BlockProto p : f.getBlocksList()) {
-        size += p.getNumBytes();
-      }
-
+    for (HdfsProtos.BlockProto p : f.getBlocksList()) {
+      size += p.getNumBytes();
     }
     return size;
   }
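
Note the semantic shift here: the removed branch reported space consumed (data plus parity cells), while the unified loop reports the logical file length, which is what the updated test below now expects. Roughly, for a file that fills whole stripes (an illustrative calculation with placeholder variables, assuming a 6+3 schema):

```java
// Old: raw space including parity, e.g. 1.5x the data for 6 data + 3 parity.
long oldReported = logicalLength * (dataBlocks + parityBlocks) / dataBlocks;
// New: the sum of BlockProto.numBytes, i.e. the logical length itself.
long newReported = logicalLength;
```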

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto

@@ -93,7 +93,7 @@ message INodeSection {
   }
 
   message StripedBlocksFeature {
-    repeated StripedBlockProto blocks = 1;
+    // store striped blocks related information
   }
 
   message AclFeatureProto {
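
The message body is now empty; it is the presence of the feature on an INodeFile, not its contents, that distinguishes striped from contiguous layout. A sketch of how the marker is read, mirroring the loader change above:

```java
if (f.hasStripedBlocks()) {
  // every entry in f.getBlocksList() describes a striped block group
}
```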

+ 0 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto

@@ -514,16 +514,6 @@ message BlockProto {
   optional uint64 numBytes = 3 [default = 0];
 }
 
-/**
- * Striped block information. Besides the basic information for a block,
- * it also contains the number of data/parity blocks.
- */
-message StripedBlockProto {
-  required BlockProto block = 1;
-  optional uint32 dataBlockNum = 2;
-  optional uint32 parityBlockNum = 3;
-}
-
 /**
  * Block and datanodes where is it located
  */
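
With StripedBlockProto gone, the generated API of the generic message is all that block serialization builds against. A minimal sketch using the same builder chain that the removed PBHelper code used (`blockId`, `genStamp`, and `numBytes` are placeholder values):

```java
BlockProto bp = BlockProto.newBuilder()
    .setBlockId(blockId)
    .setGenStamp(genStamp)
    .setNumBytes(numBytes)
    .build();
```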

+ 5 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java

@@ -39,7 +39,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -136,28 +135,25 @@ public class TestOfflineImageViewerWithStripedBlocks {
     }
     FSImageLoader loader = FSImageLoader.load(orgFsimage.getAbsolutePath());
     String fileStatus = loader.getFileStatus("/eczone/striped");
-    long expectedSpaceConsumed = StripedBlockUtil.spaceConsumedByStripedBlock(
-        bytes.length, HdfsConstants.NUM_DATA_BLOCKS,
-        HdfsConstants.NUM_PARITY_BLOCKS, HdfsConstants.BLOCK_STRIPED_CELL_SIZE);
+    long expectedFileSize = bytes.length;
 
     // Verify space consumed present in BlockInfoStriped
     FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
     INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
     assertTrue("Invalid block size", fileNode.getBlocks().length > 0);
-    long actualSpaceConsumed = 0;
+    long actualFileSize = 0;
     for (BlockInfo blockInfo : fileNode.getBlocks()) {
       assertTrue("Didn't find block striped information",
           blockInfo instanceof BlockInfoStriped);
-      BlockInfoStriped b = (BlockInfoStriped) blockInfo;
-      actualSpaceConsumed += b.spaceConsumed();
+      actualFileSize += blockInfo.getNumBytes();
     }
 
     assertEquals("Wrongly computed file size contains striped blocks",
-        expectedSpaceConsumed, actualSpaceConsumed);
+        expectedFileSize, actualFileSize);
 
     // Verify space consumed present in filestatus
     String EXPECTED_FILE_SIZE = "\"length\":"
-        + String.valueOf(expectedSpaceConsumed);
+        + String.valueOf(expectedFileSize);
     assertTrue(
         "Wrongly computed file size contains striped blocks, file status:"
             + fileStatus + ". Expected file size is : " + EXPECTED_FILE_SIZE,