
HDFS-8077. Erasure coding: fix bugs in EC zone and symlinks. Contributed by Jing Zhao and Zhe Zhang.

Jing Zhao 10 years ago
parent
commit
4d0bc724f2

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java

@@ -96,7 +96,7 @@ public class BlockInfoStripedUnderConstruction extends BlockInfoStriped
     for(int i = 0; i < numLocations; i++) {
       // when creating a new block we simply sequentially assign block index to
       // each storage
-      Block blk = new Block(this.getBlockId() + i, this.getGenerationStamp(), 0);
+      Block blk = new Block(this.getBlockId() + i, 0, this.getGenerationStamp());
       replicas[i] = new ReplicaUnderConstruction(blk, targets[i],
           ReplicaState.RBW);
     }

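Why the argument swap matters: the three-argument constructor in org.apache.hadoop.hdfs.protocol.Block is Block(long blkid, long len, long generationStamp), so the old call recorded the generation stamp as the block length and left the genstamp at 0. A minimal sketch of the difference, assuming that constructor signature:

    import org.apache.hadoop.hdfs.protocol.Block;

    public class BlockCtorOrder {
      public static void main(String[] args) {
        long blockId = -1000L, genStamp = 1001L;
        // Old order: the genstamp lands in the length field.
        Block wrong = new Block(blockId, genStamp, 0);
        // Fixed order: length 0, genstamp in the genstamp field.
        Block fixed = new Block(blockId, 0, genStamp);
        System.out.println(wrong.getNumBytes());         // 1001 -- genstamp leaked into length
        System.out.println(fixed.getGenerationStamp());  // 1001 -- genstamp where it belongs
      }
    }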
+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -2483,12 +2483,12 @@ public class BlockManager {
       case COMMITTED:
         if (storedBlock.getGenerationStamp() != reported.getGenerationStamp()) {
           final long reportedGS = reported.getGenerationStamp();
-          return new BlockToMarkCorrupt(reported, storedBlock, reportedGS,
+          return new BlockToMarkCorrupt(new Block(reported), storedBlock, reportedGS,
               "block is " + ucState + " and reported genstamp " + reportedGS
               + " does not match genstamp in block map "
               + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
         } else if (storedBlock.getNumBytes() != reported.getNumBytes()) {
-          return new BlockToMarkCorrupt(reported, storedBlock,
+          return new BlockToMarkCorrupt(new Block(reported), storedBlock,
               "block is " + ucState + " and reported length " +
               reported.getNumBytes() + " does not match " +
               "length in block map " + storedBlock.getNumBytes(),
@@ -2499,7 +2499,7 @@ public class BlockManager {
       case UNDER_CONSTRUCTION:
         if (storedBlock.getGenerationStamp() > reported.getGenerationStamp()) {
           final long reportedGS = reported.getGenerationStamp();
-          return new BlockToMarkCorrupt(reported, storedBlock, reportedGS,
+          return new BlockToMarkCorrupt(new Block(reported), storedBlock, reportedGS,
               "block is " + ucState + " and reported state " + reportedState
               + ", But reported genstamp " + reportedGS
               + " does not match genstamp in block map "
@@ -2515,7 +2515,7 @@ public class BlockManager {
         return null; // not corrupt
       } else if (storedBlock.getGenerationStamp() != reported.getGenerationStamp()) {
         final long reportedGS = reported.getGenerationStamp();
-        return new BlockToMarkCorrupt(reported, storedBlock, reportedGS,
+        return new BlockToMarkCorrupt(new Block(reported), storedBlock, reportedGS,
             "reported " + reportedState + " replica with genstamp " + reportedGS
             + " does not match COMPLETE block's genstamp in block map "
             + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
@@ -2530,7 +2530,7 @@ public class BlockManager {
               "complete with the same genstamp");
           return null;
         } else {
-          return new BlockToMarkCorrupt(reported, storedBlock,
+          return new BlockToMarkCorrupt(new Block(reported), storedBlock,
               "reported replica has invalid state " + reportedState,
               Reason.INVALID_STATE);
         }
@@ -2543,7 +2543,7 @@ public class BlockManager {
       " on " + dn + " size " + storedBlock.getNumBytes();
       // log here at WARN level since this is really a broken HDFS invariant
       LOG.warn(msg);
-      return new BlockToMarkCorrupt(reported, storedBlock, msg,
+      return new BlockToMarkCorrupt(new Block(reported), storedBlock, msg,
           Reason.INVALID_STATE);
     }
   }

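Every call site above now passes new Block(reported) instead of reported itself: a defensive copy via Block's copy constructor, so the BlockToMarkCorrupt no longer aliases the reported replica object (which, presumably, may be a shared instance that is mutated after the corruption check). A minimal sketch of the aliasing hazard, assuming Block(Block) and setGenerationStamp(long) behave as in this era of HDFS:

    import org.apache.hadoop.hdfs.protocol.Block;

    public class DefensiveCopy {
      public static void main(String[] args) {
        Block reported = new Block(1L, 0L, 100L);
        Block aliased = reported;             // what the old code effectively kept
        Block copied  = new Block(reported);  // what the fixed code keeps
        reported.setGenerationStamp(200L);    // the reported block mutates later
        System.out.println(aliased.getGenerationStamp()); // 200 -- drifted with the caller
        System.out.println(copied.getGenerationStamp());  // 100 -- stable snapshot
      }
    }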
+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java

@@ -59,6 +59,13 @@ public class ErasureCodingZoneManager {
       if (inode == null) {
         continue;
       }
+      // We don't allow symlinks in an EC zone, or symlinks pointing to a
+      // file/dir in an EC zone. Therefore, if a symlink is encountered,
+      // the dir shouldn't be in an EC zone.
+      // TODO: properly support symlinks in EC zones
+      if (inode.isSymlink()) {
+        return false;
+      }
       final List<XAttr> xAttrs = inode.getXAttrFeature() == null ?
           new ArrayList<XAttr>(0)
           : inode.getXAttrFeature().getXAttrs();

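The new guard sits in the walk up the resolved path: any symlink on the path short-circuits the zone lookup before the XAttr inspection runs. A hypothetical, simplified sketch of that walk (Node and hasEcXAttr are illustrative stand-ins, not HDFS types):

    import java.util.List;

    interface Node {
      boolean isSymlink();
      boolean hasEcXAttr(); // stand-in for scanning the inode's XAttrs for the EC marker
    }

    final class EcZoneCheck {
      static boolean inEcZone(List<Node> pathFromRoot) {
        for (Node inode : pathFromRoot) {
          if (inode == null) continue;          // mirrors the null check above
          if (inode.isSymlink()) return false;  // symlinks unsupported in EC zones
          if (inode.hasEcXAttr()) return true;  // found the zone root
        }
        return false;
      }
    }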
+ 6 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -416,7 +416,7 @@ public class FSEditLogLoader {
       newFile.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
       newFile.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
       // TODO whether the file is striped should later be retrieved from iip
-      updateBlocks(fsDir, addCloseOp, iip, newFile, newFile.isStriped());
+      updateBlocks(fsDir, addCloseOp, iip, newFile, fsDir.getECPolicy(iip));
       break;
     }
     case OP_CLOSE: {
@@ -437,7 +437,7 @@ public class FSEditLogLoader {
       file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
       file.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
       // TODO whether the file is striped should later be retrieved from iip
-      updateBlocks(fsDir, addCloseOp, iip, file, file.isStriped());
+      updateBlocks(fsDir, addCloseOp, iip, file, fsDir.getECPolicy(iip));
 
       // Now close the file
       if (!file.isUnderConstruction() &&
@@ -496,7 +496,7 @@ public class FSEditLogLoader {
       INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path);
       // Update in-memory data structures
       // TODO whether the file is striped should later be retrieved from iip
-      updateBlocks(fsDir, updateOp, iip, oldFile, oldFile.isStriped());
+      updateBlocks(fsDir, updateOp, iip, oldFile, fsDir.getECPolicy(iip));
       
       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(updateOp.rpcClientId, updateOp.rpcCallId);
@@ -510,10 +510,11 @@ public class FSEditLogLoader {
         FSNamesystem.LOG.debug(op.opCode + ": " + path +
             " new block id : " + addBlockOp.getLastBlock().getBlockId());
       }
-      INodeFile oldFile = INodeFile.valueOf(fsDir.getINode(path), path);
+      INodesInPath iip = fsDir.getINodesInPath(path, true);
+      INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path);
       // add the new block to the INodeFile
       // TODO whether the file is striped should later be retrieved from iip
-      addNewBlock(addBlockOp, oldFile, oldFile.isStriped());
+      addNewBlock(addBlockOp, oldFile, fsDir.getECPolicy(iip));
       break;
     }
     case OP_SET_REPLICATION: {

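All four replay paths stop trusting the file object's own striped flag and instead resolve the EC policy from the resolved path, presumably because during edit-log replay the striped-blocks feature may not yet be attached to the INodeFile; the OP_ADD_BLOCK case also switches to getINodesInPath so the same iip feeds both the inode lookup and the policy lookup. A toy illustration of path-based zone resolution; getECPolicy here is a hypothetical stand-in for FSDirectory#getECPolicy(INodesInPath), not the real implementation:

    import java.util.Map;
    import java.util.TreeMap;

    final class ZoneLookup {
      // Hypothetical stand-in: a path is striped iff some EC zone root prefixes it.
      static boolean getECPolicy(TreeMap<String, Boolean> ecZones, String path) {
        Map.Entry<String, Boolean> e = ecZones.floorEntry(path);
        return e != null && path.startsWith(e.getKey()) && e.getValue();
      }

      public static void main(String[] args) {
        TreeMap<String, Boolean> zones = new TreeMap<>();
        zones.put("/ec/", true);
        System.out.println(getECPolicy(zones, "/ec/dir/file")); // true
        System.out.println(getECPolicy(zones, "/plain/file"));  // false
      }
    }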
+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java

@@ -207,7 +207,7 @@ public class FSImageSerialization {
     out.writeLong(cons.getModificationTime());
     out.writeLong(cons.getPreferredBlockSize());
     // whether the file has striped blocks
-    out.writeBoolean(cons.isWithStripedBlocks());
+    out.writeBoolean(cons.isStriped());
     writeBlocks(cons.getBlocks(), out);
     cons.getPermissionStatus().write(out);
 
@@ -233,7 +233,7 @@ public class FSImageSerialization {
     out.writeLong(file.getAccessTime());
     out.writeLong(file.getPreferredBlockSize());
     // whether the file has striped blocks
-    out.writeBoolean(file.isWithStripedBlocks());
+    out.writeBoolean(file.isStriped());
     writeBlocks(file.getBlocks(), out);
     SnapshotFSImageFormat.saveFileDiffList(file, out);
 

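Both the under-construction and regular file writers record the same single boolean, now sourced from isStriped() in both places; whatever loads the image back must read the flag with the matching predicate. A minimal, self-contained round-trip of that flag using only the JDK stream types the serializer builds on:

    import java.io.*;

    final class StripedFlagRoundTrip {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(buf)) {
          out.writeBoolean(true); // writer side: out.writeBoolean(file.isStriped())
        }
        try (DataInputStream in =
            new DataInputStream(new ByteArrayInputStream(buf.toByteArray()))) {
          System.out.println(in.readBoolean()); // loader side reads the same flag back
        }
      }
    }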
+ 2 - 12
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java

@@ -185,17 +185,13 @@ public class INodeFile extends INodeWithAdditionalFields
   public FileWithStripedBlocksFeature addStripedBlocksFeature() {
     assert blocks == null || blocks.length == 0:
         "The file contains contiguous blocks";
-    assert !isWithStripedBlocks();
+    assert !isStriped();
     this.setFileReplication((short) 0);
     FileWithStripedBlocksFeature sb = new FileWithStripedBlocksFeature();
     addFeature(sb);
     return sb;
   }
 
-  public boolean isWithStripedBlocks() {
-    return getStripedBlocksFeature() != null;
-  }
-
   /** Used to make sure there is no contiguous block related info */
   private boolean hasNoContiguousBlock() {
     return (blocks == null || blocks.length == 0) && getFileReplication() == 0;
@@ -432,7 +428,7 @@ public class INodeFile extends INodeWithAdditionalFields
   /** Set the replication factor of this file. */
   public final INodeFile setFileReplication(short replication,
       int latestSnapshotId) throws QuotaExceededException {
-    Preconditions.checkState(!isWithStripedBlocks(),
+    Preconditions.checkState(!isStriped(),
         "Cannot set replication to a file with striped blocks");
     recordModification(latestSnapshotId);
     setFileReplication(replication);
@@ -679,7 +675,6 @@ public class INodeFile extends INodeWithAdditionalFields
 
     final long ssDeltaNoReplication;
     short replication;
-
     if (isStriped()) {
       return computeQuotaUsageWithStriped(bsps, counts);
     }
@@ -708,11 +703,6 @@ public class INodeFile extends INodeWithAdditionalFields
 
   /**
    * Compute quota of striped file
-   * @param bsps
-   * @param counts
-   * @param useCache
-   * @param lastSnapshotId
-   * @return quota counts
    */
   public final QuotaCounts computeQuotaUsageWithStriped(
       BlockStoragePolicySuite bsps, QuotaCounts counts) {

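isWithStripedBlocks() duplicated what isStriped() already answers, so the commit deletes it and moves every caller (including the assertion and precondition above, and the tests below) onto the single predicate. A minimal sketch of the consolidated shape, assuming isStriped() is backed by the same feature lookup the removed method performed:

    final class FileSketch {
      private Object stripedBlocksFeature; // stand-in for FileWithStripedBlocksFeature

      boolean isStriped() {                // the one remaining predicate
        return stripedBlocksFeature != null;
      }

      void addStripedBlocksFeature() {
        assert !isStriped() : "already striped"; // matches the updated assertion
        stripedBlocksFeature = new Object();
      }
    }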
+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java

@@ -473,7 +473,7 @@ public class TestFSEditLogLoader {
       INodeFile inodeLoaded = (INodeFile)fns.getFSDirectory()
           .getINode(testFilePath);
 
-      assertTrue(inodeLoaded.isWithStripedBlocks());
+      assertTrue(inodeLoaded.isStriped());
 
       BlockInfoStriped[] blks = (BlockInfoStriped[])inodeLoaded.getBlocks();
       assertEquals(1, blks.length);
@@ -552,7 +552,7 @@ public class TestFSEditLogLoader {
       INodeFile inodeLoaded = (INodeFile)fns.getFSDirectory()
           .getINode(testFilePath);
 
-      assertTrue(inodeLoaded.isWithStripedBlocks());
+      assertTrue(inodeLoaded.isStriped());
 
       BlockInfoStriped[] blks = (BlockInfoStriped[])inodeLoaded.getBlocks();
       assertEquals(1, blks.length);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java

@@ -199,7 +199,7 @@ public class TestFSImage {
     assertEquals(mtime, fileByLoaded.getModificationTime());
     assertEquals(isUC ? mtime : atime, fileByLoaded.getAccessTime());
     assertEquals(0, fileByLoaded.getContiguousBlocks().length);
-    assertEquals(0, fileByLoaded.getBlockReplication());
+    assertEquals(0, fileByLoaded.getFileReplication());
     assertEquals(preferredBlockSize, fileByLoaded.getPreferredBlockSize());
 
     //check the BlockInfoStriped

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRecoverStripedBlocks.java

@@ -84,7 +84,7 @@ public class TestRecoverStripedBlocks {
     final INodeFile fileNode = cluster.getNamesystem().getFSDirectory()
         .getINode4Write(filePath.toString()).asFile();
     assertFalse(fileNode.isUnderConstruction());
-    assertTrue(fileNode.isWithStripedBlocks());
+    assertTrue(fileNode.isStriped());
     BlockInfo[] blocks = fileNode.getBlocks();
     assertEquals(numBlocks, blocks.length);
     for (BlockInfo blk : blocks) {