
HDFS-4523. Fix INodeFile replacement, TestQuota and javac errors from trunk merge.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1450477 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze, 12 years ago
parent commit e2a618e1cc

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt

@@ -176,3 +176,6 @@ Branch-2802 Snapshot (Unreleased)
 
 
  HDFS-4514. Add CLI for supporting snapshot rename, diff report, and
  snapshottable directory listing.  (Jing Zhao via szetszwo)
+
+  HDFS-4523. Fix INodeFile replacement, TestQuota and javac errors from trunk
+  merge.  (szetszwo)

+ 12 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -335,7 +335,7 @@ public class FSDirectory implements Closeable {
          INodeFileUnderConstruction.valueOf(inodesInPath.getLastINode(), path);

      // check quota limits and updated space consumed
-      updateCount(inodesInPath, 0, 0, fileINode.getBlockDiskspace(), true);
+      updateCount(inodesInPath, 0, fileINode.getBlockDiskspace(), true);
 
 
      // associate new last block for the file
      BlockInfoUnderConstruction blockInfo =
@@ -426,7 +426,7 @@ public class FSDirectory implements Closeable {
 
 
    // update space consumed
    final INodesInPath iip = rootDir.getINodesInPath4Write(path, true);
-    updateCount(iip, 0, 0, -fileNode.getBlockDiskspace(), true);
+    updateCount(iip, 0, -fileNode.getBlockDiskspace(), true);
  }

  /**
@@ -969,15 +969,11 @@ public class FSDirectory implements Closeable {
    INodeDirectory trgParent = (INodeDirectory)trgINodes[trgINodes.length-2];
    final Snapshot trgLatestSnapshot = trgINodesInPath.getLatestSnapshot();

-    INodeFile [] allSrcInodes = new INodeFile[srcs.length];
-    int i = 0;
-    int totalBlocks = 0;
-    for(String src : srcs) {
-      INodeFile srcInode = (INodeFile)getINode(src);
-      allSrcInodes[i++] = srcInode;
-      totalBlocks += srcInode.numBlocks();  
+    final INodeFile [] allSrcInodes = new INodeFile[srcs.length];
+    for(int i = 0; i < srcs.length; i++) {
+      allSrcInodes[i] = (INodeFile)getINode4Write(srcs[i]);
    }
-    trgInode.appendBlocks(allSrcInodes, totalBlocks); // copy the blocks
+    trgInode.concatBlocks(allSrcInodes);
     
     
    // since we are in the same dir - we can use same parent to remove files
    int count = 0;
@@ -1908,17 +1904,16 @@ public class FSDirectory implements Closeable {
  private INode removeLastINode(final INodesInPath inodesInPath) {
    final INode[] inodes = inodesInPath.getINodes();
    final int pos = inodes.length - 1;
-    INode removedNode = ((INodeDirectory)inodes[pos-1]).removeChild(
+    final boolean removed = ((INodeDirectory)inodes[pos-1]).removeChild(
        inodes[pos], inodesInPath.getLatestSnapshot());
-    if (removedNode != null) {
-      Preconditions.checkState(removedNode == inodes[pos]);
-
-      inodesInPath.setINode(pos - 1, removedNode.getParent());
-      final Quota.Counts counts = removedNode.computeQuotaUsage();
+    if (removed) {
+      inodesInPath.setINode(pos - 1, inodes[pos].getParent());
+      final Quota.Counts counts = inodes[pos].computeQuotaUsage();
      updateCountNoQuotaCheck(inodesInPath, pos,
          -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
+      return inodes[pos];
    }
-    return removedNode;
+    return null;
  }

  /**

+ 13 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -1556,7 +1556,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
      }

      // check replication and blocks size
-      if(repl != srcInode.getFileReplication()) {
+      if(repl != srcInode.getBlockReplication()) {
        throw new HadoopIllegalArgumentException("concat: the soruce file "
            + src + " and the target file " + target
            + " should have the same replication: source replication is "
@@ -5722,6 +5722,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   
   
  /** Allow snapshot on a directroy. */
  public void allowSnapshot(String path) throws SafeModeException, IOException {
+    final FSPermissionChecker pc = getPermissionChecker();
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
@@ -5729,7 +5730,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
        throw new SafeModeException("Cannot allow snapshot for " + path,
            safeMode);
      }
-      checkOwner(path);
+      checkOwner(pc, path);
 
 
      snapshotManager.setSnapshottable(path);
      getEditLog().logAllowSnapshot(path);
@@ -5749,6 +5750,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
  /** Disallow snapshot on a directory. */
  public void disallowSnapshot(String path)
      throws SafeModeException, IOException {
+    final FSPermissionChecker pc = getPermissionChecker();
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
@@ -5756,7 +5758,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
        throw new SafeModeException("Cannot disallow snapshot for " + path,
            safeMode);
      }
-      checkOwner(path);
+      checkOwner(pc, path);
 
 
      snapshotManager.resetSnapshottable(path);
      getEditLog().logDisallowSnapshot(path);
@@ -5780,6 +5782,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   */
  public void createSnapshot(String snapshotRoot, String snapshotName)
      throws SafeModeException, IOException {
+    final FSPermissionChecker pc = getPermissionChecker();
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
@@ -5787,7 +5790,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
        throw new SafeModeException("Cannot create snapshot for "
            + snapshotRoot, safeMode);
      }
-      checkOwner(snapshotRoot);
+      checkOwner(pc, snapshotRoot);
 
 
      dir.writeLock();
      try {
@@ -5819,6 +5822,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   */
  public void renameSnapshot(String path, String snapshotOldName,
      String snapshotNewName) throws SafeModeException, IOException {
+    final FSPermissionChecker pc = getPermissionChecker();
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
@@ -5826,7 +5830,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
        throw new SafeModeException("Cannot rename snapshot for " + path,
            safeMode);
      }
-      checkOwner(path);
+      checkOwner(pc, path);
      // TODO: check if the new name is valid. May also need this for
      // creationSnapshot

@@ -5863,8 +5867,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
      checkOperation(OperationCategory.READ);
      FSPermissionChecker checker = new FSPermissionChecker(
          fsOwner.getShortUserName(), supergroup);
-      status = snapshotManager
-          .getSnapshottableDirListing(checker.isSuper ? null : checker.user);
+      final String user = checker.isSuperUser()? null : checker.getUser();
+      status = snapshotManager.getSnapshottableDirListing(user);
    } finally {
      readUnlock();
    }
@@ -5919,6 +5923,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   */
  public void deleteSnapshot(String snapshotRoot, String snapshotName)
      throws SafeModeException, IOException {
+    final FSPermissionChecker pc = getPermissionChecker();
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
@@ -5926,7 +5931,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
        throw new SafeModeException(
            "Cannot delete snapshot for " + snapshotRoot, safeMode);
      }
-      checkOwner(snapshotRoot);
+      checkOwner(pc, snapshotRoot);
 
 
      BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
      dir.writeLock();
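Note on the FSNamesystem hunks above: each snapshot operation now builds the FSPermissionChecker before taking the write lock and hands it to checkOwner(pc, ...), so the caller's credentials are no longer resolved while the lock is held. A rough sketch of the resulting shape (hedged; the safe-mode ellipsis and the trailing finally block are assumed, not quoted from the patch):

    final FSPermissionChecker pc = getPermissionChecker();  // resolved outside the lock
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      // ... safe-mode check ...
      checkOwner(pc, path);   // owner check reuses the pre-built checker
      // ... perform the snapshot operation and log the edit ...
    } finally {
      writeUnlock();
    }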

+ 40 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java

@@ -123,18 +123,41 @@ public class INodeDirectory extends INode {
   * 
   * @param child the child inode to be removed
   * @param latest See {@link INode#recordModification(Snapshot)}.
-   * @return the removed child inode.
   */
-  public INode removeChild(INode child, Snapshot latest) {
-    assertChildrenNonNull();
-
+  public boolean removeChild(INode child, Snapshot latest) {
    if (isInLatestSnapshot(latest)) {
      return replaceSelf4INodeDirectoryWithSnapshot()
          .removeChild(child, latest);
    }

+    return removeChild(child);
+  }
+
+  /** 
+   * Remove the specified child from this directory.
+   * The basic remove method which actually calls children.remove(..).
+   *
+   * @param child the child inode to be removed
+   * 
+   * @return true if the child is removed; false if the child is not found.
+   */
+  protected final boolean removeChild(final INode child) {
+    assertChildrenNonNull();
    final int i = searchChildren(child.getLocalNameBytes());
-    return i >= 0? children.remove(i): null;
+    if (i < 0) {
+      return false;
+    }
+
+    final INode removed = children.remove(i);
+    Preconditions.checkState(removed == child);
+    return true;
+  }
+
+  /**
+   * Remove the specified child and all its snapshot copies from this directory.
+   */
+  public boolean removeChildAndAllSnapshotCopies(INode child) {
+    return removeChild(child);
  }

  /**
@@ -197,13 +220,18 @@ public class INodeDirectory extends INode {
    oldChild.clearReferences();
  }

+  private void replaceChildFile(final INodeFile oldChild, final INodeFile newChild) {
+    replaceChild(oldChild, newChild);
+    newChild.updateBlockCollection();
+  }
+
  /** Replace a child {@link INodeFile} with an {@link INodeFileWithSnapshot}. */
  INodeFileWithSnapshot replaceChild4INodeFileWithSnapshot(
      final INodeFile child) {
    Preconditions.checkArgument(!(child instanceof INodeFileWithSnapshot),
        "Child file is already an INodeFileWithSnapshot, child=" + child);
    final INodeFileWithSnapshot newChild = new INodeFileWithSnapshot(child);
-    replaceChild(child, newChild);
+    replaceChildFile(child, newChild);
    return newChild;
  }

@@ -214,7 +242,7 @@
        "Child file is already an INodeFileUnderConstructionWithSnapshot, child=" + child);
    final INodeFileUnderConstructionWithSnapshot newChild
        = new INodeFileUnderConstructionWithSnapshot(child, null);
-    replaceChild(child, newChild);
+    replaceChildFile(child, newChild);
    return newChild;
  }

@@ -840,7 +868,11 @@ public class INodeDirectory extends INode {
  public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
      final Snapshot snapshot) {
    super.dumpTreeRecursively(out, prefix, snapshot);
-    out.println(", childrenSize=" + getChildrenList(snapshot).size());
+    out.print(", childrenSize=" + getChildrenList(snapshot).size());
+    if (this instanceof INodeDirectoryWithQuota) {
+//      out.print(((INodeDirectoryWithQuota)this).quotaString());
+    }
+    out.println();
 
 
    if (prefix.length() >= 2) {
      prefix.setLength(prefix.length() - 2);
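The removeChild change above swaps the return type from the removed INode to a boolean, since callers already hold the child reference; the new protected removeChild(INode) asserts that the removed entry really is that child. A hedged caller-side sketch, modeled on removeLastINode in FSDirectory above (updateAncestorQuota is a hypothetical stand-in for the caller's quota update):

    // 'parent' is an INodeDirectory, 'child' the inode being deleted,
    // 'latest' the latest snapshot of the path (may be null).
    if (parent.removeChild(child, latest)) {
      // removal succeeded: 'child' itself is the removed node, so its usage
      // can be subtracted from the ancestors directly.
      final Quota.Counts counts = child.computeQuotaUsage();
      updateAncestorQuota(-counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
    }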

+ 10 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java

@@ -173,4 +173,14 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
      throw new DSQuotaExceededException(dsQuota, diskspace + dsDelta);
    }
  }
+
+  String namespaceString() {
+    return "namespace: " + (nsQuota < 0? "-": namespace + "/" + nsQuota);
+  }
+  String diskspaceString() {
+    return "diskspace: " + (dsQuota < 0? "-": diskspace + "/" + dsQuota);
+  }
+  String quotaString() {
+    return ", Quota[" + namespaceString() + ", " + diskspaceString() + "]";
+  }
}
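The three helpers above only format the directory's cached quota counters; their single call site in INodeDirectory.dumpTreeRecursively is still commented out in this patch. A hedged sketch of what enabling it would print, assuming a directory with nsQuota=100, namespace=3 and no diskspace quota (dsQuota=-1):

    // hypothetical usage once the commented-out call is enabled:
    out.print(((INodeDirectoryWithQuota) dir).quotaString());
    // expected output fragment: ", Quota[namespace: 3/100, diskspace: -]"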

+ 15 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java

@@ -196,11 +196,23 @@ public class INodeFile extends INode implements BlockCollection {
    return this.blocks;
  }

+  void updateBlockCollection() {
+    if (blocks != null) {
+      for(BlockInfo b : blocks) {
+        b.setBlockCollection(this);
+      }
+    }
+  }
+
  /**
   * append array of blocks to this.blocks
   */
-  void appendBlocks(INodeFile [] inodes, int totalAddedBlocks) {
+  void concatBlocks(INodeFile[] inodes) {
    int size = this.blocks.length;
+    int totalAddedBlocks = 0;
+    for(INodeFile f : inodes) {
+      totalAddedBlocks += f.blocks.length;
+    }
     
     
    BlockInfo[] newlist = new BlockInfo[size + totalAddedBlocks];
    System.arraycopy(this.blocks, 0, newlist, 0, size);
@@ -209,11 +221,9 @@ public class INodeFile extends INode implements BlockCollection {
      System.arraycopy(in.blocks, 0, newlist, size, in.blocks.length);
      size += in.blocks.length;
    }
-    
-    for(BlockInfo bi: newlist) {
-      bi.setBlockCollection(this);
-    }
+
    setBlocks(newlist);
+    updateBlockCollection();
  }

  /**
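With concatBlocks the target inode derives the total block count from the source inodes itself and then refreshes each block's BlockCollection through updateBlockCollection(), so callers no longer precompute the count. A before/after sketch of the concat call site in FSDirectory (names as shown in the diffs above):

    // before: the caller had to pass the precomputed number of blocks
    //   trgInode.appendBlocks(allSrcInodes, totalBlocks);
    // after: the count and the blocksMap back-references are handled internally
    trgInode.concatBlocks(allSrcInodes);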

+ 36 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java

@@ -29,9 +29,9 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.INode.Content.CountsMap.Key;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
-import org.apache.hadoop.hdfs.server.namenode.INode.Content.CountsMap.Key;
import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.Container;
import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.UndoInfo;
@@ -60,6 +60,16 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
      return getCreatedList().set(c, newChild);
    }

+    private final boolean removeCreatedChild(final int c, final INode child) {
+      final List<INode> created = getCreatedList();
+      if (created.get(c) == child) {
+        final INode removed = created.remove(c);
+        Preconditions.checkState(removed == child);
+        return true;
+      }
+      return false;
+    }
+
    /** clear the created list */
    private int destroyCreatedList(
        final INodeDirectoryWithSnapshot currentINode,
@@ -499,28 +509,49 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
  }

  @Override
-  public INode removeChild(INode child, Snapshot latest) {
+  public boolean removeChild(INode child, Snapshot latest) {
    ChildrenDiff diff = null;
    UndoInfo<INode> undoInfo = null;
    if (latest != null) {
      diff = diffs.checkAndAddLatestSnapshotDiff(latest, this).diff;
      undoInfo = diff.delete(child);
    }
-    final INode removed = super.removeChild(child, null);
+    final boolean removed = removeChild(child);
    if (undoInfo != null) {
-      if (removed == null) {
+      if (!removed) {
        //remove failed, undo
        diff.undoDelete(child, undoInfo);
      }
    }
    return removed;
  }
+
+  @Override
+  public boolean removeChildAndAllSnapshotCopies(INode child) {
+    if (!removeChild(child)) {
+      return false;
+    }
+
+    // remove same child from the created list, if there is any.
+    final byte[] name = child.getLocalNameBytes();
+    final List<DirectoryDiff> diffList = diffs.asList();
+    for(int i = diffList.size() - 1; i >= 0; i--) {
+      final ChildrenDiff diff = diffList.get(i).diff;
+      final int c = diff.searchCreatedIndex(name);
+      if (c >= 0) {
+        if (diff.removeCreatedChild(c, child)) {
+          return true;
+        }
+      }
+    }
+    return true;
+  }
   
   
  @Override
  public void replaceChild(final INode oldChild, final INode newChild) {
    super.replaceChild(oldChild, newChild);

-    // replace the created child, if there is any.
+    // replace same child in the created list, if there is any.
    final byte[] name = oldChild.getLocalNameBytes();
    final List<DirectoryDiff> diffList = diffs.asList();
    for(int i = diffList.size() - 1; i >= 0; i--) {
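removeChildAndAllSnapshotCopies, added above, is the snapshot-aware counterpart of the plain removeChild(child): after removing the child from the current children list it walks the directory diffs newest-first and drops the same inode from any created list, so no snapshot copy keeps referencing it. A hedged usage sketch (hypothetical caller, not part of this patch):

    // the inode is being replaced outright, so it must not survive in the
    // current children list or in any snapshot diff's created list
    parent.removeChildAndAllSnapshotCopies(oldInode);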

+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java

@@ -17,8 +17,6 @@
 */
package org.apache.hadoop.hdfs.server.namenode.snapshot;

-import java.util.Iterator;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java

@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Assert;
import org.junit.Test;

/** A class for testing quota-related commands */
@@ -171,15 +172,13 @@ public class TestQuota {
      fout = dfs.create(childFile1, replication);

      // 10.s: but writing fileLen bytes should result in an quota exception
-      hasException = false;
      try {
        fout.write(new byte[fileLen]);
        fout.close();
+        Assert.fail();
      } catch (QuotaExceededException e) {
-        hasException = true;
        IOUtils.closeStream(fout);
      }
-      assertTrue(hasException);
       
       
      //delete the file
      dfs.delete(childFile1, false);

+ 2 - 15
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java

@@ -205,27 +205,14 @@ public class TestINodeFile {
  }

  @Test
-  public void testAppendBlocks() {
+  public void testConcatBlocks() {
    INodeFile origFile = createINodeFiles(1, "origfile")[0];
    assertEquals("Number of blocks didn't match", origFile.numBlocks(), 1L);

    INodeFile[] appendFiles =   createINodeFiles(4, "appendfile");
-    origFile.appendBlocks(appendFiles, getTotalBlocks(appendFiles));
+    origFile.concatBlocks(appendFiles);
    assertEquals("Number of blocks didn't match", origFile.numBlocks(), 5L);
  }
-
-  /** 
-   * Gives the count of blocks for a given number of files
-   * @param files Array of INode files
-   * @return total count of blocks
-   */
-  private int getTotalBlocks(INodeFile[] files) {
-    int nBlocks=0;
-    for(int i=0; i < files.length; i++) {
-       nBlocks += files[i].numBlocks();
-    }
-    return nBlocks;
-  }
   
   
  /** 
   * Creates the required number of files with one block each

+ 119 - 61
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java

@@ -17,13 +17,14 @@
 */
package org.apache.hadoop.hdfs.server.namenode.snapshot;

+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
+import java.util.Arrays;
 
 
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
@@ -31,14 +32,13 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

@@ -46,6 +46,8 @@ import org.junit.Test;
 * Test cases for snapshot-related information in blocksMap.
 */
public class TestSnapshotBlocksMap {
+  // TODO: fix concat for snapshot
+  private static final boolean runConcatTest = false;
   
   
  private static final long seed = 0;
  private static final short REPLICATION = 3;
@@ -57,8 +59,10 @@ public class TestSnapshotBlocksMap {
  protected Configuration conf;
  protected MiniDFSCluster cluster;
  protected FSNamesystem fsn;
+  FSDirectory fsdir;
+  BlockManager blockmanager;
  protected DistributedFileSystem hdfs;
-  
+
  @Before
  public void setUp() throws Exception {
    conf = new Configuration();
@@ -68,6 +72,8 @@ public class TestSnapshotBlocksMap {
    cluster.waitActive();

    fsn = cluster.getNamesystem();
+    fsdir = fsn.getFSDirectory();
+    blockmanager = fsn.getBlockManager();
    hdfs = cluster.getFileSystem();
  }

@@ -77,7 +83,39 @@ public class TestSnapshotBlocksMap {
      cluster.shutdown();
    }
  }
-  
+
+  void assertAllNull(INodeFile inode, Path path, String[] snapshots) throws Exception { 
+    Assert.assertNull(inode.getBlocks());
+    assertINodeNull(path.toString());
+    assertINodeNullInSnapshots(path, snapshots);
+  }
+
+  void assertINodeNull(String path) throws Exception {
+    Assert.assertNull(fsdir.getINode(path));
+  }
+
+  void assertINodeNullInSnapshots(Path path, String... snapshots) throws Exception {
+    for(String s : snapshots) {
+      assertINodeNull(SnapshotTestHelper.getSnapshotPath(
+          path.getParent(), s, path.getName()).toString());
+    }
+  }
+
+  INodeFile assertBlockCollection(String path, int numBlocks) throws Exception {
+    final INodeFile file = INodeFile.valueOf(fsdir.getINode(path), path);
+    assertEquals(numBlocks, file.getBlocks().length);
+    for(BlockInfo b : file.getBlocks()) {
+      assertBlockCollection(file, b);
+    }
+    return file;
+  }
+
+  void assertBlockCollection(final INodeFile file, final BlockInfo b) { 
+    Assert.assertSame(b, blockmanager.getStoredBlock(b));
+    Assert.assertSame(file, blockmanager.getBlockCollection(b));
+    Assert.assertSame(file, b.getBlockCollection());
+  }
+
  /**
   * Test deleting a file with snapshots. Need to check the blocksMap to make
   * sure the corresponding record is updated correctly.
@@ -87,87 +125,107 @@ public class TestSnapshotBlocksMap {
    Path file0 = new Path(sub1, "file0");
    Path file1 = new Path(sub1, "file1");

-    Path subsub1 = new Path(sub1, "sub1");
-    Path subfile0 = new Path(subsub1, "file0");
+    Path sub2 = new Path(sub1, "sub2");
+    Path file2 = new Path(sub2, "file2");
+
+    Path file3 = new Path(sub1, "file3");
+    Path file4 = new Path(sub1, "file4");
+    Path file5 = new Path(sub1, "file5");
     
     
    // Create file under sub1
-    DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
-    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
-    DFSTestUtil.createFile(hdfs, subfile0, BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file0, 4*BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file1, 2*BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file2, 3*BLOCKSIZE, REPLICATION, seed);
     
     
-    BlockManager bm = fsn.getBlockManager();
-    FSDirectory dir = fsn.getFSDirectory();
-    
-    INodeFile inodeForDeletedFile = INodeFile.valueOf(
-        dir.getINode(subfile0.toString()), subfile0.toString());
-    BlockInfo[] blocksForDeletedFile = inodeForDeletedFile.getBlocks();
-    assertEquals(blocksForDeletedFile.length, 1);
-    BlockCollection bcForDeletedFile = bm
-        .getBlockCollection(blocksForDeletedFile[0]);
-    assertNotNull(bcForDeletedFile);
    // Normal deletion
-    hdfs.delete(subsub1, true);
-    bcForDeletedFile = bm.getBlockCollection(blocksForDeletedFile[0]);
-    // The INode should have been removed from the blocksMap
-    assertNull(bcForDeletedFile);
+    {
+      final INodeFile f2 = assertBlockCollection(file2.toString(), 3);
+      BlockInfo[] blocks = f2.getBlocks();
+      hdfs.delete(sub2, true);
+      // The INode should have been removed from the blocksMap
+      for(BlockInfo b : blocks) {
+        assertNull(blockmanager.getBlockCollection(b));
+      }
+    }
     
     
    // Create snapshots for sub1
-    for (int i = 0; i < 2; i++) {
-      SnapshotTestHelper.createSnapshot(hdfs, sub1, "s" + i);
+    final String[] snapshots = {"s0", "s1", "s2"};
+    DFSTestUtil.createFile(hdfs, file3, 5*BLOCKSIZE, REPLICATION, seed);
+    SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[0]);
+    DFSTestUtil.createFile(hdfs, file4, 1*BLOCKSIZE, REPLICATION, seed);
+    SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[1]);
+    DFSTestUtil.createFile(hdfs, file5, 7*BLOCKSIZE, REPLICATION, seed);
+    SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[2]);
+
+    // set replication so that the inode should be replaced for snapshots
+    {
+      INodeFile f1 = assertBlockCollection(file1.toString(), 2);
+      Assert.assertSame(INodeFile.class, f1.getClass());
+      hdfs.setReplication(file1, (short)2);
+      f1 = assertBlockCollection(file1.toString(), 2);
+      Assert.assertSame(INodeFileWithSnapshot.class, f1.getClass());
     }
    }

    // Check the block information for file0
-    INodeFile inode = INodeFile.valueOf(dir.getINode(file0.toString()),
-        file0.toString());
-    BlockInfo[] blocks = inode.getBlocks();
-    // Get the INode for the first block from blocksMap 
-    BlockCollection bc = bm.getBlockCollection(blocks[0]);
-    // The two INode should be the same one
-    assertTrue(bc == inode);
+    final INodeFile f0 = assertBlockCollection(file0.toString(), 4);
+    BlockInfo[] blocks0 = f0.getBlocks();
     
     
    // Also check the block information for snapshot of file0
    Path snapshotFile0 = SnapshotTestHelper.getSnapshotPath(sub1, "s0",
        file0.getName());
-    INodeFile ssINode0 = INodeFile.valueOf(dir.getINode(snapshotFile0.toString()),
-        snapshotFile0.toString());
-    BlockInfo[] ssBlocks = ssINode0.getBlocks();
-    // The snapshot of file1 should contain 1 block
-    assertEquals(1, ssBlocks.length);
+    assertBlockCollection(snapshotFile0.toString(), 4);
     
     
    // Delete file0
    hdfs.delete(file0, true);
-    // Make sure the first block of file0 is still in blocksMap
-    BlockInfo blockInfoAfterDeletion = bm.getStoredBlock(blocks[0]);
-    assertNotNull(blockInfoAfterDeletion);
-    // Check the INode information
-    BlockCollection bcAfterDeletion = blockInfoAfterDeletion
-        .getBlockCollection();
+    // Make sure the blocks of file0 is still in blocksMap
+    for(BlockInfo b : blocks0) {
+      assertNotNull(blockmanager.getBlockCollection(b));
+    }
+    assertBlockCollection(snapshotFile0.toString(), 4);
     
     
    // Compare the INode in the blocksMap with INodes for snapshots
-    Path snapshot1File0 = SnapshotTestHelper.getSnapshotPath(sub1, "s1",
-        file0.getName());
-    INodeFile ssINode1 = INodeFile.valueOf(
-        dir.getINode(snapshot1File0.toString()), snapshot1File0.toString());
-    assertTrue(bcAfterDeletion == ssINode0 || bcAfterDeletion == ssINode1);
-    assertEquals(1, bcAfterDeletion.getBlocks().length);
+    String s1f0 = SnapshotTestHelper.getSnapshotPath(sub1, "s1",
+        file0.getName()).toString();
+    assertBlockCollection(s1f0, 4);
     
     
    // Delete snapshot s1
    hdfs.deleteSnapshot(sub1, "s1");
+
    // Make sure the first block of file0 is still in blocksMap
-    BlockInfo blockInfoAfterSnapshotDeletion = bm.getStoredBlock(blocks[0]);
-    assertNotNull(blockInfoAfterSnapshotDeletion);
-    BlockCollection bcAfterSnapshotDeletion = blockInfoAfterSnapshotDeletion
-        .getBlockCollection();
-    assertTrue(bcAfterSnapshotDeletion == ssINode0);
-    assertEquals(1, bcAfterSnapshotDeletion.getBlocks().length);
+    for(BlockInfo b : blocks0) {
+      assertNotNull(blockmanager.getBlockCollection(b));
+    }
+    assertBlockCollection(snapshotFile0.toString(), 4);
+
    try {
-      ssINode1 = INodeFile.valueOf(
-        dir.getINode(snapshot1File0.toString()), snapshot1File0.toString());
+      INodeFile.valueOf(fsdir.getINode(s1f0), s1f0);
      fail("Expect FileNotFoundException when identifying the INode in a deleted Snapshot");
    } catch (IOException e) {
-      GenericTestUtils.assertExceptionContains("File does not exist: "
-          + snapshot1File0.toString(), e);
+      assertExceptionContains("File does not exist: " + s1f0, e);
+    }
+    
+    // concat file1, file3 and file5 to file4
+    if (runConcatTest) {
+      final INodeFile f1 = assertBlockCollection(file1.toString(), 2);
+      final BlockInfo[] f1blocks = f1.getBlocks();
+      final INodeFile f3 = assertBlockCollection(file3.toString(), 5);
+      final BlockInfo[] f3blocks = f3.getBlocks();
+      final INodeFile f5 = assertBlockCollection(file5.toString(), 7);
+      final BlockInfo[] f5blocks = f5.getBlocks();
+      assertBlockCollection(file4.toString(), 1);
+
+      hdfs.concat(file4, new Path[]{file1, file3, file5});
+
+      final INodeFile f4 = assertBlockCollection(file4.toString(), 15);
+      final BlockInfo[] blocks4 = f4.getBlocks();
+      for(BlockInfo[] blocks : Arrays.asList(f1blocks, f3blocks, blocks4, f5blocks)) {
+        for(BlockInfo b : blocks) {
+          assertBlockCollection(f4, b);
+        }
+      }
+      assertAllNull(f1, file1, snapshots);
+      assertAllNull(f3, file3, snapshots);
+      assertAllNull(f5, file5, snapshots);
     }
    }
   }