
HDFS-4076. Support snapshot of single files. (szetszwo)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1400245 13f79535-47bb-0310-9956-ffa450edef68
Author: Tsz-wo Sze
Commit: 5c1a7b9d5d

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -1,5 +1,11 @@
 Hadoop HDFS Change Log
 
+Branch-2802 Snapshot (Unreleased)
+
+  NEW FEATURES
+
+    HDFS-4076. Support snapshot of single files.  (szetszwo)
+
 Trunk (Unreleased)
 
   INCOMPATIBLE CHANGES

+ 37 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -44,10 +44,10 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -57,6 +57,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileSnapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithLink;
 import org.apache.hadoop.hdfs.util.ByteArray;
 
 import com.google.common.base.Preconditions;
@@ -300,6 +302,40 @@ public class FSDirectory implements Closeable {
     return newNode;
   }
 
+  /** Add an INodeFileSnapshot of the source file at the destination path. */
+  INodeFileSnapshot addFileSnapshot(String srcPath, String dstPath
+      ) throws IOException, QuotaExceededException {
+    waitForReady();
+
+    final INodeFile src = rootDir.getINodeFile(srcPath);
+    INodeFileSnapshot snapshot = new INodeFileSnapshot(src, src.computeFileSize(true));
+
+    writeLock();
+    try {
+      // add the snapshot file at the destination path
+      snapshot = addNode(dstPath, snapshot, UNKNOWN_DISK_SPACE);
+
+      if (snapshot != null && src.getClass() == INodeFile.class) {
+        // the snapshot was added and the source is a plain INodeFile: replace the source with an INodeFileWithLink.
+        replaceNode(srcPath, src, new INodeFileWithLink(src));
+      }
+    } finally {
+      writeUnlock();
+
+      if (snapshot == null) {
+        NameNode.stateChangeLog.info(
+            "DIR* FSDirectory.addFileSnapshot: failed to add " + dstPath);
+        return null;
+      }
+    }
+
+    if (NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("DIR* FSDirectory.addFileSnapshot: "
+          + dstPath + " is added to the file system");
+    }
+    return snapshot;
+  }
+
   INodeDirectory addToParent(byte[] src, INodeDirectory parentINode,
       INode newNode, boolean propagateModTime) {
     // NOTE: This does not update space counts for parents
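
A hedged sketch of how the new method behaves end to end (the fsd handle and the paths below are illustrative, not part of this commit): the first snapshot of a plain INodeFile also swaps the source inode for an INodeFileWithLink, so later snapshots of the same file skip the replaceNode step.

    // Illustrative only; assumes access to an FSDirectory instance "fsd".
    INodeFileSnapshot s1 = fsd.addFileSnapshot("/user/foo/data", "/snapshots/data.s1");
    // The source inode is now an INodeFileWithLink, so on the next call
    // src.getClass() == INodeFile.class is false and replaceNode is skipped.
    INodeFileSnapshot s2 = fsd.addFileSnapshot("/user/foo/data", "/snapshots/data.s2");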

+ 15 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java

@@ -130,6 +130,21 @@ class INodeDirectory extends INode {
     return getNode(getPathComponents(path), resolveLink);
   }
 
+  /** @return the INodeFile corresponding to the path. */
+  INodeFile getINodeFile(String path) throws FileNotFoundException,
+      UnresolvedLinkException {
+    final INode inode = getNode(path, false);
+    if (inode == null) {
+      throw new FileNotFoundException("File \"" + path
+          + "\" not found");
+    }
+    if (!(inode instanceof INodeFile)) {
+      throw new FileNotFoundException("Path \"" + path
+          + "\" is not a file");
+    }
+    return (INodeFile)inode;
+  }
+
   /**
    * Retrieve existing INodes from a path. If existing is big enough to store
    * all path components (existing and non-existing), then existing INodes
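
For reference, a hedged sketch of the new helper's contract (the path is hypothetical): both failure modes, a missing path and a path that resolves to something other than a file, surface as FileNotFoundException and differ only in the message.

    // Illustrative only.
    try {
      final INodeFile file = rootDir.getINodeFile("/user/foo/data");
      // ... use the file inode ...
    } catch (FileNotFoundException e) {
      // raised when the path does not exist, and also when it resolves
      // to a directory or symlink rather than a file
    }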

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java

@@ -54,6 +54,11 @@ public class INodeFile extends INode implements BlockCollection {
     blocks = blklist;
   }
 
+  protected INodeFile(INodeFile f) {
+    this(f.getPermissionStatus(), f.getBlocks(), f.getBlockReplication(),
+        f.getModificationTime(), f.getAccessTime(), f.getPreferredBlockSize());
+  }
+
   /**
    * Set the {@link FsPermission} of this {@link INodeFile}.
    * Since this is a file,
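
The new protected copy constructor lets subclasses wrap an existing file without copying its fields one by one; the snapshot classes below rely on it. A minimal sketch of the pattern (the lookup is illustrative):

    // Sketch: shallow-copy an existing file inode into a subclass instance.
    final INodeFile src = rootDir.getINodeFile("/user/foo/data");
    final INodeFileWithLink copy = new INodeFileWithLink(src);
    // copy shares src's block list and carries over its permission status,
    // replication, modification/access times and preferred block size.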

+ 35 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileSnapshot.java

@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+
+/**
+ *  INode representing a snapshot of a file.
+ */
+@InterfaceAudience.Private
+public class INodeFileSnapshot extends INodeFileWithLink {
+  /** The file size at snapshot creation time. */
+  final long size;
+
+  public INodeFileSnapshot(INodeFile f, long size) {
+    super(f);
+    this.size = size;
+  }
+}
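
As the javadoc notes, size freezes the file length at snapshot creation time; a hedged sketch of the construction (mirroring FSDirectory.addFileSnapshot above, with src illustrative):

    // Illustrative only: the snapshot records the current length, including
    // the last block if it is under construction (computeFileSize(true)).
    final INodeFileSnapshot snap =
        new INodeFileSnapshot(src, src.computeFileSize(true));
    // If src grows afterwards, snap.size still reports the old length.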

+ 45 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java

@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+
+/**
+ * INodeFile with a link to the next element.
+ * This class is used to represent the original file that is snapshotted.
+ * The snapshot files are represented by {@link INodeFileSnapshot}.
+ * The links of all the snapshot files and the original file form a circular
+ * linked list, so that every element is reachable from any other element.
+ */
+@InterfaceAudience.Private
+public class INodeFileWithLink extends INodeFile {
+  private INodeFileWithLink next;
+
+  public INodeFileWithLink(INodeFile f) {
+    super(f);
+  }
+
+  void setNext(INodeFileWithLink next) {
+    this.next = next;
+  }
+
+  INodeFileWithLink getNext() {
+    return next;
+  }
+}
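
The class javadoc describes a circular linked list, but this commit only introduces the next pointer and its accessors; the actual wiring is left to later changes. A hedged sketch of how insertion into such a circular list could look (insertAfter is hypothetical, not part of this commit):

    // Hypothetical helper, not in this commit: link "node" in right after
    // "this". A lone element first closes the cycle on itself so the list
    // is circular as soon as it has a successor.
    void insertAfter(INodeFileWithLink node) {
      if (next == null) {
        next = this;        // one-element cycle
      }
      node.setNext(next);   // node -> old successor
      setNext(node);        // this -> node
    }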