HDFS-4097. Provide CLI support for createSnapshot. Contributed by Brandon Li.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1401971 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas, 12 years ago
parent commit: 2d5334931e

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -2219,6 +2219,17 @@ public abstract class FileSystem extends Configured implements Closeable {
       ) throws IOException {
   }
 
+  /**
+   * Create a snapshot
+   * @param snapshotName The name of the snapshot
+   * @param snapshotRoot The directory where the snapshot will be taken
+   */
+  public void createSnapshot(String snapshotName, String snapshotRoot)
+      throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+        + " doesn't support createSnapshot");
+  }
+  
   // making it volatile to be able to do a double checked locking
   private volatile static boolean FILE_SYSTEMS_LOADED = false;
 

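The default implementation added to FileSystem throws for any file system that does not override createSnapshot, so snapshot support is opt-in per implementation. A minimal sketch of what a caller sees against a non-supporting file system (the probe class and local-FS setup are illustrative, not part of this commit):

// Sketch: the new default createSnapshot rejects unsupported file systems.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class SnapshotProbe {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration()); // local FS by default
    try {
      fs.createSnapshot("s1", "/user/data"); // (snapshotName, snapshotRoot)
    } catch (UnsupportedOperationException e) {
      // e.g. "LocalFileSystem doesn't support createSnapshot"
      System.err.println(e.getMessage());
    }
  }
}
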
+ 1 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java

@@ -57,6 +57,7 @@ abstract public class FsCommand extends Command {
     factory.registerCommands(Tail.class);
     factory.registerCommands(Test.class);
     factory.registerCommands(Touch.class);
+    factory.registerCommands(SnapshotCommands.class);
   }
 
   protected FsCommand() {}

+ 82 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java

@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.shell;
+
+import java.io.IOException;
+import java.util.LinkedList;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.shell.PathExceptions.PathIsNotDirectoryException;
+
+/**
+ * Snapshot related operations
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+
+class SnapshotCommands extends FsCommand {
+  private final static String CREATE_SNAPSHOT = "createSnapshot";
+  
+  public static void registerCommands(CommandFactory factory) {
+    factory.addClass(CreateSnapshot.class, "-" + CREATE_SNAPSHOT);
+  }
+  
+  /**
+   *  Create a snapshot
+   */
+  public static class CreateSnapshot extends FsCommand {
+    public static final String NAME = CREATE_SNAPSHOT;
+    public static final String USAGE = "<snapshotName> <snapshotRoot>";
+    public static final String DESCRIPTION = "Create a snapshot on a directory";
+
+    private static String snapshotName;
+
+    @Override
+    protected void processPath(PathData item) throws IOException {
+      if (!item.stat.isDirectory()) {
+        throw new PathIsNotDirectoryException(item.toString());
+      }
+    }
+    
+    @Override
+    protected void processOptions(LinkedList<String> args) throws IOException {
+      if (args.size() != 2) {
+        throw new IOException("args number not 2:" + args.size());
+      }
+      snapshotName = args.removeFirst();
+      // TODO: name length check  
+
+    }
+
+    @Override
+    protected void processArguments(LinkedList<PathData> items)
+    throws IOException {
+      super.processArguments(items);
+      if (exitCode != 0) { // check for error collecting paths
+        return;
+      }
+      assert(items.size() == 1);
+      PathData sroot = items.getFirst();
+      String snapshotRoot = sroot.path.toString();
+      sroot.fs.createSnapshot(snapshotName, snapshotRoot);
+    }    
+  }
+}
+

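The new command registers under the name "-createSnapshot" and expects exactly two arguments, snapshot name first and snapshot root second: processOptions peels off the name, processPath rejects non-directory roots, and processArguments forwards the single remaining path to FileSystem#createSnapshot. A hedged usage sketch driving the shell programmatically (the name and path are placeholders):

// Equivalent to the command line: hadoop fs -createSnapshot s1 /user/data
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class CreateSnapshotCli {
  public static void main(String[] args) throws Exception {
    int rc = ToolRunner.run(new Configuration(), new FsShell(),
        new String[] { "-createSnapshot", "s1", "/user/data" });
    System.exit(rc); // non-zero if /user/data is not a directory, etc.
  }
}
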
+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt

@@ -26,3 +26,5 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4091. Add snapshot quota to limit the number of snapshots allowed.
   (szetszwo)
+
+  HDFS-4097. Provide CLI support for createSnapshot. (Brandon Li via suresh)

+ 11 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -79,7 +79,6 @@ import javax.net.SocketFactory;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStorageLocation;
@@ -1879,6 +1878,17 @@ public class DFSClient implements java.io.Closeable {
   public boolean setSafeMode(SafeModeAction action) throws IOException {
     return namenode.setSafeMode(action);
   }
+ 
+  /**
+   * Create one snapshot.
+   * 
+   * @see ClientProtocol#createSnapshot(String snapshotName, String
+   *      snapshotRoot)
+   */
+  public void createSnapshot(String snapshotName, String snapshotRoot)
+      throws IOException {
+    namenode.createSnapshot(snapshotName, snapshotRoot);
+  }
 
   /**
    * Allow snapshot on a directory.

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -890,4 +890,10 @@ public class DistributedFileSystem extends FileSystem {
       throws IOException {
     dfs.disallowSnapshot(snapshotRoot);
   }
+  
+  @Override
+  public void createSnapshot(String snapshotName, String snapshotRoot)
+      throws IOException {
+    dfs.createSnapshot(snapshotName, snapshotRoot);
+  }
 }

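End to end, the client path is now FileSystem#createSnapshot → DistributedFileSystem → DFSClient → the createSnapshot RPC on ClientProtocol. A sketch of a direct call against HDFS (the NameNode URI is an assumption):

// Sketch: calling the new API against HDFS; dynamic dispatch selects
// DistributedFileSystem#createSnapshot, which delegates to DFSClient.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class DfsSnapshotDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"),
        new Configuration());
    fs.createSnapshot("s1", "/user/data"); // placeholder name and root
  }
}

Note that at this commit FSNamesystem#createSnapshot is still a TODO stub (see below), so the RPC round-trips without actually creating anything on the NameNode yet.
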
+ 0 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -958,21 +958,6 @@ public interface ClientProtocol {
   public void createSnapshot(String snapshotName, String snapshotRoot)
       throws IOException;
   
-  /**
-   * Delete a snapshot
-   * @param snapshotName name of the snapshot to be deleted
-   * @param snapshotRoot the path where the snapshot exists
-   */
-  public void deleteSnapshot(String snapshotName, String snapshotRoot)
-      throws IOException;
-
-  /**
-   * List snapshots of one directory
-   * @param snapshotRoot the path where the snapshot exists
-   */
-  public SnapshotInfo[] listSnapshots(String snapshotRoot)
-      throws IOException;
-  
     /**
      * Allow snapshot on a directory.
      * @param snapshotRoot the directory to be snapped

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotInfo.java

@@ -22,7 +22,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;
 
 /**
- * Interface that represents the over the wire information for a file.
+ * SnapshotInfo maintains information for a snapshot
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving

+ 0 - 45
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
@@ -55,8 +54,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Create
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
@@ -92,8 +89,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSer
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListSnapshotsRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListSnapshotsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
@@ -141,7 +136,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProt
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.io.Text;
 
@@ -160,8 +154,6 @@ import com.google.protobuf.ServiceException;
 public class ClientNamenodeProtocolServerSideTranslatorPB implements
     ClientNamenodeProtocolPB {
   final private ClientProtocol server;
-  static final DeleteSnapshotResponseProto VOID_DELETE_SNAPSHOT_RESPONSE =
-      DeleteSnapshotResponseProto.newBuilder().build();
   static final CreateSnapshotResponseProto VOID_CREATE_SNAPSHOT_RESPONSE =
       CreateSnapshotResponseProto.newBuilder().build();
   static final AllowSnapshotResponseProto VOID_ALLOW_SNAPSHOT_RESPONSE = 
@@ -876,43 +868,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     return VOID_CREATE_SNAPSHOT_RESPONSE;
   }
 
-  @Override
-  public DeleteSnapshotResponseProto deleteSnapshot(RpcController controller,
-      DeleteSnapshotRequestProto request) throws ServiceException {
-    try {
-      server.deleteSnapshot(request.getSnapshotName(),
-          request.getSnapshotRoot());
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-    return VOID_DELETE_SNAPSHOT_RESPONSE;
-  }
-
-  @Override
-  public ListSnapshotsResponseProto listSnapshots(RpcController controller,
-      ListSnapshotsRequestProto request) throws ServiceException {
-    SnapshotInfo[] result;
-
-    try {
-      result = server.listSnapshots(request.getSnapshotRoot());
-      ListSnapshotsResponseProto.Builder builder = ListSnapshotsResponseProto
-          .newBuilder();
-      for (SnapshotInfo si : result) {
-        SnapshotInfoProto.Builder infobuilder = SnapshotInfoProto.newBuilder();
-        infobuilder.setSnapshotName(si.getSnapshotName());
-        infobuilder.setSnapshotRoot(si.getSnapshotRoot());
-        infobuilder.setCreateTime(si.getCreateTime());
-        infobuilder.setPermission(si.getPermission());
-        infobuilder.setOwner(si.getOwner());
-        infobuilder.setGroup(si.getGroup());
-        builder.addSnapshots(infobuilder);
-      }
-      return builder.build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
   @Override
   public AllowSnapshotResponseProto allowSnapshot(RpcController controller,
       AllowSnapshotRequestProto req) throws ServiceException {

+ 0 - 39
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -42,7 +42,6 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -60,7 +59,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Create
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
@@ -82,8 +80,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLis
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListSnapshotsRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListSnapshotsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
@@ -106,7 +102,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSaf
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
@@ -848,40 +843,6 @@ public class ClientNamenodeProtocolTranslatorPB implements
     }
   }
 
-  @Override
-  public void deleteSnapshot(String snapshotName, String snapshotRoot)
-      throws IOException {
-    DeleteSnapshotRequestProto req = DeleteSnapshotRequestProto.newBuilder()
-        .setSnapshotName(snapshotName).setSnapshotRoot(snapshotRoot).build();
-    try {
-      rpcProxy.deleteSnapshot(null, req);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  @Override
-  public SnapshotInfo[] listSnapshots(String snapshotRoot) throws IOException {
-    SnapshotInfo[] sinfo = null;
-    ListSnapshotsRequestProto req = null;
-
-    req = ListSnapshotsRequestProto.newBuilder().setSnapshotRoot(snapshotRoot)
-        .build();
-    try {
-      ListSnapshotsResponseProto resp = rpcProxy.listSnapshots(null, req);
-      sinfo = new SnapshotInfo[resp.getSnapshotsCount()];
-      for (int i = 0; i < resp.getSnapshotsCount(); i++) {
-        SnapshotInfoProto siProto = resp.getSnapshots(i);
-        sinfo[i] = new SnapshotInfo(siProto.getSnapshotName(), resp
-            .getSnapshots(i).getSnapshotRoot(), siProto.getCreateTime(),
-            siProto.getPermission(), siProto.getOwner(), siProto.getGroup());
-      }
-      return sinfo;
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
   @Override
   public void allowSnapshot(String snapshotRoot) throws IOException {
     AllowSnapshotRequestProto req = AllowSnapshotRequestProto.newBuilder()

+ 10 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -5556,4 +5556,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       throws SafeModeException, IOException {
     // TODO: implement
   }
+  
+  /**
+   * Create a snapshot
+   * @param snapshotName The name of the snapshot
+   * @param snapshotRoot The directory where the snapshot will be taken
+   */
+  public void createSnapshot(String snapshotName, String snapshotRoot)
+      throws SafeModeException, IOException {
+    // TODO: implement
+  }
 }

+ 5 - 16
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -62,7 +62,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
@@ -1077,21 +1076,11 @@ class NameNodeRpcServer implements NamenodeProtocols {
   @Override
   public void createSnapshot(String snapshotName, String snapshotRoot)
       throws IOException {
-    // TODO Auto-generated method stub
-  }
-
-  @Override
-  public void deleteSnapshot(String snapshotName, String snapshotRoot)
-      throws IOException {
-    // TODO Auto-generated method stub
-  }
-
-  @Override
-  public SnapshotInfo[] listSnapshots(String snapshotRoot) throws IOException {
-    // TODO Auto-generated method stub  
-    SnapshotInfo[] si = new SnapshotInfo[1];
-    si[0] = new SnapshotInfo(null, null, null, null, null, null);
-    return si;
+    if (!checkPathLength(snapshotRoot)) {
+      throw new IOException("createSnapshot: Pathname too long.  Limit "
+          + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
+    }
+    namesystem.createSnapshot(snapshotName, snapshotRoot);
   }
 
   @Override

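NameNodeRpcServer now validates the snapshot root before delegating to FSNamesystem, replacing the auto-generated stubs. The checkPathLength helper is pre-existing and not shown in this diff; a sketch of the kind of guard it performs, under the assumption that it bounds both total length and directory depth:

// Sketch (not from this diff): a path guard in the style of checkPathLength.
private boolean checkPathLength(String src) {
  Path srcPath = new Path(src);
  return src.length() <= MAX_PATH_LENGTH    // total characters
      && srcPath.depth() <= MAX_PATH_DEPTH; // path components
}
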
+ 0 - 20
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto

@@ -450,22 +450,6 @@ message CreateSnapshotRequestProto {
 message CreateSnapshotResponseProto { // void response
 }
 
-message DeleteSnapshotRequestProto {
-  required string snapshotName = 1;
-  required string snapshotRoot = 2;
-}
-
-message DeleteSnapshotResponseProto { // void response
-}
-
-message ListSnapshotsRequestProto {
-  required string snapshotRoot = 1;
-}
-
-message ListSnapshotsResponseProto {
-  repeated SnapshotInfoProto snapshots = 1;
-}
-
 message AllowSnapshotRequestProto {
   required string snapshotRoot = 1;
 }
@@ -555,10 +539,6 @@ service ClientNamenodeProtocol {
       returns(GetDataEncryptionKeyResponseProto);
   rpc createSnapshot(CreateSnapshotRequestProto)
       returns(CreateSnapshotResponseProto);
-  rpc deleteSnapshot(DeleteSnapshotRequestProto)
-      returns(DeleteSnapshotResponseProto);
-  rpc listSnapshots(ListSnapshotsRequestProto)
-      returns(ListSnapshotsResponseProto);
   rpc allowSnapshot(AllowSnapshotRequestProto)
       returns(AllowSnapshotResponseProto);
   rpc disallowSnapshot(DisallowSnapshotRequestProto)