Browse Source

HDFS-4802. Disallowing snapshot on / twice should throw SnapshotException but not IllegalStateException. Contributed by Jing Zhao

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1480015 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 12 years ago
parent
commit
41312abe84

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt

@@ -344,3 +344,6 @@ Branch-2802 Snapshot (Unreleased)
 
 
  HDFS-4801. lsSnapshottableDir throws IllegalArgumentException when root is
  snapshottable.  (Jing Zhao via szetszwo)
+
+  HDFS-4802. Disallowing snapshot on / twice should throw SnapshotException
+  but not IllegalStateException.  (Jing Zhao via szetszwo)

+ 5 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java

@@ -84,13 +84,13 @@ public class SnapshotManager implements SnapshotStats {
      if (s.isAncestorDirectory(dir)) {
        throw new SnapshotException(
            "Nested snapshottable directories not allowed: path=" + path
-            + ", the ancestor " + s.getFullPathName()
+            + ", the subdirectory " + s.getFullPathName()
            + " is already a snapshottable directory.");
      }
      if (dir.isAncestorDirectory(s)) {
        throw new SnapshotException(
            "Nested snapshottable directories not allowed: path=" + path
-            + ", the subdirectory " + s.getFullPathName()
+            + ", the ancestor " + s.getFullPathName()
            + " is already a snapshottable directory.");
      }
    }
@@ -156,6 +156,9 @@ public class SnapshotManager implements SnapshotStats {
    }

    if (s == fsdir.getRoot()) {
+      if (s.getSnapshotQuota() == 0) {
+        throw new SnapshotException("Root is not a snapshottable directory");
+      }
      s.setSnapshotQuota(0); 
    } else {
      s.replaceSelf(iip.getLatestSnapshot(), fsdir.getINodeMap());

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java

@@ -357,7 +357,7 @@ public class Diff<K, E extends Diff.Element<K>> {
    // (A1) All lists are sorted.
    // (A2) All elements in dlist must be in previous.
    // (A3) All elements in clist must be not in tmp = previous - dlist.
-    final List<E> tmp = new ArrayList<E>();
+    final List<E> tmp = new ArrayList<E>(previous.size() - dlist.size());
    {
      // tmp = previous - dlist
      final Iterator<E> i = previous.iterator();
@@ -374,7 +374,7 @@ public class Diff<K, E extends Diff.Element<K>> {
      }
    }

-    final List<E> current = new ArrayList<E>();
+    final List<E> current = new ArrayList<E>(tmp.size() + clist.size());
    {
      // current = tmp + clist
      final Iterator<E> tmpIterator = tmp.iterator();

+ 13 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SNAPSHOT_LIMIT;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;

 import java.io.IOException;
 import java.util.Random;
@@ -39,6 +40,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -123,6 +125,13 @@ public class TestNestedSnapshots {
    print("delete snapshot " + rootSnapshot);
    hdfs.disallowSnapshot(rootPath);
    print("disallow snapshot " + rootStr);
+    try {
+      hdfs.disallowSnapshot(rootPath);
+      fail("Expect snapshot exception when disallowing snapshot on root again");
+    } catch (SnapshotException e) {
+      GenericTestUtils.assertExceptionContains(
+          "Root is not a snapshottable directory", e);
+    }
    
    //change foo to non-snapshottable
    hdfs.deleteSnapshot(foo, s1name);
@@ -134,13 +143,13 @@ public class TestNestedSnapshots {
      hdfs.allowSnapshot(rootPath);
      Assert.fail();
    } catch(SnapshotException se) {
-      assertNestedSnapshotException(se, "ancestor");
+      assertNestedSnapshotException(se, "subdirectory");
    }
    try {
      hdfs.allowSnapshot(foo);
      Assert.fail();
    } catch(SnapshotException se) {
-      assertNestedSnapshotException(se, "ancestor");
+      assertNestedSnapshotException(se, "subdirectory");
    }

    final Path sub1Bar = new Path(bar, "sub1");
@@ -150,13 +159,13 @@ public class TestNestedSnapshots {
      hdfs.allowSnapshot(sub1Bar);
      Assert.fail();
    } catch(SnapshotException se) {
-      assertNestedSnapshotException(se, "subdirectory");
+      assertNestedSnapshotException(se, "ancestor");
    }
    try {
      hdfs.allowSnapshot(sub2Bar);
      Assert.fail();
    } catch(SnapshotException se) {
-      assertNestedSnapshotException(se, "subdirectory");
+      assertNestedSnapshotException(se, "ancestor");
    }
  }