git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1478135 13f79535-47bb-0310-9956-ffa450edef68
@@ -329,3 +329,6 @@ Branch-2802 Snapshot (Unreleased)
HDFS-4758. Disallow nested snapshottable directories and unwrap
RemoteException. (szetszwo)
+
+ HDFS-4781. Fix a NullPointerException when listing .snapshot under
+ a non-existing directory. (szetszwo)
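
For reference, a minimal reproduction of the bug this entry fixes, as a sketch only: it assumes MiniDFSCluster from the hadoop-hdfs test jar, and the class name DotSnapshotRepro is made up. Before the fix, the getFileStatus call surfaced a NullPointerException from the NameNode; with the fix it fails with FileNotFoundException.

    import java.io.FileNotFoundException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Hypothetical reproduction, not part of this patch.
    public class DotSnapshotRepro {
      public static void main(String[] args) throws Exception {
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
        try {
          FileSystem fs = cluster.getFileSystem();
          try {
            // ".snapshot" under a directory that was never created.
            fs.getFileStatus(new Path("/doesNotExist/.snapshot"));
          } catch (FileNotFoundException e) {
            // With the fix applied, the lookup fails cleanly here.
            System.out.println("Expected: " + e);
          }
        } finally {
          cluster.shutdown();
        }
      }
    }
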
@@ -1588,7 +1588,8 @@ public class FSDirectory implements Closeable {
src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
final INode node = this.getINode(dirPath);
- if (node.isDirectory()
+ if (node != null
+ && node.isDirectory()
&& node.asDirectory() instanceof INodeDirectorySnapshottable) {
return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
HdfsFileStatus.EMPTY_NAME, -1L);
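
Restating the shape of the fix as a standalone sketch, with stand-in names (Node, resolve, and isSnapshotListable are illustrative, not the FSDirectory API): getINode(dirPath) returns null when the path does not exist, so the null test must precede any dereference.

    // Stand-in types; the real code uses INode and INodeDirectorySnapshottable.
    class Node {
      boolean isDirectory() { return true; }
      boolean isSnapshottable() { return true; }
    }

    class DotSnapshotLookup {
      // Like FSDirectory#getINode: yields null for a nonexistent path.
      static Node resolve(String path) { return null; }

      static boolean isSnapshotListable(String dirPath) {
        Node node = resolve(dirPath);
        // Null-check first, so a nonexistent parent of ".snapshot" falls
        // through to not-found handling instead of an NPE.
        return node != null && node.isDirectory() && node.isSnapshottable();
      }
    }
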
@@ -313,12 +313,12 @@ public abstract class INode implements Diff.Element<byte[]> {
* children.
*
* 1.3 The current inode is a {@link FileWithSnapshot}.
- * Call {@link INode#recordModification(Snapshot)} to capture the
- * current states. Mark the INode as deleted.
+ * Call recordModification(..) to capture the current states.
+ * Mark the INode as deleted.
* 1.4 The current inode is a {@link INodeDirectoryWithSnapshot}.
- * Call {@link INode#recordModification(Snapshot)} to capture the
- * current states. Destroy files/directories created after the latest snapshot
+ * Call recordModification(..) to capture the current states.
+ * Destroy files/directories created after the latest snapshot
* (i.e., the inodes stored in the created list of the latest snapshot).
* Recursively clean remaining children.
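
The two cases above, restated as an illustrative dispatch sketch; only the type names and recordModification(Snapshot, INodeMap) come from the surrounding javadoc, the method itself is hypothetical.

    // Illustrative only: how cases 1.3 and 1.4 above divide the work.
    void cleanCurrentState(INode inode, Snapshot latest, INodeMap inodeMap)
        throws QuotaExceededException {
      if (inode instanceof FileWithSnapshot) {
        // Case 1.3: capture the current state, then mark the file as deleted.
        inode.recordModification(latest, inodeMap);
      } else if (inode instanceof INodeDirectoryWithSnapshot) {
        // Case 1.4: capture the current state, destroy inodes created after
        // the latest snapshot (the created list), then recurse into the rest.
        inode.recordModification(latest, inodeMap);
      }
    }
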
@@ -111,7 +111,7 @@ public class INodeDirectory extends INodeWithAdditionalFields {
* Remove the specified child from this directory.
* @param child the child inode to be removed
- * @param latest See {@link INode#recordModification(Snapshot)}.
+ * @param latest See {@link INode#recordModification(Snapshot, INodeMap)}.
*/
public boolean removeChild(INode child, Snapshot latest,
final INodeMap inodeMap) throws QuotaExceededException {
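
A hedged caller-side sketch of removeChild; parentDir, child, latestSnapshot, and inodeMap are assumed locals, and the surrounding method is taken to declare throws QuotaExceededException.

    // removeChild records the pre-removal state against the latest snapshot
    // (see recordModification(Snapshot, INodeMap)) before detaching the child.
    boolean removed = parentDir.removeChild(child, latestSnapshot, inodeMap);
    if (!removed) {
      // The child was not present under this directory in the current view.
    }
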
@@ -22,6 +22,8 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
+import java.io.FileNotFoundException;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -238,6 +240,18 @@ public class TestSnapshotPathINodes {
final INode last = nodesInPath.getLastINode();
assertEquals(last.getFullPathName(), sub1.toString());
assertFalse(last instanceof INodeFileWithSnapshot);
+
+ String[] invalidPathComponent = {"invalidDir", "foo", ".snapshot", "bar"};
+ Path invalidPath = new Path(invalidPathComponent[0]);
+ for(int i = 1; i < invalidPathComponent.length; i++) {
+ invalidPath = new Path(invalidPath, invalidPathComponent[i]);
+ try {
+ hdfs.getFileStatus(invalidPath);
+ Assert.fail();
+ } catch(FileNotFoundException fnfe) {
+ System.out.println("The exception is expected: " + fnfe);
+ }
+ }
}
/**