
HADOOP-6229. Attempt to make a directory under an existing file on LocalFileSystem should throw an Exception. Contributed by Boris Shkolnik.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@812127 13f79535-47bb-0310-9956-ffa450edef68
Thomas White 16 years ago
Parent
Commit
9b0b37e410
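
To illustrate the behavior this change introduces, here is a minimal sketch (not part of the commit) of client code that now sees a FileAlreadyExistsException when asked to create a directory underneath an existing file on the local file system; the /tmp path is a placeholder.

// Sketch only; requires org.apache.hadoop.conf.Configuration and org.apache.hadoop.fs.*
Configuration conf = new Configuration();
LocalFileSystem fs = FileSystem.getLocal(conf);

Path file = new Path("/tmp/some-existing-file");   // placeholder path
fs.create(file).close();                           // create an ordinary file

try {
  fs.mkdirs(new Path(file, "child"));              // directory under that file
  // before this change, the call simply returned false
} catch (FileAlreadyExistsException e) {
  System.err.println("parent is a file: " + e.getMessage());
}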

+ 3 - 0
CHANGES.txt

@@ -972,6 +972,9 @@ Trunk (unreleased changes)
     HADOOP-6199. Move io.map.skip.index property to core-default from mapred.
     (Amareshwari Sriramadasu via cdouglas)
 
+    HADOOP-6229. Attempt to make a directory under an existing file on
+    LocalFileSystem should throw an Exception. (Boris Shkolnik via tomwhite)
+
 Release 0.20.1 - Unreleased
 
   INCOMPATIBLE CHANGES

+ 38 - 0
src/java/org/apache/hadoop/fs/FileAlreadyExistsException.java

@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+
+import java.io.IOException;
+
+/**
+ * Used when target file already exists for any operation and 
+ * is not configured to be overwritten.  
+ */
+public class FileAlreadyExistsException
+    extends IOException {
+
+  public FileAlreadyExistsException() {
+    super();
+  }
+
+  public FileAlreadyExistsException(String msg) {
+    super(msg);
+  }
+}
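
Because the new exception extends java.io.IOException, existing callers that already handle IOException keep compiling; a caller that wants to treat this case specially can catch the subclass first. The helper below is a hypothetical sketch, not code from the commit.

// Hypothetical caller; mkLogDir and its error message are illustrative only.
void mkLogDir(FileSystem fs, Path dir) throws IOException {
  try {
    fs.mkdirs(dir);
  } catch (FileAlreadyExistsException e) {
    // Specific case introduced by this change: a path component is a file.
    throw new IOException("log dir blocked by existing file: " + dir, e);
  }
  // Other IOExceptions (permissions, disk errors, ...) propagate unchanged.
}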

+ 12 - 1
src/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -308,8 +308,18 @@ public class RawLocalFileSystem extends FileSystem {
    * treat existence as an error.
    */
   public boolean mkdirs(Path f) throws IOException {
+    if(f == null) {
+      throw new IllegalArgumentException("mkdirs path arg is null");
+    }
     Path parent = f.getParent();
     File p2f = pathToFile(f);
+    if(parent != null) {
+      File parent2f = pathToFile(parent);
+      if(parent2f != null && parent2f.exists() && !parent2f.isDirectory()) {
+        throw new FileAlreadyExistsException("Parent path is not a directory: " 
+            + parent);
+      }
+    }
     return (parent == null || mkdirs(parent)) &&
       (p2f.mkdir() || p2f.isDirectory());
   }
@@ -318,7 +328,8 @@ public class RawLocalFileSystem extends FileSystem {
   @Override
   public boolean mkdirs(Path f, FsPermission permission) throws IOException {
     boolean b = mkdirs(f);
-    setPermission(f, permission);
+    if(b)
+      setPermission(f, permission);
     return b;
   }
   

+ 22 - 0
src/test/core/org/apache/hadoop/fs/TestLocalFileSystem.java

@@ -154,4 +154,26 @@ public class TestLocalFileSystem extends TestCase {
     assertEquals(path.makeQualified(fs), status.getPath());
     cleanupFile(fs, path);
   }
+  
+  public void testMkdirs() throws IOException {
+    Configuration conf = new Configuration();
+    LocalFileSystem fs = FileSystem.getLocal(conf);
+    Path test_dir = new Path(TEST_ROOT_DIR, "test_dir");
+    Path test_file = new Path(TEST_ROOT_DIR, "file1");
+    assertTrue(fs.mkdirs(test_dir));
+   
+    writeFile(fs, test_file);
+    // creating dir over a file
+    Path bad_dir = new Path(test_file, "another_dir");
+    
+    try {
+      fs.mkdirs(bad_dir);
+      fail("Failed to detect existing file in path");
+    } catch (FileAlreadyExistsException e) { }
+    
+    try {
+      fs.mkdirs(null);
+      fail("Failed to detect null in mkdir arg");
+    } catch (IllegalArgumentException e) { }
+  }
 }