
HADOOP-857. Fix S3 FileSystem implementation to permit its use for MapReduce input and output. Contributed by Tom White.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@494137 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 18 years ago
parent
commit
b3ac3e52f2

+ 6 - 0
CHANGES.txt

@@ -1,6 +1,12 @@
 Hadoop Change Log
 
 
+Trunk (unreleased changes)
+
+ 1. HADOOP-857.  Fix S3 FileSystem implementation to permit its use
+    for MapReduce input and output.  (Tom White via cutting)
+
+
 Release 0.10.0 - 2007-01-05
 
  1. HADOOP-763. Change DFS namenode benchmark to not use MapReduce.

+ 1 - 1
src/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java

@@ -216,7 +216,7 @@ class Jets3tFileSystemStore implements FileSystemStore {
     if (!path.isAbsolute()) {
       throw new IllegalArgumentException("Path must be absolute: " + path);
     }
-    return urlEncode(path.toString());
+    return urlEncode(path.toUri().getPath());
   }
 
   private Path keyToPath(String key) {

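The pathToKey change above matters because Path.toString() keeps the filesystem scheme and authority, while toUri().getPath() yields only the path component that the store should use as an S3 key. A minimal sketch of the difference, assuming standard org.apache.hadoop.fs.Path behavior:

import org.apache.hadoop.fs.Path;

public class PathToKeyDemo {
  public static void main(String[] args) {
    Path qualified = new Path("s3://bucket/dir/file");

    // Path.toString() keeps the scheme and authority, so keys built
    // from it could embed "s3://bucket" in the stored object name.
    System.out.println(qualified.toString());        // s3://bucket/dir/file

    // toUri().getPath() strips the scheme and authority, leaving only
    // the absolute path -- the form the store expects as a key.
    System.out.println(qualified.toUri().getPath()); // /dir/file
  }
}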
+ 14 - 10
src/java/org/apache/hadoop/fs/s3/S3FileSystem.java

@@ -83,12 +83,12 @@ public class S3FileSystem extends FileSystem {
     Path absolutePath = makeAbsolute(path);
     INode inode = store.getINode(absolutePath);
     if (inode == null) {
-      store.storeINode(path, INode.DIRECTORY_INODE);
+      store.storeINode(absolutePath, INode.DIRECTORY_INODE);
     } else if (inode.isFile()) {
       throw new IOException(String.format(
-          "Can't make directory for path %s since it is a file.", path));
+          "Can't make directory for path %s since it is a file.", absolutePath));
     }
-    Path parent = path.getParent();
+    Path parent = absolutePath.getParent();
     return (parent == null || mkdirs(parent));
   }
 
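The hunks above and below rely on makeAbsolute being applied consistently before every store call; the bug was mixing the raw path argument with its absolute form. A minimal sketch of such a helper, assuming a workingDir field like other Hadoop FileSystem implementations keep (not the exact Hadoop source):

  private Path makeAbsolute(Path path) {
    if (path.isAbsolute()) {
      return path;
    }
    // Resolve relative paths against the current working directory,
    // so the same inode is addressed however the caller spelled it.
    return new Path(workingDir, path);
  }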
@@ -123,13 +123,14 @@ public class S3FileSystem extends FileSystem {
 
   @Override
   public Path[] listPathsRaw(Path path) throws IOException {
-    INode inode = store.getINode(makeAbsolute(path));
+    Path absolutePath = makeAbsolute(path);
+    INode inode = store.getINode(absolutePath);
     if (inode == null) {
       return null;
     } else if (inode.isFile()) {
-      return new Path[] { path };
+      return new Path[] { absolutePath };
     } else { // directory
-      Set<Path> paths = store.listSubPaths(path);
+      Set<Path> paths = store.listSubPaths(absolutePath);
       return paths.toArray(new Path[0]);
     }
   }
@@ -146,10 +147,6 @@ public class S3FileSystem extends FileSystem {
       short replication, long blockSize, Progressable progress)
       throws IOException {
 
-    if (!isDirectory(file.getParent())) {
-      throw new IOException("Cannot create file " + file
-          + " since parent directory does not exist.");
-    }
     INode inode = store.getINode(makeAbsolute(file));
     if (inode != null) {
       if (overwrite) {
@@ -157,6 +154,13 @@ public class S3FileSystem extends FileSystem {
       } else {
         throw new IOException("File already exists: " + file);
       }
+    } else {
+      Path parent = file.getParent();
+      if (parent != null) {
+        if (!mkdirs(parent)) {
+          throw new IOException("Mkdirs failed to create " + parent.toString());
+        }
+      }      
     }
     return new S3OutputStream(getConf(), store, makeAbsolute(file),
         blockSize, progress);

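With the change above, createRaw no longer fails when the parent directory is missing; it creates the parent chain via mkdirs, which is the behavior MapReduce output depends on. A usage sketch in the style of the test below, assuming an initialized s3FileSystem and the data/BLOCK_SIZE fixtures from S3FileSystemBaseTest:

    // Sketch: write to a path whose ancestors do not yet exist.
    Path file = new Path("/brand/new/dirs/file");   // hypothetical path
    FSOutputStream out = s3FileSystem.createRaw(file, false, (short) 1, BLOCK_SIZE);
    out.write(data, 0, BLOCK_SIZE);
    out.close();

    // createRaw has created the missing parents implicitly.
    assertTrue("Parent exists", s3FileSystem.exists(new Path("/brand/new/dirs")));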
+ 9 - 7
src/test/org/apache/hadoop/fs/s3/S3FileSystemBaseTest.java

@@ -189,16 +189,18 @@ public abstract class S3FileSystemBaseTest extends TestCase {
     
   }
 
-  public void testWriteInNonExistentDirectory() {
+  public void testWriteInNonExistentDirectory() throws IOException {
     Path path = new Path("/test/hadoop/file");    
-    try {
-      s3FileSystem.createRaw(path, false, (short) 1, 128);
-      fail("Should throw IOException.");
-    } catch (IOException e) {
-      // Expected
-    }
+    FSOutputStream out = s3FileSystem.createRaw(path, false, (short) 1, BLOCK_SIZE);
+    out.write(data, 0, BLOCK_SIZE);
+    out.close();
+    
+    assertTrue("Exists", s3FileSystem.exists(path));
+    assertEquals("Length", BLOCK_SIZE, s3FileSystem.getLength(path));
+    assertTrue("Parent exists", s3FileSystem.exists(path.getParent()));
   }
 
+
   public void testRename() throws Exception {
     int len = BLOCK_SIZE;