
Fix for HADOOP-125. Absolute paths are tricky on Windows. For Hadoop's purposes, consider things that start with a slash to be absolute. Also, Hadoop should not change the JVM's CWD. All files are now correctly cleaned up for a Nutch crawl, in either local or pseudo-distributed mode.
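A minimal standalone sketch (class and method names are illustrative, not Hadoop source) of why a plain File.isAbsolute() check is not enough on Windows: a path such as "/user/foo" is merely drive-relative there, so isAbsolute() returns false even though Hadoop wants to treat it as absolute.

    import java.io.File;

    public class AbsolutePathCheck {
        // Hadoop's convention after this fix: a path is absolute if the
        // JVM says so, or if it begins with a forward or back slash.
        static boolean isAbsoluteForHadoop(File f) {
            return f.isAbsolute()
                || f.getPath().startsWith("/")
                || f.getPath().startsWith("\\");
        }

        public static void main(String[] args) {
            File f = new File("/user/foo");
            // On Windows, File normalizes this to "\user\foo" and
            // isAbsolute() is false (no drive letter); on Unix it is true.
            System.out.println(f.isAbsolute());
            System.out.println(isAbsoluteForHadoop(f));  // true on both platforms
        }
    }

Note that the backslash case is checked as well, since java.io.File rewrites forward slashes to the platform separator on Windows.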

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@392451 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting, 19 years ago
Parent commit: b54cc2875d

+ 6 - 4
src/java/org/apache/hadoop/fs/LocalFileSystem.java

@@ -29,7 +29,8 @@ import org.apache.hadoop.conf.Configuration;
  * @author Mike Cafarella
  *****************************************************************/
 public class LocalFileSystem extends FileSystem {
-    private File workingDir = new File(System.getProperty("user.dir"));
+    private File workingDir
+      = new File(System.getProperty("user.dir")).getAbsoluteFile();
     TreeMap sharedLockDataSet = new TreeMap();
     TreeMap nonsharedLockDataSet = new TreeMap();
     TreeMap lockObjSet = new TreeMap();
@@ -156,7 +157,7 @@ public class LocalFileSystem extends FileSystem {
       if (isAbsolute(f)) {
         return f;
       } else {
-        return new File(workingDir, f.toString());
+        return new File(workingDir, f.toString()).getAbsoluteFile();
       }
     }
     
@@ -200,7 +201,9 @@ public class LocalFileSystem extends FileSystem {
     }
 
     public boolean isAbsolute(File f) {
-      return f.isAbsolute();
+      return f.isAbsolute() ||
+        f.getPath().startsWith("/") ||
+        f.getPath().startsWith("\\");
     }
 
     public long getLength(File f) throws IOException {
@@ -226,7 +229,6 @@ public class LocalFileSystem extends FileSystem {
      */
     public void setWorkingDirectory(File new_dir) {
       workingDir = makeAbsolute(new_dir);
-      System.setProperty("user.dir", workingDir.toString());
     }
     
     public File getWorkingDirectory() {
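For the working-directory half of the fix, a minimal sketch (a simplified stand-in class, not the patched FileSystem itself) of the pattern the diff above settles on: keep the working directory as per-instance state and resolve relative paths against it, rather than mutating the process-wide "user.dir" property, which is unsupported at runtime and would leak the change to every other user of the JVM.

    import java.io.File;

    class WorkingDirSketch {
        // Per-instance working directory, made absolute up front.
        private File workingDir =
            new File(System.getProperty("user.dir")).getAbsoluteFile();

        boolean isAbsolute(File f) {
            return f.isAbsolute()
                || f.getPath().startsWith("/")
                || f.getPath().startsWith("\\");
        }

        // Resolve f against workingDir without touching JVM-global state.
        File makeAbsolute(File f) {
            if (isAbsolute(f)) {
                return f;
            }
            return new File(workingDir, f.toString()).getAbsoluteFile();
        }

        // Note: no System.setProperty("user.dir", ...) here.
        void setWorkingDirectory(File newDir) {
            workingDir = makeAbsolute(newDir);
        }
    }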

+ 0 - 1
src/java/org/apache/hadoop/mapred/LocalJobRunner.java

@@ -92,7 +92,6 @@ class LocalJobRunner implements JobSubmissionProtocol {
         job.setNumReduceTasks(1);                 // force a single reduce task
         for (int i = 0; i < splits.length; i++) {
           mapIds.add("map_" + newId());
-          setWorkingDirectory(job, fs);
           MapTask map = new MapTask(file, (String)mapIds.get(i), splits[i]);
           map.setConf(job);
           map_tasks += 1;

+ 2 - 3
src/test/org/apache/hadoop/fs/TestLocalFileSystem.java

@@ -42,7 +42,7 @@ public class TestLocalFileSystem extends TestCase {
       
       // create a directory and check for it
       File dir1 = new File("dir1");
-      File dir1Absolute = dir1.getAbsoluteFile();
+      File dir1Absolute = new File(subdirAbsolute, dir1.getPath());
       fileSys.mkdirs(dir1);
       assertTrue(fileSys.isDirectory(dir1));
       assertTrue(fileSys.isDirectory(dir1Absolute));
@@ -55,8 +55,7 @@ public class TestLocalFileSystem extends TestCase {
       // create files and manipulate them.
       File file1 = new File("file1");
       File file2 = new File("sub/file2");
-      File file2_abs = file2.getAbsoluteFile();
-      assertEquals(file2_abs, new File(subdirAbsolute, file2.getPath()));
+      File file2_abs = new File(subdirAbsolute, file2.getPath());
       writeFile(fileSys, file1);
       fileSys.copyFromLocalFile(file1, file2);
       assertTrue(fileSys.exists(file1));