Просмотр исходного кода

HADOOP-373. Consistently check the value of FileSystem.mkdirs(). Contributed by Wendy.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@470195 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 18 лет назад
Родитель
Commit
cbc4151701
35 измененных файлов: 195 добавлений и 60 удалений
  1. 3 0
      CHANGES.txt
  2. 8 2
      src/examples/org/apache/hadoop/examples/NNBench.java
  3. 3 1
      src/examples/org/apache/hadoop/examples/PiBenchmark.java
  4. 5 1
      src/examples/org/apache/hadoop/examples/RandomWriter.java
  5. 3 1
      src/java/org/apache/hadoop/dfs/DFSShell.java
  6. 13 5
      src/java/org/apache/hadoop/dfs/FSDataset.java
  7. 3 1
      src/java/org/apache/hadoop/dfs/FSDirectory.java
  8. 4 1
      src/java/org/apache/hadoop/filecache/DistributedCache.java
  9. 15 4
      src/java/org/apache/hadoop/fs/FileUtil.java
  10. 10 4
      src/java/org/apache/hadoop/fs/LocalFileSystem.java
  11. 3 2
      src/java/org/apache/hadoop/io/MapFile.java
  12. 3 1
      src/java/org/apache/hadoop/mapred/JobHistory.java
  13. 3 1
      src/java/org/apache/hadoop/mapred/JobTracker.java
  14. 4 1
      src/java/org/apache/hadoop/mapred/LocalJobRunner.java
  15. 5 1
      src/java/org/apache/hadoop/mapred/TaskRunner.java
  16. 8 2
      src/java/org/apache/hadoop/mapred/TaskTracker.java
  17. 13 3
      src/java/org/apache/hadoop/util/CopyFiles.java
  18. 12 2
      src/java/org/apache/hadoop/util/RunJar.java
  19. 2 2
      src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java
  20. 6 1
      src/test/org/apache/hadoop/dfs/MiniDFSCluster.java
  21. 2 1
      src/test/org/apache/hadoop/dfs/TestDFSMkdirs.java
  22. 1 1
      src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java
  23. 4 1
      src/test/org/apache/hadoop/dfs/TestFsck.java
  24. 4 1
      src/test/org/apache/hadoop/fs/DFSCIOTest.java
  25. 4 1
      src/test/org/apache/hadoop/fs/TestCopyFiles.java
  26. 3 1
      src/test/org/apache/hadoop/fs/TestGlobPaths.java
  27. 2 2
      src/test/org/apache/hadoop/fs/TestLocalFileSystem.java
  28. 9 4
      src/test/org/apache/hadoop/mapred/MRCaching.java
  29. 10 1
      src/test/org/apache/hadoop/mapred/MiniMRCluster.java
  30. 3 2
      src/test/org/apache/hadoop/mapred/PiEstimator.java
  31. 3 1
      src/test/org/apache/hadoop/mapred/TestEmptyJobWithDFS.java
  32. 12 4
      src/test/org/apache/hadoop/mapred/TestMapRed.java
  33. 3 1
      src/test/org/apache/hadoop/mapred/TestMiniMRClasspath.java
  34. 3 1
      src/test/org/apache/hadoop/mapred/TestMiniMRWithDFS.java
  35. 6 2
      src/test/org/apache/hadoop/record/test/TestMapRed.java

+ 3 - 0
CHANGES.txt

@@ -127,6 +127,9 @@ Trunk (unreleased changes)
 35. HADOOP-669.  Fix a problem introduced by HADOOP-90 that can cause
     DFS to lose files.  (Milind Bhandarkar via cutting)
 
+36. HADOOP-373.  Consistently check the value returned by
+    FileSystem.mkdirs().  (Wendy Chien via cutting)
+
 
 Release 0.7.2 - 2006-10-18
 

+ 8 - 2
src/examples/org/apache/hadoop/examples/NNBench.java

@@ -81,7 +81,9 @@ public class NNBench extends MapReduceBase implements Reducer {
                     Reporter reporter) throws IOException {
       int nFiles = ((IntWritable) value).get();
       Path taskDir = new Path(topDir, taskId);
-      fileSys.mkdirs(taskDir);
+      if (!fileSys.mkdirs(taskDir)) {
+        throw new IOException("Mkdirs failed to create " + taskDir.toString());
+      }
       byte[] buffer = new byte[32768];
       for (int index = 0; index < nFiles; index++) {
         FSDataOutputStream out = fileSys.create(
@@ -186,7 +188,11 @@ public class NNBench extends MapReduceBase implements Reducer {
       return;
     }
     fileSys.delete(tmpDir);
-    fileSys.mkdirs(inDir);
+    if (!fileSys.mkdirs(inDir)) {
+      System.out.println("Error: Mkdirs failed to create " + 
+                         inDir.toString());
+      return;
+    }
 
     for(int i=0; i < numMaps; ++i) {
       Path file = new Path(inDir, "part"+i);

+ 3 - 1
src/examples/org/apache/hadoop/examples/PiBenchmark.java

@@ -168,7 +168,9 @@ public class PiBenchmark {
     Path outDir = new Path(tmpDir, "out");
     FileSystem fileSys = FileSystem.get(jobConf);
     fileSys.delete(tmpDir);
-    fileSys.mkdirs(inDir);
+    if (!fileSys.mkdirs(inDir)) {
+      throw new IOException("Mkdirs failed to create " + inDir.toString());
+    }
     
     jobConf.setInputPath(inDir);
     jobConf.setOutputPath(outDir);

+ 5 - 1
src/examples/org/apache/hadoop/examples/RandomWriter.java

@@ -189,7 +189,11 @@ public class RandomWriter extends MapReduceBase implements Reducer {
       return;
     }
     fileSys.delete(tmpDir);
-    fileSys.mkdirs(inDir);
+    if (!fileSys.mkdirs(inDir)) {
+      System.out.println("Error: Mkdirs failed to create " + 
+                         inDir.toString());
+      return;
+    }
     NumberFormat numberFormat = NumberFormat.getInstance();
     numberFormat.setMinimumIntegerDigits(6);
     numberFormat.setGroupingUsed(false);

+ 3 - 1
src/java/org/apache/hadoop/dfs/DFSShell.java

@@ -309,7 +309,9 @@ public class DFSShell extends ToolBase {
      */
     public void mkdir(String src) throws IOException {
         Path f = new Path(src);
-        fs.mkdirs(f);
+        if (!fs.mkdirs(f)) {
+          System.out.println("Mkdirs failed to create " + src);
+        }
     }
     
     /**

+ 13 - 5
src/java/org/apache/hadoop/dfs/FSDataset.java

@@ -48,13 +48,17 @@ class FSDataset implements FSConstants {
 
         /**
          */
-        public FSDir(File dir, int myIdx, FSDir[] siblings) {
+        public FSDir(File dir, int myIdx, FSDir[] siblings) 
+            throws IOException {
             this.dir = dir;
             this.myIdx = myIdx;
             this.siblings = siblings;
             this.children = null;
             if (! dir.exists()) {
-              dir.mkdirs();
+              if (! dir.mkdirs()) {
+                throw new IOException("Mkdirs failed to create " + 
+                                      dir.toString());
+              }
             } else {
               File[] files = dir.listFiles();
               int numChildren = 0;
@@ -80,7 +84,7 @@ class FSDataset implements FSConstants {
 
         /**
          */
-        public File addBlock(Block b, File src) {
+        public File addBlock(Block b, File src) throws IOException {
             if (numBlocks < maxBlocksPerDir) {
               File dest = new File(dir, b.getBlockName());
               src.renameTo(dest);
@@ -194,7 +198,11 @@ class FSDataset implements FSConstants {
         if (tmpDir.exists()) {
           FileUtil.fullyDelete(tmpDir);
         }
-        tmpDir.mkdirs();
+        if (!tmpDir.mkdirs()) {
+          if (!tmpDir.isDirectory()) {
+            throw new IOException("Mkdirs failed to create " + tmpDir.toString());
+          }
+        }
         this.usage = new DF(dir, conf);
       }
       
@@ -232,7 +240,7 @@ class FSDataset implements FSConstants {
         return f;
       }
       
-      File addBlock(Block b, File f) {
+      File addBlock(Block b, File f) throws IOException {
         return dataDir.addBlock(b, f);
       }
       

+ 3 - 1
src/java/org/apache/hadoop/dfs/FSDirectory.java

@@ -366,7 +366,9 @@ class FSDirectory implements FSConstants {
 
         // Always do an implicit mkdirs for parent directory tree
         String pathString = path.toString();
-        mkdirs(new Path(pathString).getParent().toString());
+        if( ! mkdirs(new Path(pathString).getParent().toString()) ) {
+           return false;
+        }
         INode newNode = new INode( new File(pathString).getName(), blocks, replication);
         if( ! unprotectedAddFile(path, newNode) ) {
            NameNode.stateChangeLog.info("DIR* FSDirectory.addFile: "

+ 4 - 1
src/java/org/apache/hadoop/filecache/DistributedCache.java

@@ -192,7 +192,10 @@ public class DistributedCache {
       localFs.delete(cacheStatus.localLoadPath);
       Path parchive = new Path(cacheStatus.localLoadPath,
                                new Path(cacheStatus.localLoadPath.getName()));
-      localFs.mkdirs(cacheStatus.localLoadPath);
+      if (!localFs.mkdirs(cacheStatus.localLoadPath)) {
+          throw new IOException("Mkdirs failed to create directory " + 
+                                cacheStatus.localLoadPath.toString());
+      }
       String cacheId = cache.getPath();
       dfs.copyToLocalFile(new Path(cacheId), parchive);
       dfs.copyToLocalFile(new Path(cacheId + "_md5"), new Path(parchive

+ 15 - 4
src/java/org/apache/hadoop/fs/FileUtil.java

@@ -67,7 +67,9 @@ public class FileUtil {
     dst = checkDest(src.getName(), dstFS, dst);
 
     if (srcFS.isDirectory(src)) {
-      dstFS.mkdirs(dst);
+      if (!dstFS.mkdirs(dst)) {
+        return false;
+      }
       Path contents[] = srcFS.listPaths(src);
       for (int i = 0; i < contents.length; i++) {
         copy(srcFS, contents[i], dstFS, new Path(dst, contents[i].getName()),
@@ -137,7 +139,9 @@ public class FileUtil {
     dst = checkDest(src.getName(), dstFS, dst);
 
     if (src.isDirectory()) {
-      dstFS.mkdirs(dst);
+      if (!dstFS.mkdirs(dst)) {
+        return false;
+      }
       File contents[] = src.listFiles();
       for (int i = 0; i < contents.length; i++) {
         copy(contents[i], dstFS, new Path(dst, contents[i].getName()),
@@ -166,7 +170,9 @@ public class FileUtil {
     dst = checkDest(src.getName(), dst);
 
     if (srcFS.isDirectory(src)) {
-      dst.mkdirs();
+      if (!dst.mkdirs()) {
+        return false;
+      }
       Path contents[] = srcFS.listPaths(src);
       for (int i = 0; i < contents.length; i++) {
         copy(srcFS, contents[i], new File(dst, contents[i].getName()),
@@ -281,7 +287,12 @@ public class FileUtil {
           InputStream in = zipFile.getInputStream(entry);
           try {
             File file = new File(unzipDir, entry.getName());
-            file.getParentFile().mkdirs();
+            if (!file.getParentFile().mkdirs()) {           
+              if (!file.getParentFile().isDirectory()) {
+                throw new IOException("Mkdirs failed to create " + 
+                                      file.getParentFile().toString());
+              }
+            }
             OutputStream out = new FileOutputStream(file);
             try {
               byte[] buffer = new byte[8192];

+ 10 - 4
src/java/org/apache/hadoop/fs/LocalFileSystem.java

@@ -179,9 +179,11 @@ public class LocalFileSystem extends FileSystem {
             throw new IOException("File already exists:"+f);
         }
         Path parent = f.getParent();
-        if (parent != null)
-          mkdirs(parent);
-
+        if (parent != null) {
+          if (!mkdirs(parent)) {
+            throw new IOException("Mkdirs failed to create " + parent.toString());
+          }
+        }
         return new LocalFSFileOutputStream(f);
     }
 
@@ -379,7 +381,11 @@ public class LocalFileSystem extends FileSystem {
 
         // move the file there
         File badDir = new File(dir, "bad_files");
-        badDir.mkdirs();
+        if (!badDir.mkdirs()) {
+          if (!badDir.isDirectory()) {
+            throw new IOException("Mkdirs failed to create " + badDir.toString());
+          }
+        }
         String suffix = "." + new Random().nextInt();
         File badFile = new File(badDir,f.getName()+suffix);
         LOG.warn("Moving bad file " + f + " to " + badFile);

+ 3 - 2
src/java/org/apache/hadoop/io/MapFile.java

@@ -137,8 +137,9 @@ public class MapFile {
       this.lastKey = comparator.newKey();
 
       Path dir = new Path(dirName);
-      fs.mkdirs(dir);
-
+      if (!fs.mkdirs(dir)) {
+          throw new IOException("Mkdirs failed to create directory " + dir.toString());
+      }
       Path dataFile = new Path(dir, DATA_FILE_NAME);
       Path indexFile = new Path(dir, INDEX_FILE_NAME);
 

+ 3 - 1
src/java/org/apache/hadoop/mapred/JobHistory.java

@@ -88,7 +88,9 @@ public class JobHistory {
       try{
         File logDir = new File(LOG_DIR); 
         if( ! logDir.exists() ){
-          logDir.mkdirs(); 
+          if( ! logDir.mkdirs() ){
+            throw new IOException("Mkdirs failed to create " + logDir.toString());
+          }
         }
         masterIndex = 
           new PrintWriter(

+ 3 - 1
src/java/org/apache/hadoop/mapred/JobTracker.java

@@ -495,7 +495,9 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, JobSubmiss
         this.systemDir = jobConf.getSystemDir();
         this.fs = FileSystem.get(conf);
         fs.delete(systemDir);
-        fs.mkdirs(systemDir);
+        if (!fs.mkdirs(systemDir)) {
+          throw new IOException("Mkdirs failed to create " + systemDir.toString());
+        }
 
         // Same with 'localDir' except it's always on the local disk.
         jobConf.deleteLocalFiles(SUBDIR);

+ 4 - 1
src/java/org/apache/hadoop/mapred/LocalJobRunner.java

@@ -115,7 +115,10 @@ class LocalJobRunner implements JobSubmissionProtocol {
           String mapId = (String)mapIds.get(i);
           Path mapOut = this.mapoutputFile.getOutputFile(mapId, 0);
           Path reduceIn = this.mapoutputFile.getInputFile(i, reduceId);
-          localFs.mkdirs(reduceIn.getParent());
+          if (!localFs.mkdirs(reduceIn.getParent())) {
+            throw new IOException("Mkdirs failed to create " + 
+                                  reduceIn.getParent().toString());
+          }
           if (!localFs.rename(mapOut, reduceIn))
             throw new IOException("Couldn't rename " + mapOut);
           this.mapoutputFile.removeAll(mapId);

+ 5 - 1
src/java/org/apache/hadoop/mapred/TaskRunner.java

@@ -124,7 +124,11 @@ abstract class TaskRunner extends Thread {
       // start with same classpath as parent process
       classPath.append(System.getProperty("java.class.path"));
       classPath.append(sep);
-      workDir.mkdirs();
+      if (!workDir.mkdirs()) {
+        if (!workDir.isDirectory()) {
+          LOG.fatal("Mkdirs failed to create " + workDir.toString());
+        }
+      }
 	  
       String jar = conf.getJar();
       if (jar != null) {       

+ 8 - 2
src/java/org/apache/hadoop/mapred/TaskTracker.java

@@ -294,7 +294,11 @@ public class TaskTracker
             File workDir = new File(
                                     new File(localJobFile.toString()).getParent(),
                                     "work");
-            workDir.mkdirs();
+            if (!workDir.mkdirs()) {
+              if (!workDir.isDirectory()) {
+                throw new IOException("Mkdirs failed to create " + workDir.toString());
+              }
+            }
             RunJar.unJar(new File(localJarFile.toString()), workDir);
           }
           rjob.localized = true;
@@ -831,7 +835,9 @@ public class TaskTracker
               new Path(this.defaultJobConf.getLocalPath(TaskTracker.getJobCacheSubdir()), 
                 (task.getJobId() + Path.SEPARATOR + task.getTaskId()));
            FileSystem localFs = FileSystem.getNamed("local", fConf);
-           localFs.mkdirs(localTaskDir);
+           if (!localFs.mkdirs(localTaskDir)) {
+             throw new IOException("Mkdirs failed to create " + localTaskDir.toString());
+           }
            Path localTaskFile = new Path(localTaskDir, "job.xml");
            task.setJobFile(localTaskFile.toString());
            localJobConf.set("mapred.local.dir",

+ 13 - 3
src/java/org/apache/hadoop/util/CopyFiles.java

@@ -184,7 +184,12 @@ public class CopyFiles extends ToolBase {
       // create directories to hold destination file and create destFile
       Path destFile = new Path(destPath, src);
       Path destParent = destFile.getParent();
-      if (destParent != null) { destFileSys.mkdirs(destParent); }
+      if (destParent != null) { 
+        if (!destFileSys.mkdirs(destParent)) {
+          throw new IOException("Mkdirs failed to create " + 
+                                destParent.toString()); 
+        }
+      }
       FSDataOutputStream out = destFileSys.create(destFile);
       
       // copy file
@@ -285,7 +290,10 @@ public class CopyFiles extends ToolBase {
       Path inDir = new Path(jobDirectory, "in");
       Path fakeOutDir = new Path(jobDirectory, "out");
       FileSystem fileSys = FileSystem.get(jobConf);
-      fileSys.mkdirs(inDir);
+      if (!fileSys.mkdirs(inDir)) {
+        throw new IOException("Mkdirs failed to create " +
+                              inDir.toString());
+      }
       jobConf.set("distcp.job.dir", jobDirectory.toString());
       
       jobConf.setInputPath(inDir);
@@ -480,7 +488,9 @@ public class CopyFiles extends ToolBase {
       Path jobDirectory = new Path(jobConf.getSystemDir(), "distcp_" + 
           Integer.toString(Math.abs(r.nextInt()), 36));
       Path jobInputDir = new Path(jobDirectory, "in");
-      fileSystem.mkdirs(jobInputDir);
+      if (!fileSystem.mkdirs(jobInputDir)) {
+        throw new IOException("Mkdirs failed to create " + jobInputDir.toString());
+      }
       jobConf.setInputPath(jobInputDir);
       
       jobConf.set("distcp.job.dir", jobDirectory.toString());

+ 12 - 2
src/java/org/apache/hadoop/util/RunJar.java

@@ -41,7 +41,12 @@ public class RunJar {
           InputStream in = jar.getInputStream(entry);
           try {
             File file = new File(toDir, entry.getName());
-            file.getParentFile().mkdirs();
+            if (!file.getParentFile().mkdirs()) {
+              if (!file.getParentFile().isDirectory()) {
+                throw new IOException("Mkdirs failed to create " + 
+                                      file.getParentFile().toString());
+              }
+            }
             OutputStream out = new FileOutputStream(file);
             try {
               byte[] buffer = new byte[8192];
@@ -102,7 +107,12 @@ public class RunJar {
 
     final File workDir = File.createTempFile("hadoop-unjar","");
     workDir.delete();
-    workDir.mkdirs();
+    if (!workDir.mkdirs()) {
+      if (!workDir.isDirectory()) {
+        System.err.println("Mkdirs failed to create " + workDir.toString());
+        System.exit(-1);
+      }
+    }
 
     Runtime.getRuntime().addShutdownHook(new Thread() {
         public void run() {

+ 2 - 2
src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java

@@ -136,14 +136,14 @@ public class ClusterTestDFSNamespaceLogging extends TestCase implements FSConsta
     
       // create a directory
       try {
-        dfsClient.mkdirs( new UTF8( "/data") );
+        assertTrue(dfsClient.mkdirs( new UTF8( "/data") ));
         assertMkdirs( "/data", false );
       } catch ( IOException ioe ) {
       	ioe.printStackTrace();
       }
        
       try {
-        dfsClient.mkdirs( new UTF8( "data") );
+        assertTrue(dfsClient.mkdirs( new UTF8( "data") ));
         assertMkdirs( "data", true );
       } catch ( IOException ioe ) {
        	ioe.printStackTrace();

+ 6 - 1
src/test/org/apache/hadoop/dfs/MiniDFSCluster.java

@@ -97,7 +97,12 @@ public class MiniDFSCluster {
         String[] dirs = conf.getStrings("dfs.data.dir");
         for (int idx = 0; idx < dirs.length; idx++) {
           File dataDir = new File(dirs[idx]);
-          dataDir.mkdirs();
+          if (!dataDir.mkdirs()) {      
+            if (!dataDir.isDirectory()) {
+              throw new RuntimeException("Mkdirs failed to create directory " +
+                                         dataDir.toString());
+            }
+          }
         }
         node = new DataNode(conf, dirs);
         node.run();

+ 2 - 1
src/test/org/apache/hadoop/dfs/TestDFSMkdirs.java

@@ -50,7 +50,8 @@ public class TestDFSMkdirs extends TestCase {
     	Path myPath = new Path("/test/mkdirs");
     	assertTrue(fileSys.mkdirs(myPath));
     	assertTrue(fileSys.exists(myPath));
-    	
+    	assertTrue(fileSys.mkdirs(myPath));
+
     	// Second, create a file in that directory.
     	Path myFile = new Path("/test/mkdirs/myFile");
     	writeFile(fileSys, myFile);

+ 1 - 1
src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java

@@ -61,7 +61,7 @@ public class TestDFSShellGenericOptions extends TestCase {
     private void testConfOption(String[] args, String namenode) {
         // prepare configuration hadoop-site.xml
         File configDir = new File(new File("build", "test"), "minidfs");
-        configDir.mkdirs();
+        assertTrue(configDir.mkdirs());
         File siteFile = new File(configDir, "hadoop-site.xml");
         PrintWriter pw;
         try {

+ 4 - 1
src/test/org/apache/hadoop/dfs/TestFsck.java

@@ -108,7 +108,10 @@ public class TestFsck extends TestCase {
     
     for (int idx = 0; idx < NFILES; idx++) {
       Path fPath = new Path(root, files[idx].getName());
-      fs.mkdirs(fPath.getParent());
+      if (!fs.mkdirs(fPath.getParent())) {
+        throw new IOException("Mkdirs failed to create directory " +
+                              fPath.getParent().toString());
+      }
       FSDataOutputStream out = fs.create(fPath);
       byte[] toWrite = new byte[files[idx].getSize()];
       Random rb = new Random(files[idx].getSeed());

+ 4 - 1
src/test/org/apache/hadoop/fs/DFSCIOTest.java

@@ -437,7 +437,10 @@ public class DFSCIOTest extends TestCase {
       
       if (testType != TEST_TYPE_CLEANUP) {
     	  	fs.delete(HDFS_TEST_DIR);
-    	  	fs.mkdirs(HDFS_TEST_DIR);
+    	  	if (!fs.mkdirs(HDFS_TEST_DIR)) {
+                    throw new IOException("Mkdirs failed to create " + 
+                                          HDFS_TEST_DIR.toString());
+                }
 
     	  	//Copy the executables over to the remote filesystem
     	  	String hadoopHome = System.getenv("HADOOP_HOME");

+ 4 - 1
src/test/org/apache/hadoop/fs/TestCopyFiles.java

@@ -107,7 +107,10 @@ public class TestCopyFiles extends TestCase {
     
     for (int idx = 0; idx < NFILES; idx++) {
       Path fPath = new Path(root, files[idx].getName());
-      fs.mkdirs(fPath.getParent());
+      if (!fs.mkdirs(fPath.getParent())) {
+        throw new IOException("Mkdirs failed to create " + 
+                              fPath.getParent().toString());
+      }
       FSDataOutputStream out = fs.create(fPath);
       byte[] toWrite = new byte[files[idx].getSize()];
       Random rb = new Random(files[idx].getSeed());

+ 3 - 1
src/test/org/apache/hadoop/fs/TestGlobPaths.java

@@ -235,7 +235,9 @@ public class TestGlobPaths extends TestCase {
   throws IOException {
     for(int i=0; i<Math.min(NUM_OF_PATHS, files.length); i++) {
       path[i] = new Path( files[i] );
-      fs.mkdirs( path[i] );
+      if (!fs.mkdirs( path[i] )) {
+        throw new IOException("Mkdirs failed to create " + path[i].toString());
+      }
     }
     return fs.globPaths( new Path(pattern) );
   }

+ 2 - 2
src/test/org/apache/hadoop/fs/TestLocalFileSystem.java

@@ -51,14 +51,14 @@ public class TestLocalFileSystem extends TestCase {
       // make sure it doesn't already exist
       assertTrue(!fileSys.exists(subdir));
       // make it and check for it
-      fileSys.mkdirs(subdir);
+      assertTrue(fileSys.mkdirs(subdir));
       assertTrue(fileSys.isDirectory(subdir));
       
       fileSys.setWorkingDirectory(subdir);
       
       // create a directory and check for it
       Path dir1 = new Path("dir1");
-      fileSys.mkdirs(dir1);
+      assertTrue(fileSys.mkdirs(dir1));
       assertTrue(fileSys.isDirectory(dir1));
       
       // delete the directory and make sure it went away

+ 9 - 4
src/test/org/apache/hadoop/mapred/MRCaching.java

@@ -65,7 +65,9 @@ public class MRCaching {
         // read the cached files (unzipped, unjarred and text)
         // and put it into a single file /tmp/test.txt
         Path file = new Path("/tmp");
-        fs.mkdirs(file);
+        if (!fs.mkdirs(file)) {
+          throw new IOException("Mkdirs failed to create " + file.toString());
+        }
         Path fileOut = new Path(file, "test.txt");
         fs.delete(fileOut);
         DataOutputStream out = fs.create(fileOut);
@@ -130,8 +132,9 @@ public class MRCaching {
     final Path outDir = new Path(outdir);
     FileSystem fs = FileSystem.getNamed(fileSys, conf);
     fs.delete(outDir);
-    fs.mkdirs(inDir);
-
+    if (!fs.mkdirs(inDir)) {
+      throw new IOException("Mkdirs failed to create " + inDir.toString());
+    }
     {
       DataOutputStream file = fs.create(new Path(inDir, "part-0"));
       file.writeBytes(input);
@@ -160,7 +163,9 @@ public class MRCaching {
     Path zipPath = new Path(localPath, new Path("test.zip"));
     Path cacheTest = new Path("/tmp/cachedir");
     fs.delete(cacheTest);
-    fs.mkdirs(cacheTest);
+    if (!fs.mkdirs(cacheTest)) {
+      throw new IOException("Mkdirs failed to create " + cacheTest.toString());
+    }
     fs.copyFromLocalFile(txtPath, cacheTest);
     fs.copyFromLocalFile(jarPath, cacheTest);
     fs.copyFromLocalFile(zipPath, cacheTest);

+ 10 - 1
src/test/org/apache/hadoop/mapred/MiniMRCluster.java

@@ -116,12 +116,21 @@ public class MiniMRCluster {
                 File localDir = new File(jc.get("mapred.local.dir"));
                 String mapredDir = "";
                 File ttDir = new File(localDir, Integer.toString(taskTrackerPort) + "_" + 0);
-                ttDir.mkdirs();
+                if (!ttDir.mkdirs()) {
+                  if (!ttDir.isDirectory()) {
+                    throw new IOException("Mkdirs failed to create " + ttDir.toString());
+                  }
+                }
                 this.localDir[0] = ttDir.getAbsolutePath();
                 mapredDir = ttDir.getAbsolutePath();
                 for (int i = 1; i < numDir; i++){
                   ttDir = new File(localDir, Integer.toString(taskTrackerPort) + "_" + i);
                   ttDir.mkdirs();
+                  if (!ttDir.mkdirs()) {
+                    if (!ttDir.isDirectory()) {
+                      throw new IOException("Mkdirs failed to create " + ttDir.toString());
+                    }
+                  }
                   this.localDir[i] = ttDir.getAbsolutePath();
                   mapredDir = mapredDir + "," + ttDir.getAbsolutePath();
                 }

+ 3 - 2
src/test/org/apache/hadoop/mapred/PiEstimator.java

@@ -164,8 +164,9 @@ public class PiEstimator {
     Path outDir = new Path(tmpDir, "out");
     FileSystem fileSys = FileSystem.get(jobConf);
     fileSys.delete(tmpDir);
-    fileSys.mkdirs(inDir);
-    
+    if (!fileSys.mkdirs(inDir)) {
+      throw new IOException("Mkdirs failed to create " + inDir.toString());
+    }
     jobConf.setInputPath(inDir);
     jobConf.setOutputPath(outDir);
     

+ 3 - 1
src/test/org/apache/hadoop/mapred/TestEmptyJobWithDFS.java

@@ -57,7 +57,9 @@ public class TestEmptyJobWithDFS extends TestCase {
       final Path outDir = new Path("/testing/empty/output");
       FileSystem fs = FileSystem.getNamed(fileSys, conf);
       fs.delete(outDir);
-      fs.mkdirs(inDir);
+      if (!fs.mkdirs(inDir)) {
+          return false;
+      }
 
       // use WordCount example
       conf.set("fs.default.name", fileSys);

+ 12 - 4
src/test/org/apache/hadoop/mapred/TestMapRed.java

@@ -305,8 +305,12 @@ public class TestMapRed extends TestCase {
         SequenceFileOutputFormat.setCompressOutput(conf, true);
       }
       try {
-        fs.mkdirs(testdir);
-        fs.mkdirs(inDir);
+        if (!fs.mkdirs(testdir)) {
+          throw new IOException("Mkdirs failed to create " + testdir.toString());
+        }
+        if (!fs.mkdirs(inDir)) {
+          throw new IOException("Mkdirs failed to create " + inDir.toString());
+        }
         Path inFile = new Path(inDir, "part0");
         DataOutputStream f = fs.create(inFile);
         f.writeBytes("Owen was here\n");
@@ -364,10 +368,14 @@ public class TestMapRed extends TestCase {
         //
         FileSystem fs = FileSystem.get(conf);
         Path testdir = new Path("mapred.loadtest");
-        fs.mkdirs(testdir);
+        if (!fs.mkdirs(testdir)) {
+            throw new IOException("Mkdirs failed to create " + testdir.toString());
+        }
 
         Path randomIns = new Path(testdir, "genins");
-        fs.mkdirs(randomIns);
+        if (!fs.mkdirs(randomIns)) {
+            throw new IOException("Mkdirs failed to create " + randomIns.toString());
+        }
 
         Path answerkey = new Path(randomIns, "answer.key");
         SequenceFile.Writer out = 

+ 3 - 1
src/test/org/apache/hadoop/mapred/TestMiniMRClasspath.java

@@ -45,7 +45,9 @@ public class TestMiniMRClasspath extends TestCase {
     final Path outDir = new Path("/testing/wc/output");
     FileSystem fs = FileSystem.getNamed(fileSys, conf);
     fs.delete(outDir);
-    fs.mkdirs(inDir);
+    if (!fs.mkdirs(inDir)) {
+      throw new IOException("Mkdirs failed to create " + inDir.toString());
+    }
     {
       DataOutputStream file = fs.create(new Path(inDir, "part-0"));
       file.writeBytes(input);

+ 3 - 1
src/test/org/apache/hadoop/mapred/TestMiniMRWithDFS.java

@@ -49,7 +49,9 @@ public class TestMiniMRWithDFS extends TestCase {
     final Path outDir = new Path("/testing/wc/output");
     FileSystem fs = FileSystem.getNamed(fileSys, conf);
     fs.delete(outDir);
-    fs.mkdirs(inDir);
+    if (!fs.mkdirs(inDir)) {
+      throw new IOException("Mkdirs failed to create " + inDir.toString());
+    }
     {
       DataOutputStream file = fs.create(new Path(inDir, "part-0"));
       file.writeBytes(input);

+ 6 - 2
src/test/org/apache/hadoop/record/test/TestMapRed.java

@@ -248,10 +248,14 @@ public class TestMapRed extends TestCase {
         //
         FileSystem fs = FileSystem.get(conf);
         File testdir = new File("mapred.loadtest");
-        fs.mkdirs(testdir);
+        if (!fs.mkdirs(testdir)) {
+          throw new IOException("Mkdirs failed to create directory " + testdir.toString());
+        }
 
         File randomIns = new File(testdir, "genins");
-        fs.mkdirs(randomIns);
+        if (!fs.mkdirs(randomIns)) {
+          throw new IOException("Mkdirs failed to create directory " + randomIns.toString());
+        }
 
         File answerkey = new File(randomIns, "answer.key");
         SequenceFile.Writer out = SequenceFile.createWriter(fs, conf,