HADOOP-761. Change unit tests to not use /tmp. Contributed by Nigel.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@505512 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting, 18 years ago
commit 15a78e06d7

+ 2 - 0
CHANGES.txt

@@ -30,6 +30,8 @@ Trunk (unreleased changes)
     and stderr respectively, with each line tagged by the task's name.
     (Arun C Murthy via cutting)
 
+ 9. HADOOP-761.  Change unit tests to not use /tmp.  (Nigel Daley via cutting)
+
 
 Branch 0.11 - unreleased
 
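As the CHANGES.txt entry above summarizes, the recurring pattern in the Java test changes below is to resolve a per-build scratch directory from the test.build.data system property and fall back to /tmp only when the property is unset. A minimal, stand-alone sketch of that pattern (the class name is illustrative and not part of this commit):

    import java.io.File;

    public class TestTmpDirExample {
        // Resolve the scratch root once; the build is expected to pass
        // -Dtest.build.data pointing under the build tree, with /tmp kept
        // only as a last-resort fallback.
        private static final String TEST_ROOT_DIR =
            new File(System.getProperty("test.build.data", "/tmp"))
                .toString().replace(' ', '+');

        public static void main(String[] args) {
            // Test inputs and outputs hang off the scratch root, not /tmp.
            System.out.println(new File(TEST_ROOT_DIR, "wc/input"));
            System.out.println(new File(TEST_ROOT_DIR, "wc/output"));
        }
    }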

+ 49 - 21
src/c++/libhdfs/hdfs_test.c

@@ -114,31 +114,47 @@ int main(int argc, char **argv) {
         hdfsCloseFile(fs, readFile);
     }
 
- 
+    int totalResult = 0;
+    int result = 0;
     {
         //Generic file-system operations
 
         const char* srcPath = "/tmp/testfile.txt";
+        const char* localSrcPath = "testfile.txt";
         const char* dstPath = "/tmp/testfile2.txt";
-
-        fprintf(stderr, "hdfsCopy(remote-local): %s\n", (hdfsCopy(fs, srcPath, lfs, srcPath) ? "Failed!" : "Success!"));
-        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", (hdfsCopy(fs, srcPath, fs, dstPath) ? "Failed!" : "Success!"));
-        fprintf(stderr, "hdfsMove(local-local): %s\n", (hdfsMove(lfs, srcPath, lfs, dstPath) ? "Failed!" : "Success!"));
-        fprintf(stderr, "hdfsMove(remote-local): %s\n", (hdfsMove(fs, srcPath, lfs, srcPath) ? "Failed!" : "Success!"));
-
-        fprintf(stderr, "hdfsRename: %s\n", (hdfsRename(fs, dstPath, srcPath) ? "Failed!" : "Success!"));
-
-        fprintf(stderr, "hdfsLock: %s\n", (hdfsLock(fs, srcPath, 1) ? "Failed!" : "Success!"));
-        fprintf(stderr, "hdfsReleaseLock: %s\n", (hdfsReleaseLock(fs, srcPath) ? "Failed!" : "Success!"));
+        const char* localDstPath = "testfile2.txt";
+
+        fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath, lfs, dstPath)) ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath, lfs, srcPath)) ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        fprintf(stderr, "hdfsLock: %s\n", ((result = hdfsLock(fs, srcPath, 1)) ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsReleaseLock: %s\n", ((result = hdfsReleaseLock(fs, srcPath)) ? "Failed!" : "Success!"));
+        totalResult += result;
 
         const char* slashTmp = "/tmp";
         const char* newDirectory = "/tmp/newdir";
-        fprintf(stderr, "hdfsCreateDirectory: %s\n", (hdfsCreateDirectory(fs, newDirectory) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) ? "Failed!" : "Success!"));
+        totalResult += result;
 
         char buffer[256];
-        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", (hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer)) ? buffer : "Failed!"));
-        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", (hdfsSetWorkingDirectory(fs, slashTmp) ? "Failed!" : "Success!"));
-        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", (hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer)) ? buffer : "Failed!"));
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((result = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
+        totalResult += (result ? 0 : 1);
+        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((result = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
+        totalResult += (result ? 0 : 1);
 
         fprintf(stderr, "hdfsGetDefaultBlockSize: %Ld\n", hdfsGetDefaultBlockSize(fs));
         fprintf(stderr, "hdfsGetCapacity: %Ld\n", hdfsGetCapacity(fs));
@@ -152,6 +168,7 @@ int main(int argc, char **argv) {
             fprintf(stderr, "Size: %ld\n", fileInfo->mSize);
             hdfsFreeFileInfo(fileInfo, 1);
         } else {
+            totalResult++;
             fprintf(stderr, "waah! hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
         }
 
@@ -167,6 +184,7 @@ int main(int argc, char **argv) {
             hdfsFreeFileInfo(fileList, numEntries);
         } else {
             if (errno) {
+                totalResult++;
                 fprintf(stderr, "waah! hdfsListDirectory - FAILED!\n");
             } else {
                 fprintf(stderr, "Empty directory!\n");
@@ -187,18 +205,28 @@ int main(int argc, char **argv) {
                 ++i;
             }
         } else {
+            totalResult++;
             fprintf(stderr, "waah! hdfsGetHosts - FAILED!\n");
         }
         
         // Clean up
-        fprintf(stderr, "hdfsDelete: %s\n", (hdfsDelete(fs, newDirectory) ? "Failed!" : "Success!"));
-        fprintf(stderr, "hdfsDelete: %s\n", (hdfsDelete(fs, srcPath) ? "Failed!" : "Success!"));
-        fprintf(stderr, "hdfsDelete: %s\n", (hdfsDelete(lfs, srcPath) ? "Failed!" : "Success!"));
-        fprintf(stderr, "hdfsDelete: %s\n", (hdfsDelete(lfs, dstPath) ? "Failed!" : "Success!"));
-        fprintf(stderr, "hdfsExists: %s\n", (hdfsExists(fs, newDirectory) ? "Success!" : "Failed!"));
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory)) ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath)) ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath)) ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath)) ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) ? "Success!" : "Failed!"));
+        totalResult += (result ? 0 : 1);
     }
 
-    return 0;
+    if (totalResult != 0) {
+        return -1;
+    } else {
+        return 0;
+    }
 }
 
 /**

+ 5 - 2
src/c++/libhdfs/tests/test-libhdfs.sh

@@ -28,11 +28,14 @@ HDFS_TEST=hdfs_test
 HADOOP_LIB_DIR=$HADOOP_HOME/lib
 HADOOP_BIN_DIR=$HADOOP_HOME/bin
 
-## Manipulate HADOOP_CONF_DIR so as to include 
-## HADOOP_HOME/conf/hadoop-default.xml too
+# Manipulate HADOOP_CONF_DIR so as to include 
+# HADOOP_HOME/conf/hadoop-default.xml too
 # which is necessary to circumvent bin/hadoop
 HADOOP_CONF_DIR=$HADOOP_CONF_DIR:$HADOOP_HOME/conf
 
+# set pid file dir so they are not written to /tmp
+export HADOOP_PID_DIR=$HADOOP_LOG_DIR
+
 # CLASSPATH initially contains $HADOOP_CONF_DIR
 CLASSPATH="${HADOOP_CONF_DIR}"
 CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar

+ 1 - 0
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java

@@ -139,6 +139,7 @@ public class TestStreamedMerge extends TestCase {
         "-dfs", conf_.get("fs.default.name"), 
         "-jt", "local",
         "-jobconf", "stream.sideoutput.localfs=true", 
+        "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp")
     };
     ArrayList argList = new ArrayList();
     argList.addAll(Arrays.asList(testargs));

+ 2 - 1
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java

@@ -69,7 +69,8 @@ public class TestStreaming extends TestCase
         "-reducer", reduce,
         //"-verbose",
         //"-jobconf", "stream.debug=set"
-        "-jobconf", "keep.failed.task.files=true"
+        "-jobconf", "keep.failed.task.files=true",
+        "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp")
         };
   }
   

+ 1 - 0
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java

@@ -75,6 +75,7 @@ public class TestSymLink extends TestCase
             //"-jobconf", "stream.debug=set"
             "-jobconf", strNamenode,
             "-jobconf", strJobtracker,
+            "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
             "-cacheFile", "dfs://"+fileSys.getName()+CACHE_FILE + "#testlink"
         };
 
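The three streaming tests all gain the same extra argument: a -jobconf override that points stream.tmpdir at the test.build.data directory, keeping the streaming driver's temporary files out of /tmp. A small stand-alone sketch of how such an argument list is assembled (the mapper and reducer values here are illustrative, not the ones these tests actually use):

    import java.util.ArrayList;
    import java.util.Arrays;

    public class StreamTmpDirArgsExample {
        public static void main(String[] args) {
            // The extra -jobconf entry redirects the streaming job's scratch
            // space (stream.tmpdir) to the build tree rather than /tmp.
            String[] testargs = new String[] {
                "-mapper", "cat",
                "-reducer", "cat",
                "-jobconf", "keep.failed.task.files=true",
                "-jobconf", "stream.tmpdir=" + System.getProperty("test.build.data", "/tmp")
            };
            ArrayList<String> argList = new ArrayList<String>(Arrays.asList(testargs));
            System.out.println(argList);
        }
    }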

+ 11 - 7
src/test/org/apache/hadoop/mapred/MRCaching.java

@@ -62,8 +62,9 @@ public class MRCaching {
         Path[] localFiles = DistributedCache.getLocalCacheFiles(conf);
         FileSystem fs = FileSystem.get(conf);
         // read the cached files (unzipped, unjarred and text)
-        // and put it into a single file /tmp/test.txt
-        Path file = new Path("/tmp");
+        // and put it into a single file TEST_ROOT_DIR/test.txt
+        String TEST_ROOT_DIR = jconf.get("test.build.data","/tmp");
+        Path file = new Path(TEST_ROOT_DIR);
         if (!fs.mkdirs(file)) {
           throw new IOException("Mkdirs failed to create " + file.toString());
         }
@@ -127,6 +128,9 @@ public class MRCaching {
   public static boolean launchMRCache(String indir,
       String outdir, JobConf conf, String input)
       throws IOException {
+    String TEST_ROOT_DIR = new Path(System.getProperty("test.build.data","/tmp"))
+      .toString().replace(' ', '+');
+    conf.set("test.build.data",TEST_ROOT_DIR);
     final Path inDir = new Path(indir);
     final Path outDir = new Path(outdir);
     FileSystem fs = FileSystem.get(conf);
@@ -158,7 +162,7 @@ public class MRCaching {
     Path txtPath = new Path(localPath, new Path("test.txt"));
     Path jarPath = new Path(localPath, new Path("test.jar"));
     Path zipPath = new Path(localPath, new Path("test.zip"));
-    Path cacheTest = new Path("/tmp/cachedir");
+    Path cacheTest = new Path(TEST_ROOT_DIR + "/cachedir");
     fs.delete(cacheTest);
     if (!fs.mkdirs(cacheTest)) {
       throw new IOException("Mkdirs failed to create " + cacheTest.toString());
@@ -168,9 +172,9 @@ public class MRCaching {
     fs.copyFromLocalFile(zipPath, cacheTest);
     // setting the cached archives to zip, jar and simple text files
     String fileSys = fs.getName();
-    String archive1 = "dfs://" + fileSys + "/tmp/cachedir/test.jar";
-    String archive2 = "dfs://" + fileSys + "/tmp/cachedir/test.zip"; 
-    String file1 = "dfs://" + fileSys + "/tmp/cachedir/test.txt";
+    String archive1 = "dfs://" + fileSys + TEST_ROOT_DIR + "/cachedir/test.jar";
+    String archive2 = "dfs://" + fileSys + TEST_ROOT_DIR + "/cachedir/test.zip";
+    String file1 = "dfs://" + fileSys + TEST_ROOT_DIR + "/cachedir/test.txt";
     URI uri1 = null;
     URI uri2 = null;
     URI uri3 = null;
@@ -187,7 +191,7 @@ public class MRCaching {
     int count = 0;
     // after the job ran check to see if the the input from the localized cache
     // match the real string. check if there are 3 instances or not.
-    Path result = new Path("/tmp/test.txt");
+    Path result = new Path(TEST_ROOT_DIR + "/test.txt");
     {
       BufferedReader file = new BufferedReader(new InputStreamReader(fs
           .open(result)));

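Note how MRCaching threads the directory through to the tasks: launchMRCache resolves TEST_ROOT_DIR from the system property and stores it in the JobConf, and the map/reduce code reads it back with jconf.get("test.build.data", "/tmp"), since task code cannot rely on the driver's system properties. A minimal sketch of that hand-off (assumes Hadoop's JobConf on the classpath; the class name is illustrative):

    import org.apache.hadoop.mapred.JobConf;

    public class TestBuildDataHandoff {
        public static void main(String[] args) {
            // Driver side: resolve the scratch root and record it in the job
            // configuration, because tasks read the JobConf rather than the
            // driver's system properties.
            JobConf conf = new JobConf();
            String testRootDir = System.getProperty("test.build.data", "/tmp")
                .replace(' ', '+');
            conf.set("test.build.data", testRootDir);

            // Task side: recover the same root from the JobConf and write
            // the single output file underneath it.
            String rootSeenByTask = conf.get("test.build.data", "/tmp");
            System.out.println("task output goes to " + rootSeenByTask + "/test.txt");
        }
    }
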
+ 1 - 1
src/test/org/apache/hadoop/mapred/PiEstimator.java

@@ -31,7 +31,7 @@ import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 
 /**
- * A Map-reduce program to estimaate the valu eof Pi using monte-carlo
+ * A Map-reduce program to estimate the value of Pi using monte-carlo
  * method.
  *
  * @author Milind Bhandarkar

+ 8 - 4
src/test/org/apache/hadoop/mapred/TestMiniMRLocalFS.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.mapred;
 
 import java.io.IOException;
+import java.io.File;
 import junit.framework.TestCase;
 
 /**
@@ -28,8 +29,11 @@ import junit.framework.TestCase;
  */
 public class TestMiniMRLocalFS extends TestCase {
   
-    static final int NUM_MAPS = 10;
-    static final int NUM_SAMPLES = 100000;
+  static final int NUM_MAPS = 10;
+  static final int NUM_SAMPLES = 100000;
+  private static String TEST_ROOT_DIR =
+    new File(System.getProperty("test.build.data","/tmp"))
+    .toString().replace(' ', '+');
     
   public void testWithLocal() throws IOException {
       MiniMRCluster mr = null;
@@ -41,8 +45,8 @@ public class TestMiniMRLocalFS extends TestCase {
           assertTrue("Error in PI estimation "+error+" exceeds 0.01", (error < 0.01));
           // run the wordcount example with caching
           JobConf job = mr.createJobConf();
-          boolean ret = MRCaching.launchMRCache("/tmp/wc/input",
-                                                "/tmp/wc/output", 
+          boolean ret = MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input",
+                                                TEST_ROOT_DIR + "/wc/output", 
                                                 job,
                                                 "The quick brown fox\nhas many silly\n"
                                                     + "red fox sox\n");