
MAPREDUCE-4278. Cannot run two local jobs in parallel from the same gateway. Contributed by Sandy Ryza.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1430363 13f79535-47bb-0310-9956-ffa450edef68
Thomas White
parent commit 0f1f5491bc

+ 3 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -238,6 +238,9 @@ Release 2.0.3-alpha - Unreleased
     MAPREDUCE-4856. TestJobOutputCommitter uses same directory as
     TestJobCleanup. (Sandy Ryza via tomwhite)
 
+    MAPREDUCE-4278. Cannot run two local jobs in parallel from the same
+    gateway. (Sandy Ryza via tomwhite)
+
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES

+ 8 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java

@@ -609,8 +609,12 @@ public class LocalJobRunner implements ClientProtocol {
   // JobSubmissionProtocol methods
 
   private static int jobid = 0;
+  // used to ensure that local jobs running in different JVMs don't
+  // collide on staging or job directories
+  private int randid;
+  
   public synchronized org.apache.hadoop.mapreduce.JobID getNewJobID() {
-    return new org.apache.hadoop.mapreduce.JobID("local", ++jobid);
+    return new org.apache.hadoop.mapreduce.JobID("local" + randid, ++jobid);
   }
 
   public org.apache.hadoop.mapreduce.JobStatus submitJob(
@@ -739,10 +743,11 @@ public class LocalJobRunner implements ClientProtocol {
         "/tmp/hadoop/mapred/staging"));
     UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
     String user;
+    randid = rand.nextInt(Integer.MAX_VALUE);
     if (ugi != null) {
-      user = ugi.getShortUserName() + rand.nextInt();
+      user = ugi.getShortUserName() + randid;
     } else {
-      user = "dummy" + rand.nextInt();
+      user = "dummy" + randid;
     }
     return fs.makeQualified(new Path(stagingRootDir, user+"/.staging")).toString();
   }
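The essence of the LocalJobRunner change is that each runner instance picks one random number and reuses it in both the job ID prefix and the staging-directory name, so two local jobs launched from separate JVMs on the same gateway resolve to different paths. A minimal standalone sketch of that idea follows; class and method names are illustrative only, not Hadoop's API, and the sketch fixes randid at construction time rather than in getStagingAreaDir() as the real patch does.

import java.util.Random;

// Standalone sketch of the collision-avoidance idea in the patch above:
// pick one random id per runner instance and reuse it for both the job ID
// prefix and the staging directory, so two local runners in different JVMs
// never share paths.
public class LocalRunnerIdSketch {
  private final Random rand = new Random();
  // analogous to LocalJobRunner.randid: chosen once, reused everywhere
  private final int randid = rand.nextInt(Integer.MAX_VALUE);
  private int jobid = 0;

  // e.g. "job_local1804289383_0001"
  public synchronized String newJobId() {
    return String.format("job_local%d_%04d", randid, ++jobid);
  }

  // e.g. "/tmp/hadoop/mapred/staging/alice1804289383/.staging"
  public String stagingDir(String user) {
    return "/tmp/hadoop/mapred/staging/" + user + randid + "/.staging";
  }

  public static void main(String[] args) {
    LocalRunnerIdSketch runner = new LocalRunnerIdSketch();
    System.out.println(runner.newJobId());
    System.out.println(runner.stagingDir("alice"));
  }
}

For a single runner instance the effect is the same as the patch: every job ID and staging path it produces carries the same per-JVM random suffix.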

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobID.java

@@ -32,7 +32,7 @@ import org.apache.hadoop.io.Text;
  * the job. JobID consists of two parts. First part 
  * represents the jobtracker identifier, so that jobID to jobtracker map 
  * is defined. For cluster setup this string is the jobtracker 
- * start time, for local setting, it is "local".
+ * start time, for local setting, it is "local" and a random number.
  * Second part of the JobID is the job number. <br> 
  * An example JobID is : 
  * <code>job_200707121733_0003</code> , which represents the third job
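With the javadoc change above, a locally generated ID carries the random number inside its jobtracker-identifier part. A quick illustration using the public JobID(String, int) constructor; the random value shown is made up, and the snippet assumes hadoop-mapreduce-client-core is on the classpath.

import java.util.Random;
import org.apache.hadoop.mapreduce.JobID;

public class LocalJobIdExample {
  public static void main(String[] args) {
    // mirrors LocalJobRunner: the identifier is "local" plus a random number
    int randid = new Random().nextInt(Integer.MAX_VALUE);
    JobID id = new JobID("local" + randid, 1);
    // prints something like: job_local1804289383_0001
    System.out.println(id);
  }
}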