Reverted HADOOP-3924 since it broke the eclipse-plugin compile. merge -r 697068:697067

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/trunk@697156 13f79535-47bb-0310-9956-ffa450edef68
Nigel Daley 16 years ago
parent
commit
108cc0553d
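
For reference, the commit message records that the revert was produced with a reverse svn merge. A minimal sketch of the command sequence, assuming a clean working copy of trunk (the repository URL is taken from the git-svn-id line above):

    # Undo the change committed in r697068 by merging the revision range backwards
    svn merge -r 697068:697067 https://svn.apache.org/repos/asf/hadoop/core/trunk
    # Review the reverted files, then commit
    svn commit -m "Reverted HADOOP-3924 since it broke the eclipse-plugin compile. merge -r 697068:697067"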

+ 0 - 2
CHANGES.txt

@@ -188,8 +188,6 @@ Trunk (unreleased changes)
     HADOOP-4070. Provide a mechanism in Hive for registering UDFs from the
     query language. (tomwhite)
 
-    HADOOP-3924. Adds a KILLED job status (Subramaniam Krishnan via ddas)
-
   IMPROVEMENTS
 
     HADOOP-4106. libhdfs: add time, permission and user attribute support (part 2).

+ 1 - 14
src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopJob.java

@@ -41,8 +41,7 @@ public class HadoopJob {
    */
   public enum JobState {
     PREPARE(JobStatus.PREP), RUNNING(JobStatus.RUNNING), FAILED(
-        JobStatus.FAILED), SUCCEEDED(JobStatus.SUCCEEDED), KILLED(
-        JobStatus.KILLED);
+        JobStatus.FAILED), SUCCEEDED(JobStatus.SUCCEEDED);
 
     final int state;
 
@@ -60,8 +59,6 @@ public class HadoopJob {
           return FAILED;
         case JobStatus.SUCCEEDED:
           return SUCCEEDED;
-        case JobStatus.KILLED:
-          return KILLED;  
         default:
           return null;
       }
@@ -203,8 +200,6 @@ public class HadoopJob {
     if (this.completed) {
       if (this.successful) {
         return JobState.SUCCEEDED;
-      } else if (this.killed) {
-        return JobState.KILLED;
       } else {
         return JobState.FAILED;
       }
@@ -234,13 +229,6 @@ public class HadoopJob {
   public boolean isCompleted() {
     return this.completed;
   }
-  
-  /**
-   * @return
-   */
-  public boolean isKilled() {
-    return this.killed;
-  }
 
   /**
    * @return
@@ -293,7 +281,6 @@ public class HadoopJob {
       this.counters = running.getCounters();
       this.completed = running.isComplete();
       this.successful = running.isSuccessful();
-      this.killed = running.isKilled();
       this.mapProgress = running.mapProgress();
       this.reduceProgress = running.reduceProgress();
       // running.getTaskCompletionEvents(fromEvent);

+ 1 - 3
src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairScheduler.java

@@ -34,7 +34,6 @@ import java.util.Map.Entry;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapred.JobStatus;
 import org.apache.hadoop.util.ReflectionUtils;
 
 /**
@@ -329,8 +328,7 @@ public class FairScheduler extends TaskScheduler {
     List<JobInProgress> toRemove = new ArrayList<JobInProgress>();
     for (JobInProgress job: infos.keySet()) { 
       int runState = job.getStatus().getRunState();
-      if (runState == JobStatus.SUCCEEDED || runState == JobStatus.FAILED
-          || runState == JobStatus.KILLED) {
+      if (runState == JobStatus.SUCCEEDED || runState == JobStatus.FAILED) {
         toRemove.add(job);
       }
     }

+ 0 - 2
src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairScheduler.java

@@ -31,7 +31,6 @@ import java.util.Map;
 import junit.framework.TestCase;
 
 import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.mapred.JobStatus;
 import org.apache.hadoop.mapred.FairScheduler.JobInfo;
 
 public class TestFairScheduler extends TestCase {
@@ -335,7 +334,6 @@ public class TestFairScheduler extends TestCase {
     submitJobs(1, JobStatus.PREP, 10, 10);
     submitJobs(1, JobStatus.SUCCEEDED, 10, 10);
     submitJobs(1, JobStatus.FAILED, 10, 10);
-    submitJobs(1, JobStatus.KILLED, 10, 10);
     assertNull(scheduler.assignTasks(tracker("tt1")));
     advanceTime(100); // Check that we still don't assign jobs after an update
     assertNull(scheduler.assignTasks(tracker("tt1")));

+ 1 - 1
src/mapred/org/apache/hadoop/mapred/EagerTaskInitializationListener.java

@@ -58,7 +58,7 @@ class EagerTaskInitializationListener extends JobInProgressListener {
           LOG.error("Job initialization failed:\n" +
                     StringUtils.stringifyException(t));
           if (job != null) {
-            job.fail();
+            job.kill();
           }
         }
       }

+ 2 - 10
src/mapred/org/apache/hadoop/mapred/JobClient.java

@@ -62,6 +62,7 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.mapred.Counters.Counter;
 import org.apache.hadoop.mapred.Counters.Group;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
@@ -267,8 +268,7 @@ public class JobClient extends Configured implements MRConstants, Tool  {
     public synchronized boolean isComplete() throws IOException {
       updateStatus();
       return (status.getRunState() == JobStatus.SUCCEEDED ||
-              status.getRunState() == JobStatus.FAILED ||
-              status.getRunState() == JobStatus.KILLED);
+              status.getRunState() == JobStatus.FAILED);
     }
 
     /**
@@ -291,14 +291,6 @@ public class JobClient extends Configured implements MRConstants, Tool  {
       }
     }
 
-    /**
-     * Tells the service to get the state of the current job.
-     */
-    public synchronized int getJobState() throws IOException {
-      updateStatus();
-      return status.getRunState();
-    }
-    
     /**
      * Tells the service to terminate the current job.
      */

+ 1 - 32
src/mapred/org/apache/hadoop/mapred/JobHistory.java

@@ -919,37 +919,6 @@ public class JobHistory {
         }
       }
     }
-    /**
-     * Logs job killed event. Closes the job history log file.
-     * 
-     * @param jobid
-     *          job id
-     * @param timestamp
-     *          time when job killed was issued in ms.
-     * @param finishedMaps
-     *          no finished map tasks.
-     * @param finishedReduces
-     *          no of finished reduce tasks.
-     */
-    public static void logKilled(JobID jobid, long timestamp, int finishedMaps,
-        int finishedReduces) {
-      if (!disableHistory) {
-        String logFileKey = JOBTRACKER_UNIQUE_STRING + jobid;
-        ArrayList<PrintWriter> writer = openJobs.get(logFileKey);
-
-        if (null != writer) {
-          JobHistory.log(writer, RecordTypes.Job, new Keys[] { Keys.JOBID,
-              Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS,
-              Keys.FINISHED_REDUCES }, new String[] { jobid.toString(),
-              String.valueOf(timestamp), Values.KILLED.name(),
-              String.valueOf(finishedMaps), String.valueOf(finishedReduces) });
-          for (PrintWriter out : writer) {
-            out.close();
-          }
-          openJobs.remove(logFileKey);
-        }
-      }
-    }
     /**
      * Log job's priority. 
      * @param jobid job id
@@ -967,6 +936,7 @@ public class JobHistory {
         }
       }
     }
+
     /**
      * Log job's submit-time/launch-time 
      * @param jobid job id
@@ -990,7 +960,6 @@ public class JobHistory {
       }
     }
   }
-  
   /**
    * Helper class for logging or reading back events related to Task's start, finish or failure. 
    * All events logged by this class are logged in a separate file per job in 

+ 18 - 48
src/mapred/org/apache/hadoop/mapred/JobInProgress.java

@@ -31,7 +31,6 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.Vector;
 import java.util.concurrent.atomic.AtomicBoolean;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
@@ -84,7 +83,6 @@ class JobInProgress {
   int failedReduceTIPs = 0;
   private volatile boolean launchedCleanup = false;
   private volatile boolean jobKilled = false;
-  private volatile boolean jobFailed = false;
 
   JobPriority priority = JobPriority.NORMAL;
   JobTracker jobtracker = null;
@@ -870,7 +868,7 @@ class JobInProgress {
       return false;
     }
     // check if job has failed or killed
-    if (jobKilled || jobFailed) {
+    if (jobKilled) {
       return true;
     }
     // Check if all maps and reducers have finished.
@@ -1699,13 +1697,10 @@ class JobInProgress {
       }
       //
       // The Job is done
-      // if the job is failed, then mark the job failed.
-      if (jobFailed) {
-        terminateJob(JobStatus.FAILED);
-      }
-      // if the job is killed, then mark the job killed.
+      //
+      // if the job is killed, then mark the job failed.
       if (jobKilled) {
-        terminateJob(JobStatus.KILLED);
+        killJob();
       }
       else {
         jobComplete(metrics);
@@ -1747,31 +1742,24 @@ class JobInProgress {
     }
   }
   
-  private synchronized void terminateJob(int jobState) {
-    if ((status.getRunState() == JobStatus.RUNNING)
-        || (status.getRunState() == JobStatus.PREP)) {
+  private synchronized void killJob() {
+    if ((status.getRunState() == JobStatus.RUNNING) ||
+        (status.getRunState() == JobStatus.PREP)) {
+      this.status = new JobStatus(status.getJobID(),
+                          1.0f, 1.0f, 1.0f, JobStatus.FAILED);
       this.finishTime = System.currentTimeMillis();
-      if (jobState == JobStatus.FAILED) {
-        this.status = new JobStatus(status.getJobID(), 1.0f, 1.0f, 1.0f,
-            JobStatus.FAILED);
-        JobHistory.JobInfo.logFailed(this.status.getJobID(), finishTime,
-            this.finishedMapTasks, this.finishedReduceTasks);
-      } else if (jobState == JobStatus.KILLED) {
-        this.status = new JobStatus(status.getJobID(), 1.0f, 1.0f, 1.0f,
-            JobStatus.KILLED);
-        JobHistory.JobInfo.logKilled(this.status.getJobID(), finishTime,
-            this.finishedMapTasks, this.finishedReduceTasks);
-      }
+      JobHistory.JobInfo.logFailed(this.status.getJobID(), finishTime, 
+              this.finishedMapTasks, this.finishedReduceTasks);
       garbageCollect();
     }
   }
-  
+
   /**
    * Kill the job and all its component tasks.
    */
-  private synchronized void terminate(int jobState) {
-    if ((status.getRunState() == JobStatus.RUNNING)
-        || (status.getRunState() == JobStatus.PREP)) {
+  public synchronized void kill() {
+    if ((status.getRunState() == JobStatus.RUNNING) ||
+         (status.getRunState() == JobStatus.PREP)) {
       LOG.info("Killing job '" + this.status.getJobID() + "'");
       this.runningMapTasks = 0;
       this.runningReduceTasks = 0;
@@ -1784,28 +1772,10 @@ class JobInProgress {
       for (int i = 0; i < reduces.length; i++) {
         reduces[i].kill();
       }
-      if (jobState == JobStatus.FAILED) {
-        jobFailed = true;
-      } else if (jobState == JobStatus.KILLED) {
-        jobKilled = true;
-      }
+      jobKilled = true;
     }
   }
   
-  /**
-   * Kill the job and all its component tasks.
-   */
-  public synchronized void kill() {
-    terminate(JobStatus.KILLED);
-  }
-  
-  /**
-   * Fails the job and all its component tasks.
-   */
-  synchronized void fail() {
-    terminate(JobStatus.FAILED);
-  }
-  
   /**
    * A task assigned to this JobInProgress has reported in as failed.
    * Most of the time, we'll just reschedule execution.  However, after
@@ -1954,9 +1924,9 @@ class JobInProgress {
           } else {
             cleanup[0].kill();
           }
-          terminateJob(JobStatus.FAILED);
+          killJob();
         } else {
-          terminate(JobStatus.FAILED);
+          kill();
         }
       }
       

+ 0 - 1
src/mapred/org/apache/hadoop/mapred/JobStatus.java

@@ -45,7 +45,6 @@ public class JobStatus implements Writable {
   public static final int SUCCEEDED = 2;
   public static final int FAILED = 3;
   public static final int PREP = 4;
-  public static final int KILLED = 5;
 
   private JobID jobid;
   private float mapProgress;

+ 0 - 1
src/mapred/org/apache/hadoop/mapred/JobSubmissionProtocol.java

@@ -45,7 +45,6 @@ interface JobSubmissionProtocol extends VersionedProtocol {
    *             cleanupProgress to JobStatus as part of HADOOP-3150
    * Version 13: Added getJobQueueInfos and getJobQueueInfo(queue name)
    *             and getAllJobs(queue) as a part of HADOOP-3930
-   * Version 14: Added KILLED status to JobStatus as part of HADOOP-3924
    */
   public static final long versionID = 13L;
 

+ 3 - 5
src/mapred/org/apache/hadoop/mapred/JobTracker.java

@@ -29,8 +29,8 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.Date;
-import java.util.HashMap;
 import java.util.HashSet;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
@@ -1541,8 +1541,7 @@ public class JobTracker implements MRConstants, InterTrackerProtocol,
               int rjobRunState = 
                 rjob.getStatus().getRunState();
               if (rjobRunState == JobStatus.SUCCEEDED || 
-                  rjobRunState == JobStatus.FAILED ||
-                  rjobRunState == JobStatus.KILLED) {
+                  rjobRunState == JobStatus.FAILED) {
                 // Ok, this call to removeTaskEntries
                 // is dangerous is some very very obscure
                 // cases; e.g. when rjob completed, hit
@@ -1629,8 +1628,7 @@ public class JobTracker implements MRConstants, InterTrackerProtocol,
     for (Iterator it = jobs.values().iterator(); it.hasNext();) {
       JobInProgress jip = (JobInProgress) it.next();
       JobStatus status = jip.getStatus();
-      if ((status.getRunState() == JobStatus.FAILED)
-          || (status.getRunState() == JobStatus.KILLED)) {
+      if (status.getRunState() == JobStatus.FAILED) {
         v.add(jip);
       }
     }

+ 1 - 9
src/mapred/org/apache/hadoop/mapred/RunningJob.java

@@ -109,7 +109,7 @@ public interface RunningJob {
    * @throws IOException
    */
   public boolean isSuccessful() throws IOException;
-  
+
   /**
    * Blocks until the job is complete.
    * 
@@ -117,14 +117,6 @@ public interface RunningJob {
    */
   public void waitForCompletion() throws IOException;
 
-  /**
-   * Returns the current state of the Job.
-   * {@link JobStatus}
-   * 
-   * @throws IOException
-   */
-  public int getJobState() throws IOException;
-  
   /**
    * Kill the running job.  Blocks until all job tasks have been
    * killed as well.  If the job is no longer running, it simply returns.

+ 0 - 160
src/test/org/apache/hadoop/mapred/TestJobKillAndFail.java

@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.IOException;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
-
-/**
- * A JUnit test to test Kill Job & Fail Job functionality with local file
- * system.
- */
-public class TestJobKillAndFail extends TestCase {
-
-  private static String TEST_ROOT_DIR = new File(System.getProperty(
-      "test.build.data", "/tmp")).toURI().toString().replace(' ', '+');
-
-  private void runJobFail(JobConf conf) throws IOException {
-
-    conf.setJobName("testjobfail");
-    conf.setMapperClass(FailMapper.class);
-
-    RunningJob job = runJob(conf);
-    while (!job.isComplete()) {
-      try {
-        Thread.sleep(100);
-      } catch (InterruptedException e) {
-        break;
-      }
-    }
-    // Checking that the Job got failed
-    assertEquals(job.getJobState(), JobStatus.FAILED);
-  }
-
-  private void runJobKill(JobConf conf) throws IOException {
-
-    conf.setJobName("testjobkill");
-    conf.setMapperClass(KillMapper.class);
-
-    RunningJob job = runJob(conf);
-    while (job.getJobState() != JobStatus.RUNNING) {
-      try {
-        Thread.sleep(100);
-      } catch (InterruptedException e) {
-        break;
-      }
-    }
-    job.killJob();
-    while (job.cleanupProgress() == 0.0f) {
-      try {
-        Thread.sleep(10);
-      } catch (InterruptedException ie) {
-        break;
-      }
-    }
-    // Checking that the Job got killed
-    assertTrue(job.isComplete());
-    assertEquals(job.getJobState(), JobStatus.KILLED);
-  }
-
-  private RunningJob runJob(JobConf conf) throws IOException {
-
-    final Path inDir = new Path(TEST_ROOT_DIR + "/failkilljob/input");
-    final Path outDir = new Path(TEST_ROOT_DIR + "/failkilljob/output");
-
-    // run the dummy sleep map
-    FileSystem fs = FileSystem.get(conf);
-    fs.delete(outDir, true);
-    if (!fs.exists(inDir)) {
-      fs.mkdirs(inDir);
-    }
-    String input = "The quick brown fox\n" + "has many silly\n"
-        + "red fox sox\n";
-    DataOutputStream file = fs.create(new Path(inDir, "part-0"));
-    file.writeBytes(input);
-    file.close();
-
-    conf.setInputFormat(TextInputFormat.class);
-    conf.setOutputKeyClass(Text.class);
-    conf.setOutputValueClass(IntWritable.class);
-
-    FileInputFormat.setInputPaths(conf, inDir);
-    FileOutputFormat.setOutputPath(conf, outDir);
-    conf.setNumMapTasks(1);
-    conf.setNumReduceTasks(0);
-
-    JobClient jobClient = new JobClient(conf);
-    RunningJob job = jobClient.submitJob(conf);
-
-    return job;
-
-  }
-
-  public void testJobFailAndKill() throws IOException {
-    MiniMRCluster mr = null;
-    try {
-      mr = new MiniMRCluster(2, "file:///", 3);
-
-      // run the TCs
-      JobConf conf = mr.createJobConf();
-      runJobFail(conf);
-      runJobKill(conf);
-    } finally {
-      if (mr != null) {
-        mr.shutdown();
-      }
-    }
-  }
-
-  static class FailMapper extends MapReduceBase implements
-      Mapper<WritableComparable, Writable, WritableComparable, Writable> {
-
-    public void map(WritableComparable key, Writable value,
-        OutputCollector<WritableComparable, Writable> out, Reporter reporter)
-        throws IOException {
-
-      throw new RuntimeException("failing map");
-    }
-  }
-
-  static class KillMapper extends MapReduceBase implements
-      Mapper<WritableComparable, Writable, WritableComparable, Writable> {
-
-    public void map(WritableComparable key, Writable value,
-        OutputCollector<WritableComparable, Writable> out, Reporter reporter)
-        throws IOException {
-
-      try {
-        Thread.sleep(100000);
-      } catch (InterruptedException e) {
-        // Do nothing
-      }
-    }
-  }
-}

+ 0 - 1
src/test/org/apache/hadoop/mapred/TestJobQueueTaskScheduler.java

@@ -228,7 +228,6 @@ public class TestJobQueueTaskScheduler extends TestCase {
     submitJobs(1, JobStatus.PREP);
     submitJobs(1, JobStatus.SUCCEEDED);
     submitJobs(1, JobStatus.FAILED);
-    submitJobs(1, JobStatus.KILLED);
     assertNull(scheduler.assignTasks(tracker("tt1")));
   }
   

+ 0 - 7
src/webapps/job/jobdetails.jsp

@@ -226,13 +226,6 @@
                   "<br>\n");
         out.print("<b>Failed in:</b> " + StringUtils.formatTimeDiff(
             job.getFinishTime(), job.getStartTime()) + "<br>\n");
-      } else if (runState == JobStatus.KILLED) {
-        out.print("<b>Status:</b> Killed<br>\n");
-        out.print("<b>Started at:</b> " + new Date(job.getStartTime()) + "<br>\n");
-        out.print("<b>Killed at:</b> " + new Date(job.getFinishTime()) +
-                  "<br>\n");
-        out.print("<b>Killed in:</b> " + StringUtils.formatTimeDiff(
-            job.getFinishTime(), job.getStartTime()) + "<br>\n");
       }
     }
     out.print("<b>Job Cleanup:</b>");