Fix HADOOP-168. Add IOException to the throws clauses of all MapReduce RPC protocol methods. Contributed by Owen.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@397310 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting, 19 years ago
commit 1dafad3980

+ 5 - 0
CHANGES.txt

@@ -101,6 +101,11 @@ Trunk (unreleased)
     Nutch only uses ObjectWritable in intermediate files, so this
     should not be a problem for Nutch.  (Stefan & cutting)
 
+27. Fix HADOOP-168.  MapReduce RPC protocol methods should all declare
+    IOException, so that timeouts are handled appropriately.
+    (omalley via cutting)
+
+
 Release 0.1.1 - 2006-04-08
 
  1. Added CHANGES.txt, logging all significant changes to Hadoop.  (cutting)
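
The rationale in item 27 is worth spelling out: Hadoop's RPC layer reports timeouts as IOException, so a remote caller can only recover from one if the protocol method declares it. Below is a minimal, hypothetical sketch of a TaskTracker-side heartbeat loop that this change enables; the class name HeartbeatLoop, the retry behavior, and the interval are illustrative assumptions, not part of the patch.

import java.io.IOException;

// Hypothetical sketch (not from the patch); assumes it lives in
// org.apache.hadoop.mapred so the package-private interface is visible.
public class HeartbeatLoop implements Runnable {
  private final InterTrackerProtocol jobTracker;  // RPC proxy to the JobTracker
  private final TaskTrackerStatus status;
  private boolean initialContact = true;

  public HeartbeatLoop(InterTrackerProtocol jobTracker, TaskTrackerStatus status) {
    this.jobTracker = jobTracker;
    this.status = status;
  }

  public void run() {
    while (!Thread.currentThread().isInterrupted()) {
      try {
        // Now that emitHeartbeat declares IOException, an RPC timeout is a
        // checked, catchable failure instead of an undeclared one.
        jobTracker.emitHeartbeat(status, initialContact);
        initialContact = false;     // only the first successful call is "initial"
      } catch (IOException e) {
        initialContact = true;      // illustrative: re-introduce ourselves after a lost connection
      }
      try {
        Thread.sleep(10 * 1000);    // heartbeat interval; value is illustrative
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
  }
}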

+ 7 - 4
src/java/org/apache/hadoop/mapred/InterTrackerProtocol.java

@@ -34,16 +34,17 @@ interface InterTrackerProtocol {
    * TaskTracker must also indicate whether this is the first interaction
    * (since state refresh)
    */
-  int emitHeartbeat(TaskTrackerStatus status, boolean initialContact);
+  int emitHeartbeat(TaskTrackerStatus status, 
+                    boolean initialContact) throws IOException;
 
  /** Called to get new tasks from the job tracker for this tracker.*/
-  Task pollForNewTask(String trackerName);
+  Task pollForNewTask(String trackerName) throws IOException;
 
  /** Called to find which tasks run by this tracker should now
    * be closed because their job is complete.  This is used to, e.g., 
    * notify a map task that its output is no longer needed and may 
    * be removed. */
-  String[] pollForTaskWithClosedJob(String trackerName);
+  String[] pollForTaskWithClosedJob(String trackerName) throws IOException;
 
   /** Called by a reduce task to find which map tasks are completed.
    *
@@ -51,7 +52,9 @@ interface InterTrackerProtocol {
    * @param mapTasksNeeded an array of UTF8 naming map task ids whose output is needed.
    * @return an array of MapOutputLocation
    */
-  MapOutputLocation[] locateMapOutputs(String taskId, String[][] mapTasksNeeded);
+  MapOutputLocation[] locateMapOutputs(String taskId, 
+                                       String[][] mapTasksNeeded
+                                       ) throws IOException;
 
   /**
    * The task tracker calls this once, to discern where it can find
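
The locateMapOutputs change above similarly lets the reduce side treat a failed lookup as transient rather than fatal. A hypothetical retry wrapper is sketched below; MapOutputFetcher and locateWithRetry are illustrative names, not code from the patch.

import java.io.IOException;

// Hypothetical sketch: retries a lookup that may fail with an RPC
// timeout, which is only expressible now that the method declares
// IOException. Assumes maxAttempts >= 1.
public class MapOutputFetcher {
  private final InterTrackerProtocol jobTracker;

  public MapOutputFetcher(InterTrackerProtocol jobTracker) {
    this.jobTracker = jobTracker;
  }

  public MapOutputLocation[] locateWithRetry(String taskId,
                                             String[][] mapTasksNeeded,
                                             int maxAttempts) throws IOException {
    IOException last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return jobTracker.locateMapOutputs(taskId, mapTasksNeeded);
      } catch (IOException e) {
        last = e;   // transient RPC failure (e.g. timeout): remember and retry
      }
    }
    throw last;     // retries exhausted: propagate the last failure
  }
}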

+ 6 - 6
src/java/org/apache/hadoop/mapred/JobSubmissionProtocol.java

@@ -34,28 +34,28 @@ interface JobSubmissionProtocol {
      * Get the current status of the cluster
      * @return summary of the state of the cluster
      */
-    public ClusterStatus getClusterStatus();
+    public ClusterStatus getClusterStatus() throws IOException;
     
     /**
      * Kill the indicated job
      */
-    public void killJob(String jobid);
+    public void killJob(String jobid) throws IOException;
 
     /**
      * Grab a handle to a job that is already known to the JobTracker
      */
-    public JobProfile getJobProfile(String jobid);
+    public JobProfile getJobProfile(String jobid) throws IOException;
 
     /**
      * Grab a handle to a job that is already known to the JobTracker
      */
-    public JobStatus getJobStatus(String jobid);
+    public JobStatus getJobStatus(String jobid) throws IOException;
 
     /**
      * Grab a bunch of info on the tasks that make up the job
      */
-    public TaskReport[] getMapTaskReports(String jobid);
-    public TaskReport[] getReduceTaskReports(String jobid);
+    public TaskReport[] getMapTaskReports(String jobid) throws IOException;
+    public TaskReport[] getReduceTaskReports(String jobid) throws IOException;
 
     /**
      * A MapReduce system always operates on a single filesystem.  This
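
On the client side of JobSubmissionProtocol the same pattern applies: a dead or timed-out JobTracker now surfaces as a checked exception at the call site. A minimal sketch, where the hypothetical JobStatusPoller treats any IOException as "JobTracker unreachable":

import java.io.IOException;

// Hypothetical sketch (not from the patch): getJobStatus now declares
// IOException, so an unreachable JobTracker is an ordinary error path.
public class JobStatusPoller {
  private final JobSubmissionProtocol jobTracker;

  public JobStatusPoller(JobSubmissionProtocol jobTracker) {
    this.jobTracker = jobTracker;
  }

  /** Returns the job's status, or null if the JobTracker could not be reached. */
  public JobStatus poll(String jobid) {
    try {
      return jobTracker.getJobStatus(jobid);
    } catch (IOException e) {   // RPC timeout or connection failure
      return null;              // caller decides whether to retry or give up
    }
  }
}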