
HADOOP-796. Provide more convenient access to failed task information in the web interface. Contributed by Sanjay.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@486303 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 18 years ago
parent
commit
c1e3caf380

+ 3 - 0
CHANGES.txt

@@ -51,6 +51,9 @@ Trunk (unreleased changes)
 14. HADOOP-786. Log common exception at debug level.
     (Sanjay Dahiya via cutting)
 
+15. HADOOP-796. Provide more convenient access to failed task
+    information in the web interface.  (Sanjay Dahiya via cutting)
+
 
 Release 0.9.1 - 2006-12-06
 

+ 30 - 0
src/java/org/apache/hadoop/mapred/DefaultJobHistoryParser.java

@@ -3,6 +3,7 @@ package org.apache.hadoop.mapred;
 import java.util.*;
 import java.io.*;
 import org.apache.hadoop.mapred.JobHistory.Keys ; 
+import org.apache.hadoop.mapred.JobHistory.Values;
 
 /**
  * Default parser for job history files. It creates object model from 
@@ -153,4 +154,33 @@ public class DefaultJobHistoryParser {
       return jobTrackerToJobs;
     }
   }
+  
+  
+  // Call this only for jobs that succeeded, for better results. 
+  static class BadNodesFilter implements JobHistory.Listener {
+    private Map<String, Set<String>> badNodesToFailedTasks = new HashMap<String, Set<String>>(); 
+    Map<String, Set<String>> getValues(){
+      return badNodesToFailedTasks; 
+    }
+    public void handle(JobHistory.RecordTypes recType, Map<Keys, String> values)
+      throws IOException {
+      
+      if (recType.equals(JobHistory.RecordTypes.MapAttempt) || 
+          recType.equals(JobHistory.RecordTypes.ReduceAttempt)) {
+        
+        if (Values.FAILED.name().equals(values.get(Keys.TASK_STATUS))) {
+          String hostName = values.get(Keys.HOSTNAME);
+          String taskid = values.get(Keys.TASKID); 
+          Set<String> tasks = badNodesToFailedTasks.get(hostName); 
+          if (null == tasks) {
+            tasks = new TreeSet<String>(); 
+            tasks.add(taskid);
+            badNodesToFailedTasks.put(hostName, tasks);
+          } else {
+            tasks.add(taskid);
+          }
+        }
+      }      
+    }
+  }
 }
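
For reference, a minimal sketch of driving the new listener outside the JSP. It assumes the driver lives in the org.apache.hadoop.mapred package (BadNodesFilter is declared with default access); the BadNodesReport class name and the command-line argument are hypothetical, and the history file path follows the ${hadoop.log.dir}/history, <jobTrackerId>_<jobid> convention used in jobdetailshistory.jsp below.

package org.apache.hadoop.mapred;

import java.io.File;
import java.util.Map;
import java.util.Set;

// Hypothetical driver, not part of this patch: prints each node along with the
// ids of tasks whose attempts failed there, using the new BadNodesFilter.
public class BadNodesReport {
  public static void main(String[] args) throws Exception {
    // args[0] is a job history file, e.g. ${hadoop.log.dir}/history/<jobTrackerId>_<jobid>
    DefaultJobHistoryParser.BadNodesFilter filter =
      new DefaultJobHistoryParser.BadNodesFilter();
    JobHistory.parseHistory(new File(args[0]), filter);

    Map<String, Set<String>> badNodes = filter.getValues();
    for (Map.Entry<String, Set<String>> entry : badNodes.entrySet()) {
      System.out.println(entry.getKey() + ": " + entry.getValue());
    }
  }
}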

+ 34 - 0
src/webapps/job/jobdetailshistory.jsp

@@ -101,6 +101,40 @@
 	<td><%=StringUtils.getFormattedTimeWithDiff(dateFormat, reduceFinished, reduceStarted) %></td>
 </tr>
  </table>
+
+<br/>
+ <%
+	DefaultJobHistoryParser.BadNodesFilter filter = new DefaultJobHistoryParser.BadNodesFilter();
+	String dir = System.getProperty("hadoop.log.dir") + File.separator + "history" ; 
+ 
+	JobHistory.parseHistory(new File(dir, jobTrackerId+"_" + jobid), filter); 
+	Map<String, Set<String>> badNodes = filter.getValues(); 
+	if( badNodes.size() > 0 ) {
+ %>
+<h3>Failed task attempts by node</h3>
+<table border="1">
+<tr><td>Hostname</td><td>Failed Tasks</td></tr>
+ <%	  
+	for( String node : badNodes.keySet() ) {
+	  Set<String> failedTasks = badNodes.get(node); 
+%>
+	<tr>
+		<td><%=node %></td>
+		<td>
+<%
+		for( String t : failedTasks ) {
+%>
+		 <a href="taskdetailshistory.jsp?jobid=<%=jobid%>&jobTrackerId=<%=jobTrackerId %>&taskid=<%=t %>"><%=t %></a>,&nbsp;
+<%		  
+		}
+%>	
+		</td>
+	</tr>
+<%	  
+	}
+%>
+</table>
+<%	} %>
  </center>
 
 </body></html>