
HADOOP-1387. A number of minor 'performance' code cleanups suggested by findbugs. Contributed by Arun.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@540336 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting, 18 years ago
parent commit 307ec14f3a
31 changed files with 101 additions and 117 deletions
  1. +3 -0    CHANGES.txt
  2. +1 -1    src/java/org/apache/hadoop/fs/LocalDirAllocator.java
  3. +0 -4    src/java/org/apache/hadoop/io/MapFile.java
  4. +7 -7    src/java/org/apache/hadoop/io/ObjectWritable.java
  5. +0 -2    src/java/org/apache/hadoop/io/SequenceFile.java
  6. +0 -2    src/java/org/apache/hadoop/io/compress/CompressorStream.java
  7. +2 -2    src/java/org/apache/hadoop/ipc/Client.java
  8. +0 -6    src/java/org/apache/hadoop/ipc/Server.java
  9. +2 -2    src/java/org/apache/hadoop/mapred/BasicTypeSorterBase.java
  10. +6 -4   src/java/org/apache/hadoop/mapred/Counters.java
  11. +12 -14 src/java/org/apache/hadoop/mapred/JobClient.java
  12. +10 -8  src/java/org/apache/hadoop/mapred/JobHistory.java
  13. +1 -3   src/java/org/apache/hadoop/mapred/JobInProgress.java
  14. +1 -1   src/java/org/apache/hadoop/mapred/JobProfile.java
  15. +2 -3   src/java/org/apache/hadoop/mapred/JobTracker.java
  16. +0 -2   src/java/org/apache/hadoop/mapred/MapRunner.java
  17. +0 -5   src/java/org/apache/hadoop/mapred/MapTask.java
  18. +3 -3   src/java/org/apache/hadoop/mapred/ReduceTask.java
  19. +2 -2   src/java/org/apache/hadoop/mapred/Task.java
  20. +1 -1   src/java/org/apache/hadoop/mapred/TaskInProgress.java
  21. +2 -3   src/java/org/apache/hadoop/mapred/TaskLog.java
  22. +4 -3   src/java/org/apache/hadoop/mapred/TaskRunner.java
  23. +1 -1   src/java/org/apache/hadoop/mapred/TaskTracker.java
  24. +1 -1   src/java/org/apache/hadoop/mapred/lib/FieldSelectionMapReduce.java
  25. +2 -2   src/java/org/apache/hadoop/mapred/lib/aggregate/ValueHistogram.java
  26. +2 -2   src/java/org/apache/hadoop/tools/Logalyzer.java
  27. +4 -4   src/java/org/apache/hadoop/util/StringUtils.java
  28. +19 -19 src/webapps/job/analysejobhistory.jsp
  29. +6 -6   src/webapps/job/jobdetails.jsp
  30. +3 -2   src/webapps/job/jobdetailshistory.jsp
  31. +4 -2   src/webapps/job/jobhistory.jsp

+ 3 - 0
CHANGES.txt

@@ -42,6 +42,9 @@ Trunk (unreleased changes)
  13. HADOOP-1393.  Remove a potential unexpected negative number from
      uses of random number generator. (omalley via cutting)
 
+ 14. HADOOP-1387.  A number of "performance" code-cleanups suggested
+     by findbugs.  (Arun C Murthy via cutting)
+
 
 Branch 0.13 (unreleased changes)
 

+ 1 - 1
src/java/org/apache/hadoop/fs/LocalDirAllocator.java

@@ -148,7 +148,7 @@ public class LocalDirAllocator {
     }
   }
     
-  private class AllocatorPerContext {
+  private static class AllocatorPerContext {
 
     private final Log LOG =
       LogFactory.getLog("org.apache.hadoop.fs.AllocatorPerContext");
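
FindBugs flags inner classes that never touch their enclosing instance (its "should be a static inner class" warning), because every instance of a non-static inner class carries a hidden reference to the outer object. A minimal illustration of the pattern, using hypothetical class names rather than Hadoop's:

    public class Outer {
        // Non-static inner class: each instance keeps an implicit Outer.this
        // reference, enlarging the object and pinning the outer instance.
        class Holder { int value; }

        // Static nested class: no hidden reference. This is all the patch
        // changes for AllocatorPerContext here and for InMemUncompressedBytes
        // in BasicTypeSorterBase below.
        static class StaticHolder { int value; }
    }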

+ 0 - 4
src/java/org/apache/hadoop/io/MapFile.java

@@ -193,8 +193,6 @@ public class MapFile {
     private int seekIndex = -1;
     private long firstPosition;
 
-    private WritableComparable getKey;
-
     // the data, on disk
     private SequenceFile.Reader data;
     private SequenceFile.Reader index;
@@ -235,8 +233,6 @@ public class MapFile {
       else
         this.comparator = comparator;
 
-      this.getKey = this.comparator.newKey();
-
       // open the index
       this.index = new SequenceFile.Reader(fs, indexFile, conf);
     }
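
This hunk, and the similar ones below in SequenceFile, CompressorStream, MapRunner, MapTask, JobInProgress, JobTracker and TaskLog, simply deletes private fields that are written in the constructor but never read afterwards; FindBugs reports these as unread fields, and dropping them removes a reference per instance plus a little constructor work. A tiny sketch of the pattern, not Hadoop code:

    public class Reader {
        private final byte[] data;
        // private Object getKey;        // assigned but never read: the kind of field FindBugs flags

        public Reader(byte[] data) {
            this.data = data;
            // this.getKey = new Object(); // assignments like this are deleted along with the field
        }

        public int size() { return data.length; }
    }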

+ 7 - 7
src/java/org/apache/hadoop/io/ObjectWritable.java

@@ -188,19 +188,19 @@ public class ObjectWritable implements Writable, Configurable {
       if (declaredClass == Boolean.TYPE) {             // boolean
         instance = Boolean.valueOf(in.readBoolean());
       } else if (declaredClass == Character.TYPE) {    // char
-        instance = new Character(in.readChar());
+        instance = Character.valueOf(in.readChar());
       } else if (declaredClass == Byte.TYPE) {         // byte
-        instance = new Byte(in.readByte());
+        instance = Byte.valueOf(in.readByte());
       } else if (declaredClass == Short.TYPE) {        // short
-        instance = new Short(in.readShort());
+        instance = Short.valueOf(in.readShort());
       } else if (declaredClass == Integer.TYPE) {      // int
-        instance = new Integer(in.readInt());
+        instance = Integer.valueOf(in.readInt());
       } else if (declaredClass == Long.TYPE) {         // long
-        instance = new Long(in.readLong());
+        instance = Long.valueOf(in.readLong());
       } else if (declaredClass == Float.TYPE) {        // float
-        instance = new Float(in.readFloat());
+        instance = Float.valueOf(in.readFloat());
       } else if (declaredClass == Double.TYPE) {       // double
-        instance = new Double(in.readDouble());
+        instance = Double.valueOf(in.readDouble());
       } else if (declaredClass == Void.TYPE) {         // void
         instance = null;
       } else {
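
The substitution here is wrapper constructors to static valueOf() factories. new Integer(...) and friends always allocate, while valueOf() may hand back a cached instance (guaranteed for Integer values in -128..127) and is also what autoboxing compiles to, so the later Client, JobInProgress, ReduceTask, FieldSelectionMapReduce and ValueHistogram hunks that rely on plain autoboxing are the same fix. A standalone sketch:

    public class BoxingDemo {
        public static void main(String[] args) {
            Integer viaCtor = new Integer(42);        // always allocates (and is deprecated in modern JDKs)
            Integer viaValueOf = Integer.valueOf(42); // may reuse a cached instance
            Integer autoboxed = 42;                   // autoboxing compiles to Integer.valueOf(42)

            System.out.println(viaValueOf == autoboxed); // true: same cached object for small values
            System.out.println(viaCtor == viaValueOf);   // false: the constructor made a fresh object
        }
    }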

+ 0 - 2
src/java/org/apache/hadoop/io/SequenceFile.java

@@ -596,7 +596,6 @@ public class SequenceFile {
     Configuration conf;
     FSDataOutputStream out;
     DataOutputBuffer buffer = new DataOutputBuffer();
-    Path target = null;
 
     Class keyClass;
     Class valClass;
@@ -690,7 +689,6 @@ public class SequenceFile {
               Class keyClass, Class valClass,
               boolean compress, CompressionCodec codec, Metadata metadata) 
       throws IOException {
-      this.target = name;
       this.conf = conf;
       this.out = out;
       this.keyClass = keyClass;

+ 0 - 2
src/java/org/apache/hadoop/io/compress/CompressorStream.java

@@ -28,11 +28,9 @@ class CompressorStream extends CompressionOutputStream {
   Compressor compressor;
   byte[] buffer;
   boolean closed = false;
-  OutputStream rawOut;
   
   public CompressorStream(OutputStream out, Compressor compressor, int bufferSize) {
     super(out);
-    rawOut = out;
 
     if (out == null || compressor == null) {
       throw new NullPointerException();

+ 2 - 2
src/java/org/apache/hadoop/ipc/Client.java

@@ -262,7 +262,7 @@ public class Client {
           if (LOG.isDebugEnabled())
             LOG.debug(getName() + " got value #" + id);
 
-          Call call = (Call)calls.remove(new Integer(id));
+          Call call = (Call)calls.remove(id);
           boolean isError = in.readBoolean();     // read if error
           if (isError) {
             call.setResult(null, WritableUtils.readString(in),
@@ -308,7 +308,7 @@ public class Client {
     public void sendParam(Call call) throws IOException {
       boolean error = true;
       try {
-        calls.put(new Integer(call.id), call);
+        calls.put(call.id, call);
         synchronized (out) {
           if (LOG.isDebugEnabled())
             LOG.debug(getName() + " sending #" + call.id);

+ 0 - 6
src/java/org/apache/hadoop/ipc/Server.java

@@ -440,12 +440,6 @@ public abstract class Server {
       return lastContact;
     }
 
-    private boolean timedOut() {
-      if (System.currentTimeMillis() -  lastContact > maxIdleTime)
-        return true;
-      return false;
-    }
-
     private boolean timedOut(long currentTime) {
       if (currentTime -  lastContact > maxIdleTime)
         return true;

+ 2 - 2
src/java/org/apache/hadoop/mapred/BasicTypeSorterBase.java

@@ -50,7 +50,7 @@ abstract class BasicTypeSorterBase implements BufferSorter {
   //12 => 4 for keyoffsets, 4 for keylengths, 4 for valueLengths, and
   //4 for indices into startOffsets array in the
   //pointers array (ignored the partpointers list itself)
-  private final int BUFFERED_KEY_VAL_OVERHEAD = 16;
+  static private final int BUFFERED_KEY_VAL_OVERHEAD = 16;
 
   //Implementation of methods of the SorterBase interface
   //
@@ -176,7 +176,7 @@ class MRSortResultIterator implements RawKeyValueIterator {
   
   //An implementation of the ValueBytes interface for the in-memory value
   //buffers. 
-  private class InMemUncompressedBytes implements ValueBytes {
+  private static class InMemUncompressedBytes implements ValueBytes {
     private byte[] data;
     int start;
     int dataSize;

+ 6 - 4
src/java/org/apache/hadoop/mapred/Counters.java

@@ -321,12 +321,14 @@ public class Counters implements Writable {
   
   public synchronized void write(DataOutput out) throws IOException {
     out.writeInt(counters.size());
-    for (String groupName : counters.keySet()) {
+    for (Map.Entry<String, Map<Integer, CounterRec>> e1 : counters.entrySet()) {
+      String groupName = e1.getKey();
+      Map<Integer, CounterRec> map = e1.getValue();
       UTF8.writeString(out, groupName);
-      Map<Integer,CounterRec> map = counters.get(groupName);
       out.writeInt(map.size());
-      for (Integer ordinal : map.keySet()) {
-        CounterRec counter = map.get(ordinal);
+      for (Map.Entry<Integer, CounterRec> e2 : map.entrySet()) {
+        Integer ordinal = e2.getKey();
+        CounterRec counter = e2.getValue();
         out.writeInt(ordinal);
         UTF8.writeString(out, counter.name);
         out.writeLong(counter.value);
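
Iterating keySet() and calling get() inside the loop performs a second map lookup for every key; iterating entrySet() (or values(), when the key is unused) visits each mapping once, which is the FindBugs "inefficient map iterator" suggestion. The JobHistory and JSP hunks below apply the same change. A small self-contained example:

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class EntrySetDemo {
        public static void main(String[] args) {
            Map<String, Long> counters = new LinkedHashMap<String, Long>();
            counters.put("maps", 10L);
            counters.put("reduces", 2L);

            // One pass, no extra lookups: key and value come from the same entry.
            for (Map.Entry<String, Long> e : counters.entrySet()) {
                System.out.println(e.getKey() + " = " + e.getValue());
            }

            // The pattern being removed: an extra hash lookup per key.
            for (String key : counters.keySet()) {
                System.out.println(key + " = " + counters.get(key));
            }
        }
    }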

+ 12 - 14
src/java/org/apache/hadoop/mapred/JobClient.java

@@ -286,27 +286,25 @@ public class JobClient extends ToolBase implements MRConstants  {
     if ((tarchives != null) || (tfiles != null)) {
       // prepare these archives for md5 checksums
       if (tarchives != null) {
-        String md5Archives = StringUtils.byteToHexString(DistributedCache
-                                                         .createMD5(tarchives[0], job));
+        StringBuffer md5Archives = 
+          new StringBuffer(StringUtils.byteToHexString(DistributedCache.createMD5(tarchives[0], job)));
         for (int i = 1; i < tarchives.length; i++) {
-          md5Archives = md5Archives
-            + ","
-            + StringUtils.byteToHexString(DistributedCache
-                                          .createMD5(tarchives[i], job));
+          md5Archives.append(",");
+          md5Archives.append(StringUtils.byteToHexString(DistributedCache
+                                          .createMD5(tarchives[i], job)));
         }
-        DistributedCache.setArchiveMd5(job, md5Archives);
+        DistributedCache.setArchiveMd5(job, md5Archives.toString());
         //job.set("mapred.cache.archivemd5", md5Archives);
       }
       if (tfiles != null) {
-        String md5Files = StringUtils.byteToHexString(DistributedCache
-                                                      .createMD5(tfiles[0], job));
+        StringBuffer md5Files = 
+          new StringBuffer(StringUtils.byteToHexString(DistributedCache.createMD5(tfiles[0], job)));
         for (int i = 1; i < tfiles.length; i++) {
-          md5Files = md5Files
-            + ","
-            + StringUtils.byteToHexString(DistributedCache
-                                          .createMD5(tfiles[i], job));
+            md5Files.append(",");
+            md5Files.append(StringUtils.byteToHexString(DistributedCache
+                                          .createMD5(tfiles[i], job)));
         }
-        DistributedCache.setFileMd5(job, md5Files);
+        DistributedCache.setFileMd5(job, md5Files.toString());
         //"mapred.cache.filemd5", md5Files);
       }
     }
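
String concatenation with + inside a loop re-copies the accumulated string on every iteration, so the cost grows quadratically with the number of parts; appending to a StringBuffer (the pre-Java-5 idiom; StringBuilder would do today where no synchronization is needed) keeps it linear. StringUtils.uriToString and TaskRunner below get the same treatment. A sketch of the join loop, with a hypothetical helper name rather than the Hadoop method:

    public class JoinDemo {
        // Hypothetical helper, not Hadoop code: builds a comma-separated list
        // the way the patched JobClient code assembles its MD5 strings.
        static String join(String[] parts) {
            StringBuffer sb = new StringBuffer(parts[0]);
            for (int i = 1; i < parts.length; i++) {
                sb.append(",");
                sb.append(parts[i]);
            }
            return sb.toString();
        }

        public static void main(String[] args) {
            System.out.println(join(new String[] {"a", "b", "c"})); // prints a,b,c
        }
    }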

+ 10 - 8
src/java/org/apache/hadoop/mapred/JobHistory.java

@@ -690,9 +690,9 @@ public class JobHistory {
           
           // find job that started more than one month back and remove them
           // for jobtracker instances which dont have a job in past one month 
-          // remove the jobtracker start timestamp as well. 
-          for (String jobTrackerId : jobTrackersToJobs.keySet()){
-            Map<String, JobHistory.JobInfo> jobs = jobTrackersToJobs.get(jobTrackerId);
+          // remove the jobtracker start timestamp as well.
+          for (Map<String, JobHistory.JobInfo> jobs : 
+                  jobTrackersToJobs.values()) {
             for(Iterator iter = jobs.keySet().iterator(); iter.hasNext(); iter.next()){
               JobHistory.JobInfo job = jobs.get(iter.next());
               if (now - job.getLong(Keys.SUBMIT_TIME) > THIRTY_DAYS_IN_MS) {
@@ -705,14 +705,16 @@ public class JobHistory {
           }
           masterIndex.close(); 
           masterIndex = new PrintWriter(logFile);
-          // delete old history and write back to a new file 
-          for (String jobTrackerId : jobTrackersToJobs.keySet()){
-            Map<String, JobHistory.JobInfo> jobs = jobTrackersToJobs.get(jobTrackerId);
+          // delete old history and write back to a new file
+          for (Map.Entry<String, Map<String, JobHistory.JobInfo>> entry :
+                  jobTrackersToJobs.entrySet()) {
+            String jobTrackerId = entry.getKey();
+            Map<String, JobHistory.JobInfo> jobs = entry.getValue();
+
             
             log(masterIndex, RecordTypes.Jobtracker, Keys.START_TIME, jobTrackerId);
 
-            for(String jobId : jobs.keySet()){
-              JobHistory.JobInfo job = jobs.get(jobId);
+            for(JobHistory.JobInfo job : jobs.values()){
               Map<Keys, String> values = job.getValues();
               
               log(masterIndex, RecordTypes.Job, 

+ 1 - 3
src/java/org/apache/hadoop/mapred/JobInProgress.java

@@ -105,8 +105,6 @@ class JobInProgress {
   }
   private Counters jobCounters = new Counters();
   
-  private Counters mapCounters = new Counters();
-  private Counters reduceCounters = new Counters();
   private MetricsRecord jobMetrics;
   
   /**
@@ -563,7 +561,7 @@ class JobInProgress {
 
       Integer trackerFailures = trackerToFailuresMap.get(trackerHostName);
       if (trackerFailures == null) {
-        trackerFailures = new Integer(0);
+        trackerFailures = 0;
       }
       trackerToFailuresMap.put(trackerHostName, ++trackerFailures);
 

+ 1 - 1
src/java/org/apache/hadoop/mapred/JobProfile.java

@@ -95,7 +95,7 @@ public class JobProfile implements Writable {
    */
   public URL getURL() {
     try {
-      return new URL(url.toString());
+      return new URL(url);
     } catch (IOException ie) {
       return null;
     }

+ 2 - 3
src/java/org/apache/hadoop/mapred/JobTracker.java

@@ -222,7 +222,7 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, JobSubmiss
     public void addNewTask(String taskName) {
       synchronized (launchingTasks) {
         launchingTasks.put(taskName, 
-                           new Long(System.currentTimeMillis()));
+                           System.currentTimeMillis());
       }
     }
       
@@ -495,7 +495,6 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, JobSubmiss
   String localMachine;
   long startTime;
   int totalSubmissions = 0;
-  Random r = new Random();
 
   private int maxCurrentTasks;
   private HostsFileReader hostsReader;
@@ -667,7 +666,7 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, JobSubmiss
     // The rpc/web-server ports can be ephemeral ports... 
     // ... ensure we have the correct info
     this.port = interTrackerServer.getListenerAddress().getPort();
-    this.conf.set("mapred.job.tracker", new String(this.localMachine + ":" + this.port));
+    this.conf.set("mapred.job.tracker", (this.localMachine + ":" + this.port));
     LOG.info("JobTracker up at: " + this.port);
     this.infoPort = this.infoServer.getPort();
     this.conf.setInt("mapred.job.tracker.info.port", this.infoPort); 

+ 0 - 2
src/java/org/apache/hadoop/mapred/MapRunner.java

@@ -26,11 +26,9 @@ import org.apache.hadoop.util.ReflectionUtils;
 
 /** Default {@link MapRunnable} implementation.*/
 public class MapRunner implements MapRunnable {
-  private JobConf job;
   private Mapper mapper;
 
   public void configure(JobConf job) {
-    this.job = job;
     this.mapper = (Mapper)ReflectionUtils.newInstance(job.getMapperClass(),
                                                       job);
   }

+ 0 - 5
src/java/org/apache/hadoop/mapred/MapTask.java

@@ -239,11 +239,8 @@ class MapTask extends Task {
 
     private JobConf job;
 
-    private TaskUmbilicalProtocol umbilical;
-
     public DirectMapOutputCollector(TaskUmbilicalProtocol umbilical,
         JobConf job, Reporter reporter) throws IOException {
-      this.umbilical = umbilical;
       this.job = job;
       this.reporter = reporter;
       String finalName = getTipId();
@@ -274,7 +271,6 @@ class MapTask extends Task {
 
     private final int partitions;
     private Partitioner partitioner;
-    private TaskUmbilicalProtocol umbilical;
     private JobConf job;
     private Reporter reporter;
 
@@ -304,7 +300,6 @@ class MapTask extends Task {
       maxBufferSize = job.getInt("io.sort.mb", 100) * 1024 * 1024;
       keyValBuffer = new DataOutputBuffer();
 
-      this.umbilical = umbilical;
       this.job = job;
       this.reporter = reporter;
       this.comparator = job.getOutputKeyComparator();

+ 3 - 3
src/java/org/apache/hadoop/mapred/ReduceTask.java

@@ -839,7 +839,7 @@ class ReduceTask extends Task {
       probe_sample_size = Math.max(numCopiers*5, 50);
       
       for (int i = 0; i < numOutputs; i++) {
-        neededOutputs.add(new Integer(i));
+        neededOutputs.add(i);
         copyPhase.addPhase();       // add sub-phase per file
       }
       
@@ -975,7 +975,7 @@ class ReduceTask extends Task {
                 currentTime = System.currentTimeMillis();
                 long nextContact = currentTime + 60 * 1000 +
                   backoff.nextInt(maxBackoff*1000);
-                penaltyBox.put(cr.getHost(), new Long(nextContact));          
+                penaltyBox.put(cr.getHost(), nextContact);          
                 LOG.warn(reduceTask.getTaskId() + " adding host " +
                          cr.getHost() + " to penalty box, next contact in " +
                          ((nextContact-currentTime)/1000) + " seconds");
@@ -1131,7 +1131,7 @@ class ReduceTask extends Task {
       lastPollTime = currentTime;
       
       TaskCompletionEvent t[] = umbilical.getMapCompletionEvents(
-                                                                 reduceTask.getJobId().toString(),
+                                                                 reduceTask.getJobId(),
                                                                  fromEventId.get(),
                                                                  probe_sample_size);
       

+ 2 - 2
src/java/org/apache/hadoop/mapred/Task.java

@@ -139,7 +139,7 @@ abstract class Task implements Writable, Configurable {
     if (taskOutputPath != null) {
       Text.writeString(out, taskOutputPath.toString());
     } else {
-      Text.writeString(out, new String(""));
+      Text.writeString(out, "");
     }
   }
   public void readFields(DataInput in) throws IOException {
@@ -159,7 +159,7 @@ abstract class Task implements Writable, Configurable {
   public String toString() { return taskId; }
 
   private Path getTaskOutputPath(JobConf conf) {
-    return new Path(conf.getOutputPath(), new String("_" + taskId));
+    return new Path(conf.getOutputPath(), ("_" + taskId));
   }
   
   /**
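
This hunk, the JobTracker hunk above, and the TaskInProgress, TaskLog and Logalyzer hunks below drop new String(...) wrappers: the argument is already a String (a literal or the result of concatenation), so the extra constructor only produces a redundant copy, which FindBugs reports as use of the inefficient String(String) constructor. A tiny illustration:

    public class NewStringDemo {
        public static void main(String[] args) {
            String direct = "task_" + 1;             // concatenation already yields a String
            String copied = new String("task_" + 1); // redundant copy of the same contents

            System.out.println(direct.equals(copied)); // true: identical text
            System.out.println(direct == copied);      // false: the constructor made a needless extra object
        }
    }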

+ 1 - 1
src/java/org/apache/hadoop/mapred/TaskInProgress.java

@@ -633,7 +633,7 @@ class TaskInProgress {
     // Create the 'taskid'; do not count the 'killed' tasks against the job!
     String taskid = null;
     if (nextTaskId < (MAX_TASK_EXECS + maxTaskAttempts + numKilledTasks)) {
-      taskid = new String("task_" + taskIdPrefix + "_" + nextTaskId);
+      taskid = "task_" + taskIdPrefix + "_" + nextTaskId;
       ++nextTaskId;
     } else {
       LOG.warn("Exceeded limit of " + (MAX_TASK_EXECS + maxTaskAttempts) +

+ 2 - 3
src/java/org/apache/hadoop/mapred/TaskLog.java

@@ -82,7 +82,6 @@ class TaskLog {
    */
   static class Writer {
     private String taskId;
-    private JobConf conf;
     private LogFilter filter;
 
     private final File taskLogDir;
@@ -245,8 +244,8 @@ class TaskLog {
     }
     
     private synchronized void writeIndexRecord() throws IOException {
-      String indexRecord = new String(currentSplit + "|" + 
-                                      splitOffset + "|" + splitLength + "\n");
+      String indexRecord = currentSplit + "|" + splitOffset + "|" + 
+                               splitLength + "\n";
       splitIndex.write(indexRecord.getBytes());
       splitIndex.flush();
     }

+ 4 - 3
src/java/org/apache/hadoop/mapred/TaskRunner.java

@@ -91,11 +91,12 @@ abstract class TaskRunner extends Thread {
     if (p == null){
       return null;
     }
-    String str = p[0].toString();
+    StringBuffer str = new StringBuffer(p[0].toString());
     for (int i = 1; i < p.length; i++){
-      str = str + "," + p[i].toString();
+      str.append(",");
+      str.append(p[i].toString());
     }
-    return str;
+    return str.toString();
   }
   
   public final void run() {

+ 1 - 1
src/java/org/apache/hadoop/mapred/TaskTracker.java

@@ -1864,7 +1864,7 @@ public class TaskTracker
    * @author Owen O'Malley
    */
   public static class MapOutputServlet extends HttpServlet {
-    private final int MAX_BYTES_TO_READ = 64 * 1024;
+    private static final int MAX_BYTES_TO_READ = 64 * 1024;
     public void doGet(HttpServletRequest request, 
                       HttpServletResponse response
                       ) throws ServletException, IOException {

+ 1 - 1
src/java/org/apache/hadoop/mapred/lib/FieldSelectionMapReduce.java

@@ -208,7 +208,7 @@ public class FieldSelectionMapReduce implements Mapper, Reducer {
         int startPos = Integer.parseInt(start);
         int endPos = Integer.parseInt(end);
         for (j = startPos; j <= endPos; j++) {
-          fieldList.add(new Integer(j));
+          fieldList.add(j);
         }
       }
     }

+ 2 - 2
src/java/org/apache/hadoop/mapred/lib/aggregate/ValueHistogram.java

@@ -59,9 +59,9 @@ public class ValueHistogram implements ValueAggregator {
     long inc = Long.parseLong(countStr);
 
     if (count == null) {
-      count = new Long(inc);
+      count = inc;
     } else {
-      count = new Long(count.longValue() + inc);
+      count = count.longValue() + inc;
     }
     items.put(valStr, count);
   }

+ 2 - 2
src/java/org/apache/hadoop/tools/Logalyzer.java

@@ -177,8 +177,8 @@ public class Logalyzer {
     doArchive(String logListURI, String archiveDirectory)
     throws IOException
   {
-    String destURL = new String("hdfs://" + fsConfig.get("fs.default.name", "local") + 
-                                archiveDirectory);
+    String destURL = "hdfs://" + fsConfig.get("fs.default.name", "local") + 
+                         archiveDirectory;
     CopyFiles.copy(fsConfig, logListURI, destURL, true, false);
   }
   

+ 4 - 4
src/java/org/apache/hadoop/util/StringUtils.java

@@ -159,12 +159,12 @@ public class StringUtils {
    * @param uris
    */
   public static String uriToString(URI[] uris){
-    String ret = null;
-    ret = uris[0].toString();
+    StringBuffer ret = new StringBuffer(uris[0].toString());
     for(int i = 1; i < uris.length;i++){
-      ret = ret + "," + uris[i].toString();
+      ret.append(",");
+      ret.append(uris[i].toString());
     }
-    return ret;
+    return ret.toString();
   }
   
   /**

+ 19 - 19
src/webapps/job/analysejobhistory.jsp

@@ -82,18 +82,18 @@
 	}
 	Comparator<JobHistory.Task> cMap = new Comparator<JobHistory.Task>(){
 	  public int compare(JobHistory.Task t1, JobHistory.Task t2){
-	    Long l1 = new Long(t1.getLong(Keys.FINISH_TIME) - t1.getLong(Keys.START_TIME)); 
-	    Long l2 = new Long(t2.getLong(Keys.FINISH_TIME) - t2.getLong(Keys.START_TIME)) ;
-	    return l2.compareTo(l1); 
+	    long l1 = t1.getLong(Keys.FINISH_TIME) - t1.getLong(Keys.START_TIME); 
+	    long l2 = t2.getLong(Keys.FINISH_TIME) - t2.getLong(Keys.START_TIME);
+      return (l2<l1 ? -1 : (l2==l1 ? 0 : 1));
 	  }
 	}; 
 	Comparator<JobHistory.Task> cShuffle = new Comparator<JobHistory.Task>(){
 	  public int compare(JobHistory.Task t1, JobHistory.Task t2){
-	    Long l1 = new Long(t1.getLong(Keys.SHUFFLE_FINISHED) - 
-	                       t1.getLong(Keys.START_TIME)); 
-	    Long l2 = new Long(t2.getLong(Keys.SHUFFLE_FINISHED) - 
-	                       t2.getLong(Keys.START_TIME)) ;
-	    return l2.compareTo(l1); 
+	    long l1 = t1.getLong(Keys.SHUFFLE_FINISHED) - 
+	                       t1.getLong(Keys.START_TIME); 
+	    long l2 = t2.getLong(Keys.SHUFFLE_FINISHED) - 
+	                       t2.getLong(Keys.START_TIME);
+      return (l2<l1 ? -1 : (l2==l1 ? 0 : 1));
 	  }
 	}; 
 	Arrays.sort(mapTasks, cMap);
@@ -128,9 +128,9 @@
     Comparator<JobHistory.Task> cFinishMapRed = 
       new Comparator<JobHistory.Task>() {
       public int compare(JobHistory.Task t1, JobHistory.Task t2){
-        Long l1 = new Long(t1.getLong(Keys.FINISH_TIME)); 
-        Long l2 = new Long(t2.getLong(Keys.FINISH_TIME));
-        return l2.compareTo(l1); 
+        long l1 = t1.getLong(Keys.FINISH_TIME); 
+        long l2 = t2.getLong(Keys.FINISH_TIME);
+        return (l2<l1 ? -1 : (l2==l1 ? 0 : 1));
       }
     };
     Arrays.sort(mapTasks, cFinishMapRed);
@@ -175,9 +175,9 @@ finished at (relative to the Job launch time):
     Comparator<JobHistory.Task> cFinishShuffle = 
       new Comparator<JobHistory.Task>() {
       public int compare(JobHistory.Task t1, JobHistory.Task t2){
-        Long l1 = new Long(t1.getLong(Keys.SHUFFLE_FINISHED)); 
-        Long l2 = new Long(t2.getLong(Keys.SHUFFLE_FINISHED));
-        return l2.compareTo(l1); 
+        long l1 = t1.getLong(Keys.SHUFFLE_FINISHED); 
+        long l2 = t2.getLong(Keys.SHUFFLE_FINISHED);
+        return (l2<l1 ? -1 : (l2==l1 ? 0 : 1));
       }
     };
     Arrays.sort(reduceTasks, cFinishShuffle);
@@ -195,11 +195,11 @@ finished at (relative to the Job launch time):
 <%
 	Comparator<JobHistory.Task> cReduce = new Comparator<JobHistory.Task>(){
 	  public int compare(JobHistory.Task t1, JobHistory.Task t2){
-	    Long l1 = new Long(t1.getLong(Keys.FINISH_TIME) - 
-	                       t1.getLong(Keys.SHUFFLE_FINISHED)); 
-	    Long l2 = new Long(t2.getLong(Keys.FINISH_TIME) - 
-	                       t2.getLong(Keys.SHUFFLE_FINISHED));
-	    return l2.compareTo(l1); 
+	    long l1 = t1.getLong(Keys.FINISH_TIME) - 
+	                       t1.getLong(Keys.SHUFFLE_FINISHED); 
+	    long l2 = t2.getLong(Keys.FINISH_TIME) - 
+	                       t2.getLong(Keys.SHUFFLE_FINISHED);
+      return (l2<l1 ? -1 : (l2==l1 ? 0 : 1));
 	  }
 	}; 
 	Arrays.sort(reduceTasks, cReduce); 
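
The JSP comparators stop boxing each duration into a Long just to call compareTo; they compute primitive longs and return -1/0/1 directly (the code predates Long.compare), comparing t2 against t1 so the sort stays descending. A standalone sketch with made-up durations:

    import java.util.Arrays;
    import java.util.Comparator;

    public class DescendingSortDemo {
        public static void main(String[] args) {
            Long[] durations = { 120L, 45L, 300L };

            Arrays.sort(durations, new Comparator<Long>() {
                public int compare(Long t1, Long t2) {
                    long l1 = t1.longValue();
                    long l2 = t2.longValue();
                    // operands swapped relative to an ascending comparator => descending order
                    return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
                }
            });

            System.out.println(Arrays.toString(durations)); // [300, 120, 45]
        }
    }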

+ 6 - 6
src/webapps/job/jobdetails.jsp

@@ -56,16 +56,16 @@
               killedTasks +
               "</td><td align=\"right\">" + 
               ((failedTaskAttempts > 0) ? 
-                  new String("<a href=\"/jobfailures.jsp?jobid=" + jobId + 
-                      "&kind=" + kind + "&cause=failed\">" + failedTaskAttempts + 
-                      "</a>") : 
+                  ("<a href=\"/jobfailures.jsp?jobid=" + jobId + 
+                   "&kind=" + kind + "&cause=failed\">" + failedTaskAttempts + 
+                   "</a>") : 
                   "0"
                   ) + 
               " / " +
               ((killedTaskAttempts > 0) ? 
-                  new String("<a href=\"/jobfailures.jsp?jobid=" + jobId + 
-                      "&kind=" + kind + "&cause=killed\">" + killedTaskAttempts + 
-                      "</a>") : 
+                  ("<a href=\"/jobfailures.jsp?jobid=" + jobId + 
+                   "&kind=" + kind + "&cause=killed\">" + killedTaskAttempts + 
+                   "</a>") : 
                   "0"
                   ) + 
               "</td></tr>\n");

+ 3 - 2
src/webapps/job/jobdetailshistory.jsp

@@ -122,8 +122,9 @@
 <table border="1">
 <tr><td>Hostname</td><td>Failed Tasks</td></tr>
  <%	  
-	for( String node : badNodes.keySet() ) {
-	  Set<String> failedTasks = badNodes.get(node); 
+  for (Map.Entry<String, Set<String>> entry : badNodes.entrySet()) {
+    String node = entry.getKey();
+    Set<String> failedTasks = entry.getValue();
 %>
 	<tr>
 		<td><%=node %></td>

+ 4 - 2
src/webapps/job/jobhistory.jsp

@@ -26,8 +26,10 @@
 		  return ; 
 		}
 
-		for(String trackerStartTime : jobTrackerToJobs.keySet() ){ 
-		  Map<String, JobInfo> jobs = (Map<String, JobInfo>)jobTrackerToJobs.get(trackerStartTime) ; 
+    for (Map.Entry<String, Map<String, JobInfo>> entry : 
+            jobTrackerToJobs.entrySet()) {
+      String trackerStartTime = entry.getKey();
+      Map<String, JobInfo> jobs = entry.getValue();
 %>
 <h2>JobTracker started at : <%=new Date(Long.parseLong(trackerStartTime)) %></h2>
 <hr/>