
Accidentally got too many files. Rolling back patch for HADOOP-1245.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@588067 13f79535-47bb-0310-9956-ffa450edef68
Owen O'Malley, 17 years ago
commit ea2ad1bb24

+ 0 - 7
CHANGES.txt

@@ -3,13 +3,6 @@ Hadoop Change Log
 
 Trunk (unreleased changes)
 
-  INCOMPATIBLE CHANGES
-
-    HADOOP-1245.  Use the mapred.tasktracker.tasks.maximum value
-    configured on each tasktracker when allocating tasks, instead of
-    the value configured on the jobtracker. InterTrackerProtocol
-    version changed from 5 to 6. (Michael Bieniosek via omalley)
-
   IMPROVEMENTS
 
     HADOOP-2045.  Change committer list on website to a table, so that
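
For context, HADOOP-1245 (the change being rolled back) made task allocation honor the mapred.tasktracker.tasks.maximum value reported by each tasktracker. After the revert, the jobtracker once again reads a single limit from its own configuration and applies it uniformly. A minimal sketch of the restored lookup, assuming a standard Configuration object (the class name here is illustrative, not the real JobTracker constructor):

    import org.apache.hadoop.conf.Configuration;

    public class RestoredLookup {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Restored (pre-HADOOP-1245) behavior: one jobtracker-side limit,
        // default 2, applied to every node regardless of what it reports.
        int maxCurrentTasks = conf.getInt("mapred.tasktracker.tasks.maximum", 2);
        System.out.println("per-node task limit = " + maxCurrentTasks);
      }
    }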

+ 0 - 17
src/c++/pipes/api/hadoop/Pipes.hh

@@ -1,20 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 #ifndef HADOOP_PIPES_HH
 #define HADOOP_PIPES_HH
 

+ 0 - 17
src/c++/pipes/api/hadoop/TemplateFactory.hh

@@ -1,20 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 #ifndef HADOOP_PIPES_TEMPLATE_FACTORY_HH
 #define HADOOP_PIPES_TEMPLATE_FACTORY_HH
 

+ 0 - 18
src/c++/pipes/impl/HadoopPipes.cc

@@ -1,21 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
 #include "hadoop/Pipes.hh"
 #include "hadoop/SerialUtils.hh"
 #include "hadoop/StringUtils.hh"

+ 1 - 4
src/examples/pipes/Makefile.am

@@ -19,7 +19,7 @@ AM_CXXFLAGS=-Wall -I$(HADOOP_UTILS_PREFIX)/include \
 LDADD=-L$(HADOOP_UTILS_PREFIX)/lib -L$(HADOOP_PIPES_PREFIX)/lib \
       -lhadooppipes -lhadooputils
 
-bin_PROGRAMS= wordcount-simple wordcount-part wordcount-nopipe pipes-sort
+bin_PROGRAMS= wordcount-simple wordcount-part wordcount-nopipe
 
 # Define the sources for each program
 wordcount_simple_SOURCES = \
@@ -31,6 +31,3 @@ wordcount_part_SOURCES = \
 wordcount_nopipe_SOURCES = \
 	impl/wordcount-nopipe.cc
 
-pipes_sort_SOURCES = \
-        impl/sort.cc
-

+ 4 - 18
src/examples/pipes/Makefile.in

@@ -14,7 +14,7 @@
 
 @SET_MAKE@
 
-SOURCES = $(pipes_sort_SOURCES) $(wordcount_nopipe_SOURCES) $(wordcount_part_SOURCES) $(wordcount_simple_SOURCES)
+SOURCES = $(wordcount_nopipe_SOURCES) $(wordcount_part_SOURCES) $(wordcount_simple_SOURCES)
 
 srcdir = @srcdir@
 top_srcdir = @top_srcdir@
@@ -38,7 +38,7 @@ PRE_UNINSTALL = :
 POST_UNINSTALL = :
 host_triplet = @host@
 bin_PROGRAMS = wordcount-simple$(EXEEXT) wordcount-part$(EXEEXT) \
-	wordcount-nopipe$(EXEEXT) pipes-sort$(EXEEXT)
+	wordcount-nopipe$(EXEEXT)
 DIST_COMMON = config.guess config.sub $(srcdir)/Makefile.in \
 	$(srcdir)/Makefile.am $(top_srcdir)/configure \
 	$(am__configure_deps) $(top_srcdir)/impl/config.h.in depcomp \
@@ -59,10 +59,6 @@ am__installdirs = "$(DESTDIR)$(bindir)"
 binPROGRAMS_INSTALL = $(INSTALL_PROGRAM)
 PROGRAMS = $(bin_PROGRAMS)
 am__dirstamp = $(am__leading_dot)dirstamp
-am_pipes_sort_OBJECTS = impl/sort.$(OBJEXT)
-pipes_sort_OBJECTS = $(am_pipes_sort_OBJECTS)
-pipes_sort_LDADD = $(LDADD)
-pipes_sort_DEPENDENCIES =
 am_wordcount_nopipe_OBJECTS = impl/wordcount-nopipe.$(OBJEXT)
 wordcount_nopipe_OBJECTS = $(am_wordcount_nopipe_OBJECTS)
 wordcount_nopipe_LDADD = $(LDADD)
@@ -86,8 +82,8 @@ LTCXXCOMPILE = $(LIBTOOL) --mode=compile --tag=CXX $(CXX) $(DEFS) \
 CXXLD = $(CXX)
 CXXLINK = $(LIBTOOL) --mode=link --tag=CXX $(CXXLD) $(AM_CXXFLAGS) \
 	$(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
-SOURCES = $(pipes_sort_SOURCES) $(wordcount_nopipe_SOURCES) \
-	$(wordcount_part_SOURCES) $(wordcount_simple_SOURCES)
+SOURCES = $(wordcount_nopipe_SOURCES) $(wordcount_part_SOURCES) \
+	$(wordcount_simple_SOURCES)
 ETAGS = etags
 CTAGS = ctags
 ACLOCAL = @ACLOCAL@
@@ -222,9 +218,6 @@ wordcount_part_SOURCES = \
 wordcount_nopipe_SOURCES = \
 	impl/wordcount-nopipe.cc
 
-pipes_sort_SOURCES = \
-        impl/sort.cc
-
 all: all-am
 
 .SUFFIXES:
@@ -313,11 +306,6 @@ impl/$(am__dirstamp):
 impl/$(DEPDIR)/$(am__dirstamp):
 	@$(mkdir_p) impl/$(DEPDIR)
 	@: > impl/$(DEPDIR)/$(am__dirstamp)
-impl/sort.$(OBJEXT): impl/$(am__dirstamp) \
-	impl/$(DEPDIR)/$(am__dirstamp)
-pipes-sort$(EXEEXT): $(pipes_sort_OBJECTS) $(pipes_sort_DEPENDENCIES) 
-	@rm -f pipes-sort$(EXEEXT)
-	$(CXXLINK) $(pipes_sort_LDFLAGS) $(pipes_sort_OBJECTS) $(pipes_sort_LDADD) $(LIBS)
 impl/wordcount-nopipe.$(OBJEXT): impl/$(am__dirstamp) \
 	impl/$(DEPDIR)/$(am__dirstamp)
 wordcount-nopipe$(EXEEXT): $(wordcount_nopipe_OBJECTS) $(wordcount_nopipe_DEPENDENCIES) 
@@ -336,7 +324,6 @@ wordcount-simple$(EXEEXT): $(wordcount_simple_OBJECTS) $(wordcount_simple_DEPEND
 
 mostlyclean-compile:
 	-rm -f *.$(OBJEXT)
-	-rm -f impl/sort.$(OBJEXT)
 	-rm -f impl/wordcount-nopipe.$(OBJEXT)
 	-rm -f impl/wordcount-part.$(OBJEXT)
 	-rm -f impl/wordcount-simple.$(OBJEXT)
@@ -344,7 +331,6 @@ mostlyclean-compile:
 distclean-compile:
 	-rm -f *.tab.c
 
-@AMDEP_TRUE@@am__include@ @am__quote@impl/$(DEPDIR)/sort.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@impl/$(DEPDIR)/wordcount-nopipe.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@impl/$(DEPDIR)/wordcount-part.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@impl/$(DEPDIR)/wordcount-simple.Po@am__quote@

+ 0 - 17
src/examples/pipes/impl/wordcount-nopipe.cc

@@ -1,20 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 #include "hadoop/Pipes.hh"
 #include "hadoop/TemplateFactory.hh"
 #include "hadoop/StringUtils.hh"

+ 0 - 18
src/examples/pipes/impl/wordcount-part.cc

@@ -1,21 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
 #include "hadoop/Pipes.hh"
 #include "hadoop/TemplateFactory.hh"
 #include "hadoop/StringUtils.hh"

+ 0 - 18
src/examples/pipes/impl/wordcount-simple.cc

@@ -1,21 +1,3 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
 #include "hadoop/Pipes.hh"
 #include "hadoop/TemplateFactory.hh"
 #include "hadoop/StringUtils.hh"

+ 1 - 2
src/java/org/apache/hadoop/mapred/InterTrackerProtocol.java

@@ -34,9 +34,8 @@ interface InterTrackerProtocol extends VersionedProtocol {
    * version 4 changed TaskReport for HADOOP-549.
    * version 5 introduced that removes locateMapOutputs and instead uses
    * getTaskCompletionEvents to figure finished maps and fetch the outputs
-   * version 6 adds maxTasks to TaskTrackerStatus for HADOOP-1245
    */
-  public static final long versionID = 6L;
+  public static final long versionID = 5L;
   
   public final static int TRACKERS_OK = 0;
   public final static int UNKNOWN_TASKTRACKER = 1;
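
The version constant returns to 5 because the wire format of TaskTrackerStatus (further down in this commit) loses the maxTasks field. A hedged sketch of why the two must move together, modeled on Hadoop's VersionedProtocol handshake (the method signature matches VersionedProtocol; the surrounding class is hypothetical):

    import java.io.IOException;

    // Sketch: client and server compare versionID before any RPC traffic,
    // so a serialization change (adding or dropping a field) must be paired
    // with a version bump -- and a revert has to undo both together.
    class VersionCheckSketch {
      static final long versionID = 5L; // value restored by this commit

      public long getProtocolVersion(String protocol, long clientVersion)
          throws IOException {
        if (clientVersion != versionID) {
          throw new IOException("Version mismatch: client " + clientVersion
                                + ", server " + versionID);
        }
        return versionID;
      }
    }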

+ 56 - 128
src/java/org/apache/hadoop/mapred/JobHistory.java

@@ -71,23 +71,17 @@ public class JobHistory {
    * Record types are identifiers for each line of log in history files. 
    * A record type appears as the first token in a single line of log. 
    */
-  public static enum RecordTypes {
-    Jobtracker, Job, Task, MapAttempt, ReduceAttempt
-  }
-
+  public static enum RecordTypes {Jobtracker, Job, Task, MapAttempt, ReduceAttempt};
   /**
    * Job history files contain key="value" pairs, where keys belong to this enum. 
    * It acts as a global namespace for all keys. 
    */
-  public static enum Keys { 
-    JOBTRACKERID,
-    START_TIME, FINISH_TIME, JOBID, JOBNAME, USER, JOBCONF, SUBMIT_TIME, 
-    LAUNCH_TIME, TOTAL_MAPS, TOTAL_REDUCES, FAILED_MAPS, FAILED_REDUCES, 
-    FINISHED_MAPS, FINISHED_REDUCES, JOB_STATUS, TASKID, HOSTNAME, TASK_TYPE, 
-    ERROR, TASK_ATTEMPT_ID, TASK_STATUS, COPY_PHASE, SORT_PHASE, REDUCE_PHASE, 
-    SHUFFLE_FINISHED, SORT_FINISHED, COUNTERS
-  }
-
+  public static enum Keys { JOBTRACKERID,
+                            START_TIME, FINISH_TIME, JOBID, JOBNAME, USER, JOBCONF, SUBMIT_TIME, LAUNCH_TIME, 
+                            TOTAL_MAPS, TOTAL_REDUCES, FAILED_MAPS, FAILED_REDUCES, FINISHED_MAPS, FINISHED_REDUCES,
+                            JOB_STATUS, TASKID, HOSTNAME, TASK_TYPE, ERROR, TASK_ATTEMPT_ID, TASK_STATUS, 
+                            COPY_PHASE, SORT_PHASE, REDUCE_PHASE, SHUFFLE_FINISHED, SORT_FINISHED 
+  };
   /**
    * This enum contains some of the values commonly used by history log events. 
    * since values in history can only be strings - Values.name() is used in 
@@ -95,8 +89,7 @@ public class JobHistory {
    */
   public static enum Values {
     SUCCESS, FAILED, KILLED, MAP, REDUCE
-  }
-
+  };
   // temp buffer for parsed dataa
   private static Map<Keys,String> parseBuffer = new HashMap<Keys, String>(); 
 
@@ -188,8 +181,7 @@ public class JobHistory {
    * @param value value
    */
   
-  static void log(PrintWriter out, RecordTypes recordType, Keys key, 
-                  String value){
+  static void log(PrintWriter out, RecordTypes recordType, Enum key, String value){
     out.println(recordType.name() + DELIMITER + key + "=\"" + value + "\""); 
     out.flush();
   }
@@ -202,8 +194,7 @@ public class JobHistory {
    * @param values type of log event
    */
 
-  static void log(PrintWriter out, RecordTypes recordType, Keys[] keys, 
-                  String[] values){
+  static void log(PrintWriter out, RecordTypes recordType, Enum[] keys, String[] values){
     StringBuffer buf = new StringBuffer(recordType.name()); 
     buf.append(DELIMITER); 
     for(int i =0; i< keys.length; i++){
@@ -349,7 +340,7 @@ public class JobHistory {
       if (!disableHistory){
         synchronized(MASTER_INDEX_LOG_FILE){
           JobHistory.log(masterIndex, RecordTypes.Job, 
-                         new Keys[]{Keys.JOBID, Keys.JOBNAME, Keys.USER, Keys.SUBMIT_TIME, Keys.JOBCONF }, 
+                         new Enum[]{Keys.JOBID, Keys.JOBNAME, Keys.USER, Keys.SUBMIT_TIME, Keys.JOBCONF }, 
                          new String[]{jobId, jobName, user, 
                                       String.valueOf(submitTime), jobConfPath}
                         );
@@ -363,7 +354,7 @@ public class JobHistory {
           openJobs.put(logFileName, writer);
           // add to writer as well 
           JobHistory.log(writer, RecordTypes.Job, 
-                         new Keys[]{Keys.JOBID, Keys.JOBNAME, Keys.USER, Keys.SUBMIT_TIME, Keys.JOBCONF }, 
+                         new Enum[]{Keys.JOBID, Keys.JOBNAME, Keys.USER, Keys.SUBMIT_TIME, Keys.JOBCONF }, 
                          new String[]{jobId, jobName, user, 
                                       String.valueOf(submitTime) , jobConfPath}
                         ); 
@@ -398,7 +389,7 @@ public class JobHistory {
       if (!disableHistory){
         synchronized(MASTER_INDEX_LOG_FILE){
           JobHistory.log(masterIndex, RecordTypes.Job, 
-                         new Keys[] {Keys.JOBID, Keys.LAUNCH_TIME, Keys.TOTAL_MAPS, Keys.TOTAL_REDUCES },
+                         new Enum[] {Keys.JOBID, Keys.LAUNCH_TIME, Keys.TOTAL_MAPS, Keys.TOTAL_REDUCES },
                          new String[] {jobId,  String.valueOf(startTime), 
                                        String.valueOf(totalMaps), String.valueOf(totalReduces) }); 
         }
@@ -408,7 +399,7 @@ public class JobHistory {
         
         if (null != writer){
           JobHistory.log(writer, RecordTypes.Job, 
-                         new Keys[] {Keys.JOBID, Keys.LAUNCH_TIME, Keys.TOTAL_MAPS, Keys.TOTAL_REDUCES },
+                         new Enum[] {Keys.JOBID, Keys.LAUNCH_TIME, Keys.TOTAL_MAPS, Keys.TOTAL_REDUCES },
                          new String[] {jobId,  String.valueOf(startTime), String.valueOf(totalMaps), String.valueOf(totalReduces)}); 
         }
       }
@@ -421,22 +412,15 @@ public class JobHistory {
      * @param finishedReduces no of reduces finished sucessfully. 
      * @param failedMaps no of failed map tasks. 
      * @param failedReduces no of failed reduce tasks. 
-     * @param counters the counters from the job
      */ 
-    public static void logFinished(String jobId, long finishTime, 
-                                   int finishedMaps, int finishedReduces,
-                                   int failedMaps, int failedReduces,
-                                   Counters counters){
+    public static void logFinished(String jobId, long finishTime, int finishedMaps, int finishedReduces,
+                                   int failedMaps, int failedReduces){
       if (!disableHistory){
         synchronized(MASTER_INDEX_LOG_FILE){
           JobHistory.log(masterIndex, RecordTypes.Job,          
-                         new Keys[] {Keys.JOBID, Keys.FINISH_TIME, 
-                                     Keys.JOB_STATUS, Keys.FINISHED_MAPS, 
-                                     Keys.FINISHED_REDUCES},
-                         new String[] {jobId,  "" + finishTime, 
-                                       Values.SUCCESS.name(), 
-                                       String.valueOf(finishedMaps), 
-                                       String.valueOf(finishedReduces)});
+                         new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES },
+                         new String[] {jobId,  "" + finishTime, Values.SUCCESS.name(), 
+                                       String.valueOf(finishedMaps), String.valueOf(finishedReduces) });
         }
         
         // close job file for this job
@@ -444,18 +428,11 @@ public class JobHistory {
         PrintWriter writer = openJobs.get(logFileName); 
         if (null != writer){
           JobHistory.log(writer, RecordTypes.Job,          
-                         new Keys[] {Keys.JOBID, Keys.FINISH_TIME, 
-                                     Keys.JOB_STATUS, Keys.FINISHED_MAPS, 
-                                     Keys.FINISHED_REDUCES,
-                                     Keys.FAILED_MAPS, Keys.FAILED_REDUCES,
-                                     Keys.COUNTERS},
-                         new String[] {jobId,  Long.toString(finishTime), 
-                                       Values.SUCCESS.name(), 
-                                       String.valueOf(finishedMaps), 
-                                       String.valueOf(finishedReduces),
-                                       String.valueOf(failedMaps), 
-                                       String.valueOf(failedReduces),
-                                       stringifyCounters(counters)});
+                         new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES,
+                                     Keys.FAILED_MAPS, Keys.FAILED_REDUCES},
+                         new String[] {jobId,  "" + finishTime, Values.SUCCESS.name(), 
+                                       String.valueOf(finishedMaps), String.valueOf(finishedReduces),
+                                       String.valueOf(failedMaps), String.valueOf(failedReduces)});
           writer.close();
           openJobs.remove(logFileName); 
         }
@@ -474,7 +451,7 @@ public class JobHistory {
       if (!disableHistory){
         synchronized(MASTER_INDEX_LOG_FILE){
           JobHistory.log(masterIndex, RecordTypes.Job,
-                         new Keys[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES },
+                         new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES },
                          new String[] {jobid,  String.valueOf(timestamp), Values.FAILED.name(), String.valueOf(finishedMaps), 
                                        String.valueOf(finishedReduces)}); 
         }
@@ -482,7 +459,7 @@ public class JobHistory {
         PrintWriter writer = openJobs.get(logFileName); 
         if (null != writer){
           JobHistory.log(writer, RecordTypes.Job,
-                         new Keys[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES },
+                         new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES },
                          new String[] {jobid,  String.valueOf(timestamp), Values.FAILED.name(), String.valueOf(finishedMaps), 
                                        String.valueOf(finishedReduces)}); 
           writer.close();
@@ -511,8 +488,7 @@ public class JobHistory {
       if (!disableHistory){
         PrintWriter writer = openJobs.get(JOBTRACKER_START_TIME + "_" + jobId); 
         if (null != writer){
-          JobHistory.log(writer, RecordTypes.Task, 
-                         new Keys[]{Keys.TASKID, Keys.TASK_TYPE , Keys.START_TIME}, 
+          JobHistory.log(writer, RecordTypes.Task, new Enum[]{Keys.TASKID, Keys.TASK_TYPE , Keys.START_TIME}, 
                          new String[]{taskId, taskType, String.valueOf(startTime)});
         }
       }
@@ -525,17 +501,13 @@ public class JobHistory {
      * @param finishTime finish timeof task in ms
      */
     public static void logFinished(String jobId, String taskId, String taskType, 
-                                   long finishTime, Counters counters){
+                                   long finishTime){
       if (!disableHistory){
         PrintWriter writer = openJobs.get(JOBTRACKER_START_TIME + "_" + jobId); 
         if (null != writer){
-          JobHistory.log(writer, RecordTypes.Task, 
-                         new Keys[]{Keys.TASKID, Keys.TASK_TYPE, 
-                                    Keys.TASK_STATUS, Keys.FINISH_TIME,
-                                    Keys.COUNTERS}, 
-                         new String[]{ taskId, taskType, Values.SUCCESS.name(), 
-                                       String.valueOf(finishTime),
-                                       stringifyCounters(counters)});
+          JobHistory.log(writer, RecordTypes.Task, new Enum[]{Keys.TASKID, Keys.TASK_TYPE, 
+                                                              Keys.TASK_STATUS, Keys.FINISH_TIME}, 
+                         new String[]{ taskId, taskType, Values.SUCCESS.name(), String.valueOf(finishTime)});
         }
       }
     }
@@ -551,9 +523,8 @@ public class JobHistory {
       if (!disableHistory){
         PrintWriter writer = openJobs.get(JOBTRACKER_START_TIME + "_" + jobId); 
         if (null != writer){
-          JobHistory.log(writer, RecordTypes.Task, 
-                         new Keys[]{Keys.TASKID, Keys.TASK_TYPE, 
-                                    Keys.TASK_STATUS, Keys.FINISH_TIME, Keys.ERROR}, 
+          JobHistory.log(writer, RecordTypes.Task, new Enum[]{Keys.TASKID, Keys.TASK_TYPE, 
+                                                              Keys.TASK_STATUS, Keys.FINISH_TIME, Keys.ERROR}, 
                          new String[]{ taskId,  taskType, Values.FAILED.name(), String.valueOf(time) , error});
         }
       }
@@ -589,9 +560,8 @@ public class JobHistory {
         PrintWriter writer = openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
         if (null != writer){
           JobHistory.log(writer, RecordTypes.MapAttempt, 
-                         new Keys[]{ Keys.TASK_TYPE, Keys.TASKID, 
-                                     Keys.TASK_ATTEMPT_ID, Keys.START_TIME, 
-                                     Keys.HOSTNAME},
+                         new Enum[]{ Keys.TASK_TYPE, Keys.TASKID, 
+                                     Keys.TASK_ATTEMPT_ID, Keys.START_TIME, Keys.HOSTNAME},
                          new String[]{Values.MAP.name(),  taskId, 
                                       taskAttemptId, String.valueOf(startTime), hostName}); 
         }
@@ -605,22 +575,18 @@ public class JobHistory {
      * @param finishTime finish time
      * @param hostName host name 
      */
-    public static void logFinished(String jobId, String taskId, 
-                                   String taskAttemptId, long finishTime, 
-                                   String hostName){
+    public static void logFinished(String jobId, String taskId, String taskAttemptId, long finishTime, String hostName){
       if (!disableHistory){
         PrintWriter writer = openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
         if (null != writer){
           JobHistory.log(writer, RecordTypes.MapAttempt, 
-                         new Keys[]{ Keys.TASK_TYPE, Keys.TASKID, 
-                                     Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
+                         new Enum[]{ Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
                                      Keys.FINISH_TIME, Keys.HOSTNAME},
                          new String[]{Values.MAP.name(), taskId, taskAttemptId, Values.SUCCESS.name(),  
                                       String.valueOf(finishTime), hostName}); 
         }
       }
     }
-
     /**
      * Log task attempt failed event.  
      * @param jobId jobid
@@ -636,7 +602,7 @@ public class JobHistory {
         PrintWriter writer = openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
         if (null != writer){
           JobHistory.log(writer, RecordTypes.MapAttempt, 
-                         new Keys[]{Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
+                         new Enum[]{Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
                                     Keys.FINISH_TIME, Keys.HOSTNAME, Keys.ERROR},
                          new String[]{ Values.MAP.name(), taskId, taskAttemptId, Values.FAILED.name(),
                                        String.valueOf(timestamp), hostName, error}); 
@@ -658,7 +624,7 @@ public class JobHistory {
         PrintWriter writer = openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
         if (null != writer){
           JobHistory.log(writer, RecordTypes.MapAttempt, 
-                         new Keys[]{Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
+                         new Enum[]{Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
                                     Keys.FINISH_TIME, Keys.HOSTNAME, Keys.ERROR},
                          new String[]{ Values.MAP.name(), taskId, taskAttemptId, Values.KILLED.name(),
                                        String.valueOf(timestamp), hostName, error}); 
@@ -685,7 +651,7 @@ public class JobHistory {
         PrintWriter writer = openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
         if (null != writer){
           JobHistory.log(writer, RecordTypes.ReduceAttempt, 
-                         new Keys[]{  Keys.TASK_TYPE, Keys.TASKID, 
+                         new Enum[]{  Keys.TASK_TYPE, Keys.TASKID, 
                                       Keys.TASK_ATTEMPT_ID, Keys.START_TIME, Keys.HOSTNAME},
                          new String[]{Values.REDUCE.name(),  taskId, 
                                       taskAttemptId, String.valueOf(startTime), hostName}); 
@@ -702,18 +668,14 @@ public class JobHistory {
      * @param finishTime finish time of task
      * @param hostName host name where task attempt executed
      */
-    public static void logFinished(String jobId, String taskId, 
-                                   String taskAttemptId, long shuffleFinished, 
-                                   long sortFinished, long finishTime, 
-                                   String hostName){
+    public static void logFinished(String jobId, String taskId, String taskAttemptId, 
+                                   long shuffleFinished, long sortFinished, long finishTime, String hostName){
       if (!disableHistory){
         PrintWriter writer = openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
         if (null != writer){
           JobHistory.log(writer, RecordTypes.ReduceAttempt, 
-                         new Keys[]{ Keys.TASK_TYPE, Keys.TASKID, 
-                                     Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
-                                     Keys.SHUFFLE_FINISHED, Keys.SORT_FINISHED,
-                                     Keys.FINISH_TIME, Keys.HOSTNAME},
+                         new Enum[]{ Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
+                                     Keys.SHUFFLE_FINISHED, Keys.SORT_FINISHED, Keys.FINISH_TIME, Keys.HOSTNAME},
                          new String[]{Values.REDUCE.name(),  taskId, taskAttemptId, Values.SUCCESS.name(), 
                                       String.valueOf(shuffleFinished), String.valueOf(sortFinished),
                                       String.valueOf(finishTime), hostName}); 
@@ -735,7 +697,7 @@ public class JobHistory {
         PrintWriter writer = openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
         if (null != writer){
           JobHistory.log(writer, RecordTypes.ReduceAttempt, 
-                         new Keys[]{  Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
+                         new Enum[]{  Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
                                       Keys.FINISH_TIME, Keys.HOSTNAME, Keys.ERROR },
                          new String[]{ Values.REDUCE.name(), taskId, taskAttemptId, Values.FAILED.name(), 
                                        String.valueOf(timestamp), hostName, error }); 
@@ -757,10 +719,8 @@ public class JobHistory {
         PrintWriter writer = openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
         if (null != writer){
           JobHistory.log(writer, RecordTypes.ReduceAttempt, 
-                         new Keys[]{  Keys.TASK_TYPE, Keys.TASKID, 
-                                      Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
-                                      Keys.FINISH_TIME, Keys.HOSTNAME, 
-                                      Keys.ERROR },
+                         new Enum[]{  Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
+                                      Keys.FINISH_TIME, Keys.HOSTNAME, Keys.ERROR },
                          new String[]{ Values.REDUCE.name(), taskId, taskAttemptId, Values.KILLED.name(), 
                                        String.valueOf(timestamp), hostName, error }); 
         }
@@ -768,33 +728,6 @@ public class JobHistory {
     }
 
   }
-  
-  /**
-   * Convert a counters object into a string
-   * @param counters the counters to stringify
-   * @return the resulting string
-   */
-  private static String stringifyCounters(Counters counters) {
-    StringBuffer buffer = new StringBuffer();
-    for(String groupName: counters.getGroupNames()){
-      Counters.Group group = counters.getGroup(groupName);
-      boolean first = true;
-      for(String counterName: group.getCounterNames()) {
-        if (first) {
-          first = false;
-        } else {
-          buffer.append(',');
-        }
-        buffer.append(groupName);
-        buffer.append('.');
-        buffer.append(counterName);
-        buffer.append('=');
-        buffer.append(group.getCounter(counterName));
-      }
-    }
-    return buffer.toString();
-  }
-
   /**
    * Callback interface for reading back log events from JobHistory. This interface 
    * should be implemented and passed to JobHistory.parseHistory() 
@@ -849,21 +782,16 @@ public class JobHistory {
           // find job that started more than one month back and remove them
           // for jobtracker instances which dont have a job in past one month 
           // remove the jobtracker start timestamp as well.
-          Iterator<Map<String, JobHistory.JobInfo>> jobTrackerItr =
-            jobTrackersToJobs.values().iterator();
-          while (jobTrackerItr.hasNext()) {
-            Map<String, JobHistory.JobInfo> jobs = jobTrackerItr.next();
-            Iterator<Map.Entry<String, JobHistory.JobInfo>> jobItr = 
-                   jobs.entrySet().iterator();
-            while (jobItr.hasNext()) {
-              Map.Entry<String, JobHistory.JobInfo> item = jobItr.next();
-              if (now - item.getValue().getLong(Keys.SUBMIT_TIME) > 
-                  THIRTY_DAYS_IN_MS) {
-                jobItr.remove(); 
+          for (Map<String, JobHistory.JobInfo> jobs : 
+                  jobTrackersToJobs.values()) {
+            for(Iterator iter = jobs.keySet().iterator(); iter.hasNext(); iter.next()){
+              JobHistory.JobInfo job = jobs.get(iter.next());
+              if (now - job.getLong(Keys.SUBMIT_TIME) > THIRTY_DAYS_IN_MS) {
+                iter.remove(); 
+              }
+              if (jobs.size() == 0){
+                iter.remove(); 
               }
-            }
-            if (jobs.size() == 0){
-              jobTrackerItr.remove(); 
             }
           }
           masterIndex.close(); 
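
The log(...) overloads above emit one record per line: a record-type token followed by KEY="value" pairs. The removed stringifyCounters had packed counters into that same scheme as comma-separated group.counter=value entries. A small runnable sketch of the line format (assuming DELIMITER is a single space, which this hunk does not show; the sample values are hypothetical):

    public class HistoryLineSketch {
      public static void main(String[] args) {
        String[] keys = {"JOBID", "JOBNAME", "USER"};
        String[] values = {"job_0001", "wordcount", "alice"};
        StringBuffer buf = new StringBuffer("Job"); // record-type token
        for (int i = 0; i < keys.length; i++) {
          buf.append(' ').append(keys[i]).append("=\"").append(values[i]).append('"');
        }
        // Prints: Job JOBID="job_0001" JOBNAME="wordcount" USER="alice"
        System.out.println(buf);
      }
    }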

+ 3 - 7
src/java/org/apache/hadoop/mapred/JobInProgress.java

@@ -797,8 +797,7 @@ class JobInProgress {
                                         tip.getTIPId(), status.getTaskId(), status.getFinishTime(), 
                                         taskTrackerName); 
       JobHistory.Task.logFinished(profile.getJobId(), tip.getTIPId(), 
-                                  Values.MAP.name(), status.getFinishTime(),
-                                  status.getCounters()); 
+                                  Values.MAP.name(), status.getFinishTime()); 
     }else{
       JobHistory.ReduceAttempt.logStarted(profile.getJobId(), 
                                           tip.getTIPId(), status.getTaskId(), status.getStartTime(), 
@@ -808,8 +807,7 @@ class JobInProgress {
                                            status.getSortFinishTime(), status.getFinishTime(), 
                                            taskTrackerName); 
       JobHistory.Task.logFinished(profile.getJobId(), tip.getTIPId(), 
-                                  Values.REDUCE.name(), status.getFinishTime(),
-                                  status.getCounters()); 
+                                  Values.REDUCE.name(), status.getFinishTime()); 
     }
         
     // Update the running/finished map/reduce counts
@@ -877,9 +875,7 @@ class JobInProgress {
       LOG.info("Job " + this.status.getJobId() + 
                " has completed successfully.");
       JobHistory.JobInfo.logFinished(this.status.getJobId(), finishTime, 
-                                     this.finishedMapTasks, 
-                                     this.finishedReduceTasks, failedMapTasks, 
-                                     failedReduceTasks, getCounters());
+                                     this.finishedMapTasks, this.finishedReduceTasks, failedMapTasks, failedReduceTasks);
       metrics.completeJob();
       return true;
     }

+ 7 - 9
src/java/org/apache/hadoop/mapred/JobTracker.java

@@ -507,7 +507,7 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, JobSubmiss
   long startTime;
   int totalSubmissions = 0;
 
-  private int totalTaskCapacity;
+  private int maxCurrentTasks;
   private HostsFileReader hostsReader;
 
   //
@@ -621,6 +621,7 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, JobSubmiss
     //
     TASKTRACKER_EXPIRY_INTERVAL = 
       conf.getLong("mapred.tasktracker.expiry.interval", 10 * 60 * 1000);
+    maxCurrentTasks = conf.getInt("mapred.tasktracker.tasks.maximum", 2);
     RETIRE_JOB_INTERVAL = conf.getLong("mapred.jobtracker.retirejob.interval", 24 * 60 * 60 * 1000);
     RETIRE_JOB_CHECK_INTERVAL = conf.getLong("mapred.jobtracker.retirejob.check", 60 * 1000);
     TASK_ALLOC_EPSILON = conf.getFloat("mapred.jobtracker.taskalloc.loadbalance.epsilon", 0.2f);
@@ -1230,7 +1231,6 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, JobSubmiss
     if (oldStatus != null) {
       totalMaps -= oldStatus.countMapTasks();
       totalReduces -= oldStatus.countReduceTasks();
-      totalTaskCapacity -= oldStatus.getMaxTasks();
       if (status == null) {
         taskTrackers.remove(trackerName);
       }
@@ -1238,7 +1238,6 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, JobSubmiss
     if (status != null) {
       totalMaps += status.countMapTasks();
       totalReduces += status.countReduceTasks();
-      totalTaskCapacity += status.getMaxTasks();
       taskTrackers.put(trackerName, status);
     }
     return oldStatus != null;
@@ -1298,7 +1297,7 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, JobSubmiss
     int remainingMapLoad = 0;
     int numTaskTrackers;
     TaskTrackerStatus tts;
-
+	
     synchronized (taskTrackers) {
       numTaskTrackers = taskTrackers.size();
       tts = taskTrackers.get(taskTracker);
@@ -1307,6 +1306,7 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, JobSubmiss
       LOG.warn("Unknown task tracker polling; ignoring: " + taskTracker);
       return null;
     }
+    int totalCapacity = numTaskTrackers * maxCurrentTasks;
 
     synchronized(jobsByPriority){
       for (Iterator it = jobsByPriority.iterator(); it.hasNext();) {
@@ -1320,8 +1320,6 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, JobSubmiss
       }   
     }
 
-    int maxCurrentTasks = tts.getMaxTasks();
-    
     // find out the maximum number of maps or reduces that we are willing
     // to run on any node.
     int maxMapLoad = 0;
@@ -1383,7 +1381,7 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, JobSubmiss
             padding = Math.min(maxCurrentTasks,
                                (int)(totalNeededMaps * PAD_FRACTION));
           }
-          if (totalMaps + padding >= totalTaskCapacity) {
+          if (totalMaps + padding >= totalCapacity) {
             break;
           }
         }
@@ -1421,7 +1419,7 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, JobSubmiss
               Math.min(maxCurrentTasks,
                        (int) (totalNeededReduces * PAD_FRACTION));
           }
-          if (totalReduces + padding >= totalTaskCapacity) {
+          if (totalReduces + padding >= totalCapacity) {
             break;
           }
         }
@@ -1577,7 +1575,7 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, JobSubmiss
       return new ClusterStatus(taskTrackers.size(),
                                totalMaps,
                                totalReduces,
-                               totalTaskCapacity,
+                               maxCurrentTasks,
                                state);          
     }
   }
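
The substantive change in this file: with HADOOP-1245, cluster capacity was the running sum of each tracker's reported getMaxTasks(); after the rollback it is numTaskTrackers * maxCurrentTasks, which assumes every node shares the same limit. An illustrative comparison with hypothetical per-tracker values:

    public class CapacitySketch {
      public static void main(String[] args) {
        int[] reported = {2, 4, 8};        // hypothetical per-tracker maxTasks
        int summed = 0;
        for (int m : reported) summed += m;
        int uniform = reported.length * 2; // jobtracker-side default of 2
        System.out.println("HADOOP-1245 capacity: " + summed);  // 14
        System.out.println("rolled-back capacity: " + uniform); // 6
      }
    }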

+ 2 - 2
src/java/org/apache/hadoop/mapred/TaskTracker.java

@@ -391,7 +391,7 @@ public class TaskTracker
     this.reduceTotal = 0;
     this.acceptNewTasks = true;
     this.status = null;
-
+        
     this.minSpaceStart = this.fConf.getLong("mapred.local.dir.minspacestart", 0L);
     this.minSpaceKill = this.fConf.getLong("mapred.local.dir.minspacekill", 0L);
     int numCopiers = this.fConf.getInt("mapred.reduce.parallel.copies", 5);
@@ -860,7 +860,7 @@ public class TaskTracker
       synchronized (this) {
         status = new TaskTrackerStatus(taskTrackerName, localHostname, 
                                        httpPort, cloneAndResetRunningTaskStatuses(), 
-                                       failures, maxCurrentTasks); 
+                                       failures); 
       }
     } else {
       LOG.info("Resending 'status' to '" + jobTrackAddr.getHostName() +

+ 1 - 16
src/java/org/apache/hadoop/mapred/TaskTrackerStatus.java

@@ -46,7 +46,6 @@ class TaskTrackerStatus implements Writable {
   List<TaskStatus> taskReports;
     
   volatile long lastSeen;
-  int maxTasks;
     
   /**
    */
@@ -58,15 +57,13 @@ class TaskTrackerStatus implements Writable {
    */
   public TaskTrackerStatus(String trackerName, String host, 
                            int httpPort, List<TaskStatus> taskReports, 
-                           int failures, int maxTasks) {
+                           int failures) {
     this.trackerName = trackerName;
     this.host = host;
     this.httpPort = httpPort;
 
     this.taskReports = new ArrayList<TaskStatus>(taskReports);
     this.failures = failures;
-
-    this.maxTasks = maxTasks;
   }
 
   /**
@@ -152,16 +149,6 @@ class TaskTrackerStatus implements Writable {
     this.lastSeen = lastSeen;
   }
 
-  /**
-   * Get the maximum concurrent tasks for this node.  (This applies
-   * per type of task - a node with maxTasks==1 will run up to 1 map
-   * and 1 reduce concurrently).
-   * @return maximum tasks this node supports
-   */
-  public int getMaxTasks() {
-    return maxTasks;
-  }
-  
   ///////////////////////////////////////////
   // Writable
   ///////////////////////////////////////////
@@ -170,7 +157,6 @@ class TaskTrackerStatus implements Writable {
     UTF8.writeString(out, host);
     out.writeInt(httpPort);
     out.writeInt(failures);
-    out.writeInt(maxTasks);
 
     out.writeInt(taskReports.size());
     for (TaskStatus taskStatus : taskReports) {
@@ -183,7 +169,6 @@ class TaskTrackerStatus implements Writable {
     this.host = UTF8.readString(in);
     this.httpPort = in.readInt();
     this.failures = in.readInt();
-    this.maxTasks = in.readInt();
 
     taskReports.clear();
     int numTasks = in.readInt();
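
Note that the revert removes maxTasks from write() and readFields() at the same position: Writable fields are serialized positionally, so dropping the field on only one side would shift every value that follows. A minimal sketch of that symmetry (a hypothetical two-field class, not the real TaskTrackerStatus):

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    class StatusSketch {
      int httpPort;
      int failures;

      void write(DataOutput out) throws IOException {
        out.writeInt(httpPort);
        out.writeInt(failures);       // maxTasks was written here before the revert
      }

      void readFields(DataInput in) throws IOException {
        this.httpPort = in.readInt();
        this.failures = in.readInt(); // and read back here, in the same order
      }
    }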

+ 5 - 11
src/webapps/job/jobtracker.jsp

@@ -67,21 +67,15 @@
   public void generateSummaryTable(JspWriter out,
                                    JobTracker tracker) throws IOException {
     ClusterStatus status = tracker.getClusterStatus();
-    String tasksPerNode = status.getTaskTrackers() > 0 ?
-      percentFormat.format(((double)status.getMaxTasks()) / status.getTaskTrackers()) :
-      "-";
     out.print("<table border=\"2\" cellpadding=\"5\" cellspacing=\"2\">\n"+
               "<tr><th>Maps</th><th>Reduces</th>" + 
-              "<th>Total Submissions</th>" +
-              "<th>Nodes</th><th>Task Capacity</th><th>Avg. Tasks/Node</th></tr>\n");
+              "<th>Tasks/Node</th><th>Total Submissions</th>" +
+              "<th>Nodes</th></tr>\n");
     out.print("<tr><td>" + status.getMapTasks() + "</td><td>" +
               status.getReduceTasks() + "</td><td>" + 
-              tracker.getTotalSubmissions() +
-              "</td><td><a href=\"machines.jsp\">" +
-              status.getTaskTrackers() +
-              "</a></td><td>" + status.getMaxTasks() +
-	      "</td><td>" + tasksPerNode +
-              "</td></tr></table>\n");
+              status.getMaxTasks() + "</td><td>" +
+              tracker.getTotalSubmissions() + "</td><td><a href=\"machines.jsp\">" +
+              status.getTaskTrackers() + "</a></td></tr></table>\n");
   }%>
 
 <%@page import="org.apache.hadoop.dfs.JspHelper"%>

+ 2 - 4
src/webapps/job/machines.jsp

@@ -23,10 +23,9 @@
     } else {
       out.print("<center>\n");
       out.print("<table border=\"2\" cellpadding=\"5\" cellspacing=\"2\">\n");
-      out.print("<tr><td align=\"center\" colspan=\"6\"><b>Task Trackers</b></td></tr>\n");
+      out.print("<tr><td align=\"center\" colspan=\"5\"><b>Task Trackers</b></td></tr>\n");
       out.print("<tr><td><b>Name</b></td><td><b>Host</b></td>" +
-                "<td><b># running tasks</b></td><td><b>Max Tasks</b></td>" +
-                "<td><b>Failures</b></td>" +
+                "<td><b># running tasks</b></td><td><b>Failures</b></td>" +
                 "<td><b>Seconds since heartbeat</b></td></tr>\n");
       int maxFailures = 0;
       String failureKing = null;
@@ -50,7 +49,6 @@
         out.print(tt.getHost() + ":" + tt.getHttpPort() + "/\">");
         out.print(tt.getTrackerName() + "</a></td><td>");
         out.print(tt.getHost() + "</td><td>" + numCurTasks +
-                  "</td><td>" + tt.getMaxTasks() + 
                   "</td><td>" + numFailures + 
                   "</td><td>" + sinceHeartbeat + "</td></tr>\n");
       }