
HADOOP-1231. Add generics to Mapper and Reducer interfaces. Contributed by Tom White.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@566798 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 17 years ago
parent
commit
4ed40724fb
93 changed files with 946 additions and 598 deletions
  1. CHANGES.txt (+3, -0)
  2. src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java (+11, -9)
  3. src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java (+8, -5)
  4. src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java (+4, -4)
  5. src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamInputFormat.java (+5, -3)
  6. src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java (+4, -3)
  7. src/examples/org/apache/hadoop/examples/AggregateWordCount.java (+4, -4)
  8. src/examples/org/apache/hadoop/examples/PiEstimator.java (+14, -11)
  9. src/examples/org/apache/hadoop/examples/RandomWriter.java (+12, -9)
  10. src/examples/org/apache/hadoop/examples/WordCount.java (+11, -10)
  11. src/examples/org/apache/hadoop/examples/dancing/DistributedPentomino.java (+7, -6)
  12. src/java/org/apache/hadoop/mapred/FileInputFormat.java (+6, -2)
  13. src/java/org/apache/hadoop/mapred/InputFormat.java (+7, -4)
  14. src/java/org/apache/hadoop/mapred/KeyValueLineRecordReader.java (+30, -10)
  15. src/java/org/apache/hadoop/mapred/KeyValueTextInputFormat.java (+22, -3)
  16. src/java/org/apache/hadoop/mapred/LineRecordReader.java (+6, -8)
  17. src/java/org/apache/hadoop/mapred/MapFileOutputFormat.java (+5, -4)
  18. src/java/org/apache/hadoop/mapred/MapRunnable.java (+9, -2)
  19. src/java/org/apache/hadoop/mapred/MapRunner.java (+9, -5)
  20. src/java/org/apache/hadoop/mapred/MapTask.java (+14, -6)
  21. src/java/org/apache/hadoop/mapred/Mapper.java (+6, -3)
  22. src/java/org/apache/hadoop/mapred/MultiFileInputFormat.java (+6, -2)
  23. src/java/org/apache/hadoop/mapred/OutputCollector.java (+4, -2)
  24. src/java/org/apache/hadoop/mapred/OutputFormat.java (+6, -3)
  25. src/java/org/apache/hadoop/mapred/OutputFormatBase.java (+6, -2)
  26. src/java/org/apache/hadoop/mapred/Partitioner.java (+5, -2)
  27. src/java/org/apache/hadoop/mapred/RecordReader.java (+5, -4)
  28. src/java/org/apache/hadoop/mapred/RecordWriter.java (+3, -2)
  29. src/java/org/apache/hadoop/mapred/ReduceTask.java (+2, -0)
  30. src/java/org/apache/hadoop/mapred/Reducer.java (+6, -3)
  31. src/java/org/apache/hadoop/mapred/SequenceFileAsTextInputFormat.java (+8, -3)
  32. src/java/org/apache/hadoop/mapred/SequenceFileAsTextRecordReader.java (+30, -11)
  33. src/java/org/apache/hadoop/mapred/SequenceFileInputFilter.java (+12, -6)
  34. src/java/org/apache/hadoop/mapred/SequenceFileInputFormat.java (+7, -3)
  35. src/java/org/apache/hadoop/mapred/SequenceFileRecordReader.java (+15, -11)
  36. src/java/org/apache/hadoop/mapred/TextInputFormat.java (+9, -3)
  37. src/java/org/apache/hadoop/mapred/TextOutputFormat.java (+15, -8)
  38. src/java/org/apache/hadoop/mapred/lib/FieldSelectionMapReduce.java (+10, -6)
  39. src/java/org/apache/hadoop/mapred/lib/HashPartitioner.java (+4, -2)
  40. src/java/org/apache/hadoop/mapred/lib/IdentityMapper.java (+4, -3)
  41. src/java/org/apache/hadoop/mapred/lib/IdentityReducer.java (+5, -4)
  42. src/java/org/apache/hadoop/mapred/lib/InverseMapper.java (+8, -8)
  43. src/java/org/apache/hadoop/mapred/lib/KeyFieldBasedPartitioner.java (+4, -2)
  44. src/java/org/apache/hadoop/mapred/lib/LongSumReducer.java (+7, -4)
  45. src/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java (+17, -10)
  46. src/java/org/apache/hadoop/mapred/lib/NullOutputFormat.java (+7, -4)
  47. src/java/org/apache/hadoop/mapred/lib/RegexMapper.java (+14, -15)
  48. src/java/org/apache/hadoop/mapred/lib/TokenCountMapper.java (+11, -10)
  49. src/java/org/apache/hadoop/mapred/lib/aggregate/UserDefinedValueAggregatorDescriptor.java (+4, -2)
  50. src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorBaseDescriptor.java (+14, -12)
  51. src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorCombiner.java (+6, -4)
  52. src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorDescriptor.java (+2, -1)
  53. src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJobBase.java (+6, -1)
  54. src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorMapper.java (+14, -10)
  55. src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorReducer.java (+6, -4)
  56. src/java/org/apache/hadoop/mapred/pipes/Application.java (+11, -9)
  57. src/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java (+19, -12)
  58. src/java/org/apache/hadoop/mapred/pipes/DownwardProtocol.java (+4, -4)
  59. src/java/org/apache/hadoop/mapred/pipes/OutputHandler.java (+9, -7)
  60. src/java/org/apache/hadoop/mapred/pipes/PipesMapRunner.java (+10, -9)
  61. src/java/org/apache/hadoop/mapred/pipes/PipesPartitioner.java (+7, -3)
  62. src/java/org/apache/hadoop/mapred/pipes/PipesReducer.java (+13, -11)
  63. src/java/org/apache/hadoop/mapred/pipes/UpwardProtocol.java (+4, -4)
  64. src/java/org/apache/hadoop/tools/Logalyzer.java (+25, -20)
  65. src/java/org/apache/hadoop/util/CopyFiles.java (+4, -4)
  66. src/test/checkstyle.xml (+1, -0)
  67. src/test/org/apache/hadoop/fs/AccumulatingReducer.java (+10, -8)
  68. src/test/org/apache/hadoop/fs/DFSCIOTest.java (+1, -1)
  69. src/test/org/apache/hadoop/fs/DistributedFSCheck.java (+1, -1)
  70. src/test/org/apache/hadoop/fs/IOMapperBase.java (+9, -7)
  71. src/test/org/apache/hadoop/fs/TestDFSIO.java (+1, -1)
  72. src/test/org/apache/hadoop/fs/TestFileSystem.java (+18, -10)
  73. src/test/org/apache/hadoop/mapred/MRBench.java (+11, -6)
  74. src/test/org/apache/hadoop/mapred/MRCaching.java (+14, -9)
  75. src/test/org/apache/hadoop/mapred/PiEstimator.java (+15, -11)
  76. src/test/org/apache/hadoop/mapred/SortValidator.java (+35, -22)
  77. src/test/org/apache/hadoop/mapred/TestComparators.java (+39, -25)
  78. src/test/org/apache/hadoop/mapred/TestKeyValueTextInputFormat.java (+8, -8)
  79. src/test/org/apache/hadoop/mapred/TestMapOutputType.java (+11, -10)
  80. src/test/org/apache/hadoop/mapred/TestMapRed.java (+49, -28)
  81. src/test/org/apache/hadoop/mapred/TestMiniMRLocalFS.java (+25, -16)
  82. src/test/org/apache/hadoop/mapred/TestSequenceFileAsTextInputFormat.java (+5, -4)
  83. src/test/org/apache/hadoop/mapred/TestSequenceFileInputFilter.java (+3, -2)
  84. src/test/org/apache/hadoop/mapred/TestSequenceFileInputFormat.java (+3, -2)
  85. src/test/org/apache/hadoop/mapred/TestTextInputFormat.java (+6, -6)
  86. src/test/org/apache/hadoop/mapred/TestTextOutputFormat.java (+10, -9)
  87. src/test/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java (+9, -6)
  88. src/test/org/apache/hadoop/mapred/lib/aggregate/AggregatorTests.java (+3, -3)
  89. src/test/org/apache/hadoop/mapred/pipes/WordCountInputFormat.java (+10, -7)
  90. src/test/org/apache/hadoop/record/TestRecordMR.java (+47, -27)
  91. src/test/org/apache/hadoop/record/TestRecordWritable.java (+3, -2)
  92. src/test/testjar/ClassWordCount.java (+5, -2)
  93. src/test/testjar/ExternalMapperReducer.java (+9, -4)
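
For context before the per-file diffs, here is a minimal, self-contained sketch (not part of the commit) of a word-count style map/reduce pair written against the generified interfaces, in the spirit of the WordCount.java change shown below. The class names GenericWordCount, TokenMapper and SumReducer are illustrative only.

import java.io.IOException;
import java.util.Iterator;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

public class GenericWordCount {

  // Type parameters declare the input pair (LongWritable offset, Text line) and
  // the output pair (Text word, IntWritable count), so map() needs no casts.
  public static class TokenMapper extends MapReduceBase
      implements Mapper<LongWritable, Text, Text, IntWritable> {

    private final static IntWritable ONE = new IntWritable(1);
    private final Text word = new Text();

    public void map(LongWritable key, Text value,
                    OutputCollector<Text, IntWritable> output,
                    Reporter reporter) throws IOException {
      StringTokenizer itr = new StringTokenizer(value.toString());
      while (itr.hasMoreTokens()) {
        word.set(itr.nextToken());
        output.collect(word, ONE);   // key/value types are checked at compile time
      }
    }
  }

  // The reducer's type parameters fix the intermediate and final pair types.
  public static class SumReducer extends MapReduceBase
      implements Reducer<Text, IntWritable, Text, IntWritable> {

    public void reduce(Text key, Iterator<IntWritable> values,
                       OutputCollector<Text, IntWritable> output,
                       Reporter reporter) throws IOException {
      int sum = 0;
      while (values.hasNext()) {
        sum += values.next().get();  // values arrive already typed, no casts
      }
      output.collect(key, new IntWritable(sum));
    }
  }
}

The type parameters replace the raw WritableComparable/Writable signatures and the explicit casts that the pre-generics interfaces required, which is the pattern repeated across the diffs that follow.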

+ 3 - 0
CHANGES.txt

@@ -53,6 +53,9 @@ Trunk (unreleased changes)
     HADOOP-1693.  Remove un-needed log fields in DFS replication classes,
     since the log may be accessed statically. (Konstantin Shvachko via cutting)
 
+    HADOOP-1231.  Add generics to Mapper and Reducer interfaces.
+    (tomwhite via cutting)
+
 
 Release 0.14.0 - 2007-08-17
 

+ 11 - 9
src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java

@@ -48,7 +48,9 @@ import org.apache.log4j.Logger;
 /**
  * Convert HBase tabular data into a format that is consumable by Map/Reduce
  */
-public class TableInputFormat implements InputFormat, JobConfigurable {
+public class TableInputFormat
+  implements InputFormat<HStoreKey, KeyedDataArrayWritable>, JobConfigurable {
+  
   static final Logger LOG = Logger.getLogger(TableInputFormat.class.getName());
 
   /**
@@ -64,7 +66,7 @@ public class TableInputFormat implements InputFormat, JobConfigurable {
   /**
    * Iterate over an HBase table data, return (HStoreKey, KeyedDataArrayWritable) pairs
    */
-  class TableRecordReader implements RecordReader {
+  class TableRecordReader implements RecordReader<HStoreKey, KeyedDataArrayWritable> {
     private HScannerInterface m_scanner;
     private TreeMap<Text, byte[]> m_row; // current buffer
     private Text m_endRow;
@@ -95,7 +97,7 @@ public class TableInputFormat implements InputFormat, JobConfigurable {
      *
      * @see org.apache.hadoop.mapred.RecordReader#createKey()
      */
-    public WritableComparable createKey() {
+    public HStoreKey createKey() {
       return new HStoreKey();
     }
 
@@ -104,7 +106,7 @@ public class TableInputFormat implements InputFormat, JobConfigurable {
      *
      * @see org.apache.hadoop.mapred.RecordReader#createValue()
      */
-    public Writable createValue() {
+    public KeyedDataArrayWritable createValue() {
       return new KeyedDataArrayWritable();
     }
 
@@ -130,17 +132,17 @@ public class TableInputFormat implements InputFormat, JobConfigurable {
      * @return true if there was more data
      * @throws IOException
      */
-    public boolean next(Writable key, Writable value) throws IOException {
+    public boolean next(HStoreKey key, KeyedDataArrayWritable value) throws IOException {
       LOG.debug("start next");
       m_row.clear();
-      HStoreKey tKey = (HStoreKey)key;
+      HStoreKey tKey = key;
       boolean hasMore = m_scanner.next(tKey, m_row);
 
       if(hasMore) {
         if(m_endRow.getLength() > 0 && (tKey.getRow().compareTo(m_endRow) < 0)) {
           hasMore = false;
         } else {
-          KeyedDataArrayWritable rowVal = (KeyedDataArrayWritable) value;
+          KeyedDataArrayWritable rowVal = value;
           ArrayList<KeyedData> columns = new ArrayList<KeyedData>();
 
           for(Map.Entry<Text, byte[]> e: m_row.entrySet()) {
@@ -159,8 +161,8 @@ public class TableInputFormat implements InputFormat, JobConfigurable {
 
   }
 
-  /** {@inheritDoc} */
-  public RecordReader getRecordReader(InputSplit split,
+  public RecordReader<HStoreKey, KeyedDataArrayWritable> getRecordReader(
+      InputSplit split,
       @SuppressWarnings("unused") JobConf job,
       @SuppressWarnings("unused") Reporter reporter) throws IOException {
     

+ 8 - 5
src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java

@@ -42,7 +42,8 @@ import org.apache.log4j.Logger;
 /**
  * Convert Map/Reduce output and write it to an HBase table
  */
-public class TableOutputFormat extends OutputFormatBase {
+public class TableOutputFormat
+  extends OutputFormatBase<Text, KeyedDataArrayWritable> {
 
   /** JobConf parameter that specifies the output table */
   public static final String OUTPUT_TABLE = "hbase.mapred.outputtable";
@@ -56,7 +57,9 @@ public class TableOutputFormat extends OutputFormatBase {
    * Convert Reduce output (key, value) to (HStoreKey, KeyedDataArrayWritable) 
    * and write to an HBase table
    */
-  protected class TableRecordWriter implements RecordWriter {
+  protected class TableRecordWriter
+    implements RecordWriter<Text, KeyedDataArrayWritable> {
+    
     private HTable m_table;
 
     /**
@@ -77,10 +80,10 @@ public class TableOutputFormat extends OutputFormatBase {
      *
      * @see org.apache.hadoop.mapred.RecordWriter#write(org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable)
      */
-    public void write(WritableComparable key, Writable value) throws IOException {
+    public void write(Text key, KeyedDataArrayWritable value) throws IOException {
       LOG.debug("start write");
-      Text tKey = (Text)key;
-      KeyedDataArrayWritable tValue = (KeyedDataArrayWritable) value;
+      Text tKey = key;
+      KeyedDataArrayWritable tValue = value;
       KeyedData[] columns = tValue.get();
 
       // start transaction

+ 4 - 4
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java

@@ -39,7 +39,7 @@ import org.apache.commons.logging.*;
  * @see StreamLineRecordReader
  * @see StreamXmlRecordReader 
  */
-public abstract class StreamBaseRecordReader implements RecordReader {
+public abstract class StreamBaseRecordReader implements RecordReader<Text, Text> {
 
   protected static final Log LOG = LogFactory.getLog(StreamBaseRecordReader.class.getName());
 
@@ -65,7 +65,7 @@ public abstract class StreamBaseRecordReader implements RecordReader {
 
   /** Read a record. Implementation should call numRecStats at the end
    */
-  public abstract boolean next(Writable key, Writable value) throws IOException;
+  public abstract boolean next(Text key, Text value) throws IOException;
 
   /** This implementation always returns true. */
   public void validateInput(JobConf job) throws IOException {
@@ -89,11 +89,11 @@ public abstract class StreamBaseRecordReader implements RecordReader {
     }
   }
   
-  public WritableComparable createKey() {
+  public Text createKey() {
     return new Text();
   }
 
-  public Writable createValue() {
+  public Text createValue() {
     return new Text();
   }
 

+ 5 - 3
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamInputFormat.java

@@ -23,6 +23,7 @@ import java.lang.reflect.*;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.io.Text;
 
 import org.apache.hadoop.mapred.*;
 
@@ -33,7 +34,8 @@ import org.apache.hadoop.mapred.*;
  */
 public class StreamInputFormat extends KeyValueTextInputFormat {
 
-  public RecordReader getRecordReader(final InputSplit genericSplit,
+  @SuppressWarnings("unchecked")
+  public RecordReader<Text, Text> getRecordReader(final InputSplit genericSplit,
                                       JobConf job, Reporter reporter) throws IOException {
     String c = job.get("stream.recordreader.class");
     if (c == null || c.indexOf("LineRecordReader") >= 0) {
@@ -67,9 +69,9 @@ public class StreamInputFormat extends KeyValueTextInputFormat {
       throw new RuntimeException(nsm);
     }
 
-    RecordReader reader;
+    RecordReader<Text, Text> reader;
     try {
-      reader = (RecordReader) ctor.newInstance(new Object[] { in, split,
+      reader = (RecordReader<Text, Text>) ctor.newInstance(new Object[] { in, split,
                                                               reporter, job, fs });
     } catch (Exception nsm) {
       throw new RuntimeException(nsm);

+ 4 - 3
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java

@@ -24,6 +24,7 @@ import java.util.regex.*;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.mapred.Reporter;
@@ -75,7 +76,7 @@ public class StreamXmlRecordReader extends StreamBaseRecordReader {
   
   int numNext = 0;
 
-  public synchronized boolean next(Writable key, Writable value) throws IOException {
+  public synchronized boolean next(Text key, Text value) throws IOException {
     long pos = in_.getPos();
     numNext++;
     if (pos >= end_) {
@@ -96,8 +97,8 @@ public class StreamXmlRecordReader extends StreamBaseRecordReader {
 
     numRecStats(record, 0, record.length);
 
-    ((Text) key).set(record);
-    ((Text) value).set("");
+    key.set(record);
+    value.set("");
 
     /*if (numNext < 5) {
       System.out.println("@@@ " + numNext + ". true next k=|" + key.toString().replaceAll("[\\r\\n]", " ")

+ 4 - 4
src/examples/org/apache/hadoop/examples/AggregateWordCount.java

@@ -27,7 +27,6 @@ import org.apache.hadoop.mapred.JobConf;
 import java.io.IOException;
 
 import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.lib.aggregate.*;
 
 /**
@@ -43,13 +42,14 @@ public class AggregateWordCount {
 
   public static class WordCountPlugInClass extends
       ValueAggregatorBaseDescriptor {
-    public ArrayList<Entry> generateKeyValPairs(Object key, Object val) {
+    public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key,
+                                                            Object val) {
       String countType = LONG_VALUE_SUM;
-      ArrayList<Entry> retv = new ArrayList<Entry>();
+      ArrayList<Entry<Text, Text>> retv = new ArrayList<Entry<Text, Text>>();
       String line = val.toString();
       StringTokenizer itr = new StringTokenizer(line);
       while (itr.hasMoreTokens()) {
-        Entry e = generateEntry(countType, itr.nextToken(), ONE);
+        Entry<Text, Text> e = generateEntry(countType, itr.nextToken(), ONE);
         if (e != null) {
           retv.add(e);
         }

+ 14 - 11
src/examples/org/apache/hadoop/examples/PiEstimator.java

@@ -41,7 +41,8 @@ public class PiEstimator {
    * Mappper class for Pi estimation.
    */
   
-  public static class PiMapper extends MapReduceBase implements Mapper {
+  public static class PiMapper extends MapReduceBase
+    implements Mapper<LongWritable, Writable, LongWritable, LongWritable> {
     
     /** Mapper configuration.
      *
@@ -60,11 +61,11 @@ public class PiEstimator {
      * @param out
      * @param reporter
      */
-    public void map(WritableComparable key,
+    public void map(LongWritable key,
                     Writable val,
-                    OutputCollector out,
+                    OutputCollector<LongWritable, LongWritable> out,
                     Reporter reporter) throws IOException {
-      long nSamples = ((LongWritable) key).get();
+      long nSamples = key.get();
       for(long idx = 0; idx < nSamples; idx++) {
         double x = r.nextDouble();
         double y = r.nextDouble();
@@ -87,7 +88,9 @@ public class PiEstimator {
     }
   }
   
-  public static class PiReducer extends MapReduceBase implements Reducer {
+  public static class PiReducer extends MapReduceBase
+    implements Reducer<LongWritable, LongWritable, WritableComparable, Writable> {
+    
     long numInside = 0;
     long numOutside = 0;
     JobConf conf;
@@ -104,18 +107,18 @@ public class PiEstimator {
      * @param output
      * @param reporter
      */
-    public void reduce(WritableComparable key,
-                       Iterator values,
-                       OutputCollector output,
+    public void reduce(LongWritable key,
+                       Iterator<LongWritable> values,
+                       OutputCollector<WritableComparable, Writable> output,
                        Reporter reporter) throws IOException {
-      if (((LongWritable)key).get() == 1) {
+      if (key.get() == 1) {
         while (values.hasNext()) {
-          long num = ((LongWritable)values.next()).get();
+          long num = values.next().get();
           numInside += num;
         }
       } else {
         while (values.hasNext()) {
-          long num = ((LongWritable)values.next()).get();
+          long num = values.next().get();
           numOutside += num;
         }
       }

+ 12 - 9
src/examples/org/apache/hadoop/examples/RandomWriter.java

@@ -75,7 +75,7 @@ public class RandomWriter extends ToolBase {
    * A custom input format that creates virtual inputs of a single string
    * for each map.
    */
-  static class RandomInputFormat implements InputFormat {
+  static class RandomInputFormat implements InputFormat<Text, Text> {
     
     /** Accept all job confs */
     public void validateInput(JobConf job) throws IOException {
@@ -99,23 +99,23 @@ public class RandomWriter extends ToolBase {
      * Return a single record (filename, "") where the filename is taken from
      * the file split.
      */
-    static class RandomRecordReader implements RecordReader {
+    static class RandomRecordReader implements RecordReader<Text, Text> {
       Path name;
       public RandomRecordReader(Path p) {
         name = p;
       }
-      public boolean next(Writable key, Writable value) {
+      public boolean next(Text key, Text value) {
         if (name != null) {
-          ((Text) key).set(name.getName());
+          key.set(name.getName());
           name = null;
           return true;
         }
         return false;
       }
-      public WritableComparable createKey() {
+      public Text createKey() {
         return new Text();
       }
-      public Writable createValue() {
+      public Text createValue() {
         return new Text();
       }
       public long getPos() {
@@ -127,14 +127,17 @@ public class RandomWriter extends ToolBase {
       }
     }
 
-    public RecordReader getRecordReader(InputSplit split,
+    public RecordReader<Text, Text> getRecordReader(InputSplit split,
                                         JobConf job, 
                                         Reporter reporter) throws IOException {
       return new RandomRecordReader(((FileSplit) split).getPath());
     }
   }
 
-  static class Map extends MapReduceBase implements Mapper {
+  static class Map extends MapReduceBase
+    implements Mapper<WritableComparable, Writable,
+                      BytesWritable, BytesWritable> {
+    
     private long numBytesToWrite;
     private int minKeySize;
     private int keySizeRange;
@@ -155,7 +158,7 @@ public class RandomWriter extends ToolBase {
      */
     public void map(WritableComparable key, 
                     Writable value,
-                    OutputCollector output, 
+                    OutputCollector<BytesWritable, BytesWritable> output, 
                     Reporter reporter) throws IOException {
       int itemCount = 0;
       while (numBytesToWrite > 0) {

+ 11 - 10
src/examples/org/apache/hadoop/examples/WordCount.java

@@ -23,9 +23,8 @@ import java.util.*;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.JobClient;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Mapper;
@@ -50,15 +49,16 @@ public class WordCount {
    * For each line of input, break the line into words and emit them as
    * (<b>word</b>, <b>1</b>).
    */
-  public static class MapClass extends MapReduceBase implements Mapper {
+  public static class MapClass extends MapReduceBase
+    implements Mapper<LongWritable, Text, Text, IntWritable> {
     
     private final static IntWritable one = new IntWritable(1);
     private Text word = new Text();
     
-    public void map(WritableComparable key, Writable value, 
-                    OutputCollector output, 
+    public void map(LongWritable key, Text value, 
+                    OutputCollector<Text, IntWritable> output, 
                     Reporter reporter) throws IOException {
-      String line = ((Text)value).toString();
+      String line = value.toString();
       StringTokenizer itr = new StringTokenizer(line);
       while (itr.hasMoreTokens()) {
         word.set(itr.nextToken());
@@ -70,14 +70,15 @@ public class WordCount {
   /**
    * A reducer class that just emits the sum of the input values.
    */
-  public static class Reduce extends MapReduceBase implements Reducer {
+  public static class Reduce extends MapReduceBase
+    implements Reducer<Text, IntWritable, Text, IntWritable> {
     
-    public void reduce(WritableComparable key, Iterator values,
-                       OutputCollector output, 
+    public void reduce(Text key, Iterator<IntWritable> values,
+                       OutputCollector<Text, IntWritable> output, 
                        Reporter reporter) throws IOException {
       int sum = 0;
       while (values.hasNext()) {
-        sum += ((IntWritable) values.next()).get();
+        sum += values.next().get();
       }
       output.collect(key, new IntWritable(sum));
     }

+ 7 - 6
src/examples/org/apache/hadoop/examples/dancing/DistributedPentomino.java

@@ -24,7 +24,6 @@ import java.util.*;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.*;
 import org.apache.hadoop.mapred.lib.IdentityReducer;
@@ -46,13 +45,15 @@ public class DistributedPentomino {
    * the solutions that start with that prefix. The output is the prefix as
    * the key and the solution as the value.
    */
-  public static class PentMap extends MapReduceBase implements Mapper {
+  public static class PentMap extends MapReduceBase
+    implements Mapper<WritableComparable, Text, Text, Text> {
+    
     private int width;
     private int height;
     private int depth;
     private Pentomino pent;
     private Text prefixString;
-    private OutputCollector output;
+    private OutputCollector<Text, Text> output;
     private Reporter reporter;
     
     /**
@@ -81,12 +82,12 @@ public class DistributedPentomino {
      * will be selected for each column in order). Find all solutions with
      * that prefix.
      */
-    public void map(WritableComparable key, Writable value,
-                    OutputCollector output, Reporter reporter
+    public void map(WritableComparable key, Text value,
+                    OutputCollector<Text, Text> output, Reporter reporter
                     ) throws IOException {
       this.output = output;
       this.reporter = reporter;
-      prefixString = (Text) value;
+      prefixString = value;
       StringTokenizer itr = new StringTokenizer(prefixString.toString(), ",");
       int[] prefix = new int[depth];
       int idx = 0;

+ 6 - 2
src/java/org/apache/hadoop/mapred/FileInputFormat.java

@@ -28,12 +28,16 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
 
 /** 
  * A base class for {@link InputFormat}. 
  * 
  */
-public abstract class FileInputFormat implements InputFormat {
+public abstract class FileInputFormat<K extends WritableComparable,
+                                      V extends Writable>
+  implements InputFormat<K, V> {
 
   public static final Log LOG =
     LogFactory.getLog("org.apache.hadoop.mapred.FileInputFormat");
@@ -62,7 +66,7 @@ public abstract class FileInputFormat implements InputFormat {
     return true;
   }
   
-  public abstract RecordReader getRecordReader(InputSplit split,
+  public abstract RecordReader<K, V> getRecordReader(InputSplit split,
                                                JobConf job,
                                                Reporter reporter)
     throws IOException;

+ 7 - 4
src/java/org/apache/hadoop/mapred/InputFormat.java

@@ -22,12 +22,15 @@ import java.io.IOException;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
 
 /** An input data format.  Input files are stored in a {@link FileSystem}.
  * The processing of an input file may be split across multiple machines.
  * Files are processed as sequences of records, implementing {@link
  * RecordReader}.  Files must thus be split on record boundaries. */
-public interface InputFormat {
+public interface InputFormat<K extends WritableComparable,
+                             V extends Writable> {
 
   /**
    * Are the input directories valid? This method is used to test the input
@@ -52,8 +55,8 @@ public interface InputFormat {
    * @param job the job that this split belongs to
    * @return a {@link RecordReader}
    */
-  RecordReader getRecordReader(InputSplit split,
-                               JobConf job, 
-                               Reporter reporter) throws IOException;
+  RecordReader<K, V> getRecordReader(InputSplit split,
+                                     JobConf job, 
+                                     Reporter reporter) throws IOException;
 }
 

+ 30 - 10
src/java/org/apache/hadoop/mapred/KeyValueLineRecordReader.java

@@ -21,9 +21,8 @@ package org.apache.hadoop.mapred;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
 
 /**
  * This class treats a line in the input as a key/value pair separated by a 
@@ -31,23 +30,32 @@ import org.apache.hadoop.io.WritableComparable;
  * under the attribute name key.value.separator.in.input.line. The default
  * separator is the tab character ('\t').
  */
-public class KeyValueLineRecordReader extends LineRecordReader {
+public class KeyValueLineRecordReader implements RecordReader<Text, Text> {
+  
+  private final LineRecordReader lineRecordReader;
 
   private byte separator = (byte) '\t';
 
-  private WritableComparable dummyKey = super.createKey();
+  private LongWritable dummyKey;
 
-  private Text innerValue = (Text) super.createValue();
+  private Text innerValue;
 
   public Class getKeyClass() { return Text.class; }
   
   public Text createKey() {
     return new Text();
   }
+  
+  public Text createValue() {
+    return new Text();
+  }
 
   public KeyValueLineRecordReader(Configuration job, FileSplit split)
     throws IOException {
-    super(job, split);
+    
+    lineRecordReader = new LineRecordReader(job, split);
+    dummyKey = lineRecordReader.createKey();
+    innerValue = lineRecordReader.createValue();
     String sepStr = job.get("key.value.separator.in.input.line", "\t");
     this.separator = (byte) sepStr.charAt(0);
   }
@@ -62,13 +70,13 @@ public class KeyValueLineRecordReader extends LineRecordReader {
   }
 
   /** Read key/value pair in a line. */
-  public synchronized boolean next(Writable key, Writable value)
+  public synchronized boolean next(Text key, Text value)
     throws IOException {
-    Text tKey = (Text) key;
-    Text tValue = (Text) value;
+    Text tKey = key;
+    Text tValue = value;
     byte[] line = null;
     int lineLen = -1;
-    if (super.next(dummyKey, innerValue)) {
+    if (lineRecordReader.next(dummyKey, innerValue)) {
       line = innerValue.getBytes();
       lineLen = innerValue.getLength();
     } else {
@@ -92,4 +100,16 @@ public class KeyValueLineRecordReader extends LineRecordReader {
     }
     return true;
   }
+  
+  public float getProgress() {
+    return lineRecordReader.getProgress();
+  }
+  
+  public synchronized long getPos() throws IOException {
+    return lineRecordReader.getPos();
+  }
+
+  public synchronized void close() throws IOException { 
+    lineRecordReader.close();
+  }
 }

+ 22 - 3
src/java/org/apache/hadoop/mapred/KeyValueTextInputFormat.java

@@ -20,16 +20,35 @@ package org.apache.hadoop.mapred;
 
 import java.io.IOException;
 
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.compress.CompressionCodecFactory;
+
 /**
  * An {@link InputFormat} for plain text files. Files are broken into lines.
  * Either linefeed or carriage-return are used to signal end of line. Each line
  * is divided into key and value parts by a separator byte. If no such a byte
  * exists, the key will be the entire line and value will be empty.
  */
-public class KeyValueTextInputFormat extends TextInputFormat {
+public class KeyValueTextInputFormat extends FileInputFormat<Text, Text>
+  implements JobConfigurable {
 
-  public RecordReader getRecordReader(InputSplit genericSplit, JobConf job,
-                                      Reporter reporter) throws IOException {
+  private CompressionCodecFactory compressionCodecs = null;
+  
+  public void configure(JobConf conf) {
+    compressionCodecs = new CompressionCodecFactory(conf);
+  }
+  
+  protected boolean isSplitable(FileSystem fs, Path file) {
+    return compressionCodecs.getCodec(file) == null;
+  }
+  
+  public RecordReader<Text, Text> getRecordReader(InputSplit genericSplit,
+                                                  JobConf job,
+                                                  Reporter reporter)
+    throws IOException {
+    
     reporter.setStatus(genericSplit.toString());
     return new KeyValueLineRecordReader(job, (FileSplit) genericSplit);
   }

+ 6 - 8
src/java/org/apache/hadoop/mapred/LineRecordReader.java

@@ -30,15 +30,13 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressionCodecFactory;
 
 /**
  * Treats keys as offset in file and value as line. 
  */
-public class LineRecordReader implements RecordReader {
+public class LineRecordReader implements RecordReader<LongWritable, Text> {
   private CompressionCodecFactory compressionCodecs = null;
   private long start; 
   private long pos;
@@ -100,28 +98,28 @@ public class LineRecordReader implements RecordReader {
     //    readLine(in, null); 
   }
   
-  public WritableComparable createKey() {
+  public LongWritable createKey() {
     return new LongWritable();
   }
   
-  public Writable createValue() {
+  public Text createValue() {
     return new Text();
   }
   
   /** Read a line. */
-  public synchronized boolean next(Writable key, Writable value)
+  public synchronized boolean next(LongWritable key, Text value)
     throws IOException {
     if (pos >= end)
       return false;
 
-    ((LongWritable)key).set(pos);           // key is position
+    key.set(pos);           // key is position
     buffer.reset();
     long bytesRead = readLine();
     if (bytesRead == 0) {
       return false;
     }
     pos += bytesRead;
-    bridge.target = (Text) value;
+    bridge.target = value;
     buffer.writeTo(bridge);
     return true;
   }

+ 5 - 4
src/java/org/apache/hadoop/mapred/MapFileOutputFormat.java

@@ -78,10 +78,11 @@ public class MapFileOutputFormat extends OutputFormatBase {
   }
     
   /** Get an entry from output generated by this class. */
-  public static Writable getEntry(MapFile.Reader[] readers,
-                                  Partitioner partitioner,
-                                  WritableComparable key,
-                                  Writable value) throws IOException {
+  public static <K extends WritableComparable, V extends Writable>
+  Writable getEntry(MapFile.Reader[] readers,
+                                  Partitioner<K, V> partitioner,
+                                  K key,
+                                  V value) throws IOException {
     int part = partitioner.getPartition(key, value, readers.length);
     return readers[part].get(key, value);
   }

+ 9 - 2
src/java/org/apache/hadoop/mapred/MapRunnable.java

@@ -20,13 +20,20 @@ package org.apache.hadoop.mapred;
 
 import java.io.IOException;
 
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
+
 /** Expert: Permits greater control of map processing. For example,
  * implementations might perform multi-threaded, asynchronous mappings. */
-public interface MapRunnable extends JobConfigurable {
+public interface MapRunnable<K1 extends WritableComparable, V1 extends Writable,
+                             K2 extends WritableComparable, V2 extends Writable>
+    extends JobConfigurable {
+  
   /** Called to execute mapping.  Mapping is complete when this returns.
    * @param input the {@link RecordReader} with input key/value pairs.
    * @param output the {@link OutputCollector} for mapped key/value pairs.
    */
-  void run(RecordReader input, OutputCollector output, Reporter reporter)
+  void run(RecordReader<K1, V1> input, OutputCollector<K2, V2> output,
+           Reporter reporter)
     throws IOException;
 }

+ 9 - 5
src/java/org/apache/hadoop/mapred/MapRunner.java

@@ -25,21 +25,25 @@ import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.util.ReflectionUtils;
 
 /** Default {@link MapRunnable} implementation.*/
-public class MapRunner implements MapRunnable {
-  private Mapper mapper;
+public class MapRunner<K1 extends WritableComparable, V1 extends Writable,
+                       K2 extends WritableComparable, V2 extends Writable>
+    implements MapRunnable<K1, V1, K2, V2> {
+  
+  private Mapper<K1, V1, K2, V2> mapper;
 
+  @SuppressWarnings("unchecked")
   public void configure(JobConf job) {
     this.mapper = (Mapper)ReflectionUtils.newInstance(job.getMapperClass(),
                                                       job);
   }
 
-  public void run(RecordReader input, OutputCollector output,
+  public void run(RecordReader<K1, V1> input, OutputCollector<K2, V2> output,
                   Reporter reporter)
     throws IOException {
     try {
       // allocate key & value instances that are re-used for all entries
-      WritableComparable key = input.createKey();
-      Writable value = input.createValue();
+      K1 key = input.createKey();
+      V1 value = input.createValue();
       
       while (input.next(key, value)) {
         // map pair to output

+ 14 - 6
src/java/org/apache/hadoop/mapred/MapTask.java

@@ -112,6 +112,7 @@ class MapTask extends Task {
     return instantiatedSplit;
   }
 
+  @SuppressWarnings("unchecked")
   public void run(final JobConf job, final TaskUmbilicalProtocol umbilical)
     throws IOException {
 
@@ -163,7 +164,7 @@ class MapTask extends Task {
           return rawIn.createValue();
         }
          
-        public synchronized boolean next(Writable key, Writable value)
+        public synchronized boolean next(WritableComparable key, Writable value)
           throws IOException {
 
           setProgress(getProgress());
@@ -196,7 +197,9 @@ class MapTask extends Task {
     done(umbilical);
   }
 
-  interface MapOutputCollector extends OutputCollector {
+  interface MapOutputCollector<K extends WritableComparable,
+                               V extends Writable>
+    extends OutputCollector<K, V> {
 
     public void close() throws IOException;
     
@@ -204,12 +207,15 @@ class MapTask extends Task {
         
   }
 
-  class DirectMapOutputCollector implements MapOutputCollector {
-
-    private RecordWriter out = null;
+  class DirectMapOutputCollector<K extends WritableComparable,
+                                 V extends Writable>
+    implements MapOutputCollector<K, V> {
+ 
+    private RecordWriter<K, V> out = null;
 
     private Reporter reporter = null;
 
+    @SuppressWarnings("unchecked")
     public DirectMapOutputCollector(TaskUmbilicalProtocol umbilical,
         JobConf job, Reporter reporter) throws IOException {
       this.reporter = reporter;
@@ -231,7 +237,7 @@ class MapTask extends Task {
       
     }
 
-    public void collect(WritableComparable key, Writable value) throws IOException {
+    public void collect(K key, V value) throws IOException {
       this.out.write(key, value);
     }
     
@@ -315,6 +321,7 @@ class MapTask extends Task {
       indexOut.writeLong(out.getPos()-segmentStart);
     }
     
+    @SuppressWarnings("unchecked")
     public void collect(WritableComparable key,
                         Writable value) throws IOException {
       
@@ -420,6 +427,7 @@ class MapTask extends Task {
       }
     }
     
+    @SuppressWarnings("unchecked")
     private void combineAndSpill(RawKeyValueIterator resultIter, 
                                  Reducer combiner, OutputCollector combineCollector) throws IOException {
       //combine the key/value obtained from the offset & indices arrays.

+ 6 - 3
src/java/org/apache/hadoop/mapred/Mapper.java

@@ -28,7 +28,10 @@ import org.apache.hadoop.io.WritableComparable;
  * intermediate values associated with a given output key are subsequently
  * grouped by the map/reduce system, and passed to a {@link Reducer} to
  * determine the final output.. */
-public interface Mapper extends JobConfigurable, Closeable {
+public interface Mapper<K1 extends WritableComparable, V1 extends Writable,
+                        K2 extends WritableComparable, V2 extends Writable>
+  extends JobConfigurable, Closeable {
+  
   /** Maps a single input key/value pair into intermediate key/value pairs.
    * Output pairs need not be of the same types as input pairs.  A given input
    * pair may map to zero or many output pairs.  Output pairs are collected
@@ -39,7 +42,7 @@ public interface Mapper extends JobConfigurable, Closeable {
    * @param value the values
    * @param output collects mapped keys and values
    */
-  void map(WritableComparable key, Writable value,
-           OutputCollector output, Reporter reporter)
+  void map(K1 key, V1 value,
+           OutputCollector<K2, V2> output, Reporter reporter)
     throws IOException;
 }

+ 6 - 2
src/java/org/apache/hadoop/mapred/MultiFileInputFormat.java

@@ -22,6 +22,8 @@ import java.io.IOException;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
 
 /**
  * An abstract {@link InputFormat} that returns {@link MultiFileSplit}'s
@@ -32,7 +34,9 @@ import org.apache.hadoop.fs.Path;
  * to construct <code>RecordReader</code>'s for <code>MultiFileSplit</code>'s.
  * @see MultiFileSplit
  */
-public abstract class MultiFileInputFormat extends FileInputFormat {
+public abstract class MultiFileInputFormat<K extends WritableComparable,
+                                           V extends Writable>
+  extends FileInputFormat<K, V> {
 
   @Override
   public InputSplit[] getSplits(JobConf job, int numSplits) 
@@ -87,7 +91,7 @@ public abstract class MultiFileInputFormat extends FileInputFormat {
     return lengths.length - startIndex;
   }
   
-  public abstract RecordReader getRecordReader(InputSplit split,
+  public abstract RecordReader<K, V> getRecordReader(InputSplit split,
       JobConf job, Reporter reporter)
       throws IOException;
 }

+ 4 - 2
src/java/org/apache/hadoop/mapred/OutputCollector.java

@@ -26,11 +26,13 @@ import org.apache.hadoop.io.WritableComparable;
 
 /** Passed to {@link Mapper} and {@link Reducer} implementations to collect
  * output data. */
-public interface OutputCollector {
+public interface OutputCollector<K extends WritableComparable,
+                                 V extends Writable> {
+  
   /** Adds a key/value pair to the output.
    *
    * @param key the key to add
    * @param value to value to add
    */
-  void collect(WritableComparable key, Writable value) throws IOException;
+  void collect(K key, V value) throws IOException;
 }

+ 6 - 3
src/java/org/apache/hadoop/mapred/OutputFormat.java

@@ -21,11 +21,14 @@ package org.apache.hadoop.mapred;
 import java.io.IOException;
 
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.util.Progressable;
 
 /** An output data format.  Output files are stored in a {@link
  * FileSystem}. */
-public interface OutputFormat {
+public interface OutputFormat<K extends WritableComparable,
+                              V extends Writable> {
 
   /** Construct a {@link RecordWriter} with Progressable.
    *
@@ -34,8 +37,8 @@ public interface OutputFormat {
    * @param progress mechanism for reporting progress while writing to file
    * @return a {@link RecordWriter}
    */
-  RecordWriter getRecordWriter(FileSystem ignored, JobConf job, String name,
-                               Progressable progress)
+  RecordWriter<K, V> getRecordWriter(FileSystem ignored, JobConf job,
+                                     String name, Progressable progress)
     throws IOException;
 
   /** Check whether the output specification for a job is appropriate.  Called

+ 6 - 2
src/java/org/apache/hadoop/mapred/OutputFormatBase.java

@@ -22,11 +22,15 @@ import java.io.IOException;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.util.Progressable;
 
 /** A base class for {@link OutputFormat}. */
-public abstract class OutputFormatBase implements OutputFormat {
+public abstract class OutputFormatBase<K extends WritableComparable,
+                                       V extends Writable>
+  implements OutputFormat<K, V> {
 
   /**
    * Set whether the output of the reduce is compressed
@@ -79,7 +83,7 @@ public abstract class OutputFormatBase implements OutputFormat {
     }
   }
   
-  public abstract RecordWriter getRecordWriter(FileSystem ignored,
+  public abstract RecordWriter<K, V> getRecordWriter(FileSystem ignored,
                                                JobConf job, String name,
                                                Progressable progress)
     throws IOException;

+ 5 - 2
src/java/org/apache/hadoop/mapred/Partitioner.java

@@ -22,7 +22,10 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
 
 /** Partitions the key space.  A partition is created for each reduce task. */
-public interface Partitioner extends JobConfigurable {
+public interface Partitioner<K2 extends WritableComparable,
+                             V2 extends Writable>
+  extends JobConfigurable {
+  
   /** Returns the paritition number for a given entry given the total number of
    * partitions.  Typically a hash function on a all or a subset of the key.
    *
@@ -31,5 +34,5 @@ public interface Partitioner extends JobConfigurable {
    * @param numPartitions the number of partitions
    * @return the partition number
    */
-  int getPartition(WritableComparable key, Writable value, int numPartitions);
+  int getPartition(K2 key, V2 value, int numPartitions);
 }
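
As an illustration (not part of the commit), a partitioner written against the generified Partitioner<K2, V2> interface might look like the following hash-based sketch, similar in spirit to the generified lib/HashPartitioner in this change; the class name TextHashPartitioner is hypothetical.

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Partitioner;

// Hypothetical example: partitions typed (Text, IntWritable) map output by key hash.
public class TextHashPartitioner implements Partitioner<Text, IntWritable> {

  public void configure(JobConf job) {
    // no configuration needed for this sketch
  }

  // The key and value arrive with their declared types; no cast from
  // WritableComparable/Writable is required.
  public int getPartition(Text key, IntWritable value, int numPartitions) {
    return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
  }
}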

+ 5 - 4
src/java/org/apache/hadoop/mapred/RecordReader.java

@@ -26,7 +26,8 @@ import org.apache.hadoop.io.WritableComparable;
 
 /** Reads key/value pairs from an input file {@link FileSplit}.
  * Implemented by {@link InputFormat} implementations. */
-public interface RecordReader {
+public interface RecordReader<K extends WritableComparable,
+                              V extends Writable> {
   /** Reads the next key/value pair.
    *
    * @param key the key to read data into
@@ -35,19 +36,19 @@ public interface RecordReader {
    *
    * @see Writable#readFields(DataInput)
    */      
-  boolean next(Writable key, Writable value) throws IOException;
+  boolean next(K key, V value) throws IOException;
   
   /**
    * Create an object of the appropriate type to be used as a key.
    * @return a new key object
    */
-  WritableComparable createKey();
+  K createKey();
   
   /**
    * Create an object of the appropriate type to be used as the value.
    * @return a new value object
    */
-  Writable createValue();
+  V createValue();
 
   /** Returns the current position in the input. */
   long getPos() throws IOException;

+ 3 - 2
src/java/org/apache/hadoop/mapred/RecordWriter.java

@@ -26,7 +26,8 @@ import org.apache.hadoop.io.Writable;
 
 /** Writes key/value pairs to an output file.  Implemented by {@link
  * OutputFormat} implementations. */
-public interface RecordWriter {
+public interface RecordWriter<K extends WritableComparable,
+                              V extends Writable> {
   /** Writes a key/value pair.
    *
    * @param key the key to write
@@ -34,7 +35,7 @@ public interface RecordWriter {
    *
    * @see Writable#write(DataOutput)
    */      
-  void write(WritableComparable key, Writable value) throws IOException;
+  void write(K key, V value) throws IOException;
 
   /** Close this to future operations.*/ 
   void close(Reporter reporter) throws IOException;

+ 2 - 0
src/java/org/apache/hadoop/mapred/ReduceTask.java

@@ -235,6 +235,7 @@ class ReduceTask extends Task {
     }
   }
 
+  @SuppressWarnings("unchecked")
   public void run(JobConf job, final TaskUmbilicalProtocol umbilical)
     throws IOException {
     Reducer reducer = (Reducer)ReflectionUtils.newInstance(
@@ -299,6 +300,7 @@ class ReduceTask extends Task {
       job.getOutputFormat().getRecordWriter(fs, job, finalName, reporter);  
     
     OutputCollector collector = new OutputCollector() {
+        @SuppressWarnings("unchecked")
         public void collect(WritableComparable key, Writable value)
           throws IOException {
           out.write(key, value);

+ 6 - 3
src/java/org/apache/hadoop/mapred/Reducer.java

@@ -28,7 +28,10 @@ import org.apache.hadoop.io.WritableComparable;
 
 /** Reduces a set of intermediate values which share a key to a smaller set of
  * values.  Input values are the grouped output of a {@link Mapper}. */
-public interface Reducer extends JobConfigurable, Closeable {
+public interface Reducer<K2 extends WritableComparable, V2 extends Writable,
+                         K3 extends WritableComparable, V3 extends Writable>
+    extends JobConfigurable, Closeable {
+  
   /** Combines values for a given key.  Output values must be of the same type
    * as input values.  Input keys must not be altered.  Typically all values
    * are combined into zero or one value.  Output pairs are collected with
@@ -38,8 +41,8 @@ public interface Reducer extends JobConfigurable, Closeable {
    * @param values the values to combine
    * @param output to collect combined values
    */
-  void reduce(WritableComparable key, Iterator values,
-              OutputCollector output, Reporter reporter)
+  void reduce(K2 key, Iterator<V2> values,
+              OutputCollector<K3, V3> output, Reporter reporter)
     throws IOException;
 
 }

+ 8 - 3
src/java/org/apache/hadoop/mapred/SequenceFileAsTextInputFormat.java

@@ -20,18 +20,23 @@ package org.apache.hadoop.mapred;
 
 import java.io.IOException;
 
+import org.apache.hadoop.io.Text;
+
 /**
  * This class is similar to SequenceFileInputFormat, except it generates SequenceFileAsTextRecordReader 
  * which converts the input keys and values to their String forms by calling toString() method. 
  */
-public class SequenceFileAsTextInputFormat extends SequenceFileInputFormat {
+public class SequenceFileAsTextInputFormat
+  extends SequenceFileInputFormat<Text, Text> {
 
   public SequenceFileAsTextInputFormat() {
     super();
   }
 
-  public RecordReader getRecordReader(InputSplit split, JobConf job,
-                                      Reporter reporter) throws IOException {
+  public RecordReader<Text, Text> getRecordReader(InputSplit split,
+                                                  JobConf job,
+                                                  Reporter reporter)
+    throws IOException {
 
     reporter.setStatus(split.toString());
 

+ 30 - 11
src/java/org/apache/hadoop/mapred/SequenceFileAsTextRecordReader.java

@@ -30,34 +30,53 @@ import org.apache.hadoop.io.WritableComparable;
  * method. This class to SequenceFileAsTextInputFormat class is as LineRecordReader
  * class to TextInputFormat class.
  */
-public class SequenceFileAsTextRecordReader extends SequenceFileRecordReader {
+public class SequenceFileAsTextRecordReader
+  implements RecordReader<Text, Text> {
+  
+  private final SequenceFileRecordReader<WritableComparable, Writable>
+  sequenceFileRecordReader;
 
-  private Writable innerKey = super.createKey();
-  private Writable innerValue = super.createValue();
+  private WritableComparable innerKey;
+  private Writable innerValue;
 
   public SequenceFileAsTextRecordReader(Configuration conf, FileSplit split)
     throws IOException {
-    super(conf, split);
+    sequenceFileRecordReader =
+      new SequenceFileRecordReader<WritableComparable, Writable>(conf, split);
+    innerKey = sequenceFileRecordReader.createKey();
+    innerValue = sequenceFileRecordReader.createValue();
   }
 
-  public WritableComparable createKey() {
+  public Text createKey() {
     return new Text();
   }
   
-  public Writable createValue() {
+  public Text createValue() {
     return new Text();
   }
 
   /** Read key/value pair in a line. */
-  public synchronized boolean next(Writable key, Writable value)
-    throws IOException {
-    Text tKey = (Text) key;
-    Text tValue = (Text) value;
-    if (!super.next(innerKey, innerValue)) {
+  public synchronized boolean next(Text key, Text value) throws IOException {
+    Text tKey = key;
+    Text tValue = value;
+    if (!sequenceFileRecordReader.next(innerKey, innerValue)) {
       return false;
     }
     tKey.set(innerKey.toString());
     tValue.set(innerValue.toString());
     return true;
   }
+  
+  public float getProgress() throws IOException {
+    return sequenceFileRecordReader.getProgress();
+  }
+  
+  public synchronized long getPos() throws IOException {
+    return sequenceFileRecordReader.getPos();
+  }
+  
+  public synchronized void close() throws IOException {
+    sequenceFileRecordReader.close();
+  }
+  
 }

+ 12 - 6
src/java/org/apache/hadoop/mapred/SequenceFileInputFilter.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.util.ReflectionUtils;
 
 /**
@@ -41,7 +42,10 @@ import org.apache.hadoop.util.ReflectionUtils;
  * 
  */
 
-public class SequenceFileInputFilter extends SequenceFileInputFormat {
+public class SequenceFileInputFilter<K extends WritableComparable,
+                                     V extends Writable>
+  extends SequenceFileInputFormat<K, V> {
+  
   final private static String FILTER_CLASS = "sequencefile.filter.class";
   final private static String FILTER_FREQUENCY
     = "sequencefile.filter.frequency";
@@ -56,13 +60,13 @@ public class SequenceFileInputFilter extends SequenceFileInputFormat {
    * @param reporter reporter who sends report to task tracker
    * @return RecordReader
    */
-  public RecordReader getRecordReader(InputSplit split,
+  public RecordReader<K, V> getRecordReader(InputSplit split,
                                       JobConf job, Reporter reporter)
     throws IOException {
         
     reporter.setStatus(split.toString());
         
-    return new FilterRecordReader(job, (FileSplit) split);
+    return new FilterRecordReader<K, V>(job, (FileSplit) split);
   }
 
 
@@ -278,7 +282,10 @@ public class SequenceFileInputFilter extends SequenceFileInputFormat {
     }
   }
     
-  private static class FilterRecordReader extends SequenceFileRecordReader {
+  private static class FilterRecordReader<K extends WritableComparable,
+                                          V extends Writable>
+    extends SequenceFileRecordReader<K, V> {
+    
     private Filter filter;
         
     public FilterRecordReader(Configuration conf, FileSplit split)
@@ -290,8 +297,7 @@ public class SequenceFileInputFilter extends SequenceFileInputFormat {
                                                    conf);
     }
         
-    public synchronized boolean next(Writable key, Writable value)
-      throws IOException {
+    public synchronized boolean next(K key, V value) throws IOException {
       while (next(key)) {
         if (filter.accept(key)) {
           getCurrentValue(value);

+ 7 - 3
src/java/org/apache/hadoop/mapred/SequenceFileInputFormat.java

@@ -24,9 +24,13 @@ import org.apache.hadoop.fs.Path;
 
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.MapFile;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
 
 /** An {@link InputFormat} for {@link SequenceFile}s. */
-public class SequenceFileInputFormat extends FileInputFormat {
+public class SequenceFileInputFormat<K extends WritableComparable,
+                                     V extends Writable>
+  extends FileInputFormat<K, V> {
 
   public SequenceFileInputFormat() {
     setMinSplitSize(SequenceFile.SYNC_INTERVAL);
@@ -45,13 +49,13 @@ public class SequenceFileInputFormat extends FileInputFormat {
     return files;
   }
 
-  public RecordReader getRecordReader(InputSplit split,
+  public RecordReader<K, V> getRecordReader(InputSplit split,
                                       JobConf job, Reporter reporter)
     throws IOException {
 
     reporter.setStatus(split.toString());
 
-    return new SequenceFileRecordReader(job, (FileSplit) split);
+    return new SequenceFileRecordReader<K, V>(job, (FileSplit) split);
   }
 
 }
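
With the format parameterized, a driver still registers it by class; the type arguments only affect the RecordReader<K, V> it hands back. A usage sketch, assuming SequenceFiles of Text keys and LongWritable values, a hypothetical driver class, and an illustrative input path:

  JobConf conf = new JobConf(MyDriver.class);          // MyDriver is a placeholder driver class
  conf.setInputFormat(SequenceFileInputFormat.class);  // only the class is registered; generics are erased here
  conf.setInputPath(new Path("/data/seqfiles"));       // illustrative input path
  // getRecordReader(...) on this format now returns RecordReader<K, V>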

+ 15 - 11
src/java/org/apache/hadoop/mapred/SequenceFileRecordReader.java

@@ -28,7 +28,10 @@ import org.apache.hadoop.io.*;
 import org.apache.hadoop.util.ReflectionUtils;
 
 /** An {@link RecordReader} for {@link SequenceFile}s. */
-public class SequenceFileRecordReader implements RecordReader {
+public class SequenceFileRecordReader<K extends WritableComparable,
+                                      V extends Writable>
+  implements RecordReader<K, V> {
+  
   private SequenceFile.Reader in;
   private long start;
   private long end;
@@ -52,24 +55,25 @@ public class SequenceFileRecordReader implements RecordReader {
 
 
   /** The class of key that must be passed to {@link
-   * #next(Writable,Writable)}.. */
+   * #next(WritableComparable,Writable)}.. */
   public Class getKeyClass() { return in.getKeyClass(); }
 
   /** The class of value that must be passed to {@link
-   * #next(Writable,Writable)}.. */
+   * #next(WritableComparable,Writable)}.. */
   public Class getValueClass() { return in.getValueClass(); }
   
-  public WritableComparable createKey() {
-    return (WritableComparable) ReflectionUtils.newInstance(getKeyClass(), 
+  @SuppressWarnings("unchecked")
+  public K createKey() {
+    return (K) ReflectionUtils.newInstance(getKeyClass(), 
                                                             conf);
   }
   
-  public Writable createValue() {
-    return (Writable) ReflectionUtils.newInstance(getValueClass(), conf);
+  @SuppressWarnings("unchecked")
+  public V createValue() {
+    return (V) ReflectionUtils.newInstance(getValueClass(), conf);
   }
     
-  public synchronized boolean next(Writable key, Writable value)
-    throws IOException {
+  public synchronized boolean next(K key, V value) throws IOException {
     if (!more) return false;
     long pos = in.getPosition();
     boolean eof = in.next(key, value);
@@ -81,7 +85,7 @@ public class SequenceFileRecordReader implements RecordReader {
     return more;
   }
   
-  protected synchronized boolean next(Writable key)
+  protected synchronized boolean next(K key)
     throws IOException {
     if (!more) return false;
     long pos = in.getPosition();
@@ -94,7 +98,7 @@ public class SequenceFileRecordReader implements RecordReader {
     return more;
   }
   
-  protected synchronized void getCurrentValue(Writable value)
+  protected synchronized void getCurrentValue(V value)
     throws IOException {
     in.getCurrentValue(value);
   }
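
Because the reader is now typed, the casts formerly needed around createKey() and createValue() disappear on the caller's side. A minimal read loop, sketched under the assumption of a file holding Text keys and LongWritable values, with job and split obtained elsewhere and an enclosing method that throws IOException:

  SequenceFileRecordReader<Text, LongWritable> reader =
    new SequenceFileRecordReader<Text, LongWritable>(job, (FileSplit) split);
  Text key = reader.createKey();              // no cast to WritableComparable
  LongWritable value = reader.createValue();  // no cast to Writable
  while (reader.next(key, value)) {
    // consume the typed pair
  }
  reader.close();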

+ 9 - 3
src/java/org/apache/hadoop/mapred/TextInputFormat.java

@@ -21,12 +21,15 @@ package org.apache.hadoop.mapred;
 import java.io.*;
 
 import org.apache.hadoop.fs.*;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.compress.*;
 
 /** An {@link InputFormat} for plain text files.  Files are broken into lines.
  * Either linefeed or carriage-return are used to signal end of line.  Keys are
  * the position in the file, and values are the line of text.. */
-public class TextInputFormat extends FileInputFormat implements JobConfigurable {
+public class TextInputFormat extends FileInputFormat<LongWritable, Text>
+  implements JobConfigurable {
 
   private CompressionCodecFactory compressionCodecs = null;
   
@@ -38,8 +41,11 @@ public class TextInputFormat extends FileInputFormat implements JobConfigurable
     return compressionCodecs.getCodec(file) == null;
   }
 
-  public RecordReader getRecordReader(InputSplit genericSplit, JobConf job,
-                                      Reporter reporter) throws IOException {
+  public RecordReader<LongWritable, Text> getRecordReader(
+                                          InputSplit genericSplit, JobConf job,
+                                          Reporter reporter)
+    throws IOException {
+    
     reporter.setStatus(genericSplit.toString());
     return new LineRecordReader(job, (FileSplit) genericSplit);
   }

+ 15 - 8
src/java/org/apache/hadoop/mapred/TextOutputFormat.java

@@ -32,16 +32,21 @@ import org.apache.hadoop.io.compress.GzipCodec;
 import org.apache.hadoop.util.*;
 
 /** An {@link OutputFormat} that writes plain text files. */
-public class TextOutputFormat extends OutputFormatBase {
+public class TextOutputFormat<K extends WritableComparable,
+                              V extends Writable>
+  extends OutputFormatBase<K, V> {
 
-  protected static class LineRecordWriter implements RecordWriter {
+  protected static class LineRecordWriter<K extends WritableComparable,
+                                          V extends Writable>
+    implements RecordWriter<K, V> {
+    
     private DataOutputStream out;
     
     public LineRecordWriter(DataOutputStream out) {
       this.out = out;
     }
     
-    public synchronized void write(WritableComparable key, Writable value)
+    public synchronized void write(K key, V value)
       throws IOException {
 
       if (key == null && value == null) {
@@ -64,8 +69,10 @@ public class TextOutputFormat extends OutputFormatBase {
     }
   }
   
-  public RecordWriter getRecordWriter(FileSystem ignored, JobConf job,
-                                      String name, Progressable progress)
+  public RecordWriter<K, V> getRecordWriter(FileSystem ignored,
+                                                  JobConf job,
+                                                  String name,
+                                                  Progressable progress)
     throws IOException {
 
     Path dir = job.getOutputPath();
@@ -73,7 +80,7 @@ public class TextOutputFormat extends OutputFormatBase {
     boolean isCompressed = getCompressOutput(job);
     if (!isCompressed) {
       FSDataOutputStream fileOut = fs.create(new Path(dir, name), progress);
-      return new LineRecordWriter(fileOut);
+      return new LineRecordWriter<K, V>(fileOut);
     } else {
       Class codecClass = getOutputCompressorClass(job, GzipCodec.class);
       // create the named codec
@@ -82,8 +89,8 @@ public class TextOutputFormat extends OutputFormatBase {
       // build the filename including the extension
       Path filename = new Path(dir, name + codec.getDefaultExtension());
       FSDataOutputStream fileOut = fs.create(filename, progress);
-      return new LineRecordWriter(new DataOutputStream
-                                  (codec.createOutputStream(fileOut)));
+      return new LineRecordWriter<K, V>(new DataOutputStream
+                                        (codec.createOutputStream(fileOut)));
     }
   }      
 }

+ 10 - 6
src/java/org/apache/hadoop/mapred/lib/FieldSelectionMapReduce.java

@@ -63,7 +63,9 @@ import org.apache.hadoop.mapred.TextInputFormat;
  * the key is never ignored.
  * 
  */
-public class FieldSelectionMapReduce implements Mapper, Reducer {
+public class FieldSelectionMapReduce<K extends WritableComparable,
+                                     V extends Writable>
+    implements Mapper<K, V, Text, Text>, Reducer<Text, Text, Text, Text> {
 
   private String mapOutputKeyValueSpec;
 
@@ -133,8 +135,8 @@ public class FieldSelectionMapReduce implements Mapper, Reducer {
   /**
    * The identify function. Input key/value pair is written directly to output.
    */
-  public void map(WritableComparable key, Writable val, OutputCollector output,
-      Reporter reporter) throws IOException {
+  public void map(K key, V val,
+                  OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
     String valStr = val.toString();
     String[] inputValFields = valStr.split(this.fieldSeparator);
     String[] inputKeyFields = null;
@@ -180,7 +182,8 @@ public class FieldSelectionMapReduce implements Mapper, Reducer {
    * @param fieldList an array of field numbers extracted from the specs.
    * @return number n if some field spec is in the form of "n-", -1 otherwise.
    */
-  private int extractFields(String[] fieldListSpec, ArrayList<Integer> fieldList) {
+  private int extractFields(String[] fieldListSpec,
+                            ArrayList<Integer> fieldList) {
     int allFieldsFrom = -1;
     int i = 0;
     int j = 0;
@@ -310,8 +313,9 @@ public class FieldSelectionMapReduce implements Mapper, Reducer {
     return retv;
   }
 
-  public void reduce(WritableComparable key, Iterator values,
-      OutputCollector output, Reporter reporter) throws IOException {
+  public void reduce(Text key, Iterator<Text> values,
+                     OutputCollector<Text, Text> output, Reporter reporter)
+    throws IOException {
 
     String keyStr = key.toString() + this.fieldSeparator;
     while (values.hasNext()) {

+ 4 - 2
src/java/org/apache/hadoop/mapred/lib/HashPartitioner.java

@@ -25,12 +25,14 @@ import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.Writable;
 
 /** Partition keys by their {@link Object#hashCode()}. */
-public class HashPartitioner implements Partitioner {
+public class HashPartitioner<K2 extends WritableComparable,
+                             V2 extends Writable>
+    implements Partitioner<K2, V2> {
 
   public void configure(JobConf job) {}
 
   /** Use {@link Object#hashCode()} to partition. */
-  public int getPartition(WritableComparable key, Writable value,
+  public int getPartition(K2 key, V2 value,
                           int numReduceTasks) {
     return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
   }
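
A user-supplied partitioner is declared the same way. A sketch of one written against the generified interface, with the Text key type and the first-character scheme invented purely for illustration:

  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.io.Writable;
  import org.apache.hadoop.mapred.JobConf;
  import org.apache.hadoop.mapred.Partitioner;

  public class FirstCharPartitioner<V extends Writable>
      implements Partitioner<Text, V> {

    public void configure(JobConf job) {}

    public int getPartition(Text key, V value, int numReduceTasks) {
      String s = key.toString();
      int c = (s.length() == 0) ? 0 : s.charAt(0);      // partition on the key's first character
      return (c & Integer.MAX_VALUE) % numReduceTasks;
    }
  }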

+ 4 - 3
src/java/org/apache/hadoop/mapred/lib/IdentityMapper.java

@@ -29,12 +29,13 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
 
 /** Implements the identity function, mapping inputs directly to outputs. */
-public class IdentityMapper extends MapReduceBase implements Mapper {
+public class IdentityMapper<K extends WritableComparable, V extends Writable>
+    extends MapReduceBase implements Mapper<K, V, K, V> {
 
   /** The identify function.  Input key/value pair is written directly to
    * output.*/
-  public void map(WritableComparable key, Writable val,
-                  OutputCollector output, Reporter reporter)
+  public void map(K key, V val,
+                  OutputCollector<K, V> output, Reporter reporter)
     throws IOException {
     output.collect(key, val);
   }
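
A hand-written mapper declares its four types once and the compiler checks them, instead of relying on casts inside map(). A sketch that splits lines into words; the LongWritable/Text input types (as delivered by TextInputFormat) and the Text/IntWritable output types are illustrative choices:

  import java.io.IOException;
  import java.util.StringTokenizer;
  import org.apache.hadoop.io.IntWritable;
  import org.apache.hadoop.io.LongWritable;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.mapred.MapReduceBase;
  import org.apache.hadoop.mapred.Mapper;
  import org.apache.hadoop.mapred.OutputCollector;
  import org.apache.hadoop.mapred.Reporter;

  public class WordSplitMapper extends MapReduceBase
      implements Mapper<LongWritable, Text, Text, IntWritable> {

    private static final IntWritable ONE = new IntWritable(1);

    public void map(LongWritable key, Text value,
                    OutputCollector<Text, IntWritable> output,
                    Reporter reporter) throws IOException {
      StringTokenizer itr = new StringTokenizer(value.toString());
      while (itr.hasMoreTokens()) {
        output.collect(new Text(itr.nextToken()), ONE);  // typed collect, no casts
      }
    }
  }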

+ 5 - 4
src/java/org/apache/hadoop/mapred/lib/IdentityReducer.java

@@ -31,14 +31,15 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
 
 /** Performs no reduction, writing all input values directly to the output. */
-public class IdentityReducer extends MapReduceBase implements Reducer {
+public class IdentityReducer<K extends WritableComparable, V extends Writable>
+    extends MapReduceBase implements Reducer<K, V, K, V> {
 
   /** Writes all keys and values directly to output. */
-  public void reduce(WritableComparable key, Iterator values,
-                     OutputCollector output, Reporter reporter)
+  public void reduce(K key, Iterator<V> values,
+                     OutputCollector<K, V> output, Reporter reporter)
     throws IOException {
     while (values.hasNext()) {
-      output.collect(key, (Writable)values.next());
+      output.collect(key, values.next());
     }
   }
 	

+ 8 - 8
src/java/org/apache/hadoop/mapred/lib/InverseMapper.java

@@ -20,23 +20,23 @@ package org.apache.hadoop.mapred.lib;
 
 import java.io.IOException;
 
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.mapred.MapReduceBase;
 import org.apache.hadoop.mapred.Mapper;
 import org.apache.hadoop.mapred.OutputCollector;
 import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.MapReduceBase;
-
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.io.Writable;
 
 
 /** A {@link Mapper} that swaps keys and values. */
-public class InverseMapper extends MapReduceBase implements Mapper {
+public class InverseMapper<K extends WritableComparable,
+                           V extends WritableComparable>
+    extends MapReduceBase implements Mapper<K, V, V, K> {
 
   /** The inverse function.  Input keys and values are swapped.*/
-  public void map(WritableComparable key, Writable value,
-                  OutputCollector output, Reporter reporter)
+  public void map(K key, V value,
+                  OutputCollector<V, K> output, Reporter reporter)
     throws IOException {
-    output.collect((WritableComparable)value, key);
+    output.collect(value, key);
   }
   
 }

+ 4 - 2
src/java/org/apache/hadoop/mapred/lib/KeyFieldBasedPartitioner.java

@@ -23,7 +23,9 @@ import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Partitioner;
 
-public class KeyFieldBasedPartitioner implements Partitioner {
+public class KeyFieldBasedPartitioner<K2 extends WritableComparable,
+                                      V2 extends Writable>
+    implements Partitioner<K2, V2> {
 
   private int numOfPartitionFields;
 
@@ -35,7 +37,7 @@ public class KeyFieldBasedPartitioner implements Partitioner {
   }
 
   /** Use {@link Object#hashCode()} to partition. */
-  public int getPartition(WritableComparable key, Writable value,
+  public int getPartition(K2 key, V2 value,
       int numReduceTasks) {
     String partitionKeyStr = key.toString();
     String[] fields = partitionKeyStr.split(this.keyFieldSeparator);

+ 7 - 4
src/java/org/apache/hadoop/mapred/lib/LongSumReducer.java

@@ -30,16 +30,19 @@ import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.LongWritable;
 
 /** A {@link Reducer} that sums long values. */
-public class LongSumReducer extends MapReduceBase implements Reducer {
+public class LongSumReducer<K extends WritableComparable>
+    extends MapReduceBase
+    implements Reducer<K, LongWritable, K, LongWritable> {
 
-  public void reduce(WritableComparable key, Iterator values,
-                     OutputCollector output, Reporter reporter)
+  public void reduce(K key, Iterator<LongWritable> values,
+                     OutputCollector<K, LongWritable> output,
+                     Reporter reporter)
     throws IOException {
 
     // sum all values for this key
     long sum = 0;
     while (values.hasNext()) {
-      sum += ((LongWritable)values.next()).get();
+      sum += values.next().get();
     }
 
     // output sum
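
A user-defined reducer is written the same way, with the typed iterator removing the cast on values.next(). For instance, a reducer that keeps only the largest value per key, a variation on the class above chosen purely for illustration:

  import java.io.IOException;
  import java.util.Iterator;
  import org.apache.hadoop.io.LongWritable;
  import org.apache.hadoop.io.WritableComparable;
  import org.apache.hadoop.mapred.MapReduceBase;
  import org.apache.hadoop.mapred.OutputCollector;
  import org.apache.hadoop.mapred.Reducer;
  import org.apache.hadoop.mapred.Reporter;

  public class LongMaxReducer<K extends WritableComparable>
      extends MapReduceBase
      implements Reducer<K, LongWritable, K, LongWritable> {

    public void reduce(K key, Iterator<LongWritable> values,
                       OutputCollector<K, LongWritable> output,
                       Reporter reporter) throws IOException {
      long max = Long.MIN_VALUE;
      while (values.hasNext()) {
        max = Math.max(max, values.next().get());  // typed iterator, no cast
      }
      output.collect(key, new LongWritable(max));
    }
  }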

+ 17 - 10
src/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java

@@ -50,15 +50,21 @@ import java.util.concurrent.TimeUnit;
  * <b>mapred.map.multithreadedrunner.threads</b> property).
  * <p>
  */
-public class MultithreadedMapRunner implements MapRunnable {
+public class MultithreadedMapRunner<K1 extends WritableComparable,
+                                    V1 extends Writable,
+                                    K2 extends WritableComparable,
+                                    V2 extends Writable>
+    implements MapRunnable<K1, V1, K2, V2> {
+
   private static final Log LOG =
     LogFactory.getLog(MultithreadedMapRunner.class.getName());
 
   private JobConf job;
-  private Mapper mapper;
+  private Mapper<K1, V1, K2, V2> mapper;
   private ExecutorService executorService;
   private volatile IOException ioException;
 
+  @SuppressWarnings("unchecked")
   public void configure(JobConf job) {
     int numberOfThreads =
       job.getInt("mapred.map.multithreadedrunner.threads", 10);
@@ -76,14 +82,14 @@ public class MultithreadedMapRunner implements MapRunnable {
     executorService = Executors.newFixedThreadPool(numberOfThreads);
   }
 
-  public void run(RecordReader input, OutputCollector output,
+  public void run(RecordReader<K1, V1> input, OutputCollector<K2, V2> output,
                   Reporter reporter)
     throws IOException {
     try {
       // allocate key & value instances these objects will not be reused
       // because execution of Mapper.map is not serialized.
-      WritableComparable key = input.createKey();
-      Writable value = input.createValue();
+      K1 key = input.createKey();
+      V1 value = input.createValue();
 
       while (input.next(key, value)) {
 
@@ -166,9 +172,9 @@ public class MultithreadedMapRunner implements MapRunnable {
    * Runnable to execute a single Mapper.map call from a forked thread.
    */
   private class MapperInvokeRunable implements Runnable {
-    private WritableComparable key;
-    private Writable value;
-    private OutputCollector output;
+    private K1 key;
+    private V1 value;
+    private OutputCollector<K2, V2> output;
     private Reporter reporter;
 
     /**
@@ -180,8 +186,9 @@ public class MultithreadedMapRunner implements MapRunnable {
      * @param output
      * @param reporter
      */
-    public MapperInvokeRunable(WritableComparable key, Writable value,
-                               OutputCollector output, Reporter reporter) {
+    public MapperInvokeRunable(K1 key, V1 value,
+                               OutputCollector<K2, V2> output,
+                               Reporter reporter) {
       this.key = key;
       this.value = value;
       this.output = output;
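
The runner is selected and sized entirely through configuration; a driver fragment, assuming a mapper whose map() calls are safe to run concurrently, with conf being the job's JobConf and setMapRunnerClass assumed here as the JobConf hook for installing an alternative MapRunnable:

  conf.setMapRunnerClass(MultithreadedMapRunner.class);
  conf.setInt("mapred.map.multithreadedrunner.threads", 4);  // pool size; the runner defaults to 10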

+ 7 - 4
src/java/org/apache/hadoop/mapred/lib/NullOutputFormat.java

@@ -30,11 +30,14 @@ import org.apache.hadoop.util.Progressable;
 /**
  * Consume all outputs and put them in /dev/null. 
  */
-public class NullOutputFormat implements OutputFormat {
-  public RecordWriter getRecordWriter(FileSystem ignored, JobConf job, 
+public class NullOutputFormat<K extends WritableComparable,
+                              V extends Writable>
+  implements OutputFormat<K, V> {
+  
+  public RecordWriter<K, V> getRecordWriter(FileSystem ignored, JobConf job, 
                                       String name, Progressable progress) {
-    return new RecordWriter(){
-        public void write(WritableComparable key, Writable value) { }
+    return new RecordWriter<K, V>(){
+        public void write(K key, V value) { }
         public void close(Reporter reporter) { }
       };
   }
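
A job whose useful results are produced as a side effect of map or reduce can register this format so every collected record is discarded; conf is the job's JobConf:

  conf.setOutputFormat(NullOutputFormat.class);  // write(K, V) and close(Reporter) are no-ops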

+ 14 - 15
src/java/org/apache/hadoop/mapred/lib/RegexMapper.java

@@ -19,25 +19,23 @@
 package org.apache.hadoop.mapred.lib;
 
 import java.io.IOException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.MapReduceBase;
 import org.apache.hadoop.mapred.Mapper;
 import org.apache.hadoop.mapred.OutputCollector;
-import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.MapReduceBase;
-
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.Text;
-
-
-import java.util.regex.Pattern;
-import java.util.regex.Matcher;
 
 
 /** A {@link Mapper} that extracts text matching a regular expression. */
-public class RegexMapper extends MapReduceBase implements Mapper {
+public class RegexMapper<K extends WritableComparable>
+    extends MapReduceBase
+    implements Mapper<K, Text, Text, LongWritable> {
 
   private Pattern pattern;
   private int group;
@@ -47,10 +45,11 @@ public class RegexMapper extends MapReduceBase implements Mapper {
     group = job.getInt("mapred.mapper.regex.group", 0);
   }
 
-  public void map(WritableComparable key, Writable value,
-                  OutputCollector output, Reporter reporter)
+  public void map(K key, Text value,
+                  OutputCollector<Text, LongWritable> output,
+                  Reporter reporter)
     throws IOException {
-    String text = ((Text)value).toString();
+    String text = value.toString();
     Matcher matcher = pattern.matcher(text);
     while (matcher.find()) {
       output.collect(new Text(matcher.group(group)), new LongWritable(1));
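
The mapper is driven by the two job properties read in configure(); a driver fragment with an arbitrary example pattern, conf being the job's JobConf:

  conf.setMapperClass(RegexMapper.class);
  conf.set("mapred.mapper.regex", "\\bERROR\\b");  // pattern whose matches become output keys
  conf.setInt("mapred.mapper.regex.group", 0);     // capture group to emit; 0 (the default) is the whole match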

+ 11 - 10
src/java/org/apache/hadoop/mapred/lib/TokenCountMapper.java

@@ -21,26 +21,27 @@ package org.apache.hadoop.mapred.lib;
 import java.io.IOException;
 import java.util.StringTokenizer;
 
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.mapred.MapReduceBase;
 import org.apache.hadoop.mapred.Mapper;
 import org.apache.hadoop.mapred.OutputCollector;
 import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.MapReduceBase;
-
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.Text;
 
 
 /** A {@link Mapper} that maps text values into <token,freq> pairs.  Uses
  * {@link StringTokenizer} to break text into tokens. */
-public class TokenCountMapper extends MapReduceBase implements Mapper {
+public class TokenCountMapper<K extends WritableComparable>
+    extends MapReduceBase
+    implements Mapper<K, Text, Text, LongWritable> {
 
-  public void map(WritableComparable key, Writable value,
-                  OutputCollector output, Reporter reporter)
+  public void map(K key, Text value,
+                  OutputCollector<Text, LongWritable> output,
+                  Reporter reporter)
     throws IOException {
     // get input text
-    String text = ((Text)value).toString();       // value is line of text
+    String text = value.toString();       // value is line of text
 
     // tokenize the value
     StringTokenizer st = new StringTokenizer(text);
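
Its fixed <Text, LongWritable> output types line up with LongSumReducer<Text>, so a token-counting job composes from stock classes; a driver fragment with illustrative output settings, conf being the job's JobConf:

  conf.setInputFormat(TextInputFormat.class);    // supplies <LongWritable, Text> records to the mapper
  conf.setMapperClass(TokenCountMapper.class);   // emits <Text, LongWritable> per token
  conf.setCombinerClass(LongSumReducer.class);
  conf.setReducerClass(LongSumReducer.class);    // sums the counts for each token
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);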

+ 4 - 2
src/java/org/apache/hadoop/mapred/lib/aggregate/UserDefinedValueAggregatorDescriptor.java

@@ -22,6 +22,7 @@ import java.lang.reflect.Constructor;
 import java.util.ArrayList;
 import java.util.Map.Entry;
 
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
 
 /**
@@ -87,8 +88,9 @@ public class UserDefinedValueAggregatorDescriptor implements
    *         aggregation type which is used to guide the way to aggregate the
    *         value in the reduce/combiner phrase of an Aggregate based job.
    */
-  public ArrayList<Entry> generateKeyValPairs(Object key, Object val) {
-    ArrayList<Entry> retv = new ArrayList<Entry>();
+  public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key,
+                                                          Object val) {
+    ArrayList<Entry<Text, Text>> retv = new ArrayList<Entry<Text, Text>>();
     if (this.theAggregatorDescriptor != null) {
       retv = this.theAggregatorDescriptor.generateKeyValPairs(key, val);
     }

+ 14 - 12
src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorBaseDescriptor.java

@@ -49,25 +49,25 @@ public class ValueAggregatorBaseDescriptor implements ValueAggregatorDescriptor
   
   public String inputFile = null;
 
-  private static class MyEntry implements Entry {
-    Object key;
+  private static class MyEntry implements Entry<Text, Text> {
+    Text key;
 
-    Object val;
+    Text val;
 
-    public Object getKey() {
+    public Text getKey() {
       return key;
     }
 
-    public Object getValue() {
+    public Text getValue() {
       return val;
     }
 
-    public Object setValue(Object val) {
+    public Text setValue(Text val) {
       this.val = val;
       return val;
     }
 
-    public MyEntry(Object key, Object val) {
+    public MyEntry(Text key, Text val) {
       this.key = key;
       this.val = val;
     }
@@ -81,7 +81,7 @@ public class ValueAggregatorBaseDescriptor implements ValueAggregatorDescriptor
    * @return an Entry whose key is the aggregation id prefixed with 
    * the aggregation type.
    */
-  public static Entry generateEntry(String type, String id, Object val) {
+  public static Entry<Text, Text> generateEntry(String type, String id, Text val) {
     Text key = new Text(type + TYPE_SEPARATOR + id);
     return new MyEntry(key, val);
   }
@@ -129,11 +129,12 @@ public class ValueAggregatorBaseDescriptor implements ValueAggregatorDescriptor
    *         aggregation type which is used to guide the way to aggregate the
    *         value in the reduce/combiner phrase of an Aggregate based job.
    */
-  public ArrayList<Entry> generateKeyValPairs(Object key, Object val) {
-    ArrayList<Entry> retv = new ArrayList<Entry>();
+  public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key,
+                                                          Object val) {
+    ArrayList<Entry<Text, Text>> retv = new ArrayList<Entry<Text, Text>>();
     String countType = LONG_VALUE_SUM;
     String id = "record_count";
-    Entry e = generateEntry(countType, id, ONE);
+    Entry<Text, Text> e = generateEntry(countType, id, ONE);
     if (e != null) {
       retv.add(e);
     }
@@ -153,6 +154,7 @@ public class ValueAggregatorBaseDescriptor implements ValueAggregatorDescriptor
    */
   public void configure(JobConf job) {
     this.inputFile = job.get("map.input.file");
-    maxNumItems = job.getLong("aggregate.max.num.unique.values", Long.MAX_VALUE);
+    maxNumItems = job.getLong("aggregate.max.num.unique.values",
+                              Long.MAX_VALUE);
   }
 }

+ 6 - 4
src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorCombiner.java

@@ -31,7 +31,9 @@ import org.apache.hadoop.mapred.Reporter;
 /**
  * This class implements the generic combiner of Aggregate.
  */
-public class ValueAggregatorCombiner extends ValueAggregatorJobBase {
+public class ValueAggregatorCombiner<K1 extends WritableComparable,
+                                     V1 extends Writable>
+  extends ValueAggregatorJobBase<K1, V1> {
 
   /**
    * Combiner does not need to configure.
@@ -46,8 +48,8 @@ public class ValueAggregatorCombiner extends ValueAggregatorJobBase {
    * @param values the values to combine
    * @param output to collect combined values
    */
-  public void reduce(WritableComparable key, Iterator values,
-                     OutputCollector output, Reporter reporter) throws IOException {
+  public void reduce(Text key, Iterator<Text> values,
+                     OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
     String keyStr = key.toString();
     int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR);
     String type = keyStr.substring(0, pos);
@@ -80,7 +82,7 @@ public class ValueAggregatorCombiner extends ValueAggregatorJobBase {
    * Do nothing. Should not be called. 
    *
    */
-  public void map(WritableComparable arg0, Writable arg1, OutputCollector arg2,
+  public void map(K1 arg0, V1 arg1, OutputCollector<Text, Text> arg2,
                   Reporter arg3) throws IOException {
     throw new IOException ("should not be called\n");
   }

+ 2 - 1
src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorDescriptor.java

@@ -54,7 +54,8 @@ public interface ValueAggregatorDescriptor {
    *         aggregation type which is used to guide the way to aggregate the
    *         value in the reduce/combiner phrase of an Aggregate based job.
    */
-  public ArrayList<Entry> generateKeyValPairs(Object key, Object val);
+  public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key,
+                                                          Object val);
 
   /**
    * Configure the object

+ 6 - 1
src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJobBase.java

@@ -21,6 +21,9 @@ package org.apache.hadoop.mapred.lib.aggregate;
 import java.io.IOException;
 import java.util.ArrayList;
 
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Mapper;
 import org.apache.hadoop.mapred.Reducer;
@@ -29,7 +32,9 @@ import org.apache.hadoop.mapred.Reducer;
  * This abstract class implements some common functionalities of the
  * the generic mapper, reducer and combiner classes of Aggregate.
  */
-public abstract class ValueAggregatorJobBase implements Mapper, Reducer {
+public abstract class ValueAggregatorJobBase<K1 extends WritableComparable,
+                                             V1 extends Writable>
+  implements Mapper<K1, V1, Text, Text>, Reducer<Text, Text, Text, Text> {
 
   protected ArrayList<ValueAggregatorDescriptor> aggregatorDescriptorList = null;
 

+ 14 - 10
src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorMapper.java

@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.Iterator;
 import java.util.Map.Entry;
 
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.OutputCollector;
@@ -30,23 +31,25 @@ import org.apache.hadoop.mapred.Reporter;
 /**
  * This class implements the generic mapper of Aggregate.
  */
-public class ValueAggregatorMapper extends ValueAggregatorJobBase {
+public class ValueAggregatorMapper<K1 extends WritableComparable,
+                                   V1 extends Writable>
+  extends ValueAggregatorJobBase<K1, V1> {
 
   /**
    *  the map function. It iterates through the value aggregator descriptor 
    *  list to generate aggregation id/value pairs and emit them.
    */
-  public void map(WritableComparable key, Writable value,
-                  OutputCollector output, Reporter reporter) throws IOException {
+  public void map(K1 key, V1 value,
+                  OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
 
     Iterator iter = this.aggregatorDescriptorList.iterator();
     while (iter.hasNext()) {
       ValueAggregatorDescriptor ad = (ValueAggregatorDescriptor) iter.next();
-      Iterator<Entry> ens = ad.generateKeyValPairs(key, value).iterator();
+      Iterator<Entry<Text, Text>> ens =
+        ad.generateKeyValPairs(key, value).iterator();
       while (ens.hasNext()) {
-        Entry en = ens.next();
-        output.collect((WritableComparable) en.getKey(), (Writable) en
-                       .getValue());
+        Entry<Text, Text> en = ens.next();
+        output.collect(en.getKey(), en.getValue());
       }
     }
   }
@@ -54,8 +57,9 @@ public class ValueAggregatorMapper extends ValueAggregatorJobBase {
   /**
    * Do nothing. Should not be called.
    */
-  public void reduce(WritableComparable arg0, Iterator arg1,
-                     OutputCollector arg2, Reporter arg3) throws IOException {
-    throw new IOException ("should not be called\n");
+  public void reduce(Text arg0, Iterator<Text> arg1,
+                     OutputCollector<Text, Text> arg2,
+                     Reporter arg3) throws IOException {
+    throw new IOException("should not be called\n");
   }
 }

+ 6 - 4
src/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorReducer.java

@@ -32,7 +32,9 @@ import org.apache.hadoop.mapred.Reporter;
  * 
  * 
  */
-public class ValueAggregatorReducer extends ValueAggregatorJobBase {
+public class ValueAggregatorReducer<K1 extends WritableComparable,
+                                    V1 extends Writable>
+  extends ValueAggregatorJobBase<K1, V1> {
 
   /**
    * @param key
@@ -43,8 +45,8 @@ public class ValueAggregatorReducer extends ValueAggregatorJobBase {
    *          may be further customiized.
    * @value the values to be aggregated
    */
-  public void reduce(WritableComparable key, Iterator values,
-                     OutputCollector output, Reporter reporter) throws IOException {
+  public void reduce(Text key, Iterator<Text> values,
+                     OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
     String keyStr = key.toString();
     int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR);
     String type = keyStr.substring(0, pos);
@@ -65,7 +67,7 @@ public class ValueAggregatorReducer extends ValueAggregatorJobBase {
   /**
    * Do nothing. Should not be called
    */
-  public void map(WritableComparable arg0, Writable arg1, OutputCollector arg2,
+  public void map(K1 arg0, V1 arg1, OutputCollector<Text, Text> arg2,
                   Reporter arg3) throws IOException {
     throw new IOException ("should not be called\n");
   }

+ 11 - 9
src/java/org/apache/hadoop/mapred/pipes/Application.java

@@ -44,13 +44,14 @@ import org.apache.hadoop.util.StringUtils;
  * This class is responsible for launching and communicating with the child 
  * process.
  */
-class Application {
+class Application<K1 extends WritableComparable, V1 extends Writable,
+                  K2 extends WritableComparable, V2 extends Writable> {
   private static final Log LOG = LogFactory.getLog(Application.class.getName());
   private ServerSocket serverSocket;
   private Process process;
   private Socket clientSocket;
-  private OutputHandler handler;
-  private BinaryProtocol downlink;
+  private OutputHandler<K2, V2> handler;
+  private BinaryProtocol<K1, V1, K2, V2> downlink;
 
   /**
    * Start the child process to handle the task for us.
@@ -62,7 +63,8 @@ class Application {
    * @throws IOException
    * @throws InterruptedException
    */
-  Application(JobConf conf, OutputCollector output, Reporter reporter,
+  @SuppressWarnings("unchecked")
+  Application(JobConf conf, OutputCollector<K2, V2> output, Reporter reporter,
               Class outputKeyClass, Class outputValueClass
               ) throws IOException, InterruptedException {
     serverSocket = new ServerSocket(0);
@@ -81,12 +83,12 @@ class Application {
     cmd = TaskLog.captureOutAndError(cmd, stdout, stderr, logLength);
     process = runClient(cmd, env);
     clientSocket = serverSocket.accept();
-    handler = new OutputHandler(output, reporter);
-    WritableComparable outputKey = (WritableComparable)
+    handler = new OutputHandler<K2, V2>(output, reporter);
+    K2 outputKey = (K2)
       ReflectionUtils.newInstance(outputKeyClass, conf);
-    Writable outputValue = (Writable) 
+    V2 outputValue = (V2) 
       ReflectionUtils.newInstance(outputValueClass, conf);
-    downlink = new BinaryProtocol(clientSocket, handler, 
+    downlink = new BinaryProtocol<K1, V1, K2, V2>(clientSocket, handler, 
                                   outputKey, outputValue, conf);
     downlink.start();
     downlink.setJobConf(conf);
@@ -97,7 +99,7 @@ class Application {
    * application.
    * @return the downlink proxy
    */
-  DownwardProtocol getDownlink() {
+  DownwardProtocol<K1, V1> getDownlink() {
     return downlink;
   }
 

+ 19 - 12
src/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java

@@ -39,7 +39,10 @@ import org.apache.hadoop.util.StringUtils;
 /**
  * This protocol is a binary implementation of the Pipes protocol.
  */
-class BinaryProtocol implements DownwardProtocol {
+class BinaryProtocol<K1 extends WritableComparable, V1 extends Writable,
+                     K2 extends WritableComparable, V2 extends Writable>
+  implements DownwardProtocol<K1, V1> {
+  
   public static final int CURRENT_PROTOCOL_VERSION = 0;
   private DataOutputStream stream;
   private DataOutputBuffer buffer = new DataOutputBuffer();
@@ -72,15 +75,18 @@ class BinaryProtocol implements DownwardProtocol {
     }
   }
 
-  private static class UplinkReaderThread extends Thread {
+  private static class UplinkReaderThread<K2 extends WritableComparable,
+                                          V2 extends Writable>  
+    extends Thread {
+    
     private DataInputStream inStream;
-    private UpwardProtocol handler;
-    private WritableComparable key;
-    private Writable value;
+    private UpwardProtocol<K2, V2> handler;
+    private K2 key;
+    private V2 value;
     
-    public UplinkReaderThread(InputStream stream, UpwardProtocol handler, 
-                              WritableComparable key, Writable value
-                              ) throws IOException{
+    public UplinkReaderThread(InputStream stream,
+                              UpwardProtocol<K2, V2> handler, 
+                              K2 key, V2 value) throws IOException{
       inStream = new DataInputStream(stream);
       this.handler = handler;
       this.key = key;
@@ -192,9 +198,9 @@ class BinaryProtocol implements DownwardProtocol {
    * @throws IOException
    */
   public BinaryProtocol(Socket sock, 
-                        UpwardProtocol handler,
-                        WritableComparable key,
-                        Writable value,
+                        UpwardProtocol<K2, V2> handler,
+                        K2 key,
+                        V2 value,
                         JobConf config) throws IOException {
     OutputStream raw = sock.getOutputStream();
     // If we are debugging, save a copy of the downlink commands to a file
@@ -202,7 +208,8 @@ class BinaryProtocol implements DownwardProtocol {
       raw = new TeeOutputStream("downlink.data", raw);
     }
     stream = new DataOutputStream(raw);
-    uplink = new UplinkReaderThread(sock.getInputStream(), handler, key, value);
+    uplink = new UplinkReaderThread<K2, V2>(sock.getInputStream(),
+                                            handler, key, value);
     uplink.setName("pipe-uplink-handler");
     uplink.start();
   }

+ 4 - 4
src/java/org/apache/hadoop/mapred/pipes/DownwardProtocol.java

@@ -30,7 +30,7 @@ import org.apache.hadoop.mapred.JobConf;
  * All of these calls are asynchronous and return before the message has been 
  * processed.
  */
-interface DownwardProtocol {
+interface DownwardProtocol<K extends WritableComparable, V extends Writable> {
   /**
    * Start communication
    * @throws IOException
@@ -68,7 +68,7 @@ interface DownwardProtocol {
    * @param value The record's value
    * @throws IOException
    */
-  void mapItem(WritableComparable key, Writable value) throws IOException;
+  void mapItem(K key, V value) throws IOException;
   
   /**
    * Run a reduce task in the child
@@ -83,14 +83,14 @@ interface DownwardProtocol {
    * @param key the new key
    * @throws IOException
    */
-  void reduceKey(WritableComparable key) throws IOException;
+  void reduceKey(K key) throws IOException;
   
   /**
    * The reduce should be given a new value
    * @param value the new value
    * @throws IOException
    */
-  void reduceValue(Writable value) throws IOException;
+  void reduceValue(V value) throws IOException;
   
   /**
    * The task has no more input coming, but it should finish processing it's 

+ 9 - 7
src/java/org/apache/hadoop/mapred/pipes/OutputHandler.java

@@ -28,9 +28,12 @@ import org.apache.hadoop.mapred.Reporter;
 /**
  * Handles the upward (C++ to Java) messages from the application.
  */
-class OutputHandler implements UpwardProtocol {
+class OutputHandler<K extends WritableComparable,
+                    V extends Writable>
+  implements UpwardProtocol<K, V> {
+  
   private Reporter reporter;
-  private OutputCollector collector;
+  private OutputCollector<K, V> collector;
   private float progressValue = 0.0f;
   private boolean done = false;
   private Throwable exception = null;
@@ -40,7 +43,7 @@ class OutputHandler implements UpwardProtocol {
    * @param collector the "real" collector that takes the output
    * @param reporter the reporter for reporting progress
    */
-  public OutputHandler(OutputCollector collector, Reporter reporter) {
+  public OutputHandler(OutputCollector<K, V> collector, Reporter reporter) {
     this.reporter = reporter;
     this.collector = collector;
   }
@@ -48,16 +51,15 @@ class OutputHandler implements UpwardProtocol {
   /**
    * The task output a normal record.
    */
-  public void output(WritableComparable key, 
-                     Writable value) throws IOException {
+  public void output(K key, V value) throws IOException {
     collector.collect(key, value);
   }
 
   /**
    * The task output a record with a partition number attached.
    */
-  public void partitionedOutput(int reduce, WritableComparable key, 
-                                Writable value) throws IOException {
+  public void partitionedOutput(int reduce, K key, 
+                                V value) throws IOException {
     PipesPartitioner.setNextPartition(reduce);
     collector.collect(key, value);
   }

+ 10 - 9
src/java/org/apache/hadoop/mapred/pipes/PipesMapRunner.java

@@ -31,7 +31,9 @@ import org.apache.hadoop.mapred.Reporter;
 /**
  * An adaptor to run a C++ mapper.
  */
-class PipesMapRunner extends MapRunner {
+class PipesMapRunner<K1 extends WritableComparable, V1 extends Writable,
+    K2 extends WritableComparable, V2 extends Writable>
+    extends MapRunner<K1, V1, K2, V2> {
   private JobConf job;
 
   /**
@@ -48,26 +50,25 @@ class PipesMapRunner extends MapRunner {
    * @param output the object to collect the outputs of the map
    * @param reporter the object to update with status
    */
-  public void run(RecordReader input, OutputCollector output,
-                  Reporter reporter
-                  ) throws IOException {
-    Application application = null;
+  public void run(RecordReader<K1, V1> input, OutputCollector<K2, V2> output,
+                  Reporter reporter) throws IOException {
+    Application<K1, V1, K2, V2> application = null;
     try {
-      application = new Application(job, output, reporter,
+      application = new Application<K1, V1, K2, V2>(job, output, reporter,
                                     job.getMapOutputKeyClass(),
                                     job.getMapOutputValueClass());
     } catch (InterruptedException ie) {
       throw new RuntimeException("interrupted", ie);
     }
-    DownwardProtocol downlink = application.getDownlink();
+    DownwardProtocol<K1, V1> downlink = application.getDownlink();
     boolean isJavaInput = Submitter.getIsJavaRecordReader(job);
     downlink.runMap(reporter.getInputSplit(), 
                     job.getNumReduceTasks(), isJavaInput);
     try {
       if (isJavaInput) {
         // allocate key & value instances that are re-used for all entries
-        WritableComparable key = input.createKey();
-        Writable value = input.createValue();
+        K1 key = input.createKey();
+        V1 value = input.createValue();
         downlink.setInputTypes(key.getClass().getName(),
                                value.getClass().getName());
         

+ 7 - 3
src/java/org/apache/hadoop/mapred/pipes/PipesPartitioner.java

@@ -28,10 +28,14 @@ import org.apache.hadoop.util.ReflectionUtils;
  * This partitioner is one that can either be set manually per a record or it
  * can fall back onto a Java partitioner that was set by the user.
  */
-class PipesPartitioner implements Partitioner {
+class PipesPartitioner<K extends WritableComparable,
+                       V extends Writable>
+  implements Partitioner<K, V> {
+  
   private static ThreadLocal<Integer> cache = new ThreadLocal<Integer>();
-  private Partitioner part = null;
+  private Partitioner<K, V> part = null;
   
+  @SuppressWarnings("unchecked")
   public void configure(JobConf conf) {
     part = (Partitioner) 
       ReflectionUtils.newInstance(Submitter.getJavaPartitioner(conf), conf);
@@ -52,7 +56,7 @@ class PipesPartitioner implements Partitioner {
    * @param value the value to partition
    * @param numPartitions the number of reduces
    */
-  public int getPartition(WritableComparable key, Writable value, 
+  public int getPartition(K key, V value, 
                           int numPartitions) {
     Integer result = cache.get();
     if (result == null) {

+ 13 - 11
src/java/org/apache/hadoop/mapred/pipes/PipesReducer.java

@@ -33,11 +33,13 @@ import java.util.Iterator;
 /**
  * This class is used to talk to a C++ reduce task.
  */
-class PipesReducer implements Reducer {
+class PipesReducer<K2 extends WritableComparable, V2 extends Writable,
+    K3 extends WritableComparable, V3 extends Writable>
+    implements Reducer<K2, V2, K3, V3> {
   private static final Log LOG= LogFactory.getLog(PipesReducer.class.getName());
   private JobConf job;
-  private Application application = null;
-  private DownwardProtocol downlink = null;
+  private Application<K2, V2, K3, V3> application = null;
+  private DownwardProtocol<K2, V2> downlink = null;
   private boolean isOk = true;
 
   public void configure(JobConf job) {
@@ -48,23 +50,23 @@ class PipesReducer implements Reducer {
    * Process all of the keys and values. Start up the application if we haven't
    * started it yet.
    */
-  public void reduce(WritableComparable key, Iterator values, 
-                     OutputCollector output, Reporter reporter
+  public void reduce(K2 key, Iterator<V2> values, 
+                     OutputCollector<K3, V3> output, Reporter reporter
                      ) throws IOException {
     isOk = false;
     startApplication(output, reporter);
     downlink.reduceKey(key);
     while (values.hasNext()) {
-      downlink.reduceValue((Writable) values.next());
+      downlink.reduceValue(values.next());
     }
     isOk = true;
   }
 
-  private void startApplication(OutputCollector output, Reporter reporter) throws IOException {
+  private void startApplication(OutputCollector<K3, V3> output, Reporter reporter) throws IOException {
     if (application == null) {
       try {
         LOG.info("starting application");
-        application = new Application(job, output, reporter, 
+        application = new Application<K2, V2, K3, V3>(job, output, reporter, 
                                       job.getOutputKeyClass(), 
                                       job.getOutputValueClass());
         downlink = application.getDownlink();
@@ -82,9 +84,9 @@ class PipesReducer implements Reducer {
   public void close() throws IOException {
     // if we haven't started the application, we have nothing to do
     if (isOk) {
-      OutputCollector nullCollector = new OutputCollector() {
-        public void collect(WritableComparable key, 
-                            Writable value) throws IOException {
+      OutputCollector<K3, V3> nullCollector = new OutputCollector<K3, V3>() {
+        public void collect(K3 key, 
+                            V3 value) throws IOException {
           // NULL
         }
       };

+ 4 - 4
src/java/org/apache/hadoop/mapred/pipes/UpwardProtocol.java

@@ -26,14 +26,14 @@ import org.apache.hadoop.io.WritableComparable;
  * The interface for the messages that can come up from the child. All of these
  * calls are asynchronous and return before the message has been processed.
  */
-interface UpwardProtocol {
+interface UpwardProtocol<K extends WritableComparable, V extends Writable> {
   /**
    * Output a record from the child.
    * @param key the record's key
    * @param value the record's value
    * @throws IOException
    */
-  void output(WritableComparable key, Writable value) throws IOException;
+  void output(K key, V value) throws IOException;
   
   /**
    * Map functions where the application has defined a partition function
@@ -43,8 +43,8 @@ interface UpwardProtocol {
    * @param value the record's value
    * @throws IOException
    */
-  void partitionedOutput(int reduce, WritableComparable key, 
-                         Writable value) throws IOException;
+  void partitionedOutput(int reduce, K key, 
+                         V value) throws IOException;
   
   /**
    * Update the task's status message

+ 25 - 20
src/java/org/apache/hadoop/tools/Logalyzer.java

@@ -18,30 +18,32 @@
 
 package org.apache.hadoop.tools;
 
-import java.io.*;
-
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
 import java.util.Random;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
-import org.apache.commons.logging.*;
-
-import org.apache.hadoop.mapred.*;
-import org.apache.hadoop.fs.*;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.WritableComparator;
-import org.apache.hadoop.conf.*;
-import org.apache.hadoop.util.CopyFiles;
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.MapReduceBase;
 import org.apache.hadoop.mapred.Mapper;
 import org.apache.hadoop.mapred.OutputCollector;
-import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.MapReduceBase;
+import org.apache.hadoop.mapred.TextInputFormat;
+import org.apache.hadoop.mapred.TextOutputFormat;
 import org.apache.hadoop.mapred.lib.LongSumReducer;
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.io.Writable;
-
-import java.util.regex.Pattern;
-import java.util.regex.Matcher;
+import org.apache.hadoop.util.CopyFiles;
 
 /**
  * Logalyzer: A utility tool for archiving and analyzing hadoop logs.
@@ -62,7 +64,9 @@ public class Logalyzer {
   private static Configuration fsConfig = new Configuration();
   
   /** A {@link Mapper} that extracts text matching a regular expression. */
-  public static class LogRegexMapper extends MapReduceBase implements Mapper {
+  public static class LogRegexMapper<K extends WritableComparable>
+    extends MapReduceBase
+    implements Mapper<K, Text, Text, LongWritable> {
     
     private Pattern pattern;
     
@@ -70,13 +74,14 @@ public class Logalyzer {
       pattern = Pattern.compile(job.get("mapred.mapper.regex"));
     }
     
-    public void map(WritableComparable key, Writable value,
-                    OutputCollector output, Reporter reporter)
+    public void map(K key, Text value,
+                    OutputCollector<Text, LongWritable> output,
+                    Reporter reporter)
       throws IOException {
-      String text = ((Text)value).toString();
+      String text = value.toString();
       Matcher matcher = pattern.matcher(text);
       while (matcher.find()) {
-        output.collect((Text)value, new LongWritable(1));
+        output.collect(value, new LongWritable(1));
       }
     }
     

+ 4 - 4
src/java/org/apache/hadoop/util/CopyFiles.java

@@ -182,7 +182,7 @@ public class CopyFiles extends ToolBase {
    * DFSCopyFilesMapper: The mapper for copying files from the DFS.
    */
   public static class FSCopyFilesMapper extends CopyFilesMapper 
-    implements Mapper 
+    implements Mapper<Text, Writable, WritableComparable, Text> 
   {
     private int sizeBuf = 4096;
     private FileSystem srcFileSys = null;
@@ -397,11 +397,11 @@ public class CopyFiles extends ToolBase {
      * @param out not-used.
      * @param reporter
      */
-    public void map(WritableComparable key,
+    public void map(Text key,
                     Writable value,
-                    OutputCollector out,
+                    OutputCollector<WritableComparable, Text> out,
                     Reporter reporter) throws IOException {
-      String src = ((Text) key).toString();
+      String src = key.toString();
       try {
         copy(src, reporter);
       } catch (IOException except) {

+ 1 - 0
src/test/checkstyle.xml

@@ -52,6 +52,7 @@
         <!-- See http://checkstyle.sf.net/config_javadoc.html -->
         <module name="JavadocType">
           <property name="scope" value="public"/>
+          <property name="allowMissingParamTags" value="true"/>
         </module>
         <module name="JavadocStyle"/>
 

+ 10 - 8
src/test/org/apache/hadoop/fs/AccumulatingReducer.java

@@ -44,7 +44,9 @@ import org.apache.hadoop.mapred.TaskTracker;
  * </ul>
  * 
  */
-public class AccumulatingReducer extends MapReduceBase implements Reducer {
+public class AccumulatingReducer extends MapReduceBase
+    implements Reducer<UTF8, UTF8, UTF8, UTF8> {
+  
   protected String hostName;
   
   public AccumulatingReducer () {
@@ -57,12 +59,12 @@ public class AccumulatingReducer extends MapReduceBase implements Reducer {
     TaskTracker.LOG.info("Starting AccumulatingReducer on " + hostName);
   }
   
-  public void reduce(WritableComparable key, 
-                     Iterator values,
-                     OutputCollector output, 
+  public void reduce(UTF8 key, 
+                     Iterator<UTF8> values,
+                     OutputCollector<UTF8, UTF8> output, 
                      Reporter reporter
                      ) throws IOException {
-    String field = ((UTF8) key).toString();
+    String field = key.toString();
 
     reporter.setStatus("starting " + field + " ::host = " + hostName);
 
@@ -70,7 +72,7 @@ public class AccumulatingReducer extends MapReduceBase implements Reducer {
     if (field.startsWith("s:")) {
       String sSum = "";
       while (values.hasNext())
-        sSum += ((UTF8) values.next()).toString() + ";";
+        sSum += values.next().toString() + ";";
       output.collect(key, new UTF8(sSum));
       reporter.setStatus("finished " + field + " ::host = " + hostName);
       return;
@@ -79,7 +81,7 @@ public class AccumulatingReducer extends MapReduceBase implements Reducer {
     if (field.startsWith("f:")) {
       float fSum = 0;
       while (values.hasNext())
-        fSum += Float.parseFloat(((UTF8) values.next()).toString());
+        fSum += Float.parseFloat(values.next().toString());
       output.collect(key, new UTF8(String.valueOf(fSum)));
       reporter.setStatus("finished " + field + " ::host = " + hostName);
       return;
@@ -88,7 +90,7 @@ public class AccumulatingReducer extends MapReduceBase implements Reducer {
     if (field.startsWith("l:")) {
       long lSum = 0;
       while (values.hasNext()) {
-        lSum += Long.parseLong(((UTF8) values.next()).toString());
+        lSum += Long.parseLong(values.next().toString());
       }
       output.collect(key, new UTF8(String.valueOf(lSum)));
     }

+ 1 - 1
src/test/org/apache/hadoop/fs/DFSCIOTest.java

@@ -159,7 +159,7 @@ public class DFSCIOTest extends TestCase {
       super(fsConfig);
     }
     
-    void collectStats(OutputCollector output, 
+    void collectStats(OutputCollector<UTF8, UTF8> output, 
                       String name,
                       long execTime, 
                       Object objSize) throws IOException {

+ 1 - 1
src/test/org/apache/hadoop/fs/DistributedFSCheck.java

@@ -163,7 +163,7 @@ public class DistributedFSCheck extends TestCase {
       return new Long(actualSize);
     }
     
-    void collectStats(OutputCollector output, 
+    void collectStats(OutputCollector<UTF8, UTF8> output, 
                       String name, 
                       long execTime, 
                       Object corruptedBlock) throws IOException {

+ 9 - 7
src/test/org/apache/hadoop/fs/IOMapperBase.java

@@ -39,7 +39,9 @@ import org.apache.hadoop.mapred.Reporter;
  * statistics data to be collected by subsequent reducers.
  * 
  */
-public abstract class IOMapperBase extends Configured implements Mapper {
+public abstract class IOMapperBase extends Configured
+    implements Mapper<UTF8, LongWritable, UTF8, UTF8> {
+  
   protected byte[] buffer;
   protected int bufferSize;
   protected FileSystem fs;
@@ -91,7 +93,7 @@ public abstract class IOMapperBase extends Configured implements Mapper {
    * @param doIOReturnValue value returned by {@link #doIO(Reporter,String,long)}
    * @throws IOException
    */
-  abstract void collectStats(OutputCollector output, 
+  abstract void collectStats(OutputCollector<UTF8, UTF8> output, 
                              String name, 
                              long execTime, 
                              Object doIOReturnValue) throws IOException;
@@ -109,12 +111,12 @@ public abstract class IOMapperBase extends Configured implements Mapper {
    * {@link #collectStats(OutputCollector,String,long,Object)} 
    * is called to prepare stat data for a subsequent reducer.
    */
-  public void map(WritableComparable key, 
-                  Writable value,
-                  OutputCollector output, 
+  public void map(UTF8 key, 
+                  LongWritable value,
+                  OutputCollector<UTF8, UTF8> output, 
                   Reporter reporter) throws IOException {
-    String name = ((UTF8)key).toString();
-    long longValue = ((LongWritable)value).get();
+    String name = key.toString();
+    long longValue = value.get();
     
     reporter.setStatus("starting " + name + " ::host = " + hostName);
     

+ 1 - 1
src/test/org/apache/hadoop/fs/TestDFSIO.java

@@ -154,7 +154,7 @@ public class TestDFSIO extends TestCase {
       super(fsConfig);
     }
     
-    void collectStats(OutputCollector output, 
+    void collectStats(OutputCollector<UTF8, UTF8> output, 
                       String name,
                       long execTime, 
                       Object objSize) throws IOException {

+ 18 - 10
src/test/org/apache/hadoop/fs/TestFileSystem.java

@@ -106,7 +106,9 @@ public class TestFileSystem extends TestCase {
     LOG.info("created control file for: "+totalSize+" bytes");
   }
 
-  public static class WriteMapper extends Configured implements Mapper {
+  public static class WriteMapper extends Configured
+      implements Mapper<UTF8, LongWritable, UTF8, LongWritable> {
+    
     private Random random = new Random();
     private byte[] buffer = new byte[BUFFER_SIZE];
     private FileSystem fs;
@@ -132,11 +134,13 @@ public class TestFileSystem extends TestCase {
       fastCheck = job.getBoolean("fs.test.fastCheck", false);
     }
 
-    public void map(WritableComparable key, Writable value,
-                    OutputCollector collector, Reporter reporter)
+    public void map(UTF8 key, LongWritable value,
+                    OutputCollector<UTF8, LongWritable> collector,
+                    Reporter reporter)
       throws IOException {
-      String name = ((UTF8)key).toString();
-      long size = ((LongWritable)value).get();
+      
+      String name = key.toString();
+      long size = value.get();
       long seed = Long.parseLong(name);
 
       random.setSeed(seed);
@@ -200,7 +204,9 @@ public class TestFileSystem extends TestCase {
     JobClient.runJob(job);
   }
 
-  public static class ReadMapper extends Configured implements Mapper {
+  public static class ReadMapper extends Configured
+      implements Mapper<UTF8, LongWritable, UTF8, LongWritable> {
+    
     private Random random = new Random();
     private byte[] buffer = new byte[BUFFER_SIZE];
     private byte[] check  = new byte[BUFFER_SIZE];
@@ -224,11 +230,13 @@ public class TestFileSystem extends TestCase {
       fastCheck = job.getBoolean("fs.test.fastCheck", false);
     }
 
-    public void map(WritableComparable key, Writable value,
-                    OutputCollector collector, Reporter reporter)
+    public void map(UTF8 key, LongWritable value,
+                    OutputCollector<UTF8, LongWritable> collector,
+                    Reporter reporter)
       throws IOException {
-      String name = ((UTF8)key).toString();
-      long size = ((LongWritable)value).get();
+      
+      String name = key.toString();
+      long size = value.get();
       long seed = Long.parseLong(name);
 
       random.setSeed(seed);

+ 11 - 6
src/test/org/apache/hadoop/mapred/MRBench.java

@@ -51,9 +51,12 @@ public class MRBench {
    * Takes input format as text lines, runs some processing on it and 
    * writes out data as text again. 
    */
-  public static class Map extends MapReduceBase implements Mapper {
-    public void map(WritableComparable key, Writable value,
-                    OutputCollector output, Reporter reporter) throws IOException 
+  public static class Map extends MapReduceBase
+    implements Mapper<WritableComparable, UTF8, UTF8, UTF8> {
+    
+    public void map(WritableComparable key, UTF8 value,
+                    OutputCollector<UTF8, UTF8> output,
+                    Reporter reporter) throws IOException 
     {
       String line = value.toString();
       output.collect(new UTF8(process(line)), new UTF8(""));		
@@ -66,9 +69,11 @@ public class MRBench {
   /**
    * Ignores the key and writes values to the output. 
    */
-  public static class Reduce extends MapReduceBase implements Reducer {
-    public void reduce(WritableComparable key, Iterator values,
-                       OutputCollector output, Reporter reporter) throws IOException 
+  public static class Reduce extends MapReduceBase
+    implements Reducer<UTF8, UTF8, UTF8, UTF8> {
+    
+    public void reduce(UTF8 key, Iterator<UTF8> values,
+                       OutputCollector<UTF8, UTF8> output, Reporter reporter) throws IOException 
     {
       while(values.hasNext()) {
         output.collect(key, new UTF8(values.next().toString()));

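The type parameters are erased at runtime, so wiring the now-typed MRBench.Map and MRBench.Reduce into a job is unchanged; a minimal sketch follows (assumes the MRBench test class is on the classpath; the paths are illustrative, and JobConf still takes plain Class objects):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.UTF8;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MRBench;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;

public class ExampleJobSetup {
  static JobConf configureJob() {
    JobConf job = new JobConf(ExampleJobSetup.class);
    // Generics on Mapper/Reducer are compile-time only; the job setup
    // below is the same as before the change.
    job.setInputFormat(TextInputFormat.class);
    job.setOutputFormat(TextOutputFormat.class);
    job.setMapperClass(MRBench.Map.class);
    job.setReducerClass(MRBench.Reduce.class);
    job.setOutputKeyClass(UTF8.class);
    job.setOutputValueClass(UTF8.class);
    job.setInputPath(new Path("mrbench/input"));    // illustrative paths
    job.setOutputPath(new Path("mrbench/output"));
    return job;
  }
}
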
+ 14 - 9
src/test/org/apache/hadoop/mapred/MRCaching.java

@@ -21,9 +21,9 @@ package org.apache.hadoop.mapred;
 import java.io.*;
 import java.util.*;
 
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
@@ -48,7 +48,9 @@ public class MRCaching {
    * archives/files are set and then are checked in the map if they have been
    * localized or not.
    */
-  public static class MapClass extends MapReduceBase implements Mapper {
+  public static class MapClass extends MapReduceBase
+    implements Mapper<LongWritable, Text, Text, IntWritable> {
+    
     JobConf conf;
 
     private final static IntWritable one = new IntWritable(1);
@@ -97,9 +99,10 @@ public class MRCaching {
       }
     }
 
-    public void map(WritableComparable key, Writable value,
-                    OutputCollector output, Reporter reporter) throws IOException {
-      String line = ((Text) value).toString();
+    public void map(LongWritable key, Text value,
+                    OutputCollector<Text, IntWritable> output,
+                    Reporter reporter) throws IOException {
+      String line = value.toString();
       StringTokenizer itr = new StringTokenizer(line);
       while (itr.hasMoreTokens()) {
         word.set(itr.nextToken());
@@ -112,13 +115,15 @@ public class MRCaching {
   /**
    * A reducer class that just emits the sum of the input values.
    */
-  public static class ReduceClass extends MapReduceBase implements Reducer {
+  public static class ReduceClass extends MapReduceBase
+    implements Reducer<Text, IntWritable, Text, IntWritable> {
 
-    public void reduce(WritableComparable key, Iterator values,
-                       OutputCollector output, Reporter reporter) throws IOException {
+    public void reduce(Text key, Iterator<IntWritable> values,
+                       OutputCollector<Text, IntWritable> output,
+                       Reporter reporter) throws IOException {
       int sum = 0;
       while (values.hasNext()) {
-        sum += ((IntWritable) values.next()).get();
+        sum += values.next().get();
       }
       output.collect(key, new IntWritable(sum));
     }

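The sum-of-counts reducer used here (and in the other word-count style tests) reduces to roughly the following once typed (an illustrative standalone class, assuming the usual Text/IntWritable types):

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

// Illustrative only: the typed "emit the sum of the counts" reducer pattern.
public class ExampleSumReducer extends MapReduceBase
    implements Reducer<Text, IntWritable, Text, IntWritable> {

  public void reduce(Text key, Iterator<IntWritable> values,
                     OutputCollector<Text, IntWritable> output,
                     Reporter reporter) throws IOException {
    int sum = 0;
    while (values.hasNext()) {
      sum += values.next().get();   // Iterator<IntWritable> removes the cast
    }
    output.collect(key, new IntWritable(sum));
  }
}
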
+ 15 - 11
src/test/org/apache/hadoop/mapred/PiEstimator.java

@@ -24,6 +24,7 @@ import java.util.Random;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
@@ -39,7 +40,8 @@ public class PiEstimator {
   * Mapper class for Pi estimation.
    */
   
-  public static class PiMapper extends MapReduceBase implements Mapper {
+  public static class PiMapper extends MapReduceBase
+    implements Mapper<IntWritable, Writable, IntWritable, IntWritable> {
     
     static Random r = new Random();
     
@@ -49,11 +51,11 @@ public class PiEstimator {
      * @param out
      * @param reporter
      */
-    public void map(WritableComparable key,
+    public void map(IntWritable key,
                     Writable val,
-                    OutputCollector out,
+                    OutputCollector<IntWritable, IntWritable> out,
                     Reporter reporter) throws IOException {
-      int nSamples = ((IntWritable) key).get();
+      int nSamples = key.get();
       for(int idx = 0; idx < nSamples; idx++) {
         double x = r.nextDouble();
         double y = r.nextDouble();
@@ -74,7 +76,9 @@ public class PiEstimator {
     }
   }
   
-  public static class PiReducer extends MapReduceBase implements Reducer {
+  public static class PiReducer extends MapReduceBase 
+    implements Reducer<IntWritable, IntWritable, WritableComparable, Writable> {
+    
     int numInside = 0;
     int numOutside = 0;
     JobConf conf;
@@ -91,18 +95,18 @@ public class PiEstimator {
      * @param output
      * @param reporter
      */
-    public void reduce(WritableComparable key,
-                       Iterator values,
-                       OutputCollector output,
+    public void reduce(IntWritable key,
+                       Iterator<IntWritable> values,
+                       OutputCollector<WritableComparable, Writable> output,
                        Reporter reporter) throws IOException {
-      if (((IntWritable)key).get() == 1) {
+      if (key.get() == 1) {
         while (values.hasNext()) {
-          int num = ((IntWritable)values.next()).get();
+          int num = values.next().get();
           numInside += num;
         }
       } else {
         while (values.hasNext()) {
-          int num = ((IntWritable)values.next()).get();
+          int num = values.next().get();
           numOutside += num;
         }
       }

+ 35 - 22
src/test/org/apache/hadoop/mapred/SortValidator.java

@@ -31,6 +31,7 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.mapred.SortValidator.RecordStatsChecker.RecordStatsWritable;
 import org.apache.hadoop.mapred.lib.HashPartitioner;
 import org.apache.hadoop.fs.*;
 
@@ -129,10 +130,13 @@ public class SortValidator {
       public int getChecksum() { return checksum; }
     }
     
-    public static class Map extends MapReduceBase implements Mapper {
+    public static class Map extends MapReduceBase
+      implements Mapper<BytesWritable, BytesWritable,
+                        IntWritable, RecordStatsWritable> {
+      
       private IntWritable key = null;
       private BytesWritable prevKey = null;
-      private Partitioner partitioner = null;
+      private Partitioner<BytesWritable, BytesWritable> partitioner = null;
       private int partition = -1;
       private int noSortReducers = -1;
       private long recordId = -1;
@@ -142,7 +146,7 @@ public class SortValidator {
         key = deduceInputFile(job);
         
         if (key == sortOutput) {
-          partitioner = new HashPartitioner();
+          partitioner = new HashPartitioner<BytesWritable, BytesWritable>();
           
           // Figure the 'current' partition and no. of reduces of the 'sort'
           try {
@@ -159,12 +163,12 @@ public class SortValidator {
         }
       }
       
-      public void map(WritableComparable key, 
-                      Writable value,
-                      OutputCollector output, 
+      public void map(BytesWritable key, 
+                      BytesWritable value,
+                      OutputCollector<IntWritable, RecordStatsWritable> output, 
                       Reporter reporter) throws IOException {
-        BytesWritable bwKey = (BytesWritable)key;
-        BytesWritable bwValue = (BytesWritable)value;
+        BytesWritable bwKey = key;
+        BytesWritable bwValue = value;
         ++recordId;
         
         if (this.key == sortOutput) {
@@ -201,15 +205,19 @@ public class SortValidator {
       }
     }
     
-    public static class Reduce extends MapReduceBase implements Reducer {
-      public void reduce(WritableComparable key, Iterator values,
-                         OutputCollector output, 
+    public static class Reduce extends MapReduceBase
+      implements Reducer<IntWritable, RecordStatsWritable,
+                         IntWritable, RecordStatsWritable> {
+      
+      public void reduce(IntWritable key, Iterator<RecordStatsWritable> values,
+                         OutputCollector<IntWritable,
+                                         RecordStatsWritable> output, 
                          Reporter reporter) throws IOException {
         long bytes = 0;
         long records = 0;
         int xor = 0;
         while (values.hasNext()) {
-          RecordStatsWritable stats = ((RecordStatsWritable)values.next());
+          RecordStatsWritable stats = values.next();
           bytes += stats.getBytes();
           records += stats.getRecords();
           xor ^= stats.getChecksum(); 
@@ -308,7 +316,10 @@ public class SortValidator {
    */
   public static class RecordChecker {
     
-    public static class Map extends MapReduceBase implements Mapper {
+    public static class Map extends MapReduceBase
+      implements Mapper<BytesWritable, BytesWritable,
+                        BytesWritable, IntWritable> {
+      
       private IntWritable value = null;
       
       public void configure(JobConf job) {
@@ -316,27 +327,29 @@ public class SortValidator {
         value = deduceInputFile(job);
       }
       
-      public void map(WritableComparable key, 
-                      Writable value,
-                      OutputCollector output, 
+      public void map(BytesWritable key, 
+                      BytesWritable value,
+                      OutputCollector<BytesWritable, IntWritable> output, 
                       Reporter reporter) throws IOException {
         // newKey = (key, value)
-        BytesWritable keyValue = 
-          new BytesWritable(pair((BytesWritable)key, (BytesWritable)value));
+        BytesWritable keyValue = new BytesWritable(pair(key, value));
     
         // output (newKey, value)
         output.collect(keyValue, this.value);
       }
     }
     
-    public static class Reduce extends MapReduceBase implements Reducer {
-      public void reduce(WritableComparable key, Iterator values,
-                         OutputCollector output, 
+    public static class Reduce extends MapReduceBase
+      implements Reducer<BytesWritable, IntWritable,
+                        BytesWritable, IntWritable> {
+      
+      public void reduce(BytesWritable key, Iterator<IntWritable> values,
+                         OutputCollector<BytesWritable, IntWritable> output,
                          Reporter reporter) throws IOException {
         int ones = 0;
         int twos = 0;
         while (values.hasNext()) {
-          IntWritable count = ((IntWritable) values.next()); 
+          IntWritable count = values.next(); 
           if (count.equals(sortInput)) {
             ++ones;
           } else if (count.equals(sortOutput)) {

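SortValidator's typed partitioner lookup can be exercised on its own roughly as below (a sketch; the byte contents and the reduce count are made up):

import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.mapred.Partitioner;
import org.apache.hadoop.mapred.lib.HashPartitioner;

public class ExamplePartitionCheck {
  public static void main(String[] args) {
    // Typed partitioner: no raw Partitioner and no casts at the call site.
    Partitioner<BytesWritable, BytesWritable> partitioner =
      new HashPartitioner<BytesWritable, BytesWritable>();

    BytesWritable key = new BytesWritable(new byte[] { 1, 2, 3 });
    BytesWritable value = new BytesWritable(new byte[] { 4, 5 });

    // Same call SortValidator.Map uses to deduce which reduce produced a key.
    int partition = partitioner.getPartition(key, value, 10);
    System.out.println("key falls into partition " + partition);
  }
}
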
+ 39 - 25
src/test/org/apache/hadoop/mapred/TestComparators.java

@@ -51,16 +51,19 @@ public class TestComparators extends TestCase
    * mediate key value pairs are ordered by {input key, value}.
    * Think of the random value as a timestamp associated with the record. 
    */
-  static class RandomGenMapper implements Mapper {
+  static class RandomGenMapper
+    implements Mapper<IntWritable, Writable, IntWritable, IntWritable> {
+    
     public void configure(JobConf job) {
     }
     
-    public void map(WritableComparable key, Writable value,
-                    OutputCollector out, Reporter reporter) throws IOException {
+    public void map(IntWritable key, Writable value,
+                    OutputCollector<IntWritable, IntWritable> out,
+                    Reporter reporter) throws IOException {
       int num_values = 5;
       for(int i = 0; i < num_values; ++i) {
         int val = rng.nextInt(num_values);
-        int compositeKey = ((IntWritable)(key)).get() * 100 + val;
+        int compositeKey = key.get() * 100 + val;
         out.collect(new IntWritable(compositeKey), new IntWritable(val));
       }
     }
@@ -72,12 +75,16 @@ public class TestComparators extends TestCase
   /** 
    * Your basic identity mapper. 
    */
-  static class IdentityMapper implements Mapper {
+  static class IdentityMapper
+    implements Mapper<WritableComparable, Writable,
+                      WritableComparable, Writable> {
+    
     public void configure(JobConf job) {
     }
     
     public void map(WritableComparable key, Writable value,
-                    OutputCollector out, Reporter reporter) throws IOException {
+                    OutputCollector<WritableComparable, Writable> out,
+                    Reporter reporter) throws IOException {
       out.collect(key, value);
     }
     
@@ -88,14 +95,17 @@ public class TestComparators extends TestCase
   /** 
    * Checks whether keys are in ascending order.  
    */
-  static class AscendingKeysReducer implements Reducer {
+  static class AscendingKeysReducer
+    implements Reducer<IntWritable, Writable, IntWritable, Text> {
+    
     public void configure(JobConf job) {}
 
     // keep track of the last key we've seen
     private int lastKey = Integer.MIN_VALUE;
-    public void reduce(WritableComparable key, Iterator values, 
-        OutputCollector out, Reporter reporter) throws IOException {
-      int currentKey = ((IntWritable)(key)).get();
+    public void reduce(IntWritable key, Iterator<Writable> values, 
+                       OutputCollector<IntWritable, Text> out,
+                       Reporter reporter) throws IOException {
+      int currentKey = key.get();
       // keys should be in ascending order
       if (currentKey < lastKey) {
         fail("Keys not in sorted ascending order");
@@ -110,13 +120,15 @@ public class TestComparators extends TestCase
   /** 
    * Checks whether keys are in descending order.
    */
-  static class DescendingKeysReducer implements Reducer {
+  static class DescendingKeysReducer
+    implements Reducer<IntWritable, Writable, IntWritable, Text> {
     public void configure(JobConf job) {}
 
     // keep track of the last key we've seen
     private int lastKey = Integer.MAX_VALUE;
-    public void reduce(WritableComparable key, Iterator values, 
-        OutputCollector out, Reporter reporter) throws IOException {
+    public void reduce(IntWritable key, Iterator<Writable> values, 
+                       OutputCollector<IntWritable, Text> out,
+                       Reporter reporter) throws IOException {
       int currentKey = ((IntWritable)(key)).get();
       // keys should be in descending order
       if (currentKey > lastKey) {
@@ -134,19 +146,20 @@ public class TestComparators extends TestCase
    * should have 5 values if the grouping is correct). It also checks whether
    * the keys themselves are in ascending order.
    */
-  static class AscendingGroupReducer implements Reducer {
+  static class AscendingGroupReducer
+    implements Reducer<IntWritable, IntWritable, IntWritable, Text> {
     
     public void configure(JobConf job) {
     }
 
     // keep track of the last key we've seen
     private int lastKey = Integer.MIN_VALUE;
-    public void reduce(WritableComparable key,
-                       Iterator values,
-                       OutputCollector out,
+    public void reduce(IntWritable key,
+                       Iterator<IntWritable> values,
+                       OutputCollector<IntWritable, Text> out,
                        Reporter reporter) throws IOException {
       // check key order
-      int currentKey = ((IntWritable)(key)).get();
+      int currentKey = key.get();
       if (currentKey < lastKey) {
         fail("Keys not in sorted ascending order");
       }
@@ -155,7 +168,7 @@ public class TestComparators extends TestCase
       IntWritable previous = new IntWritable(Integer.MIN_VALUE);
       int valueCount = 0;
       while (values.hasNext()) {
-        IntWritable current = (IntWritable) values.next();
+        IntWritable current = values.next();
         
         // Check that the values are sorted
         if (current.compareTo(previous) < 0)
@@ -177,19 +190,20 @@ public class TestComparators extends TestCase
    * whether they are correctly grouped by key (i.e. each call to reduce
    * should have 5 values if the grouping is correct). 
    */
-  static class DescendingGroupReducer implements Reducer {
+  static class DescendingGroupReducer
+    implements Reducer<IntWritable, IntWritable, IntWritable, Text> {
     
     public void configure(JobConf job) {
     }
 
     // keep track of the last key we've seen
     private int lastKey = Integer.MAX_VALUE;
-    public void reduce(WritableComparable key,
-                       Iterator values,
-                       OutputCollector out,
+    public void reduce(IntWritable key,
+                       Iterator<IntWritable> values,
+                       OutputCollector<IntWritable, Text> out,
                        Reporter reporter) throws IOException {
       // check key order
-      int currentKey = ((IntWritable)(key)).get();
+      int currentKey = key.get();
       if (currentKey > lastKey) {
         fail("Keys not in sorted descending order");
       }
@@ -198,7 +212,7 @@ public class TestComparators extends TestCase
       IntWritable previous = new IntWritable(Integer.MAX_VALUE);
       int valueCount = 0;
       while (values.hasNext()) {
-        IntWritable current = (IntWritable) values.next();
+        IntWritable current = values.next();
         
         // Check that the values are sorted
         if (current.compareTo(previous) > 0)

+ 8 - 8
src/test/org/apache/hadoop/mapred/TestKeyValueTextInputFormat.java

@@ -81,7 +81,7 @@ public class TestKeyValueTextInputFormat extends TestCase {
       }
 
       // try splitting the file in a variety of sizes
-      TextInputFormat format = new KeyValueTextInputFormat();
+      KeyValueTextInputFormat format = new KeyValueTextInputFormat();
       format.configure(job);
       for (int i = 0; i < 3; i++) {
         int numSplits = random.nextInt(MAX_LENGTH/20)+1;
@@ -93,14 +93,14 @@ public class TestKeyValueTextInputFormat extends TestCase {
         BitSet bits = new BitSet(length);
         for (int j = 0; j < splits.length; j++) {
           LOG.debug("split["+j+"]= " + splits[j]);
-          RecordReader reader =
+          RecordReader<Text, Text> reader =
             format.getRecordReader(splits[j], job, reporter);
           Class readerClass = reader.getClass();
           assertEquals("reader class is KeyValueLineRecordReader.", KeyValueLineRecordReader.class, readerClass);        
 
-          Writable key = reader.createKey();
+          Text key = reader.createKey();
           Class keyClass = key.getClass();
-          Writable value = reader.createValue();
+          Text value = reader.createValue();
           Class valueClass = value.getClass();
           assertEquals("Key class is Text.", Text.class, keyClass);
           assertEquals("Value class is Text.", Text.class, valueClass);
@@ -187,14 +187,14 @@ public class TestKeyValueTextInputFormat extends TestCase {
   
   private static final Reporter voidReporter = Reporter.NULL;
   
-  private static List<Text> readSplit(InputFormat format, 
+  private static List<Text> readSplit(KeyValueTextInputFormat format, 
                                       InputSplit split, 
                                       JobConf job) throws IOException {
     List<Text> result = new ArrayList<Text>();
-    RecordReader reader = format.getRecordReader(split, job,
+    RecordReader<Text, Text> reader = format.getRecordReader(split, job,
                                                  voidReporter);
-    Text key = (Text) reader.createKey();
-    Text value = (Text) reader.createValue();
+    Text key = reader.createKey();
+    Text value = reader.createValue();
     while (reader.next(key, value)) {
       result.add(value);
       value = (Text) reader.createValue();

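With createKey()/createValue() returning Text, a readSplit helper like the one in this test no longer needs casts; a self-contained sketch (assumes format.configure(job) has been called and the split comes from format.getSplits, as in the test):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.KeyValueTextInputFormat;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;

public class ExampleReadSplit {
  // Collect every value in one split; createKey()/createValue() return Text,
  // so the (Text) casts required by the raw RecordReader are gone.
  static List<Text> readSplit(KeyValueTextInputFormat format,
                              InputSplit split, JobConf job) throws IOException {
    List<Text> result = new ArrayList<Text>();
    RecordReader<Text, Text> reader =
      format.getRecordReader(split, job, Reporter.NULL);
    Text key = reader.createKey();
    Text value = reader.createValue();
    try {
      while (reader.next(key, value)) {
        result.add(value);
        value = reader.createValue();   // keep each collected value distinct
      }
    } finally {
      reader.close();
    }
    return result;
  }
}
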
+ 11 - 10
src/test/org/apache/hadoop/mapred/TestMapOutputType.java

@@ -38,16 +38,16 @@ public class TestMapOutputType extends TestCase
    * type specified in conf will be anything but.
    */
    
-  static class TextGen implements Mapper {
+  static class TextGen
+    implements Mapper<WritableComparable, Writable, Text, Text> {
+    
     public void configure(JobConf job) {
     }
     
-    public void map(WritableComparable key, Writable val, OutputCollector out,
+    public void map(WritableComparable key, Writable val,
+                    OutputCollector<Text, Text> out,
                     Reporter reporter) throws IOException {
-      key = new Text("Hello");
-      val = new Text("World");
-      
-      out.collect(key, val);
+      out.collect(new Text("Hello"), new Text("World"));
     }
     
     public void close() {
@@ -57,14 +57,15 @@ public class TestMapOutputType extends TestCase
   /** A do-nothing reducer class. We won't get this far, really.
    *
    */
-  static class TextReduce implements Reducer {
+  static class TextReduce
+    implements Reducer<Text, Text, Text, Text> {
     
     public void configure(JobConf job) {
     }
 
-    public void reduce(WritableComparable key,
-                       Iterator values,
-                       OutputCollector out,
+    public void reduce(Text key,
+                       Iterator<Text> values,
+                       OutputCollector<Text, Text> out,
                        Reporter reporter) throws IOException {
       out.collect(new Text("Test"), new Text("Me"));
     }

+ 49 - 28
src/test/org/apache/hadoop/mapred/TestMapRed.java

@@ -83,13 +83,17 @@ public class TestMapRed extends TestCase {
    * of numbers in random order, but where each number appears
    * as many times as we were instructed.
    */
-  static class RandomGenMapper implements Mapper {
+  static class RandomGenMapper
+    implements Mapper<IntWritable, IntWritable, IntWritable, IntWritable> {
+    
     public void configure(JobConf job) {
     }
 
-    public void map(WritableComparable key, Writable val, OutputCollector out, Reporter reporter) throws IOException {
-      int randomVal = ((IntWritable) key).get();
-      int randomCount = ((IntWritable) val).get();
+    public void map(IntWritable key, IntWritable val,
+                    OutputCollector<IntWritable, IntWritable> out,
+                    Reporter reporter) throws IOException {
+      int randomVal = key.get();
+      int randomCount = val.get();
 
       for (int i = 0; i < randomCount; i++) {
         out.collect(new IntWritable(Math.abs(r.nextInt())), new IntWritable(randomVal));
@@ -100,13 +104,17 @@ public class TestMapRed extends TestCase {
   }
   /**
    */
-  static class RandomGenReducer implements Reducer {
+  static class RandomGenReducer
+    implements Reducer<IntWritable, IntWritable, Text, Text> {
+    
     public void configure(JobConf job) {
     }
 
-    public void reduce(WritableComparable key, Iterator it, OutputCollector out, Reporter reporter) throws IOException {
+    public void reduce(IntWritable key, Iterator<IntWritable> it,
+                       OutputCollector<Text, Text> out,
+                       Reporter reporter) throws IOException {
       while (it.hasNext()) {
-        int val = ((IntWritable) it.next()).get();
+        int val = it.next().get();
         out.collect(new Text("" + val), new Text(""));
       }
     }
@@ -130,26 +138,31 @@ public class TestMapRed extends TestCase {
    * Each key here is a random number, and the count is the
    * number of times the number was emitted.
    */
-  static class RandomCheckMapper implements Mapper {
+  static class RandomCheckMapper
+    implements Mapper<WritableComparable, Text, IntWritable, IntWritable> {
+    
     public void configure(JobConf job) {
     }
 
-    public void map(WritableComparable key, Writable val, OutputCollector out, Reporter reporter) throws IOException {
-      Text str = (Text) val;
-
-      out.collect(new IntWritable(Integer.parseInt(str.toString().trim())), new IntWritable(1));
+    public void map(WritableComparable key, Text val,
+                    OutputCollector<IntWritable, IntWritable> out,
+                    Reporter reporter) throws IOException {
+      out.collect(new IntWritable(Integer.parseInt(val.toString().trim())), new IntWritable(1));
     }
     public void close() {
     }
   }
   /**
    */
-  static class RandomCheckReducer implements Reducer {
+  static class RandomCheckReducer
+      implements Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
     public void configure(JobConf job) {
     }
         
-    public void reduce(WritableComparable key, Iterator it, OutputCollector out, Reporter reporter) throws IOException {
-      int keyint = ((IntWritable) key).get();
+    public void reduce(IntWritable key, Iterator<IntWritable> it,
+                       OutputCollector<IntWritable, IntWritable> out,
+                       Reporter reporter) throws IOException {
+      int keyint = key.get();
       int count = 0;
       while (it.hasNext()) {
         it.next();
@@ -169,28 +182,35 @@ public class TestMapRed extends TestCase {
    * Thus, the map() function is just the identity function
    * and reduce() just sums.  Nothing to see here!
    */
-  static class MergeMapper implements Mapper {
+  static class MergeMapper
+    implements Mapper<IntWritable, IntWritable, IntWritable, IntWritable> {
+    
     public void configure(JobConf job) {
     }
 
-    public void map(WritableComparable key, Writable val, OutputCollector out, Reporter reporter) throws IOException {
-      int keyint = ((IntWritable) key).get();
-      int valint = ((IntWritable) val).get();
+    public void map(IntWritable key, IntWritable val,
+                    OutputCollector<IntWritable, IntWritable> out,
+                    Reporter reporter) throws IOException {
+      int keyint = key.get();
+      int valint = val.get();
 
       out.collect(new IntWritable(keyint), new IntWritable(valint));
     }
     public void close() {
     }
   }
-  static class MergeReducer implements Reducer {
+  static class MergeReducer
+    implements Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
     public void configure(JobConf job) {
     }
         
-    public void reduce(WritableComparable key, Iterator it, OutputCollector out, Reporter reporter) throws IOException {
-      int keyint = ((IntWritable) key).get();
+    public void reduce(IntWritable key, Iterator<IntWritable> it,
+                       OutputCollector<IntWritable, IntWritable> out,
+                       Reporter reporter) throws IOException {
+      int keyint = key.get();
       int total = 0;
       while (it.hasNext()) {
-        total += ((IntWritable) it.next()).get();
+        total += it.next().get();
       }
       out.collect(new IntWritable(keyint), new IntWritable(total));
     }
@@ -214,15 +234,16 @@ public class TestMapRed extends TestCase {
     launch();
   }
 
-  private static class MyMap implements Mapper {
+  private static class MyMap
+    implements Mapper<WritableComparable, Text, Text, Text> {
       
     public void configure(JobConf conf) {
     }
       
-    public void map(WritableComparable key, Writable value,
-                    OutputCollector output, Reporter reporter
-                    ) throws IOException {
-      String str = ((Text) value).toString().toLowerCase();
+    public void map(WritableComparable key, Text value,
+                    OutputCollector<Text, Text> output,
+                    Reporter reporter) throws IOException {
+      String str = value.toString().toLowerCase();
       output.collect(new Text(str), value);
     }
 

+ 25 - 16
src/test/org/apache/hadoop/mapred/TestMiniMRLocalFS.java

@@ -111,7 +111,9 @@ public class TestMiniMRLocalFS extends TestCase {
     
   }
   
-  private static class MyInputFormat implements InputFormat {
+  private static class MyInputFormat
+    implements InputFormat<IntWritable, Text> {
+    
     static final String[] data = new String[]{
       "crocodile pants", 
       "aunt annie", 
@@ -151,7 +153,7 @@ public class TestMiniMRLocalFS extends TestCase {
       }
     }
 
-    static class MyRecordReader implements RecordReader {
+    static class MyRecordReader implements RecordReader<IntWritable, Text> {
       int index;
       int past;
       int length;
@@ -162,21 +164,21 @@ public class TestMiniMRLocalFS extends TestCase {
         this.length = length;
       }
 
-      public boolean next(Writable key, Writable value) throws IOException {
+      public boolean next(IntWritable key, Text value) throws IOException {
         if (index < past) {
-          ((IntWritable) key).set(index);
-          ((Text) value).set(data[index]);
+          key.set(index);
+          value.set(data[index]);
           index += 1;
           return true;
         }
         return false;
       }
       
-      public WritableComparable createKey() {
+      public IntWritable createKey() {
         return new IntWritable();
       }
       
-      public Writable createValue() {
+      public Text createValue() {
         return new Text();
       }
 
@@ -200,18 +202,23 @@ public class TestMiniMRLocalFS extends TestCase {
                            new MySplit(4, 2)};
     }
 
-    public RecordReader getRecordReader(InputSplit split,
-                                        JobConf job, 
-                                        Reporter reporter) throws IOException {
+    public RecordReader<IntWritable, Text> getRecordReader(InputSplit split,
+                                                           JobConf job, 
+                                                           Reporter reporter)
+                                                           throws IOException {
       MySplit sp = (MySplit) split;
       return new MyRecordReader(sp.first, sp.length);
     }
     
   }
   
-  static class MyMapper extends MapReduceBase implements Mapper {
+  static class MyMapper extends MapReduceBase
+    implements Mapper<WritableComparable, Writable,
+                      WritableComparable, Writable> {
+    
     public void map(WritableComparable key, Writable value, 
-                    OutputCollector out, Reporter reporter) throws IOException {
+                    OutputCollector<WritableComparable, Writable> out,
+                    Reporter reporter) throws IOException {
       System.out.println("map: " + key + ", " + value);
       out.collect((WritableComparable) value, key);
       InputSplit split = reporter.getInputSplit();
@@ -222,10 +229,12 @@ public class TestMiniMRLocalFS extends TestCase {
     }
   }
 
-  static class MyReducer extends MapReduceBase implements Reducer {
-    public void reduce(WritableComparable key, Iterator values, 
-                       OutputCollector output, Reporter reporter
-                       ) throws IOException {
+  static class MyReducer extends MapReduceBase
+    implements Reducer<WritableComparable, Writable,
+                      WritableComparable, Writable> {
+    public void reduce(WritableComparable key, Iterator<Writable> values, 
+                       OutputCollector<WritableComparable, Writable> output,
+                       Reporter reporter) throws IOException {
       try {
         InputSplit split = reporter.getInputSplit();
         throw new IOException("Got an input split of " + split);

+ 5 - 4
src/test/org/apache/hadoop/mapred/TestSequenceFileAsTextInputFormat.java

@@ -71,7 +71,8 @@ public class TestSequenceFileAsTextInputFormat extends TestCase {
       }
 
       // try splitting the file in a variety of sizes
-      InputFormat format = new SequenceFileAsTextInputFormat();
+      InputFormat<Text, Text> format =
+        new SequenceFileAsTextInputFormat();
       
       for (int i = 0; i < 3; i++) {
         int numSplits =
@@ -83,12 +84,12 @@ public class TestSequenceFileAsTextInputFormat extends TestCase {
         // check each split
         BitSet bits = new BitSet(length);
         for (int j = 0; j < splits.length; j++) {
-          RecordReader reader =
+          RecordReader<Text, Text> reader =
             format.getRecordReader(splits[j], job, reporter);
           Class readerClass = reader.getClass();
           assertEquals("reader class is SequenceFileAsTextRecordReader.", SequenceFileAsTextRecordReader.class, readerClass);        
-          Text value = (Text)reader.createValue();
-          Text key = (Text)reader.createKey();
+          Text value = reader.createValue();
+          Text key = reader.createKey();
           try {
             int count = 0;
             while (reader.next(key, value)) {

+ 3 - 2
src/test/org/apache/hadoop/mapred/TestSequenceFileInputFilter.java

@@ -70,7 +70,8 @@ public class TestSequenceFileInputFilter extends TestCase {
 
 
   private int countRecords(int numSplits) throws IOException {
-    InputFormat format = new SequenceFileInputFilter();
+    InputFormat<Text, BytesWritable> format =
+      new SequenceFileInputFilter<Text, BytesWritable>();
     Text key = new Text();
     BytesWritable value = new BytesWritable();
     if (numSplits==0) {
@@ -83,7 +84,7 @@ public class TestSequenceFileInputFilter extends TestCase {
     int count = 0;
     LOG.info("Generated " + splits.length + " splits.");
     for (int j = 0; j < splits.length; j++) {
-      RecordReader reader =
+      RecordReader<Text, BytesWritable> reader =
         format.getRecordReader(splits[j], job, reporter);
       try {
         while (reader.next(key, value)) {

+ 3 - 2
src/test/org/apache/hadoop/mapred/TestSequenceFileInputFormat.java

@@ -73,7 +73,8 @@ public class TestSequenceFileInputFormat extends TestCase {
       }
 
       // try splitting the file in a variety of sizes
-      InputFormat format = new SequenceFileInputFormat();
+      InputFormat<IntWritable, BytesWritable> format =
+        new SequenceFileInputFormat<IntWritable, BytesWritable>();
       IntWritable key = new IntWritable();
       BytesWritable value = new BytesWritable();
       for (int i = 0; i < 3; i++) {
@@ -86,7 +87,7 @@ public class TestSequenceFileInputFormat extends TestCase {
         // check each split
         BitSet bits = new BitSet(length);
         for (int j = 0; j < splits.length; j++) {
-          RecordReader reader =
+          RecordReader<IntWritable, BytesWritable> reader =
             format.getRecordReader(splits[j], job, reporter);
           try {
             int count = 0;

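The split-then-scan loop these SequenceFile input-format tests share looks roughly like this with the generics in place (a sketch; assumes the job's input path points at an existing SequenceFile of IntWritable/BytesWritable records):

import java.io.IOException;

import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SequenceFileInputFormat;

public class ExampleSplitScan {
  static int countRecords(JobConf job, int numSplits) throws IOException {
    InputFormat<IntWritable, BytesWritable> format =
      new SequenceFileInputFormat<IntWritable, BytesWritable>();
    InputSplit[] splits = format.getSplits(job, numSplits);
    int count = 0;
    for (int j = 0; j < splits.length; j++) {
      RecordReader<IntWritable, BytesWritable> reader =
        format.getRecordReader(splits[j], job, Reporter.NULL);
      IntWritable key = reader.createKey();      // typed factory methods
      BytesWritable value = reader.createValue();
      try {
        while (reader.next(key, value)) {
          count++;
        }
      } finally {
        reader.close();
      }
    }
    return count;
  }
}
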
+ 6 - 6
src/test/org/apache/hadoop/mapred/TestTextInputFormat.java

@@ -99,7 +99,7 @@ public class TestTextInputFormat extends TestCase {
         BitSet bits = new BitSet(length);
         for (int j = 0; j < splits.length; j++) {
           LOG.debug("split["+j+"]= " + splits[j]);
-          RecordReader reader =
+          RecordReader<LongWritable, Text> reader =
             format.getRecordReader(splits[j], job, reporter);
           try {
             int count = 0;
@@ -184,14 +184,14 @@ public class TestTextInputFormat extends TestCase {
   
   private static final Reporter voidReporter = Reporter.NULL;
   
-  private static List<Text> readSplit(InputFormat format, 
+  private static List<Text> readSplit(TextInputFormat format, 
                                       InputSplit split, 
                                       JobConf job) throws IOException {
     List<Text> result = new ArrayList<Text>();
-    RecordReader reader = format.getRecordReader(split, job,
-                                                 voidReporter);
-    LongWritable key = (LongWritable) reader.createKey();
-    Text value = (Text) reader.createValue();
+    RecordReader<LongWritable, Text> reader =
+      format.getRecordReader(split, job, voidReporter);
+    LongWritable key = reader.createKey();
+    Text value = reader.createValue();
     while (reader.next(key, value)) {
       result.add(value);
       value = (Text) reader.createValue();

+ 10 - 9
src/test/org/apache/hadoop/mapred/TestTextOutputFormat.java

@@ -54,9 +54,10 @@ public class TestTextOutputFormat extends TestCase {
     // A reporter that does nothing
     Reporter reporter = Reporter.NULL;
 
-    TextOutputFormat theOutputFormat = new TextOutputFormat();
-    RecordWriter theRecodWriter = theOutputFormat.getRecordWriter(localFs, job,
-                                                                  file, reporter);
+    TextOutputFormat<Text, Text> theOutputFormat =
+      new TextOutputFormat<Text, Text>();
+    RecordWriter<Text, Text> theRecordWriter =
+      theOutputFormat.getRecordWriter(localFs, job, file, reporter);
 
     Text key1 = new Text("key1");
     Text key2 = new Text("key2");
@@ -64,14 +65,14 @@ public class TestTextOutputFormat extends TestCase {
     Text val2 = new Text("val2");
 
     try {
-      theRecodWriter.write(key1, val1);
-      theRecodWriter.write(null, val1);
-      theRecodWriter.write(key1, null);
-      theRecodWriter.write(null, null);
-      theRecodWriter.write(key2, val2);
+      theRecordWriter.write(key1, val1);
+      theRecordWriter.write(null, val1);
+      theRecordWriter.write(key1, null);
+      theRecordWriter.write(null, null);
+      theRecordWriter.write(key2, val2);
 
     } finally {
-      theRecodWriter.close(reporter);
+      theRecordWriter.close(reporter);
     }
     File expectedFile = new File(new Path(workDir, file).toString()); 
     StringBuffer expectedOutput = new StringBuffer();

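Outside the test, obtaining and driving the typed writer follows the same shape (a sketch; assumes fs, job, and the job's output directory are already configured, and the part file name is supplied by the caller):

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextOutputFormat;

public class ExampleTextOutput {
  static void writePairs(FileSystem fs, JobConf job, String name)
      throws IOException {
    TextOutputFormat<Text, Text> format = new TextOutputFormat<Text, Text>();
    // write(key, value) is now checked against <Text, Text> at compile time
    RecordWriter<Text, Text> writer =
      format.getRecordWriter(fs, job, name, Reporter.NULL);
    try {
      writer.write(new Text("key1"), new Text("val1"));
      writer.write(new Text("key2"), new Text("val2"));
    } finally {
      writer.close(Reporter.NULL);
    }
  }
}
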
+ 9 - 6
src/test/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java

@@ -82,15 +82,18 @@ public class TestJobControl extends junit.framework.TestCase {
     out.close();
   }
 
-  public static class DataCopy extends MapReduceBase implements Mapper,
-                                                                Reducer {
-    public void map(WritableComparable key, Writable value,
-                    OutputCollector output, Reporter reporter) throws IOException {
+  public static class DataCopy extends MapReduceBase
+    implements Mapper<WritableComparable, Text, Text, Text>,
+               Reducer<Text, Text, Text, Text> {
+    public void map(WritableComparable key, Text value,
+                    OutputCollector<Text, Text> output,
+                    Reporter reporter) throws IOException {
       output.collect(new Text(key.toString()), value);
     }
 
-    public void reduce(WritableComparable key, Iterator values,
-                       OutputCollector output, Reporter reporter) throws IOException {
+    public void reduce(Text key, Iterator<Text> values,
+                       OutputCollector<Text, Text> output,
+                       Reporter reporter) throws IOException {
       Text dumbKey = new Text("");
       while (values.hasNext()) {
         Text data = (Text) values.next();

+ 3 - 3
src/test/org/apache/hadoop/mapred/lib/aggregate/AggregatorTests.java

@@ -24,13 +24,13 @@ import java.util.Map.Entry;
 
 public class AggregatorTests extends ValueAggregatorBaseDescriptor {
   
-  public ArrayList<Entry> generateKeyValPairs(Object key, Object val) {
-    ArrayList<Entry> retv = new ArrayList<Entry>();
+  public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key, Object val) {
+    ArrayList<Entry<Text, Text>> retv = new ArrayList<Entry<Text, Text>>();
     String [] words = val.toString().split(" ");
     
     String countType;
     String id;
-    Entry e;
+    Entry<Text, Text> e;
     
     for (String word: words) {
       long numVal = Long.parseLong(word);

+ 10 - 7
src/test/org/apache/hadoop/mapred/pipes/WordCountInputFormat.java

@@ -30,7 +30,9 @@ import org.apache.hadoop.mapred.*;
  * It defines an InputFormat with InputSplits that are just strings. The
  * RecordReaders are not implemented in Java, naturally...
  */
-public class WordCountInputFormat implements InputFormat {
+public class WordCountInputFormat
+  implements InputFormat<IntWritable, Text> {
+  
   static class WordCountInputSplit implements InputSplit  {
     private String filename;
     WordCountInputSplit() { }
@@ -59,16 +61,17 @@ public class WordCountInputFormat implements InputFormat {
     return result.toArray(new InputSplit[result.size()]);
   }
   public void validateInput(JobConf conf) { }
-  public RecordReader getRecordReader(InputSplit split, JobConf conf, 
-                                      Reporter reporter) {
-    return new RecordReader(){
-      public boolean next(Writable key, Writable value) throws IOException {
+  public RecordReader<IntWritable, Text> getRecordReader(InputSplit split,
+                                                         JobConf conf, 
+                                                         Reporter reporter) {
+    return new RecordReader<IntWritable, Text>(){
+      public boolean next(IntWritable key, Text value) throws IOException {
         return false;
       }
-      public WritableComparable createKey() {
+      public IntWritable createKey() {
         return new IntWritable();
       }
-      public Writable createValue() {
+      public Text createValue() {
         return new Text();
       }
       public long getPos() {

+ 47 - 27
src/test/org/apache/hadoop/record/TestRecordMR.java

@@ -86,14 +86,18 @@ public class TestRecordMR extends TestCase {
    * of numbers in random order, but where each number appears
    * as many times as we were instructed.
    */
-  static public class RandomGenMapper implements Mapper {
+  static public class RandomGenMapper implements Mapper<RecInt, RecInt,
+                                                        RecInt, RecString> {
     Random r = new Random();
     public void configure(JobConf job) {
     }
 
-    public void map(WritableComparable key, Writable val, OutputCollector out, Reporter reporter) throws IOException {
-      int randomVal = ((RecInt) key).getData();
-      int randomCount = ((RecInt) val).getData();
+    public void map(RecInt key,
+                    RecInt val,
+                    OutputCollector<RecInt, RecString> out,
+                    Reporter reporter) throws IOException {
+      int randomVal = key.getData();
+      int randomCount = val.getData();
 
       for (int i = 0; i < randomCount; i++) {
         out.collect(new RecInt(Math.abs(r.nextInt())),
@@ -105,18 +109,18 @@ public class TestRecordMR extends TestCase {
   }
   /**
    */
-  static public class RandomGenReducer implements Reducer {
+  static public class RandomGenReducer implements Reducer<RecInt, RecString,
+                                                          RecInt, RecString> {
     public void configure(JobConf job) {
     }
 
-    public void reduce(WritableComparable key,
-                       Iterator it,
-                       OutputCollector out,
-                       Reporter reporter)
-      throws IOException {
-      int keyint = ((RecInt) key).getData();
+    public void reduce(RecInt key,
+                       Iterator<RecString> it,
+                       OutputCollector<RecInt, RecString> out,
+                       Reporter reporter) throws IOException {
+      int keyint = key.getData();
       while (it.hasNext()) {
-        String val = ((RecString) it.next()).getData();
+        String val = it.next().getData();
         out.collect(new RecInt(Integer.parseInt(val)),
                     new RecString(""));
       }
@@ -141,13 +145,17 @@ public class TestRecordMR extends TestCase {
    * Each key here is a random number, and the count is the
    * number of times the number was emitted.
    */
-  static public class RandomCheckMapper implements Mapper {
+  static public class RandomCheckMapper implements Mapper<RecInt, RecString,
+                                                          RecInt, RecString> {
     public void configure(JobConf job) {
     }
 
-    public void map(WritableComparable key, Writable val, OutputCollector out, Reporter reporter) throws IOException {
-      int pos = ((RecInt) key).getData();
-      String str = ((RecString) val).getData();
+    public void map(RecInt key,
+                    RecString val,
+                    OutputCollector<RecInt, RecString> out,
+                    Reporter reporter) throws IOException {
+      int pos = key.getData();
+      String str = val.getData();
       out.collect(new RecInt(pos), new RecString("1"));
     }
     public void close() {
@@ -155,12 +163,16 @@ public class TestRecordMR extends TestCase {
   }
   /**
    */
-  static public class RandomCheckReducer implements Reducer {
+  static public class RandomCheckReducer implements Reducer<RecInt, RecString,
+                                                            RecInt, RecString> {
     public void configure(JobConf job) {
     }
         
-    public void reduce(WritableComparable key, Iterator it, OutputCollector out, Reporter reporter) throws IOException {
-      int keyint = ((RecInt) key).getData();
+    public void reduce(RecInt key,
+                       Iterator<RecString> it,
+                       OutputCollector<RecInt, RecString> out,
+                       Reporter reporter) throws IOException {
+      int keyint = key.getData();
       int count = 0;
       while (it.hasNext()) {
         it.next();
@@ -180,27 +192,35 @@ public class TestRecordMR extends TestCase {
    * Thus, the map() function is just the identity function
    * and reduce() just sums.  Nothing to see here!
    */
-  static public class MergeMapper implements Mapper {
+  static public class MergeMapper implements Mapper<RecInt, RecString,
+                                                    RecInt, RecInt> {
     public void configure(JobConf job) {
     }
 
-    public void map(WritableComparable key, Writable val, OutputCollector out, Reporter reporter) throws IOException {
-      int keyint = ((RecInt) key).getData();
-      String valstr = ((RecString) val).getData();
+    public void map(RecInt key,
+                    RecString val,
+                    OutputCollector<RecInt, RecInt> out,
+                    Reporter reporter) throws IOException {
+      int keyint = key.getData();
+      String valstr = val.getData();
       out.collect(new RecInt(keyint), new RecInt(Integer.parseInt(valstr)));
     }
     public void close() {
     }
   }
-  static public class MergeReducer implements Reducer {
+  static public class MergeReducer implements Reducer<RecInt, RecInt,
+                                                      RecInt, RecInt> {
     public void configure(JobConf job) {
     }
         
-    public void reduce(WritableComparable key, Iterator it, OutputCollector out, Reporter reporter) throws IOException {
-      int keyint = ((RecInt) key).getData();
+    public void reduce(RecInt key,
+                       Iterator<RecInt> it,
+                       OutputCollector<RecInt, RecInt> out,
+                       Reporter reporter) throws IOException {
+      int keyint = key.getData();
       int total = 0;
       while (it.hasNext()) {
-        total += ((RecInt) it.next()).getData();
+        total += it.next().getData();
       }
       out.collect(new RecInt(keyint), new RecInt(total));
     }

+ 3 - 2
src/test/org/apache/hadoop/record/TestRecordWritable.java

@@ -77,7 +77,8 @@ public class TestRecordWritable extends TestCase {
       }
 
       // try splitting the file in a variety of sizes
-      InputFormat format = new SequenceFileInputFormat();
+      InputFormat<RecInt, RecBuffer> format =
+        new SequenceFileInputFormat<RecInt, RecBuffer>();
       RecInt key = new RecInt();
       RecBuffer value = new RecBuffer();
       for (int i = 0; i < 3; i++) {
@@ -88,7 +89,7 @@ public class TestRecordWritable extends TestCase {
         // check each split
         BitSet bits = new BitSet(length);
         for (int j = 0; j < splits.length; j++) {
-          RecordReader reader =
+          RecordReader<RecInt, RecBuffer> reader =
             format.getRecordReader(splits[j], job, Reporter.NULL);
           try {
             int count = 0;

+ 5 - 2
src/test/testjar/ClassWordCount.java

@@ -23,6 +23,7 @@ import java.util.*;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
@@ -46,12 +47,14 @@ public class ClassWordCount {
    * For each line of input, break the line into words and emit them as
    * (<b>word</b>, <b>1</b>).
    */
-  public static class MapClass extends WordCount.MapClass implements Mapper {
+  public static class MapClass extends WordCount.MapClass
+    implements Mapper<LongWritable, Text, Text, IntWritable> {
   }
   
   /**
    * A reducer class that just emits the sum of the input values.
    */
-  public static class Reduce extends WordCount.Reduce implements Reducer {
+  public static class Reduce extends WordCount.Reduce
+    implements Reducer<Text, IntWritable, Text, IntWritable> {
   }
 }

+ 9 - 4
src/test/testjar/ExternalMapperReducer.java

@@ -32,7 +32,10 @@ import org.apache.hadoop.mapred.Reducer;
 import org.apache.hadoop.mapred.Reporter;
 
 public class ExternalMapperReducer
-  implements Mapper, Reducer {
+  implements Mapper<WritableComparable, Writable,
+                    ExternalWritable, IntWritable>,
+             Reducer<WritableComparable, Writable,
+                     WritableComparable, IntWritable> {
 
   public void configure(JobConf job) {
 
@@ -44,7 +47,8 @@ public class ExternalMapperReducer
   }
 
   public void map(WritableComparable key, Writable value,
-                  OutputCollector output, Reporter reporter)
+                  OutputCollector<ExternalWritable, IntWritable> output,
+                  Reporter reporter)
     throws IOException {
     
     if (value instanceof Text) {
@@ -54,8 +58,9 @@ public class ExternalMapperReducer
     }
   }
 
-  public void reduce(WritableComparable key, Iterator values,
-                     OutputCollector output, Reporter reporter)
+  public void reduce(WritableComparable key, Iterator<Writable> values,
+                     OutputCollector<WritableComparable, IntWritable> output,
+                     Reporter reporter)
     throws IOException {
     
     int count = 0;