
MAPREDUCE-1734. Un-deprecate the old MapReduce API in the 0.20 branch. Contributed by Todd Lipcon

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.20@1055670 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 14 years ago
parent
commit
b687ca56f8
33 changed files with 3 additions and 78 deletions
  1. CHANGES.txt  +3 -0
  2. src/mapred/org/apache/hadoop/mapred/Counters.java  +0 -3
  3. src/mapred/org/apache/hadoop/mapred/FileInputFormat.java  +0 -3
  4. src/mapred/org/apache/hadoop/mapred/FileSplit.java  +0 -3
  5. src/mapred/org/apache/hadoop/mapred/ID.java  +0 -1
  6. src/mapred/org/apache/hadoop/mapred/InputFormat.java  +0 -2
  7. src/mapred/org/apache/hadoop/mapred/InputSplit.java  +0 -2
  8. src/mapred/org/apache/hadoop/mapred/JobConf.java  +0 -2
  9. src/mapred/org/apache/hadoop/mapred/JobConfigurable.java  +0 -1
  10. src/mapred/org/apache/hadoop/mapred/JobContext.java  +0 -4
  11. src/mapred/org/apache/hadoop/mapred/JobID.java  +0 -1
  12. src/mapred/org/apache/hadoop/mapred/LineRecordReader.java  +0 -3
  13. src/mapred/org/apache/hadoop/mapred/MapReduceBase.java  +0 -1
  14. src/mapred/org/apache/hadoop/mapred/MapRunnable.java  +0 -2
  15. src/mapred/org/apache/hadoop/mapred/Mapper.java  +0 -2
  16. src/mapred/org/apache/hadoop/mapred/OutputCommitter.java  +0 -2
  17. src/mapred/org/apache/hadoop/mapred/OutputFormat.java  +0 -2
  18. src/mapred/org/apache/hadoop/mapred/Partitioner.java  +0 -2
  19. src/mapred/org/apache/hadoop/mapred/Reducer.java  +0 -2
  20. src/mapred/org/apache/hadoop/mapred/SequenceFileInputFormat.java  +0 -4
  21. src/mapred/org/apache/hadoop/mapred/SequenceFileOutputFormat.java  +0 -4
  22. src/mapred/org/apache/hadoop/mapred/TaskAttemptContext.java  +0 -5
  23. src/mapred/org/apache/hadoop/mapred/TaskAttemptID.java  +0 -1
  24. src/mapred/org/apache/hadoop/mapred/TaskID.java  +0 -1
  25. src/mapred/org/apache/hadoop/mapred/TextInputFormat.java  +0 -3
  26. src/mapred/org/apache/hadoop/mapred/TextOutputFormat.java  +0 -3
  27. src/mapred/org/apache/hadoop/mapred/lib/HashPartitioner.java  +0 -3
  28. src/mapred/org/apache/hadoop/mapred/lib/IdentityMapper.java  +0 -2
  29. src/mapred/org/apache/hadoop/mapred/lib/IdentityReducer.java  +0 -2
  30. src/mapred/org/apache/hadoop/mapred/lib/InverseMapper.java  +0 -3
  31. src/mapred/org/apache/hadoop/mapred/lib/LongSumReducer.java  +0 -3
  32. src/mapred/org/apache/hadoop/mapred/lib/NullOutputFormat.java  +0 -3
  33. src/mapred/org/apache/hadoop/mapred/lib/TokenCountMapper.java  +0 -3

+ 3 - 0
CHANGES.txt

@@ -89,6 +89,9 @@ Release 0.20.3 - 2011-1-5
 
     MAPREDUCE-1832. Allow file sizes less than 1MB in DFSIO benchmark. (shv)
 
+    MAPREDUCE-1734. Un-deprecate the old MapReduce API in the 0.20 branch.
+    (todd)
+
 Release 0.20.2 - 2010-2-19
 
   NEW FEATURES
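
With the @Deprecated annotations removed by this commit, job code written against the classic org.apache.hadoop.mapred API compiles on branch-0.20 without deprecation warnings. As a rough illustration (not part of this commit; the class name and paths are made up), a minimal driver built on that API might look like:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.FileInputFormat;
    import org.apache.hadoop.mapred.FileOutputFormat;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.TextInputFormat;
    import org.apache.hadoop.mapred.TextOutputFormat;
    import org.apache.hadoop.mapred.lib.IdentityMapper;
    import org.apache.hadoop.mapred.lib.IdentityReducer;

    // Hypothetical driver class, shown only to illustrate the classic API surface.
    public class OldApiPassThrough {
      public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(OldApiPassThrough.class);
        conf.setJobName("old-api-passthrough");

        conf.setInputFormat(TextInputFormat.class);    // keys: byte offsets, values: lines
        conf.setOutputFormat(TextOutputFormat.class);
        conf.setMapperClass(IdentityMapper.class);
        conf.setReducerClass(IdentityReducer.class);
        conf.setOutputKeyClass(LongWritable.class);
        conf.setOutputValueClass(Text.class);

        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        JobClient.runJob(conf);   // submits the job and waits for completion
      }
    }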

+ 0 - 3
src/mapred/org/apache/hadoop/mapred/Counters.java

@@ -47,9 +47,7 @@ import org.apache.hadoop.util.StringUtils;
  * 
  * <p><code>Counters</code> are bunched into {@link Group}s, each comprising of
  * counters from a particular <code>Enum</code> class. 
- * @deprecated Use {@link org.apache.hadoop.mapreduce.Counters} instead.
  */
-@Deprecated
 public class Counters implements Writable, Iterable<Counters.Group> {
   private static final Log LOG = LogFactory.getLog(Counters.class);
   private static final char GROUP_OPEN = '{';
@@ -110,7 +108,6 @@ public class Counters implements Writable, Iterable<Counters.Group> {
     }
     
     // Checks for (content) equality of two (basic) counters
-    @Deprecated
     synchronized boolean contentEquals(Counter c) {
       return this.equals(c);
     }
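
As context (not part of the diff), counters in the classic API are usually incremented through the Reporter handed to map()/reduce() and read back from the Counters object returned by RunningJob.getCounters(). A minimal sketch, assuming a hypothetical RecordCounter enum:

    import java.io.IOException;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.MapReduceBase;
    import org.apache.hadoop.mapred.Mapper;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reporter;

    public class CountingMapper extends MapReduceBase
        implements Mapper<LongWritable, Text, Text, LongWritable> {

      // Hypothetical enum: each constant becomes a counter in a group named after the enum class.
      public enum RecordCounter { TOTAL_RECORDS, EMPTY_RECORDS }

      public void map(LongWritable key, Text value,
                      OutputCollector<Text, LongWritable> output,
                      Reporter reporter) throws IOException {
        reporter.incrCounter(RecordCounter.TOTAL_RECORDS, 1);
        if (value.getLength() == 0) {
          reporter.incrCounter(RecordCounter.EMPTY_RECORDS, 1);
          return;
        }
        output.collect(value, new LongWritable(1));
      }
    }

On the submitting side, RunningJob.getCounters().getCounter(RecordCounter.TOTAL_RECORDS) then yields the aggregated value.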

+ 0 - 3
src/mapred/org/apache/hadoop/mapred/FileInputFormat.java

@@ -51,10 +51,7 @@ import org.apache.hadoop.util.StringUtils;
  * Subclasses of <code>FileInputFormat</code> can also override the 
  * {@link #isSplitable(FileSystem, Path)} method to ensure input-files are
  * not split-up and are processed as a whole by {@link Mapper}s.
- * @deprecated Use {@link org.apache.hadoop.mapreduce.lib.input.FileInputFormat}
- *  instead.
  */
-@Deprecated
 public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {
 
   public static final Log LOG =
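
The javadoc retained above mentions overriding isSplitable(FileSystem, Path) so input files are processed whole. A minimal sketch of such a subclass (illustrative only, not from this commit):

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.TextInputFormat;

    // Hypothetical input format: every file becomes exactly one split,
    // so a single Mapper sees the whole file.
    public class WholeFileTextInputFormat extends TextInputFormat {
      @Override
      protected boolean isSplitable(FileSystem fs, Path file) {
        return false;
      }
    }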

+ 0 - 3
src/mapred/org/apache/hadoop/mapred/FileSplit.java

@@ -28,10 +28,7 @@ import org.apache.hadoop.fs.Path;
 /** A section of an input file.  Returned by {@link
  * InputFormat#getSplits(JobConf, int)} and passed to
  * {@link InputFormat#getRecordReader(InputSplit,JobConf,Reporter)}. 
- * @deprecated Use {@link org.apache.hadoop.mapreduce.lib.input.FileSplit}
- *  instead.
  */
-@Deprecated
 public class FileSplit extends org.apache.hadoop.mapreduce.InputSplit 
                        implements InputSplit {
   private Path file;

+ 0 - 1
src/mapred/org/apache/hadoop/mapred/ID.java

@@ -27,7 +27,6 @@ package org.apache.hadoop.mapred;
  * @see TaskID
  * @see TaskAttemptID
  */
-@Deprecated
 public abstract class ID extends org.apache.hadoop.mapreduce.ID {
 
   /** constructs an ID object from the given int */

+ 0 - 2
src/mapred/org/apache/hadoop/mapred/InputFormat.java

@@ -61,9 +61,7 @@ import org.apache.hadoop.fs.FileSystem;
  * @see RecordReader
  * @see JobClient
  * @see FileInputFormat
- * @deprecated Use {@link org.apache.hadoop.mapreduce.InputFormat} instead.
  */
-@Deprecated
 public interface InputFormat<K, V> {
 
   /** 

+ 0 - 2
src/mapred/org/apache/hadoop/mapred/InputSplit.java

@@ -31,9 +31,7 @@ import org.apache.hadoop.io.Writable;
  * 
  * @see InputFormat
  * @see RecordReader
- * @deprecated Use {@link org.apache.hadoop.mapreduce.InputSplit} instead.
  */
-@Deprecated
 public interface InputSplit extends Writable {
 
   /**

+ 0 - 2
src/mapred/org/apache/hadoop/mapred/JobConf.java

@@ -99,9 +99,7 @@ import org.apache.hadoop.util.Tool;
  * @see ClusterStatus
  * @see Tool
  * @see DistributedCache
- * @deprecated Use {@link Configuration} instead
  */
-@Deprecated
 public class JobConf extends Configuration {
   
   private static final Log LOG = LogFactory.getLog(JobConf.class);
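
Since JobConf extends Configuration, both the typed job setters and the generic key/value setters remain available. A small illustrative sketch (property name and values are examples, not prescriptions):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.lib.LongSumReducer;

    public class JobConfSketch {
      public static void main(String[] args) {
        JobConf conf = new JobConf(new Configuration(), JobConfSketch.class);
        conf.setNumReduceTasks(4);                        // typed JobConf setter
        conf.setCombinerClass(LongSumReducer.class);      // run partial sums map-side
        conf.set("mapred.child.java.opts", "-Xmx512m");   // inherited Configuration setter
        System.out.println(conf.getNumReduceTasks());
      }
    }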

+ 0 - 1
src/mapred/org/apache/hadoop/mapred/JobConfigurable.java

@@ -19,7 +19,6 @@
 package org.apache.hadoop.mapred;
 
 /** That what may be configured. */
-@Deprecated
 public interface JobConfigurable {
   /** Initializes a new instance from a {@link JobConf}.
    *

+ 0 - 4
src/mapred/org/apache/hadoop/mapred/JobContext.java

@@ -19,10 +19,6 @@ package org.apache.hadoop.mapred;
 
 import org.apache.hadoop.util.Progressable;
 
-/**
- * @deprecated Use {@link org.apache.hadoop.mapreduce.JobContext} instead.
- */
-@Deprecated
 public class JobContext extends org.apache.hadoop.mapreduce.JobContext {
   private JobConf job;
   private Progressable progress;

+ 0 - 1
src/mapred/org/apache/hadoop/mapred/JobID.java

@@ -38,7 +38,6 @@ import java.io.IOException;
  * @see TaskID
  * @see TaskAttemptID
  */
-@Deprecated
 public class JobID extends org.apache.hadoop.mapreduce.JobID {
   /**
    * Constructs a JobID object 

+ 0 - 3
src/mapred/org/apache/hadoop/mapred/LineRecordReader.java

@@ -34,10 +34,7 @@ import org.apache.commons.logging.Log;
 
 /**
  * Treats keys as offset in file and value as line. 
- * @deprecated Use 
- *   {@link org.apache.hadoop.mapreduce.lib.input.LineRecordReader} instead.
  */
-@Deprecated
 public class LineRecordReader implements RecordReader<LongWritable, Text> {
   private static final Log LOG
     = LogFactory.getLog(LineRecordReader.class.getName());

+ 0 - 1
src/mapred/org/apache/hadoop/mapred/MapReduceBase.java

@@ -29,7 +29,6 @@ import org.apache.hadoop.mapred.JobConfigurable;
  * <p>Provides default no-op implementations for a few methods, most non-trivial
  * applications need to override some of them.</p>
  */
-@Deprecated
 public class MapReduceBase implements Closeable, JobConfigurable {
 
   /** Default implementation that does nothing. */

+ 0 - 2
src/mapred/org/apache/hadoop/mapred/MapRunnable.java

@@ -27,9 +27,7 @@ import java.io.IOException;
  * control on map processing e.g. multi-threaded, asynchronous mappers etc.</p>
  * 
  * @see Mapper
- * @deprecated Use {@link org.apache.hadoop.mapreduce.Mapper} instead.
  */
-@Deprecated
 public interface MapRunnable<K1, V1, K2, V2>
     extends JobConfigurable {
   

+ 0 - 2
src/mapred/org/apache/hadoop/mapred/Mapper.java

@@ -127,9 +127,7 @@ import org.apache.hadoop.io.compress.CompressionCodec;
  * @see MapReduceBase
  * @see MapRunnable
  * @see SequenceFile
- * @deprecated Use {@link org.apache.hadoop.mapreduce.Mapper} instead.
  */
-@Deprecated
 public interface Mapper<K1, V1, K2, V2> extends JobConfigurable, Closeable {
   
   /** 
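
Because the classic Mapper extends JobConfigurable, per-task setup typically happens in configure(JobConf). A hedged sketch of a grep-style mapper (the property name "example.grep.pattern" is made up for illustration):

    import java.io.IOException;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.MapReduceBase;
    import org.apache.hadoop.mapred.Mapper;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reporter;

    public class GrepMapper extends MapReduceBase
        implements Mapper<LongWritable, Text, LongWritable, Text> {

      private String pattern;

      @Override
      public void configure(JobConf job) {
        // Called once per task before any map() calls; the property name is hypothetical.
        pattern = job.get("example.grep.pattern", "");
      }

      public void map(LongWritable key, Text value,
                      OutputCollector<LongWritable, Text> output,
                      Reporter reporter) throws IOException {
        if (value.toString().contains(pattern)) {
          output.collect(key, value);   // emit offset -> matching line
        }
      }
    }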

+ 0 - 2
src/mapred/org/apache/hadoop/mapred/OutputCommitter.java

@@ -53,9 +53,7 @@ import java.io.IOException;
  * @see FileOutputCommitter 
  * @see JobContext
  * @see TaskAttemptContext 
- * @deprecated Use {@link org.apache.hadoop.mapreduce.OutputCommitter} instead.
  */
-@Deprecated
 public abstract class OutputCommitter 
                 extends org.apache.hadoop.mapreduce.OutputCommitter {
   /**

+ 0 - 2
src/mapred/org/apache/hadoop/mapred/OutputFormat.java

@@ -42,9 +42,7 @@ import org.apache.hadoop.util.Progressable;
  * 
  * @see RecordWriter
  * @see JobConf
- * @deprecated Use {@link org.apache.hadoop.mapreduce.OutputFormat} instead.
  */
-@Deprecated
 public interface OutputFormat<K, V> {
 
   /** 

+ 0 - 2
src/mapred/org/apache/hadoop/mapred/Partitioner.java

@@ -29,9 +29,7 @@ package org.apache.hadoop.mapred;
  * record) is sent for reduction.</p>
  * 
  * @see Reducer
- * @deprecated Use {@link org.apache.hadoop.mapreduce.Partitioner} instead.
  */
-@Deprecated
 public interface Partitioner<K2, V2> extends JobConfigurable {
   
   /** 
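
For illustration (not part of the change), a classic Partitioner implements getPartition(key, value, numPartitions) plus the configure(JobConf) hook inherited from JobConfigurable. A minimal sketch that routes by the first character of a Text key:

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.Partitioner;

    // Hypothetical partitioner: keys starting with the same character
    // end up in the same reduce partition.
    public class FirstCharPartitioner<V> implements Partitioner<Text, V> {

      public void configure(JobConf job) {
        // no per-job setup needed
      }

      public int getPartition(Text key, V value, int numPartitions) {
        if (key.getLength() == 0) {
          return 0;
        }
        return (key.charAt(0) & Integer.MAX_VALUE) % numPartitions;
      }
    }

It would be wired in with conf.setPartitionerClass(FirstCharPartitioner.class).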

+ 0 - 2
src/mapred/org/apache/hadoop/mapred/Reducer.java

@@ -160,9 +160,7 @@ import org.apache.hadoop.io.Closeable;
  * @see Partitioner
  * @see Reporter
  * @see MapReduceBase
- * @deprecated Use {@link org.apache.hadoop.mapreduce.Reducer} instead.
  */
-@Deprecated
 public interface Reducer<K2, V2, K3, V3> extends JobConfigurable, Closeable {
   
   /** 
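
As a usage sketch (not from the diff), a classic Reducer receives each key together with an Iterator over its values. A minimal sum reducer, essentially what lib/LongSumReducer further below does:

    import java.io.IOException;
    import java.util.Iterator;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.MapReduceBase;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reducer;
    import org.apache.hadoop.mapred.Reporter;

    public class SumReducer extends MapReduceBase
        implements Reducer<Text, LongWritable, Text, LongWritable> {

      public void reduce(Text key, Iterator<LongWritable> values,
                         OutputCollector<Text, LongWritable> output,
                         Reporter reporter) throws IOException {
        long sum = 0;
        while (values.hasNext()) {
          sum += values.next().get();   // the framework reuses the value object
        }
        output.collect(key, new LongWritable(sum));
      }
    }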

+ 0 - 4
src/mapred/org/apache/hadoop/mapred/SequenceFileInputFormat.java

@@ -28,11 +28,7 @@ import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.MapFile;
 
 /** An {@link InputFormat} for {@link SequenceFile}s. 
- * @deprecated Use 
- *  {@link org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat} 
- *  instead.
  */
-@Deprecated
 public class SequenceFileInputFormat<K, V> extends FileInputFormat<K, V> {
 
   public SequenceFileInputFormat() {

+ 0 - 4
src/mapred/org/apache/hadoop/mapred/SequenceFileOutputFormat.java

@@ -33,11 +33,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.*;
 
 /** An {@link OutputFormat} that writes {@link SequenceFile}s. 
- * @deprecated Use 
- *   {@link org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat} 
- *   instead.
  */
-@Deprecated
 public class SequenceFileOutputFormat <K,V> extends FileOutputFormat<K, V> {
 
   public RecordWriter<K, V> getRecordWriter(
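
For reference (illustrative, not part of this commit), a job writes SequenceFiles by selecting this output format on the JobConf and declaring key/value classes (typically Writables); block compression is optional. The output path below is an example:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.FileOutputFormat;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.SequenceFileOutputFormat;

    public class SequenceFileOutputSketch {
      public static void main(String[] args) {
        JobConf conf = new JobConf(SequenceFileOutputSketch.class);
        conf.setOutputFormat(SequenceFileOutputFormat.class);
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(LongWritable.class);
        FileOutputFormat.setOutputPath(conf, new Path("/tmp/seq-out"));

        // Optional: compress the SequenceFile records in blocks.
        FileOutputFormat.setCompressOutput(conf, true);
        SequenceFileOutputFormat.setOutputCompressionType(
            conf, SequenceFile.CompressionType.BLOCK);
      }
    }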

+ 0 - 5
src/mapred/org/apache/hadoop/mapred/TaskAttemptContext.java

@@ -19,11 +19,6 @@ package org.apache.hadoop.mapred;
 
 import org.apache.hadoop.util.Progressable;
 
-/**
- * @deprecated Use {@link org.apache.hadoop.mapreduce.TaskAttemptContext}
- *   instead.
- */
-@Deprecated
 public class TaskAttemptContext 
        extends org.apache.hadoop.mapreduce.TaskAttemptContext {
   private Progressable progress;

+ 0 - 1
src/mapred/org/apache/hadoop/mapred/TaskAttemptID.java

@@ -41,7 +41,6 @@ import java.io.IOException;
  * @see JobID
  * @see TaskID
  */
-@Deprecated
 public class TaskAttemptID extends org.apache.hadoop.mapreduce.TaskAttemptID {
   
   /**

+ 0 - 1
src/mapred/org/apache/hadoop/mapred/TaskID.java

@@ -43,7 +43,6 @@ import java.io.IOException;
  * @see JobID
  * @see TaskAttemptID
  */
-@Deprecated
 public class TaskID extends org.apache.hadoop.mapreduce.TaskID {
 
   /**

+ 0 - 3
src/mapred/org/apache/hadoop/mapred/TextInputFormat.java

@@ -28,10 +28,7 @@ import org.apache.hadoop.io.compress.*;
 /** An {@link InputFormat} for plain text files.  Files are broken into lines.
  * Either linefeed or carriage-return are used to signal end of line.  Keys are
  * the position in the file, and values are the line of text.. 
- * @deprecated Use {@link org.apache.hadoop.mapreduce.lib.input.TextInputFormat}
- *  instead.
  */
-@Deprecated
 public class TextInputFormat extends FileInputFormat<LongWritable, Text>
   implements JobConfigurable {
 

+ 0 - 3
src/mapred/org/apache/hadoop/mapred/TextOutputFormat.java

@@ -33,10 +33,7 @@ import org.apache.hadoop.io.compress.GzipCodec;
 import org.apache.hadoop.util.*;
 
 /** An {@link OutputFormat} that writes plain text files. 
- * @deprecated Use 
- *   {@link org.apache.hadoop.mapreduce.lib.output.TextOutputFormat} instead.
  */
-@Deprecated
 public class TextOutputFormat<K, V> extends FileOutputFormat<K, V> {
 
   protected static class LineRecordWriter<K, V>

+ 0 - 3
src/mapred/org/apache/hadoop/mapred/lib/HashPartitioner.java

@@ -22,10 +22,7 @@ import org.apache.hadoop.mapred.Partitioner;
 import org.apache.hadoop.mapred.JobConf;
 
 /** Partition keys by their {@link Object#hashCode()}. 
- * @deprecated Use 
- *   {@link org.apache.hadoop.mapreduce.lib.partition.HashPartitioner} instead.
  */
-@Deprecated
 public class HashPartitioner<K2, V2> implements Partitioner<K2, V2> {
 
   public void configure(JobConf job) {}

+ 0 - 2
src/mapred/org/apache/hadoop/mapred/lib/IdentityMapper.java

@@ -26,9 +26,7 @@ import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.MapReduceBase;
 
 /** Implements the identity function, mapping inputs directly to outputs. 
- * @deprecated Use {@link org.apache.hadoop.mapreduce.Mapper} instead.
  */
-@Deprecated
 public class IdentityMapper<K, V>
     extends MapReduceBase implements Mapper<K, V, K, V> {
 

+ 0 - 2
src/mapred/org/apache/hadoop/mapred/lib/IdentityReducer.java

@@ -28,9 +28,7 @@ import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.MapReduceBase;
 
 /** Performs no reduction, writing all input values directly to the output. 
- * @deprecated Use {@link org.apache.hadoop.mapreduce.Reducer} instead.
  */
-@Deprecated
 public class IdentityReducer<K, V>
     extends MapReduceBase implements Reducer<K, V, K, V> {
 

+ 0 - 3
src/mapred/org/apache/hadoop/mapred/lib/InverseMapper.java

@@ -26,10 +26,7 @@ import org.apache.hadoop.mapred.OutputCollector;
 import org.apache.hadoop.mapred.Reporter;
 
 /** A {@link Mapper} that swaps keys and values. 
- * @deprecated Use {@link org.apache.hadoop.mapreduce.lib.map.InverseMapper} 
- *   instead.
  */
-@Deprecated
 public class InverseMapper<K, V>
     extends MapReduceBase implements Mapper<K, V, V, K> {
 

+ 0 - 3
src/mapred/org/apache/hadoop/mapred/lib/LongSumReducer.java

@@ -29,10 +29,7 @@ import org.apache.hadoop.mapred.MapReduceBase;
 import org.apache.hadoop.io.LongWritable;
 
 /** A {@link Reducer} that sums long values. 
- * @deprecated Use {@link org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer}
- *    instead.
  */
-@Deprecated
 public class LongSumReducer<K> extends MapReduceBase
     implements Reducer<K, LongWritable, K, LongWritable> {
 

+ 0 - 3
src/mapred/org/apache/hadoop/mapred/lib/NullOutputFormat.java

@@ -27,10 +27,7 @@ import org.apache.hadoop.util.Progressable;
 
 /**
  * Consume all outputs and put them in /dev/null. 
- * @deprecated Use 
- *   {@link org.apache.hadoop.mapreduce.lib.output.NullOutputFormat} instead.
  */
-@Deprecated
 public class NullOutputFormat<K, V> implements OutputFormat<K, V> {
   
   public RecordWriter<K, V> getRecordWriter(FileSystem ignored, JobConf job, 
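
A quick usage note (not in the diff): jobs whose work is purely a side effect, such as loading an external store or only updating counters, can discard framework output by selecting this format, e.g.:

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.lib.NullOutputFormat;

    public class NullOutputSketch {
      public static void main(String[] args) {
        JobConf conf = new JobConf(NullOutputSketch.class);
        conf.setOutputFormat(NullOutputFormat.class);   // no output path needed
      }
    }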

+ 0 - 3
src/mapred/org/apache/hadoop/mapred/lib/TokenCountMapper.java

@@ -31,10 +31,7 @@ import org.apache.hadoop.mapred.Reporter;
 
 /** A {@link Mapper} that maps text values into <token,freq> pairs.  Uses
  * {@link StringTokenizer} to break text into tokens. 
- * @deprecated Use 
- *    {@link org.apache.hadoop.mapreduce.lib.map.TokenCounterMapper} instead.
  */
-@Deprecated
 public class TokenCountMapper<K> extends MapReduceBase
     implements Mapper<K, Text, Text, LongWritable> {
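
Taken together, the library classes touched by this commit are enough for a classic word-count job without the new API. A hedged end-to-end sketch (class name and paths are made up):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.FileInputFormat;
    import org.apache.hadoop.mapred.FileOutputFormat;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.TextInputFormat;
    import org.apache.hadoop.mapred.TextOutputFormat;
    import org.apache.hadoop.mapred.lib.LongSumReducer;
    import org.apache.hadoop.mapred.lib.TokenCountMapper;

    public class ClassicWordCount {
      public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(ClassicWordCount.class);
        conf.setJobName("classic-wordcount");

        conf.setInputFormat(TextInputFormat.class);     // <offset, line>
        conf.setMapperClass(TokenCountMapper.class);    // line -> <token, 1>
        conf.setCombinerClass(LongSumReducer.class);    // partial sums map-side
        conf.setReducerClass(LongSumReducer.class);     // final <token, count>
        conf.setOutputFormat(TextOutputFormat.class);
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(LongWritable.class);

        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));
        JobClient.runJob(conf);
      }
    }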