
HADOOP-3667. Remove the following deprecated methods from JobConf:
addInputPath(Path)
getInputPaths()
getMapOutputCompressionType()
getOutputPath()
getSystemDir()
setInputPath(Path)
setMapOutputCompressionType(CompressionType style)
setOutputPath(Path)
Contributed by Amareshwari Sriramadasu.


git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/trunk@672808 13f79535-47bb-0310-9956-ffa450edef68

Owen O'Malley, 17 years ago
commit bd9095c7a4
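
For readers migrating off the removed path methods, here is a minimal sketch of the replacement calls (not part of this commit; the class name and paths are illustrative). The removed getSystemDir() is replaced by JobClient#getSystemDir(), which requires a connected JobClient and is not shown here.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;

public class PathMigrationSketch {
  // Hypothetical helper showing the replacement calls; paths are made up.
  public static void configure(JobConf job) {
    // Before: job.setInputPath(new Path("in"));
    FileInputFormat.setInputPaths(job, new Path("in"));
    // Before: job.addInputPath(new Path("more-in"));
    FileInputFormat.addInputPath(job, new Path("more-in"));
    // Before: job.setOutputPath(new Path("out"));
    FileOutputFormat.setOutputPath(job, new Path("out"));

    // Before: Path[] inputs = job.getInputPaths();
    Path[] inputs = FileInputFormat.getInputPaths(job);
    // Before: Path out = job.getOutputPath();
    Path out = FileOutputFormat.getOutputPath(job);
  }
}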

+ 11 - 0
CHANGES.txt

@@ -8,6 +8,17 @@ Trunk (unreleased changes)
     functionality, which was necessary to providing backwards
     compatible combiner semantics for 0.18. (cdouglas via omalley)
 
+    HADOOP-3667. Remove the following deprecated methods from JobConf:
+      addInputPath(Path)
+      getInputPaths()
+      getMapOutputCompressionType()
+      getOutputPath()
+      getSystemDir()
+      setInputPath(Path)
+      setMapOutputCompressionType(CompressionType style)
+      setOutputPath(Path)
+    (Amareshwari Sriramadasu via omalley)
+
   NEW FEATURES
 
     HADOOP-3341. Allow streaming jobs to specify the field separator for map

+ 1 - 4
src/core/org/apache/hadoop/io/SequenceFile.java

@@ -221,8 +221,7 @@ public class SequenceFile {
    * Get the compression type for the reduce outputs
    * @param job the job config to look in
    * @return the kind of compression to use
-   * @deprecated Use {@link org.apache.hadoop.mapred.JobConf#getMapOutputCompressionType()}
-   *             to get {@link CompressionType} for intermediate map-outputs or
+   * @deprecated Use 
    *             {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)} 
    *             to get {@link CompressionType} for job-outputs.
    */
@@ -239,8 +238,6 @@ public class SequenceFile {
    * @param val the new compression type (none, block, record)
    * @deprecated Use the one of the many SequenceFile.createWriter methods to specify
    *             the {@link CompressionType} while creating the {@link SequenceFile} or
-   *             {@link org.apache.hadoop.mapred.JobConf#setMapOutputCompressionType(org.apache.hadoop.io.SequenceFile.CompressionType)}
-   *             to specify the {@link CompressionType} for intermediate map-outputs or 
    *             {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)}
    *             to specify the {@link CompressionType} for job-outputs. 
    * or 
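
The trimmed javadoc above points callers at the SequenceFile.createWriter overloads. As a hedged illustration (not part of this commit; file path and key/value classes are made up), choosing the CompressionType at writer-creation time looks like this:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.Text;

public class SequenceFileCompressionSketch {
  public static void write(Configuration conf, Path file) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    // The CompressionType is fixed when the writer is created, per the new javadoc.
    SequenceFile.Writer writer = SequenceFile.createWriter(
        fs, conf, file, IntWritable.class, Text.class, CompressionType.BLOCK);
    try {
      writer.append(new IntWritable(1), new Text("value"));
    } finally {
      writer.close();
    }
  }
}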

+ 1 - 127
src/mapred/org/apache/hadoop/mapred/JobConf.java

@@ -21,13 +21,9 @@ package org.apache.hadoop.mapred;
 
 import java.io.IOException;
 
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Enumeration;
-import java.util.StringTokenizer;
-
 import java.net.URL;
 import java.net.URLDecoder;
+import java.util.Enumeration;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -38,7 +34,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.conf.Configuration;
 
 import org.apache.hadoop.io.*;
-import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.io.compress.CompressionCodec;
 
 import org.apache.hadoop.mapred.lib.IdentityMapper;
@@ -185,17 +180,6 @@ public class JobConf extends Configuration {
     }   
   }
 
-  /**
-   * @deprecated Use {@link JobClient#getSystemDir()} instead.
-   * Get the system directory where job-specific files are to be placed.
-   * 
-   * @return the system directory where job-specific files are to be placed.
-   */
-  @Deprecated
-  public Path getSystemDir() {
-    return new Path(get("mapred.system.dir", "/tmp/hadoop/mapred/system"));
-  }
-
   public String[] getLocalDirs() throws IOException {
     return getStrings("mapred.local.dir");
   }
@@ -222,51 +206,6 @@ public class JobConf extends Configuration {
     return getLocalPath("mapred.local.dir", pathString);
   }
 
-  /**
-   * Set the {@link Path} of the input directory for the map-reduce job.
-   * 
-   * @param dir the {@link Path} of the input directory for the map-reduce job.
-   * @deprecated Use {@link FileInputFormat#setInputPaths(JobConf, Path...)} or
-   *                 {@link FileInputFormat#setInputPaths(JobConf, String)}
-   */
-  @Deprecated
-  public void setInputPath(Path dir) {
-    dir = new Path(getWorkingDirectory(), dir);
-    set("mapred.input.dir", dir.toString());
-  }
-
-  /**
-   * Add a {@link Path} to the list of inputs for the map-reduce job.
-   * 
-   * @param dir {@link Path} to be added to the list of inputs for 
-   *            the map-reduce job.
-   * @deprecated Use {@link FileInputFormat#addInputPath(JobConf, Path)} or
-   *                 {@link FileInputFormat#addInputPaths(JobConf, String)}
-   */
-  @Deprecated
-  public void addInputPath(Path dir) {
-    dir = new Path(getWorkingDirectory(), dir);
-    String dirs = get("mapred.input.dir");
-    set("mapred.input.dir", dirs == null ? dir.toString() : dirs + "," + dir);
-  }
-
-  /**
-   * Get the list of input {@link Path}s for the map-reduce job.
-   * 
-   * @return the list of input {@link Path}s for the map-reduce job.
-   * @deprecated Use {@link FileInputFormat#getInputPaths(JobConf)}
-   */
-  @Deprecated
-  public Path[] getInputPaths() {
-    String dirs = get("mapred.input.dir", "");
-    ArrayList<Object> list = Collections.list(new StringTokenizer(dirs, ","));
-    Path[] result = new Path[list.size()];
-    for (int i = 0; i < list.size(); i++) {
-      result[i] = new Path((String)list.get(i));
-    }
-    return result;
-  }
-
   /**
    * Get the reported username for this job.
    * 
@@ -360,38 +299,6 @@ public class JobConf extends Configuration {
     }
   }
   
-  /**
-   * @deprecated Use {@link FileOutputFormat#getOutputPath(JobConf)} or
-   *                 {@link FileOutputFormat#getWorkOutputPath(JobConf)}
-   * Get the {@link Path} to the output directory for the map-reduce job.
-   * 
-   * @return the {@link Path} to the output directory for the map-reduce job.
-   */
-  @Deprecated
-  public Path getOutputPath() {
-    // this return context sensitive value for output path
-    // Returns task's temporary output path while task's execution
-    // Otherwise returns the output path that was set.
-    Path workOutputDir = FileOutputFormat.getWorkOutputPath(this);
-    if (workOutputDir != null) {
-      return workOutputDir;
-    }
-    else return FileOutputFormat.getOutputPath(this);
-  }
-
-  /**
-   * @deprecated Use {@link FileOutputFormat#setOutputPath(JobConf, Path)} 
-   * Set the {@link Path} of the output directory for the map-reduce job.
-   * 
-   * <p><i>Note</i>:
-   * </p>
-   * @param dir the {@link Path} of the output directory for the map-reduce job.
-   */
-  @Deprecated
-  public void setOutputPath(Path dir) {
-    FileOutputFormat.setOutputPath(this, dir);
-  }
-
   /**
    * Get the {@link InputFormat} implementation for the map-reduce job,
    * defaults to {@link TextInputFormat} if not specified explicity.
@@ -458,39 +365,6 @@ public class JobConf extends Configuration {
     return getBoolean("mapred.compress.map.output", false);
   }
 
-  /**
-   * Set the {@link CompressionType} for the map outputs.
-   * 
-   * @param style the {@link CompressionType} to control how the map outputs  
-   *              are compressed.
-   * @deprecated {@link CompressionType} is no longer valid for intermediate
-   *             map-outputs. 
-   */
-  @Deprecated
-  public void setMapOutputCompressionType(CompressionType style) {
-    setCompressMapOutput(true);
-    set("mapred.map.output.compression.type", style.toString());
-    LOG.warn("SequenceFile compression is no longer valid for intermediate " +
-    		     "map-outputs!");
-  }
-  
-  /**
-   * Get the {@link CompressionType} for the map outputs.
-   * 
-   * @return the {@link CompressionType} for map outputs, defaulting to 
-   *         {@link CompressionType#RECORD}.
-   * @deprecated {@link CompressionType} is no longer valid for intermediate
-   *             map-outputs. 
-   */
-  @Deprecated
-  public CompressionType getMapOutputCompressionType() {
-    String val = get("mapred.map.output.compression.type", 
-                     CompressionType.RECORD.toString());
-    LOG.warn("SequenceFile compression is no longer valid for intermediate " +
-    "map-outputs!");
-    return CompressionType.valueOf(val);
-  }
-
   /**
    * Set the given class as the  {@link CompressionCodec} for the map outputs.
    * 
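
Since setMapOutputCompressionType/getMapOutputCompressionType are gone, intermediate map outputs are configured with a codec class instead, while SequenceFile job outputs keep their CompressionType via the output format. A minimal sketch of the replacements, assuming GzipCodec as the codec (not part of this commit):

import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;

public class CompressionMigrationSketch {
  public static void configure(JobConf job) {
    // Before: job.setMapOutputCompressionType(CompressionType.BLOCK);
    // Intermediate map outputs are now compressed with a codec, not a
    // SequenceFile CompressionType.
    job.setCompressMapOutput(true);
    job.setMapOutputCompressorClass(GzipCodec.class);

    // Job outputs written as SequenceFiles still take a CompressionType,
    // but via the output format rather than JobConf.
    SequenceFileOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK);
    CompressionType outputType =
        SequenceFileOutputFormat.getOutputCompressionType(job);
  }
}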

+ 1 - 2
src/test/org/apache/hadoop/mapred/TestSequenceFileAsBinaryInputFormat.java

@@ -23,7 +23,6 @@ import java.util.Random;
 
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.io.*;
-import org.apache.hadoop.mapred.*;
 
 import junit.framework.TestCase;
 import org.apache.commons.logging.*;
@@ -70,7 +69,7 @@ public class TestSequenceFileAsBinaryInputFormat extends TestCase {
     Text cmpval = new Text();
     DataInputBuffer buf = new DataInputBuffer();
     final int NUM_SPLITS = 3;
-    job.setInputPath(file);
+    FileInputFormat.setInputPaths(job, file);
     for (InputSplit split : bformat.getSplits(job, NUM_SPLITS)) {
       RecordReader<BytesWritable,BytesWritable> reader =
         bformat.getRecordReader(split, job, Reporter.NULL);