
Hack until MAPREDUCE-2365 is fixed to make PIG work with MRV2. (mahadev)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/MR-279@1136557 13f79535-47bb-0310-9956-ffa450edef68
Mahadev Konar 14 years ago
parent commit d624098e7c
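The change works around a ClassCastException: both record readers cast their TaskAttemptContext argument straight to MapContext, but Pig running on MRv2 initializes the readers with a context that is not a MapContext. A minimal sketch of the failure mode, assuming the MRv2 TaskAttemptContextImpl and a hypothetical local file input.txt (Pig's real call path differs):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.TaskAttemptID;
    import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
    import org.apache.hadoop.mapreduce.lib.input.FileSplit;
    import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;

    public class NonMapContextRepro {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // A plain task context, not a MapContext -- this is what a caller
        // like Pig hands to the reader under MRv2.
        TaskAttemptContext context =
            new TaskAttemptContextImpl(conf, new TaskAttemptID());
        // Hypothetical split; assumes a local file input.txt exists.
        FileSplit split = new FileSplit(new Path("input.txt"), 0, 100, null);
        LineRecordReader reader = new LineRecordReader();
        // Before this commit: ClassCastException, because initialize()
        // cast context to MapContext unconditionally. After: the
        // instanceof check falls back to a standalone Counters object
        // and initialize() succeeds.
        reader.initialize(split, context);
        reader.close();
      }
    }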

+ 2 - 0
mapreduce/CHANGES.txt

@@ -5,6 +5,8 @@ Trunk (unreleased changes)
 
     MAPREDUCE-279
 
+    Hack until MAPREDUCE-2365 is fixed to make PIG work with MRV2. (mahadev)
+
     Fix race between multiple localizers on single node (chris douglas via mahadev)
 
     Bug fix to set correct state on containers so as to avoid duplicate

+ 13 - 2
mapreduce/mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/LineRecordReader.java

@@ -35,11 +35,15 @@ import org.apache.hadoop.io.compress.SplitCompressionInputStream;
 import org.apache.hadoop.io.compress.SplittableCompressionCodec;
 import org.apache.hadoop.io.compress.CompressionCodecFactory;
 import org.apache.hadoop.io.compress.Decompressor;
+import org.apache.hadoop.mapred.Counters;
 import org.apache.hadoop.mapreduce.Counter;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.RecordReader;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.MapContext;
+import org.apache.hadoop.mapreduce.counters.CounterGroupFactory;
+import org.apache.hadoop.mapreduce.counters.FrameworkCounterGroup;
+import org.apache.hadoop.mapreduce.counters.FrameworkCounterGroup.FrameworkCounter;
 import org.apache.hadoop.util.LineReader;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.Log;
@@ -72,8 +76,15 @@ public class LineRecordReader extends RecordReader<LongWritable, Text> {
   public void initialize(InputSplit genericSplit,
                          TaskAttemptContext context) throws IOException {
     FileSplit split = (FileSplit) genericSplit;
-    inputByteCounter = ((MapContext)context).getCounter(
-      FileInputFormat.COUNTER_GROUP, FileInputFormat.BYTES_READ);
+    /* TODO This is a hack. MAPREDUCE-2365 is the proper solution */
+    if (context instanceof MapContext) {
+      inputByteCounter =
+        ((MapContext)context).getCounter(
+            FileInputFormat.COUNTER_GROUP, FileInputFormat.BYTES_READ);
+    } else {
+      inputByteCounter = new Counters().findCounter(
+          FileInputFormat.COUNTER_GROUP, FileInputFormat.BYTES_READ);
+    }
     Configuration job = context.getConfiguration();
     this.maxLineLength = job.getInt(MAX_LINE_LENGTH, Integer.MAX_VALUE);
     start = split.getStart();
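One note on the else branch: findCounter() on a freshly constructed org.apache.hadoop.mapred.Counters creates the counter on demand, so the reader can count bytes read without a MapContext, but the value lives only in that throwaway Counters instance and is never reported back to the framework. A standalone sketch of that behavior (DetachedCounterSketch is a hypothetical name; it assumes FileInputFormat's COUNTER_GROUP and BYTES_READ constants are publicly accessible, as the diff itself uses them):

    import org.apache.hadoop.mapred.Counters;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

    public class DetachedCounterSketch {
      public static void main(String[] args) {
        // Same construction as the else branch: a standalone Counters
        // object with no connection to any running task.
        Counters counters = new Counters();
        Counters.Counter bytesRead = counters.findCounter(
            FileInputFormat.COUNTER_GROUP, FileInputFormat.BYTES_READ);
        bytesRead.increment(42);
        // The increment succeeds, but nothing ties this Counters instance
        // to the task, so the framework never sees the value.
        System.out.println(bytesRead.getValue()); // prints 42
      }
    }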

+ 10 - 2
mapreduce/mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileRecordReader.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.*;
+import org.apache.hadoop.mapred.Counters;
 import org.apache.hadoop.mapreduce.Counter;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.MapContext;
@@ -52,8 +53,15 @@ public class SequenceFileRecordReader<K, V> extends RecordReader<K, V> {
                          TaskAttemptContext context
                          ) throws IOException, InterruptedException {
     FileSplit fileSplit = (FileSplit) split;
-    inputByteCounter = ((MapContext)context).getCounter(
-      FileInputFormat.COUNTER_GROUP, FileInputFormat.BYTES_READ);
+    /* TODO This is a hack. MAPREDUCE-2365 is the proper solution */
+    if (context instanceof MapContext) {
+      inputByteCounter =
+        ((MapContext)context).getCounter(
+            FileInputFormat.COUNTER_GROUP, FileInputFormat.BYTES_READ);
+    } else {
+      inputByteCounter = new Counters().findCounter(
+          FileInputFormat.COUNTER_GROUP, FileInputFormat.BYTES_READ);
+    }
     conf = context.getConfiguration();
     Path path = fileSplit.getPath();
     FileSystem fs = path.getFileSystem(conf);