
HADOOP-851. Add support for the LZO codec. Contributed by Arun.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@494905 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting
commit acdc5b1d4d

+ 4 - 0
CHANGES.txt

@@ -32,6 +32,10 @@ Trunk (unreleased changes)
 10. HADOOP-873.	 Pass java.library.path correctly to child processes.
     (omalley via cutting)
 
+11. HADOOP-851.  Add support for the LZO codec.  This is much faster
+    than the default, zlib-based compression, but it is only available
+    when the native library is built.  (Arun C Murthy via cutting)
+
 
 Release 0.10.0 - 2007-01-05
 

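Because the codec is only usable when the native library is built, callers are expected to probe LzoCodec.isNativeLzoLoaded() before relying on it. A minimal sketch of such a probe (hypothetical driver code, not part of this commit; the class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.compress.LzoCodec;

    // Hypothetical probe: check for native lzo before wiring LzoCodec
    // into a job, falling back to the zlib-based codecs otherwise.
    public class LzoProbe {
      public static void main(String[] args) {
        LzoCodec codec = new LzoCodec();
        codec.setConf(new Configuration());
        if (LzoCodec.isNativeLzoLoaded()) {
          System.out.println("native-lzo available, extension: "
              + codec.getDefaultExtension());
        } else {
          System.out.println("native-lzo missing: use the default zlib codec");
        }
      }
    }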
+ 11 - 0
build.xml

@@ -194,6 +194,7 @@
   	
     <mkdir dir="${build.native}/lib"/>
     <mkdir dir="${build.native}/src/org/apache/hadoop/io/compress/zlib"/>
+    <mkdir dir="${build.native}/src/org/apache/hadoop/io/compress/lzo"/>
 
   	<javah
   	  classpath="${build.classes}"
@@ -205,6 +206,16 @@
       <class name="org.apache.hadoop.io.compress.zlib.ZlibDecompressor" />
   	</javah>
 
+  	<javah
+  	  classpath="${build.classes}"
+  	  destdir="${build.native}/src/org/apache/hadoop/io/compress/lzo"
+      force="yes"
+  	  verbose="yes"
+  	  >
+  	  <class name="org.apache.hadoop.io.compress.lzo.LzoCompressor" />
+      <class name="org.apache.hadoop.io.compress.lzo.LzoDecompressor" />
+  	</javah>
+
 	<exec dir="${build.native}" executable="sh" failonerror="true">
 	  <env key="OS_NAME" value="${os.name}"/>
 	  <env key="OS_ARCH" value="${os.arch}"/>

+ 114 - 0
src/java/org/apache/hadoop/io/compress/BlockCompressorStream.java

@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.compress;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * A {@link org.apache.hadoop.io.compress.CompressorStream} which works
+ * with 'block-based' compression algorithms, as opposed to 
+ * 'stream-based' compression algorithms.
+ *  
+ * @author Arun C Murthy
+ */
+class BlockCompressorStream extends CompressorStream {
+
+  // The 'maximum' size of input data to be compressed, to account
+  // for the overhead of the compression algorithm.
+  private final int MAX_INPUT_SIZE;
+
+  /**
+   * Create a {@link BlockCompressorStream}.
+   * 
+   * @param out stream
+   * @param compressor compressor to be used
+   * @param bufferSize size of buffer
+   * @param compressionOverhead maximum 'overhead' of the compression 
+   *                            algorithm with given bufferSize
+   */
+  public BlockCompressorStream(OutputStream out, Compressor compressor, 
+      int bufferSize, int compressionOverhead) {
+    super(out, compressor, bufferSize);
+    MAX_INPUT_SIZE = bufferSize - compressionOverhead;
+  }
+
+  /**
+   * Create a {@link BlockCompressorStream} with given output-stream and 
+   * compressor.
+   * Uses a default bufferSize of 512 bytes and a compressionOverhead of
+   * 18 bytes (1% of bufferSize + 12 bytes, the zlib worst case).
+   * 
+   * @param out stream
+   * @param compressor compressor to be used
+   */
+  public BlockCompressorStream(OutputStream out, Compressor compressor) {
+    this(out, compressor, 512, 18);
+  }
+
+  public void write(byte[] b, int off, int len) throws IOException {
+    // Sanity checks
+    if (compressor.finished()) {
+      throw new IOException("write beyond end of stream");
+    }
+    if (b == null) {
+      throw new NullPointerException();
+    } else if ((off < 0) || (off > b.length) || (len < 0) ||
+            ((off + len) > b.length)) {
+      throw new IndexOutOfBoundsException();
+    } else if (len == 0) {
+      return;
+    }
+
+    // Write out the length of the original data
+    rawWriteInt(len);
+    
+    // Compress data
+    if (!compressor.finished()) {
+      do {
+        // Compress at most MAX_INPUT_SIZE bytes at a time
+        int bufLen = Math.min(len, MAX_INPUT_SIZE);
+        
+        compressor.setInput(b, off, bufLen);
+        while (!compressor.needsInput()) {
+          compress();
+        }
+        off += bufLen;
+        len -= bufLen;
+      } while (len > 0);
+    }
+  }
+
+  void compress() throws IOException {
+    int len = compressor.compress(buffer, 0, buffer.length);
+    if (len > 0) {
+      // Write out the compressed chunk
+      rawWriteInt(len);
+      out.write(buffer, 0, len);
+    }
+  }
+  
+  private void rawWriteInt(int v) throws IOException {
+    out.write((v >>> 24) & 0xFF);
+    out.write((v >>> 16) & 0xFF);
+    out.write((v >>>  8) & 0xFF);
+    out.write((v >>>  0) & 0xFF);
+  }
+
+}

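BlockCompressorStream frames its output as length-prefixed blocks: each write() first emits the original (uncompressed) length as a 4-byte big-endian int via rawWriteInt(), then one or more compressed chunks, each preceded by its own 4-byte compressed length. A hedged sketch of a frame walker, assuming each block compressed into a single chunk (which holds for small writes with the default buffer; class and method names are illustrative):

    import java.io.DataInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    // Hypothetical walker over the framing written above:
    // [originalLen:int32 BE][compressedLen:int32 BE][compressedLen bytes]...
    // Assumes one compressed chunk per block; in general a block may
    // carry several chunks, which only a decompressor can delimit.
    class BlockFrameWalker {
      static void dumpFrames(InputStream raw) throws IOException {
        DataInputStream in = new DataInputStream(raw);
        while (in.available() > 0) {
          int originalLen = in.readInt();    // written by rawWriteInt(len)
          int compressedLen = in.readInt();  // written inside compress()
          in.skipBytes(compressedLen);       // payload for the decompressor
          System.out.println(originalLen + " -> " + compressedLen + " bytes");
        }
      }
    }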
+ 129 - 0
src/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java

@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.compress;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * A {@link org.apache.hadoop.io.compress.DecompressorStream} which works
+ * with 'block-based' compression algorithms, as opposed to 
+ * 'stream-based' compression algorithms.
+ *  
+ * @author Arun C Murthy
+ */
+class BlockDecompressorStream extends DecompressorStream {
+  private int originalBlockSize = 0;
+  private int noUncompressedBytes = 0;
+
+  /**
+   * Create a {@link BlockDecompressorStream}.
+   * 
+   * @param in input stream
+   * @param decompressor decompressor to use
+   * @param bufferSize size of buffer
+   */
+  public BlockDecompressorStream(InputStream in, Decompressor decompressor, 
+      int bufferSize) {
+    super(in, decompressor, bufferSize);
+  }
+  
+  /**
+   * Create a {@link BlockDecompressorStream}.
+   * 
+   * @param in input stream
+   * @param decompressor decompressor to use
+   */
+  public BlockDecompressorStream(InputStream in, Decompressor decompressor) {
+    super(in, decompressor);
+  }
+
+  protected BlockDecompressorStream(InputStream in) {
+    super(in);
+  }
+
+  int decompress(byte[] b, int off, int len) throws IOException {
+    // Check if we are at the beginning of a block
+    if (noUncompressedBytes == originalBlockSize) {
+      // Get original data size
+      try {
+        originalBlockSize = rawReadInt();
+      } catch (IOException ioe) {
+        return -1;
+      }
+      noUncompressedBytes = 0;
+    }
+    
+    int n = 0;
+    while ((n = decompressor.decompress(b, off, len)) == 0) {
+      if (decompressor.finished() || decompressor.needsDictionary()) {
+        if (noUncompressedBytes >= originalBlockSize) {
+          eof = true;
+          return -1;
+        }
+      }
+      if (decompressor.needsInput()) {
+        getCompressedData();
+      }
+    }
+    
+    // Note the no. of decompressed bytes read from 'current' block
+    noUncompressedBytes += n;
+
+    return n;
+  }
+
+  void getCompressedData() throws IOException {
+    checkStream();
+
+    // Get the size of the compressed chunk
+    int len = rawReadInt();
+
+    // Read len bytes from underlying stream 
+    if (len > buffer.length) {
+      buffer = new byte[len];
+    }
+    int n = 0, off = 0;
+    while (n < len) {
+      int count = in.read(buffer, off + n, len - n);
+      if (count < 0) {
+        throw new EOFException();
+      }
+      n += count;
+    }
+    
+    // Send the read data to the decompressor
+    decompressor.setInput(buffer, 0, len);
+  }
+
+  public void resetState() throws IOException {
+    super.resetState();
+  }
+
+  private int rawReadInt() throws IOException {
+    int b1 = in.read();
+    int b2 = in.read();
+    int b3 = in.read();
+    int b4 = in.read();
+    if ((b1 | b2 | b3 | b4) < 0)
+        throw new EOFException();
+    return ((b1 << 24) + (b2 << 16) + (b3 << 8) + (b4 << 0));
+  }
+}

+ 157 - 0
src/java/org/apache/hadoop/io/compress/LzoCodec.java

@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.compress;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.InputStream;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.compress.lzo.*;
+import org.apache.hadoop.util.NativeCodeLoader;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * A {@link org.apache.hadoop.io.compress.CompressionCodec} for a streaming
+ * <b>lzo</b> compression/decompression pair.
+ * http://www.oberhumer.com/opensource/lzo/
+ * 
+ * @author Arun C Murthy
+ */
+public class LzoCodec implements Configurable, CompressionCodec {
+  
+  private static final Log LOG = LogFactory.getLog(LzoCodec.class.getName());
+
+  private Configuration conf;
+  
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+  
+  public Configuration getConf() {
+    return conf;
+  }
+
+  private static boolean nativeLzoLoaded = false;
+  
+  static {
+    if (NativeCodeLoader.isNativeCodeLoaded()) {
+      nativeLzoLoaded = LzoCompressor.isNativeLzoLoaded() &&
+                          LzoDecompressor.isNativeLzoLoaded();
+      
+      if (nativeLzoLoaded) {
+        LOG.info("Successfully loaded & initialized native-lzo library");
+      } else {
+        LOG.error("Failed to load/initialize native-lzo library");
+      }
+    } else {
+      LOG.error("Cannot load native-lzo without native-hadoop");
+    }
+  }
+
+  /**
+   * Check if native-lzo library is loaded & initialized.
+   * 
+   * @return <code>true</code> if native-lzo library is loaded & initialized;
+   *         else <code>false</code>
+   */
+  public static boolean isNativeLzoLoaded() {
+    return nativeLzoLoaded;
+  }
+  
+  public CompressionOutputStream createOutputStream(OutputStream out) 
+  throws IOException {
+    // Ensure native-lzo library is loaded & initialized
+    if (!isNativeLzoLoaded()) {
+      throw new IOException("native-lzo library not available");
+    }
+    
+    /**
+     * <b>http://www.oberhumer.com/opensource/lzo/lzofaq.php</b>
+     *
+     * How much can my data expand during compression ?
+     * ================================================
+     * LZO will expand incompressible data by a little amount.
+     * I still haven't computed the exact values, but I suggest using
+     * these formulas for a worst-case expansion calculation:
+     * 
+     * Algorithm LZO1, LZO1A, LZO1B, LZO1C, LZO1F, LZO1X, LZO1Y, LZO1Z:
+     * ----------------------------------------------------------------
+     * output_block_size = input_block_size + (input_block_size / 16) + 64 + 3
+     * 
+     * This is about 106% for a large block size.
+     * 
+     * Algorithm LZO2A:
+     * ----------------
+     * output_block_size = input_block_size + (input_block_size / 8) + 128 + 3
+     */
+
+    // Create the lzo output-stream
+    LzoCompressor.CompressionStrategy strategy = 
+      LzoCompressor.CompressionStrategy.valueOf(
+              conf.get("io.compression.codec.lzo.compressor",
+                        LzoCompressor.CompressionStrategy.LZO1X_1.name()
+                      )
+                    ); 
+    int bufferSize = conf.getInt("io.compression.codec.lzo.buffersize", 
+                                  64*1024);
+    int compressionOverhead = 0;
+    if (strategy.name().contains("LZO1")) {
+      // Worst-case expansion from the FAQ above: size/16 + 64 + 3
+      compressionOverhead = (bufferSize >> 4) + 64 + 3;
+    } else {
+      // LZO2A worst-case expansion: size/8 + 128 + 3
+      compressionOverhead = (bufferSize >> 3) + 128 + 3;
+    }
+     
+    return new BlockCompressorStream(out, 
+            new LzoCompressor(strategy, bufferSize), 
+            bufferSize, compressionOverhead);
+  }
+  
+  public CompressionInputStream createInputStream(InputStream in) 
+  throws IOException {
+    // Ensure native-lzo library is loaded & initialized
+    if (!isNativeLzoLoaded()) {
+      throw new IOException("native-lzo library not available");
+    }
+    
+    // Create the lzo input-stream
+    LzoDecompressor.CompressionStrategy strategy = 
+      LzoDecompressor.CompressionStrategy.valueOf(
+              conf.get("io.compression.codec.lzo.decompressor",
+                        LzoDecompressor.CompressionStrategy.LZO1X.name()
+                      )
+                    ); 
+    int bufferSize = conf.getInt("io.compression.codec.lzo.buffersize", 
+                                  64*1024);
+
+    return new BlockDecompressorStream(in, 
+            new LzoDecompressor(strategy, bufferSize), 
+            bufferSize);
+  }
+  
+  /**
+   * Get the default filename extension for this kind of compression.
+   * @return the extension including the '.'
+   */
+  public String getDefaultExtension() {
+    return ".lzo";
+  }
+}

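With the worst-case formulas quoted above, the default 64 KiB buffer gives an LZO1-family compressionOverhead of 64*1024/16 + 64 + 3 = 4163 bytes, so BlockCompressorStream will accept up to 65536 - 4163 = 61373 bytes per block. A minimal in-memory round-trip sketch through the codec (hypothetical; class name illustrative; requires the native library to be loaded):

    import java.io.*;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.compress.*;

    // Hypothetical round trip using the configuration keys read by
    // createOutputStream()/createInputStream() above.
    public class LzoRoundTrip {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        conf.set("io.compression.codec.lzo.compressor", "LZO1X_1");
        conf.set("io.compression.codec.lzo.decompressor", "LZO1X");
        conf.setInt("io.compression.codec.lzo.buffersize", 64 * 1024);

        LzoCodec codec = new LzoCodec();
        codec.setConf(conf);

        byte[] data = "hello, lzo".getBytes();
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        CompressionOutputStream out = codec.createOutputStream(bytes);
        out.write(data);
        out.close();   // finishes the compressor and flushes the last block

        CompressionInputStream in = codec.createInputStream(
            new ByteArrayInputStream(bytes.toByteArray()));
        byte[] back = new byte[data.length];
        int n = in.read(back);
        System.out.println(new String(back, 0, n));
      }
    }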
+ 319 - 0
src/java/org/apache/hadoop/io/compress/lzo/LzoCompressor.java

@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.compress.lzo;
+
+import java.io.IOException;
+import java.nio.Buffer;
+import java.nio.ByteBuffer;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.util.NativeCodeLoader;
+
+/**
+ * A {@link Compressor} based on the lzo algorithm.
+ * http://www.oberhumer.com/opensource/lzo/
+ * 
+ * @author Arun C Murthy
+ */
+public class LzoCompressor implements Compressor {
+  private static final Log LOG = 
+    LogFactory.getLog(LzoCompressor.class.getName());
+
+  private int directBufferSize;
+  private byte[] userBuf = null;
+  private int userBufOff = 0, userBufLen = 0;
+  private Buffer uncompressedDirectBuf = null;
+  private int uncompressedDirectBufLen = 0;
+  private Buffer compressedDirectBuf = null;
+  private boolean finish, finished;
+  
+  private CompressionStrategy strategy; // The lzo compression algorithm.
+  private long lzoCompressor = 0;       // The actual lzo compression function.
+  private int workingMemoryBufLen = 0;  // The length of 'working memory' buf.
+  private Buffer workingMemoryBuf;      // The 'working memory' for lzo.
+  
+  /**
+   * The compression algorithm for lzo library.
+   */
+  public static enum CompressionStrategy {
+    /**
+     * lzo1 algorithms.
+     */
+    LZO1 (0),
+    LZO1_99 (1),
+    
+    /**
+     * lzo1a algorithms.
+     */
+    LZO1A (2),
+    LZO1A_99 (3),
+    
+    /**
+     * lzo1b algorithms.
+     */
+    LZO1B (4),
+    LZO1B_BEST_COMPRESSION(5),
+    LZO1B_BEST_SPEED(6),
+    LZO1B_1 (7),
+    LZO1B_2 (8),
+    LZO1B_3 (9),
+    LZO1B_4 (10),
+    LZO1B_5 (11),
+    LZO1B_6 (12),
+    LZO1B_7 (13),
+    LZO1B_8 (14),
+    LZO1B_9 (15),
+    LZO1B_99 (16),
+    LZO1B_999 (17),
+
+    /**
+     * lzo1c algorithms.
+     */
+    LZO1C (18),
+    LZO1C_BEST_COMPRESSION(19),
+    LZO1C_BEST_SPEED(20),
+    LZO1C_1 (21),
+    LZO1C_2 (22),
+    LZO1C_3 (23),
+    LZO1C_4 (24),
+    LZO1C_5 (25),
+    LZO1C_6 (26),
+    LZO1C_7 (27),
+    LZO1C_8 (28),
+    LZO1C_9 (29),
+    LZO1C_99 (30),
+    LZO1C_999 (31),
+    
+    /**
+     * lzo1f algorithms.
+     */
+    LZO1F_1 (32),
+    LZO1F_999 (33),
+    
+    /**
+     * lzo1x algorithms.
+     */
+    LZO1X_1 (34),
+    LZO1X_11 (35),
+    LZO1X_12 (36),
+    LZO1X_15 (37),
+    LZO1X_999 (38),
+    
+    /**
+     * lzo1y algorithms.
+     */
+    LZO1Y_1 (39),
+    LZO1Y_999 (40),
+    
+    /**
+     * lzo1z algorithms.
+     */
+    LZO1Z_999 (41),
+    
+    /**
+     * lzo2a algorithms.
+     */
+    LZO2A_999 (42);
+    
+    private final int compressor;
+
+    private CompressionStrategy(int compressor) {
+      this.compressor = compressor;
+    }
+    
+    int getCompressor() {
+      return compressor;
+    }
+  }; // CompressionStrategy
+
+  private static boolean nativeLzoLoaded = false;
+  
+  static {
+    if (NativeCodeLoader.isNativeCodeLoaded()) {
+      // Initialize the native library
+      initIDs();
+      nativeLzoLoaded = true;
+    } else {
+      LOG.error("Cannot load " + LzoCompressor.class.getName() + 
+              " without native-hadoop library!");
+    }
+  }
+  
+  /**
+   * Check if lzo compressors are loaded and initialized.
+   * 
+   * @return <code>true</code> if lzo compressors are loaded & initialized,
+   *         else <code>false</code> 
+   */
+  public static boolean isNativeLzoLoaded() {
+    return nativeLzoLoaded;
+  }
+
+  /** 
+   * Creates a new compressor using the specified {@link CompressionStrategy}.
+   * 
+   * @param strategy lzo compression algorithm to use
+   * @param directBufferSize size of the direct buffer to be used.
+   */
+  public LzoCompressor(CompressionStrategy strategy, int directBufferSize) {
+    this.strategy = strategy;
+    this.directBufferSize = directBufferSize;
+    uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
+    compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
+    compressedDirectBuf.position(directBufferSize);
+    
+    /**
+     * Initialize {@link #lzoCompressor} and {@link #workingMemoryBufLen}
+     */
+    init(this.strategy.getCompressor());
+    workingMemoryBuf = ByteBuffer.allocateDirect(workingMemoryBufLen);
+  }
+  
+  /**
+   * Creates a new compressor with the default lzo1x_1 compression.
+   */
+  public LzoCompressor() {
+    this(CompressionStrategy.LZO1X_1, 64*1024);
+  }
+  
+  public synchronized void setInput(byte[] b, int off, int len) {
+    if (b == null) {
+      throw new NullPointerException();
+    }
+    if (off < 0 || len < 0 || off > b.length - len) {
+      throw new ArrayIndexOutOfBoundsException();
+    }
+
+    this.userBuf = b;
+    this.userBufOff = off;
+    this.userBufLen = len;
+
+    // Reinitialize lzo's output direct-buffer 
+    compressedDirectBuf.limit(directBufferSize);
+    compressedDirectBuf.position(directBufferSize);
+  }
+
+  synchronized void setInputFromSavedData() {
+    uncompressedDirectBufLen = userBufLen;
+    if (uncompressedDirectBufLen > directBufferSize) {
+      uncompressedDirectBufLen = directBufferSize;
+    }
+
+    // Reinitialize lzo's input direct buffer
+    uncompressedDirectBuf.rewind();
+    ((ByteBuffer)uncompressedDirectBuf).put(userBuf, userBufOff,  
+                                          uncompressedDirectBufLen);
+
+    // Note how much data is being fed to lzo
+    userBufOff += uncompressedDirectBufLen;
+    userBufLen -= uncompressedDirectBufLen;
+  }
+
+  public synchronized void setDictionary(byte[] b, int off, int len) {
+    // nop
+  }
+
+  public boolean needsInput() {
+    // Consume remaining compressed data?
+    if (compressedDirectBuf.remaining() > 0) {
+      return false;
+    }
+
+    // Check if lzo has consumed all input
+    if (uncompressedDirectBufLen <= 0) {
+      // Check if we have consumed all user-input
+      if (userBufLen <= 0) {
+        return true;
+      } else {
+        setInputFromSavedData();
+      }
+    }
+    
+    return false;
+  }
+  
+  public synchronized void finish() {
+    finish = true;
+  }
+  
+  public synchronized boolean finished() {
+    // Check if 'lzo' says it's 'finished' and
+    // all compressed data has been consumed
+    return (finished && compressedDirectBuf.remaining() == 0); 
+  }
+
+  public synchronized int compress(byte[] b, int off, int len) 
+  throws IOException {
+    if (b == null) {
+      throw new NullPointerException();
+    }
+    if (off < 0 || len < 0 || off > b.length - len) {
+      throw new ArrayIndexOutOfBoundsException();
+    }
+    
+    int n = 0;
+    
+    // Check if there is compressed data
+    n = compressedDirectBuf.remaining();
+    if (n > 0) {
+      n = Math.min(n, len);
+      ((ByteBuffer)compressedDirectBuf).get(b, off, n);
+      return n;
+    }
+
+    // Re-initialize the lzo's output direct-buffer
+    compressedDirectBuf.rewind();
+    compressedDirectBuf.limit(directBufferSize);
+
+    // Compress data
+    n = compressBytesDirect(strategy.getCompressor());
+    compressedDirectBuf.limit(n);
+    
+    // Set 'finished' if lzo has consumed all user-data
+    if (userBufLen <= 0) {
+      finished = true;
+    }
+    
+    // Get at most 'len' bytes
+    n = Math.min(n, len);
+    ((ByteBuffer)compressedDirectBuf).get(b, off, n);
+
+    return n;
+  }
+
+  public synchronized void reset() {
+    finish = false;
+    finished = false;
+    uncompressedDirectBuf.rewind();
+    uncompressedDirectBufLen = 0;
+    compressedDirectBuf.limit(directBufferSize);
+    compressedDirectBuf.position(directBufferSize);
+    userBufOff = userBufLen = 0;
+  }
+  
+  public synchronized void end() {
+    // nop
+  }
+  
+  private native static void initIDs();
+  private native void init(int compressor);
+  private native int compressBytesDirect(int compressor);
+}

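LzoCompressor follows the same push/pull contract as java.util.zip.Deflater: feed bytes with setInput(), drain with compress() until needsInput() reports true, then finish() and keep draining until finished(). A minimal sketch of that loop, mirroring what CompressorStream does (hypothetical direct use; payload and buffer sizes are illustrative; assumes the native library is loaded):

    import org.apache.hadoop.io.compress.lzo.LzoCompressor;

    // Hypothetical direct use of the compressor, outside the stream classes.
    public class RawLzoCompress {
      public static void main(String[] args) throws java.io.IOException {
        LzoCompressor compressor = new LzoCompressor(); // lzo1x_1, 64K buffer
        byte[] input = new byte[8 * 1024];              // pretend payload
        byte[] chunk = new byte[64 * 1024];

        compressor.setInput(input, 0, input.length);
        int total = 0;
        // needsInput() also pulls saved user data into the direct buffer
        while (!compressor.needsInput()) {
          total += compressor.compress(chunk, 0, chunk.length);
        }
        compressor.finish();
        while (!compressor.finished()) {
          total += compressor.compress(chunk, 0, chunk.length);
        }
        System.out.println(input.length + " -> " + total + " compressed bytes");
      }
    }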
+ 308 - 0
src/java/org/apache/hadoop/io/compress/lzo/LzoDecompressor.java

@@ -0,0 +1,308 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.compress.lzo;
+
+import java.io.IOException;
+import java.nio.Buffer;
+import java.nio.ByteBuffer;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.compress.Decompressor;
+import org.apache.hadoop.util.NativeCodeLoader;
+
+/**
+ * A {@link Decompressor} based on the lzo algorithm.
+ * http://www.oberhumer.com/opensource/lzo/
+ * 
+ * @author Arun C Murthy
+ */
+public class LzoDecompressor implements Decompressor {
+  private static final Log LOG = 
+    LogFactory.getLog(LzoDecompressor.class.getName());
+  
+  private int directBufferSize;
+  private Buffer compressedDirectBuf = null;
+  private int compressedDirectBufLen;
+  private Buffer uncompressedDirectBuf = null;
+  private byte[] userBuf = null;
+  private int userBufOff = 0, userBufLen = 0;
+  private boolean finished;
+  
+  private CompressionStrategy strategy;
+  private long lzoDecompressor = 0;   // The actual lzo decompression function.
+  
+  public static enum CompressionStrategy {
+    /**
+     * lzo1 algorithms.
+     */
+    LZO1 (0),
+
+    /**
+     * lzo1a algorithms.
+     */
+    LZO1A (1),
+
+    /**
+     * lzo1b algorithms.
+     */
+    LZO1B (2),
+    LZO1B_SAFE(3),
+
+    /**
+     * lzo1c algorithms.
+     */
+    LZO1C (4),
+    LZO1C_SAFE(5),
+    LZO1C_ASM (6),
+    LZO1C_ASM_SAFE (7),
+
+    /**
+     * lzo1f algorithms.
+     */
+    LZO1F (8),
+    LZO1F_SAFE (9),
+    LZO1F_ASM_FAST (10),
+    LZO1F_ASM_FAST_SAFE (11),
+    
+    /**
+     * lzo1x algorithms.
+     */
+    LZO1X (12),
+    LZO1X_SAFE (13),
+    LZO1X_ASM (14),
+    LZO1X_ASM_SAFE (15),
+    LZO1X_ASM_FAST (16),
+    LZO1X_ASM_FAST_SAFE (17),
+    
+    /**
+     * lzo1y algorithms.
+     */
+    LZO1Y (18),
+    LZO1Y_SAFE (19),
+    LZO1Y_ASM (20),
+    LZO1Y_ASM_SAFE (21),
+    LZO1Y_ASM_FAST (22),
+    LZO1Y_ASM_FAST_SAFE (23),
+    
+    /**
+     * lzo1z algorithms.
+     */
+    LZO1Z (24),
+    LZO1Z_SAFE (25),
+    
+    /**
+     * lzo2a algorithms.
+     */
+    LZO2A (26),
+    LZO2A_SAFE (27);
+    
+    private final int decompressor;
+
+    private CompressionStrategy(int decompressor) {
+      this.decompressor = decompressor;
+    }
+    
+    int getDecompressor() {
+      return decompressor;
+    }
+  }; // CompressionStrategy
+  
+  private static boolean nativeLzoLoaded = false;
+  
+  static {
+    if (NativeCodeLoader.isNativeCodeLoaded()) {
+      // Initialize the native library
+      initIDs();
+      nativeLzoLoaded = true;
+    } else {
+      LOG.error("Cannot load " + LzoDecompressor.class.getName() + 
+              " without native-hadoop library!");
+    }
+  }
+  
+  /**
+   * Check if lzo decompressors are loaded and initialized.
+   * 
+   * @return <code>true</code> if lzo decompressors are loaded & initialized,
+   *         else <code>false</code> 
+   */
+  public static boolean isNativeLzoLoaded() {
+    return nativeLzoLoaded;
+  }
+
+  /**
+   * Creates a new lzo decompressor.
+   * 
+   * @param strategy lzo decompression algorithm
+   * @param directBufferSize size of the direct-buffer
+   */
+  public LzoDecompressor(CompressionStrategy strategy, int directBufferSize) {
+    this.directBufferSize = directBufferSize;
+    this.strategy = strategy;
+    
+    compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
+    uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
+    uncompressedDirectBuf.position(directBufferSize);
+    
+    /**
+     * Initialize {@link #lzoDecompressor}
+     */
+    init(this.strategy.getDecompressor());
+  }
+  
+  /**
+   * Creates a new lzo decompressor.
+   */
+  public LzoDecompressor() {
+    this(CompressionStrategy.LZO1X, 64*1024);
+  }
+
+  public synchronized void setInput(byte[] b, int off, int len) {
+    if (b == null) {
+      throw new NullPointerException();
+    }
+    if (off < 0 || len < 0 || off > b.length - len) {
+      throw new ArrayIndexOutOfBoundsException();
+    }
+  
+    this.userBuf = b;
+    this.userBufOff = off;
+    this.userBufLen = len;
+    
+    setInputFromSavedData();
+    
+    // Reinitialize lzo's output direct-buffer 
+    uncompressedDirectBuf.limit(directBufferSize);
+    uncompressedDirectBuf.position(directBufferSize);
+  }
+  
+  synchronized void setInputFromSavedData() {
+    compressedDirectBufLen = userBufLen;
+    if (compressedDirectBufLen > directBufferSize) {
+      compressedDirectBufLen = directBufferSize;
+    }
+
+    // Reinitialize lzo's input direct-buffer
+    compressedDirectBuf.rewind();
+    ((ByteBuffer)compressedDirectBuf).put(userBuf, userBufOff, 
+                                        compressedDirectBufLen);
+    
+    // Note how much data is being fed to lzo
+    userBufOff += compressedDirectBufLen;
+    userBufLen -= compressedDirectBufLen;
+  }
+
+  public synchronized void setDictionary(byte[] b, int off, int len) {
+    // nop
+  }
+
+  public synchronized boolean needsInput() {
+    // Consume remaining compressed data?
+    if (uncompressedDirectBuf.remaining() > 0) {
+      return false;
+    }
+    
+    // Check if lzo has consumed all input
+    if (compressedDirectBufLen <= 0) {
+      // Check if we have consumed all user-input
+      if (userBufLen <= 0) {
+        return true;
+      } else {
+        setInputFromSavedData();
+      }
+    }
+    
+    return false;
+  }
+
+  public synchronized boolean needsDictionary() {
+    return false;
+  }
+
+  public synchronized boolean finished() {
+    // Check if 'lzo' says it's 'finished' and
+    // all uncompressed data has been consumed
+    return (finished && uncompressedDirectBuf.remaining() == 0);
+  }
+
+  public synchronized int decompress(byte[] b, int off, int len) 
+  throws IOException {
+    if (b == null) {
+      throw new NullPointerException();
+    }
+    if (off < 0 || len < 0 || off > b.length - len) {
+      throw new ArrayIndexOutOfBoundsException();
+    }
+    
+    int n = 0;
+    
+    // Check if there is uncompressed data
+    n = uncompressedDirectBuf.remaining();
+    if (n > 0) {
+      n = Math.min(n, len);
+      ((ByteBuffer)uncompressedDirectBuf).get(b, off, n);
+      return n;
+    }
+    
+    // Check if there is data to decompress
+    if (compressedDirectBufLen <= 0) {
+      return 0;
+    }
+    
+    // Re-initialize the lzo's output direct-buffer
+    uncompressedDirectBuf.rewind();
+    uncompressedDirectBuf.limit(directBufferSize);
+
+    // Decompress data
+    n = decompressBytesDirect(strategy.getDecompressor());
+    uncompressedDirectBuf.limit(n);
+
+    // Set 'finished' if lzo has consumed all user-data
+    if (userBufLen <= 0) {
+      finished = true;
+    }
+    
+    // Return at most 'len' bytes
+    n = Math.min(n, len);
+    ((ByteBuffer)uncompressedDirectBuf).get(b, off, n);
+
+    return n;
+  }
+  
+  public synchronized void reset() {
+    finished = false;
+    compressedDirectBufLen = 0;
+    uncompressedDirectBuf.limit(directBufferSize);
+    uncompressedDirectBuf.position(directBufferSize);
+    userBufOff = userBufLen = 0;
+  }
+
+  public synchronized void end() {
+    // nop
+  }
+
+  protected void finalize() {
+    end();
+  }
+  
+  private native static void initIDs();
+  private native void init(int decompressor);
+  private native int decompressBytesDirect(int decompressor);
+}

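The decompressor side is symmetrical, with one caveat: raw lzo is block-oriented, so a complete compressed block must be presented in a single setInput() (this is why BlockDecompressorStream reads whole length-prefixed chunks before calling decompress). A hedged sketch (hypothetical helper; assumes block[0..clen) was produced by LzoCompressor with the matching LZO1X_1 strategy and that the native library is loaded):

    import org.apache.hadoop.io.compress.lzo.LzoDecompressor;

    // Hypothetical use of the raw decompressor on one complete lzo1x block.
    public class RawLzoDecompress {
      static byte[] inflate(byte[] block, int clen, int maxLen)
          throws java.io.IOException {
        LzoDecompressor decompressor = new LzoDecompressor(); // LZO1X, 64K buf
        decompressor.setInput(block, 0, clen);
        byte[] restored = new byte[maxLen];
        int n = decompressor.decompress(restored, 0, maxLen);
        byte[] out = new byte[n];
        System.arraycopy(restored, 0, out, 0, n);
        return out;
      }
    }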
+ 1 - 1
src/native/Makefile.am

@@ -36,7 +36,7 @@
 export PLATFORM = $(shell echo $$OS_NAME | tr [A-Z] [a-z])
 
 # List the sub-directories here
-SUBDIRS = src/org/apache/hadoop/io/compress/zlib lib
+SUBDIRS = src/org/apache/hadoop/io/compress/zlib src/org/apache/hadoop/io/compress/lzo lib
 
 # The following export is needed to build libhadoop.so in the 'lib' directory
 export SUBDIRS

+ 1 - 1
src/native/Makefile.in

@@ -207,7 +207,7 @@ sysconfdir = @sysconfdir@
 target_alias = @target_alias@
 
 # List the sub-directories here
-SUBDIRS = src/org/apache/hadoop/io/compress/zlib lib
+SUBDIRS = src/org/apache/hadoop/io/compress/zlib src/org/apache/hadoop/io/compress/lzo lib
 all: config.h
 	$(MAKE) $(AM_MAKEFLAGS) all-recursive
 

+ 2 - 0
src/native/NEWS

@@ -1,3 +1,5 @@
 2006-10-05 Arun C Murthy <arunc@yahoo-inc.com>
   * Initial version of libhadoop released
 
+2007-01-03 Arun C Murthy <arunc@yahoo-inc.com>
+  * Added support for the lzo compression library

+ 36 - 0
src/native/config.h.in

@@ -1,5 +1,8 @@
 /* config.h.in.  Generated from configure.ac by autoheader.  */
 
+/* The 'actual' dynamic-library for '-llzo2' */
+#undef HADOOP_LZO_LIBRARY
+
 /* The 'actual' dynamic-library for '-lz' */
 #undef HADOOP_ZLIB_LIBRARY
 
@@ -18,9 +21,42 @@
 /* Define to 1 if you have the `jvm' library (-ljvm). */
 #undef HAVE_LIBJVM
 
+/* Define to 1 if you have the `lzo2' library (-llzo2). */
+#undef HAVE_LIBLZO2
+
 /* Define to 1 if you have the `z' library (-lz). */
 #undef HAVE_LIBZ
 
+/* Define to 1 if you have the <lzo/lzo1a.h> header file. */
+#undef HAVE_LZO_LZO1A_H
+
+/* Define to 1 if you have the <lzo/lzo1b.h> header file. */
+#undef HAVE_LZO_LZO1B_H
+
+/* Define to 1 if you have the <lzo/lzo1c.h> header file. */
+#undef HAVE_LZO_LZO1C_H
+
+/* Define to 1 if you have the <lzo/lzo1f.h> header file. */
+#undef HAVE_LZO_LZO1F_H
+
+/* Define to 1 if you have the <lzo/lzo1x.h> header file. */
+#undef HAVE_LZO_LZO1X_H
+
+/* Define to 1 if you have the <lzo/lzo1y.h> header file. */
+#undef HAVE_LZO_LZO1Y_H
+
+/* Define to 1 if you have the <lzo/lzo1z.h> header file. */
+#undef HAVE_LZO_LZO1Z_H
+
+/* Define to 1 if you have the <lzo/lzo1.h> header file. */
+#undef HAVE_LZO_LZO1_H
+
+/* Define to 1 if you have the <lzo/lzo2a.h> header file. */
+#undef HAVE_LZO_LZO2A_H
+
+/* Define to 1 if you have the <lzo/lzo_asm.h> header file. */
+#undef HAVE_LZO_LZO_ASM_H
+
 /* Define to 1 if you have the <memory.h> header file. */
 #undef HAVE_MEMORY_H
 

File diff suppressed because it is too large
+ 604 - 157
src/native/configure


+ 7 - 0
src/native/configure.ac

@@ -64,6 +64,9 @@ AC_SUBST([JNI_LDFLAGS])
 dnl Check for '-lz'
 AC_CHECK_LIB([z], [deflate])
 
+dnl Check for '-llzo2'
+AC_CHECK_LIB([lzo2], [lzo_init])
+
 # Checks for header files.
 dnl Check for Ansi C headers
 AC_HEADER_STDC
@@ -89,6 +92,9 @@ AC_SUBST([JNI_CPPFLAGS])
 dnl Check for zlib headers
 AC_CHECK_HEADERS([zlib.h zconf.h], AC_COMPUTE_NEEDED_DSO(z,HADOOP_ZLIB_LIBRARY), AC_MSG_ERROR(Zlib headers were not found... native-hadoop library needs zlib to build. Please install the requisite zlib development package.))
 
+dnl Check for lzo headers
+AC_CHECK_HEADERS([lzo/lzo1.h lzo/lzo1a.h lzo/lzo1b.h lzo/lzo1c.h lzo/lzo1f.h lzo/lzo1x.h lzo/lzo1y.h lzo/lzo1z.h lzo/lzo2a.h lzo/lzo_asm.h], AC_COMPUTE_NEEDED_DSO(lzo2,HADOOP_LZO_LIBRARY), AC_MSG_ERROR(lzo headers were not found... native-hadoop library needs lzo to build. Please install the requisite lzo development package.))
+
 # Checks for typedefs, structures, and compiler characteristics.
 AC_C_CONST
 
@@ -97,6 +103,7 @@ AC_CHECK_FUNCS([memset])
 
 AC_CONFIG_FILES([Makefile
                  src/org/apache/hadoop/io/compress/zlib/Makefile
+                 src/org/apache/hadoop/io/compress/lzo/Makefile
                  lib/Makefile])
 AC_OUTPUT
 

+ 253 - 0
src/native/src/org/apache/hadoop/io/compress/lzo/LzoCompressor.c

@@ -0,0 +1,253 @@
+#if defined HAVE_CONFIG_H
+  #include <config.h>
+#endif
+
+#if defined HAVE_STDIO_H
+  #include <stdio.h>
+#else
+  #error 'stdio.h not found'
+#endif  
+
+#if defined HAVE_STDLIB_H
+  #include <stdlib.h>
+#else
+  #error 'stdlib.h not found'
+#endif  
+
+#include "org_apache_hadoop_io_compress_lzo.h"
+
+// The lzo2 library-handle
+static void *liblzo2 = NULL;
+
+// The lzo 'compressors'
+typedef struct {
+  const char *function;           // The compression function
+  int wrkmem;                     // The 'working memory' needed
+  int compression_level;          // Compression level if required;
+                                  // else UNDEFINED_COMPRESSION_LEVEL
+} lzo_compressor;
+
+#define UNDEFINED_COMPRESSION_LEVEL -999
+
+static lzo_compressor lzo_compressors[] = {
+  /** lzo1 compressors */
+  /* 0 */   {"lzo1_compress", LZO1_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL},
+  /* 1 */   {"lzo1_99_compress", LZO1_99_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL},
+
+  /** lzo1a compressors */
+  /* 2 */   {"lzo1a_compress", LZO1A_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL},
+  /* 3 */   {"lzo1a_99_compress", LZO1A_99_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL},
+
+  /** lzo1b compressors */
+  /* 4 */   {"lzo1b_compress", LZO1B_MEM_COMPRESS, LZO1B_DEFAULT_COMPRESSION}, 
+  /* 5 */   {"lzo1b_compress", LZO1B_MEM_COMPRESS, LZO1B_BEST_SPEED}, 
+  /* 6 */   {"lzo1b_compress", LZO1B_MEM_COMPRESS, LZO1B_BEST_COMPRESSION}, 
+  /* 7 */   {"lzo1b_1_compress", LZO1B_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 8 */   {"lzo1b_2_compress", LZO1B_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 9 */   {"lzo1b_3_compress", LZO1B_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 10 */  {"lzo1b_4_compress", LZO1B_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 11 */  {"lzo1b_5_compress", LZO1B_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 12 */  {"lzo1b_6_compress", LZO1B_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 13 */  {"lzo1b_7_compress", LZO1B_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 14 */  {"lzo1b_8_compress", LZO1B_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 15 */  {"lzo1b_9_compress", LZO1B_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 16 */  {"lzo1b_99_compress", LZO1B_99_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 17 */  {"lzo1b_999_compress", LZO1B_999_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  
+  /** lzo1c compressors */
+  /* 18 */  {"lzo1c_compress", LZO1C_MEM_COMPRESS, LZO1C_DEFAULT_COMPRESSION}, 
+  /* 19 */  {"lzo1c_compress", LZO1C_MEM_COMPRESS, LZO1C_BEST_SPEED}, 
+  /* 20 */  {"lzo1c_compress", LZO1C_MEM_COMPRESS, LZO1C_BEST_COMPRESSION}, 
+  /* 21 */  {"lzo1c_1_compress", LZO1C_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 22 */  {"lzo1c_2_compress", LZO1C_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 23 */  {"lzo1c_3_compress", LZO1C_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 24 */  {"lzo1c_4_compress", LZO1C_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 25 */  {"lzo1c_5_compress", LZO1C_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 26 */  {"lzo1c_6_compress", LZO1C_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 27 */  {"lzo1c_7_compress", LZO1C_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 28 */  {"lzo1c_8_compress", LZO1C_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 29 */  {"lzo1c_9_compress", LZO1C_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 30 */  {"lzo1c_99_compress", LZO1C_99_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  /* 31 */  {"lzo1c_999_compress", LZO1C_999_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL}, 
+  
+  /** lzo1f compressors */
+  /* 32 */  {"lzo1f_1_compress", LZO1F_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL},
+  /* 33 */  {"lzo1f_999_compress", LZO1F_999_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL},
+
+  /** lzo1x compressors */
+  /* 34 */  {"lzo1x_1_compress", LZO1X_1_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL},
+  /* 35 */  {"lzo1x_11_compress", LZO1X_1_11_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL},
+  /* 36 */  {"lzo1x_12_compress", LZO1X_1_12_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL},
+  /* 37 */  {"lzo1x_15_compress", LZO1X_1_15_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL},
+  /* 38 */  {"lzo1x_999_compress", LZO1X_999_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL},
+
+  /** lzo1y compressors */
+  /* 39 */  {"lzo1y_1_compress", LZO1Y_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL},
+  /* 40 */  {"lzo1y_999_compress", LZO1Y_999_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL},
+
+  /** lzo1z compressors */
+  /* 41 */  {"lzo1z_999_compress", LZO1Z_999_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL},
+
+  /** lzo2a compressors */
+  /* 42 */  {"lzo2a_999_compress", LZO2A_999_MEM_COMPRESS, UNDEFINED_COMPRESSION_LEVEL},
+};
+
+// The second lzo* compressor prototype - this really should be in lzoconf.h!
+typedef int
+(__LZO_CDECL *lzo_compress2_t)   ( const lzo_bytep src, lzo_uint  src_len,
+                                  lzo_bytep dst, lzo_uintp dst_len,
+                                  lzo_voidp wrkmem, int compression_level );
+
+static jfieldID LzoCompressor_finish;
+static jfieldID LzoCompressor_finished;
+static jfieldID LzoCompressor_uncompressedDirectBuf;
+static jfieldID LzoCompressor_uncompressedDirectBufLen;
+static jfieldID LzoCompressor_compressedDirectBuf;
+static jfieldID LzoCompressor_directBufferSize;
+static jfieldID LzoCompressor_lzoCompressor;
+static jfieldID LzoCompressor_workingMemoryBufLen;
+static jfieldID LzoCompressor_workingMemoryBuf;
+
+JNIEXPORT void JNICALL
+Java_org_apache_hadoop_io_compress_lzo_LzoCompressor_initIDs(
+	JNIEnv *env, jclass class
+	) {
+	// Load liblzo2.so
+	liblzo2 = dlopen(HADOOP_LZO_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
+	if (!liblzo2) {
+		THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load liblzo2.so!");
+	  return;
+	}
+    
+  LzoCompressor_finish = (*env)->GetFieldID(env, class, "finish", "Z");
+  LzoCompressor_finished = (*env)->GetFieldID(env, class, "finished", "Z");
+  LzoCompressor_uncompressedDirectBuf = (*env)->GetFieldID(env, class, 
+                                                    "uncompressedDirectBuf", 
+                                                    "Ljava/nio/Buffer;");
+  LzoCompressor_uncompressedDirectBufLen = (*env)->GetFieldID(env, class, 
+                                            "uncompressedDirectBufLen", "I");
+  LzoCompressor_compressedDirectBuf = (*env)->GetFieldID(env, class, 
+                                                        "compressedDirectBuf",
+                                                        "Ljava/nio/Buffer;");
+  LzoCompressor_directBufferSize = (*env)->GetFieldID(env, class, 
+                                            "directBufferSize", "I");
+  LzoCompressor_lzoCompressor = (*env)->GetFieldID(env, class, 
+                                          "lzoCompressor", "J");
+  LzoCompressor_workingMemoryBufLen = (*env)->GetFieldID(env, class,
+                                                "workingMemoryBufLen", "I");
+  LzoCompressor_workingMemoryBuf = (*env)->GetFieldID(env, class, 
+                                              "workingMemoryBuf", 
+                                              "Ljava/nio/Buffer;");
+}
+
+JNIEXPORT void JNICALL
+Java_org_apache_hadoop_io_compress_lzo_LzoCompressor_init(
+  JNIEnv *env, jobject this, jint compressor 
+  ) {
+  const char *lzo_compressor_function = lzo_compressors[compressor].function;
+ 
+  // Locate the requisite symbols from liblzo2.so
+  dlerror();                                 // Clear any existing error
+
+  // Initialize the lzo library 
+  void *lzo_init_func_ptr = NULL;
+  typedef int (__LZO_CDECL *lzo_init_t) (unsigned,int,int,int,int,int,int,int,int,int);
+  LOAD_DYNAMIC_SYMBOL(lzo_init_func_ptr, env, liblzo2, "__lzo_init_v2");
+  lzo_init_t lzo_init_function = (lzo_init_t)(lzo_init_func_ptr);
+  int rv = lzo_init_function(LZO_VERSION, (int)sizeof(short), (int)sizeof(int), 
+              (int)sizeof(long), (int)sizeof(lzo_uint32), (int)sizeof(lzo_uint), 
+              (int)lzo_sizeof_dict_t, (int)sizeof(char*), (int)sizeof(lzo_voidp),
+              (int)sizeof(lzo_callback_t));
+  if (rv != LZO_E_OK) {
+    THROW(env, "Ljava/lang/InternalError", "Could not initialize lzo library!");
+    return;
+  }
+  
+  // Save the compressor-function into LzoCompressor_lzoCompressor
+  void *compressor_func_ptr = NULL;
+  LOAD_DYNAMIC_SYMBOL(compressor_func_ptr, env, liblzo2, lzo_compressor_function);
+  (*env)->SetLongField(env, this, LzoCompressor_lzoCompressor,
+                       JLONG(compressor_func_ptr));
+  
+  // Save the 'working memory' length into LzoCompressor_workingMemoryBufLen
+  (*env)->SetIntField(env, this, LzoCompressor_workingMemoryBufLen,
+                      lzo_compressors[compressor].wrkmem);
+
+  return;
+}
+
+JNIEXPORT jint JNICALL
+Java_org_apache_hadoop_io_compress_lzo_LzoCompressor_compressBytesDirect(
+  JNIEnv *env, jobject this, jint compressor 
+	) {
+  const char *lzo_compressor_function = lzo_compressors[compressor].function;
+
+	// Get members of LzoCompressor
+	jobject uncompressed_direct_buf = (*env)->GetObjectField(env, this, 
+									                    LzoCompressor_uncompressedDirectBuf);
+	lzo_uint uncompressed_direct_buf_len = (*env)->GetIntField(env, this, 
+									                  LzoCompressor_uncompressedDirectBufLen);
+
+	jobject compressed_direct_buf = (*env)->GetObjectField(env, this, 
+									                        LzoCompressor_compressedDirectBuf);
+	lzo_uint compressed_direct_buf_len = (*env)->GetIntField(env, this, 
+									                            LzoCompressor_directBufferSize);
+
+	jobject working_memory_buf = (*env)->GetObjectField(env, this, 
+									                      LzoCompressor_workingMemoryBuf);
+
+  jlong lzo_compressor_funcptr = (*env)->GetLongField(env, this,
+                  LzoCompressor_lzoCompressor);
+
+  // Get direct buffers
+	lzo_bytep uncompressed_bytes = (*env)->GetDirectBufferAddress(env, 
+                                            uncompressed_direct_buf);
+  if (uncompressed_bytes == 0) {
+    	return (jint)0;
+	}
+	
+	lzo_bytep compressed_bytes = (*env)->GetDirectBufferAddress(env, 
+                                            compressed_direct_buf);
+  if (compressed_bytes == 0) {
+		return (jint)0;
+	}
+	
+  lzo_voidp workmem = (*env)->GetDirectBufferAddress(env, working_memory_buf);
+  if (workmem == 0) {
+    return (jint)0;
+  }
+  
+	// Compress
+  lzo_uint no_compressed_bytes = compressed_direct_buf_len;
+	int rv = 0;
+  int compression_level = lzo_compressors[compressor].compression_level;
+  if (compression_level == UNDEFINED_COMPRESSION_LEVEL) {
+    lzo_compress_t fptr = (lzo_compress_t) FUNC_PTR(lzo_compressor_funcptr);
+    rv = fptr(uncompressed_bytes, uncompressed_direct_buf_len,
+              compressed_bytes, &no_compressed_bytes, 
+              workmem);
+  } else {
+    lzo_compress2_t fptr = (lzo_compress2_t) FUNC_PTR(lzo_compressor_funcptr);
+    rv = fptr(uncompressed_bytes, uncompressed_direct_buf_len,
+              compressed_bytes, &no_compressed_bytes, 
+              workmem, compression_level); 
+  }
+
+  if (rv == LZO_E_OK) {
+    // lzo compresses all input data
+    (*env)->SetIntField(env, this, 
+                LzoCompressor_uncompressedDirectBufLen, 0);
+  } else {
+    const int msg_len = 32;
+    char exception_msg[msg_len];
+    snprintf(exception_msg, msg_len, "%s returned: %d", lzo_compressor_function, rv);
+    THROW(env, "java/lang/InternalError", exception_msg);
+  }
+
+  return (jint)no_compressed_bytes;
+}
+
+/**
+ * vim: sw=2: ts=2: et:
+ */
+

+ 194 - 0
src/native/src/org/apache/hadoop/io/compress/lzo/LzoDecompressor.c

@@ -0,0 +1,194 @@
+#if defined HAVE_CONFIG_H
+  #include <config.h>
+#endif
+
+#if defined HAVE_STDIO_H
+  #include <stdio.h>
+#else
+  #error 'stdio.h not found'
+#endif  
+
+#if defined HAVE_STDLIB_H
+  #include <stdlib.h>
+#else
+  #error 'stdlib.h not found'
+#endif  
+
+#include "org_apache_hadoop_io_compress_lzo.h"
+
+// The lzo2 library-handle
+static void *liblzo2 = NULL;
+
+// The lzo 'decompressors'
+static char* lzo_decompressors[] = {
+  /** lzo1 decompressors */
+  /* 0 */   "lzo1_decompress", 
+  
+  /** lzo1a decompressors */
+  /* 1 */   "lzo1a_decompress",
+
+  /** lzo1b decompressors */
+  /* 2 */   "lzo1b_decompress", 
+  /* 3 */   "lzo1b_decompress_safe",
+
+  /** lzo1c decompressors */
+  /* 4 */   "lzo1c_decompress",
+  /* 5 */   "lzo1c_decompress_safe",
+  /* 6 */   "lzo1c_decompress_asm",
+  /* 7 */   "lzo1c_decompress_asm_safe",
+  
+  /** lzo1f decompressors */
+  /* 8 */   "lzo1f_decompress",
+  /* 9 */   "lzo1f_decompress_safe",
+  /* 10 */  "lzo1f_decompress_asm_fast",
+  /* 11 */  "lzo1f_decompress_asm_fast_safe",
+
+  /** lzo1x decompressors */
+  /* 12 */  "lzo1x_decompress",
+  /* 13 */  "lzo1x_decompress_safe",
+  /* 14 */  "lzo1x_decompress_asm",
+  /* 15 */  "lzo1x_decompress_asm_safe",
+  /* 16 */  "lzo1x_decompress_asm_fast",
+  /* 17 */  "lzo1x_decompress_asm_fast_safe",  // trailing comma required;
+                                               // without it C concatenates
+                                               // this string with the next
+                                               // and shifts every index below
+  
+  /** lzo1y decompressors */
+  /* 18 */  "lzo1y_decompress",
+  /* 19 */  "lzo1y_decompress_safe",
+  /* 20 */  "lzo1y_decompress_asm",
+  /* 21 */  "lzo1y_decompress_asm_safe",
+  /* 22 */  "lzo1y_decompress_asm_fast",
+  /* 23 */  "lzo1y_decompress_asm_fast_safe",
+
+  /** lzo1z decompressors */
+  /* 24 */  "lzo1z_decompress", 
+  /* 25 */  "lzo1z_decompress_safe",
+
+  /** lzo2a decompressors */
+  /* 26 */  "lzo2a_decompress",
+  /* 27 */  "lzo2a_decompress_safe"
+};
+
+static jfieldID LzoDecompressor_finished;
+static jfieldID LzoDecompressor_compressedDirectBuf;
+static jfieldID LzoDecompressor_compressedDirectBufLen;
+static jfieldID LzoDecompressor_uncompressedDirectBuf;
+static jfieldID LzoDecompressor_directBufferSize;
+static jfieldID LzoDecompressor_lzoDecompressor;
+
+JNIEXPORT void JNICALL
+Java_org_apache_hadoop_io_compress_lzo_LzoDecompressor_initIDs(
+	JNIEnv *env, jclass class
+	) {
+	// Load liblzo2.so
+	liblzo2 = dlopen(HADOOP_LZO_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
+	if (!liblzo2) {
+		THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load liblzo2.so!");
+	  return;
+	}
+    
+  LzoDecompressor_finished = (*env)->GetFieldID(env, class, "finished", "Z");
+  LzoDecompressor_compressedDirectBuf = (*env)->GetFieldID(env, class, 
+                                                "compressedDirectBuf", 
+                                                "Ljava/nio/Buffer;");
+  LzoDecompressor_compressedDirectBufLen = (*env)->GetFieldID(env, class, 
+                                                    "compressedDirectBufLen", "I");
+  LzoDecompressor_uncompressedDirectBuf = (*env)->GetFieldID(env, class, 
+                                                  "uncompressedDirectBuf", 
+                                                  "Ljava/nio/Buffer;");
+  LzoDecompressor_directBufferSize = (*env)->GetFieldID(env, class, 
+                                              "directBufferSize", "I");
+  LzoDecompressor_lzoDecompressor = (*env)->GetFieldID(env, class,
+                                              "lzoDecompressor", "J");
+}
+
+JNIEXPORT void JNICALL
+Java_org_apache_hadoop_io_compress_lzo_LzoDecompressor_init(
+  JNIEnv *env, jobject this, jint decompressor 
+  ) {
+  const char *lzo_decompressor_function = lzo_decompressors[decompressor];
+ 
+  // Locate the requisite symbols from liblzo2.so
+  dlerror();                                 // Clear any existing error
+
+  // Initialize the lzo library 
+  void *lzo_init_func_ptr = NULL;
+  typedef int (__LZO_CDECL *lzo_init_t) (unsigned,int,int,int,int,int,int,int,int,int);
+  LOAD_DYNAMIC_SYMBOL(lzo_init_func_ptr, env, liblzo2, "__lzo_init_v2");
+  lzo_init_t lzo_init_function = (lzo_init_t)(lzo_init_func_ptr);
+  int rv = lzo_init_function(LZO_VERSION, (int)sizeof(short), (int)sizeof(int), 
+              (int)sizeof(long), (int)sizeof(lzo_uint32), (int)sizeof(lzo_uint), 
+              (int)lzo_sizeof_dict_t, (int)sizeof(char*), (int)sizeof(lzo_voidp),
+              (int)sizeof(lzo_callback_t));
+  if (rv != LZO_E_OK) {
+    THROW(env, "Ljava/lang/InternalError", "Could not initialize lzo library!");
+    return;
+  }
+  
+  // Save the decompressor-function into LzoDecompressor_lzoDecompressor
+  void *decompressor_func_ptr = NULL;
+  LOAD_DYNAMIC_SYMBOL(decompressor_func_ptr, env, liblzo2,
+      lzo_decompressor_function);
+  (*env)->SetLongField(env, this, LzoDecompressor_lzoDecompressor,
+                       JLONG(decompressor_func_ptr));
+
+  return;
+}
+
+JNIEXPORT jint JNICALL
+Java_org_apache_hadoop_io_compress_lzo_LzoDecompressor_decompressBytesDirect(
+	JNIEnv *env, jobject this, jint decompressor
+	) {
+  const char *lzo_decompressor_function = lzo_decompressors[decompressor];
+
+	// Get members of LzoDecompressor
+	jobject compressed_direct_buf = (*env)->GetObjectField(env, this,
+                                              LzoDecompressor_compressedDirectBuf);
+	lzo_uint compressed_direct_buf_len = (*env)->GetIntField(env, this, 
+                        		  							LzoDecompressor_compressedDirectBufLen);
+
+	jobject uncompressed_direct_buf = (*env)->GetObjectField(env, this, 
+                            								  LzoDecompressor_uncompressedDirectBuf);
+	lzo_uint uncompressed_direct_buf_len = (*env)->GetIntField(env, this,
+                                                LzoDecompressor_directBufferSize);
+
+  jlong lzo_decompressor_funcptr = (*env)->GetLongField(env, this,
+                                              LzoDecompressor_lzoDecompressor);
+
+  // Get direct buffers
+	lzo_bytep uncompressed_bytes = (*env)->GetDirectBufferAddress(env, 
+											                    uncompressed_direct_buf);
+ 	if (uncompressed_bytes == 0) {
+    return (jint)0;
+	}
+	
+	lzo_bytep compressed_bytes = (*env)->GetDirectBufferAddress(env, 
+										                    compressed_direct_buf);
+  if (compressed_bytes == 0) {
+		return (jint)0;
+	}
+	
+	// Decompress
+  lzo_uint no_uncompressed_bytes = uncompressed_direct_buf_len;
+  lzo_decompress_t fptr = (lzo_decompress_t) FUNC_PTR(lzo_decompressor_funcptr);
+	int rv = fptr(compressed_bytes, compressed_direct_buf_len,
+                uncompressed_bytes, &no_uncompressed_bytes,
+                NULL); 
+
+  if (rv == LZO_E_OK) {
+    // lzo decompresses all input data
+    (*env)->SetIntField(env, this, LzoDecompressor_compressedDirectBufLen, 0);
+  } else {
+    const int msg_len = 32;
+    char exception_msg[msg_len];
+    snprintf(exception_msg, msg_len, "%s returned: %d", 
+              lzo_decompressor_function, rv);
+    THROW(env, "java/lang/InternalError", exception_msg);
+  }
+  
+  return no_uncompressed_bytes;
+}
+
+/**
+ * vim: sw=2: ts=2: et:
+ */
+

+ 50 - 0
src/native/src/org/apache/hadoop/io/compress/lzo/Makefile.am

@@ -0,0 +1,50 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Makefile template for building native 'lzo' for hadoop.
+#
+
+#
+# Notes: 
+# 1. This makefile is designed to do the actual builds in $(HADOOP_HOME)/build/native/${os.name}-${os.arch}/$(subdir) .
+# 2. This makefile depends on the following environment variables to function correctly:
+#    * HADOOP_NATIVE_SRCDIR 
+#    * JAVA_HOME
+#    * JVM_DATA_MODEL
+#    * OS_ARCH 
+#    * PLATFORM
+#    All these are setup by build.xml and/or the top-level makefile.
+# 3. The creation of requisite jni headers/stubs are also done by build.xml and they are
+#    assumed to be in $(HADOOP_HOME)/build/native/src/org/apache/hadoop/io/compress/lzo.
+#
+
+# The 'vpath directive' to locate the actual source files 
+vpath %.c $(HADOOP_NATIVE_SRCDIR)/$(subdir)
+
+AM_CPPFLAGS = @JNI_CPPFLAGS@ -I$(HADOOP_NATIVE_SRCDIR)/src
+AM_LDFLAGS = @JNI_LDFLAGS@
+AM_CFLAGS = -g -Wall -fPIC -O2 -m$(JVM_DATA_MODEL)
+
+noinst_LTLIBRARIES = libnativelzo.la
+libnativelzo_la_SOURCES = LzoCompressor.c LzoDecompressor.c
+libnativelzo_la_LIBADD = -ldl -ljvm
+
+#
+#vim: sw=4: ts=4: noet
+#

+ 469 - 0
src/native/src/org/apache/hadoop/io/compress/lzo/Makefile.in

@@ -0,0 +1,469 @@
+# Makefile.in generated by automake 1.9.6 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005  Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Makefile template for building native 'lzo' for hadoop.
+#
+
+#
+# Notes: 
+# 1. This makefile is designed to do the actual builds in $(HADOOP_HOME)/build/native/${os.name}-${os.arch}/$(subdir).
+# 2. This makefile depends on the following environment variables to function correctly:
+#    * HADOOP_NATIVE_SRCDIR
+#    * JAVA_HOME
+#    * JVM_DATA_MODEL
+#    * OS_ARCH
+#    * PLATFORM
+#    All these are set up by build.xml and/or the top-level makefile.
+# 3. The requisite JNI headers/stubs are generated by build.xml and are
+#    assumed to be in $(HADOOP_HOME)/build/native/src/org/apache/hadoop/io/compress/lzo.
+#
+
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+top_builddir = ../../../../../../..
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+INSTALL = @INSTALL@
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = src/org/apache/hadoop/io/compress/lzo
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \
+	$(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+LTLIBRARIES = $(noinst_LTLIBRARIES)
+libnativelzo_la_DEPENDENCIES =
+am_libnativelzo_la_OBJECTS = LzoCompressor.lo LzoDecompressor.lo
+libnativelzo_la_OBJECTS = $(am_libnativelzo_la_OBJECTS)
+DEFAULT_INCLUDES = -I. -I$(srcdir) -I$(top_builddir)
+depcomp = $(SHELL) $(top_srcdir)/config/depcomp
+am__depfiles_maybe = depfiles
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) \
+	$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+	$(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(AM_LDFLAGS) $(LDFLAGS) -o $@
+SOURCES = $(libnativelzo_la_SOURCES)
+DIST_SOURCES = $(libnativelzo_la_SOURCES)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMDEP_FALSE = @AMDEP_FALSE@
+AMDEP_TRUE = @AMDEP_TRUE@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO = @ECHO@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+F77 = @F77@
+FFLAGS = @FFLAGS@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JNI_CPPFLAGS = @JNI_CPPFLAGS@
+JNI_LDFLAGS = @JNI_LDFLAGS@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAKEINFO = @MAKEINFO@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+VERSION = @VERSION@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_F77 = @ac_ct_F77@
+ac_ct_RANLIB = @ac_ct_RANLIB@
+ac_ct_STRIP = @ac_ct_STRIP@
+am__fastdepCC_FALSE = @am__fastdepCC_FALSE@
+am__fastdepCC_TRUE = @am__fastdepCC_TRUE@
+am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@
+am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+datadir = @datadir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+AM_CPPFLAGS = @JNI_CPPFLAGS@ -I$(HADOOP_NATIVE_SRCDIR)/src
+AM_LDFLAGS = @JNI_LDFLAGS@
+AM_CFLAGS = -g -Wall -fPIC -O2 -m$(JVM_DATA_MODEL)
+noinst_LTLIBRARIES = libnativelzo.la
+libnativelzo_la_SOURCES = LzoCompressor.c LzoDecompressor.c
+libnativelzo_la_LIBADD = -ldl -ljvm
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in:  $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
+		&& exit 0; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu  src/org/apache/hadoop/io/compress/lzo/Makefile'; \
+	cd $(top_srcdir) && \
+	  $(AUTOMAKE) --gnu  src/org/apache/hadoop/io/compress/lzo/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure:  $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4):  $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+clean-noinstLTLIBRARIES:
+	-test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+	@list='$(noinst_LTLIBRARIES)'; for p in $$list; do \
+	  dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+	  test "$$dir" != "$$p" || dir=.; \
+	  echo "rm -f \"$${dir}/so_locations\""; \
+	  rm -f "$${dir}/so_locations"; \
+	done
+libnativelzo.la: $(libnativelzo_la_OBJECTS) $(libnativelzo_la_DEPENDENCIES) 
+	$(LINK)  $(libnativelzo_la_LDFLAGS) $(libnativelzo_la_OBJECTS) $(libnativelzo_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LzoCompressor.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LzoDecompressor.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ $<; \
+@am__fastdepCC_TRUE@	then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ `$(CYGPATH_W) '$<'`; \
+@am__fastdepCC_TRUE@	then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	if $(LTCOMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ $<; \
+@am__fastdepCC_TRUE@	then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Plo"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+distclean-libtool:
+	-rm -f libtool
+uninstall-info-am:
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '    { files[$$0] = 1; } \
+	       END { for (i in files) print i; }'`; \
+	mkid -fID $$unique
+tags: TAGS
+
+TAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	here=`pwd`; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '    { files[$$0] = 1; } \
+	       END { for (i in files) print i; }'`; \
+	if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	    $$tags $$unique; \
+	fi
+ctags: CTAGS
+CTAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	here=`pwd`; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '    { files[$$0] = 1; } \
+	       END { for (i in files) print i; }'`; \
+	test -z "$(CTAGS_ARGS)$$tags$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$tags $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && cd $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) $$here
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
+	list='$(DISTFILES)'; for file in $$list; do \
+	  case $$file in \
+	    $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
+	    $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
+	  esac; \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
+	  if test "$$dir" != "$$file" && test "$$dir" != "."; then \
+	    dir="/$$dir"; \
+	    $(mkdir_p) "$(distdir)$$dir"; \
+	  else \
+	    dir=''; \
+	  fi; \
+	  if test -d $$d/$$file; then \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
+	    fi; \
+	    cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
+	  else \
+	    test -f $(distdir)/$$file \
+	    || cp -p $$d/$$file $(distdir)/$$file \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-libtool distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-exec-am:
+
+install-info: install-info-am
+
+install-man:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-info-am
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-noinstLTLIBRARIES ctags distclean \
+	distclean-compile distclean-generic distclean-libtool \
+	distclean-tags distdir dvi dvi-am html html-am info info-am \
+	install install-am install-data install-data-am install-exec \
+	install-exec-am install-info install-info-am install-man \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags uninstall uninstall-am \
+	uninstall-info-am
+
+
+# The 'vpath' directive locates the actual source files
+vpath %.c $(HADOOP_NATIVE_SRCDIR)/$(subdir)
+
+#
+#vim: sw=4: ts=4: noet
+#
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:

+ 112 - 0
src/native/src/org/apache/hadoop/io/compress/lzo/org_apache_hadoop_io_compress_lzo.h

@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if !defined ORG_APACHE_HADOOP_IO_COMPRESS_LZO_LZO_H
+#define ORG_APACHE_HADOOP_IO_COMPRESS_LZO_LZO_H
+
+#if defined HAVE_CONFIG_H
+  #include <config.h>
+#endif
+
+#if defined HAVE_STDDEF_H
+  #include <stddef.h>
+#else
+  #error "stddef.h not found"
+#endif
+    
+#if defined HAVE_DLFCN_H
+  #include <dlfcn.h>
+#else
+  #error "dlfcn.h not found"
+#endif  
+
+#if defined HAVE_JNI_H    
+  #include <jni.h>
+#else
+  #error "jni.h not found"
+#endif
+
+#if defined HAVE_LZO_LZO1_H
+  #include <lzo/lzo1.h>
+#else
+  #error "lzo/lzo1.h not found"
+#endif
+
+#if defined HAVE_LZO_LZO1A_H
+  #include <lzo/lzo1a.h>
+#else
+  #error "lzo/lzo1a.h not found"
+#endif
+
+#if defined HAVE_LZO_LZO1B_H
+  #include <lzo/lzo1b.h>
+#else
+  #error "lzo/lzo1b.h not found"
+#endif
+
+#if defined HAVE_LZO_LZO1C_H
+  #include <lzo/lzo1c.h>
+#else
+  #error "lzo/lzo1c.h not found"
+#endif
+
+#if defined HAVE_LZO_LZO1F_H
+  #include <lzo/lzo1f.h>
+#else
+  #error "lzo/lzo1f.h not found"
+#endif
+
+#if defined HAVE_LZO_LZO1X_H
+  #include <lzo/lzo1x.h>
+#else
+  #error "lzo/lzo1x.h not found"
+#endif
+
+#if defined HAVE_LZO_LZO1Y_H
+  #include <lzo/lzo1y.h>
+#else
+  #error "lzo/lzo1y.h not found"
+#endif
+
+#if defined HAVE_LZO_LZO1Z_H
+  #include <lzo/lzo1z.h>
+#else
+  #error "lzo/lzo1z.h not found"
+#endif
+
+#if defined HAVE_LZO_LZO2A_H
+  #include <lzo/lzo2a.h>
+#else
+  #error "lzo/lzo2a.h not found"
+#endif
+
+#if defined HAVE_LZO_LZO_ASM_H
+  #include <lzo/lzo_asm.h>
+#else
+  #error "lzo/lzo_asm.h not found"
+#endif
+
+#include "org_apache_hadoop.h"
+
+/* A helper macro to convert the Java 'function-pointer' (a jlong) to a void*. */
+#define FUNC_PTR(func_ptr) ((void*)((ptrdiff_t)(func_ptr)))
+
+/* A helper macro to convert the void* back to the Java 'function-pointer'. */
+#define JLONG(func_ptr) ((jlong)((ptrdiff_t)(func_ptr)))
+
+#endif //ORG_APACHE_HADOOP_IO_COMPRESS_LZO_LZO_H

+ 5 - 5
src/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c

@@ -75,11 +75,11 @@ Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_initIDs(
 
 	// Locate the requisite symbols from libz.so
 	dlerror();                                 // Clear any existing error
-	LOAD_ZLIB_SYMBOL(dlsym_deflateInit2_, env, libz, "deflateInit2_");
-	LOAD_ZLIB_SYMBOL(dlsym_deflate, env, libz, "deflate");
-	LOAD_ZLIB_SYMBOL(dlsym_deflateSetDictionary, env, libz, "deflateSetDictionary");
-	LOAD_ZLIB_SYMBOL(dlsym_deflateReset, env, libz, "deflateReset");
-	LOAD_ZLIB_SYMBOL(dlsym_deflateEnd, env, libz, "deflateEnd");
+	LOAD_DYNAMIC_SYMBOL(dlsym_deflateInit2_, env, libz, "deflateInit2_");
+	LOAD_DYNAMIC_SYMBOL(dlsym_deflate, env, libz, "deflate");
+	LOAD_DYNAMIC_SYMBOL(dlsym_deflateSetDictionary, env, libz, "deflateSetDictionary");
+	LOAD_DYNAMIC_SYMBOL(dlsym_deflateReset, env, libz, "deflateReset");
+	LOAD_DYNAMIC_SYMBOL(dlsym_deflateEnd, env, libz, "deflateEnd");
 
 	// Initialize the requisite fieldIds
     ZlibCompressor_stream = (*env)->GetFieldID(env, class, "stream", "J");

+ 5 - 5
src/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c

@@ -75,11 +75,11 @@ Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_initIDs(
 
 	// Locate the requisite symbols from libz.so
 	dlerror();                                 // Clear any existing error
-	LOAD_ZLIB_SYMBOL(dlsym_inflateInit2_, env, libz, "inflateInit2_");
-	LOAD_ZLIB_SYMBOL(dlsym_inflate, env, libz, "inflate");
-	LOAD_ZLIB_SYMBOL(dlsym_inflateSetDictionary, env, libz, "inflateSetDictionary");
-	LOAD_ZLIB_SYMBOL(dlsym_inflateReset, env, libz, "inflateReset");
-	LOAD_ZLIB_SYMBOL(dlsym_inflateEnd, env, libz, "inflateEnd");
+	LOAD_DYNAMIC_SYMBOL(dlsym_inflateInit2_, env, libz, "inflateInit2_");
+	LOAD_DYNAMIC_SYMBOL(dlsym_inflate, env, libz, "inflate");
+	LOAD_DYNAMIC_SYMBOL(dlsym_inflateSetDictionary, env, libz, "inflateSetDictionary");
+	LOAD_DYNAMIC_SYMBOL(dlsym_inflateReset, env, libz, "inflateReset");
+	LOAD_DYNAMIC_SYMBOL(dlsym_inflateEnd, env, libz, "inflateEnd");
 
 	// Initialize the requisite fieldIds
     ZlibDecompressor_stream = (*env)->GetFieldID(env, class, "stream", "J");

+ 0 - 6
src/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h

@@ -55,12 +55,6 @@
 
 #include "org_apache_hadoop.h"
 
-/* A helper macro to dlsym the requisite zlib symbol. */
-#define LOAD_ZLIB_SYMBOL(func_ptr, env, handle, symbol) \
-  if ((func_ptr = do_dlsym(env, handle, symbol)) == NULL) { \
-  	return; \
-  }
-
 /* A helper macro to convert the java 'stream-handle' to a z_stream pointer. */
 #define ZSTREAM(stream) ((z_stream*)((ptrdiff_t)(stream)))
 

+ 7 - 0
src/native/src/org_apache_hadoop.h

@@ -73,6 +73,13 @@ static void *do_dlsym(JNIEnv *env, void *handle, const char *symbol) {
   return func_ptr;
 }
 
+/* A helper macro to dlsym the requisite dynamic symbol and bail out on error. */
+#define LOAD_DYNAMIC_SYMBOL(func_ptr, env, handle, symbol) \
+  if ((func_ptr = do_dlsym(env, handle, symbol)) == NULL) { \
+    return; \
+  }
+
+
 #endif
 
 //vim: sw=2: ts=2: et

+ 20 - 3
src/test/org/apache/hadoop/io/TestSequenceFile.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.*;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.DefaultCodec;
+import org.apache.hadoop.io.compress.LzoCodec;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.conf.*;
 
@@ -41,8 +42,26 @@ public class TestSequenceFile extends TestCase {
   public TestSequenceFile(String name) { super(name); }
 
   /** Unit tests for SequenceFile. */
-  public void testSequenceFile() throws Exception {
+  public void testZlibSequenceFile() throws Exception {
+    LOG.info("Testing SequenceFile with DefaultCodec");
     compressedSeqFileTest(new DefaultCodec());
+    LOG.info("Successfully tested SequenceFile with DefaultCodec");
+  }
+  
+  public void testLzoSequenceFile() throws Exception {
+    if (LzoCodec.isNativeLzoLoaded()) {
+      LOG.info("Testing SequenceFile with LzoCodec");
+      CompressionCodec lzoCodec = null;
+      try {
+        lzoCodec = (CompressionCodec) ReflectionUtils.newInstance(
+                conf.getClassByName(LzoCodec.class.getName()), conf);
+      } catch (ClassNotFoundException cnfe) {
+        throw new IOException("Cannot find LzoCodec!");
+      }
+
+      compressedSeqFileTest(lzoCodec);
+      LOG.info("Successfully tested SequenceFile with LzoCodec");
+    }
   }
   
   public void compressedSeqFileTest(CompressionCodec codec) throws Exception {
@@ -60,8 +79,6 @@ public class TestSequenceFile extends TestCase {
 
     FileSystem fs = new LocalFileSystem(conf);
     try {
-        //LOG.setLevel(Level.FINE);
-
         // SequenceFile.Writer
         writeTest(fs, count, seed, file, CompressionType.NONE, null);
         readTest(fs, count, seed, file);
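The new LZO tests gate on LzoCodec.isNativeLzoLoaded(), since the codec only works when the native library is built and loaded. The same guard applies in client code; a hedged sketch follows, using only the calls shown in the test (the CodecChooser class name is hypothetical, and the fallback choice is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.compress.CompressionCodec;
    import org.apache.hadoop.io.compress.DefaultCodec;
    import org.apache.hadoop.io.compress.LzoCodec;
    import org.apache.hadoop.util.ReflectionUtils;

    public class CodecChooser {
      /** Prefer native LZO when loaded, else fall back to zlib. */
      static CompressionCodec chooseCodec(Configuration conf) throws IOException {
        if (LzoCodec.isNativeLzoLoaded()) {
          try {
            // Instantiate reflectively, as the test does.
            return (CompressionCodec) ReflectionUtils.newInstance(
                conf.getClassByName(LzoCodec.class.getName()), conf);
          } catch (ClassNotFoundException cnfe) {
            throw new IOException("Cannot find LzoCodec!");
          }
        }
        return new DefaultCodec();   // zlib-based fallback
      }
    }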

+ 6 - 0
src/test/org/apache/hadoop/io/compress/TestCodec.java

@@ -51,6 +51,12 @@ public class TestCodec extends TestCase {
     codecTest(seed, count, "org.apache.hadoop.io.compress.GzipCodec");
   }
   
+  public void testLzoCodec() throws IOException {
+    if (LzoCodec.isNativeLzoLoaded()) {
+      codecTest(seed, count, "org.apache.hadoop.io.compress.LzoCodec");
+    }
+  }
+  
   private static void codecTest(int seed, int count, String codecClass) 
   throws IOException {
     

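TestCodec's codecTest drives a codec by class name through a compress/decompress round trip. For reference, the core of such a round trip against the CompressionCodec interface looks roughly like the sketch below; this is not the test's actual body, and the class name, data, and buffer handling are illustrative:

    import java.io.*;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.compress.CompressionCodec;
    import org.apache.hadoop.util.ReflectionUtils;

    public class LzoRoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        CompressionCodec codec = (CompressionCodec) ReflectionUtils.newInstance(
            conf.getClassByName("org.apache.hadoop.io.compress.LzoCodec"), conf);

        byte[] original = "hello, lzo!".getBytes();

        // Compress into an in-memory buffer.
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        OutputStream out = codec.createOutputStream(buf);
        out.write(original);
        out.close();

        // Decompress and compare against the original bytes.
        DataInputStream in = new DataInputStream(codec.createInputStream(
            new ByteArrayInputStream(buf.toByteArray())));
        byte[] roundTripped = new byte[original.length];
        in.readFully(roundTripped);
        in.close();

        System.out.println(java.util.Arrays.equals(original, roundTripped));
      }
    }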