
HADOOP-4950. Make the CompressorStream, DecompressorStream,
BlockCompressorStream, and BlockDecompressorStream public to facilitate
non-Hadoop codecs. (omalley)


git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/branches/branch-0.20@732840 13f79535-47bb-0310-9956-ffa450edef68

Owen O'Malley 16 years ago
parent
commit
033a2bfeb4

+ 4 - 0
CHANGES.txt

@@ -298,6 +298,10 @@ Release 0.20.0 - Unreleased
     HADOOP-4916. Make user/location of Chukwa installation configurable by an
     external properties file. (Eric Yang via cdouglas)
 
+    HADOOP-4950. Make the CompressorStream, DecompressorStream, 
+    BlockCompressorStream, and BlockDecompressorStream public to facilitate 
+    non-Hadoop codecs. (omalley)
+
   OPTIMIZATIONS
 
     HADOOP-3293. Fixes FileInputFormat to do provide locations for splits

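The point of the change is visible in the diffs below: once these wrapper classes are public, a codec implemented outside the org.apache.hadoop.io.compress package can construct them directly instead of copying their code. A minimal sketch of that use, assuming a third-party Compressor/Decompressor pair; ExampleStreams and the 64 KB buffer size are invented for illustration:

package com.example.codec;

import java.io.InputStream;
import java.io.OutputStream;

import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.CompressorStream;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DecompressorStream;

/** Hypothetical helper for a codec that lives outside Hadoop core. */
public final class ExampleStreams {
  private static final int BUFFER_SIZE = 64 * 1024;

  private ExampleStreams() {}

  /** Wrap a raw output stream; possible now that CompressorStream is public. */
  public static CompressionOutputStream compressing(OutputStream out,
                                                    Compressor compressor) {
    return new CompressorStream(out, compressor, BUFFER_SIZE);
  }

  /** Wrap a raw input stream; possible now that DecompressorStream is public. */
  public static CompressionInputStream decompressing(InputStream in,
                                                     Decompressor decompressor) {
    return new DecompressorStream(in, decompressor, BUFFER_SIZE);
  }
}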
+ 2 - 2
src/core/org/apache/hadoop/io/compress/BlockCompressorStream.java

@@ -31,7 +31,7 @@ import java.io.OutputStream;
  * {@link org.apache.hadoop.io.compress.Compressor} requires buffering to
  * effect meaningful compression, it is responsible for it.
  */
-class BlockCompressorStream extends CompressorStream {
+public class BlockCompressorStream extends CompressorStream {
 
   // The 'maximum' size of input data to be compressed, to account
   // for the overhead of the compression algorithm.
@@ -137,7 +137,7 @@ class BlockCompressorStream extends CompressorStream {
     }
   }
 
-  void compress() throws IOException {
+  protected void compress() throws IOException {
     int len = compressor.compress(buffer, 0, buffer.length);
     if (len > 0) {
       // Write out the compressed chunk

+ 3 - 3
src/core/org/apache/hadoop/io/compress/BlockDecompressorStream.java

@@ -28,7 +28,7 @@ import java.io.InputStream;
  * 'stream-based' compression algorithms.
  *  
  */
-class BlockDecompressorStream extends DecompressorStream {
+public class BlockDecompressorStream extends DecompressorStream {
   private int originalBlockSize = 0;
   private int noUncompressedBytes = 0;
 
@@ -58,7 +58,7 @@ class BlockDecompressorStream extends DecompressorStream {
     super(in);
   }
 
-  int decompress(byte[] b, int off, int len) throws IOException {
+  protected int decompress(byte[] b, int off, int len) throws IOException {
     // Check if we are the beginning of a block
     if (noUncompressedBytes == originalBlockSize) {
       // Get original data size
@@ -89,7 +89,7 @@ class BlockDecompressorStream extends DecompressorStream {
     return n;
   }
 
-  void getCompressedData() throws IOException {
+  protected void getCompressedData() throws IOException {
     checkStream();
 
     // Get the size of the compressed chunk

+ 6 - 6
src/core/org/apache/hadoop/io/compress/CompressorStream.java

@@ -24,10 +24,10 @@ import java.io.OutputStream;
 import org.apache.hadoop.io.compress.CompressionOutputStream;
 import org.apache.hadoop.io.compress.Compressor;
 
-class CompressorStream extends CompressionOutputStream {
-  Compressor compressor;
-  byte[] buffer;
-  boolean closed = false;
+public class CompressorStream extends CompressionOutputStream {
+  protected Compressor compressor;
+  protected byte[] buffer;
+  protected boolean closed = false;
   
   public CompressorStream(OutputStream out, Compressor compressor, int bufferSize) {
     super(out);
@@ -72,7 +72,7 @@ class CompressorStream extends CompressionOutputStream {
     }
   }
 
-  void compress() throws IOException {
+  protected void compress() throws IOException {
     int len = compressor.compress(buffer, 0, buffer.length);
     if (len > 0) {
       out.write(buffer, 0, len);
@@ -100,7 +100,7 @@ class CompressorStream extends CompressionOutputStream {
     }
   }
 
-  byte[] oneByte = new byte[1];
+  private byte[] oneByte = new byte[1];
   public void write(int b) throws IOException {
     oneByte[0] = (byte)(b & 0xff);
     write(oneByte, 0, oneByte.length);

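With CompressorStream public and its compressor and buffer fields plus compress() widened to protected, an external codec can also subclass it and change how compressed chunks are written. A minimal sketch, assuming the codec wants a 4-byte length prefix per chunk (much as BlockCompressorStream does for block-oriented codecs); FramedCompressorStream is an invented name:

package com.example.codec;

import java.io.IOException;
import java.io.OutputStream;

import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.CompressorStream;

/** Hypothetical codec stream that length-prefixes each compressed chunk. */
public class FramedCompressorStream extends CompressorStream {

  public FramedCompressorStream(OutputStream out, Compressor compressor,
                                int bufferSize) {
    super(out, compressor, bufferSize);
  }

  /**
   * Drain one chunk from the compressor and write it with a 4-byte
   * big-endian length prefix, so the matching input stream can find
   * chunk boundaries. Overridable only since compress() became protected.
   */
  @Override
  protected void compress() throws IOException {
    // 'compressor' and 'buffer' are the newly protected fields.
    int len = compressor.compress(buffer, 0, buffer.length);
    if (len > 0) {
      writeInt(len);
      out.write(buffer, 0, len);  // 'out' comes from CompressionOutputStream
    }
  }

  private void writeInt(int v) throws IOException {
    out.write((v >>> 24) & 0xff);
    out.write((v >>> 16) & 0xff);
    out.write((v >>>  8) & 0xff);
    out.write(v & 0xff);
  }
}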
+ 10 - 10
src/core/org/apache/hadoop/io/compress/DecompressorStream.java

@@ -24,11 +24,11 @@ import java.io.InputStream;
 
 import org.apache.hadoop.io.compress.Decompressor;
 
-class DecompressorStream extends CompressionInputStream {
-  Decompressor decompressor = null;
-  byte[] buffer;
-  boolean eof = false;
-  boolean closed = false;
+public class DecompressorStream extends CompressionInputStream {
+  protected Decompressor decompressor = null;
+  protected byte[] buffer;
+  protected boolean eof = false;
+  protected boolean closed = false;
   
   public DecompressorStream(InputStream in, Decompressor decompressor, int bufferSize) {
     super(in);
@@ -56,7 +56,7 @@ class DecompressorStream extends CompressionInputStream {
     super(in);
   }
   
-  byte[] oneByte = new byte[1];
+  private byte[] oneByte = new byte[1];
   public int read() throws IOException {
     checkStream();
     return (read(oneByte, 0, oneByte.length) == -1) ? -1 : (oneByte[0] & 0xff);
@@ -74,7 +74,7 @@ class DecompressorStream extends CompressionInputStream {
     return decompress(b, off, len);
   }
 
-  int decompress(byte[] b, int off, int len) throws IOException {
+  protected int decompress(byte[] b, int off, int len) throws IOException {
     int n = 0;
     
     while ((n = decompressor.decompress(b, off, len)) == 0) {
@@ -90,7 +90,7 @@ class DecompressorStream extends CompressionInputStream {
     return n;
   }
   
-  void getCompressedData() throws IOException {
+  protected void getCompressedData() throws IOException {
     checkStream();
   
     int n = in.read(buffer, 0, buffer.length);
@@ -101,7 +101,7 @@ class DecompressorStream extends CompressionInputStream {
     decompressor.setInput(buffer, 0, n);
   }
   
-  void checkStream() throws IOException {
+  protected void checkStream() throws IOException {
     if (closed) {
       throw new IOException("Stream closed");
     }
@@ -111,7 +111,7 @@ class DecompressorStream extends CompressionInputStream {
     decompressor.reset();
   }
 
-  byte[] skipBytes = new byte[512];
+  private byte[] skipBytes = new byte[512];
   public long skip(long n) throws IOException {
     // Sanity checks
     if (n < 0) {
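
The decompression side mirrors this: with decompress(), getCompressedData(), and the checkStream() helper now protected, a subclass can take over how compressed chunks are located in the input. A sketch of the reader for the framed format above, with end-of-stream handling elided; FramedDecompressorStream is again an invented name:

package com.example.codec;

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DecompressorStream;

/** Hypothetical reader for the length-prefixed format sketched above. */
public class FramedDecompressorStream extends DecompressorStream {

  public FramedDecompressorStream(InputStream in, Decompressor decompressor,
                                  int bufferSize) {
    super(in, decompressor, bufferSize);
  }

  /**
   * Read exactly one length-prefixed chunk and feed it to the
   * decompressor. Overridable only since getCompressedData() became
   * protected.
   */
  @Override
  protected void getCompressedData() throws IOException {
    checkStream();
    int len = readInt();
    if (len > buffer.length) {
      throw new IOException("Chunk of " + len + " bytes exceeds buffer");
    }
    int off = 0;
    while (off < len) {  // a chunk may arrive across several reads
      int n = in.read(buffer, off, len - off);
      if (n == -1) {
        throw new EOFException("Unexpected end of compressed chunk");
      }
      off += n;
    }
    decompressor.setInput(buffer, 0, len);
  }

  private int readInt() throws IOException {
    int b1 = in.read(), b2 = in.read(), b3 = in.read(), b4 = in.read();
    if ((b1 | b2 | b3 | b4) < 0) {
      throw new EOFException("Truncated chunk header");
    }
    return (b1 << 24) | (b2 << 16) | (b3 << 8) | b4;
  }
}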