@@ -1,19 +1,19 @@
 /*
    LZ4 - Fast LZ compression algorithm
-   Copyright (C) 2011, Yann Collet.
-   BSD License
+   Copyright (C) 2011-2013, Yann Collet.
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are
    met:
-
+
        * Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
        * Redistributions in binary form must reproduce the above
    copyright notice, this list of conditions and the following disclaimer
    in the documentation and/or other materials provided with the
    distribution.
-
+
    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -25,621 +25,672 @@
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
+   - LZ4 source repository : http://code.google.com/p/lz4/
+*/
+
+/*
+Note : this source file requires "lz4_encoder.h"
 */

 //**************************************
-// Copy from:
-// URL: http://lz4.googlecode.com/svn/trunk/lz4.c
-// Repository Root: http://lz4.googlecode.com/svn
-// Repository UUID: 650e7d94-2a16-8b24-b05c-7c0b3f6821cd
-// Revision: 43
-// Node Kind: file
-// Last Changed Author: yann.collet.73@gmail.com
-// Last Changed Rev: 43
-// Last Changed Date: 2011-12-16 15:41:46 -0800 (Fri, 16 Dec 2011)
-// Sha1: 9db7b2c57698c528d79572e6bce2e7dc33fa5998
+// Tuning parameters
 //**************************************
+// MEMORY_USAGE :
+// Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB; 16 -> 64KB; 20 -> 1MB; etc.)
+// Increasing memory usage improves compression ratio
+// Reduced memory usage can improve speed, due to cache effect
+// Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
+#define MEMORY_USAGE 14
+
+// HEAPMODE :
+// Selects how the default compression function allocates memory for its hash table:
+// on the stack (0: default, fastest) or on the heap (1: requires allocation via malloc()).
+// Default allocation strategy is to use the stack (HEAPMODE 0).
+// Note : the explicit *_stack* and *_heap* functions are unaffected by this setting
+#define HEAPMODE 0
+
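The MEMORY_USAGE knob above is just a power-of-two table size. A tiny standalone sketch of the formula, illustrative only and not part of the patch:

    #include <stdio.h>

    // Prints the hash-table footprint implied by MEMORY_USAGE: N -> 2^N bytes.
    int main(void)
    {
        for (int n = 10; n <= 20; n += 2)
            printf("MEMORY_USAGE %2d -> %4u KB\n", n, (1u << n) >> 10);
        return 0;   // 14 -> 16 KB, the default, fits an x86 L1 data cache
    }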

 //**************************************
-// Compilation Directives
+// CPU Feature Detection
 //**************************************
-#if __STDC_VERSION__ >= 199901L
-  /* "restrict" is a known keyword */
+// 32 or 64 bits ?
+#if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \
+  || defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) \
+  || defined(__64BIT__) || defined(_LP64) || defined(__LP64__) \
+  || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) )   // Detects 64 bits mode
+#  define LZ4_ARCH64 1
 #else
-#define restrict  // Disable restrict
+#  define LZ4_ARCH64 0
 #endif

+// Little Endian or Big Endian ?
+// Overwrite the #define below if you know your architecture endianness
+#if defined (__GLIBC__)
+#  include <endian.h>
+#  if (__BYTE_ORDER == __BIG_ENDIAN)
+#     define LZ4_BIG_ENDIAN 1
+#  endif
+#elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN))
+#  define LZ4_BIG_ENDIAN 1
+#elif defined(__sparc) || defined(__sparc__) \
+   || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
+   || defined(__hpux) || defined(__hppa) \
+   || defined(_MIPSEB) || defined(__s390__)
+#  define LZ4_BIG_ENDIAN 1
+#else
+// Little Endian assumed. PDP Endian and other very rare endian formats are unsupported.
+#endif
+
+// Unaligned memory access is automatically enabled for "common" CPUs, such as x86.
+// For other CPUs, such as ARM, the compiler may be more cautious, inserting unnecessary extra code to ensure aligned access
+// If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance
+#if defined(__ARM_FEATURE_UNALIGNED)
+#  define LZ4_FORCE_UNALIGNED_ACCESS 1
+#endif
+
+// Define this parameter if your target system or compiler does not support hardware bit count
+#if defined(_MSC_VER) && defined(_WIN32_WCE)   // Visual Studio for Windows CE does not support Hardware bit count
+#  define LZ4_FORCE_SW_BITCOUNT
+#endif
+
+// BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :
+// This option may provide a small boost to performance for some big endian CPUs, although probably modest.
+// You may set this option to 1 if data will remain within a closed environment.
+// This option is useless on Little Endian CPUs (such as x86)
+//#define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1
+

 //**************************************
-// Includes
+// Compiler Options
 //**************************************
-#include <stdlib.h>   // for malloc
-#include <string.h>   // for memset
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)   // C99
+/* "restrict" is a known keyword */
+#else
+#  define restrict  // Disable restrict
+#endif
+
+#ifdef _MSC_VER    // Visual Studio
+#  define forceinline static __forceinline
+#  include <intrin.h>                    // For Visual 2005
+#  if LZ4_ARCH64                         // 64-bits
+#    pragma intrinsic(_BitScanForward64) // For Visual 2005
+#    pragma intrinsic(_BitScanReverse64) // For Visual 2005
+#  else                                  // 32-bits
+#    pragma intrinsic(_BitScanForward)   // For Visual 2005
+#    pragma intrinsic(_BitScanReverse)   // For Visual 2005
+#  endif
+#  pragma warning(disable : 4127)        // disable: C4127: conditional expression is constant
+#else
+#  ifdef __GNUC__
+#    define forceinline static inline __attribute__((always_inline))
+#  else
+#    define forceinline static inline
+#  endif
+#endif
+
+#ifdef _MSC_VER
+#  define lz4_bswap16(x) _byteswap_ushort(x)
+#else
+#  define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
+#endif
+
+#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
+#  define expect(expr,value)    (__builtin_expect ((expr),(value)) )
+#else
+#  define expect(expr,value)    (expr)
+#endif
+
+#define likely(expr)     expect((expr) != 0, 1)
+#define unlikely(expr)   expect((expr) != 0, 0)


 //**************************************
-// Performance parameter
+// Includes
 //**************************************
-// Increasing this value improves compression ratio
-// Lowering this value reduces memory usage
-// Lowering may also improve speed, typically on reaching cache size limits (L1 32KB for Intel, 64KB for AMD)
-// Memory usage formula for 32 bits systems : N->2^(N+2) Bytes (examples : 17 -> 512KB ; 12 -> 16KB)
-#define HASH_LOG 12
+#include <stdlib.h>   // for malloc
+#include <string.h>   // for memset
+#include "lz4.h"


 //**************************************
 // Basic Types
 //**************************************
-#if defined(_MSC_VER)    // Visual Studio does not support 'stdint' natively
-#define BYTE unsigned __int8
-#define U16  unsigned __int16
-#define U32  unsigned __int32
-#define S32  __int32
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   // C99
+# include <stdint.h>
+  typedef uint8_t  BYTE;
+  typedef uint16_t U16;
+  typedef uint32_t U32;
+  typedef int32_t  S32;
+  typedef uint64_t U64;
 #else
-#include <stdint.h>
-#define BYTE uint8_t
-#define U16  uint16_t
-#define U32  uint32_t
-#define S32  int32_t
+  typedef unsigned char       BYTE;
+  typedef unsigned short      U16;
+  typedef unsigned int        U32;
+  typedef signed int          S32;
+  typedef unsigned long long  U64;
 #endif

+#if defined(__GNUC__) && !defined(LZ4_FORCE_UNALIGNED_ACCESS)
+#  define _PACKED __attribute__ ((packed))
+#else
+#  define _PACKED
+#endif
+
+#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
+#  ifdef __IBMC__
+#    pragma pack(1)
+#  else
+#    pragma pack(push, 1)
+#  endif
+#endif
+
+typedef struct { U16 v; }  _PACKED U16_S;
+typedef struct { U32 v; }  _PACKED U32_S;
+typedef struct { U64 v; }  _PACKED U64_S;
+typedef struct {size_t v;} _PACKED size_t_S;
+
+#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
+#  pragma pack(pop)
+#endif
+
+#define A16(x)   (((U16_S *)(x))->v)
+#define A32(x)   (((U32_S *)(x))->v)
+#define A64(x)   (((U64_S *)(x))->v)
+#define AARCH(x) (((size_t_S *)(x))->v)
+

 //**************************************
 // Constants
 //**************************************
+#define HASHTABLESIZE (1 << MEMORY_USAGE)
+
 #define MINMATCH 4
-#define SKIPSTRENGTH 6
-#define STACKLIMIT 13
-#define HEAPMODE (HASH_LOG>STACKLIMIT)  // Defines if memory is allocated into the stack (local variable), or into the heap (malloc()).
-#define COPYTOKEN 4
+
 #define COPYLENGTH 8
 #define LASTLITERALS 5
 #define MFLIMIT (COPYLENGTH+MINMATCH)
 #define MINLENGTH (MFLIMIT+1)

+#define LZ4_64KLIMIT ((1<<16) + (MFLIMIT-1))
+#define SKIPSTRENGTH 6   // Increasing this value will make the compression run slower on incompressible data
+
 #define MAXD_LOG 16
 #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)

-#define HASHTABLESIZE (1 << HASH_LOG)
-#define HASH_MASK (HASHTABLESIZE - 1)
-
-#define ML_BITS 4
-#define ML_MASK ((1U<<ML_BITS)-1)
+#define ML_BITS  4
+#define ML_MASK  ((1U<<ML_BITS)-1)
 #define RUN_BITS (8-ML_BITS)
 #define RUN_MASK ((1U<<RUN_BITS)-1)


 //**************************************
-// Local structures
+// Architecture-specific macros
 //**************************************
-struct refTables
-{
-    const BYTE* hashTable[HASHTABLESIZE];
-};
-
-#ifdef __GNUC__
-#  define _PACKED __attribute__ ((packed))
-#else
-#  define _PACKED
+#define STEPSIZE sizeof(size_t)
+#define LZ4_COPYSTEP(s,d)     { AARCH(d) = AARCH(s); d+=STEPSIZE; s+=STEPSIZE; }
+#define LZ4_COPY8(s,d)        { LZ4_COPYSTEP(s,d); if (STEPSIZE<8) LZ4_COPYSTEP(s,d); }
+#define LZ4_SECURECOPY(s,d,e) { if ((STEPSIZE==8)&&(d<e)) LZ4_WILDCOPY(s,d,e); }
+
+#if LZ4_ARCH64   // 64-bit
+#  define HTYPE          U32
+#  define INITBASE(base) const BYTE* const base = ip
+#else            // 32-bit
+#  define HTYPE          const BYTE*
+#  define INITBASE(base) const int base = 0
 #endif

-typedef struct _U32_S
-{
-    U32 v;
-} _PACKED U32_S;
-
-typedef struct _U16_S
-{
-    U16 v;
-} _PACKED U16_S;
-
-#define A32(x) (((U32_S *)(x))->v)
-#define A16(x) (((U16_S *)(x))->v)
+#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
+#  define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
+#  define LZ4_WRITE_LITTLEENDIAN_16(p,i)  { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
+#else   // Little Endian
+#  define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
+#  define LZ4_WRITE_LITTLEENDIAN_16(p,v)  { A16(p) = v; p+=2; }
+#endif


 //**************************************
 // Macros
 //**************************************
-#define LZ4_HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASH_LOG))
-#define LZ4_HASH_VALUE(p)    LZ4_HASH_FUNCTION(A32(p))
-#define LZ4_COPYPACKET(s,d)  A32(d) = A32(s); d+=4; s+=4; A32(d) = A32(s); d+=4; s+=4;
-#define LZ4_WILDCOPY(s,d,e)  do { LZ4_COPYPACKET(s,d) } while (d<e);
-#define LZ4_BLINDCOPY(s,d,l) { BYTE* e=d+l; LZ4_WILDCOPY(s,d,e); d=e; }
-
+#define LZ4_WILDCOPY(s,d,e)  { do { LZ4_COPY8(s,d) } while (d<e); }
+#define LZ4_BLINDCOPY(s,d,l) { BYTE* e=(d)+(l); LZ4_WILDCOPY(s,d,e); d=e; }


 //****************************
-// Compression CODE
+// Private functions
 //****************************
+#if LZ4_ARCH64

-int LZ4_compressCtx(void** ctx,
-                 char* source,
-                 char* dest,
-                 int isize)
-{
-#if HEAPMODE
-    struct refTables *srt = (struct refTables *) (*ctx);
-    const BYTE** HashTable;
-#else
-    const BYTE* HashTable[HASHTABLESIZE] = {0};
-#endif
-
-    const BYTE* ip = (BYTE*) source;
-    const BYTE* anchor = ip;
-    const BYTE* const iend = ip + isize;
-    const BYTE* const mflimit = iend - MFLIMIT;
-#define matchlimit (iend - LASTLITERALS)
-
-    BYTE* op = (BYTE*) dest;
-
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-    const size_t DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
-#endif
-    int len, length;
-    const int skipStrength = SKIPSTRENGTH;
-    U32 forwardH;
-
+forceinline int LZ4_NbCommonBytes (register U64 val)
+{
+# if defined(LZ4_BIG_ENDIAN)
+#   if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    unsigned long r = 0;
+    _BitScanReverse64( &r, val );
+    return (int)(r>>3);
+#   elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    return (__builtin_clzll(val) >> 3);
+#   else
+    int r;
+    if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
+    if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+    r += (!val);
+    return r;
+#   endif
+# else
+#   if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    unsigned long r = 0;
+    _BitScanForward64( &r, val );
+    return (int)(r>>3);
+#   elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    return (__builtin_ctzll(val) >> 3);
+#   else
+    static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
+    return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+#   endif
+# endif
+}

-    // Init
-    if (isize<MINLENGTH) goto _last_literals;
-#if HEAPMODE
-    if (*ctx == NULL)
-    {
-        srt = (struct refTables *) malloc ( sizeof(struct refTables) );
-        *ctx = (void*) srt;
-    }
-    HashTable = srt->hashTable;
-    memset((void*)HashTable, 0, sizeof(srt->hashTable));
 #else
-    (void) ctx;
-#endif
-
-
-    // First Byte
-    HashTable[LZ4_HASH_VALUE(ip)] = ip;
-    ip++; forwardH = LZ4_HASH_VALUE(ip);
-
-    // Main Loop
-    for ( ; ; )
-    {
-        int findMatchAttempts = (1U << skipStrength) + 3;
-        const BYTE* forwardIp = ip;
-        const BYTE* ref;
-        BYTE* token;
-
-        // Find a match
-        do {
-            U32 h = forwardH;
-            int step = findMatchAttempts++ >> skipStrength;
-            ip = forwardIp;
-            forwardIp = ip + step;
-
-            if (forwardIp > mflimit) { goto _last_literals; }
-
-            forwardH = LZ4_HASH_VALUE(forwardIp);
-            ref = HashTable[h];
-            HashTable[h] = ip;
-
-        } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));
+forceinline int LZ4_NbCommonBytes (register U32 val)
+{
+# if defined(LZ4_BIG_ENDIAN)
+#   if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    unsigned long r = 0;
+    _BitScanReverse( &r, val );
+    return (int)(r>>3);
+#   elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    return (__builtin_clz(val) >> 3);
+#   else
+    int r;
+    if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+    r += (!val);
+    return r;
+#   endif
+# else
+#   if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    unsigned long r;
+    _BitScanForward( &r, val );
+    return (int)(r>>3);
+#   elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+    return (__builtin_ctz(val) >> 3);
+#   else
+    static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
+    return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+#   endif
+# endif
+}

-    // Catch up
-    while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; }
+#endif
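A note on the software fallback above: val & -val isolates the lowest set bit, and multiplying by the De Bruijn constant 0x077CB531 places a distinct 5-bit pattern in the top bits, which DeBruijnBytePos maps straight to a byte index. A standalone check of the 32-bit little-endian path, illustrative only and not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    // Byte index (0..3) of the lowest set bit of a non-zero 32-bit word,
    // using the same De Bruijn multiply as the patch's software fallback.
    static int lowest_set_byte(uint32_t val)
    {
        static const int DeBruijnBytePos[32] = {
            0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1,
            3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
        return DeBruijnBytePos[((uint32_t)((val & -(int32_t)val) * 0x077CB531U)) >> 27];
    }

    int main(void)
    {
        assert(lowest_set_byte(0x00000001U) == 0);  // bit 0  -> byte 0
        assert(lowest_set_byte(0x00000100U) == 1);  // bit 8  -> byte 1
        assert(lowest_set_byte(0x00FF0000U) == 2);  // bit 16 -> byte 2
        assert(lowest_set_byte(0x80000000U) == 3);  // bit 31 -> byte 3
        return 0;
    }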

-        // Encode Literal length
-        length = ip - anchor;
-        token = op++;
-        if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; }
-        else *token = (length<<ML_BITS);
-
-        // Copy Literals
-        LZ4_BLINDCOPY(anchor, op, length);

+//******************************
+// Compression functions
+//******************************

-_next_match:
-        // Encode Offset
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-        A16(op) = (ip-ref); op+=2;
-#else
-        { int delta = ip-ref; *op++ = delta; *op++ = delta>>8; }
-#endif
+/*
+int LZ4_compress_stack(
+                 const char* source,
+                 char* dest,
+                 int inputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
+Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
+return : the number of bytes written in buffer 'dest'
+*/
+#define FUNCTION_NAME LZ4_compress_stack
+#include "lz4_encoder.h"

-        // Start Counting
-        ip+=MINMATCH; ref+=MINMATCH;   // MinMatch verified
-        anchor = ip;
-        while (ip<matchlimit-3)
-        {
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-            int diff = A32(ref) ^ A32(ip);
-            if (!diff) { ip+=4; ref+=4; continue; }
-            ip += DeBruijnBytePos[((U32)((diff & -diff) * 0x077CB531U)) >> 27];
-#else
-            if (A32(ref) == A32(ip)) { ip+=4; ref+=4; continue; }
-            if (A16(ref) == A16(ip)) { ip+=2; ref+=2; }
-            if (*ref == *ip) ip++;
-#endif
-            goto _endCount;
-        }
-        if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }
-        if ((ip<matchlimit) && (*ref == *ip)) ip++;
-_endCount:
-        len = (ip - anchor);
-
-        // Encode MatchLength
-        if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; }
-        else *token += len;
-
-        // Test end of chunk
-        if (ip > mflimit) { anchor = ip; break; }
-
-        // Fill table
-        HashTable[LZ4_HASH_VALUE(ip-2)] = ip-2;
-
-        // Test next position
-        ref = HashTable[LZ4_HASH_VALUE(ip)];
-        HashTable[LZ4_HASH_VALUE(ip)] = ip;
-        if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) { token = op++; *token=0; goto _next_match; }
-
-        // Prepare next loop
-        anchor = ip++;
-        forwardH = LZ4_HASH_VALUE(ip);
-    }
-
-_last_literals:
-    // Encode Last Literals
-    {
-        int lastRun = iend - anchor;
-        if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
-        else *op++ = (lastRun<<ML_BITS);
-        memcpy(op, anchor, iend - anchor);
-        op += iend-anchor;
-    }
-
-    // End
-    return (int) (((char*)op)-dest);
-}

+/*
+int LZ4_compress_stack_limitedOutput(
+                 const char* source,
+                 char* dest,
+                 int inputSize,
+                 int maxOutputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
+If it cannot achieve it, compression will stop, and the result of the function will be zero.
+return : the number of bytes written in buffer 'dest', or 0 if the compression fails
+*/
+#define FUNCTION_NAME LZ4_compress_stack_limitedOutput
+#define LIMITED_OUTPUT
+#include "lz4_encoder.h"

-// Note : this function is valid only if isize < LZ4_64KLIMIT
-#define LZ4_64KLIMIT ((1U<<16) + (MFLIMIT-1))
-#define HASHLOG64K (HASH_LOG+1)
-#define LZ4_HASH64K_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASHLOG64K))
-#define LZ4_HASH64K_VALUE(p)    LZ4_HASH64K_FUNCTION(A32(p))
-int LZ4_compress64kCtx(void** ctx,
-                 char* source,
-                 char* dest,
-                 int isize)
-{
-#if HEAPMODE
-    struct refTables *srt = (struct refTables *) (*ctx);
-    U16* HashTable;
-#else
-    U16 HashTable[HASHTABLESIZE<<1] = {0};
-#endif
+/*
+int LZ4_compress64k_stack(
+                 const char* source,
+                 char* dest,
+                 int inputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
+This function compresses better than LZ4_compress_stack(), on the condition that
+'inputSize' is less than LZ4_64KLIMIT; otherwise the function will fail.
+Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
+return : the number of bytes written in buffer 'dest', or 0 if compression fails
+*/
+#define FUNCTION_NAME LZ4_compress64k_stack
+#define COMPRESS_64K
+#include "lz4_encoder.h"

-    const BYTE* ip = (BYTE*) source;
-    const BYTE* anchor = ip;
-    const BYTE* const base = ip;
-    const BYTE* const iend = ip + isize;
-    const BYTE* const mflimit = iend - MFLIMIT;
-#define matchlimit (iend - LASTLITERALS)
-
-    BYTE* op = (BYTE*) dest;
-
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-    const size_t DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
-#endif
-    int len, length;
-    const int skipStrength = SKIPSTRENGTH;
-    U32 forwardH;

+/*
+int LZ4_compress64k_stack_limitedOutput(
+                 const char* source,
+                 char* dest,
+                 int inputSize,
+                 int maxOutputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
+This function compresses better than LZ4_compress_stack_limitedOutput(), on the condition that
+'inputSize' is less than LZ4_64KLIMIT; otherwise the function will fail.
+If it cannot achieve it, compression will stop, and the result of the function will be zero.
+return : the number of bytes written in buffer 'dest', or 0 if the compression fails
+*/
+#define FUNCTION_NAME LZ4_compress64k_stack_limitedOutput
+#define COMPRESS_64K
+#define LIMITED_OUTPUT
+#include "lz4_encoder.h"

-    // Init
-    if (isize<MINLENGTH) goto _last_literals;
-#if HEAPMODE
-    if (*ctx == NULL)
-    {
-        srt = (struct refTables *) malloc ( sizeof(struct refTables) );
-        *ctx = (void*) srt;
-    }
-    HashTable = (U16*)(srt->hashTable);
-    memset((void*)HashTable, 0, sizeof(srt->hashTable));
-#else
-    (void) ctx;
-#endif

+/*
+void* LZ4_create();
+int LZ4_free(void* ctx);

-    // First Byte
-    ip++; forwardH = LZ4_HASH64K_VALUE(ip);
-
-    // Main Loop
-    for ( ; ; )
-    {
-        int findMatchAttempts = (1U << skipStrength) + 3;
-        const BYTE* forwardIp = ip;
-        const BYTE* ref;
-        BYTE* token;
+Used to allocate and free the hashTable memory
+to be used by the LZ4_compress_heap* family of functions.
+LZ4_create() returns NULL if memory allocation fails.
+*/
+void* LZ4_create() { return malloc(HASHTABLESIZE); }
+int LZ4_free(void* ctx) { free(ctx); return 0; }
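A usage sketch for the heap-memory pair above; the prototypes are restated locally for self-containment (in the tree they would come from lz4.h), and buffer sizes are illustrative:

    #include <stdio.h>

    void* LZ4_create();
    int   LZ4_free(void* ctx);
    int   LZ4_compress_heap(void* ctx, const char* source, char* dest, int inputSize);

    int main(void)
    {
        const char src[] = "yada yada yada yada yada yada yada yada";
        char dst[128];                 // assumed >= LZ4_compressBound(sizeof src)
        void* ctx = LZ4_create();      // one hash table, reusable across many calls
        if (ctx == NULL) return 1;     // failed allocation => no compression done
        int csize = LZ4_compress_heap(ctx, src, dst, (int)sizeof(src));
        printf("compressed %u -> %d bytes\n", (unsigned)sizeof(src), csize);
        LZ4_free(ctx);
        return 0;
    }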

-        // Find a match
-        do {
-            U32 h = forwardH;
-            int step = findMatchAttempts++ >> skipStrength;
-            ip = forwardIp;
-            forwardIp = ip + step;
-
-            if (forwardIp > mflimit) { goto _last_literals; }
+/*
+int LZ4_compress_heap(
+                 void* ctx,
+                 const char* source,
+                 char* dest,
+                 int inputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
+The memory used for compression must be created by LZ4_create() and provided by pointer 'ctx'.
+Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
+return : the number of bytes written in buffer 'dest'
+*/
+#define FUNCTION_NAME LZ4_compress_heap
+#define USE_HEAPMEMORY
+#include "lz4_encoder.h"

-            forwardH = LZ4_HASH64K_VALUE(forwardIp);
-            ref = base + HashTable[h];
-            HashTable[h] = ip - base;
-
-        } while (A32(ref) != A32(ip));
+/*
+int LZ4_compress_heap_limitedOutput(
+                 void* ctx,
+                 const char* source,
+                 char* dest,
+                 int inputSize,
+                 int maxOutputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
+If it cannot achieve it, compression will stop, and the result of the function will be zero.
+The memory used for compression must be created by LZ4_create() and provided by pointer 'ctx'.
+return : the number of bytes written in buffer 'dest', or 0 if the compression fails
+*/
+#define FUNCTION_NAME LZ4_compress_heap_limitedOutput
+#define LIMITED_OUTPUT
+#define USE_HEAPMEMORY
+#include "lz4_encoder.h"

-        // Catch up
-        while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; }
-
-        // Encode Literal length
-        length = ip - anchor;
-        token = op++;
-        if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; }
-        else *token = (length<<ML_BITS);
+/*
+int LZ4_compress64k_heap(
+                 void* ctx,
+                 const char* source,
+                 char* dest,
+                 int inputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
+The memory used for compression must be created by LZ4_create() and provided by pointer 'ctx'.
+'inputSize' must be less than LZ4_64KLIMIT, or the function will fail.
+Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
+return : the number of bytes written in buffer 'dest'
+*/
+#define FUNCTION_NAME LZ4_compress64k_heap
+#define COMPRESS_64K
+#define USE_HEAPMEMORY
+#include "lz4_encoder.h"

-        // Copy Literals
-        LZ4_BLINDCOPY(anchor, op, length);

+/*
+int LZ4_compress64k_heap_limitedOutput(
+                 void* ctx,
+                 const char* source,
+                 char* dest,
+                 int inputSize,
+                 int maxOutputSize)
+
+Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
+If it cannot achieve it, compression will stop, and the result of the function will be zero.
+The memory used for compression must be created by LZ4_create() and provided by pointer 'ctx'.
+'inputSize' must be less than LZ4_64KLIMIT, or the function will fail.
+return : the number of bytes written in buffer 'dest', or 0 if the compression fails
+*/
+#define FUNCTION_NAME LZ4_compress64k_heap_limitedOutput
+#define COMPRESS_64K
+#define LIMITED_OUTPUT
+#define USE_HEAPMEMORY
+#include "lz4_encoder.h"

-_next_match:
-        // Encode Offset
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-        A16(op) = (ip-ref); op+=2;
-#else
-        { int delta = ip-ref; *op++ = delta; *op++ = delta>>8; }
-#endif
-
-        // Start Counting
-        ip+=MINMATCH; ref+=MINMATCH;   // MinMatch verified
-        anchor = ip;
-        while (ip<matchlimit-3)
-        {
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-            int diff = A32(ref) ^ A32(ip);
-            if (!diff) { ip+=4; ref+=4; continue; }
-            ip += DeBruijnBytePos[((U32)((diff & -diff) * 0x077CB531U)) >> 27];
+int LZ4_compress(const char* source, char* dest, int inputSize)
+{
+#if HEAPMODE
+    void* ctx = LZ4_create();
+    int result;
+    if (ctx == NULL) return 0;    // Failed allocation => compression not done
+    if (inputSize < LZ4_64KLIMIT)
+        result = LZ4_compress64k_heap(ctx, source, dest, inputSize);
+    else result = LZ4_compress_heap(ctx, source, dest, inputSize);
+    LZ4_free(ctx);
+    return result;
 #else
-            if (A32(ref) == A32(ip)) { ip+=4; ref+=4; continue; }
-            if (A16(ref) == A16(ip)) { ip+=2; ref+=2; }
-            if (*ref == *ip) ip++;
+    if (inputSize < (int)LZ4_64KLIMIT) return LZ4_compress64k_stack(source, dest, inputSize);
+    return LZ4_compress_stack(source, dest, inputSize);
 #endif
-            goto _endCount;
-        }
-        if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }
-        if ((ip<matchlimit) && (*ref == *ip)) ip++;
-_endCount:
-        len = (ip - anchor);
-
-        // Encode MatchLength
-        if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; }
-        else *token += len;
-
-        // Test end of chunk
-        if (ip > mflimit) { anchor = ip; break; }
-
-        // Test next position
-        ref = base + HashTable[LZ4_HASH64K_VALUE(ip)];
-        HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base;
-        if (A32(ref) == A32(ip)) { token = op++; *token=0; goto _next_match; }
-
-        // Prepare next loop
-        anchor = ip++;
-        forwardH = LZ4_HASH64K_VALUE(ip);
-    }
-
-_last_literals:
-    // Encode Last Literals
-    {
-        int lastRun = iend - anchor;
-        if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
-        else *op++ = (lastRun<<ML_BITS);
-        memcpy(op, anchor, iend - anchor);
-        op += iend-anchor;
-    }
-
-    // End
-    return (int) (((char*)op)-dest);
 }

-
-int LZ4_compress(char* source,
-                 char* dest,
-                 int isize)
+int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
 {
 #if HEAPMODE
-    void* ctx = malloc(sizeof(struct refTables));
-    int result;
-    if (isize < LZ4_64KLIMIT)
-        result = LZ4_compress64kCtx(&ctx, source, dest, isize);
-    else result = LZ4_compressCtx(&ctx, source, dest, isize);
-    free(ctx);
-    return result;
+    void* ctx = LZ4_create();
+    int result;
+    if (ctx == NULL) return 0;    // Failed allocation => compression not done
+    if (inputSize < LZ4_64KLIMIT)
+        result = LZ4_compress64k_heap_limitedOutput(ctx, source, dest, inputSize, maxOutputSize);
+    else result = LZ4_compress_heap_limitedOutput(ctx, source, dest, inputSize, maxOutputSize);
+    LZ4_free(ctx);
+    return result;
 #else
-    if (isize < (int)LZ4_64KLIMIT) return LZ4_compress64kCtx(NULL, source, dest, isize);
-    return LZ4_compressCtx(NULL, source, dest, isize);
+    if (inputSize < (int)LZ4_64KLIMIT) return LZ4_compress64k_stack_limitedOutput(source, dest, inputSize, maxOutputSize);
+    return LZ4_compress_stack_limitedOutput(source, dest, inputSize, maxOutputSize);
 #endif
 }
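A round-trip sketch of the public entry points defined above, assuming the usual declarations from lz4.h; the 256-byte scratch size stands in for LZ4_compressBound(sizeof src):

    #include <assert.h>
    #include <string.h>
    #include "lz4.h"

    int main(void)
    {
        const char src[] = "LZ4 is a fast LZ compression algorithm. "
                           "LZ4 is a fast LZ compression algorithm. ";
        char compressed[256];            // assumed >= LZ4_compressBound(sizeof src)
        char restored[sizeof(src)];

        int csize = LZ4_compress(src, compressed, (int)sizeof(src));
        assert(csize > 0);

        // LZ4_decompress_safe() never writes past restored[maxOutputSize-1]
        // and returns a negative value on malformed input.
        int dsize = LZ4_decompress_safe(compressed, restored, csize, (int)sizeof(restored));
        assert(dsize == (int)sizeof(src));
        assert(memcmp(src, restored, sizeof(src)) == 0);
        return 0;
    }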

-
-
 //****************************
-// Decompression CODE
+// Decompression functions
 //****************************

-// Note : The decoding functions LZ4_uncompress() and LZ4_uncompress_unknownOutputSize()
-//        are safe against "buffer overflow" attack type
-//        since they will *never* write outside of the provided output buffer :
-//        they both check this condition *before* writing anything.
-//        A corrupted packet however can make them *read* within the first 64K before the output buffer.
-
-int LZ4_uncompress(char* source,
-                 char* dest,
-                 int osize)
-{
-    // Local Variables
-    const BYTE* restrict ip = (const BYTE*) source;
-    const BYTE* restrict ref;
-
-    BYTE* restrict op = (BYTE*) dest;
-    BYTE* const oend = op + osize;
-    BYTE* cpy;
-
-    BYTE token;
-
-    U32 dec[4]={0, 3, 2, 3};
-    int len, length;
-
-
-    // Main Loop
-    while (1)
-    {
-        // get runlength
-        token = *ip++;
-        if ((length=(token>>ML_BITS)) == RUN_MASK) { for (;(len=*ip++)==255;length+=255){} length += len; }
-
-        // copy literals
-        cpy = op+length;
-        if (cpy>oend-COPYLENGTH)
-        {
-            if (cpy > oend) goto _output_error;
-            memcpy(op, ip, length);
-            ip += length;
-            break;    // Necessarily EOF
-        }
-        LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;
-
-        // get offset
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-        ref = cpy - A16(ip); ip+=2;
-#else
-        { int delta = *ip++; delta += *ip++ << 8; ref = cpy - delta; }
+typedef enum { noPrefix = 0, withPrefix = 1 } prefix64k_directive;
+typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
+typedef enum { full = 0, partial = 1 } earlyEnd_directive;
+
+
+// This generic decompression function covers all use cases.
+// It shall be instantiated several times, using different sets of directives.
+// Note that it is essential this generic function is really inlined,
+// in order to remove useless branches during compilation optimisation.
+forceinline int LZ4_decompress_generic(
+                 const char* source,
+                 char* dest,
+                 int inputSize,          //
+                 int outputSize,         // OutputSize must be != 0; if endOnInput==endOnInputSize, this value is the max size of Output Buffer.
+
+                 int endOnInput,         // endOnOutputSize, endOnInputSize
+                 int prefix64k,          // noPrefix, withPrefix
+                 int partialDecoding,    // full, partial
+                 int targetOutputSize    // only used if partialDecoding==partial
+                 )
+{
+    // Local Variables
+    const BYTE* restrict ip = (const BYTE*) source;
+    const BYTE* ref;
+    const BYTE* const iend = ip + inputSize;
+
+    BYTE* op = (BYTE*) dest;
+    BYTE* const oend = op + outputSize;
+    BYTE* cpy;
+    BYTE* oexit = op + targetOutputSize;
+
+    size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
+#if LZ4_ARCH64
+    size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
 #endif

-        // get matchlength
-        if ((length=(token&ML_MASK)) == ML_MASK) { for (;*ip==255;length+=255) {ip++;} length += *ip++; }
-
-        // copy repeated sequence
-        if (op-ref<COPYTOKEN)
-        {
-            *op++ = *ref++;
-            *op++ = *ref++;
-            *op++ = *ref++;
-            *op++ = *ref++;
-            ref -= dec[op-ref];
-            A32(op)=A32(ref);
-        } else { A32(op)=A32(ref); op+=4; ref+=4; }
-        cpy = op + length;
-        if (cpy > oend-COPYLENGTH)
-        {
-            if (cpy > oend) goto _output_error;
-            LZ4_WILDCOPY(ref, op, (oend-COPYLENGTH));
-            while(op<cpy) *op++=*ref++;
-            op=cpy;
-            if (op == oend) break;    // Check EOF (should never happen, since last 5 bytes are supposed to be literals)
-            continue;
-        }
-        LZ4_WILDCOPY(ref, op, cpy);
-        op=cpy;   // correction
-    }
-
-    // end of decoding
-    return (int) (((char*)ip)-source);
-
-    // write overflow error detected
+
+    // Special cases
+    if ((partialDecoding) && (oexit> oend-MFLIMIT)) oexit = oend-MFLIMIT;                        // targetOutputSize too high => decode everything
+    if ((endOnInput) && unlikely(outputSize==0)) return ((inputSize==1) && (*ip==0)) ? 0 : -1;   // Empty output buffer
+    if ((!endOnInput) && unlikely(outputSize==0)) return (*ip==0?1:-1);
+
+
+    // Main Loop
+    while (1)
+    {
+        unsigned token;
+        size_t length;
+
+        // get runlength
+        token = *ip++;
+        if ((length=(token>>ML_BITS)) == RUN_MASK)
+        {
+            unsigned s=255;
+            while (((endOnInput)?ip<iend:1) && (s==255))
+            {
+                s = *ip++;
+                length += s;
+            }
+        }
+
+        // copy literals
+        cpy = op+length;
+        if (((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
+            || ((!endOnInput) && (cpy>oend-COPYLENGTH)))
+        {
+            if (partialDecoding)
+            {
+                if (cpy > oend) goto _output_error;                           // Error : write attempt beyond end of output buffer
+                if ((endOnInput) && (ip+length > iend)) goto _output_error;   // Error : read attempt beyond end of input buffer
+            }
+            else
+            {
+                if ((!endOnInput) && (cpy != oend)) goto _output_error;       // Error : block decoding must stop exactly there
+                if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error;   // Error : input must be consumed
+            }
+            memcpy(op, ip, length);
+            ip += length;
+            op += length;
+            break;    // Necessarily EOF, due to parsing restrictions
+        }
+        LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;
+
+        // get offset
+        LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
+        if ((prefix64k==noPrefix) && unlikely(ref < (BYTE* const)dest)) goto _output_error;   // Error : offset outside destination buffer
+
+        // get matchlength
+        if ((length=(token&ML_MASK)) == ML_MASK)
+        {
+            for ( ; (!endOnInput) || (ip<iend-(LASTLITERALS+1)) ; )   // Ensure enough bytes remain for LASTLITERALS + token
+            {
+                unsigned s = *ip++;
+                length += s;
+                if (s==255) continue;
+                break;
+            }
+        }
+
+        // copy repeated sequence
+        if unlikely((op-ref)<(int)STEPSIZE)
+        {
+#if LZ4_ARCH64
+            size_t dec64 = dec64table[op-ref];
+#else
+            const size_t dec64 = 0;
+#endif
+            op[0] = ref[0];
+            op[1] = ref[1];
+            op[2] = ref[2];
+            op[3] = ref[3];
+            op += 4, ref += 4; ref -= dec32table[op-ref];
+            A32(op) = A32(ref);
+            op += STEPSIZE-4; ref -= dec64;
+        } else { LZ4_COPYSTEP(ref,op); }
+        cpy = op + length - (STEPSIZE-4);
+
+        if unlikely(cpy>oend-(COPYLENGTH)-(STEPSIZE-4))
+        {
+            if (cpy > oend-LASTLITERALS) goto _output_error;   // Error : last 5 bytes must be literals
+            LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));
+            while(op<cpy) *op++=*ref++;
+            op=cpy;
+            continue;
+        }
+        LZ4_WILDCOPY(ref, op, cpy);
+        op=cpy;   // correction
+    }
+
+    // end of decoding
+    if (endOnInput)
+        return (int) (((char*)op)-dest);     // Nb of output bytes decoded
+    else
+        return (int) (((char*)ip)-source);   // Nb of input bytes read
+
+    // Overflow error detected
 _output_error:
-    return (int) (-(((char*)ip)-source));
+    return (int) (-(((char*)ip)-source))-1;
 }

-int LZ4_uncompress_unknownOutputSize(
-                 char* source,
-                 char* dest,
-                 int isize,
-                 int maxOutputSize)
-{
-    // Local Variables
-    const BYTE* restrict ip = (const BYTE*) source;
-    const BYTE* const iend = ip + isize;
-    const BYTE* restrict ref;
-
-    BYTE* restrict op = (BYTE*) dest;
-    BYTE* const oend = op + maxOutputSize;
-    BYTE* cpy;
-
-    BYTE token;
-
-    U32 dec[4]={0, 3, 2, 3};
-    int len, length;
-
-
-    // Main Loop
-    while (ip<iend)
-    {
-        // get runlength
-        token = *ip++;
-        if ((length=(token>>ML_BITS)) == RUN_MASK) { for (;(len=*ip++)==255;length+=255){} length += len; }
-
-        // copy literals
-        cpy = op+length;
-        if (cpy>oend-COPYLENGTH)
-        {
-            if (cpy > oend) goto _output_error;
-            memcpy(op, ip, length);
-            op += length;
-            break;    // Necessarily EOF
-        }
-        LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;
-        if (ip>=iend) break;    // check EOF
-
-        // get offset
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-        ref = cpy - A16(ip); ip+=2;
-#else
-        { int delta = *ip++; delta += *ip++ << 8; ref = cpy - delta; }
-#endif
+int LZ4_decompress_safe(const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+    return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, noPrefix, full, 0);
+}

-        // get matchlength
-        if ((length=(token&ML_MASK)) == ML_MASK) { for (;(len=*ip++)==255;length+=255){} length += len; }
-
-        // copy repeated sequence
-        if (op-ref<COPYTOKEN)
-        {
-            *op++ = *ref++;
-            *op++ = *ref++;
-            *op++ = *ref++;
-            *op++ = *ref++;
-            ref -= dec[op-ref];
-            A32(op)=A32(ref);
-        } else { A32(op)=A32(ref); op+=4; ref+=4; }
-        cpy = op + length;
-        if (cpy>oend-COPYLENGTH)
-        {
-            if (cpy > oend) goto _output_error;
-            LZ4_WILDCOPY(ref, op, (oend-COPYLENGTH));
-            while(op<cpy) *op++=*ref++;
-            op=cpy;
-            if (op == oend) break;    // Check EOF (should never happen, since last 5 bytes are supposed to be literals)
-            continue;
-        }
-        LZ4_WILDCOPY(ref, op, cpy);
-        op=cpy;   // correction
-    }
-
-    // end of decoding
-    return (int) (((char*)op)-dest);
-
-    // write overflow error detected
-_output_error:
-    return (int) (-(((char*)ip)-source));
+int LZ4_decompress_fast(const char* source, char* dest, int outputSize)
+{
+    return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, noPrefix, full, 0);
+}
+
+int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+    return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, withPrefix, full, 0);
+}
+
+int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int outputSize)
+{
+    return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, withPrefix, full, 0);
+}
+
+int LZ4_decompress_safe_partial(const char* source, char* dest, int inputSize, int targetOutputSize, int maxOutputSize)
+{
+    return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, noPrefix, partial, targetOutputSize);
 }
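Finally, a sketch of the new partial-decoding entry point: LZ4_decompress_safe_partial() stops once roughly targetOutputSize bytes have been produced (it may emit slightly more, up to the end of the current sequence, but never beyond maxOutputSize). Illustrative only, with a hypothetical helper name:

    #include "lz4.h"

    // Hypothetical helper: decode just the beginning of a compressed block,
    // e.g. to inspect a header without paying for full decompression.
    // Returns the number of bytes actually written into 'out', or < 0 on error.
    int peek_prefix(const char* compressed, int csize, char* out, int outCapacity)
    {
        // Ask for at most 16 bytes; the call may return a little more than
        // requested, but never writes beyond out[outCapacity-1].
        return LZ4_decompress_safe_partial(compressed, out, csize, 16, outCapacity);
    }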