/*
* LZ4 - Fast LZ compression algorithm
* Header File
* Copyright (C) 2011-2013, Yann Collet.
* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You can contact the author at :
* - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
* - LZ4 source repository : http://code.google.com/p/lz4/
*/
#include <sys/byteorder.h>
#include <assert.h>
#include <string.h>
#include <umem.h>
static int real_LZ4_compress(const char *source, char *dest, int isize,
    int osize);
static int LZ4_uncompress_unknownOutputSize(const char *source, char *dest,
    int isize, int maxOutputSize);
/*ARGSUSED*/
size_t
lz4_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
{
    uint32_t bufsiz;
    char *dest = d_start;

    bufsiz = real_LZ4_compress(s_start, &dest[sizeof (bufsiz)], s_len,
        d_len - sizeof (bufsiz));

    /* Signal an error if the compression routine returned zero. */
    if (bufsiz == 0)
        return (s_len);

    /*
     * Encode the compressed buffer size at the start. We'll need this in
     * decompression to counter the effects of padding which might be
     * added to the compressed buffer and which, if unhandled, would
     * confuse the hell out of our decompression function.
     */
    *(uint32_t *)dest = BE_32(bufsiz);

    return (bufsiz + sizeof (bufsiz));
}
/*ARGSUSED*/
int
lz4_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
{
    const char *src = s_start;
    uint32_t bufsiz = BE_IN32(src);

    /* invalid compressed buffer size encoded at start */
    if (bufsiz + sizeof (bufsiz) > s_len)
        return (1);

    /*
     * Returns 0 on success (decompression function returned non-negative)
     * and non-zero on failure (decompression function returned negative).
     */
    return (LZ4_uncompress_unknownOutputSize(&src[sizeof (bufsiz)],
        d_start, bufsiz, d_len) < 0);
}
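/*
 * Illustrative sketch (not part of the original source): one way the two
 * wrappers above can be exercised. The buffer names and sizes are invented
 * for the example; the point it shows is that the first 4 bytes of the
 * compressed buffer carry the payload length in big-endian order, so
 * lz4_decompress() trusts that header rather than s_len, which may include
 * trailing padding.
 */
#if 0	/* example only */
static void
lz4_wrapper_example(void)
{
    static char src[4096];      /* all zeros: trivially compressible */
    char packed[4096];          /* 4-byte header + compressed payload */
    char unpacked[4096];
    size_t clen;

    /* d_len is kept a little below s_len so a real gain is required. */
    clen = lz4_compress(src, packed, sizeof (src), sizeof (src) - 8, 0);
    if (clen == sizeof (src))
        return;                 /* incompressible: caller keeps src as-is */

    /* Any padding after the first 'clen' bytes is simply ignored here. */
    if (lz4_decompress(packed, unpacked, sizeof (packed),
        sizeof (unpacked), 0) != 0)
        return;                 /* corrupt or truncated stream */
}
#endif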
/*
* LZ4 API Description:
*
* Simple Functions:
* real_LZ4_compress() :
* isize : is the input size. Max supported value is ~1.9GB
* return : the number of bytes written in buffer dest
* or 0 if the compression fails (if LZ4_COMPRESSMIN is set).
* note : destination buffer must be already allocated.
 * destination buffer must be sized to handle worst-case
 * situations (input data not compressible)
*
* Advanced Functions
*
* LZ4_uncompress_unknownOutputSize() :
* isize : is the input size, therefore the compressed size
* maxOutputSize : is the size of the destination buffer (which must be
* already allocated)
* return : the number of bytes decoded in the destination buffer
* (necessarily <= maxOutputSize). If the source stream is
* malformed, the function will stop decoding and return a
* negative result, indicating the byte position of the faulty
* instruction. This function never writes beyond dest +
* maxOutputSize, and is therefore protected against malicious
* data packets.
* note : Destination buffer must be already allocated.
*
* LZ4_compressCtx() :
* This function explicitly handles the CTX memory structure.
*
* ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
* by the caller (either on the stack or using kmem_zalloc). Passing NULL
* isn't valid.
*
* LZ4_compress64kCtx() :
* Same as LZ4_compressCtx(), but specific to small inputs (<64KB).
* isize *Must* be <64KB, otherwise the output will be corrupted.
*
* ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
* by the caller (either on the stack or using kmem_zalloc). Passing NULL
* isn't valid.
*/
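/*
 * Illustrative sketch (not part of the original source): driving the
 * functions documented above directly. The worst-case output bound used
 * here (isize + isize/255 + 16) is the usual LZ4 figure and is an
 * assumption rather than something quoted from this file; the buffers are
 * allocated with libumem since <umem.h> is already included.
 */
#if 0	/* example only */
static int
lz4_api_example(const char *src, int isize)
{
    int bound = isize + (isize / 255) + 16;     /* assumed worst case */
    char *dst = umem_alloc(bound, UMEM_DEFAULT);
    char *back = umem_alloc(isize, UMEM_DEFAULT);
    int clen, dlen, rc = -1;

    if (dst == NULL || back == NULL)
        goto out;

    /* 0 means the data was not compressed; keep the original instead. */
    clen = real_LZ4_compress(src, dst, isize, bound);
    if (clen == 0)
        goto out;

    /* A negative result marks the position of a malformed sequence. */
    dlen = LZ4_uncompress_unknownOutputSize(dst, back, clen, isize);
    if (dlen == isize && memcmp(src, back, isize) == 0)
        rc = 0;
out:
    if (dst != NULL)
        umem_free(dst, bound);
    if (back != NULL)
        umem_free(back, isize);
    return (rc);
}
#endif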
/*
* Tuning parameters
*/
/*
 * COMPRESSIONLEVEL: Increasing this value improves compression ratio.
 * Lowering this value reduces memory usage. Reduced memory usage
 * typically improves speed, due to cache effects (ex: L1 32KB for Intel,
 * L1 64KB for AMD). Memory usage formula: N -> 2^(N+2) bytes
 * (examples: 12 -> 16KB; 17 -> 512KB)
*/
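/*
 * Worked example of the formula above (added note, not in the original):
 * with N = 12 the table holds 2^12 four-byte entries, i.e. 2^(12+2) =
 * 16384 bytes (16 KB); with N = 17 it is 2^19 bytes = 512 KB.
 */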
/*
 * NOTCOMPRESSIBLE_CONFIRMATION: Decreasing this value will make the
 * algorithm skip data segments considered "incompressible" faster,
 * which may decrease the compression ratio dramatically but will be
 * faster on incompressible data. Increasing this value will make the
 * algorithm search longer before declaring a segment "incompressible",
 * which could improve compression a bit but will be slower on
 * incompressible data. The default value (6) is recommended.
*/
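/*
 * Sketch of the mechanism (added note, based on the stock LZ4 source;
 * details in this port may differ): the match finder counts failed
 * attempts and, once the count exceeds 2^NOTCOMPRESSIBLE_CONFIRMATION,
 * begins advancing through the input in progressively larger steps,
 * roughly
 *
 *	attempts = (1 << skipStrength) + 3;
 *	step = attempts++ >> skipStrength;
 *	forwardIp = ip + step;
 *
 * so long incompressible runs are crossed quickly, at the cost of
 * possibly missing some matches inside them.
 */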
/*
 * BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE: This will provide a performance
 * boost on big-endian CPUs, but the resulting compressed stream will be
 * incompatible with little-endian CPUs. You can set this option to 1 in
 * situations where the data will stay within a closed environment. This
 * option is useless on little-endian CPUs (such as x86).
*/
/* #define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1 */
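/*
 * Added note: concretely, setting this option makes the encoder emit the
 * 2-byte match offsets in native big-endian order instead of swapping them
 * to little-endian (see LZ4_WRITE_LITTLEENDIAN_16 below), so such a stream
 * can only be decoded on a machine of the same endianness.
 */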
/*
* CPU Feature Detection
*/
/* 32 or 64 bits ? */
#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || \
	defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || \
	defined(__LP64__) || defined(_LP64))
#define	LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
#endif
/*
* Limits the amount of stack space that the algorithm may consume to hold
* the compression lookup table. The value `9' here means we'll never use
* more than 2k of stack (see above for a description of COMPRESSIONLEVEL).
* If more memory is needed, it is allocated from the heap.
*/
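/*
 * Worked example (added note): with the limit at 9, the on-stack table is
 * at most 2^(9+2) = 2048 bytes; a COMPRESSIONLEVEL above that limit takes
 * the HEAPMODE paths below, which allocate the table from the heap instead.
 */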
/*
* Little Endian or Big Endian?
 * Note: overwrite the below #define if you know your architecture's endianness.
*/
#if (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || \
	defined(_BIG_ENDIAN))
#define	LZ4_BIG_ENDIAN 1
#else
/*
 * Little-endian assumed. PDP-endian and other very rare endian formats
 * are unsupported.
*/
#endif
/*
 * Unaligned memory access is automatically enabled for "common" CPUs,
 * such as x86. For other CPUs, the compiler will be more cautious and
 * insert extra code to ensure aligned access is respected. If you know
 * your target CPU supports unaligned memory access, you may want to
 * force this option manually to improve performance.
*/
#if defined(__ARM_FEATURE_UNALIGNED)
#define	LZ4_FORCE_UNALIGNED_ACCESS 1
#endif
#ifdef __sparc
#define LZ4_FORCE_SW_BITCOUNT
#endif
/*
* Compiler Options
*/
/* "restrict" is a known keyword */
#else
/* Disable restrict */
#define restrict
#endif
#ifdef _MSC_VER
/* Visual Studio */
/* Visual is not C99, but supports some kind of inline */
#define inline __forceinline
#if LZ4_ARCH64
/* For Visual 2005 */
#pragma intrinsic(_BitScanForward64)
#pragma intrinsic(_BitScanReverse64)
#else /* !LZ4_ARCH64 */
/* For Visual 2005 */
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse)
#endif /* !LZ4_ARCH64 */
#endif /* _MSC_VER */
#ifdef _MSC_VER	/* Visual Studio */
#define	lz4_bswap16(x) _byteswap_ushort(x)
#else /* !_MSC_VER */
#define	lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | \
	(((x) & 0xffu) << 8)))
#endif /* !_MSC_VER */
#else
#endif
/* Basic types */
#if defined(_MSC_VER)
/* Visual Studio does not support 'stdint' natively */
#else /* !defined(_MSC_VER) */
#endif /* !defined(_MSC_VER) */
#ifndef LZ4_FORCE_UNALIGNED_ACCESS
#pragma pack(1)
#endif
typedef struct _U16_S {
U16 v;
} U16_S;
typedef struct _U32_S {
U32 v;
} U32_S;
typedef struct _U64_S {
U64 v;
} U64_S;
#ifndef LZ4_FORCE_UNALIGNED_ACCESS
#pragma pack()
#endif
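/*
 * Added note: these one-member packed structs exist so that unaligned
 * 16/32/64-bit loads and stores can be expressed portably. In the stock
 * LZ4 source they are dereferenced through accessor macros along the
 * lines of
 *
 *	A16(x)	(((U16_S *)(x))->v)
 *	A32(x)	(((U32_S *)(x))->v)
 *	A64(x)	(((U64_S *)(x))->v)
 *
 * which the rest of the code uses for every potentially unaligned access.
 */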
/*
* Constants
*/
/*
* Defines if memory is allocated into the stack (local variable),
* or into the heap (kmem_alloc()).
*/
/*
* Architecture-specific macros
*/
#if LZ4_ARCH64
#else /* !LZ4_ARCH64 */
#endif /* !LZ4_ARCH64 */
#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
#define	LZ4_READ_LITTLEENDIAN_16(d, s, p) \
	{ U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
#define	LZ4_WRITE_LITTLEENDIAN_16(p, i) \
	{ U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p += 2; }
#else
#define	LZ4_READ_LITTLEENDIAN_16(d, s, p) { d = (s) - A16(p); }
#define	LZ4_WRITE_LITTLEENDIAN_16(p, v) { A16(p) = v; p += 2; }
#endif
/* Local structures */
struct refTables {
	HTYPE hashTable[HASHTABLESIZE];
};
/* Macros */
#define	LZ4_HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH * 8) - \
	HASH_LOG))
#define	LZ4_BLINDCOPY(s, d, l) { BYTE *e = (d) + l; LZ4_WILDCOPY(s, d, e); \
	d = e; }
/* Private functions */
#if LZ4_ARCH64
static int
LZ4_NbCommonBytes(register U64 val)
{
#if defined(LZ4_BIG_ENDIAN)
#if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
unsigned long r = 0;
_BitScanReverse64(&r, val);
return (int)(r >> 3);
#elif defined(__GNUC__) && (GCC_VERSION >= 304) && \
	!defined(LZ4_FORCE_SW_BITCOUNT)
	return (__builtin_clzll(val) >> 3);
#else
int r;
if (!(val >> 32)) {
r = 4;
} else {
r = 0;
val >>= 32;
}
if (!(val >> 16)) {
r += 2;
val >>= 8;
} else {
val >>= 24;
}
r += (!val);
return (r);
#endif
#else
#if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
unsigned long r = 0;
_BitScanForward64(&r, val);
return (int)(r >> 3);
#elif defined(__GNUC__) && (GCC_VERSION >= 304) && \
	!defined(LZ4_FORCE_SW_BITCOUNT)
	return (__builtin_ctzll(val) >> 3);
#else
static const int DeBruijnBytePos[64] =
{ 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5,
3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5,
5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4,
4, 5, 7, 2, 6, 5, 7, 6, 7, 7
};
	return DeBruijnBytePos[((U64) ((val & -val) * 0x0218A392CDABBD3F)) >>
	    58];
#endif
#endif
}
#else
static int
LZ4_NbCommonBytes(register U32 val)
{
#if defined(LZ4_BIG_ENDIAN)
#if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
unsigned long r = 0;
_BitScanReverse(&r, val);
return (int)(r >> 3);
#elif defined(__GNUC__) && (GCC_VERSION >= 304) && \
	!defined(LZ4_FORCE_SW_BITCOUNT)
	return (__builtin_clz(val) >> 3);
#else
int r;
if (!(val >> 16)) {
r = 2;
val >>= 8;
} else {
r = 0;
val >>= 24;
}
r += (!val);
return (r);
#endif
#else
#if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
unsigned long r = 0;
_BitScanForward(&r, val);
return (int)(r >> 3);
#elif defined(__GNUC__) && (GCC_VERSION >= 304) && \
	!defined(LZ4_FORCE_SW_BITCOUNT)
	return (__builtin_ctz(val) >> 3);
#else
static const int DeBruijnBytePos[32] = {
0, 0, 3, 0, 3, 1, 3, 0,
3, 2, 2, 1, 3, 2, 0, 1,
3, 3, 1, 2, 2, 2, 2, 0,
3, 1, 2, 0, 1, 0, 1, 1
};
	return DeBruijnBytePos[((U32) ((val & -(S32) val) * 0x077CB531U)) >>
	    27];
#endif
#endif
}
#endif
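/*
 * Added note: the LZ4_NbCommonBytes() variants above take the XOR of two
 * 4- or 8-byte loads and return how many of the first bytes (in memory
 * order) are identical; the match loops use this to extend a match a whole
 * word at a time. The count comes from the low-order end on little-endian
 * CPUs and from the high-order end on big-endian CPUs. For example, on a
 * little-endian CPU an XOR difference of 0x00ab0000 has 16 trailing zero
 * bits, so the helper returns 2: the first two bytes still matched.
 */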
/* Public functions */
/* Compression functions */
/*ARGSUSED*/
static int
LZ4_compressCtx(void *ctx, const char *source, char *dest, int isize,
    int osize)
{
#if HEAPMODE
#else
#endif
/* Init */
goto _last_literals;
/* First Byte */
ip++;
/* Main Loop */
for (;;) {
/* Find a match */
do {
goto _last_literals;
}
/* Catch up */
ip--;
ref--;
}
/* Encode Literal length */
/* Check output limit */
return (0);
*op++ = 255;
} else
/* Copy Literals */
/* Encode Offset */
/* Start Counting */
if (!diff) {
continue;
}
goto _endCount;
}
#if LZ4_ARCH64
ip += 4;
ref += 4;
}
#endif
ip += 2;
ref += 2;
}
ip++;
/* Encode MatchLength */
/* Check output limit */
return (0);
*op++ = 255;
*op++ = 255;
}
if (len > 254) {
len -= 255;
*op++ = 255;
}
} else
/* Test end of chunk */
break;
}
/* Fill table */
/* Test next position */
*token = 0;
goto _next_match;
}
/* Prepare next loop */
}
/* Encode Last Literals */
{
oend)
return (0);
*op++ = 255;
}
} else
}
/* End */
}
/* Note: this function is valid only if isize < LZ4_64KLIMIT */
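/*
 * Added note (based on the stock LZ4 source; this port may differ in
 * detail): keeping the input under 64 KB lets the match table store 16-bit
 * offsets from the start of the block instead of the wider entries that
 * LZ4_compressCtx() needs, shrinking the table's cache footprint. A larger
 * input would overflow those offsets, which is why the restriction above
 * is absolute.
 */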
/*ARGSUSED*/
static int
LZ4_compress64kCtx(void *ctx, const char *source, char *dest, int isize,
    int osize)
{
#if HEAPMODE
#else
#endif
/* Init */
goto _last_literals;
/* First Byte */
ip++;
/* Main Loop */
for (;;) {
/* Find a match */
do {
goto _last_literals;
}
/* Catch up */
ip--;
ref--;
}
/* Encode Literal length */
/* Check output limit */
return (0);
*op++ = 255;
} else
/* Copy Literals */
/* Encode Offset */
/* Start Counting */
if (!diff) {
continue;
}
goto _endCount;
}
#if LZ4_ARCH64
ip += 4;
ref += 4;
}
#endif
ip += 2;
ref += 2;
}
ip++;
/* Encode MatchLength */
/* Check output limit */
return (0);
*op++ = 255;
*op++ = 255;
}
if (len > 254) {
len -= 255;
*op++ = 255;
}
} else
/* Test end of chunk */
break;
}
/* Fill table */
/* Test next position */
*token = 0;
goto _next_match;
}
/* Prepare next loop */
}
/* Encode Last Literals */
{
oend)
return (0);
*op++ = 255;
} else
}
/* End */
}
static int
real_LZ4_compress(const char *source, char *dest, int isize, int osize)
{
#if HEAPMODE
int result;
/*
* out of kernel memory, gently fall through - this will disable
* compression in zio_compress_data
*/
return (0);
if (isize < LZ4_64KLIMIT)
else
return (result);
#else
if (isize < (int)LZ4_64KLIMIT)
#endif
}
/* Decompression functions */
/*
 * Note: The decoding function LZ4_uncompress_unknownOutputSize() is safe
 * against "buffer overflow" attacks.
 * LZ4_uncompress_unknownOutputSize() ensures that it will never read
 * outside of the input buffer. A corrupted input will produce an error
 * result, a negative int, indicating the position of the error within the
 * input stream.
*/
static int
LZ4_uncompress_unknownOutputSize(const char *source, char *dest, int isize,
    int maxOutputSize)
{
/* Local Variables */
#if LZ4_ARCH64
#endif
/* Main Loop */
unsigned token;
/* get runlength */
int s = 255;
s = *ip++;
length += s;
}
}
/* copy literals */
/* CORNER-CASE: cpy might overflow. */
goto _output_error;	/* cpy overflowed, bail! */
/* Error: writes beyond output buffer */
goto _output_error;
/*
 * Error: the LZ4 format requires all input to be
 * consumed at this stage
*/
goto _output_error;
/* Necessarily EOF, due to parsing restrictions */
break;
}
/* get offset */
ip += 2;
/*
* Error: offset creates reference outside of
* destination buffer
*/
goto _output_error;
/* get matchlength */
int s = *ip++;
length += s;
if (s == 255)
continue;
break;
}
}
/* copy repeated sequence */
#if LZ4_ARCH64
#else
const int dec64 = 0;
#endif
op += 4;
ref += 4;
} else {
}
/*
* Error: request to write outside of
* destination buffer
*/
goto _output_error;
/*
* Check EOF (should never happen, since
* last 5 bytes are supposed to be literals)
*/
goto _output_error;
continue;
}
}
/* end of decoding */
/* write overflow error detected */
}