mirror of https://github.com/Xevion/easy7zip.git
synced 2026-01-31 10:24:13 -06:00

Update lz4, lz5 and zstd to latest stable series

- added some more error checking in zstdmt also
+3 -3
@@ -2,11 +2,11 @@
 #define MY_VER_MINOR 04
 #define MY_VER_BUILD 0
 #define MY_VERSION_NUMBERS "16.04 ZS"
-#define MY_VERSION "16.04 ZS"
-#define MY_DATE "2016-11-20"
+#define MY_VERSION "16.04 ZS"
+#define MY_DATE "2016-12-22"
 #undef MY_COPYRIGHT
 #undef MY_VERSION_COPYRIGHT_DATE
-#define MY_AUTHOR_NAME "Igor Pavlov, Tino Reichardt"
+#define MY_AUTHOR_NAME "Igor Pavlov, Tino Reichardt"
 #define MY_COPYRIGHT_PD "Igor Pavlov : Public domain"
 #define MY_COPYRIGHT_CR "Copyright (c) 1999-2016 Igor Pavlov"

+3 -3
@@ -1,9 +1,9 @@
 #define MY_VER_MAJOR 1
 #define MY_VER_MINOR 1
 #define MY_VER_BUILD 0
-#define MY_VERSION_NUMBERS "1.1.0"
-#define MY_VERSION "1.1.0"
-#define MY_DATE "2016-11-20"
+#define MY_VERSION_NUMBERS "1.1.2"
+#define MY_VERSION "1.1.2"
+#define MY_DATE "2016-12-22"
 #undef MY_COPYRIGHT
 #undef MY_VERSION_COPYRIGHT_DATE
 #define MY_AUTHOR_NAME "Tino Reichardt"

+45 -47
@@ -141,22 +141,25 @@
 typedef uint32_t U32;
 typedef int32_t S32;
 typedef uint64_t U64;
+typedef uintptr_t uptrval;
 #else
 typedef unsigned char BYTE;
 typedef unsigned short U16;
 typedef unsigned int U32;
 typedef signed int S32;
 typedef unsigned long long U64;
+typedef size_t uptrval; /* generally true, except OpenVMS-64 */
 #endif

+#if defined(__x86_64__)
+typedef U64 reg_t; /* 64-bits in x32 mode */
+#else
+typedef size_t reg_t; /* 32-bits in x32 mode */
+#endif
+
 /*-************************************
 * Reading and writing into memory
 **************************************/
-#define STEPSIZE sizeof(size_t)
-
-static unsigned LZ4_64bits(void) { return sizeof(void*)==8; }
-
 static unsigned LZ4_isLittleEndian(void)
 {
 const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
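
Aside (not part of the patch): the reg_t/uptrval split matters on the x32 ABI, where sizeof(void*)==4 yet general-purpose registers are 64-bit, so sizeof(void*) is no longer a reliable proxy for register width. A minimal standalone sketch of the distinction:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Sketch: under gcc -mx32, sizeof(void*)==4 and sizeof(size_t)==4,
     * but __x86_64__ is defined and a register still holds 64 bits,
     * which is why lz4 now keys its wide memory accesses off reg_t. */
    #if defined(__x86_64__)
    typedef uint64_t my_reg_t;
    #else
    typedef size_t my_reg_t;
    #endif

    int main(void)
    {
        printf("void*: %zu bytes, size_t: %zu bytes, reg_t: %zu bytes\n",
               sizeof(void*), sizeof(size_t), sizeof(my_reg_t));
        return 0;
    }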
@@ -169,7 +172,7 @@ static unsigned LZ4_isLittleEndian(void)

 static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
 static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
-static size_t LZ4_read_ARCH(const void* memPtr) { return *(const size_t*) memPtr; }
+static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }

 static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
 static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }

@@ -178,11 +181,11 @@ static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }

 /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
 /* currently only defined for gcc and icc */
-typedef union { U16 u16; U32 u32; size_t uArch; } __attribute__((packed)) unalign;
+typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign;

 static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
 static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
-static size_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }
+static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }

 static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
 static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }

@@ -199,9 +202,9 @@ static U32 LZ4_read32(const void* memPtr)
 U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
 }

-static size_t LZ4_read_ARCH(const void* memPtr)
+static reg_t LZ4_read_ARCH(const void* memPtr)
 {
-size_t val; memcpy(&val, memPtr, sizeof(val)); return val;
+reg_t val; memcpy(&val, memPtr, sizeof(val)); return val;
 }

 static void LZ4_write16(void* memPtr, U16 value)
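
Aside (illustration only): all three LZ4_read_ARCH variants above implement the same contract — an unaligned load of one register-sized word — with different portability trade-offs. The memcpy form is the only one with fully defined behavior in standard C, and compilers typically lower it to a single load:

    #include <string.h>
    #include <stdint.h>

    /* Defined-behavior unaligned read (the pattern of the memcpy-based
     * variant, fixed to 32 bits here for simplicity). */
    static uint32_t read32_unaligned(const void* p)
    {
        uint32_t v;
        memcpy(&v, p, sizeof v);
        return v;
    }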
@@ -250,12 +253,6 @@ static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
 const BYTE* s = (const BYTE*)srcPtr;
 BYTE* const e = (BYTE*)dstEnd;

-#if 0
-const size_t l2 = 8 - (((size_t)d) & (sizeof(void*)-1));
-LZ4_copy8(d,s); if (d>e-9) return;
-d+=l2; s+=l2;
-#endif /* join to align */
-
 do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
 }

@@ -292,10 +289,10 @@ static const int LZ4_minLength = (MFLIMIT+1);
 /*-************************************
 * Common functions
 **************************************/
-static unsigned LZ4_NbCommonBytes (register size_t val)
+static unsigned LZ4_NbCommonBytes (register reg_t val)
 {
 if (LZ4_isLittleEndian()) {
-if (LZ4_64bits()) {
+if (sizeof(val)==8) {
 # if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
 unsigned long r = 0;
 _BitScanForward64( &r, (U64)val );

@@ -319,7 +316,7 @@ static unsigned LZ4_NbCommonBytes (register size_t val)
 # endif
 }
 } else /* Big Endian CPU */ {
-if (LZ4_64bits()) {
+if (sizeof(val)==8) {
 # if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
 unsigned long r = 0;
 _BitScanReverse64( &r, val );

@@ -350,18 +347,19 @@ static unsigned LZ4_NbCommonBytes (register size_t val)
 }
 }

+#define STEPSIZE sizeof(reg_t)
 static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
 {
 const BYTE* const pStart = pIn;

 while (likely(pIn<pInLimit-(STEPSIZE-1))) {
-size_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
 if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
 pIn += LZ4_NbCommonBytes(diff);
 return (unsigned)(pIn - pStart);
 }

-if (LZ4_64bits()) if ((pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
+if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
 if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
 if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
 return (unsigned)(pIn - pStart);
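
Aside (illustration only): LZ4_count() compares STEPSIZE bytes at a time; when the XOR of the two words is non-zero, the number of equal leading bytes on a little-endian machine is the count of trailing zero bits divided by 8. A standalone sketch using a GCC/Clang builtin (lz4 itself also carries MSVC and software fallbacks):

    #include <stdint.h>

    static unsigned first_mismatch_byte_le(uint64_t a, uint64_t b)
    {
        uint64_t const diff = a ^ b;
        if (diff == 0) return 8;                       /* all 8 bytes equal */
        return (unsigned)(__builtin_ctzll(diff) >> 3); /* trailing zeros / 8 */
    }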
@@ -419,9 +417,9 @@ static U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
 return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
 }

-FORCE_INLINE U32 LZ4_hashPosition(const void* p, tableType_t const tableType)
+FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
 {
-if ((LZ4_64bits()) && (tableType == byU32)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
+if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
 return LZ4_hash4(LZ4_read32(p), tableType);
 }

@@ -458,7 +456,7 @@ FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableTy
 /** LZ4_compress_generic() :
 inlined, to ensure branches are decided at compilation time */
 FORCE_INLINE int LZ4_compress_generic(
-LZ4_stream_t_internal* const dictPtr,
+LZ4_stream_t_internal* const cctx,
 const char* const source,
 char* const dest,
 const int inputSize,

@@ -472,10 +470,10 @@ FORCE_INLINE int LZ4_compress_generic(
 const BYTE* ip = (const BYTE*) source;
 const BYTE* base;
 const BYTE* lowLimit;
-const BYTE* const lowRefLimit = ip - dictPtr->dictSize;
-const BYTE* const dictionary = dictPtr->dictionary;
-const BYTE* const dictEnd = dictionary + dictPtr->dictSize;
-const size_t dictDelta = dictEnd - (const BYTE*)source;
+const BYTE* const lowRefLimit = ip - cctx->dictSize;
+const BYTE* const dictionary = cctx->dictionary;
+const BYTE* const dictEnd = dictionary + cctx->dictSize;
+const ptrdiff_t dictDelta = dictEnd - (const BYTE*)source;
 const BYTE* anchor = (const BYTE*) source;
 const BYTE* const iend = ip + inputSize;
 const BYTE* const mflimit = iend - MFLIMIT;
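
Aside (illustration only): LZ4_hash5() is multiplicative hashing — multiply the useful input bits by a large odd constant and keep the top hashLog bits of the product, which mixes low input bits into the high output bits. A sketch with an assumed constant (2^64 divided by the golden ratio, a common choice; not necessarily lz4's exact value):

    #include <stdint.h>

    static uint32_t hash_mult64(uint64_t sequence, unsigned hashLog)
    {
        const uint64_t k = 11400714819323198485ULL; /* assumed: 2^64/phi, odd */
        return (uint32_t)((sequence * k) >> (64 - hashLog));
    }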
@@ -485,7 +483,6 @@ FORCE_INLINE int LZ4_compress_generic(
 BYTE* const olimit = op + maxOutputSize;

 U32 forwardH;
-size_t refDelta=0;

 /* Init conditions */
 if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported inputSize, too large (or negative) */

@@ -497,11 +494,11 @@ FORCE_INLINE int LZ4_compress_generic(
 lowLimit = (const BYTE*)source;
 break;
 case withPrefix64k:
-base = (const BYTE*)source - dictPtr->currentOffset;
-lowLimit = (const BYTE*)source - dictPtr->dictSize;
+base = (const BYTE*)source - cctx->currentOffset;
+lowLimit = (const BYTE*)source - cctx->dictSize;
 break;
 case usingExtDict:
-base = (const BYTE*)source - dictPtr->currentOffset;
+base = (const BYTE*)source - cctx->currentOffset;
 lowLimit = (const BYTE*)source;
 break;
 }

@@ -509,11 +506,12 @@ FORCE_INLINE int LZ4_compress_generic(
 if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */

 /* First Byte */
-LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
+LZ4_putPosition(ip, cctx->hashTable, tableType, base);
 ip++; forwardH = LZ4_hashPosition(ip, tableType);

 /* Main Loop */
 for ( ; ; ) {
+ptrdiff_t refDelta = 0;
 const BYTE* match;
 BYTE* token;

@@ -529,7 +527,7 @@ FORCE_INLINE int LZ4_compress_generic(

 if (unlikely(forwardIp > mflimit)) goto _last_literals;

-match = LZ4_getPositionOnHash(h, dictPtr->hashTable, tableType, base);
+match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
 if (dict==usingExtDict) {
 if (match < (const BYTE*)source) {
 refDelta = dictDelta;

@@ -539,7 +537,7 @@ FORCE_INLINE int LZ4_compress_generic(
 lowLimit = (const BYTE*)source;
 } }
 forwardH = LZ4_hashPosition(forwardIp, tableType);
-LZ4_putPositionOnHash(ip, h, dictPtr->hashTable, tableType, base);
+LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);

 } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0)
 || ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
@@ -612,10 +610,10 @@ _next_match:
 if (ip > mflimit) break;

 /* Fill table */
-LZ4_putPosition(ip-2, dictPtr->hashTable, tableType, base);
+LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);

 /* Test next position */
-match = LZ4_getPosition(ip, dictPtr->hashTable, tableType, base);
+match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
 if (dict==usingExtDict) {
 if (match < (const BYTE*)source) {
 refDelta = dictDelta;

@@ -624,7 +622,7 @@ _next_match:
 refDelta = 0;
 lowLimit = (const BYTE*)source;
 } }
-LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
+LZ4_putPosition(ip, cctx->hashTable, tableType, base);
 if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1)
 && (match+MAX_DISTANCE>=ip)
 && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) )

@@ -667,12 +665,12 @@ int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int
 if (inputSize < LZ4_64Klimit)
 return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
 else
-return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue, acceleration);
+return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
 } else {
 if (inputSize < LZ4_64Klimit)
 return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
 else
-return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue, acceleration);
+return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
 }
 }
@@ -711,7 +709,7 @@ int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int m
 if (inputSize < LZ4_64Klimit)
 return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
 else
-return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue, acceleration);
+return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, sizeof(void*)==8 ? byU32 : byPtr, noDict, noDictIssue, acceleration);
 }

@@ -885,7 +883,7 @@ static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src,
 if (*srcSizePtr < LZ4_64Klimit)
 return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, byU16);
 else
-return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, LZ4_64bits() ? byU32 : byPtr);
+return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, sizeof(void*)==8 ? byU32 : byPtr);
 }
 }

@@ -933,7 +931,7 @@ int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
 }

-#define HASH_UNIT sizeof(size_t)
+#define HASH_UNIT sizeof(reg_t)
 int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
 {
 LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;

@@ -969,7 +967,7 @@ int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
 static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
 {
 if ((LZ4_dict->currentOffset > 0x80000000) ||
-((size_t)LZ4_dict->currentOffset > (size_t)src)) { /* address space overflow */
+((uptrval)LZ4_dict->currentOffset > (uptrval)src)) { /* address space overflow */
 /* rescale hash table */
 U32 const delta = LZ4_dict->currentOffset - 64 KB;
 const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
@@ -1139,8 +1137,8 @@ FORCE_INLINE int LZ4_decompress_generic(
 s = *ip++;
 length += s;
 } while ( likely(endOnInput ? ip<iend-RUN_MASK : 1) & (s==255) );
-if ((safeDecode) && unlikely((size_t)(op+length)<(size_t)(op))) goto _output_error; /* overflow detection */
-if ((safeDecode) && unlikely((size_t)(ip+length)<(size_t)(ip))) goto _output_error; /* overflow detection */
+if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) goto _output_error; /* overflow detection */
+if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) goto _output_error; /* overflow detection */
 }

 /* copy literals */
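
Aside (illustration only): the rewritten checks are not cosmetic. Computing `op + length` past the end of an object is undefined behavior in C, so a compiler may legally delete the old wraparound test; casting to the unsigned integer type uptrval first and adding there keeps the test well-defined:

    #include <stdint.h>
    #include <stddef.h>

    /* Defined-behavior form: unsigned integer arithmetic wraps, so the
     * comparison survives optimization; the pointer itself never overflows. */
    static int add_would_wrap(const void* p, size_t length)
    {
        uintptr_t const base = (uintptr_t)p;
        return base + length < base;
    }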
@@ -1178,7 +1176,7 @@ FORCE_INLINE int LZ4_decompress_generic(
 if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
 length += s;
 } while (s==255);
-if ((safeDecode) && unlikely((size_t)(op+length)<(size_t)op)) goto _output_error; /* overflow detection */
+if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */
 }
 length += MINMATCH;

@@ -1430,7 +1428,7 @@ static void LZ4_init(LZ4_stream_t* lz4ds, BYTE* base)

 int LZ4_resetStreamState(void* state, char* inputBuffer)
 {
-if ((((size_t)state) & 3) != 0) return 1; /* Error : pointer is not aligned on 4-bytes boundary */
+if ((((uptrval)state) & 3) != 0) return 1; /* Error : pointer is not aligned on 4-bytes boundary */
 LZ4_init((LZ4_stream_t*)state, (BYTE*)inputBuffer);
 return 0;
 }

+6 -3
@@ -85,7 +85,7 @@ extern "C" {
 /*========== Version =========== */
 #define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */
 #define LZ4_VERSION_MINOR 7 /* for new (non-breaking) interface capabilities */
-#define LZ4_VERSION_RELEASE 3 /* for tweaks, bug-fixes, or development */
+#define LZ4_VERSION_RELEASE 4 /* for tweaks, bug-fixes, or development */

 #define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE)

@@ -412,9 +412,12 @@ union LZ4_streamDecode_u {
 #ifdef LZ4_DISABLE_DEPRECATE_WARNINGS
 # define LZ4_DEPRECATED(message) /* disable deprecation warnings */
 #else
-# if (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__)
+# define LZ4_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
+# define LZ4_DEPRECATED(message) [[deprecated(message)]]
+# elif (LZ4_GCC_VERSION >= 405) || defined(__clang__)
 # define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
-# elif defined(__GNUC__) && (__GNUC__ >= 3)
+# elif (LZ4_GCC_VERSION >= 301)
 # define LZ4_DEPRECATED(message) __attribute__((deprecated))
 # elif defined(_MSC_VER)
 # define LZ4_DEPRECATED(message) __declspec(deprecated(message))

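Aside (hypothetical usage, for illustration): the macro is consumed by prefixing a prototype, and expands to the C++14 attribute, the GNU attribute, or MSVC's __declspec depending on which branch above is taken:

    /* Hypothetical deprecated prototype: */
    LZ4_DEPRECATED("use LZ4_compress_default() instead")
    int LZ4_compress(const char* source, char* dest, int sourceSize);
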
+9 -1
@@ -1,7 +1,7 @@
 /*
 LZ4 auto-framing library
 Header File
-Copyright (C) 2011-2015, Yann Collet.
+Copyright (C) 2011-2016, Yann Collet.
 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

 Redistribution and use in source and binary forms, with or without

@@ -130,6 +130,10 @@ typedef LZ4F_frameType_t frameType_t;
 typedef LZ4F_contentChecksum_t contentChecksum_t;
 #endif

+/* LZ4F_frameInfo_t :
+ * makes it possible to supply detailed frame parameters to the stream interface.
+ * It's not required to set all fields, as long as the structure was initially memset() to zero.
+ * All reserved fields must be set to zero. */
 typedef struct {
 LZ4F_blockSizeID_t blockSizeID; /* max64KB, max256KB, max1MB, max4MB ; 0 == default */
 LZ4F_blockMode_t blockMode; /* blockLinked, blockIndependent ; 0 == default */

@@ -139,6 +143,10 @@ typedef struct {
 unsigned reserved[2]; /* must be zero for forward compatibility */
 } LZ4F_frameInfo_t;

+/* LZ4F_preferences_t :
+ * makes it possible to supply detailed compression parameters to the stream interface.
+ * It's not required to set all fields, as long as the structure was initially memset() to zero.
+ * All reserved fields must be set to zero. */
 typedef struct {
 LZ4F_frameInfo_t frameInfo;
 int compressionLevel; /* 0 == default (fast mode); values above 16 count as 16; values below 0 count as 0 */

@@ -1,7 +1,7 @@
 /*
 LZ4 auto-framing library
 Header File for static linking only
-Copyright (C) 2011-2015, Yann Collet.
+Copyright (C) 2011-2016, Yann Collet.

 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

+1 -1
@@ -1,6 +1,6 @@
 /*
 LZ4 HC - High Compression Mode of LZ4
-Copyright (C) 2011-2015, Yann Collet.
+Copyright (C) 2011-2016, Yann Collet.

 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

+4 -4
@@ -266,7 +266,7 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si
 bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
 bitD->bitContainer = MEM_readLEST(bitD->ptr);
 { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
-bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
+bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */
 if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
 } else {
 bitD->start = (const char*)srcBuffer;

@@ -298,7 +298,7 @@ MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start)

 MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
 {
-#if defined(__BMI__) && defined(__GNUC__) /* experimental */
+#if defined(__BMI__) && defined(__GNUC__) && __GNUC__*1000+__GNUC_MINOR__ >= 4008 /* experimental */
 # if defined(__x86_64__)
 if (sizeof(bitContainer)==8)
 return _bextr_u64(bitContainer, start, nbBits);

@@ -367,10 +367,10 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
 }

 /*! BIT_reloadDStream() :
-* Refill `BIT_DStream_t` from src buffer previously defined (see BIT_initDStream() ).
+* Refill `bitD` from buffer previously set in BIT_initDStream() .
 * This function is safe, it guarantees it will not read beyond src buffer.
 * @return : status of `BIT_DStream_t` internal register.
-if status == unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */
+if status == BIT_DStream_unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */
 MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
 {
 if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should not happen => corruption detected */
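
Aside (usage sketch, assuming zstd's bitstream.h is in scope and a per-symbol decode step exists): the documented contract translates into the canonical consumer loop — keep decoding while reloads report BIT_DStream_unfinished, then drain:

    /* decode_symbol() is hypothetical; error handling elided. */
    static BYTE decode_symbol(BIT_DStream_t* bitD);

    static size_t drain_bitstream(BIT_DStream_t* bitD, BYTE* op, BYTE* oend)
    {
        BYTE* const ostart = op;
        while (BIT_reloadDStream(bitD) == BIT_DStream_unfinished && op < oend)
            *op++ = decode_symbol(bitD);
        return (size_t)(op - ostart);
    }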
@@ -159,6 +159,7 @@ size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* t
 /*! HUF_readStats() :
 Read compact Huffman tree, saved by HUF_writeCTable().
 `huffWeight` is destination buffer.
+`rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
 @return : size read from `src` , or an error Code .
 Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
 */

@@ -187,16 +188,17 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
 huffWeight[n+1] = ip[n/2] & 15;
 } } }
 else { /* header compressed with FSE (normal case) */
+FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)]; /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */
 if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
-oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */
+oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6); /* max (hwSize-1) values decoded, as last one is implied */
 if (FSE_isError(oSize)) return oSize;
 }

 /* collect weight stats */
-memset(rankStats, 0, (HUF_TABLELOG_ABSOLUTEMAX + 1) * sizeof(U32));
+memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
 weightTotal = 0;
 { U32 n; for (n=0; n<oSize; n++) {
-if (huffWeight[n] >= HUF_TABLELOG_ABSOLUTEMAX) return ERROR(corruption_detected);
+if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
 rankStats[huffWeight[n]]++;
 weightTotal += (1 << huffWeight[n]) >> 1;
 } }

@@ -204,7 +206,7 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,

 /* get last non-null symbol weight (implied, total must be 2^n) */
 { U32 const tableLog = BIT_highbit32(weightTotal) + 1;
-if (tableLog > HUF_TABLELOG_ABSOLUTEMAX) return ERROR(corruption_detected);
+if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
 *tableLogPtr = tableLog;
 /* determine last weight */
 { U32 const total = 1 << tableLog;
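
Aside (illustration only): the tightened checks enforce that decoded weights fit the statically sized rankStats table. The implied-weight logic works because weightTotal sums 2^(w-1) over all listed symbols, so the table log is highbit(weightTotal)+1 and the last weight is whatever brings the total to an exact power of two. A standalone sketch:

    #include <stdint.h>
    #include <stddef.h>

    static unsigned highbit32(uint32_t v)   /* floor(log2(v)), v != 0 */
    {
        unsigned r = 0;
        while (v >>= 1) r++;
        return r;
    }

    /* Table log implied by explicit weights w[0..n-1] (0 = unused symbol). */
    static unsigned implied_table_log(const uint8_t* w, size_t n)
    {
        uint32_t total = 0;
        size_t i;
        for (i = 0; i < n; i++)
            if (w[i]) total += (uint32_t)1 << (w[i] - 1);
        return total ? highbit32(total) + 1 : 0;
    }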
+46 -12
@@ -286,7 +286,7 @@ If there is an error, the function will return an error code, which can be teste
 #define FSE_BLOCKBOUND(size) (size + (size>>7))
 #define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */

-/* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using below macros */
+/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
 #define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
 #define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))

@@ -294,37 +294,72 @@ If there is an error, the function will return an error code, which can be teste
 /* *****************************************
 * FSE advanced API
 *******************************************/
+/* FSE_count_wksp() :
+ * Same as FSE_count(), but using an externally provided scratch buffer.
+ * `workSpace` size must be table of >= `1024` unsigned
+ */
+size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize, unsigned* workSpace);
+
+/** FSE_countFast() :
+ * same as FSE_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr
+ */
 size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
-/**< same as FSE_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr */
+
+/* FSE_countFast_wksp() :
+ * Same as FSE_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of minimum `1024` unsigned
+ */
+size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* workSpace);
+
+/*! FSE_count_simple
+ * Same as FSE_countFast(), but does not use any additional memory (not even on stack).
+ * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`).
+ */
+size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);


 unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
+/**< same as FSE_optimalTableLog(), which used `minus==2` */

+/* FSE_compress_wksp() :
+ * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
+ * FSE_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
+ */
+#define FSE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + (1<<((maxTableLog>2)?(maxTableLog-2):0)) )
+size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);

 size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
-/**< build a fake FSE_CTable, designed to not compress an input, where each symbol uses nbBits */
+/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */

 size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
 /**< build a fake FSE_CTable, designed to compress always the same symbolValue */

+/* FSE_buildCTable_wksp() :
+ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
+ * `wkspSize` must be >= `(1<<tableLog)`.
+ */
+size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);

 size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
-/**< build a fake FSE_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
+/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */

 size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
 /**< build a fake FSE_DTable, designed to always generate the same symbolValue */

+size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog);
+/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */


 /* *****************************************
 * FSE symbol compression API
 *******************************************/
 /*!
 This API consists of small unitary functions, which highly benefit from being inlined.
-You will want to enable link-time-optimization to ensure these functions are properly inlined in your binary.
-Visual seems to do it automatically.
-For gcc or clang, you'll need to add -flto flag at compilation and linking stages.
-If none of these solutions is applicable, include "fse.c" directly.
+Hence their body are included in next section.
 */
-typedef struct
-{
+typedef struct {
 ptrdiff_t value;
 const void* stateTable;
 const void* symbolTT;
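
Aside (usage sketch under the declarations above, with worst-case sizes from fse.h's static-linking section; treat as an assumption-laden illustration, not part of the patch): the workspace contract pairs FSE_WKSP_SIZE_U32() with FSE_compress_wksp():

    /* Static workspace sized for the maximum table; FSE_MAX_TABLELOG and
     * FSE_MAX_SYMBOL_VALUE are fse.h constants (assumed visible here). */
    static FSE_CTable wksp[FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];

    static size_t compress_with_wksp(void* dst, size_t dstSize,
                                     const void* src, size_t srcSize)
    {
        return FSE_compress_wksp(dst, dstSize, src, srcSize,
                                 FSE_MAX_SYMBOL_VALUE, FSE_MAX_TABLELOG,
                                 wksp, sizeof(wksp));
    }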
@@ -384,8 +419,7 @@ If there is an error, it returns an errorCode (which can be tested using FSE_isE
 /* *****************************************
 * FSE symbol decompression API
 *******************************************/
-typedef struct
-{
+typedef struct {
 size_t state;
 const void* table; /* precise table may vary, depending on U16 */
 } FSE_DState_t;

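Aside (usage sketch): the decompression counterpart takes a caller-owned DTable sized with FSE_DTABLE_SIZE_U32(maxLog), exactly as the new FSE_decompress() wrapper in fse_decompress.c does further below. MY_MAX_LOG here is an assumed cap chosen by the caller:

    #define MY_MAX_LOG 12   /* must cover the encoded tableLog */

    static size_t decompress_with_wksp(void* dst, size_t dstCapacity,
                                       const void* cSrc, size_t cSrcSize)
    {
        FSE_DTable dt[FSE_DTABLE_SIZE_U32(MY_MAX_LOG)];
        return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, MY_MAX_LOG);
    }
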
+122 -82
@@ -70,12 +70,6 @@
 #define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */


-/* **************************************************************
-* Complex types
-****************************************************************/
-typedef U32 CTable_max_t[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
-
-
 /* **************************************************************
 * Templates
 ****************************************************************/

@@ -100,7 +94,13 @@ typedef U32 CTable_max_t[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VA

 /* Function templates */
-size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+
+/* FSE_buildCTable_wksp() :
+ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
+ * wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
+ * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
+ */
+size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
 {
 U32 const tableSize = 1 << tableLog;
 U32 const tableMask = tableSize - 1;

@@ -111,10 +111,11 @@ size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned
 U32 const step = FSE_TABLESTEP(tableSize);
 U32 cumul[FSE_MAX_SYMBOL_VALUE+2];

-FSE_FUNCTION_TYPE tableSymbol[FSE_MAX_TABLESIZE]; /* memset() is not necessary, even if static analyzer complain about it */
+FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)workSpace;
 U32 highThreshold = tableSize-1;

 /* CTable header */
+if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge);
 tableU16[-2] = (U16) tableLog;
 tableU16[-1] = (U16) maxSymbolValue;

@@ -181,6 +182,13 @@ size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned
 }


+size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+FSE_FUNCTION_TYPE tableSymbol[FSE_MAX_TABLESIZE]; /* memset() is not necessary, even if static analyzer complain about it */
+return FSE_buildCTable_wksp(ct, normalizedCounter, maxSymbolValue, tableLog, tableSymbol, sizeof(tableSymbol));
+}
+
+
 #ifndef FSE_COMMONDEFS_ONLY
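
Aside (usage sketch): the wrapper above shows the sizing rule in action — the scratch buffer holds one FSE_FUNCTION_TYPE per table cell, so (1<<tableLog) elements suffice. A hypothetical caller with a compile-time tableLog can size it exactly:

    #define MY_TABLELOG 10

    static size_t build_ct(FSE_CTable* ct, const short* norm, unsigned maxSym)
    {
        /* assumption: FSE_FUNCTION_TYPE is a byte-sized symbol type in the
         * default template instantiation */
        unsigned char scratch[1 << MY_TABLELOG];
        return FSE_buildCTable_wksp(ct, norm, maxSym, MY_TABLELOG,
                                    scratch, sizeof(scratch));
    }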
@@ -189,7 +197,7 @@ size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned
 ****************************************************************/
 size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
 {
-size_t maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
+size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
 return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
 }

@@ -300,21 +308,20 @@ size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalized
 * Counting histogram
 ****************************************************************/
 /*! FSE_count_simple
-This function just counts byte values within `src`,
-and store the histogram into table `count`.
-This function is unsafe : it doesn't check that all values within `src` can fit into `count`.
+This function counts byte values within `src`, and store the histogram into table `count`.
+It doesn't use any additional memory.
+But this function is unsafe : it doesn't check that all values within `src` can fit into `count`.
 For this reason, prefer using a table `count` with 256 elements.
 @return : count of most numerous element
 */
-static size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
-const void* src, size_t srcSize)
+size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize)
 {
 const BYTE* ip = (const BYTE*)src;
 const BYTE* const end = ip + srcSize;
 unsigned maxSymbolValue = *maxSymbolValuePtr;
 unsigned max=0;

-
 memset(count, 0, (maxSymbolValue+1)*sizeof(*count));
 if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }

@@ -329,20 +336,24 @@ static size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
 }


-static size_t FSE_count_parallel(unsigned* count, unsigned* maxSymbolValuePtr,
+/* FSE_count_parallel_wksp() :
+ * Same as FSE_count_parallel(), but using an externally provided scratch buffer.
+ * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`` */
+static size_t FSE_count_parallel_wksp(
+ unsigned* count, unsigned* maxSymbolValuePtr,
 const void* source, size_t sourceSize,
-unsigned checkMax)
+ unsigned checkMax, unsigned* const workSpace)
 {
 const BYTE* ip = (const BYTE*)source;
 const BYTE* const iend = ip+sourceSize;
 unsigned maxSymbolValue = *maxSymbolValuePtr;
 unsigned max=0;
+U32* const Counting1 = workSpace;
+U32* const Counting2 = Counting1 + 256;
+U32* const Counting3 = Counting2 + 256;
+U32* const Counting4 = Counting3 + 256;

-U32 Counting1[256] = { 0 };
-U32 Counting2[256] = { 0 };
-U32 Counting3[256] = { 0 };
-U32 Counting4[256] = { 0 };
+memset(Counting1, 0, 4*256*sizeof(unsigned));

 /* safety checks */
 if (!sourceSize) {

@@ -388,31 +399,51 @@ static size_t FSE_count_parallel(unsigned* count, unsigned* maxSymbolValuePtr,
 if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
 } }

-{ U32 s; for (s=0; s<=maxSymbolValue; s++) {
-count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
-if (count[s] > max) max = count[s];
-}}
+{ U32 s; for (s=0; s<=maxSymbolValue; s++) {
+count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
+if (count[s] > max) max = count[s];
+} }

 while (!count[maxSymbolValue]) maxSymbolValue--;
 *maxSymbolValuePtr = maxSymbolValue;
 return (size_t)max;
 }

+/* FSE_countFast_wksp() :
+ * Same as FSE_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` size must be table of >= `1024` unsigned */
+size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize, unsigned* workSpace)
+{
+if (sourceSize < 1500) return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize);
+return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace);
+}
+
 /* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
 size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
 const void* source, size_t sourceSize)
 {
-if (sourceSize < 1500) return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize);
-return FSE_count_parallel(count, maxSymbolValuePtr, source, sourceSize, 0);
+unsigned tmpCounters[1024];
+return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters);
 }

+/* FSE_count_wksp() :
+ * Same as FSE_count(), but using an externally provided scratch buffer.
+ * `workSpace` size must be table of >= `1024` unsigned */
+size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize, unsigned* workSpace)
+{
+if (*maxSymbolValuePtr < 255)
+return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace);
+*maxSymbolValuePtr = 255;
+return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace);
+}
+
 size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr,
-const void* source, size_t sourceSize)
+ const void* src, size_t srcSize)
 {
-if (*maxSymbolValuePtr <255)
-return FSE_count_parallel(count, maxSymbolValuePtr, source, sourceSize, 1);
-*maxSymbolValuePtr = 255;
-return FSE_countFast(count, maxSymbolValuePtr, source, sourceSize);
+unsigned tmpCounters[1024];
+return FSE_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters);
 }
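
Aside (illustration only): the parallel counter's trick is four independent 256-entry lanes, so consecutive bytes never contend on the same counter's store-to-load dependency; the lanes are summed at the end. A condensed standalone sketch of the idea:

    #include <string.h>
    #include <stddef.h>

    static void hist4_sketch(unsigned count[256],
                             const unsigned char* ip, size_t n,
                             unsigned* workSpace /* >= 1024 unsigned */)
    {
        unsigned* const c1 = workSpace;
        unsigned* const c2 = workSpace + 256;
        unsigned* const c3 = workSpace + 512;
        unsigned* const c4 = workSpace + 768;
        size_t i;
        memset(workSpace, 0, 1024 * sizeof(unsigned));
        for (i = 0; i + 4 <= n; i += 4) {   /* 4 independent dependency chains */
            c1[ip[i]]++; c2[ip[i+1]]++; c3[ip[i+2]]++; c4[ip[i+3]]++;
        }
        for (; i < n; i++) c1[ip[i]]++;     /* tail */
        for (i = 0; i < 256; i++) count[i] = c1[i] + c2[i] + c3[i] + c4[i];
    }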
@@ -428,14 +459,10 @@ size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr,
 `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];` // This size is variable
 Allocation is manual (C standard does not support variable-size structures).
 */
-
 size_t FSE_sizeof_CTable (unsigned maxSymbolValue, unsigned tableLog)
 {
-size_t size;
-FSE_STATIC_ASSERT((size_t)FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)*4 >= sizeof(CTable_max_t)); /* A compilation error here means FSE_CTABLE_SIZE_U32 is not large enough */
-if (tableLog > FSE_MAX_TABLELOG) return ERROR(GENERIC);
-size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
-return size;
+if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+return FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
 }

 FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)

@@ -486,7 +513,7 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
 U32 ToDistribute;

 /* Init */
-U32 lowThreshold = (U32)(total >> tableLog);
+U32 const lowThreshold = (U32)(total >> tableLog);
 U32 lowOne = (U32)((total * 3) >> (tableLog + 1));

 for (s=0; s<=maxSymbolValue; s++) {

@@ -534,17 +561,16 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
 return 0;
 }

-{
-U64 const vStepLog = 62 - tableLog;
+{ U64 const vStepLog = 62 - tableLog;
 U64 const mid = (1ULL << (vStepLog-1)) - 1;
 U64 const rStep = ((((U64)1<<vStepLog) * ToDistribute) + mid) / total; /* scale on remaining */
 U64 tmpTotal = mid;
 for (s=0; s<=maxSymbolValue; s++) {
 if (norm[s]==-2) {
-U64 end = tmpTotal + (count[s] * rStep);
-U32 sStart = (U32)(tmpTotal >> vStepLog);
-U32 sEnd = (U32)(end >> vStepLog);
-U32 weight = sEnd - sStart;
+U64 const end = tmpTotal + (count[s] * rStep);
+U32 const sStart = (U32)(tmpTotal >> vStepLog);
+U32 const sEnd = (U32)(end >> vStepLog);
+U32 const weight = sEnd - sStart;
 if (weight < 1)
 return ERROR(GENERIC);
 norm[s] = (short)weight;

@@ -566,7 +592,6 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
 if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */

 { U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
-
 U64 const scale = 62 - tableLog;
 U64 const step = ((U64)1<<62) / total; /* <== here, one division ! */
 U64 const vStep = 1ULL<<(scale-20);

@@ -594,7 +619,7 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
 } }
 if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
 /* corner case, need another normalization method */
-size_t errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
+size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
 if (FSE_isError(errorCode)) return errorCode;
 }
 else normalizedCounter[largest] += (short)stillToDistribute;

@@ -643,17 +668,15 @@ size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)

 /* Build Symbol Transformation Table */
 { const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
-
 for (s=0; s<=maxSymbolValue; s++) {
 symbolTT[s].deltaNbBits = deltaNbBits;
 symbolTT[s].deltaFindState = s-1;
 } }
-
 return 0;
 }

-/* fake FSE_CTable, for rle (100% always same symbol) input */
+/* fake FSE_CTable, for rle input (always same symbol) */
 size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
 {
 void* ptr = ct;

@@ -685,14 +708,13 @@ static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
 const BYTE* const iend = istart + srcSize;
 const BYTE* ip=iend;

-
 BIT_CStream_t bitC;
 FSE_CState_t CState1, CState2;

 /* init */
 if (srcSize <= 2) return 0;
-{ size_t const errorCode = BIT_initCStream(&bitC, dst, dstSize);
-if (FSE_isError(errorCode)) return 0; }
+{ size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
+if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }

 #define FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))

@@ -715,7 +737,7 @@ static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
 }

 /* 2 or 4 encoding per loop */
-for ( ; ip>istart ; ) {
+while ( ip>istart ) {

 FSE_encodeSymbol(&bitC, &CState2, *--ip);

@@ -741,7 +763,7 @@ size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
 const void* src, size_t srcSize,
 const FSE_CTable* ct)
 {
-const unsigned fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
+unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));

 if (fast)
 return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
@@ -752,58 +774,76 @@
 size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }

-size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog)
-{
-const BYTE* const istart = (const BYTE*) src;
-const BYTE* ip = istart;
+#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return f
+#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }
+
+/* FSE_compress_wksp() :
+ * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
+ * `wkspSize` size must be `(1<<tableLog)`.
+ */
+size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+{
 BYTE* const ostart = (BYTE*) dst;
 BYTE* op = ostart;
 BYTE* const oend = ostart + dstSize;

 U32 count[FSE_MAX_SYMBOL_VALUE+1];
 S16 norm[FSE_MAX_SYMBOL_VALUE+1];
-CTable_max_t ct;
-size_t errorCode;
+FSE_CTable* CTable = (FSE_CTable*)workSpace;
+size_t const CTableSize = FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue);
+void* scratchBuffer = (void*)(CTable + CTableSize);
+size_t const scratchBufferSize = wkspSize - (CTableSize * sizeof(FSE_CTable));

 /* init conditions */
-if (srcSize <= 1) return 0; /* Uncompressible */
+if (wkspSize < FSE_WKSP_SIZE_U32(tableLog, maxSymbolValue)) return ERROR(tableLog_tooLarge);
+if (srcSize <= 1) return 0; /* Not compressible */
 if (!maxSymbolValue) maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
 if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;

 /* Scan input and build symbol stats */
-errorCode = FSE_count (count, &maxSymbolValue, ip, srcSize);
-if (FSE_isError(errorCode)) return errorCode;
-if (errorCode == srcSize) return 1;
-if (errorCode == 1) return 0; /* each symbol only present once */
-if (errorCode < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */
+{ CHECK_V_F(maxCount, FSE_count(count, &maxSymbolValue, src, srcSize) );
+if (maxCount == srcSize) return 1; /* only a single symbol in src : rle */
+if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
+if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */
+}

 tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue);
-errorCode = FSE_normalizeCount (norm, tableLog, count, srcSize, maxSymbolValue);
-if (FSE_isError(errorCode)) return errorCode;
+CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) );

 /* Write table description header */
-errorCode = FSE_writeNCount (op, oend-op, norm, maxSymbolValue, tableLog);
-if (FSE_isError(errorCode)) return errorCode;
-op += errorCode;
+{ CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
+op += nc_err;
+}

 /* Compress */
-errorCode = FSE_buildCTable (ct, norm, maxSymbolValue, tableLog);
-if (FSE_isError(errorCode)) return errorCode;
-errorCode = FSE_compress_usingCTable(op, oend - op, ip, srcSize, ct);
-if (errorCode == 0) return 0; /* not enough space for compressed data */
-op += errorCode;
+CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) );
+{ CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) );
+if (cSize == 0) return 0; /* not enough space for compressed data */
+op += cSize;
+}

 /* check compressibility */
-if ( (size_t)(op-ostart) >= srcSize-1 )
-return 0;
+if ( (size_t)(op-ostart) >= srcSize-1 ) return 0;

 return op-ostart;
 }

-size_t FSE_compress (void* dst, size_t dstSize, const void* src, size_t srcSize)
+typedef struct {
+FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
+BYTE scratchBuffer[1 << FSE_MAX_TABLELOG];
+} fseWkspMax_t;
+
+size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog)
 {
-return FSE_compress2(dst, dstSize, src, (U32)srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG);
+fseWkspMax_t scratchBuffer;
+FSE_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */
+if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer));
+}
+
+size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG);
 }
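
Aside (illustration only): CHECK_F/CHECK_V_F thread size_t error codes upward without repeating the isError/return boilerplate. Expanded by hand, the FSE_normalizeCount call above is roughly:

    /* What CHECK_F( FSE_normalizeCount(...) ) expands to. Note that, as
     * written, the macro returns `f` on the error path, i.e. the argument
     * expression is evaluated a second time, so arguments must tolerate
     * re-evaluation. */
    {   size_t const _var_err__ =
            FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue);
        if (ERR_isError(_var_err__))
            return FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue);
    }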
+18 -18
@@ -75,12 +75,6 @@
 #define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; }


-/* **************************************************************
-* Complex types
-****************************************************************/
-typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
-
-
 /* **************************************************************
 * Templates
 ****************************************************************/

@@ -300,28 +294,34 @@ size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
 }


-size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
+size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog)
 {
 const BYTE* const istart = (const BYTE*)cSrc;
 const BYTE* ip = istart;
 short counting[FSE_MAX_SYMBOL_VALUE+1];
-DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */
 unsigned tableLog;
 unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;

 if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */

 /* normal FSE decoding mode */
-{ size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
-if (FSE_isError(NCountLength)) return NCountLength;
-if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */
-ip += NCountLength;
-cSrcSize -= NCountLength;
-}
+size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+if (FSE_isError(NCountLength)) return NCountLength;
+//if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */
+if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
+ip += NCountLength;
+cSrcSize -= NCountLength;

-CHECK_F( FSE_buildDTable (dt, counting, maxSymbolValue, tableLog) );
+CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) );

-return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); /* always return, even if it is an error code */
+return FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace); /* always return, even if it is an error code */
 }
+
+
+typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
+
+size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize)
+{
+DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */
+return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, FSE_MAX_TABLELOG);
+}

+28
-18
@@ -62,21 +62,19 @@ size_t HUF_compress(void* dst, size_t dstCapacity,
|
||||
HUF_decompress() :
|
||||
Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
|
||||
into already allocated buffer 'dst', of minimum size 'dstSize'.
|
||||
`dstSize` : **must** be the ***exact*** size of original (uncompressed) data.
|
||||
`originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
|
||||
Note : in contrast with FSE, HUF_decompress can regenerate
|
||||
RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
|
||||
because it knows size to regenerate.
|
||||
@return : size of regenerated data (== dstSize),
|
||||
@return : size of regenerated data (== originalSize),
|
||||
or an error code, which can be tested using HUF_isError()
|
||||
*/
|
||||
size_t HUF_decompress(void* dst, size_t dstSize,
|
||||
size_t HUF_decompress(void* dst, size_t originalSize,
|
||||
const void* cSrc, size_t cSrcSize);
|
||||
|
||||
|
||||
/* ****************************************
|
||||
* Tool functions
|
||||
******************************************/
|
||||
#define HUF_BLOCKSIZE_MAX (128 * 1024)
|
||||
/* *** Tool functions *** */
|
||||
#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */
|
||||
size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */
|
||||
|
||||
/* Error Management */
|
||||
@@ -84,12 +82,18 @@ unsigned HUF_isError(size_t code); /**< tells if a return value is an
|
||||
const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */
|
||||
|
||||
|
||||
/* *** Advanced function *** */
|
||||
/* *** Advanced function *** */
|
||||
|
||||
/** HUF_compress2() :
|
||||
* Same as HUF_compress(), but offers direct control over `maxSymbolValue` and `tableLog` */
|
||||
* Same as HUF_compress(), but offers direct control over `maxSymbolValue` and `tableLog` .
|
||||
* `tableLog` must be `<= HUF_TABLELOG_MAX` . */
|
||||
size_t HUF_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
|
||||
|
||||
/** HUF_compress4X_wksp() :
|
||||
* Same as HUF_compress2(), but uses externally allocated `workSpace`, which must be a table of >= 1024 unsigned */
|
||||
size_t HUF_compress4X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least 1024 unsigned */
|
||||
|
||||
|
||||
|
||||
#ifdef HUF_STATIC_LINKING_ONLY

@@ -98,7 +102,7 @@ size_t HUF_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize

/* *** Constants *** */
#define HUF_TABLELOG_ABSOLUTEMAX 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
#define HUF_TABLELOG_MAX 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
#define HUF_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */
#define HUF_SYMBOLVALUE_MAX 255
@@ -125,9 +129,9 @@ size_t HUF_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize
typedef U32 HUF_DTable;
#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog)))
#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1)*0x1000001) }
HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog)*0x1000001) }
HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }

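The multiplier change is purely cosmetic (0x1000001 == 0x01000001). As a worked expansion, HUF_CREATE_STATIC_DTABLEX2(DTable, 12) becomes:

HUF_DTable DTable[HUF_DTABLE_SIZE(11)] = { (U32)11 * 0x01000001 };
/* i.e. 1 + (1<<11) = 2049 cells, the first one carrying tableLog = 11
   in its low and high bytes, since 11 * 0x01000001 == 0x0B00000B */
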
/* ****************************************
@@ -141,10 +145,6 @@ size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, con
size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */

size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
size_t HUF_decompress1X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */

/* ****************************************
* HUF detailed API
@@ -169,6 +169,12 @@ size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, un
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);

/** HUF_buildCTable_wksp() :
* Same as HUF_buildCTable(), but using externally allocated scratch buffer.
* `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
*/
size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize);

/*! HUF_readStats() :
Read compact Huffman tree, saved by HUF_writeCTable().
`huffWeight` is destination buffer.
@@ -208,16 +214,20 @@ size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* c
/* single stream variants */

size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least 1024 unsigned */
size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);

size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */

size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
size_t HUF_decompress1X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */

size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /**< automatic selection of single or double symbol decoder, based on DTable */
size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
size_t HUF_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);

#endif /* HUF_STATIC_LINKING_ONLY */

+130
-54
@@ -56,6 +56,8 @@
* Error Management
****************************************************************/
#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return f
#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }

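The two new macros package the check-and-return idiom used throughout this file. Expanded by hand (an illustration, not text from the diff), CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog)); is:

size_t const hSize = FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog);
if (ERR_isError(hSize)) return FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog);

Note the failure path returns `f` itself, so the expression is evaluated a second time; as written, the macro relies on the checked call failing the same way twice.
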
/* **************************************************************
@@ -70,31 +72,73 @@ unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxS
/* *******************************************************
* HUF : Huffman block compression
*********************************************************/
/* HUF_compressWeights() :
* Same as FSE_compress(), but dedicated to huff0's weights compression.
* The use case needs much less stack memory.
* Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
*/
#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)
{
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
BYTE* const oend = ostart + dstSize;

U32 maxSymbolValue = HUF_TABLELOG_MAX;
U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;

FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER];

U32 count[HUF_TABLELOG_MAX+1];
S16 norm[HUF_TABLELOG_MAX+1];

/* init conditions */
if (wtSize <= 1) return 0; /* Not compressible */

/* Scan input and build symbol stats */
{ CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize) );
if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */
if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
}

tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) );

/* Write table description header */
{ CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
op += hSize;
}

/* Compress */
CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) );
{ CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) );
if (cSize == 0) return 0; /* not enough space for compressed data */
op += cSize;
}

return op-ostart;
}

struct HUF_CElt_s {
U16 val;
BYTE nbBits;
}; /* typedef'd to HUF_CElt within "huf.h" */

typedef struct nodeElt_s {
U32 count;
U16 parent;
BYTE byte;
BYTE nbBits;
} nodeElt;

/*! HUF_writeCTable() :
`CTable` : huffman tree to save, using huf representation.
@return : size of saved CTable */
size_t HUF_writeCTable (void* dst, size_t maxDstSize,
const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog)
{
BYTE bitsToWeight[HUF_TABLELOG_MAX + 1];
BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */
BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
BYTE* op = (BYTE*)dst;
U32 n;

/* check conditions */
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(GENERIC);
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);

/* convert to weight */
bitsToWeight[0] = 0;
@@ -103,38 +147,33 @@ size_t HUF_writeCTable (void* dst, size_t maxDstSize,
for (n=0; n<maxSymbolValue; n++)
huffWeight[n] = bitsToWeight[CTable[n].nbBits];

{ size_t const size = FSE_compress(op+1, maxDstSize-1, huffWeight, maxSymbolValue);
if (FSE_isError(size)) return size;
if ((size>1) & (size < maxSymbolValue/2)) { /* FSE compressed */
op[0] = (BYTE)size;
return size+1;
}
}
/* attempt weights compression by FSE */
{ CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) );
if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */
op[0] = (BYTE)hSize;
return hSize+1;
} }

/* raw values */
if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen */
/* write raw values as 4-bits (max : 15) */
if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause issue in final combination */
huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
for (n=0; n<maxSymbolValue; n+=2)
op[(n/2)+1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n+1]);
return ((maxSymbolValue+1)/2) + 1;

}

size_t HUF_readCTable (HUF_CElt* CTable, U32 maxSymbolValue, const void* src, size_t srcSize)
{
BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */
U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
U32 tableLog = 0;
size_t readSize;
U32 nbSymbols = 0;
/*memset(huffWeight, 0, sizeof(huffWeight));*/ /* is not necessary, even though some analyzer complain ... */

/* get symbol weights */
readSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize);
if (HUF_isError(readSize)) return readSize;
CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));

/* check result */
if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
@@ -174,6 +213,13 @@ size_t HUF_readCTable (HUF_CElt* CTable, U32 maxSymbolValue, const void* src, si
}

typedef struct nodeElt_s {
U32 count;
U16 parent;
BYTE byte;
BYTE nbBits;
} nodeElt;

static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
{
const U32 largestBits = huffNode[lastNonNull].nbBits;
@@ -279,20 +325,26 @@ static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue)
}

/** HUF_buildCTable_wksp() :
* Same as HUF_buildCTable(), but using externally allocated scratch buffer.
* `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
*/
#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits)
typedef nodeElt huffNodeTable[2*HUF_SYMBOLVALUE_MAX+1 +1];
size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
{
nodeElt huffNode0[2*HUF_SYMBOLVALUE_MAX+1 +1];
nodeElt* huffNode = huffNode0 + 1;
nodeElt* const huffNode0 = (nodeElt*)workSpace;
nodeElt* const huffNode = huffNode0+1;
U32 n, nonNullRank;
int lowS, lowN;
U16 nodeNb = STARTNODE;
U32 nodeRoot;

/* safety checks */
if (wkspSize < sizeof(huffNodeTable)) return ERROR(GENERIC); /* workSpace is not large enough */
if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(GENERIC);
memset(huffNode0, 0, sizeof(huffNode0));
memset(huffNode0, 0, sizeof(huffNodeTable));

/* sort, decreasing order */
HUF_sort(huffNode, count, maxSymbolValue);
@@ -305,7 +357,7 @@ size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U3
huffNode[lowS].parent = huffNode[lowS-1].parent = nodeNb;
nodeNb++; lowS-=2;
for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
huffNode0[0].count = (U32)(1U<<31);
huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */

/* create parents */
while (nodeNb <= nodeRoot) {
@@ -348,6 +400,15 @@ size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U3
return maxNbBits;
}

/** HUF_buildCTable() :
* Note : count is used before tree is written, so they can safely overlap
*/
size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits)
{
huffNodeTable nodeTable;
return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable));
}

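The "1024 unsigned" workspace requirement quoted in huf.h follows from this typedef: huffNodeTable holds 2*HUF_SYMBOLVALUE_MAX+1+1 = 512 nodeElt entries, and each nodeElt (U32 + U16 + BYTE + BYTE) packs into 8 bytes on typical ABIs, so sizeof(huffNodeTable) = 4096 bytes, exactly 1024 * sizeof(unsigned) where unsigned is 4 bytes.
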
static void HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
{
BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
@@ -375,8 +436,8 @@ size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, si

/* init */
if (dstSize < 8) return 0; /* not enough space to compress */
{ size_t const errorCode = BIT_initCStream(&bitC, op, oend-op);
if (HUF_isError(errorCode)) return 0; }
{ size_t const initErr = BIT_initCStream(&bitC, op, oend-op);
if (HUF_isError(initErr)) return 0; }

n = srcSize & ~3; /* join to mod 4 */
switch (srcSize & 3)
@@ -419,32 +480,28 @@ size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, si
if (srcSize < 12) return 0; /* no saving possible : too small input */
op += 6; /* jumpTable */

{ size_t const cSize = HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable);
if (HUF_isError(cSize)) return cSize;
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
if (cSize==0) return 0;
MEM_writeLE16(ostart, (U16)cSize);
op += cSize;
}

ip += segmentSize;
{ size_t const cSize = HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable);
if (HUF_isError(cSize)) return cSize;
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
if (cSize==0) return 0;
MEM_writeLE16(ostart+2, (U16)cSize);
op += cSize;
}

ip += segmentSize;
{ size_t const cSize = HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable);
if (HUF_isError(cSize)) return cSize;
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
if (cSize==0) return 0;
MEM_writeLE16(ostart+4, (U16)cSize);
op += cSize;
}

ip += segmentSize;
{ size_t const cSize = HUF_compress1X_usingCTable(op, oend-op, ip, iend-ip, CTable);
if (HUF_isError(cSize)) return cSize;
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, iend-ip, CTable) );
if (cSize==0) return 0;
op += cSize;
}
@@ -453,20 +510,25 @@ size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, si
}

/* `workSpace` must be a table of at least 1024 unsigned */
static size_t HUF_compress_internal (
void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
unsigned singleStream)
unsigned singleStream,
void* workSpace, size_t wkspSize)
{
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;

U32 count[HUF_SYMBOLVALUE_MAX+1];
HUF_CElt CTable[HUF_SYMBOLVALUE_MAX+1];
union {
U32 count[HUF_SYMBOLVALUE_MAX+1];
HUF_CElt CTable[HUF_SYMBOLVALUE_MAX+1];
} table; /* `count` can overlap with `CTable`; saves 1 KB */

/* checks & inits */
if (wkspSize < sizeof(huffNodeTable)) return ERROR(GENERIC);
if (!srcSize) return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */
if (!dstSize) return 0; /* cannot fit within dst budget */
if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
@@ -475,30 +537,27 @@ static size_t HUF_compress_internal (
if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;

/* Scan input and build symbol stats */
{ size_t const largest = FSE_count (count, &maxSymbolValue, (const BYTE*)src, srcSize);
if (HUF_isError(largest)) return largest;
{ CHECK_V_F(largest, FSE_count_wksp (table.count, &maxSymbolValue, (const BYTE*)src, srcSize, (U32*)workSpace) );
if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
if (largest <= (srcSize >> 7)+1) return 0; /* Fast heuristic : not compressible enough */
}

/* Build Huffman Tree */
huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
{ size_t const maxBits = HUF_buildCTable (CTable, count, maxSymbolValue, huffLog);
if (HUF_isError(maxBits)) return maxBits;
{ CHECK_V_F(maxBits, HUF_buildCTable_wksp (table.CTable, table.count, maxSymbolValue, huffLog, workSpace, wkspSize) );
huffLog = (U32)maxBits;
}

/* Write table description header */
{ size_t const hSize = HUF_writeCTable (op, dstSize, CTable, maxSymbolValue, huffLog);
if (HUF_isError(hSize)) return hSize;
{ CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, table.CTable, maxSymbolValue, huffLog) );
if (hSize + 12 >= srcSize) return 0; /* not useful to try compression */
op += hSize;
}

/* Compress */
{ size_t const cSize = (singleStream) ?
HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) : /* single segment */
HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable);
HUF_compress1X_usingCTable(op, oend - op, src, srcSize, table.CTable) : /* single segment */
HUF_compress4X_usingCTable(op, oend - op, src, srcSize, table.CTable);
if (HUF_isError(cSize)) return cSize;
if (cSize==0) return 0; /* uncompressible */
op += cSize;
@@ -512,21 +571,38 @@ static size_t HUF_compress_internal (
}

size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize)
{
return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize);
}

size_t HUF_compress1X (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog)
{
return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1);
unsigned workSpace[1024];
return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
}

size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize)
{
return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize);
}

size_t HUF_compress2 (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog)
{
return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0);
unsigned workSpace[1024];
return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
}

size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
return HUF_compress2(dst, maxDstSize, src, (U32)srcSize, 255, HUF_TABLELOG_DEFAULT);

@@ -358,13 +358,15 @@ typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* doubl

typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;

/* HUF_fillDTableX4Level2() :
* `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
const U32* rankValOrigin, const int minWeight,
const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
U32 nbBitsBaseline, U16 baseSeq)
{
HUF_DEltX4 DElt;
U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];
U32 rankVal[HUF_TABLELOG_MAX + 1];

/* get pre-calculated rankVal */
memcpy(rankVal, rankValOrigin, sizeof(rankVal));
@@ -398,14 +400,14 @@ static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 co
} }
}

typedef U32 rankVal_t[HUF_TABLELOG_ABSOLUTEMAX][HUF_TABLELOG_ABSOLUTEMAX + 1];
typedef U32 rankVal_t[HUF_TABLELOG_MAX][HUF_TABLELOG_MAX + 1];

static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
const sortedSymbol_t* sortedList, const U32 sortedListSize,
const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
const U32 nbBitsBaseline)
{
U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];
U32 rankVal[HUF_TABLELOG_MAX + 1];
const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
const U32 minBits = nbBitsBaseline - maxWeight;
U32 s;
@@ -446,8 +448,8 @@ size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize)
{
BYTE weightList[HUF_SYMBOLVALUE_MAX + 1];
sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1];
U32 rankStats[HUF_TABLELOG_ABSOLUTEMAX + 1] = { 0 };
U32 rankStart0[HUF_TABLELOG_ABSOLUTEMAX + 2] = { 0 };
U32 rankStats[HUF_TABLELOG_MAX + 1] = { 0 };
U32 rankStart0[HUF_TABLELOG_MAX + 2] = { 0 };
U32* const rankStart = rankStart0+1;
rankVal_t rankVal;
U32 tableLog, maxW, sizeOfSort, nbSymbols;
@@ -458,7 +460,7 @@ size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize)
HUF_DEltX4* const dt = (HUF_DEltX4*)dtPtr;

HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable)); /* if compilation fails here, assertion is false */
if (maxTableLog > HUF_TABLELOG_ABSOLUTEMAX) return ERROR(tableLog_tooLarge);
if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
/* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */

iSize = HUF_readStats(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);

+3
-1
@@ -55,14 +55,16 @@ MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (size
typedef int32_t S32;
typedef uint64_t U64;
typedef int64_t S64;
typedef intptr_t iPtrDiff;
#else
typedef unsigned char BYTE;
typedef unsigned char BYTE;
typedef unsigned short U16;
typedef signed short S16;
typedef unsigned int U32;
typedef signed int S32;
typedef unsigned long long U64;
typedef signed long long S64;
typedef ptrdiff_t iPtrDiff;
#endif

+80
-47
@@ -7,24 +7,24 @@
* of patent rights can be found in the PATENTS file in the same directory.
*/

#ifndef ZSTD_H_235446
#define ZSTD_H_235446

#if defined (__cplusplus)
extern "C" {
#endif

#ifndef ZSTD_H_235446
#define ZSTD_H_235446

/* ====== Dependency ======*/
#include <stddef.h> /* size_t */

/* ====== Export for Windows ======*/
/*
* ZSTD_DLL_EXPORT :
* Enable exporting of functions when building a Windows DLL
*/
#if defined(_WIN32) && defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
/* ===== ZSTDLIB_API : control library symbols visibility ===== */
#if defined(__GNUC__) && (__GNUC__ >= 4)
# define ZSTDLIB_API __attribute__ ((visibility ("default")))
#elif defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
# define ZSTDLIB_API __declspec(dllexport)
#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
# define ZSTDLIB_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
#else
# define ZSTDLIB_API
#endif
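A plausible mapping from build setup to the branch taken (the build commands are assumptions of this note, not part of the header):

/* gcc >= 4, any build        : ZSTDLIB_API -> __attribute__((visibility("default"))) */
/* MSVC, building the DLL     : cl /LD /DZSTD_DLL_EXPORT=1 ...  -> __declspec(dllexport) */
/* MSVC, client of the DLL    : cl /DZSTD_DLL_IMPORT=1 ...      -> __declspec(dllimport) */
/* anything else              : ZSTDLIB_API expands to nothing */
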
@@ -51,11 +51,9 @@ extern "C" {
*********************************************************************************************************/

/*------ Version ------*/
ZSTDLIB_API unsigned ZSTD_versionNumber (void); /**< returns version number of ZSTD */

#define ZSTD_VERSION_MAJOR 1
#define ZSTD_VERSION_MINOR 1
#define ZSTD_VERSION_RELEASE 1
#define ZSTD_VERSION_RELEASE 2

#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
#define ZSTD_QUOTE(str) #str
@@ -63,6 +61,7 @@ ZSTDLIB_API unsigned ZSTD_versionNumber (void); /**< returns version number of
#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)

#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< library version number; to be used when checking dll version */

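With the constants above, ZSTD_VERSION_NUMBER now evaluates to 1*100*100 + 1*100 + 2 = 10102, and ZSTD_VERSION_STRING to "1.1.2". The relocated prototype supports the runtime-vs-compile-time check its comment suggests, e.g.:

if (ZSTD_versionNumber() != ZSTD_VERSION_NUMBER) { /* header and dll disagree */ }
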
/***************************************
@@ -72,7 +71,7 @@ ZSTDLIB_API unsigned ZSTD_versionNumber (void); /**< returns version number of
Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
@return : compressed size written into `dst` (<= `dstCapacity`),
or an error code if it fails (which can be tested using ZSTD_isError()) */
or an error code if it fails (which can be tested using ZSTD_isError()). */
ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel);
@@ -82,7 +81,7 @@ ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
`dstCapacity` is an upper bound of originalSize.
If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.
@return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
or an errorCode if it fails (which can be tested using ZSTD_isError()) */
or an errorCode if it fails (which can be tested using ZSTD_isError()). */
ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
const void* src, size_t compressedSize);

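A minimal single-shot round trip over the two prototypes above (a sketch: allocation strategy and level choice are assumptions of this note, not API requirements):

#include <stdlib.h>
#include <string.h>
#include "zstd.h"

static int zstd_roundtrip(const void* src, size_t srcSize)
{
    size_t const cCap = ZSTD_compressBound(srcSize);  /* worst-case compressed size */
    void* const cBuf = malloc(cCap);
    void* const rBuf = malloc(srcSize);
    int ret = -1;
    if (cBuf && rBuf) {
        size_t const cSize = ZSTD_compress(cBuf, cCap, src, srcSize, 3 /* level */);
        if (!ZSTD_isError(cSize)) {
            size_t const rSize = ZSTD_decompress(rBuf, srcSize, cBuf, cSize);
            if (!ZSTD_isError(rSize) && rSize == srcSize && !memcmp(src, rBuf, srcSize))
                ret = 0;
        }
    }
    free(cBuf); free(rBuf);
    return ret;
}
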
@@ -116,16 +115,16 @@ ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readab
* Explicit memory management
***************************************/
/*= Compression context
* When compressing many messages / blocks,
* When compressing many times,
* it is recommended to allocate a context just once, and re-use it for each successive compression operation.
* This will make the situation much easier for the system's memory.
* This will make workload friendlier for system's memory.
* Use one context per thread for parallel execution in multi-threaded environments. */
typedef struct ZSTD_CCtx_s ZSTD_CCtx;
ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx);

/*! ZSTD_compressCCtx() :
Same as ZSTD_compress(), requires an allocated ZSTD_CCtx (see ZSTD_createCCtx()) */
Same as ZSTD_compress(), requires an allocated ZSTD_CCtx (see ZSTD_createCCtx()). */
ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel);

/*= Decompression context */
@@ -134,7 +133,7 @@ ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);
ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);

/*! ZSTD_decompressDCtx() :
* Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx (see ZSTD_createDCtx()) */
* Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx (see ZSTD_createDCtx()). */
ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);

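Following the re-use advice in the rewritten comment, one context amortized across many inputs might look like this (in[], nbInputs, dst, dstCapacity are hypothetical caller state):

ZSTD_CCtx* const cctx = ZSTD_createCCtx();
if (cctx != NULL) {
    size_t i;
    for (i = 0; i < nbInputs; i++) {
        size_t const cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity, in[i].ptr, in[i].size, 3);
        if (ZSTD_isError(cSize)) break;   /* the context remains valid for the next iteration */
    }
    ZSTD_freeCCtx(cctx);
}
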
@@ -143,7 +142,8 @@ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* ctx, void* dst, size_t dstCapa
***************************/
/*! ZSTD_compress_usingDict() :
* Compression using a predefined Dictionary (see dictBuilder/zdict.h).
* Note : This function load the dictionary, resulting in significant startup delay. */
* Note : This function loads the dictionary, resulting in significant startup delay.
* Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
@@ -153,7 +153,8 @@ ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
/*! ZSTD_decompress_usingDict() :
* Decompression using a predefined Dictionary (see dictBuilder/zdict.h).
* Dictionary must be identical to the one used during compression.
* Note : This function load the dictionary, resulting in significant startup delay */
* Note : This function loads the dictionary, resulting in significant startup delay.
* Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
@@ -169,17 +170,17 @@ typedef struct ZSTD_CDict_s ZSTD_CDict;
* When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
* ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
* ZSTD_CDict can be created once and used by multiple threads concurrently, as its usage is read-only.
* `dict` can be released after ZSTD_CDict creation */
* `dict` can be released after ZSTD_CDict creation. */
ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel);

/*! ZSTD_freeCDict() :
* Function frees memory allocated by ZSTD_createCDict() */
* Function frees memory allocated by ZSTD_createCDict(). */
ZSTDLIB_API size_t ZSTD_freeCDict(ZSTD_CDict* CDict);

/*! ZSTD_compress_usingCDict() :
* Compression using a digested Dictionary.
* Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
* Note that compression level is decided during dictionary creation */
* Note that compression level is decided during dictionary creation. */
ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
@@ -190,7 +191,7 @@ typedef struct ZSTD_DDict_s ZSTD_DDict;

/*! ZSTD_createDDict() :
* Create a digested dictionary, ready to start decompression operation without startup delay.
* `dict` can be released after creation */
* `dict` can be released after creation. */
ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize);

/*! ZSTD_freeDDict() :
@@ -198,7 +199,7 @@ ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize);
ZSTDLIB_API size_t ZSTD_freeDDict(ZSTD_DDict* ddict);

/*! ZSTD_decompress_usingDDict() :
* Decompression using a digested Dictionary
* Decompression using a digested Dictionary.
* Faster startup than ZSTD_decompress_usingDict(), recommended when same dictionary is used multiple times. */
ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
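The digested-dictionary functions combine into a load-once / use-many pattern, sketched here under the assumption that dictBuf/dictSize came from dictBuilder/zdict.h:

ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3); /* level fixed at creation */
ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
/* cdict usage is read-only : several threads may share it, each with its own cctx */
ZSTD_freeCCtx(cctx);
ZSTD_freeCDict(cdict);
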
@@ -236,20 +237,20 @@ typedef struct ZSTD_outBuffer_s {
*
* Start a new compression by initializing ZSTD_CStream.
* Use ZSTD_initCStream() to start a new compression operation.
* Use ZSTD_initCStream_usingDict() for a compression which requires a dictionary.
* Use ZSTD_initCStream_usingDict() or ZSTD_initCStream_usingCDict() for a compression which requires a dictionary (experimental section)
*
* Use ZSTD_compressStream() repetitively to consume input stream.
* The function will automatically update both `pos` fields.
* Note that it may not consume the entire input, in which case `pos < size`,
* and it's up to the caller to present again remaining data.
* @return : a size hint, preferred nb of bytes to use as input for next function call
* (it's just a hint, to help latency a little, any other value will work fine)
* (note : the size hint is guaranteed to be <= ZSTD_CStreamInSize() )
* or an error code, which can be tested using ZSTD_isError().
* Note 1 : it's just a hint, to help latency a little, any other value will work fine.
* Note 2 : size hint is guaranteed to be <= ZSTD_CStreamInSize()
*
* At any moment, it's possible to flush whatever data remains within buffer, using ZSTD_flushStream().
* At any moment, it's possible to flush whatever data remains within internal buffer, using ZSTD_flushStream().
* `output->pos` will be updated.
* Note some content might still be left within internal buffer if `output->size` is too small.
* Note that some content might still be left within internal buffer if `output->size` is too small.
* @return : nb of bytes still present within internal buffer (0 if it's empty)
* or an error code, which can be tested using ZSTD_isError().
*
@@ -258,15 +259,15 @@ typedef struct ZSTD_outBuffer_s {
* The epilogue is required for decoders to consider a frame completed.
* Similar to ZSTD_flushStream(), it may not be able to flush the full content if `output->size` is too small.
* In which case, call again ZSTD_endStream() to complete the flush.
* @return : nb of bytes still present within internal buffer (0 if it's empty)
* @return : nb of bytes still present within internal buffer (0 if it's empty, hence compression completed)
* or an error code, which can be tested using ZSTD_isError().
*
* *******************************************************************/

/*===== Streaming compression functions ======*/
typedef struct ZSTD_CStream_s ZSTD_CStream;
ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);

ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
@@ -295,23 +296,25 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output
* If `output.pos < output.size`, decoder has flushed everything it could.
* @return : 0 when a frame is completely decoded and fully flushed,
* an error code, which can be tested using ZSTD_isError(),
* any other value > 0, which means there is still some work to do to complete the frame.
* The return value is a suggested next input size (just an hint, to help latency).
* any other value > 0, which means there is still some decoding to do to complete current frame.
* The return value is a suggested next input size (a hint to improve latency) that will never load more than the current frame.
* *******************************************************************************/

/*===== Streaming decompression functions =====*/
typedef struct ZSTD_DStream_s ZSTD_DStream;
ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);

ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);

ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */
ZSTDLIB_API size_t ZSTD_DStreamOutSize(void); /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */

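Read together, the protocol above suggests the following file-to-file compression loop. A sketch: the I/O scaffolding is assumed, the ZSTD_* calls follow the documented sequence, and a production version would also loop on ZSTD_endStream() until it returns 0:

#include <stdio.h>
#include <stdlib.h>
#include "zstd.h"

static int stream_compress(FILE* fin, FILE* fout, int level)
{
    size_t const inSize  = ZSTD_CStreamInSize();   /* recommended buffer sizes */
    size_t const outSize = ZSTD_CStreamOutSize();
    char* const inBuf  = malloc(inSize);
    char* const outBuf = malloc(outSize);
    ZSTD_CStream* const zcs = ZSTD_createCStream();
    int ret = -1;
    if (inBuf && outBuf && zcs && !ZSTD_isError(ZSTD_initCStream(zcs, level))) {
        size_t readSz;
        while ((readSz = fread(inBuf, 1, inSize, fin)) > 0) {
            ZSTD_inBuffer input = { inBuf, readSz, 0 };
            while (input.pos < input.size) {       /* input may need several calls to drain */
                ZSTD_outBuffer output = { outBuf, outSize, 0 };
                if (ZSTD_isError(ZSTD_compressStream(zcs, &output, &input))) goto _end;
                fwrite(outBuf, 1, output.pos, fout);
            }
        }
        {   ZSTD_outBuffer output = { outBuf, outSize, 0 };
            size_t const remaining = ZSTD_endStream(zcs, &output);  /* write frame epilogue */
            fwrite(outBuf, 1, output.pos, fout);
            if (remaining == 0) ret = 0;           /* 0 : frame fully flushed */
        }
    }
_end:
    ZSTD_freeCStream(zcs); free(inBuf); free(outBuf);
    return ret;
}
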
#endif /* ZSTD_H_235446 */

#ifdef ZSTD_STATIC_LINKING_ONLY
#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY

/****************************************************************************************
* START OF ADVANCED AND EXPERIMENTAL FUNCTIONS
@@ -403,15 +406,15 @@ ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictS
* Gives the amount of memory used by a given ZSTD_sizeof_CDict */
ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);

/*! ZSTD_getParams() :
* same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of a `ZSTD_compressionParameters`.
* All fields of `ZSTD_frameParameters` are set to default (0) */
ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize);

/*! ZSTD_getCParams() :
* @return ZSTD_compressionParameters structure for a selected compression level and srcSize.
* `srcSize` value is optional, select 0 if not known */
ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize);
* @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
* `estimatedSrcSize` value is optional, select 0 if not known */
ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);

/*! ZSTD_getParams() :
* same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
* All fields of `ZSTD_frameParameters` are set to default (0) */
ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);

/*! ZSTD_checkCParams() :
* Ensure param values remain within authorized range */
@@ -433,6 +436,13 @@ ZSTDLIB_API size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,

/*--- Advanced decompression functions ---*/

/*! ZSTD_isFrame() :
* Tells if the content of `buffer` starts with a valid Frame Identifier.
* Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
* Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
* Note 3 : Skippable Frame Identifiers are considered valid. */
ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);

/*! ZSTD_estimateDCtxSize() :
* Gives the potential amount of memory allocated to create a ZSTD_DCtx */
ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
@@ -449,6 +459,30 @@ ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
* Gives the amount of memory used by a given ZSTD_DDict */
ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);

/*! ZSTD_getDictID_fromDict() :
* Provides the dictID stored within dictionary.
* if @return == 0, the dictionary is not conformant with Zstandard specification.
* It can still be loaded, but as a content-only dictionary. */
unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);

/*! ZSTD_getDictID_fromDDict() :
* Provides the dictID of the dictionary loaded into `ddict`.
* If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
* Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);

/*! ZSTD_getDictID_fromFrame() :
* Provides the dictID required to decompress the frame stored within `src`.
* If @return == 0, the dictID could not be decoded.
* This could be for one of the following reasons :
* - The frame does not require a dictionary to be decoded (most common case).
* - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
* Note : this use case also happens when using a non-conformant dictionary.
* - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
* - This is not a Zstandard frame.
* When identifying the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code. */
unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);

/********************************************************************
* Advanced streaming functions
@@ -456,6 +490,7 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);

/*===== Advanced Streaming compression functions =====*/
ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize); /**< pledgedSrcSize must be correct */
ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel);
ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize is optional and can be zero == unknown */
@@ -631,10 +666,8 @@ ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCa
ZSTDLIB_API size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert block into `dctx` history. Useful for uncompressed blocks */

#endif /* ZSTD_STATIC_LINKING_ONLY */
#endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_H_235446 */

@@ -16,7 +16,6 @@
#include "error_private.h"
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h" /* declaration of ZSTD_isError, ZSTD_getErrorName, ZSTD_getErrorCode, ZSTD_getErrorString, ZSTD_versionNumber */
#include "zbuff.h" /* declaration of ZBUFF_isError, ZBUFF_getErrorName */

/*-****************************************
@@ -44,16 +43,11 @@ ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
* provides error code string from enum */
const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorName(code); }

/* **************************************************************
* ZBUFF Error Management
****************************************************************/
/* --- ZBUFF Error Management (deprecated) --- */
unsigned ZBUFF_isError(size_t errorCode) { return ERR_isError(errorCode); }

const char* ZBUFF_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }

/*=**************************************************************
* Custom allocator
****************************************************************/

+56
-29
@@ -33,6 +33,7 @@ typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZS
/*-*************************************
* Helper functions
***************************************/
#define ZSTD_STATIC_ASSERT(c) { enum { ZSTD_static_assert = 1/(int)(!!(c)) }; }
size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; }

@@ -82,6 +83,7 @@ struct ZSTD_CCtx_s
FSE_CTable offcodeCTable [FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
FSE_CTable litlengthCTable [FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
unsigned tmpCounters[1024];
};

ZSTD_CCtx* ZSTD_createCCtx(void)
@@ -147,6 +149,14 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
}

/** ZSTD_cycleLog() :
* condition for correct operation : hashLog > 1 */
static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
{
U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
return hashLog - btScale;
}

/** ZSTD_adjustCParams() :
optimize `cPar` for a given input (`srcSize` and `dictSize`).
mostly downsizing to reduce memory consumption and initialization.
@@ -165,9 +175,9 @@ ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, u
if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
} }
if (cPar.hashLog > cPar.windowLog) cPar.hashLog = cPar.windowLog;
{ U32 const btPlus = (cPar.strategy == ZSTD_btlazy2) | (cPar.strategy == ZSTD_btopt) | (cPar.strategy == ZSTD_btopt2);
U32 const maxChainLog = cPar.windowLog+btPlus;
if (cPar.chainLog > maxChainLog) cPar.chainLog = maxChainLog; } /* <= ZSTD_CHAINLOG_MAX */
{ U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
if (cycleLog > cPar.windowLog) cPar.chainLog -= (cycleLog - cPar.windowLog);
}

if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */

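A worked instance of the new clamp, following the code above: with a binary-tree strategy (btScale = 1), chainLog = 24 and windowLog = 20, ZSTD_cycleLog(24, strat) = 24 - 1 = 23; since 23 > 20, chainLog is reduced by 23 - 20 = 3 to 21, whose cycle log (21 - 1 = 20) exactly matches the window. The old lines bounded chainLog by windowLog+btPlus directly; the new form expresses the same intent through the cycle length.
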
@@ -470,8 +480,8 @@ static size_t ZSTD_compressLiterals (ZSTD_CCtx* zc,
singleStream = 1;
cLitSize = HUF_compress1X_usingCTable(ostart+lhSize, dstCapacity-lhSize, src, srcSize, zc->hufTable);
} else {
cLitSize = singleStream ? HUF_compress1X(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11)
: HUF_compress2 (ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11);
cLitSize = singleStream ? HUF_compress1X_wksp(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, zc->tmpCounters, sizeof(zc->tmpCounters))
: HUF_compress4X_wksp(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, zc->tmpCounters, sizeof(zc->tmpCounters));
}

if ((cLitSize==0) | (cLitSize >= srcSize - minGain))
@@ -566,6 +576,7 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* zc,
BYTE* op = ostart;
size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
BYTE* seqHead;
BYTE scratchBuffer[1<<MAX(MLFSELog,LLFSELog)];

/* Compress literals */
{ const BYTE* const literals = seqStorePtr->litStart;
@@ -593,7 +604,7 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* zc,

/* CTable for Literal Lengths */
{ U32 max = MaxLL;
size_t const mostFrequent = FSE_countFast(count, &max, llCodeTable, nbSeq);
size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, zc->tmpCounters);
if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
*op++ = llCodeTable[0];
FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
@@ -601,7 +612,7 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* zc,
} else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
LLtype = set_repeat;
} else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog-1)))) {
FSE_buildCTable(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog);
FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
LLtype = set_basic;
} else {
size_t nbSeq_1 = nbSeq;
@@ -611,13 +622,13 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* zc,
{ size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */
if (FSE_isError(NCountSize)) return ERROR(GENERIC);
op += NCountSize; }
FSE_buildCTable(CTable_LitLength, norm, max, tableLog);
FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer));
LLtype = set_compressed;
} }

/* CTable for Offsets */
{ U32 max = MaxOff;
size_t const mostFrequent = FSE_countFast(count, &max, ofCodeTable, nbSeq);
size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, zc->tmpCounters);
if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
*op++ = ofCodeTable[0];
FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
@@ -625,7 +636,7 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* zc,
} else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
Offtype = set_repeat;
} else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog-1)))) {
FSE_buildCTable(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog);
FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
Offtype = set_basic;
} else {
size_t nbSeq_1 = nbSeq;
@@ -635,13 +646,13 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* zc,
{ size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */
if (FSE_isError(NCountSize)) return ERROR(GENERIC);
op += NCountSize; }
FSE_buildCTable(CTable_OffsetBits, norm, max, tableLog);
FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer));
Offtype = set_compressed;
} }

/* CTable for MatchLengths */
{ U32 max = MaxML;
size_t const mostFrequent = FSE_countFast(count, &max, mlCodeTable, nbSeq);
size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, zc->tmpCounters);
if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
*op++ = *mlCodeTable;
FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
@@ -649,7 +660,7 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* zc,
} else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
MLtype = set_repeat;
} else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog-1)))) {
FSE_buildCTable(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog);
FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
MLtype = set_basic;
} else {
size_t nbSeq_1 = nbSeq;
@@ -659,7 +670,7 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* zc,
{ size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */
if (FSE_isError(NCountSize)) return ERROR(GENERIC);
op += NCountSize; }
FSE_buildCTable(CTable_MatchLength, norm, max, tableLog);
FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer));
MLtype = set_compressed;
} }

@@ -739,8 +750,8 @@ MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const v
{
#if 0 /* for debug */
static const BYTE* g_start = NULL;
const U32 pos = (U32)(literals - g_start);
if (g_start==NULL) g_start = literals;
const U32 pos = (U32)((const BYTE*)literals - g_start);
if (g_start==NULL) g_start = (const BYTE*)literals;
//if ((pos > 1) && (pos < 50000))
printf("Cpos %6u :%5u literals & match %3u bytes at distance %6u \n",
pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode);
@@ -1482,8 +1493,9 @@ static U32 ZSTD_insertBt1(ZSTD_CCtx* zc, const BYTE* const ip, const U32 mls, co
hashTable[h] = current; /* Update Hash Table */

while (nbCompares-- && (matchIndex > windowLow)) {
U32* nextPtr = bt + 2*(matchIndex & btMask);
U32* const nextPtr = bt + 2*(matchIndex & btMask);
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */

#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */
const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */
if (matchIndex == predictedSmall) {
@@ -1579,7 +1591,7 @@ static size_t ZSTD_insertBtAndFindBestMatch (
hashTable[h] = current; /* Update Hash Table */

while (nbCompares-- && (matchIndex > windowLow)) {
U32* nextPtr = bt + 2*(matchIndex & btMask);
U32* const nextPtr = bt + 2*(matchIndex & btMask);
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
const BYTE* match;

@@ -2271,16 +2283,16 @@ static size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
if (remaining < blockSize) blockSize = remaining;

/* preemptive overflow correction */
if (cctx->lowLimit > (1<<30)) {
U32 const btplus = (cctx->params.cParams.strategy == ZSTD_btlazy2) | (cctx->params.cParams.strategy == ZSTD_btopt) | (cctx->params.cParams.strategy == ZSTD_btopt2);
U32 const chainMask = (1 << (cctx->params.cParams.chainLog - btplus)) - 1;
U32 const supLog = MAX(cctx->params.cParams.chainLog, 17 /* blockSize */);
U32 const newLowLimit = (cctx->lowLimit & chainMask) + (1 << supLog); /* preserve position % chainSize, ensure current-repcode doesn't underflow */
U32 const correction = cctx->lowLimit - newLowLimit;
if (cctx->lowLimit > (2U<<30)) {
U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1;
U32 const current = (U32)(ip - cctx->base);
U32 const newCurrent = (current & cycleMask) + (1 << cctx->params.cParams.windowLog);
U32 const correction = current - newCurrent;
ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30);
ZSTD_reduceIndex(cctx, correction);
cctx->base += correction;
cctx->dictBase += correction;
cctx->lowLimit = newLowLimit;
cctx->lowLimit -= correction;
cctx->dictLimit -= correction;
if (cctx->nextToUpdate < correction) cctx->nextToUpdate = 0;
else cctx->nextToUpdate -= correction;
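To see the new arithmetic at work (values chosen by this note for illustration): suppose windowLog = 20 and ZSTD_cycleLog(...) = 17, so cycleMask = (1<<17)-1 = 0x1FFFF. Once lowLimit passes the 2U<<30 = 2^31 trigger with, say, current = 0x80001234, then newCurrent = (0x80001234 & 0x1FFFF) + (1<<20) = 0x1234 + 0x100000 = 0x101234, and correction = 0x80001234 - 0x101234 = 0x7FF00000. Every index is shifted down by that amount, preserving position modulo the table cycle while keeping current well above the window.
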
@@ -2506,6 +2518,7 @@ static size_t ZSTD_loadDictEntropyStats(ZSTD_CCtx* cctx, const void* dict, size_
const BYTE* const dictEnd = dictPtr + dictSize;
short offcodeNCount[MaxOff+1];
unsigned offcodeMaxValue = MaxOff;
BYTE scratchBuffer[1<<MAX(MLFSELog,LLFSELog)];

{ size_t const hufHeaderSize = HUF_readCTable(cctx->hufTable, 255, dict, dictSize);
if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
@@ -2517,7 +2530,7 @@ static size_t ZSTD_loadDictEntropyStats(ZSTD_CCtx* cctx, const void* dict, size_
if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
/* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
CHECK_E (FSE_buildCTable(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog), dictionary_corrupted);
CHECK_E (FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, scratchBuffer, sizeof(scratchBuffer)), dictionary_corrupted);
dictPtr += offcodeHeaderSize;
}

@@ -2528,7 +2541,7 @@ static size_t ZSTD_loadDictEntropyStats(ZSTD_CCtx* cctx, const void* dict, size_
if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
/* Every match length code must have non-zero probability */
CHECK_F (ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
CHECK_E (FSE_buildCTable(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog), dictionary_corrupted);
CHECK_E (FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, scratchBuffer, sizeof(scratchBuffer)), dictionary_corrupted);
dictPtr += matchlengthHeaderSize;
}

@@ -2539,7 +2552,7 @@ static size_t ZSTD_loadDictEntropyStats(ZSTD_CCtx* cctx, const void* dict, size_
if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
/* Every literal length code must have non-zero probability */
CHECK_F (ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
CHECK_E(FSE_buildCTable(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog), dictionary_corrupted);
CHECK_E(FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, scratchBuffer, sizeof(scratchBuffer)), dictionary_corrupted);
dictPtr += litlengthHeaderSize;
}

@@ -2695,7 +2708,7 @@ size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,

size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize, int compressionLevel)
{
ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, dictSize);
ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, dict ? dictSize : 0);
params.fParams.contentSizeFlag = 1;
return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
}
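For context, a hedged usage sketch of the function just patched: after this fix a NULL `dict` selects parameters as if no dictionary were present, even if a non-zero `dictSize` slips through.

#include <zstd.h>

/* sketch only : error handling and buffer sizing elided */
static size_t compress_no_dict(void* dst, size_t dstCap, const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    /* NULL dict : parameter selection now ignores dictSize too */
    size_t const r = ZSTD_compress_usingDict(cctx, dst, dstCap, src, srcSize,
                                             NULL, 0, 5 /* level */);
    ZSTD_freeCCtx(cctx);
    return r;
}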
@@ -2839,6 +2852,8 @@ struct ZSTD_CStream_s {
ZSTD_cStreamStage stage;
U32 checksum;
U32 frameEnded;
U64 pledgedSrcSize;
U64 inputProcessed;
ZSTD_parameters params;
ZSTD_customMem customMem;
}; /* typedef'd to ZSTD_CStream within "zstd.h" */
@@ -2896,6 +2911,8 @@ size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
zcs->stage = zcss_load;
zcs->frameEnded = 0;
zcs->pledgedSrcSize = pledgedSrcSize;
zcs->inputProcessed = 0;
return 0; /* ready to go */
}

@@ -2948,6 +2965,12 @@ size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t di
return ZSTD_initCStream_advanced(zcs, dict, dictSize, params, 0);
}

size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize)
{
ZSTD_parameters const params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0);
return ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize);
}

size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
{
return ZSTD_initCStream_usingDict(zcs, NULL, 0, compressionLevel);
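A hedged caller-side sketch of the new pledgedSrcSize bookkeeping: `ZSTD_endStream()` (patched below) now fails with `srcSize_wrong` when the bytes actually consumed differ from the pledge.

#include <zstd.h>

static size_t stream_with_pledge(ZSTD_CStream* zcs,
                                 ZSTD_outBuffer* out, ZSTD_inBuffer* in,
                                 unsigned long long totalSize)
{
    size_t r = ZSTD_initCStream_srcSize(zcs, 5 /* level */, totalSize);
    if (ZSTD_isError(r)) return r;
    while (in->pos < in->size) {
        r = ZSTD_compressStream(zcs, out, in);   /* inputProcessed accumulates here */
        if (ZSTD_isError(r)) return r;
    }
    return ZSTD_endStream(zcs, out);  /* srcSize_wrong if the pledge was not honored */
}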
@@ -3044,6 +3067,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,

*srcSizePtr = ip - istart;
*dstCapacityPtr = op - ostart;
zcs->inputProcessed += *srcSizePtr;
if (zcs->frameEnded) return 0;
{ size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
if (hintInSize==0) hintInSize = zcs->blockSize;
@@ -3088,6 +3112,9 @@ size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
BYTE* const oend = (BYTE*)(output->dst) + output->size;
BYTE* op = ostart;

if ((zcs->pledgedSrcSize) && (zcs->inputProcessed != zcs->pledgedSrcSize))
return ERROR(srcSize_wrong); /* pledgedSrcSize not respected */

if (zcs->stage != zcss_final) {
/* flush whatever remains */
size_t srcSize = 0;

+412
-100
@@ -56,6 +56,15 @@
#endif


#if defined(_MSC_VER)
# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
# define ZSTD_PREFETCH(ptr) _mm_prefetch((const char*)ptr, _MM_HINT_T0)
#elif defined(__GNUC__)
# define ZSTD_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 0)
#else
# define ZSTD_PREFETCH(ptr) /* disabled */
#endif
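A hypothetical stand-alone model of how such a portable prefetch macro gets used (the real consumer is the long-offset decode loop further below): issue the prefetch for data needed a few iterations ahead, so the random access has time to land in cache.

#include <stddef.h>

#if defined(_MSC_VER)
#  include <mmintrin.h>
#  define PREFETCH(p) _mm_prefetch((const char*)(p), _MM_HINT_T0)
#elif defined(__GNUC__)
#  define PREFETCH(p) __builtin_prefetch((p), 0 /* read */, 0 /* low temporal locality */)
#else
#  define PREFETCH(p) (void)(p)   /* graceful no-op on unknown compilers */
#endif

long sum_with_prefetch(const long* table, const size_t* idx, size_t n)
{
    long total = 0;
    size_t i;
    for (i = 0; i < n; i++) {
        if (i + 4 < n) PREFETCH(&table[idx[i + 4]]);  /* hide latency of a random access */
        total += table[idx[i]];
    }
    return total;
}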

/*-*************************************
* Macros
***************************************/
@@ -104,7 +113,6 @@ struct ZSTD_DCtx_s
U32 dictID;
const BYTE* litPtr;
ZSTD_customMem customMem;
size_t litBufSize;
size_t litSize;
size_t rleSize;
BYTE litBuffer[ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH];
@@ -193,7 +201,24 @@ static void ZSTD_refDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
* Decompression section
***************************************************************/

/* See compression format details in : doc/zstd_compression_format.md */
/*! ZSTD_isFrame() :
* Tells if the content of `buffer` starts with a valid Frame Identifier.
* Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
* Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
* Note 3 : Skippable Frame Identifiers are considered valid. */
unsigned ZSTD_isFrame(const void* buffer, size_t size)
{
if (size < 4) return 0;
{ U32 const magic = MEM_readLE32(buffer);
if (magic == ZSTD_MAGICNUMBER) return 1;
if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
}
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
if (ZSTD_isLegacy(buffer, size)) return 1;
#endif
return 0;
}
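A short usage sketch for the new probe (hedged; `buf` and `len` are illustrative):

static const char* classify(const void* buf, size_t len)
{
    if (ZSTD_isFrame(buf, len))
        return "zstd frame (or skippable / legacy)";  /* safe to hand to the decoder */
    return "not a recognizable frame";                /* reject early, don't fail mid-decode */
}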


/** ZSTD_frameHeaderSize() :
* srcSize must be >= ZSTD_frameHeaderSize_prefix.
@@ -412,10 +437,10 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
return ERROR(corruption_detected);

dctx->litPtr = dctx->litBuffer;
dctx->litBufSize = ZSTD_BLOCKSIZE_ABSOLUTEMAX+WILDCOPY_OVERLENGTH;
dctx->litSize = litSize;
dctx->litEntropy = 1;
if (litEncType==set_compressed) dctx->HUFptr = dctx->hufTable;
memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
return litCSize + lhSize;
}

@@ -442,13 +467,12 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
memcpy(dctx->litBuffer, istart+lhSize, litSize);
dctx->litPtr = dctx->litBuffer;
dctx->litBufSize = ZSTD_BLOCKSIZE_ABSOLUTEMAX+8;
dctx->litSize = litSize;
memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
return lhSize+litSize;
}
/* direct reference into compressed stream */
dctx->litPtr = istart+lhSize;
dctx->litBufSize = srcSize-lhSize;
dctx->litSize = litSize;
return lhSize+litSize;
}
@@ -473,9 +497,8 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
break;
}
if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected);
memset(dctx->litBuffer, istart[lhSize], litSize);
memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
dctx->litPtr = dctx->litBuffer;
dctx->litBufSize = ZSTD_BLOCKSIZE_ABSOLUTEMAX+WILDCOPY_OVERLENGTH;
dctx->litSize = litSize;
return lhSize+1;
}
@@ -761,6 +784,7 @@ typedef struct {
size_t litLength;
size_t matchLength;
size_t offset;
const BYTE* match;
} seq_t;

typedef struct {
@@ -769,88 +793,16 @@ typedef struct {
FSE_DState_t stateOffb;
FSE_DState_t stateML;
size_t prevOffset[ZSTD_REP_NUM];
const BYTE* base;
size_t pos;
iPtrDiff gotoDict;
} seqState_t;


static seq_t ZSTD_decodeSequence(seqState_t* seqState)
{
seq_t seq;

U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */

U32 const llBits = LL_bits[llCode];
U32 const mlBits = ML_bits[mlCode];
U32 const ofBits = ofCode;
U32 const totalBits = llBits+mlBits+ofBits;

static const U32 LL_base[MaxLL+1] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
0x2000, 0x4000, 0x8000, 0x10000 };

static const U32 ML_base[MaxML+1] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };

static const U32 OF_base[MaxOff+1] = {
0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D,
0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD,
0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD };

/* sequence */
{ size_t offset;
if (!ofCode)
offset = 0;
else {
offset = OF_base[ofCode] + BIT_readBits(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
}

if (ofCode <= 1) {
offset += (llCode==0);
if (offset) {
size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset = temp;
} else {
offset = seqState->prevOffset[0];
}
} else {
seqState->prevOffset[2] = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset;
}
seq.offset = offset;
}

seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? BIT_readBits(&seqState->DStream, mlBits) : 0); /* <= 16 bits */
if (MEM_32bits() && (mlBits+llBits>24)) BIT_reloadDStream(&seqState->DStream);

seq.litLength = LL_base[llCode] + ((llCode>15) ? BIT_readBits(&seqState->DStream, llBits) : 0); /* <= 16 bits */
if (MEM_32bits() ||
(totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BIT_reloadDStream(&seqState->DStream);

/* ANS state update */
FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */

return seq;
}
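The `ofCode <= 1` branch above maintains a three-slot history of recent offsets: codes 1-3 select a previous offset (shifted by one when litLength is 0) instead of carrying a full value, and the chosen entry moves to the front. A stand-alone sketch with assumed values:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t rep[3] = { 8, 4, 2 };  /* assumed starting history */
    size_t offset = 2;            /* decoded "repeat code 2" (litLength > 0) */
    size_t temp = (offset == 3) ? rep[0] - 1 : rep[offset];
    temp += !temp;                /* 0 is invalid : force to 1, as above */
    if (offset != 1) rep[2] = rep[1];
    rep[1] = rep[0];
    rep[0] = temp;
    printf("%zu %zu %zu\n", rep[0], rep[1], rep[2]);  /* prints : 2 8 4 */
    return 0;
}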


FORCE_NOINLINE
size_t ZSTD_execSequenceLast7(BYTE* op,
BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit_w,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
{
BYTE* const oLitEnd = op + sequence.litLength;
@@ -862,7 +814,7 @@ size_t ZSTD_execSequenceLast7(BYTE* op,

/* check */
if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
if (iLitEnd > litLimit_w) return ERROR(corruption_detected); /* over-read beyond lit buffer */
if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
if (oLitEnd <= oend_w) return ERROR(GENERIC); /* Precondition */

/* copy literals */
@@ -894,10 +846,87 @@ size_t ZSTD_execSequenceLast7(BYTE* op,
}




static seq_t ZSTD_decodeSequence(seqState_t* seqState)
{
seq_t seq;

U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */

U32 const llBits = LL_bits[llCode];
U32 const mlBits = ML_bits[mlCode];
U32 const ofBits = ofCode;
U32 const totalBits = llBits+mlBits+ofBits;

static const U32 LL_base[MaxLL+1] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
0x2000, 0x4000, 0x8000, 0x10000 };

static const U32 ML_base[MaxML+1] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };

static const U32 OF_base[MaxOff+1] = {
0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D,
0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD,
0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD };

/* sequence */
{ size_t offset;
if (!ofCode)
offset = 0;
else {
offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
}

if (ofCode <= 1) {
offset += (llCode==0);
if (offset) {
size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset = temp;
} else {
offset = seqState->prevOffset[0];
}
} else {
seqState->prevOffset[2] = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset;
}
seq.offset = offset;
}

seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */
if (MEM_32bits() && (mlBits+llBits>24)) BIT_reloadDStream(&seqState->DStream);

seq.litLength = LL_base[llCode] + ((llCode>15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */
if (MEM_32bits() ||
(totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BIT_reloadDStream(&seqState->DStream);

/* ANS state update */
FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */

return seq;
}


FORCE_INLINE
size_t ZSTD_execSequence(BYTE* op,
BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit_w,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
{
BYTE* const oLitEnd = op + sequence.litLength;
@@ -909,8 +938,8 @@ size_t ZSTD_execSequence(BYTE* op,

/* check */
if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
if (iLitEnd > litLimit_w) return ERROR(corruption_detected); /* over-read beyond lit buffer */
if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit_w, base, vBase, dictEnd);
if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);

/* copy Literals */
ZSTD_copy8(op, *litPtr);
@@ -923,7 +952,7 @@ size_t ZSTD_execSequence(BYTE* op,
if (sequence.offset > (size_t)(oLitEnd - base)) {
/* offset beyond prefix */
if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
match = dictEnd - (base-match);
match += (dictEnd-base);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
@@ -934,13 +963,13 @@ size_t ZSTD_execSequence(BYTE* op,
op = oLitEnd + length1;
sequence.matchLength -= length1;
match = base;
if (op > oend_w) {
if (op > oend_w || sequence.matchLength < MINMATCH) {
U32 i;
for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
return sequenceLength;
}
} }
/* Requirement: op <= oend_w */
/* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */

/* match within prefix */
if (sequence.offset < 8) {
@@ -968,7 +997,7 @@ size_t ZSTD_execSequence(BYTE* op,
}
while (op < oMatchEnd) *op++ = *match++;
} else {
ZSTD_wildcopy(op, match, sequence.matchLength-8); /* works even if matchLength < 8 */
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
}
return sequenceLength;
}
@@ -985,7 +1014,6 @@ static size_t ZSTD_decompressSequences(
BYTE* const oend = ostart + maxDstSize;
BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr;
const BYTE* const litLimit_w = litPtr + dctx->litBufSize - WILDCOPY_OVERLENGTH;
const BYTE* const litEnd = litPtr + dctx->litSize;
const BYTE* const base = (const BYTE*) (dctx->base);
const BYTE* const vBase = (const BYTE*) (dctx->vBase);
@@ -1011,7 +1039,7 @@ static size_t ZSTD_decompressSequences(
for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) {
nbSeq--;
{ seq_t const sequence = ZSTD_decodeSequence(&seqState);
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litLimit_w, base, vBase, dictEnd);
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
op += oneSeqSize;
} }
@@ -1033,14 +1061,247 @@ static size_t ZSTD_decompressSequences(
}


static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
static seq_t ZSTD_decodeSequenceLong(seqState_t* seqState)
{
if (dst != dctx->previousDstEnd) { /* not contiguous */
dctx->dictEnd = dctx->previousDstEnd;
dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
dctx->base = dst;
dctx->previousDstEnd = dst;
seq_t seq;

U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */

U32 const llBits = LL_bits[llCode];
U32 const mlBits = ML_bits[mlCode];
U32 const ofBits = ofCode;
U32 const totalBits = llBits+mlBits+ofBits;

static const U32 LL_base[MaxLL+1] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
0x2000, 0x4000, 0x8000, 0x10000 };

static const U32 ML_base[MaxML+1] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };

static const U32 OF_base[MaxOff+1] = {
0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D,
0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD,
0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD };

/* sequence */
{ size_t offset;
if (!ofCode)
offset = 0;
else {
offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
}

if (ofCode <= 1) {
offset += (llCode==0);
if (offset) {
size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset = temp;
} else {
offset = seqState->prevOffset[0];
}
} else {
seqState->prevOffset[2] = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset;
}
seq.offset = offset;
}

seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */
if (MEM_32bits() && (mlBits+llBits>24)) BIT_reloadDStream(&seqState->DStream);

seq.litLength = LL_base[llCode] + ((llCode>15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */
if (MEM_32bits() ||
(totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BIT_reloadDStream(&seqState->DStream);

{ size_t const pos = seqState->pos + seq.litLength;
seq.match = seqState->base + pos - seq.offset; /* single memory segment */
if (seq.offset > pos) seq.match += seqState->gotoDict; /* separate memory segment */
seqState->pos = pos + seq.matchLength;
}

/* ANS state update */
FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */

return seq;
}
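The `gotoDict` delta above lets match addresses be computed as if prefix and dictionary were one contiguous segment, then rebased in a single conditional add. A toy model under assumed layouts (it leans on the same out-of-segment pointer arithmetic the library itself relies on):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    char dict[16]  = "0123456789abcdef";  /* assumed dictionary segment */
    char prefix[8] = "ABCDEFGH";          /* assumed decoded-prefix segment */
    const char* base = prefix;
    ptrdiff_t const gotoDict = (dict + sizeof(dict)) - base;  /* dictEnd - base */
    size_t const pos = 2, offset = 5;     /* match begins 3 bytes before the prefix */
    const char* match = base + pos - offset;   /* single-segment arithmetic */
    if (offset > pos) match += gotoDict;       /* rebase into the dictionary */
    printf("%c\n", *match);               /* prints 'd' : 3rd byte before dictEnd */
    return 0;
}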

FORCE_INLINE
size_t ZSTD_execSequenceLong(BYTE* op,
BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
{
BYTE* const oLitEnd = op + sequence.litLength;
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
const BYTE* match = sequence.match;

/* check */
#if 1
if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
#endif

/* copy Literals */
ZSTD_copy8(op, *litPtr);
if (sequence.litLength > 8)
ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
op = oLitEnd;
*litPtr = iLitEnd; /* update for next sequence */

/* copy Match */
#if 1
if (sequence.offset > (size_t)(oLitEnd - base)) {
/* offset beyond prefix */
if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
{ size_t const length1 = dictEnd - match;
memmove(oLitEnd, match, length1);
op = oLitEnd + length1;
sequence.matchLength -= length1;
match = base;
if (op > oend_w || sequence.matchLength < MINMATCH) {
U32 i;
for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
return sequenceLength;
}
} }
/* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
#endif

/* match within prefix */
if (sequence.offset < 8) {
/* close range match, overlap */
static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
int const sub2 = dec64table[sequence.offset];
op[0] = match[0];
op[1] = match[1];
op[2] = match[2];
op[3] = match[3];
match += dec32table[sequence.offset];
ZSTD_copy4(op+4, match);
match -= sub2;
} else {
ZSTD_copy8(op, match);
}
op += 8; match += 8;

if (oMatchEnd > oend-(16-MINMATCH)) {
if (op < oend_w) {
ZSTD_wildcopy(op, match, oend_w - op);
match += oend_w - op;
op = oend_w;
}
while (op < oMatchEnd) *op++ = *match++;
} else {
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
}
return sequenceLength;
}

static size_t ZSTD_decompressSequencesLong(
ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize)
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE* const)dst;
BYTE* const oend = ostart + maxDstSize;
BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr;
const BYTE* const litEnd = litPtr + dctx->litSize;
const BYTE* const base = (const BYTE*) (dctx->base);
const BYTE* const vBase = (const BYTE*) (dctx->vBase);
const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
int nbSeq;

/* Build Decoding Tables */
{ size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
if (ZSTD_isError(seqHSize)) return seqHSize;
ip += seqHSize;
}

/* Regen sequences */
if (nbSeq) {
#define STORED_SEQS 4
#define STOSEQ_MASK (STORED_SEQS-1)
#define ADVANCED_SEQS 4
seq_t sequences[STORED_SEQS];
int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
seqState_t seqState;
int seqNb;
dctx->fseEntropy = 1;
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->rep[i]; }
seqState.base = base;
seqState.pos = (size_t)(op-base);
seqState.gotoDict = (iPtrDiff)(dictEnd - base);
CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);

/* prepare in advance */
for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && seqNb<seqAdvance; seqNb++) {
sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState);
}
if (seqNb<seqAdvance) return ERROR(corruption_detected);

/* decode and decompress */
for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && seqNb<nbSeq ; seqNb++) {
seq_t const sequence = ZSTD_decodeSequenceLong(&seqState);
size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
ZSTD_PREFETCH(sequence.match);
sequences[seqNb&STOSEQ_MASK] = sequence;
op += oneSeqSize;
}
if (seqNb<nbSeq) return ERROR(corruption_detected);

/* finish queue */
seqNb -= seqAdvance;
for ( ; seqNb<nbSeq ; seqNb++) {
size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb&STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
op += oneSeqSize;
}

/* save reps for next block */
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->rep[i] = (U32)(seqState.prevOffset[i]); }
}

/* last literal segment */
{ size_t const lastLLSize = litEnd - litPtr;
if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
}

return op-ostart;
}
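The function above is a four-deep software pipeline: sequence N is decoded and its match prefetched, while sequence N-4, whose match data has had time to arrive in cache, is executed. A miniature model of the same queue discipline (names and callbacks are illustrative, not the library's):

#define RING 4
typedef struct { const char* match; size_t len; } MiniSeq;

static void pipeline(MiniSeq (*decode_next)(void), void (*execute)(MiniSeq), int nbSeq)
{
    MiniSeq ring[RING];
    int const advance = nbSeq < RING ? nbSeq : RING;
    int n;
    for (n = 0; n < advance; n++) ring[n] = decode_next();   /* prime the queue */
    for (; n < nbSeq; n++) {
        MiniSeq const s = decode_next();
        execute(ring[(n - RING) & (RING - 1)]);              /* run 4 sequences behind */
        /* a real implementation prefetches s.match here */
        ring[n & (RING - 1)] = s;
    }
    for (n -= advance; n < nbSeq; n++)                       /* drain the queue */
        execute(ring[n & (RING - 1)]);
}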


@@ -1058,10 +1319,21 @@ static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
ip += litCSize;
srcSize -= litCSize;
}
if (dctx->fParams.windowSize > (1<<23)) return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize);
return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
}


static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
{
if (dst != dctx->previousDstEnd) { /* not contiguous */
dctx->dictEnd = dctx->previousDstEnd;
dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
dctx->base = dst;
dctx->previousDstEnd = dst;
}
}

size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
@@ -1506,6 +1778,45 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
return sizeof(*ddict) + sizeof(ddict->refContext) + ddict->dictSize;
}

/*! ZSTD_getDictID_fromDict() :
* Provides the dictID stored within dictionary.
* if @return == 0, the dictionary is not conformant with Zstandard specification.
* It can still be loaded, but as a content-only dictionary. */
unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
{
if (dictSize < 8) return 0;
if (MEM_readLE32(dict) != ZSTD_DICT_MAGIC) return 0;
return MEM_readLE32((const char*)dict + 4);
}

/*! ZSTD_getDictID_fromDDict() :
* Provides the dictID of the dictionary loaded into `ddict`.
* If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
* Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
{
if (ddict==NULL) return 0;
return ZSTD_getDictID_fromDict(ddict->dict, ddict->dictSize);
}

/*! ZSTD_getDictID_fromFrame() :
* Provides the dictID required to decompress the frame stored within `src`.
* If @return == 0, the dictID could not be decoded.
* This could be for one of the following reasons :
* - The frame does not require a dictionary to be decoded (most common case).
* - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
* Note : this use case also happens when using a non-conformant dictionary.
* - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
* - This is not a Zstandard frame.
* When identifying the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code. */
unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
{
ZSTD_frameParams zfp = { 0 , 0 , 0 , 0 };
size_t const hError = ZSTD_getFrameParams(&zfp, src, srcSize);
if (ZSTD_isError(hError)) return 0;
return zfp.dictID;
}
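A short sketch of how these helpers combine (hedged; only the functions defined above are assumed): check whether a candidate dictionary matches the frame before attempting decompression.

#include <stddef.h>

static int dict_matches_frame(const void* frame, size_t frameSize,
                              const void* dict, size_t dictSize)
{
    unsigned const frameID = ZSTD_getDictID_fromFrame(frame, frameSize);
    if (frameID == 0) return 1;  /* no declared dictID : nothing to compare against */
    return frameID == ZSTD_getDictID_fromDict(dict, dictSize);
}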


/*! ZSTD_decompress_usingDDict() :
* Decompression using a pre-digested Dictionary
@@ -1687,7 +1998,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
switch(zds->stage)
{
case zdss_init :
return ERROR(init_missing);
ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */
/* fall-through */

case zdss_loadHeader :
{ size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize);

@@ -147,7 +147,7 @@ static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
/*! ZSTD_wildcopy() :
* custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
#define WILDCOPY_OVERLENGTH 8
MEM_STATIC void ZSTD_wildcopy(void* dst, const void* src, size_t length)
MEM_STATIC void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
{
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
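The size_t-to-ptrdiff_t change (repeated for every wildcopy variant below) matters because callers pass `matchLength-8`, which can be negative when matchLength < 8: as size_t it wraps to a huge count, while as ptrdiff_t the copy loop exits after its first 8-byte step. A minimal model of the pattern, under that assumption:

#include <string.h>
#include <stddef.h>

static void wildcopy(void* dst, const void* src, ptrdiff_t length)
{
    const char* ip = (const char*)src;
    char* op = (char*)dst;
    char* const oend = op + length;   /* oend < op when length is negative */
    do { memcpy(op, ip, 8); op += 8; ip += 8; } while (op < oend);
    /* may copy up to 7 bytes too many (8 if length <= 0) : callers reserve margin */
}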
@@ -222,6 +222,7 @@ typedef struct {
U32 log2litSum;
U32 log2offCodeSum;
U32 factor;
U32 staticPrices;
U32 cachedPrice;
U32 cachedLitLength;
const BYTE* cachedLiterals;
@@ -234,7 +235,9 @@ int ZSTD_isSkipFrame(ZSTD_DCtx* dctx);
/* custom memory allocation functions */
void* ZSTD_defaultAllocFunction(void* opaque, size_t size);
void ZSTD_defaultFreeFunction(void* opaque, void* address);
#ifndef ZSTD_DLL_IMPORT
static const ZSTD_customMem defaultCustomMem = { ZSTD_defaultAllocFunction, ZSTD_defaultFreeFunction, NULL };
#endif
void* ZSTD_malloc(size_t size, ZSTD_customMem customMem);
void ZSTD_free(void* ptr, ZSTD_customMem customMem);


+39
-20
@@ -15,8 +15,9 @@
#define ZSTD_OPT_H_91842398743


#define ZSTD_FREQ_DIV 5
#define ZSTD_MAX_PRICE (1<<30)
#define ZSTD_LITFREQ_ADD 2
#define ZSTD_FREQ_DIV 4
#define ZSTD_MAX_PRICE (1<<30)

/*-*************************************
* Price functions for optimal parser
@@ -31,22 +32,32 @@ FORCE_INLINE void ZSTD_setLog2Prices(seqStore_t* ssPtr)
}


MEM_STATIC void ZSTD_rescaleFreqs(seqStore_t* ssPtr)
MEM_STATIC void ZSTD_rescaleFreqs(seqStore_t* ssPtr, const BYTE* src, size_t srcSize)
{
unsigned u;

ssPtr->cachedLiterals = NULL;
ssPtr->cachedPrice = ssPtr->cachedLitLength = 0;
ssPtr->staticPrices = 0;

if (ssPtr->litLengthSum == 0) {
ssPtr->litSum = (2<<Litbits);
if (srcSize <= 1024) ssPtr->staticPrices = 1;

for (u=0; u<=MaxLit; u++)
ssPtr->litFreq[u] = 0;
for (u=0; u<srcSize; u++)
ssPtr->litFreq[src[u]]++;

ssPtr->litSum = 0;
ssPtr->litLengthSum = MaxLL+1;
ssPtr->matchLengthSum = MaxML+1;
ssPtr->offCodeSum = (MaxOff+1);
ssPtr->matchSum = (2<<Litbits);
ssPtr->matchSum = (ZSTD_LITFREQ_ADD<<Litbits);

for (u=0; u<=MaxLit; u++)
ssPtr->litFreq[u] = 2;
for (u=0; u<=MaxLit; u++) {
ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u]>>ZSTD_FREQ_DIV);
ssPtr->litSum += ssPtr->litFreq[u];
}
for (u=0; u<=MaxLL; u++)
ssPtr->litLengthFreq[u] = 1;
for (u=0; u<=MaxML; u++)
@@ -61,11 +72,11 @@ MEM_STATIC void ZSTD_rescaleFreqs(seqStore_t* ssPtr)
ssPtr->litSum = 0;

for (u=0; u<=MaxLit; u++) {
ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u]>>ZSTD_FREQ_DIV);
ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u]>>(ZSTD_FREQ_DIV+1));
ssPtr->litSum += ssPtr->litFreq[u];
}
for (u=0; u<=MaxLL; u++) {
ssPtr->litLengthFreq[u] = 1 + (ssPtr->litLengthFreq[u]>>ZSTD_FREQ_DIV);
ssPtr->litLengthFreq[u] = 1 + (ssPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1));
ssPtr->litLengthSum += ssPtr->litLengthFreq[u];
}
for (u=0; u<=MaxML; u++) {
@@ -73,6 +84,7 @@ MEM_STATIC void ZSTD_rescaleFreqs(seqStore_t* ssPtr)
ssPtr->matchLengthSum += ssPtr->matchLengthFreq[u];
ssPtr->matchSum += ssPtr->matchLengthFreq[u] * (u + 3);
}
ssPtr->matchSum *= ZSTD_LITFREQ_ADD;
for (u=0; u<=MaxOff; u++) {
ssPtr->offCodeFreq[u] = 1 + (ssPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV);
ssPtr->offCodeSum += ssPtr->offCodeFreq[u];
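The effect of the rescale (now with `ZSTD_FREQ_DIV+1` for the literal statistics) is an exponential decay: between blocks each count is shrunk toward 1 so old statistics fade instead of dominating, yet no symbol's frequency ever reaches 0. A toy demonstration:

#include <stdio.h>

#define FREQ_DIV 4   /* mirrors the new ZSTD_FREQ_DIV */

int main(void)
{
    unsigned freq = 1000;                /* count accumulated over previous blocks */
    freq = 1 + (freq >> (FREQ_DIV + 1)); /* 1000 -> 32 : history damped roughly 32x */
    printf("%u\n", freq);                /* stays >= 1, so no symbol becomes "impossible" */
    return 0;
}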
@@ -87,6 +99,9 @@ FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t* ssPtr, U32 litLength, const BY
{
U32 price, u;

if (ssPtr->staticPrices)
return ZSTD_highbit32((U32)litLength+1) + (litLength*6);

if (litLength == 0)
return ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[0]+1);

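The non-static price is an approximate bit cost, log2(sum) - log2(freq): frequent symbols are cheap, rare ones expensive. A small worked example of the same formula (values assumed):

#include <stdio.h>

static unsigned highbit32(unsigned v) { unsigned r = 0; while (v >>= 1) r++; return r; }

int main(void)
{
    unsigned const litLengthFreq0 = 63;    /* assumed frequency of code 0 */
    unsigned const litLengthSum   = 1024;  /* assumed total */
    unsigned const price = highbit32(litLengthSum) - highbit32(litLengthFreq0 + 1);
    printf("%u bits\n", price);            /* 10 - 6 = 4 : a common code is cheap */
    return 0;
}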
@@ -124,9 +139,13 @@ FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t* ssPtr, U32 litLength, const BY
FORCE_INLINE U32 ZSTD_getPrice(seqStore_t* seqStorePtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength, const int ultra)
{
/* offset */
U32 price;
BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1);
U32 price = offCode + seqStorePtr->log2offCodeSum - ZSTD_highbit32(seqStorePtr->offCodeFreq[offCode]+1);

if (seqStorePtr->staticPrices)
return ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + ZSTD_highbit32((U32)matchLength+1) + 16 + offCode;

price = offCode + seqStorePtr->log2offCodeSum - ZSTD_highbit32(seqStorePtr->offCodeFreq[offCode]+1);
if (!ultra && offCode >= 20) price += (offCode-19)*2;

/* match Length */
@@ -144,9 +163,9 @@ MEM_STATIC void ZSTD_updatePrice(seqStore_t* seqStorePtr, U32 litLength, const B
U32 u;

/* literals */
seqStorePtr->litSum += litLength;
seqStorePtr->litSum += litLength*ZSTD_LITFREQ_ADD;
for (u=0; u < litLength; u++)
seqStorePtr->litFreq[literals[u]]++;
seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;

/* literal Length */
{ const BYTE LL_deltaCode = 19;
@@ -401,7 +420,7 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,

/* init */
ctx->nextToUpdate3 = ctx->nextToUpdate;
ZSTD_rescaleFreqs(seqStorePtr);
ZSTD_rescaleFreqs(seqStorePtr, (const BYTE*)src, srcSize);
ip += (ip==prefixStart);
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=ctx->rep[i]; }

@@ -416,7 +435,7 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
/* check repCode */
{ U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor);
for (i=(ip == anchor); i<last_i; i++) {
const S32 repCur = ((i==ZSTD_REP_MOVE_OPT) && (ip==anchor)) ? (rep[0] - 1) : rep[i];
const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
if ( (repCur > 0) && (repCur < (S32)(ip-prefixStart))
&& (MEM_readMINMATCH(ip, minMatch) == MEM_readMINMATCH(ip - repCur, minMatch))) {
mlen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repCur, iend) + minMatch;
@@ -501,7 +520,7 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
best_mlen = minMatch;
{ U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
for (i=(opt[cur].mlen != 1); i<last_i; i++) { /* check rep */
const S32 repCur = ((i==ZSTD_REP_MOVE_OPT) && (opt[cur].mlen != 1)) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
if ( (repCur > 0) && (repCur < (S32)(inr-prefixStart))
&& (MEM_readMINMATCH(inr, minMatch) == MEM_readMINMATCH(inr - repCur, minMatch))) {
mlen = (U32)ZSTD_count(inr+minMatch, inr+minMatch - repCur, iend) + minMatch;
@@ -601,7 +620,7 @@ _storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
offset--;
} else {
if (offset != 0) {
best_off = ((offset==ZSTD_REP_MOVE_OPT) && (litLength==0)) ? (rep[0] - 1) : (rep[offset]);
best_off = (offset==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
if (offset != 1) rep[2] = rep[1];
rep[1] = rep[0];
rep[0] = best_off;
@@ -656,7 +675,7 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=ctx->rep[i]; }

ctx->nextToUpdate3 = ctx->nextToUpdate;
ZSTD_rescaleFreqs(seqStorePtr);
ZSTD_rescaleFreqs(seqStorePtr, (const BYTE*)src, srcSize);
ip += (ip==prefixStart);

/* Match Loop */
@@ -671,7 +690,7 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
/* check repCode */
{ U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor);
for (i = (ip==anchor); i<last_i; i++) {
const S32 repCur = ((i==ZSTD_REP_MOVE_OPT) && (ip==anchor)) ? (rep[0] - 1) : rep[i];
const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
const U32 repIndex = (U32)(current - repCur);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
@@ -767,7 +786,7 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
best_mlen = minMatch;
{ U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
for (i = (mlen != 1); i<last_i; i++) {
const S32 repCur = ((i==ZSTD_REP_MOVE_OPT) && (opt[cur].mlen != 1)) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
const U32 repIndex = (U32)(current+cur - repCur);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
@@ -873,7 +892,7 @@ _storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
offset--;
} else {
if (offset != 0) {
best_off = ((offset==ZSTD_REP_MOVE_OPT) && (litLength==0)) ? (rep[0] - 1) : (rep[offset]);
best_off = (offset==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
if (offset != 1) rep[2] = rep[1];
rep[1] = rep[0];
rep[0] = best_off;

+3
-3
@@ -1354,7 +1354,7 @@ static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }

#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }

static void ZSTD_wildcopy(void* dst, const void* src, size_t length)
static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
{
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
@@ -1803,7 +1803,7 @@ static size_t ZSTD_execSequence(BYTE* op,
} else { ZSTD_copy8(op, match); }
op += 8; match += 8;

if (endMatch > oend-12)
if (endMatch > oend-(16-MINMATCH))
{
if (op < oend-8)
{
@@ -1814,7 +1814,7 @@ static size_t ZSTD_execSequence(BYTE* op,
while (op<endMatch) *op++ = *match++;
}
else
ZSTD_wildcopy(op, match, sequence.matchLength-8); /* works even if matchLength < 8 */
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */

/* restore, in case of overlap */
if (overlapRisk) memcpy(endMatch, saved, qutt);

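Each legacy decoder below receives the same hardening pattern: the litBufSize field goes away, literal reads are bounded by what was actually decoded (litEnd = litPtr + litSize) rather than buffer capacity, and an 8-byte margin after the literals is pre-written so wildcopy overreads stay defined. A miniature model of that pattern (names and sizes are illustrative):

#include <string.h>
#include <stddef.h>

#define MARGIN 8

typedef struct { unsigned char buf[1024 + MARGIN]; size_t litSize; } Lits;

static void load_literals(Lits* l, const unsigned char* src, size_t n)
{
    /* caller guarantees n <= 1024, as block-size checks do above */
    memcpy(l->buf, src, n);
    l->litSize = n;
    memset(l->buf + n, 0, MARGIN);   /* the wildcopy margin stays defined */
}

static int lit_read_ok(const Lits* l, size_t want)
{
    return want <= l->litSize;       /* compare against litEnd, not buffer end */
}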
+8
-12
@@ -2808,7 +2808,7 @@ static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
|
||||
#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
|
||||
|
||||
/*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */
|
||||
static void ZSTD_wildcopy(void* dst, const void* src, size_t length)
|
||||
static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
|
||||
{
|
||||
const BYTE* ip = (const BYTE*)src;
|
||||
BYTE* op = (BYTE*)dst;
|
||||
@@ -2868,7 +2868,6 @@ struct ZSTD_DCtx_s
|
||||
blockType_t bType;
|
||||
U32 phase;
|
||||
const BYTE* litPtr;
|
||||
size_t litBufSize;
|
||||
size_t litSize;
|
||||
BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];
|
||||
}; /* typedef'd to ZSTD_Dctx within "zstd_static.h" */
|
||||
@@ -2940,8 +2939,8 @@ static size_t ZSTD_decodeLiteralsBlock(void* ctx,
|
||||
size_t litSize = BLOCKSIZE;
|
||||
const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);
|
||||
dctx->litPtr = dctx->litBuffer;
|
||||
dctx->litBufSize = BLOCKSIZE;
|
||||
dctx->litSize = litSize;
|
||||
memset(dctx->litBuffer + dctx->litSize, 0, 8);
|
||||
return readSize; /* works if it's an error too */
|
||||
}
|
||||
case IS_RAW:
|
||||
@@ -2952,13 +2951,12 @@ static size_t ZSTD_decodeLiteralsBlock(void* ctx,
|
||||
if (litSize > srcSize-3) return ERROR(corruption_detected);
|
||||
memcpy(dctx->litBuffer, istart, litSize);
|
||||
dctx->litPtr = dctx->litBuffer;
|
||||
dctx->litBufSize = BLOCKSIZE;
|
||||
dctx->litSize = litSize;
|
||||
memset(dctx->litBuffer + dctx->litSize, 0, 8);
|
||||
return litSize+3;
|
||||
}
|
||||
/* direct reference into compressed stream */
|
||||
dctx->litPtr = istart+3;
|
||||
dctx->litBufSize = srcSize-3;
|
||||
dctx->litSize = litSize;
|
||||
return litSize+3;
|
||||
}
|
||||
@@ -2966,9 +2964,8 @@ static size_t ZSTD_decodeLiteralsBlock(void* ctx,
|
||||
{
|
||||
const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
|
||||
if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
|
||||
memset(dctx->litBuffer, istart[3], litSize);
|
||||
memset(dctx->litBuffer, istart[3], litSize + 8);
|
||||
dctx->litPtr = dctx->litBuffer;
|
||||
dctx->litBufSize = BLOCKSIZE;
|
||||
dctx->litSize = litSize;
|
||||
return 4;
|
||||
}
|
||||
@@ -3175,7 +3172,7 @@ static size_t ZSTD_execSequence(BYTE* op,
|
||||
/* checks */
|
||||
if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */
|
||||
if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */
|
||||
if (litEnd > litLimit-8) return ERROR(corruption_detected); /* overRead beyond lit buffer */
|
||||
if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */
|
||||
|
||||
/* copy Literals */
|
||||
ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
|
||||
@@ -3209,7 +3206,7 @@ static size_t ZSTD_execSequence(BYTE* op,
|
||||
}
|
||||
op += 8; match += 8;
|
||||
|
||||
if (oMatchEnd > oend-12)
|
||||
if (oMatchEnd > oend-(16-MINMATCH))
|
||||
{
|
||||
if (op < oend_8)
|
||||
{
|
||||
@@ -3221,7 +3218,7 @@ static size_t ZSTD_execSequence(BYTE* op,
|
||||
}
|
||||
else
|
||||
{
|
||||
ZSTD_wildcopy(op, match, sequence.matchLength-8); /* works even if matchLength < 8 */
|
||||
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3241,7 +3238,6 @@ static size_t ZSTD_decompressSequences(
|
||||
BYTE* const oend = ostart + maxDstSize;
|
||||
size_t errorCode, dumpsLength;
|
||||
const BYTE* litPtr = dctx->litPtr;
|
||||
const BYTE* const litMax = litPtr + dctx->litBufSize;
|
||||
const BYTE* const litEnd = litPtr + dctx->litSize;
|
||||
int nbSeq;
|
||||
const BYTE* dumps;
|
||||
@@ -3277,7 +3273,7 @@ static size_t ZSTD_decompressSequences(
|
||||
size_t oneSeqSize;
|
||||
nbSeq--;
|
||||
ZSTD_decodeSequence(&sequence, &seqState);
|
||||
oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litMax, base, oend);
|
||||
oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);
|
||||
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
|
||||
op += oneSeqSize;
|
||||
}
|
||||
|
||||
+8
-12
@@ -2449,7 +2449,7 @@ static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
|
||||
#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
|
||||
|
||||
/*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */
|
||||
static void ZSTD_wildcopy(void* dst, const void* src, size_t length)
|
||||
static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
|
||||
{
|
||||
const BYTE* ip = (const BYTE*)src;
|
||||
BYTE* op = (BYTE*)dst;
|
||||
@@ -2509,7 +2509,6 @@ struct ZSTD_DCtx_s
|
||||
blockType_t bType;
|
||||
U32 phase;
|
||||
const BYTE* litPtr;
|
||||
size_t litBufSize;
|
||||
size_t litSize;
|
||||
BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];
|
||||
}; /* typedef'd to ZSTD_Dctx within "zstd_static.h" */
|
||||
@@ -2581,8 +2580,8 @@ static size_t ZSTD_decodeLiteralsBlock(void* ctx,
|
||||
size_t litSize = BLOCKSIZE;
|
||||
const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);
|
||||
dctx->litPtr = dctx->litBuffer;
|
||||
dctx->litBufSize = BLOCKSIZE;
|
||||
dctx->litSize = litSize;
|
||||
memset(dctx->litBuffer + dctx->litSize, 0, 8);
|
||||
return readSize; /* works if it's an error too */
|
||||
}
|
||||
case IS_RAW:
|
||||
@@ -2593,13 +2592,12 @@ static size_t ZSTD_decodeLiteralsBlock(void* ctx,
|
||||
if (litSize > srcSize-3) return ERROR(corruption_detected);
|
||||
memcpy(dctx->litBuffer, istart, litSize);
|
||||
dctx->litPtr = dctx->litBuffer;
|
||||
dctx->litBufSize = BLOCKSIZE;
|
||||
dctx->litSize = litSize;
|
||||
memset(dctx->litBuffer + dctx->litSize, 0, 8);
|
||||
return litSize+3;
|
||||
}
|
||||
/* direct reference into compressed stream */
|
||||
dctx->litPtr = istart+3;
|
||||
dctx->litBufSize = srcSize-3;
|
||||
dctx->litSize = litSize;
|
||||
return litSize+3;
|
||||
}
|
||||
@@ -2607,9 +2605,8 @@ static size_t ZSTD_decodeLiteralsBlock(void* ctx,
|
||||
{
|
||||
const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
|
||||
if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
|
||||
memset(dctx->litBuffer, istart[3], litSize);
|
||||
memset(dctx->litBuffer, istart[3], litSize + 8);
|
||||
dctx->litPtr = dctx->litBuffer;
|
||||
dctx->litBufSize = BLOCKSIZE;
|
||||
dctx->litSize = litSize;
|
||||
return 4;
|
||||
}
|
||||
@@ -2816,7 +2813,7 @@ static size_t ZSTD_execSequence(BYTE* op,
|
||||
/* checks */
|
||||
if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */
|
||||
if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */
|
||||
if (litEnd > litLimit-8) return ERROR(corruption_detected); /* overRead beyond lit buffer */
|
||||
if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */
|
||||
|
||||
/* copy Literals */
|
||||
ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
|
||||
@@ -2850,7 +2847,7 @@ static size_t ZSTD_execSequence(BYTE* op,
|
||||
}
|
||||
op += 8; match += 8;
|
||||
|
||||
if (oMatchEnd > oend-12)
|
||||
if (oMatchEnd > oend-(16-MINMATCH))
|
||||
{
|
||||
if (op < oend_8)
|
||||
{
|
||||
@@ -2862,7 +2859,7 @@ static size_t ZSTD_execSequence(BYTE* op,
|
||||
}
|
||||
else
|
||||
{
|
||||
ZSTD_wildcopy(op, match, sequence.matchLength-8); /* works even if matchLength < 8 */
|
||||
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2882,7 +2879,6 @@ static size_t ZSTD_decompressSequences(
|
||||
BYTE* const oend = ostart + maxDstSize;
|
||||
size_t errorCode, dumpsLength;
|
||||
const BYTE* litPtr = dctx->litPtr;
|
||||
const BYTE* const litMax = litPtr + dctx->litBufSize;
|
||||
const BYTE* const litEnd = litPtr + dctx->litSize;
|
||||
int nbSeq;
|
||||
const BYTE* dumps;
|
||||
@@ -2918,7 +2914,7 @@ static size_t ZSTD_decompressSequences(
|
||||
size_t oneSeqSize;
|
||||
nbSeq--;
|
||||
ZSTD_decodeSequence(&sequence, &seqState);
|
||||
oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litMax, base, oend);
|
||||
oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);
|
||||
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
|
||||
op += oneSeqSize;
|
||||
}
|
||||
|
||||
+10
-14
@@ -487,7 +487,7 @@ static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
 #define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }

 /*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */
-static void ZSTD_wildcopy(void* dst, const void* src, size_t length)
+static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
 {
 const BYTE* ip = (const BYTE*)src;
 BYTE* op = (BYTE*)dst;

@@ -2706,7 +2706,6 @@ struct ZSTDv04_Dctx_s
 blockType_t bType;
 ZSTD_dStage stage;
 const BYTE* litPtr;
-size_t litBufSize;
 size_t litSize;
 BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];
 BYTE headerBuffer[ZSTD_frameHeaderSize_max];

@@ -2847,8 +2846,8 @@ static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
 size_t litSize = BLOCKSIZE;
 const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);
 dctx->litPtr = dctx->litBuffer;
-dctx->litBufSize = BLOCKSIZE+8;
 dctx->litSize = litSize;
+memset(dctx->litBuffer + dctx->litSize, 0, 8);
 return readSize; /* works if it's an error too */
 }
 case IS_RAW:

@@ -2859,22 +2858,20 @@ static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
 if (litSize > srcSize-3) return ERROR(corruption_detected);
 memcpy(dctx->litBuffer, istart, litSize);
 dctx->litPtr = dctx->litBuffer;
-dctx->litBufSize = BLOCKSIZE+8;
 dctx->litSize = litSize;
+memset(dctx->litBuffer + dctx->litSize, 0, 8);
 return litSize+3;
 }
 /* direct reference into compressed stream */
 dctx->litPtr = istart+3;
-dctx->litBufSize = srcSize-3;
 dctx->litSize = litSize;
 return litSize+3; }
 case IS_RLE:
 {
 const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
 if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
-memset(dctx->litBuffer, istart[3], litSize);
+memset(dctx->litBuffer, istart[3], litSize + 8);
 dctx->litPtr = dctx->litBuffer;
-dctx->litBufSize = BLOCKSIZE+8;
 dctx->litSize = litSize;
 return 4;
 }

@@ -3069,7 +3066,7 @@ static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)

 static size_t ZSTD_execSequence(BYTE* op,
 BYTE* const oend, seq_t sequence,
-const BYTE** litPtr, const BYTE* const litLimit_8,
+const BYTE** litPtr, const BYTE* const litLimit,
 const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
 {
 static const int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */

@@ -3084,7 +3081,7 @@ static size_t ZSTD_execSequence(BYTE* op,
 /* check */
 if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */
 if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */
-if (litEnd > litLimit_8) return ERROR(corruption_detected); /* risk read beyond lit buffer */
+if (litEnd > litLimit) return ERROR(corruption_detected); /* risk read beyond lit buffer */

 /* copy Literals */
 ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */

@@ -3110,7 +3107,7 @@ static size_t ZSTD_execSequence(BYTE* op,
 op = oLitEnd + length1;
 sequence.matchLength -= length1;
 match = base;
-if (op > oend_8) {
+if (op > oend_8 || sequence.matchLength < MINMATCH) {
 while (op < oMatchEnd) *op++ = *match++;
 return sequenceLength;
 }

@@ -3137,7 +3134,7 @@ static size_t ZSTD_execSequence(BYTE* op,
 }
 op += 8; match += 8;

-if (oMatchEnd > oend-12)
+if (oMatchEnd > oend-(16-MINMATCH))
 {
 if (op < oend_8)
 {

@@ -3149,7 +3146,7 @@ static size_t ZSTD_execSequence(BYTE* op,
 }
 else
 {
-ZSTD_wildcopy(op, match, sequence.matchLength-8); /* works even if matchLength < 8 */
+ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
 }
 return sequenceLength;
 }

@@ -3167,7 +3164,6 @@ static size_t ZSTD_decompressSequences(
 BYTE* const oend = ostart + maxDstSize;
 size_t errorCode, dumpsLength;
 const BYTE* litPtr = dctx->litPtr;
-const BYTE* const litLimit_8 = litPtr + dctx->litBufSize - 8;
 const BYTE* const litEnd = litPtr + dctx->litSize;
 int nbSeq;
 const BYTE* dumps;

@@ -3206,7 +3202,7 @@ static size_t ZSTD_decompressSequences(
 size_t oneSeqSize;
 nbSeq--;
 ZSTD_decodeSequence(&sequence, &seqState);
-oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litLimit_8, base, vBase, dictEnd);
+oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
 if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
 op += oneSeqSize;
 }
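Beyond the signed length, every literals path above follows one pattern: the slack-based `litBufSize` field (and the `litLimit_8` bound derived from it) goes away, `execSequence` validates against the exact end of the decoded literals, and the buffer is padded 8 bytes past `litSize` so wildcopy's over-read stays inside the allocation. A minimal sketch of that invariant; the struct and function names are illustrative, and the BLOCKSIZE value is assumed from zstd's 128 KB block limit:

```
#include <string.h>

#define BLOCKSIZE (128 * 1024)  /* assumed block limit */
#define WILDCOPY_OVERLENGTH 8

typedef struct {
	unsigned char litBuffer[BLOCKSIZE + WILDCOPY_OVERLENGTH];
	const unsigned char *litPtr;
	size_t litSize;
} dctx_sketch;

/* Every path that fills litBuffer ends like this: record the exact
 * size, then zero the overlength margin so an 8-byte over-read past
 * litPtr + litSize is both in-bounds and deterministic. */
static void set_literals(dctx_sketch *d, const void *src, size_t litSize)
{
	memcpy(d->litBuffer, src, litSize);
	d->litPtr = d->litBuffer;
	d->litSize = litSize;
	memset(d->litBuffer + litSize, 0, WILDCOPY_OVERLENGTH);
}

static dctx_sketch dctx;

int main(void)
{
	set_literals(&dctx, "hello", 5);
	return dctx.litPtr[dctx.litSize] == 0 ? 0 : 1;
}
```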
+12
-16
@@ -509,7 +509,7 @@ static void ZSTDv05_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }

 /*! ZSTDv05_wildcopy() :
 * custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
-MEM_STATIC void ZSTDv05_wildcopy(void* dst, const void* src, size_t length)
+MEM_STATIC void ZSTDv05_wildcopy(void* dst, const void* src, ptrdiff_t length)
 {
 const BYTE* ip = (const BYTE*)src;
 BYTE* op = (BYTE*)dst;

@@ -2731,7 +2731,6 @@ struct ZSTDv05_DCtx_s
 ZSTDv05_dStage stage;
 U32 flagStaticTables;
 const BYTE* litPtr;
-size_t litBufSize;
 size_t litSize;
 BYTE litBuffer[BLOCKSIZE + WILDCOPY_OVERLENGTH];
 BYTE headerBuffer[ZSTDv05_frameHeaderSize_max];

@@ -2978,8 +2977,8 @@ size_t ZSTDv05_decodeLiteralsBlock(ZSTDv05_DCtx* dctx,
 return ERROR(corruption_detected);

 dctx->litPtr = dctx->litBuffer;
-dctx->litBufSize = BLOCKSIZE+8;
 dctx->litSize = litSize;
+memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
 return litCSize + lhSize;
 }
 case IS_PCH:

@@ -2996,14 +2995,14 @@ size_t ZSTDv05_decodeLiteralsBlock(ZSTDv05_DCtx* dctx,
 lhSize=3;
 litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2);
 litCSize = ((istart[1] & 3) << 8) + istart[2];
-if (litCSize + litSize > srcSize) return ERROR(corruption_detected);
+if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);

 errorCode = HUFv05_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTableX4);
 if (HUFv05_isError(errorCode)) return ERROR(corruption_detected);

 dctx->litPtr = dctx->litBuffer;
-dctx->litBufSize = BLOCKSIZE+WILDCOPY_OVERLENGTH;
 dctx->litSize = litSize;
+memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
 return litCSize + lhSize;
 }
 case IS_RAW:

@@ -3028,13 +3027,12 @@ size_t ZSTDv05_decodeLiteralsBlock(ZSTDv05_DCtx* dctx,
 if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
 memcpy(dctx->litBuffer, istart+lhSize, litSize);
 dctx->litPtr = dctx->litBuffer;
-dctx->litBufSize = BLOCKSIZE+8;
 dctx->litSize = litSize;
+memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
 return lhSize+litSize;
 }
 /* direct reference into compressed stream */
 dctx->litPtr = istart+lhSize;
-dctx->litBufSize = srcSize-lhSize;
 dctx->litSize = litSize;
 return lhSize+litSize;
 }

@@ -3057,9 +3055,8 @@ size_t ZSTDv05_decodeLiteralsBlock(ZSTDv05_DCtx* dctx,
 break;
 }
 if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
-memset(dctx->litBuffer, istart[lhSize], litSize);
+memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
 dctx->litPtr = dctx->litBuffer;
-dctx->litBufSize = BLOCKSIZE+WILDCOPY_OVERLENGTH;
 dctx->litSize = litSize;
 return lhSize+1;
 }

@@ -3289,7 +3286,7 @@ static void ZSTDv05_decodeSequence(seq_t* seq, seqState_t* seqState)

 static size_t ZSTDv05_execSequence(BYTE* op,
 BYTE* const oend, seq_t sequence,
-const BYTE** litPtr, const BYTE* const litLimit_8,
+const BYTE** litPtr, const BYTE* const litLimit,
 const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
 {
 static const int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */

@@ -3304,7 +3301,7 @@ static size_t ZSTDv05_execSequence(BYTE* op,
 /* check */
 if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */
 if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */
-if (litEnd > litLimit_8) return ERROR(corruption_detected); /* risk read beyond lit buffer */
+if (litEnd > litLimit) return ERROR(corruption_detected); /* risk read beyond lit buffer */

 /* copy Literals */
 ZSTDv05_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */

@@ -3328,7 +3325,7 @@ static size_t ZSTDv05_execSequence(BYTE* op,
 op = oLitEnd + length1;
 sequence.matchLength -= length1;
 match = base;
-if (op > oend_8) {
+if (op > oend_8 || sequence.matchLength < MINMATCH) {
 while (op < oMatchEnd) *op++ = *match++;
 return sequenceLength;
 }

@@ -3351,7 +3348,7 @@ static size_t ZSTDv05_execSequence(BYTE* op,
 }
 op += 8; match += 8;

-if (oMatchEnd > oend-12) {
+if (oMatchEnd > oend-(16-MINMATCH)) {
 if (op < oend_8) {
 ZSTDv05_wildcopy(op, match, oend_8 - op);
 match += oend_8 - op;

@@ -3360,7 +3357,7 @@ static size_t ZSTDv05_execSequence(BYTE* op,
 while (op < oMatchEnd)
 *op++ = *match++;
 } else {
-ZSTDv05_wildcopy(op, match, sequence.matchLength-8); /* works even if matchLength < 8 */
+ZSTDv05_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
 }
 return sequenceLength;
 }

@@ -3378,7 +3375,6 @@ static size_t ZSTDv05_decompressSequences(
 BYTE* const oend = ostart + maxDstSize;
 size_t errorCode, dumpsLength;
 const BYTE* litPtr = dctx->litPtr;
-const BYTE* const litLimit_8 = litPtr + dctx->litBufSize - 8;
 const BYTE* const litEnd = litPtr + dctx->litSize;
 int nbSeq;
 const BYTE* dumps;

@@ -3416,7 +3412,7 @@ static size_t ZSTDv05_decompressSequences(
 size_t oneSeqSize;
 nbSeq--;
 ZSTDv05_decodeSequence(&sequence, &seqState);
-oneSeqSize = ZSTDv05_execSequence(op, oend, sequence, &litPtr, litLimit_8, base, vBase, dictEnd);
+oneSeqSize = ZSTDv05_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
 if (ZSTDv05_isError(oneSeqSize)) return oneSeqSize;
 op += oneSeqSize;
 }
+11
-15
@@ -537,7 +537,7 @@ static void ZSTDv06_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
 /*! ZSTDv06_wildcopy() :
 * custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
 #define WILDCOPY_OVERLENGTH 8
-MEM_STATIC void ZSTDv06_wildcopy(void* dst, const void* src, size_t length)
+MEM_STATIC void ZSTDv06_wildcopy(void* dst, const void* src, ptrdiff_t length)
 {
 const BYTE* ip = (const BYTE*)src;
 BYTE* op = (BYTE*)dst;

@@ -2893,7 +2893,6 @@ struct ZSTDv06_DCtx_s
 ZSTDv06_dStage stage;
 U32 flagRepeatTable;
 const BYTE* litPtr;
-size_t litBufSize;
 size_t litSize;
 BYTE litBuffer[ZSTDv06_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
 BYTE headerBuffer[ZSTDv06_FRAMEHEADERSIZE_MAX];

@@ -3170,8 +3169,8 @@ size_t ZSTDv06_decodeLiteralsBlock(ZSTDv06_DCtx* dctx,
 return ERROR(corruption_detected);

 dctx->litPtr = dctx->litBuffer;
-dctx->litBufSize = ZSTDv06_BLOCKSIZE_MAX+8;
 dctx->litSize = litSize;
+memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
 return litCSize + lhSize;
 }
 case IS_PCH:

@@ -3186,14 +3185,14 @@ size_t ZSTDv06_decodeLiteralsBlock(ZSTDv06_DCtx* dctx,
 lhSize=3;
 litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2);
 litCSize = ((istart[1] & 3) << 8) + istart[2];
-if (litCSize + litSize > srcSize) return ERROR(corruption_detected);
+if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);

 { size_t const errorCode = HUFv06_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTableX4);
 if (HUFv06_isError(errorCode)) return ERROR(corruption_detected);
 }
 dctx->litPtr = dctx->litBuffer;
-dctx->litBufSize = ZSTDv06_BLOCKSIZE_MAX+WILDCOPY_OVERLENGTH;
 dctx->litSize = litSize;
+memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
 return litCSize + lhSize;
 }
 case IS_RAW:

@@ -3217,13 +3216,12 @@ size_t ZSTDv06_decodeLiteralsBlock(ZSTDv06_DCtx* dctx,
 if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
 memcpy(dctx->litBuffer, istart+lhSize, litSize);
 dctx->litPtr = dctx->litBuffer;
-dctx->litBufSize = ZSTDv06_BLOCKSIZE_MAX+8;
 dctx->litSize = litSize;
+memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
 return lhSize+litSize;
 }
 /* direct reference into compressed stream */
 dctx->litPtr = istart+lhSize;
-dctx->litBufSize = srcSize-lhSize;
 dctx->litSize = litSize;
 return lhSize+litSize;
 }

@@ -3245,9 +3243,8 @@ size_t ZSTDv06_decodeLiteralsBlock(ZSTDv06_DCtx* dctx,
 break;
 }
 if (litSize > ZSTDv06_BLOCKSIZE_MAX) return ERROR(corruption_detected);
-memset(dctx->litBuffer, istart[lhSize], litSize);
+memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
 dctx->litPtr = dctx->litBuffer;
-dctx->litBufSize = ZSTDv06_BLOCKSIZE_MAX+WILDCOPY_OVERLENGTH;
 dctx->litSize = litSize;
 return lhSize+1;
 }

@@ -3438,7 +3435,7 @@ static void ZSTDv06_decodeSequence(seq_t* seq, seqState_t* seqState)

 size_t ZSTDv06_execSequence(BYTE* op,
 BYTE* const oend, seq_t sequence,
-const BYTE** litPtr, const BYTE* const litLimit_8,
+const BYTE** litPtr, const BYTE* const litLimit,
 const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
 {
 BYTE* const oLitEnd = op + sequence.litLength;

@@ -3451,7 +3448,7 @@ size_t ZSTDv06_execSequence(BYTE* op,
 /* check */
 if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */
 if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */
-if (iLitEnd > litLimit_8) return ERROR(corruption_detected); /* over-read beyond lit buffer */
+if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */

 /* copy Literals */
 ZSTDv06_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */

@@ -3473,7 +3470,7 @@ size_t ZSTDv06_execSequence(BYTE* op,
 op = oLitEnd + length1;
 sequence.matchLength -= length1;
 match = base;
-if (op > oend_8) {
+if (op > oend_8 || sequence.matchLength < MINMATCH) {
 while (op < oMatchEnd) *op++ = *match++;
 return sequenceLength;
 }

@@ -3506,7 +3503,7 @@ size_t ZSTDv06_execSequence(BYTE* op,
 }
 while (op < oMatchEnd) *op++ = *match++;
 } else {
-ZSTDv06_wildcopy(op, match, sequence.matchLength-8); /* works even if matchLength < 8 */
+ZSTDv06_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
 }
 return sequenceLength;
 }

@@ -3523,7 +3520,6 @@ static size_t ZSTDv06_decompressSequences(
 BYTE* const oend = ostart + maxDstSize;
 BYTE* op = ostart;
 const BYTE* litPtr = dctx->litPtr;
-const BYTE* const litLimit_8 = litPtr + dctx->litBufSize - 8;
 const BYTE* const litEnd = litPtr + dctx->litSize;
 FSEv06_DTable* DTableLL = dctx->LLTable;
 FSEv06_DTable* DTableML = dctx->MLTable;

@@ -3567,7 +3563,7 @@ static size_t ZSTDv06_decompressSequences(
 pos, (U32)sequence.litLength, (U32)sequence.matchLength, (U32)sequence.offset);
 #endif

-{ size_t const oneSeqSize = ZSTDv06_execSequence(op, oend, sequence, &litPtr, litLimit_8, base, vBase, dictEnd);
+{ size_t const oneSeqSize = ZSTDv06_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
 if (ZSTDv06_isError(oneSeqSize)) return oneSeqSize;
 op += oneSeqSize;
 } }
+10
-14
@@ -2845,7 +2845,7 @@ static void ZSTDv07_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
 /*! ZSTDv07_wildcopy() :
 * custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
 #define WILDCOPY_OVERLENGTH 8
-MEM_STATIC void ZSTDv07_wildcopy(void* dst, const void* src, size_t length)
+MEM_STATIC void ZSTDv07_wildcopy(void* dst, const void* src, ptrdiff_t length)
 {
 const BYTE* ip = (const BYTE*)src;
 BYTE* op = (BYTE*)dst;

@@ -3021,7 +3021,6 @@ struct ZSTDv07_DCtx_s
 U32 dictID;
 const BYTE* litPtr;
 ZSTDv07_customMem customMem;
-size_t litBufSize;
 size_t litSize;
 BYTE litBuffer[ZSTDv07_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH];
 BYTE headerBuffer[ZSTDv07_FRAMEHEADERSIZE_MAX];

@@ -3395,9 +3394,9 @@ size_t ZSTDv07_decodeLiteralsBlock(ZSTDv07_DCtx* dctx,
 return ERROR(corruption_detected);

 dctx->litPtr = dctx->litBuffer;
-dctx->litBufSize = ZSTDv07_BLOCKSIZE_ABSOLUTEMAX+8;
 dctx->litSize = litSize;
 dctx->litEntropy = 1;
+memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
 return litCSize + lhSize;
 }
 case lbt_repeat:

@@ -3418,8 +3417,8 @@ size_t ZSTDv07_decodeLiteralsBlock(ZSTDv07_DCtx* dctx,
 if (HUFv07_isError(errorCode)) return ERROR(corruption_detected);
 }
 dctx->litPtr = dctx->litBuffer;
-dctx->litBufSize = ZSTDv07_BLOCKSIZE_ABSOLUTEMAX+WILDCOPY_OVERLENGTH;
 dctx->litSize = litSize;
+memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
 return litCSize + lhSize;
 }
 case lbt_raw:

@@ -3443,13 +3442,12 @@ size_t ZSTDv07_decodeLiteralsBlock(ZSTDv07_DCtx* dctx,
 if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
 memcpy(dctx->litBuffer, istart+lhSize, litSize);
 dctx->litPtr = dctx->litBuffer;
-dctx->litBufSize = ZSTDv07_BLOCKSIZE_ABSOLUTEMAX+8;
 dctx->litSize = litSize;
+memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
 return lhSize+litSize;
 }
 /* direct reference into compressed stream */
 dctx->litPtr = istart+lhSize;
-dctx->litBufSize = srcSize-lhSize;
 dctx->litSize = litSize;
 return lhSize+litSize;
 }

@@ -3471,9 +3469,8 @@ size_t ZSTDv07_decodeLiteralsBlock(ZSTDv07_DCtx* dctx,
 break;
 }
 if (litSize > ZSTDv07_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected);
-memset(dctx->litBuffer, istart[lhSize], litSize);
+memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
 dctx->litPtr = dctx->litBuffer;
-dctx->litBufSize = ZSTDv07_BLOCKSIZE_ABSOLUTEMAX+WILDCOPY_OVERLENGTH;
 dctx->litSize = litSize;
 return lhSize+1;
 }

@@ -3662,7 +3659,7 @@ static seq_t ZSTDv07_decodeSequence(seqState_t* seqState)
 static
 size_t ZSTDv07_execSequence(BYTE* op,
 BYTE* const oend, seq_t sequence,
-const BYTE** litPtr, const BYTE* const litLimit_w,
+const BYTE** litPtr, const BYTE* const litLimit,
 const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
 {
 BYTE* const oLitEnd = op + sequence.litLength;

@@ -3674,7 +3671,7 @@ size_t ZSTDv07_execSequence(BYTE* op,

 /* check */
 if ((oLitEnd>oend_w) | (oMatchEnd>oend)) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
-if (iLitEnd > litLimit_w) return ERROR(corruption_detected); /* over-read beyond lit buffer */
+if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */

 /* copy Literals */
 ZSTDv07_wildcopy(op, *litPtr, sequence.litLength); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */

@@ -3696,7 +3693,7 @@ size_t ZSTDv07_execSequence(BYTE* op,
 op = oLitEnd + length1;
 sequence.matchLength -= length1;
 match = base;
-if (op > oend_w) {
+if (op > oend_w || sequence.matchLength < MINMATCH) {
 while (op < oMatchEnd) *op++ = *match++;
 return sequenceLength;
 }

@@ -3729,7 +3726,7 @@ size_t ZSTDv07_execSequence(BYTE* op,
 }
 while (op < oMatchEnd) *op++ = *match++;
 } else {
-ZSTDv07_wildcopy(op, match, sequence.matchLength-8); /* works even if matchLength < 8 */
+ZSTDv07_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
 }
 return sequenceLength;
 }

@@ -3746,7 +3743,6 @@ static size_t ZSTDv07_decompressSequences(
 BYTE* const oend = ostart + maxDstSize;
 BYTE* op = ostart;
 const BYTE* litPtr = dctx->litPtr;
-const BYTE* const litLimit_w = litPtr + dctx->litBufSize - WILDCOPY_OVERLENGTH;
 const BYTE* const litEnd = litPtr + dctx->litSize;
 FSEv07_DTable* DTableLL = dctx->LLTable;
 FSEv07_DTable* DTableML = dctx->MLTable;

@@ -3776,7 +3772,7 @@ static size_t ZSTDv07_decompressSequences(
 for ( ; (BITv07_reloadDStream(&(seqState.DStream)) <= BITv07_DStream_completed) && nbSeq ; ) {
 nbSeq--;
 { seq_t const sequence = ZSTDv07_decodeSequence(&seqState);
-size_t const oneSeqSize = ZSTDv07_execSequence(op, oend, sequence, &litPtr, litLimit_w, base, vBase, dictEnd);
+size_t const oneSeqSize = ZSTDv07_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
 if (ZSTDv07_isError(oneSeqSize)) return oneSeqSize;
 op += oneSeqSize;
 } }
+5
-60
@@ -1,66 +1,11 @@

 # Multithreading Library for [LZ4], [LZ5] and [ZStandard]

 - this library is a temporary solution to the threading problem with zstd, lz4 and lz5
 - future version will support threading in some way, I think ;)
 - it's based on my zstdmt version @ github...

-### Compression
-```
-typedef struct {
-void *buf; /* ptr to data */
-size_t size; /* current filled in buf */
-size_t allocated; /* length of buf */
-} LZ4MT_Buffer;
+zstdmt_compress.c
-
-/**
- * reading and writing functions
- * - you can use stdio functions or plain read/write ...
- * - a sample is given in 7-Zip ZS or lz4mt.c
- */
-typedef int (fn_read) (void *args, LZ4MT_Buffer * in);
-typedef int (fn_write) (void *args, LZ4MT_Buffer * out);
+/TR 2016-12-22
-
-typedef struct {
-fn_read *fn_read;
-void *arg_read;
-fn_write *fn_write;
-void *arg_write;
-} LZ4MT_RdWr_t;
-
-typedef struct LZ4MT_CCtx_s LZ4MT_CCtx;
-
-/* 1) allocate new cctx */
-LZ4MT_CCtx *LZ4MT_createCCtx(int threads, int level, int inputsize);
-
-/* 2) threaded compression */
-size_t LZ4MT_CompressCCtx(LZ4MT_CCtx * ctx, LZ4MT_RdWr_t * rdwr);
-
-/* 3) get some statistic */
-size_t LZ4MT_GetFramesCCtx(LZ4MT_CCtx * ctx);
-size_t LZ4MT_GetInsizeCCtx(LZ4MT_CCtx * ctx);
-size_t LZ4MT_GetOutsizeCCtx(LZ4MT_CCtx * ctx);
-
-/* 4) free cctx */
-void LZ4MT_freeCCtx(LZ4MT_CCtx * ctx);
-```
-
-### Decompression
-```
-typedef struct LZ4MT_DCtx_s LZ4MT_DCtx;
-
-/* 1) allocate new cctx */
-LZ4MT_DCtx *LZ4MT_createDCtx(int threads, int inputsize);
-
-/* 2) threaded compression */
-size_t LZ4MT_DecompressDCtx(LZ4MT_DCtx * ctx, LZ4MT_RdWr_t * rdwr);
-
-/* 3) get some statistic */
-size_t LZ4MT_GetFramesDCtx(LZ4MT_DCtx * ctx);
-size_t LZ4MT_GetInsizeDCtx(LZ4MT_DCtx * ctx);
-size_t LZ4MT_GetOutsizeDCtx(LZ4MT_DCtx * ctx);
-
-/* 4) free cctx */
-void LZ4MT_freeDCtx(LZ4MT_DCtx * ctx);
-```
-
-## Todo
-
-- add Makefile
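Even though the commit trims the README down to a pointer at zstdmt_compress.c, the deleted text above still describes the intended call sequence. A minimal usage sketch against the renamed API; the stdio callbacks, the thread/level values, and the assumption that inputsize 0 selects a default are all illustrative, not the library's documented behavior:

```
#include <stdio.h>

#include "lz4mt.h"   /* LZ4MT_Buffer, LZ4MT_RdWr_t, cctx functions */

static int stdio_read(void *arg, LZ4MT_Buffer *in)
{
	/* in->size holds the capacity on entry; report bytes read back */
	in->size = fread(in->buf, 1, in->size, (FILE *)arg);
	return ferror((FILE *)arg) ? -1 : 0;
}

static int stdio_write(void *arg, LZ4MT_Buffer *out)
{
	size_t n = fwrite(out->buf, 1, out->size, (FILE *)arg);
	return n == out->size ? 0 : -1;
}

int main(void)
{
	LZ4MT_RdWr_t rdwr = { stdio_read, stdin, stdio_write, stdout };

	/* 1) allocate, 2) compress, 4) free - steps as numbered in the
	 * removed README; 2 threads, level 1, inputsize 0 assumed default */
	LZ4MT_CCtx *ctx = LZ4MT_createCCtx(2, 1, 0);
	if (!ctx)
		return 1;
	size_t ret = LZ4MT_compressCCtx(ctx, &rdwr);
	LZ4MT_freeCCtx(ctx);
	return LZ4MT_isError(ret) ? 1 : 0;
}
```

The same shape works for decompression by swapping in LZ4MT_createDCtx, LZ4MT_decompressDCtx, and LZ4MT_freeDCtx.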
+3
-2
@@ -47,6 +47,7 @@ typedef enum {
 LZ4MT_error_frame_decompress,
 LZ4MT_error_compressionParameter_unsupported,
 LZ4MT_error_compression_library,
+LZ4MT_error_canceled,
 LZ4MT_error_maxCode
 } LZ4MT_ErrorCode;

@@ -107,7 +108,7 @@ LZ4MT_CCtx *LZ4MT_createCCtx(int threads, int level, int inputsize);
 * 2) threaded compression
 * - errorcheck via
 */
-size_t LZ4MT_CompressCCtx(LZ4MT_CCtx * ctx, LZ4MT_RdWr_t * rdwr);
+size_t LZ4MT_compressCCtx(LZ4MT_CCtx * ctx, LZ4MT_RdWr_t * rdwr);

 /**
 * 3) get some statistic

@@ -141,7 +142,7 @@ LZ4MT_DCtx *LZ4MT_createDCtx(int threads, int inputsize);
 * 2) threaded compression
 * - return -1 on error
 */
-size_t LZ4MT_DecompressDCtx(LZ4MT_DCtx * ctx, LZ4MT_RdWr_t * rdwr);
+size_t LZ4MT_decompressDCtx(LZ4MT_DCtx * ctx, LZ4MT_RdWr_t * rdwr);

 /**
 * 3) get some statistic
@@ -32,12 +32,11 @@ unsigned LZ4MT_isError(size_t code)
 */
 const char *LZ4MT_getErrorString(size_t code)
 {
-static const char *notErrorCode = "Unspecified error lz4mt code";
-
 if (LZ4F_isError(lz4mt_errcode))
 return LZ4F_getErrorName(lz4mt_errcode);
-
-switch ((LZ4MT_ErrorCode)(0-code)) {
+static const char *notErrorCode = "Unspecified error lz4mt code";
+switch ((LZ4MT_ErrorCode) (0 - code)) {
 case PREFIX(no_error):
 return "No error detected";
 case PREFIX(memory_allocation):

@@ -155,6 +155,24 @@ LZ4MT_CCtx *LZ4MT_createCCtx(int threads, int level, int inputsize)
 return 0;
 }

+/**
+ * mt_error - return mt lib specific error code
+ */
+static size_t mt_error(int rv)
+{
+switch (rv) {
+case -1:
+return ERROR(read_fail);
+case -2:
+return ERROR(canceled);
+case -3:
+return ERROR(memory_allocation);
+}
+
+/* XXX, some catch all other errors */
+return ERROR(read_fail);
+}
+
 /**
 * pt_write - queue for compressed output
 */

@@ -175,8 +193,8 @@ static size_t pt_write(LZ4MT_CCtx * ctx, struct writelist *wl)
 wl = list_entry(entry, struct writelist, node);
 if (wl->frame == ctx->curframe) {
 int rv = ctx->fn_write(ctx->arg_write, &wl->out);
-if (rv == -1)
-return ERROR(write_fail);
+if (rv < 0)
+return mt_error(rv);
 ctx->outsize += wl->out.size;
 ctx->curframe++;
 list_move(entry, &ctx->writelist_free);

@@ -239,9 +257,9 @@ static void *pt_compress(void *arg)
 pthread_mutex_lock(&ctx->read_mutex);
 in.size = ctx->inputsize;
 rv = ctx->fn_read(ctx->arg_read, &in);
-if (rv == -1) {
+if (rv < 0) {
 pthread_mutex_unlock(&ctx->read_mutex);
-return (void *)ERROR(read_fail);
+return (void*)mt_error(rv);
 }

 /* eof */

@@ -277,8 +295,7 @@ static void *pt_compress(void *arg)
 MEM_writeLE32((unsigned char *)wl->out.buf + 0,
 LZ4FMT_MAGIC_SKIPPABLE);
 MEM_writeLE32((unsigned char *)wl->out.buf + 4, 4);
-MEM_writeLE32((unsigned char *)wl->out.buf + 8,
-(U32)result);
+MEM_writeLE32((unsigned char *)wl->out.buf + 8, (U32) result);
 wl->out.size = result + 12;

 /* write result */

@@ -293,7 +310,7 @@ static void *pt_compress(void *arg)
 return 0;
 }

-size_t LZ4MT_CompressCCtx(LZ4MT_CCtx * ctx, LZ4MT_RdWr_t * rdwr)
+size_t LZ4MT_compressCCtx(LZ4MT_CCtx * ctx, LZ4MT_RdWr_t * rdwr)
 {
 int t;
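The new mt_error() helper, duplicated into each compressor and decompressor translation unit in this commit, pins down a single convention: callbacks report failure with a negative value, and the library maps -1/-2/-3 onto read_fail, canceled, and memory_allocation, falling back to read_fail for anything else. A hypothetical reader callback exercising all three codes; the struct and field names are illustrative, and in the real library the input buffer is normally provided by the caller of the callback:

```
#include <stdio.h>
#include <stdlib.h>

#include "lz4mt.h"   /* LZ4MT_Buffer */

struct cancel_src {
	FILE *f;
	volatile int *cancel_flag;   /* set from another thread */
};

static int cancelable_read(void *arg, LZ4MT_Buffer *in)
{
	struct cancel_src *src = (struct cancel_src *)arg;
	if (*src->cancel_flag)
		return -2;               /* mt_error -> ERROR(canceled) */
	if (!in->buf) {
		in->buf = malloc(in->size);
		if (!in->buf)
			return -3;       /* mt_error -> ERROR(memory_allocation) */
		in->allocated = in->size;
	}
	in->size = fread(in->buf, 1, in->size, src->f);
	return ferror(src->f) ? -1 : 0;  /* mt_error -> ERROR(read_fail) */
}
```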
+39
-15
@@ -141,6 +141,24 @@ LZ4MT_DCtx *LZ4MT_createDCtx(int threads, int inputsize)
 return 0;
 }

+/**
+ * mt_error - return mt lib specific error code
+ */
+static size_t mt_error(int rv)
+{
+switch (rv) {
+case -1:
+return ERROR(read_fail);
+case -2:
+return ERROR(canceled);
+case -3:
+return ERROR(memory_allocation);
+}
+
+/* XXX, some catch all other errors */
+return ERROR(read_fail);
+}
+
 /**
 * pt_write - queue for decompressed output
 */

@@ -156,8 +174,8 @@ static size_t pt_write(LZ4MT_DCtx * ctx, struct writelist *wl)
 wl = list_entry(entry, struct writelist, node);
 if (wl->frame == ctx->curframe) {
 int rv = ctx->fn_write(ctx->arg_write, &wl->out);
-if (rv == -1)
-return ERROR(write_fail);
+if (rv < 0)
+return mt_error(rv);
 ctx->outsize += wl->out.size;
 ctx->curframe++;
 list_move(entry, &ctx->writelist_free);

@@ -185,8 +203,10 @@ static size_t pt_read(LZ4MT_DCtx * ctx, LZ4MT_Buffer * in, size_t * frame)
 hdr.buf = hdrbuf + 4;
 hdr.size = 8;
 rv = ctx->fn_read(ctx->arg_read, &hdr);
-if (rv == -1)
-goto error_read;
+if (rv < 0) {
+pthread_mutex_unlock(&ctx->read_mutex);
+return mt_error(rv);
+}
 if (hdr.size != 8)
 goto error_read;
 hdr.buf = hdrbuf;

@@ -194,8 +214,10 @@ static size_t pt_read(LZ4MT_DCtx * ctx, LZ4MT_Buffer * in, size_t * frame)
 hdr.buf = hdrbuf;
 hdr.size = 12;
 rv = ctx->fn_read(ctx->arg_read, &hdr);
-if (rv == -1)
-goto error_read;
+if (rv < 0) {
+pthread_mutex_unlock(&ctx->read_mutex);
+return mt_error(rv);
+}
 /* eof reached ? */
 if (hdr.size == 0) {
 pthread_mutex_unlock(&ctx->read_mutex);

@@ -231,8 +253,10 @@ static size_t pt_read(LZ4MT_DCtx * ctx, LZ4MT_Buffer * in, size_t * frame)
 in->size = toRead;
 rv = ctx->fn_read(ctx->arg_read, in);
 /* generic read failure! */
-if (rv == -1)
-goto error_read;
+if (rv < 0) {
+pthread_mutex_unlock(&ctx->read_mutex);
+return mt_error(rv);
+}
 /* needed more bytes! */
 if (in->size != toRead)
 goto error_data;

@@ -406,10 +430,10 @@ static size_t st_decompress(void *arg)
 /* read new input */
 in->size = nextToLoad;
 rv = ctx->fn_read(ctx->arg_read, in);
-if (rv == -1) {
+if (rv < 0) {
 free(in->buf);
 free(out->buf);
-return ERROR(read_fail);
+return mt_error(rv);
 }

 /* done, eof reached */

@@ -435,10 +459,10 @@ static size_t st_decompress(void *arg)
 /* have some output */
 if (out->size) {
 rv = ctx->fn_write(ctx->arg_write, out);
-if (rv == -1) {
+if (rv < 0) {
 free(in->buf);
 free(out->buf);
-return ERROR(write_fail);
+return mt_error(rv);
 }
 }

@@ -455,7 +479,7 @@ static size_t st_decompress(void *arg)
 return 0;
 }

-size_t LZ4MT_DecompressDCtx(LZ4MT_DCtx * ctx, LZ4MT_RdWr_t * rdwr)
+size_t LZ4MT_decompressDCtx(LZ4MT_DCtx * ctx, LZ4MT_RdWr_t * rdwr)
 {
 unsigned char buf[4];
 int t, rv;

@@ -475,8 +499,8 @@ size_t LZ4MT_DecompressDCtx(LZ4MT_DCtx * ctx, LZ4MT_RdWr_t * rdwr)
 in->buf = buf;
 in->size = 4;
 rv = ctx->fn_read(ctx->arg_read, in);
-if (rv == -1)
-return ERROR(read_fail);
+if (rv < 0)
+return mt_error(rv);
 if (in->size != 4)
 return ERROR(data_error);
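Note the recurring shape of the pt_read() fixes above: the old code jumped to error_read on some paths while still holding read_mutex; every early return now releases the mutex first, since a worker bailing out with the lock held would stall the remaining threads forever. The discipline, reduced to a sketch with illustrative names:

```
#include <pthread.h>

static size_t locked_read(pthread_mutex_t *read_mutex,
			  int (*fn_read)(void *, void *),
			  void *arg, void *buf)
{
	pthread_mutex_lock(read_mutex);
	int rv = fn_read(arg, buf);
	if (rv < 0) {
		/* release before bailing out, or peers deadlock */
		pthread_mutex_unlock(read_mutex);
		return (size_t)-1;   /* stand-in for mt_error(rv) */
	}
	/* ... parse the header, hand the block to a worker ... */
	pthread_mutex_unlock(read_mutex);
	return 0;
}
```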
+3
-2
@@ -47,6 +47,7 @@ typedef enum {
 LZ5MT_error_frame_decompress,
 LZ5MT_error_compressionParameter_unsupported,
 LZ5MT_error_compression_library,
+LZ5MT_error_canceled,
 LZ5MT_error_maxCode
 } LZ5MT_ErrorCode;

@@ -105,7 +106,7 @@ LZ5MT_CCtx *LZ5MT_createCCtx(int threads, int level, int inputsize);
 * 2) threaded compression
 * - return -1 on error
 */
-size_t LZ5MT_CompressCCtx(LZ5MT_CCtx * ctx, LZ5MT_RdWr_t * rdwr);
+size_t LZ5MT_compressCCtx(LZ5MT_CCtx * ctx, LZ5MT_RdWr_t * rdwr);

 /**
 * 3) get some statistic

@@ -141,7 +142,7 @@ LZ5MT_DCtx *LZ5MT_createDCtx(int threads, int inputsize);
 * 2) threaded compression
 * - return -1 on error
 */
-size_t LZ5MT_DecompressDCtx(LZ5MT_DCtx * ctx, LZ5MT_RdWr_t * rdwr);
+size_t LZ5MT_decompressDCtx(LZ5MT_DCtx * ctx, LZ5MT_RdWr_t * rdwr);

 /**
 * 3) get some statistic
@@ -32,12 +32,11 @@ unsigned LZ5MT_isError(size_t code)
 */
 const char *LZ5MT_getErrorString(size_t code)
 {
-static const char *notErrorCode = "Unspecified error lz4mt code";
-
 if (LZ5F_isError(lz5mt_errcode))
 return LZ5F_getErrorName(lz5mt_errcode);
-
-switch ((LZ5MT_ErrorCode)(0-code)) {
+static const char *notErrorCode = "Unspecified error lz4mt code";
+switch ((LZ5MT_ErrorCode) (0 - code)) {
 case PREFIX(no_error):
 return "No error detected";
 case PREFIX(memory_allocation):

@@ -155,6 +155,24 @@ LZ5MT_CCtx *LZ5MT_createCCtx(int threads, int level, int inputsize)
 return 0;
 }

+/**
+ * mt_error - return mt lib specific error code
+ */
+static size_t mt_error(int rv)
+{
+switch (rv) {
+case -1:
+return ERROR(read_fail);
+case -2:
+return ERROR(canceled);
+case -3:
+return ERROR(memory_allocation);
+}
+
+/* XXX, some catch all other errors */
+return ERROR(read_fail);
+}
+
 /**
 * pt_write - queue for compressed output
 */

@@ -175,8 +193,8 @@ static size_t pt_write(LZ5MT_CCtx * ctx, struct writelist *wl)
 wl = list_entry(entry, struct writelist, node);
 if (wl->frame == ctx->curframe) {
 int rv = ctx->fn_write(ctx->arg_write, &wl->out);
-if (rv == -1)
-return ERROR(write_fail);
+if (rv < 0)
+return mt_error(rv);
 ctx->outsize += wl->out.size;
 ctx->curframe++;
 list_move(entry, &ctx->writelist_free);

@@ -239,9 +257,9 @@ static void *pt_compress(void *arg)
 pthread_mutex_lock(&ctx->read_mutex);
 in.size = ctx->inputsize;
 rv = ctx->fn_read(ctx->arg_read, &in);
-if (rv == -1) {
+if (rv < 0) {
 pthread_mutex_unlock(&ctx->read_mutex);
-return (void *)ERROR(read_fail);
+return (void*)mt_error(rv);
 }

 /* eof */

@@ -277,8 +295,7 @@ static void *pt_compress(void *arg)
 MEM_writeLE32((unsigned char *)wl->out.buf + 0,
 LZ5FMT_MAGIC_SKIPPABLE);
 MEM_writeLE32((unsigned char *)wl->out.buf + 4, 4);
-MEM_writeLE32((unsigned char *)wl->out.buf + 8,
-(U32)result);
+MEM_writeLE32((unsigned char *)wl->out.buf + 8, (U32) result);
 wl->out.size = result + 12;

 /* write result */

@@ -293,7 +310,7 @@ static void *pt_compress(void *arg)
 return 0;
 }

-size_t LZ5MT_CompressCCtx(LZ5MT_CCtx * ctx, LZ5MT_RdWr_t * rdwr)
+size_t LZ5MT_compressCCtx(LZ5MT_CCtx * ctx, LZ5MT_RdWr_t * rdwr)
 {
 int t;
+42
-19
@@ -141,6 +141,24 @@ LZ5MT_DCtx *LZ5MT_createDCtx(int threads, int inputsize)
 return 0;
 }

+/**
+ * mt_error - return mt lib specific error code
+ */
+static size_t mt_error(int rv)
+{
+switch (rv) {
+case -1:
+return ERROR(read_fail);
+case -2:
+return ERROR(canceled);
+case -3:
+return ERROR(memory_allocation);
+}
+
+/* XXX, some catch all other errors */
+return ERROR(read_fail);
+}
+
 /**
 * pt_write - queue for decompressed output
 */

@@ -156,8 +174,8 @@ static size_t pt_write(LZ5MT_DCtx * ctx, struct writelist *wl)
 wl = list_entry(entry, struct writelist, node);
 if (wl->frame == ctx->curframe) {
 int rv = ctx->fn_write(ctx->arg_write, &wl->out);
-if (rv == -1)
-return ERROR(write_fail);
+if (rv < 0)
+return mt_error(rv);
 ctx->outsize += wl->out.size;
 ctx->curframe++;
 list_move(entry, &ctx->writelist_free);

@@ -185,8 +203,10 @@ static size_t pt_read(LZ5MT_DCtx * ctx, LZ5MT_Buffer * in, size_t * frame)
 hdr.buf = hdrbuf + 4;
 hdr.size = 8;
 rv = ctx->fn_read(ctx->arg_read, &hdr);
-if (rv == -1)
-goto error_read;
+if (rv < 0) {
+pthread_mutex_unlock(&ctx->read_mutex);
+return mt_error(rv);
+}
 if (hdr.size != 8)
 goto error_read;
 hdr.buf = hdrbuf;

@@ -194,8 +214,10 @@ static size_t pt_read(LZ5MT_DCtx * ctx, LZ5MT_Buffer * in, size_t * frame)
 hdr.buf = hdrbuf;
 hdr.size = 12;
 rv = ctx->fn_read(ctx->arg_read, &hdr);
-if (rv == -1)
-goto error_read;
+if (rv < 0) {
+pthread_mutex_unlock(&ctx->read_mutex);
+return mt_error(rv);
+}
 /* eof reached ? */
 if (hdr.size == 0) {
 pthread_mutex_unlock(&ctx->read_mutex);

@@ -231,8 +253,10 @@ static size_t pt_read(LZ5MT_DCtx * ctx, LZ5MT_Buffer * in, size_t * frame)
 in->size = toRead;
 rv = ctx->fn_read(ctx->arg_read, in);
 /* generic read failure! */
-if (rv == -1)
-goto error_read;
+if (rv < 0) {
+pthread_mutex_unlock(&ctx->read_mutex);
+return mt_error(rv);
+}
 /* needed more bytes! */
 if (in->size != toRead)
 goto error_data;

@@ -293,15 +317,14 @@ static void *pt_decompress(void *arg)

 /* zero should not happen here! */
 result = pt_read(ctx, in, &wl->frame);
-if (in->size == 0)
-break;
-
 if (LZ5MT_isError(result)) {
 list_move(&wl->node, &ctx->writelist_free);
-
 goto error_lock;
 }

+if (in->size == 0)
+break;
+
 {
 /* get frame size for output buffer */
 unsigned char *src = (unsigned char *)in->buf + 6;

@@ -407,10 +430,10 @@ static size_t st_decompress(void *arg)
 /* read new input */
 in->size = nextToLoad;
 rv = ctx->fn_read(ctx->arg_read, in);
-if (rv == -1) {
+if (rv < 0) {
 free(in->buf);
 free(out->buf);
-return ERROR(read_fail);
+return mt_error(rv);
 }

 /* done, eof reached */

@@ -436,10 +459,10 @@ static size_t st_decompress(void *arg)
 /* have some output */
 if (out->size) {
 rv = ctx->fn_write(ctx->arg_write, out);
-if (rv == -1) {
+if (rv < 0) {
 free(in->buf);
 free(out->buf);
-return ERROR(write_fail);
+return mt_error(rv);
 }
 }

@@ -456,7 +479,7 @@ static size_t st_decompress(void *arg)
 return 0;
 }

-size_t LZ5MT_DecompressDCtx(LZ5MT_DCtx * ctx, LZ5MT_RdWr_t * rdwr)
+size_t LZ5MT_decompressDCtx(LZ5MT_DCtx * ctx, LZ5MT_RdWr_t * rdwr)
 {
 unsigned char buf[4];
 int t, rv;

@@ -476,8 +499,8 @@ size_t LZ5MT_DecompressDCtx(LZ5MT_DCtx * ctx, LZ5MT_RdWr_t * rdwr)
 in->buf = buf;
 in->size = 4;
 rv = ctx->fn_read(ctx->arg_read, in);
-if (rv == -1)
-return ERROR(read_fail);
+if (rv < 0)
+return mt_error(rv);
 if (in->size != 4)
 return ERROR(data_error);
@@ -52,12 +52,10 @@ pthread_create(pthread_t * thread, const void *unused,

 int _pthread_join(pthread_t * thread, void **value_ptr)
 {
-DWORD result;
-
 if (!thread->handle)
 return 0;

-result = WaitForSingleObject(thread->handle, INFINITE);
+DWORD result = WaitForSingleObject(thread->handle, INFINITE);
 switch (result) {
 case WAIT_OBJECT_0:
 if (value_ptr)

@@ -31,7 +31,7 @@ extern "C" {
 #include <windows.h>

 /* mutex */
 #define pthread_mutex_t CRITICAL_SECTION
 #define pthread_mutex_init(a,b) InitializeCriticalSection((a))
 #define pthread_mutex_destroy(a) DeleteCriticalSection((a))
 #define pthread_mutex_lock EnterCriticalSection
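The Windows shim works by plain macro substitution, so the library's pthreads-style code compiles against Win32 primitives unchanged. What a lock/unlock pair expands to, sketched by hand; pthread_mutex_unlock is assumed to map to LeaveCriticalSection in the part of the header this diff does not show:

```
#include <windows.h>

static CRITICAL_SECTION write_mutex;          /* pthread_mutex_t       */

static void locked_update(void)
{
	InitializeCriticalSection(&write_mutex);  /* pthread_mutex_init    */
	EnterCriticalSection(&write_mutex);       /* pthread_mutex_lock    */
	/* ... touch the shared write list ... */
	LeaveCriticalSection(&write_mutex);       /* pthread_mutex_unlock  */
	DeleteCriticalSection(&write_mutex);      /* pthread_mutex_destroy */
}
```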
+34
-22
@@ -154,11 +154,29 @@ ZSTDMT_CCtx *ZSTDMT_createCCtx(int threads, int level, int inputsize)

 return ctx;

 err_ctx:
 free(ctx);
 return 0;
 }

+/**
+ * mt_error - return mt lib specific error code
+ */
+static size_t mt_error(int rv)
+{
+switch (rv) {
+case -1:
+return ZSTDMT_ERROR(read_fail);
+case -2:
+return ZSTDMT_ERROR(canceled);
+case -3:
+return ZSTDMT_ERROR(memory_allocation);
+}
+
+/* XXX, some catch all other errors */
+return ZSTDMT_ERROR(read_fail);
+}
+
 /**
 * pt_write - queue for compressed output
 */

@@ -178,10 +196,8 @@ static size_t pt_write(ZSTDMT_CCtx * ctx, struct writelist *wl)
 b.buf = frame0;
 b.size = 9;
 rv = ctx->fn_write(ctx->arg_write, &b);
-if (rv == -1)
-return ZSTDMT_ERROR(write_fail);
-if (rv == -2)
-return ZSTDMT_ERROR(canceled);
+if (rv < 0)
+return mt_error(rv);
 if (b.size != 9)
 return ZSTDMT_ERROR(write_fail);
 ctx->outsize += 9;

@@ -197,10 +213,8 @@ static size_t pt_write(ZSTDMT_CCtx * ctx, struct writelist *wl)
 wl = list_entry(entry, struct writelist, node);
 if (wl->frame == ctx->curframe) {
 rv = ctx->fn_write(ctx->arg_write, &wl->out);
-if (rv == -1)
-return ZSTDMT_ERROR(write_fail);
-if (rv == -2)
-return ZSTDMT_ERROR(canceled);
+if (rv < 0)
+return mt_error(rv);
 ctx->outsize += wl->out.size;
 ctx->curframe++;
 list_move(entry, &ctx->writelist_free);

@@ -264,13 +278,9 @@ static void *pt_compress(void *arg)
 pthread_mutex_lock(&ctx->read_mutex);
 in.size = ctx->inputsize;
 rv = ctx->fn_read(ctx->arg_read, &in);
-if (rv == -1) {
+if (rv < 0) {
 pthread_mutex_unlock(&ctx->read_mutex);
-result = ZSTDMT_ERROR(read_fail);
-goto error;
-} else if (rv == -2) {
-pthread_mutex_unlock(&ctx->read_mutex);
-result = ZSTDMT_ERROR(canceled);
+result = mt_error(rv);
 goto error;
 }

@@ -291,13 +301,15 @@ static void *pt_compress(void *arg)

 /* compress whole frame */
 {
 unsigned char *outbuf = out->buf;
-result = ZSTD_compress(outbuf + 12, out->size - 12, in.buf, in.size, ctx->level);
+result =
+ZSTD_compress(outbuf + 12, out->size - 12, in.buf,
+in.size, ctx->level);
 if (ZSTD_isError(result)) {
 zstdmt_errcode = result;
 result = ZSTDMT_ERROR(compression_library);
 goto error;
 }
 }

 /* write skippable frame */
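The three MEM_writeLE32() calls in pt_compress() (here and in the lz4mt/lz5mt variants above) emit a 12-byte skippable-frame header in front of each compressed frame: the magic, a constant payload length of 4, and the compressed size of the frame that follows, which is what lets pt_read() on the decompress side size its reads per frame. A sketch of that layout with the magic value left as a parameter, since its constant is not shown in this diff:

```
#include <stdint.h>

static void put_le32(unsigned char *p, uint32_t v)
{
	p[0] = (unsigned char)v;
	p[1] = (unsigned char)(v >> 8);
	p[2] = (unsigned char)(v >> 16);
	p[3] = (unsigned char)(v >> 24);
}

/* Mirrors wl->out.size = result + 12: 12 header bytes precede the frame. */
static void write_skippable_header(unsigned char *dst,
				   uint32_t magic, uint32_t compressed_size)
{
	put_le32(dst + 0, magic);            /* skippable-frame magic       */
	put_le32(dst + 4, 4);                /* skippable payload size: 4   */
	put_le32(dst + 8, compressed_size);  /* size of the following frame */
}
```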
@@ -158,6 +158,24 @@ static int IsZstd_Skippable(unsigned char *buf)
 return (MEM_readLE32(buf) == ZSTDMT_MAGIC_SKIPPABLE);
 }

+/**
+ * mt_error - return mt lib specific error code
+ */
+static size_t mt_error(int rv)
+{
+switch (rv) {
+case -1:
+return ZSTDMT_ERROR(read_fail);
+case -2:
+return ZSTDMT_ERROR(canceled);
+case -3:
+return ZSTDMT_ERROR(memory_allocation);
+}
+
+/* XXX, some catch all other errors */
+return ZSTDMT_ERROR(read_fail);
+}
+
 /**
 * pt_write - queue for decompressed output
 */

@@ -173,10 +191,8 @@ static size_t pt_write(ZSTDMT_DCtx * ctx, struct writelist *wl)
 wl = list_entry(entry, struct writelist, node);
 if (wl->frame == ctx->curframe) {
 int rv = ctx->fn_write(ctx->arg_write, &wl->out);
-if (rv == -1)
-return ZSTDMT_ERROR(write_fail);
-if (rv == -2)
-return ZSTDMT_ERROR(canceled);
+if (rv < 0)
+return mt_error(rv);
 ctx->outsize += wl->out.size;
 ctx->curframe++;
 list_move(entry, &ctx->writelist_free);

@@ -217,10 +233,10 @@ static size_t pt_read(ZSTDMT_DCtx * ctx, ZSTDMT_Buffer * in, size_t * frame)
 hdr.buf = hdrbuf + 7;
 hdr.size = 5;
 rv = ctx->fn_read(ctx->arg_read, &hdr);
-if (rv == -1)
-goto error_read;
-if (rv == -2)
-goto error_read;
+if (rv < 0) {
+pthread_mutex_unlock(&ctx->read_mutex);
+return mt_error(rv);
+}
 if (hdr.size != 5)
 goto error_data;
 hdr.buf = hdrbuf;

@@ -234,8 +250,10 @@ static size_t pt_read(ZSTDMT_DCtx * ctx, ZSTDMT_Buffer * in, size_t * frame)
 goto error_nomem;
 in->allocated = in->size;
 rv = ctx->fn_read(ctx->arg_read, in);
-if (rv == -1)
-goto error_read;
+if (rv < 0) {
+pthread_mutex_unlock(&ctx->read_mutex);
+return mt_error(rv);
+}
 if (in->size != toRead)
 goto error_data;
 ctx->insize += in->size;

@@ -264,8 +282,10 @@ static size_t pt_read(ZSTDMT_DCtx * ctx, ZSTDMT_Buffer * in, size_t * frame)
 in->buf = start + 4;
 in->size = toRead - 4;
 rv = ctx->fn_read(ctx->arg_read, in);
-if (rv == -1)
-goto error_read;
+if (rv < 0) {
+pthread_mutex_unlock(&ctx->read_mutex);
+return mt_error(rv);
+}
 if (in->size != toRead - 4)
 goto error_data;
 ctx->insize += in->size;

@@ -286,10 +306,10 @@ static size_t pt_read(ZSTDMT_DCtx * ctx, ZSTDMT_Buffer * in, size_t * frame)
 hdr.buf = hdrbuf;
 hdr.size = 12;
 rv = ctx->fn_read(ctx->arg_read, &hdr);
-if (rv == -1)
-goto error_read;
-if (rv == -2)
-goto error_canceled;
+if (rv < 0) {
+pthread_mutex_unlock(&ctx->read_mutex);
+return mt_error(rv);
+}

 /* eof reached ? */
 if (unlikely(hdr.size == 0)) {

@@ -325,7 +345,7 @@ static size_t pt_read(ZSTDMT_DCtx * ctx, ZSTDMT_Buffer * in, size_t * frame)
 if (rv == -1)
 goto error_read;
 if (rv == -2)
 goto error_canceled;
 /* needed more bytes! */
 if (in->size != toRead)
 goto error_data;

@@ -361,12 +381,12 @@ static void *pt_decompress(void *arg)
 size_t result = 0;
 ZSTDMT_Buffer collect;

 /* init dstream stream */
 result = ZSTD_initDStream(w->dctx);
 if (ZSTD_isError(result)) {
 zstdmt_errcode = result;
 return (void *)ZSTDMT_ERROR(compression_library);
 }

 collect.buf = 0;
 collect.size = 0;

@@ -452,7 +472,8 @@ static void *pt_decompress(void *arg)
 bnew = malloc(collect.size + zOut.pos);
 if (!bnew) {
 result =
-ZSTDMT_ERROR(memory_allocation);
+ZSTDMT_ERROR
+(memory_allocation);
 goto error_lock;
 }
 memcpy((char *)bnew, collect.buf,

@@ -496,7 +517,8 @@ static void *pt_decompress(void *arg)
 pthread_mutex_unlock(&ctx->write_mutex);
 out->buf = realloc(out->buf, out->size);
 if (!out->buf) {
-result = ZSTDMT_ERROR(memory_allocation);
+result =
+ZSTDMT_ERROR(memory_allocation);
 goto error_lock;
 }
 out->allocated = out->size;

@@ -581,8 +603,8 @@ static size_t st_decompress(void *arg)

 /* read more bytes, to fill buffer */
 rv = ctx->fn_read(ctx->arg_read, in);
-if (rv == -1) {
-result = ZSTDMT_ERROR(read_fail);
+if (rv < 0) {
+result = mt_error(rv);
 goto error;
 }

@@ -621,6 +643,10 @@ static size_t st_decompress(void *arg)
-return ZSTDMT_ERROR(canceled);
+goto error_clib;
 }
+if (rv == -3) {
+return ZSTDMT_ERROR(memory_allocation);
+goto error_clib;
+}
 ctx->outsize += zOut.pos;
 }

@@ -643,8 +669,8 @@ static size_t st_decompress(void *arg)
 /* read next input */
 in->size = in->allocated;
 rv = ctx->fn_read(ctx->arg_read, in);
-if (rv == -1) {
-result = ZSTDMT_ERROR(read_fail);
+if (rv < 0) {
+result = mt_error(rv);
 goto error;
 }

@@ -706,8 +732,8 @@ size_t ZSTDMT_decompressDCtx(ZSTDMT_DCtx * ctx, ZSTDMT_RdWr_t * rdwr)
 in->buf = buf;
 in->size = 16;
 rv = ctx->fn_read(ctx->arg_read, in);
-if (rv == -1)
-return ZSTDMT_ERROR(read_fail);
+if (rv < 0)
+return mt_error(rv);

 /* must be single threaded standard zstd, when smaller 16 bytes */
 if (in->size < 16) {