Mirror of https://github.com/Xevion/easy7zip.git (synced 2025-12-07 03:15:00 -06:00)
Update to ZStandard v1.1.4
@@ -3,7 +3,7 @@
#define MY_VER_BUILD 0
#define MY_VERSION_NUMBERS "16.04 ZS"
#define MY_VERSION "16.04 ZS"
#define MY_DATE "2017-02-06"
#define MY_DATE "2017-03-20"
#undef MY_COPYRIGHT
#undef MY_VERSION_COPYRIGHT_DATE
#define MY_AUTHOR_NAME "Igor Pavlov, Tino Reichardt"

@@ -1,11 +1,11 @@
#define MY_VER_MAJOR 1
#define MY_VER_MINOR 1
#define MY_VER_BUILD 3
#define MY_VERSION_NUMBERS "1.1.3"
#define MY_VERSION "1.1.3"
#define MY_DATE "2017-02-06"
#define MY_VER_BUILD 4
#define MY_VERSION_NUMBERS "1.1.4"
#define MY_VERSION "1.1.4"
#define MY_DATE "2017-03-20"
#undef MY_COPYRIGHT
#undef MY_VERSION_COPYRIGHT_DATE
#define MY_AUTHOR_NAME "Tino Reichardt"
#define MY_COPYRIGHT "Copyright (c) 2016 Tino Reichardt"
#define MY_COPYRIGHT "Copyright (c) 2016 - 2017 Tino Reichardt"
#define MY_VERSION_COPYRIGHT_DATE MY_VERSION " : " MY_COPYRIGHT " : " MY_DATE
@@ -60,6 +60,9 @@ extern "C" {
# include <immintrin.h> /* support for bextr (experimental) */
#endif

#define STREAM_ACCUMULATOR_MIN_32 25
#define STREAM_ACCUMULATOR_MIN_64 57
#define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))

/*-******************************************
* bitStream encoding API (write forward)
@@ -43,27 +43,21 @@
#include "huf.h"


/*-****************************************
* FSE Error Management
******************************************/
unsigned FSE_isError(size_t code) { return ERR_isError(code); }
/*=== Version ===*/
unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }


/*=== Error Management ===*/
unsigned FSE_isError(size_t code) { return ERR_isError(code); }
const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }


/* **************************************************************
* HUF Error Management
****************************************************************/
unsigned HUF_isError(size_t code) { return ERR_isError(code); }

const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }


/*-**************************************************************
* FSE NCount encoding-decoding
****************************************************************/
static short FSE_abs(short a) { return (short)(a<0 ? -a : a); }

size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
{

@@ -117,21 +111,21 @@ size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* t
} else {
bitStream >>= 2;
} }
{ short const max = (short)((2*threshold-1)-remaining);
short count;
{ int const max = (2*threshold-1) - remaining;
int count;

if ((bitStream & (threshold-1)) < (U32)max) {
count = (short)(bitStream & (threshold-1));
bitCount += nbBits-1;
count = bitStream & (threshold-1);
bitCount += nbBits-1;
} else {
count = (short)(bitStream & (2*threshold-1));
count = bitStream & (2*threshold-1);
if (count >= threshold) count -= max;
bitCount += nbBits;
bitCount += nbBits;
}

count--; /* extra accuracy */
remaining -= FSE_abs(count);
normalizedCounter[charnum++] = count;
remaining -= count < 0 ? -count : count; /* -1 means +1 */
normalizedCounter[charnum++] = (short)count;
previous0 = !count;
while (remaining < threshold) {
nbBits--;
70  C/zstd/fse.h
@@ -45,6 +45,32 @@ extern "C" {
#include <stddef.h> /* size_t, ptrdiff_t */


/*-*****************************************
* FSE_PUBLIC_API : control library symbols visibility
******************************************/
#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
# define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */
# define FSE_PUBLIC_API __declspec(dllexport)
#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
# define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
#else
# define FSE_PUBLIC_API
#endif

/*------ Version ------*/
#define FSE_VERSION_MAJOR 0
#define FSE_VERSION_MINOR 9
#define FSE_VERSION_RELEASE 0

#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
#define FSE_QUOTE(str) #str
#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)

#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */

/*-****************************************
* FSE simple functions
******************************************/
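The fse.h hunk above introduces FSE_PUBLIC_API and the library version macros. As a usage sketch only (not part of this commit; it assumes fse.h is on the include path), a consumer could verify that the header it compiled against matches the library it linked:

    #include <stdio.h>
    #include "fse.h"

    int main(void)
    {
        /* FSE_VERSION_NUMBER expands to MAJOR*100*100 + MINOR*100 + RELEASE, i.e. 900 for 0.9.0 */
        if (FSE_versionNumber() != FSE_VERSION_NUMBER) {
            printf("fse.h says %u but the linked library reports %u\n",
                   (unsigned)FSE_VERSION_NUMBER, FSE_versionNumber());
            return 1;
        }
        printf("FSE version %s\n", FSE_VERSION_STRING);   /* expands to "0.9.0" via FSE_EXPAND_AND_QUOTE */
        return 0;
    }

Per the macro logic above, building the library itself with FSE_DLL_EXPORT=1 marks these declarations for export, while clients of a prebuilt DLL would define FSE_DLL_IMPORT=1 instead.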
@@ -56,8 +82,8 @@ extern "C" {
if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead.
if FSE_isError(return), compression failed (more details using FSE_getErrorName())
*/
size_t FSE_compress(void* dst, size_t dstCapacity,
const void* src, size_t srcSize);
FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
const void* src, size_t srcSize);

/*! FSE_decompress():
Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',

@@ -69,18 +95,18 @@ size_t FSE_compress(void* dst, size_t dstCapacity,
Why ? : making this distinction requires a header.
Header management is intentionally delegated to the user layer, which can better manage special cases.
*/
size_t FSE_decompress(void* dst, size_t dstCapacity,
const void* cSrc, size_t cSrcSize);
FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity,
const void* cSrc, size_t cSrcSize);


/*-*****************************************
* Tool functions
******************************************/
size_t FSE_compressBound(size_t size); /* maximum compressed size */
FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */

/* Error Management */
unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */
FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */


/*-*****************************************
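Since FSE_compress(), FSE_decompress(), FSE_compressBound() and the error helpers above now all carry FSE_PUBLIC_API, here is a minimal round-trip sketch against exactly those declarations (illustrative only, not from the commit; the special return values 0 and 1 are handled as documented in the header):

    #include <stdlib.h>
    #include <string.h>
    #include "fse.h"

    /* Returns 1 if src survives an FSE compress/decompress round trip, 0 otherwise. */
    static int fseRoundTrip(const void* src, size_t srcSize)
    {
        size_t const cBound = FSE_compressBound(srcSize);
        void* const cBuf = malloc(cBound);
        void* const rBuf = malloc(srcSize);
        int ok = 0;
        if (cBuf && rBuf) {
            size_t const cSize = FSE_compress(cBuf, cBound, src, srcSize);
            if (!FSE_isError(cSize) && cSize > 1) {   /* 0 = not compressible, 1 = RLE case */
                size_t const rSize = FSE_decompress(rBuf, srcSize, cBuf, cSize);
                ok = !FSE_isError(rSize) && (rSize == srcSize)
                     && (memcmp(src, rBuf, srcSize) == 0);
            }
        }
        free(cBuf);
        free(rBuf);
        return ok;
    }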
@@ -94,7 +120,7 @@ const char* FSE_getErrorName(size_t code); /* provides error code string (usef
if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
if FSE_isError(return), it's an error code.
*/
size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);


/*-*****************************************

@@ -127,50 +153,50 @@ or to save and provide normalized distribution using external method.
@return : the count of the most frequent symbol (which is not identified).
if return == srcSize, there is only one symbol.
Can also return an error code, which can be tested with FSE_isError(). */
size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
FSE_PUBLIC_API size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);

/*! FSE_optimalTableLog():
dynamically downsize 'tableLog' when conditions are met.
It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
@return : recommended tableLog (necessarily <= 'maxTableLog') */
unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);

/*! FSE_normalizeCount():
normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
@return : tableLog,
or an errorCode, which can be tested using FSE_isError() */
size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, const unsigned* count, size_t srcSize, unsigned maxSymbolValue);
FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, const unsigned* count, size_t srcSize, unsigned maxSymbolValue);

/*! FSE_NCountWriteBound():
Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
Typically useful for allocation purpose. */
size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);

/*! FSE_writeNCount():
Compactly save 'normalizedCounter' into 'buffer'.
@return : size of the compressed table,
or an errorCode, which can be tested using FSE_isError(). */
size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);


/*! Constructor and Destructor of FSE_CTable.
Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */
FSE_CTable* FSE_createCTable (unsigned tableLog, unsigned maxSymbolValue);
void FSE_freeCTable (FSE_CTable* ct);
FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned tableLog, unsigned maxSymbolValue);
FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct);

/*! FSE_buildCTable():
Builds `ct`, which must be already allocated, using FSE_createCTable().
@return : 0, or an errorCode, which can be tested using FSE_isError() */
size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);

/*! FSE_compress_usingCTable():
Compress `src` using `ct` into `dst` which must be already allocated.
@return : size of compressed data (<= `dstCapacity`),
or 0 if compressed data could not fit into `dst`,
or an errorCode, which can be tested using FSE_isError() */
size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);
FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);

/*!
Tutorial :

@@ -223,25 +249,25 @@ If there is an error, the function will return an ErrorCode (which can be tested
@return : size read from 'rBuffer',
or an errorCode, which can be tested using FSE_isError().
maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);

/*! Constructor and Destructor of FSE_DTable.
Note that its size depends on 'tableLog' */
typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
FSE_DTable* FSE_createDTable(unsigned tableLog);
void FSE_freeDTable(FSE_DTable* dt);
FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt);

/*! FSE_buildDTable():
Builds 'dt', which must be already allocated, using FSE_createDTable().
return : 0, or an errorCode, which can be tested using FSE_isError() */
size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);

/*! FSE_decompress_usingDTable():
Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
into `dst` which must be already allocated.
@return : size of regenerated data (necessarily <= `dstCapacity`),
or an errorCode, which can be tested using FSE_isError() */
size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);

/*!
Tutorial :
@@ -201,8 +201,6 @@ size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
}

static short FSE_abs(short a) { return (short)(a<0 ? -a : a); }

static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize,
const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
unsigned writeIsSafe)

@@ -258,16 +256,16 @@ static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize,
bitStream >>= 16;
bitCount -= 16;
} }
{ short count = normalizedCounter[charnum++];
const short max = (short)((2*threshold-1)-remaining);
remaining -= FSE_abs(count);
if (remaining<1) return ERROR(GENERIC);
{ int count = normalizedCounter[charnum++];
int const max = (2*threshold-1)-remaining;
remaining -= count < 0 ? -count : count;
count++; /* +1 for extra accuracy */
if (count>=threshold) count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
bitStream += count << bitCount;
bitCount += nbBits;
bitCount -= (count<max);
previous0 = (count==1);
if (remaining<1) return ERROR(GENERIC);
while (remaining<threshold) nbBits--, threshold>>=1;
}
if (bitCount>16) {

@@ -508,6 +506,7 @@ unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxS

static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue)
{
short const NOT_YET_ASSIGNED = -2;
U32 s;
U32 distributed = 0;
U32 ToDistribute;

@@ -533,7 +532,8 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
total -= count[s];
continue;
}
norm[s]=-2;

norm[s]=NOT_YET_ASSIGNED;
}
ToDistribute = (1 << tableLog) - distributed;

@@ -541,7 +541,7 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
/* risk of rounding to zero */
lowOne = (U32)((total * 3) / (ToDistribute * 2));
for (s=0; s<=maxSymbolValue; s++) {
if ((norm[s] == -2) && (count[s] <= lowOne)) {
if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
norm[s] = 1;
distributed++;
total -= count[s];

@@ -561,12 +561,19 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
return 0;
}

if (total == 0) {
/* all of the symbols were low enough for the lowOne or lowThreshold */
for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
if (norm[s] > 0) ToDistribute--, norm[s]++;
return 0;
}

{ U64 const vStepLog = 62 - tableLog;
U64 const mid = (1ULL << (vStepLog-1)) - 1;
U64 const rStep = ((((U64)1<<vStepLog) * ToDistribute) + mid) / total; /* scale on remaining */
U64 tmpTotal = mid;
for (s=0; s<=maxSymbolValue; s++) {
if (norm[s]==-2) {
if (norm[s]==NOT_YET_ASSIGNED) {
U64 const end = tmpTotal + (count[s] * rStep);
U32 const sStart = (U32)(tmpTotal >> vStepLog);
U32 const sEnd = (U32)(end >> vStepLog);

@@ -59,7 +59,6 @@
****************************************************************/
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memcpy, memset */
#include <stdio.h> /* printf (debug) */
#include "bitstream.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
32  C/zstd/huf.h
@@ -91,7 +91,7 @@ size_t HUF_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize

/** HUF_compress4X_wksp() :
* Same as HUF_compress2(), but uses externally allocated `workSpace`, which must be a table of >= 1024 unsigned */
size_t HUF_compress4X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least 1024 unsigned */
size_t HUF_compress4X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */

@@ -102,10 +102,11 @@ size_t HUF_compress4X_wksp (void* dst, size_t dstSize, const void* src, size_t s


/* *** Constants *** */
#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
#define HUF_TABLELOG_MAX 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
#define HUF_TABLELOG_MAX 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
#define HUF_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */
#define HUF_SYMBOLVALUE_MAX 255
#define HUF_SYMBOLVALUE_MAX 255

#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
# error "HUF_TABLELOG_MAX is too large !"
#endif
@@ -133,6 +134,10 @@ typedef U32 HUF_DTable;
#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }

/* The workspace must have alignment at least 4 and be at least this large */
#define HUF_WORKSPACE_SIZE (6 << 10)
#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))


/* ****************************************
* Advanced decompression functions
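A small sketch of how the new workspace constants above pair with HUF_compress4X_wksp() (illustrative only, not from the commit; dst is assumed to be sized by the caller, and the constants sit in the HUF_STATIC_LINKING_ONLY section of huf.h):

    #define HUF_STATIC_LINKING_ONLY   /* HUF_WORKSPACE_SIZE_U32 is part of the advanced section */
    #include "huf.h"

    /* Compresses one block with a caller-provided workspace: HUF_WORKSPACE_SIZE_U32
       unsigned values (about 6 KB on the stack) satisfy both the size and the
       4-byte alignment requirement stated above. */
    static size_t compressBlock(void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize)
    {
        unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
        return HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
                                   255 /* maxSymbolValue */, 11 /* tableLog */,
                                   workSpace, sizeof(workSpace));
    }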
@@ -168,6 +173,17 @@ size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSym
size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);

typedef enum {
HUF_repeat_none, /**< Cannot use the previous table */
HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
HUF_repeat_valid /**< Can use the previous table and it is asumed to be valid */
} HUF_repeat;
/** HUF_compress4X_repeat() :
* Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
* If it uses hufTable it does not modify hufTable or repeat.
* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
* If preferRepeat then the old table will always be used if valid. */
size_t HUF_compress4X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */

/** HUF_buildCTable_wksp() :
* Same as HUF_buildCTable(), but using externally allocated scratch buffer.
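A sketch of how a caller could drive the new repeat-table interface declared above (illustrative only, not part of the commit). HUF_CElt is an opaque type, so, as zstd_compress.c does further down in this diff, its storage is simulated with 256 U32 values; the state struct is expected to be zero-initialized before first use:

    #include "mem.h"                  /* U32 */
    #define HUF_STATIC_LINKING_ONLY   /* HUF_repeat and the _repeat() variants are advanced API */
    #include "huf.h"

    typedef struct {
        U32 hufTableStorage[256];     /* backing store for the previous HUF_CElt table */
        HUF_repeat repeat;            /* HUF_repeat_none until a table has been saved */
        unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
    } LitState;

    static size_t compressLiterals(LitState* s, void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize)
    {
        HUF_CElt* const table = (HUF_CElt*)s->hufTableStorage;
        int const preferRepeat = srcSize <= 1024;   /* small inputs: reuse a valid table as-is */
        size_t const cSize = HUF_compress4X_repeat(dst, dstCapacity, src, srcSize,
                                                   255, 11,
                                                   s->workSpace, sizeof(s->workSpace),
                                                   table, &s->repeat, preferRepeat);
        /* When a fresh table was built and saved, mark it for validation on the next
           block, mirroring what ZSTD_compressLiterals() does with flagStaticHufTable. */
        if (!HUF_isError(cSize) && cSize > 1 && s->repeat == HUF_repeat_none)
            s->repeat = HUF_repeat_check;
        return cSize;
    }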
@@ -214,8 +230,14 @@ size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* c
/* single stream variants */

size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least 1024 unsigned */
size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
/** HUF_compress1X_repeat() :
* Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
* If it uses hufTable it does not modify hufTable or repeat.
* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
* If preferRepeat then the old table will always be used if valid. */
size_t HUF_compress1X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */

size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */
@@ -127,7 +127,7 @@ struct HUF_CElt_s {
}; /* typedef'd to HUF_CElt within "huf.h" */

/*! HUF_writeCTable() :
`CTable` : huffman tree to save, using huf representation.
`CTable` : Huffman tree to save, using huf representation.
@return : size of saved CTable */
size_t HUF_writeCTable (void* dst, size_t maxDstSize,
const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog)

@@ -409,6 +409,25 @@ size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U3
return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable));
}

static size_t HUF_estimateCompressedSize(HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
{
size_t nbBits = 0;
int s;
for (s = 0; s <= (int)maxSymbolValue; ++s) {
nbBits += CTable[s].nbBits * count[s];
}
return nbBits >> 3;
}

static int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
int bad = 0;
int s;
for (s = 0; s <= (int)maxSymbolValue; ++s) {
bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
}
return !bad;
}

static void HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
{
BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
@@ -510,25 +529,43 @@ size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, si
}


static size_t HUF_compressCTable_internal(
BYTE* const ostart, BYTE* op, BYTE* const oend,
const void* src, size_t srcSize,
unsigned singleStream, const HUF_CElt* CTable)
{
size_t const cSize = singleStream ?
HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) :
HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable);
if (HUF_isError(cSize)) { return cSize; }
if (cSize==0) { return 0; } /* uncompressible */
op += cSize;
/* check compressibility */
if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
return op-ostart;
}


/* `workSpace` must a table of at least 1024 unsigned */
static size_t HUF_compress_internal (
void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
unsigned singleStream,
void* workSpace, size_t wkspSize)
void* workSpace, size_t wkspSize,
HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat)
{
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;

union {
U32 count[HUF_SYMBOLVALUE_MAX+1];
HUF_CElt CTable[HUF_SYMBOLVALUE_MAX+1];
} table; /* `count` can overlap with `CTable`; saves 1 KB */
U32* count;
size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1);
HUF_CElt* CTable;
size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1);

/* checks & inits */
if (wkspSize < sizeof(huffNodeTable)) return ERROR(GENERIC);
if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize) return ERROR(GENERIC);
if (!srcSize) return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */
if (!dstSize) return 0; /* cannot fit within dst budget */
if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
@@ -536,38 +573,58 @@ static size_t HUF_compress_internal (
if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;

count = (U32*)workSpace;
workSpace = (BYTE*)workSpace + countSize;
wkspSize -= countSize;
CTable = (HUF_CElt*)workSpace;
workSpace = (BYTE*)workSpace + CTableSize;
wkspSize -= CTableSize;

/* Heuristic : If we don't need to check the validity of the old table use the old table for small inputs */
if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
}

/* Scan input and build symbol stats */
{ CHECK_V_F(largest, FSE_count_wksp (table.count, &maxSymbolValue, (const BYTE*)src, srcSize, (U32*)workSpace) );
{ CHECK_V_F(largest, FSE_count_wksp (count, &maxSymbolValue, (const BYTE*)src, srcSize, (U32*)workSpace) );
if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
if (largest <= (srcSize >> 7)+1) return 0; /* Fast heuristic : not compressible enough */
}

/* Check validity of previous table */
if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) {
*repeat = HUF_repeat_none;
}
/* Heuristic : use existing table for small inputs */
if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
}

/* Build Huffman Tree */
huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
{ CHECK_V_F(maxBits, HUF_buildCTable_wksp (table.CTable, table.count, maxSymbolValue, huffLog, workSpace, wkspSize) );
{ CHECK_V_F(maxBits, HUF_buildCTable_wksp (CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize) );
huffLog = (U32)maxBits;
/* Zero the unused symbols so we can check it for validity */
memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt));
}

/* Write table description header */
{ CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, table.CTable, maxSymbolValue, huffLog) );
if (hSize + 12 >= srcSize) return 0; /* not useful to try compression */
{ CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, CTable, maxSymbolValue, huffLog) );
/* Check if using the previous table will be beneficial */
if (repeat && *repeat != HUF_repeat_none) {
size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue);
size_t const newSize = HUF_estimateCompressedSize(CTable, count, maxSymbolValue);
if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
}
}
/* Use the new table */
if (hSize + 12ul >= srcSize) { return 0; }
op += hSize;
if (repeat) { *repeat = HUF_repeat_none; }
if (oldHufTable) { memcpy(oldHufTable, CTable, CTableSize); } /* Save the new table */
}

/* Compress */
{ size_t const cSize = (singleStream) ?
HUF_compress1X_usingCTable(op, oend - op, src, srcSize, table.CTable) : /* single segment */
HUF_compress4X_usingCTable(op, oend - op, src, srcSize, table.CTable);
if (HUF_isError(cSize)) return cSize;
if (cSize==0) return 0; /* uncompressible */
op += cSize;
}

/* check compressibility */
if ((size_t)(op-ostart) >= srcSize-1)
return 0;

return op-ostart;
return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, CTable);
}
@@ -576,7 +633,16 @@ size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize)
{
return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize);
return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0);
}

size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize,
HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat)
{
return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat, preferRepeat);
}

size_t HUF_compress1X (void* dst, size_t dstSize,

@@ -592,7 +658,16 @@ size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize)
{
return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize);
return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0);
}

size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize,
HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat)
{
return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, repeat, preferRepeat);
}

size_t HUF_compress2 (void* dst, size_t dstSize,
@@ -35,16 +35,19 @@
/* **************************************************************
* Compiler specifics
****************************************************************/
#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
/* inline is defined */
#elif defined(_MSC_VER) || defined(__GNUC__)
# define inline __inline
#else
# define inline /* disable inline */
#endif

#ifdef _MSC_VER /* Visual Studio */
# define FORCE_INLINE static __forceinline
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#else
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# ifdef __GNUC__
# define FORCE_INLINE static inline __attribute__((always_inline))
# else
# define FORCE_INLINE static inline
# endif
# else
# define FORCE_INLINE static
# endif /* __STDC_VERSION__ */
#endif

@@ -102,16 +105,16 @@ size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize)

/* Table header */
{ DTableDesc dtd = HUF_getDTableDesc(DTable);
if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, huffman tree cannot fit in */
if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
dtd.tableType = 0;
dtd.tableLog = (BYTE)tableLog;
memcpy(DTable, &dtd, sizeof(dtd));
}

/* Prepare ranks */
/* Calculate starting value for each rank */
{ U32 n, nextRankStart = 0;
for (n=1; n<tableLog+1; n++) {
U32 current = nextRankStart;
U32 const current = nextRankStart;
nextRankStart += (rankVal[n] << (n-1));
rankVal[n] = current;
} }

@@ -121,11 +124,11 @@ size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize)
for (n=0; n<nbSymbols; n++) {
U32 const w = huffWeight[n];
U32 const length = (1 << w) >> 1;
U32 i;
U32 u;
HUF_DEltX2 D;
D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
for (i = rankVal[w]; i < rankVal[w] + length; i++)
dt[i] = D;
for (u = rankVal[w]; u < rankVal[w] + length; u++)
dt[u] = D;
rankVal[w] += length;
} }

@@ -152,7 +155,7 @@ static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, con
if (MEM_64bits()) \
HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)

static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
FORCE_INLINE size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
{
BYTE* const pStart = p;

@@ -459,7 +462,7 @@ size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize)
void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */
HUF_DEltX4* const dt = (HUF_DEltX4*)dtPtr;

HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable)); /* if compilation fails here, assertion is false */
HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
/* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */

@@ -559,7 +562,7 @@ static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DE
if (MEM_64bits()) \
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)

static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
FORCE_INLINE size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
{
BYTE* const pStart = p;
26  C/zstd/mem.h
@@ -48,14 +48,15 @@ MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (size
*****************************************************************/
#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef int16_t S16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef int64_t S64;
typedef intptr_t iPtrDiff;
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef int16_t S16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef int64_t S64;
typedef intptr_t iPtrDiff;
typedef uintptr_t uPtrDiff;
#else
typedef unsigned char BYTE;
typedef unsigned short U16;

@@ -65,6 +66,7 @@ MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (size
typedef unsigned long long U64;
typedef signed long long S64;
typedef ptrdiff_t iPtrDiff;
typedef size_t uPtrDiff;
#endif

@@ -76,11 +78,11 @@ MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (size
* Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
* The below switch allow to select different access method for improved performance.
* Method 0 (default) : use `memcpy()`. Safe and portable.
* Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
* Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable).
* This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
* Method 2 : direct access. This method is portable but violate C standard.
* It can generate buggy code on targets depending on alignment.
* In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
* In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6)
* See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
* Prefer these methods in priority order (0 > 1 > 2)
*/

@@ -182,7 +184,7 @@ MEM_STATIC U32 MEM_swap32(U32 in)
{
#if defined(_MSC_VER) /* Visual Studio */
return _byteswap_ulong(in);
#elif defined (__GNUC__)
#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
return __builtin_bswap32(in);
#else
return ((in << 24) & 0xff000000 ) |

@@ -196,7 +198,7 @@ MEM_STATIC U64 MEM_swap64(U64 in)
{
#if defined(_MSC_VER) /* Visual Studio */
return _byteswap_uint64(in);
#elif defined (__GNUC__)
#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
return __builtin_bswap64(in);
#else
return ((in << 56) & 0xff00000000000000ULL) |
@@ -104,7 +104,9 @@ static void XXH_free (void* p) { free(p); }
#include <string.h>
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }

#define XXH_STATIC_LINKING_ONLY
#ifndef XXH_STATIC_LINKING_ONLY
# define XXH_STATIC_LINKING_ONLY
#endif
#include "xxhash.h"

@@ -64,16 +64,12 @@ XXH64 13.8 GB/s 1.9 GB/s
XXH32 6.8 GB/s 6.0 GB/s
*/

#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1

#if defined (__cplusplus)
extern "C" {
#endif

#ifndef XXH_NAMESPACE
# define XXH_NAMESPACE ZSTD_ /* Zstandard specific */
#endif
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1


/* ****************************

@@ -242,6 +238,11 @@ XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dst_state, const XXH
/* **************************
* Canonical representation
****************************/
/* Default result type for XXH functions are primitive unsigned 32 and 64 bits.
* The canonical representation uses human-readable write convention, aka big-endian (large digits first).
* These functions allow transformation of hash result into and from its canonical format.
* This way, hash values can be written into a file / memory, and remain comparable on different systems and programs.
*/
typedef struct { unsigned char digest[4]; } XXH32_canonical_t;
typedef struct { unsigned char digest[8]; } XXH64_canonical_t;

@@ -251,14 +252,9 @@ XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);

/* Default result type for XXH functions are primitive unsigned 32 and 64 bits.
* The canonical representation uses human-readable write convention, aka big-endian (large digits first).
* These functions allow transformation of hash result into and from its canonical format.
* This way, hash values can be written into a file / memory, and remain comparable on different systems and programs.
*/
#endif /* XXHASH_H_5627135585666179 */


#ifdef XXH_STATIC_LINKING_ONLY

/* ================================================================================================
This section contains definitions which are not guaranteed to remain stable.

@@ -266,6 +262,8 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src
They shall only be used with static linking.
Never use these definitions in association with dynamic linking !
=================================================================================================== */
#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXH_STATIC_H_3543687687345)
#define XXH_STATIC_H_3543687687345

/* These definitions are only meant to allow allocation of XXH state
statically, on stack, or in a struct for example.

@@ -299,11 +297,9 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src
# include "xxhash.c" /* include xxhash functions as `static`, for inlining */
# endif

#endif /* XXH_STATIC_LINKING_ONLY */
#endif /* XXH_STATIC_LINKING_ONLY && XXH_STATIC_H_3543687687345 */


#if defined (__cplusplus)
}
#endif

#endif /* XXHASH_H_5627135585666179 */
@@ -41,7 +41,7 @@ ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }

/*! ZSTD_getErrorString() :
* provides error code string from enum */
const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorName(code); }
const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }


/*=**************************************************************
@@ -13,8 +13,6 @@
***************************************/
#include <string.h> /* memset */
#include "mem.h"
#define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
#include "xxhash.h" /* XXH_reset, update, digest */
#define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */
#include "fse.h"
#define HUF_STATIC_LINKING_ONLY

@@ -62,6 +60,7 @@ struct ZSTD_CCtx_s {
U32 hashLog3; /* dispatch table : larger == faster, more memory */
U32 loadedDictEnd; /* index of end of dictionary */
U32 forceWindow; /* force back-references to respect limit of 1<<wLog, even for dictionary */
U32 forceRawDict; /* Force loading dictionary in "content-only" mode (no header analysis) */
ZSTD_compressionStage_e stage;
U32 rep[ZSTD_REP_NUM];
U32 repToConfirm[ZSTD_REP_NUM];

@@ -80,10 +79,11 @@ struct ZSTD_CCtx_s {
U32* chainTable;
HUF_CElt* hufTable;
U32 flagStaticTables;
HUF_repeat flagStaticHufTable;
FSE_CTable offcodeCTable [FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
FSE_CTable litlengthCTable [FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
unsigned tmpCounters[1024];
unsigned tmpCounters[HUF_WORKSPACE_SIZE_U32];
};

ZSTD_CCtx* ZSTD_createCCtx(void)

@@ -124,6 +124,7 @@ size_t ZSTD_setCCtxParameter(ZSTD_CCtx* cctx, ZSTD_CCtxParameter param, unsigned
switch(param)
{
case ZSTD_p_forceWindow : cctx->forceWindow = value>0; cctx->loadedDictEnd = 0; return 0;
case ZSTD_p_forceRawDict : cctx->forceRawDict = value>0; return 0;
default: return ERROR(parameter_unknown);
}
}
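The new ZSTD_p_forceRawDict switch above is reached through ZSTD_setCCtxParameter(). A hedged usage sketch (not from the commit; it assumes ZSTD_p_forceRawDict is exposed in the ZSTD_STATIC_LINKING_ONLY section of zstd.h, as in upstream zstd 1.1.4):

    #define ZSTD_STATIC_LINKING_ONLY  /* ZSTD_setCCtxParameter() and ZSTD_p_forceRawDict are advanced API */
    #include "zstd.h"

    /* Force the dictionary to be loaded as raw content, even when its first
       4 bytes happen to equal the dictionary magic number. */
    static size_t compressWithRawDict(void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize,
                                      const void* dict, size_t dictSize,
                                      int compressionLevel)
    {
        size_t cSize = 0;   /* simplistic error handling for a sketch */
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        if (cctx != NULL) {
            ZSTD_setCCtxParameter(cctx, ZSTD_p_forceRawDict, 1);
            cSize = ZSTD_compress_usingDict(cctx, dst, dstCapacity,
                                            src, srcSize, dict, dictSize,
                                            compressionLevel);
            ZSTD_freeCCtx(cctx);
        }
        return cSize;
    }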
@@ -246,14 +247,17 @@ static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_parameters params, U64 fra
typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e;

/*! ZSTD_resetCCtx_advanced() :
note : 'params' must be validated */
note : `params` must be validated */
static size_t ZSTD_resetCCtx_advanced (ZSTD_CCtx* zc,
ZSTD_parameters params, U64 frameContentSize,
ZSTD_compResetPolicy_e const crp)
{
if (crp == ZSTDcrp_continue)
if (ZSTD_equivalentParams(params, zc->params))
if (ZSTD_equivalentParams(params, zc->params)) {
zc->flagStaticTables = 0;
zc->flagStaticHufTable = HUF_repeat_none;
return ZSTD_continueCCtx(zc, params, frameContentSize);
}

{ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog);
U32 const divider = (params.cParams.searchLength==3) ? 3 : 4;

@@ -287,6 +291,7 @@ static size_t ZSTD_resetCCtx_advanced (ZSTD_CCtx* zc,
ptr = zc->hashTable3 + h3Size;
zc->hufTable = (HUF_CElt*)ptr;
zc->flagStaticTables = 0;
zc->flagStaticHufTable = HUF_repeat_none;
ptr = ((U32*)ptr) + 256; /* note : HUF_CElt* is incomplete type, size is simulated using U32 */

zc->nextToUpdate = 1;

@@ -344,8 +349,12 @@ size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long
{
if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong);


memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
ZSTD_resetCCtx_advanced(dstCCtx, srcCCtx->params, pledgedSrcSize, ZSTDcrp_noMemset);
{ ZSTD_parameters params = srcCCtx->params;
params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset);
}

/* copy tables */
{ size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog);

@@ -368,12 +377,15 @@ size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long

/* copy entropy tables */
dstCCtx->flagStaticTables = srcCCtx->flagStaticTables;
dstCCtx->flagStaticHufTable = srcCCtx->flagStaticHufTable;
if (srcCCtx->flagStaticTables) {
memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256*4);
memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable));
memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable));
memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable));
}
if (srcCCtx->flagStaticHufTable) {
memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256*4);
}

return 0;
}
@@ -487,24 +499,28 @@ static size_t ZSTD_compressLiterals (ZSTD_CCtx* zc,

/* small ? don't even attempt compression (speed opt) */
# define LITERAL_NOENTROPY 63
{ size_t const minLitSize = zc->flagStaticTables ? 6 : LITERAL_NOENTROPY;
{ size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
}

if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall); /* not enough space for compression */
if (zc->flagStaticTables && (lhSize==3)) {
hType = set_repeat;
singleStream = 1;
cLitSize = HUF_compress1X_usingCTable(ostart+lhSize, dstCapacity-lhSize, src, srcSize, zc->hufTable);
} else {
cLitSize = singleStream ? HUF_compress1X_wksp(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, zc->tmpCounters, sizeof(zc->tmpCounters))
: HUF_compress4X_wksp(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, zc->tmpCounters, sizeof(zc->tmpCounters));
{ HUF_repeat repeat = zc->flagStaticHufTable;
int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, zc->tmpCounters, sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat)
: HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, zc->tmpCounters, sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat);
if (repeat != HUF_repeat_none) { hType = set_repeat; } /* reused the existing table */
else { zc->flagStaticHufTable = HUF_repeat_check; } /* now have a table to reuse */

}
if ((cLitSize==0) | (cLitSize >= srcSize - minGain))
if ((cLitSize==0) | (cLitSize >= srcSize - minGain)) {
zc->flagStaticHufTable = HUF_repeat_none;
return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
if (cLitSize==1)
}
if (cLitSize==1) {
zc->flagStaticHufTable = HUF_repeat_none;
return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
}

/* Build header */
switch(lhSize)
@@ -572,11 +588,11 @@ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
}


size_t ZSTD_compressSequences(ZSTD_CCtx* zc,
MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
size_t srcSize)
{
const int longOffsets = zc->params.cParams.windowLog > STREAM_ACCUMULATOR_MIN;
const seqStore_t* seqStorePtr = &(zc->seqStore);
U32 count[MaxSeq+1];
S16 norm[MaxSeq+1];

@@ -710,7 +726,18 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* zc,
if (MEM_32bits()) BIT_flushBits(&blockStream);
BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
if (MEM_32bits()) BIT_flushBits(&blockStream);
BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
if (longOffsets) {
U32 const ofBits = ofCodeTable[nbSeq-1];
int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
if (extraBits) {
BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
BIT_flushBits(&blockStream);
}
BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
ofBits - extraBits);
} else {
BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
}
BIT_flushBits(&blockStream);

{ size_t n;

@@ -732,7 +759,17 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* zc,
if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/
BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
if (longOffsets) {
int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
if (extraBits) {
BIT_addBits(&blockStream, sequences[n].offset, extraBits);
BIT_flushBits(&blockStream); /* (7)*/
}
BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
ofBits - extraBits); /* 31 */
} else {
BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
}
BIT_flushBits(&blockStream); /* (7)*/
} }
@@ -747,9 +784,12 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* zc,

/* check compressibility */
_check_compressibility:
{ size_t const minGain = ZSTD_minGain(srcSize);
size_t const maxCSize = srcSize - minGain;
if ((size_t)(op-ostart) >= maxCSize) return 0; }
{ size_t const minGain = ZSTD_minGain(srcSize);
size_t const maxCSize = srcSize - minGain;
if ((size_t)(op-ostart) >= maxCSize) {
zc->flagStaticHufTable = HUF_repeat_none;
return 0;
} }

/* confirm repcodes */
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = zc->repToConfirm[i]; }

@@ -757,7 +797,6 @@ _check_compressibility:
return op - ostart;
}


#if 0 /* for debug */
# define STORESEQ_DEBUG
#include <stdio.h> /* fprintf */

@@ -1748,7 +1787,7 @@ static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & mask]

/* Update chains up to ip (excluded)
Assumption : always within prefix (ie. not within extDict) */
Assumption : always within prefix (i.e. not within extDict) */
FORCE_INLINE
U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls)
{
@@ -2308,7 +2347,7 @@ static size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
if (remaining < blockSize) blockSize = remaining;

/* preemptive overflow correction */
if (cctx->lowLimit > (2U<<30)) {
if (cctx->lowLimit > (3U<<29)) {
U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1;
U32 const current = (U32)(ip - cctx->base);
U32 const newCurrent = (current & cycleMask) + (1 << cctx->params.cParams.windowLog);
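For context on the threshold change above (a reviewer-side check, not part of the commit): 2U<<30 is 2147483648 bytes (2 GiB) while 3U<<29 is 1610612736 bytes (1.5 GiB), so the preemptive index overflow correction now triggers half a gigabyte earlier.

    #include <assert.h>

    int main(void)
    {
        assert((2U << 30) == 2147483648U);   /* old trigger : 2 GiB   */
        assert((3U << 29) == 1610612736U);   /* new trigger : 1.5 GiB */
        return 0;
    }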
@@ -2362,7 +2401,7 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
U32 const dictIDSizeCode = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */
U32 const checksumFlag = params.fParams.checksumFlag>0;
U32 const windowSize = 1U << params.cParams.windowLog;
U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize > (pledgedSrcSize-1));
U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
U32 const fcsCode = params.fParams.contentSizeFlag ?
(pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : /* 0-3 */

@@ -2508,7 +2547,7 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx* zc, const void* src, size_t
return ERROR(GENERIC); /* strategy doesn't exist; impossible */
}

zc->nextToUpdate = zc->loadedDictEnd;
zc->nextToUpdate = (U32)(iend - zc->base);
return 0;
}

@@ -2600,6 +2639,7 @@ static size_t ZSTD_loadDictEntropyStats(ZSTD_CCtx* cctx, const void* dict, size_
}

cctx->flagStaticTables = 1;
cctx->flagStaticHufTable = HUF_repeat_valid;
return dictPtr - (const BYTE*)dict;
}

@@ -2609,8 +2649,9 @@ static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx* zc, const void* dict, si
{
if ((dict==NULL) || (dictSize<=8)) return 0;

/* default : dict is pure content */
if (MEM_readLE32(dict) != ZSTD_DICT_MAGIC) return ZSTD_loadDictionaryContent(zc, dict, dictSize);
/* dict as pure content */
if ((MEM_readLE32(dict) != ZSTD_DICT_MAGIC) || (zc->forceRawDict))
return ZSTD_loadDictionaryContent(zc, dict, dictSize);
zc->dictID = zc->params.fParams.noDictIDFlag ? 0 : MEM_readLE32((const char*)dict+4);

/* known magic number : dict is parsed for entropy stats and content */

@@ -2782,7 +2823,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, u

if (!cdict || !cctx) {
ZSTD_free(cdict, customMem);
ZSTD_free(cctx, customMem);
ZSTD_freeCCtx(cctx);
return NULL;
}

@@ -2800,8 +2841,8 @@ ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, u
{ size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0);
if (ZSTD_isError(errorCode)) {
ZSTD_free(cdict->dictBuffer, customMem);
ZSTD_free(cctx, customMem);
ZSTD_free(cdict, customMem);
ZSTD_freeCCtx(cctx);
return NULL;
} }

@@ -2845,7 +2886,11 @@ static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict* cdict) {
size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize)
{
if (cdict->dictContentSize) CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize))
else CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, cdict->refContext->params, pledgedSrcSize));
else {
ZSTD_parameters params = cdict->refContext->params;
params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, params, pledgedSrcSize));
}
return 0;
}

@@ -2939,7 +2984,7 @@ size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ; }

size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
static size_t ZSTD_resetCStream_internal(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
|
||||
{
|
||||
if (zcs->inBuffSize==0) return ERROR(stage_wrong); /* zcs has not been init at least once => can't reset */
|
||||
|
||||
@@ -2957,6 +3002,14 @@ size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
|
||||
return 0; /* ready to go */
|
||||
}
|
||||
|
||||
size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
|
||||
{
|
||||
|
||||
zcs->params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
|
||||
|
||||
return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
|
||||
}
|
||||
|
||||
size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
|
||||
const void* dict, size_t dictSize,
|
||||
ZSTD_parameters params, unsigned long long pledgedSrcSize)
|
||||
@@ -2988,7 +3041,7 @@ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
|
||||
zcs->checksum = params.fParams.checksumFlag > 0;
|
||||
zcs->params = params;
|
||||
|
||||
return ZSTD_resetCStream(zcs, pledgedSrcSize);
|
||||
return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
|
||||
}
|
||||
|
||||
/* note : cdict must outlive compression session */
|
||||
@@ -3022,7 +3075,7 @@ size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
|
||||
size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
|
||||
{
|
||||
if (zcs==NULL) return 0; /* support sizeof on NULL */
|
||||
return sizeof(zcs) + ZSTD_sizeof_CCtx(zcs->cctx) + ZSTD_sizeof_CDict(zcs->cdictLocal) + zcs->outBuffSize + zcs->inBuffSize;
|
||||
return sizeof(*zcs) + ZSTD_sizeof_CCtx(zcs->cctx) + ZSTD_sizeof_CDict(zcs->cdictLocal) + zcs->outBuffSize + zcs->inBuffSize;
|
||||
}
|
||||
|
||||
/*====== Compression ======*/
|
||||
|
||||
@@ -43,8 +43,6 @@
|
||||
*********************************************************/
|
||||
#include <string.h> /* memcpy, memmove, memset */
|
||||
#include "mem.h" /* low level memory routines */
|
||||
#define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
|
||||
#include "xxhash.h" /* XXH64_* */
|
||||
#define FSE_STATIC_LINKING_ONLY
|
||||
#include "fse.h"
|
||||
#define HUF_STATIC_LINKING_ONLY
|
||||
@@ -87,22 +85,26 @@ typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
|
||||
ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
|
||||
ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;
|
||||
|
||||
typedef struct {
|
||||
FSE_DTable LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
|
||||
FSE_DTable OFTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
|
||||
FSE_DTable MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
|
||||
HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
|
||||
U32 rep[ZSTD_REP_NUM];
|
||||
} ZSTD_entropyTables_t;
|
||||
|
||||
struct ZSTD_DCtx_s
|
||||
{
|
||||
const FSE_DTable* LLTptr;
|
||||
const FSE_DTable* MLTptr;
|
||||
const FSE_DTable* OFTptr;
|
||||
const HUF_DTable* HUFptr;
|
||||
FSE_DTable LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
|
||||
FSE_DTable OFTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
|
||||
FSE_DTable MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
|
||||
HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
|
||||
const void* previousDstEnd;
|
||||
const void* base;
|
||||
const void* vBase;
|
||||
const void* dictEnd;
|
||||
ZSTD_entropyTables_t entropy;
|
||||
const void* previousDstEnd; /* detect continuity */
|
||||
const void* base; /* start of current segment */
|
||||
const void* vBase; /* virtual start of previous segment if it was just before current one */
|
||||
const void* dictEnd; /* end of previous segment */
|
||||
size_t expected;
|
||||
U32 rep[ZSTD_REP_NUM];
|
||||
ZSTD_frameParams fParams;
|
||||
blockType_e bType; /* used in ZSTD_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
|
||||
ZSTD_dStage stage;
|
||||
@@ -131,15 +133,15 @@ size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
|
||||
dctx->base = NULL;
|
||||
dctx->vBase = NULL;
|
||||
dctx->dictEnd = NULL;
|
||||
dctx->hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
|
||||
dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
|
||||
dctx->litEntropy = dctx->fseEntropy = 0;
|
||||
dctx->dictID = 0;
|
||||
MEM_STATIC_ASSERT(sizeof(dctx->rep) == sizeof(repStartValue));
|
||||
memcpy(dctx->rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
|
||||
dctx->LLTptr = dctx->LLTable;
|
||||
dctx->MLTptr = dctx->MLTable;
|
||||
dctx->OFTptr = dctx->OFTable;
|
||||
dctx->HUFptr = dctx->hufTable;
|
||||
MEM_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
|
||||
memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
|
||||
dctx->LLTptr = dctx->entropy.LLTable;
|
||||
dctx->MLTptr = dctx->entropy.MLTable;
|
||||
dctx->OFTptr = dctx->entropy.OFTable;
|
||||
dctx->HUFptr = dctx->entropy.hufTable;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -175,6 +177,8 @@ void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
|
||||
memcpy(dstDCtx, srcDCtx, sizeof(ZSTD_DCtx) - workSpaceSize); /* no need to copy workspace */
|
||||
}
|
||||
|
||||
#if 0
|
||||
/* deprecated */
|
||||
static void ZSTD_refDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
|
||||
{
|
||||
ZSTD_decompressBegin(dstDCtx); /* init */
|
||||
@@ -186,15 +190,18 @@ static void ZSTD_refDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
|
||||
dstDCtx->dictID = srcDCtx->dictID;
|
||||
dstDCtx->litEntropy = srcDCtx->litEntropy;
|
||||
dstDCtx->fseEntropy = srcDCtx->fseEntropy;
|
||||
dstDCtx->LLTptr = srcDCtx->LLTable;
|
||||
dstDCtx->MLTptr = srcDCtx->MLTable;
|
||||
dstDCtx->OFTptr = srcDCtx->OFTable;
|
||||
dstDCtx->HUFptr = srcDCtx->hufTable;
|
||||
dstDCtx->rep[0] = srcDCtx->rep[0];
|
||||
dstDCtx->rep[1] = srcDCtx->rep[1];
|
||||
dstDCtx->rep[2] = srcDCtx->rep[2];
|
||||
dstDCtx->LLTptr = srcDCtx->entropy.LLTable;
|
||||
dstDCtx->MLTptr = srcDCtx->entropy.MLTable;
|
||||
dstDCtx->OFTptr = srcDCtx->entropy.OFTable;
|
||||
dstDCtx->HUFptr = srcDCtx->entropy.hufTable;
|
||||
dstDCtx->entropy.rep[0] = srcDCtx->entropy.rep[0];
|
||||
dstDCtx->entropy.rep[1] = srcDCtx->entropy.rep[1];
|
||||
dstDCtx->entropy.rep[2] = srcDCtx->entropy.rep[2];
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static void ZSTD_refDDict(ZSTD_DCtx* dstDCtx, const ZSTD_DDict* ddict);
|
||||
|
||||
|
||||
/*-*************************************************************
|
||||
@@ -306,6 +313,86 @@ size_t ZSTD_getFrameParams(ZSTD_frameParams* fparamsPtr, const void* src, size_t
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** ZSTD_getFrameContentSize() :
|
||||
* compatible with legacy mode
|
||||
* @return : decompressed size of the single frame pointed to be `src` if known, otherwise
|
||||
* - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
|
||||
* - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
|
||||
unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
|
||||
{
|
||||
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
|
||||
if (ZSTD_isLegacy(src, srcSize)) {
|
||||
unsigned long long const ret = ZSTD_getDecompressedSize_legacy(src, srcSize);
|
||||
return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret;
|
||||
}
|
||||
#endif
|
||||
{
|
||||
ZSTD_frameParams fParams;
|
||||
if (ZSTD_getFrameParams(&fParams, src, srcSize) != 0) return ZSTD_CONTENTSIZE_ERROR;
|
||||
if (fParams.windowSize == 0) {
|
||||
/* Either skippable or empty frame, size == 0 either way */
|
||||
return 0;
|
||||
} else if (fParams.frameContentSize != 0) {
|
||||
return fParams.frameContentSize;
|
||||
} else {
|
||||
return ZSTD_CONTENTSIZE_UNKNOWN;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** ZSTD_findDecompressedSize() :
|
||||
* compatible with legacy mode
|
||||
* `srcSize` must be the exact length of some number of ZSTD compressed and/or
|
||||
* skippable frames
|
||||
* @return : decompressed size of the frames contained */
|
||||
unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
|
||||
{
|
||||
{
|
||||
unsigned long long totalDstSize = 0;
|
||||
while (srcSize >= ZSTD_frameHeaderSize_prefix) {
|
||||
const U32 magicNumber = MEM_readLE32(src);
|
||||
|
||||
if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
|
||||
size_t skippableSize;
|
||||
if (srcSize < ZSTD_skippableHeaderSize)
|
||||
return ERROR(srcSize_wrong);
|
||||
skippableSize = MEM_readLE32((const BYTE *)src + 4) +
|
||||
ZSTD_skippableHeaderSize;
|
||||
if (srcSize < skippableSize) {
|
||||
return ZSTD_CONTENTSIZE_ERROR;
|
||||
}
|
||||
|
||||
src = (const BYTE *)src + skippableSize;
|
||||
srcSize -= skippableSize;
|
||||
continue;
|
||||
}
|
||||
|
||||
{
|
||||
unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
|
||||
if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;
|
||||
|
||||
/* check for overflow */
|
||||
if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
|
||||
totalDstSize += ret;
|
||||
}
|
||||
{
|
||||
size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
|
||||
if (ZSTD_isError(frameSrcSize)) {
|
||||
return ZSTD_CONTENTSIZE_ERROR;
|
||||
}
|
||||
|
||||
src = (const BYTE *)src + frameSrcSize;
|
||||
srcSize -= frameSrcSize;
|
||||
}
|
||||
}
|
||||
|
||||
if (srcSize) {
|
||||
return ZSTD_CONTENTSIZE_ERROR;
|
||||
}
|
||||
|
||||
return totalDstSize;
|
||||
}
|
||||
}
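
For context, a minimal sketch of how these new size-query entry points might be used to size a destination buffer before a one-shot decompression. It assumes ZSTD_findDecompressedSize() and the ZSTD_CONTENTSIZE_* constants are exposed through zstd's static-linking-only (experimental) header section; alloc_for_frames is a hypothetical helper, not part of the library:

    #define ZSTD_STATIC_LINKING_ONLY   /* assumed: exposes ZSTD_findDecompressedSize() */
    #include <zstd.h>
    #include <stdlib.h>

    /* Return a malloc'd buffer large enough for every frame in src, or NULL if the
     * size is unknown (caller should fall back to streaming) or the input is invalid. */
    static void* alloc_for_frames(const void* src, size_t srcSize, size_t* dstCapacity)
    {
        unsigned long long const dSize = ZSTD_findDecompressedSize(src, srcSize);
        if (dSize == ZSTD_CONTENTSIZE_ERROR) return NULL;    /* invalid or truncated frames */
        if (dSize == ZSTD_CONTENTSIZE_UNKNOWN) return NULL;  /* size not recorded in headers */
        *dstCapacity = (size_t)dSize;
        return malloc((size_t)dSize);
    }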

/** ZSTD_getDecompressedSize() :
* compatible with legacy mode
@@ -316,14 +403,8 @@ size_t ZSTD_getFrameParams(ZSTD_frameParams* fparamsPtr, const void* src, size_t
- frame header not complete (`srcSize` too small) */
unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
{
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT==1)
if (ZSTD_isLegacy(src, srcSize)) return ZSTD_getDecompressedSize_legacy(src, srcSize);
#endif
{ ZSTD_frameParams fparams;
size_t const frResult = ZSTD_getFrameParams(&fparams, src, srcSize);
if (frResult!=0) return 0;
return fparams.frameContentSize;
}
unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
return ret >= ZSTD_CONTENTSIZE_ERROR ? 0 : ret;
}


@@ -432,14 +513,14 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
HUF_decompress1X_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr) :
HUF_decompress4X_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr) ) :
( singleStream ?
HUF_decompress1X2_DCtx(dctx->hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize) :
HUF_decompress4X_hufOnly (dctx->hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize)) ))
HUF_decompress1X2_DCtx(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize) :
HUF_decompress4X_hufOnly (dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize)) ))
return ERROR(corruption_detected);

dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
dctx->litEntropy = 1;
if (litEncType==set_compressed) dctx->HUFptr = dctx->hufTable;
if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
return litCSize + lhSize;
}
@@ -756,19 +837,19 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
ip++;

/* Build DTables */
{ size_t const llhSize = ZSTD_buildSeqTable(dctx->LLTable, &dctx->LLTptr,
{ size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
LLtype, MaxLL, LLFSELog,
ip, iend-ip, LL_defaultDTable, dctx->fseEntropy);
if (ZSTD_isError(llhSize)) return ERROR(corruption_detected);
ip += llhSize;
}
{ size_t const ofhSize = ZSTD_buildSeqTable(dctx->OFTable, &dctx->OFTptr,
{ size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
OFtype, MaxOff, OffFSELog,
ip, iend-ip, OF_defaultDTable, dctx->fseEntropy);
if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected);
ip += ofhSize;
}
{ size_t const mlhSize = ZSTD_buildSeqTable(dctx->MLTable, &dctx->MLTptr,
{ size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
MLtype, MaxML, MLFSELog,
ip, iend-ip, ML_defaultDTable, dctx->fseEntropy);
if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected);
@@ -795,7 +876,7 @@ typedef struct {
size_t prevOffset[ZSTD_REP_NUM];
const BYTE* base;
size_t pos;
iPtrDiff gotoDict;
uPtrDiff gotoDict;
} seqState_t;


@@ -925,9 +1006,9 @@ static seq_t ZSTD_decodeSequence(seqState_t* seqState)

FORCE_INLINE
size_t ZSTD_execSequence(BYTE* op,
BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
{
BYTE* const oLitEnd = op + sequence.litLength;
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
@@ -952,7 +1033,7 @@ size_t ZSTD_execSequence(BYTE* op,
if (sequence.offset > (size_t)(oLitEnd - base)) {
/* offset beyond prefix */
if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
match += (dictEnd-base);
match = dictEnd + (match - base);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
@@ -975,7 +1056,7 @@ size_t ZSTD_execSequence(BYTE* op,
if (sequence.offset < 8) {
/* close range match, overlap */
static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* substracted */
static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
int const sub2 = dec64table[sequence.offset];
op[0] = match[0];
op[1] = match[1];
@@ -1030,7 +1111,7 @@ static size_t ZSTD_decompressSequences(
if (nbSeq) {
seqState_t seqState;
dctx->fseEntropy = 1;
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->rep[i]; }
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
@@ -1047,7 +1128,7 @@ static size_t ZSTD_decompressSequences(
/* check if reached exact end */
if (nbSeq) return ERROR(corruption_detected);
/* save reps for next block */
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->rep[i] = (U32)(seqState.prevOffset[i]); }
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
}

/* last literal segment */
@@ -1061,7 +1142,7 @@ static size_t ZSTD_decompressSequences(
}


static seq_t ZSTD_decodeSequenceLong(seqState_t* seqState)
FORCE_INLINE seq_t ZSTD_decodeSequenceLong_generic(seqState_t* seqState, int const longOffsets)
{
seq_t seq;

@@ -1096,8 +1177,15 @@ static seq_t ZSTD_decodeSequenceLong(seqState_t* seqState)
if (!ofCode)
offset = 0;
else {
offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
if (longOffsets) {
int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN);
offset = OF_base[ofCode] + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream);
if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
} else {
offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
}
}

if (ofCode <= 1) {
@@ -1141,6 +1229,14 @@ static seq_t ZSTD_decodeSequenceLong(seqState_t* seqState)
return seq;
}

static seq_t ZSTD_decodeSequenceLong(seqState_t* seqState, unsigned const windowSize) {
if (ZSTD_highbit32(windowSize) > STREAM_ACCUMULATOR_MIN) {
return ZSTD_decodeSequenceLong_generic(seqState, 1);
} else {
return ZSTD_decodeSequenceLong_generic(seqState, 0);
}
}

FORCE_INLINE
size_t ZSTD_execSequenceLong(BYTE* op,
BYTE* const oend, seq_t sequence,
@@ -1196,7 +1292,7 @@ size_t ZSTD_execSequenceLong(BYTE* op,
if (sequence.offset < 8) {
/* close range match, overlap */
static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* substracted */
static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
int const sub2 = dec64table[sequence.offset];
op[0] = match[0];
op[1] = match[1];
@@ -1238,6 +1334,7 @@ static size_t ZSTD_decompressSequencesLong(
const BYTE* const base = (const BYTE*) (dctx->base);
const BYTE* const vBase = (const BYTE*) (dctx->vBase);
const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
unsigned const windowSize = dctx->fParams.windowSize;
int nbSeq;

/* Build Decoding Tables */
@@ -1256,10 +1353,10 @@ static size_t ZSTD_decompressSequencesLong(
seqState_t seqState;
int seqNb;
dctx->fseEntropy = 1;
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->rep[i]; }
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
seqState.base = base;
seqState.pos = (size_t)(op-base);
seqState.gotoDict = (iPtrDiff)(dictEnd - base);
seqState.gotoDict = (uPtrDiff)dictEnd - (uPtrDiff)base; /* cast to avoid undefined behaviour */
CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
@@ -1267,13 +1364,13 @@ static size_t ZSTD_decompressSequencesLong(

/* prepare in advance */
for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && seqNb<seqAdvance; seqNb++) {
sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState);
sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, windowSize);
}
if (seqNb<seqAdvance) return ERROR(corruption_detected);

/* decode and decompress */
for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && seqNb<nbSeq ; seqNb++) {
seq_t const sequence = ZSTD_decodeSequenceLong(&seqState);
seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, windowSize);
size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
ZSTD_PREFETCH(sequence.match);
@@ -1291,7 +1388,7 @@ static size_t ZSTD_decompressSequencesLong(
}

/* save reps for next block */
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->rep[i] = (U32)(seqState.prevOffset[i]); }
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
}

/* last literal segment */
@@ -1313,13 +1410,18 @@ static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,

if (srcSize >= ZSTD_BLOCKSIZE_ABSOLUTEMAX) return ERROR(srcSize_wrong);

/* Decode literals sub-block */
/* Decode literals section */
{ size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
if (ZSTD_isError(litCSize)) return litCSize;
ip += litCSize;
srcSize -= litCSize;
}
if (dctx->fParams.windowSize > (1<<23)) return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize);
if (sizeof(size_t) > 4) /* do not enable prefetching on 32-bits x86, as it's performance detrimental */
/* likely because of register pressure */
/* if that's the correct cause, then 32-bits ARM should be affected differently */
/* it would be good to test this on ARM real hardware, to see if prefetch version improves speed */
if (dctx->fParams.windowSize > (1<<23))
return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize);
return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
}

@@ -1363,27 +1465,81 @@ size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t len
return length;
}

/** ZSTD_findFrameCompressedSize() :
* compatible with legacy mode
* `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
* `srcSize` must be at least as large as the frame contained
* @return : the compressed size of the frame starting at `src` */
size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
{
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
if (ZSTD_isLegacy(src, srcSize)) return ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
#endif
if (srcSize >= ZSTD_skippableHeaderSize &&
(MEM_readLE32(src) & 0xFFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + 4);
} else {
const BYTE* ip = (const BYTE*)src;
const BYTE* const ipstart = ip;
size_t remainingSize = srcSize;
ZSTD_frameParams fParams;

size_t const headerSize = ZSTD_frameHeaderSize(ip, remainingSize);
if (ZSTD_isError(headerSize)) return headerSize;

/* Frame Header */
{ size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize);
if (ZSTD_isError(ret)) return ret;
if (ret > 0) return ERROR(srcSize_wrong);
}

ip += headerSize;
remainingSize -= headerSize;

/* Loop on each block */
while (1) {
blockProperties_t blockProperties;
size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTD_isError(cBlockSize)) return cBlockSize;

if (ZSTD_blockHeaderSize + cBlockSize > remainingSize) return ERROR(srcSize_wrong);

ip += ZSTD_blockHeaderSize + cBlockSize;
remainingSize -= ZSTD_blockHeaderSize + cBlockSize;

if (blockProperties.lastBlock) break;
}

if (fParams.checksumFlag) { /* Frame content checksum */
if (remainingSize < 4) return ERROR(srcSize_wrong);
ip += 4;
remainingSize -= 4;
}

return ip - ipstart;
}
}
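
A short sketch of the intended use of ZSTD_findFrameCompressedSize(): walking a buffer that holds several concatenated frames and summing their compressed sizes. It assumes the function is reachable via static linking; total_frames_size is a hypothetical caller-side helper:

    #define ZSTD_STATIC_LINKING_ONLY   /* assumed: declares ZSTD_findFrameCompressedSize() */
    #include <zstd.h>

    static size_t total_frames_size(const void* src, size_t srcSize)
    {
        size_t total = 0;
        while (srcSize > 0) {
            size_t const fSize = ZSTD_findFrameCompressedSize(src, srcSize);
            if (ZSTD_isError(fSize)) return fSize;   /* corrupt or truncated input */
            src = (const char*)src + fSize;
            srcSize -= fSize;
            total += fSize;
        }
        return total;
    }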

/*! ZSTD_decompressFrame() :
* `dctx` must be properly initialized */
* @dctx must be properly initialized */
static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
const void** srcPtr, size_t *srcSizePtr)
{
const BYTE* ip = (const BYTE*)src;
const BYTE* ip = (const BYTE*)(*srcPtr);
BYTE* const ostart = (BYTE* const)dst;
BYTE* const oend = ostart + dstCapacity;
BYTE* op = ostart;
size_t remainingSize = srcSize;
size_t remainingSize = *srcSizePtr;

/* check */
if (srcSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
if (remainingSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);

/* Frame Header */
{ size_t const frameHeaderSize = ZSTD_frameHeaderSize(src, ZSTD_frameHeaderSize_prefix);
{ size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix);
if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
if (srcSize < frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
CHECK_F(ZSTD_decodeFrameHeader(dctx, src, frameHeaderSize));
if (remainingSize < frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
CHECK_F(ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize));
ip += frameHeaderSize; remainingSize -= frameHeaderSize;
}

@@ -1428,25 +1584,109 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
if (remainingSize<4) return ERROR(checksum_wrong);
checkRead = MEM_readLE32(ip);
if (checkRead != checkCalc) return ERROR(checksum_wrong);
ip += 4;
remainingSize -= 4;
}

if (remainingSize) return ERROR(srcSize_wrong);
/* Allow caller to get size read */
*srcPtr = ip;
*srcSizePtr = remainingSize;
return op-ostart;
}

static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict);
static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict);

static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void *dict, size_t dictSize,
const ZSTD_DDict* ddict)
{
void* const dststart = dst;

if (ddict) {
if (dict) {
/* programmer error, these two cases should be mutually exclusive */
return ERROR(GENERIC);
}

dict = ZSTD_DDictDictContent(ddict);
dictSize = ZSTD_DDictDictSize(ddict);
}

while (srcSize >= ZSTD_frameHeaderSize_prefix) {
U32 magicNumber;

#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
if (ZSTD_isLegacy(src, srcSize)) {
size_t decodedSize;
size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
if (ZSTD_isError(frameSize)) return frameSize;

decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize);

dst = (BYTE*)dst + decodedSize;
dstCapacity -= decodedSize;

src = (const BYTE*)src + frameSize;
srcSize -= frameSize;

continue;
}
#endif

magicNumber = MEM_readLE32(src);
if (magicNumber != ZSTD_MAGICNUMBER) {
if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
size_t skippableSize;
if (srcSize < ZSTD_skippableHeaderSize)
return ERROR(srcSize_wrong);
skippableSize = MEM_readLE32((const BYTE *)src + 4) +
ZSTD_skippableHeaderSize;
if (srcSize < skippableSize) {
return ERROR(srcSize_wrong);
}

src = (const BYTE *)src + skippableSize;
srcSize -= skippableSize;
continue;
} else {
return ERROR(prefix_unknown);
}
}

if (ddict) {
/* we were called from ZSTD_decompress_usingDDict */
ZSTD_refDDict(dctx, ddict);
} else {
/* this will initialize correctly with no dict if dict == NULL, so
* use this in all cases but ddict */
CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
}
ZSTD_checkContinuity(dctx, dst);

{ const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
&src, &srcSize);
if (ZSTD_isError(res)) return res;
/* don't need to bounds check this, ZSTD_decompressFrame will have
* already */
dst = (BYTE*)dst + res;
dstCapacity -= res;
}
}

if (srcSize) return ERROR(srcSize_wrong); /* input not entirely consumed */

return (BYTE*)dst - (BYTE*)dststart;
}

size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict, size_t dictSize)
{
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT==1)
if (ZSTD_isLegacy(src, srcSize)) return ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, dict, dictSize);
#endif
CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
ZSTD_checkContinuity(dctx, dst);
return ZSTD_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
}


@@ -1633,22 +1873,29 @@ static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dict
return 0;
}

static size_t ZSTD_loadEntropy(ZSTD_DCtx* dctx, const void* const dict, size_t const dictSize)
/* ZSTD_loadEntropy() :
* dict : must point at beginning of a valid zstd dictionary
* @return : size of entropy tables read */
static size_t ZSTD_loadEntropy(ZSTD_entropyTables_t* entropy, const void* const dict, size_t const dictSize)
{
const BYTE* dictPtr = (const BYTE*)dict;
const BYTE* const dictEnd = dictPtr + dictSize;

{ size_t const hSize = HUF_readDTableX4(dctx->hufTable, dict, dictSize);
if (dictSize <= 8) return ERROR(dictionary_corrupted);
dictPtr += 8; /* skip header = magic + dictID */


{ size_t const hSize = HUF_readDTableX4(entropy->hufTable, dictPtr, dictEnd-dictPtr);
if (HUF_isError(hSize)) return ERROR(dictionary_corrupted);
dictPtr += hSize;
}

{ short offcodeNCount[MaxOff+1];
U32 offcodeMaxValue=MaxOff, offcodeLog;
U32 offcodeMaxValue = MaxOff, offcodeLog;
size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
CHECK_E(FSE_buildDTable(dctx->OFTable, offcodeNCount, offcodeMaxValue, offcodeLog), dictionary_corrupted);
CHECK_E(FSE_buildDTable(entropy->OFTable, offcodeNCount, offcodeMaxValue, offcodeLog), dictionary_corrupted);
dictPtr += offcodeHeaderSize;
}

@@ -1657,7 +1904,7 @@ static size_t ZSTD_loadEntropy(ZSTD_DCtx* dctx, const void* const dict, size_t c
size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
CHECK_E(FSE_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog), dictionary_corrupted);
CHECK_E(FSE_buildDTable(entropy->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog), dictionary_corrupted);
dictPtr += matchlengthHeaderSize;
}

@@ -1666,17 +1913,19 @@ static size_t ZSTD_loadEntropy(ZSTD_DCtx* dctx, const void* const dict, size_t c
size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
CHECK_E(FSE_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog), dictionary_corrupted);
CHECK_E(FSE_buildDTable(entropy->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog), dictionary_corrupted);
dictPtr += litlengthHeaderSize;
}

if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
dctx->rep[0] = MEM_readLE32(dictPtr+0); if (dctx->rep[0] == 0 || dctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
dctx->rep[1] = MEM_readLE32(dictPtr+4); if (dctx->rep[1] == 0 || dctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
dctx->rep[2] = MEM_readLE32(dictPtr+8); if (dctx->rep[2] == 0 || dctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
dictPtr += 12;
{ int i;
size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
for (i=0; i<3; i++) {
U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
if (rep==0 || rep >= dictContentSize) return ERROR(dictionary_corrupted);
entropy->rep[i] = rep;
} }

dctx->litEntropy = dctx->fseEntropy = 1;
return dictPtr - (const BYTE*)dict;
}

@@ -1690,13 +1939,12 @@ static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict
dctx->dictID = MEM_readLE32((const char*)dict + 4);

/* load entropy tables */
dict = (const char*)dict + 8;
dictSize -= 8;
{ size_t const eSize = ZSTD_loadEntropy(dctx, dict, dictSize);
{ size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize);
if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted);
dict = (const char*)dict + eSize;
dictSize -= eSize;
}
dctx->litEntropy = dctx->fseEntropy = 1;

/* reference dictionary content */
return ZSTD_refDictContent(dctx, dict, dictSize);
@@ -1716,50 +1964,101 @@ struct ZSTD_DDict_s {
void* dictBuffer;
const void* dictContent;
size_t dictSize;
ZSTD_DCtx* refContext;
ZSTD_entropyTables_t entropy;
U32 dictID;
U32 entropyPresent;
ZSTD_customMem cMem;
}; /* typedef'd to ZSTD_DDict within "zstd.h" */

static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict)
{
return ddict->dictContent;
}

static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict)
{
return ddict->dictSize;
}

static void ZSTD_refDDict(ZSTD_DCtx* dstDCtx, const ZSTD_DDict* ddict)
{
ZSTD_decompressBegin(dstDCtx); /* init */
if (ddict) { /* support refDDict on NULL */
dstDCtx->dictID = ddict->dictID;
dstDCtx->base = ddict->dictContent;
dstDCtx->vBase = ddict->dictContent;
dstDCtx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
dstDCtx->previousDstEnd = dstDCtx->dictEnd;
if (ddict->entropyPresent) {
dstDCtx->litEntropy = 1;
dstDCtx->fseEntropy = 1;
dstDCtx->LLTptr = ddict->entropy.LLTable;
dstDCtx->MLTptr = ddict->entropy.MLTable;
dstDCtx->OFTptr = ddict->entropy.OFTable;
dstDCtx->HUFptr = ddict->entropy.hufTable;
dstDCtx->entropy.rep[0] = ddict->entropy.rep[0];
dstDCtx->entropy.rep[1] = ddict->entropy.rep[1];
dstDCtx->entropy.rep[2] = ddict->entropy.rep[2];
} else {
dstDCtx->litEntropy = 0;
dstDCtx->fseEntropy = 0;
}
}
}

static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict)
{
ddict->dictID = 0;
ddict->entropyPresent = 0;
if (ddict->dictSize < 8) return 0;
{ U32 const magic = MEM_readLE32(ddict->dictContent);
if (magic != ZSTD_DICT_MAGIC) return 0; /* pure content mode */
}
ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + 4);

/* load entropy tables */
CHECK_E( ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted );
ddict->entropyPresent = 1;
return 0;
}


ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, unsigned byReference, ZSTD_customMem customMem)
{
if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem;
if (!customMem.customAlloc || !customMem.customFree) return NULL;

{ ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
ZSTD_DCtx* const dctx = ZSTD_createDCtx_advanced(customMem);

if (!ddict || !dctx) {
ZSTD_free(ddict, customMem);
ZSTD_free(dctx, customMem);
return NULL;
}
if (!ddict) return NULL;
ddict->cMem = customMem;

if ((byReference) || (!dict) || (!dictSize)) {
ddict->dictBuffer = NULL;
ddict->dictContent = dict;
} else {
void* const internalBuffer = ZSTD_malloc(dictSize, customMem);
if (!internalBuffer) { ZSTD_free(dctx, customMem); ZSTD_free(ddict, customMem); return NULL; }
if (!internalBuffer) { ZSTD_freeDDict(ddict); return NULL; }
memcpy(internalBuffer, dict, dictSize);
ddict->dictBuffer = internalBuffer;
ddict->dictContent = internalBuffer;
}
{ size_t const errorCode = ZSTD_decompressBegin_usingDict(dctx, ddict->dictContent, dictSize);
ddict->dictSize = dictSize;
ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
/* parse dictionary content */
{ size_t const errorCode = ZSTD_loadEntropy_inDDict(ddict);
if (ZSTD_isError(errorCode)) {
ZSTD_free(ddict->dictBuffer, customMem);
ZSTD_free(ddict, customMem);
ZSTD_free(dctx, customMem);
ZSTD_freeDDict(ddict);
return NULL;
} }

ddict->dictSize = dictSize;
ddict->refContext = dctx;
return ddict;
}
}

/*! ZSTD_createDDict() :
* Create a digested dictionary, ready to start decompression without startup delay.
* `dict` can be released after `ZSTD_DDict` creation */
* Create a digested dictionary, to start decompression without startup delay.
* `dict` content is copied inside DDict.
* Consequently, `dict` can be released after `ZSTD_DDict` creation */
ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
{
ZSTD_customMem const allocator = { NULL, NULL, NULL };
@@ -1768,9 +2067,9 @@ ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)


/*! ZSTD_createDDict_byReference() :
* Create a digested dictionary, ready to start decompression operation without startup delay.
* Dictionary content is simply referenced, and therefore stays in dictBuffer.
* It is important that dictBuffer outlives DDict, it must remain read accessible throughout the lifetime of DDict */
* Create a digested dictionary, to start decompression without startup delay.
* Dictionary content is simply referenced, it will be accessed during decompression.
* Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
{
ZSTD_customMem const allocator = { NULL, NULL, NULL };
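
The lifetime rule in the revised comment matters in practice. A minimal usage sketch of the by-reference variant, assuming the public zstd decompression API; dictBuf, dictBufSize, dst, dstCapacity, src and srcSize are caller-supplied and error handling is abbreviated:

    ZSTD_DDict* const ddict = ZSTD_createDDict_byReference(dictBuf, dictBufSize);
    ZSTD_DCtx*  const dctx  = ZSTD_createDCtx();
    size_t const dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ddict);
    if (ZSTD_isError(dSize)) { /* handle error */ }
    ZSTD_freeDCtx(dctx);
    ZSTD_freeDDict(ddict);   /* dictBuf may only be released after this point */
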
@@ -1781,8 +2080,7 @@ ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize
size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
{
if (ddict==NULL) return 0; /* support free on NULL */
{ ZSTD_customMem const cMem = ddict->refContext->customMem;
ZSTD_freeDCtx(ddict->refContext);
{ ZSTD_customMem const cMem = ddict->cMem;
ZSTD_free(ddict->dictBuffer, cMem);
ZSTD_free(ddict, cMem);
return 0;
@@ -1792,7 +2090,7 @@ size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
{
if (ddict==NULL) return 0; /* support sizeof on NULL */
return sizeof(*ddict) + ZSTD_sizeof_DCtx(ddict->refContext) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
}

/*! ZSTD_getDictID_fromDict() :
@@ -1843,12 +2141,10 @@ size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
const void* src, size_t srcSize,
const ZSTD_DDict* ddict)
{
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT==1)
if (ZSTD_isLegacy(src, srcSize)) return ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, ddict->dictContent, ddict->dictSize);
#endif
ZSTD_refDCtx(dctx, ddict->refContext);
ZSTD_checkContinuity(dctx, dst);
return ZSTD_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
/* pass content and size in case legacy frames are encountered */
return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize,
NULL, 0,
ddict);
}


@@ -2048,10 +2344,23 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
break;
} }

/* check for single-pass mode opportunity */
if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */
&& (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart);
if (cSize <= (size_t)(iend-istart)) {
size_t const decompressedSize = ZSTD_decompress_usingDDict(zds->dctx, op, oend-op, istart, cSize, zds->ddict);
if (ZSTD_isError(decompressedSize)) return decompressedSize;
ip = istart + cSize;
op += decompressedSize;
zds->dctx->expected = 0;
zds->stage = zdss_init;
someMoreWork = 0;
break;
} }

/* Consume header */
{ const ZSTD_DCtx* refContext = zds->ddict ? zds->ddict->refContext : NULL;
ZSTD_refDCtx(zds->dctx, refContext);
}
ZSTD_refDDict(zds->dctx, zds->ddict);
{ size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */
CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer, h1Size));
{ size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx);
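
For reference, the single-pass shortcut above sits inside the normal streaming loop, which on the caller's side looks roughly like this (a sketch only; srcBuf/dstBuf and their sizes are assumed caller variables, and error handling is abbreviated):

    ZSTD_DStream* const zds = ZSTD_createDStream();
    ZSTD_initDStream(zds);                                    /* returns a recommended input size */
    ZSTD_inBuffer  in  = { srcBuf, srcSize, 0 };
    ZSTD_outBuffer out = { dstBuf, dstCapacity, 0 };
    while (in.pos < in.size) {
        size_t const ret = ZSTD_decompressStream(zds, &out, &in);
        if (ZSTD_isError(ret)) break;                         /* decoding error */
        if (ret == 0) break;                                  /* a frame is fully decoded and flushed */
    }
    ZSTD_freeDStream(zds);
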
|
||||
@@ -2063,7 +2372,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
|
||||
|
||||
/* Adapt buffer sizes to frame header instructions */
|
||||
{ size_t const blockSize = MIN(zds->fParams.windowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
|
||||
size_t const neededOutSize = zds->fParams.windowSize + blockSize;
|
||||
size_t const neededOutSize = zds->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
|
||||
zds->blockSize = blockSize;
|
||||
if (zds->inBuffSize < blockSize) {
|
||||
ZSTD_free(zds->inBuff, zds->customMem);
|
||||
|
||||
@@ -49,6 +49,10 @@
|
||||
#include "error_private.h"
|
||||
#define ZSTD_STATIC_LINKING_ONLY
|
||||
#include "zstd.h"
|
||||
#ifndef XXH_STATIC_LINKING_ONLY
|
||||
# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
|
||||
#endif
|
||||
#include "xxhash.h" /* XXH_reset, update, digest */
|
||||
|
||||
|
||||
/*-*************************************
|
||||
|
||||
@@ -20,14 +20,33 @@ extern "C" {
|
||||
#include "mem.h" /* MEM_STATIC */
|
||||
#include "error_private.h" /* ERROR */
|
||||
#include "zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer */
|
||||
#include "zstd_v01.h"
|
||||
#include "zstd_v02.h"
|
||||
#include "zstd_v03.h"
|
||||
#include "zstd_v04.h"
|
||||
#include "zstd_v05.h"
|
||||
#include "zstd_v06.h"
|
||||
#include "zstd_v07.h"
|
||||
|
||||
#if !defined (ZSTD_LEGACY_SUPPORT) || (ZSTD_LEGACY_SUPPORT == 0)
|
||||
# undef ZSTD_LEGACY_SUPPORT
|
||||
# define ZSTD_LEGACY_SUPPORT 8
|
||||
#endif
|
||||
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 1)
|
||||
# include "zstd_v01.h"
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 2)
|
||||
# include "zstd_v02.h"
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 3)
|
||||
# include "zstd_v03.h"
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 4)
|
||||
# include "zstd_v04.h"
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 5)
|
||||
# include "zstd_v05.h"
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 6)
|
||||
# include "zstd_v06.h"
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 7)
|
||||
# include "zstd_v07.h"
|
||||
#endif
|
||||
|
||||
/** ZSTD_isLegacy() :
|
||||
@return : > 0 if supported by legacy decoder. 0 otherwise.
|
||||
@@ -40,13 +59,27 @@ MEM_STATIC unsigned ZSTD_isLegacy(const void* src, size_t srcSize)
|
||||
magicNumberLE = MEM_readLE32(src);
|
||||
switch(magicNumberLE)
|
||||
{
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 1)
|
||||
case ZSTDv01_magicNumberLE:return 1;
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 2)
|
||||
case ZSTDv02_magicNumber : return 2;
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 3)
|
||||
case ZSTDv03_magicNumber : return 3;
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 4)
|
||||
case ZSTDv04_magicNumber : return 4;
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 5)
|
||||
case ZSTDv05_MAGICNUMBER : return 5;
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 6)
|
||||
case ZSTDv06_MAGICNUMBER : return 6;
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 7)
|
||||
case ZSTDv07_MAGICNUMBER : return 7;
|
||||
#endif
|
||||
default : return 0;
|
||||
}
|
||||
}
|
||||
@@ -56,24 +89,30 @@ MEM_STATIC unsigned long long ZSTD_getDecompressedSize_legacy(const void* src, s
|
||||
{
|
||||
U32 const version = ZSTD_isLegacy(src, srcSize);
|
||||
if (version < 5) return 0; /* no decompressed size in frame header, or not a legacy format */
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 5)
|
||||
if (version==5) {
|
||||
ZSTDv05_parameters fParams;
|
||||
size_t const frResult = ZSTDv05_getFrameParams(&fParams, src, srcSize);
|
||||
if (frResult != 0) return 0;
|
||||
return fParams.srcSize;
|
||||
}
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 6)
|
||||
if (version==6) {
|
||||
ZSTDv06_frameParams fParams;
|
||||
size_t const frResult = ZSTDv06_getFrameParams(&fParams, src, srcSize);
|
||||
if (frResult != 0) return 0;
|
||||
return fParams.frameContentSize;
|
||||
}
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 7)
|
||||
if (version==7) {
|
||||
ZSTDv07_frameParams fParams;
|
||||
size_t const frResult = ZSTDv07_getFrameParams(&fParams, src, srcSize);
|
||||
if (frResult != 0) return 0;
|
||||
return fParams.frameContentSize;
|
||||
}
|
||||
#endif
|
||||
return 0; /* should not be possible */
|
||||
}
|
||||
|
||||
@@ -86,14 +125,23 @@ MEM_STATIC size_t ZSTD_decompressLegacy(
|
||||
U32 const version = ZSTD_isLegacy(src, compressedSize);
|
||||
switch(version)
|
||||
{
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 1)
|
||||
case 1 :
|
||||
return ZSTDv01_decompress(dst, dstCapacity, src, compressedSize);
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 2)
|
||||
case 2 :
|
||||
return ZSTDv02_decompress(dst, dstCapacity, src, compressedSize);
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 3)
|
||||
case 3 :
|
||||
return ZSTDv03_decompress(dst, dstCapacity, src, compressedSize);
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 4)
|
||||
case 4 :
|
||||
return ZSTDv04_decompress(dst, dstCapacity, src, compressedSize);
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 5)
|
||||
case 5 :
|
||||
{ size_t result;
|
||||
ZSTDv05_DCtx* const zd = ZSTDv05_createDCtx();
|
||||
@@ -102,6 +150,8 @@ MEM_STATIC size_t ZSTD_decompressLegacy(
|
||||
ZSTDv05_freeDCtx(zd);
|
||||
return result;
|
||||
}
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 6)
|
||||
case 6 :
|
||||
{ size_t result;
|
||||
ZSTDv06_DCtx* const zd = ZSTDv06_createDCtx();
|
||||
@@ -110,6 +160,8 @@ MEM_STATIC size_t ZSTD_decompressLegacy(
|
||||
ZSTDv06_freeDCtx(zd);
|
||||
return result;
|
||||
}
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 7)
|
||||
case 7 :
|
||||
{ size_t result;
|
||||
ZSTDv07_DCtx* const zd = ZSTDv07_createDCtx();
|
||||
@@ -118,11 +170,50 @@ MEM_STATIC size_t ZSTD_decompressLegacy(
|
||||
ZSTDv07_freeDCtx(zd);
|
||||
return result;
|
||||
}
|
||||
#endif
|
||||
default :
|
||||
return ERROR(prefix_unknown);
|
||||
}
|
||||
}
|
||||
|
||||
MEM_STATIC size_t ZSTD_findFrameCompressedSizeLegacy(const void *src,
|
||||
size_t compressedSize)
|
||||
{
|
||||
U32 const version = ZSTD_isLegacy(src, compressedSize);
|
||||
switch(version)
|
||||
{
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 1)
|
||||
case 1 :
|
||||
return ZSTDv01_findFrameCompressedSize(src, compressedSize);
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 2)
|
||||
case 2 :
|
||||
return ZSTDv02_findFrameCompressedSize(src, compressedSize);
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 3)
|
||||
case 3 :
|
||||
return ZSTDv03_findFrameCompressedSize(src, compressedSize);
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 4)
|
||||
case 4 :
|
||||
return ZSTDv04_findFrameCompressedSize(src, compressedSize);
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 5)
|
||||
case 5 :
|
||||
return ZSTDv05_findFrameCompressedSize(src, compressedSize);
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 6)
|
||||
case 6 :
|
||||
return ZSTDv06_findFrameCompressedSize(src, compressedSize);
|
||||
#endif
|
||||
#if (ZSTD_LEGACY_SUPPORT <= 7)
|
||||
case 7 :
|
||||
return ZSTDv07_findFrameCompressedSize(src, compressedSize);
|
||||
#endif
|
||||
default :
|
||||
return ERROR(prefix_unknown);
|
||||
}
|
||||
}
|
||||
|
||||
MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version)
|
||||
{
|
||||
@@ -133,10 +224,18 @@ MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version)
case 2 :
case 3 :
return ERROR(version_unsupported);
#if (ZSTD_LEGACY_SUPPORT <= 4)
case 4 : return ZBUFFv04_freeDCtx((ZBUFFv04_DCtx*)legacyContext);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
case 5 : return ZBUFFv05_freeDCtx((ZBUFFv05_DCtx*)legacyContext);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
case 6 : return ZBUFFv06_freeDCtx((ZBUFFv06_DCtx*)legacyContext);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
case 7 : return ZBUFFv07_freeDCtx((ZBUFFv07_DCtx*)legacyContext);
#endif
}
}

@@ -152,6 +251,7 @@ MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U
case 2 :
case 3 :
return 0;
#if (ZSTD_LEGACY_SUPPORT <= 4)
case 4 :
{
ZBUFFv04_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv04_createDCtx() : (ZBUFFv04_DCtx*)*legacyContext;
@@ -161,6 +261,8 @@ MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U
*legacyContext = dctx;
return 0;
}
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
case 5 :
{
ZBUFFv05_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv05_createDCtx() : (ZBUFFv05_DCtx*)*legacyContext;
@@ -169,6 +271,8 @@ MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U
*legacyContext = dctx;
return 0;
}
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
case 6 :
{
ZBUFFv06_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv06_createDCtx() : (ZBUFFv06_DCtx*)*legacyContext;
@@ -177,6 +281,8 @@ MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U
*legacyContext = dctx;
return 0;
}
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
case 7 :
{
ZBUFFv07_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv07_createDCtx() : (ZBUFFv07_DCtx*)*legacyContext;
@@ -185,6 +291,7 @@ MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U
*legacyContext = dctx;
return 0;
}
#endif
}
}

@@ -200,6 +307,7 @@ MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
case 2 :
case 3 :
return ERROR(version_unsupported);
#if (ZSTD_LEGACY_SUPPORT <= 4)
case 4 :
{
ZBUFFv04_DCtx* dctx = (ZBUFFv04_DCtx*) legacyContext;
@@ -212,6 +320,8 @@ MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
input->pos += readSize;
return hintSize;
}
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
case 5 :
{
ZBUFFv05_DCtx* dctx = (ZBUFFv05_DCtx*) legacyContext;
@@ -224,6 +334,8 @@ MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
input->pos += readSize;
return hintSize;
}
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
case 6 :
{
ZBUFFv06_DCtx* dctx = (ZBUFFv06_DCtx*) legacyContext;
@@ -236,6 +348,8 @@ MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
input->pos += readSize;
return hintSize;
}
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
case 7 :
{
ZBUFFv07_DCtx* dctx = (ZBUFFv07_DCtx*) legacyContext;
@@ -248,6 +362,7 @@ MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
input->pos += readSize;
return hintSize;
}
#endif
}
}

@@ -203,7 +203,7 @@ MEM_STATIC void ZSTD_updatePrice(seqStore_t* seqStorePtr, U32 litLength, const B

/* Update hashTable3 up to ip (excluded)
Assumption : always within prefix (ie. not within extDict) */
Assumption : always within prefix (i.e. not within extDict) */
FORCE_INLINE
U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_CCtx* zc, const BYTE* ip)
{

@@ -1992,6 +1992,37 @@ size_t ZSTDv01_decompress(void* dst, size_t maxDstSize, const void* src, size_t
return ZSTDv01_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
}

size_t ZSTDv01_findFrameCompressedSize(const void* src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
size_t remainingSize = srcSize;
U32 magicNumber;
blockProperties_t blockProperties;

/* Frame Header */
if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
magicNumber = ZSTD_readBE32(src);
if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;

/* Loop on each block */
while (1)
{
size_t blockSize = ZSTDv01_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTDv01_isError(blockSize)) return blockSize;

ip += ZSTD_blockHeaderSize;
remainingSize -= ZSTD_blockHeaderSize;
if (blockSize > remainingSize) return ERROR(srcSize_wrong);

if (blockSize == 0) break; /* bt_end */

ip += blockSize;
remainingSize -= blockSize;
}

return ip - (const BYTE*)src;
}

/*******************************
* Streaming Decompression API

@@ -34,6 +34,14 @@ ZSTDv01_decompress() : decompress ZSTD frames compliant with v0.1.x format
size_t ZSTDv01_decompress( void* dst, size_t maxOriginalSize,
const void* src, size_t compressedSize);

/**
ZSTDv01_getFrameSrcSize() : get the source length of a ZSTD frame compliant with v0.1.x format
compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
return : the number of bytes that would be read to decompress this frame
or an errorCode if it fails (which can be tested using ZSTDv01_isError())
*/
size_t ZSTDv01_findFrameCompressedSize(const void* src, size_t compressedSize);

/**
ZSTDv01_isError() : tells if the result of ZSTDv01_decompress() is an error
*/

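The declaration block above documents the new v0.1.x entry point. As a minimal caller-side sketch (illustrative only, not part of the commit; buf and bufSize are hypothetical names for a buffer that starts with a v0.1.x frame), the return value is either an error code or the number of bytes the frame occupies:

const unsigned char* p = buf;
size_t remaining = bufSize;
size_t const frameSize = ZSTDv01_findFrameCompressedSize(p, remaining);
if (ZSTDv01_isError(frameSize)) {
    /* not a valid v0.1.x frame, or the buffer is truncated */
} else {
    p += frameSize;           /* first byte after the frame */
    remaining -= frameSize;   /* bytes left after the frame */
}
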
@@ -3378,6 +3378,38 @@ static size_t ZSTD_decompress(void* dst, size_t maxDstSize, const void* src, siz
return ZSTD_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
}

static size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
{

const BYTE* ip = (const BYTE*)src;
size_t remainingSize = srcSize;
U32 magicNumber;
blockProperties_t blockProperties;

/* Frame Header */
if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
magicNumber = MEM_readLE32(src);
if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;

/* Loop on each block */
while (1)
{
size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTD_isError(cBlockSize)) return cBlockSize;

ip += ZSTD_blockHeaderSize;
remainingSize -= ZSTD_blockHeaderSize;
if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);

if (cBlockSize == 0) break; /* bt_end */

ip += cBlockSize;
remainingSize -= cBlockSize;
}

return ip - (const BYTE*)src;
}

/*******************************
* Streaming Decompression API
@@ -3492,6 +3524,11 @@ size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,
return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
}

size_t ZSTDv02_findFrameCompressedSize(const void *src, size_t compressedSize)
{
return ZSTD_findFrameCompressedSize(src, compressedSize);
}

ZSTDv02_Dctx* ZSTDv02_createDCtx(void)
{
return (ZSTDv02_Dctx*)ZSTD_createDCtx();

@@ -34,6 +34,14 @@ ZSTDv02_decompress() : decompress ZSTD frames compliant with v0.2.x format
size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,
const void* src, size_t compressedSize);

/**
ZSTDv02_getFrameSrcSize() : get the source length of a ZSTD frame compliant with v0.2.x format
compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
return : the number of bytes that would be read to decompress this frame
or an errorCode if it fails (which can be tested using ZSTDv02_isError())
*/
size_t ZSTDv02_findFrameCompressedSize(const void* src, size_t compressedSize);

/**
ZSTDv02_isError() : tells if the result of ZSTDv02_decompress() is an error
*/

@@ -3019,6 +3019,38 @@ static size_t ZSTD_decompress(void* dst, size_t maxDstSize, const void* src, siz
return ZSTD_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
}

static size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
size_t remainingSize = srcSize;
U32 magicNumber;
blockProperties_t blockProperties;

/* Frame Header */
if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
magicNumber = MEM_readLE32(src);
if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;

/* Loop on each block */
while (1)
{
size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTD_isError(cBlockSize)) return cBlockSize;

ip += ZSTD_blockHeaderSize;
remainingSize -= ZSTD_blockHeaderSize;
if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);

if (cBlockSize == 0) break; /* bt_end */

ip += cBlockSize;
remainingSize -= cBlockSize;
}

return ip - (const BYTE*)src;
}


/*******************************
* Streaming Decompression API
@@ -3133,6 +3165,11 @@ size_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize,
return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
}

size_t ZSTDv03_findFrameCompressedSize(const void* src, size_t srcSize)
{
return ZSTD_findFrameCompressedSize(src, srcSize);
}

ZSTDv03_Dctx* ZSTDv03_createDCtx(void)
{
return (ZSTDv03_Dctx*)ZSTD_createDCtx();

@@ -35,6 +35,14 @@ size_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize,
const void* src, size_t compressedSize);

/**
ZSTDv03_getFrameSrcSize() : get the source length of a ZSTD frame compliant with v0.3.x format
compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
return : the number of bytes that would be read to decompress this frame
or an errorCode if it fails (which can be tested using ZSTDv03_isError())
*/
size_t ZSTDv03_findFrameCompressedSize(const void* src, size_t compressedSize);

/**
ZSTDv03_isError() : tells if the result of ZSTDv03_decompress() is an error
*/
unsigned ZSTDv03_isError(size_t code);

@@ -3326,6 +3326,35 @@ static size_t ZSTD_decompress_usingDict(ZSTD_DCtx* ctx,
return op-ostart;
}

static size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
size_t remainingSize = srcSize;
blockProperties_t blockProperties;

/* Frame Header */
if (srcSize < ZSTD_frameHeaderSize_min) return ERROR(srcSize_wrong);
if (MEM_readLE32(src) != ZSTD_MAGICNUMBER) return ERROR(prefix_unknown);
ip += ZSTD_frameHeaderSize_min; remainingSize -= ZSTD_frameHeaderSize_min;

/* Loop on each block */
while (1)
{
size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTD_isError(cBlockSize)) return cBlockSize;

ip += ZSTD_blockHeaderSize;
remainingSize -= ZSTD_blockHeaderSize;
if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);

if (cBlockSize == 0) break; /* bt_end */

ip += cBlockSize;
remainingSize -= cBlockSize;
}

return ip - (const BYTE*)src;
}

/* ******************************
* Streaming Decompression API
@@ -3753,6 +3782,10 @@ size_t ZSTDv04_decompress(void* dst, size_t maxDstSize, const void* src, size_t
#endif
}

size_t ZSTDv04_findFrameCompressedSize(const void* src, size_t srcSize)
{
return ZSTD_findFrameCompressedSize(src, srcSize);
}

size_t ZSTDv04_resetDCtx(ZSTDv04_Dctx* dctx) { return ZSTD_resetDCtx(dctx); }

@@ -34,6 +34,14 @@ ZSTDv04_decompress() : decompress ZSTD frames compliant with v0.4.x format
size_t ZSTDv04_decompress( void* dst, size_t maxOriginalSize,
const void* src, size_t compressedSize);

/**
ZSTDv04_getFrameSrcSize() : get the source length of a ZSTD frame compliant with v0.4.x format
compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
return : the number of bytes that would be read to decompress this frame
or an errorCode if it fails (which can be tested using ZSTDv04_isError())
*/
size_t ZSTDv04_findFrameCompressedSize(const void* src, size_t compressedSize);

/**
ZSTDv04_isError() : tells if the result of ZSTDv04_decompress() is an error
*/

@@ -3583,6 +3583,35 @@ size_t ZSTDv05_decompress(void* dst, size_t maxDstSize, const void* src, size_t
#endif
}

size_t ZSTDv05_findFrameCompressedSize(const void *src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
size_t remainingSize = srcSize;
blockProperties_t blockProperties;

/* Frame Header */
if (srcSize < ZSTDv05_frameHeaderSize_min) return ERROR(srcSize_wrong);
if (MEM_readLE32(src) != ZSTDv05_MAGICNUMBER) return ERROR(prefix_unknown);
ip += ZSTDv05_frameHeaderSize_min; remainingSize -= ZSTDv05_frameHeaderSize_min;

/* Loop on each block */
while (1)
{
size_t cBlockSize = ZSTDv05_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTDv05_isError(cBlockSize)) return cBlockSize;

ip += ZSTDv05_blockHeaderSize;
remainingSize -= ZSTDv05_blockHeaderSize;
if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);

if (cBlockSize == 0) break; /* bt_end */

ip += cBlockSize;
remainingSize -= cBlockSize;
}

return ip - (const BYTE*)src;
}

/* ******************************
* Streaming Decompression API

@@ -32,6 +32,13 @@ extern "C" {
size_t ZSTDv05_decompress( void* dst, size_t dstCapacity,
const void* src, size_t compressedSize);

/**
ZSTDv05_getFrameSrcSize() : get the source length of a ZSTD frame
compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
return : the number of bytes that would be read to decompress this frame
or an errorCode if it fails (which can be tested using ZSTDv05_isError())
*/
size_t ZSTDv05_findFrameCompressedSize(const void* src, size_t compressedSize);

/* *************************************
* Helper functions

@@ -3729,6 +3729,37 @@ size_t ZSTDv06_decompress(void* dst, size_t dstCapacity, const void* src, size_t
#endif
}

size_t ZSTDv06_findFrameCompressedSize(const void* src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
size_t remainingSize = srcSize;
blockProperties_t blockProperties = { bt_compressed, 0 };

/* Frame Header */
{ size_t const frameHeaderSize = ZSTDv06_frameHeaderSize(src, ZSTDv06_frameHeaderSize_min);
if (ZSTDv06_isError(frameHeaderSize)) return frameHeaderSize;
if (MEM_readLE32(src) != ZSTDv06_MAGICNUMBER) return ERROR(prefix_unknown);
if (srcSize < frameHeaderSize+ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong);
ip += frameHeaderSize; remainingSize -= frameHeaderSize;
}

/* Loop on each block */
while (1) {
size_t const cBlockSize = ZSTDv06_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTDv06_isError(cBlockSize)) return cBlockSize;

ip += ZSTDv06_blockHeaderSize;
remainingSize -= ZSTDv06_blockHeaderSize;
if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);

if (cBlockSize == 0) break; /* bt_end */

ip += cBlockSize;
remainingSize -= cBlockSize;
}

return ip - (const BYTE*)src;
}

/*_******************************
* Streaming Decompression API
@@ -4077,7 +4108,7 @@ size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* zbd,
zbd->inBuff = (char*)malloc(blockSize);
if (zbd->inBuff == NULL) return ERROR(memory_allocation);
}
{ size_t const neededOutSize = ((size_t)1 << zbd->fParams.windowLog) + blockSize;
{ size_t const neededOutSize = ((size_t)1 << zbd->fParams.windowLog) + blockSize + WILDCOPY_OVERLENGTH * 2;
if (zbd->outBuffSize < neededOutSize) {
free(zbd->outBuff);
zbd->outBuffSize = neededOutSize;

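The ZBUFFv06_decompressContinue hunk above enlarges the streaming output buffer. A hedged restatement of the new sizing, using the names from the hunk (the reason given in the comment is an assumption, not stated in the commit):

/* Assumption: WILDCOPY_OVERLENGTH is the slack the decoder's wildcopy may
 * write past a requested end, so the extra margin keeps block copies near
 * the end of outBuff in bounds. */
size_t const neededOutSize = ((size_t)1 << zbd->fParams.windowLog)  /* full window */
                           + blockSize                              /* one block */
                           + WILDCOPY_OVERLENGTH * 2;               /* over-write margin */
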
@@ -41,6 +41,13 @@ extern "C" {
ZSTDLIBv06_API size_t ZSTDv06_decompress( void* dst, size_t dstCapacity,
const void* src, size_t compressedSize);

/**
ZSTDv06_getFrameSrcSize() : get the source length of a ZSTD frame
compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
return : the number of bytes that would be read to decompress this frame
or an errorCode if it fails (which can be tested using ZSTDv06_isError())
*/
size_t ZSTDv06_findFrameCompressedSize(const void* src, size_t compressedSize);

/* *************************************
* Helper functions

@@ -13,12 +13,14 @@
#include <string.h> /* memcpy */
#include <stdlib.h> /* malloc, free, qsort */

#define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
#include "xxhash.h" /* XXH64_* */
#ifndef XXH_STATIC_LINKING_ONLY
# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
#endif
#include "xxhash.h" /* XXH64_* */
#include "zstd_v07.h"

#define FSEv07_STATIC_LINKING_ONLY /* FSEv07_MIN_TABLELOG */
#define HUFv07_STATIC_LINKING_ONLY /* HUFv07_TABLELOG_ABSOLUTEMAX */
#define FSEv07_STATIC_LINKING_ONLY /* FSEv07_MIN_TABLELOG */
#define HUFv07_STATIC_LINKING_ONLY /* HUFv07_TABLELOG_ABSOLUTEMAX */
#define ZSTDv07_STATIC_LINKING_ONLY

#include "error_private.h"
@@ -3968,6 +3970,41 @@ size_t ZSTDv07_decompress(void* dst, size_t dstCapacity, const void* src, size_t
#endif
}

size_t ZSTDv07_findFrameCompressedSize(const void* src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
size_t remainingSize = srcSize;

/* check */
if (srcSize < ZSTDv07_frameHeaderSize_min+ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);

/* Frame Header */
{ size_t const frameHeaderSize = ZSTDv07_frameHeaderSize(src, ZSTDv07_frameHeaderSize_min);
if (ZSTDv07_isError(frameHeaderSize)) return frameHeaderSize;
if (MEM_readLE32(src) != ZSTDv07_MAGICNUMBER) return ERROR(prefix_unknown);
if (srcSize < frameHeaderSize+ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);
ip += frameHeaderSize; remainingSize -= frameHeaderSize;
}

/* Loop on each block */
while (1) {
blockProperties_t blockProperties;
size_t const cBlockSize = ZSTDv07_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTDv07_isError(cBlockSize)) return cBlockSize;

ip += ZSTDv07_blockHeaderSize;
remainingSize -= ZSTDv07_blockHeaderSize;

if (blockProperties.blockType == bt_end) break;

if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);

ip += cBlockSize;
remainingSize -= cBlockSize;
}

return ip - (const BYTE*)src;
}

/*_******************************
* Streaming Decompression API
@@ -4448,7 +4485,7 @@ size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* zbd,
zbd->inBuff = (char*)zbd->customMem.customAlloc(zbd->customMem.opaque, blockSize);
if (zbd->inBuff == NULL) return ERROR(memory_allocation);
}
{ size_t const neededOutSize = zbd->fParams.windowSize + blockSize;
{ size_t const neededOutSize = zbd->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
if (zbd->outBuffSize < neededOutSize) {
zbd->customMem.customFree(zbd->customMem.opaque, zbd->outBuff);
zbd->outBuffSize = neededOutSize;
@@ -4501,7 +4538,8 @@ size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* zbd,
if (!decodedSize && !isSkipFrame) { zbd->stage = ZBUFFds_read; break; } /* this was just a header */
zbd->outEnd = zbd->outStart + decodedSize;
zbd->stage = ZBUFFds_flush;
// break; /* ZBUFFds_flush follows */
/* break; */
/* pass-through */
} }

case ZBUFFds_flush:

@@ -48,6 +48,14 @@ unsigned long long ZSTDv07_getDecompressedSize(const void* src, size_t srcSize);
ZSTDLIBv07_API size_t ZSTDv07_decompress( void* dst, size_t dstCapacity,
const void* src, size_t compressedSize);

/**
ZSTDv07_getFrameSrcSize() : get the source length of a ZSTD frame
compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
return : the number of bytes that would be read to decompress this frame
or an errorCode if it fails (which can be tested using ZSTDv07_isError())
*/
size_t ZSTDv07_findFrameCompressedSize(const void* src, size_t compressedSize);

/*====== Helper functions ======*/
ZSTDLIBv07_API unsigned ZSTDv07_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
ZSTDLIBv07_API const char* ZSTDv07_getErrorName(size_t code); /*!< provides readable string from an error code */

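Taken together, the hunks above add one findFrameCompressedSize helper per legacy format (v0.1 through v0.7). As a closing sketch (illustrative only, not part of the commit; the wrapper name is hypothetical and the error convention is copied from the legacy hunks above), a caller that already knows a frame's legacy version number could dispatch much like zstd_legacy.h does:

size_t findLegacyFrameCompressedSize(U32 version, const void* src, size_t srcSize)
{
    switch (version)
    {
        case 1 : return ZSTDv01_findFrameCompressedSize(src, srcSize);
        case 2 : return ZSTDv02_findFrameCompressedSize(src, srcSize);
        case 3 : return ZSTDv03_findFrameCompressedSize(src, srcSize);
        case 4 : return ZSTDv04_findFrameCompressedSize(src, srcSize);
        case 5 : return ZSTDv05_findFrameCompressedSize(src, srcSize);
        case 6 : return ZSTDv06_findFrameCompressedSize(src, srcSize);
        case 7 : return ZSTDv07_findFrameCompressedSize(src, srcSize);
        default: return ERROR(version_unsupported);   /* same convention as the hunks above */
    }
}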