Mirror of https://github.com/Xevion/easy7zip.git (synced 2025-12-15 04:11:46 -06:00)
Update Zstandard to Version 1.5.5
Signed-off-by: Tino Reichardt <milky-7zip@mcmilk.de>
@@ -14,7 +14,9 @@
 /*-*************************************
 *  Dependencies
 ***************************************/
+#include "allocations.h"  /* ZSTD_customMalloc, ZSTD_customFree */
 #include "zstd_internal.h"
+#include "portability_macros.h"
 
 #if defined (__cplusplus)
 extern "C" {
@@ -44,8 +46,9 @@ extern "C" {
 ***************************************/
 typedef enum {
     ZSTD_cwksp_alloc_objects,
-    ZSTD_cwksp_alloc_buffers,
-    ZSTD_cwksp_alloc_aligned
+    ZSTD_cwksp_alloc_aligned_init_once,
+    ZSTD_cwksp_alloc_aligned,
+    ZSTD_cwksp_alloc_buffers
 } ZSTD_cwksp_alloc_phase_e;
 
 /**
@@ -98,8 +101,8 @@ typedef enum {
  *
  * Workspace Layout:
  *
- * [                        ... workspace ...                         ]
- * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
+ * [                        ... workspace ...                           ]
+ * [objects][tables ->] free space [<- buffers][<- aligned][<- init once]
  *
  * The various objects that live in the workspace are divided into the
  * following categories, and are allocated separately:
@@ -123,9 +126,18 @@ typedef enum {
  *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
  *   Their sizes depend on the cparams. These tables are 64-byte aligned.
  *
- * - Aligned: these buffers are used for various purposes that require 4 byte
- *   alignment, but don't require any initialization before they're used. These
- *   buffers are each aligned to 64 bytes.
+ * - Init once: these buffers require to be initialized at least once before
+ *   use. They should be used when we want to skip memory initialization
+ *   while not triggering memory checkers (like Valgrind) when reading from
+ *   from this memory without writing to it first.
+ *   These buffers should be used carefully as they might contain data
+ *   from previous compressions.
+ *   Buffers are aligned to 64 bytes.
+ *
+ * - Aligned: these buffers don't require any initialization before they're
+ *   used. The user of the buffer should make sure they write into a buffer
+ *   location before reading from it.
+ *   Buffers are aligned to 64 bytes.
  *
  * - Buffers: these buffers are used for various purposes that don't require
  *   any alignment or initialization before they're used. This means they can
@@ -137,8 +149,9 @@ typedef enum {
  *   correctly packed into the workspace buffer. That order is:
  *
  * 1. Objects
- * 2. Buffers
- * 3. Aligned/Tables
+ * 2. Init once / Tables
+ * 3. Aligned / Tables
+ * 4. Buffers / Tables
  *
  * Attempts to reserve objects of different types out of order will fail.
 */
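For context, the reservation order documented above corresponds to call sequences like the sketch below. This is an illustration only, not part of the commit: it assumes the internal zstd_cwksp.h entry points visible elsewhere in this diff (ZSTD_cwksp_init, ZSTD_cwksp_reserve_object, ZSTD_cwksp_reserve_table, ZSTD_cwksp_reserve_aligned_init_once, ZSTD_cwksp_reserve_aligned, ZSTD_cwksp_reserve_buffer, ZSTD_cwksp_reserve_failed) plus the ZSTD_cwksp_dynamic_alloc flag; the helper name demo_reserve_order is hypothetical, and the snippet only builds inside the zstd source tree.

    /* Illustrative only: reservations must follow the phase order
     * objects -> init once / tables -> aligned -> buffers,
     * otherwise the workspace records an allocation failure. */
    #include "zstd_cwksp.h"   /* internal header */
    #include <stdlib.h>

    static int demo_reserve_order(void)
    {
        size_t const wkspSize = 1 << 20;
        void* const mem = malloc(wkspSize);
        ZSTD_cwksp ws;
        int failed;
        if (mem == NULL) return 1;
        ZSTD_cwksp_init(&ws, mem, wkspSize, ZSTD_cwksp_dynamic_alloc);
        {   void* const obj       = ZSTD_cwksp_reserve_object(&ws, 64);                           /* 1. objects   */
            U32*  const hashTable = (U32*)ZSTD_cwksp_reserve_table(&ws, (1u<<12) * sizeof(U32));  /* 2. tables    */
            void* const initOnce  = ZSTD_cwksp_reserve_aligned_init_once(&ws, 4096);              /* 2. init once */
            void* const aligned   = ZSTD_cwksp_reserve_aligned(&ws, 4096);                        /* 3. aligned   */
            BYTE* const buffer    = ZSTD_cwksp_reserve_buffer(&ws, 1024);                         /* 4. buffers   */
            (void)obj; (void)hashTable; (void)initOnce; (void)aligned; (void)buffer;
        }
        failed = ZSTD_cwksp_reserve_failed(&ws);
        free(mem);
        return failed;
    }

The phase enum above encodes this order; ZSTD_cwksp_internal_advance_phase (further down in this diff) moves the workspace forward through these phases as the first reservation of each category arrives.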
@@ -150,6 +163,7 @@ typedef struct {
     void* tableEnd;
     void* tableValidEnd;
     void* allocStart;
+    void* initOnceStart;
 
     BYTE allocFailed;
     int workspaceOversizedDuration;
@@ -162,6 +176,7 @@ typedef struct {
 ***************************************/
 
 MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
+MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws);
 
 MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
     (void)ws;
@@ -171,6 +186,20 @@ MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
     assert(ws->tableEnd <= ws->allocStart);
     assert(ws->tableValidEnd <= ws->allocStart);
     assert(ws->allocStart <= ws->workspaceEnd);
+    assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws));
+    assert(ws->workspace <= ws->initOnceStart);
+#if ZSTD_MEMORY_SANITIZER
+    {
+        intptr_t const offset = __msan_test_shadow(ws->initOnceStart,
+            (U8*)ZSTD_cwksp_initialAllocStart(ws) - (U8*)ws->initOnceStart);
+#if defined(ZSTD_MSAN_PRINT)
+        if(offset!=-1) {
+            __msan_print_shadow((U8*)ws->initOnceStart + offset - 8, 32);
+        }
+#endif
+        assert(offset==-1);
+    };
+#endif
 }
 
 /**
@@ -217,14 +246,10 @@ MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
  * for internal purposes (currently only alignment).
  */
 MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
-    /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
-     * to align the beginning of tables section, as well as another n_2=[0, 63] bytes
-     * to align the beginning of the aligned section.
-     *
-     * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
-     * aligneds being sized in multiples of 64 bytes.
+    /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES
+     * bytes to align the beginning of tables section and end of buffers;
      */
-    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES;
+    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES * 2;
     return slackSpace;
 }
 
@@ -237,10 +262,18 @@ MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes)
     size_t const alignBytesMask = alignBytes - 1;
     size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
     assert((alignBytes & alignBytesMask) == 0);
-    assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES);
+    assert(bytes < alignBytes);
     return bytes;
 }
 
+/**
+ * Returns the initial value for allocStart which is used to determine the position from
+ * which we can allocate from the end of the workspace.
+ */
+MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) {
+    return (void*)((size_t)ws->workspaceEnd & ~(ZSTD_CWKSP_ALIGNMENT_BYTES-1));
+}
+
 /**
  * Internal function. Do not use directly.
  * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
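To make the alignment arithmetic concrete, here is a small standalone sketch (not part of the commit, plain ISO C, with ALIGNMENT standing in for ZSTD_CWKSP_ALIGNMENT_BYTES): bytes_to_align_ptr returns how many bytes push an address up to the next 64-byte boundary, which can now legitimately be 0 (hence the relaxed assert above), while initialAllocStart rounds the end of the workspace down to a 64-byte boundary. These two adjustments are what the doubled slack in ZSTD_cwksp_slack_space_required() pays for.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ALIGNMENT 64u   /* stand-in for ZSTD_CWKSP_ALIGNMENT_BYTES */

    /* Bytes needed to round addr up to the next 64-byte boundary (0..63). */
    static size_t bytes_to_align(uintptr_t addr)
    {
        return (size_t)((ALIGNMENT - (addr & (ALIGNMENT - 1))) & (ALIGNMENT - 1));
    }

    /* Round the end of the workspace down to a 64-byte boundary (initial allocStart). */
    static uintptr_t initial_alloc_start(uintptr_t workspaceEnd)
    {
        return workspaceEnd & ~(uintptr_t)(ALIGNMENT - 1);
    }

    int main(void)
    {
        uintptr_t const objectEnd    = 0x1010;   /* 16 bytes past a 64-byte boundary */
        uintptr_t const workspaceEnd = 0x2030;   /* 48 bytes past a 64-byte boundary */
        printf("pad before tables : %zu\n", bytes_to_align(objectEnd));                                  /* 48 */
        printf("bytes lost at end : %zu\n", (size_t)(workspaceEnd - initial_alloc_start(workspaceEnd))); /* 48 */
        assert(bytes_to_align(0x1000) == 0);     /* already aligned: 0 pad, assert(bytes < 64) still holds */
        return 0;
    }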
@@ -281,27 +314,16 @@ ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
 {
     assert(phase >= ws->phase);
     if (phase > ws->phase) {
-        /* Going from allocating objects to allocating buffers */
-        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
-                phase >= ZSTD_cwksp_alloc_buffers) {
+        /* Going from allocating objects to allocating initOnce / tables */
+        if (ws->phase < ZSTD_cwksp_alloc_aligned_init_once &&
+            phase >= ZSTD_cwksp_alloc_aligned_init_once) {
             ws->tableValidEnd = ws->objectEnd;
-        }
+            ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
 
-        /* Going from allocating buffers to allocating aligneds/tables */
-        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
-                phase >= ZSTD_cwksp_alloc_aligned) {
-            {   /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
-                size_t const bytesToAlign =
-                    ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
-                DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign);
-                ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */
-                RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
-                                memory_allocation, "aligned phase - alignment initial allocation failed!");
-            }
             {   /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
-                void* const alloc = ws->objectEnd;
+                void *const alloc = ws->objectEnd;
                 size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
-                void* const objectEnd = (BYTE*)alloc + bytesToAlign;
+                void *const objectEnd = (BYTE *) alloc + bytesToAlign;
                 DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
                 RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
                                 "table phase - alignment initial allocation failed!");
@@ -309,7 +331,9 @@ ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
             ws->tableEnd = objectEnd;  /* table area starts being empty */
             if (ws->tableValidEnd < ws->tableEnd) {
                 ws->tableValidEnd = ws->tableEnd;
-        }   }   }
+            }
+        }
+    }
     ws->phase = phase;
     ZSTD_cwksp_assert_internal_consistency(ws);
 }
@@ -321,7 +345,7 @@ ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
  */
 MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
 {
-    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
+    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr < ws->workspaceEnd);
 }
 
 /**
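For illustration only (not part of the diff): the ownership test is now a half-open range check, so a pointer sitting exactly at workspaceEnd, i.e. the one-past-the-end address, is no longer reported as owned. A standalone re-statement of the predicate:

    #include <stdio.h>

    /* A pointer belongs to the workspace iff workspace <= ptr < workspaceEnd. */
    static int owns_buffer(const char* workspace, const char* workspaceEnd, const char* ptr)
    {
        return (ptr != NULL) && (workspace <= ptr) && (ptr < workspaceEnd);
    }

    int main(void)
    {
        char wksp[128];
        printf("%d\n", owns_buffer(wksp, wksp + 128, wksp));        /* 1: first byte        */
        printf("%d\n", owns_buffer(wksp, wksp + 128, wksp + 127));  /* 1: last byte         */
        printf("%d\n", owns_buffer(wksp, wksp + 128, wksp + 128));  /* 0: one-past-the-end  */
        return 0;
    }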
@@ -366,6 +390,36 @@ MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
     return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
 }
 
+/**
+ * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
+ * This memory has been initialized at least once in the past.
+ * This doesn't mean it has been initialized this time, and it might contain data from previous
+ * operations.
+ * The main usage is for algorithms that might need read access into uninitialized memory.
+ * The algorithm must maintain safety under these conditions and must make sure it doesn't
+ * leak any of the past data (directly or in side channels).
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t bytes)
+{
+    size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES);
+    void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once);
+    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+    if(ptr && ptr < ws->initOnceStart) {
+        /* We assume the memory following the current allocation is either:
+         * 1. Not usable as initOnce memory (end of workspace)
+         * 2. Another initOnce buffer that has been allocated before (and so was previously memset)
+         * 3. An ASAN redzone, in which case we don't want to write on it
+         * For these reasons it should be fine to not explicitly zero every byte up to ws->initOnceStart.
+         * Note that we assume here that MSAN and ASAN cannot run in the same time. */
+        ZSTD_memset(ptr, 0, MIN((size_t)((U8*)ws->initOnceStart - (U8*)ptr), alignedBytes));
+        ws->initOnceStart = ptr;
+    }
+#if ZSTD_MEMORY_SANITIZER
+    assert(__msan_test_shadow(ptr, bytes) == -1);
+#endif
+    return ptr;
+}
+
 /**
  * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
  */
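The zeroing above is incremental: init-once allocations grow downward from the end of the workspace, and only the gap between the new block and the previous low-water mark (ws->initOnceStart) is cleared, so across repeated compressions each init-once byte is zeroed at most once. A standalone simulation of that policy (not part of the commit; plain C, with names made up for the illustration):

    #include <stdio.h>
    #include <string.h>

    #define WKSP_SIZE 256
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static unsigned char wksp[WKSP_SIZE];
    static size_t initOnceStart = WKSP_SIZE;   /* low-water mark, only ever moves down            */
    static size_t allocStart    = WKSP_SIZE;   /* reset between compressions, initOnceStart is not */

    /* Simplified downward allocator mimicking the zeroing in ZSTD_cwksp_reserve_aligned_init_once():
     * clear only the bytes between the new block and the previous low-water mark. */
    static unsigned char* reserve_init_once(size_t bytes)
    {
        size_t zeroed = 0;
        allocStart -= bytes;
        if (allocStart < initOnceStart) {
            zeroed = MIN(initOnceStart - allocStart, bytes);
            memset(wksp + allocStart, 0, zeroed);
            initOnceStart = allocStart;
        }
        printf("reserved %zu bytes, zeroed %zu of them\n", bytes, zeroed);
        return wksp + allocStart;
    }

    int main(void)
    {
        reserve_init_once(64);    /* first compression: all 64 bytes get cleared   */
        allocStart = WKSP_SIZE;   /* workspace cleared before the next compression */
        reserve_init_once(64);    /* same region again: nothing to clear           */
        reserve_init_once(64);    /* a new, lower region: 64 more bytes cleared    */
        return 0;
    }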
@@ -384,13 +438,17 @@ MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
  */
 MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
 {
-    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
+    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned_init_once;
     void* alloc;
     void* end;
     void* top;
 
-    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
-        return NULL;
+    /* We can only start allocating tables after we are done reserving space for objects at the
+     * start of the workspace */
+    if(ws->phase < phase) {
+        if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
+            return NULL;
+        }
     }
     alloc = ws->tableEnd;
     end = (BYTE *)alloc + bytes;
@@ -469,11 +527,19 @@ MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
 #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
     /* To validate that the table re-use logic is sound, and that we don't
      * access table space that we haven't cleaned, we re-"poison" the table
-     * space every time we mark it dirty. */
+     * space every time we mark it dirty.
+     * Since tableValidEnd space and initOnce space may overlap we don't poison
+     * the initOnce portion as it break its promise. This means that this poisoning
+     * check isn't always applied fully. */
     {
         size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
         assert(__msan_test_shadow(ws->objectEnd, size) == -1);
-        __msan_poison(ws->objectEnd, size);
+        if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
+            __msan_poison(ws->objectEnd, size);
+        } else {
+            assert(ws->initOnceStart >= ws->objectEnd);
+            __msan_poison(ws->objectEnd, (BYTE*)ws->initOnceStart - (BYTE*)ws->objectEnd);
+        }
     }
 #endif
 
@@ -538,11 +604,14 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
 #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
     /* To validate that the context re-use logic is sound, and that we don't
      * access stuff that this compression hasn't initialized, we re-"poison"
-     * the workspace (or at least the non-static, non-table parts of it)
-     * every time we start a new compression. */
+     * the workspace except for the areas in which we expect memory re-use
+     * without initialization (objects, valid tables area and init once
+     * memory). */
     {
-        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd;
-        __msan_poison(ws->tableValidEnd, size);
+        if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
+            size_t size = (BYTE*)ws->initOnceStart - (BYTE*)ws->tableValidEnd;
+            __msan_poison(ws->tableValidEnd, size);
+        }
     }
 #endif
 
@@ -558,10 +627,10 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
 #endif
 
     ws->tableEnd = ws->objectEnd;
-    ws->allocStart = ws->workspaceEnd;
+    ws->allocStart = ZSTD_cwksp_initialAllocStart(ws);
     ws->allocFailed = 0;
-    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
-        ws->phase = ZSTD_cwksp_alloc_buffers;
+    if (ws->phase > ZSTD_cwksp_alloc_aligned_init_once) {
+        ws->phase = ZSTD_cwksp_alloc_aligned_init_once;
     }
     ZSTD_cwksp_assert_internal_consistency(ws);
 }
@@ -578,6 +647,7 @@ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_c
     ws->workspaceEnd = (BYTE*)start + size;
     ws->objectEnd = ws->workspace;
     ws->tableValidEnd = ws->objectEnd;
+    ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
     ws->phase = ZSTD_cwksp_alloc_objects;
     ws->isStatic = isStatic;
     ZSTD_cwksp_clear(ws);
@@ -630,17 +700,11 @@ MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
  * Returns if the estimated space needed for a wksp is within an acceptable limit of the
  * actual amount of space used.
  */
-MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws,
-                                                        size_t const estimatedSpace, int resizedWorkspace) {
-    if (resizedWorkspace) {
-        /* Resized/newly allocated wksp should have exact bounds */
-        return ZSTD_cwksp_used(ws) == estimatedSpace;
-    } else {
-        /* Due to alignment, when reusing a workspace, we can actually consume 63 fewer or more bytes
-         * than estimatedSpace. See the comments in zstd_cwksp.h for details.
-         */
-        return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63);
-    }
+MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp *const ws, size_t const estimatedSpace) {
+    /* We have an alignment space between objects and tables between tables and buffers, so we can have up to twice
+     * the alignment bytes difference between estimation and actual usage */
+    return (estimatedSpace - ZSTD_cwksp_slack_space_required()) <= ZSTD_cwksp_used(ws) &&
+           ZSTD_cwksp_used(ws) <= estimatedSpace;
 }
 
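A standalone illustration of the new tolerance (not part of the commit; it only assumes ZSTD_CWKSP_ALIGNMENT_BYTES == 64, so the slack is 128 bytes): actual usage may undershoot the estimate by at most the alignment slack, and must never exceed it.

    #include <assert.h>
    #include <stddef.h>

    /* Stand-ins for the workspace constants/helpers used above. */
    #define CWKSP_ALIGNMENT 64
    static size_t slack_space_required(void) { return CWKSP_ALIGNMENT * 2; }

    /* Same shape as the new ZSTD_cwksp_estimated_space_within_bounds(). */
    static int within_bounds(size_t used, size_t estimated)
    {
        return (estimated - slack_space_required()) <= used && used <= estimated;
    }

    int main(void)
    {
        assert(within_bounds(10000, 10000) == 1);   /* exact match                    */
        assert(within_bounds( 9872, 10000) == 1);   /* 128 bytes under: within slack  */
        assert(within_bounds( 9871, 10000) == 0);   /* 129 bytes under: out of bounds */
        assert(within_bounds(10001, 10000) == 0);   /* over the estimate: rejected    */
        return 0;
    }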