Update Zstandard to Version 1.5.4

commit 83b87f6fe3 (parent 5fac496461)
Author: Tino Reichardt <milky-7zip@mcmilk.de>
Date:   2023-02-11 10:20:24 +01:00

Signed-off-by: Tino Reichardt <milky-7zip@mcmilk.de>
79 changed files with 4636 additions and 3335 deletions


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -190,25 +190,6 @@ typedef signed long long S64;
 /****************************************************************
 *  Memory I/O
 *****************************************************************/
-/* FSE_FORCE_MEMORY_ACCESS
- * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
- * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
- * The below switch allow to select different access method for improved performance.
- * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
- *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
- * Method 2 : direct access. This method is portable but violate C standard.
- *            It can generate buggy code on targets generating assembly depending on alignment.
- *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
- * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
- * Prefer these methods in priority order (0 > 1 > 2)
- */
-#ifndef FSE_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
-#  if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
-#    define FSE_FORCE_MEMORY_ACCESS 1
-#  endif
-#endif
 static unsigned FSE_32bits(void)
 {
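With this hunk, 1.5.4 drops the FSE_FORCE_MEMORY_ACCESS escape hatch and routes every unaligned read through the memcpy() idiom (method 0 from the removed comment block). A minimal standalone sketch of that idiom for orientation (the function name is illustrative, not from the patch): a fixed-size memcpy into a local is defined behavior for any source alignment, and optimizing compilers lower it to a single load on targets that permit unaligned access.

    #include <string.h>

    typedef unsigned int U32;

    /* Method 0 from the removed comment block: copy through a local.
     * Well-defined for any alignment of memPtr; GCC and Clang usually
     * compile the 4-byte memcpy down to one 32-bit load where the
     * target allows unaligned access. */
    static U32 read32_any_align(const void* memPtr)
    {
        U32 val;
        memcpy(&val, memPtr, sizeof(val));
        return val;
    }

Once compilers learned that lowering, methods 1 and 2 bought little over the portable default, which is presumably why upstream removed the switch here.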
@@ -221,24 +202,6 @@ static unsigned FSE_isLittleEndian(void)
     return one.c[0];
 }
-#if defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==2)
-static U16 FSE_read16(const void* memPtr) { return *(const U16*) memPtr; }
-static U32 FSE_read32(const void* memPtr) { return *(const U32*) memPtr; }
-static U64 FSE_read64(const void* memPtr) { return *(const U64*) memPtr; }
-#elif defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==1)
-/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
-/* currently only defined for gcc and icc */
-typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;
-static U16 FSE_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
-static U32 FSE_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
-static U64 FSE_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
-#else
 static U16 FSE_read16(const void* memPtr)
 {
     U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
@@ -254,8 +217,6 @@ static U64 FSE_read64(const void* memPtr)
     U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
 }
-#endif /* FSE_FORCE_MEMORY_ACCESS */
 static U16 FSE_readLE16(const void* memPtr)
 {
     if (FSE_isLittleEndian())
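The excerpt cuts FSE_readLE16 off at the endianness test. For orientation, a sketch of how this pattern completes (helper names are illustrative; a memcpy-based read16 as in the diff above is assumed): the on-disk format is fixed little-endian, so a little-endian host passes the raw read through, while a big-endian host reassembles the value byte by byte.

    #include <string.h>

    typedef unsigned char  BYTE;
    typedef unsigned short U16;
    typedef unsigned int   U32;

    /* Same union probe as FSE_isLittleEndian above: store 1 and check
     * whether it lands in the lowest-addressed byte. */
    static unsigned isLittleEndian(void)
    {
        const union { U32 u; BYTE c[4]; } one = { 1 };
        return one.c[0];
    }

    static U16 read16(const void* memPtr)
    {
        U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
    }

    /* Little-endian 16-bit read: byte order of the stored value is
     * fixed, so big-endian hosts rebuild it from individual bytes. */
    static U16 readLE16(const void* memPtr)
    {
        if (isLittleEndian())
            return read16(memPtr);
        {   const BYTE* p = (const BYTE*)memPtr;
            return (U16)(p[0] + (p[1] << 8));
        }
    }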
@@ -1190,7 +1151,7 @@ static size_t HUF_decompress (void* dst, size_t maxDstSize, const void* cSrc, si
     zstd - standard compression library
     Copyright (C) 2014-2015, Yann Collet.
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+    BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php)
 
     Redistribution and use in source and binary forms, with or without
     modification, are permitted provided that the following conditions are
@@ -1759,20 +1720,26 @@ static size_t ZSTD_execSequence(BYTE* op,
     static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
     static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* subtracted */
     const BYTE* const ostart = op;
+    BYTE* const oLitEnd = op + sequence.litLength;
     const size_t litLength = sequence.litLength;
     BYTE* const endMatch = op + litLength + sequence.matchLength;    /* risk : address space overflow (32-bits) */
     const BYTE* const litEnd = *litPtr + litLength;
 
-    /* check */
+    /* checks */
+    size_t const seqLength = sequence.litLength + sequence.matchLength;
+
+    if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall);
+    if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected);
+    /* Now we know there are no overflow in literal nor match lengths, can use pointer checks */
+    if (sequence.offset > (U32)(oLitEnd - base)) return ERROR(corruption_detected);
+
     if (endMatch > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */
-    if (litEnd > litLimit) return ERROR(corruption_detected);
-    if (sequence.matchLength > (size_t)(*litPtr-op))  return ERROR(dstSize_tooSmall);    /* overwrite literal segment */
+    if (litEnd > litLimit) return ERROR(corruption_detected);   /* overRead beyond lit buffer */
+    if (sequence.matchLength > (size_t)(*litPtr-op)) return ERROR(dstSize_tooSmall);    /* overwrite literal segment */
 
     /* copy Literals */
-    if (((size_t)(*litPtr - op) < 8) || ((size_t)(oend-litEnd) < 8) || (op+litLength > oend-8))
-        memmove(op, *litPtr, litLength);   /* overwrite risk */
-    else
-        ZSTD_wildcopy(op, *litPtr, litLength);
+    ZSTD_memmove(op, *litPtr, sequence.litLength);   /* note : v0.1 seems to allow scenarios where output or input are close to end of buffer */
+
     op += litLength;
     *litPtr = litEnd;   /* update for next sequence */
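The reordered checks are the substance of this hunk. The old code first built endMatch = op + litLength + matchLength and then compared pointers, which its own comment flagged as an address-space overflow risk on 32-bit targets: a corrupt, huge length can wrap the pointer, so endMatch > oend passes even though the sequence cannot fit. The new code compares lengths against the remaining space first and only forms derived pointers once the lengths are known to be in range. A standalone sketch of the two styles (function names are illustrative, not from the patch):

    #include <stddef.h>

    /* Unsafe style (what was removed): op + seqLength can wrap around
     * the address space, so the comparison may pass for a corrupt
     * sequence; forming a pointer past the end of the buffer is also
     * undefined behavior in itself. */
    static int fits_pointer_style(const unsigned char* op,
                                  const unsigned char* oend, size_t seqLength)
    {
        return op + seqLength <= oend;   /* may wrap: do not use */
    }

    /* Safe style (what 1.5.4 uses): oend - op is a valid pointer
     * difference by construction, so the length comparison cannot
     * overflow, and no pointer past oend is ever created. */
    static int fits_length_style(const unsigned char* op,
                                 const unsigned char* oend, size_t seqLength)
    {
        return seqLength <= (size_t)(oend - op);
    }

With the lengths validated up front, the literal copy no longer needs the old near-end-of-buffer special casing and collapses to the single ZSTD_memmove call.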