zstd: Update to upstream 1.4.0

One step towards fixing #17374, as most experimental APIs we use are now
part of the stable 1.4.0 API.
Rémi Verschelde 2019-04-18 11:53:29 +02:00
parent 6697fd9a05
commit 88cb9bd27f
20 changed files with 1779 additions and 1241 deletions


@ -559,7 +559,7 @@ Files extracted from upstream source:
## zstd
- Upstream: https://github.com/facebook/zstd
- Version: 1.3.8
- Version: 1.4.0
- License: BSD-3-Clause
Files extracted from upstream source:


@ -40,7 +40,7 @@
/**
* FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
* parameters. They must be inlined for the compiler to elimininate the constant
* parameters. They must be inlined for the compiler to eliminate the constant
* branches.
*/
#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
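
A minimal sketch of the pattern this comment describes (illustrative names, not upstream code): the force-inlined generic worker takes a constant parameter, and each thin wrapper compiles into a specialized copy with the constant branches folded away.

#include <stddef.h>

static FORCE_INLINE_TEMPLATE
size_t sum_every_generic(const unsigned char* p, size_t n, size_t const stride)
{
    /* `stride` is a compile-time constant at every call site below,
     * so each specialization gets its own folded loop */
    size_t acc = 0, i;
    for (i = 0; i < n; i += stride) acc += p[i];
    return acc;
}

size_t sum_every_4(const unsigned char* p, size_t n) { return sum_every_generic(p, n, 4); }
size_t sum_every_8(const unsigned char* p, size_t n) { return sum_every_generic(p, n, 8); }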


@ -358,7 +358,7 @@ size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size
typedef enum {
FSE_repeat_none, /**< Cannot use the previous table */
FSE_repeat_check, /**< Can use the previous table but it must be checked */
FSE_repeat_valid /**< Can use the previous table and it is asumed to be valid */
FSE_repeat_valid /**< Can use the previous table and it is assumed to be valid */
} FSE_repeat;
/* *****************************************


@ -14,8 +14,8 @@
* This file will hold wrapper for systems, which do not support pthreads
*/
/* create fake symbol to avoid empty trnaslation unit warning */
int g_ZSTD_threading_useles_symbol;
/* create fake symbol to avoid empty translation unit warning */
int g_ZSTD_threading_useless_symbol;
#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)


@ -66,10 +66,10 @@
/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
/*!XXH_FORCE_NATIVE_FORMAT :
* By default, xxHash library provides endian-independant Hash values, based on little-endian convention.
* By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
* Results are therefore identical for little-endian and big-endian CPU.
* This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
* Should endian-independance be of no importance for your application, you may set the #define below to 1,
* Should endian-independence be of no importance for your application, you may set the #define below to 1,
* to improve speed for Big-endian CPU.
* This option has no impact on Little_Endian CPU.
*/
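
As a quick usage sketch (placeholder names, not zstd code): with the default setting above, the one-shot digest below is byte-for-byte identical on little- and big-endian machines.

#include "xxhash.h"

unsigned long long hash_example(const void* buf, size_t len)
{
    /* seed 0; endian-independent result while XXH_FORCE_NATIVE_FORMAT == 0 */
    return XXH64(buf, len, 0);
}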


@ -53,8 +53,50 @@ extern "C" {
#undef MAX
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))
#define CHECK_F(f) { size_t const errcod = f; if (ERR_isError(errcod)) return errcod; } /* check and Forward error code */
#define CHECK_E(f, e) { size_t const errcod = f; if (ERR_isError(errcod)) return ERROR(e); } /* check and send Error code */
/**
* Return the specified error if the condition evaluates to true.
*
* In debug modes, prints additional information. In order to do that
* (particularly, printing the conditional that failed), this can't just wrap
* RETURN_ERROR().
*/
#define RETURN_ERROR_IF(cond, err, ...) \
if (cond) { \
RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
RAWLOG(3, ": " __VA_ARGS__); \
RAWLOG(3, "\n"); \
return ERROR(err); \
}
/**
* Unconditionally return the specified error.
*
* In debug modes, prints additional information.
*/
#define RETURN_ERROR(err, ...) \
do { \
RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
RAWLOG(3, ": " __VA_ARGS__); \
RAWLOG(3, "\n"); \
return ERROR(err); \
} while(0);
/**
* If the provided expression evaluates to an error code, returns that error code.
*
* In debug modes, prints additional information.
*/
#define FORWARD_IF_ERROR(err, ...) \
do { \
size_t const err_code = (err); \
if (ERR_isError(err_code)) { \
RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
RAWLOG(3, ": " __VA_ARGS__); \
RAWLOG(3, "\n"); \
return err_code; \
} \
} while(0);
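
A usage sketch showing how the three macros compose in a size_t-returning function (hypothetical helpers assuming zstd's internal error headers and <string.h>; not upstream code):

static size_t copyChecked(void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize)
{
    RETURN_ERROR_IF(dst == NULL, dstBuffer_null, "no destination buffer");
    RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall,
                    "need %u bytes", (unsigned)srcSize);
    memcpy(dst, src, srcSize);
    return srcSize;
}

static size_t copyOrFail(void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize)
{
    if (srcSize == 0) RETURN_ERROR(srcSize_wrong, "empty input not supported here");
    FORWARD_IF_ERROR(copyChecked(dst, dstCapacity, src, srcSize));
    return srcSize;
}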
/*-*************************************
@ -200,6 +242,17 @@ typedef struct {
U32 longLengthPos;
} seqStore_t;
/**
* Contains the compressed frame size and an upper-bound for the decompressed frame size.
* Note: before using `compressedSize`, check for errors using ZSTD_isError().
* similarly, before using `decompressedBound`, check for errors using:
* `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
*/
typedef struct {
size_t compressedSize;
unsigned long long decompressedBound;
} ZSTD_frameSizeInfo; /* decompress & legacy */
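
A small sketch of the checks the comment above prescribes (hypothetical helper, not upstream code):

static int frameSizeInfo_usable(ZSTD_frameSizeInfo const info)
{
    if (ZSTD_isError(info.compressedSize)) return 0;                /* invalid frame */
    if (info.decompressedBound == ZSTD_CONTENTSIZE_ERROR) return 0; /* bound unknown */
    return 1;  /* both fields are safe to use */
}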
const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */
void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */


@ -129,9 +129,9 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
{ U32 position = 0;
U32 symbol;
for (symbol=0; symbol<=maxSymbolValue; symbol++) {
int nbOccurences;
int nbOccurrences;
int const freq = normalizedCounter[symbol];
for (nbOccurences=0; nbOccurences<freq; nbOccurences++) {
for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
position = (position + step) & tableMask;
while (position > highThreshold)

File diff suppressed because it is too large


@ -36,9 +36,9 @@ extern "C" {
#define ZSTD_DUBT_UNSORTED_MARK 1 /* For btlazy2 strategy, index 1 now means "unsorted".
It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
It's not a big deal though : candidate will just be sorted again.
Additionnally, candidate position 1 will be lost.
Additionally, candidate position 1 will be lost.
But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be misdhandled after table re-use with a different strategy
The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy
Constant required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
@ -54,6 +54,14 @@ typedef struct ZSTD_prefixDict_s {
ZSTD_dictContentType_e dictContentType;
} ZSTD_prefixDict;
typedef struct {
void* dictBuffer;
void const* dict;
size_t dictSize;
ZSTD_dictContentType_e dictContentType;
ZSTD_CDict* cdict;
} ZSTD_localDict;
typedef struct {
U32 CTable[HUF_CTABLE_SIZE_U32(255)];
HUF_repeat repeatMode;
@ -107,6 +115,7 @@ typedef struct {
U32 offCodeSumBasePrice; /* to compare to log2(offreq) */
ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */
const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */
ZSTD_literalCompressionMode_e literalCompressionMode;
} optState_t;
typedef struct {
@ -188,6 +197,7 @@ struct ZSTD_CCtx_params_s {
* 1<<wLog, even for dictionary */
ZSTD_dictAttachPref_e attachDictPref;
ZSTD_literalCompressionMode_e literalCompressionMode;
/* Multithreading: used to pass parameters to mtctx */
int nbWorkers;
@ -243,7 +253,7 @@ struct ZSTD_CCtx_s {
U32 frameEnded;
/* Dictionary */
ZSTD_CDict* cdictLocal;
ZSTD_localDict localDict;
const ZSTD_CDict* cdict;
ZSTD_prefixDict prefixDict; /* single-usage dictionary */
@ -806,13 +816,6 @@ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
void ZSTD_resetSeqStore(seqStore_t* ssPtr);
/*! ZSTD_compressStream_generic() :
* Private use only. To be called from zstdmt_compress.c in single-thread mode. */
size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
ZSTD_EndDirective const flushMode);
/*! ZSTD_getCParamsFromCDict() :
* as the name implies */
ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
@ -839,7 +842,7 @@ size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
/* ZSTD_writeLastEmptyBlock() :
* output an empty Block with end-of-frame mark to complete a frame
* @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
* or an error code if `dstCapcity` is too small (<ZSTD_blockHeaderSize)
* or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
*/
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
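
For intuition: per the zstd format, an empty last block is just a 3-byte little-endian block header with last_block=1, block_type=raw(0) and block_size=0, i.e. the value 1. A standalone sketch (not the upstream implementation):

#include <string.h>

static size_t writeLastEmptyBlock_sketch(void* dst, size_t dstCapacity)
{
    unsigned char const header[3] = { 1, 0, 0 };          /* LE24 encoding of 1 */
    if (dstCapacity < sizeof(header)) return (size_t)-1;  /* stands in for ERROR(dstSize_tooSmall) */
    memcpy(dst, header, sizeof(header));
    return sizeof(header);                                /* == ZSTD_blockHeaderSize */
}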


@ -45,7 +45,155 @@ FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_fast_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize,
U32 const mls, ZSTD_dictMode_e const dictMode)
U32 const mls)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hlog = cParams->hashLog;
/* support stepSize of 0 */
size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
const BYTE* const base = ms->window.base;
const BYTE* const istart = (const BYTE*)src;
/* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
const BYTE* ip0 = istart;
const BYTE* ip1;
const BYTE* anchor = istart;
const U32 prefixStartIndex = ms->window.dictLimit;
const BYTE* const prefixStart = base + prefixStartIndex;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
U32 offset_1=rep[0], offset_2=rep[1];
U32 offsetSaved = 0;
/* init */
ip0 += (ip0 == prefixStart);
ip1 = ip0 + 1;
{
U32 const maxRep = (U32)(ip0 - prefixStart);
if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
}
/* Main Search Loop */
while (ip1 < ilimit) { /* < instead of <=, because check at ip0+2 */
size_t mLength;
BYTE const* ip2 = ip0 + 2;
size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
U32 const val0 = MEM_read32(ip0);
size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
U32 const val1 = MEM_read32(ip1);
U32 const current0 = (U32)(ip0-base);
U32 const current1 = (U32)(ip1-base);
U32 const matchIndex0 = hashTable[h0];
U32 const matchIndex1 = hashTable[h1];
BYTE const* repMatch = ip2-offset_1;
const BYTE* match0 = base + matchIndex0;
const BYTE* match1 = base + matchIndex1;
U32 offcode;
hashTable[h0] = current0; /* update hash table */
hashTable[h1] = current1; /* update hash table */
assert(ip0 + 1 == ip1);
if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
mLength = ip2[-1] == repMatch[-1] ? 1 : 0;
ip0 = ip2 - mLength;
match0 = repMatch - mLength;
offcode = 0;
goto _match;
}
if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
/* found a regular match */
goto _offset;
}
if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
/* found a regular match after one literal */
ip0 = ip1;
match0 = match1;
goto _offset;
}
{
size_t const step = ((ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
assert(step >= 2);
ip0 += step;
ip1 += step;
continue;
}
_offset: /* Requires: ip0, match0 */
/* Compute the offset code */
offset_2 = offset_1;
offset_1 = (U32)(ip0-match0);
offcode = offset_1 + ZSTD_REP_MOVE;
mLength = 0;
/* Count the backwards match length */
while (((ip0>anchor) & (match0>prefixStart))
&& (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */
_match: /* Requires: ip0, match0, offcode */
/* Count the forward length */
mLength += ZSTD_count(ip0+mLength+4, match0+mLength+4, iend) + 4;
ZSTD_storeSeq(seqStore, ip0-anchor, anchor, offcode, mLength-MINMATCH);
/* match found */
ip0 += mLength;
anchor = ip0;
ip1 = ip0 + 1;
if (ip0 <= ilimit) {
/* Fill Table */
assert(base+current0+2 > istart); /* check base overflow */
hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
while ( (ip0 <= ilimit)
&& ( (offset_2>0)
& (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) )) {
/* store sequence */
size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
ip0 += rLength;
ip1 = ip0 + 1;
ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
anchor = ip0;
continue; /* faster when present (confirmed on gcc-8) ... (?) */
}
}
}
/* save reps for next block */
rep[0] = offset_1 ? offset_1 : offsetSaved;
rep[1] = offset_2 ? offset_2 : offsetSaved;
/* Return the last literals size */
return iend - anchor;
}
size_t ZSTD_compressBlock_fast(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
ZSTD_compressionParameters const* cParams = &ms->cParams;
U32 const mls = cParams->minMatch;
assert(ms->dictMatchState == NULL);
switch(mls)
{
default: /* includes case 3 */
case 4 :
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
case 5 :
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
case 6 :
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
case 7 :
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
}
}
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_fast_dictMatchState_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize, U32 const mls)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
@ -64,46 +212,26 @@ size_t ZSTD_compressBlock_fast_generic(
U32 offsetSaved = 0;
const ZSTD_matchState_t* const dms = ms->dictMatchState;
const ZSTD_compressionParameters* const dictCParams =
dictMode == ZSTD_dictMatchState ?
&dms->cParams : NULL;
const U32* const dictHashTable = dictMode == ZSTD_dictMatchState ?
dms->hashTable : NULL;
const U32 dictStartIndex = dictMode == ZSTD_dictMatchState ?
dms->window.dictLimit : 0;
const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ?
dms->window.base : NULL;
const BYTE* const dictStart = dictMode == ZSTD_dictMatchState ?
dictBase + dictStartIndex : NULL;
const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ?
dms->window.nextSrc : NULL;
const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ?
prefixStartIndex - (U32)(dictEnd - dictBase) :
0;
const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
const U32* const dictHashTable = dms->hashTable;
const U32 dictStartIndex = dms->window.dictLimit;
const BYTE* const dictBase = dms->window.base;
const BYTE* const dictStart = dictBase + dictStartIndex;
const BYTE* const dictEnd = dms->window.nextSrc;
const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase);
const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart);
const U32 dictHLog = dictMode == ZSTD_dictMatchState ?
dictCParams->hashLog : hlog;
assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);
const U32 dictHLog = dictCParams->hashLog;
/* otherwise, we would get index underflow when translating a dict index
* into a local index */
assert(dictMode != ZSTD_dictMatchState
|| prefixStartIndex >= (U32)(dictEnd - dictBase));
assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
/* init */
ip += (dictAndPrefixLength == 0);
if (dictMode == ZSTD_noDict) {
U32 const maxRep = (U32)(ip - prefixStart);
if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
}
if (dictMode == ZSTD_dictMatchState) {
/* dictMatchState repCode checks don't currently handle repCode == 0
* disabling. */
assert(offset_1 <= dictAndPrefixLength);
assert(offset_2 <= dictAndPrefixLength);
}
/* dictMatchState repCode checks don't currently handle repCode == 0
* disabling. */
assert(offset_1 <= dictAndPrefixLength);
assert(offset_2 <= dictAndPrefixLength);
/* Main Search Loop */
while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
@ -113,50 +241,37 @@ size_t ZSTD_compressBlock_fast_generic(
U32 const matchIndex = hashTable[h];
const BYTE* match = base + matchIndex;
const U32 repIndex = current + 1 - offset_1;
const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
&& repIndex < prefixStartIndex) ?
const BYTE* repMatch = (repIndex < prefixStartIndex) ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
hashTable[h] = current; /* update hash table */
if ( (dictMode == ZSTD_dictMatchState)
&& ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
ip++;
ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
} else if ( dictMode == ZSTD_noDict
&& ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
ip++;
ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
} else if ( (matchIndex <= prefixStartIndex) ) {
if (dictMode == ZSTD_dictMatchState) {
size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
U32 const dictMatchIndex = dictHashTable[dictHash];
const BYTE* dictMatch = dictBase + dictMatchIndex;
if (dictMatchIndex <= dictStartIndex ||
MEM_read32(dictMatch) != MEM_read32(ip)) {
assert(stepSize >= 1);
ip += ((ip-anchor) >> kSearchStrength) + stepSize;
continue;
} else {
/* found a dict match */
U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);
mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
while (((ip>anchor) & (dictMatch>dictStart))
&& (ip[-1] == dictMatch[-1])) {
ip--; dictMatch--; mLength++;
} /* catch up */
offset_2 = offset_1;
offset_1 = offset;
ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
}
} else {
size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
U32 const dictMatchIndex = dictHashTable[dictHash];
const BYTE* dictMatch = dictBase + dictMatchIndex;
if (dictMatchIndex <= dictStartIndex ||
MEM_read32(dictMatch) != MEM_read32(ip)) {
assert(stepSize >= 1);
ip += ((ip-anchor) >> kSearchStrength) + stepSize;
continue;
} else {
/* found a dict match */
U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);
mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
while (((ip>anchor) & (dictMatch>dictStart))
&& (ip[-1] == dictMatch[-1])) {
ip--; dictMatch--; mLength++;
} /* catch up */
offset_2 = offset_1;
offset_1 = offset;
ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
}
} else if (MEM_read32(match) != MEM_read32(ip)) {
/* it's not a match, and we're not going to check the dictionary */
@ -185,41 +300,27 @@ size_t ZSTD_compressBlock_fast_generic(
hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
/* check immediate repcode */
if (dictMode == ZSTD_dictMatchState) {
while (ip <= ilimit) {
U32 const current2 = (U32)(ip-base);
U32 const repIndex2 = current2 - offset_2;
const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
dictBase - dictIndexDelta + repIndex2 :
base + repIndex2;
if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
ip += repLength2;
anchor = ip;
continue;
}
break;
}
}
if (dictMode == ZSTD_noDict) {
while ( (ip <= ilimit)
&& ( (offset_2>0)
& (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
/* store sequence */
size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
hashTable[ZSTD_hashPtr(ip, hlog, mls)] = (U32)(ip-base);
ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
ip += rLength;
while (ip <= ilimit) {
U32 const current2 = (U32)(ip-base);
U32 const repIndex2 = current2 - offset_2;
const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
dictBase - dictIndexDelta + repIndex2 :
base + repIndex2;
if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
ip += repLength2;
anchor = ip;
continue; /* faster when present ... (?) */
} } } }
continue;
}
break;
}
}
}
/* save reps for next block */
rep[0] = offset_1 ? offset_1 : offsetSaved;
@ -229,28 +330,6 @@ size_t ZSTD_compressBlock_fast_generic(
return iend - anchor;
}
size_t ZSTD_compressBlock_fast(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
ZSTD_compressionParameters const* cParams = &ms->cParams;
U32 const mls = cParams->minMatch;
assert(ms->dictMatchState == NULL);
switch(mls)
{
default: /* includes case 3 */
case 4 :
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
case 5 :
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
case 6 :
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
case 7 :
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
}
}
size_t ZSTD_compressBlock_fast_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
@ -262,13 +341,13 @@ size_t ZSTD_compressBlock_fast_dictMatchState(
{
default: /* includes case 3 */
case 4 :
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);
return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
case 5 :
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);
return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
case 6 :
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);
return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
case 7 :
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);
return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
}
}


@ -19,7 +19,7 @@ extern "C" {
U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). pre-emptively increase value of ZSTD_DUBT_UNSORTED_MARK */
void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */
size_t ZSTD_compressBlock_btlazy2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],


@ -429,7 +429,7 @@ size_t ZSTD_ldm_generateSequences(
*/
assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
/* The input could be very large (in zstdmt), so it must be broken up into
* chunks to enforce the maximmum distance and handle overflow correction.
* chunks to enforce the maximum distance and handle overflow correction.
*/
assert(sequences->pos <= sequences->size);
assert(sequences->size <= sequences->capacity);


@ -64,9 +64,15 @@ MEM_STATIC double ZSTD_fCost(U32 price)
}
#endif
static int ZSTD_compressedLiterals(optState_t const* const optPtr)
{
return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed;
}
static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
{
optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
if (ZSTD_compressedLiterals(optPtr))
optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
@ -99,6 +105,7 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
const BYTE* const src, size_t const srcSize,
int const optLevel)
{
int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
optPtr->priceType = zop_dynamic;
@ -113,9 +120,10 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
/* huffman table presumed generated by dictionary */
optPtr->priceType = zop_dynamic;
assert(optPtr->litFreq != NULL);
optPtr->litSum = 0;
{ unsigned lit;
if (compressedLiterals) {
unsigned lit;
assert(optPtr->litFreq != NULL);
optPtr->litSum = 0;
for (lit=0; lit<=MaxLit; lit++) {
U32 const scaleLog = 11; /* scale to 2K */
U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
@ -163,10 +171,11 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
} else { /* not a dictionary */
assert(optPtr->litFreq != NULL);
{ unsigned lit = MaxLit;
if (compressedLiterals) {
unsigned lit = MaxLit;
HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */
optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
}
optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
{ unsigned ll;
for (ll=0; ll<=MaxLL; ll++)
@ -190,7 +199,8 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
} else { /* new block : re-use previous statistics, scaled down */
optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
if (compressedLiterals)
optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
@ -207,6 +217,10 @@ static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
int optLevel)
{
if (litLength == 0) return 0;
if (!ZSTD_compressedLiterals(optPtr))
return (litLength << 3) * BITCOST_MULTIPLIER; /* Uncompressed - 8 bits per literal. */
if (optPtr->priceType == zop_predef)
return (litLength*6) * BITCOST_MULTIPLIER; /* 6 bit per literal - no statistic used */
@ -310,7 +324,8 @@ static void ZSTD_updateStats(optState_t* const optPtr,
U32 offsetCode, U32 matchLength)
{
/* literals */
{ U32 u;
if (ZSTD_compressedLiterals(optPtr)) {
U32 u;
for (u=0; u < litLength; u++)
optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
@ -870,7 +885,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
/* large match -> immediate encoding */
{ U32 const maxML = matches[nbMatches-1].len;
U32 const maxOffset = matches[nbMatches-1].off;
DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new serie",
DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));
if (maxML > sufficient_len) {
@ -1108,7 +1123,8 @@ static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
/* used in 2-pass strategy */
MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
{
optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
if (ZSTD_compressedLiterals(optPtr))
optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
@ -1117,7 +1133,7 @@ MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
/* ZSTD_initStats_ultra():
* make a first compression pass, just to seed stats with more accurate starting values.
* only works on first block, with no dictionary and no ldm.
* this function cannot error, hence its constract must be respected.
* this function cannot error, hence its contract must be respected.
*/
static void
ZSTD_initStats_ultra(ZSTD_matchState_t* ms,


@ -22,6 +22,7 @@
/* ====== Dependencies ====== */
#include <string.h> /* memcpy, memset */
#include <limits.h> /* INT_MAX, UINT_MAX */
#include "mem.h" /* MEM_STATIC */
#include "pool.h" /* threadpool */
#include "threading.h" /* mutex */
#include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
@ -456,7 +457,7 @@ typedef struct {
* Must be acquired after the main mutex when acquiring both.
*/
ZSTD_pthread_mutex_t ldmWindowMutex;
ZSTD_pthread_cond_t ldmWindowCond; /* Signaled when ldmWindow is udpated */
ZSTD_pthread_cond_t ldmWindowCond; /* Signaled when ldmWindow is updated */
ZSTD_window_t ldmWindow; /* A thread-safe copy of ldmState.window */
} serialState_t;
@ -647,7 +648,7 @@ static void ZSTDMT_compressionJob(void* jobDescription)
buffer_t dstBuff = job->dstBuff;
size_t lastCBlockSize = 0;
/* ressources */
/* resources */
if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation));
if (dstBuff.start == NULL) { /* streaming job : doesn't provide a dstBuffer */
dstBuff = ZSTDMT_getBuffer(job->bufPool);
@ -672,7 +673,7 @@ static void ZSTDMT_compressionJob(void* jobDescription)
if (ZSTD_isError(initError)) JOB_ERROR(initError);
} else { /* srcStart points at reloaded section */
U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
{ size_t const forceWindowError = ZSTD_CCtxParam_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
{ size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
}
{ size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
@ -864,14 +865,10 @@ static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {
* Internal use only */
size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
{
if (nbWorkers > ZSTDMT_NBWORKERS_MAX) nbWorkers = ZSTDMT_NBWORKERS_MAX;
params->nbWorkers = nbWorkers;
params->overlapLog = ZSTDMT_OVERLAPLOG_DEFAULT;
params->jobSize = 0;
return nbWorkers;
return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers);
}
ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem)
{
ZSTDMT_CCtx* mtctx;
U32 nbJobs = nbWorkers + 2;
@ -906,6 +903,17 @@ ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
return mtctx;
}
ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
{
#ifdef ZSTD_MULTITHREAD
return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem);
#else
(void)nbWorkers;
(void)cMem;
return NULL;
#endif
}
ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers)
{
return ZSTDMT_createCCtx_advanced(nbWorkers, ZSTD_defaultCMem);
@ -986,26 +994,13 @@ ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params,
{
case ZSTDMT_p_jobSize :
DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %i", value);
if ( value != 0 /* default */
&& value < ZSTDMT_JOBSIZE_MIN)
value = ZSTDMT_JOBSIZE_MIN;
assert(value >= 0);
if (value > ZSTDMT_JOBSIZE_MAX) value = ZSTDMT_JOBSIZE_MAX;
params->jobSize = value;
return value;
return ZSTD_CCtxParams_setParameter(params, ZSTD_c_jobSize, value);
case ZSTDMT_p_overlapLog :
DEBUGLOG(4, "ZSTDMT_p_overlapLog : %i", value);
if (value < ZSTD_OVERLAPLOG_MIN) value = ZSTD_OVERLAPLOG_MIN;
if (value > ZSTD_OVERLAPLOG_MAX) value = ZSTD_OVERLAPLOG_MAX;
params->overlapLog = value;
return value;
return ZSTD_CCtxParams_setParameter(params, ZSTD_c_overlapLog, value);
case ZSTDMT_p_rsyncable :
value = (value != 0);
params->rsyncable = value;
return value;
DEBUGLOG(4, "ZSTD_p_rsyncable : %i", value);
return ZSTD_CCtxParams_setParameter(params, ZSTD_c_rsyncable, value);
default :
return ERROR(parameter_unsupported);
}
@ -1021,32 +1016,29 @@ size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter,
{
switch (parameter) {
case ZSTDMT_p_jobSize:
assert(mtctx->params.jobSize <= INT_MAX);
*value = (int)(mtctx->params.jobSize);
break;
return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_jobSize, value);
case ZSTDMT_p_overlapLog:
*value = mtctx->params.overlapLog;
break;
return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_overlapLog, value);
case ZSTDMT_p_rsyncable:
*value = mtctx->params.rsyncable;
break;
return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_rsyncable, value);
default:
return ERROR(parameter_unsupported);
}
return 0;
}
/* Sets parameters relevant to the compression job,
* initializing others to default values. */
static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
{
ZSTD_CCtx_params jobParams;
memset(&jobParams, 0, sizeof(jobParams));
jobParams.cParams = params.cParams;
jobParams.fParams = params.fParams;
jobParams.compressionLevel = params.compressionLevel;
ZSTD_CCtx_params jobParams = params;
/* Clear parameters related to multithreading */
jobParams.forceWindow = 0;
jobParams.nbWorkers = 0;
jobParams.jobSize = 0;
jobParams.overlapLog = 0;
jobParams.rsyncable = 0;
memset(&jobParams.ldmParams, 0, sizeof(ldmParams_t));
memset(&jobParams.customMem, 0, sizeof(ZSTD_customMem));
return jobParams;
}
@ -1056,7 +1048,7 @@ static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
{
if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
CHECK_F( ZSTDMT_expandJobsTable(mtctx, nbWorkers) );
FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) );
mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers);
if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
@ -1263,7 +1255,7 @@ static size_t ZSTDMT_compress_advanced_internal(
if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize))
return ERROR(memory_allocation);
CHECK_F( ZSTDMT_expandJobsTable(mtctx, nbJobs) ); /* only expands if necessary */
FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbJobs) ); /* only expands if necessary */
{ unsigned u;
for (u=0; u<nbJobs; u++) {
@ -1396,7 +1388,7 @@ size_t ZSTDMT_initCStream_internal(
/* init */
if (params.nbWorkers != mtctx->params.nbWorkers)
CHECK_F( ZSTDMT_resize(mtctx, params.nbWorkers) );
FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) );
if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = ZSTDMT_JOBSIZE_MAX;
@ -1547,7 +1539,7 @@ size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel) {
/* ZSTDMT_writeLastEmptyBlock()
* Write a single empty block with an end-of-frame to finish a frame.
* Job must be created from streaming variant.
* This function is always successfull if expected conditions are fulfilled.
* This function is always successful if expected conditions are fulfilled.
*/
static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)
{
@ -1987,7 +1979,7 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
assert(input->pos <= input->size);
if (mtctx->singleBlockingThread) { /* delegate to single-thread (synchronous) */
return ZSTD_compressStream_generic(mtctx->cctxPool->cctx[0], output, input, endOp);
return ZSTD_compressStream2(mtctx->cctxPool->cctx[0], output, input, endOp);
}
if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
@ -2051,7 +2043,7 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
|| ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) { /* must finish the frame with a zero-size block */
size_t const jobSize = mtctx->inBuff.filled;
assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
CHECK_F( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );
FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );
}
/* check for potential compressed data ready to be flushed */
@ -2065,7 +2057,7 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
CHECK_F( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );
FORWARD_IF_ERROR( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );
/* recommended next input size : fill current input buffer */
return mtctx->targetSectionSize - mtctx->inBuff.filled; /* note : could be zero when input buffer is fully filled and no more availability to create new job */
@ -2082,7 +2074,7 @@ static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* ou
|| ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) { /* need a last 0-size block to end frame */
DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)",
(U32)srcSize, (U32)endFrame);
CHECK_F( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );
FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );
}
/* check if there is any data available to flush */


@ -17,10 +17,25 @@
/* Note : This is an internal API.
* Some methods are still exposed (ZSTDLIB_API),
* These APIs used to be exposed with ZSTDLIB_API,
* because it used to be the only way to invoke MT compression.
* Now, it's recommended to use ZSTD_compress_generic() instead.
* These methods will stop being exposed in a future version */
* Now, it's recommended to use ZSTD_compress2 and ZSTD_compressStream2()
* instead.
*
* If you depend on these APIs and can't switch, then define
* ZSTD_LEGACY_MULTITHREADED_API when making the dynamic library.
* However, we may completely remove these functions in a future
* release, so please switch soon.
*
* This API requires ZSTD_MULTITHREAD to be defined during compilation,
* otherwise ZSTDMT_createCCtx*() will fail.
*/
#ifdef ZSTD_LEGACY_MULTITHREADED_API
# define ZSTDMT_API ZSTDLIB_API
#else
# define ZSTDMT_API
#endif
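
For reference, a sketch of the recommended replacement path through the stable 1.4.0 API (error handling trimmed for brevity):

#include <zstd.h>

size_t compress_mt(void* dst, size_t dstCapacity,
                   const void* src, size_t srcSize, int nbWorkers)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t ret;
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, nbWorkers); /* effective only in ZSTD_MULTITHREAD builds */
    ret = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeCCtx(cctx);
    return ret;  /* check with ZSTD_isError() */
}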
/* === Dependencies === */
#include <stddef.h> /* size_t */
@ -40,17 +55,19 @@
/* === Memory management === */
typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers);
ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers,
/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */
ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers);
/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */
ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers,
ZSTD_customMem cMem);
ZSTDLIB_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);
ZSTDMT_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);
ZSTDLIB_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);
ZSTDMT_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);
/* === Simple one-pass compression function === */
ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
ZSTDMT_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel);
@ -59,31 +76,31 @@ ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
/* === Streaming functions === */
ZSTDLIB_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
ZSTDLIB_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize); /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */
ZSTDMT_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
ZSTDMT_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize); /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */
ZSTDLIB_API size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx);
ZSTDLIB_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
ZSTDMT_API size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx);
ZSTDMT_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
ZSTDLIB_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
ZSTDLIB_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
ZSTDMT_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
ZSTDMT_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
/* === Advanced functions and parameters === */
ZSTDLIB_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict,
ZSTD_parameters params,
int overlapLog);
ZSTDMT_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict,
ZSTD_parameters params,
int overlapLog);
ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
ZSTDMT_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
const void* dict, size_t dictSize, /* dict can be released after init, a local copy is preserved within zcs */
ZSTD_parameters params,
unsigned long long pledgedSrcSize); /* pledgedSrcSize is optional and can be zero == unknown */
ZSTDLIB_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
ZSTDMT_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
const ZSTD_CDict* cdict,
ZSTD_frameParameters fparams,
unsigned long long pledgedSrcSize); /* note : zero means empty */
@ -92,7 +109,7 @@ ZSTDLIB_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
* List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
typedef enum {
ZSTDMT_p_jobSize, /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */
ZSTDMT_p_overlapLog, /* Each job may reload a part of previous job to enhance compressionr ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
ZSTDMT_p_overlapLog, /* Each job may reload a part of previous job to enhance compression ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
ZSTDMT_p_rsyncable /* Enables rsyncable mode. */
} ZSTDMT_parameter;
@ -101,12 +118,12 @@ typedef enum {
* The function must be called typically after ZSTD_createCCtx() but __before ZSTDMT_init*() !__
* Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions.
* @return : 0, or an error code (which can be tested using ZSTD_isError()) */
ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value);
ZSTDMT_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value);
/* ZSTDMT_getMTCtxParameter() :
* Query the ZSTDMT_CCtx for a parameter value.
* @return : 0, or an error code (which can be tested using ZSTD_isError()) */
ZSTDLIB_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value);
ZSTDMT_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value);
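
A short sketch of these legacy calls for code that cannot migrate yet (parameters set before any ZSTDMT_init*(), as required above):

static void tune_legacy_mt(void)
{
    ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(4);  /* NULL if built without ZSTD_MULTITHREAD */
    if (mtctx == NULL) return;
    {   int overlapLog = 0;
        ZSTDMT_setMTCtxParameter(mtctx, ZSTDMT_p_overlapLog, 6);
        ZSTDMT_getMTCtxParameter(mtctx, ZSTDMT_p_overlapLog, &overlapLog);
    }
    ZSTDMT_freeCCtx(mtctx);
}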
/*! ZSTDMT_compressStream_generic() :
@ -116,7 +133,7 @@ ZSTDLIB_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter
* 0 if fully flushed
* or an error code
* note : needs to be init using any ZSTD_initCStream*() variant */
ZSTDLIB_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
ZSTDMT_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
ZSTD_EndDirective endOp);


@ -105,9 +105,9 @@ ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict,
ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);
/* load entropy tables */
CHECK_E( ZSTD_loadDEntropy(&ddict->entropy,
ddict->dictContent, ddict->dictSize),
dictionary_corrupted );
RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy(
&ddict->entropy, ddict->dictContent, ddict->dictSize)),
dictionary_corrupted);
ddict->entropyPresent = 1;
return 0;
}
@ -133,7 +133,7 @@ static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
/* parse dictionary content */
CHECK_F( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) );
FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) );
return 0;
}


@ -106,6 +106,7 @@ static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
dctx->ddictLocal = NULL;
dctx->dictEnd = NULL;
dctx->ddictIsCold = 0;
dctx->dictUses = ZSTD_dont_use;
dctx->inBuff = NULL;
dctx->inBuffSize = 0;
dctx->outBuffSize = 0;
@ -147,13 +148,20 @@ ZSTD_DCtx* ZSTD_createDCtx(void)
return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);
}
static void ZSTD_clearDict(ZSTD_DCtx* dctx)
{
ZSTD_freeDDict(dctx->ddictLocal);
dctx->ddictLocal = NULL;
dctx->ddict = NULL;
dctx->dictUses = ZSTD_dont_use;
}
size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
{
if (dctx==NULL) return 0; /* support free on NULL */
if (dctx->staticSize) return ERROR(memory_allocation); /* not compatible with static DCtx */
RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx");
{ ZSTD_customMem const cMem = dctx->customMem;
ZSTD_freeDDict(dctx->ddictLocal);
dctx->ddictLocal = NULL;
ZSTD_clearDict(dctx);
ZSTD_free(dctx->inBuff, cMem);
dctx->inBuff = NULL;
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
@ -203,7 +211,7 @@ unsigned ZSTD_isFrame(const void* buffer, size_t size)
static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
{
size_t const minInputSize = ZSTD_startingInputLength(format);
if (srcSize < minInputSize) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong);
{ BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
U32 const dictID= fhd & 3;
@ -238,7 +246,7 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzer do not understand that zfhPtr is only going to be read only if return value is zero, since they are 2 different signals */
if (srcSize < minInputSize) return minInputSize;
if (src==NULL) return ERROR(GENERIC); /* invalid parameter */
RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");
if ( (format != ZSTD_f_zstd1_magicless)
&& (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
@ -251,7 +259,7 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
zfhPtr->frameType = ZSTD_skippableFrame;
return 0;
}
return ERROR(prefix_unknown);
RETURN_ERROR(prefix_unknown);
}
/* ensure there is enough `srcSize` to fully read/decode frame header */
@ -269,14 +277,13 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
U64 windowSize = 0;
U32 dictID = 0;
U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
if ((fhdByte & 0x08) != 0)
return ERROR(frameParameter_unsupported); /* reserved bits, must be zero */
RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported,
"reserved bits, must be zero");
if (!singleSegment) {
BYTE const wlByte = ip[pos++];
U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
if (windowLog > ZSTD_WINDOWLOG_MAX)
return ERROR(frameParameter_windowTooLarge);
RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge);
windowSize = (1ULL << windowLog);
windowSize += (windowSize >> 3) * (wlByte&7);
}
@ -348,12 +355,11 @@ static size_t readSkippableFrameSize(void const* src, size_t srcSize)
size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
U32 sizeU32;
if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
return ERROR(srcSize_wrong);
RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong);
sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
if ((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32)
return ERROR(frameParameter_unsupported);
RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
frameParameter_unsupported);
return skippableHeaderSize + sizeU32;
}
@ -428,13 +434,89 @@ static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t he
{
size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
if (ZSTD_isError(result)) return result; /* invalid header */
if (result>0) return ERROR(srcSize_wrong); /* headerSize too small */
if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID))
return ERROR(dictionary_wrong);
RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small");
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
/* Skip the dictID check in fuzzing mode, because it makes the search
* harder.
*/
RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),
dictionary_wrong);
#endif
if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
return 0;
}
static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret)
{
ZSTD_frameSizeInfo frameSizeInfo;
frameSizeInfo.compressedSize = ret;
frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
return frameSizeInfo;
}
static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)
{
ZSTD_frameSizeInfo frameSizeInfo;
memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
if (ZSTD_isLegacy(src, srcSize))
return ZSTD_findFrameSizeInfoLegacy(src, srcSize);
#endif
if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
&& (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize);
return frameSizeInfo;
} else {
const BYTE* ip = (const BYTE*)src;
const BYTE* const ipstart = ip;
size_t remainingSize = srcSize;
size_t nbBlocks = 0;
ZSTD_frameHeader zfh;
/* Extract Frame Header */
{ size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
if (ZSTD_isError(ret))
return ZSTD_errorFrameSizeInfo(ret);
if (ret > 0)
return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
}
ip += zfh.headerSize;
remainingSize -= zfh.headerSize;
/* Iterate over each block */
while (1) {
blockProperties_t blockProperties;
size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTD_isError(cBlockSize))
return ZSTD_errorFrameSizeInfo(cBlockSize);
if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
ip += ZSTD_blockHeaderSize + cBlockSize;
remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
nbBlocks++;
if (blockProperties.lastBlock) break;
}
/* Final frame content checksum */
if (zfh.checksumFlag) {
if (remainingSize < 4)
return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
ip += 4;
}
frameSizeInfo.compressedSize = ip - ipstart;
frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN)
? zfh.frameContentSize
: nbBlocks * zfh.blockSizeMax;
return frameSizeInfo;
}
}
/** ZSTD_findFrameCompressedSize() :
* compatible with legacy mode
@ -443,53 +525,34 @@ static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t he
* @return : the compressed size of the frame starting at `src` */
size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
{
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
if (ZSTD_isLegacy(src, srcSize))
return ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
#endif
if ( (srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
&& (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START ) {
return readSkippableFrameSize(src, srcSize);
} else {
const BYTE* ip = (const BYTE*)src;
const BYTE* const ipstart = ip;
size_t remainingSize = srcSize;
ZSTD_frameHeader zfh;
/* Extract Frame Header */
{ size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
if (ZSTD_isError(ret)) return ret;
if (ret > 0) return ERROR(srcSize_wrong);
}
ip += zfh.headerSize;
remainingSize -= zfh.headerSize;
/* Loop on each block */
while (1) {
blockProperties_t blockProperties;
size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTD_isError(cBlockSize)) return cBlockSize;
if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
return ERROR(srcSize_wrong);
ip += ZSTD_blockHeaderSize + cBlockSize;
remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
if (blockProperties.lastBlock) break;
}
if (zfh.checksumFlag) { /* Final frame content checksum */
if (remainingSize < 4) return ERROR(srcSize_wrong);
ip += 4;
}
return ip - ipstart;
}
ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
return frameSizeInfo.compressedSize;
}
/** ZSTD_decompressBound() :
* compatible with legacy mode
* `src` must point to the start of a ZSTD frame or a skippable frame
* `srcSize` must be at least as large as the contained frame
* @return : the maximum decompressed size of the compressed source
*/
unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
{
unsigned long long bound = 0;
/* Iterate over each frame */
while (srcSize > 0) {
ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
size_t const compressedSize = frameSizeInfo.compressedSize;
unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
return ZSTD_CONTENTSIZE_ERROR;
src = (const BYTE*)src + compressedSize;
srcSize -= compressedSize;
bound += decompressedBound;
}
return bound;
}
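
A caller-side usage sketch (hypothetical function; in 1.4.0 ZSTD_decompressBound() sits in the experimental section, so ZSTD_STATIC_LINKING_ONLY must be defined, and the sketch assumes the bound fits in size_t):

#include <stdlib.h>
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

int decompress_with_bound(const void* src, size_t srcSize)
{
    unsigned long long const bound = ZSTD_decompressBound(src, srcSize);
    if (bound == ZSTD_CONTENTSIZE_ERROR) return -1;  /* invalid frame(s) */
    {   void* const dst = malloc((size_t)bound);
        size_t dSize;
        if (dst == NULL) return -1;
        dSize = ZSTD_decompress(dst, (size_t)bound, src, srcSize);
        free(dst);
        return ZSTD_isError(dSize) ? -1 : 0;
    }
}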
/*-*************************************************************
* Frame decoding
@ -522,9 +585,9 @@ static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
DEBUGLOG(5, "ZSTD_copyRawBlock");
if (dst == NULL) {
if (srcSize == 0) return 0;
return ERROR(dstBuffer_null);
RETURN_ERROR(dstBuffer_null);
}
if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall);
memcpy(dst, src, srcSize);
return srcSize;
}
@ -535,9 +598,9 @@ static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
{
if (dst == NULL) {
if (regenSize == 0) return 0;
return ERROR(dstBuffer_null);
RETURN_ERROR(dstBuffer_null);
}
if (regenSize > dstCapacity) return ERROR(dstSize_tooSmall);
RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall);
memset(dst, b, regenSize);
return regenSize;
}
@ -560,15 +623,16 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr);
/* check */
if (remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN+ZSTD_blockHeaderSize)
return ERROR(srcSize_wrong);
RETURN_ERROR_IF(
remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN+ZSTD_blockHeaderSize,
srcSize_wrong);
/* Frame Header */
{ size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_FRAMEHEADERSIZE_PREFIX);
if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
if (remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize)
return ERROR(srcSize_wrong);
CHECK_F( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );
RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,
srcSize_wrong);
FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );
ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
}
@ -581,7 +645,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
ip += ZSTD_blockHeaderSize;
remainingSrcSize -= ZSTD_blockHeaderSize;
if (cBlockSize > remainingSrcSize) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong);
switch(blockProperties.blockType)
{
@ -596,7 +660,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
break;
case bt_reserved :
default:
return ERROR(corruption_detected);
RETURN_ERROR(corruption_detected);
}
if (ZSTD_isError(decodedSize)) return decodedSize;
@ -609,15 +673,15 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
}
if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
if ((U64)(op-ostart) != dctx->fParams.frameContentSize) {
return ERROR(corruption_detected);
} }
RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize,
corruption_detected);
}
if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
U32 checkRead;
if (remainingSrcSize<4) return ERROR(checksum_wrong);
RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong);
checkRead = MEM_readLE32(ip);
if (checkRead != checkCalc) return ERROR(checksum_wrong);
RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong);
ip += 4;
remainingSrcSize -= 4;
}
@ -652,8 +716,8 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
size_t decodedSize;
size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
if (ZSTD_isError(frameSize)) return frameSize;
/* legacy support is not compatible with static dctx */
if (dctx->staticSize) return ERROR(memory_allocation);
RETURN_ERROR_IF(dctx->staticSize, memory_allocation,
"legacy support is not compatible with static dctx");
decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize);
if (ZSTD_isError(decodedSize)) return decodedSize;
@ -676,7 +740,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
size_t const skippableSize = readSkippableFrameSize(src, srcSize);
if (ZSTD_isError(skippableSize))
return skippableSize;
if (srcSize < skippableSize) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(srcSize < skippableSize, srcSize_wrong);
src = (const BYTE *)src + skippableSize;
srcSize -= skippableSize;
@ -685,29 +749,29 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
if (ddict) {
/* we were called from ZSTD_decompress_usingDDict */
CHECK_F(ZSTD_decompressBegin_usingDDict(dctx, ddict));
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict));
} else {
/* this will initialize correctly with no dict if dict == NULL, so
* use this in all cases but ddict */
CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
}
ZSTD_checkContinuity(dctx, dst);
{ const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
&src, &srcSize);
if ( (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
&& (moreThan1Frame==1) ) {
/* at least one frame successfully completed,
* but following bytes are garbage :
* it's more likely to be a srcSize error,
* specifying more bytes than compressed size of frame(s).
* This error message replaces ERROR(prefix_unknown),
* which would be confusing, as the first header is actually correct.
* Note that one could be unlucky, it might be a corruption error instead,
* happening right at the place where we expect zstd magic bytes.
* But this is _much_ less likely than a srcSize field error. */
return ERROR(srcSize_wrong);
}
RETURN_ERROR_IF(
(ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
&& (moreThan1Frame==1),
srcSize_wrong,
"at least one frame successfully completed, but following "
"bytes are garbage: it's more likely to be a srcSize error, "
"specifying more bytes than compressed size of frame(s). This "
"error message replaces ERROR(prefix_unknown), which would be "
"confusing, as the first header is actually correct. Note that "
"one could be unlucky, it might be a corruption error instead, "
"happening right at the place where we expect zstd magic "
"bytes. But this is _much_ less likely than a srcSize field "
"error.");
if (ZSTD_isError(res)) return res;
assert(res <= dstCapacity);
dst = (BYTE*)dst + res;
@ -716,7 +780,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
moreThan1Frame = 1;
} /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
if (srcSize) return ERROR(srcSize_wrong); /* input not entirely consumed */
RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed");
return (BYTE*)dst - (BYTE*)dststart;
}
@ -730,9 +794,26 @@ size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
}
static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx)
{
switch (dctx->dictUses) {
default:
assert(0 /* Impossible */);
/* fall-through */
case ZSTD_dont_use:
ZSTD_clearDict(dctx);
return NULL;
case ZSTD_use_indefinitely:
return dctx->ddict;
case ZSTD_use_once:
dctx->dictUses = ZSTD_dont_use;
return dctx->ddict;
}
}
size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx));
}
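Seen from the caller's side, the new ZSTD_getDDict() routing means a dictionary attached with ZSTD_DCtx_loadDictionary() is marked ZSTD_use_indefinitely and applies to every subsequent ZSTD_decompressDCtx() call until the context is reset. A hedged sketch (buffer names illustrative, error checks omitted):

ZSTD_DCtx* const dctx = ZSTD_createDCtx();
ZSTD_DCtx_loadDictionary(dctx, dictBuf, dictSize);             /* ZSTD_use_indefinitely */
ZSTD_decompressDCtx(dctx, dst1, dst1Cap, frame1, frame1Size);  /* uses the dictionary */
ZSTD_decompressDCtx(dctx, dst2, dst2Cap, frame2, frame2Size);  /* still uses it */
ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters);      /* drops it */
ZSTD_freeDCtx(dctx);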
@ -741,7 +822,7 @@ size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t sr
#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
size_t regenSize;
ZSTD_DCtx* const dctx = ZSTD_createDCtx();
if (dctx==NULL) return ERROR(memory_allocation);
RETURN_ERROR_IF(dctx==NULL, memory_allocation);
regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
ZSTD_freeDCtx(dctx);
return regenSize;
@ -791,8 +872,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
{
DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
/* Sanity check */
if (srcSize != dctx->expected)
return ERROR(srcSize_wrong); /* not allowed */
RETURN_ERROR_IF(srcSize != dctx->expected, srcSize_wrong, "not allowed");
if (dstCapacity) ZSTD_checkContinuity(dctx, dst);
switch (dctx->stage)
@ -817,7 +897,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
case ZSTDds_decodeFrameHeader:
assert(src != NULL);
memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
dctx->expected = ZSTD_blockHeaderSize;
dctx->stage = ZSTDds_decodeBlockHeader;
return 0;
@ -867,7 +947,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
break;
case bt_reserved : /* should never happen */
default:
return ERROR(corruption_detected);
RETURN_ERROR(corruption_detected);
}
if (ZSTD_isError(rSize)) return rSize;
DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
@ -876,10 +956,10 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize);
if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
if (dctx->decodedSize != dctx->fParams.frameContentSize) {
return ERROR(corruption_detected);
} }
RETURN_ERROR_IF(
dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
&& dctx->decodedSize != dctx->fParams.frameContentSize,
corruption_detected);
if (dctx->fParams.checksumFlag) { /* another round for frame checksum */
dctx->expected = 4;
dctx->stage = ZSTDds_checkChecksum;
@ -900,7 +980,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
{ U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
U32 const check32 = MEM_readLE32(src);
DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
if (check32 != h32) return ERROR(checksum_wrong);
RETURN_ERROR_IF(check32 != h32, checksum_wrong);
dctx->expected = 0;
dctx->stage = ZSTDds_getFrameHeaderSize;
return 0;
@ -921,7 +1001,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
default:
assert(0); /* impossible */
return ERROR(GENERIC); /* some compiler require default to do something */
RETURN_ERROR(GENERIC); /* some compilers require default to do something */
}
}
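For orientation, the buffer-less protocol this stage machine serves, sketched with the (advanced) ZSTD_decompressBegin() / ZSTD_nextSrcSizeToDecompress() / ZSTD_decompressContinue() API; `ip`, `iend`, `obuf` and `obufCap` are illustrative, and error handling is abbreviated:

ZSTD_decompressBegin(dctx);
while (1) {
    size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
    if (toRead == 0) break;                      /* frame fully decoded */
    if ((size_t)(iend - ip) < toRead) break;     /* truncated input */
    {   size_t const produced = ZSTD_decompressContinue(dctx, obuf, obufCap, ip, toRead);
        if (ZSTD_isError(produced)) break;
        ip += toRead;                            /* exactly dctx->expected bytes consumed */
        /* `produced` bytes are now valid in obuf */
    }
}

Feeding anything other than exactly the expected amount trips the srcSize_wrong check above.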
@ -945,7 +1025,7 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
const BYTE* dictPtr = (const BYTE*)dict;
const BYTE* const dictEnd = dictPtr + dictSize;
if (dictSize <= 8) return ERROR(dictionary_corrupted);
RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted);
assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY); /* dict must be valid */
dictPtr += 8; /* skip header = magic + dictID */
@ -964,16 +1044,16 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
dictPtr, dictEnd - dictPtr,
workspace, workspaceSize);
#endif
if (HUF_isError(hSize)) return ERROR(dictionary_corrupted);
RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted);
dictPtr += hSize;
}
{ short offcodeNCount[MaxOff+1];
unsigned offcodeMaxValue = MaxOff, offcodeLog;
size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
if (offcodeMaxValue > MaxOff) return ERROR(dictionary_corrupted);
if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted);
RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted);
RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted);
ZSTD_buildFSETable( entropy->OFTable,
offcodeNCount, offcodeMaxValue,
OF_base, OF_bits,
@ -984,9 +1064,9 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
{ short matchlengthNCount[MaxML+1];
unsigned matchlengthMaxValue = MaxML, matchlengthLog;
size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
if (matchlengthMaxValue > MaxML) return ERROR(dictionary_corrupted);
if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted);
RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted);
RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted);
ZSTD_buildFSETable( entropy->MLTable,
matchlengthNCount, matchlengthMaxValue,
ML_base, ML_bits,
@ -997,9 +1077,9 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
{ short litlengthNCount[MaxLL+1];
unsigned litlengthMaxValue = MaxLL, litlengthLog;
size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
if (litlengthMaxValue > MaxLL) return ERROR(dictionary_corrupted);
if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted);
RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted);
RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted);
ZSTD_buildFSETable( entropy->LLTable,
litlengthNCount, litlengthMaxValue,
LL_base, LL_bits,
@ -1007,12 +1087,13 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
dictPtr += litlengthHeaderSize;
}
if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted);
{ int i;
size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
for (i=0; i<3; i++) {
U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
if (rep==0 || rep >= dictContentSize) return ERROR(dictionary_corrupted);
RETURN_ERROR_IF(rep==0 || rep >= dictContentSize,
dictionary_corrupted);
entropy->rep[i] = rep;
} }
@ -1030,7 +1111,7 @@ static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict
/* load entropy tables */
{ size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize);
if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted);
RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted);
dict = (const char*)dict + eSize;
dictSize -= eSize;
}
@ -1064,9 +1145,11 @@ size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
{
CHECK_F( ZSTD_decompressBegin(dctx) );
FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) );
if (dict && dictSize)
CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted);
RETURN_ERROR_IF(
ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)),
dictionary_corrupted);
return 0;
}
@ -1085,7 +1168,7 @@ size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
DEBUGLOG(4, "DDict is %s",
dctx->ddictIsCold ? "~cold~" : "hot!");
}
CHECK_F( ZSTD_decompressBegin(dctx) );
FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) );
if (ddict) { /* NULL ddict is equivalent to no dictionary */
ZSTD_copyDDictParameters(dctx, ddict);
}
@ -1104,7 +1187,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
}
/*! ZSTD_getDictID_fromFrame() :
* Provides the dictID required to decompresse frame stored within `src`.
* Provides the dictID required to decompress frame stored within `src`.
* If @return == 0, the dictID could not be decoded.
 * This could be for one of the following reasons :
* - The frame does not require a dictionary (most common case).
@ -1176,15 +1259,14 @@ size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType)
{
if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
ZSTD_freeDDict(dctx->ddictLocal);
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
ZSTD_clearDict(dctx);
if (dict && dictSize >= 8) {
dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
if (dctx->ddictLocal == NULL) return ERROR(memory_allocation);
} else {
dctx->ddictLocal = NULL;
RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation);
dctx->ddict = dctx->ddictLocal;
dctx->dictUses = ZSTD_use_indefinitely;
}
dctx->ddict = dctx->ddictLocal;
return 0;
}
@ -1200,7 +1282,9 @@ size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSi
size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
{
return ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType);
FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType));
dctx->dictUses = ZSTD_use_once;
return 0;
}
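The new ZSTD_use_once marking matters to callers: a prefix referenced with ZSTD_DCtx_refPrefix() applies to the next frame only and must be re-referenced for each subsequent one. A minimal sketch (buffer names illustrative):

ZSTD_DCtx_refPrefix(dctx, prefixBuf, prefixSize);            /* ZSTD_use_once */
ZSTD_decompressDCtx(dctx, dst, dstCap, frame1, frame1Size);  /* uses the prefix */
ZSTD_decompressDCtx(dctx, dst, dstCap, frame2, frame2Size);  /* prefix already cleared */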
size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
@ -1215,9 +1299,8 @@ size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSiz
size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
{
DEBUGLOG(4, "ZSTD_initDStream_usingDict");
zds->streamStage = zdss_init;
zds->noForwardProgress = 0;
CHECK_F( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) );
FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
return ZSTD_FRAMEHEADERSIZE_PREFIX;
}
@ -1225,7 +1308,7 @@ size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t di
size_t ZSTD_initDStream(ZSTD_DStream* zds)
{
DEBUGLOG(4, "ZSTD_initDStream");
return ZSTD_initDStream_usingDict(zds, NULL, 0);
return ZSTD_initDStream_usingDDict(zds, NULL);
}
/* ZSTD_initDStream_usingDDict() :
@ -1233,9 +1316,9 @@ size_t ZSTD_initDStream(ZSTD_DStream* zds)
* this function cannot fail */
size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
{
size_t const initResult = ZSTD_initDStream(dctx);
dctx->ddict = ddict;
return initResult;
FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) );
FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) );
return ZSTD_FRAMEHEADERSIZE_PREFIX;
}
/* ZSTD_resetDStream() :
@ -1243,19 +1326,19 @@ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
* this function cannot fail */
size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
{
DEBUGLOG(4, "ZSTD_resetDStream");
dctx->streamStage = zdss_loadHeader;
dctx->lhSize = dctx->inPos = dctx->outStart = dctx->outEnd = 0;
dctx->legacyVersion = 0;
dctx->hostageByte = 0;
FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only));
return ZSTD_FRAMEHEADERSIZE_PREFIX;
}
size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
{
if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
dctx->ddict = ddict;
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
ZSTD_clearDict(dctx);
if (ddict) {
dctx->ddict = ddict;
dctx->dictUses = ZSTD_use_indefinitely;
}
return 0;
}
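By contrast, ZSTD_DCtx_refDDict() installs a caller-owned digested dictionary for indefinite reuse, and a single DDict can back any number of contexts. A sketch (allocation checks omitted):

ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);  /* digest once */
ZSTD_DCtx_refDDict(dctx, ddict);              /* ZSTD_use_indefinitely, by reference */
ZSTD_decompressDCtx(dctx, dst, dstCap, frame, frameSize);
ZSTD_DCtx_refDDict(dctx, NULL);               /* detach before freeing */
ZSTD_freeDDict(ddict);                        /* ownership stays with the caller */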
@ -1267,9 +1350,9 @@ size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
size_t const min = (size_t)1 << bounds.lowerBound;
size_t const max = (size_t)1 << bounds.upperBound;
if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
if (maxWindowSize < min) return ERROR(parameter_outOfBound);
if (maxWindowSize > max) return ERROR(parameter_outOfBound);
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound);
RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound);
dctx->maxWindowSize = maxWindowSize;
return 0;
}
@ -1311,15 +1394,15 @@ static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value)
}
#define CHECK_DBOUNDS(p,v) { \
if (!ZSTD_dParam_withinBounds(p, v)) \
return ERROR(parameter_outOfBound); \
RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound); \
}
size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value)
{
if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
switch(dParam) {
case ZSTD_d_windowLogMax:
if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT;
CHECK_DBOUNDS(ZSTD_d_windowLogMax, value);
dctx->maxWindowSize = ((size_t)1) << value;
return 0;
@ -1329,19 +1412,20 @@ size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value
return 0;
default:;
}
return ERROR(parameter_unsupported);
RETURN_ERROR(parameter_unsupported);
}
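A caller-side sketch of this bounds-checked parameter path; the value 27 is chosen purely for illustration:

/* refuse frames that require a window larger than 2^27 = 128 MiB */
size_t const err = ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
if (ZSTD_isError(err)) { /* value out of bounds, or stream already started */ }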
size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
{
if ( (reset == ZSTD_reset_session_only)
|| (reset == ZSTD_reset_session_and_parameters) ) {
(void)ZSTD_initDStream(dctx);
dctx->streamStage = zdss_init;
dctx->noForwardProgress = 0;
}
if ( (reset == ZSTD_reset_parameters)
|| (reset == ZSTD_reset_session_and_parameters) ) {
if (dctx->streamStage != zdss_init)
return ERROR(stage_wrong);
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
ZSTD_clearDict(dctx);
dctx->format = ZSTD_f_zstd1;
dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
}
@ -1360,7 +1444,8 @@ size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long
unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
size_t const minRBSize = (size_t) neededSize;
if ((unsigned long long)minRBSize != neededSize) return ERROR(frameParameter_windowTooLarge);
RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
frameParameter_windowTooLarge);
return minRBSize;
}
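A worked example of the sizing above, assuming WILDCOPY_OVERLENGTH == 8 as in this version (blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX) is computed just before this hunk):

/*  windowSize       = 1 MiB   -> blockSize   = MIN(1048576, 131072) = 131072
 *  neededRBSize     = 1048576 + 131072 + 2*8 = 1179664 bytes
 *  frameContentSize = 10 KiB  -> neededSize  = MIN(10240, 1179664)  = 10240
 * so a frame known to contain only 10 KiB needs just a 10 KiB rolling buffer. */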
@ -1378,9 +1463,9 @@ size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
ZSTD_frameHeader zfh;
size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
if (ZSTD_isError(err)) return err;
if (err>0) return ERROR(srcSize_wrong);
if (zfh.windowSize > windowSizeMax)
return ERROR(frameParameter_windowTooLarge);
RETURN_ERROR_IF(err>0, srcSize_wrong);
RETURN_ERROR_IF(zfh.windowSize > windowSizeMax,
frameParameter_windowTooLarge);
return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
}
@ -1406,16 +1491,16 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
U32 someMoreWork = 1;
DEBUGLOG(5, "ZSTD_decompressStream");
if (input->pos > input->size) { /* forbidden */
DEBUGLOG(5, "in: pos: %u vs size: %u",
(U32)input->pos, (U32)input->size);
return ERROR(srcSize_wrong);
}
if (output->pos > output->size) { /* forbidden */
DEBUGLOG(5, "out: pos: %u vs size: %u",
(U32)output->pos, (U32)output->size);
return ERROR(dstSize_tooSmall);
}
RETURN_ERROR_IF(
input->pos > input->size,
srcSize_wrong,
"forbidden. in: pos: %u vs size: %u",
(U32)input->pos, (U32)input->size);
RETURN_ERROR_IF(
output->pos > output->size,
dstSize_tooSmall,
"forbidden. out: pos: %u vs size: %u",
(U32)output->pos, (U32)output->size);
DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
while (someMoreWork) {
@ -1423,15 +1508,18 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
{
case zdss_init :
DEBUGLOG(5, "stage zdss_init => transparent reset ");
ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */
zds->streamStage = zdss_loadHeader;
zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
zds->legacyVersion = 0;
zds->hostageByte = 0;
/* fall-through */
case zdss_loadHeader :
DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
if (zds->legacyVersion) {
/* legacy support is incompatible with static dctx */
if (zds->staticSize) return ERROR(memory_allocation);
RETURN_ERROR_IF(zds->staticSize, memory_allocation,
"legacy support is incompatible with static dctx");
{ size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input);
if (hint==0) zds->streamStage = zdss_init;
return hint;
@ -1443,12 +1531,13 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);
if (legacyVersion) {
const void* const dict = zds->ddict ? ZSTD_DDict_dictContent(zds->ddict) : NULL;
size_t const dictSize = zds->ddict ? ZSTD_DDict_dictSize(zds->ddict) : 0;
ZSTD_DDict const* const ddict = ZSTD_getDDict(zds);
const void* const dict = ddict ? ZSTD_DDict_dictContent(ddict) : NULL;
size_t const dictSize = ddict ? ZSTD_DDict_dictSize(ddict) : 0;
DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion);
/* legacy support is incompatible with static dctx */
if (zds->staticSize) return ERROR(memory_allocation);
CHECK_F(ZSTD_initLegacyStream(&zds->legacyContext,
RETURN_ERROR_IF(zds->staticSize, memory_allocation,
"legacy support is incompatible with static dctx");
FORWARD_IF_ERROR(ZSTD_initLegacyStream(&zds->legacyContext,
zds->previousLegacyVersion, legacyVersion,
dict, dictSize));
zds->legacyVersion = zds->previousLegacyVersion = legacyVersion;
@ -1482,7 +1571,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart);
if (cSize <= (size_t)(iend-istart)) {
/* shortcut : using single-pass mode */
size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, zds->ddict);
size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, ZSTD_getDDict(zds));
if (ZSTD_isError(decompressedSize)) return decompressedSize;
DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
ip = istart + cSize;
@ -1495,13 +1584,13 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
/* Consume header (see ZSTDds_decodeFrameHeader) */
DEBUGLOG(4, "Consume header");
CHECK_F(ZSTD_decompressBegin_usingDDict(zds, zds->ddict));
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)));
if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
zds->stage = ZSTDds_skipFrame;
} else {
CHECK_F(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));
FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));
zds->expected = ZSTD_blockHeaderSize;
zds->stage = ZSTDds_decodeBlockHeader;
}
@ -1511,7 +1600,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
(U32)(zds->fParams.windowSize >>10),
(U32)(zds->maxWindowSize >> 10) );
zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge);
RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
frameParameter_windowTooLarge);
/* Adapt buffer sizes to frame header instructions */
{ size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
@ -1525,14 +1615,15 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
if (zds->staticSize) { /* static DCtx */
DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */
if (bufferSize > zds->staticSize - sizeof(ZSTD_DCtx))
return ERROR(memory_allocation);
RETURN_ERROR_IF(
bufferSize > zds->staticSize - sizeof(ZSTD_DCtx),
memory_allocation);
} else {
ZSTD_free(zds->inBuff, zds->customMem);
zds->inBuffSize = 0;
zds->outBuffSize = 0;
zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem);
if (zds->inBuff == NULL) return ERROR(memory_allocation);
RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation);
}
zds->inBuffSize = neededInBuffSize;
zds->outBuff = zds->inBuff + zds->inBuffSize;
@ -1574,7 +1665,9 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
if (isSkipFrame) {
loadedSize = MIN(toLoad, (size_t)(iend-ip));
} else {
if (toLoad > zds->inBuffSize - zds->inPos) return ERROR(corruption_detected); /* should never happen */
RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos,
corruption_detected,
"should never happen");
loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip);
}
ip += loadedSize;
@ -1615,7 +1708,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
default:
assert(0); /* impossible */
return ERROR(GENERIC); /* some compiler require default to do something */
RETURN_ERROR(GENERIC); /* some compilers require default to do something */
} }
/* result */
@ -1624,8 +1717,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
if ((ip==istart) && (op==ostart)) { /* no forward progress */
zds->noForwardProgress ++;
if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
if (op==oend) return ERROR(dstSize_tooSmall);
if (ip==iend) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(op==oend, dstSize_tooSmall);
RETURN_ERROR_IF(ip==iend, srcSize_wrong);
assert(0);
}
} else {
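For reference, the standard consumer loop that drives ZSTD_decompressStream(), sketched for a single frame with illustrative buffers (srcBuf/outBuf) and abbreviated error handling:

ZSTD_DStream* const zds = ZSTD_createDStream();
ZSTD_initDStream(zds);
{   ZSTD_inBuffer in = { srcBuf, srcSize, 0 };
    while (in.pos < in.size) {
        ZSTD_outBuffer out = { outBuf, outCap, 0 };
        size_t const hint = ZSTD_decompressStream(zds, &out, &in);
        if (ZSTD_isError(hint)) break;   /* e.g. the srcSize_wrong / dstSize_tooSmall paths above */
        /* consume out.pos bytes from outBuf here */
        if (hint == 0) break;            /* frame complete */
    }
}
ZSTD_freeDStream(zds);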

View File

@ -56,14 +56,15 @@ static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
blockProperties_t* bpPtr)
{
if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong);
{ U32 const cBlockHeader = MEM_readLE24(src);
U32 const cSize = cBlockHeader >> 3;
bpPtr->lastBlock = cBlockHeader & 1;
bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
bpPtr->origSize = cSize; /* only useful for RLE */
if (bpPtr->blockType == bt_rle) return 1;
if (bpPtr->blockType == bt_reserved) return ERROR(corruption_detected);
RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected);
return cSize;
}
}
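A worked decode of the 3-byte block header, for illustration:

/* e.g. header bytes { 0x0D, 0x00, 0x00 } -> MEM_readLE24 = 13 = 0b1101 :
 *   lastBlock = 13 & 1        = 1   (final block of the frame)
 *   blockType = (13 >> 1) & 3 = 2   (bt_compressed)
 *   cSize     = 13 >> 3       = 1   (one byte of compressed payload follows) */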
@ -78,7 +79,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
{
if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected);
{ const BYTE* const istart = (const BYTE*) src;
symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
@ -86,11 +87,11 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
switch(litEncType)
{
case set_repeat:
if (dctx->litEntropy==0) return ERROR(dictionary_corrupted);
RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted);
/* fall-through */
case set_compressed:
if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
{ size_t lhSize, litSize, litCSize;
U32 singleStream=0;
U32 const lhlCode = (istart[0] >> 2) & 3;
@ -118,8 +119,8 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
litCSize = (lhc >> 22) + (istart[4] << 10);
break;
}
if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected);
/* prefetch huffman table if cold */
if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
@ -157,7 +158,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
}
}
if (HUF_isError(hufSuccess)) return ERROR(corruption_detected);
RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
@ -187,7 +188,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
}
if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected);
memcpy(dctx->litBuffer, istart+lhSize, litSize);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
@ -216,17 +217,17 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
case 3:
lhSize = 3;
litSize = MEM_readLE24(istart) >> 4;
if (srcSize<4) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
break;
}
if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
return lhSize+1;
}
default:
return ERROR(corruption_detected); /* impossible */
RETURN_ERROR(corruption_detected, "impossible");
}
}
}
@ -436,8 +437,8 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
switch(type)
{
case set_rle :
if (!srcSize) return ERROR(srcSize_wrong);
if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
RETURN_ERROR_IF(!srcSize, srcSize_wrong);
RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected);
{ U32 const symbol = *(const BYTE*)src;
U32 const baseline = baseValue[symbol];
U32 const nbBits = nbAdditionalBits[symbol];
@ -449,7 +450,7 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
*DTablePtr = defaultTable;
return 0;
case set_repeat:
if (!flagRepeatTable) return ERROR(corruption_detected);
RETURN_ERROR_IF(!flagRepeatTable, corruption_detected);
/* prefetch FSE table if used */
if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
const void* const pStart = *DTablePtr;
@ -461,15 +462,15 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
{ unsigned tableLog;
S16 norm[MaxSeq+1];
size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
if (FSE_isError(headerSize)) return ERROR(corruption_detected);
if (tableLog > maxLog) return ERROR(corruption_detected);
RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected);
RETURN_ERROR_IF(tableLog > maxLog, corruption_detected);
ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);
*DTablePtr = DTableSpace;
return headerSize;
}
default : /* impossible */
default :
assert(0);
return ERROR(GENERIC);
RETURN_ERROR(GENERIC, "impossible");
}
}
@ -483,28 +484,28 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
/* check */
if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong);
/* SeqHead */
nbSeq = *ip++;
if (!nbSeq) {
*nbSeqPtr=0;
if (srcSize != 1) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(srcSize != 1, srcSize_wrong);
return 1;
}
if (nbSeq > 0x7F) {
if (nbSeq == 0xFF) {
if (ip+2 > iend) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong);
nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
} else {
if (ip >= iend) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(ip >= iend, srcSize_wrong);
nbSeq = ((nbSeq-0x80)<<8) + *ip++;
}
}
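/* worked example (illustrative): a first byte of 0x81 followed by 0x05
 * gives nbSeq = ((0x81-0x80)<<8) + 0x05 = 261; a first byte of 0xFF is
 * followed by a little-endian U16, to which LONGNBSEQ (0x7F00) is added. */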
*nbSeqPtr = nbSeq;
/* FSE table descriptors */
if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */
RETURN_ERROR_IF(ip+4 > iend, srcSize_wrong); /* minimum possible size */
{ symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
@ -517,7 +518,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
LL_base, LL_bits,
LL_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq);
if (ZSTD_isError(llhSize)) return ERROR(corruption_detected);
RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected);
ip += llhSize;
}
@ -527,7 +528,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
OF_base, OF_bits,
OF_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq);
if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected);
RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected);
ip += ofhSize;
}
@ -537,7 +538,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
ML_base, ML_bits,
ML_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq);
if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected);
RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected);
ip += mlhSize;
}
}
@ -590,8 +591,8 @@ size_t ZSTD_execSequenceLast7(BYTE* op,
const BYTE* match = oLitEnd - sequence.offset;
/* check */
if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must fit within dstBuffer */
if (iLitEnd > litLimit) return ERROR(corruption_detected); /* try to read beyond literal buffer */
RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must fit within dstBuffer");
RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "try to read beyond literal buffer");
/* copy literals */
while (op < oLitEnd) *op++ = *(*litPtr)++;
@ -599,7 +600,7 @@ size_t ZSTD_execSequenceLast7(BYTE* op,
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - base)) {
/* offset beyond prefix */
if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - vBase),corruption_detected);
match = dictEnd - (base-match);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);
@ -631,8 +632,8 @@ size_t ZSTD_execSequence(BYTE* op,
const BYTE* match = oLitEnd - sequence.offset;
/* check */
if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
/* copy Literals */
@ -645,8 +646,7 @@ size_t ZSTD_execSequence(BYTE* op,
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix -> go into extDict */
if (sequence.offset > (size_t)(oLitEnd - virtualStart))
return ERROR(corruption_detected);
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected);
match = dictEnd + (match - prefixStart);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);
@ -712,8 +712,8 @@ size_t ZSTD_execSequenceLong(BYTE* op,
const BYTE* match = sequence.match;
/* check */
if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd);
/* copy Literals */
@ -726,7 +726,7 @@ size_t ZSTD_execSequenceLong(BYTE* op,
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix */
if (sequence.offset > (size_t)(oLitEnd - dictStart)) return ERROR(corruption_detected);
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - dictStart), corruption_detected);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
@ -801,7 +801,7 @@ ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
* offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
* bits before reloading. This value is the maximum number of bytes we read
* after reloading when we are decoding long offets.
* after reloading when we are decoding long offsets.
*/
#define LONG_OFFSETS_MAX_EXTRA_BITS_32 \
(ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \
@ -911,7 +911,9 @@ ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
seqState_t seqState;
dctx->fseEntropy = 1;
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
RETURN_ERROR_IF(
ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
corruption_detected);
ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
@ -927,14 +929,14 @@ ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
/* check if reached exact end */
DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
if (nbSeq) return ERROR(corruption_detected);
RETURN_ERROR_IF(nbSeq, corruption_detected);
/* save reps for next block */
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
}
/* last literal segment */
{ size_t const lastLLSize = litEnd - litPtr;
if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);
memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
}
@ -1066,7 +1068,9 @@ ZSTD_decompressSequencesLong_body(
seqState.pos = (size_t)(op-prefixStart);
seqState.dictEnd = dictEnd;
assert(iend >= ip);
CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
RETURN_ERROR_IF(
ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
corruption_detected);
ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
@ -1076,7 +1080,7 @@ ZSTD_decompressSequencesLong_body(
sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
}
if (seqNb<seqAdvance) return ERROR(corruption_detected);
RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected);
/* decode and decompress */
for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
@ -1087,7 +1091,7 @@ ZSTD_decompressSequencesLong_body(
sequences[seqNb & STORED_SEQS_MASK] = sequence;
op += oneSeqSize;
}
if (seqNb<nbSeq) return ERROR(corruption_detected);
RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected);
/* finish queue */
seqNb -= seqAdvance;
@ -1103,7 +1107,7 @@ ZSTD_decompressSequencesLong_body(
/* last literal segment */
{ size_t const lastLLSize = litEnd - litPtr;
if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);
memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
}
@ -1176,7 +1180,7 @@ ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
/* ZSTD_decompressSequencesLong() :
* decompression function triggered when a minimum share of offsets is considered "long",
* aka out of cache.
* note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes mearning "farther than memory cache distance".
* note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance".
* This function will try to mitigate main memory latency through the use of prefetching */
static size_t
ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
@ -1240,7 +1244,7 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
if (srcSize >= ZSTD_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);
RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong);
/* Decode literals section */
{ size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);

View File

@ -89,6 +89,12 @@ typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
typedef enum { zdss_init=0, zdss_loadHeader,
zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
typedef enum {
ZSTD_use_indefinitely = -1, /* Use the dictionary indefinitely */
ZSTD_dont_use = 0, /* Do not use the dictionary (if one exists free it) */
ZSTD_use_once = 1 /* Use the dictionary once and set to ZSTD_dont_use */
} ZSTD_dictUses_e;
struct ZSTD_DCtx_s
{
const ZSTD_seqSymbol* LLTptr;
@ -123,6 +129,7 @@ struct ZSTD_DCtx_s
const ZSTD_DDict* ddict; /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
U32 dictID;
int ddictIsCold; /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */
ZSTD_dictUses_e dictUses;
/* streaming */
ZSTD_dStreamStage streamStage;

thirdparty/zstd/zstd.h vendored (1085 changes)

File diff suppressed because it is too large