commit 5e96eaf162

@@ -480,7 +480,7 @@ Files extracted from upstream source:
## zstd

- Upstream: https://github.com/facebook/zstd
- Version: 1.3.3
- Version: 1.3.4
- License: BSD-3-Clause

Files extracted from upstream source:
@@ -426,7 +426,7 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
 *  Refill `bitD` from buffer previously set in BIT_initDStream() .
 *  This function is safe, it guarantees it will not read beyond src buffer.
 * @return : status of `BIT_DStream_t` internal register.
 *           when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* overflow detected, like end of stream */
@@ -63,6 +63,31 @@
# endif
#endif

/* target attribute */
#ifndef __has_attribute
#define __has_attribute(x) 0  /* Compatibility with non-clang compilers. */
#endif
#if defined(__GNUC__)
# define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
#else
# define TARGET_ATTRIBUTE(target)
#endif

/* Enable runtime BMI2 dispatch based on the CPU.
 * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
 */
#ifndef DYNAMIC_BMI2
#if (defined(__clang__) && __has_attribute(__target__)) \
    || (defined(__GNUC__) \
    && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) \
    && (defined(__x86_64__) || defined(_M_X86)) \
    && !defined(__BMI2__)
# define DYNAMIC_BMI2 1
#else
# define DYNAMIC_BMI2 0
#endif
#endif

/* prefetch */
#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))  /* _mm_prefetch() is not defined outside of x86/x64 */
# include <mmintrin.h>  /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
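Taken together, TARGET_ATTRIBUTE and DYNAMIC_BMI2 enable the compile-twice-and-dispatch pattern this commit later applies to the Huffman codecs. A minimal, hedged sketch of that pattern — the `sum_*` function names are illustrative only, and it assumes the cpu.h header added below for the runtime check:

#include <stddef.h>
#include "compiler.h"   /* TARGET_ATTRIBUTE, DYNAMIC_BMI2 */
#include "cpu.h"        /* ZSTD_cpuid, ZSTD_cpuid_bmi2 */

/* Shared implementation, referenced by both entry points below. */
static size_t sum_body(const unsigned* v, size_t n)
{
    size_t i, total = 0;
    for (i = 0; i < n; i++) total += v[i];
    return total;
}

#if DYNAMIC_BMI2
/* Same body, but this function alone is compiled as if -mbmi2 were passed. */
static TARGET_ATTRIBUTE("bmi2") size_t sum_bmi2(const unsigned* v, size_t n)
{
    return sum_body(v, n);
}
#endif

static size_t sum_dispatch(const unsigned* v, size_t n)
{
#if DYNAMIC_BMI2
    if (ZSTD_cpuid_bmi2(ZSTD_cpuid())) return sum_bmi2(v, n);  /* runtime CPU check */
#endif
    return sum_body(v, n);  /* portable fallback */
}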
@@ -0,0 +1,216 @@
/*
 * Copyright (c) 2018-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_COMMON_CPU_H
#define ZSTD_COMMON_CPU_H

/**
 * Implementation taken from folly/CpuId.h
 * https://github.com/facebook/folly/blob/master/folly/CpuId.h
 */

#include <string.h>

#include "mem.h"

#ifdef _MSC_VER
#include <intrin.h>
#endif

typedef struct {
    U32 f1c;
    U32 f1d;
    U32 f7b;
    U32 f7c;
} ZSTD_cpuid_t;

MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
    U32 f1c = 0;
    U32 f1d = 0;
    U32 f7b = 0;
    U32 f7c = 0;
#ifdef _MSC_VER
    int reg[4];
    __cpuid((int*)reg, 0);
    {
        int const n = reg[0];
        if (n >= 1) {
            __cpuid((int*)reg, 1);
            f1c = (U32)reg[2];
            f1d = (U32)reg[3];
        }
        if (n >= 7) {
            __cpuidex((int*)reg, 7, 0);
            f7b = (U32)reg[1];
            f7c = (U32)reg[2];
        }
    }
#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
    /* The following block like the normal cpuid branch below, but gcc
     * reserves ebx for use of its pic register so we must specially
     * handle the save and restore to avoid clobbering the register
     */
    U32 n;
    __asm__(
        "pushl %%ebx\n\t"
        "cpuid\n\t"
        "popl %%ebx\n\t"
        : "=a"(n)
        : "a"(0)
        : "ecx", "edx");
    if (n >= 1) {
        U32 f1a;
        __asm__(
            "pushl %%ebx\n\t"
            "cpuid\n\t"
            "popl %%ebx\n\t"
            : "=a"(f1a), "=c"(f1c), "=d"(f1d)
            : "a"(1)
            :);
    }
    if (n >= 7) {
        __asm__(
            "pushl %%ebx\n\t"
            "cpuid\n\t"
            "movl %%ebx, %%eax\n\r"
            "popl %%ebx"
            : "=a"(f7b), "=c"(f7c)
            : "a"(7), "c"(0)
            : "edx");
    }
#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
    U32 n;
    __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
    if (n >= 1) {
        U32 f1a;
        __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
    }
    if (n >= 7) {
        U32 f7a;
        __asm__("cpuid"
                : "=a"(f7a), "=b"(f7b), "=c"(f7c)
                : "a"(7), "c"(0)
                : "edx");
    }
#endif
    {
        ZSTD_cpuid_t cpuid;
        cpuid.f1c = f1c;
        cpuid.f1d = f1d;
        cpuid.f7b = f7b;
        cpuid.f7c = f7c;
        return cpuid;
    }
}

#define X(name, r, bit) \
    MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) { \
        return ((cpuid.r) & (1U << bit)) != 0; \
    }

/* cpuid(1): Processor Info and Feature Bits. */
#define C(name, bit) X(name, f1c, bit)
C(sse3, 0)
C(pclmuldq, 1)
C(dtes64, 2)
C(monitor, 3)
C(dscpl, 4)
C(vmx, 5)
C(smx, 6)
C(eist, 7)
C(tm2, 8)
C(ssse3, 9)
C(cnxtid, 10)
C(fma, 12)
C(cx16, 13)
C(xtpr, 14)
C(pdcm, 15)
C(pcid, 17)
C(dca, 18)
C(sse41, 19)
C(sse42, 20)
C(x2apic, 21)
C(movbe, 22)
C(popcnt, 23)
C(tscdeadline, 24)
C(aes, 25)
C(xsave, 26)
C(osxsave, 27)
C(avx, 28)
C(f16c, 29)
C(rdrand, 30)
#undef C
#define D(name, bit) X(name, f1d, bit)
D(fpu, 0)
D(vme, 1)
D(de, 2)
D(pse, 3)
D(tsc, 4)
D(msr, 5)
D(pae, 6)
D(mce, 7)
D(cx8, 8)
D(apic, 9)
D(sep, 11)
D(mtrr, 12)
D(pge, 13)
D(mca, 14)
D(cmov, 15)
D(pat, 16)
D(pse36, 17)
D(psn, 18)
D(clfsh, 19)
D(ds, 21)
D(acpi, 22)
D(mmx, 23)
D(fxsr, 24)
D(sse, 25)
D(sse2, 26)
D(ss, 27)
D(htt, 28)
D(tm, 29)
D(pbe, 31)
#undef D

/* cpuid(7): Extended Features. */
#define B(name, bit) X(name, f7b, bit)
B(bmi1, 3)
B(hle, 4)
B(avx2, 5)
B(smep, 7)
B(bmi2, 8)
B(erms, 9)
B(invpcid, 10)
B(rtm, 11)
B(mpx, 14)
B(avx512f, 16)
B(avx512dq, 17)
B(rdseed, 18)
B(adx, 19)
B(smap, 20)
B(avx512ifma, 21)
B(pcommit, 22)
B(clflushopt, 23)
B(clwb, 24)
B(avx512pf, 26)
B(avx512er, 27)
B(avx512cd, 28)
B(sha, 29)
B(avx512bw, 30)
B(avx512vl, 31)
#undef B
#define C(name, bit) X(name, f7c, bit)
C(prefetchwt1, 0)
C(avx512vbmi, 1)
#undef C

#undef X

#endif /* ZSTD_COMMON_CPU_H */
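For reference, a minimal sketch of how this new header can be queried at runtime. It is illustrative only (the `main` function and printed strings are not part of the commit) and assumes it is compiled inside the zstd source tree so that "cpu.h" and "mem.h" are on the include path:

#include <stdio.h>
#include "cpu.h"

int main(void)
{
    ZSTD_cpuid_t const cpuid = ZSTD_cpuid();   /* one CPUID probe, result passed by value */
    printf("bmi2 : %d\n", ZSTD_cpuid_bmi2(cpuid));
    printf("sse42: %d\n", ZSTD_cpuid_sse42(cpuid));
    printf("avx2 : %d\n", ZSTD_cpuid_avx2(cpuid));
    return 0;
}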
@@ -29,6 +29,7 @@ const char* ERR_getErrorString(ERR_enum code)
    case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
    case PREFIX(init_missing): return "Context should be init first";
    case PREFIX(memory_allocation): return "Allocation error : not enough memory";
    case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough";
    case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
    case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
    case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
@@ -345,7 +345,7 @@ size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* s
 */
size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* workSpace);

/*! FSE_count_simple
/*! FSE_count_simple() :
 * Same as FSE_countFast(), but does not use any additional memory (not even on stack).
 * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`).
 */
@@ -139,8 +139,8 @@ size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned
    {   U32 u;
        for (u=0; u<tableSize; u++) {
            FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
            U16 nextState = symbolNext[symbol]++;
            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );
            U32 const nextState = symbolNext[symbol]++;
            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
    }   }
@@ -58,32 +58,32 @@ extern "C" {
#endif


/* *** simple functions *** */
/**
HUF_compress() :
    Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
    'dst' buffer must be already allocated.
    Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
    `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
    @return : size of compressed data (<= `dstCapacity`).
    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
                     if return == 1, srcData is a single repeated byte symbol (RLE compression).
                     if HUF_isError(return), compression failed (more details using HUF_getErrorName())
*/
/* ========================== */
/* *** simple functions *** */
/* ========================== */

/** HUF_compress() :
 *  Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
 *  'dst' buffer must be already allocated.
 *  Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
 *  `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
 * @return : size of compressed data (<= `dstCapacity`).
 *  Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
 *                   if HUF_isError(return), compression failed (more details using HUF_getErrorName())
 */
HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize);

/**
HUF_decompress() :
    Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
    into already allocated buffer 'dst', of minimum size 'dstSize'.
    `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
    Note : in contrast with FSE, HUF_decompress can regenerate
           RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
           because it knows size to regenerate.
    @return : size of regenerated data (== originalSize),
              or an error code, which can be tested using HUF_isError()
*/
/** HUF_decompress() :
 *  Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
 *  into already allocated buffer 'dst', of minimum size 'dstSize'.
 *  `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
 *  Note : in contrast with FSE, HUF_decompress can regenerate
 *         RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
 *         because it knows size to regenerate (originalSize).
 * @return : size of regenerated data (== originalSize),
 *           or an error code, which can be tested using HUF_isError()
 */
HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize,
                                     const void* cSrc, size_t cSrcSize);
@@ -100,39 +100,32 @@ HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /**< provides error c
/* *** Advanced function *** */

/** HUF_compress2() :
 * Same as HUF_compress(), but offers direct control over `maxSymbolValue` and `tableLog`.
 * `tableLog` must be `<= HUF_TABLELOG_MAX` . */
HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
 * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
 * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
 * `tableLog` must be `<= HUF_TABLELOG_MAX` . */
HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize,
                                     unsigned maxSymbolValue, unsigned tableLog);

/** HUF_compress4X_wksp() :
 * Same as HUF_compress2(), but uses externally allocated `workSpace`.
 * `workspace` must have minimum alignment of 4, and be at least as large as following macro */
 * `workspace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */
#define HUF_WORKSPACE_SIZE (6 << 10)
#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);

/**
 * The minimum workspace size for the `workSpace` used in
 * HUF_readDTableX2_wksp() and HUF_readDTableX4_wksp().
 *
 * The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
 * HUF_TABLE_LOG_MAX=12 to ~1850 bytes when HUF_TABLE_LOG_MAX=15.
 * Buffer overflow errors may potentially occur if code modifications result in
 * a required workspace size greater than that specified in the following
 * macro.
 */
#define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10)
#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
                                           const void* src, size_t srcSize,
                                           unsigned maxSymbolValue, unsigned tableLog,
                                           void* workSpace, size_t wkspSize);

#endif   /* HUF_H_298734234 */

/* ******************************************************************
 *  WARNING !!
 *  The following section contains advanced and experimental definitions
 *  which shall never be used in the context of dll
 *  which shall never be used in the context of a dynamic library,
 *  because they are not guaranteed to remain stable in the future.
 *  Only consider them in association with static linking.
 *******************************************************************/
 * *****************************************************************/
#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
#define HUF_H_HUF_STATIC_LINKING_ONLY
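As a quick illustration of the workspace contract documented in this hunk, here is a hedged sketch of calling HUF_compress4X_wksp with a stack workspace sized by the new HUF_WORKSPACE_SIZE macros. The `maxSymbolValue`/`tableLog` values and the `compressBlock` wrapper name are placeholders, not part of the commit:

#include "mem.h"   /* U32, used by the HUF_WORKSPACE_SIZE_U32 macro */
#include "huf.h"   /* HUF_compress4X_wksp, HUF_WORKSPACE_SIZE_U32, HUF_isError */

static size_t compressBlock(void* dst, size_t dstCapacity,
                            const void* src, size_t srcSize)
{
    /* 4-byte aligned scratch area, exactly HUF_WORKSPACE_SIZE bytes */
    U32 workSpace[HUF_WORKSPACE_SIZE_U32];
    size_t const cSize = HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
                                             255 /* maxSymbolValue */, 11 /* tableLog */,
                                             workSpace, sizeof(workSpace));
    if (HUF_isError(cSize)) return cSize;  /* propagate the error code */
    return cSize;                          /* 0 means not compressible */
}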
@@ -141,11 +134,11 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, const

/* *** Constants *** */
#define HUF_TABLELOG_MAX 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
#define HUF_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */
#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
#define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */
#define HUF_SYMBOLVALUE_MAX 255

#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
# error "HUF_TABLELOG_MAX is too large !"
#endif
@@ -192,24 +185,23 @@ size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,

/* ****************************************
 *  HUF detailed API
 ******************************************/
/*!
HUF_compress() does the following:
1. count symbol occurrence from source[] into table count[] using FSE_count()
2. (optional) refine tableLog using HUF_optimalTableLog()
3. build Huffman table from count using HUF_buildCTable()
4. save Huffman table to memory buffer using HUF_writeCTable()
5. encode the data stream using HUF_compress4X_usingCTable()
 *  HUF detailed API
 * ****************************************/

The following API allows targeting specific sub-functions for advanced tasks.
For example, it's possible to compress several blocks using the same 'CTable',
or to save and regenerate 'CTable' using external methods.
*/
/* FSE_count() : find it within "fse.h" */
/*! HUF_compress() does the following:
 *  1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h")
 *  2. (optional) refine tableLog using HUF_optimalTableLog()
 *  3. build Huffman table from count using HUF_buildCTable()
 *  4. save Huffman table to memory buffer using HUF_writeCTable()
 *  5. encode the data stream using HUF_compress4X_usingCTable()
 *
 *  The following API allows targeting specific sub-functions for advanced tasks.
 *  For example, it's possible to compress several blocks using the same 'CTable',
 *  or to save and regenerate 'CTable' using external methods.
 */
unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
typedef struct HUF_CElt_s HUF_CElt;   /* incomplete type */
size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);
size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);   /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */
size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
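The five steps listed above can be strung together roughly as follows. This is a hedged sketch against the static-linking API, not code from the commit: error checks are elided, the `huf_steps_demo` name is illustrative, and it assumes the HUF_CREATE_STATIC_CTABLE helper and FSE_count_wksp from the static sections of huf.h and fse.h respectively:

#define FSE_STATIC_LINKING_ONLY
#include "fse.h"    /* FSE_count_wksp (assumed available in the static section) */
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#include "mem.h"    /* U32, BYTE */

static size_t huf_steps_demo(void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize)
{
    unsigned count[HUF_SYMBOLVALUE_MAX + 1];
    U32 wksp[HUF_WORKSPACE_SIZE_U32];                 /* scratch for counting and table build */
    unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
    unsigned tableLog = HUF_TABLELOG_DEFAULT;
    HUF_CREATE_STATIC_CTABLE(CTable, HUF_SYMBOLVALUE_MAX);  /* assumed helper from this header */
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = op + dstCapacity;

    /* 1. histogram of the source */
    FSE_count_wksp(count, &maxSymbolValue, src, srcSize, wksp);
    /* 2. pick a table log */
    tableLog = HUF_optimalTableLog(tableLog, srcSize, maxSymbolValue);
    /* 3. build the Huffman tree */
    tableLog = (unsigned)HUF_buildCTable_wksp(CTable, count, maxSymbolValue, tableLog,
                                              wksp, sizeof(wksp));
    /* 4. serialize the table in front of the data */
    op += HUF_writeCTable(op, (size_t)(oend - op), CTable, maxSymbolValue, tableLog);
    /* 5. encode the payload with that table */
    op += HUF_compress4X_usingCTable(op, (size_t)(oend - op), src, srcSize, CTable);
    return (size_t)(op - (BYTE*)dst);
}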
@@ -219,46 +211,65 @@ typedef enum {
    HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
 } HUF_repeat;
/** HUF_compress4X_repeat() :
 *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
 *  If it uses hufTable it does not modify hufTable or repeat.
 *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
 *  If preferRepeat then the old table will always be used if valid. */
size_t HUF_compress4X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat);   /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
 *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
 *  If it uses hufTable it does not modify hufTable or repeat.
 *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
 *  If preferRepeat then the old table will always be used if valid. */
size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
                             const void* src, size_t srcSize,
                             unsigned maxSymbolValue, unsigned tableLog,
                             void* workSpace, size_t wkspSize,    /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
                             HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);

/** HUF_buildCTable_wksp() :
 *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
 *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
 *  `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
 */
#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize);

/*! HUF_readStats() :
    Read compact Huffman tree, saved by HUF_writeCTable().
    `huffWeight` is destination buffer.
    @return : size read from `src` , or an error Code .
    Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
 *  Read compact Huffman tree, saved by HUF_writeCTable().
 *  `huffWeight` is destination buffer.
 * @return : size read from `src` , or an error Code .
 *  Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
                     U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize);

/** HUF_readCTable() :
 *  Loading a CTable saved with HUF_writeCTable() */
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);


/*
HUF_decompress() does the following:
1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
2. build Huffman table from save, using HUF_readDTableXn()
3. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable
*/
/*
 *  HUF_decompress() does the following:
 *  1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
 *  2. build Huffman table from save, using HUF_readDTableX?()
 *  3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
 */

/** HUF_selectDecoder() :
 *  Tells which decoder is likely to decode faster,
 *  based on a set of pre-determined metrics.
 * @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
 *  Assumption : 0 < cSrcSize < dstSize <= 128 KB */
 *  Tells which decoder is likely to decode faster,
 *  based on a set of pre-computed metrics.
 * @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
 *  Assumption : 0 < dstSize <= 128 KB */
U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);

/**
 * The minimum workspace size for the `workSpace` used in
 * HUF_readDTableX2_wksp() and HUF_readDTableX4_wksp().
 *
 * The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
 * HUF_TABLE_LOG_MAX=12 to ~1850 bytes when HUF_TABLE_LOG_MAX=15.
 * Buffer overflow errors may potentially occur if code modifications result in
 * a required workspace size greater than that specified in the following
 * macro.
 */
#define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10)
#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))

size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize);
@@ -269,17 +280,23 @@ size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* c
size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);


/* ====================== */
/* single stream variants */
/* ====================== */

size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);   /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
/** HUF_compress1X_repeat() :
 *  Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
 *  If it uses hufTable it does not modify hufTable or repeat.
 *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
 *  If preferRepeat then the old table will always be used if valid. */
size_t HUF_compress1X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat);   /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
 *  Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
 *  If it uses hufTable it does not modify hufTable or repeat.
 *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
 *  If preferRepeat then the old table will always be used if valid. */
size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
                             const void* src, size_t srcSize,
                             unsigned maxSymbolValue, unsigned tableLog,
                             void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
                             HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);

size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
@@ -295,6 +312,14 @@ size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cS
size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
size_t HUF_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);

/* BMI2 variants.
 * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
 */
size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
size_t HUF_decompress1X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);

#endif /* HUF_STATIC_LINKING_ONLY */

#if defined (__cplusplus)
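The `bmi2` argument of these new variants is a runtime flag; inside the library it is derived from the cpu.h header added by this commit. A hedged sketch of how a caller would obtain and forward it (the `decompress1X2` wrapper name is illustrative, and the surrounding DTable/workspace setup is the same as for the non-bmi2 variants):

#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#include "cpu.h"   /* ZSTD_cpuid(), ZSTD_cpuid_bmi2() */

static size_t decompress1X2(HUF_DTable* dctx, void* dst, size_t dstSize,
                            const void* cSrc, size_t cSrcSize,
                            void* workSpace, size_t wkspSize)
{
    int const bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());   /* 1 on BMI2-capable x86, else 0 */
    return HUF_decompress1X2_DCtx_wksp_bmi2(dctx, dst, dstSize,
                                            cSrc, cSrcSize,
                                            workSpace, wkspSize, bmi2);
}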
@@ -12,6 +12,7 @@
/* ======   Dependencies   ======= */
#include <stddef.h>    /* size_t */
#include "pool.h"
#include "zstd_internal.h"  /* ZSTD_malloc, ZSTD_free */

/* ======   Compiler specifics   ====== */
#if defined(_MSC_VER)
@@ -193,32 +194,54 @@ static int isQueueFull(POOL_ctx const* ctx) {
    }
}

void POOL_add(void* ctxVoid, POOL_function function, void *opaque) {
    POOL_ctx* const ctx = (POOL_ctx*)ctxVoid;
    if (!ctx) { return; }

    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    {   POOL_job const job = {function, opaque};
static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
{
    POOL_job const job = {function, opaque};
    assert(ctx != NULL);
    if (ctx->shutdown) return;

        /* Wait until there is space in the queue for the new job */
        while (isQueueFull(ctx) && !ctx->shutdown) {
            ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
        }
        /* The queue is still going => there is space */
        if (!ctx->shutdown) {
            ctx->queueEmpty = 0;
            ctx->queue[ctx->queueTail] = job;
            ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
        }
    }
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    ctx->queueEmpty = 0;
    ctx->queue[ctx->queueTail] = job;
    ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
    ZSTD_pthread_cond_signal(&ctx->queuePopCond);
}

#else  /* ZSTD_MULTITHREAD  not defined */
/* No multi-threading support */
void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque)
{
    assert(ctx != NULL);
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    /* Wait until there is space in the queue for the new job */
    while (isQueueFull(ctx) && (!ctx->shutdown)) {
        ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
    }
    POOL_add_internal(ctx, function, opaque);
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
}

/* We don't need any data, but if it is empty malloc() might return NULL. */

int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)
{
    assert(ctx != NULL);
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    if (isQueueFull(ctx)) {
        ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
        return 0;
    }
    POOL_add_internal(ctx, function, opaque);
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    return 1;
}


#else  /* ZSTD_MULTITHREAD  not defined */

/* ========================== */
/* No multi-threading support */
/* ========================== */


/* We don't need any data, but if it is empty, malloc() might return NULL. */
struct POOL_ctx_s {
    int dummy;
};
@@ -240,11 +263,17 @@ void POOL_free(POOL_ctx* ctx) {
    (void)ctx;
}

void POOL_add(void* ctx, POOL_function function, void* opaque) {
void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) {
    (void)ctx;
    function(opaque);
}

int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {
    (void)ctx;
    function(opaque);
    return 1;
}

size_t POOL_sizeof(POOL_ctx* ctx) {
    if (ctx==NULL) return 0;  /* supports sizeof NULL */
    assert(ctx == &g_ctx);
@@ -17,7 +17,8 @@ extern "C" {

#include <stddef.h>   /* size_t */
#include "zstd_internal.h"   /* ZSTD_customMem */
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_customMem */
#include "zstd.h"

typedef struct POOL_ctx_s POOL_ctx;
@@ -27,35 +28,43 @@ typedef struct POOL_ctx_s POOL_ctx;
 *  The maximum number of queued jobs before blocking is `queueSize`.
 * @return : POOL_ctx pointer on success, else NULL.
 */
POOL_ctx *POOL_create(size_t numThreads, size_t queueSize);
POOL_ctx* POOL_create(size_t numThreads, size_t queueSize);

POOL_ctx *POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem);
POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem);

/*! POOL_free() :
    Free a thread pool returned by POOL_create().
*/
void POOL_free(POOL_ctx *ctx);
void POOL_free(POOL_ctx* ctx);

/*! POOL_sizeof() :
    return memory usage of pool returned by POOL_create().
*/
size_t POOL_sizeof(POOL_ctx *ctx);
size_t POOL_sizeof(POOL_ctx* ctx);

/*! POOL_function :
    The function type that can be added to a thread pool.
*/
typedef void (*POOL_function)(void *);
typedef void (*POOL_function)(void*);
/*! POOL_add_function :
    The function type for a generic thread pool add function.
*/
typedef void (*POOL_add_function)(void *, POOL_function, void *);
typedef void (*POOL_add_function)(void*, POOL_function, void*);

/*! POOL_add() :
    Add the job `function(opaque)` to the thread pool.
    Add the job `function(opaque)` to the thread pool. `ctx` must be valid.
    Possibly blocks until there is room in the queue.
    Note : The function may be executed asynchronously, so `opaque` must live until the function has been completed.
*/
void POOL_add(void *ctx, POOL_function function, void *opaque);
void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);


/*! POOL_tryAdd() :
    Add the job `function(opaque)` to the thread pool if a worker is available.
    return immediately otherwise.
   @return : 1 if successful, 0 if not.
*/
int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque);


#if defined (__cplusplus)
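A hedged usage sketch of this pool API, including the POOL_tryAdd entry point introduced by the commit. The job payload, the thread/queue counts, and the fallback policy are illustrative only:

#include <stdio.h>
#include "pool.h"

/* Trivial job payload for the sketch. */
static void printJob(void* opaque)
{
    printf("job %d\n", *(int*)opaque);
}

int main(void)
{
    int ids[4] = { 0, 1, 2, 3 };
    POOL_ctx* const pool = POOL_create(2 /* threads */, 2 /* queue slots */);
    if (pool == NULL) return 1;
    POOL_add(pool, printJob, &ids[0]);            /* may block until a queue slot frees up */
    POOL_add(pool, printJob, &ids[1]);
    if (!POOL_tryAdd(pool, printJob, &ids[2])) {  /* non-blocking variant: 0 if the queue is full */
        printJob(&ids[2]);                        /* caller-chosen fallback: run it in-place */
    }
    POOL_add(pool, printJob, &ids[3]);
    POOL_free(pool);                              /* joins worker threads and releases the pool */
    return 0;
}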
@@ -45,15 +45,15 @@ extern "C" {

/* mutex */
#define ZSTD_pthread_mutex_t           CRITICAL_SECTION
#define ZSTD_pthread_mutex_init(a, b)  (InitializeCriticalSection((a)), 0)
#define ZSTD_pthread_mutex_init(a, b)  ((void)(b), InitializeCriticalSection((a)), 0)
#define ZSTD_pthread_mutex_destroy(a)  DeleteCriticalSection((a))
#define ZSTD_pthread_mutex_lock(a)     EnterCriticalSection((a))
#define ZSTD_pthread_mutex_unlock(a)   LeaveCriticalSection((a))

/* condition variable */
#define ZSTD_pthread_cond_t             CONDITION_VARIABLE
#define ZSTD_pthread_cond_init(a, b)    (InitializeConditionVariable((a)), 0)
#define ZSTD_pthread_cond_destroy(a)    /* No delete */
#define ZSTD_pthread_cond_init(a, b)    ((void)(b), InitializeConditionVariable((a)), 0)
#define ZSTD_pthread_cond_destroy(a)    ((void)(a))
#define ZSTD_pthread_cond_wait(a, b)    SleepConditionVariableCS((a), (b), INFINITE)
#define ZSTD_pthread_cond_signal(a)     WakeConditionVariable((a))
#define ZSTD_pthread_cond_broadcast(a)  WakeAllConditionVariable((a))
@@ -100,17 +100,17 @@ int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr);
/* No multithreading support */

typedef int ZSTD_pthread_mutex_t;
#define ZSTD_pthread_mutex_init(a, b)   ((void)a, 0)
#define ZSTD_pthread_mutex_destroy(a)
#define ZSTD_pthread_mutex_lock(a)
#define ZSTD_pthread_mutex_unlock(a)
#define ZSTD_pthread_mutex_init(a, b)   ((void)(a), (void)(b), 0)
#define ZSTD_pthread_mutex_destroy(a)   ((void)(a))
#define ZSTD_pthread_mutex_lock(a)      ((void)(a))
#define ZSTD_pthread_mutex_unlock(a)    ((void)(a))

typedef int ZSTD_pthread_cond_t;
#define ZSTD_pthread_cond_init(a, b)    ((void)a, 0)
#define ZSTD_pthread_cond_destroy(a)
#define ZSTD_pthread_cond_wait(a, b)
#define ZSTD_pthread_cond_signal(a)
#define ZSTD_pthread_cond_broadcast(a)
#define ZSTD_pthread_cond_init(a, b)    ((void)(a), (void)(b), 0)
#define ZSTD_pthread_cond_destroy(a)    ((void)(a))
#define ZSTD_pthread_cond_wait(a, b)    ((void)(a), (void)(b))
#define ZSTD_pthread_cond_signal(a)     ((void)(a))
#define ZSTD_pthread_cond_broadcast(a)  ((void)(a))

/* do not use ZSTD_pthread_t */
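The switch from empty expansions to `((void)(a), ...)` forms keeps single-threaded builds warning-free while remaining usable in expression position. A small hedged illustration, not part of the commit (the `demo` function is hypothetical):

#include <stddef.h>
#include "threading.h"   /* single-threaded fallback macros, ZSTD_MULTITHREAD not defined */

static int demo(void)
{
    ZSTD_pthread_mutex_t mtx;
    /* With the old empty macros the arguments were dropped entirely, which could
     * leave `mtx` looking unused; the new forms evaluate and discard them. */
    int const err = ZSTD_pthread_mutex_init(&mtx, NULL);  /* still usable as an expression */
    ZSTD_pthread_mutex_lock(&mtx);
    ZSTD_pthread_mutex_unlock(&mtx);
    ZSTD_pthread_mutex_destroy(&mtx);
    return err;
}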
|
@ -35,12 +35,20 @@ extern "C" {
|
|||
# define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
|
||||
#endif
|
||||
|
||||
/*-****************************************
|
||||
* error codes list
|
||||
* note : this API is still considered unstable
|
||||
* and shall not be used with a dynamic library.
|
||||
* only static linking is allowed
|
||||
******************************************/
|
||||
/*-*********************************************
|
||||
* Error codes list
|
||||
*-*********************************************
|
||||
* Error codes _values_ are pinned down since v1.3.1 only.
|
||||
* Therefore, don't rely on values if you may link to any version < v1.3.1.
|
||||
*
|
||||
* Only values < 100 are considered stable.
|
||||
*
|
||||
* note 1 : this API shall be used with static linking only.
|
||||
* dynamic linking is not yet officially supported.
|
||||
* note 2 : Prefer relying on the enum than on its value whenever possible
|
||||
* This is the only supported way to use the error list < v1.3.1
|
||||
* note 3 : ZSTD_isError() is always correct, whatever the library version.
|
||||
**********************************************/
|
||||
typedef enum {
|
||||
ZSTD_error_no_error = 0,
|
||||
ZSTD_error_GENERIC = 1,
|
||||
|
@@ -61,9 +69,10 @@ typedef enum {
  ZSTD_error_stage_wrong = 60,
  ZSTD_error_init_missing = 62,
  ZSTD_error_memory_allocation = 64,
  ZSTD_error_workSpace_tooSmall= 66,
  ZSTD_error_dstSize_tooSmall = 70,
  ZSTD_error_srcSize_wrong = 72,
  /* following error codes are not stable and may be removed or changed in a future version */
  /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
  ZSTD_error_frameIndex_tooLarge = 100,
  ZSTD_error_seekableIO = 102,
  ZSTD_error_maxCode = 120  /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
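Per note 2 above, callers should compare against the enum rather than hard-coding numeric values. A hedged sketch of that usage (ZSTD_getErrorCode() is the accessor declared elsewhere in this same header; the `demo` wrapper and deliberately tiny buffer are illustrative):

#include <stdio.h>
#include "zstd.h"
#include "zstd_errors.h"

static void demo(const void* src, size_t srcSize)
{
    char dst[8];   /* deliberately too small, to provoke an error */
    size_t const r = ZSTD_compress(dst, sizeof(dst), src, srcSize, 1);
    if (ZSTD_isError(r)) {
        if (ZSTD_getErrorCode(r) == ZSTD_error_dstSize_tooSmall)   /* enum, not the raw value 70 */
            printf("destination buffer too small: %s\n", ZSTD_getErrorName(r));
    }
}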
@@ -132,14 +132,15 @@ typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingTy

#define Litbits  8
#define MaxLit ((1<<Litbits) - 1)
#define MaxML  52
#define MaxLL  35
#define DefaultMaxOff 28
#define MaxOff 31
#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
#define MLFSELog    9
#define LLFSELog    9
#define OffFSELog   8
#define MaxFSELog  MAX(MAX(MLFSELog, LLFSELog), OffFSELog)

static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0,
                                      0, 0, 0, 0, 0, 0, 0, 0,
@@ -228,8 +229,6 @@ typedef struct {
    BYTE* ofCode;
    U32   longLengthID;   /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
    U32   longLengthPos;
    U32   rep[ZSTD_REP_NUM];
    U32   repToConfirm[ZSTD_REP_NUM];
} seqStore_t;

const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);   /* compress & dictBuilder */
@@ -248,7 +248,7 @@ static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize,
            bitCount  -= (count<max);
            previous0  = (count==1);
            if (remaining<1) return ERROR(GENERIC);
            while (remaining<threshold) nbBits--, threshold>>=1;
            while (remaining<threshold) { nbBits--; threshold>>=1; }
        }
        if (bitCount>16) {
            if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
@@ -292,7 +292,7 @@ size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalized
    It doesn't use any additional memory.
    But this function is unsafe : it doesn't check that all values within `src` can fit into `count`.
    For this reason, prefer using a table `count` with 256 elements.
   @return : count of most numerous element
   @return : count of most numerous element.
*/
size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
                        const void* src, size_t srcSize)
@@ -305,7 +305,10 @@ size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
    memset(count, 0, (maxSymbolValue+1)*sizeof(*count));
    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }

    while (ip<end) count[*ip++]++;
    while (ip<end) {
        assert(*ip <= maxSymbolValue);
        count[*ip++]++;
    }

    while (!count[maxSymbolValue]) maxSymbolValue--;
    *maxSymbolValuePtr = maxSymbolValue;
@@ -318,7 +321,8 @@ size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,

/* FSE_count_parallel_wksp() :
 * Same as FSE_count_parallel(), but using an externally provided scratch buffer.
 * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`` */
 * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`.
 * @return : largest histogram frequency, or an error code (notably when histogram would be larger than *maxSymbolValuePtr). */
static size_t FSE_count_parallel_wksp(
                                unsigned* count, unsigned* maxSymbolValuePtr,
                                const void* source, size_t sourceSize,
@@ -333,7 +337,7 @@ static size_t FSE_count_parallel_wksp(
    U32* const Counting3 = Counting2 + 256;
    U32* const Counting4 = Counting3 + 256;

    memset(Counting1, 0, 4*256*sizeof(unsigned));
    memset(workSpace, 0, 4*256*sizeof(unsigned));

    /* safety checks */
    if (!sourceSize) {
@@ -379,7 +383,9 @@ static size_t FSE_count_parallel_wksp(
        if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
    }   }

    {   U32 s; for (s=0; s<=maxSymbolValue; s++) {
    {   U32 s;
        if (maxSymbolValue > 255) maxSymbolValue = 255;
        for (s=0; s<=maxSymbolValue; s++) {
            count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
            if (count[s] > max) max = count[s];
    }   }
@@ -393,9 +399,11 @@ static size_t FSE_count_parallel_wksp(
 * Same as FSE_countFast(), but using an externally provided scratch buffer.
 * `workSpace` size must be table of >= `1024` unsigned */
size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                          const void* source, size_t sourceSize, unsigned* workSpace)
                          const void* source, size_t sourceSize,
                          unsigned* workSpace)
{
    if (sourceSize < 1500) return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize);
    if (sourceSize < 1500) /* heuristic threshold */
        return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize);
    return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace);
}
@@ -540,7 +548,7 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
               find max, then give all remaining points to max */
            U32 maxV = 0, maxC = 0;
            for (s=0; s<=maxSymbolValue; s++)
                if (count[s] > maxC) maxV=s, maxC=count[s];
                if (count[s] > maxC) { maxV=s; maxC=count[s]; }
            norm[maxV] += (short)ToDistribute;
            return 0;
        }
@@ -548,7 +556,7 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
        if (total == 0) {
            /* all of the symbols were low enough for the lowOne or lowThreshold */
            for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
                if (norm[s] > 0) ToDistribute--, norm[s]++;
                if (norm[s] > 0) { ToDistribute--; norm[s]++; }
            return 0;
        }
@@ -604,7 +612,7 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
                U64 restToBeat = vStep * rtbTable[proba];
                proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
            }
            if (proba > largestP) largestP=proba, largest=s;
            if (proba > largestP) { largestP=proba; largest=s; }
            normalizedCounter[s] = proba;
            stillToDistribute -= proba;
    }   }
@ -46,6 +46,7 @@
|
|||
#include <string.h> /* memcpy, memset */
|
||||
#include <stdio.h> /* printf (debug) */
|
||||
#include "bitstream.h"
|
||||
#include "compiler.h"
|
||||
#define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */
|
||||
#include "fse.h" /* header compression */
|
||||
#define HUF_STATIC_LINKING_ONLY
|
||||
|
@ -322,7 +323,10 @@ static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue)
|
|||
U32 const c = count[n];
|
||||
U32 const r = BIT_highbit32(c+1) + 1;
|
||||
U32 pos = rank[r].current++;
|
||||
while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) huffNode[pos]=huffNode[pos-1], pos--;
|
||||
while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) {
|
||||
huffNode[pos] = huffNode[pos-1];
|
||||
pos--;
|
||||
}
|
||||
huffNode[pos].count = c;
|
||||
huffNode[pos].byte = (BYTE)n;
|
||||
}
|
||||
|
@ -331,10 +335,10 @@ static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue)
|
|||
|
||||
/** HUF_buildCTable_wksp() :
|
||||
* Same as HUF_buildCTable(), but using externally allocated scratch buffer.
|
||||
* `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
|
||||
* `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of HUF_CTABLE_WORKSPACE_SIZE_U32 unsigned.
|
||||
*/
|
||||
#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
|
||||
typedef nodeElt huffNodeTable[2*HUF_SYMBOLVALUE_MAX+1 +1];
|
||||
typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
|
||||
size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
|
||||
{
|
||||
nodeElt* const huffNode0 = (nodeElt*)workSpace;
|
||||
|
@ -345,9 +349,10 @@ size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValu
|
|||
U32 nodeRoot;
|
||||
|
||||
/* safety checks */
|
||||
if (wkspSize < sizeof(huffNodeTable)) return ERROR(GENERIC); /* workSpace is not large enough */
|
||||
if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
|
||||
if (wkspSize < sizeof(huffNodeTable)) return ERROR(workSpace_tooSmall);
|
||||
if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
|
||||
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(GENERIC);
|
||||
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
|
||||
memset(huffNode0, 0, sizeof(huffNodeTable));
|
||||
|
||||
/* sort, decreasing order */
|
||||
|
@ -405,6 +410,7 @@ size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValu
|
|||
}
|
||||
|
||||
/** HUF_buildCTable() :
|
||||
* @return : maxNbBits
|
||||
* Note : count is used before tree is written, so they can safely overlap
|
||||
*/
|
||||
size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits)
|
||||
|
@ -432,13 +438,14 @@ static int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, uns
|
|||
return !bad;
|
||||
}
|
||||
|
||||
static void HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
|
||||
size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
|
||||
|
||||
FORCE_INLINE_TEMPLATE void
|
||||
HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
|
||||
{
|
||||
BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
|
||||
}
|
||||
|
||||
size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
|
||||
|
||||
#define HUF_FLUSHBITS(s) BIT_flushBits(s)
|
||||
|
||||
#define HUF_FLUSHBITS_1(stream) \
|
||||
|
@ -447,7 +454,10 @@ size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
|
|||
#define HUF_FLUSHBITS_2(stream) \
|
||||
if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream)
|
||||
|
||||
size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
|
||||
FORCE_INLINE_TEMPLATE size_t
|
||||
HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
|
||||
const void* src, size_t srcSize,
|
||||
const HUF_CElt* CTable)
|
||||
{
|
||||
const BYTE* ip = (const BYTE*) src;
|
||||
BYTE* const ostart = (BYTE*)dst;
|
||||
|
@ -491,8 +501,58 @@ size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, si
|
|||
return BIT_closeCStream(&bitC);
|
||||
}
|
||||
|
||||
#if DYNAMIC_BMI2
|
||||
|
||||
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
|
||||
static TARGET_ATTRIBUTE("bmi2") size_t
|
||||
HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
|
||||
const void* src, size_t srcSize,
|
||||
const HUF_CElt* CTable)
|
||||
{
|
||||
return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
|
||||
}
|
||||
|
||||
static size_t
|
||||
HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
|
||||
const void* src, size_t srcSize,
|
||||
const HUF_CElt* CTable)
|
||||
{
|
||||
return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
|
||||
}
|
||||
|
||||
static size_t
|
||||
HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
|
||||
const void* src, size_t srcSize,
|
||||
const HUF_CElt* CTable, const int bmi2)
|
||||
{
|
||||
if (bmi2) {
|
||||
return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
|
||||
}
|
||||
return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static size_t
|
||||
HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
|
||||
const void* src, size_t srcSize,
|
||||
const HUF_CElt* CTable, const int bmi2)
|
||||
{
|
||||
(void)bmi2;
|
||||
return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
|
||||
{
|
||||
return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
|
||||
}
|
||||
|
||||
|
||||
static size_t
|
||||
HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
|
||||
const void* src, size_t srcSize,
|
||||
const HUF_CElt* CTable, int bmi2)
|
||||
{
|
||||
size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */
|
||||
const BYTE* ip = (const BYTE*) src;
|
||||
|
@ -505,28 +565,31 @@ size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, si
|
|||
if (srcSize < 12) return 0; /* no saving possible : too small input */
|
||||
op += 6; /* jumpTable */
|
||||
|
||||
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
|
||||
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );
|
||||
if (cSize==0) return 0;
|
||||
assert(cSize <= 65535);
|
||||
MEM_writeLE16(ostart, (U16)cSize);
|
||||
op += cSize;
|
||||
}
|
||||
|
||||
ip += segmentSize;
|
||||
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
|
||||
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );
|
||||
if (cSize==0) return 0;
|
||||
assert(cSize <= 65535);
|
||||
MEM_writeLE16(ostart+2, (U16)cSize);
|
||||
op += cSize;
|
||||
}
|
||||
|
||||
ip += segmentSize;
|
||||
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
|
||||
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );
|
||||
if (cSize==0) return 0;
|
||||
assert(cSize <= 65535);
|
||||
MEM_writeLE16(ostart+4, (U16)cSize);
|
||||
op += cSize;
|
||||
}
|
||||
|
||||
ip += segmentSize;
|
||||
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, iend-ip, CTable) );
|
||||
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, iend-ip, CTable, bmi2) );
|
||||
if (cSize==0) return 0;
|
||||
op += cSize;
|
||||
}
|
||||
|
@ -534,15 +597,20 @@ size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, si
|
|||
return op-ostart;
|
||||
}
|
||||
|
||||
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
|
||||
{
|
||||
return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
|
||||
}
|
||||
|
||||
|
||||
static size_t HUF_compressCTable_internal(
|
||||
BYTE* const ostart, BYTE* op, BYTE* const oend,
|
||||
const void* src, size_t srcSize,
|
||||
unsigned singleStream, const HUF_CElt* CTable)
|
||||
unsigned singleStream, const HUF_CElt* CTable, const int bmi2)
|
||||
{
|
||||
size_t const cSize = singleStream ?
|
||||
HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) :
|
||||
HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable);
|
||||
HUF_compress1X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2) :
|
||||
HUF_compress4X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2);
|
||||
if (HUF_isError(cSize)) { return cSize; }
|
||||
if (cSize==0) { return 0; } /* uncompressible */
|
||||
op += cSize;
|
||||
|
@ -551,86 +619,98 @@ static size_t HUF_compressCTable_internal(
|
|||
return op-ostart;
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
U32 count[HUF_SYMBOLVALUE_MAX + 1];
|
||||
HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
|
||||
huffNodeTable nodeTable;
|
||||
} HUF_compress_tables_t;
|
||||
|
||||
/* `workSpace` must a table of at least 1024 unsigned */
|
||||
/* HUF_compress_internal() :
|
||||
* `workSpace` must a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
|
||||
static size_t HUF_compress_internal (
|
||||
void* dst, size_t dstSize,
|
||||
const void* src, size_t srcSize,
|
||||
unsigned maxSymbolValue, unsigned huffLog,
|
||||
unsigned singleStream,
|
||||
void* workSpace, size_t wkspSize,
|
||||
HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat)
|
||||
HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
|
||||
const int bmi2)
|
||||
{
|
||||
HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace;
|
||||
BYTE* const ostart = (BYTE*)dst;
|
||||
BYTE* const oend = ostart + dstSize;
|
||||
BYTE* op = ostart;
|
||||
|
||||
U32* count;
|
||||
size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1);
|
||||
HUF_CElt* CTable;
|
||||
size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1);
|
||||
|
||||
/* checks & inits */
|
||||
if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize) return ERROR(GENERIC);
|
||||
if (!srcSize) return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */
|
||||
if (!dstSize) return 0; /* cannot fit within dst budget */
|
||||
if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
|
||||
if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall);
|
||||
if (!srcSize) return 0; /* Uncompressed */
|
||||
if (!dstSize) return 0; /* cannot fit anything within dst budget */
|
||||
if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
|
||||
if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
|
||||
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
|
||||
if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
|
||||
if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
|
||||
|
||||
count = (U32*)workSpace;
|
||||
workSpace = (BYTE*)workSpace + countSize;
|
||||
wkspSize -= countSize;
|
||||
CTable = (HUF_CElt*)workSpace;
|
||||
workSpace = (BYTE*)workSpace + CTableSize;
|
||||
wkspSize -= CTableSize;
|
||||
|
||||
/* Heuristic : If we don't need to check the validity of the old table use the old table for small inputs */
|
||||
/* Heuristic : If old table is valid, use it for small inputs */
|
||||
if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
|
||||
return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
|
||||
return HUF_compressCTable_internal(ostart, op, oend,
|
||||
src, srcSize,
|
||||
singleStream, oldHufTable, bmi2);
|
||||
}
|
||||
|
||||
/* Scan input and build symbol stats */
|
||||
{ CHECK_V_F(largest, FSE_count_wksp (count, &maxSymbolValue, (const BYTE*)src, srcSize, (U32*)workSpace) );
|
||||
{ CHECK_V_F(largest, FSE_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->count) );
|
||||
if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
|
||||
if (largest <= (srcSize >> 7)+1) return 0; /* Fast heuristic : not compressible enough */
|
||||
if (largest <= (srcSize >> 7)+1) return 0; /* heuristic : probably not compressible enough */
|
||||
}
|
||||
|
||||
/* Check validity of previous table */
|
||||
if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) {
|
||||
if ( repeat
|
||||
&& *repeat == HUF_repeat_check
|
||||
&& !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
|
||||
*repeat = HUF_repeat_none;
|
||||
}
|
||||
/* Heuristic : use existing table for small inputs */
|
||||
if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
|
||||
return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
|
||||
return HUF_compressCTable_internal(ostart, op, oend,
|
||||
src, srcSize,
|
||||
singleStream, oldHufTable, bmi2);
|
||||
}
|
||||
|
||||
/* Build Huffman Tree */
|
||||
huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
|
||||
{ CHECK_V_F(maxBits, HUF_buildCTable_wksp (CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize) );
|
||||
{ CHECK_V_F(maxBits, HUF_buildCTable_wksp(table->CTable, table->count,
|
||||
maxSymbolValue, huffLog,
|
||||
table->nodeTable, sizeof(table->nodeTable)) );
|
||||
huffLog = (U32)maxBits;
|
||||
/* Zero the unused symbols so we can check it for validity */
|
||||
memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt));
|
||||
/* Zero unused symbols in CTable, so we can check it for validity */
|
||||
memset(table->CTable + (maxSymbolValue + 1), 0,
|
||||
sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt)));
|
||||
}
|
||||
|
||||
/* Write table description header */
|
||||
{ CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, CTable, maxSymbolValue, huffLog) );
|
||||
/* Check if using the previous table will be beneficial */
|
||||
{ CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, table->CTable, maxSymbolValue, huffLog) );
|
||||
/* Check if using previous huffman table is beneficial */
|
||||
if (repeat && *repeat != HUF_repeat_none) {
|
||||
size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue);
|
||||
size_t const newSize = HUF_estimateCompressedSize(CTable, count, maxSymbolValue);
|
||||
size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
|
||||
size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
|
||||
if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
|
||||
return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
|
||||
}
|
||||
}
|
||||
/* Use the new table */
|
||||
return HUF_compressCTable_internal(ostart, op, oend,
|
||||
src, srcSize,
|
||||
singleStream, oldHufTable, bmi2);
|
||||
} }
|
||||
|
||||
/* Use the new huffman table */
|
||||
if (hSize + 12ul >= srcSize) { return 0; }
|
||||
op += hSize;
|
||||
if (repeat) { *repeat = HUF_repeat_none; }
|
||||
if (oldHufTable) { memcpy(oldHufTable, CTable, CTableSize); } /* Save the new table */
|
||||
if (oldHufTable)
|
||||
memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */
|
||||
}
|
||||
return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, CTable);
|
||||
return HUF_compressCTable_internal(ostart, op, oend,
|
||||
src, srcSize,
|
||||
singleStream, table->CTable, bmi2);
|
||||
}
|
||||
|
||||
|
||||
|
@ -639,52 +719,70 @@ size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
|
|||
unsigned maxSymbolValue, unsigned huffLog,
|
||||
void* workSpace, size_t wkspSize)
|
||||
{
|
||||
return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0);
|
||||
return HUF_compress_internal(dst, dstSize, src, srcSize,
|
||||
maxSymbolValue, huffLog, 1 /*single stream*/,
|
||||
workSpace, wkspSize,
|
||||
NULL, NULL, 0, 0 /*bmi2*/);
|
||||
}
|
||||
|
||||
size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
|
||||
const void* src, size_t srcSize,
|
||||
unsigned maxSymbolValue, unsigned huffLog,
|
||||
void* workSpace, size_t wkspSize,
|
||||
HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat)
|
||||
HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
|
||||
{
|
||||
return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat, preferRepeat);
|
||||
return HUF_compress_internal(dst, dstSize, src, srcSize,
|
||||
maxSymbolValue, huffLog, 1 /*single stream*/,
|
||||
workSpace, wkspSize, hufTable,
|
||||
repeat, preferRepeat, bmi2);
|
||||
}
|
||||
|
||||
size_t HUF_compress1X (void* dst, size_t dstSize,
|
||||
const void* src, size_t srcSize,
|
||||
unsigned maxSymbolValue, unsigned huffLog)
|
||||
{
|
||||
unsigned workSpace[1024];
|
||||
unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
|
||||
return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
|
||||
}
|
||||
|
||||
/* HUF_compress4X_repeat():
|
||||
* compress input using 4 streams.
|
||||
* provide workspace to generate compression tables */
|
||||
size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
|
||||
const void* src, size_t srcSize,
|
||||
unsigned maxSymbolValue, unsigned huffLog,
|
||||
void* workSpace, size_t wkspSize)
|
||||
{
|
||||
return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0);
|
||||
return HUF_compress_internal(dst, dstSize, src, srcSize,
|
||||
maxSymbolValue, huffLog, 0 /*4 streams*/,
|
||||
workSpace, wkspSize,
|
||||
NULL, NULL, 0, 0 /*bmi2*/);
|
||||
}
|
||||
|
||||
/* HUF_compress4X_repeat():
|
||||
* compress input using 4 streams.
|
||||
* re-use an existing huffman compression table */
|
||||
size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
|
||||
const void* src, size_t srcSize,
|
||||
unsigned maxSymbolValue, unsigned huffLog,
|
||||
void* workSpace, size_t wkspSize,
|
||||
HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat)
|
||||
HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
|
||||
{
|
||||
return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, repeat, preferRepeat);
|
||||
return HUF_compress_internal(dst, dstSize, src, srcSize,
|
||||
maxSymbolValue, huffLog, 0 /* 4 streams */,
|
||||
workSpace, wkspSize,
|
||||
hufTable, repeat, preferRepeat, bmi2);
|
||||
}
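As a rough sketch (not part of the upstream patch), this is how a caller might drive the new repeat-table / bmi2 path; `literals`, `litSize`, `dst`, `dstCapacity` and the `bmi2` flag are assumptions of the example, and in real use the table and repeat state would live in a long-lived struct rather than on the stack:

    /* Illustrative only : reuse a Huffman table across blocks when profitable. */
    U32 workSpace[HUF_WORKSPACE_SIZE_U32];
    HUF_CElt hufTable[HUF_SYMBOLVALUE_MAX + 1];   /* persists across blocks in real use */
    HUF_repeat repeat = HUF_repeat_none;          /* upgraded to _check / _valid over time */

    size_t const cSize = HUF_compress4X_repeat(dst, dstCapacity,
                                               literals, litSize,
                                               255, HUF_TABLELOG_DEFAULT,
                                               workSpace, sizeof(workSpace),
                                               hufTable, &repeat,
                                               0 /* preferRepeat */, bmi2);
    if (HUF_isError(cSize))  { /* propagate error */ }
    else if (cSize == 0)     { /* not compressible : store literals raw */ }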
|
||||
|
||||
size_t HUF_compress2 (void* dst, size_t dstSize,
|
||||
const void* src, size_t srcSize,
|
||||
unsigned maxSymbolValue, unsigned huffLog)
|
||||
{
|
||||
unsigned workSpace[1024];
|
||||
unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
|
||||
return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
|
||||
}
|
||||
|
||||
size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
|
||||
{
|
||||
return HUF_compress2(dst, maxDstSize, src, (U32)srcSize, 255, HUF_TABLELOG_DEFAULT);
|
||||
return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT);
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
|
@ -30,8 +30,14 @@ extern "C" {
|
|||
/*-*************************************
|
||||
* Constants
|
||||
***************************************/
|
||||
static const U32 g_searchStrength = 8;
|
||||
#define HASH_READ_SIZE 8
|
||||
#define kSearchStrength 8
|
||||
#define HASH_READ_SIZE 8
|
||||
#define ZSTD_DUBT_UNSORTED_MARK 1 /* For btlazy2 strategy, index 1 now means "unsorted".
|
||||
It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
|
||||
It's not a big deal though : candidate will just be sorted again.
|
||||
Additionally, candidate position 1 will be lost.
|
||||
But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
|
||||
The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy */
|
||||
|
||||
|
||||
/*-*************************************
|
||||
|
@ -43,7 +49,7 @@ typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;
|
|||
typedef struct ZSTD_prefixDict_s {
|
||||
const void* dict;
|
||||
size_t dictSize;
|
||||
ZSTD_dictMode_e dictMode;
|
||||
ZSTD_dictContentType_e dictContentType;
|
||||
} ZSTD_prefixDict;
|
||||
|
||||
typedef struct {
|
||||
|
@ -51,7 +57,6 @@ typedef struct {
|
|||
FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
|
||||
FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
|
||||
FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
|
||||
U32 workspace[HUF_WORKSPACE_SIZE_U32];
|
||||
HUF_repeat hufCTable_repeatMode;
|
||||
FSE_repeat offcode_repeatMode;
|
||||
FSE_repeat matchlength_repeatMode;
|
||||
|
@ -93,12 +98,44 @@ typedef struct {
|
|||
U32 staticPrices; /* prices follow a pre-defined cost structure, statistics are irrelevant */
|
||||
} optState_t;
|
||||
|
||||
typedef struct {
|
||||
ZSTD_entropyCTables_t entropy;
|
||||
U32 rep[ZSTD_REP_NUM];
|
||||
} ZSTD_compressedBlockState_t;
|
||||
|
||||
typedef struct {
|
||||
BYTE const* nextSrc; /* next block here to continue on current prefix */
|
||||
BYTE const* base; /* All regular indexes relative to this position */
|
||||
BYTE const* dictBase; /* extDict indexes relative to this position */
|
||||
U32 dictLimit; /* below that point, need extDict */
|
||||
U32 lowLimit; /* below that point, no more data */
|
||||
} ZSTD_window_t;
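The window encodes all history as 32-bit indices relative to `base`; a minimal sketch (an assumed helper, not in the patch) of how such an index maps back to bytes:

    /* Illustrative only : indices >= dictLimit live in the current prefix,
     * indices in [lowLimit, dictLimit) live in the extDict,
     * indices < lowLimit are no longer addressable. */
    MEM_STATIC const BYTE* ZSTD_window_ptrFromIndex_example(ZSTD_window_t const window, U32 idx)
    {
        assert(idx >= window.lowLimit);
        return (idx >= window.dictLimit) ? window.base + idx : window.dictBase + idx;
    }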
|
||||
|
||||
typedef struct {
|
||||
ZSTD_window_t window; /* State for window round buffer management */
|
||||
U32 loadedDictEnd; /* index of end of dictionary */
|
||||
U32 nextToUpdate; /* index from which to continue table update */
|
||||
U32 nextToUpdate3; /* index from which to continue table update */
|
||||
U32 hashLog3; /* dispatch table : larger == faster, more memory */
|
||||
U32* hashTable;
|
||||
U32* hashTable3;
|
||||
U32* chainTable;
|
||||
optState_t opt; /* optimal parser state */
|
||||
} ZSTD_matchState_t;
|
||||
|
||||
typedef struct {
|
||||
ZSTD_compressedBlockState_t* prevCBlock;
|
||||
ZSTD_compressedBlockState_t* nextCBlock;
|
||||
ZSTD_matchState_t matchState;
|
||||
} ZSTD_blockState_t;
|
||||
|
||||
typedef struct {
|
||||
U32 offset;
|
||||
U32 checksum;
|
||||
} ldmEntry_t;
|
||||
|
||||
typedef struct {
|
||||
ZSTD_window_t window; /* State for the window round buffer management */
|
||||
ldmEntry_t* hashTable;
|
||||
BYTE* bucketOffsets; /* Next position in bucket to insert entry */
|
||||
U64 hashPower; /* Used to compute the rolling hash.
|
||||
|
@ -111,60 +148,68 @@ typedef struct {
|
|||
U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */
|
||||
U32 minMatchLength; /* Minimum match length */
|
||||
U32 hashEveryLog; /* Log number of entries to skip */
|
||||
U32 windowLog; /* Window log for the LDM */
|
||||
} ldmParams_t;
|
||||
|
||||
typedef struct {
|
||||
U32 offset;
|
||||
U32 litLength;
|
||||
U32 matchLength;
|
||||
} rawSeq;
|
||||
|
||||
typedef struct {
|
||||
rawSeq* seq; /* The start of the sequences */
|
||||
size_t pos; /* The position where reading stopped. <= size. */
|
||||
size_t size; /* The number of sequences. <= capacity. */
|
||||
size_t capacity; /* The capacity of the `seq` pointer */
|
||||
} rawSeqStore_t;
|
||||
|
||||
struct ZSTD_CCtx_params_s {
|
||||
ZSTD_format_e format;
|
||||
ZSTD_compressionParameters cParams;
|
||||
ZSTD_frameParameters fParams;
|
||||
|
||||
int compressionLevel;
|
||||
U32 forceWindow; /* force back-references to respect limit of
|
||||
int disableLiteralCompression;
|
||||
int forceWindow; /* force back-references to respect limit of
|
||||
* 1<<wLog, even for dictionary */
|
||||
|
||||
/* Multithreading: used to pass parameters to mtctx */
|
||||
U32 nbThreads;
|
||||
unsigned nbWorkers;
|
||||
unsigned jobSize;
|
||||
unsigned overlapSizeLog;
|
||||
|
||||
/* Long distance matching parameters */
|
||||
ldmParams_t ldmParams;
|
||||
|
||||
/* For use with createCCtxParams() and freeCCtxParams() only */
|
||||
/* Internal use, for createCCtxParams() and freeCCtxParams() only */
|
||||
ZSTD_customMem customMem;
|
||||
|
||||
}; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
|
||||
|
||||
struct ZSTD_CCtx_s {
|
||||
const BYTE* nextSrc; /* next block here to continue on current prefix */
|
||||
const BYTE* base; /* All regular indexes relative to this position */
|
||||
const BYTE* dictBase; /* extDict indexes relative to this position */
|
||||
U32 dictLimit; /* below that point, need extDict */
|
||||
U32 lowLimit; /* below that point, no more data */
|
||||
U32 nextToUpdate; /* index from which to continue dictionary update */
|
||||
U32 nextToUpdate3; /* index from which to continue dictionary update */
|
||||
U32 hashLog3; /* dispatch table : larger == faster, more memory */
|
||||
U32 loadedDictEnd; /* index of end of dictionary */
|
||||
ZSTD_compressionStage_e stage;
|
||||
U32 dictID;
|
||||
int cParamsChanged; /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
|
||||
int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
|
||||
ZSTD_CCtx_params requestedParams;
|
||||
ZSTD_CCtx_params appliedParams;
|
||||
U32 dictID;
|
||||
void* workSpace;
|
||||
size_t workSpaceSize;
|
||||
size_t blockSize;
|
||||
U64 pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */
|
||||
U64 consumedSrcSize;
|
||||
unsigned long long pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */
|
||||
unsigned long long consumedSrcSize;
|
||||
unsigned long long producedCSize;
|
||||
XXH64_state_t xxhState;
|
||||
ZSTD_customMem customMem;
|
||||
size_t staticSize;
|
||||
|
||||
seqStore_t seqStore; /* sequences storage ptrs */
|
||||
optState_t optState;
|
||||
ldmState_t ldmState; /* long distance matching state */
|
||||
U32* hashTable;
|
||||
U32* hashTable3;
|
||||
U32* chainTable;
|
||||
ZSTD_entropyCTables_t* entropy;
|
||||
seqStore_t seqStore; /* sequences storage ptrs */
|
||||
ldmState_t ldmState; /* long distance matching state */
|
||||
rawSeq* ldmSequences; /* Storage for the ldm output sequences */
|
||||
size_t maxNbLdmSequences;
|
||||
rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
|
||||
ZSTD_blockState_t blockState;
|
||||
U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
|
||||
|
||||
/* streaming */
|
||||
char* inBuff;
|
||||
|
@ -191,6 +236,12 @@ struct ZSTD_CCtx_s {
|
|||
};
|
||||
|
||||
|
||||
typedef size_t (*ZSTD_blockCompressor) (
|
||||
ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
|
||||
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict);
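A hedged sketch of how this dispatch is meant to be used; the names `ms`, `seqStore`, `rep`, `cParams`, `src` and `srcSize` are assumptions of the example:

    /* Illustrative only : pick the strategy-specific block compressor and run it. */
    int const extDict = (int)ZSTD_window_hasExtDict(ms->window);
    ZSTD_blockCompressor const blockCompressor =
            ZSTD_selectBlockCompressor(cParams->strategy, extDict);
    size_t const lastLiterals = blockCompressor(ms, seqStore, rep, cParams, src, srcSize);
    /* lastLiterals is the size of the trailing literal run, as returned by the
     * ZSTD_compressBlock_fast()/_doubleFast() implementations further below. */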
|
||||
|
||||
|
||||
MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
|
||||
{
|
||||
static const BYTE LL_Code[64] = { 0, 1, 2, 3, 4, 5, 6, 7,
|
||||
|
@ -359,10 +410,12 @@ MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* co
|
|||
}
|
||||
|
||||
/** ZSTD_count_2segments() :
|
||||
* can count match length with `ip` & `match` in 2 different segments.
|
||||
* convention : on reaching mEnd, match count continue starting from iStart
|
||||
*/
|
||||
MEM_STATIC size_t ZSTD_count_2segments(const BYTE* ip, const BYTE* match, const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
|
||||
* can count match length with `ip` & `match` in 2 different segments.
|
||||
* convention : on reaching mEnd, match count continue starting from iStart
|
||||
*/
|
||||
MEM_STATIC size_t
|
||||
ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
|
||||
const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
|
||||
{
|
||||
const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
|
||||
size_t const matchLength = ZSTD_count(ip, match, vEnd);
|
||||
|
@ -372,8 +425,8 @@ MEM_STATIC size_t ZSTD_count_2segments(const BYTE* ip, const BYTE* match, const
|
|||
|
||||
|
||||
/*-*************************************
|
||||
* Hashes
|
||||
***************************************/
|
||||
* Hashes
|
||||
***************************************/
|
||||
static const U32 prime3bytes = 506832829U;
|
||||
static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; }
|
||||
MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
|
||||
|
@ -411,6 +464,171 @@ MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
|
|||
}
|
||||
}
|
||||
|
||||
/*-*************************************
|
||||
* Round buffer management
|
||||
***************************************/
|
||||
/* Max current allowed */
|
||||
#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
|
||||
/* Maximum chunk size before overflow correction needs to be called again */
|
||||
#define ZSTD_CHUNKSIZE_MAX \
|
||||
( ((U32)-1) /* Maximum ending current index */ \
|
||||
- ZSTD_CURRENT_MAX) /* Maximum beginning lowLimit */
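A quick sanity check of these two constants, assuming ZSTD_WINDOWLOG_MAX == 31 (illustrative, not from the source):

    /* Illustrative only, assuming ZSTD_WINDOWLOG_MAX == 31 :
     *   ZSTD_CURRENT_MAX   = (3U << 29) + (1U << 31) = 0x60000000 + 0x80000000 = 0xE0000000
     *   ZSTD_CHUNKSIZE_MAX = 0xFFFFFFFF - 0xE0000000 = 0x1FFFFFFF   (~512 MB)
     * So as long as no single update appends more than ZSTD_CHUNKSIZE_MAX bytes,
     * the 32-bit current index cannot wrap before overflow correction can run. */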
|
||||
|
||||
/**
|
||||
* ZSTD_window_clear():
|
||||
* Clears the window containing the history by simply setting it to empty.
|
||||
*/
|
||||
MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
|
||||
{
|
||||
size_t const endT = (size_t)(window->nextSrc - window->base);
|
||||
U32 const end = (U32)endT;
|
||||
|
||||
window->lowLimit = end;
|
||||
window->dictLimit = end;
|
||||
}
|
||||
|
||||
/**
|
||||
* ZSTD_window_hasExtDict():
|
||||
* Returns non-zero if the window has a non-empty extDict.
|
||||
*/
|
||||
MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
|
||||
{
|
||||
return window.lowLimit < window.dictLimit;
|
||||
}
|
||||
|
||||
/**
|
||||
* ZSTD_window_needOverflowCorrection():
|
||||
* Returns non-zero if the indices are getting too large and need overflow
|
||||
* protection.
|
||||
*/
|
||||
MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
|
||||
void const* srcEnd)
|
||||
{
|
||||
U32 const current = (U32)((BYTE const*)srcEnd - window.base);
|
||||
return current > ZSTD_CURRENT_MAX;
|
||||
}
|
||||
|
||||
/**
|
||||
* ZSTD_window_correctOverflow():
|
||||
* Reduces the indices to protect from index overflow.
|
||||
* Returns the correction made to the indices, which must be applied to every
|
||||
* stored index.
|
||||
*
|
||||
* The least significant cycleLog bits of the indices must remain the same,
|
||||
* which may be 0. Every index up to maxDist in the past must be valid.
|
||||
* NOTE: (maxDist & cycleMask) must be zero.
|
||||
*/
|
||||
MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
|
||||
U32 maxDist, void const* src)
|
||||
{
|
||||
/* preemptive overflow correction:
|
||||
* 1. correction is large enough:
|
||||
* lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog
|
||||
* 1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
|
||||
*
|
||||
* current - newCurrent
|
||||
* > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)
|
||||
* > (3<<29) - (1<<chainLog)
|
||||
* > (3<<29) - (1<<30) (NOTE: chainLog <= 30)
|
||||
* > 1<<29
|
||||
*
|
||||
* 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:
|
||||
* After correction, current is less than (1<<chainLog + 1<<windowLog).
|
||||
* In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.
|
||||
* In 32-bit mode we are safe, because (chainLog <= 29), so
|
||||
* ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.
|
||||
* 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
|
||||
* windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
|
||||
*/
|
||||
U32 const cycleMask = (1U << cycleLog) - 1;
|
||||
U32 const current = (U32)((BYTE const*)src - window->base);
|
||||
U32 const newCurrent = (current & cycleMask) + maxDist;
|
||||
U32 const correction = current - newCurrent;
|
||||
assert((maxDist & cycleMask) == 0);
|
||||
assert(current > newCurrent);
|
||||
/* Loose bound, should be around 1<<29 (see above) */
|
||||
assert(correction > 1<<28);
|
||||
|
||||
window->base += correction;
|
||||
window->dictBase += correction;
|
||||
window->lowLimit -= correction;
|
||||
window->dictLimit -= correction;
|
||||
|
||||
DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
|
||||
window->lowLimit);
|
||||
return correction;
|
||||
}
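A worked example with illustrative values (cycleLog = chainLog = 20, maxDist = 1<<27), not taken from the source:

    /* Illustrative only :
     *   cycleMask  = (1U << 20) - 1 = 0x000FFFFF
     *   maxDist    = 1U << 27 = 0x08000000   -> (maxDist & cycleMask) == 0, as required
     *   current    = 0x60000000              -> (current & cycleMask) == 0
     *   newCurrent = 0x00000000 + 0x08000000 = 0x08000000
     *   correction = 0x60000000 - 0x08000000 = 0x58000000   ( > 1<<28, as asserted )
     * base/dictBase advance by `correction` while the limits and every stored
     * index shrink by the same amount, so relative distances are preserved. */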
|
||||
|
||||
/**
|
||||
* ZSTD_window_enforceMaxDist():
|
||||
* Updates lowLimit so that:
|
||||
* (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
|
||||
* This allows a simple check that index >= lowLimit to see if index is valid.
|
||||
* This must be called before a block compression call, with srcEnd as the block
|
||||
* source end.
|
||||
* If loadedDictEndPtr is not NULL, we set it to zero once we update lowLimit.
|
||||
* This is because dictionaries are allowed to be referenced as long as the last
|
||||
* byte of the dictionary is in the window, but once they are out of range,
|
||||
* they cannot be referenced. If loadedDictEndPtr is NULL, we use
|
||||
* loadedDictEnd == 0.
|
||||
*/
|
||||
MEM_STATIC void ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
|
||||
void const* srcEnd, U32 maxDist,
|
||||
U32* loadedDictEndPtr)
|
||||
{
|
||||
U32 const current = (U32)((BYTE const*)srcEnd - window->base);
|
||||
U32 loadedDictEnd = loadedDictEndPtr != NULL ? *loadedDictEndPtr : 0;
|
||||
if (current > maxDist + loadedDictEnd) {
|
||||
U32 const newLowLimit = current - maxDist;
|
||||
if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
|
||||
if (window->dictLimit < window->lowLimit) {
|
||||
DEBUGLOG(5, "Update dictLimit from %u to %u", window->dictLimit,
|
||||
window->lowLimit);
|
||||
window->dictLimit = window->lowLimit;
|
||||
}
|
||||
if (loadedDictEndPtr)
|
||||
*loadedDictEndPtr = 0;
|
||||
}
|
||||
}
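A hedged sketch of the intended per-block call pattern; `ms`, `cParams` and `blockEnd` are assumptions of the example:

    /* Illustrative only : call once per block, before compressing up to blockEnd. */
    U32 const maxDist = 1U << cParams->windowLog;
    ZSTD_window_enforceMaxDist(&ms->window, blockEnd, maxDist, &ms->loadedDictEnd);
    /* Afterwards a candidate index is usable iff (index >= ms->window.lowLimit). */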
|
||||
|
||||
/**
|
||||
* ZSTD_window_update():
|
||||
* Updates the window by appending [src, src + srcSize) to the window.
|
||||
* If it is not contiguous, the current prefix becomes the extDict, and we
|
||||
* forget about the extDict. Handles overlap of the prefix and extDict.
|
||||
* Returns non-zero if the segment is contiguous.
|
||||
*/
|
||||
MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
|
||||
void const* src, size_t srcSize)
|
||||
{
|
||||
BYTE const* const ip = (BYTE const*)src;
|
||||
U32 contiguous = 1;
|
||||
/* Check if blocks follow each other */
|
||||
if (src != window->nextSrc) {
|
||||
/* not contiguous */
|
||||
size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
|
||||
DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u",
|
||||
window->dictLimit);
|
||||
window->lowLimit = window->dictLimit;
|
||||
assert(distanceFromBase == (size_t)(U32)distanceFromBase); /* should never overflow */
|
||||
window->dictLimit = (U32)distanceFromBase;
|
||||
window->dictBase = window->base;
|
||||
window->base = ip - distanceFromBase;
|
||||
// ms->nextToUpdate = window->dictLimit;
|
||||
if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit; /* too small extDict */
|
||||
contiguous = 0;
|
||||
}
|
||||
window->nextSrc = ip + srcSize;
|
||||
/* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
|
||||
if ( (ip+srcSize > window->dictBase + window->lowLimit)
|
||||
& (ip < window->dictBase + window->dictLimit)) {
|
||||
ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
|
||||
U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
|
||||
window->lowLimit = lowLimitMax;
|
||||
}
|
||||
return contiguous;
|
||||
}
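A hedged sketch of how a caller is expected to feed the window (names assumed, not from the patch):

    /* Illustrative only : append the next block before searching it. */
    U32 const contiguous = ZSTD_window_update(&ms->window, src, srcSize);
    if (!contiguous) {
        /* the previous prefix just became the extDict : match finders must use
         * their _extDict variants, and table updates restart at the new prefix */
        ms->nextToUpdate = ms->window.dictLimit;  /* mirrors the commented-out hint above */
    }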
|
||||
|
||||
#if defined (__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
@ -421,6 +639,13 @@ MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
|
|||
* These prototypes shall only be called from within lib/compress
|
||||
* ============================================================== */
|
||||
|
||||
/* ZSTD_getCParamsFromCCtxParams() :
|
||||
* cParams are built depending on compressionLevel, src size hints,
|
||||
* LDM and manually set compression parameters.
|
||||
*/
|
||||
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
|
||||
const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize);
|
||||
|
||||
/*! ZSTD_initCStream_internal() :
|
||||
* Private use only. Init streaming operation.
|
||||
* expects params to be valid.
|
||||
|
@ -446,7 +671,7 @@ ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
|
|||
* Private use only. To be called from zstdmt_compress.c. */
|
||||
size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
|
||||
const void* dict, size_t dictSize,
|
||||
ZSTD_dictMode_e dictMode,
|
||||
ZSTD_dictContentType_e dictContentType,
|
||||
const ZSTD_CDict* cdict,
|
||||
ZSTD_CCtx_params params,
|
||||
unsigned long long pledgedSrcSize);
|
||||
|
@ -459,4 +684,26 @@ size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
|
|||
const void* dict,size_t dictSize,
|
||||
ZSTD_CCtx_params params);
|
||||
|
||||
|
||||
/* ZSTD_writeLastEmptyBlock() :
|
||||
* output an empty Block with end-of-frame mark to complete a frame
|
||||
* @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
|
||||
 * or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
|
||||
*/
|
||||
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
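A minimal usage sketch, with assumed output pointers `op`/`oend`:

    /* Illustrative only : terminate a frame whose last job produced no block. */
    size_t const lastBlockSize = ZSTD_writeLastEmptyBlock(op, (size_t)(oend - op));
    if (ZSTD_isError(lastBlockSize)) return lastBlockSize;  /* dstCapacity < ZSTD_blockHeaderSize */
    op += lastBlockSize;                                    /* == ZSTD_blockHeaderSize on success */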
|
||||
|
||||
|
||||
/* ZSTD_referenceExternalSequences() :
|
||||
* Must be called before starting a compression operation.
|
||||
* seqs must parse a prefix of the source.
|
||||
* This cannot be used when long range matching is enabled.
|
||||
* Zstd will use these sequences, and pass the literals to a secondary block
|
||||
* compressor.
|
||||
* @return : An error code on failure.
|
||||
* NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
|
||||
* access and data corruption.
|
||||
*/
|
||||
size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
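A hedged usage sketch; the sequence values are placeholders and, as the note above stresses, nothing validates them:

    /* Illustrative only : hand pre-computed sequences to the next compression call. */
    rawSeq seqs[16];
    size_t nbSeq = 0;
    /* ... fill seqs[0..nbSeq) with {offset, litLength, matchLength}, parsing a prefix of src ... */
    {   size_t const err = ZSTD_referenceExternalSequences(cctx, seqs, nbSeq);
        if (ZSTD_isError(err)) return err;
    }
    /* the next compression call on cctx emits these sequences and sends the
     * literals to the regular block compressor */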
|
||||
|
||||
|
||||
#endif /* ZSTD_COMPRESS_H */
|
||||
|
|
|
@ -12,44 +12,58 @@
|
|||
#include "zstd_double_fast.h"
|
||||
|
||||
|
||||
void ZSTD_fillDoubleHashTable(ZSTD_CCtx* cctx, const void* end, const U32 mls)
|
||||
void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
|
||||
ZSTD_compressionParameters const* cParams,
|
||||
void const* end)
|
||||
{
|
||||
U32* const hashLarge = cctx->hashTable;
|
||||
U32 const hBitsL = cctx->appliedParams.cParams.hashLog;
|
||||
U32* const hashSmall = cctx->chainTable;
|
||||
U32 const hBitsS = cctx->appliedParams.cParams.chainLog;
|
||||
const BYTE* const base = cctx->base;
|
||||
const BYTE* ip = base + cctx->nextToUpdate;
|
||||
U32* const hashLarge = ms->hashTable;
|
||||
U32 const hBitsL = cParams->hashLog;
|
||||
U32 const mls = cParams->searchLength;
|
||||
U32* const hashSmall = ms->chainTable;
|
||||
U32 const hBitsS = cParams->chainLog;
|
||||
const BYTE* const base = ms->window.base;
|
||||
const BYTE* ip = base + ms->nextToUpdate;
|
||||
const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
|
||||
const size_t fastHashFillStep = 3;
|
||||
const U32 fastHashFillStep = 3;
|
||||
|
||||
while(ip <= iend) {
|
||||
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
|
||||
hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
|
||||
ip += fastHashFillStep;
|
||||
/* Always insert every fastHashFillStep position into the hash tables.
|
||||
* Insert the other positions into the large hash table if their entry
|
||||
* is empty.
|
||||
*/
|
||||
for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
|
||||
U32 const current = (U32)(ip - base);
|
||||
U32 i;
|
||||
for (i = 0; i < fastHashFillStep; ++i) {
|
||||
size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
|
||||
size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
|
||||
if (i == 0)
|
||||
hashSmall[smHash] = current + i;
|
||||
if (i == 0 || hashLarge[lgHash] == 0)
|
||||
hashLarge[lgHash] = current + i;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
FORCE_INLINE_TEMPLATE
|
||||
size_t ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx,
|
||||
const void* src, size_t srcSize,
|
||||
const U32 mls)
|
||||
size_t ZSTD_compressBlock_doubleFast_generic(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize,
|
||||
U32 const mls /* template */)
|
||||
{
|
||||
U32* const hashLong = cctx->hashTable;
|
||||
const U32 hBitsL = cctx->appliedParams.cParams.hashLog;
|
||||
U32* const hashSmall = cctx->chainTable;
|
||||
const U32 hBitsS = cctx->appliedParams.cParams.chainLog;
|
||||
seqStore_t* seqStorePtr = &(cctx->seqStore);
|
||||
const BYTE* const base = cctx->base;
|
||||
U32* const hashLong = ms->hashTable;
|
||||
const U32 hBitsL = cParams->hashLog;
|
||||
U32* const hashSmall = ms->chainTable;
|
||||
const U32 hBitsS = cParams->chainLog;
|
||||
const BYTE* const base = ms->window.base;
|
||||
const BYTE* const istart = (const BYTE*)src;
|
||||
const BYTE* ip = istart;
|
||||
const BYTE* anchor = istart;
|
||||
const U32 lowestIndex = cctx->dictLimit;
|
||||
const U32 lowestIndex = ms->window.dictLimit;
|
||||
const BYTE* const lowest = base + lowestIndex;
|
||||
const BYTE* const iend = istart + srcSize;
|
||||
const BYTE* const ilimit = iend - HASH_READ_SIZE;
|
||||
U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
|
||||
U32 offset_1=rep[0], offset_2=rep[1];
|
||||
U32 offsetSaved = 0;
|
||||
|
||||
/* init */
|
||||
|
@ -76,7 +90,7 @@ size_t ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx,
|
|||
/* favor repcode */
|
||||
mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
|
||||
ip++;
|
||||
ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
|
||||
} else {
|
||||
U32 offset;
|
||||
if ( (matchIndexL > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip)) ) {
|
||||
|
@ -99,14 +113,14 @@ size_t ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx,
|
|||
while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
|
||||
}
|
||||
} else {
|
||||
ip += ((ip-anchor) >> g_searchStrength) + 1;
|
||||
ip += ((ip-anchor) >> kSearchStrength) + 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
offset_2 = offset_1;
|
||||
offset_1 = offset;
|
||||
|
||||
ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
}
|
||||
|
||||
/* match found */
|
||||
|
@ -129,61 +143,63 @@ size_t ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx,
|
|||
{ U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
|
||||
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
|
||||
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
|
||||
ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
|
||||
ip += rLength;
|
||||
anchor = ip;
|
||||
continue; /* faster when present ... (?) */
|
||||
} } }
|
||||
|
||||
/* save reps for next block */
|
||||
seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
|
||||
seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
|
||||
rep[0] = offset_1 ? offset_1 : offsetSaved;
|
||||
rep[1] = offset_2 ? offset_2 : offsetSaved;
|
||||
|
||||
/* Return the last literals size */
|
||||
return iend - anchor;
|
||||
}
|
||||
|
||||
|
||||
size_t ZSTD_compressBlock_doubleFast(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
|
||||
size_t ZSTD_compressBlock_doubleFast(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
|
||||
{
|
||||
const U32 mls = ctx->appliedParams.cParams.searchLength;
|
||||
const U32 mls = cParams->searchLength;
|
||||
switch(mls)
|
||||
{
|
||||
default: /* includes case 3 */
|
||||
case 4 :
|
||||
return ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4);
|
||||
return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 4);
|
||||
case 5 :
|
||||
return ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5);
|
||||
return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 5);
|
||||
case 6 :
|
||||
return ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6);
|
||||
return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 6);
|
||||
case 7 :
|
||||
return ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7);
|
||||
return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 7);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static size_t ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx,
|
||||
const void* src, size_t srcSize,
|
||||
const U32 mls)
|
||||
static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize,
|
||||
U32 const mls /* template */)
|
||||
{
|
||||
U32* const hashLong = ctx->hashTable;
|
||||
U32 const hBitsL = ctx->appliedParams.cParams.hashLog;
|
||||
U32* const hashSmall = ctx->chainTable;
|
||||
U32 const hBitsS = ctx->appliedParams.cParams.chainLog;
|
||||
seqStore_t* seqStorePtr = &(ctx->seqStore);
|
||||
const BYTE* const base = ctx->base;
|
||||
const BYTE* const dictBase = ctx->dictBase;
|
||||
U32* const hashLong = ms->hashTable;
|
||||
U32 const hBitsL = cParams->hashLog;
|
||||
U32* const hashSmall = ms->chainTable;
|
||||
U32 const hBitsS = cParams->chainLog;
|
||||
const BYTE* const base = ms->window.base;
|
||||
const BYTE* const dictBase = ms->window.dictBase;
|
||||
const BYTE* const istart = (const BYTE*)src;
|
||||
const BYTE* ip = istart;
|
||||
const BYTE* anchor = istart;
|
||||
const U32 lowestIndex = ctx->lowLimit;
|
||||
const U32 lowestIndex = ms->window.lowLimit;
|
||||
const BYTE* const dictStart = dictBase + lowestIndex;
|
||||
const U32 dictLimit = ctx->dictLimit;
|
||||
const U32 dictLimit = ms->window.dictLimit;
|
||||
const BYTE* const lowPrefixPtr = base + dictLimit;
|
||||
const BYTE* const dictEnd = dictBase + dictLimit;
|
||||
const BYTE* const iend = istart + srcSize;
|
||||
const BYTE* const ilimit = iend - 8;
|
||||
U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
|
||||
U32 offset_1=rep[0], offset_2=rep[1];
|
||||
|
||||
/* Search Loop */
|
||||
while (ip < ilimit) { /* < instead of <=, because (ip+1) */
|
||||
|
@ -209,7 +225,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx,
|
|||
const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
|
||||
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
|
||||
ip++;
|
||||
ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
|
||||
} else {
|
||||
if ((matchLongIndex > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
|
||||
const BYTE* matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
|
||||
|
@ -220,7 +236,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx,
|
|||
while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
|
||||
offset_2 = offset_1;
|
||||
offset_1 = offset;
|
||||
ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
|
||||
} else if ((matchIndex > lowestIndex) && (MEM_read32(match) == MEM_read32(ip))) {
|
||||
size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
|
||||
|
@ -245,10 +261,10 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx,
|
|||
}
|
||||
offset_2 = offset_1;
|
||||
offset_1 = offset;
|
||||
ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
|
||||
} else {
|
||||
ip += ((ip-anchor) >> g_searchStrength) + 1;
|
||||
ip += ((ip-anchor) >> kSearchStrength) + 1;
|
||||
continue;
|
||||
} }
|
||||
|
||||
|
@ -272,7 +288,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx,
|
|||
const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
|
||||
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
|
||||
U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
|
||||
ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
|
||||
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
|
||||
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
|
||||
ip += repLength2;
|
||||
|
@ -283,27 +299,29 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx,
|
|||
} } }
|
||||
|
||||
/* save reps for next block */
|
||||
seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;
|
||||
rep[0] = offset_1;
|
||||
rep[1] = offset_2;
|
||||
|
||||
/* Return the last literals size */
|
||||
return iend - anchor;
|
||||
}
|
||||
|
||||
|
||||
size_t ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx* ctx,
|
||||
const void* src, size_t srcSize)
|
||||
size_t ZSTD_compressBlock_doubleFast_extDict(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
|
||||
{
|
||||
U32 const mls = ctx->appliedParams.cParams.searchLength;
|
||||
U32 const mls = cParams->searchLength;
|
||||
switch(mls)
|
||||
{
|
||||
default: /* includes case 3 */
|
||||
case 4 :
|
||||
return ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4);
|
||||
return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 4);
|
||||
case 5 :
|
||||
return ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5);
|
||||
return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 5);
|
||||
case 6 :
|
||||
return ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6);
|
||||
return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 6);
|
||||
case 7 :
|
||||
return ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7);
|
||||
return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 7);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -16,11 +16,18 @@ extern "C" {
|
|||
#endif
|
||||
|
||||
#include "mem.h" /* U32 */
|
||||
#include "zstd.h" /* ZSTD_CCtx, size_t */
|
||||
#include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */
|
||||
|
||||
void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
|
||||
ZSTD_compressionParameters const* cParams,
|
||||
void const* end);
|
||||
size_t ZSTD_compressBlock_doubleFast(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
|
||||
size_t ZSTD_compressBlock_doubleFast_extDict(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
|
||||
|
||||
void ZSTD_fillDoubleHashTable(ZSTD_CCtx* cctx, const void* end, const U32 mls);
|
||||
size_t ZSTD_compressBlock_doubleFast(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
|
||||
size_t ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
|
||||
|
||||
#if defined (__cplusplus)
|
||||
}
|
||||
|
|
|
@ -12,39 +12,48 @@
|
|||
#include "zstd_fast.h"
|
||||
|
||||
|
||||
void ZSTD_fillHashTable (ZSTD_CCtx* zc, const void* end, const U32 mls)
|
||||
void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
|
||||
ZSTD_compressionParameters const* cParams,
|
||||
void const* end)
|
||||
{
|
||||
U32* const hashTable = zc->hashTable;
|
||||
U32 const hBits = zc->appliedParams.cParams.hashLog;
|
||||
const BYTE* const base = zc->base;
|
||||
const BYTE* ip = base + zc->nextToUpdate;
|
||||
U32* const hashTable = ms->hashTable;
|
||||
U32 const hBits = cParams->hashLog;
|
||||
U32 const mls = cParams->searchLength;
|
||||
const BYTE* const base = ms->window.base;
|
||||
const BYTE* ip = base + ms->nextToUpdate;
|
||||
const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
|
||||
const size_t fastHashFillStep = 3;
|
||||
const U32 fastHashFillStep = 3;
|
||||
|
||||
while(ip <= iend) {
|
||||
hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
|
||||
ip += fastHashFillStep;
|
||||
/* Always insert every fastHashFillStep position into the hash table.
|
||||
* Insert the other positions if their hash entry is empty.
|
||||
*/
|
||||
for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
|
||||
U32 const current = (U32)(ip - base);
|
||||
U32 i;
|
||||
for (i = 0; i < fastHashFillStep; ++i) {
|
||||
size_t const hash = ZSTD_hashPtr(ip + i, hBits, mls);
|
||||
if (i == 0 || hashTable[hash] == 0)
|
||||
hashTable[hash] = current + i;
|
||||
}
|
||||
}
|
||||
}
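Concretely, with fastHashFillStep == 3 the loop above behaves like this (positions are example values only):

    /* Illustrative only : one outer iteration at current == 90 covers 90, 91, 92 :
     *   i == 0 : hashTable[hash(ip+0)] = 90   unconditionally
     *   i == 1 : hashTable[hash(ip+1)] = 91   only if that slot is still 0
     *   i == 2 : hashTable[hash(ip+2)] = 92   only if that slot is still 0
     * every 3rd position is always indexed, and the gaps are filled
     * opportunistically without evicting existing entries. */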
|
||||
|
||||
|
||||
FORCE_INLINE_TEMPLATE
|
||||
size_t ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
|
||||
const void* src, size_t srcSize,
|
||||
const U32 mls)
|
||||
size_t ZSTD_compressBlock_fast_generic(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
void const* src, size_t srcSize,
|
||||
U32 const hlog, U32 const stepSize, U32 const mls)
|
||||
{
|
||||
U32* const hashTable = cctx->hashTable;
|
||||
U32 const hBits = cctx->appliedParams.cParams.hashLog;
|
||||
seqStore_t* seqStorePtr = &(cctx->seqStore);
|
||||
const BYTE* const base = cctx->base;
|
||||
U32* const hashTable = ms->hashTable;
|
||||
const BYTE* const base = ms->window.base;
|
||||
const BYTE* const istart = (const BYTE*)src;
|
||||
const BYTE* ip = istart;
|
||||
const BYTE* anchor = istart;
|
||||
const U32 lowestIndex = cctx->dictLimit;
|
||||
const U32 lowestIndex = ms->window.dictLimit;
|
||||
const BYTE* const lowest = base + lowestIndex;
|
||||
const BYTE* const iend = istart + srcSize;
|
||||
const BYTE* const ilimit = iend - HASH_READ_SIZE;
|
||||
U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
|
||||
U32 offset_1=rep[0], offset_2=rep[1];
|
||||
U32 offsetSaved = 0;
|
||||
|
||||
/* init */
|
||||
|
@ -57,7 +66,7 @@ size_t ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
|
|||
/* Main Search Loop */
|
||||
while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
|
||||
size_t mLength;
|
||||
size_t const h = ZSTD_hashPtr(ip, hBits, mls);
|
||||
size_t const h = ZSTD_hashPtr(ip, hlog, mls);
|
||||
U32 const current = (U32)(ip-base);
|
||||
U32 const matchIndex = hashTable[h];
|
||||
const BYTE* match = base + matchIndex;
|
||||
|
@ -66,21 +75,21 @@ size_t ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
|
|||
if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
|
||||
mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
|
||||
ip++;
|
||||
ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
|
||||
} else {
|
||||
U32 offset;
|
||||
if ( (matchIndex <= lowestIndex) || (MEM_read32(match) != MEM_read32(ip)) ) {
|
||||
ip += ((ip-anchor) >> g_searchStrength) + 1;
|
||||
if ( (matchIndex <= lowestIndex)
|
||||
|| (MEM_read32(match) != MEM_read32(ip)) ) {
|
||||
assert(stepSize >= 1);
|
||||
ip += ((ip-anchor) >> kSearchStrength) + stepSize;
|
||||
continue;
|
||||
}
|
||||
mLength = ZSTD_count(ip+4, match+4, iend) + 4;
|
||||
offset = (U32)(ip-match);
|
||||
while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
|
||||
offset_2 = offset_1;
|
||||
offset_1 = offset;
|
||||
|
||||
ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
}
|
||||
{ U32 const offset = (U32)(ip-match);
|
||||
while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
|
||||
offset_2 = offset_1;
|
||||
offset_1 = offset;
|
||||
ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
} }
|
||||
|
||||
/* match found */
|
||||
ip += mLength;
|
||||
|
@ -88,8 +97,8 @@ size_t ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
|
|||
|
||||
if (ip <= ilimit) {
|
||||
/* Fill Table */
|
||||
hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2; /* here because current+2 could be > iend-8 */
|
||||
hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
|
||||
hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2; /* here because current+2 could be > iend-8 */
|
||||
hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
|
||||
/* check immediate repcode */
|
||||
while ( (ip <= ilimit)
|
||||
&& ( (offset_2>0)
|
||||
|
@ -97,65 +106,67 @@ size_t ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
|
|||
/* store sequence */
|
||||
size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
|
||||
{ U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
|
||||
hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip-base);
|
||||
ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
|
||||
hashTable[ZSTD_hashPtr(ip, hlog, mls)] = (U32)(ip-base);
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
|
||||
ip += rLength;
|
||||
anchor = ip;
|
||||
continue; /* faster when present ... (?) */
|
||||
} } }
|
||||
|
||||
/* save reps for next block */
|
||||
seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
|
||||
seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
|
||||
rep[0] = offset_1 ? offset_1 : offsetSaved;
|
||||
rep[1] = offset_2 ? offset_2 : offsetSaved;
|
||||
|
||||
/* Return the last literals size */
|
||||
return iend - anchor;
|
||||
}
|
||||
|
||||
|
||||
size_t ZSTD_compressBlock_fast(ZSTD_CCtx* ctx,
|
||||
const void* src, size_t srcSize)
|
||||
size_t ZSTD_compressBlock_fast(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
|
||||
{
|
||||
const U32 mls = ctx->appliedParams.cParams.searchLength;
|
||||
U32 const hlog = cParams->hashLog;
|
||||
U32 const mls = cParams->searchLength;
|
||||
U32 const stepSize = cParams->targetLength;
|
||||
switch(mls)
|
||||
{
|
||||
default: /* includes case 3 */
|
||||
case 4 :
|
||||
return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4);
|
||||
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 4);
|
||||
case 5 :
|
||||
return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5);
|
||||
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 5);
|
||||
case 6 :
|
||||
return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6);
|
||||
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 6);
|
||||
case 7 :
|
||||
return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7);
|
||||
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 7);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static size_t ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
|
||||
const void* src, size_t srcSize,
|
||||
const U32 mls)
|
||||
static size_t ZSTD_compressBlock_fast_extDict_generic(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
void const* src, size_t srcSize,
|
||||
U32 const hlog, U32 const stepSize, U32 const mls)
|
||||
{
|
||||
U32* hashTable = ctx->hashTable;
|
||||
const U32 hBits = ctx->appliedParams.cParams.hashLog;
|
||||
seqStore_t* seqStorePtr = &(ctx->seqStore);
|
||||
const BYTE* const base = ctx->base;
|
||||
const BYTE* const dictBase = ctx->dictBase;
|
||||
U32* hashTable = ms->hashTable;
|
||||
const BYTE* const base = ms->window.base;
|
||||
const BYTE* const dictBase = ms->window.dictBase;
|
||||
const BYTE* const istart = (const BYTE*)src;
|
||||
const BYTE* ip = istart;
|
||||
const BYTE* anchor = istart;
|
||||
const U32 lowestIndex = ctx->lowLimit;
|
||||
const U32 lowestIndex = ms->window.lowLimit;
|
||||
const BYTE* const dictStart = dictBase + lowestIndex;
|
||||
const U32 dictLimit = ctx->dictLimit;
|
||||
const U32 dictLimit = ms->window.dictLimit;
|
||||
const BYTE* const lowPrefixPtr = base + dictLimit;
|
||||
const BYTE* const dictEnd = dictBase + dictLimit;
|
||||
const BYTE* const iend = istart + srcSize;
|
||||
const BYTE* const ilimit = iend - 8;
|
||||
U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
|
||||
U32 offset_1=rep[0], offset_2=rep[1];
|
||||
|
||||
/* Search Loop */
|
||||
while (ip < ilimit) { /* < instead of <=, because (ip+1) */
|
||||
const size_t h = ZSTD_hashPtr(ip, hBits, mls);
|
||||
const size_t h = ZSTD_hashPtr(ip, hlog, mls);
|
||||
const U32 matchIndex = hashTable[h];
|
||||
const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
|
||||
const BYTE* match = matchBase + matchIndex;
|
||||
|
@ -171,11 +182,12 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
|
|||
const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
|
||||
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
|
||||
ip++;
|
||||
ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
|
||||
} else {
|
||||
if ( (matchIndex < lowestIndex) ||
|
||||
(MEM_read32(match) != MEM_read32(ip)) ) {
|
||||
ip += ((ip-anchor) >> g_searchStrength) + 1;
|
||||
assert(stepSize >= 1);
|
||||
ip += ((ip-anchor) >> kSearchStrength) + stepSize;
|
||||
continue;
|
||||
}
|
||||
{ const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
|
||||
|
@ -186,7 +198,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
|
|||
offset = current - matchIndex;
|
||||
offset_2 = offset_1;
|
||||
offset_1 = offset;
|
||||
ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
|
||||
} }
|
||||
|
||||
/* found a match : store it */
|
||||
|
@ -195,8 +207,8 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
|
|||
|
||||
if (ip <= ilimit) {
|
||||
/* Fill Table */
|
||||
hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2;
|
||||
hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
|
||||
hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;
|
||||
hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
|
||||
/* check immediate repcode */
|
||||
while (ip <= ilimit) {
|
||||
U32 const current2 = (U32)(ip-base);
|
||||
|
@ -207,8 +219,8 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
|
|||
const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
|
||||
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
|
||||
U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
|
||||
ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
|
||||
hashTable[ZSTD_hashPtr(ip, hBits, mls)] = current2;
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
|
||||
hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
|
||||
ip += repLength2;
|
||||
anchor = ip;
|
||||
continue;
|
||||
|
@ -217,27 +229,31 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
|
|||
} } }
|
||||
|
||||
/* save reps for next block */
|
||||
seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;
|
||||
rep[0] = offset_1;
|
||||
rep[1] = offset_2;
|
||||
|
||||
/* Return the last literals size */
|
||||
return iend - anchor;
|
||||
}
|
||||
|
||||
|
||||
size_t ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx,
|
||||
const void* src, size_t srcSize)
|
||||
size_t ZSTD_compressBlock_fast_extDict(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
|
||||
{
|
||||
U32 const mls = ctx->appliedParams.cParams.searchLength;
|
||||
U32 const hlog = cParams->hashLog;
|
||||
U32 const mls = cParams->searchLength;
|
||||
U32 const stepSize = cParams->targetLength;
|
||||
switch(mls)
|
||||
{
|
||||
default: /* includes case 3 */
|
||||
case 4 :
|
||||
return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4);
|
||||
return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 4);
|
||||
case 5 :
|
||||
return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5);
|
||||
return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 5);
|
||||
case 6 :
|
||||
return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6);
|
||||
return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 6);
|
||||
case 7 :
|
||||
return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7);
|
||||
return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 7);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -16,13 +16,17 @@ extern "C" {
|
|||
#endif
|
||||
|
||||
#include "mem.h" /* U32 */
|
||||
#include "zstd.h" /* ZSTD_CCtx, size_t */
|
||||
#include "zstd_compress_internal.h"
|
||||
|
||||
void ZSTD_fillHashTable(ZSTD_CCtx* zc, const void* end, const U32 mls);
|
||||
size_t ZSTD_compressBlock_fast(ZSTD_CCtx* ctx,
|
||||
const void* src, size_t srcSize);
|
||||
size_t ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx,
|
||||
const void* src, size_t srcSize);
|
||||
void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
|
||||
ZSTD_compressionParameters const* cParams,
|
||||
void const* end);
|
||||
size_t ZSTD_compressBlock_fast(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
|
||||
size_t ZSTD_compressBlock_fast_extDict(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
|
||||
|
||||
#if defined (__cplusplus)
|
||||
}
|
||||
|
|
|
@@ -15,76 +15,90 @@
/*-*************************************
* Binary Tree search
***************************************/
/** ZSTD_insertBt1() : add one or multiple positions to tree.
* ip : assumed <= iend-8 .
* @return : nb of positions added */
static U32 ZSTD_insertBt1(ZSTD_CCtx* zc,
const BYTE* const ip, const BYTE* const iend,
U32 nbCompares, U32 const mls, U32 const extDict)

void ZSTD_updateDUBT(
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
const BYTE* ip, const BYTE* iend,
U32 mls)
{
U32* const hashTable = zc->hashTable;
U32 const hashLog = zc->appliedParams.cParams.hashLog;
size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
U32* const bt = zc->chainTable;
U32 const btLog = zc->appliedParams.cParams.chainLog - 1;
U32* const hashTable = ms->hashTable;
U32 const hashLog = cParams->hashLog;

U32* const bt = ms->chainTable;
U32 const btLog = cParams->chainLog - 1;
U32 const btMask = (1 << btLog) - 1;

const BYTE* const base = ms->window.base;
U32 const target = (U32)(ip - base);
U32 idx = ms->nextToUpdate;

if (idx != target)
DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
idx, target, ms->window.dictLimit);
assert(ip + 8 <= iend); /* condition for ZSTD_hashPtr */
(void)iend;

assert(idx >= ms->window.dictLimit); /* condition for valid base+idx */
for ( ; idx < target ; idx++) {
size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls); /* assumption : ip + 8 <= iend */
U32 const matchIndex = hashTable[h];

U32* const nextCandidatePtr = bt + 2*(idx&btMask);
U32* const sortMarkPtr = nextCandidatePtr + 1;

DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
hashTable[h] = idx; /* Update Hash Table */
*nextCandidatePtr = matchIndex; /* update BT like a chain */
*sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
}
ms->nextToUpdate = target;
}

/** ZSTD_insertDUBT1() :
|
||||
* sort one already inserted but unsorted position
|
||||
* assumption : current >= btlow == (current - btmask)
|
||||
* doesn't fail */
|
||||
static void ZSTD_insertDUBT1(
|
||||
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
|
||||
U32 current, const BYTE* inputEnd,
|
||||
U32 nbCompares, U32 btLow, int extDict)
|
||||
{
|
||||
U32* const bt = ms->chainTable;
|
||||
U32 const btLog = cParams->chainLog - 1;
|
||||
U32 const btMask = (1 << btLog) - 1;
|
||||
U32 matchIndex = hashTable[h];
|
||||
size_t commonLengthSmaller=0, commonLengthLarger=0;
|
||||
const BYTE* const base = zc->base;
|
||||
const BYTE* const dictBase = zc->dictBase;
|
||||
const U32 dictLimit = zc->dictLimit;
|
||||
const BYTE* const base = ms->window.base;
|
||||
const BYTE* const dictBase = ms->window.dictBase;
|
||||
const U32 dictLimit = ms->window.dictLimit;
|
||||
const BYTE* const ip = (current>=dictLimit) ? base + current : dictBase + current;
|
||||
const BYTE* const iend = (current>=dictLimit) ? inputEnd : dictBase + dictLimit;
|
||||
const BYTE* const dictEnd = dictBase + dictLimit;
|
||||
const BYTE* const prefixStart = base + dictLimit;
|
||||
const BYTE* match;
|
||||
const U32 current = (U32)(ip-base);
|
||||
const U32 btLow = btMask >= current ? 0 : current - btMask;
|
||||
U32* smallerPtr = bt + 2*(current&btMask);
|
||||
U32* largerPtr = smallerPtr + 1;
|
||||
U32 matchIndex = *smallerPtr;
|
||||
U32 dummy32; /* to be nullified at the end */
|
||||
U32 const windowLow = zc->lowLimit;
|
||||
U32 matchEndIdx = current+8+1;
|
||||
size_t bestLength = 8;
|
||||
#ifdef ZSTD_C_PREDICT
|
||||
U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0);
|
||||
U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1);
|
||||
predictedSmall += (predictedSmall>0);
|
||||
predictedLarge += (predictedLarge>0);
|
||||
#endif /* ZSTD_C_PREDICT */
|
||||
U32 const windowLow = ms->window.lowLimit;
|
||||
|
||||
DEBUGLOG(8, "ZSTD_insertBt1 (%u)", current);
|
||||
|
||||
assert(ip <= iend-8); /* required for h calculation */
|
||||
hashTable[h] = current; /* Update Hash Table */
|
||||
DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
|
||||
current, dictLimit, windowLow);
|
||||
assert(current >= btLow);
|
||||
assert(ip < iend); /* condition for ZSTD_count */
|
||||
|
||||
while (nbCompares-- && (matchIndex > windowLow)) {
|
||||
U32* const nextPtr = bt + 2*(matchIndex & btMask);
|
||||
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
|
||||
assert(matchIndex < current);
|
||||
|
||||
#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */
|
||||
const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */
|
||||
if (matchIndex == predictedSmall) {
|
||||
/* no need to check length, result known */
|
||||
*smallerPtr = matchIndex;
|
||||
if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
|
||||
smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
|
||||
matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
|
||||
predictedSmall = predictPtr[1] + (predictPtr[1]>0);
|
||||
continue;
|
||||
}
|
||||
if (matchIndex == predictedLarge) {
|
||||
*largerPtr = matchIndex;
|
||||
if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
|
||||
largerPtr = nextPtr;
|
||||
matchIndex = nextPtr[0];
|
||||
predictedLarge = predictPtr[0] + (predictPtr[0]>0);
|
||||
continue;
|
||||
}
|
||||
#endif
|
||||
|
||||
if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
|
||||
assert(matchIndex+matchLength >= dictLimit); /* might be wrong if extDict is incorrectly set to 0 */
|
||||
match = base + matchIndex;
|
||||
if ( (!extDict)
|
||||
|| (matchIndex+matchLength >= dictLimit) /* both in current segment*/
|
||||
|| (current < dictLimit) /* both in extDict */) {
|
||||
const BYTE* const mBase = !extDict || ((matchIndex+matchLength) >= dictLimit) ? base : dictBase;
|
||||
assert( (matchIndex+matchLength >= dictLimit) /* might be wrong if extDict is incorrectly set to 0 */
|
||||
|| (current < dictLimit) );
|
||||
match = mBase + matchIndex;
|
||||
matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
|
||||
} else {
|
||||
match = dictBase + matchIndex;
|
||||
|
@@ -93,11 +107,8 @@ static U32 ZSTD_insertBt1(ZSTD_CCtx* zc,
match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
|
||||
}
|
||||
|
||||
if (matchLength > bestLength) {
|
||||
bestLength = matchLength;
|
||||
if (matchLength > matchEndIdx - matchIndex)
|
||||
matchEndIdx = matchIndex + (U32)matchLength;
|
||||
}
|
||||
DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
|
||||
current, matchIndex, (U32)matchLength);
|
||||
|
||||
if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
|
||||
break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
|
||||
|
@@ -108,6 +119,8 @@ static U32 ZSTD_insertBt1(ZSTD_CCtx* zc,
*smallerPtr = matchIndex; /* update smaller idx */
|
||||
commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
|
||||
if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */
|
||||
DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
|
||||
matchIndex, btLow, nextPtr[1]);
|
||||
smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */
|
||||
matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */
|
||||
} else {
|
||||
|
@@ -115,184 +128,205 @@ static U32 ZSTD_insertBt1(ZSTD_CCtx* zc,
*largerPtr = matchIndex;
|
||||
commonLengthLarger = matchLength;
|
||||
if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */
|
||||
DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
|
||||
matchIndex, btLow, nextPtr[0]);
|
||||
largerPtr = nextPtr;
|
||||
matchIndex = nextPtr[0];
|
||||
} }
|
||||
|
||||
*smallerPtr = *largerPtr = 0;
|
||||
if (bestLength > 384) return MIN(192, (U32)(bestLength - 384)); /* speed optimization */
|
||||
assert(matchEndIdx > current + 8);
|
||||
return matchEndIdx - (current + 8);
|
||||
}
|
||||
|
||||
FORCE_INLINE_TEMPLATE
|
||||
void ZSTD_updateTree_internal(ZSTD_CCtx* zc,
|
||||
const BYTE* const ip, const BYTE* const iend,
|
||||
const U32 nbCompares, const U32 mls, const U32 extDict)
|
||||
{
|
||||
const BYTE* const base = zc->base;
|
||||
U32 const target = (U32)(ip - base);
|
||||
U32 idx = zc->nextToUpdate;
|
||||
DEBUGLOG(7, "ZSTD_updateTree_internal, from %u to %u (extDict:%u)",
|
||||
idx, target, extDict);
|
||||
|
||||
while(idx < target)
|
||||
idx += ZSTD_insertBt1(zc, base+idx, iend, nbCompares, mls, extDict);
|
||||
zc->nextToUpdate = target;
|
||||
}
|
||||
|
||||
void ZSTD_updateTree(ZSTD_CCtx* zc,
|
||||
const BYTE* const ip, const BYTE* const iend,
|
||||
const U32 nbCompares, const U32 mls)
|
||||
{
|
||||
ZSTD_updateTree_internal(zc, ip, iend, nbCompares, mls, 0 /*extDict*/);
|
||||
}
|
||||
|
||||
void ZSTD_updateTree_extDict(ZSTD_CCtx* zc,
|
||||
const BYTE* const ip, const BYTE* const iend,
|
||||
const U32 nbCompares, const U32 mls)
|
||||
{
|
||||
ZSTD_updateTree_internal(zc, ip, iend, nbCompares, mls, 1 /*extDict*/);
|
||||
}
|
||||
|
||||
|
||||
static size_t ZSTD_insertBtAndFindBestMatch (
|
||||
ZSTD_CCtx* zc,
|
||||
const BYTE* const ip, const BYTE* const iend,
|
||||
size_t* offsetPtr,
|
||||
U32 nbCompares, const U32 mls,
|
||||
U32 extDict)
|
||||
static size_t ZSTD_DUBT_findBestMatch (
|
||||
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
|
||||
const BYTE* const ip, const BYTE* const iend,
|
||||
size_t* offsetPtr,
|
||||
U32 const mls,
|
||||
U32 const extDict)
|
||||
{
|
||||
U32* const hashTable = zc->hashTable;
|
||||
U32 const hashLog = zc->appliedParams.cParams.hashLog;
|
||||
U32* const hashTable = ms->hashTable;
|
||||
U32 const hashLog = cParams->hashLog;
|
||||
size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
|
||||
U32* const bt = zc->chainTable;
|
||||
U32 const btLog = zc->appliedParams.cParams.chainLog - 1;
|
||||
U32 matchIndex = hashTable[h];
|
||||
|
||||
const BYTE* const base = ms->window.base;
|
||||
U32 const current = (U32)(ip-base);
|
||||
U32 const windowLow = ms->window.lowLimit;
|
||||
|
||||
U32* const bt = ms->chainTable;
|
||||
U32 const btLog = cParams->chainLog - 1;
|
||||
U32 const btMask = (1 << btLog) - 1;
|
||||
U32 matchIndex = hashTable[h];
|
||||
size_t commonLengthSmaller=0, commonLengthLarger=0;
|
||||
const BYTE* const base = zc->base;
|
||||
const BYTE* const dictBase = zc->dictBase;
|
||||
const U32 dictLimit = zc->dictLimit;
|
||||
const BYTE* const dictEnd = dictBase + dictLimit;
|
||||
const BYTE* const prefixStart = base + dictLimit;
|
||||
const U32 current = (U32)(ip-base);
|
||||
const U32 btLow = btMask >= current ? 0 : current - btMask;
|
||||
const U32 windowLow = zc->lowLimit;
|
||||
U32* smallerPtr = bt + 2*(current&btMask);
|
||||
U32* largerPtr = bt + 2*(current&btMask) + 1;
|
||||
U32 matchEndIdx = current+8+1;
|
||||
U32 dummy32; /* to be nullified at the end */
|
||||
size_t bestLength = 0;
|
||||
U32 const btLow = (btMask >= current) ? 0 : current - btMask;
|
||||
U32 const unsortLimit = MAX(btLow, windowLow);
|
||||
|
||||
U32* nextCandidate = bt + 2*(matchIndex&btMask);
|
||||
U32* unsortedMark = bt + 2*(matchIndex&btMask) + 1;
|
||||
U32 nbCompares = 1U << cParams->searchLog;
|
||||
U32 nbCandidates = nbCompares;
|
||||
U32 previousCandidate = 0;
|
||||
|
||||
DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", current);
|
||||
assert(ip <= iend-8); /* required for h calculation */
|
||||
hashTable[h] = current; /* Update Hash Table */
|
||||
|
||||
while (nbCompares-- && (matchIndex > windowLow)) {
|
||||
U32* const nextPtr = bt + 2*(matchIndex & btMask);
|
||||
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
|
||||
const BYTE* match;
|
||||
/* reach end of unsorted candidates list */
|
||||
while ( (matchIndex > unsortLimit)
|
||||
&& (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
|
||||
&& (nbCandidates > 1) ) {
|
||||
DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
|
||||
matchIndex);
|
||||
*unsortedMark = previousCandidate;
|
||||
previousCandidate = matchIndex;
|
||||
matchIndex = *nextCandidate;
|
||||
nextCandidate = bt + 2*(matchIndex&btMask);
|
||||
unsortedMark = bt + 2*(matchIndex&btMask) + 1;
|
||||
nbCandidates --;
|
||||
}
|
||||
|
||||
if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
|
||||
match = base + matchIndex;
|
||||
matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
|
||||
} else {
|
||||
match = dictBase + matchIndex;
|
||||
matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
|
||||
if (matchIndex+matchLength >= dictLimit)
|
||||
match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
|
||||
}
|
||||
if ( (matchIndex > unsortLimit)
|
||||
&& (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
|
||||
DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
|
||||
matchIndex);
|
||||
*nextCandidate = *unsortedMark = 0; /* nullify next candidate if it's still unsorted (note : simplification, detrimental to compression ratio, beneficial for speed) */
|
||||
}
|
||||
|
||||
if (matchLength > bestLength) {
|
||||
if (matchLength > matchEndIdx - matchIndex)
|
||||
matchEndIdx = matchIndex + (U32)matchLength;
|
||||
if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
|
||||
bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
|
||||
if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
|
||||
break; /* drop, to guarantee consistency (miss a little bit of compression) */
|
||||
/* batch sort stacked candidates */
|
||||
matchIndex = previousCandidate;
|
||||
while (matchIndex) { /* will end on matchIndex == 0 */
|
||||
U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
|
||||
U32 const nextCandidateIdx = *nextCandidateIdxPtr;
|
||||
ZSTD_insertDUBT1(ms, cParams, matchIndex, iend,
|
||||
nbCandidates, unsortLimit, extDict);
|
||||
matchIndex = nextCandidateIdx;
|
||||
nbCandidates++;
|
||||
}
|
||||
|
||||
/* find longest match */
|
||||
{ size_t commonLengthSmaller=0, commonLengthLarger=0;
|
||||
const BYTE* const dictBase = ms->window.dictBase;
|
||||
const U32 dictLimit = ms->window.dictLimit;
|
||||
const BYTE* const dictEnd = dictBase + dictLimit;
|
||||
const BYTE* const prefixStart = base + dictLimit;
|
||||
U32* smallerPtr = bt + 2*(current&btMask);
|
||||
U32* largerPtr = bt + 2*(current&btMask) + 1;
|
||||
U32 matchEndIdx = current+8+1;
|
||||
U32 dummy32; /* to be nullified at the end */
|
||||
size_t bestLength = 0;
|
||||
|
||||
matchIndex = hashTable[h];
|
||||
hashTable[h] = current; /* Update Hash Table */
|
||||
|
||||
while (nbCompares-- && (matchIndex > windowLow)) {
|
||||
U32* const nextPtr = bt + 2*(matchIndex & btMask);
|
||||
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
|
||||
const BYTE* match;
|
||||
|
||||
if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
|
||||
match = base + matchIndex;
|
||||
matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
|
||||
} else {
|
||||
match = dictBase + matchIndex;
|
||||
matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
|
||||
if (matchIndex+matchLength >= dictLimit)
|
||||
match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
|
||||
}
|
||||
|
||||
if (matchLength > bestLength) {
|
||||
if (matchLength > matchEndIdx - matchIndex)
|
||||
matchEndIdx = matchIndex + (U32)matchLength;
|
||||
if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
|
||||
bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
|
||||
if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
|
||||
break; /* drop, to guarantee consistency (miss a little bit of compression) */
|
||||
}
|
||||
}
|
||||
|
||||
if (match[matchLength] < ip[matchLength]) {
|
||||
/* match is smaller than current */
|
||||
*smallerPtr = matchIndex; /* update smaller idx */
|
||||
commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
|
||||
if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
|
||||
smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
|
||||
matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
|
||||
} else {
|
||||
/* match is larger than current */
|
||||
*largerPtr = matchIndex;
|
||||
commonLengthLarger = matchLength;
|
||||
if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
|
||||
largerPtr = nextPtr;
|
||||
matchIndex = nextPtr[0];
|
||||
} }
|
||||
|
||||
*smallerPtr = *largerPtr = 0;
|
||||
|
||||
assert(matchEndIdx > current+8); /* ensure nextToUpdate is increased */
|
||||
ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
|
||||
if (bestLength >= MINMATCH) {
|
||||
U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
|
||||
DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
|
||||
current, (U32)bestLength, (U32)*offsetPtr, mIndex);
|
||||
}
|
||||
|
||||
if (match[matchLength] < ip[matchLength]) {
|
||||
/* match is smaller than current */
|
||||
*smallerPtr = matchIndex; /* update smaller idx */
|
||||
commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
|
||||
if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
|
||||
smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
|
||||
matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
|
||||
} else {
|
||||
/* match is larger than current */
|
||||
*largerPtr = matchIndex;
|
||||
commonLengthLarger = matchLength;
|
||||
if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
|
||||
largerPtr = nextPtr;
|
||||
matchIndex = nextPtr[0];
|
||||
} }
|
||||
|
||||
*smallerPtr = *largerPtr = 0;
|
||||
|
||||
assert(matchEndIdx > current+8);
|
||||
zc->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
|
||||
return bestLength;
|
||||
return bestLength;
|
||||
}
|
||||
}
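/* [Editor's illustrative sketch -- not part of the upstream zstd sources.]
 * ZSTD_updateDUBT() and ZSTD_insertDUBT1() above implement a lazily sorted
 * binary tree: new positions are appended in O(1) and tagged with
 * ZSTD_DUBT_UNSORTED_MARK, and they are only sorted into the tree when a
 * later search walks over them ("batch sort stacked candidates").  The toy
 * structure below mimics that insert-cheap / sort-on-demand trade-off with a
 * pending buffer merged into a sorted array at lookup time (hypothetical
 * names, not the real DUBT layout):
 */
#include <stddef.h>

#define DEMO_CAP 64

typedef struct {              /* zero-initialize before use */
    unsigned sorted[DEMO_CAP];  size_t nbSorted;
    unsigned pending[DEMO_CAP]; size_t nbPending;   /* the "unsorted mark" side */
} demo_lazyTable;

static void demo_insert(demo_lazyTable* t, unsigned v)
{
    if (t->nbSorted + t->nbPending < DEMO_CAP)
        t->pending[t->nbPending++] = v;              /* O(1), no sorting here */
}

static void demo_sortPending(demo_lazyTable* t)
{
    while (t->nbPending) {                           /* pay the cost at search time */
        unsigned const v = t->pending[--t->nbPending];
        size_t i = t->nbSorted++;
        while (i && t->sorted[i-1] > v) { t->sorted[i] = t->sorted[i-1]; i--; }
        t->sorted[i] = v;                            /* insertion-sort into place */
    }
}

static int demo_contains(demo_lazyTable* t, unsigned v)
{
    size_t lo = 0, hi;
    demo_sortPending(t);
    hi = t->nbSorted;
    while (lo < hi) {                                /* plain binary search */
        size_t const mid = (lo + hi) / 2;
        if (t->sorted[mid] < v) lo = mid + 1; else hi = mid;
    }
    return (lo < t->nbSorted) && (t->sorted[lo] == v);
}
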
|
||||
|
||||
|
||||
/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
|
||||
static size_t ZSTD_BtFindBestMatch (
|
||||
ZSTD_CCtx* zc,
|
||||
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
|
||||
const BYTE* const ip, const BYTE* const iLimit,
|
||||
size_t* offsetPtr,
|
||||
const U32 maxNbAttempts, const U32 mls)
|
||||
const U32 mls /* template */)
|
||||
{
|
||||
if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */
|
||||
ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
|
||||
return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0);
|
||||
DEBUGLOG(7, "ZSTD_BtFindBestMatch");
|
||||
if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
|
||||
ZSTD_updateDUBT(ms, cParams, ip, iLimit, mls);
|
||||
return ZSTD_DUBT_findBestMatch(ms, cParams, ip, iLimit, offsetPtr, mls, 0);
|
||||
}
|
||||
|
||||
|
||||
static size_t ZSTD_BtFindBestMatch_selectMLS (
|
||||
ZSTD_CCtx* zc, /* Index table will be updated */
|
||||
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
|
||||
const BYTE* ip, const BYTE* const iLimit,
|
||||
size_t* offsetPtr,
|
||||
const U32 maxNbAttempts, const U32 matchLengthSearch)
|
||||
size_t* offsetPtr)
|
||||
{
|
||||
switch(matchLengthSearch)
|
||||
switch(cParams->searchLength)
|
||||
{
|
||||
default : /* includes case 3 */
|
||||
case 4 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
|
||||
case 5 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
|
||||
case 4 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 4);
|
||||
case 5 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 5);
|
||||
case 7 :
|
||||
case 6 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
|
||||
case 6 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 6);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/** Tree updater, providing best match */
|
||||
static size_t ZSTD_BtFindBestMatch_extDict (
|
||||
ZSTD_CCtx* zc,
|
||||
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
|
||||
const BYTE* const ip, const BYTE* const iLimit,
|
||||
size_t* offsetPtr,
|
||||
const U32 maxNbAttempts, const U32 mls)
|
||||
const U32 mls)
|
||||
{
|
||||
if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */
|
||||
ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
|
||||
return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1);
|
||||
DEBUGLOG(7, "ZSTD_BtFindBestMatch_extDict");
|
||||
if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
|
||||
ZSTD_updateDUBT(ms, cParams, ip, iLimit, mls);
|
||||
return ZSTD_DUBT_findBestMatch(ms, cParams, ip, iLimit, offsetPtr, mls, 1);
|
||||
}
|
||||
|
||||
|
||||
static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
|
||||
ZSTD_CCtx* zc, /* Index table will be updated */
|
||||
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
|
||||
const BYTE* ip, const BYTE* const iLimit,
|
||||
size_t* offsetPtr,
|
||||
const U32 maxNbAttempts, const U32 matchLengthSearch)
|
||||
size_t* offsetPtr)
|
||||
{
|
||||
switch(matchLengthSearch)
|
||||
switch(cParams->searchLength)
|
||||
{
|
||||
default : /* includes case 3 */
|
||||
case 4 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
|
||||
case 5 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
|
||||
case 4 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 4);
|
||||
case 5 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 5);
|
||||
case 7 :
|
||||
case 6 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
|
||||
case 6 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 6);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -305,15 +339,17 @@ static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
|
||||
/* Update chains up to ip (excluded)
|
||||
Assumption : always within prefix (i.e. not within extDict) */
|
||||
U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls)
|
||||
static U32 ZSTD_insertAndFindFirstIndex_internal(
|
||||
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
|
||||
const BYTE* ip, U32 const mls)
|
||||
{
|
||||
U32* const hashTable = zc->hashTable;
|
||||
const U32 hashLog = zc->appliedParams.cParams.hashLog;
|
||||
U32* const chainTable = zc->chainTable;
|
||||
const U32 chainMask = (1 << zc->appliedParams.cParams.chainLog) - 1;
|
||||
const BYTE* const base = zc->base;
|
||||
U32* const hashTable = ms->hashTable;
|
||||
const U32 hashLog = cParams->hashLog;
|
||||
U32* const chainTable = ms->chainTable;
|
||||
const U32 chainMask = (1 << cParams->chainLog) - 1;
|
||||
const BYTE* const base = ms->window.base;
|
||||
const U32 target = (U32)(ip - base);
|
||||
U32 idx = zc->nextToUpdate;
|
||||
U32 idx = ms->nextToUpdate;
|
||||
|
||||
while(idx < target) { /* catch up */
|
||||
size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
|
||||
|
@@ -322,35 +358,42 @@ U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls)
idx++;
|
||||
}
|
||||
|
||||
zc->nextToUpdate = target;
|
||||
ms->nextToUpdate = target;
|
||||
return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
|
||||
}
|
||||
|
||||
U32 ZSTD_insertAndFindFirstIndex(
|
||||
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
|
||||
const BYTE* ip)
|
||||
{
|
||||
return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, cParams->searchLength);
|
||||
}
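/* [Editor's illustrative sketch -- not part of the upstream zstd sources.]
 * The chain table used by ZSTD_insertAndFindFirstIndex_internal() above is a
 * rolling buffer of "previous position with the same hash": hashTable[h]
 * holds the most recent position hashing to h, and chainTable[pos & mask]
 * links each inserted position back to the one it displaced.  A minimal
 * version of insert + first-candidate lookup, with hypothetical names and a
 * toy 4-byte hash:
 */
#include <stdint.h>

#define DEMO_HASH_LOG  12
#define DEMO_CHAIN_LOG 16

typedef struct {              /* zero-initialize before use */
    uint32_t hashTable [1u << DEMO_HASH_LOG];
    uint32_t chainTable[1u << DEMO_CHAIN_LOG];   /* rolling buffer of back-links */
} demo_chainState;

static uint32_t demo_hash4(const unsigned char* p)   /* requires 4 readable bytes */
{
    uint32_t const v = (uint32_t)p[0] | ((uint32_t)p[1] << 8)
                     | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    return (v * 2654435761u) >> (32 - DEMO_HASH_LOG);
}

/* Insert position `pos` (offset from `base`) and return the previous head,
 * i.e. the first match candidate to examine.  0 means "no candidate". */
static uint32_t demo_insertAndFindFirstIndex(demo_chainState* s,
                                             const unsigned char* base, uint32_t pos)
{
    uint32_t const h         = demo_hash4(base + pos);
    uint32_t const chainMask = (1u << DEMO_CHAIN_LOG) - 1;
    uint32_t const candidate = s->hashTable[h];
    s->chainTable[pos & chainMask] = candidate;   /* link back to previous same-hash pos */
    s->hashTable[h] = pos;
    return candidate;
}
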
|
||||
|
||||
|
||||
/* inlining is important to hardwire a hot branch (template emulation) */
|
||||
FORCE_INLINE_TEMPLATE
|
||||
size_t ZSTD_HcFindBestMatch_generic (
|
||||
ZSTD_CCtx* zc, /* Index table will be updated */
|
||||
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
|
||||
const BYTE* const ip, const BYTE* const iLimit,
|
||||
size_t* offsetPtr,
|
||||
const U32 maxNbAttempts, const U32 mls, const U32 extDict)
|
||||
const U32 mls, const U32 extDict)
|
||||
{
|
||||
U32* const chainTable = zc->chainTable;
|
||||
const U32 chainSize = (1 << zc->appliedParams.cParams.chainLog);
|
||||
U32* const chainTable = ms->chainTable;
|
||||
const U32 chainSize = (1 << cParams->chainLog);
|
||||
const U32 chainMask = chainSize-1;
|
||||
const BYTE* const base = zc->base;
|
||||
const BYTE* const dictBase = zc->dictBase;
|
||||
const U32 dictLimit = zc->dictLimit;
|
||||
const BYTE* const base = ms->window.base;
|
||||
const BYTE* const dictBase = ms->window.dictBase;
|
||||
const U32 dictLimit = ms->window.dictLimit;
|
||||
const BYTE* const prefixStart = base + dictLimit;
|
||||
const BYTE* const dictEnd = dictBase + dictLimit;
|
||||
const U32 lowLimit = zc->lowLimit;
|
||||
const U32 lowLimit = ms->window.lowLimit;
|
||||
const U32 current = (U32)(ip-base);
|
||||
const U32 minChain = current > chainSize ? current - chainSize : 0;
|
||||
int nbAttempts=maxNbAttempts;
|
||||
U32 nbAttempts = 1U << cParams->searchLog;
|
||||
size_t ml=4-1;
|
||||
|
||||
/* HC4 match finder */
|
||||
U32 matchIndex = ZSTD_insertAndFindFirstIndex (zc, ip, mls);
|
||||
U32 matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
|
||||
|
||||
for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
|
||||
size_t currentMl=0;
|
||||
|
@@ -381,35 +424,33 @@ size_t ZSTD_HcFindBestMatch_generic (
|
||||
|
||||
FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
|
||||
ZSTD_CCtx* zc,
|
||||
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
|
||||
const BYTE* ip, const BYTE* const iLimit,
|
||||
size_t* offsetPtr,
|
||||
const U32 maxNbAttempts, const U32 matchLengthSearch)
|
||||
size_t* offsetPtr)
|
||||
{
|
||||
switch(matchLengthSearch)
|
||||
switch(cParams->searchLength)
|
||||
{
|
||||
default : /* includes case 3 */
|
||||
case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0);
|
||||
case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0);
|
||||
case 4 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 4, 0);
|
||||
case 5 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 5, 0);
|
||||
case 7 :
|
||||
case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0);
|
||||
case 6 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 6, 0);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
|
||||
ZSTD_CCtx* const zc,
|
||||
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
|
||||
const BYTE* ip, const BYTE* const iLimit,
|
||||
size_t* const offsetPtr,
|
||||
U32 const maxNbAttempts, U32 const matchLengthSearch)
|
||||
size_t* const offsetPtr)
|
||||
{
|
||||
switch(matchLengthSearch)
|
||||
switch(cParams->searchLength)
|
||||
{
|
||||
default : /* includes case 3 */
|
||||
case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1);
|
||||
case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1);
|
||||
case 4 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 4, 1);
|
||||
case 5 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 5, 1);
|
||||
case 7 :
|
||||
case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
|
||||
case 6 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 6, 1);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -418,30 +459,29 @@ FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
* Common parser - lazy strategy
|
||||
*********************************/
|
||||
FORCE_INLINE_TEMPLATE
|
||||
size_t ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
|
||||
const void* src, size_t srcSize,
|
||||
const U32 searchMethod, const U32 depth)
|
||||
size_t ZSTD_compressBlock_lazy_generic(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore,
|
||||
U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams,
|
||||
const void* src, size_t srcSize,
|
||||
const U32 searchMethod, const U32 depth)
|
||||
{
|
||||
seqStore_t* seqStorePtr = &(ctx->seqStore);
|
||||
const BYTE* const istart = (const BYTE*)src;
|
||||
const BYTE* ip = istart;
|
||||
const BYTE* anchor = istart;
|
||||
const BYTE* const iend = istart + srcSize;
|
||||
const BYTE* const ilimit = iend - 8;
|
||||
const BYTE* const base = ctx->base + ctx->dictLimit;
|
||||
const BYTE* const base = ms->window.base + ms->window.dictLimit;
|
||||
|
||||
U32 const maxSearches = 1 << ctx->appliedParams.cParams.searchLog;
|
||||
U32 const mls = ctx->appliedParams.cParams.searchLength;
|
||||
|
||||
typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
|
||||
size_t* offsetPtr,
|
||||
U32 maxNbAttempts, U32 matchLengthSearch);
|
||||
typedef size_t (*searchMax_f)(
|
||||
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
|
||||
const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
|
||||
searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
|
||||
U32 offset_1 = seqStorePtr->rep[0], offset_2 = seqStorePtr->rep[1], savedOffset=0;
|
||||
U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
|
||||
|
||||
/* init */
|
||||
ip += (ip==base);
|
||||
ctx->nextToUpdate3 = ctx->nextToUpdate;
|
||||
ms->nextToUpdate3 = ms->nextToUpdate;
|
||||
{ U32 const maxRep = (U32)(ip-base);
|
||||
if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
|
||||
if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
|
||||
|
@@ -462,13 +502,13 @@ size_t ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
|
||||
/* first search (depth 0) */
|
||||
{ size_t offsetFound = 99999999;
|
||||
size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
|
||||
size_t const ml2 = searchMax(ms, cParams, ip, iend, &offsetFound);
|
||||
if (ml2 > matchLength)
|
||||
matchLength = ml2, start = ip, offset=offsetFound;
|
||||
}
|
||||
|
||||
if (matchLength < 4) {
|
||||
ip += ((ip-anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
|
||||
ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@@ -484,7 +524,7 @@ size_t ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
matchLength = mlRep, offset = 0, start = ip;
|
||||
}
|
||||
{ size_t offset2=99999999;
|
||||
size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
|
||||
size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
|
||||
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
|
||||
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
|
||||
if ((ml2 >= 4) && (gain2 > gain1)) {
|
||||
|
@@ -503,7 +543,7 @@ size_t ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
matchLength = ml2, offset = 0, start = ip;
|
||||
}
|
||||
{ size_t offset2=99999999;
|
||||
size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
|
||||
size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
|
||||
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
|
||||
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
|
||||
if ((ml2 >= 4) && (gain2 > gain1)) {
|
||||
|
@@ -528,7 +568,7 @@ size_t ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
/* store sequence */
|
||||
_storeSequence:
|
||||
{ size_t const litLength = start - anchor;
|
||||
ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
|
||||
anchor = ip = start + matchLength;
|
||||
}
|
||||
|
||||
|
@@ -538,73 +578,80 @@ _storeSequence:
/* store sequence */
|
||||
matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
|
||||
offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
|
||||
ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
|
||||
ip += matchLength;
|
||||
anchor = ip;
|
||||
continue; /* faster when present ... (?) */
|
||||
} }
|
||||
|
||||
/* Save reps for next block */
|
||||
seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
|
||||
seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
|
||||
rep[0] = offset_1 ? offset_1 : savedOffset;
|
||||
rep[1] = offset_2 ? offset_2 : savedOffset;
|
||||
|
||||
/* Return the last literals size */
|
||||
return iend - anchor;
|
||||
}
|
||||
|
||||
|
||||
size_t ZSTD_compressBlock_btlazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
|
||||
size_t ZSTD_compressBlock_btlazy2(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
|
||||
{
|
||||
return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2);
|
||||
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 1, 2);
|
||||
}
|
||||
|
||||
size_t ZSTD_compressBlock_lazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
|
||||
size_t ZSTD_compressBlock_lazy2(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
|
||||
{
|
||||
return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2);
|
||||
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 2);
|
||||
}
|
||||
|
||||
size_t ZSTD_compressBlock_lazy(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
|
||||
size_t ZSTD_compressBlock_lazy(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
|
||||
{
|
||||
return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1);
|
||||
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 1);
|
||||
}
|
||||
|
||||
size_t ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
|
||||
size_t ZSTD_compressBlock_greedy(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
|
||||
{
|
||||
return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0);
|
||||
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 0);
|
||||
}
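/* [Editor's illustrative sketch -- not part of the upstream zstd sources.]
 * ZSTD_compressBlock_lazy_generic() above selects its match finder once,
 * through the searchMax_f function pointer (binary tree vs. hash chain), so
 * the per-position parsing loop carries no strategy branch; the thin wrappers
 * just above then fix the remaining template parameters (searchMethod,
 * depth).  The selection pattern in isolation, with hypothetical names:
 */
#include <stddef.h>

typedef size_t (*demo_search_f)(const unsigned char* ip, size_t remaining);

/* stand-ins for the two real match finders */
static size_t demo_search_hashChain (const unsigned char* ip, size_t remaining)
{ (void)ip; return remaining ? 1 : 0; }
static size_t demo_search_binaryTree(const unsigned char* ip, size_t remaining)
{ (void)ip; return remaining ? 2 : 0; }

static size_t demo_parse(const unsigned char* src, size_t srcSize, int useBinaryTree)
{
    demo_search_f const searchMax = useBinaryTree ? demo_search_binaryTree
                                                  : demo_search_hashChain;
    size_t total = 0, pos;
    for (pos = 0; pos < srcSize; pos++)
        total += searchMax(src + pos, srcSize - pos);   /* no strategy branch here */
    return total;
}
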
|
||||
|
||||
|
||||
FORCE_INLINE_TEMPLATE
|
||||
size_t ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
|
||||
const void* src, size_t srcSize,
|
||||
const U32 searchMethod, const U32 depth)
|
||||
size_t ZSTD_compressBlock_lazy_extDict_generic(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore,
|
||||
U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams,
|
||||
const void* src, size_t srcSize,
|
||||
const U32 searchMethod, const U32 depth)
|
||||
{
|
||||
seqStore_t* seqStorePtr = &(ctx->seqStore);
|
||||
const BYTE* const istart = (const BYTE*)src;
|
||||
const BYTE* ip = istart;
|
||||
const BYTE* anchor = istart;
|
||||
const BYTE* const iend = istart + srcSize;
|
||||
const BYTE* const ilimit = iend - 8;
|
||||
const BYTE* const base = ctx->base;
|
||||
const U32 dictLimit = ctx->dictLimit;
|
||||
const U32 lowestIndex = ctx->lowLimit;
|
||||
const BYTE* const base = ms->window.base;
|
||||
const U32 dictLimit = ms->window.dictLimit;
|
||||
const U32 lowestIndex = ms->window.lowLimit;
|
||||
const BYTE* const prefixStart = base + dictLimit;
|
||||
const BYTE* const dictBase = ctx->dictBase;
|
||||
const BYTE* const dictBase = ms->window.dictBase;
|
||||
const BYTE* const dictEnd = dictBase + dictLimit;
|
||||
const BYTE* const dictStart = dictBase + ctx->lowLimit;
|
||||
const BYTE* const dictStart = dictBase + lowestIndex;
|
||||
|
||||
const U32 maxSearches = 1 << ctx->appliedParams.cParams.searchLog;
|
||||
const U32 mls = ctx->appliedParams.cParams.searchLength;
|
||||
|
||||
typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
|
||||
size_t* offsetPtr,
|
||||
U32 maxNbAttempts, U32 matchLengthSearch);
|
||||
typedef size_t (*searchMax_f)(
|
||||
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
|
||||
const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
|
||||
searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
|
||||
|
||||
U32 offset_1 = seqStorePtr->rep[0], offset_2 = seqStorePtr->rep[1];
|
||||
U32 offset_1 = rep[0], offset_2 = rep[1];
|
||||
|
||||
/* init */
|
||||
ctx->nextToUpdate3 = ctx->nextToUpdate;
|
||||
ms->nextToUpdate3 = ms->nextToUpdate;
|
||||
ip += (ip == prefixStart);
|
||||
|
||||
/* Match Loop */
|
||||
|
@@ -628,13 +675,13 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
|
||||
/* first search (depth 0) */
|
||||
{ size_t offsetFound = 99999999;
|
||||
size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
|
||||
size_t const ml2 = searchMax(ms, cParams, ip, iend, &offsetFound);
|
||||
if (ml2 > matchLength)
|
||||
matchLength = ml2, start = ip, offset=offsetFound;
|
||||
}
|
||||
|
||||
if (matchLength < 4) {
|
||||
ip += ((ip-anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
|
||||
ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@@ -661,7 +708,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
|
||||
/* search match, depth 1 */
|
||||
{ size_t offset2=99999999;
|
||||
size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
|
||||
size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
|
||||
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
|
||||
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
|
||||
if ((ml2 >= 4) && (gain2 > gain1)) {
|
||||
|
@@ -691,7 +738,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
|
||||
/* search match, depth 2 */
|
||||
{ size_t offset2=99999999;
|
||||
size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
|
||||
size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
|
||||
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
|
||||
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
|
||||
if ((ml2 >= 4) && (gain2 > gain1)) {
|
||||
|
@@ -713,7 +760,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
/* store sequence */
|
||||
_storeSequence:
|
||||
{ size_t const litLength = start - anchor;
|
||||
ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
|
||||
anchor = ip = start + matchLength;
|
||||
}
|
||||
|
||||
|
@@ -728,7 +775,7 @@ _storeSequence:
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
|
||||
matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
|
||||
offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */
|
||||
ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
|
||||
ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
|
||||
ip += matchLength;
|
||||
anchor = ip;
|
||||
continue; /* faster when present ... (?) */
|
||||
|
@@ -737,29 +784,41 @@ _storeSequence:
} }
|
||||
|
||||
/* Save reps for next block */
|
||||
seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;
|
||||
rep[0] = offset_1;
|
||||
rep[1] = offset_2;
|
||||
|
||||
/* Return the last literals size */
|
||||
return iend - anchor;
|
||||
}
|
||||
|
||||
|
||||
size_t ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
|
||||
size_t ZSTD_compressBlock_greedy_extDict(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
|
||||
{
|
||||
return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0);
|
||||
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 0);
|
||||
}
|
||||
|
||||
size_t ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
|
||||
size_t ZSTD_compressBlock_lazy_extDict(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
|
||||
|
||||
{
|
||||
return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1);
|
||||
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 1);
|
||||
}
|
||||
|
||||
size_t ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
|
||||
size_t ZSTD_compressBlock_lazy2_extDict(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
|
||||
|
||||
{
|
||||
return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2);
|
||||
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 2);
|
||||
}
|
||||
|
||||
size_t ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
|
||||
size_t ZSTD_compressBlock_btlazy2_extDict(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
|
||||
|
||||
{
|
||||
return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2);
|
||||
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 1, 2);
|
||||
}
|
||||
@@ -15,22 +15,39 @@
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "mem.h" /* U32 */
|
||||
#include "zstd.h" /* ZSTD_CCtx, size_t */
|
||||
#include "zstd_compress_internal.h"
|
||||
|
||||
U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls);
|
||||
void ZSTD_updateTree(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls);
|
||||
void ZSTD_updateTree_extDict(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls);
|
||||
U32 ZSTD_insertAndFindFirstIndex(
|
||||
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
|
||||
const BYTE* ip);
|
||||
|
||||
size_t ZSTD_compressBlock_btlazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
|
||||
size_t ZSTD_compressBlock_lazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
|
||||
size_t ZSTD_compressBlock_lazy(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
|
||||
size_t ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
|
||||
void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). pre-emptively increase value of ZSTD_DUBT_UNSORTED_MARK */
|
||||
|
||||
size_t ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
|
||||
size_t ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
|
||||
size_t ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
|
||||
size_t ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
|
||||
size_t ZSTD_compressBlock_btlazy2(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
|
||||
size_t ZSTD_compressBlock_lazy2(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
|
||||
size_t ZSTD_compressBlock_lazy(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
|
||||
size_t ZSTD_compressBlock_greedy(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
|
||||
|
||||
size_t ZSTD_compressBlock_greedy_extDict(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
|
||||
size_t ZSTD_compressBlock_lazy_extDict(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
|
||||
size_t ZSTD_compressBlock_lazy2_extDict(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
|
||||
size_t ZSTD_compressBlock_btlazy2_extDict(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
|
||||
|
||||
#if defined (__cplusplus)
|
||||
}
|
||||
|
@@ -17,36 +17,45 @@
#define LDM_HASH_RLOG 7
|
||||
#define LDM_HASH_CHAR_OFFSET 10
|
||||
|
||||
size_t ZSTD_ldm_initializeParameters(ldmParams_t* params, U32 enableLdm)
|
||||
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
|
||||
ZSTD_compressionParameters const* cParams)
|
||||
{
|
||||
U32 const windowLog = cParams->windowLog;
|
||||
ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
|
||||
params->enableLdm = enableLdm>0;
|
||||
params->hashLog = 0;
|
||||
params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
|
||||
params->minMatchLength = LDM_MIN_MATCH_LENGTH;
|
||||
params->hashEveryLog = ZSTD_LDM_HASHEVERYLOG_NOTSET;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ZSTD_ldm_adjustParameters(ldmParams_t* params, U32 windowLog)
|
||||
{
|
||||
DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
|
||||
if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
|
||||
if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
|
||||
if (cParams->strategy >= ZSTD_btopt) {
|
||||
/* Get out of the way of the optimal parser */
|
||||
U32 const minMatch = MAX(cParams->targetLength, params->minMatchLength);
|
||||
assert(minMatch >= ZSTD_LDM_MINMATCH_MIN);
|
||||
assert(minMatch <= ZSTD_LDM_MINMATCH_MAX);
|
||||
params->minMatchLength = minMatch;
|
||||
}
|
||||
if (params->hashLog == 0) {
|
||||
params->hashLog = MAX(ZSTD_HASHLOG_MIN, windowLog - LDM_HASH_RLOG);
|
||||
assert(params->hashLog <= ZSTD_HASHLOG_MAX);
|
||||
}
|
||||
if (params->hashEveryLog == ZSTD_LDM_HASHEVERYLOG_NOTSET) {
|
||||
if (params->hashEveryLog == 0) {
|
||||
params->hashEveryLog =
|
||||
windowLog < params->hashLog ? 0 : windowLog - params->hashLog;
|
||||
}
|
||||
params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
|
||||
}
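/* [Editor's note -- illustrative arithmetic, not part of the upstream sources.]
 * When hashEveryLog is left at 0 above, it defaults to windowLog - hashLog,
 * so on average one position in 2^(windowLog - hashLog) passes the tag check
 * and is inserted into the LDM hash table.  For instance, with windowLog = 27
 * and hashLog = 20, hashEveryLog = 7 and roughly 1 in 128 positions is hashed,
 * which keeps the number of insertions proportional to the table size.
 */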
|
||||
|
||||
size_t ZSTD_ldm_getTableSize(U32 hashLog, U32 bucketSizeLog) {
size_t const ldmHSize = ((size_t)1) << hashLog;
size_t const ldmBucketSizeLog = MIN(bucketSizeLog, hashLog);
size_t ZSTD_ldm_getTableSize(ldmParams_t params)
{
size_t const ldmHSize = ((size_t)1) << params.hashLog;
size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
size_t const ldmBucketSize =
((size_t)1) << (hashLog - ldmBucketSizeLog);
return ldmBucketSize + (ldmHSize * (sizeof(ldmEntry_t)));
((size_t)1) << (params.hashLog - ldmBucketSizeLog);
size_t const totalSize = ldmBucketSize + ldmHSize * sizeof(ldmEntry_t);
return params.enableLdm ? totalSize : 0;
}

size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
{
return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0;
}

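/* [Editor's note -- illustrative arithmetic, not part of the upstream sources.]
 * Worked example for ZSTD_ldm_getTableSize() above: with hashLog = 20,
 * bucketSizeLog = 3, and assuming sizeof(ldmEntry_t) == 8 (a U32 offset plus
 * a U32 checksum),
 *     bucket area : 1 << (20 - 3)   =   131072 bytes
 *     hash table  : (1 << 20) * 8   =  8388608 bytes
 *     total       :                    8519680 bytes  (~8.1 MiB)
 * The same computation as a stand-alone helper (hypothetical name):
 */
#include <stddef.h>

static size_t demo_ldm_tableSize(unsigned hashLog, unsigned bucketSizeLog, size_t entrySize)
{
    size_t const hSize      = (size_t)1 << hashLog;
    size_t const bucketLog  = (bucketSizeLog < hashLog) ? bucketSizeLog : hashLog;
    size_t const bucketSize = (size_t)1 << (hashLog - bucketLog);
    return bucketSize + hSize * entrySize;     /* bucket fill bytes + entry array */
}
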
/** ZSTD_ldm_getSmallHash() :
|
||||
|
@@ -167,6 +176,7 @@ static U64 ZSTD_ldm_ipow(U64 base, U64 exp)
}
|
||||
|
||||
U64 ZSTD_ldm_getHashPower(U32 minMatchLength) {
|
||||
DEBUGLOG(4, "ZSTD_ldm_getHashPower: mml=%u", minMatchLength);
|
||||
assert(minMatchLength >= ZSTD_LDM_MINMATCH_MIN);
|
||||
return ZSTD_ldm_ipow(prime8bytes, minMatchLength - 1);
|
||||
}
|
||||
|
@@ -205,21 +215,22 @@ static size_t ZSTD_ldm_countBackwardsMatch(
*
|
||||
* The tables for the other strategies are filled within their
|
||||
* block compressors. */
|
||||
static size_t ZSTD_ldm_fillFastTables(ZSTD_CCtx* zc, const void* end)
|
||||
static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
|
||||
ZSTD_compressionParameters const* cParams,
|
||||
void const* end)
|
||||
{
|
||||
const BYTE* const iend = (const BYTE*)end;
|
||||
const U32 mls = zc->appliedParams.cParams.searchLength;
|
||||
|
||||
switch(zc->appliedParams.cParams.strategy)
|
||||
switch(cParams->strategy)
|
||||
{
|
||||
case ZSTD_fast:
|
||||
ZSTD_fillHashTable(zc, iend, mls);
|
||||
zc->nextToUpdate = (U32)(iend - zc->base);
|
||||
ZSTD_fillHashTable(ms, cParams, iend);
|
||||
ms->nextToUpdate = (U32)(iend - ms->window.base);
|
||||
break;
|
||||
|
||||
case ZSTD_dfast:
|
||||
ZSTD_fillDoubleHashTable(zc, iend, mls);
|
||||
zc->nextToUpdate = (U32)(iend - zc->base);
|
||||
ZSTD_fillDoubleHashTable(ms, cParams, iend);
|
||||
ms->nextToUpdate = (U32)(iend - ms->window.base);
|
||||
break;
|
||||
|
||||
case ZSTD_greedy:
|
||||
|
@@ -268,69 +279,62 @@ static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state,
* Sets cctx->nextToUpdate to a position corresponding closer to anchor
|
||||
* if it is far way
|
||||
* (after a long match, only update tables a limited amount). */
|
||||
static void ZSTD_ldm_limitTableUpdate(ZSTD_CCtx* cctx, const BYTE* anchor)
|
||||
static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
|
||||
{
|
||||
U32 const current = (U32)(anchor - cctx->base);
|
||||
if (current > cctx->nextToUpdate + 1024) {
|
||||
cctx->nextToUpdate =
|
||||
current - MIN(512, current - cctx->nextToUpdate - 1024);
|
||||
U32 const current = (U32)(anchor - ms->window.base);
|
||||
if (current > ms->nextToUpdate + 1024) {
|
||||
ms->nextToUpdate =
|
||||
current - MIN(512, current - ms->nextToUpdate - 1024);
|
||||
}
|
||||
}
|
||||
|
||||
typedef size_t (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, const void* src, size_t srcSize);
|
||||
/* defined in zstd_compress.c */
|
||||
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict);
|
||||
|
||||
FORCE_INLINE_TEMPLATE
|
||||
size_t ZSTD_compressBlock_ldm_generic(ZSTD_CCtx* cctx,
|
||||
const void* src, size_t srcSize)
|
||||
static size_t ZSTD_ldm_generateSequences_internal(
|
||||
ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
|
||||
ldmParams_t const* params, void const* src, size_t srcSize)
|
||||
{
|
||||
ldmState_t* const ldmState = &(cctx->ldmState);
|
||||
const ldmParams_t ldmParams = cctx->appliedParams.ldmParams;
|
||||
const U64 hashPower = ldmState->hashPower;
|
||||
const U32 hBits = ldmParams.hashLog - ldmParams.bucketSizeLog;
|
||||
const U32 ldmBucketSize = ((U32)1 << ldmParams.bucketSizeLog);
|
||||
const U32 ldmTagMask = ((U32)1 << ldmParams.hashEveryLog) - 1;
|
||||
seqStore_t* const seqStorePtr = &(cctx->seqStore);
|
||||
const BYTE* const base = cctx->base;
|
||||
const BYTE* const istart = (const BYTE*)src;
|
||||
const BYTE* ip = istart;
|
||||
const BYTE* anchor = istart;
|
||||
const U32 lowestIndex = cctx->dictLimit;
|
||||
const BYTE* const lowest = base + lowestIndex;
|
||||
const BYTE* const iend = istart + srcSize;
|
||||
const BYTE* const ilimit = iend - MAX(ldmParams.minMatchLength, HASH_READ_SIZE);
|
||||
|
||||
const ZSTD_blockCompressor blockCompressor =
|
||||
ZSTD_selectBlockCompressor(cctx->appliedParams.cParams.strategy, 0);
|
||||
U32* const repToConfirm = seqStorePtr->repToConfirm;
|
||||
U32 savedRep[ZSTD_REP_NUM];
|
||||
/* LDM parameters */
|
||||
int const extDict = ZSTD_window_hasExtDict(ldmState->window);
|
||||
U32 const minMatchLength = params->minMatchLength;
|
||||
U64 const hashPower = ldmState->hashPower;
|
||||
U32 const hBits = params->hashLog - params->bucketSizeLog;
|
||||
U32 const ldmBucketSize = 1U << params->bucketSizeLog;
|
||||
U32 const hashEveryLog = params->hashEveryLog;
|
||||
U32 const ldmTagMask = (1U << params->hashEveryLog) - 1;
|
||||
/* Prefix and extDict parameters */
|
||||
U32 const dictLimit = ldmState->window.dictLimit;
|
||||
U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
|
||||
BYTE const* const base = ldmState->window.base;
|
||||
BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
|
||||
BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
|
||||
BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
|
||||
BYTE const* const lowPrefixPtr = base + dictLimit;
|
||||
/* Input bounds */
|
||||
BYTE const* const istart = (BYTE const*)src;
|
||||
BYTE const* const iend = istart + srcSize;
|
||||
BYTE const* const ilimit = iend - MAX(minMatchLength, HASH_READ_SIZE);
|
||||
/* Input positions */
|
||||
BYTE const* anchor = istart;
|
||||
BYTE const* ip = istart;
|
||||
/* Rolling hash */
|
||||
BYTE const* lastHashed = NULL;
|
||||
U64 rollingHash = 0;
|
||||
const BYTE* lastHashed = NULL;
|
||||
size_t i, lastLiterals;
|
||||
|
||||
/* Save seqStorePtr->rep and copy repToConfirm */
|
||||
for (i = 0; i < ZSTD_REP_NUM; i++)
|
||||
savedRep[i] = repToConfirm[i] = seqStorePtr->rep[i];
|
||||
|
||||
/* Main Search Loop */
|
||||
while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
|
||||
while (ip <= ilimit) {
|
||||
size_t mLength;
|
||||
U32 const current = (U32)(ip - base);
|
||||
size_t forwardMatchLength = 0, backwardMatchLength = 0;
|
||||
ldmEntry_t* bestEntry = NULL;
|
||||
if (ip != istart) {
|
||||
rollingHash = ZSTD_ldm_updateHash(rollingHash, lastHashed[0],
|
||||
lastHashed[ldmParams.minMatchLength],
|
||||
lastHashed[minMatchLength],
|
||||
hashPower);
|
||||
} else {
|
||||
rollingHash = ZSTD_ldm_getRollingHash(ip, ldmParams.minMatchLength);
|
||||
rollingHash = ZSTD_ldm_getRollingHash(ip, minMatchLength);
|
||||
}
|
||||
lastHashed = ip;
|
||||
|
||||
/* Do not insert and do not look for a match */
|
||||
if (ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog) !=
|
||||
ldmTagMask) {
|
||||
if (ZSTD_ldm_getTag(rollingHash, hBits, hashEveryLog) != ldmTagMask) {
|
||||
ip++;
|
||||
continue;
|
||||
}
|
||||
|
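/* [Editor's illustrative sketch -- not part of the upstream zstd sources.]
 * The loop above maintains a rolling hash over the last minMatchLength bytes:
 * when the window slides by one position, the byte leaving the window is
 * subtracted out (scaled by a precomputed power) and the entering byte is
 * mixed in, so each step costs O(1) instead of rehashing the whole window.
 * A generic Rabin-Karp style version of that update, with hypothetical names
 * (the real ZSTD_ldm_updateHash/ZSTD_ldm_getRollingHash use their own
 * constants and per-byte mapping):
 */
#include <stdint.h>
#include <stddef.h>

#define DEMO_BASE 0x9E3779B185EBCA87ULL    /* arbitrary odd 64-bit multiplier */

/* hash of p[0..winLen-1]; also returns DEMO_BASE^(winLen-1) for later updates */
static uint64_t demo_rollInit(const unsigned char* p, size_t winLen, uint64_t* power)
{
    uint64_t h = 0, pw = 1;
    size_t i;
    for (i = 0; i < winLen; i++) {     /* winLen >= 1 */
        h = h * DEMO_BASE + p[i];
        if (i) pw *= DEMO_BASE;
    }
    *power = pw;
    return h;
}

/* slide the window by one byte: drop `out`, append `in` */
static uint64_t demo_rollUpdate(uint64_t h, unsigned char out, unsigned char in, uint64_t power)
{
    return (h - out * power) * DEMO_BASE + in;
}
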
@@ -340,27 +344,49 @@ size_t ZSTD_compressBlock_ldm_generic(ZSTD_CCtx* cctx,
ldmEntry_t* const bucket =
|
||||
ZSTD_ldm_getBucket(ldmState,
|
||||
ZSTD_ldm_getSmallHash(rollingHash, hBits),
|
||||
ldmParams);
|
||||
*params);
|
||||
ldmEntry_t* cur;
|
||||
size_t bestMatchLength = 0;
|
||||
U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
|
||||
|
||||
for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) {
|
||||
const BYTE* const pMatch = cur->offset + base;
|
||||
size_t curForwardMatchLength, curBackwardMatchLength,
|
||||
curTotalMatchLength;
|
||||
if (cur->checksum != checksum || cur->offset <= lowestIndex) {
|
||||
continue;
|
||||
}
|
||||
if (extDict) {
|
||||
BYTE const* const curMatchBase =
|
||||
cur->offset < dictLimit ? dictBase : base;
|
||||
BYTE const* const pMatch = curMatchBase + cur->offset;
|
||||
BYTE const* const matchEnd =
|
||||
cur->offset < dictLimit ? dictEnd : iend;
|
||||
BYTE const* const lowMatchPtr =
|
||||
cur->offset < dictLimit ? dictStart : lowPrefixPtr;
|
||||
|
||||
curForwardMatchLength = ZSTD_count(ip, pMatch, iend);
|
||||
if (curForwardMatchLength < ldmParams.minMatchLength) {
|
||||
continue;
|
||||
curForwardMatchLength = ZSTD_count_2segments(
|
||||
ip, pMatch, iend,
|
||||
matchEnd, lowPrefixPtr);
|
||||
if (curForwardMatchLength < minMatchLength) {
|
||||
continue;
|
||||
}
|
||||
curBackwardMatchLength =
|
||||
ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch,
|
||||
lowMatchPtr);
|
||||
curTotalMatchLength = curForwardMatchLength +
|
||||
curBackwardMatchLength;
|
||||
} else { /* !extDict */
|
||||
BYTE const* const pMatch = base + cur->offset;
|
||||
curForwardMatchLength = ZSTD_count(ip, pMatch, iend);
|
||||
if (curForwardMatchLength < minMatchLength) {
|
||||
continue;
|
||||
}
|
||||
curBackwardMatchLength =
|
||||
ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch,
|
||||
lowPrefixPtr);
|
||||
curTotalMatchLength = curForwardMatchLength +
|
||||
curBackwardMatchLength;
|
||||
}
|
||||
curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch(
|
||||
ip, anchor, pMatch, lowest);
|
||||
curTotalMatchLength = curForwardMatchLength +
|
||||
curBackwardMatchLength;
|
||||
|
||||
if (curTotalMatchLength > bestMatchLength) {
|
||||
bestMatchLength = curTotalMatchLength;
|
||||
|
@@ -375,7 +401,7 @@ size_t ZSTD_compressBlock_ldm_generic(ZSTD_CCtx* cctx,
if (bestEntry == NULL) {
|
||||
ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash,
|
||||
hBits, current,
|
||||
ldmParams);
|
||||
*params);
|
||||
ip++;
|
||||
continue;
|
||||
}
|
||||
|
@ -384,324 +410,244 @@ size_t ZSTD_compressBlock_ldm_generic(ZSTD_CCtx* cctx,
|
|||
mLength = forwardMatchLength + backwardMatchLength;
|
||||
ip -= backwardMatchLength;
|
||||
|
||||
/* Call the block compressor on the remaining literals */
|
||||
{
|
||||
/* Store the sequence:
|
||||
* ip = current - backwardMatchLength
|
||||
* The match is at (bestEntry->offset - backwardMatchLength)
|
||||
*/
|
||||
U32 const matchIndex = bestEntry->offset;
|
||||
const BYTE* const match = base + matchIndex - backwardMatchLength;
|
||||
U32 const offset = (U32)(ip - match);
|
||||
U32 const offset = current - matchIndex;
|
||||
rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
|
||||
|
||||
/* Overwrite rep codes */
|
||||
for (i = 0; i < ZSTD_REP_NUM; i++)
|
||||
seqStorePtr->rep[i] = repToConfirm[i];
|
||||
|
||||
/* Fill tables for block compressor */
|
||||
ZSTD_ldm_limitTableUpdate(cctx, anchor);
|
||||
ZSTD_ldm_fillFastTables(cctx, anchor);
|
||||
|
||||
/* Call block compressor and get remaining literals */
|
||||
lastLiterals = blockCompressor(cctx, anchor, ip - anchor);
|
||||
cctx->nextToUpdate = (U32)(ip - base);
|
||||
|
||||
/* Update repToConfirm with the new offset */
|
||||
for (i = ZSTD_REP_NUM - 1; i > 0; i--)
|
||||
repToConfirm[i] = repToConfirm[i-1];
|
||||
repToConfirm[0] = offset;
|
||||
|
||||
/* Store the sequence with the leftover literals */
|
||||
ZSTD_storeSeq(seqStorePtr, lastLiterals, ip - lastLiterals,
|
||||
offset + ZSTD_REP_MOVE, mLength - MINMATCH);
|
||||
/* Out of sequence storage */
|
||||
if (rawSeqStore->size == rawSeqStore->capacity)
|
||||
return ERROR(dstSize_tooSmall);
|
||||
seq->litLength = (U32)(ip - anchor);
|
||||
seq->matchLength = (U32)mLength;
|
||||
seq->offset = offset;
|
||||
rawSeqStore->size++;
|
||||
}
|
||||
|
||||
/* Insert the current entry into the hash table */
|
||||
ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
|
||||
(U32)(lastHashed - base),
|
||||
ldmParams);
|
||||
*params);
|
||||
|
||||
assert(ip + backwardMatchLength == lastHashed);
|
||||
|
||||
/* Fill the hash table from lastHashed+1 to ip+mLength*/
|
||||
/* Heuristic: don't need to fill the entire table at end of block */
|
||||
if (ip + mLength < ilimit) {
|
||||
if (ip + mLength <= ilimit) {
|
||||
rollingHash = ZSTD_ldm_fillLdmHashTable(
|
||||
ldmState, rollingHash, lastHashed,
|
||||
ip + mLength, base, hBits, ldmParams);
|
||||
ip + mLength, base, hBits, *params);
|
||||
lastHashed = ip + mLength - 1;
|
||||
}
|
||||
ip += mLength;
|
||||
anchor = ip;
|
||||
/* Check immediate repcode */
|
||||
while ( (ip < ilimit)
|
||||
&& ( (repToConfirm[1] > 0) && (repToConfirm[1] <= (U32)(ip-lowest))
|
||||
&& (MEM_read32(ip) == MEM_read32(ip - repToConfirm[1])) )) {
|
||||
}
|
||||
return iend - anchor;
|
||||
}
|
||||
|
||||
size_t const rLength = ZSTD_count(ip+4, ip+4-repToConfirm[1],
|
||||
iend) + 4;
|
||||
/* Swap repToConfirm[1] <=> repToConfirm[0] */
|
||||
{
|
||||
U32 const tmpOff = repToConfirm[1];
|
||||
repToConfirm[1] = repToConfirm[0];
|
||||
repToConfirm[0] = tmpOff;
|
||||
}
|
||||
/*! ZSTD_ldm_reduceTable() :
|
||||
* reduce table indexes by `reducerValue` */
|
||||
static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
|
||||
U32 const reducerValue)
|
||||
{
|
||||
U32 u;
|
||||
for (u = 0; u < size; u++) {
|
||||
if (table[u].offset < reducerValue) table[u].offset = 0;
|
||||
else table[u].offset -= reducerValue;
|
||||
}
|
||||
}
|
||||
|
||||
ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
|
||||
size_t ZSTD_ldm_generateSequences(
|
||||
ldmState_t* ldmState, rawSeqStore_t* sequences,
|
||||
ldmParams_t const* params, void const* src, size_t srcSize)
|
||||
{
|
||||
U32 const maxDist = 1U << params->windowLog;
|
||||
BYTE const* const istart = (BYTE const*)src;
|
||||
BYTE const* const iend = istart + srcSize;
|
||||
size_t const kMaxChunkSize = 1 << 20;
|
||||
size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
|
||||
size_t chunk;
|
||||
size_t leftoverSize = 0;
|
||||
|
||||
/* Fill the hash table from lastHashed+1 to ip+rLength*/
|
||||
if (ip + rLength < ilimit) {
|
||||
rollingHash = ZSTD_ldm_fillLdmHashTable(
|
||||
ldmState, rollingHash, lastHashed,
|
||||
ip + rLength, base, hBits, ldmParams);
|
||||
lastHashed = ip + rLength - 1;
|
||||
}
|
||||
ip += rLength;
|
||||
anchor = ip;
|
||||
assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
|
||||
/* Check that ZSTD_window_update() has been called for this chunk prior
|
||||
* to passing it to this function.
|
||||
*/
|
||||
assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
|
||||
/* The input could be very large (in zstdmt), so it must be broken up into
|
||||
* chunks to enforce the maximmum distance and handle overflow correction.
|
||||
*/
|
||||
assert(sequences->pos <= sequences->size);
|
||||
assert(sequences->size <= sequences->capacity);
|
||||
for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
|
||||
BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
|
||||
size_t const remaining = (size_t)(iend - chunkStart);
|
||||
BYTE const *const chunkEnd =
|
||||
(remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
|
||||
size_t const chunkSize = chunkEnd - chunkStart;
|
||||
size_t newLeftoverSize;
|
||||
size_t const prevSize = sequences->size;
|
||||
|
||||
assert(chunkStart < iend);
|
||||
/* 1. Perform overflow correction if necessary. */
|
||||
if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) {
|
||||
U32 const ldmHSize = 1U << params->hashLog;
|
||||
U32 const correction = ZSTD_window_correctOverflow(
|
||||
&ldmState->window, /* cycleLog */ 0, maxDist, src);
|
||||
ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
|
||||
}
|
||||
}
|
||||
|
||||
/* Overwrite rep */
|
||||
for (i = 0; i < ZSTD_REP_NUM; i++)
|
||||
seqStorePtr->rep[i] = repToConfirm[i];
|
||||
|
||||
ZSTD_ldm_limitTableUpdate(cctx, anchor);
|
||||
ZSTD_ldm_fillFastTables(cctx, anchor);
|
||||
|
||||
lastLiterals = blockCompressor(cctx, anchor, iend - anchor);
|
||||
cctx->nextToUpdate = (U32)(iend - base);
|
||||
|
||||
/* Restore seqStorePtr->rep */
|
||||
for (i = 0; i < ZSTD_REP_NUM; i++)
|
||||
seqStorePtr->rep[i] = savedRep[i];
|
||||
|
||||
/* Return the last literals size */
|
||||
return lastLiterals;
|
||||
}
|
||||
|
||||
size_t ZSTD_compressBlock_ldm(ZSTD_CCtx* ctx,
|
||||
const void* src, size_t srcSize)
|
||||
{
|
||||
return ZSTD_compressBlock_ldm_generic(ctx, src, srcSize);
|
||||
}
|
||||
|
||||
static size_t ZSTD_compressBlock_ldm_extDict_generic(
|
||||
ZSTD_CCtx* ctx,
|
||||
const void* src, size_t srcSize)
|
||||
{
|
||||
ldmState_t* const ldmState = &(ctx->ldmState);
|
||||
const ldmParams_t ldmParams = ctx->appliedParams.ldmParams;
|
||||
const U64 hashPower = ldmState->hashPower;
|
||||
const U32 hBits = ldmParams.hashLog - ldmParams.bucketSizeLog;
|
||||
const U32 ldmBucketSize = ((U32)1 << ldmParams.bucketSizeLog);
|
||||
const U32 ldmTagMask = ((U32)1 << ldmParams.hashEveryLog) - 1;
|
||||
seqStore_t* const seqStorePtr = &(ctx->seqStore);
|
||||
const BYTE* const base = ctx->base;
|
||||
const BYTE* const dictBase = ctx->dictBase;
|
||||
const BYTE* const istart = (const BYTE*)src;
|
||||
const BYTE* ip = istart;
|
||||
const BYTE* anchor = istart;
|
||||
const U32 lowestIndex = ctx->lowLimit;
|
||||
const BYTE* const dictStart = dictBase + lowestIndex;
|
||||
const U32 dictLimit = ctx->dictLimit;
|
||||
const BYTE* const lowPrefixPtr = base + dictLimit;
|
||||
const BYTE* const dictEnd = dictBase + dictLimit;
|
||||
const BYTE* const iend = istart + srcSize;
|
||||
const BYTE* const ilimit = iend - MAX(ldmParams.minMatchLength, HASH_READ_SIZE);
|
||||
|
||||
const ZSTD_blockCompressor blockCompressor =
|
||||
ZSTD_selectBlockCompressor(ctx->appliedParams.cParams.strategy, 1);
|
||||
U32* const repToConfirm = seqStorePtr->repToConfirm;
|
||||
U32 savedRep[ZSTD_REP_NUM];
|
||||
U64 rollingHash = 0;
|
||||
const BYTE* lastHashed = NULL;
|
||||
size_t i, lastLiterals;
|
||||
|
||||
/* Save seqStorePtr->rep and copy repToConfirm */
|
||||
for (i = 0; i < ZSTD_REP_NUM; i++) {
|
||||
savedRep[i] = repToConfirm[i] = seqStorePtr->rep[i];
|
||||
}
|
||||
|
||||
/* Search Loop */
|
||||
while (ip < ilimit) { /* < instead of <=, because (ip+1) */
|
||||
size_t mLength;
|
||||
const U32 current = (U32)(ip-base);
|
||||
size_t forwardMatchLength = 0, backwardMatchLength = 0;
|
||||
ldmEntry_t* bestEntry = NULL;
|
||||
if (ip != istart) {
|
||||
rollingHash = ZSTD_ldm_updateHash(rollingHash, lastHashed[0],
|
||||
lastHashed[ldmParams.minMatchLength],
|
||||
hashPower);
|
||||
/* 2. We enforce the maximum offset allowed.
|
||||
*
|
||||
* kMaxChunkSize should be small enough that we don't lose too much of
|
||||
* the window through early invalidation.
|
||||
* TODO: * Test the chunk size.
|
||||
* * Try invalidation after the sequence generation and test the
|
||||
* the offset against maxDist directly.
|
||||
*/
|
||||
ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, NULL);
|
||||
/* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
|
||||
newLeftoverSize = ZSTD_ldm_generateSequences_internal(
|
||||
ldmState, sequences, params, chunkStart, chunkSize);
|
||||
if (ZSTD_isError(newLeftoverSize))
|
||||
return newLeftoverSize;
|
||||
/* 4. We add the leftover literals from previous iterations to the first
|
||||
* newly generated sequence, or add the `newLeftoverSize` if none are
|
||||
* generated.
|
||||
*/
|
||||
/* Prepend the leftover literals from the last call */
|
||||
if (prevSize < sequences->size) {
|
||||
sequences->seq[prevSize].litLength += (U32)leftoverSize;
|
||||
leftoverSize = newLeftoverSize;
|
||||
} else {
|
||||
rollingHash = ZSTD_ldm_getRollingHash(ip, ldmParams.minMatchLength);
|
||||
}
|
||||
lastHashed = ip;
|
||||
|
||||
if (ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog) !=
|
||||
ldmTagMask) {
|
||||
/* Don't insert and don't look for a match */
|
||||
ip++;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Get the best entry and compute the match lengths */
|
||||
{
|
||||
ldmEntry_t* const bucket =
|
||||
ZSTD_ldm_getBucket(ldmState,
|
||||
ZSTD_ldm_getSmallHash(rollingHash, hBits),
|
||||
ldmParams);
|
||||
ldmEntry_t* cur;
|
||||
size_t bestMatchLength = 0;
|
||||
U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
|
||||
|
||||
for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) {
|
||||
const BYTE* const curMatchBase =
|
||||
cur->offset < dictLimit ? dictBase : base;
|
||||
const BYTE* const pMatch = curMatchBase + cur->offset;
|
||||
const BYTE* const matchEnd =
|
||||
cur->offset < dictLimit ? dictEnd : iend;
|
||||
const BYTE* const lowMatchPtr =
|
||||
cur->offset < dictLimit ? dictStart : lowPrefixPtr;
|
||||
size_t curForwardMatchLength, curBackwardMatchLength,
|
||||
curTotalMatchLength;
|
||||
|
||||
if (cur->checksum != checksum || cur->offset <= lowestIndex) {
|
||||
continue;
|
||||
}
|
||||
|
||||
curForwardMatchLength = ZSTD_count_2segments(
|
||||
ip, pMatch, iend,
|
||||
matchEnd, lowPrefixPtr);
|
||||
if (curForwardMatchLength < ldmParams.minMatchLength) {
|
||||
continue;
|
||||
}
|
||||
curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch(
|
||||
ip, anchor, pMatch, lowMatchPtr);
|
||||
curTotalMatchLength = curForwardMatchLength +
|
||||
curBackwardMatchLength;
|
||||
|
||||
if (curTotalMatchLength > bestMatchLength) {
|
||||
bestMatchLength = curTotalMatchLength;
|
||||
forwardMatchLength = curForwardMatchLength;
|
||||
backwardMatchLength = curBackwardMatchLength;
|
||||
bestEntry = cur;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* No match found -- continue searching */
|
||||
if (bestEntry == NULL) {
|
||||
ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
|
||||
(U32)(lastHashed - base),
|
||||
ldmParams);
|
||||
ip++;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Match found */
|
||||
mLength = forwardMatchLength + backwardMatchLength;
|
||||
ip -= backwardMatchLength;
|
||||
|
||||
/* Call the block compressor on the remaining literals */
|
||||
{
|
||||
/* ip = current - backwardMatchLength
|
||||
* The match is at (bestEntry->offset - backwardMatchLength) */
|
||||
U32 const matchIndex = bestEntry->offset;
|
||||
U32 const offset = current - matchIndex;
|
||||
|
||||
/* Overwrite rep codes */
|
||||
for (i = 0; i < ZSTD_REP_NUM; i++)
|
||||
seqStorePtr->rep[i] = repToConfirm[i];
|
||||
|
||||
/* Fill the hash table for the block compressor */
|
||||
ZSTD_ldm_limitTableUpdate(ctx, anchor);
|
||||
ZSTD_ldm_fillFastTables(ctx, anchor);
|
||||
|
||||
/* Call block compressor and get remaining literals */
|
||||
lastLiterals = blockCompressor(ctx, anchor, ip - anchor);
|
||||
ctx->nextToUpdate = (U32)(ip - base);
|
||||
|
||||
/* Update repToConfirm with the new offset */
|
||||
for (i = ZSTD_REP_NUM - 1; i > 0; i--)
|
||||
repToConfirm[i] = repToConfirm[i-1];
|
||||
repToConfirm[0] = offset;
|
||||
|
||||
/* Store the sequence with the leftover literals */
|
||||
ZSTD_storeSeq(seqStorePtr, lastLiterals, ip - lastLiterals,
|
||||
offset + ZSTD_REP_MOVE, mLength - MINMATCH);
|
||||
}
|
||||
|
||||
/* Insert the current entry into the hash table */
|
||||
ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
|
||||
(U32)(lastHashed - base),
|
||||
ldmParams);
|
||||
|
||||
/* Fill the hash table from lastHashed+1 to ip+mLength */
|
||||
assert(ip + backwardMatchLength == lastHashed);
|
||||
if (ip + mLength < ilimit) {
|
||||
rollingHash = ZSTD_ldm_fillLdmHashTable(
|
||||
ldmState, rollingHash, lastHashed,
|
||||
ip + mLength, base, hBits,
|
||||
ldmParams);
|
||||
lastHashed = ip + mLength - 1;
|
||||
}
|
||||
ip += mLength;
|
||||
anchor = ip;
|
||||
|
||||
/* check immediate repcode */
|
||||
while (ip < ilimit) {
|
||||
U32 const current2 = (U32)(ip-base);
|
||||
U32 const repIndex2 = current2 - repToConfirm[1];
|
||||
const BYTE* repMatch2 = repIndex2 < dictLimit ?
|
||||
dictBase + repIndex2 : base + repIndex2;
|
||||
if ( (((U32)((dictLimit-1) - repIndex2) >= 3) &
|
||||
(repIndex2 > lowestIndex)) /* intentional overflow */
|
||||
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
|
||||
const BYTE* const repEnd2 = repIndex2 < dictLimit ?
|
||||
dictEnd : iend;
|
||||
size_t const repLength2 =
|
||||
ZSTD_count_2segments(ip+4, repMatch2+4, iend,
|
||||
repEnd2, lowPrefixPtr) + 4;
|
||||
|
||||
U32 tmpOffset = repToConfirm[1];
|
||||
repToConfirm[1] = repToConfirm[0];
|
||||
repToConfirm[0] = tmpOffset;
|
||||
|
||||
ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
|
||||
|
||||
/* Fill the hash table from lastHashed+1 to ip+repLength2*/
|
||||
if (ip + repLength2 < ilimit) {
|
||||
rollingHash = ZSTD_ldm_fillLdmHashTable(
|
||||
ldmState, rollingHash, lastHashed,
|
||||
ip + repLength2, base, hBits,
|
||||
ldmParams);
|
||||
lastHashed = ip + repLength2 - 1;
|
||||
}
|
||||
ip += repLength2;
|
||||
anchor = ip;
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
assert(newLeftoverSize == chunkSize);
|
||||
leftoverSize += chunkSize;
|
||||
}
|
||||
}
|
||||
|
||||
/* Overwrite rep */
|
||||
for (i = 0; i < ZSTD_REP_NUM; i++)
|
||||
seqStorePtr->rep[i] = repToConfirm[i];
|
||||
|
||||
ZSTD_ldm_limitTableUpdate(ctx, anchor);
|
||||
ZSTD_ldm_fillFastTables(ctx, anchor);
|
||||
|
||||
/* Call the block compressor one last time on the last literals */
|
||||
lastLiterals = blockCompressor(ctx, anchor, iend - anchor);
|
||||
ctx->nextToUpdate = (U32)(iend - base);
|
||||
|
||||
/* Restore seqStorePtr->rep */
|
||||
for (i = 0; i < ZSTD_REP_NUM; i++)
|
||||
seqStorePtr->rep[i] = savedRep[i];
|
||||
|
||||
/* Return the last literals size */
|
||||
return lastLiterals;
|
||||
return 0;
|
||||
}
|
||||
|
||||
size_t ZSTD_compressBlock_ldm_extDict(ZSTD_CCtx* ctx,
|
||||
const void* src, size_t srcSize)
|
||||
void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) {
|
||||
while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
|
||||
rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
|
||||
if (srcSize <= seq->litLength) {
|
||||
/* Skip past srcSize literals */
|
||||
seq->litLength -= (U32)srcSize;
|
||||
return;
|
||||
}
|
||||
srcSize -= seq->litLength;
|
||||
seq->litLength = 0;
|
||||
if (srcSize < seq->matchLength) {
|
||||
/* Skip past the first srcSize of the match */
|
||||
seq->matchLength -= (U32)srcSize;
|
||||
if (seq->matchLength < minMatch) {
|
||||
/* The match is too short, omit it */
|
||||
if (rawSeqStore->pos + 1 < rawSeqStore->size) {
|
||||
seq[1].litLength += seq[0].matchLength;
|
||||
}
|
||||
rawSeqStore->pos++;
|
||||
}
|
||||
return;
|
||||
}
|
||||
srcSize -= seq->matchLength;
|
||||
seq->matchLength = 0;
|
||||
rawSeqStore->pos++;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* If the sequence length is longer than remaining then the sequence is split
|
||||
* between this block and the next.
|
||||
*
|
||||
* Returns the current sequence to handle, or if the rest of the block should
|
||||
* be literals, it returns a sequence with offset == 0.
|
||||
*/
|
||||
static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
|
||||
U32 const remaining, U32 const minMatch)
|
||||
{
|
||||
return ZSTD_compressBlock_ldm_extDict_generic(ctx, src, srcSize);
|
||||
rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
|
||||
assert(sequence.offset > 0);
|
||||
/* Likely: No partial sequence */
|
||||
if (remaining >= sequence.litLength + sequence.matchLength) {
|
||||
rawSeqStore->pos++;
|
||||
return sequence;
|
||||
}
|
||||
/* Cut the sequence short (offset == 0 ==> rest is literals). */
|
||||
if (remaining <= sequence.litLength) {
|
||||
sequence.offset = 0;
|
||||
} else if (remaining < sequence.litLength + sequence.matchLength) {
|
||||
sequence.matchLength = remaining - sequence.litLength;
|
||||
if (sequence.matchLength < minMatch) {
|
||||
sequence.offset = 0;
|
||||
}
|
||||
}
|
||||
/* Skip past `remaining` bytes for the future sequences. */
|
||||
ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
|
||||
return sequence;
|
||||
}
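To make the splitting rule above concrete, here is a small illustrative trace; it is not part of the upstream sources, and the numbers and the local demoSeq struct are invented for the example. With 25 bytes remaining in the block, a stored sequence of litLength = 10 and matchLength = 40 is cut down to matchLength = 15 for the current block; the rest is consumed from the raw sequence store on the next call.

#include <stdio.h>

/* Hypothetical stand-in for the internal rawSeq fields used above. */
typedef struct { unsigned litLength, matchLength, offset; } demoSeq;

/* Mirrors the splitting rule of maybeSplitSequence() for a single sequence. */
static demoSeq demoSplit(demoSeq s, unsigned remaining, unsigned minMatch)
{
    if (remaining >= s.litLength + s.matchLength) return s;    /* fits entirely */
    if (remaining <= s.litLength) { s.offset = 0; return s; }  /* rest is literals */
    s.matchLength = remaining - s.litLength;                   /* cut the match short */
    if (s.matchLength < minMatch) s.offset = 0;                /* too short: emit literals */
    return s;
}

int main(void)
{
    demoSeq const s = { 10, 40, 1234 };
    demoSeq const cut = demoSplit(s, 25, 4);
    /* prints "lit=10 match=15 offset=1234" : 15 match bytes stay in this block */
    printf("lit=%u match=%u offset=%u\n", cut.litLength, cut.matchLength, cut.offset);
    return 0;
}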

size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize,
int const extDict)
{
unsigned const minMatch = cParams->searchLength;
ZSTD_blockCompressor const blockCompressor =
ZSTD_selectBlockCompressor(cParams->strategy, extDict);
BYTE const* const base = ms->window.base;
/* Input bounds */
BYTE const* const istart = (BYTE const*)src;
BYTE const* const iend = istart + srcSize;
/* Input positions */
BYTE const* ip = istart;

assert(rawSeqStore->pos <= rawSeqStore->size);
assert(rawSeqStore->size <= rawSeqStore->capacity);
/* Loop through each sequence and apply the block compressor to the lits */
while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
/* maybeSplitSequence updates rawSeqStore->pos */
rawSeq const sequence = maybeSplitSequence(rawSeqStore,
(U32)(iend - ip), minMatch);
int i;
/* End signal */
if (sequence.offset == 0)
break;

assert(sequence.offset <= (1U << cParams->windowLog));
assert(ip + sequence.litLength + sequence.matchLength <= iend);

/* Fill tables for block compressor */
ZSTD_ldm_limitTableUpdate(ms, ip);
ZSTD_ldm_fillFastTables(ms, cParams, ip);
/* Run the block compressor */
{
size_t const newLitLength =
blockCompressor(ms, seqStore, rep, cParams, ip,
sequence.litLength);
ip += sequence.litLength;
ms->nextToUpdate = (U32)(ip - base);
/* Update the repcodes */
for (i = ZSTD_REP_NUM - 1; i > 0; i--)
rep[i] = rep[i-1];
rep[0] = sequence.offset;
/* Store the sequence */
ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength,
sequence.offset + ZSTD_REP_MOVE,
sequence.matchLength - MINMATCH);
ip += sequence.matchLength;
}
}
/* Fill the tables for the block compressor */
ZSTD_ldm_limitTableUpdate(ms, ip);
ZSTD_ldm_fillFastTables(ms, cParams, ip);
/* Compress the last literals */
{
size_t const lastLiterals = blockCompressor(ms, seqStore, rep, cParams,
ip, iend - ip);
ms->nextToUpdate = (U32)(iend - base);
return lastLiterals;
}
}
@ -22,32 +22,71 @@ extern "C" {
***************************************/

#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_DEFAULTMAX
#define ZSTD_LDM_HASHEVERYLOG_NOTSET 9999

/** ZSTD_compressBlock_ldm_generic() :
/**
 * ZSTD_ldm_generateSequences():
 *
 * This is a block compressor intended for long distance matching.
 * Generates the sequences using the long distance match finder.
 * Generates long range matching sequences in `sequences`, which parse a prefix
 * of the source. `sequences` must be large enough to store every sequence,
 * which can be checked with `ZSTD_ldm_getMaxNbSeq()`.
 * @returns 0 or an error code.
 *
 * The function searches for matches of length at least
 * ldmParams.minMatchLength using a hash table in cctx->ldmState.
 * Matches can be at a distance of up to cParams.windowLog.
 *
 * Upon finding a match, the unmatched literals are compressed using a
 * ZSTD_blockCompressor (depending on the strategy in the compression
 * parameters), which stores the matched sequences. The "long distance"
 * match is then stored with the remaining literals from the
 * ZSTD_blockCompressor. */
size_t ZSTD_compressBlock_ldm(ZSTD_CCtx* cctx, const void* src, size_t srcSize);
size_t ZSTD_compressBlock_ldm_extDict(ZSTD_CCtx* ctx,
const void* src, size_t srcSize);
 * NOTE: The user must have called ZSTD_window_update() for all of the input
 * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks.
 * NOTE: This function returns an error if it runs out of space to store
 * sequences.
 */
size_t ZSTD_ldm_generateSequences(
ldmState_t* ldms, rawSeqStore_t* sequences,
ldmParams_t const* params, void const* src, size_t srcSize);
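A minimal usage sketch for the sequence-generation half, assuming the caller already owns an initialized ldmState_t and has advanced its window with ZSTD_window_update() as the note above requires. All of the types involved are zstd-internal, the helper name ldm_collect_sequences and the seqBuf argument are invented for the sketch, and error handling is abbreviated; capacity sizing follows ZSTD_ldm_getMaxNbSeq() as documented above.

/* Sketch only: ldmState_t, rawSeqStore_t and rawSeq come from the internal
 * headers above; this is not a public API. */
size_t ldm_collect_sequences(ldmState_t* ldms, ldmParams_t const* params,
                             rawSeq* seqBuf, size_t seqCapacity,
                             void const* src, size_t srcSize)
{
    rawSeqStore_t store;
    store.seq = seqBuf;
    store.pos = 0;
    store.size = 0;
    store.capacity = seqCapacity;  /* should be >= ZSTD_ldm_getMaxNbSeq(*params, srcSize) */

    /* The caller must already have called ZSTD_window_update() for [src, src+srcSize). */
    {   size_t const err = ZSTD_ldm_generateSequences(ldms, &store, params, src, srcSize);
        if (ZSTD_isError(err)) return err;  /* e.g. out of sequence storage */
    }
    return store.size;  /* number of raw sequences produced */
}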

/**
 * ZSTD_ldm_blockCompress():
 *
 * Compresses a block using the predefined sequences, along with a secondary
 * block compressor. The literals section of every sequence is passed to the
 * secondary block compressor, and those sequences are interspersed with the
 * predefined sequences. Returns the length of the last literals.
 * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed.
 * `rawSeqStore.seq` may also be updated to split the last sequence between two
 * blocks.
 * @return The length of the last literals.
 *
 * NOTE: The source must be at most the maximum block size, but the predefined
 * sequences can be any size, and may be longer than the block. In the case that
 * they are longer than the block, the last sequences may need to be split into
 * two. We handle that case correctly, and update `rawSeqStore` appropriately.
 * NOTE: This function does not return any errors.
 */
size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
ZSTD_compressionParameters const* cParams,
void const* src, size_t srcSize,
int const extDict);
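A corresponding sketch for the consuming half. It only wraps the call documented above; ms, seqStore, rep and cParams are assumed to be the caller's already-initialized match state, sequence store, repcode array and compression parameters, and the wrapper name is invented for the sketch.

/* Sketch only: internal types throughout, not a public API.  Compresses one
 * block with the pre-computed LDM sequences and reports how many trailing
 * literal bytes the caller still has to emit for this block. */
static size_t ldm_compress_one_block(rawSeqStore_t* rawSeqStore,
                                     ZSTD_matchState_t* ms, seqStore_t* seqStore,
                                     U32 rep[ZSTD_REP_NUM],
                                     ZSTD_compressionParameters const* cParams,
                                     void const* blockStart, size_t blockSize)
{
    /* rawSeqStore->pos advances past every sequence consumed here; a sequence
     * straddling the block boundary is split, as documented above. */
    return ZSTD_ldm_blockCompress(rawSeqStore, ms, seqStore, rep, cParams,
                                  blockStart, blockSize, 0 /* extDict */);
}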

/**
 * ZSTD_ldm_skipSequences():
 *
 * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`.
 * Avoids emitting matches less than `minMatch` bytes.
 * Must be called for data with is not passed to ZSTD_ldm_blockCompress().
 */
void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
U32 const minMatch);

/** ZSTD_ldm_initializeParameters() :
 *  Initialize the long distance matching parameters to their default values. */
size_t ZSTD_ldm_initializeParameters(ldmParams_t* params, U32 enableLdm);

/** ZSTD_ldm_getTableSize() :
 *  Estimate the space needed for long distance matching tables. */
size_t ZSTD_ldm_getTableSize(U32 hashLog, U32 bucketSizeLog);
 *  Estimate the space needed for long distance matching tables or 0 if LDM is
 *  disabled.
 */
size_t ZSTD_ldm_getTableSize(ldmParams_t params);

/** ZSTD_ldm_getSeqSpace() :
 *  Return an upper bound on the number of sequences that can be produced by
 *  the long distance matcher, or 0 if LDM is disabled.
 */
size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);

/** ZSTD_ldm_getTableSize() :
 *  Return prime8bytes^(minMatchLength-1) */

@ -58,8 +97,12 @@ U64 ZSTD_ldm_getHashPower(U32 minMatchLength);
 *  windowLog and params->hashLog.
 *
 *  Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
 *  params->hashLog if it is not). */
void ZSTD_ldm_adjustParameters(ldmParams_t* params, U32 windowLog);
 *  params->hashLog if it is not).
 *
 *  Ensures that the minMatchLength >= targetLength during optimal parsing.
 */
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
ZSTD_compressionParameters const* cParams);

#if defined (__cplusplus)
}
@ -10,7 +10,6 @@

#include "zstd_compress_internal.h"
#include "zstd_opt.h"
#include "zstd_lazy.h"   /* ZSTD_updateTree, ZSTD_updateTree_extDict */


#define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats. Also used for matchSum (?) */

@ -244,14 +243,15 @@ MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)

/* Update hashTable3 up to ip (excluded)
   Assumption : always within prefix (i.e. not within extDict) */
static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_CCtx* const cctx, const BYTE* const ip)
static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms, const BYTE* const ip)
{
U32* const hashTable3 = cctx->hashTable3;
U32 const hashLog3 = cctx->hashLog3;
const BYTE* const base = cctx->base;
U32 idx = cctx->nextToUpdate3;
U32 const target = cctx->nextToUpdate3 = (U32)(ip - base);
U32* const hashTable3 = ms->hashTable3;
U32 const hashLog3 = ms->hashLog3;
const BYTE* const base = ms->window.base;
U32 idx = ms->nextToUpdate3;
U32 const target = ms->nextToUpdate3 = (U32)(ip - base);
size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
assert(hashLog3 > 0);

while(idx < target) {
hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;

@ -265,36 +265,173 @@ static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_CCtx* const cctx, const BYTE*

/*-*************************************
*  Binary Tree search
***************************************/
/** ZSTD_insertBt1() : add one or multiple positions to tree.
 *  ip : assumed <= iend-8 .
 * @return : nb of positions added */
static U32 ZSTD_insertBt1(
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
const BYTE* const ip, const BYTE* const iend,
U32 const mls, U32 const extDict)
{
U32* const hashTable = ms->hashTable;
U32 const hashLog = cParams->hashLog;
size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
U32* const bt = ms->chainTable;
U32 const btLog = cParams->chainLog - 1;
U32 const btMask = (1 << btLog) - 1;
U32 matchIndex = hashTable[h];
size_t commonLengthSmaller=0, commonLengthLarger=0;
const BYTE* const base = ms->window.base;
const BYTE* const dictBase = ms->window.dictBase;
const U32 dictLimit = ms->window.dictLimit;
const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const prefixStart = base + dictLimit;
const BYTE* match;
const U32 current = (U32)(ip-base);
const U32 btLow = btMask >= current ? 0 : current - btMask;
U32* smallerPtr = bt + 2*(current&btMask);
U32* largerPtr = smallerPtr + 1;
U32 dummy32;   /* to be nullified at the end */
U32 const windowLow = ms->window.lowLimit;
U32 matchEndIdx = current+8+1;
size_t bestLength = 8;
U32 nbCompares = 1U << cParams->searchLog;
#ifdef ZSTD_C_PREDICT
U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0);
U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1);
predictedSmall += (predictedSmall>0);
predictedLarge += (predictedLarge>0);
#endif /* ZSTD_C_PREDICT */

DEBUGLOG(8, "ZSTD_insertBt1 (%u)", current);

assert(ip <= iend-8);   /* required for h calculation */
hashTable[h] = current;   /* Update Hash Table */

while (nbCompares-- && (matchIndex > windowLow)) {
U32* const nextPtr = bt + 2*(matchIndex & btMask);
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
assert(matchIndex < current);

#ifdef ZSTD_C_PREDICT   /* note : can create issues when hlog small <= 11 */
const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */
if (matchIndex == predictedSmall) {
/* no need to check length, result known */
*smallerPtr = matchIndex;
if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
predictedSmall = predictPtr[1] + (predictPtr[1]>0);
continue;
}
if (matchIndex == predictedLarge) {
*largerPtr = matchIndex;
if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
largerPtr = nextPtr;
matchIndex = nextPtr[0];
predictedLarge = predictPtr[0] + (predictPtr[0]>0);
continue;
}
#endif

if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if extDict is incorrectly set to 0 */
match = base + matchIndex;
matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
} else {
match = dictBase + matchIndex;
matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
if (matchIndex+matchLength >= dictLimit)
match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
}

if (matchLength > bestLength) {
bestLength = matchLength;
if (matchLength > matchEndIdx - matchIndex)
matchEndIdx = matchIndex + (U32)matchLength;
}

if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
break;   /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
}

if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
/* match is smaller than current */
*smallerPtr = matchIndex;             /* update smaller idx */
commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }            /* beyond tree size, stop searching */
smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
} else {
/* match is larger than current */
*largerPtr = matchIndex;
commonLengthLarger = matchLength;
if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
largerPtr = nextPtr;
matchIndex = nextPtr[0];
} }

*smallerPtr = *largerPtr = 0;
if (bestLength > 384) return MIN(192, (U32)(bestLength - 384));   /* speed optimization */
assert(matchEndIdx > current + 8);
return matchEndIdx - (current + 8);
}

FORCE_INLINE_TEMPLATE
void ZSTD_updateTree_internal(
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
const BYTE* const ip, const BYTE* const iend,
const U32 mls, const U32 extDict)
{
const BYTE* const base = ms->window.base;
U32 const target = (U32)(ip - base);
U32 idx = ms->nextToUpdate;
DEBUGLOG(7, "ZSTD_updateTree_internal, from %u to %u  (extDict:%u)",
idx, target, extDict);

while(idx < target)
idx += ZSTD_insertBt1(ms, cParams, base+idx, iend, mls, extDict);
ms->nextToUpdate = target;
}

void ZSTD_updateTree(
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
const BYTE* ip, const BYTE* iend)
{
ZSTD_updateTree_internal(ms, cParams, ip, iend, cParams->searchLength, 0 /*extDict*/);
}

FORCE_INLINE_TEMPLATE
U32 ZSTD_insertBtAndGetAllMatches (
ZSTD_CCtx* zc,
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
const BYTE* const ip, const BYTE* const iLimit, int const extDict,
U32 nbCompares, U32 const mls, U32 const sufficient_len,
U32 rep[ZSTD_REP_NUM], U32 const ll0,
ZSTD_match_t* matches, const U32 lengthToBeat)
ZSTD_match_t* matches, const U32 lengthToBeat, U32 const mls /* template */)
{
const BYTE* const base = zc->base;
U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
const BYTE* const base = ms->window.base;
U32 const current = (U32)(ip-base);
U32 const hashLog = zc->appliedParams.cParams.hashLog;
U32 const hashLog = cParams->hashLog;
U32 const minMatch = (mls==3) ? 3 : 4;
U32* const hashTable = zc->hashTable;
U32* const hashTable = ms->hashTable;
size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
U32 matchIndex  = hashTable[h];
U32* const bt   = zc->chainTable;
U32 const btLog = zc->appliedParams.cParams.chainLog - 1;
U32* const bt   = ms->chainTable;
U32 const btLog = cParams->chainLog - 1;
U32 const btMask= (1U << btLog) - 1;
size_t commonLengthSmaller=0, commonLengthLarger=0;
const BYTE* const dictBase = zc->dictBase;
U32 const dictLimit = zc->dictLimit;
const BYTE* const dictBase = ms->window.dictBase;
U32 const dictLimit = ms->window.dictLimit;
const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const prefixStart = base + dictLimit;
U32 const btLow = btMask >= current ? 0 : current - btMask;
U32 const windowLow = zc->lowLimit;
U32 const windowLow = ms->window.lowLimit;
U32* smallerPtr = bt + 2*(current&btMask);
U32* largerPtr  = bt + 2*(current&btMask) + 1;
U32 matchEndIdx = current+8+1;   /* farthest referenced position of any match => detects repetitive patterns */
U32 dummy32;   /* to be nullified at the end */
U32 mnum = 0;
U32 nbCompares = 1U << cParams->searchLog;

size_t bestLength = lengthToBeat-1;
DEBUGLOG(7, "ZSTD_insertBtAndGetAllMatches");

@ -335,7 +472,7 @@ U32 ZSTD_insertBtAndGetAllMatches (

/* HC3 match finder */
if ((mls == 3) /*static*/ && (bestLength < mls)) {
U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3 (zc, ip);
U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, ip);
if ((matchIndex3 > windowLow)
& (current - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
size_t mlen;

@ -359,7 +496,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
mnum = 1;
if ( (mlen > sufficient_len) |
(ip+mlen == iLimit) ) {  /* best possible length */
zc->nextToUpdate = current+1;  /* skip insertion */
ms->nextToUpdate = current+1;  /* skip insertion */
return 1;
} } } }

@ -416,30 +553,29 @@ U32 ZSTD_insertBtAndGetAllMatches (
*smallerPtr = *largerPtr = 0;

assert(matchEndIdx > current+8);
zc->nextToUpdate = matchEndIdx - 8;  /* skip repetitive patterns */
ms->nextToUpdate = matchEndIdx - 8;  /* skip repetitive patterns */
return mnum;
}


FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
ZSTD_CCtx* zc,   /* Index table will be updated */
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
const BYTE* ip, const BYTE* const iHighLimit, int const extDict,
U32 const maxNbAttempts, U32 const matchLengthSearch, U32 const sufficient_len,
U32 rep[ZSTD_REP_NUM], U32 const ll0,
ZSTD_match_t* matches, U32 const lengthToBeat)
{
U32 const matchLengthSearch = cParams->searchLength;
DEBUGLOG(7, "ZSTD_BtGetAllMatches");
if (ip < zc->base + zc->nextToUpdate) return 0;   /* skipped area */
if (extDict) ZSTD_updateTree_extDict(zc, ip, iHighLimit, maxNbAttempts, matchLengthSearch);
else ZSTD_updateTree(zc, ip, iHighLimit, maxNbAttempts, matchLengthSearch);
if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
ZSTD_updateTree_internal(ms, cParams, ip, iHighLimit, matchLengthSearch, extDict);
switch(matchLengthSearch)
{
case 3 : return ZSTD_insertBtAndGetAllMatches(zc, ip, iHighLimit, extDict, maxNbAttempts, 3, sufficient_len, rep, ll0, matches, lengthToBeat);
case 3 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 3);
default :
case 4 : return ZSTD_insertBtAndGetAllMatches(zc, ip, iHighLimit, extDict, maxNbAttempts, 4, sufficient_len, rep, ll0, matches, lengthToBeat);
case 5 : return ZSTD_insertBtAndGetAllMatches(zc, ip, iHighLimit, extDict, maxNbAttempts, 5, sufficient_len, rep, ll0, matches, lengthToBeat);
case 4 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 4);
case 5 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 5);
case 7 :
case 6 : return ZSTD_insertBtAndGetAllMatches(zc, ip, iHighLimit, extDict, maxNbAttempts, 6, sufficient_len, rep, ll0, matches, lengthToBeat);
case 6 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 6);
}
}


@ -527,36 +663,33 @@ static int ZSTD_literalsContribution_cached(
}

FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
size_t ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,seqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
ZSTD_compressionParameters const* cParams,
const void* src, size_t srcSize,
const int optLevel, const int extDict)
{
seqStore_t* const seqStorePtr = &(ctx->seqStore);
optState_t* const optStatePtr = &(ctx->optState);
optState_t* const optStatePtr = &ms->opt;
const BYTE* const istart = (const BYTE*)src;
const BYTE* ip = istart;
const BYTE* anchor = istart;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - 8;
const BYTE* const base = ctx->base;
const BYTE* const prefixStart = base + ctx->dictLimit;
const BYTE* const base = ms->window.base;
const BYTE* const prefixStart = base + ms->window.dictLimit;

U32 const maxSearches = 1U << ctx->appliedParams.cParams.searchLog;
U32 const sufficient_len = MIN(ctx->appliedParams.cParams.targetLength, ZSTD_OPT_NUM -1);
U32 const mls = ctx->appliedParams.cParams.searchLength;
U32 const minMatch = (ctx->appliedParams.cParams.searchLength == 3) ? 3 : 4;
U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
U32 const minMatch = (cParams->searchLength == 3) ? 3 : 4;

ZSTD_optimal_t* const opt = optStatePtr->priceTable;
ZSTD_match_t* const matches = optStatePtr->matchTable;
cachedLiteralPrice_t cachedLitPrice;
U32 rep[ZSTD_REP_NUM];

/* init */
DEBUGLOG(5, "ZSTD_compressBlock_opt_generic");
ctx->nextToUpdate3 = ctx->nextToUpdate;
ms->nextToUpdate3 = ms->nextToUpdate;
ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize);
ip += (ip==prefixStart);
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=seqStorePtr->rep[i]; }
memset(&cachedLitPrice, 0, sizeof(cachedLitPrice));

/* Match Loop */

@ -567,7 +700,7 @@ size_t ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
/* find first match */
{   U32 const litlen = (U32)(ip - anchor);
U32 const ll0 = !litlen;
U32 const nbMatches = ZSTD_BtGetAllMatches(ctx, ip, iend, extDict, maxSearches, mls, sufficient_len, rep, ll0, matches, minMatch);
U32 const nbMatches = ZSTD_BtGetAllMatches(ms, cParams, ip, iend, extDict, rep, ll0, matches, minMatch);
if (!nbMatches) { ip++; continue; }

/* initialize opt[0] */

@ -653,7 +786,7 @@ size_t ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
U32 const litlen = (opt[cur].mlen == 1) ? opt[cur].litlen : 0;
U32 const previousPrice = (cur > litlen) ? opt[cur-litlen].price : 0;
U32 const basePrice = previousPrice + ZSTD_fullLiteralsCost(inr-litlen, litlen, optStatePtr);
U32 const nbMatches = ZSTD_BtGetAllMatches(ctx, inr, iend, extDict, maxSearches, mls, sufficient_len, opt[cur].rep, ll0, matches, minMatch);
U32 const nbMatches = ZSTD_BtGetAllMatches(ms, cParams, inr, iend, extDict, opt[cur].rep, ll0, matches, minMatch);
U32 matchNb;
if (!nbMatches) continue;


@ -749,37 +882,42 @@ _shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */
}

ZSTD_updateStats(optStatePtr, llen, anchor, offset, mlen);
ZSTD_storeSeq(seqStorePtr, llen, anchor, offset, mlen-MINMATCH);
ZSTD_storeSeq(seqStore, llen, anchor, offset, mlen-MINMATCH);
anchor = ip;
}   }
ZSTD_setLog2Prices(optStatePtr);
}   /* while (ip < ilimit) */

/* Save reps for next block */
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) seqStorePtr->repToConfirm[i] = rep[i]; }

/* Return the last literals size */
return iend - anchor;
}


size_t ZSTD_compressBlock_btopt(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
size_t ZSTD_compressBlock_btopt(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_compressBlock_btopt");
return ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0 /*optLevel*/, 0 /*extDict*/);
return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 0 /*optLevel*/, 0 /*extDict*/);
}

size_t ZSTD_compressBlock_btultra(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
size_t ZSTD_compressBlock_btultra(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
return ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 2 /*optLevel*/, 0 /*extDict*/);
return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 2 /*optLevel*/, 0 /*extDict*/);
}

size_t ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
size_t ZSTD_compressBlock_btopt_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
return ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0 /*optLevel*/, 1 /*extDict*/);
return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 0 /*optLevel*/, 1 /*extDict*/);
}

size_t ZSTD_compressBlock_btultra_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
size_t ZSTD_compressBlock_btultra_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
return ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 2 /*optLevel*/, 1 /*extDict*/);
return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 2 /*optLevel*/, 1 /*extDict*/);
}
@ -15,13 +15,25 @@
extern "C" {
#endif

#include "zstd.h"   /* ZSTD_CCtx, size_t */
#include "zstd_compress_internal.h"

size_t ZSTD_compressBlock_btopt(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
void ZSTD_updateTree(
ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
const BYTE* ip, const BYTE* iend);  /* used in ZSTD_loadDictionaryContent() */

size_t ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
size_t ZSTD_compressBlock_btopt(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);

size_t ZSTD_compressBlock_btopt_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);

#if defined (__cplusplus)
}
File diff suppressed because it is too large
@ -30,15 +30,15 @@

/* ===   Memory management   === */
typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbThreads);
ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbThreads,
ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers);
ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers,
ZSTD_customMem cMem);
ZSTDLIB_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);

ZSTDLIB_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);


/* ===   Simple buffer-to-butter one-pass function   === */
/* ===   Simple one-pass compression function   === */

ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
void* dst, size_t dstCapacity,

@ -50,7 +50,7 @@ ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
/* ===   Streaming functions   === */

ZSTDLIB_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
ZSTDLIB_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize);  /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it may change in the future, to mean "empty" */
ZSTDLIB_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize);  /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */

ZSTDLIB_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);


@ -68,7 +68,7 @@ ZSTDLIB_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict,
ZSTD_parameters const params,
ZSTD_parameters params,
unsigned overlapLog);

ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,

@ -85,7 +85,7 @@ ZSTDLIB_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
 * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
typedef enum {
ZSTDMT_p_jobSize,     /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */
ZSTDMT_p_overlapSectionLog /* Each job may reload a part of previous job to enhance compressionr ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window */
ZSTDMT_p_overlapSectionLog /* Each job may reload a part of previous job to enhance compressionr ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
} ZSTDMT_parameter;

/* ZSTDMT_setMTCtxParameter() :

@ -97,30 +97,46 @@ ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter


/*! ZSTDMT_compressStream_generic() :
 *  Combines ZSTDMT_compressStream() with ZSTDMT_flushStream() or ZSTDMT_endStream()
 *  Combines ZSTDMT_compressStream() with optional ZSTDMT_flushStream() or ZSTDMT_endStream()
 *  depending on flush directive.
 * @return : minimum amount of data still to be flushed
 *           0 if fully flushed
 *           or an error code */
 *           or an error code
 *  note : needs to be init using any ZSTD_initCStream*() variant */
ZSTDLIB_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
ZSTD_EndDirective endOp);
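A hedged usage sketch of this streaming entry point, based only on the declarations visible in this header: two workers and level 3 are arbitrary choices, ZSTD_STATIC_LINKING_ONLY is assumed to be needed for ZSTD_EndDirective, and error handling is trimmed to the final check.

#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"              /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTD_e_end, ZSTD_isError */
#include "zstdmt_compress.h"   /* the declarations above */

/* Sketch: compress `src` into `dst` in one pass through the streaming API. */
static size_t mt_compress_once(void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize)
{
    ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(2 /* nbWorkers */);
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    ZSTD_inBuffer  in  = { src, srcSize, 0 };
    size_t remaining;

    ZSTDMT_initCStream(mtctx, 3 /* compression level */);
    do {
        /* ZSTD_e_end both consumes input and requests the frame epilogue;
         * loop until nothing is left to flush. */
        remaining = ZSTDMT_compressStream_generic(mtctx, &out, &in, ZSTD_e_end);
    } while (remaining != 0 && !ZSTD_isError(remaining));

    ZSTDMT_freeCCtx(mtctx);
    return ZSTD_isError(remaining) ? remaining : out.pos;   /* compressed size */
}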


/* ===   Private definitions; never ever use directly  === */
/* ========================================================
 * ===   Private interface, for use by ZSTD_compress.c   ===
 * ===   Not exposed in libzstd. Never invoke directly   ===
 * ======================================================== */

size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, unsigned value);

/* ZSTDMT_CCtxParam_setNbThreads()
 * Set nbThreads, and clamp it correctly,
 * also reset jobSize and overlapLog */
size_t ZSTDMT_CCtxParam_setNbThreads(ZSTD_CCtx_params* params, unsigned nbThreads);
/* ZSTDMT_CCtxParam_setNbWorkers()
 * Set nbWorkers, and clamp it.
 * Also reset jobSize and overlapLog */
size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers);

/* ZSTDMT_getNbThreads():
/*! ZSTDMT_updateCParams_whileCompressing() :
 *  Updates only a selected set of compression parameters, to remain compatible with current frame.
 *  New parameters will be applied to next compression job. */
void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams);

/* ZSTDMT_getNbWorkers():
 * @return nb threads currently active in mtctx.
 * mtctx must be valid */
size_t ZSTDMT_getNbThreads(const ZSTDMT_CCtx* mtctx);
unsigned ZSTDMT_getNbWorkers(const ZSTDMT_CCtx* mtctx);

/* ZSTDMT_getFrameProgression():
 * tells how much data has been consumed (input) and produced (output) for current frame.
 * able to count progression inside worker threads.
 */
ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx);


/*! ZSTDMT_initCStream_internal() :
 *  Private use only. Init streaming operation.

@ -128,7 +144,7 @@ size_t ZSTDMT_getNbThreads(const ZSTDMT_CCtx* mtctx);
 *  must receive dict, or cdict, or none, but not both.
 * @return : 0, or an error code */
size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
const void* dict, size_t dictSize, ZSTD_dictMode_e dictMode,
const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
const ZSTD_CDict* cdict,
ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);
@ -49,18 +49,19 @@
****************************************************************/
#define HUF_isError ERR_isError
#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
#define CHECK_F(f) { size_t const err_ = (f); if (HUF_isError(err_)) return err_; }


/* **************************************************************
*  Byte alignment for workSpace management
****************************************************************/
#define HUF_ALIGN(x, a)         HUF_ALIGN_MASK((x), (a) - 1)
#define HUF_ALIGN(x, a)     HUF_ALIGN_MASK((x), (a) - 1)
#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))


/*-***************************/
/*  generic DTableDesc       */
/*-***************************/

typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;

static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)

@ -74,7 +75,6 @@ static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
/*-***************************/
/*  single-symbol decoding   */
/*-***************************/

typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2;   /* single-symbol decoding */

size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)

@ -94,10 +94,7 @@ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize
huffWeight = (BYTE *)((U32 *)workSpace + spaceUsed32);
spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;

if ((spaceUsed32 << 2) > wkspSize)
return ERROR(tableLog_tooLarge);
workSpace = (U32 *)workSpace + spaceUsed32;
wkspSize -= (spaceUsed32 << 2);
if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge);

HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
/* memset(huffWeight, 0, sizeof(huffWeight)); */   /* is not necessary, even though some analyzer complain ... */

@ -144,8 +141,10 @@ size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize)
workSpace, sizeof(workSpace));
}

typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4;  /* double-symbols decoding */

static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
FORCE_INLINE_TEMPLATE BYTE
HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
{
size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
BYTE const c = dt[val].byte;

@ -156,7 +155,7 @@ static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, con
#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
*ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)

#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr)  \
if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)


@ -164,30 +163,33 @@ static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, con
if (MEM_64bits()) \
HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)

HINT_INLINE size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
HINT_INLINE size_t
HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
{
BYTE* const pStart = p;

/* up to 4 symbols at a time */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4)) {
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
}

/* closer to the end */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
/* [0-3] symbols remaining */
if (MEM_32bits())
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);

/* no more data to retrieve from bitstream, hence no need to reload */
/* no more data to retrieve from bitstream, no need to reload */
while (p < pEnd)
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);

return pEnd-pStart;
}

static size_t HUF_decompress1X2_usingDTable_internal(
FORCE_INLINE_TEMPLATE size_t
HUF_decompress1X2_usingDTable_internal_body(
void* dst,  size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)

@ -200,58 +202,17 @@ static size_t HUF_decompress1X2_usingDTable_internal(
DTableDesc const dtd = HUF_getDTableDesc(DTable);
U32 const dtLog = dtd.tableLog;

{ size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
if (HUF_isError(errorCode)) return errorCode; }
CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );

HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog);

/* check */
if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);

return dstSize;
}

size_t HUF_decompress1X2_usingDTable(
void* dst,  size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
DTableDesc dtd = HUF_getDTableDesc(DTable);
if (dtd.tableType != 0) return ERROR(GENERIC);
return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}

size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
void* workSpace, size_t wkspSize)
{
const BYTE* ip = (const BYTE*) cSrc;

size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;

return HUF_decompress1X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
}


size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize)
{
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
|
||||
return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
|
||||
workSpace, sizeof(workSpace));
|
||||
}
|
||||
|
||||
size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
|
||||
{
|
||||
HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
|
||||
return HUF_decompress1X2_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
|
||||
}
|
||||
|
||||
|
||||
static size_t HUF_decompress4X2_usingDTable_internal(
|
||||
FORCE_INLINE_TEMPLATE size_t
|
||||
HUF_decompress4X2_usingDTable_internal_body(
|
||||
void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize,
|
||||
const HUF_DTable* DTable)
|
||||
|
@ -286,23 +247,19 @@ static size_t HUF_decompress4X2_usingDTable_internal(
|
|||
BYTE* op2 = opStart2;
|
||||
BYTE* op3 = opStart3;
|
||||
BYTE* op4 = opStart4;
|
||||
U32 endSignal;
|
||||
U32 endSignal = BIT_DStream_unfinished;
|
||||
DTableDesc const dtd = HUF_getDTableDesc(DTable);
|
||||
U32 const dtLog = dtd.tableLog;
|
||||
|
||||
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
|
||||
{ size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
|
||||
if (HUF_isError(errorCode)) return errorCode; }
|
||||
{ size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
|
||||
if (HUF_isError(errorCode)) return errorCode; }
|
||||
{ size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
|
||||
if (HUF_isError(errorCode)) return errorCode; }
|
||||
{ size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
|
||||
if (HUF_isError(errorCode)) return errorCode; }
|
||||
CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
|
||||
CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
|
||||
CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
|
||||
CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
|
||||
|
||||
/* 16-32 symbols per loop (4-8 symbols per stream) */
|
||||
/* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
|
||||
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
|
||||
for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) {
|
||||
while ( (endSignal==BIT_DStream_unfinished) && (op4<(oend-3)) ) {
|
||||
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
|
||||
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
|
||||
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
|
||||
|
@ -319,10 +276,15 @@ static size_t HUF_decompress4X2_usingDTable_internal(
|
|||
HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
|
||||
HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
|
||||
HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
|
||||
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
|
||||
BIT_reloadDStream(&bitD1);
|
||||
BIT_reloadDStream(&bitD2);
|
||||
BIT_reloadDStream(&bitD3);
|
||||
BIT_reloadDStream(&bitD4);
|
||||
}
|
||||
|
||||
/* check corruption */
|
||||
/* note : should not be necessary : op# advance in lock step, and we control op4.
|
||||
* but curiously, binary generated by gcc 7.2 & 7.3 with -mbmi2 runs faster when >=1 test is present */
|
||||
if (op1 > opStart2) return ERROR(corruption_detected);
|
||||
if (op2 > opStart3) return ERROR(corruption_detected);
|
||||
if (op3 > opStart4) return ERROR(corruption_detected);
|
||||
|
@ -335,8 +297,8 @@ static size_t HUF_decompress4X2_usingDTable_internal(
|
|||
HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
|
||||
|
||||
/* check */
|
||||
endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
|
||||
if (!endSignal) return ERROR(corruption_detected);
|
||||
{ U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
|
||||
if (!endCheck) return ERROR(corruption_detected); }
|
||||
|
||||
/* decoded size */
|
||||
return dstSize;
|
||||
|
@ -344,6 +306,279 @@ static size_t HUF_decompress4X2_usingDTable_internal(
|
|||
}
|
||||
|
||||
|
||||
FORCE_INLINE_TEMPLATE U32
|
||||
HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
|
||||
{
|
||||
size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
|
||||
memcpy(op, dt+val, 2);
|
||||
BIT_skipBits(DStream, dt[val].nbBits);
|
||||
return dt[val].length;
|
||||
}
|
||||
|
||||
FORCE_INLINE_TEMPLATE U32
|
||||
HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
|
||||
{
|
||||
size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
|
||||
memcpy(op, dt+val, 1);
|
||||
if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
|
||||
else {
|
||||
if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
|
||||
BIT_skipBits(DStream, dt[val].nbBits);
|
||||
if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
|
||||
/* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
|
||||
DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
|
||||
} }
|
||||
return 1;
|
||||
}
|
||||
|
||||
#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
|
||||
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
|
||||
|
||||
#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
|
||||
if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
|
||||
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
|
||||
|
||||
#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
|
||||
if (MEM_64bits()) \
|
||||
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
|
||||
|
||||
HINT_INLINE size_t
|
||||
HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
|
||||
const HUF_DEltX4* const dt, const U32 dtLog)
|
||||
{
|
||||
BYTE* const pStart = p;
|
||||
|
||||
/* up to 8 symbols at a time */
|
||||
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
|
||||
HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
|
||||
HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
|
||||
HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
|
||||
HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
|
||||
}
|
||||
|
||||
/* closer to end : up to 2 symbols at a time */
|
||||
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
|
||||
HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
|
||||
|
||||
while (p <= pEnd-2)
|
||||
HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
|
||||
|
||||
if (p < pEnd)
|
||||
p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
|
||||
|
||||
return p-pStart;
|
||||
}
|
||||
|
||||
FORCE_INLINE_TEMPLATE size_t
|
||||
HUF_decompress1X4_usingDTable_internal_body(
|
||||
void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize,
|
||||
const HUF_DTable* DTable)
|
||||
{
|
||||
BIT_DStream_t bitD;
|
||||
|
||||
/* Init */
|
||||
CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
|
||||
|
||||
/* decode */
|
||||
{ BYTE* const ostart = (BYTE*) dst;
|
||||
BYTE* const oend = ostart + dstSize;
|
||||
const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */
|
||||
const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr;
|
||||
DTableDesc const dtd = HUF_getDTableDesc(DTable);
|
||||
HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
|
||||
}
|
||||
|
||||
/* check */
|
||||
if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
|
||||
|
||||
/* decoded size */
|
||||
return dstSize;
|
||||
}
|
||||
|
||||
|
||||
FORCE_INLINE_TEMPLATE size_t
|
||||
HUF_decompress4X4_usingDTable_internal_body(
|
||||
void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize,
|
||||
const HUF_DTable* DTable)
|
||||
{
|
||||
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
|
||||
|
||||
{ const BYTE* const istart = (const BYTE*) cSrc;
|
||||
BYTE* const ostart = (BYTE*) dst;
|
||||
BYTE* const oend = ostart + dstSize;
|
||||
const void* const dtPtr = DTable+1;
|
||||
const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr;
|
||||
|
||||
/* Init */
|
||||
BIT_DStream_t bitD1;
|
||||
BIT_DStream_t bitD2;
|
||||
BIT_DStream_t bitD3;
|
||||
BIT_DStream_t bitD4;
|
||||
size_t const length1 = MEM_readLE16(istart);
|
||||
size_t const length2 = MEM_readLE16(istart+2);
|
||||
size_t const length3 = MEM_readLE16(istart+4);
|
||||
size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
|
||||
const BYTE* const istart1 = istart + 6; /* jumpTable */
|
||||
const BYTE* const istart2 = istart1 + length1;
|
||||
const BYTE* const istart3 = istart2 + length2;
|
||||
const BYTE* const istart4 = istart3 + length3;
|
||||
size_t const segmentSize = (dstSize+3) / 4;
|
||||
BYTE* const opStart2 = ostart + segmentSize;
|
||||
BYTE* const opStart3 = opStart2 + segmentSize;
|
||||
BYTE* const opStart4 = opStart3 + segmentSize;
|
||||
BYTE* op1 = ostart;
|
||||
BYTE* op2 = opStart2;
|
||||
BYTE* op3 = opStart3;
|
||||
BYTE* op4 = opStart4;
|
||||
U32 endSignal;
|
||||
DTableDesc const dtd = HUF_getDTableDesc(DTable);
|
||||
U32 const dtLog = dtd.tableLog;
|
||||
|
||||
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
|
||||
CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
|
||||
CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
|
||||
CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
|
||||
CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
|
||||
|
||||
/* 16-32 symbols per loop (4-8 symbols per stream) */
|
||||
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
|
||||
for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) {
|
||||
HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
|
||||
HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
|
||||
HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
|
||||
HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
|
||||
HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
|
||||
HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
|
||||
HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
|
||||
HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
|
||||
HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
|
||||
HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
|
||||
HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
|
||||
HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
|
||||
HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
|
||||
HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
|
||||
HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
|
||||
HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
|
||||
|
||||
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
|
||||
}
|
||||
|
||||
/* check corruption */
|
||||
if (op1 > opStart2) return ERROR(corruption_detected);
|
||||
if (op2 > opStart3) return ERROR(corruption_detected);
|
||||
if (op3 > opStart4) return ERROR(corruption_detected);
|
||||
/* note : op4 already verified within main loop */
|
||||
|
||||
/* finish bitStreams one by one */
|
||||
HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
|
||||
HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
|
||||
HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
|
||||
HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
|
||||
|
||||
/* check */
|
||||
{ U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
|
||||
if (!endCheck) return ERROR(corruption_detected); }
|
||||
|
||||
/* decoded size */
|
||||
return dstSize;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,
|
||||
const void *cSrc,
|
||||
size_t cSrcSize,
|
||||
const HUF_DTable *DTable);
|
||||
#if DYNAMIC_BMI2
|
||||
|
||||
#define X(fn) \
|
||||
\
|
||||
static size_t fn##_default( \
|
||||
void* dst, size_t dstSize, \
|
||||
const void* cSrc, size_t cSrcSize, \
|
||||
const HUF_DTable* DTable) \
|
||||
{ \
|
||||
return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
|
||||
} \
|
||||
\
|
||||
static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2( \
|
||||
void* dst, size_t dstSize, \
|
||||
const void* cSrc, size_t cSrcSize, \
|
||||
const HUF_DTable* DTable) \
|
||||
{ \
|
||||
return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
|
||||
} \
|
||||
\
|
||||
static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
|
||||
size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
|
||||
{ \
|
||||
if (bmi2) { \
|
||||
return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \
|
||||
} \
|
||||
return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
#define X(fn) \
|
||||
static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
|
||||
size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
|
||||
{ \
|
||||
(void)bmi2; \
|
||||
return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
X(HUF_decompress1X2_usingDTable_internal)
|
||||
X(HUF_decompress4X2_usingDTable_internal)
|
||||
X(HUF_decompress1X4_usingDTable_internal)
|
||||
X(HUF_decompress4X4_usingDTable_internal)
|
||||
|
||||
#undef X
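/* A hedged sketch of what X(fn) above expands to for a hypothetical function
 * name `demo`, when DYNAMIC_BMI2 is non-zero. The point of the pattern : the
 * same inlined body is compiled twice, once normally and once with the "bmi2"
 * target attribute, and a thin wrapper picks a version at run time. */
static size_t demo_body(void* dst, size_t dstSize,
                        const void* cSrc, size_t cSrcSize,
                        const HUF_DTable* DTable)
{   /* stand-in for the FORCE_INLINE_TEMPLATE *_body templates above */
    (void)dst; (void)dstSize; (void)cSrc; (void)cSrcSize; (void)DTable;
    return 0;
}

static size_t demo_default(void* dst, size_t dstSize,
                           const void* cSrc, size_t cSrcSize,
                           const HUF_DTable* DTable)
{   /* compiled with the translation unit's default instruction set */
    return demo_body(dst, dstSize, cSrc, cSrcSize, DTable);
}

static TARGET_ATTRIBUTE("bmi2") size_t demo_bmi2(void* dst, size_t dstSize,
                                                 const void* cSrc, size_t cSrcSize,
                                                 const HUF_DTable* DTable)
{   /* same body, but the compiler may emit BMI2 instructions here */
    return demo_body(dst, dstSize, cSrc, cSrcSize, DTable);
}

static size_t demo(void* dst, size_t dstSize, void const* cSrc,
                   size_t cSrcSize, HUF_DTable const* DTable, int bmi2)
{   /* run-time dispatch on the caller-provided bmi2 flag */
    return bmi2 ? demo_bmi2(dst, dstSize, cSrc, cSrcSize, DTable)
                : demo_default(dst, dstSize, cSrc, cSrcSize, DTable);
}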
|
||||
|
||||
|
||||
size_t HUF_decompress1X2_usingDTable(
|
||||
void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize,
|
||||
const HUF_DTable* DTable)
|
||||
{
|
||||
DTableDesc dtd = HUF_getDTableDesc(DTable);
|
||||
if (dtd.tableType != 0) return ERROR(GENERIC);
|
||||
return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
|
||||
}
|
||||
|
||||
size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize,
|
||||
void* workSpace, size_t wkspSize)
|
||||
{
|
||||
const BYTE* ip = (const BYTE*) cSrc;
|
||||
|
||||
size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
|
||||
if (HUF_isError(hSize)) return hSize;
|
||||
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
|
||||
ip += hSize; cSrcSize -= hSize;
|
||||
|
||||
return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
|
||||
}
|
||||
|
||||
|
||||
size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize)
|
||||
{
|
||||
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
|
||||
return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
|
||||
workSpace, sizeof(workSpace));
|
||||
}
|
||||
|
||||
size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
|
||||
{
|
||||
HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
|
||||
return HUF_decompress1X2_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
|
||||
}
|
||||
|
||||
size_t HUF_decompress4X2_usingDTable(
|
||||
void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize,
|
||||
|
@ -351,13 +586,12 @@ size_t HUF_decompress4X2_usingDTable(
|
|||
{
|
||||
DTableDesc dtd = HUF_getDTableDesc(DTable);
|
||||
if (dtd.tableType != 0) return ERROR(GENERIC);
|
||||
return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
|
||||
return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
|
||||
}
|
||||
|
||||
|
||||
size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
|
||||
static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize,
|
||||
void* workSpace, size_t wkspSize)
|
||||
void* workSpace, size_t wkspSize, int bmi2)
|
||||
{
|
||||
const BYTE* ip = (const BYTE*) cSrc;
|
||||
|
||||
|
@ -367,7 +601,14 @@ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
|
|||
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
|
||||
ip += hSize; cSrcSize -= hSize;
|
||||
|
||||
return HUF_decompress4X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, dctx);
|
||||
return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
|
||||
}
|
||||
|
||||
size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize,
|
||||
void* workSpace, size_t wkspSize)
|
||||
{
|
||||
return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
|
||||
}
|
||||
|
||||
|
||||
|
@ -387,8 +628,6 @@ size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cS
|
|||
/* *************************/
|
||||
/* double-symbols decoding */
|
||||
/* *************************/
|
||||
typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */
|
||||
|
||||
typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
|
||||
|
||||
/* HUF_fillDTableX4Level2() :
|
||||
|
@ -508,10 +747,7 @@ size_t HUF_readDTableX4_wksp(HUF_DTable* DTable, const void* src,
|
|||
weightList = (BYTE *)((U32 *)workSpace + spaceUsed32);
|
||||
spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
|
||||
|
||||
if ((spaceUsed32 << 2) > wkspSize)
|
||||
return ERROR(tableLog_tooLarge);
|
||||
workSpace = (U32 *)workSpace + spaceUsed32;
|
||||
wkspSize -= (spaceUsed32 << 2);
|
||||
if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge);
|
||||
|
||||
rankStart = rankStart0 + 1;
|
||||
memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));
|
||||
|
@ -588,95 +824,6 @@ size_t HUF_readDTableX4(HUF_DTable* DTable, const void* src, size_t srcSize)
|
|||
workSpace, sizeof(workSpace));
|
||||
}
|
||||
|
||||
static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
|
||||
{
|
||||
size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
|
||||
memcpy(op, dt+val, 2);
|
||||
BIT_skipBits(DStream, dt[val].nbBits);
|
||||
return dt[val].length;
|
||||
}
|
||||
|
||||
static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
|
||||
{
|
||||
size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
|
||||
memcpy(op, dt+val, 1);
|
||||
if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
|
||||
else {
|
||||
if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
|
||||
BIT_skipBits(DStream, dt[val].nbBits);
|
||||
if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
|
||||
/* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
|
||||
DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
|
||||
} }
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
|
||||
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
|
||||
|
||||
#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
|
||||
if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
|
||||
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
|
||||
|
||||
#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
|
||||
if (MEM_64bits()) \
|
||||
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
|
||||
|
||||
HINT_INLINE size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
|
||||
{
|
||||
BYTE* const pStart = p;
|
||||
|
||||
/* up to 8 symbols at a time */
|
||||
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
|
||||
HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
|
||||
HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
|
||||
HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
|
||||
HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
|
||||
}
|
||||
|
||||
/* closer to end : up to 2 symbols at a time */
|
||||
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
|
||||
HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
|
||||
|
||||
while (p <= pEnd-2)
|
||||
HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
|
||||
|
||||
if (p < pEnd)
|
||||
p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
|
||||
|
||||
return p-pStart;
|
||||
}
|
||||
|
||||
|
||||
static size_t HUF_decompress1X4_usingDTable_internal(
|
||||
void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize,
|
||||
const HUF_DTable* DTable)
|
||||
{
|
||||
BIT_DStream_t bitD;
|
||||
|
||||
/* Init */
|
||||
{ size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
|
||||
if (HUF_isError(errorCode)) return errorCode;
|
||||
}
|
||||
|
||||
/* decode */
|
||||
{ BYTE* const ostart = (BYTE*) dst;
|
||||
BYTE* const oend = ostart + dstSize;
|
||||
const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */
|
||||
const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr;
|
||||
DTableDesc const dtd = HUF_getDTableDesc(DTable);
|
||||
HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
|
||||
}
|
||||
|
||||
/* check */
|
||||
if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
|
||||
|
||||
/* decoded size */
|
||||
return dstSize;
|
||||
}
|
||||
|
||||
size_t HUF_decompress1X4_usingDTable(
|
||||
void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize,
|
||||
|
@ -684,7 +831,7 @@ size_t HUF_decompress1X4_usingDTable(
|
|||
{
|
||||
DTableDesc dtd = HUF_getDTableDesc(DTable);
|
||||
if (dtd.tableType != 1) return ERROR(GENERIC);
|
||||
return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
|
||||
return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
|
||||
}
|
||||
|
||||
size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
|
||||
|
@ -699,7 +846,7 @@ size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
|
|||
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
|
||||
ip += hSize; cSrcSize -= hSize;
|
||||
|
||||
return HUF_decompress1X4_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
|
||||
return HUF_decompress1X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
|
||||
}
|
||||
|
||||
|
||||
|
@ -717,99 +864,6 @@ size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cS
|
|||
return HUF_decompress1X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
|
||||
}
|
||||
|
||||
static size_t HUF_decompress4X4_usingDTable_internal(
|
||||
void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize,
|
||||
const HUF_DTable* DTable)
|
||||
{
|
||||
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
|
||||
|
||||
{ const BYTE* const istart = (const BYTE*) cSrc;
|
||||
BYTE* const ostart = (BYTE*) dst;
|
||||
BYTE* const oend = ostart + dstSize;
|
||||
const void* const dtPtr = DTable+1;
|
||||
const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr;
|
||||
|
||||
/* Init */
|
||||
BIT_DStream_t bitD1;
|
||||
BIT_DStream_t bitD2;
|
||||
BIT_DStream_t bitD3;
|
||||
BIT_DStream_t bitD4;
|
||||
size_t const length1 = MEM_readLE16(istart);
|
||||
size_t const length2 = MEM_readLE16(istart+2);
|
||||
size_t const length3 = MEM_readLE16(istart+4);
|
||||
size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
|
||||
const BYTE* const istart1 = istart + 6; /* jumpTable */
|
||||
const BYTE* const istart2 = istart1 + length1;
|
||||
const BYTE* const istart3 = istart2 + length2;
|
||||
const BYTE* const istart4 = istart3 + length3;
|
||||
size_t const segmentSize = (dstSize+3) / 4;
|
||||
BYTE* const opStart2 = ostart + segmentSize;
|
||||
BYTE* const opStart3 = opStart2 + segmentSize;
|
||||
BYTE* const opStart4 = opStart3 + segmentSize;
|
||||
BYTE* op1 = ostart;
|
||||
BYTE* op2 = opStart2;
|
||||
BYTE* op3 = opStart3;
|
||||
BYTE* op4 = opStart4;
|
||||
U32 endSignal;
|
||||
DTableDesc const dtd = HUF_getDTableDesc(DTable);
|
||||
U32 const dtLog = dtd.tableLog;
|
||||
|
||||
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
|
||||
{ size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
|
||||
if (HUF_isError(errorCode)) return errorCode; }
|
||||
{ size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
|
||||
if (HUF_isError(errorCode)) return errorCode; }
|
||||
{ size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
|
||||
if (HUF_isError(errorCode)) return errorCode; }
|
||||
{ size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
|
||||
if (HUF_isError(errorCode)) return errorCode; }
|
||||
|
||||
/* 16-32 symbols per loop (4-8 symbols per stream) */
|
||||
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
|
||||
for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) {
|
||||
HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
|
||||
HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
|
||||
HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
|
||||
HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
|
||||
HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
|
||||
HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
|
||||
HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
|
||||
HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
|
||||
HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
|
||||
HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
|
||||
HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
|
||||
HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
|
||||
HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
|
||||
HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
|
||||
HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
|
||||
HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
|
||||
|
||||
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
|
||||
}
|
||||
|
||||
/* check corruption */
|
||||
if (op1 > opStart2) return ERROR(corruption_detected);
|
||||
if (op2 > opStart3) return ERROR(corruption_detected);
|
||||
if (op3 > opStart4) return ERROR(corruption_detected);
|
||||
/* note : op4 already verified within main loop */
|
||||
|
||||
/* finish bitStreams one by one */
|
||||
HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
|
||||
HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
|
||||
HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
|
||||
HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
|
||||
|
||||
/* check */
|
||||
{ U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
|
||||
if (!endCheck) return ERROR(corruption_detected); }
|
||||
|
||||
/* decoded size */
|
||||
return dstSize;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
size_t HUF_decompress4X4_usingDTable(
|
||||
void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize,
|
||||
|
@ -817,13 +871,12 @@ size_t HUF_decompress4X4_usingDTable(
|
|||
{
|
||||
DTableDesc dtd = HUF_getDTableDesc(DTable);
|
||||
if (dtd.tableType != 1) return ERROR(GENERIC);
|
||||
return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
|
||||
return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
|
||||
}
|
||||
|
||||
|
||||
size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
|
||||
static size_t HUF_decompress4X4_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize,
|
||||
void* workSpace, size_t wkspSize)
|
||||
void* workSpace, size_t wkspSize, int bmi2)
|
||||
{
|
||||
const BYTE* ip = (const BYTE*) cSrc;
|
||||
|
||||
|
@ -833,7 +886,14 @@ size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
|
|||
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
|
||||
ip += hSize; cSrcSize -= hSize;
|
||||
|
||||
return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
|
||||
return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
|
||||
}
|
||||
|
||||
size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
|
||||
const void* cSrc, size_t cSrcSize,
|
||||
void* workSpace, size_t wkspSize)
|
||||
{
|
||||
return HUF_decompress4X4_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
|
||||
}
|
||||
|
||||
|
||||
|
@ -861,8 +921,8 @@ size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
|
|||
const HUF_DTable* DTable)
|
||||
{
|
||||
DTableDesc const dtd = HUF_getDTableDesc(DTable);
|
||||
return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
|
||||
HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
|
||||
return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
|
||||
HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
|
||||
}
|
||||
|
||||
size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
|
||||
|
@ -870,8 +930,8 @@ size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
|
|||
const HUF_DTable* DTable)
|
||||
{
|
||||
DTableDesc const dtd = HUF_getDTableDesc(DTable);
|
||||
return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
|
||||
HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
|
||||
return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
|
||||
HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
|
||||
}
|
||||
|
||||
|
||||
|
@ -898,21 +958,22 @@ static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, qu
|
|||
};
|
||||
|
||||
/** HUF_selectDecoder() :
|
||||
* Tells which decoder is likely to decode faster,
|
||||
* based on a set of pre-determined metrics.
|
||||
* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
|
||||
* Assumption : 0 < cSrcSize, dstSize <= 128 KB */
|
||||
* Tells which decoder is likely to decode faster,
|
||||
* based on a set of pre-computed metrics.
|
||||
* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
|
||||
* Assumption : 0 < dstSize <= 128 KB */
|
||||
U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
|
||||
{
|
||||
assert(dstSize > 0);
|
||||
assert(dstSize <= 128 KB);
|
||||
/* decoder timing evaluation */
|
||||
U32 const Q = cSrcSize >= dstSize ? 15 : (U32)(cSrcSize * 16 / dstSize); /* Q < 16 */
|
||||
U32 const D256 = (U32)(dstSize >> 8);
|
||||
U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
|
||||
U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
|
||||
DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, for cache eviction */
|
||||
|
||||
return DTime1 < DTime0;
|
||||
}
|
||||
{ U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize); /* Q < 16 */
|
||||
U32 const D256 = (U32)(dstSize >> 8);
|
||||
U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
|
||||
U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
|
||||
DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, to reduce cache eviction */
|
||||
return DTime1 < DTime0;
|
||||
} }
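/* A hedged worked example of the heuristic above (sizes chosen for
 * illustration only ; the algoTime[][] entries are not repeated here) :
 *   dstSize = 64 KB, cSrcSize = 40 KB
 *     Q    = 40960 * 16 / 65536 = 10   (compression-ratio bucket, always < 16)
 *     D256 = 65536 >> 8         = 256  (output size in 256-byte units)
 *   DTime0 and DTime1 are then the linear models tableTime + decode256Time*D256
 *   taken from row Q, with DTime1 inflated by 1/8 so that the smaller
 *   single-symbol (X2) tables win close calls. */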
|
||||
|
||||
|
||||
typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
|
||||
|
@ -994,3 +1055,42 @@ size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
|
|||
return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
|
||||
workSpace, sizeof(workSpace));
|
||||
}
|
||||
|
||||
|
||||
size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
|
||||
{
|
||||
DTableDesc const dtd = HUF_getDTableDesc(DTable);
|
||||
return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
|
||||
HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
|
||||
}
|
||||
|
||||
size_t HUF_decompress1X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
|
||||
{
|
||||
const BYTE* ip = (const BYTE*) cSrc;
|
||||
|
||||
size_t const hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize);
|
||||
if (HUF_isError(hSize)) return hSize;
|
||||
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
|
||||
ip += hSize; cSrcSize -= hSize;
|
||||
|
||||
return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
|
||||
}
|
||||
|
||||
size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
|
||||
{
|
||||
DTableDesc const dtd = HUF_getDTableDesc(DTable);
|
||||
return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
|
||||
HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
|
||||
}
|
||||
|
||||
size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
|
||||
{
|
||||
/* validation checks */
|
||||
if (dstSize == 0) return ERROR(dstSize_tooSmall);
|
||||
if (cSrcSize == 0) return ERROR(corruption_detected);
|
||||
|
||||
{ U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
|
||||
return algoNb ? HUF_decompress4X4_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
|
||||
HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
|
||||
}
|
||||
}
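/* A minimal, hedged usage sketch for the *_bmi2 entry points above. `dtable`
 * is assumed to have been built beforehand with HUF_readDTableX2/X4 ; passing
 * bmi2 == 0 always selects the portable path, while a non-zero flag is only
 * valid on CPUs with BMI2 support (which the library normally detects itself
 * through the cpu.h helpers). */
static size_t example_decode(void* dst, size_t dstSize,
                             const void* cSrc, size_t cSrcSize,
                             const HUF_DTable* dtable)
{
    size_t const r = HUF_decompress1X_usingDTable_bmi2(dst, dstSize, cSrc, cSrcSize,
                                                       dtable, /* bmi2 */ 0);
    return HUF_isError(r) ? 0 : r;   /* 0 stands for "failed" in this sketch */
}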
|
||||
|
|
File diff suppressed because it is too large
|
@ -45,11 +45,11 @@ extern "C" {
|
|||
Levels >= 20, labeled `--ultra`, should be used with caution, as they require more memory.
|
||||
Compression can be done in:
|
||||
- a single step (described as Simple API)
|
||||
- a single step, reusing a context (described as Explicit memory management)
|
||||
- a single step, reusing a context (described as Explicit context)
|
||||
- unbounded multiple steps (described as Streaming compression)
|
||||
The compression ratio achievable on small data can be highly improved using a dictionary in:
|
||||
- a single step (described as Simple dictionary API)
|
||||
- a single step, reusing a dictionary (described as Fast dictionary API)
|
||||
- a single step, reusing a dictionary (described as Bulk-processing dictionary API)
|
||||
|
||||
Advanced experimental functions can be accessed using #define ZSTD_STATIC_LINKING_ONLY before including zstd.h.
|
||||
Advanced experimental APIs shall never be used with a dynamic library.
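/* A minimal, hedged sketch of the one-step ("Simple API") round trip described
 * above. Buffer handling and the fixed compression level are illustrative ;
 * error handling is reduced to ZSTD_isError() checks. */
#include <stdlib.h>
#include <string.h>
#include "zstd.h"

static int example_roundtrip(const void* src, size_t srcSize)
{
    size_t const cBound = ZSTD_compressBound(srcSize);
    void* const cBuf = malloc(cBound);
    void* const dBuf = malloc(srcSize);
    int ok = 0;
    if (cBuf && dBuf) {
        size_t const cSize = ZSTD_compress(cBuf, cBound, src, srcSize, 3 /* level */);
        if (!ZSTD_isError(cSize)) {
            size_t const dSize = ZSTD_decompress(dBuf, srcSize, cBuf, cSize);
            ok = !ZSTD_isError(dSize) && (dSize == srcSize)
                 && (memcmp(src, dBuf, srcSize) == 0);
        }
    }
    free(cBuf);
    free(dBuf);
    return ok;
}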
|
||||
|
@ -59,7 +59,7 @@ extern "C" {
|
|||
/*------ Version ------*/
|
||||
#define ZSTD_VERSION_MAJOR 1
|
||||
#define ZSTD_VERSION_MINOR 3
|
||||
#define ZSTD_VERSION_RELEASE 3
|
||||
#define ZSTD_VERSION_RELEASE 4
|
||||
|
||||
#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
|
||||
ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< useful to check dll version */
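/* Worked example of the formula above for this release :
 * 1*100*100 + 3*100 + 4 = 10304, so a run-time check against the linked
 * library could look like this (a hedged sketch, assuming zstd.h is included). */
static int zstd_is_at_least_1_3_4(void)
{
    return ZSTD_versionNumber() >= 10304;
}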
|
||||
|
@ -68,7 +68,7 @@ ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< useful to check dll versio
|
|||
#define ZSTD_QUOTE(str) #str
|
||||
#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)
|
||||
#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
|
||||
ZSTDLIB_API const char* ZSTD_versionString(void); /* v1.3.0 */
|
||||
ZSTDLIB_API const char* ZSTD_versionString(void); /* added in v1.3.0 */
|
||||
|
||||
|
||||
/***************************************
|
||||
|
@ -92,7 +92,7 @@ ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
|
|||
ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
|
||||
const void* src, size_t compressedSize);
|
||||
|
||||
/*! ZSTD_getFrameContentSize() : v1.3.0
|
||||
/*! ZSTD_getFrameContentSize() : added in v1.3.0
|
||||
* `src` should point to the start of a ZSTD encoded frame.
|
||||
* `srcSize` must be at least as large as the frame header.
|
||||
* hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
|
||||
|
@ -120,26 +120,24 @@ ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t
|
|||
|
||||
/*! ZSTD_getDecompressedSize() :
|
||||
* NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
|
||||
* Both functions work the same way,
|
||||
* but ZSTD_getDecompressedSize() blends
|
||||
* "empty", "unknown" and "error" results in the same return value (0),
|
||||
* while ZSTD_getFrameContentSize() distinguishes them.
|
||||
*
|
||||
* 'src' is the start of a zstd compressed frame.
|
||||
* @return : content size to be decompressed, as a 64-bits value _if known and not empty_, 0 otherwise. */
|
||||
* Both functions work the same way, but ZSTD_getDecompressedSize() blends
|
||||
* "empty", "unknown" and "error" results to the same return value (0),
|
||||
* while ZSTD_getFrameContentSize() gives them separate return values.
|
||||
* `src` is the start of a zstd compressed frame.
|
||||
* @return : content size to be decompressed, as a 64-bits value _if known and not empty_, 0 otherwise. */
|
||||
ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
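/* A hedged sketch of the recommended pattern : distinguish the three outcomes
 * that ZSTD_getDecompressedSize() collapses to 0, using the ZSTD_CONTENTSIZE_*
 * sentinels that zstd.h defines for ZSTD_getFrameContentSize(). */
static void example_inspect_frame(const void* src, size_t srcSize)
{
    unsigned long long const contentSize = ZSTD_getFrameContentSize(src, srcSize);
    if (contentSize == ZSTD_CONTENTSIZE_ERROR) {
        /* not a valid zstd frame (or srcSize too small to carry a frame header) */
    } else if (contentSize == ZSTD_CONTENTSIZE_UNKNOWN) {
        /* valid frame, size not recorded : fall back to streaming decompression */
    } else {
        /* exact decompressed size known : a single ZSTD_decompress() call can be sized */
    }
}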
|
||||
|
||||
|
||||
/*====== Helper functions ======*/
|
||||
#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
|
||||
ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case scenario */
|
||||
ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
|
||||
ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
|
||||
ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */
|
||||
ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */
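/* Worked examples of the ZSTD_COMPRESSBOUND() margin above :
 *   srcSize = 100   -> 100 + (100>>8) + ((128*1024 - 100) >> 11) = 100 + 0 + 63 = 163
 *   srcSize = 1 MB  -> 1048576 + (1048576>>8) + 0 = 1048576 + 4096 = 1052672
 * i.e. small inputs get a fixed safety margin, large inputs roughly 0.4 % overhead,
 * so sizing `dst` with ZSTD_compressBound(srcSize) guarantees one-pass success. */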
|
||||
|
||||
|
||||
/***************************************
|
||||
* Explicit memory management
|
||||
* Explicit context
|
||||
***************************************/
|
||||
/*= Compression context
|
||||
* When compressing many times,
|
||||
|
@ -345,7 +343,7 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output
|
|||
* *******************************************************************************/
|
||||
|
||||
typedef ZSTD_DCtx ZSTD_DStream; /**< DCtx and DStream are now effectively same object (>= v1.3.0) */
|
||||
/* Continue to distinguish them for compatibility with versions <= v1.2.0 */
|
||||
/* For compatibility with versions <= v1.2.0, continue to consider them separated. */
|
||||
/*===== ZSTD_DStream management functions =====*/
|
||||
ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
|
||||
ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);
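/* A hedged sketch of a streaming decompression loop built on the DStream API
 * above. `readInput` and `consumeOutput` are hypothetical caller-provided
 * callbacks ; real code would also distinguish "frame complete" from
 * "input exhausted mid-frame" via the ZSTD_decompressStream() return value. */
#include <stdlib.h>
#include "zstd.h"

static size_t example_decompress_stream(size_t (*readInput)(void* buf, size_t cap, void* ctx),
                                        void (*consumeOutput)(const void* buf, size_t len, void* ctx),
                                        void* ctx)
{
    ZSTD_DStream* const zds = ZSTD_createDStream();
    size_t const inCap  = ZSTD_DStreamInSize();    /* recommended buffer sizes */
    size_t const outCap = ZSTD_DStreamOutSize();
    void* const inBuf   = malloc(inCap);
    void* const outBuf  = malloc(outCap);
    size_t ret = 0;

    if (zds && inBuf && outBuf) {
        ZSTD_initDStream(zds);
        for (;;) {
            size_t const readSize = readInput(inBuf, inCap, ctx);
            ZSTD_inBuffer input = { inBuf, readSize, 0 };
            if (readSize == 0) break;                       /* no more compressed input */
            while (input.pos < input.size) {
                ZSTD_outBuffer output = { outBuf, outCap, 0 };
                ret = ZSTD_decompressStream(zds, &output, &input);
                if (ZSTD_isError(ret)) break;
                consumeOutput(outBuf, output.pos, ctx);
            }
            if (ZSTD_isError(ret)) break;
        }
    }
    free(inBuf);
    free(outBuf);
    ZSTD_freeDStream(zds);
    return ret;   /* 0 if the last frame ended exactly on the last byte consumed */
}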
|
||||
|
@ -375,23 +373,24 @@ ZSTDLIB_API size_t ZSTD_DStreamOutSize(void); /*!< recommended size for output
|
|||
/* --- Constants ---*/
|
||||
#define ZSTD_MAGICNUMBER 0xFD2FB528 /* >= v0.8.0 */
|
||||
#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50U
|
||||
#define ZSTD_MAGIC_DICTIONARY 0xEC30A437 /* v0.7+ */
|
||||
#define ZSTD_MAGIC_DICTIONARY 0xEC30A437 /* >= v0.7.0 */
|
||||
|
||||
#define ZSTD_WINDOWLOG_MAX_32 30
|
||||
#define ZSTD_WINDOWLOG_MAX_64 31
|
||||
#define ZSTD_WINDOWLOG_MAX ((unsigned)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
|
||||
#define ZSTD_WINDOWLOG_MIN 10
|
||||
#define ZSTD_HASHLOG_MAX MIN(ZSTD_WINDOWLOG_MAX, 30)
|
||||
#define ZSTD_HASHLOG_MAX ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30)
|
||||
#define ZSTD_HASHLOG_MIN 6
|
||||
#define ZSTD_CHAINLOG_MAX MIN(ZSTD_WINDOWLOG_MAX+1, 30)
|
||||
#define ZSTD_CHAINLOG_MAX_32 29
|
||||
#define ZSTD_CHAINLOG_MAX_64 30
|
||||
#define ZSTD_CHAINLOG_MAX ((unsigned)(sizeof(size_t) == 4 ? ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64))
|
||||
#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN
|
||||
#define ZSTD_HASHLOG3_MAX 17
|
||||
#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1)
|
||||
#define ZSTD_SEARCHLOG_MIN 1
|
||||
#define ZSTD_SEARCHLENGTH_MAX 7 /* only for ZSTD_fast, other strategies are limited to 6 */
|
||||
#define ZSTD_SEARCHLENGTH_MIN 3 /* only for ZSTD_btopt, other strategies are limited to 4 */
|
||||
#define ZSTD_TARGETLENGTH_MIN 4 /* only useful for btopt */
|
||||
#define ZSTD_TARGETLENGTH_MAX 999 /* only useful for btopt */
|
||||
#define ZSTD_TARGETLENGTH_MIN 1 /* only used by btopt, btultra and btfast */
|
||||
#define ZSTD_LDM_MINMATCH_MIN 4
|
||||
#define ZSTD_LDM_MINMATCH_MAX 4096
|
||||
#define ZSTD_LDM_BUCKETSIZELOG_MAX 8
|
||||
|
@ -432,12 +431,17 @@ typedef struct {
|
|||
|
||||
typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;
|
||||
|
||||
/*--- Custom memory allocation functions ---*/
|
||||
typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
|
||||
typedef void (*ZSTD_freeFunction) (void* opaque, void* address);
|
||||
typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
|
||||
/* use this constant to defer to stdlib's functions */
|
||||
static ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL };
|
||||
typedef enum {
|
||||
ZSTD_dct_auto=0, /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
|
||||
ZSTD_dct_rawContent, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
|
||||
ZSTD_dct_fullDict /* refuses to load a dictionary if it does not respect Zstandard's specification */
|
||||
} ZSTD_dictContentType_e;
|
||||
|
||||
typedef enum {
|
||||
ZSTD_dlm_byCopy = 0, /**< Copy dictionary content internally */
|
||||
ZSTD_dlm_byRef, /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
|
||||
} ZSTD_dictLoadMethod_e;
|
||||
|
||||
|
||||
|
||||
/***************************************
|
||||
|
@ -483,12 +487,12 @@ ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
|
|||
|
||||
|
||||
/***************************************
|
||||
* Context memory usage
|
||||
* Memory management
|
||||
***************************************/
|
||||
|
||||
/*! ZSTD_sizeof_*() :
|
||||
* These functions give the current memory usage of selected object.
|
||||
* Object memory usage can evolve when re-used multiple times. */
|
||||
* Object memory usage can evolve when re-used. */
|
||||
ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
|
||||
ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
|
||||
ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
|
||||
|
@ -503,8 +507,8 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
|
|||
* It will also consider src size to be arbitrarily "large", which is worst case.
|
||||
* If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
|
||||
* ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
|
||||
* ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_p_nbThreads is > 1.
|
||||
* Note : CCtx estimation is only correct for single-threaded compression */
|
||||
* ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_p_nbWorkers is >= 1.
|
||||
* Note : CCtx size estimation is only correct for single-threaded compression. */
|
||||
ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
|
||||
ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
|
||||
ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
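/* A hedged sketch combining ZSTD_getCParams() with the estimators above ;
 * the level and the "inputs stay under 16 KB" hint are illustrative. */
static size_t example_estimate_cctx_for_small_inputs(void)
{
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(3, 16 * 1024, 0);
    return ZSTD_estimateCCtxSize_usingCParams(cParams);  /* tighter than ZSTD_estimateCCtxSize(3) */
}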
|
||||
|
@ -515,8 +519,8 @@ ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
|
|||
* It will also consider src size to be arbitrarily "large", which is worst case.
|
||||
* If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
|
||||
* ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
|
||||
* ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_p_nbThreads is set to a value > 1.
|
||||
* Note : CStream estimation is only correct for single-threaded compression.
|
||||
* ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_p_nbWorkers is >= 1.
|
||||
* Note : CStream size estimation is only correct for single-threaded compression.
|
||||
* ZSTD_DStream memory budget depends on window Size.
|
||||
* This information can be passed manually, using ZSTD_estimateDStreamSize,
|
||||
* or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();
|
||||
|
@ -529,46 +533,86 @@ ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_para
|
|||
ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
|
||||
ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
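/* A hedged sketch : derive the DStream memory budget from an actual frame
 * header instead of guessing a window size (frame/frameSize are illustrative). */
static size_t example_dstream_budget(const void* frame, size_t frameSize)
{
    size_t const need = ZSTD_estimateDStreamSize_fromFrame(frame, frameSize);
    return ZSTD_isError(need) ? 0 : need;   /* bytes to reserve before ZSTD_initStaticDStream() */
}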
|
||||
|
||||
typedef enum {
|
||||
ZSTD_dlm_byCopy = 0, /**< Copy dictionary content internally */
|
||||
ZSTD_dlm_byRef, /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
|
||||
} ZSTD_dictLoadMethod_e;
|
||||
|
||||
/*! ZSTD_estimate?DictSize() :
|
||||
* ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict().
|
||||
* ZSTD_estimateCStreamSize_advanced_usingCParams() makes it possible to control precisely compression parameters, like ZSTD_createCDict_advanced().
|
||||
* Note : dictionary created by reference using ZSTD_dlm_byRef are smaller
|
||||
* ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
|
||||
* Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.
|
||||
*/
|
||||
ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
|
||||
ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
|
||||
ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
|
||||
|
||||
/*! ZSTD_initStatic*() :
|
||||
* Initialize an object using a pre-allocated fixed-size buffer.
|
||||
* workspace: The memory area to emplace the object into.
|
||||
* Provided pointer *must be 8-bytes aligned*.
|
||||
* Buffer must outlive object.
|
||||
* workspaceSize: Use ZSTD_estimate*Size() to determine
|
||||
* how large workspace must be to support target scenario.
|
||||
* @return : pointer to object (same address as workspace, just different type),
|
||||
* or NULL if error (size too small, incorrect alignment, etc.)
|
||||
* Note : zstd will never resize nor malloc() when using a static buffer.
|
||||
* If the object requires more memory than available,
|
||||
* zstd will just error out (typically ZSTD_error_memory_allocation).
|
||||
* Note 2 : there is no corresponding "free" function.
|
||||
* Since workspace is allocated externally, it must be freed externally too.
|
||||
* Note 3 : cParams : use ZSTD_getCParams() to convert a compression level
|
||||
* into its associated cParams.
|
||||
* Limitation 1 : currently not compatible with internal dictionary creation, triggered by
|
||||
* ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().
|
||||
* Limitation 2 : static cctx currently not compatible with multi-threading.
|
||||
* Limitation 3 : static dctx is incompatible with legacy support.
|
||||
*/
|
||||
ZSTDLIB_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
|
||||
ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticCCtx() */
|
||||
|
||||
ZSTDLIB_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
|
||||
ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticDCtx() */
|
||||
|
||||
ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict(
|
||||
void* workspace, size_t workspaceSize,
|
||||
const void* dict, size_t dictSize,
|
||||
ZSTD_dictLoadMethod_e dictLoadMethod,
|
||||
ZSTD_dictContentType_e dictContentType,
|
||||
ZSTD_compressionParameters cParams);
|
||||
|
||||
ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict(
|
||||
void* workspace, size_t workspaceSize,
|
||||
const void* dict, size_t dictSize,
|
||||
ZSTD_dictLoadMethod_e dictLoadMethod,
|
||||
ZSTD_dictContentType_e dictContentType);
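/* A minimal, hedged sketch of the static-initialization workflow described
 * above : estimate, allocate once, emplace, use, then free only the workspace.
 * malloc() is assumed to satisfy the 8-byte alignment requirement. */
#include <stdlib.h>

static size_t example_compress_with_static_cctx(void* dst, size_t dstCapacity,
                                                const void* src, size_t srcSize, int level)
{
    size_t cSize = 0;                               /* 0 stands for "failed" in this sketch */
    size_t const wkspSize = ZSTD_estimateCCtxSize(level);
    void* const wksp = malloc(wkspSize);
    if (wksp) {
        ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, wkspSize);
        if (cctx) {
            size_t const r = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, level);
            if (!ZSTD_isError(r)) cSize = r;
        }
        /* no "free" call for cctx : the object lives inside wksp, which the caller owns */
        free(wksp);
    }
    return cSize;
}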
|
||||
|
||||
/*! Custom memory allocation :
|
||||
* These prototypes make it possible to pass your own allocation/free functions.
|
||||
* ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.
|
||||
* All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
|
||||
*/
|
||||
typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
|
||||
typedef void (*ZSTD_freeFunction) (void* opaque, void* address);
|
||||
typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
|
||||
static ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */
|
||||
|
||||
ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
|
||||
ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
|
||||
ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
|
||||
ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
|
||||
|
||||
ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
|
||||
ZSTD_dictLoadMethod_e dictLoadMethod,
|
||||
ZSTD_dictContentType_e dictContentType,
|
||||
ZSTD_compressionParameters cParams,
|
||||
ZSTD_customMem customMem);
|
||||
|
||||
ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
|
||||
ZSTD_dictLoadMethod_e dictLoadMethod,
|
||||
ZSTD_dictContentType_e dictContentType,
|
||||
ZSTD_customMem customMem);
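/* A hedged sketch of routing zstd's allocations through custom functions via
 * ZSTD_customMem ; the allocation-counting logic is purely illustrative. */
#include <stdlib.h>

typedef struct { size_t liveAllocs; } ExampleAllocStats;

static void* example_alloc(void* opaque, size_t size)
{
    ExampleAllocStats* const s = (ExampleAllocStats*)opaque;
    void* const p = malloc(size);
    if (p) s->liveAllocs++;
    return p;
}

static void example_free(void* opaque, void* address)
{
    ExampleAllocStats* const s = (ExampleAllocStats*)opaque;
    if (address) { s->liveAllocs--; free(address); }
}

static ZSTD_CCtx* example_create_counted_cctx(ExampleAllocStats* stats)
{
    ZSTD_customMem const cmem = { example_alloc, example_free, stats };
    return ZSTD_createCCtx_advanced(cmem);   /* release later with ZSTD_freeCCtx() */
}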
|
||||
|
||||
|
||||
|
||||
/***************************************
|
||||
* Advanced compression functions
|
||||
***************************************/
|
||||
/*! ZSTD_createCCtx_advanced() :
|
||||
* Create a ZSTD compression context using external alloc and free functions */
|
||||
ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
|
||||
|
||||
/*! ZSTD_initStaticCCtx() : initialize a fixed-size zstd compression context
|
||||
* workspace: The memory area to emplace the context into.
|
||||
* Provided pointer must 8-bytes aligned.
|
||||
* It must outlive context usage.
|
||||
* workspaceSize: Use ZSTD_estimateCCtxSize() or ZSTD_estimateCStreamSize()
|
||||
* to determine how large workspace must be to support scenario.
|
||||
* @return : pointer to ZSTD_CCtx* (same address as workspace, but different type),
|
||||
* or NULL if error (typically size too small)
|
||||
* Note : zstd will never resize nor malloc() when using a static cctx.
|
||||
* If it needs more memory than available, it will simply error out.
|
||||
* Note 2 : there is no corresponding "free" function.
|
||||
* Since workspace was allocated externally, it must be freed externally too.
|
||||
* Limitation 1 : currently not compatible with internal CDict creation, such as
|
||||
* ZSTD_CCtx_loadDictionary() or ZSTD_initCStream_usingDict().
|
||||
* Limitation 2 : currently not compatible with multi-threading
|
||||
*/
|
||||
ZSTDLIB_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
|
||||
|
||||
|
||||
/*! ZSTD_createCDict_byReference() :
|
||||
* Create a digested dictionary for compression
|
||||
|
@ -576,38 +620,6 @@ ZSTDLIB_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize
|
|||
* It is important that dictBuffer outlives CDict, it must remain read accessible throughout the lifetime of CDict */
|
||||
ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
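/* A hedged sketch of the by-reference flow noted above : the dictionary buffer
 * is not copied, so `dictBuf` must stay readable for as long as the CDict is
 * used. In real code the CDict would be created once and reused across many
 * compressions ; it is created and freed here only to keep the sketch short. */
static size_t example_compress_with_ref_dict(ZSTD_CCtx* cctx,
                                             void* dst, size_t dstCapacity,
                                             const void* src, size_t srcSize,
                                             const void* dictBuf, size_t dictSize)
{
    ZSTD_CDict* const cdict = ZSTD_createCDict_byReference(dictBuf, dictSize, 3 /* level */);
    size_t cSize = 0;
    if (cdict) {
        size_t const r = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
        if (!ZSTD_isError(r)) cSize = r;
        ZSTD_freeCDict(cdict);               /* dictBuf itself remains owned by the caller */
    }
    return cSize;
}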
|
||||
|
||||
typedef enum { ZSTD_dm_auto=0, /* dictionary is "full" if it starts with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
|
||||
ZSTD_dm_rawContent, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
|
||||
ZSTD_dm_fullDict /* refuses to load a dictionary if it does not respect Zstandard's specification */
|
||||
} ZSTD_dictMode_e;
|
||||
/*! ZSTD_createCDict_advanced() :
|
||||
* Create a ZSTD_CDict using external alloc and free, and customized compression parameters */
|
||||
ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
|
||||
ZSTD_dictLoadMethod_e dictLoadMethod,
|
||||
ZSTD_dictMode_e dictMode,
|
||||
ZSTD_compressionParameters cParams,
|
||||
ZSTD_customMem customMem);
|
||||
|
||||
/*! ZSTD_initStaticCDict() :
|
||||
* Generate a digested dictionary in provided memory area.
|
||||
* workspace: The memory area to emplace the dictionary into.
|
||||
* Provided pointer must 8-bytes aligned.
|
||||
* It must outlive dictionary usage.
|
||||
* workspaceSize: Use ZSTD_estimateCDictSize()
|
||||
* to determine how large workspace must be.
|
||||
* cParams : use ZSTD_getCParams() to transform a compression level
|
||||
* into its relevants cParams.
|
||||
* @return : pointer to ZSTD_CDict* (same address as workspace, but different type),
|
||||
* or NULL if error (typically, size too small).
|
||||
* Note : there is no corresponding "free" function.
|
||||
* Since workspace was allocated externally, it must be freed externally.
|
||||
*/
|
||||
ZSTDLIB_API ZSTD_CDict* ZSTD_initStaticCDict(
|
||||
void* workspace, size_t workspaceSize,
|
||||
const void* dict, size_t dictSize,
|
||||
ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictMode_e dictMode,
|
||||
ZSTD_compressionParameters cParams);
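/* Illustrative sketch (not part of zstd.h) : building a digested dictionary with explicit
 * cParams, using the ZSTD_dictContentType_e flavour of ZSTD_createCDict_advanced() shown
 * earlier. dictBuf/dictSize are assumed to hold a dictionary produced elsewhere;
 * ZSTD_dlm_byCopy and ZSTD_dct_auto are assumed to be this version's enum constant names. */
static ZSTD_CDict* example_make_cdict(const void* dictBuf, size_t dictSize,
                                      unsigned long long typicalSrcSize)
{
    /* derive compression parameters for level 3, tuned to the expected source size */
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(3, typicalSrcSize, dictSize);
    /* dictionary bytes are copied, so dictBuf does not need to outlive the CDict */
    return ZSTD_createCDict_advanced(dictBuf, dictSize,
                                     ZSTD_dlm_byCopy, ZSTD_dct_auto,
                                     cParams, ZSTD_defaultCMem);
    /* the result would then feed ZSTD_compress_usingCDict(), and be released with ZSTD_freeCDict() */
}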

/*! ZSTD_getCParams() :
 * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
 * `estimatedSrcSize` value is optional, select 0 if not known */
@ -652,28 +664,6 @@ ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
 *  Note 3 : Skippable Frame Identifiers are considered valid. */
ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);

/*! ZSTD_createDCtx_advanced() :
 *  Create a ZSTD decompression context using external alloc and free functions */
ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);

/*! ZSTD_initStaticDCtx() : initialize a fixed-size zstd decompression context
 *  workspace: The memory area to emplace the context into.
 *             Provided pointer must be 8-bytes aligned.
 *             It must outlive context usage.
 *  workspaceSize: Use ZSTD_estimateDCtxSize() or ZSTD_estimateDStreamSize()
 *                 to determine how large workspace must be to support scenario.
 * @return : pointer to ZSTD_DCtx* (same address as workspace, but different type),
 *           or NULL if error (typically size too small)
 *  Note : zstd will never resize nor malloc() when using a static dctx.
 *         If it needs more memory than available, it will simply error out.
 *  Note 2 : static dctx is incompatible with legacy support
 *  Note 3 : there is no corresponding "free" function.
 *           Since workspace was allocated externally, it must be freed externally.
 *  Limitation : currently not compatible with internal DDict creation,
 *               such as ZSTD_initDStream_usingDict().
 */
ZSTDLIB_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);

/*! ZSTD_createDDict_byReference() :
 *  Create a digested dictionary, ready to start decompression operation without startup delay.
 *  Dictionary content is referenced, and therefore stays in dictBuffer.
@ -681,26 +671,6 @@ ZSTDLIB_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize
 *  it must remain read accessible throughout the lifetime of DDict */
ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);

/*! ZSTD_createDDict_advanced() :
 *  Create a ZSTD_DDict using external alloc and free, optionally by reference */
ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
                                                  ZSTD_dictLoadMethod_e dictLoadMethod,
                                                  ZSTD_customMem customMem);

/*! ZSTD_initStaticDDict() :
 *  Generate a digested dictionary in provided memory area.
 *  workspace: The memory area to emplace the dictionary into.
 *             Provided pointer must be 8-bytes aligned.
 *             It must outlive dictionary usage.
 *  workspaceSize: Use ZSTD_estimateDDictSize()
 *                 to determine how large workspace must be.
 * @return : pointer to ZSTD_DDict*, or NULL if error (size too small)
 *  Note : there is no corresponding "free" function.
 *         Since workspace was allocated externally, it must be freed externally.
 */
ZSTDLIB_API ZSTD_DDict* ZSTD_initStaticDDict(void* workspace, size_t workspaceSize,
                                             const void* dict, size_t dictSize,
                                             ZSTD_dictLoadMethod_e dictLoadMethod);

/*! ZSTD_getDictID_fromDict() :
 *  Provides the dictID stored within dictionary.
@ -732,8 +702,6 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
********************************************************************/

/*===== Advanced Streaming compression functions =====*/
ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticCCtx() */
ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);   /**< pledgedSrcSize must be correct. If it is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs, "0" also disables frame content size field. It may be enabled in the future. */
ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< creates an internal CDict (incompatible with static CCtx), except if dict == NULL or dictSize < 8, in which case no dict is used. Note: dict is loaded with ZSTD_dm_auto (treated as a full zstd dictionary if it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.*/
ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
@ -748,14 +716,28 @@ ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const
 *  If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
 *  If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
 *  For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
 *  but it may change to mean "empty" in some future version, so prefer using macro ZSTD_CONTENTSIZE_UNKNOWN.
 *  but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
 * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);


typedef struct {
    unsigned long long ingested;
    unsigned long long consumed;
    unsigned long long produced;
} ZSTD_frameProgression;

/* ZSTD_getFrameProgression():
 * tells how much data has been ingested (read from input)
 * consumed (input actually compressed) and produced (output) for current frame.
 * Therefore, (ingested - consumed) is amount of input data buffered internally, not yet compressed.
 * Can report progression inside worker threads (multi-threading and non-blocking mode).
 */
ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
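/* Illustrative sketch (not part of zstd.h) : polling progression from another part of the
 * program while a (possibly multi-threaded) streaming compression is running on `cctx`.
 * Uses only the struct and function declared above, plus <stdio.h> for printf(). */
static void example_report_progress(const ZSTD_CCtx* cctx)
{
    ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
    unsigned long long const buffered = fp.ingested - fp.consumed;   /* input waiting to be compressed */
    printf("ingested=%llu consumed=%llu produced=%llu buffered=%llu\n",
           fp.ingested, fp.consumed, fp.produced, buffered);
}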


/*===== Advanced Streaming decompression functions =====*/
ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticDCtx() */
typedef enum { DStream_p_maxWindowSize } ZSTD_DStreamParameter_e;
ZSTDLIB_API size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue);   /* obsolete : this API will be removed in a future version */
ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: no dictionary will be used if dict == NULL or dictSize < 8 */
@ -924,10 +906,8 @@ ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
 * and then applied on all subsequent compression jobs.
 * When no parameter is ever provided, CCtx is created with compression level ZSTD_CLEVEL_DEFAULT.
 *
 * This API is intended to replace all others experimental API.
 * It can basically do all other use cases, and even new ones.
 * In contrast with _advanced() variants, it stands a reasonable chance to become "stable",
 * after a good testing period.
 * This API is intended to replace all others advanced / experimental API entry points.
 * But it stands a reasonable chance to become "stable", after a reasonable testing period.
 */

/* note on naming convention :
@ -944,12 +924,12 @@ ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
 * All enum will be pinned to explicit values before reaching "stable API" status */

typedef enum {
    /* Question : should we have a format ZSTD_f_auto ?
     * For the time being, it would mean exactly the same as ZSTD_f_zstd1.
     * But, in the future, should several formats be supported,
    /* Opened question : should we have a format ZSTD_f_auto ?
     * Today, it would mean exactly the same as ZSTD_f_zstd1.
     * But, in the future, should several formats become supported,
     * on the compression side, it would mean "default format".
     * On the decompression side, it would mean "multi format",
     * and ZSTD_f_zstd1 could be reserved to mean "accept *only* zstd frames".
     * On the decompression side, it would mean "automatic format detection",
     * so that ZSTD_f_zstd1 would mean "accept *only* zstd frames".
     * Since meaning is a little different, another option could be to define different enums for compression and decompression.
     * This question could be kept for later, when there are actually multiple formats to support,
     * but there is also the question of pinning enum values, and pinning value `0` is especially important */
@ -967,42 +947,76 @@ typedef enum {
    /* compression parameters */
    ZSTD_p_compressionLevel=100, /* Update all compression parameters according to pre-defined cLevel table
                              * Default level is ZSTD_CLEVEL_DEFAULT==3.
                              * Special: value 0 means "do not change cLevel". */
                              * Special: value 0 means "do not change cLevel".
                              * Note 1 : it's possible to pass a negative compression level by casting it to unsigned type.
                              * Note 2 : setting a level sets all default values of other compression parameters.
                              * Note 3 : setting compressionLevel automatically updates ZSTD_p_compressLiterals. */
    ZSTD_p_windowLog,        /* Maximum allowed back-reference distance, expressed as power of 2.
                              * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
                              * Special: value 0 means "do not change windowLog".
                              * Special: value 0 means "use default windowLog".
                              * Note: Using a window size greater than ZSTD_MAXWINDOWSIZE_DEFAULT (default: 2^27)
                              * requires setting the maximum window size at least as large during decompression. */
                              * requires explicitly allowing such window size during decompression stage. */
    ZSTD_p_hashLog,          /* Size of the probe table, as a power of 2.
                              * Resulting table size is (1 << (hashLog+2)).
                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
                              * Larger tables improve compression ratio of strategies <= dFast,
                              * and improve speed of strategies > dFast.
                              * Special: value 0 means "do not change hashLog". */
                              * Special: value 0 means "use default hashLog". */
    ZSTD_p_chainLog,         /* Size of the full-search table, as a power of 2.
                              * Resulting table size is (1 << (chainLog+2)).
                              * Larger tables result in better and slower compression.
                              * This parameter is useless when using "fast" strategy.
                              * Special: value 0 means "do not change chainLog". */
                              * Special: value 0 means "use default chainLog". */
    ZSTD_p_searchLog,        /* Number of search attempts, as a power of 2.
                              * More attempts result in better and slower compression.
                              * This parameter is useless when using "fast" and "dFast" strategies.
                              * Special: value 0 means "do not change searchLog". */
                              * Special: value 0 means "use default searchLog". */
    ZSTD_p_minMatch,         /* Minimum size of searched matches (note : repCode matches can be smaller).
                              * Larger values make faster compression and decompression, but decrease ratio.
                              * Must be clamped between ZSTD_SEARCHLENGTH_MIN and ZSTD_SEARCHLENGTH_MAX.
                              * Note that currently, for all strategies < btopt, effective minimum is 4.
                              * Note that currently, for all strategies > fast, effective maximum is 6.
                              * Special: value 0 means "do not change minMatchLength". */
    ZSTD_p_targetLength,     /* Only useful for strategies >= btopt.
                              * Length of Match considered "good enough" to stop search.
                              * Larger values make compression stronger and slower.
                              * Special: value 0 means "do not change targetLength". */
                              *                     , for all strategies > fast, effective maximum is 6.
                              * Special: value 0 means "use default minMatchLength". */
    ZSTD_p_targetLength,     /* Impact of this field depends on strategy.
                              * For strategies btopt & btultra:
                              *     Length of Match considered "good enough" to stop search.
                              *     Larger values make compression stronger, and slower.
                              * For strategy fast:
                              *     Distance between match sampling.
                              *     Larger values make compression faster, and weaker.
                              * Special: value 0 means "use default targetLength". */
    ZSTD_p_compressionStrategy, /* See ZSTD_strategy enum definition.
                              * Cast selected strategy as unsigned for ZSTD_CCtx_setParameter() compatibility.
                              * The higher the value of selected strategy, the more complex it is,
                              * resulting in stronger and slower compression.
                              * Special: value 0 means "do not change strategy". */
                              * Special: value 0 means "use default strategy". */

    ZSTD_p_enableLongDistanceMatching=160, /* Enable long distance matching.
                              * This parameter is designed to improve compression ratio
                              * for large inputs, by finding large matches at long distance.
                              * It increases memory usage and window size.
                              * Note: enabling this parameter increases ZSTD_p_windowLog to 128 MB
                              * except when expressly set to a different value. */
    ZSTD_p_ldmHashLog,       /* Size of the table for long distance matching, as a power of 2.
                              * Larger values increase memory usage and compression ratio,
                              * but decrease compression speed.
                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
                              * default: windowlog - 7.
                              * Special: value 0 means "automatically determine hashlog". */
    ZSTD_p_ldmMinMatch,      /* Minimum match size for long distance matcher.
                              * Larger/too small values usually decrease compression ratio.
                              * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX.
                              * Special: value 0 means "use default value" (default: 64). */
    ZSTD_p_ldmBucketSizeLog, /* Log size of each bucket in the LDM hash table for collision resolution.
                              * Larger values improve collision resolution but decrease compression speed.
                              * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX .
                              * Special: value 0 means "use default value" (default: 3). */
    ZSTD_p_ldmHashEveryLog,  /* Frequency of inserting/looking up entries in the LDM hash table.
                              * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN).
                              * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage.
                              * Larger values improve compression speed.
                              * Deviating far from default value will likely result in a compression ratio decrease.
                              * Special: value 0 means "automatically determine hashEveryLog". */

    /* frame parameters */
    ZSTD_p_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
@ -1012,58 +1026,45 @@ typedef enum {
    ZSTD_p_dictIDFlag,       /* When applicable, dictionary's ID is written into frame header (default:1) */

    /* multi-threading parameters */
    ZSTD_p_nbThreads=400,    /* Select how many threads a compression job can spawn (default:1)
                              * More threads improve speed, but also increase memory usage.
                              * Can only receive a value > 1 if ZSTD_MULTITHREAD is enabled.
                              * Special: value 0 means "do not change nbThreads" */
    ZSTD_p_jobSize,          /* Size of a compression job. This value is only enforced in streaming (non-blocking) mode.
                              * Each compression job is completed in parallel, so indirectly controls the nb of active threads.
    /* These parameters are only useful if multi-threading is enabled (ZSTD_MULTITHREAD).
     * They return an error otherwise. */
    ZSTD_p_nbWorkers=400,    /* Select how many threads will be spawned to compress in parallel.
                              * When nbWorkers >= 1, triggers asynchronous mode :
                              * ZSTD_compress_generic() consumes some input, flushes some output if possible, and immediately gives back control to caller,
                              * while compression work is performed in parallel, within worker threads.
                              * (note : a strong exception to this rule is when first invocation sets ZSTD_e_end : it becomes a blocking call).
                              * More workers improve speed, but also increase memory usage.
                              * Default value is `0`, aka "single-threaded mode" : no worker is spawned, compression is performed inside Caller's thread, all invocations are blocking */
    ZSTD_p_jobSize,          /* Size of a compression job. This value is enforced only in non-blocking mode.
                              * Each compression job is completed in parallel, so this value indirectly controls the nb of active threads.
                              * 0 means default, which is dynamically determined based on compression parameters.
                              * Job size must be a minimum of overlapSize, or 1 KB, whichever is largest
                              * Job size must be a minimum of overlapSize, or 1 MB, whichever is largest.
                              * The minimum size is automatically and transparently enforced */
    ZSTD_p_overlapSizeLog,   /* Size of previous input reloaded at the beginning of each job.
                              * 0 => no overlap, 6(default) => use 1/8th of windowSize, >=9 => use full windowSize */

    /* advanced parameters - may not remain available after API update */
    /* =================================================================== */
    /* experimental parameters - no stability guaranteed                   */
    /* =================================================================== */

    ZSTD_p_compressLiterals=1000, /* control huffman compression of literals (enabled) by default.
                              * disabling it improves speed and decreases compression ratio by a large amount.
                              * note : this setting is automatically updated when changing compression level.
                              *        positive compression levels set ZSTD_p_compressLiterals to 1.
                              *        negative compression levels set ZSTD_p_compressLiterals to 0. */

    ZSTD_p_forceMaxWindow=1100, /* Force back-reference distances to remain < windowSize,
                              * even when referencing into Dictionary content (default:0) */
    ZSTD_p_enableLongDistanceMatching=1200, /* Enable long distance matching.
                              * This parameter is designed to improve the compression
                              * ratio for large inputs with long distance matches.
                              * This increases the memory usage as well as window size.
                              * Note: setting this parameter sets all the LDM parameters
                              * as well as ZSTD_p_windowLog. It should be set after
                              * ZSTD_p_compressionLevel and before ZSTD_p_windowLog and
                              * other LDM parameters. Setting the compression level
                              * after this parameter overrides the window log, though LDM
                              * will remain enabled until explicitly disabled. */
    ZSTD_p_ldmHashLog,       /* Size of the table for long distance matching, as a power of 2.
                              * Larger values increase memory usage and compression ratio, but decrease
                              * compression speed.
                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
                              * (default: windowlog - 7). */
    ZSTD_p_ldmMinMatch,      /* Minimum size of searched matches for long distance matcher.
                              * Larger/too small values usually decrease compression ratio.
                              * Must be clamped between ZSTD_LDM_MINMATCH_MIN
                              * and ZSTD_LDM_MINMATCH_MAX (default: 64). */
    ZSTD_p_ldmBucketSizeLog, /* Log size of each bucket in the LDM hash table for collision resolution.
                              * Larger values usually improve collision resolution but may decrease
                              * compression speed.
                              * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX (default: 3). */
    ZSTD_p_ldmHashEveryLog,  /* Frequency of inserting/looking up entries in the LDM hash table.
                              * The default is MAX(0, (windowLog - ldmHashLog)) to
                              * optimize hash table usage.
                              * Larger values improve compression speed. Deviating far from the
                              * default value will likely result in a decrease in compression ratio.
                              * Must be clamped between 0 and ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN. */

} ZSTD_cParameter;


/*! ZSTD_CCtx_setParameter() :
 *  Set one compression parameter, selected by enum ZSTD_cParameter.
 *  Setting a parameter is generally only possible during frame initialization (before starting compression),
 *  except for a few exceptions which can be updated during compression: compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.
 *  Note : when `value` is an enum, cast it to unsigned for proper type checking.
 * @result : informational value (typically, the one being set, possibly corrected),
 * @result : informational value (typically, value being set clamped correctly),
 *           or an error code (which can be tested with ZSTD_isError()). */
ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value);
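/* Illustrative sketch (not part of zstd.h) : configuring a CCtx with the parameter API
 * declared above before starting a compression job. Only parameters from the enum above
 * are set; error handling is abbreviated. */
static size_t example_set_parameters(ZSTD_CCtx* cctx)
{
    size_t err;
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 19);          /* strong level */
    if (ZSTD_isError(err)) return err;
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_p_enableLongDistanceMatching, 1); /* large-input mode */
    if (ZSTD_isError(err)) return err;
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_p_windowLog, 27);                 /* 128 MB window */
    return err;                                                               /* 0, or error code */
}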

@ -1079,26 +1080,24 @@ ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param
ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);

/*! ZSTD_CCtx_loadDictionary() :
 *  Create an internal CDict from dict buffer.
 *  Decompression will have to use same buffer.
 *  Create an internal CDict from `dict` buffer.
 *  Decompression will have to use same dictionary.
 * @result : 0, or an error code (which can be tested with ZSTD_isError()).
 *  Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
 *            meaning "return to no-dictionary mode".
 *  Note 1 : `dict` content will be copied internally. Use
 *           ZSTD_CCtx_loadDictionary_byReference() to reference dictionary
 *           content instead. The dictionary buffer must then outlive its
 *           users.
 *  Special: Adding a NULL (or 0-size) dictionary invalidates previous dictionary,
 *           meaning "return to no-dictionary mode".
 *  Note 1 : Dictionary will be used for all future compression jobs.
 *           To return to "no-dictionary" situation, load a NULL dictionary
 *  Note 2 : Loading a dictionary involves building tables, which are dependent on compression parameters.
 *           For this reason, compression parameters cannot be changed anymore after loading a dictionary.
 *           It's also a CPU-heavy operation, with non-negligible impact on latency.
 *  Note 3 : Dictionary will be used for all future compression jobs.
 *           To return to "no-dictionary" situation, load a NULL dictionary
 *  Note 5 : Use ZSTD_CCtx_loadDictionary_advanced() to select how dictionary
 *           content will be interpreted.
 */
 *           It's also a CPU consuming operation, with non-negligible impact on latency.
 *  Note 3 : `dict` content will be copied internally.
 *           Use ZSTD_CCtx_loadDictionary_byReference() to reference dictionary content instead.
 *           In such a case, dictionary buffer must outlive its users.
 *  Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
 *           to precisely select how dictionary content must be interpreted. */
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictMode_e dictMode);
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);

/*! ZSTD_CCtx_refCDict() :
@ -1110,8 +1109,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void
 *  Special : adding a NULL CDict means "return to no-dictionary mode".
 *  Note 1 : Currently, only one dictionary can be managed.
 *           Adding a new dictionary effectively "discards" any previous one.
 *  Note 2 : CDict is just referenced, its lifetime must outlive CCtx.
 */
 *  Note 2 : CDict is just referenced, its lifetime must outlive CCtx. */
ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);

/*! ZSTD_CCtx_refPrefix() :
@ -1121,20 +1119,29 @@ ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
 *  Subsequent compression jobs will be done without prefix (if none is explicitly referenced).
 *  If there is a need to use same prefix multiple times, consider embedding it into a ZSTD_CDict instead.
 * @result : 0, or an error code (which can be tested with ZSTD_isError()).
 *  Special : Adding any prefix (including NULL) invalidates any previous prefix or dictionary
 *  Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
 *  Note 1 : Prefix buffer is referenced. It must outlive compression job.
 *  Note 2 : Referencing a prefix involves building tables, which are dependent on compression parameters.
 *           It's a CPU-heavy operation, with non-negligible impact on latency.
 *  Note 3 : By default, the prefix is treated as raw content
 *           (ZSTD_dm_rawContent). Use ZSTD_CCtx_refPrefix_advanced() to alter
 *           dictMode. */
 *           It's a CPU consuming operation, with non-negligible impact on latency.
 *  Note 3 : By default, the prefix is treated as raw content (ZSTD_dm_rawContent).
 *           Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode. */
ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize);
ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictMode_e dictMode);
ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);

/*! ZSTD_CCtx_reset() :
 *  Return a CCtx to clean state.
 *  Useful after an error, or to interrupt an ongoing compression job and start a new one.
 *  Any internal data not yet flushed is cancelled.
 *  Dictionary (if any) is dropped.
 *  All parameters are back to default values.
 *  It's possible to modify compression parameters after a reset.
 */
ZSTDLIB_API void ZSTD_CCtx_reset(ZSTD_CCtx* cctx);


typedef enum {
    ZSTD_e_continue=0, /* collect more data, encoder transparently decides when to output result, for optimal conditions */
    ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal conditions */
    ZSTD_e_flush,      /* flush any data provided so far - frame will continue, future data can still reference previous data for better compression */
    ZSTD_e_end         /* flush any remaining data and close current frame. Any additional data starts a new frame. */
} ZSTD_EndDirective;

@ -1150,10 +1157,11 @@ typedef enum {
 *    and then immediately returns, just indicating that there is some data remaining to be flushed.
 *    The function nonetheless guarantees forward progress : it will return only after it reads or writes at least 1+ byte.
 *  - Exception : in multi-threading mode, if the first call requests a ZSTD_e_end directive, it is blocking : it will complete compression before giving back control to caller.
 *  - @return provides the minimum amount of data remaining to be flushed from internal buffers
 *  - @return provides a minimum amount of data remaining to be flushed from internal buffers
 *    or an error code, which can be tested using ZSTD_isError().
 *    if @return != 0, flush is not fully completed, there is still some data left within internal buffers.
 *    This is useful to determine if a ZSTD_e_flush or ZSTD_e_end directive is completed.
 *    This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
 *    For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
 *  - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
 *    only ZSTD_e_end or ZSTD_e_flush operations are allowed.
 *    Before starting a new compression job, or changing compression parameters,
@ -1164,16 +1172,6 @@ ZSTDLIB_API size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
                                          ZSTD_inBuffer* input,
                                          ZSTD_EndDirective endOp);
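/* Illustrative sketch (not part of zstd.h) : compressing a whole buffer into one frame with
 * ZSTD_compress_generic() and ZSTD_e_end. In single-threaded (blocking) mode the loop
 * typically completes in one pass when dstCapacity >= ZSTD_compressBound(srcSize). */
static size_t example_compress_generic(ZSTD_CCtx* cctx,
                                       void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize)
{
    ZSTD_inBuffer  input  = { src, srcSize, 0 };
    ZSTD_outBuffer output = { dst, dstCapacity, 0 };
    size_t remaining;
    do {
        remaining = ZSTD_compress_generic(cctx, &output, &input, ZSTD_e_end);
        if (ZSTD_isError(remaining)) return remaining;   /* forward the error code */
    } while (remaining != 0);                            /* 0 means frame fully flushed */
    return output.pos;                                   /* number of compressed bytes written */
}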

/*! ZSTD_CCtx_reset() :
 *  Return a CCtx to clean state.
 *  Useful after an error, or to interrupt an ongoing compression job and start a new one.
 *  Any internal data not yet flushed is cancelled.
 *  Dictionary (if any) is dropped.
 *  All parameters are back to default values.
 *  It's possible to modify compression parameters after a reset.
 */
ZSTDLIB_API void ZSTD_CCtx_reset(ZSTD_CCtx* cctx);   /* Not ready yet ! */


/*! ZSTD_compress_generic_simpleArgs() :
 *  Same as ZSTD_compress_generic(),
@ -1207,25 +1205,26 @@ ZSTDLIB_API size_t ZSTD_compress_generic_simpleArgs (
 *  for static allocation for single-threaded compression.
 */
ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);

/*! ZSTD_resetCCtxParams() :
 *  Reset params to default, with the default compression level.
 */
/*! ZSTD_CCtxParams_reset() :
 *  Reset params to default values.
 */
ZSTDLIB_API size_t ZSTD_resetCCtxParams(ZSTD_CCtx_params* params);
ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);

/*! ZSTD_initCCtxParams() :
/*! ZSTD_CCtxParams_init() :
 *  Initializes the compression parameters of cctxParams according to
 *  compression level. All other parameters are reset to their default values.
 */
ZSTDLIB_API size_t ZSTD_initCCtxParams(ZSTD_CCtx_params* cctxParams, int compressionLevel);
ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);

/*! ZSTD_initCCtxParams_advanced() :
/*! ZSTD_CCtxParams_init_advanced() :
 *  Initializes the compression and frame parameters of cctxParams according to
 *  params. All other parameters are reset to their default values.
 */
ZSTDLIB_API size_t ZSTD_initCCtxParams_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);

ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);

/*! ZSTD_CCtxParam_setParameter() :
 *  Similar to ZSTD_CCtx_setParameter.
@ -1238,9 +1237,10 @@ ZSTDLIB_API size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* params, ZSTD_cP

/*! ZSTD_CCtx_setParametersUsingCCtxParams() :
 *  Apply a set of ZSTD_CCtx_params to the compression context.
 *  This must be done before the dictionary is loaded.
 *  The pledgedSrcSize is treated as unknown.
 *  Multithreading parameters are applied only if nbThreads > 1.
 *  This can be done even after compression is started,
 *    if nbWorkers==0, this will have no impact until a new compression is started.
 *    if nbWorkers>=1, new parameters will be picked up at next job,
 *       with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).
 */
ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
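/* Illustrative sketch (not part of zstd.h) : preparing a reusable parameter set and applying
 * it to a CCtx. Uses only the CCtxParams entry points declared above; error handling is
 * abbreviated, and (size_t)-1 merely stands in for "allocation failed" in this sketch. */
static size_t example_apply_params(ZSTD_CCtx* cctx)
{
    ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
    size_t err;
    if (params == NULL) return (size_t)-1;                                  /* allocation failure */
    err = ZSTD_CCtxParam_setParameter(params, ZSTD_p_compressionLevel, 6);  /* reusable setting */
    if (!ZSTD_isError(err))
        err = ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);         /* copy into cctx */
    ZSTD_freeCCtxParams(params);                                            /* params no longer needed */
    return err;
}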

@ -1267,9 +1267,9 @@ ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
 *  Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to select
 *           how dictionary content will be interpreted and loaded.
 */
ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);   /* not implemented */
ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);   /* not implemented */
ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictMode_e dictMode);   /* not implemented */
ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);


/*! ZSTD_DCtx_refDDict() :
@ -1281,7 +1281,7 @@ ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void
 *  Special : adding a NULL DDict means "return to no-dictionary mode".
 *  Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
 */
ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);   /* not implemented */
ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);


/*! ZSTD_DCtx_refPrefix() :
@ -1295,8 +1295,8 @@ ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
 *  Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode.
 *  Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.
 */
ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize);   /* not implemented */
ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictMode_e dictMode);   /* not implemented */
ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize);
ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
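/* Illustrative sketch (not part of zstd.h) : streaming decompression with a dictionary loaded
 * through the DCtx parameter API above. ZSTD_decompress_generic() is assumed to be declared
 * elsewhere in this header with the usual (dctx, output, input) prototype. */
static size_t example_decompress_with_dict(ZSTD_DCtx* dctx,
                                           const void* dict, size_t dictSize,
                                           void* dst, size_t dstCapacity,
                                           const void* src, size_t srcSize)
{
    ZSTD_inBuffer  input  = { src, srcSize, 0 };
    ZSTD_outBuffer output = { dst, dstCapacity, 0 };
    size_t err = ZSTD_DCtx_loadDictionary(dctx, dict, dictSize);   /* dict content is copied */
    if (ZSTD_isError(err)) return err;
    while (input.pos < input.size) {
        err = ZSTD_decompress_generic(dctx, &output, &input);      /* returns 0 when a frame completes */
        if (ZSTD_isError(err)) return err;
    }
    return output.pos;                                             /* decompressed bytes written */
}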


/*! ZSTD_DCtx_setMaxWindowSize() :
@ -1389,7 +1389,7 @@ ZSTDLIB_API void ZSTD_DCtx_reset(ZSTD_DCtx* dctx);
ZSTDLIB_API size_t ZSTD_getBlockSize   (const ZSTD_CCtx* cctx);
ZSTDLIB_API size_t ZSTD_compressBlock  (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
ZSTDLIB_API size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression */
ZSTDLIB_API size_t ZSTD_insertBlock    (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */


#endif   /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */