2017-10-26 20:41:47 +00:00
/*
2023-05-22 12:32:14 +00:00
* Copyright ( c ) Meta Platforms , Inc . and affiliates .
2017-10-26 20:41:47 +00:00
* All rights reserved .
*
* This source code is licensed under both the BSD - style license ( found in the
* LICENSE file in the root directory of this source tree ) and the GPLv2 ( found
* in the COPYING file in the root directory of this source tree ) .
* You may select , at your option , one of the above - listed licenses .
*/
2018-01-13 12:50:59 +00:00
# include "zstd_compress_internal.h"
2019-01-04 00:30:03 +00:00
# include "hist.h"
2017-10-26 20:41:47 +00:00
# include "zstd_opt.h"
2019-01-04 00:30:03 +00:00
# define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
2018-01-13 12:50:59 +00:00
# define ZSTD_MAX_PRICE (1<<30)
2023-05-22 12:32:14 +00:00
# define ZSTD_PREDEF_THRESHOLD 8 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
2019-01-04 00:30:03 +00:00
2017-10-26 20:41:47 +00:00
/*-*************************************
* Price functions for optimal parser
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2019-01-04 00:30:03 +00:00
2022-01-24 10:04:45 +00:00
#if 0 /* approximation at bit level (for tests) */
2019-01-04 00:30:03 +00:00
# define BITCOST_ACCURACY 0
# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
2023-05-22 12:32:14 +00:00
# define WEIGHT(stat, opt) ((void)(opt), ZSTD_bitWeight(stat))
2022-01-24 10:04:45 +00:00
# elif 0 /* fractional bit accuracy (for tests) */
2019-01-04 00:30:03 +00:00
# define BITCOST_ACCURACY 8
# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
2023-05-22 12:32:14 +00:00
# define WEIGHT(stat,opt) ((void)(opt), ZSTD_fracWeight(stat))
2019-01-04 00:30:03 +00:00
# else /* opt==approx, ultra==accurate */
# define BITCOST_ACCURACY 8
# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
2023-05-22 12:32:14 +00:00
# define WEIGHT(stat,opt) ((opt) ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
2019-01-04 00:30:03 +00:00
# endif
2023-05-22 12:32:14 +00:00
/* ZSTD_bitWeight() :
* provide estimated " cost " of a stat in full bits only */
2019-01-04 00:30:03 +00:00
MEM_STATIC U32 ZSTD_bitWeight ( U32 stat )
2017-10-26 20:41:47 +00:00
{
2019-01-04 00:30:03 +00:00
return ( ZSTD_highbit32 ( stat + 1 ) * BITCOST_MULTIPLIER ) ;
2017-10-26 20:41:47 +00:00
}
2023-05-22 12:32:14 +00:00
/* ZSTD_fracWeight() :
* provide fractional - bit " cost " of a stat ,
* using linear interpolation approximation */
2019-01-04 00:30:03 +00:00
MEM_STATIC U32 ZSTD_fracWeight ( U32 rawStat )
{
U32 const stat = rawStat + 1 ;
U32 const hb = ZSTD_highbit32 ( stat ) ;
U32 const BWeight = hb * BITCOST_MULTIPLIER ;
2023-05-22 12:32:14 +00:00
/* Fweight was meant for "Fractional weight"
* but it ' s effectively a value between 1 and 2
* using fixed point arithmetic */
2019-01-04 00:30:03 +00:00
U32 const FWeight = ( stat < < BITCOST_ACCURACY ) > > hb ;
U32 const weight = BWeight + FWeight ;
assert ( hb + BITCOST_ACCURACY < 31 ) ;
return weight ;
}
2017-10-26 20:41:47 +00:00
2019-01-04 00:30:03 +00:00
# if (DEBUGLEVEL>=2)
/* debugging function,
* @ return price in bytes as fractional value
* for debug messages only */
2023-05-22 12:32:14 +00:00
MEM_STATIC double ZSTD_fCost ( int price )
2017-10-26 20:41:47 +00:00
{
2019-01-04 00:30:03 +00:00
return ( double ) price / ( BITCOST_MULTIPLIER * 8 ) ;
}
# endif
2017-10-26 20:41:47 +00:00
2019-04-18 09:53:29 +00:00
static int ZSTD_compressedLiterals ( optState_t const * const optPtr )
{
2022-01-24 10:04:45 +00:00
return optPtr - > literalCompressionMode ! = ZSTD_ps_disable ;
2019-04-18 09:53:29 +00:00
}
2019-01-04 00:30:03 +00:00
static void ZSTD_setBasePrices ( optState_t * optPtr , int optLevel )
{
2019-04-18 09:53:29 +00:00
if ( ZSTD_compressedLiterals ( optPtr ) )
optPtr - > litSumBasePrice = WEIGHT ( optPtr - > litSum , optLevel ) ;
2019-01-04 00:30:03 +00:00
optPtr - > litLengthSumBasePrice = WEIGHT ( optPtr - > litLengthSum , optLevel ) ;
optPtr - > matchLengthSumBasePrice = WEIGHT ( optPtr - > matchLengthSum , optLevel ) ;
optPtr - > offCodeSumBasePrice = WEIGHT ( optPtr - > offCodeSum , optLevel ) ;
}
2017-10-26 20:41:47 +00:00
2018-01-13 12:50:59 +00:00
2022-01-24 10:04:45 +00:00
static U32 sum_u32 ( const unsigned table [ ] , size_t nbElts )
{
size_t n ;
U32 total = 0 ;
for ( n = 0 ; n < nbElts ; n + + ) {
total + = table [ n ] ;
}
return total ;
}
2023-05-22 12:32:14 +00:00
typedef enum { base_0possible = 0 , base_1guaranteed = 1 } base_directive_e ;
static U32
ZSTD_downscaleStats ( unsigned * table , U32 lastEltIndex , U32 shift , base_directive_e base1 )
2019-01-04 00:30:03 +00:00
{
U32 s , sum = 0 ;
2023-05-22 12:32:14 +00:00
DEBUGLOG ( 5 , " ZSTD_downscaleStats (nbElts=%u, shift=%u) " ,
( unsigned ) lastEltIndex + 1 , ( unsigned ) shift ) ;
2022-01-24 10:04:45 +00:00
assert ( shift < 30 ) ;
2019-01-04 00:30:03 +00:00
for ( s = 0 ; s < lastEltIndex + 1 ; s + + ) {
2023-05-22 12:32:14 +00:00
unsigned const base = base1 ? 1 : ( table [ s ] > 0 ) ;
unsigned const newStat = base + ( table [ s ] > > shift ) ;
sum + = newStat ;
table [ s ] = newStat ;
2019-01-04 00:30:03 +00:00
}
return sum ;
}
2022-01-24 10:04:45 +00:00
/* ZSTD_scaleStats() :
2023-05-22 12:32:14 +00:00
* reduce all elt frequencies in table if sum too large
2022-01-24 10:04:45 +00:00
* return the resulting sum of elements */
static U32 ZSTD_scaleStats ( unsigned * table , U32 lastEltIndex , U32 logTarget )
{
U32 const prevsum = sum_u32 ( table , lastEltIndex + 1 ) ;
U32 const factor = prevsum > > logTarget ;
DEBUGLOG ( 5 , " ZSTD_scaleStats (nbElts=%u, target=%u) " , ( unsigned ) lastEltIndex + 1 , ( unsigned ) logTarget ) ;
assert ( logTarget < 30 ) ;
if ( factor < = 1 ) return prevsum ;
2023-05-22 12:32:14 +00:00
return ZSTD_downscaleStats ( table , lastEltIndex , ZSTD_highbit32 ( factor ) , base_1guaranteed ) ;
2022-01-24 10:04:45 +00:00
}
2019-01-04 00:30:03 +00:00
/* ZSTD_rescaleFreqs() :
* if first block ( detected by optPtr - > litLengthSum = = 0 ) : init statistics
* take hints from dictionary if there is one
2022-01-24 10:04:45 +00:00
* and init from zero if there is none ,
* using src for literals stats , and baseline stats for sequence symbols
2019-01-04 00:30:03 +00:00
* otherwise downscale existing stats , to be used as seed for next block .
*/
static void
ZSTD_rescaleFreqs ( optState_t * const optPtr ,
const BYTE * const src , size_t const srcSize ,
int const optLevel )
{
2019-04-18 09:53:29 +00:00
int const compressedLiterals = ZSTD_compressedLiterals ( optPtr ) ;
2019-01-04 00:30:03 +00:00
DEBUGLOG ( 5 , " ZSTD_rescaleFreqs (srcSize=%u) " , ( unsigned ) srcSize ) ;
optPtr - > priceType = zop_dynamic ;
2023-05-22 12:32:14 +00:00
if ( optPtr - > litLengthSum = = 0 ) { /* no literals stats collected -> first block assumed -> init */
/* heuristic: use pre-defined stats for too small inputs */
if ( srcSize < = ZSTD_PREDEF_THRESHOLD ) {
DEBUGLOG ( 5 , " srcSize <= %i : use predefined stats " , ZSTD_PREDEF_THRESHOLD ) ;
2019-01-04 00:30:03 +00:00
optPtr - > priceType = zop_predef ;
2017-10-26 20:41:47 +00:00
}
2019-01-04 00:30:03 +00:00
assert ( optPtr - > symbolCosts ! = NULL ) ;
if ( optPtr - > symbolCosts - > huf . repeatMode = = HUF_repeat_valid ) {
2023-05-22 12:32:14 +00:00
/* huffman stats covering the full value set : table presumed generated by dictionary */
2019-01-04 00:30:03 +00:00
optPtr - > priceType = zop_dynamic ;
2019-04-18 09:53:29 +00:00
if ( compressedLiterals ) {
2023-05-22 12:32:14 +00:00
/* generate literals statistics from huffman table */
2019-04-18 09:53:29 +00:00
unsigned lit ;
assert ( optPtr - > litFreq ! = NULL ) ;
optPtr - > litSum = 0 ;
2019-01-04 00:30:03 +00:00
for ( lit = 0 ; lit < = MaxLit ; lit + + ) {
U32 const scaleLog = 11 ; /* scale to 2K */
2022-01-24 10:04:45 +00:00
U32 const bitCost = HUF_getNbBitsFromCTable ( optPtr - > symbolCosts - > huf . CTable , lit ) ;
2019-01-04 00:30:03 +00:00
assert ( bitCost < = scaleLog ) ;
optPtr - > litFreq [ lit ] = bitCost ? 1 < < ( scaleLog - bitCost ) : 1 /*minimum to calculate cost*/ ;
optPtr - > litSum + = optPtr - > litFreq [ lit ] ;
} }
{ unsigned ll ;
FSE_CState_t llstate ;
FSE_initCState ( & llstate , optPtr - > symbolCosts - > fse . litlengthCTable ) ;
optPtr - > litLengthSum = 0 ;
for ( ll = 0 ; ll < = MaxLL ; ll + + ) {
U32 const scaleLog = 10 ; /* scale to 1K */
U32 const bitCost = FSE_getMaxNbBits ( llstate . symbolTT , ll ) ;
assert ( bitCost < scaleLog ) ;
optPtr - > litLengthFreq [ ll ] = bitCost ? 1 < < ( scaleLog - bitCost ) : 1 /*minimum to calculate cost*/ ;
optPtr - > litLengthSum + = optPtr - > litLengthFreq [ ll ] ;
} }
{ unsigned ml ;
FSE_CState_t mlstate ;
FSE_initCState ( & mlstate , optPtr - > symbolCosts - > fse . matchlengthCTable ) ;
optPtr - > matchLengthSum = 0 ;
for ( ml = 0 ; ml < = MaxML ; ml + + ) {
U32 const scaleLog = 10 ;
U32 const bitCost = FSE_getMaxNbBits ( mlstate . symbolTT , ml ) ;
assert ( bitCost < scaleLog ) ;
optPtr - > matchLengthFreq [ ml ] = bitCost ? 1 < < ( scaleLog - bitCost ) : 1 /*minimum to calculate cost*/ ;
optPtr - > matchLengthSum + = optPtr - > matchLengthFreq [ ml ] ;
} }
{ unsigned of ;
FSE_CState_t ofstate ;
FSE_initCState ( & ofstate , optPtr - > symbolCosts - > fse . offcodeCTable ) ;
optPtr - > offCodeSum = 0 ;
for ( of = 0 ; of < = MaxOff ; of + + ) {
U32 const scaleLog = 10 ;
U32 const bitCost = FSE_getMaxNbBits ( ofstate . symbolTT , of ) ;
assert ( bitCost < scaleLog ) ;
optPtr - > offCodeFreq [ of ] = bitCost ? 1 < < ( scaleLog - bitCost ) : 1 /*minimum to calculate cost*/ ;
optPtr - > offCodeSum + = optPtr - > offCodeFreq [ of ] ;
} }
2023-05-22 12:32:14 +00:00
} else { /* first block, no dictionary */
2019-01-04 00:30:03 +00:00
assert ( optPtr - > litFreq ! = NULL ) ;
2019-04-18 09:53:29 +00:00
if ( compressedLiterals ) {
2023-05-22 12:32:14 +00:00
/* base initial cost of literals on direct frequency within src */
2019-04-18 09:53:29 +00:00
unsigned lit = MaxLit ;
2019-01-04 00:30:03 +00:00
HIST_count_simple ( optPtr - > litFreq , & lit , src , srcSize ) ; /* use raw first block to init statistics */
2023-05-22 12:32:14 +00:00
optPtr - > litSum = ZSTD_downscaleStats ( optPtr - > litFreq , MaxLit , 8 , base_0possible ) ;
2019-01-04 00:30:03 +00:00
}
2022-01-24 10:04:45 +00:00
{ unsigned const baseLLfreqs [ MaxLL + 1 ] = {
4 , 2 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1
} ;
ZSTD_memcpy ( optPtr - > litLengthFreq , baseLLfreqs , sizeof ( baseLLfreqs ) ) ;
optPtr - > litLengthSum = sum_u32 ( baseLLfreqs , MaxLL + 1 ) ;
2019-01-04 00:30:03 +00:00
}
{ unsigned ml ;
for ( ml = 0 ; ml < = MaxML ; ml + + )
optPtr - > matchLengthFreq [ ml ] = 1 ;
}
optPtr - > matchLengthSum = MaxML + 1 ;
2022-01-24 10:04:45 +00:00
{ unsigned const baseOFCfreqs [ MaxOff + 1 ] = {
6 , 2 , 1 , 1 , 2 , 3 , 4 , 4 ,
4 , 3 , 2 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
1 , 1 , 1 , 1 , 1 , 1 , 1 , 1
} ;
ZSTD_memcpy ( optPtr - > offCodeFreq , baseOFCfreqs , sizeof ( baseOFCfreqs ) ) ;
optPtr - > offCodeSum = sum_u32 ( baseOFCfreqs , MaxOff + 1 ) ;
2019-01-04 00:30:03 +00:00
}
2022-01-24 10:04:45 +00:00
2017-10-26 20:41:47 +00:00
}
2019-01-04 00:30:03 +00:00
2023-05-22 12:32:14 +00:00
} else { /* new block : scale down accumulated statistics */
2019-01-04 00:30:03 +00:00
2019-04-18 09:53:29 +00:00
if ( compressedLiterals )
2022-01-24 10:04:45 +00:00
optPtr - > litSum = ZSTD_scaleStats ( optPtr - > litFreq , MaxLit , 12 ) ;
optPtr - > litLengthSum = ZSTD_scaleStats ( optPtr - > litLengthFreq , MaxLL , 11 ) ;
optPtr - > matchLengthSum = ZSTD_scaleStats ( optPtr - > matchLengthFreq , MaxML , 11 ) ;
optPtr - > offCodeSum = ZSTD_scaleStats ( optPtr - > offCodeFreq , MaxOff , 11 ) ;
2017-10-26 20:41:47 +00:00
}
2019-01-04 00:30:03 +00:00
ZSTD_setBasePrices ( optPtr , optLevel ) ;
2017-10-26 20:41:47 +00:00
}
2018-01-13 12:50:59 +00:00
/* ZSTD_rawLiteralsCost() :
2019-01-04 00:30:03 +00:00
* price of literals ( only ) in specified segment ( which length can be 0 ) .
* does not include price of literalLength symbol */
2018-01-13 12:50:59 +00:00
static U32 ZSTD_rawLiteralsCost ( const BYTE * const literals , U32 const litLength ,
2019-01-04 00:30:03 +00:00
const optState_t * const optPtr ,
int optLevel )
2017-10-26 20:41:47 +00:00
{
2018-01-13 12:50:59 +00:00
if ( litLength = = 0 ) return 0 ;
2019-04-18 09:53:29 +00:00
if ( ! ZSTD_compressedLiterals ( optPtr ) )
return ( litLength < < 3 ) * BITCOST_MULTIPLIER ; /* Uncompressed - 8 bytes per literal. */
2019-01-04 00:30:03 +00:00
if ( optPtr - > priceType = = zop_predef )
return ( litLength * 6 ) * BITCOST_MULTIPLIER ; /* 6 bit per literal - no statistic used */
/* dynamic statistics */
2023-05-22 12:32:14 +00:00
{ U32 price = optPtr - > litSumBasePrice * litLength ;
U32 const litPriceMax = optPtr - > litSumBasePrice - BITCOST_MULTIPLIER ;
2019-01-04 00:30:03 +00:00
U32 u ;
2023-05-22 12:32:14 +00:00
assert ( optPtr - > litSumBasePrice > = BITCOST_MULTIPLIER ) ;
2019-01-04 00:30:03 +00:00
for ( u = 0 ; u < litLength ; u + + ) {
2023-05-22 12:32:14 +00:00
U32 litPrice = WEIGHT ( optPtr - > litFreq [ literals [ u ] ] , optLevel ) ;
if ( UNLIKELY ( litPrice > litPriceMax ) ) litPrice = litPriceMax ;
price - = litPrice ;
2019-01-04 00:30:03 +00:00
}
return price ;
2017-10-26 20:41:47 +00:00
}
2018-01-13 12:50:59 +00:00
}
/* ZSTD_litLengthPrice() :
* cost of literalLength symbol */
2019-01-04 00:30:03 +00:00
static U32 ZSTD_litLengthPrice ( U32 const litLength , const optState_t * const optPtr , int optLevel )
2018-01-13 12:50:59 +00:00
{
2022-01-24 10:04:45 +00:00
assert ( litLength < = ZSTD_BLOCKSIZE_MAX ) ;
if ( optPtr - > priceType = = zop_predef )
return WEIGHT ( litLength , optLevel ) ;
2023-05-22 12:32:14 +00:00
/* ZSTD_LLcode() can't compute litLength price for sizes >= ZSTD_BLOCKSIZE_MAX
* because it isn ' t representable in the zstd format .
* So instead just pretend it would cost 1 bit more than ZSTD_BLOCKSIZE_MAX - 1.
* In such a case , the block would be all literals .
2022-01-24 10:04:45 +00:00
*/
if ( litLength = = ZSTD_BLOCKSIZE_MAX )
return BITCOST_MULTIPLIER + ZSTD_litLengthPrice ( ZSTD_BLOCKSIZE_MAX - 1 , optPtr , optLevel ) ;
2017-10-26 20:41:47 +00:00
2019-01-04 00:30:03 +00:00
/* dynamic statistics */
2018-01-13 12:50:59 +00:00
{ U32 const llCode = ZSTD_LLcode ( litLength ) ;
2019-01-04 00:30:03 +00:00
return ( LL_bits [ llCode ] * BITCOST_MULTIPLIER )
+ optPtr - > litLengthSumBasePrice
- WEIGHT ( optPtr - > litLengthFreq [ llCode ] , optLevel ) ;
2017-10-26 20:41:47 +00:00
}
2018-01-13 12:50:59 +00:00
}
2017-10-26 20:41:47 +00:00
2018-01-13 12:50:59 +00:00
/* ZSTD_getMatchPrice() :
2023-05-22 12:32:14 +00:00
* Provides the cost of the match part ( offset + matchLength ) of a sequence .
2018-01-13 12:50:59 +00:00
* Must be combined with ZSTD_fullLiteralsCost ( ) to get the full cost of a sequence .
2023-05-22 12:32:14 +00:00
* @ offBase : sumtype , representing an offset or a repcode , and using numeric representation of ZSTD_storeSeq ( )
2022-01-24 10:04:45 +00:00
* @ optLevel : when < 2 , favors small offset for decompression speed ( improved cache efficiency )
*/
2019-01-04 00:30:03 +00:00
FORCE_INLINE_TEMPLATE U32
2023-05-22 12:32:14 +00:00
ZSTD_getMatchPrice ( U32 const offBase ,
2019-01-04 00:30:03 +00:00
U32 const matchLength ,
const optState_t * const optPtr ,
int const optLevel )
2017-10-26 20:41:47 +00:00
{
U32 price ;
2023-05-22 12:32:14 +00:00
U32 const offCode = ZSTD_highbit32 ( offBase ) ;
2018-01-13 12:50:59 +00:00
U32 const mlBase = matchLength - MINMATCH ;
assert ( matchLength > = MINMATCH ) ;
2017-10-26 20:41:47 +00:00
2023-05-22 12:32:14 +00:00
if ( optPtr - > priceType = = zop_predef ) /* fixed scheme, does not use statistics */
return WEIGHT ( mlBase , optLevel )
+ ( ( 16 + offCode ) * BITCOST_MULTIPLIER ) ; /* emulated offset cost */
2017-10-26 20:41:47 +00:00
2019-01-04 00:30:03 +00:00
/* dynamic statistics */
price = ( offCode * BITCOST_MULTIPLIER ) + ( optPtr - > offCodeSumBasePrice - WEIGHT ( optPtr - > offCodeFreq [ offCode ] , optLevel ) ) ;
if ( ( optLevel < 2 ) /*static*/ & & offCode > = 20 )
price + = ( offCode - 19 ) * 2 * BITCOST_MULTIPLIER ; /* handicap for long distance offsets, favor decompression speed */
2017-10-26 20:41:47 +00:00
/* match Length */
2018-01-13 12:50:59 +00:00
{ U32 const mlCode = ZSTD_MLcode ( mlBase ) ;
2019-01-04 00:30:03 +00:00
price + = ( ML_bits [ mlCode ] * BITCOST_MULTIPLIER ) + ( optPtr - > matchLengthSumBasePrice - WEIGHT ( optPtr - > matchLengthFreq [ mlCode ] , optLevel ) ) ;
2017-10-26 20:41:47 +00:00
}
2019-01-04 00:30:03 +00:00
price + = BITCOST_MULTIPLIER / 5 ; /* heuristic : make matches a bit more costly to favor less sequences -> faster decompression speed */
2018-01-13 12:50:59 +00:00
DEBUGLOG ( 8 , " ZSTD_getMatchPrice(ml:%u) = %u " , matchLength , price ) ;
return price ;
2017-10-26 20:41:47 +00:00
}
2019-01-04 00:30:03 +00:00
/* ZSTD_updateStats() :
2023-05-22 12:32:14 +00:00
* assumption : literals + litLength < = iend */
2018-01-13 12:50:59 +00:00
static void ZSTD_updateStats ( optState_t * const optPtr ,
U32 litLength , const BYTE * literals ,
2023-05-22 12:32:14 +00:00
U32 offBase , U32 matchLength )
2017-10-26 20:41:47 +00:00
{
/* literals */
2019-04-18 09:53:29 +00:00
if ( ZSTD_compressedLiterals ( optPtr ) ) {
U32 u ;
2018-01-13 12:50:59 +00:00
for ( u = 0 ; u < litLength ; u + + )
optPtr - > litFreq [ literals [ u ] ] + = ZSTD_LITFREQ_ADD ;
optPtr - > litSum + = litLength * ZSTD_LITFREQ_ADD ;
}
2017-10-26 20:41:47 +00:00
/* literal Length */
2018-01-13 12:50:59 +00:00
{ U32 const llCode = ZSTD_LLcode ( litLength ) ;
2017-10-26 20:41:47 +00:00
optPtr - > litLengthFreq [ llCode ] + + ;
optPtr - > litLengthSum + + ;
}
2023-05-22 12:32:14 +00:00
/* offset code : follows storeSeq() numeric representation */
{ U32 const offCode = ZSTD_highbit32 ( offBase ) ;
2018-01-13 12:50:59 +00:00
assert ( offCode < = MaxOff ) ;
2017-10-26 20:41:47 +00:00
optPtr - > offCodeFreq [ offCode ] + + ;
2018-01-13 12:50:59 +00:00
optPtr - > offCodeSum + + ;
2017-10-26 20:41:47 +00:00
}
/* match Length */
2018-01-13 12:50:59 +00:00
{ U32 const mlBase = matchLength - MINMATCH ;
U32 const mlCode = ZSTD_MLcode ( mlBase ) ;
2017-10-26 20:41:47 +00:00
optPtr - > matchLengthFreq [ mlCode ] + + ;
optPtr - > matchLengthSum + + ;
}
}
2018-01-13 12:50:59 +00:00
/* ZSTD_readMINMATCH() :
* function safe only for comparisons
* assumption : memPtr must be at least 4 bytes before end of buffer */
MEM_STATIC U32 ZSTD_readMINMATCH ( const void * memPtr , U32 length )
2017-10-26 20:41:47 +00:00
{
switch ( length )
{
default :
case 4 : return MEM_read32 ( memPtr ) ;
case 3 : if ( MEM_isLittleEndian ( ) )
return MEM_read32 ( memPtr ) < < 8 ;
else
return MEM_read32 ( memPtr ) > > 8 ;
}
}
/* Update hashTable3 up to ip (excluded)
Assumption : always within prefix ( i . e . not within extDict ) */
2022-01-24 10:04:45 +00:00
static U32 ZSTD_insertAndFindFirstIndexHash3 ( const ZSTD_matchState_t * ms ,
2019-07-20 18:47:07 +00:00
U32 * nextToUpdate3 ,
const BYTE * const ip )
2017-10-26 20:41:47 +00:00
{
2018-05-15 17:45:22 +00:00
U32 * const hashTable3 = ms - > hashTable3 ;
U32 const hashLog3 = ms - > hashLog3 ;
const BYTE * const base = ms - > window . base ;
2019-07-20 18:47:07 +00:00
U32 idx = * nextToUpdate3 ;
U32 const target = ( U32 ) ( ip - base ) ;
2018-01-13 12:50:59 +00:00
size_t const hash3 = ZSTD_hash3Ptr ( ip , hashLog3 ) ;
2018-05-15 17:45:22 +00:00
assert ( hashLog3 > 0 ) ;
2017-10-26 20:41:47 +00:00
while ( idx < target ) {
hashTable3 [ ZSTD_hash3Ptr ( base + idx , hashLog3 ) ] = idx ;
idx + + ;
}
2019-07-20 18:47:07 +00:00
* nextToUpdate3 = target ;
2017-10-26 20:41:47 +00:00
return hashTable3 [ hash3 ] ;
}
/*-*************************************
* Binary Tree search
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2018-05-15 17:45:22 +00:00
/** ZSTD_insertBt1() : add one or multiple positions to tree.
2022-01-24 10:04:45 +00:00
* @ param ip assumed < = iend - 8 .
* @ param target The target of ZSTD_updateTree_internal ( ) - we are filling to this position
2018-05-15 17:45:22 +00:00
* @ return : nb of positions added */
static U32 ZSTD_insertBt1 (
2022-01-24 10:04:45 +00:00
const ZSTD_matchState_t * ms ,
2018-05-15 17:45:22 +00:00
const BYTE * const ip , const BYTE * const iend ,
2022-01-24 10:04:45 +00:00
U32 const target ,
2019-01-04 00:30:03 +00:00
U32 const mls , const int extDict )
2018-05-15 17:45:22 +00:00
{
2019-01-04 00:30:03 +00:00
const ZSTD_compressionParameters * const cParams = & ms - > cParams ;
2018-05-15 17:45:22 +00:00
U32 * const hashTable = ms - > hashTable ;
U32 const hashLog = cParams - > hashLog ;
size_t const h = ZSTD_hashPtr ( ip , hashLog , mls ) ;
U32 * const bt = ms - > chainTable ;
U32 const btLog = cParams - > chainLog - 1 ;
U32 const btMask = ( 1 < < btLog ) - 1 ;
U32 matchIndex = hashTable [ h ] ;
size_t commonLengthSmaller = 0 , commonLengthLarger = 0 ;
const BYTE * const base = ms - > window . base ;
const BYTE * const dictBase = ms - > window . dictBase ;
const U32 dictLimit = ms - > window . dictLimit ;
const BYTE * const dictEnd = dictBase + dictLimit ;
const BYTE * const prefixStart = base + dictLimit ;
const BYTE * match ;
2021-01-08 10:21:43 +00:00
const U32 curr = ( U32 ) ( ip - base ) ;
const U32 btLow = btMask > = curr ? 0 : curr - btMask ;
U32 * smallerPtr = bt + 2 * ( curr & btMask ) ;
2018-05-15 17:45:22 +00:00
U32 * largerPtr = smallerPtr + 1 ;
U32 dummy32 ; /* to be nullified at the end */
2022-01-24 10:04:45 +00:00
/* windowLow is based on target because
* we only need positions that will be in the window at the end of the tree update .
*/
U32 const windowLow = ZSTD_getLowestMatchIndex ( ms , target , cParams - > windowLog ) ;
2021-01-08 10:21:43 +00:00
U32 matchEndIdx = curr + 8 + 1 ;
2018-05-15 17:45:22 +00:00
size_t bestLength = 8 ;
U32 nbCompares = 1U < < cParams - > searchLog ;
# ifdef ZSTD_C_PREDICT
2021-01-08 10:21:43 +00:00
U32 predictedSmall = * ( bt + 2 * ( ( curr - 1 ) & btMask ) + 0 ) ;
U32 predictedLarge = * ( bt + 2 * ( ( curr - 1 ) & btMask ) + 1 ) ;
2018-05-15 17:45:22 +00:00
predictedSmall + = ( predictedSmall > 0 ) ;
predictedLarge + = ( predictedLarge > 0 ) ;
# endif /* ZSTD_C_PREDICT */
2021-01-08 10:21:43 +00:00
DEBUGLOG ( 8 , " ZSTD_insertBt1 (%u) " , curr ) ;
2018-05-15 17:45:22 +00:00
2022-01-24 10:04:45 +00:00
assert ( curr < = target ) ;
2018-05-15 17:45:22 +00:00
assert ( ip < = iend - 8 ) ; /* required for h calculation */
2021-01-08 10:21:43 +00:00
hashTable [ h ] = curr ; /* Update Hash Table */
2018-05-15 17:45:22 +00:00
2019-01-04 00:30:03 +00:00
assert ( windowLow > 0 ) ;
2022-01-24 10:04:45 +00:00
for ( ; nbCompares & & ( matchIndex > = windowLow ) ; - - nbCompares ) {
2018-05-15 17:45:22 +00:00
U32 * const nextPtr = bt + 2 * ( matchIndex & btMask ) ;
size_t matchLength = MIN ( commonLengthSmaller , commonLengthLarger ) ; /* guaranteed minimum nb of common bytes */
2021-01-08 10:21:43 +00:00
assert ( matchIndex < curr ) ;
2018-05-15 17:45:22 +00:00
# ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */
const U32 * predictPtr = bt + 2 * ( ( matchIndex - 1 ) & btMask ) ; /* written this way, as bt is a roll buffer */
if ( matchIndex = = predictedSmall ) {
/* no need to check length, result known */
* smallerPtr = matchIndex ;
if ( matchIndex < = btLow ) { smallerPtr = & dummy32 ; break ; } /* beyond tree size, stop the search */
smallerPtr = nextPtr + 1 ; /* new "smaller" => larger of match */
matchIndex = nextPtr [ 1 ] ; /* new matchIndex larger than previous (closer to current) */
predictedSmall = predictPtr [ 1 ] + ( predictPtr [ 1 ] > 0 ) ;
continue ;
}
if ( matchIndex = = predictedLarge ) {
* largerPtr = matchIndex ;
if ( matchIndex < = btLow ) { largerPtr = & dummy32 ; break ; } /* beyond tree size, stop the search */
largerPtr = nextPtr ;
matchIndex = nextPtr [ 0 ] ;
predictedLarge = predictPtr [ 0 ] + ( predictPtr [ 0 ] > 0 ) ;
continue ;
}
# endif
2019-01-04 00:30:03 +00:00
if ( ! extDict | | ( matchIndex + matchLength > = dictLimit ) ) {
assert ( matchIndex + matchLength > = dictLimit ) ; /* might be wrong if actually extDict */
2018-05-15 17:45:22 +00:00
match = base + matchIndex ;
matchLength + = ZSTD_count ( ip + matchLength , match + matchLength , iend ) ;
} else {
match = dictBase + matchIndex ;
matchLength + = ZSTD_count_2segments ( ip + matchLength , match + matchLength , iend , dictEnd , prefixStart ) ;
if ( matchIndex + matchLength > = dictLimit )
match = base + matchIndex ; /* to prepare for next usage of match[matchLength] */
}
if ( matchLength > bestLength ) {
bestLength = matchLength ;
if ( matchLength > matchEndIdx - matchIndex )
matchEndIdx = matchIndex + ( U32 ) matchLength ;
}
if ( ip + matchLength = = iend ) { /* equal : no way to know if inf or sup */
break ; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
}
if ( match [ matchLength ] < ip [ matchLength ] ) { /* necessarily within buffer */
/* match is smaller than current */
* smallerPtr = matchIndex ; /* update smaller idx */
commonLengthSmaller = matchLength ; /* all smaller will now have at least this guaranteed common length */
if ( matchIndex < = btLow ) { smallerPtr = & dummy32 ; break ; } /* beyond tree size, stop searching */
smallerPtr = nextPtr + 1 ; /* new "candidate" => larger than match, which was smaller than target */
matchIndex = nextPtr [ 1 ] ; /* new matchIndex, larger than previous and closer to current */
} else {
/* match is larger than current */
* largerPtr = matchIndex ;
commonLengthLarger = matchLength ;
if ( matchIndex < = btLow ) { largerPtr = & dummy32 ; break ; } /* beyond tree size, stop searching */
largerPtr = nextPtr ;
matchIndex = nextPtr [ 0 ] ;
} }
* smallerPtr = * largerPtr = 0 ;
2019-07-20 18:47:07 +00:00
{ U32 positions = 0 ;
if ( bestLength > 384 ) positions = MIN ( 192 , ( U32 ) ( bestLength - 384 ) ) ; /* speed optimization */
2021-01-08 10:21:43 +00:00
assert ( matchEndIdx > curr + 8 ) ;
return MAX ( positions , matchEndIdx - ( curr + 8 ) ) ;
2019-07-20 18:47:07 +00:00
}
2018-05-15 17:45:22 +00:00
}
FORCE_INLINE_TEMPLATE
void ZSTD_updateTree_internal (
2019-01-04 00:30:03 +00:00
ZSTD_matchState_t * ms ,
2018-05-15 17:45:22 +00:00
const BYTE * const ip , const BYTE * const iend ,
2019-01-04 00:30:03 +00:00
const U32 mls , const ZSTD_dictMode_e dictMode )
2018-05-15 17:45:22 +00:00
{
const BYTE * const base = ms - > window . base ;
U32 const target = ( U32 ) ( ip - base ) ;
U32 idx = ms - > nextToUpdate ;
2019-01-04 00:30:03 +00:00
DEBUGLOG ( 6 , " ZSTD_updateTree_internal, from %u to %u (dictMode:%u) " ,
idx , target , dictMode ) ;
2018-05-15 17:45:22 +00:00
2019-07-20 18:47:07 +00:00
while ( idx < target ) {
2022-01-24 10:04:45 +00:00
U32 const forward = ZSTD_insertBt1 ( ms , base + idx , iend , target , mls , dictMode = = ZSTD_extDict ) ;
2019-07-20 18:47:07 +00:00
assert ( idx < ( U32 ) ( idx + forward ) ) ;
idx + = forward ;
}
assert ( ( size_t ) ( ip - base ) < = ( size_t ) ( U32 ) ( - 1 ) ) ;
assert ( ( size_t ) ( iend - base ) < = ( size_t ) ( U32 ) ( - 1 ) ) ;
2018-05-15 17:45:22 +00:00
ms - > nextToUpdate = target ;
}
2019-01-04 00:30:03 +00:00
void ZSTD_updateTree ( ZSTD_matchState_t * ms , const BYTE * ip , const BYTE * iend ) {
ZSTD_updateTree_internal ( ms , ip , iend , ms - > cParams . minMatch , ZSTD_noDict ) ;
2018-05-15 17:45:22 +00:00
}
2023-05-22 12:32:14 +00:00
FORCE_INLINE_TEMPLATE U32
ZSTD_insertBtAndGetAllMatches (
ZSTD_match_t * matches , /* store result (found matches) in this table (presumed large enough) */
ZSTD_matchState_t * ms ,
U32 * nextToUpdate3 ,
const BYTE * const ip , const BYTE * const iLimit ,
const ZSTD_dictMode_e dictMode ,
const U32 rep [ ZSTD_REP_NUM ] ,
const U32 ll0 , /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
const U32 lengthToBeat ,
const U32 mls /* template */ )
2017-10-26 20:41:47 +00:00
{
2019-01-04 00:30:03 +00:00
const ZSTD_compressionParameters * const cParams = & ms - > cParams ;
2018-05-15 17:45:22 +00:00
U32 const sufficient_len = MIN ( cParams - > targetLength , ZSTD_OPT_NUM - 1 ) ;
const BYTE * const base = ms - > window . base ;
2021-01-08 10:21:43 +00:00
U32 const curr = ( U32 ) ( ip - base ) ;
2018-05-15 17:45:22 +00:00
U32 const hashLog = cParams - > hashLog ;
2018-01-13 12:50:59 +00:00
U32 const minMatch = ( mls = = 3 ) ? 3 : 4 ;
2018-05-15 17:45:22 +00:00
U32 * const hashTable = ms - > hashTable ;
2018-01-13 12:50:59 +00:00
size_t const h = ZSTD_hashPtr ( ip , hashLog , mls ) ;
2017-10-26 20:41:47 +00:00
U32 matchIndex = hashTable [ h ] ;
2018-05-15 17:45:22 +00:00
U32 * const bt = ms - > chainTable ;
U32 const btLog = cParams - > chainLog - 1 ;
2018-01-13 12:50:59 +00:00
U32 const btMask = ( 1U < < btLog ) - 1 ;
2017-10-26 20:41:47 +00:00
size_t commonLengthSmaller = 0 , commonLengthLarger = 0 ;
2018-05-15 17:45:22 +00:00
const BYTE * const dictBase = ms - > window . dictBase ;
U32 const dictLimit = ms - > window . dictLimit ;
2017-10-26 20:41:47 +00:00
const BYTE * const dictEnd = dictBase + dictLimit ;
const BYTE * const prefixStart = base + dictLimit ;
2021-01-08 10:21:43 +00:00
U32 const btLow = ( btMask > = curr ) ? 0 : curr - btMask ;
U32 const windowLow = ZSTD_getLowestMatchIndex ( ms , curr , cParams - > windowLog ) ;
2019-01-04 00:30:03 +00:00
U32 const matchLow = windowLow ? windowLow : 1 ;
2021-01-08 10:21:43 +00:00
U32 * smallerPtr = bt + 2 * ( curr & btMask ) ;
U32 * largerPtr = bt + 2 * ( curr & btMask ) + 1 ;
U32 matchEndIdx = curr + 8 + 1 ; /* farthest referenced position of any match => detects repetitive patterns */
2017-10-26 20:41:47 +00:00
U32 dummy32 ; /* to be nullified at the end */
U32 mnum = 0 ;
2018-05-15 17:45:22 +00:00
U32 nbCompares = 1U < < cParams - > searchLog ;
2017-10-26 20:41:47 +00:00
2019-01-04 00:30:03 +00:00
const ZSTD_matchState_t * dms = dictMode = = ZSTD_dictMatchState ? ms - > dictMatchState : NULL ;
const ZSTD_compressionParameters * const dmsCParams =
dictMode = = ZSTD_dictMatchState ? & dms - > cParams : NULL ;
const BYTE * const dmsBase = dictMode = = ZSTD_dictMatchState ? dms - > window . base : NULL ;
const BYTE * const dmsEnd = dictMode = = ZSTD_dictMatchState ? dms - > window . nextSrc : NULL ;
U32 const dmsHighLimit = dictMode = = ZSTD_dictMatchState ? ( U32 ) ( dmsEnd - dmsBase ) : 0 ;
U32 const dmsLowLimit = dictMode = = ZSTD_dictMatchState ? dms - > window . lowLimit : 0 ;
U32 const dmsIndexDelta = dictMode = = ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0 ;
U32 const dmsHashLog = dictMode = = ZSTD_dictMatchState ? dmsCParams - > hashLog : hashLog ;
U32 const dmsBtLog = dictMode = = ZSTD_dictMatchState ? dmsCParams - > chainLog - 1 : btLog ;
U32 const dmsBtMask = dictMode = = ZSTD_dictMatchState ? ( 1U < < dmsBtLog ) - 1 : 0 ;
U32 const dmsBtLow = dictMode = = ZSTD_dictMatchState & & dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit ;
2018-01-13 12:50:59 +00:00
size_t bestLength = lengthToBeat - 1 ;
2021-01-08 10:21:43 +00:00
DEBUGLOG ( 8 , " ZSTD_insertBtAndGetAllMatches: current=%u " , curr ) ;
2018-01-13 12:50:59 +00:00
/* check repCode */
2019-01-04 00:30:03 +00:00
assert ( ll0 < = 1 ) ; /* necessarily 1 or 0 */
2018-01-13 12:50:59 +00:00
{ U32 const lastR = ZSTD_REP_NUM + ll0 ;
U32 repCode ;
for ( repCode = ll0 ; repCode < lastR ; repCode + + ) {
U32 const repOffset = ( repCode = = ZSTD_REP_NUM ) ? ( rep [ 0 ] - 1 ) : rep [ repCode ] ;
2021-01-08 10:21:43 +00:00
U32 const repIndex = curr - repOffset ;
2018-01-13 12:50:59 +00:00
U32 repLen = 0 ;
2021-01-08 10:21:43 +00:00
assert ( curr > = dictLimit ) ;
if ( repOffset - 1 /* intentional overflow, discards 0 and -1 */ < curr - dictLimit ) { /* equivalent to `curr > repIndex >= dictLimit` */
2020-09-18 19:38:36 +00:00
/* We must validate the repcode offset because when we're using a dictionary the
* valid offset range shrinks when the dictionary goes out of bounds .
*/
if ( ( repIndex > = windowLow ) & ( ZSTD_readMINMATCH ( ip , minMatch ) = = ZSTD_readMINMATCH ( ip - repOffset , minMatch ) ) ) {
2018-01-13 12:50:59 +00:00
repLen = ( U32 ) ZSTD_count ( ip + minMatch , ip + minMatch - repOffset , iLimit ) + minMatch ;
}
2021-01-08 10:21:43 +00:00
} else { /* repIndex < dictLimit || repIndex >= curr */
2019-01-04 00:30:03 +00:00
const BYTE * const repMatch = dictMode = = ZSTD_dictMatchState ?
dmsBase + repIndex - dmsIndexDelta :
dictBase + repIndex ;
2021-01-08 10:21:43 +00:00
assert ( curr > = windowLow ) ;
2019-01-04 00:30:03 +00:00
if ( dictMode = = ZSTD_extDict
2021-01-08 10:21:43 +00:00
& & ( ( ( repOffset - 1 ) /*intentional overflow*/ < curr - windowLow ) /* equivalent to `curr > repIndex >= windowLow` */
2018-01-13 12:50:59 +00:00
& ( ( ( U32 ) ( ( dictLimit - 1 ) - repIndex ) > = 3 ) ) /* intentional overflow : do not test positions overlapping 2 memory segments */ )
& & ( ZSTD_readMINMATCH ( ip , minMatch ) = = ZSTD_readMINMATCH ( repMatch , minMatch ) ) ) {
repLen = ( U32 ) ZSTD_count_2segments ( ip + minMatch , repMatch + minMatch , iLimit , dictEnd , prefixStart ) + minMatch ;
2019-01-04 00:30:03 +00:00
}
if ( dictMode = = ZSTD_dictMatchState
2021-01-08 10:21:43 +00:00
& & ( ( ( repOffset - 1 ) /*intentional overflow*/ < curr - ( dmsLowLimit + dmsIndexDelta ) ) /* equivalent to `curr > repIndex >= dmsLowLimit` */
2019-01-04 00:30:03 +00:00
& ( ( U32 ) ( ( dictLimit - 1 ) - repIndex ) > = 3 ) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
& & ( ZSTD_readMINMATCH ( ip , minMatch ) = = ZSTD_readMINMATCH ( repMatch , minMatch ) ) ) {
repLen = ( U32 ) ZSTD_count_2segments ( ip + minMatch , repMatch + minMatch , iLimit , dmsEnd , prefixStart ) + minMatch ;
2018-01-13 12:50:59 +00:00
} }
/* save longer solution */
if ( repLen > bestLength ) {
2019-01-04 00:30:03 +00:00
DEBUGLOG ( 8 , " found repCode %u (ll0:%u, offset:%u) of length %u " ,
repCode , ll0 , repOffset , repLen ) ;
2018-01-13 12:50:59 +00:00
bestLength = repLen ;
2023-05-22 12:32:14 +00:00
matches [ mnum ] . off = REPCODE_TO_OFFBASE ( repCode - ll0 + 1 ) ; /* expect value between 1 and 3 */
2018-01-13 12:50:59 +00:00
matches [ mnum ] . len = ( U32 ) repLen ;
mnum + + ;
if ( ( repLen > sufficient_len )
| ( ip + repLen = = iLimit ) ) { /* best possible */
return mnum ;
} } } }
2017-10-26 20:41:47 +00:00
2018-01-13 12:50:59 +00:00
/* HC3 match finder */
if ( ( mls = = 3 ) /*static*/ & & ( bestLength < mls ) ) {
2019-07-20 18:47:07 +00:00
U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3 ( ms , nextToUpdate3 , ip ) ;
2019-01-04 00:30:03 +00:00
if ( ( matchIndex3 > = matchLow )
2021-01-08 10:21:43 +00:00
& ( curr - matchIndex3 < ( 1 < < 18 ) ) /*heuristic : longer distance likely too expensive*/ ) {
2018-01-13 12:50:59 +00:00
size_t mlen ;
2019-01-04 00:30:03 +00:00
if ( ( dictMode = = ZSTD_noDict ) /*static*/ | | ( dictMode = = ZSTD_dictMatchState ) /*static*/ | | ( matchIndex3 > = dictLimit ) ) {
2018-01-13 12:50:59 +00:00
const BYTE * const match = base + matchIndex3 ;
mlen = ZSTD_count ( ip , match , iLimit ) ;
2017-10-26 20:41:47 +00:00
} else {
2018-01-13 12:50:59 +00:00
const BYTE * const match = dictBase + matchIndex3 ;
mlen = ZSTD_count_2segments ( ip , match , iLimit , dictEnd , prefixStart ) ;
2017-10-26 20:41:47 +00:00
}
/* save best solution */
2018-01-13 12:50:59 +00:00
if ( mlen > = mls /* == 3 > bestLength */ ) {
DEBUGLOG ( 8 , " found small match with hlog3, of length %u " ,
( U32 ) mlen ) ;
bestLength = mlen ;
2021-01-08 10:21:43 +00:00
assert ( curr > matchIndex3 ) ;
2018-01-13 12:50:59 +00:00
assert ( mnum = = 0 ) ; /* no prior solution */
2023-05-22 12:32:14 +00:00
matches [ 0 ] . off = OFFSET_TO_OFFBASE ( curr - matchIndex3 ) ;
2018-01-13 12:50:59 +00:00
matches [ 0 ] . len = ( U32 ) mlen ;
mnum = 1 ;
if ( ( mlen > sufficient_len ) |
( ip + mlen = = iLimit ) ) { /* best possible length */
2021-01-08 10:21:43 +00:00
ms - > nextToUpdate = curr + 1 ; /* skip insertion */
2018-01-13 12:50:59 +00:00
return 1 ;
2019-07-20 18:47:07 +00:00
} } }
2019-01-04 00:30:03 +00:00
/* no dictMatchState lookup: dicts don't have a populated HC3 table */
2022-01-24 10:04:45 +00:00
} /* if (mls == 3) */
2017-10-26 20:41:47 +00:00
2021-01-08 10:21:43 +00:00
hashTable [ h ] = curr ; /* Update Hash Table */
2017-10-26 20:41:47 +00:00
2022-01-24 10:04:45 +00:00
for ( ; nbCompares & & ( matchIndex > = matchLow ) ; - - nbCompares ) {
2018-01-13 12:50:59 +00:00
U32 * const nextPtr = bt + 2 * ( matchIndex & btMask ) ;
2017-10-26 20:41:47 +00:00
const BYTE * match ;
2019-11-02 02:36:06 +00:00
size_t matchLength = MIN ( commonLengthSmaller , commonLengthLarger ) ; /* guaranteed minimum nb of common bytes */
2021-01-08 10:21:43 +00:00
assert ( curr > matchIndex ) ;
2017-10-26 20:41:47 +00:00
2019-01-04 00:30:03 +00:00
if ( ( dictMode = = ZSTD_noDict ) | | ( dictMode = = ZSTD_dictMatchState ) | | ( matchIndex + matchLength > = dictLimit ) ) {
2018-01-13 12:50:59 +00:00
assert ( matchIndex + matchLength > = dictLimit ) ; /* ensure the condition is correct when !extDict */
2017-10-26 20:41:47 +00:00
match = base + matchIndex ;
2019-11-02 02:36:06 +00:00
if ( matchIndex > = dictLimit ) assert ( memcmp ( match , ip , matchLength ) = = 0 ) ; /* ensure early section of match is equal as expected */
2018-01-13 12:50:59 +00:00
matchLength + = ZSTD_count ( ip + matchLength , match + matchLength , iLimit ) ;
2017-10-26 20:41:47 +00:00
} else {
match = dictBase + matchIndex ;
2019-11-02 02:36:06 +00:00
assert ( memcmp ( match , ip , matchLength ) = = 0 ) ; /* ensure early section of match is equal as expected */
2017-10-26 20:41:47 +00:00
matchLength + = ZSTD_count_2segments ( ip + matchLength , match + matchLength , iLimit , dictEnd , prefixStart ) ;
if ( matchIndex + matchLength > = dictLimit )
2019-11-02 02:36:06 +00:00
match = base + matchIndex ; /* prepare for match[matchLength] read */
2017-10-26 20:41:47 +00:00
}
if ( matchLength > bestLength ) {
2023-05-22 12:32:14 +00:00
DEBUGLOG ( 8 , " found match of length %u at distance %u (offBase=%u) " ,
( U32 ) matchLength , curr - matchIndex , OFFSET_TO_OFFBASE ( curr - matchIndex ) ) ;
2018-01-13 12:50:59 +00:00
assert ( matchEndIdx > matchIndex ) ;
if ( matchLength > matchEndIdx - matchIndex )
matchEndIdx = matchIndex + ( U32 ) matchLength ;
2017-10-26 20:41:47 +00:00
bestLength = matchLength ;
2023-05-22 12:32:14 +00:00
matches [ mnum ] . off = OFFSET_TO_OFFBASE ( curr - matchIndex ) ;
2017-10-26 20:41:47 +00:00
matches [ mnum ] . len = ( U32 ) matchLength ;
mnum + + ;
2019-01-04 00:30:03 +00:00
if ( ( matchLength > ZSTD_OPT_NUM )
| ( ip + matchLength = = iLimit ) /* equal : no way to know if inf or sup */ ) {
if ( dictMode = = ZSTD_dictMatchState ) nbCompares = 0 ; /* break should also skip searching dms */
break ; /* drop, to preserve bt consistency (miss a little bit of compression) */
2022-01-24 10:04:45 +00:00
} }
2017-10-26 20:41:47 +00:00
if ( match [ matchLength ] < ip [ matchLength ] ) {
2018-01-13 12:50:59 +00:00
/* match smaller than current */
2017-10-26 20:41:47 +00:00
* smallerPtr = matchIndex ; /* update smaller idx */
commonLengthSmaller = matchLength ; /* all smaller will now have at least this guaranteed common length */
if ( matchIndex < = btLow ) { smallerPtr = & dummy32 ; break ; } /* beyond tree size, stop the search */
2018-01-13 12:50:59 +00:00
smallerPtr = nextPtr + 1 ; /* new candidate => larger than match, which was smaller than current */
matchIndex = nextPtr [ 1 ] ; /* new matchIndex, larger than previous, closer to current */
2017-10-26 20:41:47 +00:00
} else {
* largerPtr = matchIndex ;
commonLengthLarger = matchLength ;
if ( matchIndex < = btLow ) { largerPtr = & dummy32 ; break ; } /* beyond tree size, stop the search */
largerPtr = nextPtr ;
matchIndex = nextPtr [ 0 ] ;
} }
* smallerPtr = * largerPtr = 0 ;
2022-01-24 10:04:45 +00:00
assert ( nbCompares < = ( 1U < < ZSTD_SEARCHLOG_MAX ) ) ; /* Check we haven't underflowed. */
2019-01-04 00:30:03 +00:00
if ( dictMode = = ZSTD_dictMatchState & & nbCompares ) {
size_t const dmsH = ZSTD_hashPtr ( ip , dmsHashLog , mls ) ;
U32 dictMatchIndex = dms - > hashTable [ dmsH ] ;
const U32 * const dmsBt = dms - > chainTable ;
commonLengthSmaller = commonLengthLarger = 0 ;
2022-01-24 10:04:45 +00:00
for ( ; nbCompares & & ( dictMatchIndex > dmsLowLimit ) ; - - nbCompares ) {
2019-01-04 00:30:03 +00:00
const U32 * const nextPtr = dmsBt + 2 * ( dictMatchIndex & dmsBtMask ) ;
size_t matchLength = MIN ( commonLengthSmaller , commonLengthLarger ) ; /* guaranteed minimum nb of common bytes */
const BYTE * match = dmsBase + dictMatchIndex ;
matchLength + = ZSTD_count_2segments ( ip + matchLength , match + matchLength , iLimit , dmsEnd , prefixStart ) ;
if ( dictMatchIndex + matchLength > = dmsHighLimit )
match = base + dictMatchIndex + dmsIndexDelta ; /* to prepare for next usage of match[matchLength] */
if ( matchLength > bestLength ) {
matchIndex = dictMatchIndex + dmsIndexDelta ;
2023-05-22 12:32:14 +00:00
DEBUGLOG ( 8 , " found dms match of length %u at distance %u (offBase=%u) " ,
( U32 ) matchLength , curr - matchIndex , OFFSET_TO_OFFBASE ( curr - matchIndex ) ) ;
2019-01-04 00:30:03 +00:00
if ( matchLength > matchEndIdx - matchIndex )
matchEndIdx = matchIndex + ( U32 ) matchLength ;
bestLength = matchLength ;
2023-05-22 12:32:14 +00:00
matches [ mnum ] . off = OFFSET_TO_OFFBASE ( curr - matchIndex ) ;
2019-01-04 00:30:03 +00:00
matches [ mnum ] . len = ( U32 ) matchLength ;
mnum + + ;
if ( ( matchLength > ZSTD_OPT_NUM )
| ( ip + matchLength = = iLimit ) /* equal : no way to know if inf or sup */ ) {
break ; /* drop, to guarantee consistency (miss a little bit of compression) */
2022-01-24 10:04:45 +00:00
} }
2019-01-04 00:30:03 +00:00
if ( dictMatchIndex < = dmsBtLow ) { break ; } /* beyond tree size, stop the search */
if ( match [ matchLength ] < ip [ matchLength ] ) {
commonLengthSmaller = matchLength ; /* all smaller will now have at least this guaranteed common length */
dictMatchIndex = nextPtr [ 1 ] ; /* new matchIndex larger than previous (closer to current) */
} else {
/* match is larger than current */
commonLengthLarger = matchLength ;
dictMatchIndex = nextPtr [ 0 ] ;
2022-01-24 10:04:45 +00:00
} } } /* if (dictMode == ZSTD_dictMatchState) */
2019-01-04 00:30:03 +00:00
2021-01-08 10:21:43 +00:00
assert ( matchEndIdx > curr + 8 ) ;
2018-05-15 17:45:22 +00:00
ms - > nextToUpdate = matchEndIdx - 8 ; /* skip repetitive patterns */
2017-10-26 20:41:47 +00:00
return mnum ;
}
2022-01-24 10:04:45 +00:00
typedef U32 ( * ZSTD_getAllMatchesFn ) (
ZSTD_match_t * ,
ZSTD_matchState_t * ,
U32 * ,
const BYTE * ,
const BYTE * ,
const U32 rep [ ZSTD_REP_NUM ] ,
U32 const ll0 ,
U32 const lengthToBeat ) ;
FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal (
ZSTD_match_t * matches ,
ZSTD_matchState_t * ms ,
U32 * nextToUpdate3 ,
const BYTE * ip ,
const BYTE * const iHighLimit ,
const U32 rep [ ZSTD_REP_NUM ] ,
U32 const ll0 ,
U32 const lengthToBeat ,
const ZSTD_dictMode_e dictMode ,
const U32 mls )
2017-10-26 20:41:47 +00:00
{
2022-01-24 10:04:45 +00:00
assert ( BOUNDED ( 3 , ms - > cParams . minMatch , 6 ) = = mls ) ;
DEBUGLOG ( 8 , " ZSTD_BtGetAllMatches(dictMode=%d, mls=%u) " , ( int ) dictMode , mls ) ;
if ( ip < ms - > window . base + ms - > nextToUpdate )
return 0 ; /* skipped area */
ZSTD_updateTree_internal ( ms , ip , iHighLimit , mls , dictMode ) ;
return ZSTD_insertBtAndGetAllMatches ( matches , ms , nextToUpdate3 , ip , iHighLimit , dictMode , rep , ll0 , lengthToBeat , mls ) ;
}
# define ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls) ZSTD_btGetAllMatches_##dictMode##_##mls
# define GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls) \
static U32 ZSTD_BT_GET_ALL_MATCHES_FN ( dictMode , mls ) ( \
ZSTD_match_t * matches , \
ZSTD_matchState_t * ms , \
U32 * nextToUpdate3 , \
const BYTE * ip , \
const BYTE * const iHighLimit , \
const U32 rep [ ZSTD_REP_NUM ] , \
U32 const ll0 , \
U32 const lengthToBeat ) \
{ \
return ZSTD_btGetAllMatches_internal ( \
matches , ms , nextToUpdate3 , ip , iHighLimit , \
rep , ll0 , lengthToBeat , ZSTD_ # # dictMode , mls ) ; \
}
# define GEN_ZSTD_BT_GET_ALL_MATCHES(dictMode) \
GEN_ZSTD_BT_GET_ALL_MATCHES_ ( dictMode , 3 ) \
GEN_ZSTD_BT_GET_ALL_MATCHES_ ( dictMode , 4 ) \
GEN_ZSTD_BT_GET_ALL_MATCHES_ ( dictMode , 5 ) \
GEN_ZSTD_BT_GET_ALL_MATCHES_ ( dictMode , 6 )
GEN_ZSTD_BT_GET_ALL_MATCHES ( noDict )
GEN_ZSTD_BT_GET_ALL_MATCHES ( extDict )
GEN_ZSTD_BT_GET_ALL_MATCHES ( dictMatchState )
# define ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMode) \
{ \
ZSTD_BT_GET_ALL_MATCHES_FN ( dictMode , 3 ) , \
ZSTD_BT_GET_ALL_MATCHES_FN ( dictMode , 4 ) , \
ZSTD_BT_GET_ALL_MATCHES_FN ( dictMode , 5 ) , \
ZSTD_BT_GET_ALL_MATCHES_FN ( dictMode , 6 ) \
2017-10-26 20:41:47 +00:00
}
2022-01-24 10:04:45 +00:00
static ZSTD_getAllMatchesFn
ZSTD_selectBtGetAllMatches ( ZSTD_matchState_t const * ms , ZSTD_dictMode_e const dictMode )
{
ZSTD_getAllMatchesFn const getAllMatchesFns [ 3 ] [ 4 ] = {
ZSTD_BT_GET_ALL_MATCHES_ARRAY ( noDict ) ,
ZSTD_BT_GET_ALL_MATCHES_ARRAY ( extDict ) ,
ZSTD_BT_GET_ALL_MATCHES_ARRAY ( dictMatchState )
} ;
U32 const mls = BOUNDED ( 3 , ms - > cParams . minMatch , 6 ) ;
assert ( ( U32 ) dictMode < 3 ) ;
assert ( mls - 3 < 4 ) ;
return getAllMatchesFns [ ( int ) dictMode ] [ mls - 3 ] ;
2017-10-26 20:41:47 +00:00
}
2021-01-08 10:21:43 +00:00
/*************************
* LDM helper functions *
* * * * * * * * * * * * * * * * * * * * * * * * */
/* Struct containing info needed to make decision about ldm inclusion */
typedef struct {
2022-01-24 10:04:45 +00:00
rawSeqStore_t seqStore ; /* External match candidates store for this block */
U32 startPosInBlock ; /* Start position of the current match candidate */
U32 endPosInBlock ; /* End position of the current match candidate */
U32 offset ; /* Offset of the match candidate */
2021-01-08 10:21:43 +00:00
} ZSTD_optLdm_t ;
/* ZSTD_optLdm_skipRawSeqStoreBytes():
2022-01-24 10:04:45 +00:00
* Moves forward in @ rawSeqStore by @ nbBytes ,
* which will update the fields ' pos ' and ' posInSequence ' .
2021-01-08 10:21:43 +00:00
*/
2022-01-24 10:04:45 +00:00
static void ZSTD_optLdm_skipRawSeqStoreBytes ( rawSeqStore_t * rawSeqStore , size_t nbBytes )
{
2021-01-08 10:21:43 +00:00
U32 currPos = ( U32 ) ( rawSeqStore - > posInSequence + nbBytes ) ;
while ( currPos & & rawSeqStore - > pos < rawSeqStore - > size ) {
rawSeq currSeq = rawSeqStore - > seq [ rawSeqStore - > pos ] ;
if ( currPos > = currSeq . litLength + currSeq . matchLength ) {
currPos - = currSeq . litLength + currSeq . matchLength ;
rawSeqStore - > pos + + ;
} else {
rawSeqStore - > posInSequence = currPos ;
break ;
}
}
if ( currPos = = 0 | | rawSeqStore - > pos = = rawSeqStore - > size ) {
rawSeqStore - > posInSequence = 0 ;
}
}
/* ZSTD_opt_getNextMatchAndUpdateSeqStore():
* Calculates the beginning and end of the next match in the current block .
* Updates ' pos ' and ' posInSequence ' of the ldmSeqStore .
*/
2022-01-24 10:04:45 +00:00
static void
ZSTD_opt_getNextMatchAndUpdateSeqStore ( ZSTD_optLdm_t * optLdm , U32 currPosInBlock ,
U32 blockBytesRemaining )
{
2021-01-08 10:21:43 +00:00
rawSeq currSeq ;
U32 currBlockEndPos ;
U32 literalsBytesRemaining ;
U32 matchBytesRemaining ;
/* Setting match end position to MAX to ensure we never use an LDM during this block */
if ( optLdm - > seqStore . size = = 0 | | optLdm - > seqStore . pos > = optLdm - > seqStore . size ) {
optLdm - > startPosInBlock = UINT_MAX ;
optLdm - > endPosInBlock = UINT_MAX ;
return ;
}
2022-01-24 10:04:45 +00:00
/* Calculate appropriate bytes left in matchLength and litLength
* after adjusting based on ldmSeqStore - > posInSequence */
2021-01-08 10:21:43 +00:00
currSeq = optLdm - > seqStore . seq [ optLdm - > seqStore . pos ] ;
assert ( optLdm - > seqStore . posInSequence < = currSeq . litLength + currSeq . matchLength ) ;
currBlockEndPos = currPosInBlock + blockBytesRemaining ;
literalsBytesRemaining = ( optLdm - > seqStore . posInSequence < currSeq . litLength ) ?
currSeq . litLength - ( U32 ) optLdm - > seqStore . posInSequence :
0 ;
matchBytesRemaining = ( literalsBytesRemaining = = 0 ) ?
currSeq . matchLength - ( ( U32 ) optLdm - > seqStore . posInSequence - currSeq . litLength ) :
currSeq . matchLength ;
/* If there are more literal bytes than bytes remaining in block, no ldm is possible */
if ( literalsBytesRemaining > = blockBytesRemaining ) {
optLdm - > startPosInBlock = UINT_MAX ;
optLdm - > endPosInBlock = UINT_MAX ;
ZSTD_optLdm_skipRawSeqStoreBytes ( & optLdm - > seqStore , blockBytesRemaining ) ;
return ;
}
/* Matches may be < MINMATCH by this process. In that case, we will reject them
when we are deciding whether or not to add the ldm */
optLdm - > startPosInBlock = currPosInBlock + literalsBytesRemaining ;
optLdm - > endPosInBlock = optLdm - > startPosInBlock + matchBytesRemaining ;
optLdm - > offset = currSeq . offset ;
if ( optLdm - > endPosInBlock > currBlockEndPos ) {
/* Match ends after the block ends, we can't use the whole match */
optLdm - > endPosInBlock = currBlockEndPos ;
ZSTD_optLdm_skipRawSeqStoreBytes ( & optLdm - > seqStore , currBlockEndPos - currPosInBlock ) ;
} else {
/* Consume nb of bytes equal to size of sequence left */
ZSTD_optLdm_skipRawSeqStoreBytes ( & optLdm - > seqStore , literalsBytesRemaining + matchBytesRemaining ) ;
}
}
/* ZSTD_optLdm_maybeAddMatch():
2022-01-24 10:04:45 +00:00
* Adds a match if it ' s long enough ,
* based on it ' s ' matchStartPosInBlock ' and ' matchEndPosInBlock ' ,
* into ' matches ' . Maintains the correct ordering of ' matches ' .
2021-01-08 10:21:43 +00:00
*/
static void ZSTD_optLdm_maybeAddMatch ( ZSTD_match_t * matches , U32 * nbMatches ,
2022-01-24 10:04:45 +00:00
const ZSTD_optLdm_t * optLdm , U32 currPosInBlock )
{
U32 const posDiff = currPosInBlock - optLdm - > startPosInBlock ;
2023-05-22 12:32:14 +00:00
/* Note: ZSTD_match_t actually contains offBase and matchLength (before subtracting MINMATCH) */
2022-01-24 10:04:45 +00:00
U32 const candidateMatchLength = optLdm - > endPosInBlock - optLdm - > startPosInBlock - posDiff ;
2021-01-08 10:21:43 +00:00
/* Ensure that current block position is not outside of the match */
if ( currPosInBlock < optLdm - > startPosInBlock
| | currPosInBlock > = optLdm - > endPosInBlock
| | candidateMatchLength < MINMATCH ) {
return ;
}
if ( * nbMatches = = 0 | | ( ( candidateMatchLength > matches [ * nbMatches - 1 ] . len ) & & * nbMatches < ZSTD_OPT_NUM ) ) {
2023-05-22 12:32:14 +00:00
U32 const candidateOffBase = OFFSET_TO_OFFBASE ( optLdm - > offset ) ;
DEBUGLOG ( 6 , " ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offBase: %u matchLength %u) at block position=%u " ,
candidateOffBase , candidateMatchLength , currPosInBlock ) ;
2021-01-08 10:21:43 +00:00
matches [ * nbMatches ] . len = candidateMatchLength ;
2023-05-22 12:32:14 +00:00
matches [ * nbMatches ] . off = candidateOffBase ;
2021-01-08 10:21:43 +00:00
( * nbMatches ) + + ;
}
}
/* ZSTD_optLdm_processMatchCandidate():
* Wrapper function to update ldm seq store and call ldm functions as necessary .
*/
2022-01-24 10:04:45 +00:00
static void
ZSTD_optLdm_processMatchCandidate ( ZSTD_optLdm_t * optLdm ,
ZSTD_match_t * matches , U32 * nbMatches ,
U32 currPosInBlock , U32 remainingBytes )
{
2021-01-08 10:21:43 +00:00
if ( optLdm - > seqStore . size = = 0 | | optLdm - > seqStore . pos > = optLdm - > seqStore . size ) {
return ;
}
if ( currPosInBlock > = optLdm - > endPosInBlock ) {
if ( currPosInBlock > optLdm - > endPosInBlock ) {
/* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily
* at the end of a match from the ldm seq store , and will often be some bytes
* over beyond matchEndPosInBlock . As such , we need to correct for these " overshoots "
*/
2022-01-24 10:04:45 +00:00
U32 const posOvershoot = currPosInBlock - optLdm - > endPosInBlock ;
2021-01-08 10:21:43 +00:00
ZSTD_optLdm_skipRawSeqStoreBytes ( & optLdm - > seqStore , posOvershoot ) ;
2022-01-24 10:04:45 +00:00
}
2021-01-08 10:21:43 +00:00
ZSTD_opt_getNextMatchAndUpdateSeqStore ( optLdm , currPosInBlock , remainingBytes ) ;
}
ZSTD_optLdm_maybeAddMatch ( matches , nbMatches , optLdm , currPosInBlock ) ;
}
2017-10-26 20:41:47 +00:00
2022-01-24 10:04:45 +00:00
2017-10-26 20:41:47 +00:00
/*-*******************************
* Optimal parser
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2019-01-04 00:30:03 +00:00
static U32 ZSTD_totalLen ( ZSTD_optimal_t sol )
2018-01-13 12:50:59 +00:00
{
2019-01-04 00:30:03 +00:00
return sol . litlen + sol . mlen ;
2017-10-26 20:41:47 +00:00
}
2019-01-04 00:30:03 +00:00
#if 0 /* debug */
2017-10-26 20:41:47 +00:00
2019-01-04 00:30:03 +00:00
static void
listStats ( const U32 * table , int lastEltID )
2017-10-26 20:41:47 +00:00
{
2019-01-04 00:30:03 +00:00
int const nbElts = lastEltID + 1 ;
int enb ;
for ( enb = 0 ; enb < nbElts ; enb + + ) {
( void ) table ;
2020-09-18 19:38:36 +00:00
/* RAWLOG(2, "%3i:%3i, ", enb, table[enb]); */
2019-01-04 00:30:03 +00:00
RAWLOG ( 2 , " %4i, " , table [ enb ] ) ;
}
RAWLOG ( 2 , " \n " ) ;
2017-10-26 20:41:47 +00:00
}
2019-01-04 00:30:03 +00:00
# endif
FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_opt_generic ( ZSTD_matchState_t * ms ,
seqStore_t * seqStore ,
U32 rep [ ZSTD_REP_NUM ] ,
const void * src , size_t srcSize ,
const int optLevel ,
const ZSTD_dictMode_e dictMode )
2017-10-26 20:41:47 +00:00
{
2018-05-15 17:45:22 +00:00
optState_t * const optStatePtr = & ms - > opt ;
2017-10-26 20:41:47 +00:00
const BYTE * const istart = ( const BYTE * ) src ;
const BYTE * ip = istart ;
const BYTE * anchor = istart ;
const BYTE * const iend = istart + srcSize ;
const BYTE * const ilimit = iend - 8 ;
2018-05-15 17:45:22 +00:00
const BYTE * const base = ms - > window . base ;
const BYTE * const prefixStart = base + ms - > window . dictLimit ;
2019-01-04 00:30:03 +00:00
const ZSTD_compressionParameters * const cParams = & ms - > cParams ;
2017-10-26 20:41:47 +00:00
2022-01-24 10:04:45 +00:00
ZSTD_getAllMatchesFn getAllMatches = ZSTD_selectBtGetAllMatches ( ms , dictMode ) ;
2018-05-15 17:45:22 +00:00
U32 const sufficient_len = MIN ( cParams - > targetLength , ZSTD_OPT_NUM - 1 ) ;
2019-01-04 00:30:03 +00:00
U32 const minMatch = ( cParams - > minMatch = = 3 ) ? 3 : 4 ;
2019-07-20 18:47:07 +00:00
U32 nextToUpdate3 = ms - > nextToUpdate ;
2017-10-26 20:41:47 +00:00
2018-01-13 12:50:59 +00:00
ZSTD_optimal_t * const opt = optStatePtr - > priceTable ;
ZSTD_match_t * const matches = optStatePtr - > matchTable ;
2019-01-04 00:30:03 +00:00
ZSTD_optimal_t lastSequence ;
2021-01-08 10:21:43 +00:00
ZSTD_optLdm_t optLdm ;
2023-05-22 12:32:14 +00:00
ZSTD_memset ( & lastSequence , 0 , sizeof ( ZSTD_optimal_t ) ) ;
2021-01-08 10:21:43 +00:00
optLdm . seqStore = ms - > ldmSeqStore ? * ms - > ldmSeqStore : kNullRawSeqStore ;
optLdm . endPosInBlock = optLdm . startPosInBlock = optLdm . offset = 0 ;
ZSTD_opt_getNextMatchAndUpdateSeqStore ( & optLdm , ( U32 ) ( ip - istart ) , ( U32 ) ( iend - ip ) ) ;
2017-10-26 20:41:47 +00:00
/* init */
2019-01-04 00:30:03 +00:00
DEBUGLOG ( 5 , " ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u " ,
( U32 ) ( ip - base ) , ms - > window . dictLimit , ms - > nextToUpdate ) ;
assert ( optLevel < = 2 ) ;
ZSTD_rescaleFreqs ( optStatePtr , ( const BYTE * ) src , srcSize , optLevel ) ;
2017-10-26 20:41:47 +00:00
ip + = ( ip = = prefixStart ) ;

    /* Match Loop */
    while (ip < ilimit) {
        U32 cur, last_pos = 0;

        /* find first match */
        {   U32 const litlen = (U32)(ip - anchor);
            U32 const ll0 = !litlen;
            U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch);
            ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
                                              (U32)(ip-istart), (U32)(iend-ip));
            if (!nbMatches) { ip++; continue; }

            /* initialize opt[0] */
            { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
            opt[0].mlen = 0;  /* means is_a_literal */
            opt[0].litlen = litlen;
            /* We don't need to include the actual price of the literals because
             * it is static for the duration of the forward pass, and is included
             * in every price. We include the literal length to avoid negative
             * prices when we subtract the previous literal length.
             */
            opt[0].price = (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel);

            /* large match -> immediate encoding */
            {   U32 const maxML = matches[nbMatches-1].len;
                U32 const maxOffBase = matches[nbMatches-1].off;
                DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffBase=%u at cPos=%u => start new series",
                            nbMatches, maxML, maxOffBase, (U32)(ip-prefixStart));

                if (maxML > sufficient_len) {
                    lastSequence.litlen = litlen;
                    lastSequence.mlen = maxML;
                    lastSequence.off = maxOffBase;
                    DEBUGLOG(6, "large match (%u>%u), immediate encoding",
                                maxML, sufficient_len);
                    cur = 0;
                    last_pos = ZSTD_totalLen(lastSequence);
                    goto _shortestPath;
            }   }
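
            /* note : when a match longer than sufficient_len (derived from
             * cParams->targetLength) is available, further optimization is not
             * considered worth the cpu cost : the longest match is encoded
             * immediately through the _shortestPath exit above. */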

            /* set prices for first matches starting position == 0 */
            assert(opt[0].price >= 0);
            {   U32 const literalsPrice = (U32)opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
                U32 pos;
                U32 matchNb;
                for (pos = 1; pos < minMatch; pos++) {
                    opt[pos].price = ZSTD_MAX_PRICE;   /* mlen, litlen and price will be fixed during forward scanning */
                }
                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
                    U32 const offBase = matches[matchNb].off;
                    U32 const end = matches[matchNb].len;
                    for ( ; pos <= end ; pos++ ) {
                        U32 const matchPrice = ZSTD_getMatchPrice(offBase, pos, optStatePtr, optLevel);
                        U32 const sequencePrice = literalsPrice + matchPrice;
                        DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
                                    pos, ZSTD_fCost((int)sequencePrice));
                        opt[pos].mlen = pos;
                        opt[pos].off = offBase;
                        opt[pos].litlen = litlen;
                        opt[pos].price = (int)sequencePrice;
                }   }
                last_pos = pos - 1;
            }
        }

        /* check further positions */
        for (cur = 1; cur <= last_pos; cur++) {
            const BYTE* const inr = ip + cur;
            assert(cur < ZSTD_OPT_NUM);
            DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)

            /* Fix current position with one literal if cheaper */
            {   U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
                int const price = opt[cur-1].price
                                + (int)ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
                                + (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
                                - (int)ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
                assert(price < 1000000000); /* overflow check */
                if (price <= opt[cur].price) {
                    DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
                                opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
                    opt[cur].mlen = 0;
                    opt[cur].off = 0;
                    opt[cur].litlen = litlen;
                    opt[cur].price = price;
                } else {
                    DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
                                opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
                }
            }
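
            /* Illustration of the update above : extending the previous
             * position by one literal is priced as
             *    price(cur) = price(cur-1)
             *               + rawLiteralsCost(1 byte)
             *               + litLengthPrice(litlen) - litLengthPrice(litlen-1)
             * The litLength delta is needed because the literal length of a
             * sequence is priced as a whole, not one literal at a time. */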

            /* Set the repcodes of the current position. We must do it here
             * because we rely on the repcodes of the 2nd to last sequence being
             * correct to set the next chunk's repcodes during the backward
             * traversal.
             */
            ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t));
            assert(cur >= opt[cur].mlen);
            if (opt[cur].mlen != 0) {
                U32 const prev = cur - opt[cur].mlen;
                repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
                ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
            } else {
                ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
            }

            /* last match must start at a minimum distance of 8 from iend */
            if (inr > ilimit) continue;

            if (cur == last_pos) break;

            if ( (optLevel==0) /*static_test*/
              && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
                DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
                continue;  /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
            }

            assert(opt[cur].price >= 0);
            {   U32 const ll0 = (opt[cur].mlen != 0);
                U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
                U32 const previousPrice = (U32)opt[cur].price;
                U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
                U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch);
                U32 matchNb;

                ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
                                                  (U32)(inr-istart), (U32)(iend-inr));

                if (!nbMatches) {
                    DEBUGLOG(7, "rPos:%u : no match found", cur);
                    continue;
                }

                {   U32 const maxML = matches[nbMatches-1].len;
                    DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
                                inr-istart, cur, nbMatches, maxML);

                    if ( (maxML > sufficient_len)
                      || (cur + maxML >= ZSTD_OPT_NUM) ) {
                        lastSequence.mlen = maxML;
                        lastSequence.off = matches[nbMatches-1].off;
                        lastSequence.litlen = litlen;
                        cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0;  /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */
                        last_pos = cur + ZSTD_totalLen(lastSequence);
                        if (cur > ZSTD_OPT_NUM) cur = 0;   /* underflow => first match */
                        goto _shortestPath;
                }   }

                /* set prices using matches found at position == cur */
                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
                    U32 const offset = matches[matchNb].off;
                    U32 const lastML = matches[matchNb].len;
                    U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
                    U32 mlen;

                    DEBUGLOG(7, "testing match %u => offBase=%4u, mlen=%2u, llen=%2u",
                                matchNb, matches[matchNb].off, lastML, litlen);

                    for (mlen = lastML; mlen >= startML; mlen--) {  /* scan downward */
                        U32 const pos = cur + mlen;
                        int const price = (int)basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);

                        if ((pos > last_pos) || (price < opt[pos].price)) {
                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
                            while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; }   /* fill empty positions */
                            opt[pos].mlen = mlen;
                            opt[pos].off = offset;
                            opt[pos].litlen = litlen;
                            opt[pos].price = price;
                        } else {
                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
                            if (optLevel==0) break;  /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
                        }
            }   }   }
        }  /* for (cur = 1; cur <= last_pos; cur++) */
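
        /* Forward pass complete : opt[last_pos] holds the cheapest parse
         * reaching the furthest analyzed position; take it as the last
         * sequence and let the backward pass below rebuild the full chain. */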

        lastSequence = opt[last_pos];
        cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0;  /* single sequence, and it starts before `ip` */
        assert(cur < ZSTD_OPT_NUM);  /* control overflow */

_shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */
        assert(opt[0].mlen == 0);

        /* Set the next chunk's repcodes based on the repcodes of the beginning
         * of the last match, and the last sequence. This avoids us having to
         * update them while traversing the sequences.
         */
        if (lastSequence.mlen != 0) {
            repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
            ZSTD_memcpy(rep, &reps, sizeof(reps));
        } else {
            ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
        }

        {   U32 const storeEnd = cur + 1;
            U32 storeStart = storeEnd;
            U32 seqPos = cur;

            DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
                        last_pos, cur); (void)last_pos;
            assert(storeEnd < ZSTD_OPT_NUM);
            DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
                        storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
            opt[storeEnd] = lastSequence;
            while (seqPos > 0) {
                U32 const backDist = ZSTD_totalLen(opt[seqPos]);
                storeStart--;
                DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
                            seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
                opt[storeStart] = opt[seqPos];
                seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
            }
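
            /* opt[storeStart..storeEnd] now holds the selected sequences in
             * forward order : each entry carries its own litlen+mlen, so
             * stepping back by ZSTD_totalLen() from `cur` visits exactly the
             * positions where a selected sequence ends. */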

            /* save sequences */
            DEBUGLOG(6, "sending selected sequences into seqStore")
            {   U32 storePos;
                for (storePos = storeStart; storePos <= storeEnd; storePos++) {
                    U32 const llen = opt[storePos].litlen;
                    U32 const mlen = opt[storePos].mlen;
                    U32 const offBase = opt[storePos].off;
                    U32 const advance = llen + mlen;
                    DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
                                anchor - istart, (unsigned)llen, (unsigned)mlen);

                    if (mlen == 0) {  /* only literals => must be last "sequence", actually starting a new stream of sequences */
                        assert(storePos == storeEnd);   /* must be last sequence */
                        ip = anchor + llen;     /* last "sequence" is a bunch of literals => don't progress anchor */
                        continue;   /* will finish */
                    }

                    assert(anchor + llen <= iend);
                    ZSTD_updateStats(optStatePtr, llen, anchor, offBase, mlen);
                    ZSTD_storeSeq(seqStore, llen, anchor, iend, offBase, mlen);
                    anchor += advance;
                    ip = anchor;
            }   }
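
            /* ZSTD_updateStats() refreshed the frequency counters for each
             * stored sequence; recompute the cached base prices so that the
             * next optimization round within this block uses updated stats. */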
            ZSTD_setBasePrices(optStatePtr, optLevel);
        }
    }   /* while (ip < ilimit) */

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}

static size_t ZSTD_compressBlock_opt0(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
{
    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode);
}

static size_t ZSTD_compressBlock_opt2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
{
    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode);
}
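
/* note : ZSTD_compressBlock_opt0() and ZSTD_compressBlock_opt2() differ only
 * by the constant optLevel they pass, presumably so that
 * ZSTD_compressBlock_opt_generic() can be specialized for each level. */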

size_t ZSTD_compressBlock_btopt(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    DEBUGLOG(5, "ZSTD_compressBlock_btopt");
    return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
}


/* ZSTD_initStats_ultra():
 * make a first compression pass, just to seed stats with more accurate starting values.
 * only works on first block, with no dictionary and no ldm.
 * this function cannot error out; its narrow contract must be respected.
 */
static void
ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
                     seqStore_t* seqStore,
                     U32 rep[ZSTD_REP_NUM],
                     const void* src, size_t srcSize)
{
    U32 tmpRep[ZSTD_REP_NUM];  /* updated rep codes will sink here */
    ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));

    DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
    assert(ms->opt.litLengthSum == 0);    /* first block */
    assert(seqStore->sequences == seqStore->sequencesStart);   /* no ldm */
    assert(ms->window.dictLimit == ms->window.lowLimit);   /* no dictionary */
    assert(ms->window.dictLimit - ms->nextToUpdate <= 1);   /* no prefix (note: intentional overflow, defined as 2-complement) */

    ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_noDict);   /* generate stats into ms->opt */

    /* invalidate first scan from history, only keep entropy stats */
    ZSTD_resetSeqStore(seqStore);
    ms->window.base -= srcSize;
    ms->window.dictLimit += (U32)srcSize;
    ms->window.lowLimit = ms->window.dictLimit;
    ms->nextToUpdate = ms->window.dictLimit;
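
    /* After rewinding window.base and raising dictLimit/lowLimit by srcSize,
     * every index inserted during the first pass lies below lowLimit, so the
     * second pass cannot reference it : match history is effectively discarded
     * while the entropy statistics gathered in ms->opt are preserved. */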
}

size_t ZSTD_compressBlock_btultra(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
    return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
}

size_t ZSTD_compressBlock_btultra2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    U32 const curr = (U32)((const BYTE*)src - ms->window.base);
    DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);

    /* 2-passes strategy:
     * this strategy makes a first pass over the first block, to collect statistics
     * used to seed the next round's statistics.
     * After the 1st pass, the function forgets the match history and starts the block anew.
     * Consequently, this can only work if no data has been previously loaded in tables,
     * aka, no dictionary, no prefix, no ldm preprocessing.
     * The compression ratio gain is generally small (~0.5% on first block),
     * while the cost is 2x cpu time on the first block. */
    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
    if ( (ms->opt.litLengthSum == 0)                          /* first block */
      && (seqStore->sequences == seqStore->sequencesStart)    /* no ldm */
      && (ms->window.dictLimit == ms->window.lowLimit)        /* no dictionary */
      && (curr == ms->window.dictLimit)     /* start of frame, nothing already loaded nor skipped */
      && (srcSize > ZSTD_PREDEF_THRESHOLD)  /* input large enough to not employ default stats */
      ) {
        ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
    }

    return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
}

size_t ZSTD_compressBlock_btopt_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_btultra_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_btopt_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
}

size_t ZSTD_compressBlock_btultra_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
}

/* note : no btultra2 variant for extDict nor dictMatchState,
 * because btultra2 is not meant to work with dictionaries
 * and is only relevant for the first block (no prefix) */