author     Brad King <brad.king@kitware.com>  2021-10-13 10:04:27 -0400
committer  Brad King <brad.king@kitware.com>  2021-10-13 10:04:27 -0400
commit     eb8db99596522060db081ea1b824a0302df3551d (patch)
tree       b548c406dd855fcf46abd187722b8175f9dd300f /Utilities/cmzstd
parent     8c65ea474437f20f787038e0c955dbb1582d7e5d (diff)
parent     da1e841ccf2fd4600b97abf1695b307592867352 (diff)
download   cmake-eb8db99596522060db081ea1b824a0302df3551d.tar.gz

Merge branch 'upstream-zstd' into update-zstd

# By zstd upstream
* upstream-zstd:
  zstd 2021-05-14 (a488ba11)
Diffstat (limited to 'Utilities/cmzstd')
-rw-r--r--  Utilities/cmzstd/README.md | 12
-rw-r--r--  Utilities/cmzstd/lib/common/bitstream.h | 41
-rw-r--r--  Utilities/cmzstd/lib/common/compiler.h | 122
-rw-r--r--  Utilities/cmzstd/lib/common/cpu.h | 4
-rw-r--r--  Utilities/cmzstd/lib/common/debug.c | 2
-rw-r--r--  Utilities/cmzstd/lib/common/debug.h | 31
-rw-r--r--  Utilities/cmzstd/lib/common/entropy_common.c | 232
-rw-r--r--  Utilities/cmzstd/lib/common/error_private.c | 3
-rw-r--r--  Utilities/cmzstd/lib/common/error_private.h | 6
-rw-r--r--  Utilities/cmzstd/lib/common/fse.h | 52
-rw-r--r--  Utilities/cmzstd/lib/common/fse_decompress.c | 161
-rw-r--r--  Utilities/cmzstd/lib/common/huf.h | 36
-rw-r--r--  Utilities/cmzstd/lib/common/mem.h | 167
-rw-r--r--  Utilities/cmzstd/lib/common/pool.c | 40
-rw-r--r--  Utilities/cmzstd/lib/common/pool.h | 4
-rw-r--r--  Utilities/cmzstd/lib/common/threading.c | 11
-rw-r--r--  Utilities/cmzstd/lib/common/xxhash.c | 80
-rw-r--r--  Utilities/cmzstd/lib/common/xxhash.h | 4
-rw-r--r--  Utilities/cmzstd/lib/common/zstd_common.c | 20
-rw-r--r--  Utilities/cmzstd/lib/common/zstd_deps.h | 111
-rw-r--r--  Utilities/cmzstd/lib/common/zstd_internal.h | 167
-rw-r--r--  Utilities/cmzstd/lib/common/zstd_trace.h | 154
-rw-r--r--  Utilities/cmzstd/lib/compress/fse_compress.c | 55
-rw-r--r--  Utilities/cmzstd/lib/compress/hist.c | 56
-rw-r--r--  Utilities/cmzstd/lib/compress/hist.h | 4
-rw-r--r--  Utilities/cmzstd/lib/compress/huf_compress.c | 391
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_compress.c | 3525
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_compress_internal.h | 368
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_compress_literals.c | 16
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_compress_literals.h | 2
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_compress_sequences.c | 36
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_compress_sequences.h | 2
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_compress_superblock.c | 317
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_compress_superblock.h | 2
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_cwksp.h | 271
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_double_fast.c | 50
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_double_fast.h | 2
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_fast.c | 46
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_fast.h | 2
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_lazy.c | 1228
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_lazy.h | 60
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_ldm.c | 525
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_ldm.h | 11
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_ldm_geartab.h | 103
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_opt.c | 237
-rw-r--r--  Utilities/cmzstd/lib/compress/zstd_opt.h | 2
-rw-r--r--  Utilities/cmzstd/lib/compress/zstdmt_compress.c | 508
-rw-r--r--  Utilities/cmzstd/lib/compress/zstdmt_compress.h | 144
-rw-r--r--  Utilities/cmzstd/lib/decompress/huf_decompress.c | 581
-rw-r--r--  Utilities/cmzstd/lib/decompress/zstd_ddict.c | 18
-rw-r--r--  Utilities/cmzstd/lib/decompress/zstd_ddict.h | 4
-rw-r--r--  Utilities/cmzstd/lib/decompress/zstd_decompress.c | 456
-rw-r--r--  Utilities/cmzstd/lib/decompress/zstd_decompress_block.c | 266
-rw-r--r--  Utilities/cmzstd/lib/decompress/zstd_decompress_block.h | 9
-rw-r--r--  Utilities/cmzstd/lib/decompress/zstd_decompress_internal.h | 38
-rw-r--r--  Utilities/cmzstd/lib/deprecated/zbuff.h | 2
-rw-r--r--  Utilities/cmzstd/lib/deprecated/zbuff_common.c | 2
-rw-r--r--  Utilities/cmzstd/lib/deprecated/zbuff_compress.c | 2
-rw-r--r--  Utilities/cmzstd/lib/deprecated/zbuff_decompress.c | 2
-rw-r--r--  Utilities/cmzstd/lib/dictBuilder/cover.c | 82
-rw-r--r--  Utilities/cmzstd/lib/dictBuilder/cover.h | 13
-rw-r--r--  Utilities/cmzstd/lib/dictBuilder/divsufsort.c | 2
-rw-r--r--  Utilities/cmzstd/lib/dictBuilder/fastcover.c | 62
-rw-r--r--  Utilities/cmzstd/lib/dictBuilder/zdict.c | 59
-rw-r--r--  Utilities/cmzstd/lib/zdict.h (renamed from Utilities/cmzstd/lib/dictBuilder/zdict.h) | 157
-rw-r--r--  Utilities/cmzstd/lib/zstd.h | 694
-rw-r--r--  Utilities/cmzstd/lib/zstd_errors.h (renamed from Utilities/cmzstd/lib/common/zstd_errors.h) | 3
67 files changed, 8561 insertions(+), 3314 deletions(-)
diff --git a/Utilities/cmzstd/README.md b/Utilities/cmzstd/README.md
index 5c300fdc49..dcca7662d2 100644
--- a/Utilities/cmzstd/README.md
+++ b/Utilities/cmzstd/README.md
@@ -176,6 +176,12 @@ Going into `build` directory, you will find additional possibilities:
You can build the zstd binary via buck by executing: `buck build programs:zstd` from the root of the repo.
The output binary will be in `buck-out/gen/programs/`.
+## Testing
+
+You can run quick local smoke tests by executing the `playTest.sh` script from the `src/tests` directory.
+Two env variables `$ZSTD_BIN` and `$DATAGEN_BIN` are needed for the test script to locate the zstd and datagen binary.
+For information on CI testing, please refer to TESTING.md
+
## Status
Zstandard is currently deployed within Facebook. It is used continuously to compress large amounts of data in multiple formats and use cases.
@@ -187,7 +193,7 @@ Zstandard is dual-licensed under [BSD](LICENSE) and [GPLv2](COPYING).
## Contributing
-The "dev" branch is the one where all contributions are merged before reaching "master".
-If you plan to propose a patch, please commit into the "dev" branch, or its own feature branch.
-Direct commit to "master" are not permitted.
+The `dev` branch is the one where all contributions are merged before reaching `release`.
+If you plan to propose a patch, please commit into the `dev` branch, or its own feature branch.
+Direct commit to `release` are not permitted.
For more information, please read [CONTRIBUTING](CONTRIBUTING.md).
diff --git a/Utilities/cmzstd/lib/common/bitstream.h b/Utilities/cmzstd/lib/common/bitstream.h
index 37b99c01ee..2e5a933ad3 100644
--- a/Utilities/cmzstd/lib/common/bitstream.h
+++ b/Utilities/cmzstd/lib/common/bitstream.h
@@ -1,7 +1,7 @@
/* ******************************************************************
* bitstream
* Part of FSE library
- * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -17,7 +17,6 @@
#if defined (__cplusplus)
extern "C" {
#endif
-
/*
* This API consists of small unitary functions, which must be inlined for best performance.
* Since link-time-optimization is not available for all compilers,
@@ -36,10 +35,12 @@ extern "C" {
/*=========================================
* Target specific
=========================================*/
-#if defined(__BMI__) && defined(__GNUC__)
-# include <immintrin.h> /* support for bextr (experimental) */
-#elif defined(__ICCARM__)
-# include <intrinsics.h>
+#ifndef ZSTD_NO_INTRINSICS
+# if defined(__BMI__) && defined(__GNUC__)
+# include <immintrin.h> /* support for bextr (experimental) */
+# elif defined(__ICCARM__)
+# include <intrinsics.h>
+# endif
#endif
#define STREAM_ACCUMULATOR_MIN_32 25
@@ -141,8 +142,12 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val)
assert(val != 0);
{
# if defined(_MSC_VER) /* Visual */
- unsigned long r=0;
- return _BitScanReverse ( &r, val ) ? (unsigned)r : 0;
+# if STATIC_BMI2 == 1
+ return _lzcnt_u32(val) ^ 31;
+# else
+ unsigned long r = 0;
+ return _BitScanReverse(&r, val) ? (unsigned)r : 0;
+# endif
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return __builtin_clz (val) ^ 31;
# elif defined(__ICCARM__) /* IAR Intrinsic */
@@ -198,7 +203,7 @@ MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
size_t value, unsigned nbBits)
{
- MEM_STATIC_ASSERT(BIT_MASK_SIZE == 32);
+ DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32);
assert(nbBits < BIT_MASK_SIZE);
assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
@@ -271,7 +276,7 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
*/
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
- if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+ if (srcSize < 1) { ZSTD_memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
bitD->start = (const char*)srcBuffer;
bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);
@@ -317,12 +322,12 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si
return srcSize;
}
-MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
{
return bitContainer >> start;
}
-MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
{
U32 const regMask = sizeof(bitContainer)*8 - 1;
/* if start > regMask, bitstream is corrupted, and result is undefined */
@@ -330,10 +335,14 @@ MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 co
return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
}
-MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
{
+#if defined(STATIC_BMI2) && STATIC_BMI2 == 1
+ return _bzhi_u64(bitContainer, nbBits);
+#else
assert(nbBits < BIT_MASK_SIZE);
return bitContainer & BIT_mask[nbBits];
+#endif
}
/*! BIT_lookBits() :
@@ -342,7 +351,7 @@ MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
* On 32-bits, maxNbBits==24.
* On 64-bits, maxNbBits==56.
* @return : value extracted */
-MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
{
/* arbitrate between double-shift and shift+mask */
#if 1
@@ -365,7 +374,7 @@ MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
}
-MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
+MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
{
bitD->bitsConsumed += nbBits;
}
@@ -374,7 +383,7 @@ MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
* Read (consume) next n bits from local register and update.
* Pay attention to not read more than nbBits contained into local register.
* @return : extracted value. */
-MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
{
size_t const value = BIT_lookBits(bitD, nbBits);
BIT_skipBits(bitD, nbBits);
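Aside, not part of the diff above: the new MSVC fast path in BIT_highbit32 returns `_lzcnt_u32(val) ^ 31`. For a nonzero 32-bit value the leading-zero count lies in [0, 31], so XOR with 31 equals 31 minus that count, which is the index of the highest set bit that `_BitScanReverse` would report. A small self-contained check, with purely illustrative helper names:

```c
#include <assert.h>

/* Illustrative portable helpers; not zstd code. */
static unsigned clz32_portable(unsigned v)          /* leading zeros, v != 0 */
{
    unsigned n = 0;
    while (!(v & 0x80000000u)) { n++; v <<= 1; }
    return n;
}

static unsigned highbit32_portable(unsigned v)      /* index of the highest set bit */
{
    unsigned pos = 0;
    while (v >>= 1) pos++;
    return pos;
}

int main(void)
{
    unsigned v;
    for (v = 1; v != 0; v <<= 1) {                  /* every power of two */
        assert((clz32_portable(v) ^ 31u) == highbit32_portable(v));
    }
    return 0;
}
```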
diff --git a/Utilities/cmzstd/lib/common/compiler.h b/Utilities/cmzstd/lib/common/compiler.h
index 1b864eaeeb..f717855639 100644
--- a/Utilities/cmzstd/lib/common/compiler.h
+++ b/Utilities/cmzstd/lib/common/compiler.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -39,6 +39,17 @@
#endif
/**
+ On MSVC qsort requires that functions passed into it use the __cdecl calling conversion(CC).
+ This explictly marks such functions as __cdecl so that the code will still compile
+ if a CC other than __cdecl has been made the default.
+*/
+#if defined(_MSC_VER)
+# define WIN_CDECL __cdecl
+#else
+# define WIN_CDECL
+#endif
+
+/**
* FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
* parameters. They must be inlined for the compiler to eliminate the constant
* branches.
@@ -79,6 +90,7 @@
# endif
#endif
+
/* target attribute */
#ifndef __has_attribute
#define __has_attribute(x) 0 /* Compatibility with non-clang compilers. */
@@ -114,12 +126,12 @@
# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
# define PREFETCH_L1(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
# define PREFETCH_L2(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T1)
-# elif defined(__aarch64__)
-# define PREFETCH_L1(ptr) __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr)))
-# define PREFETCH_L2(ptr) __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr)))
# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
# define PREFETCH_L1(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
# define PREFETCH_L2(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
+# elif defined(__aarch64__)
+# define PREFETCH_L1(ptr) __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr)))
+# define PREFETCH_L2(ptr) __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr)))
# else
# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */
# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */
@@ -172,4 +184,106 @@
# pragma warning(disable : 4324) /* disable: C4324: padded structure */
#endif
+/*Like DYNAMIC_BMI2 but for compile time determination of BMI2 support*/
+#ifndef STATIC_BMI2
+# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))
+# ifdef __AVX2__ //MSVC does not have a BMI2 specific flag, but every CPU that supports AVX2 also supports BMI2
+# define STATIC_BMI2 1
+# endif
+# endif
+#endif
+
+#ifndef STATIC_BMI2
+ #define STATIC_BMI2 0
+#endif
+
+/* compat. with non-clang compilers */
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+/* compat. with non-clang compilers */
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+/* detects whether we are being compiled under msan */
+#ifndef ZSTD_MEMORY_SANITIZER
+# if __has_feature(memory_sanitizer)
+# define ZSTD_MEMORY_SANITIZER 1
+# else
+# define ZSTD_MEMORY_SANITIZER 0
+# endif
+#endif
+
+#if ZSTD_MEMORY_SANITIZER
+/* Not all platforms that support msan provide sanitizers/msan_interface.h.
+ * We therefore declare the functions we need ourselves, rather than trying to
+ * include the header file... */
+#include <stddef.h> /* size_t */
+#define ZSTD_DEPS_NEED_STDINT
+#include "zstd_deps.h" /* intptr_t */
+
+/* Make memory region fully initialized (without changing its contents). */
+void __msan_unpoison(const volatile void *a, size_t size);
+
+/* Make memory region fully uninitialized (without changing its contents).
+ This is a legacy interface that does not update origin information. Use
+ __msan_allocated_memory() instead. */
+void __msan_poison(const volatile void *a, size_t size);
+
+/* Returns the offset of the first (at least partially) poisoned byte in the
+ memory range, or -1 if the whole range is good. */
+intptr_t __msan_test_shadow(const volatile void *x, size_t size);
+#endif
+
+/* detects whether we are being compiled under asan */
+#ifndef ZSTD_ADDRESS_SANITIZER
+# if __has_feature(address_sanitizer)
+# define ZSTD_ADDRESS_SANITIZER 1
+# elif defined(__SANITIZE_ADDRESS__)
+# define ZSTD_ADDRESS_SANITIZER 1
+# else
+# define ZSTD_ADDRESS_SANITIZER 0
+# endif
+#endif
+
+#if ZSTD_ADDRESS_SANITIZER
+/* Not all platforms that support asan provide sanitizers/asan_interface.h.
+ * We therefore declare the functions we need ourselves, rather than trying to
+ * include the header file... */
+#include <stddef.h> /* size_t */
+
+/**
+ * Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
+ *
+ * This memory must be previously allocated by your program. Instrumented
+ * code is forbidden from accessing addresses in this region until it is
+ * unpoisoned. This function is not guaranteed to poison the entire region -
+ * it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
+ * alignment restrictions.
+ *
+ * \note This function is not thread-safe because no two threads can poison or
+ * unpoison memory in the same memory region simultaneously.
+ *
+ * \param addr Start of memory region.
+ * \param size Size of memory region. */
+void __asan_poison_memory_region(void const volatile *addr, size_t size);
+
+/**
+ * Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
+ *
+ * This memory must be previously allocated by your program. Accessing
+ * addresses in this region is allowed until this region is poisoned again.
+ * This function could unpoison a super-region of <c>[addr, addr+size)</c> due
+ * to ASan alignment restrictions.
+ *
+ * \note This function is not thread-safe because no two threads can
+ * poison or unpoison memory in the same memory region simultaneously.
+ *
+ * \param addr Start of memory region.
+ * \param size Size of memory region. */
+void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
+#endif
+
#endif /* ZSTD_COMPILER_H */
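Aside, not from the diff: the `__asan_*` declarations above are meant to be used behind the new ZSTD_ADDRESS_SANITIZER / ZSTD_MEMORY_SANITIZER macros so that regular builds compile the calls away. A hedged sketch of that guarding pattern; the helper names and the scratch buffer are hypothetical, only the declarations come from the header.

```c
#include <stddef.h>

#ifndef ZSTD_ADDRESS_SANITIZER
#  define ZSTD_ADDRESS_SANITIZER 0    /* stand-in for the detection logic above */
#endif

#if ZSTD_ADDRESS_SANITIZER
void __asan_poison_memory_region(void const volatile *addr, size_t size);
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
#endif

/* Hypothetical helpers: poison scratch memory while it must not be touched. */
static void scratch_mark_unused(void* scratch, size_t size)
{
#if ZSTD_ADDRESS_SANITIZER
    __asan_poison_memory_region(scratch, size);    /* instrumented accesses now fault */
#else
    (void)scratch; (void)size;                     /* no-op without ASan */
#endif
}

static void scratch_mark_in_use(void* scratch, size_t size)
{
#if ZSTD_ADDRESS_SANITIZER
    __asan_unpoison_memory_region(scratch, size);
#else
    (void)scratch; (void)size;
#endif
}
```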
diff --git a/Utilities/cmzstd/lib/common/cpu.h b/Utilities/cmzstd/lib/common/cpu.h
index 6e8a974f62..8acd33be3c 100644
--- a/Utilities/cmzstd/lib/common/cpu.h
+++ b/Utilities/cmzstd/lib/common/cpu.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, Facebook, Inc.
+ * Copyright (c) Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -16,8 +16,6 @@
* https://github.com/facebook/folly/blob/master/folly/CpuId.h
*/
-#include <string.h>
-
#include "mem.h"
#ifdef _MSC_VER
diff --git a/Utilities/cmzstd/lib/common/debug.c b/Utilities/cmzstd/lib/common/debug.c
index f303f4a2e5..bb863c9ea6 100644
--- a/Utilities/cmzstd/lib/common/debug.c
+++ b/Utilities/cmzstd/lib/common/debug.c
@@ -1,7 +1,7 @@
/* ******************************************************************
* debug
* Part of FSE library
- * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
diff --git a/Utilities/cmzstd/lib/common/debug.h b/Utilities/cmzstd/lib/common/debug.h
index ac6224888d..3b2a320a18 100644
--- a/Utilities/cmzstd/lib/common/debug.h
+++ b/Utilities/cmzstd/lib/common/debug.h
@@ -1,7 +1,7 @@
/* ******************************************************************
* debug
* Part of FSE library
- * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -51,15 +51,6 @@ extern "C" {
#endif
-/* DEBUGFILE can be defined externally,
- * typically through compiler command line.
- * note : currently useless.
- * Value must be stderr or stdout */
-#ifndef DEBUGFILE
-# define DEBUGFILE stderr
-#endif
-
-
/* recommended values for DEBUGLEVEL :
* 0 : release mode, no debug, all run-time checks disabled
* 1 : enables assert() only, no display
@@ -76,7 +67,8 @@ extern "C" {
*/
#if (DEBUGLEVEL>=1)
-# include <assert.h>
+# define ZSTD_DEPS_NEED_ASSERT
+# include "zstd_deps.h"
#else
# ifndef assert /* assert may be already defined, due to prior #include <assert.h> */
# define assert(condition) ((void)0) /* disable assert (default) */
@@ -84,7 +76,8 @@ extern "C" {
#endif
#if (DEBUGLEVEL>=2)
-# include <stdio.h>
+# define ZSTD_DEPS_NEED_IO
+# include "zstd_deps.h"
extern int g_debuglevel; /* the variable is only declared,
it actually lives in debug.c,
and is shared by the whole process.
@@ -92,14 +85,14 @@ extern int g_debuglevel; /* the variable is only declared,
It's useful when enabling very verbose levels
on selective conditions (such as position in src) */
-# define RAWLOG(l, ...) { \
- if (l<=g_debuglevel) { \
- fprintf(stderr, __VA_ARGS__); \
+# define RAWLOG(l, ...) { \
+ if (l<=g_debuglevel) { \
+ ZSTD_DEBUG_PRINT(__VA_ARGS__); \
} }
-# define DEBUGLOG(l, ...) { \
- if (l<=g_debuglevel) { \
- fprintf(stderr, __FILE__ ": " __VA_ARGS__); \
- fprintf(stderr, " \n"); \
+# define DEBUGLOG(l, ...) { \
+ if (l<=g_debuglevel) { \
+ ZSTD_DEBUG_PRINT(__FILE__ ": " __VA_ARGS__); \
+ ZSTD_DEBUG_PRINT(" \n"); \
} }
#else
# define RAWLOG(l, ...) {} /* disabled */
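Aside, not from the diff: the RAWLOG/DEBUGLOG macros above only expand to real output when the translation unit is built with DEBUGLEVEL >= 2, and each call is further filtered at run time against g_debuglevel. A hedged usage sketch; the function and message text are illustrative.

```c
/* Build with e.g. -DDEBUGLEVEL=2 for these macros to emit anything. */
#include "debug.h"

static void report_compress_begin(size_t srcSize)
{
    DEBUGLOG(2, "compressing %u bytes", (unsigned)srcSize);  /* prefixed with the file name */
    RAWLOG(5, ".");                                          /* raw, very verbose output */
}
```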
diff --git a/Utilities/cmzstd/lib/common/entropy_common.c b/Utilities/cmzstd/lib/common/entropy_common.c
index 9d3e4e8e36..41cd69566b 100644
--- a/Utilities/cmzstd/lib/common/entropy_common.c
+++ b/Utilities/cmzstd/lib/common/entropy_common.c
@@ -1,6 +1,6 @@
/* ******************************************************************
* Common functions of New Generation Entropy library
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -38,8 +38,31 @@ const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
/*-**************************************************************
* FSE NCount encoding-decoding
****************************************************************/
-size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
- const void* headerBuffer, size_t hbSize)
+static U32 FSE_ctz(U32 val)
+{
+ assert(val != 0);
+ {
+# if defined(_MSC_VER) /* Visual */
+ unsigned long r=0;
+ return _BitScanForward(&r, val) ? (unsigned)r : 0;
+# elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */
+ return __builtin_ctz(val);
+# elif defined(__ICCARM__) /* IAR Intrinsic */
+ return __CTZ(val);
+# else /* Software version */
+ U32 count = 0;
+ while ((val & 1) == 0) {
+ val >>= 1;
+ ++count;
+ }
+ return count;
+# endif
+ }
+}
+
+FORCE_INLINE_TEMPLATE
+size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
{
const BYTE* const istart = (const BYTE*) headerBuffer;
const BYTE* const iend = istart + hbSize;
@@ -50,23 +73,23 @@ size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* t
U32 bitStream;
int bitCount;
unsigned charnum = 0;
+ unsigned const maxSV1 = *maxSVPtr + 1;
int previous0 = 0;
- if (hbSize < 4) {
- /* This function only works when hbSize >= 4 */
- char buffer[4];
- memset(buffer, 0, sizeof(buffer));
- memcpy(buffer, headerBuffer, hbSize);
+ if (hbSize < 8) {
+ /* This function only works when hbSize >= 8 */
+ char buffer[8] = {0};
+ ZSTD_memcpy(buffer, headerBuffer, hbSize);
{ size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
buffer, sizeof(buffer));
if (FSE_isError(countSize)) return countSize;
if (countSize > hbSize) return ERROR(corruption_detected);
return countSize;
} }
- assert(hbSize >= 4);
+ assert(hbSize >= 8);
/* init */
- memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0])); /* all symbols not present in NCount have a frequency of 0 */
+ ZSTD_memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0])); /* all symbols not present in NCount have a frequency of 0 */
bitStream = MEM_readLE32(ip);
nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
@@ -77,36 +100,58 @@ size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* t
threshold = 1<<nbBits;
nbBits++;
- while ((remaining>1) & (charnum<=*maxSVPtr)) {
+ for (;;) {
if (previous0) {
- unsigned n0 = charnum;
- while ((bitStream & 0xFFFF) == 0xFFFF) {
- n0 += 24;
- if (ip < iend-5) {
- ip += 2;
- bitStream = MEM_readLE32(ip) >> bitCount;
+ /* Count the number of repeats. Each time the
+ * 2-bit repeat code is 0b11 there is another
+ * repeat.
+ * Avoid UB by setting the high bit to 1.
+ */
+ int repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
+ while (repeats >= 12) {
+ charnum += 3 * 12;
+ if (LIKELY(ip <= iend-7)) {
+ ip += 3;
} else {
- bitStream >>= 16;
- bitCount += 16;
- } }
- while ((bitStream & 3) == 3) {
- n0 += 3;
- bitStream >>= 2;
- bitCount += 2;
+ bitCount -= (int)(8 * (iend - 7 - ip));
+ bitCount &= 31;
+ ip = iend - 4;
+ }
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
}
- n0 += bitStream & 3;
+ charnum += 3 * repeats;
+ bitStream >>= 2 * repeats;
+ bitCount += 2 * repeats;
+
+ /* Add the final repeat which isn't 0b11. */
+ assert((bitStream & 3) < 3);
+ charnum += bitStream & 3;
bitCount += 2;
- if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
- while (charnum < n0) normalizedCounter[charnum++] = 0;
- if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+
+ /* This is an error, but break and return an error
+ * at the end, because returning out of a loop makes
+ * it harder for the compiler to optimize.
+ */
+ if (charnum >= maxSV1) break;
+
+ /* We don't need to set the normalized count to 0
+ * because we already memset the whole buffer to 0.
+ */
+
+ if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
assert((bitCount >> 3) <= 3); /* For first condition to work */
ip += bitCount>>3;
bitCount &= 7;
- bitStream = MEM_readLE32(ip) >> bitCount;
} else {
- bitStream >>= 2;
- } }
- { int const max = (2*threshold-1) - remaining;
+ bitCount -= (int)(8 * (iend - 4 - ip));
+ bitCount &= 31;
+ ip = iend - 4;
+ }
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ }
+ {
+ int const max = (2*threshold-1) - remaining;
int count;
if ((bitStream & (threshold-1)) < (U32)max) {
@@ -119,24 +164,43 @@ size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* t
}
count--; /* extra accuracy */
- remaining -= count < 0 ? -count : count; /* -1 means +1 */
+ /* When it matters (small blocks), this is a
+ * predictable branch, because we don't use -1.
+ */
+ if (count >= 0) {
+ remaining -= count;
+ } else {
+ assert(count == -1);
+ remaining += count;
+ }
normalizedCounter[charnum++] = (short)count;
previous0 = !count;
- while (remaining < threshold) {
- nbBits--;
- threshold >>= 1;
+
+ assert(threshold > 1);
+ if (remaining < threshold) {
+ /* This branch can be folded into the
+ * threshold update condition because we
+ * know that threshold > 1.
+ */
+ if (remaining <= 1) break;
+ nbBits = BIT_highbit32(remaining) + 1;
+ threshold = 1 << (nbBits - 1);
}
+ if (charnum >= maxSV1) break;
- if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+ if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
ip += bitCount>>3;
bitCount &= 7;
} else {
bitCount -= (int)(8 * (iend - 4 - ip));
+ bitCount &= 31;
ip = iend - 4;
}
- bitStream = MEM_readLE32(ip) >> (bitCount & 31);
- } } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ } }
if (remaining != 1) return ERROR(corruption_detected);
+ /* Only possible when there are too many zeros. */
+ if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall);
if (bitCount > 32) return ERROR(corruption_detected);
*maxSVPtr = charnum-1;
@@ -144,6 +208,43 @@ size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* t
return ip-istart;
}
+/* Avoids the FORCE_INLINE of the _body() function. */
+static size_t FSE_readNCount_body_default(
+ short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+}
+
+#if DYNAMIC_BMI2
+TARGET_ATTRIBUTE("bmi2") static size_t FSE_readNCount_body_bmi2(
+ short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+}
+#endif
+
+size_t FSE_readNCount_bmi2(
+ short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize, int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+ }
+#endif
+ (void)bmi2;
+ return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+}
+
+size_t FSE_readNCount(
+ short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0);
+}
+
/*! HUF_readStats() :
Read compact Huffman tree, saved by HUF_writeCTable().
@@ -156,6 +257,17 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize)
{
+ U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
+ return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0);
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize,
+ int bmi2)
+{
U32 weightTotal;
const BYTE* ip = (const BYTE*) src;
size_t iSize;
@@ -163,7 +275,7 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
if (!srcSize) return ERROR(srcSize_wrong);
iSize = ip[0];
- /* memset(huffWeight, 0, hwSize); *//* is not necessary, even though some analyzer complain ... */
+ /* ZSTD_memset(huffWeight, 0, hwSize); *//* is not necessary, even though some analyzer complain ... */
if (iSize >= 128) { /* special header */
oSize = iSize - 127;
@@ -177,14 +289,14 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
huffWeight[n+1] = ip[n/2] & 15;
} } }
else { /* header compressed with FSE (normal case) */
- FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)]; /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */
if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
- oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6); /* max (hwSize-1) values decoded, as last one is implied */
+ /* max (hwSize-1) values decoded, as last one is implied */
+ oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize-1, ip+1, iSize, 6, workSpace, wkspSize, bmi2);
if (FSE_isError(oSize)) return oSize;
}
/* collect weight stats */
- memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
+ ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
weightTotal = 0;
{ U32 n; for (n=0; n<oSize; n++) {
if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
@@ -214,3 +326,37 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
*nbSymbolsPtr = (U32)(oSize+1);
return iSize+1;
}
+
+/* Avoids the FORCE_INLINE of the _body() function. */
+static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0);
+}
+
+#if DYNAMIC_BMI2
+static TARGET_ATTRIBUTE("bmi2") size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 1);
+}
+#endif
+
+size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize,
+ int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
+ }
+#endif
+ (void)bmi2;
+ return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
+}
diff --git a/Utilities/cmzstd/lib/common/error_private.c b/Utilities/cmzstd/lib/common/error_private.c
index cd437529c1..6d1135f8c3 100644
--- a/Utilities/cmzstd/lib/common/error_private.c
+++ b/Utilities/cmzstd/lib/common/error_private.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -48,6 +48,7 @@ const char* ERR_getErrorString(ERR_enum code)
case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong";
+ case PREFIX(srcBuffer_wrong): return "Source buffer is wrong";
case PREFIX(maxCode):
default: return notErrorCode;
}
diff --git a/Utilities/cmzstd/lib/common/error_private.h b/Utilities/cmzstd/lib/common/error_private.h
index 982cf8e9fe..6d8b9f7763 100644
--- a/Utilities/cmzstd/lib/common/error_private.h
+++ b/Utilities/cmzstd/lib/common/error_private.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -21,8 +21,8 @@ extern "C" {
/* ****************************************
* Dependencies
******************************************/
-#include <stddef.h> /* size_t */
-#include "zstd_errors.h" /* enum list */
+#include "../zstd_errors.h" /* enum list */
+#include "zstd_deps.h" /* size_t */
/* ****************************************
diff --git a/Utilities/cmzstd/lib/common/fse.h b/Utilities/cmzstd/lib/common/fse.h
index ff54e70ea7..19dd4febcd 100644
--- a/Utilities/cmzstd/lib/common/fse.h
+++ b/Utilities/cmzstd/lib/common/fse.h
@@ -1,7 +1,7 @@
/* ******************************************************************
* FSE : Finite State Entropy codec
* Public Prototypes declaration
- * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -23,7 +23,7 @@ extern "C" {
/*-*****************************************
* Dependencies
******************************************/
-#include <stddef.h> /* size_t, ptrdiff_t */
+#include "zstd_deps.h" /* size_t, ptrdiff_t */
/*-*****************************************
@@ -137,10 +137,16 @@ FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize
/*! FSE_normalizeCount():
normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
+ useLowProbCount is a boolean parameter which trades off compressed size for
+ faster header decoding. When it is set to 1, the compressed data will be slightly
+ smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be
+ faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0
+ is a good default, since header deserialization makes a big speed difference.
+ Otherwise, useLowProbCount=1 is a good default, since the speed difference is small.
@return : tableLog,
or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog,
- const unsigned* count, size_t srcSize, unsigned maxSymbolValue);
+ const unsigned* count, size_t srcSize, unsigned maxSymbolValue, unsigned useLowProbCount);
/*! FSE_NCountWriteBound():
Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
@@ -228,6 +234,13 @@ FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter,
unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
const void* rBuffer, size_t rBuffSize);
+/*! FSE_readNCount_bmi2():
+ * Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise.
+ */
+FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter,
+ unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
+ const void* rBuffer, size_t rBuffSize, int bmi2);
+
/*! Constructor and Destructor of FSE_DTable.
Note that its size depends on 'tableLog' */
typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
@@ -288,12 +301,12 @@ If there is an error, the function will return an error code, which can be teste
*******************************************/
/* FSE buffer bounds */
#define FSE_NCOUNTBOUND 512
-#define FSE_BLOCKBOUND(size) (size + (size>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */)
+#define FSE_BLOCKBOUND(size) ((size) + ((size)>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */)
#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
-#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
-#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
+#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<((maxTableLog)-1)) + (((maxSymbolValue)+1)*2))
+#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<(maxTableLog)))
/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue) (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
@@ -309,9 +322,9 @@ unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsi
/* FSE_compress_wksp() :
* Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
- * FSE_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
+ * FSE_COMPRESS_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
*/
-#define FSE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
+#define FSE_COMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
@@ -322,18 +335,30 @@ size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
/* FSE_buildCTable_wksp() :
* Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
- * `wkspSize` must be >= `(1<<tableLog)`.
+ * `wkspSize` must be >= `FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)` of `unsigned`.
*/
+#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (maxSymbolValue + 2 + (1ull << (tableLog - 2)))
+#define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog))
size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
+#define FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) (sizeof(short) * (maxSymbolValue + 1) + (1ULL << maxTableLog) + 8)
+#define FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ((FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) + sizeof(unsigned) - 1) / sizeof(unsigned))
+FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
+/**< Same as FSE_buildDTable(), using an externally allocated `workspace` produced with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxSymbolValue)` */
+
size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
-size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog);
-/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */
+#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1)
+#define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned))
+size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize);
+/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)` */
+
+size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2);
+/**< Same as FSE_decompress_wksp() but with dynamic BMI2 support. Pass 1 if your CPU supports BMI2 or 0 if it doesn't. */
typedef enum {
FSE_repeat_none, /**< Cannot use the previous table */
@@ -644,6 +669,9 @@ MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
#ifndef FSE_DEFAULT_MEMORY_USAGE
# define FSE_DEFAULT_MEMORY_USAGE 13
#endif
+#if (FSE_DEFAULT_MEMORY_USAGE > FSE_MAX_MEMORY_USAGE)
+# error "FSE_DEFAULT_MEMORY_USAGE must be <= FSE_MAX_MEMORY_USAGE"
+#endif
/*!FSE_MAX_SYMBOL_VALUE :
* Maximum symbol value authorized.
@@ -677,7 +705,7 @@ MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
# error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
#endif
-#define FSE_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)
+#define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3)
#endif /* FSE_STATIC_LINKING_ONLY */
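Aside, not from the diff: the new useLowProbCount argument documented above trades a slightly smaller header (pass 1) against faster FSE_readNCount()/FSE_buildDTable() (pass 0). A hedged caller sketch following that guidance; the ~2 KB cutoff and the wrapper name are illustrative, not mandated by the header.

```c
#include "fse.h"

/* Hypothetical wrapper: pick useLowProbCount from the input size. */
static size_t normalize_for_block(short* norm, unsigned tableLog,
                                  const unsigned* count, size_t srcSize,
                                  unsigned maxSymbolValue)
{
    unsigned const useLowProbCount = (srcSize >= 2048);   /* small inputs favor fast headers */
    return FSE_normalizeCount(norm, tableLog, count, srcSize,
                              maxSymbolValue, useLowProbCount);
}
```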
diff --git a/Utilities/cmzstd/lib/common/fse_decompress.c b/Utilities/cmzstd/lib/common/fse_decompress.c
index bcc2223ccc..f4ff58fa0a 100644
--- a/Utilities/cmzstd/lib/common/fse_decompress.c
+++ b/Utilities/cmzstd/lib/common/fse_decompress.c
@@ -1,6 +1,6 @@
/* ******************************************************************
* FSE : Finite State Entropy decoder
- * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -16,13 +16,14 @@
/* **************************************************************
* Includes
****************************************************************/
-#include <stdlib.h> /* malloc, free, qsort */
-#include <string.h> /* memcpy, memset */
+#include "debug.h" /* assert */
#include "bitstream.h"
#include "compiler.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#include "error_private.h"
+#define ZSTD_DEPS_NEED_MALLOC
+#include "zstd_deps.h"
/* **************************************************************
@@ -59,25 +60,27 @@
FSE_DTable* FSE_createDTable (unsigned tableLog)
{
if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
- return (FSE_DTable*)malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
+ return (FSE_DTable*)ZSTD_malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
}
void FSE_freeDTable (FSE_DTable* dt)
{
- free(dt);
+ ZSTD_free(dt);
}
-size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
- U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
+ U16* symbolNext = (U16*)workSpace;
+ BYTE* spread = (BYTE*)(symbolNext + maxSymbolValue + 1);
U32 const maxSV1 = maxSymbolValue + 1;
U32 const tableSize = 1 << tableLog;
U32 highThreshold = tableSize-1;
/* Sanity Checks */
+ if (FSE_BUILD_DTABLE_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(maxSymbolValue_tooLarge);
if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
@@ -95,11 +98,57 @@ size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned
if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
symbolNext[s] = normalizedCounter[s];
} } }
- memcpy(dt, &DTableH, sizeof(DTableH));
+ ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
}
/* Spread symbols */
- { U32 const tableMask = tableSize-1;
+ if (highThreshold == tableSize - 1) {
+ size_t const tableMask = tableSize-1;
+ size_t const step = FSE_TABLESTEP(tableSize);
+ /* First lay down the symbols in order.
+ * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
+ * misses since small blocks generally have small table logs, so nearly
+ * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
+ * our buffer to handle the over-write.
+ */
+ {
+ U64 const add = 0x0101010101010101ull;
+ size_t pos = 0;
+ U64 sv = 0;
+ U32 s;
+ for (s=0; s<maxSV1; ++s, sv += add) {
+ int i;
+ int const n = normalizedCounter[s];
+ MEM_write64(spread + pos, sv);
+ for (i = 8; i < n; i += 8) {
+ MEM_write64(spread + pos + i, sv);
+ }
+ pos += n;
+ }
+ }
+ /* Now we spread those positions across the table.
+ * The benefit of doing it in two stages is that we avoid the the
+ * variable size inner loop, which caused lots of branch misses.
+ * Now we can run through all the positions without any branch misses.
+ * We unroll the loop twice, since that is what emperically worked best.
+ */
+ {
+ size_t position = 0;
+ size_t s;
+ size_t const unroll = 2;
+ assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
+ for (s = 0; s < (size_t)tableSize; s += unroll) {
+ size_t u;
+ for (u = 0; u < unroll; ++u) {
+ size_t const uPosition = (position + (u * step)) & tableMask;
+ tableDecode[uPosition].symbol = spread[s + u];
+ }
+ position = (position + (unroll * step)) & tableMask;
+ }
+ assert(position == 0);
+ }
+ } else {
+ U32 const tableMask = tableSize-1;
U32 const step = FSE_TABLESTEP(tableSize);
U32 s, position = 0;
for (s=0; s<maxSV1; s++) {
@@ -124,6 +173,11 @@ size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned
return 0;
}
+size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+{
+ return FSE_buildDTable_internal(dt, normalizedCounter, maxSymbolValue, tableLog, workSpace, wkspSize);
+}
+
#ifndef FSE_COMMONDEFS_ONLY
@@ -251,36 +305,99 @@ size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
}
-size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog)
+size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
+{
+ return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, /* bmi2 */ 0);
+}
+
+typedef struct {
+ short ncount[FSE_MAX_SYMBOL_VALUE + 1];
+ FSE_DTable dtable[1]; /* Dynamically sized */
+} FSE_DecompressWksp;
+
+
+FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
+ void* dst, size_t dstCapacity,
+ const void* cSrc, size_t cSrcSize,
+ unsigned maxLog, void* workSpace, size_t wkspSize,
+ int bmi2)
{
const BYTE* const istart = (const BYTE*)cSrc;
const BYTE* ip = istart;
- short counting[FSE_MAX_SYMBOL_VALUE+1];
unsigned tableLog;
unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+ FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace;
+
+ DEBUG_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
+ if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC);
/* normal FSE decoding mode */
- size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
- if (FSE_isError(NCountLength)) return NCountLength;
- /* if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); */ /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */
- if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
- ip += NCountLength;
- cSrcSize -= NCountLength;
+ {
+ size_t const NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
+ if (FSE_isError(NCountLength)) return NCountLength;
+ if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
+ assert(NCountLength <= cSrcSize);
+ ip += NCountLength;
+ cSrcSize -= NCountLength;
+ }
+
+ if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge);
+ workSpace = wksp->dtable + FSE_DTABLE_SIZE_U32(tableLog);
+ wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
+
+ CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
+
+ {
+ const void* ptr = wksp->dtable;
+ const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
+ const U32 fastMode = DTableH->fastMode;
- CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) );
+ /* select fast mode (static) */
+ if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 1);
+ return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 0);
+ }
+}
+
+/* Avoids the FORCE_INLINE of the _body() function. */
+static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
+{
+ return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0);
+}
+
+#if DYNAMIC_BMI2
+TARGET_ATTRIBUTE("bmi2") static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
+{
+ return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1);
+}
+#endif
- return FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace); /* always return, even if it is an error code */
+size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ return FSE_decompress_wksp_body_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
+ }
+#endif
+ (void)bmi2;
+ return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
}
typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
+#ifndef ZSTD_NO_UNUSED_FUNCTIONS
+size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) {
+ U32 wksp[FSE_BUILD_DTABLE_WKSP_SIZE_U32(FSE_TABLELOG_ABSOLUTE_MAX, FSE_MAX_SYMBOL_VALUE)];
+ return FSE_buildDTable_wksp(dt, normalizedCounter, maxSymbolValue, tableLog, wksp, sizeof(wksp));
+}
+
size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize)
{
- DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */
- return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, FSE_MAX_TABLELOG);
+ /* Static analyzer seems unable to understand this table will be properly initialized later */
+ U32 wksp[FSE_DECOMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
+ return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, FSE_MAX_TABLELOG, wksp, sizeof(wksp));
}
-
+#endif
#endif /* FSE_COMMONDEFS_ONLY */
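Aside, not from the diff: the new two-stage spread first lays symbols down with 64-bit stores; incrementing `sv` by 0x0101010101010101 per symbol keeps every byte of `sv` equal to the current symbol value, so one MEM_write64 covers up to eight table slots. A standalone illustration of that invariant:

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    uint64_t const add = 0x0101010101010101ull;   /* one per byte lane */
    uint64_t sv = 0;
    unsigned s;
    for (s = 0; s < 8; ++s, sv += add) {
        unsigned char bytes[8];
        memcpy(bytes, &sv, sizeof(sv));
        /* Every byte lane holds the current symbol, independent of endianness. */
        assert(bytes[0] == s && bytes[3] == s && bytes[7] == s);
    }
    return 0;
}
```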
diff --git a/Utilities/cmzstd/lib/common/huf.h b/Utilities/cmzstd/lib/common/huf.h
index ef432685da..3d47ced030 100644
--- a/Utilities/cmzstd/lib/common/huf.h
+++ b/Utilities/cmzstd/lib/common/huf.h
@@ -1,7 +1,7 @@
/* ******************************************************************
* huff0 huffman codec,
* part of Finite State Entropy library
- * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -20,7 +20,7 @@ extern "C" {
#define HUF_H_298734234
/* *** Dependencies *** */
-#include <stddef.h> /* size_t */
+#include "zstd_deps.h" /* size_t */
/* *** library symbols visibility *** */
@@ -111,6 +111,8 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
/* *** Dependencies *** */
#include "mem.h" /* U32 */
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
/* *** Constants *** */
@@ -133,12 +135,16 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
/* static allocation of HUF's Compression Table */
+/* this is a private definition, just exposed for allocation and strict aliasing purpose. never EVER access its members directly */
+struct HUF_CElt_s {
+ U16 val;
+ BYTE nbBits;
+}; /* typedef'd to HUF_CElt */
+typedef struct HUF_CElt_s HUF_CElt; /* consider it an incomplete type */
#define HUF_CTABLE_SIZE_U32(maxSymbolValue) ((maxSymbolValue)+1) /* Use tables of U32, for proper alignment */
#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32))
#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
- U32 name##hb[HUF_CTABLE_SIZE_U32(maxSymbolValue)]; \
- void* name##hv = &(name##hb); \
- HUF_CElt* name = (HUF_CElt*)(name##hv) /* no final ; */
+ HUF_CElt name[HUF_CTABLE_SIZE_U32(maxSymbolValue)] /* no final ; */
/* static allocation of HUF's DTable */
typedef U32 HUF_DTable;
@@ -184,9 +190,9 @@ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
* or to save and regenerate 'CTable' using external methods.
*/
unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
-typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */
size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */
size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
+size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize);
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
@@ -226,6 +232,19 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize);
+/*! HUF_readStats_wksp() :
+ * Same as HUF_readStats() but takes an external workspace which must be
+ * 4-byte aligned and its size must be >= HUF_READ_STATS_WORKSPACE_SIZE.
+ * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
+ */
+#define HUF_READ_STATS_WORKSPACE_SIZE_U32 FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1)
+#define HUF_READ_STATS_WORKSPACE_SIZE (HUF_READ_STATS_WORKSPACE_SIZE_U32 * sizeof(unsigned))
+size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize,
+ U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workspace, size_t wkspSize,
+ int bmi2);
+
/** HUF_readCTable() :
* Loading a CTable saved with HUF_writeCTable() */
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);
@@ -260,7 +279,7 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
* a required workspace size greater than that specified in the following
* macro.
*/
-#define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10)
+#define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9))
#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
#ifndef HUF_FORCE_DECOMPRESS_X2
@@ -332,6 +351,9 @@ size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstS
#endif
size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
+#endif
#endif /* HUF_STATIC_LINKING_ONLY */
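For context, a minimal caller-side sketch of the simplified static CTable allocation declared above; the helper name, the include path, and the pre-computed count[] histogram are illustrative assumptions, not part of the patch:

    #define HUF_STATIC_LINKING_ONLY
    #include "huf.h"

    /* Builds a Huffman table over a pre-computed histogram and serializes its
     * description into dst.  HUF_CREATE_STATIC_CTABLE() now expands to a plain
     * HUF_CElt array, so no pointer-aliasing dance is needed on the caller side. */
    static size_t example_buildAndWriteCTable(void* dst, size_t dstCapacity,
                                              const unsigned* count,
                                              unsigned maxSymbolValue, size_t srcSize)
    {
        HUF_CREATE_STATIC_CTABLE(ctable, HUF_SYMBOLVALUE_MAX);
        unsigned const tableLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, maxSymbolValue);
        size_t const maxNbBits = HUF_buildCTable(ctable, count, maxSymbolValue, tableLog);
        if (HUF_isError(maxNbBits)) return maxNbBits;
        return HUF_writeCTable(dst, dstCapacity, ctable, maxSymbolValue, (unsigned)maxNbBits);
    }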
diff --git a/Utilities/cmzstd/lib/common/mem.h b/Utilities/cmzstd/lib/common/mem.h
index 89c8aea7d2..9f3b81ab9d 100644
--- a/Utilities/cmzstd/lib/common/mem.h
+++ b/Utilities/cmzstd/lib/common/mem.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -18,8 +18,10 @@ extern "C" {
/*-****************************************
* Dependencies
******************************************/
-#include <stddef.h> /* size_t, ptrdiff_t */
-#include <string.h> /* memcpy */
+#include <stddef.h> /* size_t, ptrdiff_t */
+#include "compiler.h" /* __has_builtin */
+#include "debug.h" /* DEBUG_STATIC_ASSERT */
+#include "zstd_deps.h" /* ZSTD_memcpy */
/*-****************************************
@@ -39,93 +41,15 @@ extern "C" {
# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif
-#ifndef __has_builtin
-# define __has_builtin(x) 0 /* compat. with non-clang compilers */
-#endif
-
-/* code only tested on 32 and 64 bits systems */
-#define MEM_STATIC_ASSERT(c) { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
-MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
-
-/* detects whether we are being compiled under msan */
-#if defined (__has_feature)
-# if __has_feature(memory_sanitizer)
-# define MEMORY_SANITIZER 1
-# endif
-#endif
-
-#if defined (MEMORY_SANITIZER)
-/* Not all platforms that support msan provide sanitizers/msan_interface.h.
- * We therefore declare the functions we need ourselves, rather than trying to
- * include the header file... */
-
-#include <stdint.h> /* intptr_t */
-
-/* Make memory region fully initialized (without changing its contents). */
-void __msan_unpoison(const volatile void *a, size_t size);
-
-/* Make memory region fully uninitialized (without changing its contents).
- This is a legacy interface that does not update origin information. Use
- __msan_allocated_memory() instead. */
-void __msan_poison(const volatile void *a, size_t size);
-
-/* Returns the offset of the first (at least partially) poisoned byte in the
- memory range, or -1 if the whole range is good. */
-intptr_t __msan_test_shadow(const volatile void *x, size_t size);
-#endif
-
-/* detects whether we are being compiled under asan */
-#if defined (__has_feature)
-# if __has_feature(address_sanitizer)
-# define ADDRESS_SANITIZER 1
-# endif
-#elif defined(__SANITIZE_ADDRESS__)
-# define ADDRESS_SANITIZER 1
-#endif
-
-#if defined (ADDRESS_SANITIZER)
-/* Not all platforms that support asan provide sanitizers/asan_interface.h.
- * We therefore declare the functions we need ourselves, rather than trying to
- * include the header file... */
-
-/**
- * Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
- *
- * This memory must be previously allocated by your program. Instrumented
- * code is forbidden from accessing addresses in this region until it is
- * unpoisoned. This function is not guaranteed to poison the entire region -
- * it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
- * alignment restrictions.
- *
- * \note This function is not thread-safe because no two threads can poison or
- * unpoison memory in the same memory region simultaneously.
- *
- * \param addr Start of memory region.
- * \param size Size of memory region. */
-void __asan_poison_memory_region(void const volatile *addr, size_t size);
-
-/**
- * Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
- *
- * This memory must be previously allocated by your program. Accessing
- * addresses in this region is allowed until this region is poisoned again.
- * This function could unpoison a super-region of <c>[addr, addr+size)</c> due
- * to ASan alignment restrictions.
- *
- * \note This function is not thread-safe because no two threads can
- * poison or unpoison memory in the same memory region simultaneously.
- *
- * \param addr Start of memory region.
- * \param size Size of memory region. */
-void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
-#endif
-
-
/*-**************************************************************
* Basic Types
*****************************************************************/
#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-# include <stdint.h>
+# if defined(_AIX)
+# include <inttypes.h>
+# else
+# include <stdint.h> /* intptr_t */
+# endif
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef int16_t S16;
@@ -157,7 +81,53 @@ void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
/*-**************************************************************
-* Memory I/O
+* Memory I/O API
+*****************************************************************/
+/*=== Static platform detection ===*/
+MEM_STATIC unsigned MEM_32bits(void);
+MEM_STATIC unsigned MEM_64bits(void);
+MEM_STATIC unsigned MEM_isLittleEndian(void);
+
+/*=== Native unaligned read/write ===*/
+MEM_STATIC U16 MEM_read16(const void* memPtr);
+MEM_STATIC U32 MEM_read32(const void* memPtr);
+MEM_STATIC U64 MEM_read64(const void* memPtr);
+MEM_STATIC size_t MEM_readST(const void* memPtr);
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value);
+MEM_STATIC void MEM_write32(void* memPtr, U32 value);
+MEM_STATIC void MEM_write64(void* memPtr, U64 value);
+
+/*=== Little endian unaligned read/write ===*/
+MEM_STATIC U16 MEM_readLE16(const void* memPtr);
+MEM_STATIC U32 MEM_readLE24(const void* memPtr);
+MEM_STATIC U32 MEM_readLE32(const void* memPtr);
+MEM_STATIC U64 MEM_readLE64(const void* memPtr);
+MEM_STATIC size_t MEM_readLEST(const void* memPtr);
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val);
+MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val);
+MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32);
+MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64);
+MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val);
+
+/*=== Big endian unaligned read/write ===*/
+MEM_STATIC U32 MEM_readBE32(const void* memPtr);
+MEM_STATIC U64 MEM_readBE64(const void* memPtr);
+MEM_STATIC size_t MEM_readBEST(const void* memPtr);
+
+MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32);
+MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64);
+MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val);
+
+/*=== Byteswap ===*/
+MEM_STATIC U32 MEM_swap32(U32 in);
+MEM_STATIC U64 MEM_swap64(U64 in);
+MEM_STATIC size_t MEM_swapST(size_t in);
+
+
+/*-**************************************************************
+* Memory I/O Implementation
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS :
* By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
@@ -173,9 +143,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
* Prefer these methods in priority order (0 > 1 > 2)
*/
#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
-# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
-# define MEM_FORCE_MEMORY_ACCESS 2
-# elif defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
+# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
# define MEM_FORCE_MEMORY_ACCESS 1
# endif
#endif
@@ -236,37 +204,37 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v =
MEM_STATIC U16 MEM_read16(const void* memPtr)
{
- U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+ U16 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC U32 MEM_read32(const void* memPtr)
{
- U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+ U32 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC U64 MEM_read64(const void* memPtr)
{
- U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+ U64 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC size_t MEM_readST(const void* memPtr)
{
- size_t val; memcpy(&val, memPtr, sizeof(val)); return val;
+ size_t val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
- memcpy(memPtr, &value, sizeof(value));
+ ZSTD_memcpy(memPtr, &value, sizeof(value));
}
MEM_STATIC void MEM_write32(void* memPtr, U32 value)
{
- memcpy(memPtr, &value, sizeof(value));
+ ZSTD_memcpy(memPtr, &value, sizeof(value));
}
MEM_STATIC void MEM_write64(void* memPtr, U64 value)
{
- memcpy(memPtr, &value, sizeof(value));
+ ZSTD_memcpy(memPtr, &value, sizeof(value));
}
#endif /* MEM_FORCE_MEMORY_ACCESS */
@@ -338,7 +306,7 @@ MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
MEM_STATIC U32 MEM_readLE24(const void* memPtr)
{
- return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
+ return (U32)MEM_readLE16(memPtr) + ((U32)(((const BYTE*)memPtr)[2]) << 16);
}
MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
@@ -445,6 +413,9 @@ MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
MEM_writeBE64(memPtr, (U64)val);
}
+/* code only tested on 32 and 64 bits systems */
+MEM_STATIC void MEM_check(void) { DEBUG_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
+
#if defined (__cplusplus)
}
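The (U32) casts added to MEM_readLE24() keep the shift of byte 2 in unsigned arithmetic instead of a promoted int. A standalone sketch of the same memcpy-based unaligned-read idiom, using standard types and invented names:

    #include <stdint.h>
    #include <string.h>

    /* Native-order unaligned read: valid on any alignment; compilers lower the
     * memcpy to a single load where the target allows it (same idea as MEM_read32). */
    static uint32_t example_read32(const void* memPtr)
    {
        uint32_t val;
        memcpy(&val, memPtr, sizeof(val));
        return val;
    }

    /* 24-bit little-endian read: each byte is widened before shifting, mirroring
     * the casts this patch adds, so byte 2 is never shifted as a signed int. */
    static uint32_t example_readLE24(const void* memPtr)
    {
        const uint8_t* const p = (const uint8_t*)memPtr;
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16);
    }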
diff --git a/Utilities/cmzstd/lib/common/pool.c b/Utilities/cmzstd/lib/common/pool.c
index aa4b4de0d3..ea70b8b65a 100644
--- a/Utilities/cmzstd/lib/common/pool.c
+++ b/Utilities/cmzstd/lib/common/pool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -10,9 +10,9 @@
/* ====== Dependencies ======= */
-#include <stddef.h> /* size_t */
+#include "zstd_deps.h" /* size_t */
#include "debug.h" /* assert */
-#include "zstd_internal.h" /* ZSTD_malloc, ZSTD_free */
+#include "zstd_internal.h" /* ZSTD_customMalloc, ZSTD_customFree */
#include "pool.h"
/* ====== Compiler specifics ====== */
@@ -105,6 +105,10 @@ static void* POOL_thread(void* opaque) {
assert(0); /* Unreachable */
}
+POOL_ctx* ZSTD_createThreadPool(size_t numThreads) {
+ return POOL_create (numThreads, 0);
+}
+
POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
}
@@ -115,14 +119,14 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
/* Check parameters */
if (!numThreads) { return NULL; }
/* Allocate the context and zero initialize */
- ctx = (POOL_ctx*)ZSTD_calloc(sizeof(POOL_ctx), customMem);
+ ctx = (POOL_ctx*)ZSTD_customCalloc(sizeof(POOL_ctx), customMem);
if (!ctx) { return NULL; }
/* Initialize the job queue.
* It needs one extra space since one space is wasted to differentiate
* empty and full queues.
*/
ctx->queueSize = queueSize + 1;
- ctx->queue = (POOL_job*)ZSTD_malloc(ctx->queueSize * sizeof(POOL_job), customMem);
+ ctx->queue = (POOL_job*)ZSTD_customMalloc(ctx->queueSize * sizeof(POOL_job), customMem);
ctx->queueHead = 0;
ctx->queueTail = 0;
ctx->numThreadsBusy = 0;
@@ -136,7 +140,7 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
}
ctx->shutdown = 0;
/* Allocate space for the thread handles */
- ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
+ ctx->threads = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
ctx->threadCapacity = 0;
ctx->customMem = customMem;
/* Check for errors */
@@ -179,12 +183,14 @@ void POOL_free(POOL_ctx *ctx) {
ZSTD_pthread_mutex_destroy(&ctx->queueMutex);
ZSTD_pthread_cond_destroy(&ctx->queuePushCond);
ZSTD_pthread_cond_destroy(&ctx->queuePopCond);
- ZSTD_free(ctx->queue, ctx->customMem);
- ZSTD_free(ctx->threads, ctx->customMem);
- ZSTD_free(ctx, ctx->customMem);
+ ZSTD_customFree(ctx->queue, ctx->customMem);
+ ZSTD_customFree(ctx->threads, ctx->customMem);
+ ZSTD_customFree(ctx, ctx->customMem);
}
-
+void ZSTD_freeThreadPool (ZSTD_threadPool* pool) {
+ POOL_free (pool);
+}
size_t POOL_sizeof(POOL_ctx *ctx) {
if (ctx==NULL) return 0; /* supports sizeof NULL */
@@ -203,11 +209,11 @@ static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
return 0;
}
/* numThreads > threadCapacity */
- { ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
+ { ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
if (!threadPool) return 1;
/* replace existing thread pool */
- memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
- ZSTD_free(ctx->threads, ctx->customMem);
+ ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
+ ZSTD_customFree(ctx->threads, ctx->customMem);
ctx->threads = threadPool;
/* Initialize additional threads */
{ size_t threadId;
@@ -301,7 +307,7 @@ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)
struct POOL_ctx_s {
int dummy;
};
-static POOL_ctx g_ctx;
+static POOL_ctx g_poolCtx;
POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
@@ -311,11 +317,11 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customM
(void)numThreads;
(void)queueSize;
(void)customMem;
- return &g_ctx;
+ return &g_poolCtx;
}
void POOL_free(POOL_ctx* ctx) {
- assert(!ctx || ctx == &g_ctx);
+ assert(!ctx || ctx == &g_poolCtx);
(void)ctx;
}
@@ -337,7 +343,7 @@ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {
size_t POOL_sizeof(POOL_ctx* ctx) {
if (ctx==NULL) return 0; /* supports sizeof NULL */
- assert(ctx == &g_ctx);
+ assert(ctx == &g_poolCtx);
return sizeof(*ctx);
}
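ZSTD_createThreadPool()/ZSTD_freeThreadPool() make the internal pool reachable from the public API. A sketch of how an application might share one pool across contexts, assuming the experimental ZSTD_CCtx_refThreadPool() from zstd.h's static-linking-only section (error handling elided):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    static void example_sharedPool(void)
    {
        ZSTD_threadPool* const pool = ZSTD_createThreadPool(4);   /* wraps POOL_create(4, 0) */
        ZSTD_CCtx* const cctxA = ZSTD_createCCtx();
        ZSTD_CCtx* const cctxB = ZSTD_createCCtx();
        ZSTD_CCtx_setParameter(cctxA, ZSTD_c_nbWorkers, 4);
        ZSTD_CCtx_setParameter(cctxB, ZSTD_c_nbWorkers, 4);
        ZSTD_CCtx_refThreadPool(cctxA, pool);   /* both contexts queue jobs on the same threads */
        ZSTD_CCtx_refThreadPool(cctxB, pool);
        /* ... compress with cctxA / cctxB ... */
        ZSTD_freeCCtx(cctxA);
        ZSTD_freeCCtx(cctxB);
        ZSTD_freeThreadPool(pool);
    }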
diff --git a/Utilities/cmzstd/lib/common/pool.h b/Utilities/cmzstd/lib/common/pool.h
index 259bafc975..e18aa0708f 100644
--- a/Utilities/cmzstd/lib/common/pool.h
+++ b/Utilities/cmzstd/lib/common/pool.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -16,7 +16,7 @@ extern "C" {
#endif
-#include <stddef.h> /* size_t */
+#include "zstd_deps.h"
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_customMem */
#include "../zstd.h"
diff --git a/Utilities/cmzstd/lib/common/threading.c b/Utilities/cmzstd/lib/common/threading.c
index e2edb313eb..92cf57c195 100644
--- a/Utilities/cmzstd/lib/common/threading.c
+++ b/Utilities/cmzstd/lib/common/threading.c
@@ -78,11 +78,12 @@ int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr)
#if defined(ZSTD_MULTITHREAD) && DEBUGLEVEL >= 1 && !defined(_WIN32)
-#include <stdlib.h>
+#define ZSTD_DEPS_NEED_MALLOC
+#include "zstd_deps.h"
int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr)
{
- *mutex = (pthread_mutex_t*)malloc(sizeof(pthread_mutex_t));
+ *mutex = (pthread_mutex_t*)ZSTD_malloc(sizeof(pthread_mutex_t));
if (!*mutex)
return 1;
return pthread_mutex_init(*mutex, attr);
@@ -94,14 +95,14 @@ int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex)
return 0;
{
int const ret = pthread_mutex_destroy(*mutex);
- free(*mutex);
+ ZSTD_free(*mutex);
return ret;
}
}
int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr)
{
- *cond = (pthread_cond_t*)malloc(sizeof(pthread_cond_t));
+ *cond = (pthread_cond_t*)ZSTD_malloc(sizeof(pthread_cond_t));
if (!*cond)
return 1;
return pthread_cond_init(*cond, attr);
@@ -113,7 +114,7 @@ int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond)
return 0;
{
int const ret = pthread_cond_destroy(*cond);
- free(*cond);
+ ZSTD_free(*cond);
return ret;
}
}
diff --git a/Utilities/cmzstd/lib/common/xxhash.c b/Utilities/cmzstd/lib/common/xxhash.c
index 597de18fc8..926b33604e 100644
--- a/Utilities/cmzstd/lib/common/xxhash.c
+++ b/Utilities/cmzstd/lib/common/xxhash.c
@@ -1,6 +1,6 @@
/*
* xxHash - Fast Hash algorithm
- * Copyright (c) 2012-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - xxHash homepage: http://www.xxhash.com
@@ -30,9 +30,7 @@
* Prefer these methods in priority order (0 > 1 > 2)
*/
#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
-# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
-# define XXH_FORCE_MEMORY_ACCESS 2
-# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+# if (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
(defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) || \
defined(__ICCARM__)
# define XXH_FORCE_MEMORY_ACCESS 1
@@ -77,14 +75,12 @@
* Includes & Memory related functions
***************************************/
/* Modify the local functions below should you wish to use some other memory routines */
-/* for malloc(), free() */
-#include <stdlib.h>
-#include <stddef.h> /* size_t */
-static void* XXH_malloc(size_t s) { return malloc(s); }
-static void XXH_free (void* p) { free(p); }
-/* for memcpy() */
-#include <string.h>
-static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
+/* for ZSTD_malloc(), ZSTD_free() */
+#define ZSTD_DEPS_NEED_MALLOC
+#include "zstd_deps.h" /* size_t, ZSTD_malloc, ZSTD_free, ZSTD_memcpy */
+static void* XXH_malloc(size_t s) { return ZSTD_malloc(s); }
+static void XXH_free (void* p) { ZSTD_free(p); }
+static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_memcpy(dest,src,size); }
#ifndef XXH_STATIC_LINKING_ONLY
# define XXH_STATIC_LINKING_ONLY
@@ -95,49 +91,13 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp
/* *************************************
* Compiler Specific Options
***************************************/
-#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
-# define INLINE_KEYWORD inline
-#else
-# define INLINE_KEYWORD
-#endif
-
-#if defined(__GNUC__) || defined(__ICCARM__)
-# define FORCE_INLINE_ATTR __attribute__((always_inline))
-#elif defined(_MSC_VER)
-# define FORCE_INLINE_ATTR __forceinline
-#else
-# define FORCE_INLINE_ATTR
-#endif
-
-#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
-
-
-#ifdef _MSC_VER
-# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
-#endif
+#include "compiler.h"
/* *************************************
* Basic Types
***************************************/
-#ifndef MEM_MODULE
-# define MEM_MODULE
-# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-# include <stdint.h>
- typedef uint8_t BYTE;
- typedef uint16_t U16;
- typedef uint32_t U32;
- typedef int32_t S32;
- typedef uint64_t U64;
-# else
- typedef unsigned char BYTE;
- typedef unsigned short U16;
- typedef unsigned int U32;
- typedef signed int S32;
- typedef unsigned long long U64; /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
-# endif
-#endif
-
+#include "mem.h" /* BYTE, U32, U64, size_t */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
@@ -163,14 +123,14 @@ static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
static U32 XXH_read32(const void* memPtr)
{
U32 val;
- memcpy(&val, memPtr, sizeof(val));
+ ZSTD_memcpy(&val, memPtr, sizeof(val));
return val;
}
static U64 XXH_read64(const void* memPtr)
{
U64 val;
- memcpy(&val, memPtr, sizeof(val));
+ ZSTD_memcpy(&val, memPtr, sizeof(val));
return val;
}
@@ -307,12 +267,12 @@ XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
****************************/
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)
{
- memcpy(dstState, srcState, sizeof(*dstState));
+ ZSTD_memcpy(dstState, srcState, sizeof(*dstState));
}
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
{
- memcpy(dstState, srcState, sizeof(*dstState));
+ ZSTD_memcpy(dstState, srcState, sizeof(*dstState));
}
@@ -554,12 +514,12 @@ XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
{
XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
- memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */
+ ZSTD_memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */
state.v1 = seed + PRIME32_1 + PRIME32_2;
state.v2 = seed + PRIME32_2;
state.v3 = seed + 0;
state.v4 = seed - PRIME32_1;
- memcpy(statePtr, &state, sizeof(state));
+ ZSTD_memcpy(statePtr, &state, sizeof(state));
return XXH_OK;
}
@@ -567,12 +527,12 @@ XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int s
XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
{
XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
- memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */
+ ZSTD_memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */
state.v1 = seed + PRIME64_1 + PRIME64_2;
state.v2 = seed + PRIME64_2;
state.v3 = seed + 0;
state.v4 = seed - PRIME64_1;
- memcpy(statePtr, &state, sizeof(state));
+ ZSTD_memcpy(statePtr, &state, sizeof(state));
return XXH_OK;
}
@@ -843,14 +803,14 @@ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t
{
XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
- memcpy(dst, &hash, sizeof(*dst));
+ ZSTD_memcpy(dst, &hash, sizeof(*dst));
}
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
{
XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
- memcpy(dst, &hash, sizeof(*dst));
+ ZSTD_memcpy(dst, &hash, sizeof(*dst));
}
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
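The canonical helpers above now copy through ZSTD_memcpy(); their purpose is a byte-order-stable representation of the hash. A usage sketch, assuming the un-namespaced xxHash entry points (zstd's build may prefix them via XXH_NAMESPACE):

    #include <assert.h>
    #include "xxhash.h"

    static void example_canonicalRoundTrip(const void* data, size_t size)
    {
        XXH64_hash_t const h = XXH64(data, size, 0 /* seed */);
        XXH64_canonical_t canon;                       /* fixed (big-endian) byte order, safe to store or transmit */
        XXH64_canonicalFromHash(&canon, h);
        assert(XXH64_hashFromCanonical(&canon) == h);  /* round-trips identically on any endianness */
    }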
diff --git a/Utilities/cmzstd/lib/common/xxhash.h b/Utilities/cmzstd/lib/common/xxhash.h
index 4207eba832..16c1f1617b 100644
--- a/Utilities/cmzstd/lib/common/xxhash.h
+++ b/Utilities/cmzstd/lib/common/xxhash.h
@@ -1,7 +1,7 @@
/*
* xxHash - Extremely Fast Hash algorithm
* Header File
- * Copyright (c) 2012-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - xxHash source repository : https://github.com/Cyan4973/xxHash
@@ -55,7 +55,7 @@ extern "C" {
/* ****************************
* Definitions
******************************/
-#include <stddef.h> /* size_t */
+#include "zstd_deps.h"
typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
diff --git a/Utilities/cmzstd/lib/common/zstd_common.c b/Utilities/cmzstd/lib/common/zstd_common.c
index 91fe3323a5..3d7e35b309 100644
--- a/Utilities/cmzstd/lib/common/zstd_common.c
+++ b/Utilities/cmzstd/lib/common/zstd_common.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -13,8 +13,8 @@
/*-*************************************
* Dependencies
***************************************/
-#include <stdlib.h> /* malloc, calloc, free */
-#include <string.h> /* memset */
+#define ZSTD_DEPS_NEED_MALLOC
+#include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
#include "error_private.h"
#include "zstd_internal.h"
@@ -53,31 +53,31 @@ const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString
/*=**************************************************************
* Custom allocator
****************************************************************/
-void* ZSTD_malloc(size_t size, ZSTD_customMem customMem)
+void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
{
if (customMem.customAlloc)
return customMem.customAlloc(customMem.opaque, size);
- return malloc(size);
+ return ZSTD_malloc(size);
}
-void* ZSTD_calloc(size_t size, ZSTD_customMem customMem)
+void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
{
if (customMem.customAlloc) {
/* calloc implemented as malloc+memset;
* not as efficient as calloc, but next best guess for custom malloc */
void* const ptr = customMem.customAlloc(customMem.opaque, size);
- memset(ptr, 0, size);
+ ZSTD_memset(ptr, 0, size);
return ptr;
}
- return calloc(1, size);
+ return ZSTD_calloc(1, size);
}
-void ZSTD_free(void* ptr, ZSTD_customMem customMem)
+void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
{
if (ptr!=NULL) {
if (customMem.customFree)
customMem.customFree(customMem.opaque, ptr);
else
- free(ptr);
+ ZSTD_free(ptr);
}
}
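The renamed ZSTD_customMalloc()/ZSTD_customCalloc()/ZSTD_customFree() still dispatch to a caller-supplied ZSTD_customMem when one is provided, and otherwise fall back to the ZSTD_malloc()/ZSTD_free() macros from zstd_deps.h. A caller-side sketch, assuming the static-linking-only ZSTD_createCCtx_advanced(); the counting allocator is invented for the example:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <stdlib.h>
    #include <zstd.h>

    static void* counting_alloc(void* opaque, size_t size)
    {
        size_t* const total = (size_t*)opaque;
        *total += size;                 /* track how much the library requests */
        return malloc(size);
    }

    static void counting_free(void* opaque, void* address)
    {
        (void)opaque;
        free(address);
    }

    static void example_customMem(void)
    {
        size_t total = 0;
        ZSTD_customMem const cmem = { counting_alloc, counting_free, &total };
        ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cmem);   /* allocations now go through counting_alloc */
        /* ... use cctx ... */
        ZSTD_freeCCtx(cctx);
    }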
diff --git a/Utilities/cmzstd/lib/common/zstd_deps.h b/Utilities/cmzstd/lib/common/zstd_deps.h
new file mode 100644
index 0000000000..14211344a0
--- /dev/null
+++ b/Utilities/cmzstd/lib/common/zstd_deps.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* This file provides common libc dependencies that zstd requires.
+ * The purpose is to allow replacing this file with a custom implementation
+ * to compile zstd without libc support.
+ */
+
+/* Need:
+ * NULL
+ * INT_MAX
+ * UINT_MAX
+ * ZSTD_memcpy()
+ * ZSTD_memset()
+ * ZSTD_memmove()
+ */
+#ifndef ZSTD_DEPS_COMMON
+#define ZSTD_DEPS_COMMON
+
+#include <limits.h>
+#include <stddef.h>
+#include <string.h>
+
+#if defined(__GNUC__) && __GNUC__ >= 4
+# define ZSTD_memcpy(d,s,l) __builtin_memcpy((d),(s),(l))
+# define ZSTD_memmove(d,s,l) __builtin_memmove((d),(s),(l))
+# define ZSTD_memset(p,v,l) __builtin_memset((p),(v),(l))
+#else
+# define ZSTD_memcpy(d,s,l) memcpy((d),(s),(l))
+# define ZSTD_memmove(d,s,l) memmove((d),(s),(l))
+# define ZSTD_memset(p,v,l) memset((p),(v),(l))
+#endif
+
+#endif /* ZSTD_DEPS_COMMON */
+
+/* Need:
+ * ZSTD_malloc()
+ * ZSTD_free()
+ * ZSTD_calloc()
+ */
+#ifdef ZSTD_DEPS_NEED_MALLOC
+#ifndef ZSTD_DEPS_MALLOC
+#define ZSTD_DEPS_MALLOC
+
+#include <stdlib.h>
+
+#define ZSTD_malloc(s) malloc(s)
+#define ZSTD_calloc(n,s) calloc((n), (s))
+#define ZSTD_free(p) free((p))
+
+#endif /* ZSTD_DEPS_MALLOC */
+#endif /* ZSTD_DEPS_NEED_MALLOC */
+
+/*
+ * Provides 64-bit math support.
+ * Need:
+ * U64 ZSTD_div64(U64 dividend, U32 divisor)
+ */
+#ifdef ZSTD_DEPS_NEED_MATH64
+#ifndef ZSTD_DEPS_MATH64
+#define ZSTD_DEPS_MATH64
+
+#define ZSTD_div64(dividend, divisor) ((dividend) / (divisor))
+
+#endif /* ZSTD_DEPS_MATH64 */
+#endif /* ZSTD_DEPS_NEED_MATH64 */
+
+/* Need:
+ * assert()
+ */
+#ifdef ZSTD_DEPS_NEED_ASSERT
+#ifndef ZSTD_DEPS_ASSERT
+#define ZSTD_DEPS_ASSERT
+
+#include <assert.h>
+
+#endif /* ZSTD_DEPS_ASSERT */
+#endif /* ZSTD_DEPS_NEED_ASSERT */
+
+/* Need:
+ * ZSTD_DEBUG_PRINT()
+ */
+#ifdef ZSTD_DEPS_NEED_IO
+#ifndef ZSTD_DEPS_IO
+#define ZSTD_DEPS_IO
+
+#include <stdio.h>
+#define ZSTD_DEBUG_PRINT(...) fprintf(stderr, __VA_ARGS__)
+
+#endif /* ZSTD_DEPS_IO */
+#endif /* ZSTD_DEPS_NEED_IO */
+
+/* Only requested when <stdint.h> is known to be present.
+ * Need:
+ * intptr_t
+ */
+#ifdef ZSTD_DEPS_NEED_STDINT
+#ifndef ZSTD_DEPS_STDINT
+#define ZSTD_DEPS_STDINT
+
+#include <stdint.h>
+
+#endif /* ZSTD_DEPS_STDINT */
+#endif /* ZSTD_DEPS_NEED_STDINT */
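How the request macros are consumed: a translation unit defines the ZSTD_DEPS_NEED_* macro it wants before including zstd_deps.h, and units that do not define it never pull in the corresponding libc header. A minimal sketch (the helper is illustrative):

    #define ZSTD_DEPS_NEED_MALLOC          /* request the malloc group */
    #include "zstd_deps.h"                 /* ZSTD_malloc/ZSTD_calloc/ZSTD_free now available */

    static void* example_allocZeroed(size_t n)
    {
        void* const p = ZSTD_malloc(n);
        if (p) ZSTD_memset(p, 0, n);       /* the common group (memcpy/memset/memmove) is always provided */
        return p;
    }

Freestanding builds can swap this header out wholesale, as its own comment states.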
diff --git a/Utilities/cmzstd/lib/common/zstd_internal.h b/Utilities/cmzstd/lib/common/zstd_internal.h
index 3bc7e55a0a..68252e987e 100644
--- a/Utilities/cmzstd/lib/common/zstd_internal.h
+++ b/Utilities/cmzstd/lib/common/zstd_internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -19,7 +19,7 @@
/*-*************************************
* Dependencies
***************************************/
-#ifdef __aarch64__
+#if !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON)
#include <arm_neon.h>
#endif
#include "compiler.h"
@@ -36,6 +36,11 @@
# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
#endif
#include "xxhash.h" /* XXH_reset, update, digest */
+#ifndef ZSTD_NO_TRACE
+# include "zstd_trace.h"
+#else
+# define ZSTD_TRACE 0
+#endif
#if defined (__cplusplus)
extern "C" {
@@ -139,7 +144,7 @@ void _force_has_format_string(const char *format, ...) {
#define ZSTD_REP_NUM 3 /* number of repcodes */
#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1)
-static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
+static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
#define KB *(1 <<10)
#define MB *(1 <<20)
@@ -153,13 +158,13 @@ static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
#define BIT0 1
#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
-static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
-static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
+static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
+static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
#define ZSTD_FRAMEIDSIZE 4 /* magic number size */
#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
-static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
+static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
#define ZSTD_FRAMECHECKSUMSIZE 4
@@ -186,61 +191,75 @@ typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingTy
#define OffFSELog 8
#define MaxFSELog MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
-static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 2, 2, 3, 3,
- 4, 6, 7, 8, 9,10,11,12,
- 13,14,15,16 };
-static const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 3, 2, 1, 1, 1, 1, 1,
- -1,-1,-1,-1 };
+#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
+/* Each table cannot take more than #symbols * FSELog bits */
+#define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)
+
+static UNUSED_ATTR const U32 LL_bits[MaxLL+1] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3,
+ 4, 6, 7, 8, 9,10,11,12,
+ 13,14,15,16
+};
+static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = {
+ 4, 3, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 2, 1, 1, 1, 1, 1,
+ -1,-1,-1,-1
+};
#define LL_DEFAULTNORMLOG 6 /* for static allocation */
-static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
-
-static const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 2, 2, 3, 3,
- 4, 4, 5, 7, 8, 9,10,11,
- 12,13,14,15,16 };
-static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2,
- 2, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1,-1,-1,
- -1,-1,-1,-1,-1 };
+static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
+
+static UNUSED_ATTR const U32 ML_bits[MaxML+1] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 7, 8, 9,10,11,
+ 12,13,14,15,16
+};
+static UNUSED_ATTR const S16 ML_defaultNorm[MaxML+1] = {
+ 1, 4, 3, 2, 2, 2, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1,-1,-1,
+ -1,-1,-1,-1,-1
+};
#define ML_DEFAULTNORMLOG 6 /* for static allocation */
-static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
-
-static const S16 OF_defaultNorm[DefaultMaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2,
- 2, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- -1,-1,-1,-1,-1 };
+static UNUSED_ATTR const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
+
+static UNUSED_ATTR const S16 OF_defaultNorm[DefaultMaxOff+1] = {
+ 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ -1,-1,-1,-1,-1
+};
#define OF_DEFAULTNORMLOG 5 /* for static allocation */
-static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
+static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
/*-*******************************************
* Shared functions to include for inlining
*********************************************/
static void ZSTD_copy8(void* dst, const void* src) {
-#ifdef __aarch64__
+#if !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON)
vst1_u8((uint8_t*)dst, vld1_u8((const uint8_t*)src));
#else
- memcpy(dst, src, 8);
+ ZSTD_memcpy(dst, src, 8);
#endif
}
#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
static void ZSTD_copy16(void* dst, const void* src) {
-#ifdef __aarch64__
+#if !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON)
vst1q_u8((uint8_t*)dst, vld1q_u8((const uint8_t*)src));
#else
- memcpy(dst, src, 16);
+ ZSTD_memcpy(dst, src, 16);
#endif
}
#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
@@ -255,13 +274,13 @@ typedef enum {
} ZSTD_overlap_e;
/*! ZSTD_wildcopy() :
- * Custom version of memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0)
+ * Custom version of ZSTD_memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0)
* @param ovtype controls the overlap detection
* - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
* - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
* The src buffer must be before the dst buffer.
*/
-MEM_STATIC FORCE_INLINE_ATTR
+MEM_STATIC FORCE_INLINE_ATTR
void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
{
ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
@@ -284,14 +303,16 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e
* one COPY16() in the first call. Then, do two calls per loop since
* at that point it is more likely to have a high trip count.
*/
-#ifndef __aarch64__
+#ifdef __aarch64__
do {
COPY16(op, ip);
}
while (op < oend);
#else
- COPY16(op, ip);
- if (op >= oend) return;
+ ZSTD_copy16(op, ip);
+ if (16 >= length) return;
+ op += 16;
+ ip += 16;
do {
COPY16(op, ip);
COPY16(op, ip);
@@ -305,7 +326,7 @@ MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src,
{
size_t const length = MIN(dstCapacity, srcSize);
if (length > 0) {
- memcpy(dst, src, length);
+ ZSTD_memcpy(dst, src, length);
}
return length;
}
@@ -320,28 +341,46 @@ MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src,
* In which case, resize it down to free some memory */
#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128
+/* Controls whether the input/output buffer is buffered or stable. */
+typedef enum {
+ ZSTD_bm_buffered = 0, /* Buffer the input/output */
+ ZSTD_bm_stable = 1 /* ZSTD_inBuffer/ZSTD_outBuffer is stable */
+} ZSTD_bufferMode_e;
+
/*-*******************************************
* Private declarations
*********************************************/
typedef struct seqDef_s {
- U32 offset;
+ U32 offset; /* offset == rawOffset + ZSTD_REP_NUM, or equivalently, offCode + 1 */
U16 litLength;
U16 matchLength;
} seqDef;
+/* Controls whether seqStore has a single "long" litLength or matchLength. See seqStore_t. */
+typedef enum {
+ ZSTD_llt_none = 0, /* no longLengthType */
+ ZSTD_llt_literalLength = 1, /* represents a long literal */
+ ZSTD_llt_matchLength = 2 /* represents a long match */
+} ZSTD_longLengthType_e;
+
typedef struct {
seqDef* sequencesStart;
- seqDef* sequences;
+ seqDef* sequences; /* ptr to end of sequences */
BYTE* litStart;
- BYTE* lit;
+ BYTE* lit; /* ptr to end of literals */
BYTE* llCode;
BYTE* mlCode;
BYTE* ofCode;
size_t maxNbSeq;
size_t maxNbLit;
- U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
- U32 longLengthPos;
+
+ /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength
+ * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment
+ * the existing value of the litLength or matchLength by 0x10000.
+ */
+ ZSTD_longLengthType_e longLengthType;
+ U32 longLengthPos; /* Index of the sequence to apply long length modification to */
} seqStore_t;
typedef struct {
@@ -351,7 +390,7 @@ typedef struct {
/**
* Returns the ZSTD_sequenceLength for the given sequences. It handles the decoding of long sequences
- * indicated by longLengthPos and longLengthID, and adds MINMATCH back to matchLength.
+ * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength.
*/
MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq)
{
@@ -359,10 +398,10 @@ MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore
seqLen.litLength = seq->litLength;
seqLen.matchLength = seq->matchLength + MINMATCH;
if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
- if (seqStore->longLengthID == 1) {
+ if (seqStore->longLengthType == ZSTD_llt_literalLength) {
seqLen.litLength += 0xFFFF;
}
- if (seqStore->longLengthID == 2) {
+ if (seqStore->longLengthType == ZSTD_llt_matchLength) {
seqLen.matchLength += 0xFFFF;
}
}
@@ -384,9 +423,9 @@ const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBu
void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
/* custom memory allocation functions */
-void* ZSTD_malloc(size_t size, ZSTD_customMem customMem);
-void* ZSTD_calloc(size_t size, ZSTD_customMem customMem);
-void ZSTD_free(void* ptr, ZSTD_customMem customMem);
+void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem);
+void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem);
+void ZSTD_customFree(void* ptr, ZSTD_customMem customMem);
MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */
@@ -394,8 +433,12 @@ MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus
assert(val != 0);
{
# if defined(_MSC_VER) /* Visual */
- unsigned long r=0;
- return _BitScanReverse(&r, val) ? (unsigned)r : 0;
+# if STATIC_BMI2 == 1
+ return _lzcnt_u32(val)^31;
+# else
+ unsigned long r=0;
+ return _BitScanReverse(&r, val) ? (unsigned)r : 0;
+# endif
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */
return __builtin_clz (val) ^ 31;
# elif defined(__ICCARM__) /* IAR Intrinsic */
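All three branches of the updated ZSTD_highbit32() compute floor(log2(val)) for val != 0: a 32-bit leading-zero count is at most 31, so (clz ^ 31) equals (31 - clz). A portable reference version for comparison (the function name is invented):

    #include <assert.h>

    static unsigned example_highbit32(unsigned val)
    {
        unsigned bit = 0;
        assert(val != 0);
        while (val >>= 1) bit++;   /* index of the highest set bit */
        return bit;
    }
    /* example_highbit32(1) == 0 and example_highbit32(0x80000000u) == 31,
     * matching __builtin_clz(val) ^ 31 and _lzcnt_u32(val) ^ 31. */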
diff --git a/Utilities/cmzstd/lib/common/zstd_trace.h b/Utilities/cmzstd/lib/common/zstd_trace.h
new file mode 100644
index 0000000000..2da5640771
--- /dev/null
+++ b/Utilities/cmzstd/lib/common/zstd_trace.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_TRACE_H
+#define ZSTD_TRACE_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include <stddef.h>
+
+/* weak symbol support */
+#if !defined(ZSTD_HAVE_WEAK_SYMBOLS) && defined(__GNUC__) && \
+ !defined(__APPLE__) && !defined(_WIN32) && !defined(__MINGW32__) && \
+ !defined(__CYGWIN__)
+# define ZSTD_HAVE_WEAK_SYMBOLS 1
+#else
+# define ZSTD_HAVE_WEAK_SYMBOLS 0
+#endif
+#if ZSTD_HAVE_WEAK_SYMBOLS
+# define ZSTD_WEAK_ATTR __attribute__((__weak__))
+#else
+# define ZSTD_WEAK_ATTR
+#endif
+
+/* Only enable tracing when weak symbols are available. */
+#ifndef ZSTD_TRACE
+# define ZSTD_TRACE ZSTD_HAVE_WEAK_SYMBOLS
+#endif
+
+#if ZSTD_TRACE
+
+struct ZSTD_CCtx_s;
+struct ZSTD_DCtx_s;
+struct ZSTD_CCtx_params_s;
+
+typedef struct {
+ /**
+ * ZSTD_VERSION_NUMBER
+ *
+ * This is guaranteed to be the first member of ZSTD_trace.
+ * Otherwise, this struct is not stable between versions. If
+ * the version number does not match your expectation, you
+ * should not interpret the rest of the struct.
+ */
+ unsigned version;
+ /**
+ * Non-zero if streaming (de)compression is used.
+ */
+ unsigned streaming;
+ /**
+ * The dictionary ID.
+ */
+ unsigned dictionaryID;
+ /**
+ * Is the dictionary cold?
+ * Only set on decompression.
+ */
+ unsigned dictionaryIsCold;
+ /**
+ * The dictionary size or zero if no dictionary.
+ */
+ size_t dictionarySize;
+ /**
+ * The uncompressed size of the data.
+ */
+ size_t uncompressedSize;
+ /**
+ * The compressed size of the data.
+ */
+ size_t compressedSize;
+ /**
+ * The fully resolved CCtx parameters (NULL on decompression).
+ */
+ struct ZSTD_CCtx_params_s const* params;
+ /**
+ * The ZSTD_CCtx pointer (NULL on decompression).
+ */
+ struct ZSTD_CCtx_s const* cctx;
+ /**
+ * The ZSTD_DCtx pointer (NULL on compression).
+ */
+ struct ZSTD_DCtx_s const* dctx;
+} ZSTD_Trace;
+
+/**
+ * A tracing context. It must be 0 when tracing is disabled.
+ * Otherwise, any non-zero value returned by a tracing begin()
+ * function is presented to any subsequent calls to end().
+ *
+ * Any non-zero value is treated as tracing is enabled and not
+ * interpreted by the library.
+ *
+ * Two possible uses are:
+ * * A timestamp for when the begin() function was called.
+ * * A unique key identifying the (de)compression, like the
+ * address of the [dc]ctx pointer if you need to track
+ * more information than just a timestamp.
+ */
+typedef unsigned long long ZSTD_TraceCtx;
+
+/**
+ * Trace the beginning of a compression call.
+ * @param cctx The cctx pointer for the compression.
+ * It can be used as a key to map begin() to end().
+ * @returns Non-zero if tracing is enabled. The return value is
+ * passed to ZSTD_trace_compress_end().
+ */
+ZSTD_WEAK_ATTR ZSTD_TraceCtx ZSTD_trace_compress_begin(
+ struct ZSTD_CCtx_s const* cctx);
+
+/**
+ * Trace the end of a compression call.
+ * @param ctx The return value of ZSTD_trace_compress_begin().
+ * @param trace The zstd tracing info.
+ */
+ZSTD_WEAK_ATTR void ZSTD_trace_compress_end(
+ ZSTD_TraceCtx ctx,
+ ZSTD_Trace const* trace);
+
+/**
+ * Trace the beginning of a decompression call.
+ * @param dctx The dctx pointer for the decompression.
+ * It can be used as a key to map begin() to end().
+ * @returns Non-zero if tracing is enabled. The return value is
+ * passed to ZSTD_trace_decompress_end().
+ */
+ZSTD_WEAK_ATTR ZSTD_TraceCtx ZSTD_trace_decompress_begin(
+ struct ZSTD_DCtx_s const* dctx);
+
+/**
+ * Trace the end of a decompression call.
+ * @param ctx The return value of ZSTD_trace_decompress_begin().
+ * @param trace The zstd tracing info.
+ */
+ZSTD_WEAK_ATTR void ZSTD_trace_decompress_end(
+ ZSTD_TraceCtx ctx,
+ ZSTD_Trace const* trace);
+
+#endif /* ZSTD_TRACE */
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_TRACE_H */
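Applications consume this header by supplying their own definitions for the hooks; the library only calls a hook when its weak symbol resolves to non-NULL. A sketch that logs compression ratios, assuming a platform where ZSTD_HAVE_WEAK_SYMBOLS is 1 and that the internal header is on the include path:

    #include <stdio.h>
    #include <zstd.h>          /* ZSTD_VERSION_NUMBER */
    #include "zstd_trace.h"

    ZSTD_TraceCtx ZSTD_trace_compress_begin(struct ZSTD_CCtx_s const* cctx)
    {
        (void)cctx;
        return 1;   /* any non-zero value enables the matching end() callback */
    }

    void ZSTD_trace_compress_end(ZSTD_TraceCtx ctx, ZSTD_Trace const* trace)
    {
        (void)ctx;
        if (trace->version != ZSTD_VERSION_NUMBER) return;   /* layout only guaranteed for matching versions */
        fprintf(stderr, "zstd trace: %zu -> %zu bytes (streaming=%u)\n",
                trace->uncompressedSize, trace->compressedSize, trace->streaming);
    }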
diff --git a/Utilities/cmzstd/lib/compress/fse_compress.c b/Utilities/cmzstd/lib/compress/fse_compress.c
index a42759814f..b4297ec88a 100644
--- a/Utilities/cmzstd/lib/compress/fse_compress.c
+++ b/Utilities/cmzstd/lib/compress/fse_compress.c
@@ -1,6 +1,6 @@
/* ******************************************************************
* FSE : Finite State Entropy encoder
- * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -15,8 +15,6 @@
/* **************************************************************
* Includes
****************************************************************/
-#include <stdlib.h> /* malloc, free, qsort */
-#include <string.h> /* memcpy, memset */
#include "../common/compiler.h"
#include "../common/mem.h" /* U32, U16, etc. */
#include "../common/debug.h" /* assert, DEBUGLOG */
@@ -25,6 +23,9 @@
#define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h"
#include "../common/error_private.h"
+#define ZSTD_DEPS_NEED_MALLOC
+#define ZSTD_DEPS_NEED_MATH64
+#include "../common/zstd_deps.h" /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */
/* **************************************************************
@@ -74,13 +75,15 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
U32 const step = FSE_TABLESTEP(tableSize);
- U32 cumul[FSE_MAX_SYMBOL_VALUE+2];
- FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)workSpace;
+ U32* cumul = (U32*)workSpace;
+ FSE_FUNCTION_TYPE* tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSymbolValue + 2));
+
U32 highThreshold = tableSize-1;
+ if ((size_t)workSpace & 3) return ERROR(GENERIC); /* Must be 4 byte aligned */
+ if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge);
/* CTable header */
- if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge);
tableU16[-2] = (U16) tableLog;
tableU16[-1] = (U16) maxSymbolValue;
assert(tableLog < 16); /* required for threshold strategy to work */
@@ -89,7 +92,7 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
* http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
#ifdef __clang_analyzer__
- memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */
+ ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */
#endif
/* symbol start positions */
@@ -168,12 +171,13 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
return 0;
}
-
+#ifndef ZSTD_NO_UNUSED_FUNCTIONS
size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
FSE_FUNCTION_TYPE tableSymbol[FSE_MAX_TABLESIZE]; /* memset() is not necessary, even if static analyzer complain about it */
return FSE_buildCTable_wksp(ct, normalizedCounter, maxSymbolValue, tableLog, tableSymbol, sizeof(tableSymbol));
}
+#endif
@@ -307,10 +311,10 @@ FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
size_t size;
if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
- return (FSE_CTable*)malloc(size);
+ return (FSE_CTable*)ZSTD_malloc(size);
}
-void FSE_freeCTable (FSE_CTable* ct) { free(ct); }
+void FSE_freeCTable (FSE_CTable* ct) { ZSTD_free(ct); }
/* provides the minimum logSize to safely represent a distribution */
static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
@@ -341,11 +345,10 @@ unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxS
return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
}
-
/* Secondary normalization method.
To be used when primary method fails. */
-static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue)
+static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue, short lowProbCount)
{
short const NOT_YET_ASSIGNED = -2;
U32 s;
@@ -362,7 +365,7 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
continue;
}
if (count[s] <= lowThreshold) {
- norm[s] = -1;
+ norm[s] = lowProbCount;
distributed++;
total -= count[s];
continue;
@@ -414,7 +417,7 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
{ U64 const vStepLog = 62 - tableLog;
U64 const mid = (1ULL << (vStepLog-1)) - 1;
- U64 const rStep = ((((U64)1<<vStepLog) * ToDistribute) + mid) / total; /* scale on remaining */
+ U64 const rStep = ZSTD_div64((((U64)1<<vStepLog) * ToDistribute) + mid, (U32)total); /* scale on remaining */
U64 tmpTotal = mid;
for (s=0; s<=maxSymbolValue; s++) {
if (norm[s]==NOT_YET_ASSIGNED) {
@@ -431,10 +434,9 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
return 0;
}
-
size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
const unsigned* count, size_t total,
- unsigned maxSymbolValue)
+ unsigned maxSymbolValue, unsigned useLowProbCount)
{
/* Sanity checks */
if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
@@ -443,8 +445,9 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
{ static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
+ short const lowProbCount = useLowProbCount ? -1 : 1;
U64 const scale = 62 - tableLog;
- U64 const step = ((U64)1<<62) / total; /* <== here, one division ! */
+ U64 const step = ZSTD_div64((U64)1<<62, (U32)total); /* <== here, one division ! */
U64 const vStep = 1ULL<<(scale-20);
int stillToDistribute = 1<<tableLog;
unsigned s;
@@ -456,7 +459,7 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
if (count[s] == total) return 0; /* rle special case */
if (count[s] == 0) { normalizedCounter[s]=0; continue; }
if (count[s] <= lowThreshold) {
- normalizedCounter[s] = -1;
+ normalizedCounter[s] = lowProbCount;
stillToDistribute--;
} else {
short proba = (short)((count[s]*step) >> scale);
@@ -470,7 +473,7 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
} }
if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
/* corner case, need another normalization method */
- size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
+ size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount);
if (FSE_isError(errorCode)) return errorCode;
}
else normalizedCounter[largest] += (short)stillToDistribute;
@@ -625,6 +628,7 @@ size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
+#ifndef ZSTD_NO_UNUSED_FUNCTIONS
/* FSE_compress_wksp() :
* Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
* `wkspSize` size must be `(1<<tableLog)`.
@@ -643,7 +647,7 @@ size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t src
size_t const scratchBufferSize = wkspSize - (CTableSize * sizeof(FSE_CTable));
/* init conditions */
- if (wkspSize < FSE_WKSP_SIZE_U32(tableLog, maxSymbolValue)) return ERROR(tableLog_tooLarge);
+ if (wkspSize < FSE_COMPRESS_WKSP_SIZE_U32(tableLog, maxSymbolValue)) return ERROR(tableLog_tooLarge);
if (srcSize <= 1) return 0; /* Not compressible */
if (!maxSymbolValue) maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;
@@ -656,7 +660,7 @@ size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t src
}
tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue);
- CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) );
+ CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue, /* useLowProbCount */ srcSize >= 2048) );
/* Write table description header */
{ CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
@@ -678,13 +682,16 @@ size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t src
typedef struct {
FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
- BYTE scratchBuffer[1 << FSE_MAX_TABLELOG];
+ union {
+ U32 hist_wksp[HIST_WKSP_SIZE_U32];
+ BYTE scratchBuffer[1 << FSE_MAX_TABLELOG];
+ } workspace;
} fseWkspMax_t;
size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog)
{
fseWkspMax_t scratchBuffer;
- DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */
+ DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_COMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer));
}
@@ -693,6 +700,6 @@ size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcS
{
return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG);
}
-
+#endif
#endif /* FSE_COMMONDEFS_ONLY */
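FSE_normalizeCount() gains a useLowProbCount parameter: when non-zero, rare symbols may still be assigned the special -1 slot, otherwise they are rounded up to a plain count of 1. A caller-side sketch using the same >= 2 KB heuristic the patch applies in FSE_compress_wksp() (the wrapper name is illustrative):

    #include "fse.h"

    static size_t example_normalize(short* norm, unsigned tableLog,
                                    const unsigned* count, size_t srcSize,
                                    unsigned maxSymbolValue)
    {
        unsigned const useLowProbCount = (srcSize >= 2048);   /* low-prob slots pay off on larger inputs */
        return FSE_normalizeCount(norm, tableLog, count, srcSize,
                                  maxSymbolValue, useLowProbCount);
    }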
diff --git a/Utilities/cmzstd/lib/compress/hist.c b/Utilities/cmzstd/lib/compress/hist.c
index 61e08c7968..073c57e752 100644
--- a/Utilities/cmzstd/lib/compress/hist.c
+++ b/Utilities/cmzstd/lib/compress/hist.c
@@ -1,7 +1,7 @@
/* ******************************************************************
* hist : Histogram functions
* part of Finite State Entropy project
- * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -34,7 +34,7 @@ unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
unsigned maxSymbolValue = *maxSymbolValuePtr;
unsigned largestCount=0;
- memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
+ ZSTD_memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
while (ip<end) {
@@ -60,9 +60,9 @@ typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;
* this design makes better use of OoO cpus,
* and is noticeably faster when some values are heavily repeated.
* But it needs some additional workspace for intermediate tables.
- * `workSpace` size must be a table of size >= HIST_WKSP_SIZE_U32.
+ * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
* @return : largest histogram frequency,
- * or an error code (notably when histogram would be larger than *maxSymbolValuePtr). */
+ * or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
static size_t HIST_count_parallel_wksp(
unsigned* count, unsigned* maxSymbolValuePtr,
const void* source, size_t sourceSize,
@@ -71,22 +71,21 @@ static size_t HIST_count_parallel_wksp(
{
const BYTE* ip = (const BYTE*)source;
const BYTE* const iend = ip+sourceSize;
- unsigned maxSymbolValue = *maxSymbolValuePtr;
+ size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count);
unsigned max=0;
U32* const Counting1 = workSpace;
U32* const Counting2 = Counting1 + 256;
U32* const Counting3 = Counting2 + 256;
U32* const Counting4 = Counting3 + 256;
- memset(workSpace, 0, 4*256*sizeof(unsigned));
-
/* safety checks */
+ assert(*maxSymbolValuePtr <= 255);
if (!sourceSize) {
- memset(count, 0, maxSymbolValue + 1);
+ ZSTD_memset(count, 0, countSize);
*maxSymbolValuePtr = 0;
return 0;
}
- if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */
+ ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned));
/* by stripes of 16 bytes */
{ U32 cached = MEM_read32(ip); ip += 4;
@@ -118,21 +117,18 @@ static size_t HIST_count_parallel_wksp(
/* finish last symbols */
while (ip<iend) Counting1[*ip++]++;
- if (check) { /* verify stats will fit into destination table */
- U32 s; for (s=255; s>maxSymbolValue; s--) {
- Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
- if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
- } }
-
{ U32 s;
- if (maxSymbolValue > 255) maxSymbolValue = 255;
- for (s=0; s<=maxSymbolValue; s++) {
- count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
- if (count[s] > max) max = count[s];
+ for (s=0; s<256; s++) {
+ Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
+ if (Counting1[s] > max) max = Counting1[s];
} }
- while (!count[maxSymbolValue]) maxSymbolValue--;
- *maxSymbolValuePtr = maxSymbolValue;
+ { unsigned maxSymbolValue = 255;
+ while (!Counting1[maxSymbolValue]) maxSymbolValue--;
+ if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall);
+ *maxSymbolValuePtr = maxSymbolValue;
+ ZSTD_memmove(count, Counting1, countSize); /* in case count & Counting1 are overlapping */
+ }
return (size_t)max;
}
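
Editor's illustration (not part of the upstream patch): the 4-table counting scheme described above can be reduced to a short standalone sketch. `histogram4` is a hypothetical name and assumes <stddef.h> for size_t. Four independent counter arrays let consecutive byte increments proceed without a store-to-load dependency on a single table, and the partial counts are merged at the end, which is the same structure HIST_count_parallel_wksp() uses.

    #include <stddef.h>

    static unsigned histogram4(unsigned count[256], const unsigned char* src, size_t srcSize)
    {
        unsigned c1[256] = {0}, c2[256] = {0}, c3[256] = {0}, c4[256] = {0};
        unsigned max = 0;
        unsigned s;
        size_t i = 0;
        /* stripes of 4 bytes: each byte updates a different table */
        for (; i + 4 <= srcSize; i += 4) {
            c1[src[i+0]]++; c2[src[i+1]]++; c3[src[i+2]]++; c4[src[i+3]]++;
        }
        while (i < srcSize) c1[src[i++]]++;      /* finish last symbols */
        for (s = 0; s < 256; s++) {              /* merge the partial tables */
            count[s] = c1[s] + c2[s] + c3[s] + c4[s];
            if (count[s] > max) max = count[s];
        }
        return max;                              /* largest histogram frequency */
    }
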
@@ -152,14 +148,6 @@ size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
}
-/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
-size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
- const void* source, size_t sourceSize)
-{
- unsigned tmpCounters[HIST_WKSP_SIZE_U32];
- return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters));
-}
-
/* HIST_count_wksp() :
* Same as HIST_count(), but using an externally provided scratch buffer.
* `workSpace` size must be table of >= HIST_WKSP_SIZE_U32 unsigned */
@@ -175,9 +163,19 @@ size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
}
+#ifndef ZSTD_NO_UNUSED_FUNCTIONS
+/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
+size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize)
+{
+ unsigned tmpCounters[HIST_WKSP_SIZE_U32];
+ return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters));
+}
+
size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
const void* src, size_t srcSize)
{
unsigned tmpCounters[HIST_WKSP_SIZE_U32];
return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(tmpCounters));
}
+#endif
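
Editor's sketch (not from the patch): with HIST_countFast() and HIST_count() now behind ZSTD_NO_UNUSED_FUNCTIONS, the workspace variants become the expected entry points. `src` and `srcSize` below are assumed caller variables.

    unsigned count[256];
    unsigned maxSymbolValue = 255;
    U32 wksp[HIST_WKSP_SIZE_U32];                /* caller-provided workspace */
    size_t const largest = HIST_count_wksp(count, &maxSymbolValue,
                                           src, srcSize, wksp, sizeof(wksp));
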
diff --git a/Utilities/cmzstd/lib/compress/hist.h b/Utilities/cmzstd/lib/compress/hist.h
index 77e3ec4fb1..228ed48a71 100644
--- a/Utilities/cmzstd/lib/compress/hist.h
+++ b/Utilities/cmzstd/lib/compress/hist.h
@@ -1,7 +1,7 @@
/* ******************************************************************
* hist : Histogram functions
* part of Finite State Entropy project
- * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -14,7 +14,7 @@
****************************************************************** */
/* --- dependencies --- */
-#include <stddef.h> /* size_t */
+#include "../common/zstd_deps.h" /* size_t */
/* --- simple histogram functions --- */
diff --git a/Utilities/cmzstd/lib/compress/huf_compress.c b/Utilities/cmzstd/lib/compress/huf_compress.c
index 546879868a..485906e678 100644
--- a/Utilities/cmzstd/lib/compress/huf_compress.c
+++ b/Utilities/cmzstd/lib/compress/huf_compress.c
@@ -1,6 +1,6 @@
/* ******************************************************************
* Huffman encoder, part of New Generation Entropy library
- * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -23,8 +23,7 @@
/* **************************************************************
* Includes
****************************************************************/
-#include <string.h> /* memcpy, memset */
-#include <stdio.h> /* printf (debug) */
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */
#include "../common/compiler.h"
#include "../common/bitstream.h"
#include "hist.h"
@@ -60,7 +59,15 @@ unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxS
* Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
*/
#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
-static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)
+
+typedef struct {
+ FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
+ U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)];
+ unsigned count[HUF_TABLELOG_MAX+1];
+ S16 norm[HUF_TABLELOG_MAX+1];
+} HUF_CompressWeightsWksp;
+
+static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightTable, size_t wtSize, void* workspace, size_t workspaceSize)
{
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
@@ -68,33 +75,30 @@ static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weight
unsigned maxSymbolValue = HUF_TABLELOG_MAX;
U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
+ HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)workspace;
- FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
- BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER];
-
- unsigned count[HUF_TABLELOG_MAX+1];
- S16 norm[HUF_TABLELOG_MAX+1];
+ if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC);
/* init conditions */
if (wtSize <= 1) return 0; /* Not compressible */
/* Scan input and build symbol stats */
- { unsigned const maxCount = HIST_count_simple(count, &maxSymbolValue, weightTable, wtSize); /* never fails */
+ { unsigned const maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize); /* never fails */
if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */
if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
}
tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
- CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) );
+ CHECK_F( FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) );
/* Write table description header */
- { CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), norm, maxSymbolValue, tableLog) );
+ { CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), wksp->norm, maxSymbolValue, tableLog) );
op += hSize;
}
/* Compress */
- CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) );
- { CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, CTable) );
+ CHECK_F( FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(wksp->scratchBuffer)) );
+ { CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, wksp->CTable) );
if (cSize == 0) return 0; /* not enough space for compressed data */
op += cSize;
}
@@ -103,34 +107,33 @@ static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weight
}
-struct HUF_CElt_s {
- U16 val;
- BYTE nbBits;
-}; /* typedef'd to HUF_CElt within "huf.h" */
-
-/*! HUF_writeCTable() :
- `CTable` : Huffman tree to save, using huf representation.
- @return : size of saved CTable */
-size_t HUF_writeCTable (void* dst, size_t maxDstSize,
- const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
-{
+typedef struct {
+ HUF_CompressWeightsWksp wksp;
BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */
BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
+} HUF_WriteCTableWksp;
+
+size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
+ const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog,
+ void* workspace, size_t workspaceSize)
+{
BYTE* op = (BYTE*)dst;
U32 n;
+ HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)workspace;
- /* check conditions */
+ /* check conditions */
+ if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
/* convert to weight */
- bitsToWeight[0] = 0;
+ wksp->bitsToWeight[0] = 0;
for (n=1; n<huffLog+1; n++)
- bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
+ wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
for (n=0; n<maxSymbolValue; n++)
- huffWeight[n] = bitsToWeight[CTable[n].nbBits];
+ wksp->huffWeight[n] = wksp->bitsToWeight[CTable[n].nbBits];
/* attempt weights compression by FSE */
- { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) );
+ { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) );
if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */
op[0] = (BYTE)hSize;
return hSize+1;
@@ -140,12 +143,22 @@ size_t HUF_writeCTable (void* dst, size_t maxDstSize,
if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
- huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
+ wksp->huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
for (n=0; n<maxSymbolValue; n+=2)
- op[(n/2)+1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n+1]);
+ op[(n/2)+1] = (BYTE)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n+1]);
return ((maxSymbolValue+1)/2) + 1;
}
+/*! HUF_writeCTable() :
+ `CTable` : Huffman tree to save, using huf representation.
+ @return : size of saved CTable */
+size_t HUF_writeCTable (void* dst, size_t maxDstSize,
+ const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
+{
+ HUF_WriteCTableWksp wksp;
+ return HUF_writeCTable_wksp(dst, maxDstSize, CTable, maxSymbolValue, huffLog, &wksp, sizeof(wksp));
+}
+
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
{
@@ -156,6 +169,7 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void
/* get symbol weights */
CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
+ *hasZeroWeights = (rankVal[0] > 0);
/* check result */
if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
@@ -164,16 +178,14 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void
/* Prepare base value per rank */
{ U32 n, nextRankStart = 0;
for (n=1; n<=tableLog; n++) {
- U32 current = nextRankStart;
+ U32 curr = nextRankStart;
nextRankStart += (rankVal[n] << (n-1));
- rankVal[n] = current;
+ rankVal[n] = curr;
} }
/* fill nbBits */
- *hasZeroWeights = 0;
{ U32 n; for (n=0; n<nbSymbols; n++) {
const U32 w = huffWeight[n];
- *hasZeroWeights |= (w == 0);
CTable[n].nbBits = (BYTE)(tableLog + 1 - w) & -(w != 0);
} }
@@ -212,32 +224,63 @@ typedef struct nodeElt_s {
BYTE nbBits;
} nodeElt;
+/**
+ * HUF_setMaxHeight():
+ * Enforces maxNbBits on the Huffman tree described in huffNode.
+ *
+ * It sets all nodes with nbBits > maxNbBits to be maxNbBits. Then it adjusts
+ * the tree so that it is a valid canonical Huffman tree.
+ *
+ * @pre The sum of the ranks of each symbol == 2^largestBits,
+ * where largestBits == huffNode[lastNonNull].nbBits.
+ * @post The sum of the ranks of each symbol == 2^largestBits,
+ * where largestBits is the return value <= maxNbBits.
+ *
+ * @param huffNode The Huffman tree modified in place to enforce maxNbBits.
+ * @param lastNonNull The symbol with the lowest count in the Huffman tree.
+ * @param maxNbBits The maximum allowed number of bits, which the Huffman tree
+ * may not respect. After this function the Huffman tree will
+ * respect maxNbBits.
+ * @return The maximum number of bits of the Huffman tree after adjustment,
+ * necessarily no more than maxNbBits.
+ */
static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
{
const U32 largestBits = huffNode[lastNonNull].nbBits;
- if (largestBits <= maxNbBits) return largestBits; /* early exit : no elt > maxNbBits */
+ /* early exit : no elt > maxNbBits, so the tree is already valid. */
+ if (largestBits <= maxNbBits) return largestBits;
/* there are several too large elements (at least >= 2) */
{ int totalCost = 0;
const U32 baseCost = 1 << (largestBits - maxNbBits);
int n = (int)lastNonNull;
+ /* Adjust any ranks > maxNbBits to maxNbBits.
+ * Compute totalCost, which is how far over 2^largestBits the sum of
+ * the ranks is after adjusting the offending ranks.
+ */
while (huffNode[n].nbBits > maxNbBits) {
totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
huffNode[n].nbBits = (BYTE)maxNbBits;
- n --;
- } /* n stops at huffNode[n].nbBits <= maxNbBits */
- while (huffNode[n].nbBits == maxNbBits) n--; /* n end at index of smallest symbol using < maxNbBits */
+ n--;
+ }
+ /* n stops at huffNode[n].nbBits <= maxNbBits */
+ assert(huffNode[n].nbBits <= maxNbBits);
+ /* n ends at the index of the smallest symbol using < maxNbBits */
+ while (huffNode[n].nbBits == maxNbBits) --n;
- /* renorm totalCost */
- totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */
+ /* renorm totalCost from 2^largestBits to 2^maxNbBits
+ * note : totalCost is necessarily a multiple of baseCost */
+ assert((totalCost & (baseCost - 1)) == 0);
+ totalCost >>= (largestBits - maxNbBits);
+ assert(totalCost > 0);
/* repay normalized cost */
{ U32 const noSymbol = 0xF0F0F0F0;
U32 rankLast[HUF_TABLELOG_MAX+2];
- /* Get pos of last (smallest) symbol per rank */
- memset(rankLast, 0xF0, sizeof(rankLast));
+ /* Get pos of last (smallest = lowest cum. count) symbol per rank */
+ ZSTD_memset(rankLast, 0xF0, sizeof(rankLast));
{ U32 currentNbBits = maxNbBits;
int pos;
for (pos=n ; pos >= 0; pos--) {
@@ -247,34 +290,65 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
} }
while (totalCost > 0) {
+ /* Try to reduce the next power of 2 above totalCost because we
+ * gain back half the rank.
+ */
U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1;
for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
U32 const highPos = rankLast[nBitsToDecrease];
U32 const lowPos = rankLast[nBitsToDecrease-1];
if (highPos == noSymbol) continue;
+ /* Decrease highPos if no symbols of lowPos or if it is
+ * not cheaper to remove 2 lowPos than highPos.
+ */
if (lowPos == noSymbol) break;
{ U32 const highTotal = huffNode[highPos].count;
U32 const lowTotal = 2 * huffNode[lowPos].count;
if (highTotal <= lowTotal) break;
} }
/* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
+ assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1);
/* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
- nBitsToDecrease ++;
+ nBitsToDecrease++;
+ assert(rankLast[nBitsToDecrease] != noSymbol);
+ /* Increase the number of bits to gain back half the rank cost. */
totalCost -= 1 << (nBitsToDecrease-1);
+ huffNode[rankLast[nBitsToDecrease]].nbBits++;
+
+ /* Fix up the new rank.
+ * If the new rank was empty, this symbol is now its smallest.
+ * Otherwise, this symbol will be the largest in the new rank so no adjustment.
+ */
if (rankLast[nBitsToDecrease-1] == noSymbol)
- rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */
- huffNode[rankLast[nBitsToDecrease]].nbBits ++;
+ rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];
+ /* Fix up the old rank.
+ * If the symbol was at position 0, meaning it was the highest weight symbol in the tree,
+ * it must be the only symbol in its rank, so the old rank now has no symbols.
+ * Otherwise, since the Huffman nodes are sorted by count, the previous position is now
+ * the smallest node in the rank. If the previous position belongs to a different rank,
+ * then the rank is now empty.
+ */
if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
rankLast[nBitsToDecrease] = noSymbol;
else {
rankLast[nBitsToDecrease]--;
if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
- } } /* while (totalCost > 0) */
-
+ }
+ } /* while (totalCost > 0) */
+
+ /* If we've removed too much weight, then we have to add it back.
+ * To avoid overshooting again, we only adjust the smallest rank.
+ * We take the largest nodes from the lowest rank 0 and move them
+ * to rank 1. There's guaranteed to be enough rank 0 symbols because
+ * TODO.
+ */
while (totalCost < 0) { /* Sometimes, cost correction overshoot */
- if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
+ /* special case : no rank 1 symbol (using maxNbBits-1);
+ * let's create one from largest rank 0 (using maxNbBits).
+ */
+ if (rankLast[1] == noSymbol) {
while (huffNode[n].nbBits == maxNbBits) n--;
huffNode[n+1].nbBits--;
assert(n >= 0);
@@ -285,14 +359,16 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
huffNode[ rankLast[1] + 1 ].nbBits--;
rankLast[1]++;
totalCost ++;
- } } } /* there are several too large elements (at least >= 2) */
+ }
+ } /* repay normalized cost */
+ } /* there are several too large elements (at least >= 2) */
return maxNbBits;
}
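
Reading aid only (editor's sketch, not from the patch): the @pre/@post condition documented above is a Kraft-style equality over the node weights. A minimal checker, reusing the surrounding nodeElt/U32/U64 types, could look like this; HUF_kraftSumIsExact is a hypothetical name.

    static int HUF_kraftSumIsExact(const nodeElt* huffNode, int lastNonNull, U32 largestBits)
    {
        U64 sum = 0;
        int n;
        for (n = 0; n <= lastNonNull; n++)
            sum += (U64)1 << (largestBits - huffNode[n].nbBits);  /* the "rank" of symbol n */
        return sum == ((U64)1 << largestBits);
    }
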
typedef struct {
U32 base;
- U32 current;
+ U32 curr;
} rankPos;
typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
@@ -304,21 +380,45 @@ typedef struct {
rankPos rankPosition[RANK_POSITION_TABLE_SIZE];
} HUF_buildCTable_wksp_tables;
+/**
+ * HUF_sort():
+ * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order.
+ *
+ * @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled.
+ * Must have (maxSymbolValue + 1) entries.
+ * @param[in] count Histogram of the symbols.
+ * @param[in] maxSymbolValue Maximum symbol value.
+ * @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries.
+ */
static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition)
{
- U32 n;
-
- memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
- for (n=0; n<=maxSymbolValue; n++) {
- U32 r = BIT_highbit32(count[n] + 1);
- rankPosition[r].base ++;
+ int n;
+ int const maxSymbolValue1 = (int)maxSymbolValue + 1;
+
+ /* Compute base and set curr to base.
+ * For each symbol n, let lowerRank = BIT_highbit32(count[n]+1) and rank = lowerRank + 1.
+ * Then 2^lowerRank <= count[n]+1 <= 2^rank.
+ * We attribute each symbol to lowerRank's base value, because we want to know where
+ * each rank begins in the output, so for rank R we want to count ranks R+1 and above.
+ */
+ ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
+ for (n = 0; n < maxSymbolValue1; ++n) {
+ U32 lowerRank = BIT_highbit32(count[n] + 1);
+ rankPosition[lowerRank].base++;
}
- for (n=30; n>0; n--) rankPosition[n-1].base += rankPosition[n].base;
- for (n=0; n<32; n++) rankPosition[n].current = rankPosition[n].base;
- for (n=0; n<=maxSymbolValue; n++) {
+ assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0);
+ for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) {
+ rankPosition[n-1].base += rankPosition[n].base;
+ rankPosition[n-1].curr = rankPosition[n-1].base;
+ }
+ /* Sort */
+ for (n = 0; n < maxSymbolValue1; ++n) {
U32 const c = count[n];
U32 const r = BIT_highbit32(c+1) + 1;
- U32 pos = rankPosition[r].current++;
+ U32 pos = rankPosition[r].curr++;
+ /* Insert into the correct position in the rank.
+ * We have at most 256 symbols, so this insertion should be fine.
+ */
while ((pos > rankPosition[r].base) && (c > huffNode[pos-1].count)) {
huffNode[pos] = huffNode[pos-1];
pos--;
@@ -335,28 +435,20 @@ static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValu
*/
#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
-size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
+/* HUF_buildTree():
+ * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree.
+ *
+ * @param huffNode The array sorted by HUF_sort(). Builds the Huffman tree in this array.
+ * @param maxSymbolValue The maximum symbol value.
+ * @return The smallest node in the Huffman tree (by count).
+ */
+static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
{
- HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)workSpace;
- nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
- nodeElt* const huffNode = huffNode0+1;
+ nodeElt* const huffNode0 = huffNode - 1;
int nonNullRank;
int lowS, lowN;
int nodeNb = STARTNODE;
int n, nodeRoot;
-
- /* safety checks */
- if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
- if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
- return ERROR(workSpace_tooSmall);
- if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
- if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
- return ERROR(maxSymbolValue_tooLarge);
- memset(huffNode0, 0, sizeof(huffNodeTable));
-
- /* sort, decreasing order */
- HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);
-
/* init for parents */
nonNullRank = (int)maxSymbolValue;
while(huffNode[nonNullRank].count == 0) nonNullRank--;
@@ -383,42 +475,72 @@ size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbo
for (n=0; n<=nonNullRank; n++)
huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
+ return nonNullRank;
+}
+
+/**
+ * HUF_buildCTableFromTree():
+ * Build the CTable given the Huffman tree in huffNode.
+ *
+ * @param[out] CTable The output Huffman CTable.
+ * @param huffNode The Huffman tree.
+ * @param nonNullRank The last and smallest node in the Huffman tree.
+ * @param maxSymbolValue The maximum symbol value.
+ * @param maxNbBits The exact maximum number of bits used in the Huffman tree.
+ */
+static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits)
+{
+ /* fill result into ctable (val, nbBits) */
+ int n;
+ U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
+ U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
+ int const alphabetSize = (int)(maxSymbolValue + 1);
+ for (n=0; n<=nonNullRank; n++)
+ nbPerRank[huffNode[n].nbBits]++;
+ /* determine starting value per rank */
+ { U16 min = 0;
+ for (n=(int)maxNbBits; n>0; n--) {
+ valPerRank[n] = min; /* get starting value within each rank */
+ min += nbPerRank[n];
+ min >>= 1;
+ } }
+ for (n=0; n<alphabetSize; n++)
+ CTable[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */
+ for (n=0; n<alphabetSize; n++)
+ CTable[n].val = valPerRank[CTable[n].nbBits]++; /* assign value within rank, symbol order */
+}
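
For orientation (editor's worked example, not from the patch), the starting-value loop above can be traced on a tiny tree:

    /* maxNbBits = 3, nbPerRank = { -, 1, 1, 2 }  (one 1-bit, one 2-bit, two 3-bit symbols)
     *   n=3 : valPerRank[3] = 0 ; min = (0 + 2) >> 1 = 1
     *   n=2 : valPerRank[2] = 1 ; min = (1 + 1) >> 1 = 1
     *   n=1 : valPerRank[1] = 1 ; min = (1 + 1) >> 1 = 1
     * so the canonical codes assigned are 1, 01, 000, 001 -- a valid prefix code. */
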
+
+size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
+{
+ HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)workSpace;
+ nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
+ nodeElt* const huffNode = huffNode0+1;
+ int nonNullRank;
+
+ /* safety checks */
+ if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
+ if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
+ return ERROR(workSpace_tooSmall);
+ if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
+ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
+ return ERROR(maxSymbolValue_tooLarge);
+ ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable));
+
+ /* sort, decreasing order */
+ HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);
+
+ /* build tree */
+ nonNullRank = HUF_buildTree(huffNode, maxSymbolValue);
+
/* enforce maxTableLog */
maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
+ if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */
- /* fill result into tree (val, nbBits) */
- { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
- U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
- int const alphabetSize = (int)(maxSymbolValue + 1);
- if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */
- for (n=0; n<=nonNullRank; n++)
- nbPerRank[huffNode[n].nbBits]++;
- /* determine stating value per rank */
- { U16 min = 0;
- for (n=(int)maxNbBits; n>0; n--) {
- valPerRank[n] = min; /* get starting value within each rank */
- min += nbPerRank[n];
- min >>= 1;
- } }
- for (n=0; n<alphabetSize; n++)
- tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */
- for (n=0; n<alphabetSize; n++)
- tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */
- }
+ HUF_buildCTableFromTree(tree, huffNode, nonNullRank, maxSymbolValue, maxNbBits);
return maxNbBits;
}
-/** HUF_buildCTable() :
- * @return : maxNbBits
- * Note : count is used before tree is written, so they can safely overlap
- */
-size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits)
-{
- HUF_buildCTable_wksp_tables workspace;
- return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, &workspace, sizeof(workspace));
-}
-
size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
{
size_t nbBits = 0;
@@ -629,29 +751,33 @@ static size_t HUF_compressCTable_internal(
typedef struct {
unsigned count[HUF_SYMBOLVALUE_MAX + 1];
HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
- HUF_buildCTable_wksp_tables buildCTable_wksp;
+ union {
+ HUF_buildCTable_wksp_tables buildCTable_wksp;
+ HUF_WriteCTableWksp writeCTable_wksp;
+ } wksps;
} HUF_compress_tables_t;
/* HUF_compress_internal() :
- * `workSpace` must a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
+ * `workSpace_align4` must be aligned on 4-bytes boundaries,
+ * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U32 unsigned */
static size_t
HUF_compress_internal (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
HUF_nbStreams_e nbStreams,
- void* workSpace, size_t wkspSize,
+ void* workSpace_align4, size_t wkspSize,
HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
const int bmi2)
{
- HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace;
+ HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace_align4;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;
HUF_STATIC_ASSERT(sizeof(*table) <= HUF_WORKSPACE_SIZE);
+ assert(((size_t)workSpace_align4 & 3) == 0); /* must be aligned on 4-bytes boundaries */
/* checks & inits */
- if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall);
if (!srcSize) return 0; /* Uncompressed */
if (!dstSize) return 0; /* cannot fit anything within dst budget */
@@ -669,7 +795,7 @@ HUF_compress_internal (void* dst, size_t dstSize,
}
/* Scan input and build symbol stats */
- { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace, wkspSize) );
+ { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace_align4, wkspSize) );
if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
}
@@ -691,16 +817,17 @@ HUF_compress_internal (void* dst, size_t dstSize,
huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
{ size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
maxSymbolValue, huffLog,
- &table->buildCTable_wksp, sizeof(table->buildCTable_wksp));
+ &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
CHECK_F(maxBits);
huffLog = (U32)maxBits;
/* Zero unused symbols in CTable, so we can check it for validity */
- memset(table->CTable + (maxSymbolValue + 1), 0,
+ ZSTD_memset(table->CTable + (maxSymbolValue + 1), 0,
sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt)));
}
/* Write table description header */
- { CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, table->CTable, maxSymbolValue, huffLog) );
+ { CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog,
+ &table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) );
/* Check if using previous huffman table is beneficial */
if (repeat && *repeat != HUF_repeat_none) {
size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
@@ -716,7 +843,7 @@ HUF_compress_internal (void* dst, size_t dstSize,
op += hSize;
if (repeat) { *repeat = HUF_repeat_none; }
if (oldHufTable)
- memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */
+ ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */
}
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
@@ -747,14 +874,6 @@ size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
repeat, preferRepeat, bmi2);
}
-size_t HUF_compress1X (void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned huffLog)
-{
- unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
- return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
-}
-
/* HUF_compress4X_repeat():
* compress input using 4 streams.
* provide workspace to generate compression tables */
@@ -784,6 +903,25 @@ size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
hufTable, repeat, preferRepeat, bmi2);
}
+#ifndef ZSTD_NO_UNUSED_FUNCTIONS
+/** HUF_buildCTable() :
+ * @return : maxNbBits
+ * Note : count is used before tree is written, so they can safely overlap
+ */
+size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits)
+{
+ HUF_buildCTable_wksp_tables workspace;
+ return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, &workspace, sizeof(workspace));
+}
+
+size_t HUF_compress1X (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog)
+{
+ unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
+ return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
+}
+
size_t HUF_compress2 (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog)
@@ -796,3 +934,4 @@ size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSi
{
return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT);
}
+#endif
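
Editor's sketch (not part of the patch): when ZSTD_NO_UNUSED_FUNCTIONS is defined at build time (e.g. cc -DZSTD_NO_UNUSED_FUNCTIONS ...), the convenience wrappers guarded above are compiled out, and callers are expected to use the *_wksp variants with their own workspace. `dst`, `dstCapacity`, `src` and `srcSize` below are assumed caller variables.

    unsigned wksp[HUF_WORKSPACE_SIZE_U32];       /* workspace supplied by the caller */
    size_t const cSize = HUF_compress1X_wksp(dst, dstCapacity, src, srcSize,
                                             255, HUF_TABLELOG_DEFAULT,
                                             wksp, sizeof(wksp));
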
diff --git a/Utilities/cmzstd/lib/compress/zstd_compress.c b/Utilities/cmzstd/lib/compress/zstd_compress.c
index 3f963b1cff..b7ee2980a7 100644
--- a/Utilities/cmzstd/lib/compress/zstd_compress.c
+++ b/Utilities/cmzstd/lib/compress/zstd_compress.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,8 +11,7 @@
/*-*************************************
* Dependencies
***************************************/
-#include <limits.h> /* INT_MAX */
-#include <string.h> /* memset */
+#include "../common/zstd_deps.h" /* INT_MAX, ZSTD_memset, ZSTD_memcpy */
#include "../common/cpu.h"
#include "../common/mem.h"
#include "hist.h" /* HIST_countFast_wksp */
@@ -30,6 +29,19 @@
#include "zstd_ldm.h"
#include "zstd_compress_superblock.h"
+/* ***************************************************************
+* Tuning parameters
+*****************************************************************/
+/*!
+ * COMPRESS_HEAPMODE :
+ * Select how default decompression function ZSTD_compress() allocates its context,
+ * on stack (0, default), or into heap (1).
+ * Note that functions with explicit context such as ZSTD_compressCCtx() are unaffected.
+ */
+#ifndef ZSTD_COMPRESS_HEAPMODE
+# define ZSTD_COMPRESS_HEAPMODE 0
+#endif
+
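
The hunk above only introduces the knob; the pattern it selects (editor's sketch of the typical shape, not a quotation of the exact ZSTD_compress() body) is roughly:

    #if ZSTD_COMPRESS_HEAPMODE
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();    /* context on the heap */
        /* ... ZSTD_compressCCtx(cctx, ...), then ZSTD_freeCCtx(cctx) ... */
    #else
        ZSTD_CCtx ctxBody;                            /* context on the stack */
        /* ... ZSTD_compressCCtx(&ctxBody, ...), then release its content only ... */
    #endif
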
/*-*************************************
* Helper functions
@@ -52,6 +64,7 @@ size_t ZSTD_compressBound(size_t srcSize) {
struct ZSTD_CDict_s {
const void* dictContent;
size_t dictContentSize;
+ ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */
U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
ZSTD_cwksp workspace;
ZSTD_matchState_t matchState;
@@ -59,6 +72,10 @@ struct ZSTD_CDict_s {
ZSTD_customMem customMem;
U32 dictID;
int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
+ ZSTD_useRowMatchFinderMode_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use
+ * row-based matchfinder. Unless the cdict is reloaded, we will use
+ * the same greedy/lazy matchfinder at compression time.
+ */
}; /* typedef'd to ZSTD_CDict within "zstd.h" */
ZSTD_CCtx* ZSTD_createCCtx(void)
@@ -69,7 +86,7 @@ ZSTD_CCtx* ZSTD_createCCtx(void)
static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
{
assert(cctx != NULL);
- memset(cctx, 0, sizeof(*cctx));
+ ZSTD_memset(cctx, 0, sizeof(*cctx));
cctx->customMem = memManager;
cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
{ size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
@@ -82,8 +99,8 @@ ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
{
ZSTD_STATIC_ASSERT(zcss_init==0);
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
- if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
- { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+ { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem);
if (!cctx) return NULL;
ZSTD_initCCtx(cctx, customMem);
return cctx;
@@ -96,20 +113,20 @@ ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
ZSTD_CCtx* cctx;
if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */
if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */
- ZSTD_cwksp_init(&ws, workspace, workspaceSize);
+ ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
if (cctx == NULL) return NULL;
- memset(cctx, 0, sizeof(ZSTD_CCtx));
+ ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx));
ZSTD_cwksp_move(&cctx->workspace, &ws);
cctx->staticSize = workspaceSize;
/* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
- if (!ZSTD_cwksp_check_available(&cctx->workspace, HUF_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
+ if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
- cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, HUF_WORKSPACE_SIZE);
+ cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
return cctx;
}
@@ -119,10 +136,10 @@ ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
*/
static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
{
- ZSTD_free(cctx->localDict.dictBuffer, cctx->customMem);
+ ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem);
ZSTD_freeCDict(cctx->localDict.cdict);
- memset(&cctx->localDict, 0, sizeof(cctx->localDict));
- memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
+ ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict));
+ ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
cctx->cdict = NULL;
}
@@ -153,7 +170,7 @@ size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
ZSTD_freeCCtxContent(cctx);
if (!cctxInWorkspace) {
- ZSTD_free(cctx, cctx->customMem);
+ ZSTD_customFree(cctx, cctx->customMem);
}
}
return 0;
@@ -189,15 +206,90 @@ size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
/* private API call, for dictBuilder only */
const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
+/* Returns true if the strategy supports using a row based matchfinder */
+static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) {
+ return (strategy >= ZSTD_greedy && strategy <= ZSTD_lazy2);
+}
+
+/* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder
+ * for this compression.
+ */
+static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_useRowMatchFinderMode_e mode) {
+ assert(mode != ZSTD_urm_auto);
+ return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_urm_enableRowMatchFinder);
+}
+
+/* Returns row matchfinder usage enum given an initial mode and cParams */
+static ZSTD_useRowMatchFinderMode_e ZSTD_resolveRowMatchFinderMode(ZSTD_useRowMatchFinderMode_e mode,
+ const ZSTD_compressionParameters* const cParams) {
+#if !defined(ZSTD_NO_INTRINSICS) && (defined(__SSE2__) || defined(__ARM_NEON))
+ int const kHasSIMD128 = 1;
+#else
+ int const kHasSIMD128 = 0;
+#endif
+ if (mode != ZSTD_urm_auto) return mode; /* if requested enabled, but no SIMD, we will still use the row matchfinder */
+ mode = ZSTD_urm_disableRowMatchFinder;
+ if (!ZSTD_rowMatchFinderSupported(cParams->strategy)) return mode;
+ if (kHasSIMD128) {
+ if (cParams->windowLog > 14) mode = ZSTD_urm_enableRowMatchFinder;
+ } else {
+ if (cParams->windowLog > 17) mode = ZSTD_urm_enableRowMatchFinder;
+ }
+ return mode;
+}
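
For orientation (editor's sketch, not from the patch; the function is static, so this is illustrative only), the auto mode resolves differently around a mid-sized window:

    ZSTD_compressionParameters cp = { 0 };
    cp.strategy  = ZSTD_lazy2;    /* within greedy..lazy2, so the row matchfinder is supported */
    cp.windowLog = 16;
    /* with SSE2/NEON : 16 > 14  -> ZSTD_urm_enableRowMatchFinder
     * without SIMD   : 16 <= 17 -> ZSTD_urm_disableRowMatchFinder */
    ZSTD_useRowMatchFinderMode_e const mode =
        ZSTD_resolveRowMatchFinderMode(ZSTD_urm_auto, &cp);
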
+
+/* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */
+static int ZSTD_allocateChainTable(const ZSTD_strategy strategy,
+ const ZSTD_useRowMatchFinderMode_e useRowMatchFinder,
+ const U32 forDDSDict) {
+ assert(useRowMatchFinder != ZSTD_urm_auto);
+ /* We should always allocate a chaintable if we are allocating a matchstate for a DDS dictionary.
+ * We do not allocate a chaintable if we are using ZSTD_fast, or are using the row-based matchfinder.
+ */
+ return forDDSDict || ((strategy != ZSTD_fast) && !ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder));
+}
+
+/* Returns 1 if compression parameters are such that we should
+ * enable long distance matching (wlog >= 27, strategy >= btopt).
+ * Returns 0 otherwise.
+ */
+static U32 ZSTD_CParams_shouldEnableLdm(const ZSTD_compressionParameters* const cParams) {
+ return cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27;
+}
+
+/* Returns 1 if compression parameters are such that we should
+ * enable blockSplitter (wlog >= 17, strategy >= btopt).
+ * Returns 0 otherwise.
+ */
+static U32 ZSTD_CParams_useBlockSplitter(const ZSTD_compressionParameters* const cParams) {
+ return cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17;
+}
+
static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
ZSTD_compressionParameters cParams)
{
ZSTD_CCtx_params cctxParams;
- memset(&cctxParams, 0, sizeof(cctxParams));
+ /* should not matter, as all cParams are presumed properly defined */
+ ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT);
cctxParams.cParams = cParams;
- cctxParams.compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */
+
+ /* Adjust advanced params according to cParams */
+ if (ZSTD_CParams_shouldEnableLdm(&cParams)) {
+ DEBUGLOG(4, "ZSTD_makeCCtxParamsFromCParams(): Including LDM into cctx params");
+ cctxParams.ldmParams.enableLdm = 1;
+ /* LDM is enabled by default for optimal parser and window size >= 128MB */
+ ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams);
+ assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog);
+ assert(cctxParams.ldmParams.hashRateLog < 32);
+ }
+
+ if (ZSTD_CParams_useBlockSplitter(&cParams)) {
+ DEBUGLOG(4, "ZSTD_makeCCtxParamsFromCParams(): Including block splitting into cctx params");
+ cctxParams.splitBlocks = 1;
+ }
+
+ cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
assert(!ZSTD_checkCParams(cParams));
- cctxParams.fParams.contentSizeFlag = 1;
return cctxParams;
}
@@ -205,13 +297,12 @@ static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
ZSTD_customMem customMem)
{
ZSTD_CCtx_params* params;
- if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
- params = (ZSTD_CCtx_params*)ZSTD_calloc(
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+ params = (ZSTD_CCtx_params*)ZSTD_customCalloc(
sizeof(ZSTD_CCtx_params), customMem);
if (!params) { return NULL; }
+ ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
params->customMem = customMem;
- params->compressionLevel = ZSTD_CLEVEL_DEFAULT;
- params->fParams.contentSizeFlag = 1;
return params;
}
@@ -223,7 +314,7 @@ ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
{
if (params == NULL) { return 0; }
- ZSTD_free(params, params->customMem);
+ ZSTD_customFree(params, params->customMem);
return 0;
}
@@ -234,35 +325,54 @@ size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
- memset(cctxParams, 0, sizeof(*cctxParams));
+ ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
cctxParams->compressionLevel = compressionLevel;
cctxParams->fParams.contentSizeFlag = 1;
return 0;
}
+#define ZSTD_NO_CLEVEL 0
+
+/**
+ * Initializes the cctxParams from params and compressionLevel.
+ * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL.
+ */
+static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel)
+{
+ assert(!ZSTD_checkCParams(params->cParams));
+ ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
+ cctxParams->cParams = params->cParams;
+ cctxParams->fParams = params->fParams;
+ /* Should not matter, as all cParams are presumed properly defined.
+ * But, set it for tracing anyway.
+ */
+ cctxParams->compressionLevel = compressionLevel;
+ cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, &params->cParams);
+ DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d", cctxParams->useRowMatchFinder);
+}
+
size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
{
RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
- memset(cctxParams, 0, sizeof(*cctxParams));
- assert(!ZSTD_checkCParams(params.cParams));
- cctxParams->cParams = params.cParams;
- cctxParams->fParams = params.fParams;
- cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */
+ ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL);
return 0;
}
-/* ZSTD_assignParamsToCCtxParams() :
- * params is presumed valid at this stage */
-static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams(
- const ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
+/**
+ * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
+ * @param params Validated zstd parameters.
+ */
+static void ZSTD_CCtxParams_setZstdParams(
+ ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
{
- ZSTD_CCtx_params ret = *cctxParams;
assert(!ZSTD_checkCParams(params->cParams));
- ret.cParams = params->cParams;
- ret.fParams = params->fParams;
- ret.compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */
- return ret;
+ cctxParams->cParams = params->cParams;
+ cctxParams->fParams = params->fParams;
+ /* Should not matter, as all cParams are presumed properly defined.
+ * But, set it for tracing anyway.
+ */
+ cctxParams->compressionLevel = ZSTD_NO_CLEVEL;
}
ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
@@ -354,6 +464,11 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
#endif
return bounds;
+ case ZSTD_c_enableDedicatedDictSearch:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
case ZSTD_c_enableLongDistanceMatching:
bounds.lowerBound = 0;
bounds.upperBound = 1;
@@ -397,7 +512,7 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
return bounds;
case ZSTD_c_forceAttachDict:
- ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceCopy);
+ ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad);
bounds.lowerBound = ZSTD_dictDefaultAttach;
bounds.upperBound = ZSTD_dictForceLoad; /* note : how to ensure at compile time that this is the highest value enum ? */
return bounds;
@@ -418,6 +533,37 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
return bounds;
+ case ZSTD_c_stableInBuffer:
+ case ZSTD_c_stableOutBuffer:
+ bounds.lowerBound = (int)ZSTD_bm_buffered;
+ bounds.upperBound = (int)ZSTD_bm_stable;
+ return bounds;
+
+ case ZSTD_c_blockDelimiters:
+ bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters;
+ bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters;
+ return bounds;
+
+ case ZSTD_c_validateSequences:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_splitBlocks:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_useRowMatchFinder:
+ bounds.lowerBound = (int)ZSTD_urm_auto;
+ bounds.upperBound = (int)ZSTD_urm_enableRowMatchFinder;
+ return bounds;
+
+ case ZSTD_c_deterministicRefPrefix:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
default:
bounds.error = ERROR(parameter_unsupported);
return bounds;
@@ -465,6 +611,7 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
case ZSTD_c_jobSize:
case ZSTD_c_overlapLog:
case ZSTD_c_rsyncable:
+ case ZSTD_c_enableDedicatedDictSearch:
case ZSTD_c_enableLongDistanceMatching:
case ZSTD_c_ldmHashLog:
case ZSTD_c_ldmMinMatch:
@@ -474,6 +621,13 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
case ZSTD_c_literalCompressionMode:
case ZSTD_c_targetCBlockSize:
case ZSTD_c_srcSizeHint:
+ case ZSTD_c_stableInBuffer:
+ case ZSTD_c_stableOutBuffer:
+ case ZSTD_c_blockDelimiters:
+ case ZSTD_c_validateSequences:
+ case ZSTD_c_splitBlocks:
+ case ZSTD_c_useRowMatchFinder:
+ case ZSTD_c_deterministicRefPrefix:
default:
return 0;
}
@@ -515,12 +669,20 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
case ZSTD_c_jobSize:
case ZSTD_c_overlapLog:
case ZSTD_c_rsyncable:
+ case ZSTD_c_enableDedicatedDictSearch:
case ZSTD_c_enableLongDistanceMatching:
case ZSTD_c_ldmHashLog:
case ZSTD_c_ldmMinMatch:
case ZSTD_c_ldmBucketSizeLog:
case ZSTD_c_targetCBlockSize:
case ZSTD_c_srcSizeHint:
+ case ZSTD_c_stableInBuffer:
+ case ZSTD_c_stableOutBuffer:
+ case ZSTD_c_blockDelimiters:
+ case ZSTD_c_validateSequences:
+ case ZSTD_c_splitBlocks:
+ case ZSTD_c_useRowMatchFinder:
+ case ZSTD_c_deterministicRefPrefix:
break;
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
@@ -541,9 +703,10 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
case ZSTD_c_compressionLevel : {
FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
- if (value) { /* 0 : does not change current level */
+ if (value == 0)
+ CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
+ else
CCtxParams->compressionLevel = value;
- }
if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
return 0; /* return type (size_t) cannot represent negative values */
}
@@ -667,6 +830,10 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
return CCtxParams->rsyncable;
#endif
+ case ZSTD_c_enableDedicatedDictSearch :
+ CCtxParams->enableDedicatedDictSearch = (value!=0);
+ return CCtxParams->enableDedicatedDictSearch;
+
case ZSTD_c_enableLongDistanceMatching :
CCtxParams->ldmParams.enableLdm = (value!=0);
return CCtxParams->ldmParams.enableLdm;
@@ -707,17 +874,52 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
CCtxParams->srcSizeHint = value;
return CCtxParams->srcSizeHint;
+ case ZSTD_c_stableInBuffer:
+ BOUNDCHECK(ZSTD_c_stableInBuffer, value);
+ CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value;
+ return CCtxParams->inBufferMode;
+
+ case ZSTD_c_stableOutBuffer:
+ BOUNDCHECK(ZSTD_c_stableOutBuffer, value);
+ CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value;
+ return CCtxParams->outBufferMode;
+
+ case ZSTD_c_blockDelimiters:
+ BOUNDCHECK(ZSTD_c_blockDelimiters, value);
+ CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
+ return CCtxParams->blockDelimiters;
+
+ case ZSTD_c_validateSequences:
+ BOUNDCHECK(ZSTD_c_validateSequences, value);
+ CCtxParams->validateSequences = value;
+ return CCtxParams->validateSequences;
+
+ case ZSTD_c_splitBlocks:
+ BOUNDCHECK(ZSTD_c_splitBlocks, value);
+ CCtxParams->splitBlocks = value;
+ return CCtxParams->splitBlocks;
+
+ case ZSTD_c_useRowMatchFinder:
+ BOUNDCHECK(ZSTD_c_useRowMatchFinder, value);
+ CCtxParams->useRowMatchFinder = (ZSTD_useRowMatchFinderMode_e)value;
+ return CCtxParams->useRowMatchFinder;
+
+ case ZSTD_c_deterministicRefPrefix:
+ BOUNDCHECK(ZSTD_c_deterministicRefPrefix, value);
+ CCtxParams->deterministicRefPrefix = !!value;
+ return CCtxParams->deterministicRefPrefix;
+
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
}
}
-size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value)
+size_t ZSTD_CCtx_getParameter(ZSTD_CCtx const* cctx, ZSTD_cParameter param, int* value)
{
return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
}
size_t ZSTD_CCtxParams_getParameter(
- ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, int* value)
+ ZSTD_CCtx_params const* CCtxParams, ZSTD_cParameter param, int* value)
{
switch(param)
{
@@ -794,6 +996,9 @@ size_t ZSTD_CCtxParams_getParameter(
*value = CCtxParams->rsyncable;
break;
#endif
+ case ZSTD_c_enableDedicatedDictSearch :
+ *value = CCtxParams->enableDedicatedDictSearch;
+ break;
case ZSTD_c_enableLongDistanceMatching :
*value = CCtxParams->ldmParams.enableLdm;
break;
@@ -815,6 +1020,27 @@ size_t ZSTD_CCtxParams_getParameter(
case ZSTD_c_srcSizeHint :
*value = (int)CCtxParams->srcSizeHint;
break;
+ case ZSTD_c_stableInBuffer :
+ *value = (int)CCtxParams->inBufferMode;
+ break;
+ case ZSTD_c_stableOutBuffer :
+ *value = (int)CCtxParams->outBufferMode;
+ break;
+ case ZSTD_c_blockDelimiters :
+ *value = (int)CCtxParams->blockDelimiters;
+ break;
+ case ZSTD_c_validateSequences :
+ *value = (int)CCtxParams->validateSequences;
+ break;
+ case ZSTD_c_splitBlocks :
+ *value = (int)CCtxParams->splitBlocks;
+ break;
+ case ZSTD_c_useRowMatchFinder :
+ *value = (int)CCtxParams->useRowMatchFinder;
+ break;
+ case ZSTD_c_deterministicRefPrefix:
+ *value = (int)CCtxParams->deterministicRefPrefix;
+ break;
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
}
return 0;
@@ -850,6 +1076,14 @@ ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long lo
return 0;
}
+static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(
+ int const compressionLevel,
+ size_t const dictSize);
+static int ZSTD_dedicatedDictSearch_isSupported(
+ const ZSTD_compressionParameters* cParams);
+static void ZSTD_dedicatedDictSearch_revertCParams(
+ ZSTD_compressionParameters* cParams);
+
/**
* Initializes the local dict using the requested parameters.
* NOTE: This does not use the pledged src size, because it may be used for more
@@ -858,8 +1092,6 @@ ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long lo
static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
{
ZSTD_localDict* const dl = &cctx->localDict;
- ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(
- &cctx->requestedParams, ZSTD_CONTENTSIZE_UNKNOWN, dl->dictSize);
if (dl->dict == NULL) {
/* No local dictionary. */
assert(dl->dictBuffer == NULL);
@@ -876,12 +1108,12 @@ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
assert(cctx->cdict == NULL);
assert(cctx->prefixDict.dict == NULL);
- dl->cdict = ZSTD_createCDict_advanced(
+ dl->cdict = ZSTD_createCDict_advanced2(
dl->dict,
dl->dictSize,
ZSTD_dlm_byRef,
dl->dictContentType,
- cParams,
+ &cctx->requestedParams,
cctx->customMem);
RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced failed");
cctx->cdict = dl->cdict;
@@ -894,8 +1126,6 @@ size_t ZSTD_CCtx_loadDictionary_advanced(
{
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't load a dictionary when ctx is not in init stage.");
- RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
- "no malloc for static CCtx");
DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
ZSTD_clearAllDicts(cctx); /* in case one already exists */
if (dict == NULL || dictSize == 0) /* no dictionary mode */
@@ -903,9 +1133,12 @@ size_t ZSTD_CCtx_loadDictionary_advanced(
if (dictLoadMethod == ZSTD_dlm_byRef) {
cctx->localDict.dict = dict;
} else {
- void* dictBuffer = ZSTD_malloc(dictSize, cctx->customMem);
+ void* dictBuffer;
+ RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
+ "no malloc for static CCtx");
+ dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem);
RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!");
- memcpy(dictBuffer, dict, dictSize);
+ ZSTD_memcpy(dictBuffer, dict, dictSize);
cctx->localDict.dictBuffer = dictBuffer;
cctx->localDict.dict = dictBuffer;
}
@@ -938,6 +1171,14 @@ size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
return 0;
}
+size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool)
+{
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't ref a pool when ctx not in init stage.");
+ cctx->pool = pool;
+ return 0;
+}
+
size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
{
return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
@@ -1022,23 +1263,83 @@ U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
return hashLog - btScale;
}
+/** ZSTD_dictAndWindowLog() :
+ * Returns an adjusted window log that is large enough to fit the source and the dictionary.
+ * The zstd format says that the entire dictionary is valid if one byte of the dictionary
+ * is within the window. So the hashLog and chainLog should be large enough to reference both
+ * the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing
+ * the hashLog and chainLog.
+ * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN.
+ */
+static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)
+{
+ const U64 maxWindowSize = 1ULL << ZSTD_WINDOWLOG_MAX;
+ /* No dictionary ==> No change */
+ if (dictSize == 0) {
+ return windowLog;
+ }
+ assert(windowLog <= ZSTD_WINDOWLOG_MAX);
+ assert(srcSize != ZSTD_CONTENTSIZE_UNKNOWN); /* Handled in ZSTD_adjustCParams_internal() */
+ {
+ U64 const windowSize = 1ULL << windowLog;
+ U64 const dictAndWindowSize = dictSize + windowSize;
+ /* If the window size is already large enough to fit both the source and the dictionary
+ * then just use the window size. Otherwise adjust so that it fits the dictionary and
+ * the window.
+ */
+ if (windowSize >= dictSize + srcSize) {
+ return windowLog; /* Window size large enough already */
+ } else if (dictAndWindowSize >= maxWindowSize) {
+ return ZSTD_WINDOWLOG_MAX; /* Larger than max window log */
+ } else {
+ return ZSTD_highbit32((U32)dictAndWindowSize - 1) + 1;
+ }
+ }
+}
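/* Worked example of the adjustment above (assuming ZSTD_WINDOWLOG_MAX == 31):
 * windowLog = 20 (1 MiB window), dictSize = 512 KiB, srcSize = 2 MiB.
 * The 1 MiB window cannot hold dict + src, and dictAndWindowSize = 1.5 MiB stays below the
 * 2^31 cap, so the function returns ZSTD_highbit32(1572863) + 1 = 21, i.e. a 2 MiB window,
 * large enough to reference both the dictionary and the window. */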
+
/** ZSTD_adjustCParams_internal() :
* optimize `cPar` for a specified input (`srcSize` and `dictSize`).
* mostly downsize to reduce memory consumption and initialization latency.
* `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
+ * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`.
* note : `srcSize==0` means 0!
* condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
static ZSTD_compressionParameters
ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
unsigned long long srcSize,
- size_t dictSize)
+ size_t dictSize,
+ ZSTD_cParamMode_e mode)
{
- static const U64 minSrcSize = 513; /* (1<<9) + 1 */
- static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
+ const U64 minSrcSize = 513; /* (1<<9) + 1 */
+ const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
assert(ZSTD_checkCParams(cPar)==0);
- if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN)
- srcSize = minSrcSize;
+ switch (mode) {
+ case ZSTD_cpm_unknown:
+ case ZSTD_cpm_noAttachDict:
+ /* If we don't know the source size, don't make any
+ * assumptions about it. We will already have selected
+ * smaller parameters if a dictionary is in use.
+ */
+ break;
+ case ZSTD_cpm_createCDict:
+ /* Assume a small source size when creating a dictionary
+ * with an unknown source size.
+ */
+ if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN)
+ srcSize = minSrcSize;
+ break;
+ case ZSTD_cpm_attachDict:
+ /* Dictionary has its own dedicated parameters which have
+ * already been selected. We are selecting parameters
+ * for only the source.
+ */
+ dictSize = 0;
+ break;
+ default:
+ assert(0);
+ break;
+ }
/* resize windowLog if input is small enough, to use less memory */
if ( (srcSize < maxWindowResize)
@@ -1049,10 +1350,12 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
ZSTD_highbit32(tSize-1) + 1;
if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
}
- if (cPar.hashLog > cPar.windowLog+1) cPar.hashLog = cPar.windowLog+1;
- { U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
- if (cycleLog > cPar.windowLog)
- cPar.chainLog -= (cycleLog - cPar.windowLog);
+ if (srcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+ U32 const dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, (U64)srcSize, (U64)dictSize);
+ U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
+ if (cPar.hashLog > dictAndWindowLog+1) cPar.hashLog = dictAndWindowLog+1;
+ if (cycleLog > dictAndWindowLog)
+ cPar.chainLog -= (cycleLog - dictAndWindowLog);
}
if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
@@ -1068,38 +1371,50 @@ ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
{
cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */
if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
- return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize);
+ return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown);
}
-static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize);
-static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize);
+static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
+static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
+
+static void ZSTD_overrideCParams(
+ ZSTD_compressionParameters* cParams,
+ const ZSTD_compressionParameters* overrides)
+{
+ if (overrides->windowLog) cParams->windowLog = overrides->windowLog;
+ if (overrides->hashLog) cParams->hashLog = overrides->hashLog;
+ if (overrides->chainLog) cParams->chainLog = overrides->chainLog;
+ if (overrides->searchLog) cParams->searchLog = overrides->searchLog;
+ if (overrides->minMatch) cParams->minMatch = overrides->minMatch;
+ if (overrides->targetLength) cParams->targetLength = overrides->targetLength;
+ if (overrides->strategy) cParams->strategy = overrides->strategy;
+}
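/* The override convention above treats 0 as "unset". In terms of the public API, any
 * compression parameter the user sets explicitly wins over the value derived from the
 * compression level; a minimal sketch with placeholder values: */
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 24);  /* nonzero => overrides the level's windowLog */
    /* hashLog, chainLog, ... remain 0 in requestedParams and keep their level-derived values */
    ZSTD_freeCCtx(cctx);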
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
- const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize)
+ const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
{
ZSTD_compressionParameters cParams;
if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
srcSizeHint = CCtxParams->srcSizeHint;
}
- cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize);
+ cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
- if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog;
- if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog;
- if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog;
- if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog;
- if (CCtxParams->cParams.minMatch) cParams.minMatch = CCtxParams->cParams.minMatch;
- if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength;
- if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy;
+ ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
assert(!ZSTD_checkCParams(cParams));
/* srcSizeHint == 0 means 0 */
- return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize);
+ return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode);
}
static size_t
ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
+ const ZSTD_useRowMatchFinderMode_e useRowMatchFinder,
+ const U32 enableDedicatedDictSearch,
const U32 forCCtx)
{
- size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
+ /* chain table size should be 0 for fast or row-hash strategies */
+ size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder, enableDedicatedDictSearch && !forCCtx)
+ ? ((size_t)1 << cParams->chainLog)
+ : 0;
size_t const hSize = ((size_t)1) << cParams->hashLog;
U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
@@ -1109,71 +1424,117 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
+ hSize * sizeof(U32)
+ h3Size * sizeof(U32);
size_t const optPotentialSpace =
- ZSTD_cwksp_alloc_size((MaxML+1) * sizeof(U32))
- + ZSTD_cwksp_alloc_size((MaxLL+1) * sizeof(U32))
- + ZSTD_cwksp_alloc_size((MaxOff+1) * sizeof(U32))
- + ZSTD_cwksp_alloc_size((1<<Litbits) * sizeof(U32))
- + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
- + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
+ ZSTD_cwksp_aligned_alloc_size((MaxML+1) * sizeof(U32))
+ + ZSTD_cwksp_aligned_alloc_size((MaxLL+1) * sizeof(U32))
+ + ZSTD_cwksp_aligned_alloc_size((MaxOff+1) * sizeof(U32))
+ + ZSTD_cwksp_aligned_alloc_size((1<<Litbits) * sizeof(U32))
+ + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
+ + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
+ size_t const lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)
+ ? ZSTD_cwksp_aligned_alloc_size(hSize*sizeof(U16))
+ : 0;
size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
? optPotentialSpace
: 0;
+ size_t const slackSpace = ZSTD_cwksp_slack_space_required();
+
+ /* tables are guaranteed to be sized in multiples of 64 bytes (or 16 uint32_t) */
+ ZSTD_STATIC_ASSERT(ZSTD_HASHLOG_MIN >= 4 && ZSTD_WINDOWLOG_MIN >= 4 && ZSTD_CHAINLOG_MIN >= 4);
+ assert(useRowMatchFinder != ZSTD_urm_auto);
+
DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
(U32)chainSize, (U32)hSize, (U32)h3Size);
- return tableSpace + optSpace;
+ return tableSpace + optSpace + slackSpace + lazyAdditionalSpace;
+}
+
+static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+ const ZSTD_compressionParameters* cParams,
+ const ldmParams_t* ldmParams,
+ const int isStatic,
+ const ZSTD_useRowMatchFinderMode_e useRowMatchFinder,
+ const size_t buffInSize,
+ const size_t buffOutSize,
+ const U64 pledgedSrcSize)
+{
+ size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << cParams->windowLog), pledgedSrcSize));
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
+ U32 const divider = (cParams->minMatch==3) ? 3 : 4;
+ size_t const maxNbSeq = blockSize / divider;
+ size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
+ + ZSTD_cwksp_aligned_alloc_size(maxNbSeq * sizeof(seqDef))
+ + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
+ size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
+ size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
+ size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 0, /* forCCtx */ 1);
+
+ size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
+ size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
+ size_t const ldmSeqSpace = ldmParams->enableLdm ?
+ ZSTD_cwksp_aligned_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
+
+
+ size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
+ + ZSTD_cwksp_alloc_size(buffOutSize);
+
+ size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;
+
+ size_t const neededSpace =
+ cctxSpace +
+ entropySpace +
+ blockStateSpace +
+ ldmSpace +
+ ldmSeqSpace +
+ matchStateSize +
+ tokenSpace +
+ bufferSpace;
+
+ DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
+ return neededSpace;
}
size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
- RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
- { ZSTD_compressionParameters const cParams =
- ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0);
- size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
- U32 const divider = (cParams.minMatch==3) ? 3 : 4;
- size_t const maxNbSeq = blockSize / divider;
- size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
- + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef))
- + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
- size_t const entropySpace = ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE);
- size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
- size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 1);
-
- size_t const ldmSpace = ZSTD_ldm_getTableSize(params->ldmParams);
- size_t const ldmSeqSpace = ZSTD_cwksp_alloc_size(ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize) * sizeof(rawSeq));
+ ZSTD_compressionParameters const cParams =
+ ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
+ ZSTD_useRowMatchFinderMode_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder,
+ &cParams);
- /* estimateCCtxSize is for one-shot compression. So no buffers should
- * be needed. However, we still allocate two 0-sized buffers, which can
- * take space under ASAN. */
- size_t const bufferSpace = ZSTD_cwksp_alloc_size(0)
- + ZSTD_cwksp_alloc_size(0);
-
- size_t const cctxSpace = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx));
-
- size_t const neededSpace =
- cctxSpace +
- entropySpace +
- blockStateSpace +
- ldmSpace +
- ldmSeqSpace +
- matchStateSize +
- tokenSpace +
- bufferSpace;
-
- DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
- return neededSpace;
- }
+ RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
+ /* estimateCCtxSize is for one-shot compression. So no buffers should
+ * be needed. However, we still allocate two 0-sized buffers, which can
+ * take space under ASAN. */
+ return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+ &cParams, &params->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN);
}
size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
{
- ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
- return ZSTD_estimateCCtxSize_usingCCtxParams(&params);
+ ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
+ if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
+ /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
+ size_t noRowCCtxSize;
+ size_t rowCCtxSize;
+ initialParams.useRowMatchFinder = ZSTD_urm_disableRowMatchFinder;
+ noRowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
+ initialParams.useRowMatchFinder = ZSTD_urm_enableRowMatchFinder;
+ rowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
+ return MAX(noRowCCtxSize, rowCCtxSize);
+ } else {
+ return ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
+ }
}
static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
{
- ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0);
- return ZSTD_estimateCCtxSize_usingCParams(cParams);
+ int tier = 0;
+ size_t largestSize = 0;
+ static const unsigned long long srcSizeTiers[4] = {16 KB, 128 KB, 256 KB, ZSTD_CONTENTSIZE_UNKNOWN};
+ for (; tier < 4; ++tier) {
+ /* Pick, for the given level, the cParams across all srcSize tiers that yield the largest cctxSize */
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeTiers[tier], 0, ZSTD_cpm_noAttachDict);
+ largestSize = MAX(ZSTD_estimateCCtxSize_usingCParams(cParams), largestSize);
+ }
+ return largestSize;
}
size_t ZSTD_estimateCCtxSize(int compressionLevel)
@@ -1181,6 +1542,7 @@ size_t ZSTD_estimateCCtxSize(int compressionLevel)
int level;
size_t memBudget = 0;
for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
+ /* Ensure monotonically increasing memory usage as compression level increases */
size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
if (newMB > memBudget) memBudget = newMB;
}
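/* Illustrative use of the estimate (a sketch relying on the experimental static-allocation
 * API; dst/src buffers are placeholders and error handling is omitted): the returned size
 * bounds the workspace needed for one-shot compression at the given level. */
    size_t const wkspSize = ZSTD_estimateCCtxSize(3);             /* level 3 */
    void* const wksp = malloc(wkspSize);                          /* caller-owned workspace */
    ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, wkspSize);
    size_t const cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, 3);
    free(wksp);   /* a static CCtx has no ZSTD_freeCCtx() counterpart; the caller owns the memory */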
@@ -1191,27 +1553,42 @@ size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
{ ZSTD_compressionParameters const cParams =
- ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0);
- size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
+ ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
- size_t const inBuffSize = ((size_t)1 << cParams.windowLog) + blockSize;
- size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
- size_t const streamingSize = ZSTD_cwksp_alloc_size(inBuffSize)
- + ZSTD_cwksp_alloc_size(outBuffSize);
-
- return CCtxSize + streamingSize;
+ size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)
+ ? ((size_t)1 << cParams.windowLog) + blockSize
+ : 0;
+ size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)
+ ? ZSTD_compressBound(blockSize) + 1
+ : 0;
+ ZSTD_useRowMatchFinderMode_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, &params->cParams);
+
+ return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+ &cParams, &params->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize,
+ ZSTD_CONTENTSIZE_UNKNOWN);
}
}
size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
{
- ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
- return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
+ ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
+ if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
+ /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
+ size_t noRowCCtxSize;
+ size_t rowCCtxSize;
+ initialParams.useRowMatchFinder = ZSTD_urm_disableRowMatchFinder;
+ noRowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
+ initialParams.useRowMatchFinder = ZSTD_urm_enableRowMatchFinder;
+ rowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
+ return MAX(noRowCCtxSize, rowCCtxSize);
+ } else {
+ return ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
+ }
}
static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
{
- ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0);
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
return ZSTD_estimateCStreamSize_usingCParams(cParams);
}
@@ -1305,16 +1682,6 @@ static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
}
/**
- * Indicates whether this compression proceeds directly from user-provided
- * source buffer to user-provided destination buffer (ZSTDb_not_buffered), or
- * whether the context needs to buffer the input/output (ZSTDb_buffered).
- */
-typedef enum {
- ZSTDb_not_buffered,
- ZSTDb_buffered
-} ZSTD_buffered_policy_e;
-
-/**
* Controls, for this matchState reset, whether the tables need to be cleared /
* prepared for the coming compression (ZSTDcrp_makeClean), or whether the
* tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
@@ -1341,20 +1708,27 @@ typedef enum {
ZSTD_resetTarget_CCtx
} ZSTD_resetTarget_e;
+
static size_t
ZSTD_reset_matchState(ZSTD_matchState_t* ms,
ZSTD_cwksp* ws,
const ZSTD_compressionParameters* cParams,
+ const ZSTD_useRowMatchFinderMode_e useRowMatchFinder,
const ZSTD_compResetPolicy_e crp,
const ZSTD_indexResetPolicy_e forceResetIndex,
const ZSTD_resetTarget_e forWho)
{
- size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
+ /* disable chain table allocation for fast or row-based strategies */
+ size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder,
+ ms->dedicatedDictSearch && (forWho == ZSTD_resetTarget_CDict))
+ ? ((size_t)1 << cParams->chainLog)
+ : 0;
size_t const hSize = ((size_t)1) << cParams->hashLog;
U32 const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
+ assert(useRowMatchFinder != ZSTD_urm_auto);
if (forceResetIndex == ZSTDirp_reset) {
ZSTD_window_init(&ms->window);
ZSTD_cwksp_mark_tables_dirty(ws);
@@ -1393,11 +1767,23 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
}
+ if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) {
+ { /* Row match finder needs an additional table of hashes ("tags") */
+ size_t const tagTableSize = hSize*sizeof(U16);
+ ms->tagTable = (U16*)ZSTD_cwksp_reserve_aligned(ws, tagTableSize);
+ if (ms->tagTable) ZSTD_memset(ms->tagTable, 0, tagTableSize);
+ }
+ { /* Switch to 32-entry rows if searchLog is 5 (or more) */
+ U32 const rowLog = cParams->searchLog < 5 ? 4 : 5;
+ assert(cParams->hashLog > rowLog);
+ ms->rowHashLog = cParams->hashLog - rowLog;
+ }
+ }
+
ms->cParams = *cParams;
RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
"failed a workspace allocation in ZSTD_reset_matchState");
-
return 0;
}
@@ -1414,75 +1800,85 @@ static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
}
+/** ZSTD_dictTooBig():
+ * When dictionaries are larger than ZSTD_CHUNKSIZE_MAX they can't be loaded in
+ * one go generically. So we ensure that in that case we reset the tables to zero,
+ * so that we can load as much of the dictionary as possible.
+ */
+static int ZSTD_dictTooBig(size_t const loadedDictSize)
+{
+ return loadedDictSize > ZSTD_CHUNKSIZE_MAX;
+}
+
/*! ZSTD_resetCCtx_internal() :
- note : `params` are assumed fully validated at this stage */
+ * @param loadedDictSize The size of the dictionary to be loaded
+ * into the context, if any. If no dictionary is used, or the
+ * dictionary is being attached / copied, then pass 0.
+ * note : `params` are assumed fully validated at this stage.
+ */
static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
- ZSTD_CCtx_params params,
+ ZSTD_CCtx_params const* params,
U64 const pledgedSrcSize,
+ size_t const loadedDictSize,
ZSTD_compResetPolicy_e const crp,
ZSTD_buffered_policy_e const zbuff)
{
ZSTD_cwksp* const ws = &zc->workspace;
- DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
- (U32)pledgedSrcSize, params.cParams.windowLog);
- assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+ DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d",
+ (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder);
+ assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
zc->isFirstBlock = 1;
- if (params.ldmParams.enableLdm) {
+ /* Set applied params early so we can modify them for LDM,
+ * and point params at the applied params.
+ */
+ zc->appliedParams = *params;
+ params = &zc->appliedParams;
+
+ assert(params->useRowMatchFinder != ZSTD_urm_auto);
+ if (params->ldmParams.enableLdm) {
/* Adjust long distance matching parameters */
- ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
- assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
- assert(params.ldmParams.hashRateLog < 32);
- zc->ldmState.hashPower = ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
+ ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, &params->cParams);
+ assert(params->ldmParams.hashLog >= params->ldmParams.bucketSizeLog);
+ assert(params->ldmParams.hashRateLog < 32);
}
- { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
+ { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize));
size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
- U32 const divider = (params.cParams.minMatch==3) ? 3 : 4;
+ U32 const divider = (params->cParams.minMatch==3) ? 3 : 4;
size_t const maxNbSeq = blockSize / divider;
- size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
- + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef))
- + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
- size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0;
- size_t const buffInSize = (zbuff==ZSTDb_buffered) ? windowSize + blockSize : 0;
- size_t const matchStateSize = ZSTD_sizeof_matchState(&params.cParams, /* forCCtx */ 1);
- size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
-
- ZSTD_indexResetPolicy_e needsIndexReset = zc->initialized ? ZSTDirp_continue : ZSTDirp_reset;
-
- if (ZSTD_indexTooCloseToMax(zc->blockState.matchState.window)) {
- needsIndexReset = ZSTDirp_reset;
- }
+ size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered)
+ ? ZSTD_compressBound(blockSize) + 1
+ : 0;
+ size_t const buffInSize = (zbuff == ZSTDb_buffered && params->inBufferMode == ZSTD_bm_buffered)
+ ? windowSize + blockSize
+ : 0;
+ size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize);
+
+ int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
+ int const dictTooBig = ZSTD_dictTooBig(loadedDictSize);
+ ZSTD_indexResetPolicy_e needsIndexReset =
+ (indexTooClose || dictTooBig || !zc->initialized) ? ZSTDirp_reset : ZSTDirp_continue;
- if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);
+ size_t const neededSpace =
+ ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+ &params->cParams, &params->ldmParams, zc->staticSize != 0, params->useRowMatchFinder,
+ buffInSize, buffOutSize, pledgedSrcSize);
+ int resizeWorkspace;
- /* Check if workspace is large enough, alloc a new one if needed */
- { size_t const cctxSpace = zc->staticSize ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;
- size_t const entropySpace = ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE);
- size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
- size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize) + ZSTD_cwksp_alloc_size(buffOutSize);
- size_t const ldmSpace = ZSTD_ldm_getTableSize(params.ldmParams);
- size_t const ldmSeqSpace = ZSTD_cwksp_alloc_size(maxNbLdmSeq * sizeof(rawSeq));
-
- size_t const neededSpace =
- cctxSpace +
- entropySpace +
- blockStateSpace +
- ldmSpace +
- ldmSeqSpace +
- matchStateSize +
- tokenSpace +
- bufferSpace;
+ FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");
+
+ if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);
+ { /* Check if workspace is large enough, alloc a new one if needed */
int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
-
- DEBUGLOG(4, "Need %zuKB workspace, including %zuKB for match state, and %zuKB for buffers",
- neededSpace>>10, matchStateSize>>10, bufferSpace>>10);
+ resizeWorkspace = workspaceTooSmall || workspaceWasteful;
+ DEBUGLOG(4, "Need %zu B workspace", neededSpace);
DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
- if (workspaceTooSmall || workspaceWasteful) {
+ if (resizeWorkspace) {
DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
ZSTD_cwksp_sizeof(ws) >> 10,
neededSpace >> 10);
@@ -1503,15 +1899,14 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
- zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, HUF_WORKSPACE_SIZE);
+ zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
} }
ZSTD_cwksp_clear(ws);
/* init params */
- zc->appliedParams = params;
- zc->blockState.matchState.cParams = params.cParams;
+ zc->blockState.matchState.cParams = params->cParams;
zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
zc->consumedSrcSize = 0;
zc->producedCSize = 0;
@@ -1524,6 +1919,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
XXH64_reset(&zc->xxhState, 0);
zc->stage = ZSTDcs_init;
zc->dictID = 0;
+ zc->dictContentSize = 0;
ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
@@ -1534,19 +1930,20 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
zc->seqStore.maxNbLit = blockSize;
/* buffers */
+ zc->bufferedPolicy = zbuff;
zc->inBuffSize = buffInSize;
zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
zc->outBuffSize = buffOutSize;
zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
/* ldm bucketOffsets table */
- if (params.ldmParams.enableLdm) {
+ if (params->ldmParams.enableLdm) {
/* TODO: avoid memset? */
- size_t const ldmBucketSize =
- ((size_t)1) << (params.ldmParams.hashLog -
- params.ldmParams.bucketSizeLog);
- zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, ldmBucketSize);
- memset(zc->ldmState.bucketOffsets, 0, ldmBucketSize);
+ size_t const numBuckets =
+ ((size_t)1) << (params->ldmParams.hashLog -
+ params->ldmParams.bucketSizeLog);
+ zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets);
+ ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets);
}
/* sequences storage */
@@ -1560,26 +1957,28 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
FORWARD_IF_ERROR(ZSTD_reset_matchState(
&zc->blockState.matchState,
ws,
- &params.cParams,
+ &params->cParams,
+ params->useRowMatchFinder,
crp,
needsIndexReset,
ZSTD_resetTarget_CCtx), "");
/* ldm hash table */
- if (params.ldmParams.enableLdm) {
+ if (params->ldmParams.enableLdm) {
/* TODO: avoid memset? */
- size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
+ size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog;
zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
- memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
+ ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
zc->maxNbLdmSequences = maxNbLdmSeq;
ZSTD_window_init(&zc->ldmState.window);
- ZSTD_window_clear(&zc->ldmState.window);
zc->ldmState.loadedDictEnd = 0;
}
+ assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace, resizeWorkspace));
DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
+
zc->initialized = 1;
return 0;
@@ -1618,12 +2017,14 @@ static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
U64 pledgedSrcSize)
{
size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
- return ( pledgedSrcSize <= cutoff
- || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
- || params->attachDictPref == ZSTD_dictForceAttach )
- && params->attachDictPref != ZSTD_dictForceCopy
- && !params->forceWindow; /* dictMatchState isn't correctly
- * handled in _enforceMaxDist */
+ int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch;
+ return dedicatedDictSearch
+ || ( ( pledgedSrcSize <= cutoff
+ || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
+ || params->attachDictPref == ZSTD_dictForceAttach )
+ && params->attachDictPref != ZSTD_dictForceCopy
+ && !params->forceWindow ); /* dictMatchState isn't correctly
+ * handled in _enforceMaxDist */
}
static size_t
@@ -1633,17 +2034,28 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
- { const ZSTD_compressionParameters* const cdict_cParams = &cdict->matchState.cParams;
+ DEBUGLOG(4, "ZSTD_resetCCtx_byAttachingCDict() pledgedSrcSize=%llu",
+ (unsigned long long)pledgedSrcSize);
+ {
+ ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams;
unsigned const windowLog = params.cParams.windowLog;
assert(windowLog != 0);
/* Resize working context table params for input only, since the dict
* has its own tables. */
- /* pledgeSrcSize == 0 means 0! */
- params.cParams = ZSTD_adjustCParams_internal(*cdict_cParams, pledgedSrcSize, 0);
+ /* pledgedSrcSize == 0 means 0! */
+
+ if (cdict->matchState.dedicatedDictSearch) {
+ ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams);
+ }
+
+ params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize,
+ cdict->dictContentSize, ZSTD_cpm_attachDict);
params.cParams.windowLog = windowLog;
- FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+ params.useRowMatchFinder = cdict->useRowMatchFinder; /* cdict overrides */
+ FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize,
+ /* loadedDictSize */ 0,
ZSTDcrp_makeClean, zbuff), "");
- assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
+ assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy);
}
{ const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
@@ -1668,9 +2080,10 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
} }
cctx->dictID = cdict->dictID;
+ cctx->dictContentSize = cdict->dictContentSize;
/* copy block state */
- memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
+ ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
return 0;
}
@@ -1683,14 +2096,18 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
{
const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
- DEBUGLOG(4, "copying dictionary into context");
+ assert(!cdict->matchState.dedicatedDictSearch);
+ DEBUGLOG(4, "ZSTD_resetCCtx_byCopyingCDict() pledgedSrcSize=%llu",
+ (unsigned long long)pledgedSrcSize);
{ unsigned const windowLog = params.cParams.windowLog;
assert(windowLog != 0);
/* Copy only compression parameters related to tables. */
params.cParams = *cdict_cParams;
params.cParams.windowLog = windowLog;
- FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+ params.useRowMatchFinder = cdict->useRowMatchFinder;
+ FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize,
+ /* loadedDictSize */ 0,
ZSTDcrp_leaveDirty, zbuff), "");
assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
@@ -1698,24 +2115,37 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
}
ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
+ assert(params.useRowMatchFinder != ZSTD_urm_auto);
/* copy tables */
- { size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
+ { size_t const chainSize = ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0 /* DDS guaranteed disabled */)
+ ? ((size_t)1 << cdict_cParams->chainLog)
+ : 0;
size_t const hSize = (size_t)1 << cdict_cParams->hashLog;
- memcpy(cctx->blockState.matchState.hashTable,
+ ZSTD_memcpy(cctx->blockState.matchState.hashTable,
cdict->matchState.hashTable,
hSize * sizeof(U32));
- memcpy(cctx->blockState.matchState.chainTable,
+ /* Do not copy cdict's chainTable if cctx has parameters such that it would not use chainTable */
+ if (ZSTD_allocateChainTable(cctx->appliedParams.cParams.strategy, cctx->appliedParams.useRowMatchFinder, 0 /* forDDSDict */)) {
+ ZSTD_memcpy(cctx->blockState.matchState.chainTable,
cdict->matchState.chainTable,
chainSize * sizeof(U32));
+ }
+ /* copy tag table */
+ if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder)) {
+ size_t const tagTableSize = hSize*sizeof(U16);
+ ZSTD_memcpy(cctx->blockState.matchState.tagTable,
+ cdict->matchState.tagTable,
+ tagTableSize);
+ }
}
/* Zero the hashTable3, since the cdict never fills it */
{ int const h3log = cctx->blockState.matchState.hashLog3;
size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
assert(cdict->matchState.hashLog3 == 0);
- memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
+ ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
}
ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
@@ -1729,9 +2159,10 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
}
cctx->dictID = cdict->dictID;
+ cctx->dictContentSize = cdict->dictContentSize;
/* copy block state */
- memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
+ ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
return 0;
}
@@ -1771,16 +2202,18 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
- DEBUGLOG(5, "ZSTD_copyCCtx_internal");
RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong,
"Can't copy a ctx that's not in init stage.");
-
- memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
+ DEBUGLOG(5, "ZSTD_copyCCtx_internal");
+ ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
{ ZSTD_CCtx_params params = dstCCtx->requestedParams;
/* Copy only compression parameters related to tables. */
params.cParams = srcCCtx->appliedParams.cParams;
+ assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_urm_auto);
+ params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder;
params.fParams = fParams;
- ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
+ ZSTD_resetCCtx_internal(dstCCtx, &params, pledgedSrcSize,
+ /* loadedDictSize */ 0,
ZSTDcrp_leaveDirty, zbuff);
assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
@@ -1792,18 +2225,22 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);
/* copy tables */
- { size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
+ { size_t const chainSize = ZSTD_allocateChainTable(srcCCtx->appliedParams.cParams.strategy,
+ srcCCtx->appliedParams.useRowMatchFinder,
+ 0 /* forDDSDict */)
+ ? ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog)
+ : 0;
size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
int const h3log = srcCCtx->blockState.matchState.hashLog3;
size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
- memcpy(dstCCtx->blockState.matchState.hashTable,
+ ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
srcCCtx->blockState.matchState.hashTable,
hSize * sizeof(U32));
- memcpy(dstCCtx->blockState.matchState.chainTable,
+ ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable,
srcCCtx->blockState.matchState.chainTable,
chainSize * sizeof(U32));
- memcpy(dstCCtx->blockState.matchState.hashTable3,
+ ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3,
srcCCtx->blockState.matchState.hashTable3,
h3Size * sizeof(U32));
}
@@ -1819,9 +2256,10 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
}
dstCCtx->dictID = srcCCtx->dictID;
+ dstCCtx->dictContentSize = srcCCtx->dictContentSize;
/* copy block state */
- memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));
+ ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));
return 0;
}
@@ -1834,7 +2272,7 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
{
ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
- ZSTD_buffered_policy_e const zbuff = (ZSTD_buffered_policy_e)(srcCCtx->inBuffSize>0);
+ ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy;
ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
@@ -1861,7 +2299,7 @@ ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerVa
assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */
assert(size < (1U<<31)); /* can be casted to int */
-#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
/* To validate that the table re-use logic is sound, and that we don't
* access table space that we haven't cleaned, we re-"poison" the table
* space every time we mark it dirty.
@@ -1905,7 +2343,7 @@ static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* par
ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
}
- if (params->cParams.strategy != ZSTD_fast) {
+ if (ZSTD_allocateChainTable(params->cParams.strategy, params->useRowMatchFinder, (U32)ms->dedicatedDictSearch)) {
U32 const chainSize = (U32)1 << params->cParams.chainLog;
if (params->cParams.strategy == ZSTD_btlazy2)
ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
@@ -1942,9 +2380,9 @@ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
}
- if (seqStorePtr->longLengthID==1)
+ if (seqStorePtr->longLengthType==ZSTD_llt_literalLength)
llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
- if (seqStorePtr->longLengthID==2)
+ if (seqStorePtr->longLengthType==ZSTD_llt_matchLength)
mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
}
@@ -1958,10 +2396,158 @@ static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
return (cctxParams->targetCBlockSize != 0);
}
-/* ZSTD_compressSequences_internal():
- * actually compresses both literals and sequences */
+/* ZSTD_blockSplitterEnabled():
+ * Returns 1 if the block splitting parameter is enabled, 0 otherwise.
+ * When enabled, compression makes a best effort to split blocks in order to improve the compression ratio. */
+static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params* cctxParams)
+{
+ DEBUGLOG(5, "ZSTD_blockSplitterEnabled(splitBlocks=%d)", cctxParams->splitBlocks);
+ return (cctxParams->splitBlocks != 0);
+}
+
+/* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types
+ * and size of the sequences statistics
+ */
+typedef struct {
+ U32 LLtype;
+ U32 Offtype;
+ U32 MLtype;
+ size_t size;
+ size_t lastCountSize; /* Accounts for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
+} ZSTD_symbolEncodingTypeStats_t;
+
+/* ZSTD_buildSequencesStatistics():
+ * Returns a ZSTD_symbolEncodingTypeStats_t, or a zstd error code in the `size` field.
+ * Modifies `nextEntropy` to have the appropriate values as a side effect.
+ * nbSeq must be greater than 0.
+ *
+ * entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32)
+ */
+static ZSTD_symbolEncodingTypeStats_t
+ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,
+ const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy,
+ BYTE* dst, const BYTE* const dstEnd,
+ ZSTD_strategy strategy, unsigned* countWorkspace,
+ void* entropyWorkspace, size_t entropyWkspSize) {
+ BYTE* const ostart = dst;
+ const BYTE* const oend = dstEnd;
+ BYTE* op = ostart;
+ FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
+ FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
+ FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
+ const BYTE* const ofCodeTable = seqStorePtr->ofCode;
+ const BYTE* const llCodeTable = seqStorePtr->llCode;
+ const BYTE* const mlCodeTable = seqStorePtr->mlCode;
+ ZSTD_symbolEncodingTypeStats_t stats;
+
+ stats.lastCountSize = 0;
+ /* convert length/distances into codes */
+ ZSTD_seqToCodes(seqStorePtr);
+ assert(op <= oend);
+ assert(nbSeq != 0); /* ZSTD_selectEncodingType() divides by nbSeq */
+ /* build CTable for Literal Lengths */
+ { unsigned max = MaxLL;
+ size_t const mostFrequent = HIST_countFast_wksp(countWorkspace, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
+ DEBUGLOG(5, "Building LL table");
+ nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
+ stats.LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
+ countWorkspace, max, mostFrequent, nbSeq,
+ LLFSELog, prevEntropy->litlengthCTable,
+ LL_defaultNorm, LL_defaultNormLog,
+ ZSTD_defaultAllowed, strategy);
+ assert(set_basic < set_compressed && set_rle < set_compressed);
+ assert(!(stats.LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(
+ op, (size_t)(oend - op),
+ CTable_LitLength, LLFSELog, (symbolEncodingType_e)stats.LLtype,
+ countWorkspace, max, llCodeTable, nbSeq,
+ LL_defaultNorm, LL_defaultNormLog, MaxLL,
+ prevEntropy->litlengthCTable,
+ sizeof(prevEntropy->litlengthCTable),
+ entropyWorkspace, entropyWkspSize);
+ if (ZSTD_isError(countSize)) {
+ DEBUGLOG(3, "ZSTD_buildCTable for LitLens failed");
+ stats.size = countSize;
+ return stats;
+ }
+ if (stats.LLtype == set_compressed)
+ stats.lastCountSize = countSize;
+ op += countSize;
+ assert(op <= oend);
+ } }
+ /* build CTable for Offsets */
+ { unsigned max = MaxOff;
+ size_t const mostFrequent = HIST_countFast_wksp(
+ countWorkspace, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
+ /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
+ ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
+ DEBUGLOG(5, "Building OF table");
+ nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
+ stats.Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
+ countWorkspace, max, mostFrequent, nbSeq,
+ OffFSELog, prevEntropy->offcodeCTable,
+ OF_defaultNorm, OF_defaultNormLog,
+ defaultPolicy, strategy);
+ assert(!(stats.Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(
+ op, (size_t)(oend - op),
+ CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)stats.Offtype,
+ countWorkspace, max, ofCodeTable, nbSeq,
+ OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+ prevEntropy->offcodeCTable,
+ sizeof(prevEntropy->offcodeCTable),
+ entropyWorkspace, entropyWkspSize);
+ if (ZSTD_isError(countSize)) {
+ DEBUGLOG(3, "ZSTD_buildCTable for Offsets failed");
+ stats.size = countSize;
+ return stats;
+ }
+ if (stats.Offtype == set_compressed)
+ stats.lastCountSize = countSize;
+ op += countSize;
+ assert(op <= oend);
+ } }
+ /* build CTable for MatchLengths */
+ { unsigned max = MaxML;
+ size_t const mostFrequent = HIST_countFast_wksp(
+ countWorkspace, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
+ DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
+ nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
+ stats.MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
+ countWorkspace, max, mostFrequent, nbSeq,
+ MLFSELog, prevEntropy->matchlengthCTable,
+ ML_defaultNorm, ML_defaultNormLog,
+ ZSTD_defaultAllowed, strategy);
+ assert(!(stats.MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(
+ op, (size_t)(oend - op),
+ CTable_MatchLength, MLFSELog, (symbolEncodingType_e)stats.MLtype,
+ countWorkspace, max, mlCodeTable, nbSeq,
+ ML_defaultNorm, ML_defaultNormLog, MaxML,
+ prevEntropy->matchlengthCTable,
+ sizeof(prevEntropy->matchlengthCTable),
+ entropyWorkspace, entropyWkspSize);
+ if (ZSTD_isError(countSize)) {
+ DEBUGLOG(3, "ZSTD_buildCTable for MatchLengths failed");
+ stats.size = countSize;
+ return stats;
+ }
+ if (stats.MLtype == set_compressed)
+ stats.lastCountSize = countSize;
+ op += countSize;
+ assert(op <= oend);
+ } }
+ stats.size = (size_t)(op-ostart);
+ return stats;
+}
+
+/* ZSTD_entropyCompressSeqStore_internal():
+ * compresses both literals and sequences
+ * Returns compressed size of block, or a zstd error.
+ */
MEM_STATIC size_t
-ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
+ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
const ZSTD_entropyCTables_t* prevEntropy,
ZSTD_entropyCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
@@ -1971,24 +2557,26 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
{
const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
ZSTD_strategy const strategy = cctxParams->cParams.strategy;
- unsigned count[MaxSeq+1];
+ unsigned* count = (unsigned*)entropyWorkspace;
FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
- U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
const seqDef* const sequences = seqStorePtr->sequencesStart;
+ const size_t nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
const BYTE* const ofCodeTable = seqStorePtr->ofCode;
const BYTE* const llCodeTable = seqStorePtr->llCode;
const BYTE* const mlCodeTable = seqStorePtr->mlCode;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstCapacity;
BYTE* op = ostart;
- size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
- BYTE* seqHead;
- BYTE* lastNCount = NULL;
+ size_t lastCountSize;
+
+ entropyWorkspace = count + (MaxSeq + 1);
+ entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);
- DEBUGLOG(5, "ZSTD_compressSequences_internal (nbSeq=%zu)", nbSeq);
+ DEBUGLOG(4, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu)", nbSeq);
ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+ assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);
/* Compress literals */
{ const BYTE* const literals = seqStorePtr->litStart;
@@ -2023,98 +2611,23 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
assert(op <= oend);
if (nbSeq==0) {
/* Copy the old tables over as if we repeated them */
- memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
+ ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
return (size_t)(op - ostart);
}
-
- /* seqHead : flags for FSE encoding type */
- seqHead = op++;
- assert(op <= oend);
-
- /* convert length/distances into codes */
- ZSTD_seqToCodes(seqStorePtr);
- /* build CTable for Literal Lengths */
- { unsigned max = MaxLL;
- size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
- DEBUGLOG(5, "Building LL table");
- nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
- LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode,
- count, max, mostFrequent, nbSeq,
- LLFSELog, prevEntropy->fse.litlengthCTable,
- LL_defaultNorm, LL_defaultNormLog,
- ZSTD_defaultAllowed, strategy);
- assert(set_basic < set_compressed && set_rle < set_compressed);
- assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
- { size_t const countSize = ZSTD_buildCTable(
- op, (size_t)(oend - op),
- CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
- count, max, llCodeTable, nbSeq,
- LL_defaultNorm, LL_defaultNormLog, MaxLL,
- prevEntropy->fse.litlengthCTable,
- sizeof(prevEntropy->fse.litlengthCTable),
- entropyWorkspace, entropyWkspSize);
- FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
- if (LLtype == set_compressed)
- lastNCount = op;
- op += countSize;
- assert(op <= oend);
- } }
- /* build CTable for Offsets */
- { unsigned max = MaxOff;
- size_t const mostFrequent = HIST_countFast_wksp(
- count, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
- /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
- ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
- DEBUGLOG(5, "Building OF table");
- nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
- Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode,
- count, max, mostFrequent, nbSeq,
- OffFSELog, prevEntropy->fse.offcodeCTable,
- OF_defaultNorm, OF_defaultNormLog,
- defaultPolicy, strategy);
- assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
- { size_t const countSize = ZSTD_buildCTable(
- op, (size_t)(oend - op),
- CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
- count, max, ofCodeTable, nbSeq,
- OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
- prevEntropy->fse.offcodeCTable,
- sizeof(prevEntropy->fse.offcodeCTable),
- entropyWorkspace, entropyWkspSize);
- FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
- if (Offtype == set_compressed)
- lastNCount = op;
- op += countSize;
- assert(op <= oend);
- } }
- /* build CTable for MatchLengths */
- { unsigned max = MaxML;
- size_t const mostFrequent = HIST_countFast_wksp(
- count, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
- DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
- nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
- MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode,
- count, max, mostFrequent, nbSeq,
- MLFSELog, prevEntropy->fse.matchlengthCTable,
- ML_defaultNorm, ML_defaultNormLog,
- ZSTD_defaultAllowed, strategy);
- assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
- { size_t const countSize = ZSTD_buildCTable(
- op, (size_t)(oend - op),
- CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
- count, max, mlCodeTable, nbSeq,
- ML_defaultNorm, ML_defaultNormLog, MaxML,
- prevEntropy->fse.matchlengthCTable,
- sizeof(prevEntropy->fse.matchlengthCTable),
- entropyWorkspace, entropyWkspSize);
- FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
- if (MLtype == set_compressed)
- lastNCount = op;
- op += countSize;
- assert(op <= oend);
- } }
-
- *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
+ {
+ ZSTD_symbolEncodingTypeStats_t stats;
+ BYTE* seqHead = op++;
+ /* build stats for sequences */
+ stats = ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
+ &prevEntropy->fse, &nextEntropy->fse,
+ op, oend,
+ strategy, count,
+ entropyWorkspace, entropyWkspSize);
+ FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
+ *seqHead = (BYTE)((stats.LLtype<<6) + (stats.Offtype<<4) + (stats.MLtype<<2));
+ lastCountSize = stats.lastCountSize;
+ op += stats.size;
+ }
{ size_t const bitstreamSize = ZSTD_encodeSequences(
op, (size_t)(oend - op),
@@ -2134,9 +2647,9 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
* In this exceedingly rare case, we will simply emit an uncompressed
* block, since it isn't worth optimizing.
*/
- if (lastNCount && (op - lastNCount) < 4) {
- /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
- assert(op - lastNCount == 3);
+ if (lastCountSize && (lastCountSize + bitstreamSize) < 4) {
+ /* lastCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
+ assert(lastCountSize + bitstreamSize == 3);
DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
"emitting an uncompressed block.");
return 0;
@@ -2148,7 +2661,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
}
MEM_STATIC size_t
-ZSTD_compressSequences(seqStore_t* seqStorePtr,
+ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr,
const ZSTD_entropyCTables_t* prevEntropy,
ZSTD_entropyCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
@@ -2157,7 +2670,7 @@ ZSTD_compressSequences(seqStore_t* seqStorePtr,
void* entropyWorkspace, size_t entropyWkspSize,
int bmi2)
{
- size_t const cSize = ZSTD_compressSequences_internal(
+ size_t const cSize = ZSTD_entropyCompressSeqStore_internal(
seqStorePtr, prevEntropy, nextEntropy, cctxParams,
dst, dstCapacity,
entropyWorkspace, entropyWkspSize, bmi2);
@@ -2167,22 +2680,22 @@ ZSTD_compressSequences(seqStore_t* seqStorePtr,
*/
if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
return 0; /* block not compressed */
- FORWARD_IF_ERROR(cSize, "ZSTD_compressSequences_internal failed");
+ FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSeqStore_internal failed");
/* Check compressibility */
{ size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
if (cSize >= maxCSize) return 0; /* block not compressed */
}
-
+ DEBUGLOG(4, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize);
return cSize;
}
/* ZSTD_selectBlockCompressor() :
* Not static, but internal use only (used by long distance matcher)
* assumption : strat is a valid strategy */
-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_useRowMatchFinderMode_e useRowMatchFinder, ZSTD_dictMode_e dictMode)
{
- static const ZSTD_blockCompressor blockCompressor[3][ZSTD_STRATEGY_MAX+1] = {
+ static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
{ ZSTD_compressBlock_fast /* default for 0 */,
ZSTD_compressBlock_fast,
ZSTD_compressBlock_doubleFast,
@@ -2212,13 +2725,44 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMo
ZSTD_compressBlock_btlazy2_dictMatchState,
ZSTD_compressBlock_btopt_dictMatchState,
ZSTD_compressBlock_btultra_dictMatchState,
- ZSTD_compressBlock_btultra_dictMatchState }
+ ZSTD_compressBlock_btultra_dictMatchState },
+ { NULL /* default for 0 */,
+ NULL,
+ NULL,
+ ZSTD_compressBlock_greedy_dedicatedDictSearch,
+ ZSTD_compressBlock_lazy_dedicatedDictSearch,
+ ZSTD_compressBlock_lazy2_dedicatedDictSearch,
+ NULL,
+ NULL,
+ NULL,
+ NULL }
};
ZSTD_blockCompressor selectedCompressor;
ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
- selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
+ DEBUGLOG(4, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder);
+ if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) {
+ static const ZSTD_blockCompressor rowBasedBlockCompressors[4][3] = {
+ { ZSTD_compressBlock_greedy_row,
+ ZSTD_compressBlock_lazy_row,
+ ZSTD_compressBlock_lazy2_row },
+ { ZSTD_compressBlock_greedy_extDict_row,
+ ZSTD_compressBlock_lazy_extDict_row,
+ ZSTD_compressBlock_lazy2_extDict_row },
+ { ZSTD_compressBlock_greedy_dictMatchState_row,
+ ZSTD_compressBlock_lazy_dictMatchState_row,
+ ZSTD_compressBlock_lazy2_dictMatchState_row },
+ { ZSTD_compressBlock_greedy_dedicatedDictSearch_row,
+ ZSTD_compressBlock_lazy_dedicatedDictSearch_row,
+ ZSTD_compressBlock_lazy2_dedicatedDictSearch_row }
+ };
+ DEBUGLOG(4, "Selecting a row-based matchfinder");
+ assert(useRowMatchFinder != ZSTD_urm_auto);
+ selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_greedy];
+ } else {
+ selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
+ }
assert(selectedCompressor != NULL);
return selectedCompressor;
}
@@ -2226,7 +2770,7 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMo
static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
const BYTE* anchor, size_t lastLLSize)
{
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
+ ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
seqStorePtr->lit += lastLLSize;
}
@@ -2234,7 +2778,7 @@ void ZSTD_resetSeqStore(seqStore_t* ssPtr)
{
ssPtr->lit = ssPtr->litStart;
ssPtr->sequences = ssPtr->sequencesStart;
- ssPtr->longLengthID = 0;
+ ssPtr->longLengthType = ZSTD_llt_none;
}
typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
@@ -2247,7 +2791,11 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
/* Assert that we have correctly flushed the ctx params into the ms's copy */
ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
- ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
+ if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {
+ ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);
+ } else {
+ ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
+ }
return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
}
ZSTD_resetSeqStore(&(zc->seqStore));
@@ -2263,10 +2811,10 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
/* limited update after a very long match */
{ const BYTE* const base = ms->window.base;
const BYTE* const istart = (const BYTE*)src;
- const U32 current = (U32)(istart-base);
+ const U32 curr = (U32)(istart-base);
if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1)); /* ensure no overflow */
- if (current > ms->nextToUpdate + 384)
- ms->nextToUpdate = current - MIN(192, (U32)(current - ms->nextToUpdate - 384));
+ if (curr > ms->nextToUpdate + 384)
+ ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384));
}
/* select and store sequences */
@@ -2283,10 +2831,11 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
ZSTD_ldm_blockCompress(&zc->externSeqStore,
ms, &zc->seqStore,
zc->blockState.nextCBlock->rep,
+ zc->appliedParams.useRowMatchFinder,
src, srcSize);
assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
} else if (zc->appliedParams.ldmParams.enableLdm) {
- rawSeqStore_t ldmSeqStore = {NULL, 0, 0, 0};
+ rawSeqStore_t ldmSeqStore = kNullRawSeqStore;
ldmSeqStore.seq = zc->ldmSequences;
ldmSeqStore.capacity = zc->maxNbLdmSequences;
@@ -2299,10 +2848,14 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
ZSTD_ldm_blockCompress(&ldmSeqStore,
ms, &zc->seqStore,
zc->blockState.nextCBlock->rep,
+ zc->appliedParams.useRowMatchFinder,
src, srcSize);
assert(ldmSeqStore.pos == ldmSeqStore.size);
} else { /* not long range mode */
- ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
+ ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy,
+ zc->appliedParams.useRowMatchFinder,
+ dictMode);
+ ms->ldmSeqStore = NULL;
lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
}
{ const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
@@ -2314,59 +2867,72 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
{
const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
- const seqDef* seqs = seqStore->sequencesStart;
- size_t seqsSize = seqStore->sequences - seqs;
+ const seqDef* seqStoreSeqs = seqStore->sequencesStart;
+ size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;
+ size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);
+ size_t literalsRead = 0;
+ size_t lastLLSize;
ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
- size_t i; size_t position; int repIdx;
+ size_t i;
+ repcodes_t updatedRepcodes;
assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
- for (i = 0, position = 0; i < seqsSize; ++i) {
- outSeqs[i].offset = seqs[i].offset;
- outSeqs[i].litLength = seqs[i].litLength;
- outSeqs[i].matchLength = seqs[i].matchLength + MINMATCH;
+ /* Ensure we have enough space for last literals "sequence" */
+ assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
+ ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ for (i = 0; i < seqStoreSeqSize; ++i) {
+ U32 rawOffset = seqStoreSeqs[i].offset - ZSTD_REP_NUM;
+ outSeqs[i].litLength = seqStoreSeqs[i].litLength;
+ outSeqs[i].matchLength = seqStoreSeqs[i].matchLength + MINMATCH;
+ outSeqs[i].rep = 0;
if (i == seqStore->longLengthPos) {
- if (seqStore->longLengthID == 1) {
+ if (seqStore->longLengthType == ZSTD_llt_literalLength) {
outSeqs[i].litLength += 0x10000;
- } else if (seqStore->longLengthID == 2) {
+ } else if (seqStore->longLengthType == ZSTD_llt_matchLength) {
outSeqs[i].matchLength += 0x10000;
}
}
- if (outSeqs[i].offset <= ZSTD_REP_NUM) {
- outSeqs[i].rep = outSeqs[i].offset;
- repIdx = (unsigned int)i - outSeqs[i].offset;
-
- if (outSeqs[i].litLength == 0) {
- if (outSeqs[i].offset < 3) {
- --repIdx;
+ if (seqStoreSeqs[i].offset <= ZSTD_REP_NUM) {
+ /* Derive the correct offset corresponding to a repcode */
+ outSeqs[i].rep = seqStoreSeqs[i].offset;
+ if (outSeqs[i].litLength != 0) {
+ rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];
+ } else {
+ if (outSeqs[i].rep == 3) {
+ rawOffset = updatedRepcodes.rep[0] - 1;
} else {
- repIdx = (unsigned int)i - 1;
+ rawOffset = updatedRepcodes.rep[outSeqs[i].rep];
}
- ++outSeqs[i].rep;
- }
- assert(repIdx >= -3);
- outSeqs[i].offset = repIdx >= 0 ? outSeqs[repIdx].offset : repStartValue[-repIdx - 1];
- if (outSeqs[i].rep == 4) {
- --outSeqs[i].offset;
}
- } else {
- outSeqs[i].offset -= ZSTD_REP_NUM;
}
-
- position += outSeqs[i].litLength;
- outSeqs[i].matchPos = (unsigned int)position;
- position += outSeqs[i].matchLength;
+ outSeqs[i].offset = rawOffset;
+ /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode
+ so we provide seqStoreSeqs[i].offset - 1 */
+ updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep,
+ seqStoreSeqs[i].offset - 1,
+ seqStoreSeqs[i].litLength == 0);
+ literalsRead += outSeqs[i].litLength;
}
- zc->seqCollector.seqIndex += seqsSize;
+ /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
+ * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
+ * for the block boundary, according to the API.
+ */
+ assert(seqStoreLiteralsSize >= literalsRead);
+ lastLLSize = seqStoreLiteralsSize - literalsRead;
+ outSeqs[i].litLength = (U32)lastLLSize;
+ outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0;
+ seqStoreSeqSize++;
+ zc->seqCollector.seqIndex += seqStoreSeqSize;
}
-size_t ZSTD_getSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
- size_t outSeqsSize, const void* src, size_t srcSize)
+size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
+ size_t outSeqsSize, const void* src, size_t srcSize)
{
const size_t dstCapacity = ZSTD_compressBound(srcSize);
- void* dst = ZSTD_malloc(dstCapacity, ZSTD_defaultCMem);
+ void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
SeqCollector seqCollector;
RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
@@ -2378,16 +2944,47 @@ size_t ZSTD_getSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
zc->seqCollector = seqCollector;
ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
- ZSTD_free(dst, ZSTD_defaultCMem);
+ ZSTD_customFree(dst, ZSTD_defaultCMem);
return zc->seqCollector.seqIndex;
}
-/* Returns true if the given block is a RLE block */
-static int ZSTD_isRLE(const BYTE *ip, size_t length) {
+size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize) {
+ size_t in = 0;
+ size_t out = 0;
+ for (; in < seqsSize; ++in) {
+ if (sequences[in].offset == 0 && sequences[in].matchLength == 0) {
+ if (in != seqsSize - 1) {
+ sequences[in+1].litLength += sequences[in].litLength;
+ }
+ } else {
+ sequences[out] = sequences[in];
+ ++out;
+ }
+ }
+ return out;
+}
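
ZSTD_mergeBlockDelimiters() collapses the per-block delimiter entries (offset == 0, matchLength == 0) that ZSTD_generateSequences() emits. A minimal usage sketch, assuming the experimental (ZSTD_STATIC_LINKING_ONLY) declarations from zstd.h; error handling is abbreviated and the helper name is illustrative:

#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_generateSequences() is experimental API */
#include <zstd.h>

/* Sketch: collect sequences for `src`, then drop the (of:0, ml:0) block delimiters. */
static size_t collectMergedSequences(const void* src, size_t srcSize,
                                     ZSTD_Sequence* seqs, size_t seqsCapacity)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t nbSeqs;
    if (cctx == NULL) return 0;   /* treat as failure in real code */
    nbSeqs = ZSTD_generateSequences(cctx, seqs, seqsCapacity, src, srcSize);
    ZSTD_freeCCtx(cctx);
    if (ZSTD_isError(nbSeqs)) return nbSeqs;
    /* Each block ends with a sequence whose offset and matchLength are 0;
     * merging folds its litLength into the following sequence. */
    return ZSTD_mergeBlockDelimiters(seqs, nbSeqs);
}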
+
+/* Unrolled loop reading four size_t words of input at a time. Returns 1 if the block is RLE, 0 if not. */
+static int ZSTD_isRLE(const BYTE* src, size_t length) {
+ const BYTE* ip = src;
+ const BYTE value = ip[0];
+ const size_t valueST = (size_t)((U64)value * 0x0101010101010101ULL);
+ const size_t unrollSize = sizeof(size_t) * 4;
+ const size_t unrollMask = unrollSize - 1;
+ const size_t prefixLength = length & unrollMask;
size_t i;
- if (length < 2) return 1;
- for (i = 1; i < length; ++i) {
- if (ip[0] != ip[i]) return 0;
+ size_t u;
+ if (length == 1) return 1;
+ /* Check if prefix is RLE first before using unrolled loop */
+ if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {
+ return 0;
+ }
+ for (i = prefixLength; i != length; i += unrollSize) {
+ for (u = 0; u < unrollSize; u += sizeof(size_t)) {
+ if (MEM_readST(ip + i + u) != valueST) {
+ return 0;
+ }
+ }
}
return 1;
}
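
The check above splats the first byte across a size_t and compares whole words after validating the unaligned prefix. A standalone, portable sketch of the same word-at-a-time idea (memset/memcpy stand in for the splat and MEM_readST; this is not the exact zstd routine):

#include <stddef.h>
#include <string.h>

/* Sketch: returns 1 if all `length` bytes of `src` equal src[0], 0 otherwise. */
static int isAllSameByte(const unsigned char* src, size_t length)
{
    size_t splat;
    size_t i = 0;
    if (length < 2) return 1;                      /* 0 or 1 byte is trivially RLE */
    memset(&splat, src[0], sizeof(splat));         /* splat the byte across one word */
    for (; i + sizeof(splat) <= length; i += sizeof(splat)) {
        size_t word;
        memcpy(&word, src + i, sizeof(word));      /* unaligned-safe word read */
        if (word != splat) return 0;
    }
    for (; i < length; ++i)                        /* remaining tail bytes */
        if (src[i] != src[0]) return 0;
    return 1;
}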
@@ -2404,11 +3001,713 @@ static int ZSTD_maybeRLE(seqStore_t const* seqStore)
return nbSeqs < 4 && nbLits < 10;
}
-static void ZSTD_confirmRepcodesAndEntropyTables(ZSTD_CCtx* zc)
+static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs)
+{
+ ZSTD_compressedBlockState_t* const tmp = bs->prevCBlock;
+ bs->prevCBlock = bs->nextCBlock;
+ bs->nextCBlock = tmp;
+}
+
+/* Writes the block header */
+static void writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock) {
+ U32 const cBlockHeader = cSize == 1 ?
+ lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
+ lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
+ MEM_writeLE24(op, cBlockHeader);
+ DEBUGLOG(3, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock);
+}
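
writeBlockHeader() packs the standard 3-byte zstd block header: bit 0 is the last-block flag, bits 1-2 the block type, and bits 3-23 the size (for bt_rle the regenerated size, since the payload is a single byte). A small decoding sketch of the same layout, for illustration only:

#include <stddef.h>

typedef struct { unsigned lastBlock; unsigned blockType; size_t blockSize; } BlockHeaderFields;

/* Sketch: decode the little-endian 24-bit header written by writeBlockHeader(). */
static BlockHeaderFields decodeBlockHeader(const unsigned char hdr[3])
{
    unsigned const h = (unsigned)hdr[0] | ((unsigned)hdr[1] << 8) | ((unsigned)hdr[2] << 16);
    BlockHeaderFields bh;
    bh.lastBlock = h & 1;           /* bit 0 */
    bh.blockType = (h >> 1) & 3;    /* bits 1-2: raw / rle / compressed / reserved */
    bh.blockSize = h >> 3;          /* bits 3-23: content size (regenerated size for rle) */
    return bh;
}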
+
+/** ZSTD_buildBlockEntropyStats_literals() :
+ * Builds entropy for the literals.
+ * Stores literals block type (raw, rle, compressed, repeat) and
+ * huffman description table to hufMetadata.
+ * Requires ENTROPY_WORKSPACE_SIZE workspace
+ * @return : size of huffman description table or error code */
+static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize,
+ const ZSTD_hufCTables_t* prevHuf,
+ ZSTD_hufCTables_t* nextHuf,
+ ZSTD_hufCTablesMetadata_t* hufMetadata,
+ const int disableLiteralsCompression,
+ void* workspace, size_t wkspSize)
+{
+ BYTE* const wkspStart = (BYTE*)workspace;
+ BYTE* const wkspEnd = wkspStart + wkspSize;
+ BYTE* const countWkspStart = wkspStart;
+ unsigned* const countWksp = (unsigned*)workspace;
+ const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
+ BYTE* const nodeWksp = countWkspStart + countWkspSize;
+ const size_t nodeWkspSize = wkspEnd-nodeWksp;
+ unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+ unsigned huffLog = HUF_TABLELOG_DEFAULT;
+ HUF_repeat repeat = prevHuf->repeatMode;
+ DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_literals (srcSize=%zu)", srcSize);
+
+ /* Prepare nextEntropy assuming reusing the existing table */
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+
+ if (disableLiteralsCompression) {
+ DEBUGLOG(5, "set_basic - disabled");
+ hufMetadata->hType = set_basic;
+ return 0;
+ }
+
+ /* small ? don't even attempt compression (speed opt) */
+#ifndef COMPRESS_LITERALS_SIZE_MIN
+#define COMPRESS_LITERALS_SIZE_MIN 63
+#endif
+ { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
+ if (srcSize <= minLitSize) {
+ DEBUGLOG(5, "set_basic - too small");
+ hufMetadata->hType = set_basic;
+ return 0;
+ }
+ }
+
+ /* Scan input and build symbol stats */
+ { size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize);
+ FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
+ if (largest == srcSize) {
+ DEBUGLOG(5, "set_rle");
+ hufMetadata->hType = set_rle;
+ return 0;
+ }
+ if (largest <= (srcSize >> 7)+4) {
+ DEBUGLOG(5, "set_basic - no gain");
+ hufMetadata->hType = set_basic;
+ return 0;
+ }
+ }
+
+ /* Validate the previous Huffman table */
+ if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
+ repeat = HUF_repeat_none;
+ }
+
+ /* Build Huffman Tree */
+ ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
+ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
+ { size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
+ maxSymbolValue, huffLog,
+ nodeWksp, nodeWkspSize);
+ FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
+ huffLog = (U32)maxBits;
+ { /* Build and write the CTable */
+ size_t const newCSize = HUF_estimateCompressedSize(
+ (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
+ size_t const hSize = HUF_writeCTable_wksp(
+ hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
+ (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog,
+ nodeWksp, nodeWkspSize);
+ /* Check against repeating the previous CTable */
+ if (repeat != HUF_repeat_none) {
+ size_t const oldCSize = HUF_estimateCompressedSize(
+ (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
+ if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
+ DEBUGLOG(5, "set_repeat - smaller");
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ hufMetadata->hType = set_repeat;
+ return 0;
+ }
+ }
+ if (newCSize + hSize >= srcSize) {
+ DEBUGLOG(5, "set_basic - no gains");
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ hufMetadata->hType = set_basic;
+ return 0;
+ }
+ DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
+ hufMetadata->hType = set_compressed;
+ nextHuf->repeatMode = HUF_repeat_check;
+ return hSize;
+ }
+ }
+}
+
+
+/* ZSTD_buildDummySequencesStatistics():
+ * Returns a ZSTD_symbolEncodingTypeStats_t with all encoding types as set_basic,
+ * and updates nextEntropy to the appropriate repeatMode.
+ */
+static ZSTD_symbolEncodingTypeStats_t
+ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) {
+ ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0};
+ nextEntropy->litlength_repeatMode = FSE_repeat_none;
+ nextEntropy->offcode_repeatMode = FSE_repeat_none;
+ nextEntropy->matchlength_repeatMode = FSE_repeat_none;
+ return stats;
+}
+
+/** ZSTD_buildBlockEntropyStats_sequences() :
+ * Builds entropy for the sequences.
+ * Stores symbol compression modes and fse table to fseMetadata.
+ * Requires ENTROPY_WORKSPACE_SIZE wksp.
+ * @return : size of fse tables or error code */
+static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr,
+ const ZSTD_fseCTables_t* prevEntropy,
+ ZSTD_fseCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_fseCTablesMetadata_t* fseMetadata,
+ void* workspace, size_t wkspSize)
{
- ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
- zc->blockState.prevCBlock = zc->blockState.nextCBlock;
- zc->blockState.nextCBlock = tmp;
+ ZSTD_strategy const strategy = cctxParams->cParams.strategy;
+ size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
+ BYTE* const ostart = fseMetadata->fseTablesBuffer;
+ BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
+ BYTE* op = ostart;
+ unsigned* countWorkspace = (unsigned*)workspace;
+ unsigned* entropyWorkspace = countWorkspace + (MaxSeq + 1);
+ size_t entropyWorkspaceSize = wkspSize - (MaxSeq + 1) * sizeof(*countWorkspace);
+ ZSTD_symbolEncodingTypeStats_t stats;
+
+ DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_sequences (nbSeq=%zu)", nbSeq);
+ stats = nbSeq != 0 ? ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
+ prevEntropy, nextEntropy, op, oend,
+ strategy, countWorkspace,
+ entropyWorkspace, entropyWorkspaceSize)
+ : ZSTD_buildDummySequencesStatistics(nextEntropy);
+ FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
+ fseMetadata->llType = (symbolEncodingType_e) stats.LLtype;
+ fseMetadata->ofType = (symbolEncodingType_e) stats.Offtype;
+ fseMetadata->mlType = (symbolEncodingType_e) stats.MLtype;
+ fseMetadata->lastCountSize = stats.lastCountSize;
+ return stats.size;
+}
+
+
+/** ZSTD_buildBlockEntropyStats() :
+ * Builds entropy for the block.
+ * Requires workspace size ENTROPY_WORKSPACE_SIZE
+ *
+ * @return : 0 on success or error code
+ */
+size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize)
+{
+ size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart;
+ entropyMetadata->hufMetadata.hufDesSize =
+ ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize,
+ &prevEntropy->huf, &nextEntropy->huf,
+ &entropyMetadata->hufMetadata,
+ ZSTD_disableLiteralsCompression(cctxParams),
+ workspace, wkspSize);
+ FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed");
+ entropyMetadata->fseMetadata.fseTablesSize =
+ ZSTD_buildBlockEntropyStats_sequences(seqStorePtr,
+ &prevEntropy->fse, &nextEntropy->fse,
+ cctxParams,
+ &entropyMetadata->fseMetadata,
+ workspace, wkspSize);
+ FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildBlockEntropyStats_sequences failed");
+ return 0;
+}
+
+/* Returns the size estimate for the literals section (header + content) of a block */
+static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize,
+ const ZSTD_hufCTables_t* huf,
+ const ZSTD_hufCTablesMetadata_t* hufMetadata,
+ void* workspace, size_t wkspSize,
+ int writeEntropy)
+{
+ unsigned* const countWksp = (unsigned*)workspace;
+ unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+ size_t literalSectionHeaderSize = 3 + (litSize >= 1 KB) + (litSize >= 16 KB);
+ U32 singleStream = litSize < 256;
+
+ if (hufMetadata->hType == set_basic) return litSize;
+ else if (hufMetadata->hType == set_rle) return 1;
+ else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
+ size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
+ if (ZSTD_isError(largest)) return litSize;
+ { size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
+ if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
+ if (!singleStream) cLitSizeEstimate += 6; /* multi-stream huffman uses 6-byte jump table */
+ return cLitSizeEstimate + literalSectionHeaderSize;
+ } }
+ assert(0); /* impossible */
+ return 0;
+}
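
For the compressed/repeat cases the estimate above charges a 3- to 5-byte literals section header plus a 6-byte jump table when four-stream Huffman is used (litSize >= 256). A tiny helper mirroring that overhead arithmetic; it reflects the estimator above, not the exact encoder header sizing:

#include <stddef.h>

/* Sketch: fixed overhead charged by ZSTD_estimateBlockSize_literal() for a
 * Huffman-compressed literals section of `litSize` input bytes. */
static size_t literalsOverheadEstimate(size_t litSize)
{
    size_t const headerSize = 3 + (litSize >= 1024) + (litSize >= 16 * 1024);
    size_t const jumpTable  = (litSize < 256) ? 0 : 6;  /* 4-stream mode adds a 6-byte jump table */
    return headerSize + jumpTable;
}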
+
+/* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */
+static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type,
+ const BYTE* codeTable, size_t nbSeq, unsigned maxCode,
+ const FSE_CTable* fseCTable,
+ const U32* additionalBits,
+ short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+ void* workspace, size_t wkspSize)
+{
+ unsigned* const countWksp = (unsigned*)workspace;
+ const BYTE* ctp = codeTable;
+ const BYTE* const ctStart = ctp;
+ const BYTE* const ctEnd = ctStart + nbSeq;
+ size_t cSymbolTypeSizeEstimateInBits = 0;
+ unsigned max = maxCode;
+
+ HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */
+ if (type == set_basic) {
+ /* We selected this encoding type, so it must be valid. */
+ assert(max <= defaultMax);
+ (void)defaultMax;
+ cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max);
+ } else if (type == set_rle) {
+ cSymbolTypeSizeEstimateInBits = 0;
+ } else if (type == set_compressed || type == set_repeat) {
+ cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
+ }
+ if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) {
+ return nbSeq * 10;
+ }
+ while (ctp < ctEnd) {
+ if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
+ else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
+ ctp++;
+ }
+ return cSymbolTypeSizeEstimateInBits >> 3;
+}
+
+/* Returns the size estimate for the sequences section (header + content) of a block */
+static size_t ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable,
+ const BYTE* llCodeTable,
+ const BYTE* mlCodeTable,
+ size_t nbSeq,
+ const ZSTD_fseCTables_t* fseTables,
+ const ZSTD_fseCTablesMetadata_t* fseMetadata,
+ void* workspace, size_t wkspSize,
+ int writeEntropy)
+{
+ size_t sequencesSectionHeaderSize = 1 /* seqHead */ + 1 /* min seqSize size */ + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ);
+ size_t cSeqSizeEstimate = 0;
+ cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, nbSeq, MaxOff,
+ fseTables->offcodeCTable, NULL,
+ OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+ workspace, wkspSize);
+ cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->llType, llCodeTable, nbSeq, MaxLL,
+ fseTables->litlengthCTable, LL_bits,
+ LL_defaultNorm, LL_defaultNormLog, MaxLL,
+ workspace, wkspSize);
+ cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, nbSeq, MaxML,
+ fseTables->matchlengthCTable, ML_bits,
+ ML_defaultNorm, ML_defaultNormLog, MaxML,
+ workspace, wkspSize);
+ if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
+ return cSeqSizeEstimate + sequencesSectionHeaderSize;
+}
+
+/* Returns the size estimate for a given stream of literals, of, ll, ml */
+static size_t ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize,
+ const BYTE* ofCodeTable,
+ const BYTE* llCodeTable,
+ const BYTE* mlCodeTable,
+ size_t nbSeq,
+ const ZSTD_entropyCTables_t* entropy,
+ const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize,
+ int writeLitEntropy, int writeSeqEntropy) {
+ size_t const literalsSize = ZSTD_estimateBlockSize_literal(literals, litSize,
+ &entropy->huf, &entropyMetadata->hufMetadata,
+ workspace, wkspSize, writeLitEntropy);
+ size_t const seqSize = ZSTD_estimateBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
+ nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
+ workspace, wkspSize, writeSeqEntropy);
+ return seqSize + literalsSize + ZSTD_blockHeaderSize;
+}
+
+/* Builds entropy statistics and uses them for blocksize estimation.
+ *
+ * Returns the estimated compressed size of the seqStore, or a zstd error.
+ */
+static size_t ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, const ZSTD_CCtx* zc) {
+ ZSTD_entropyCTablesMetadata_t entropyMetadata;
+ FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(seqStore,
+ &zc->blockState.prevCBlock->entropy,
+ &zc->blockState.nextCBlock->entropy,
+ &zc->appliedParams,
+ &entropyMetadata,
+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
+ return ZSTD_estimateBlockSize(seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart),
+ seqStore->ofCode, seqStore->llCode, seqStore->mlCode,
+ (size_t)(seqStore->sequences - seqStore->sequencesStart),
+ &zc->blockState.nextCBlock->entropy, &entropyMetadata, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE,
+ (int)(entropyMetadata.hufMetadata.hType == set_compressed), 1);
+}
+
+/* Returns the number of literal bytes represented in a seqStore */
+static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore) {
+ size_t literalsBytes = 0;
+ size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart;
+ size_t i;
+ for (i = 0; i < nbSeqs; ++i) {
+ seqDef seq = seqStore->sequencesStart[i];
+ literalsBytes += seq.litLength;
+ if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_literalLength) {
+ literalsBytes += 0x10000;
+ }
+ }
+ return literalsBytes;
+}
+
+/* Returns the number of match bytes represented in a seqStore */
+static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) {
+ size_t matchBytes = 0;
+ size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart;
+ size_t i;
+ for (i = 0; i < nbSeqs; ++i) {
+ seqDef seq = seqStore->sequencesStart[i];
+ matchBytes += seq.matchLength + MINMATCH;
+ if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) {
+ matchBytes += 0x10000;
+ }
+ }
+ return matchBytes;
+}
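
Together these two counters tell the block splitter how many source bytes a seqStore chunk regenerates. A trivial sketch combining them, assuming the two helpers above and the seqStore_t type from zstd_internal.h; trailing literals after the last sequence are handled separately by the splitter:

/* Sketch: source bytes covered by the sequences of a seqStore chunk
 * (excluding any last literals after the final sequence). */
static size_t seqStoreChunkSrcBytes(const seqStore_t* chunk)
{
    return ZSTD_countSeqStoreLiteralsBytes(chunk) + ZSTD_countSeqStoreMatchBytes(chunk);
}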
+
+/* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx).
+ * Stores the result in resultSeqStore.
+ */
+static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
+ const seqStore_t* originalSeqStore,
+ size_t startIdx, size_t endIdx) {
+ BYTE* const litEnd = originalSeqStore->lit;
+ size_t literalsBytes;
+ size_t literalsBytesPreceding = 0;
+
+ *resultSeqStore = *originalSeqStore;
+ if (startIdx > 0) {
+ resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx;
+ literalsBytesPreceding = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
+ }
+
+ /* Move longLengthPos into the correct position if necessary */
+ if (originalSeqStore->longLengthType != ZSTD_llt_none) {
+ if (originalSeqStore->longLengthPos < startIdx || originalSeqStore->longLengthPos > endIdx) {
+ resultSeqStore->longLengthType = ZSTD_llt_none;
+ } else {
+ resultSeqStore->longLengthPos -= (U32)startIdx;
+ }
+ }
+ resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx;
+ resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx;
+ literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
+ resultSeqStore->litStart += literalsBytesPreceding;
+ if (endIdx == (size_t)(originalSeqStore->sequences - originalSeqStore->sequencesStart)) {
+ /* This accounts for possible last literals if the derived chunk reaches the end of the block */
+ resultSeqStore->lit = litEnd;
+ } else {
+ resultSeqStore->lit = resultSeqStore->litStart+literalsBytes;
+ }
+ resultSeqStore->llCode += startIdx;
+ resultSeqStore->mlCode += startIdx;
+ resultSeqStore->ofCode += startIdx;
+}
+
+/**
+ * Returns the raw offset represented by the combination of offCode, ll0, and repcode history.
+ * offCode must be an offCode representing a repcode, therefore in the range of [0, 2].
+ */
+static U32 ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, const U32 ll0) {
+ U32 const adjustedOffCode = offCode + ll0;
+ assert(offCode < ZSTD_REP_NUM);
+ if (adjustedOffCode == ZSTD_REP_NUM) {
+ /* litlength == 0 and offCode == 2 implies selection of first repcode - 1 */
+ assert(rep[0] > 0);
+ return rep[0] - 1;
+ }
+ return rep[adjustedOffCode];
+}
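
A concrete reading of the rule above, with repcode history {8, 16, 24}: offCode 0 selects 8, offCode 1 selects 16, a zero literal length (ll0) shifts the selection by one, and offCode 2 with ll0 maps to rep[0] - 1. A small self-checking sketch of those cases, assuming ZSTD_REP_NUM == 3 and the helper above:

#include <assert.h>

/* Sketch: exercises the repcode-to-raw-offset mapping implemented above. */
static void checkRepcodeResolution(void)
{
    U32 const rep[3] = { 8, 16, 24 };
    assert(ZSTD_resolveRepcodeToRawOffset(rep, 0, 0) ==  8);   /* repcode 1 */
    assert(ZSTD_resolveRepcodeToRawOffset(rep, 1, 0) == 16);   /* repcode 2 */
    assert(ZSTD_resolveRepcodeToRawOffset(rep, 0, 1) == 16);   /* ll0 shifts selection by one */
    assert(ZSTD_resolveRepcodeToRawOffset(rep, 2, 1) ==  7);   /* repcode 3 + ll0 => rep[0] - 1 */
}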
+
+/**
+ * ZSTD_seqStore_resolveOffCodes() reconciles any possible divergences in offset history that may arise
+ * due to emission of RLE/raw blocks that disturb the offset history, and replaces any repcodes within
+ * the seqStore that may be invalid.
+ *
+ * dRepcodes are updated as would be on the decompression side. cRepcodes are updated exactly in
+ * accordance with the seqStore.
+ */
+static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes,
+ seqStore_t* const seqStore, U32 const nbSeq) {
+ U32 idx = 0;
+ for (; idx < nbSeq; ++idx) {
+ seqDef* const seq = seqStore->sequencesStart + idx;
+ U32 const ll0 = (seq->litLength == 0);
+ U32 offCode = seq->offset - 1;
+ assert(seq->offset > 0);
+ if (offCode <= ZSTD_REP_MOVE) {
+ U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offCode, ll0);
+ U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offCode, ll0);
+ /* Adjust simulated decompression repcode history if we come across a mismatch. Replace
+ * the repcode with the offset it actually references, determined by the compression
+ * repcode history.
+ */
+ if (dRawOffset != cRawOffset) {
+ seq->offset = cRawOffset + ZSTD_REP_NUM;
+ }
+ }
+ /* Compression repcode history is always updated with values directly from the unmodified seqStore.
+ * Decompression repcode history may use modified seq->offset value taken from compression repcode history.
+ */
+ *dRepcodes = ZSTD_updateRep(dRepcodes->rep, seq->offset - 1, ll0);
+ *cRepcodes = ZSTD_updateRep(cRepcodes->rep, offCode, ll0);
+ }
+}
+
+/* ZSTD_compressSeqStore_singleBlock():
+ * Compresses a seqStore into a block with a block header, into the buffer dst.
+ *
+ * Returns the total size of that block (including header) or a ZSTD error code.
+ */
+static size_t ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,
+ repcodes_t* const dRep, repcodes_t* const cRep,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ U32 lastBlock, U32 isPartition) {
+ const U32 rleMaxLength = 25;
+ BYTE* op = (BYTE*)dst;
+ const BYTE* ip = (const BYTE*)src;
+ size_t cSize;
+ size_t cSeqsSize;
+
+ /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */
+ repcodes_t const dRepOriginal = *dRep;
+ if (isPartition)
+ ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart));
+
+ cSeqsSize = ZSTD_entropyCompressSeqStore(seqStore,
+ &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
+ &zc->appliedParams,
+ op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize,
+ srcSize,
+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ zc->bmi2);
+ FORWARD_IF_ERROR(cSeqsSize, "ZSTD_entropyCompressSeqStore failed!");
+
+ if (!zc->isFirstBlock &&
+ cSeqsSize < rleMaxLength &&
+ ZSTD_isRLE((BYTE const*)src, srcSize)) {
+        /* We don't want to emit our first block as an RLE block even if it qualifies because
+         * doing so will cause the decoder (cli only) to throw a "should consume all input" error.
+ * This is only an issue for zstd <= v1.4.3
+ */
+ cSeqsSize = 1;
+ }
+
+ if (zc->seqCollector.collectSequences) {
+ ZSTD_copyBlockSequences(zc);
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
+ return 0;
+ }
+
+ if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+ if (cSeqsSize == 0) {
+ cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "Nocompress block failed");
+ DEBUGLOG(4, "Writing out nocompress block, size: %zu", cSize);
+ *dRep = dRepOriginal; /* reset simulated decompression repcode history */
+ } else if (cSeqsSize == 1) {
+ cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "RLE compress block failed");
+ DEBUGLOG(4, "Writing out RLE block, size: %zu", cSize);
+ *dRep = dRepOriginal; /* reset simulated decompression repcode history */
+ } else {
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
+ writeBlockHeader(op, cSeqsSize, srcSize, lastBlock);
+ cSize = ZSTD_blockHeaderSize + cSeqsSize;
+ DEBUGLOG(4, "Writing out compressed block, size: %zu", cSize);
+ }
+ return cSize;
+}
+
+/* Struct to keep track of where we are in our recursive calls. */
+typedef struct {
+ U32* splitLocations; /* Array of split indices */
+ size_t idx; /* The current index within splitLocations being worked on */
+} seqStoreSplits;
+
+#define MIN_SEQUENCES_BLOCK_SPLITTING 300
+#define MAX_NB_SPLITS 196
+
+/* Helper function to perform the recursive search for block splits.
+ * Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half.
+ * If advantageous to split, then we recurse down the two sub-blocks. If not, or if an error occurred in estimation, then
+ * we do not recurse.
+ *
+ * Note: The recursion depth is capped by a heuristic minimum number of sequences, defined by MIN_SEQUENCES_BLOCK_SPLITTING.
+ * In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING).
+ * In practice, recursion depth usually doesn't go beyond 4.
+ *
+ * Furthermore, the number of splits is capped by MAX_NB_SPLITS. At MAX_NB_SPLITS == 196 with the current blockSize
+ * maximum of 128 KB, this value is actually impossible to reach.
+ */
+static void ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx,
+ const ZSTD_CCtx* zc, const seqStore_t* origSeqStore) {
+ seqStore_t fullSeqStoreChunk;
+ seqStore_t firstHalfSeqStore;
+ seqStore_t secondHalfSeqStore;
+ size_t estimatedOriginalSize;
+ size_t estimatedFirstHalfSize;
+ size_t estimatedSecondHalfSize;
+ size_t midIdx = (startIdx + endIdx)/2;
+
+ if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= MAX_NB_SPLITS) {
+ return;
+ }
+ ZSTD_deriveSeqStoreChunk(&fullSeqStoreChunk, origSeqStore, startIdx, endIdx);
+ ZSTD_deriveSeqStoreChunk(&firstHalfSeqStore, origSeqStore, startIdx, midIdx);
+ ZSTD_deriveSeqStoreChunk(&secondHalfSeqStore, origSeqStore, midIdx, endIdx);
+ estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(&fullSeqStoreChunk, zc);
+ estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(&firstHalfSeqStore, zc);
+ estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(&secondHalfSeqStore, zc);
+ DEBUGLOG(5, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu",
+ estimatedOriginalSize, estimatedFirstHalfSize, estimatedSecondHalfSize);
+ if (ZSTD_isError(estimatedOriginalSize) || ZSTD_isError(estimatedFirstHalfSize) || ZSTD_isError(estimatedSecondHalfSize)) {
+ return;
+ }
+ if (estimatedFirstHalfSize + estimatedSecondHalfSize < estimatedOriginalSize) {
+ ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore);
+ splits->splitLocations[splits->idx] = (U32)midIdx;
+ splits->idx++;
+ ZSTD_deriveBlockSplitsHelper(splits, midIdx, endIdx, zc, origSeqStore);
+ }
+}
+
+/* Base recursive function. Populates a table with intra-block partition indices that can improve compression ratio.
+ *
+ * Returns the number of splits made (which equals the size of the partition table - 1).
+ */
+static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) {
+ seqStoreSplits splits = {partitions, 0};
+ if (nbSeq <= 4) {
+ DEBUGLOG(4, "ZSTD_deriveBlockSplits: Too few sequences to split");
+        /* Refuse to try to split anything with 4 or fewer sequences */
+ return 0;
+ }
+ ZSTD_deriveBlockSplitsHelper(&splits, 0, nbSeq, zc, &zc->seqStore);
+ splits.splitLocations[splits.idx] = nbSeq;
+ DEBUGLOG(5, "ZSTD_deriveBlockSplits: final nb partitions: %zu", splits.idx+1);
+ return splits.idx;
+}
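
The returned table is consumed as half-open ranges: chunk 0 covers sequences [0, partitions[0]), chunk i covers [partitions[i-1], partitions[i]), and the final entry is the nbSeq sentinel stored above. A small sketch of that walk, mirroring the consumer loop in ZSTD_compressBlock_splitBlock_internal() below:

#include <assert.h>

/* Sketch: visit every partition described by the table from ZSTD_deriveBlockSplits(). */
static void forEachPartition(const U32* partitions, size_t numSplits, U32 nbSeq,
                             void (*visit)(U32 begin, U32 end))
{
    U32 begin = 0;
    size_t i;
    assert(partitions[numSplits] == nbSeq);   /* sentinel written by ZSTD_deriveBlockSplits() */
    for (i = 0; i <= numSplits; ++i) {
        visit(begin, partitions[i]);
        begin = partitions[i];
    }
}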
+
+/* ZSTD_compressBlock_splitBlock():
+ * Attempts to split a given block into multiple blocks to improve compression ratio.
+ *
+ * Returns combined size of all blocks (which includes headers), or a ZSTD error code.
+ */
+static size_t ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity,
+ const void* src, size_t blockSize, U32 lastBlock, U32 nbSeq) {
+ size_t cSize = 0;
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ U32 partitions[MAX_NB_SPLITS];
+ size_t i = 0;
+ size_t srcBytesTotal = 0;
+ size_t numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq);
+ seqStore_t nextSeqStore;
+ seqStore_t currSeqStore;
+
+ /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history
+ * may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two
+ * separate repcode histories that simulate repcode history on compression and decompression side,
+ * and use the histories to determine whether we must replace a particular repcode with its raw offset.
+ *
+ * 1) cRep gets updated for each partition, regardless of whether the block was emitted as uncompressed
+ * or RLE. This allows us to retrieve the offset value that an invalid repcode references within
+ * a nocompress/RLE block.
+ * 2) dRep gets updated only for compressed partitions, and when a repcode gets replaced, will use
+ * the replacement offset value rather than the original repcode to update the repcode history.
+ * dRep also will be the final repcode history sent to the next block.
+ *
+ * See ZSTD_seqStore_resolveOffCodes() for more details.
+ */
+ repcodes_t dRep;
+ repcodes_t cRep;
+ ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
+
+ DEBUGLOG(4, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
+ (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
+ (unsigned)zc->blockState.matchState.nextToUpdate);
+
+ if (numSplits == 0) {
+ size_t cSizeSingleBlock = ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore,
+ &dRep, &cRep,
+ op, dstCapacity,
+ ip, blockSize,
+ lastBlock, 0 /* isPartition */);
+ FORWARD_IF_ERROR(cSizeSingleBlock, "Compressing single block from splitBlock_internal() failed!");
+ DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal: No splits");
+ assert(cSizeSingleBlock <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize);
+ return cSizeSingleBlock;
+ }
+
+ ZSTD_deriveSeqStoreChunk(&currSeqStore, &zc->seqStore, 0, partitions[0]);
+ for (i = 0; i <= numSplits; ++i) {
+ size_t srcBytes;
+ size_t cSizeChunk;
+ U32 const lastPartition = (i == numSplits);
+ U32 lastBlockEntireSrc = 0;
+
+ srcBytes = ZSTD_countSeqStoreLiteralsBytes(&currSeqStore) + ZSTD_countSeqStoreMatchBytes(&currSeqStore);
+ srcBytesTotal += srcBytes;
+ if (lastPartition) {
+ /* This is the final partition, need to account for possible last literals */
+ srcBytes += blockSize - srcBytesTotal;
+ lastBlockEntireSrc = lastBlock;
+ } else {
+ ZSTD_deriveSeqStoreChunk(&nextSeqStore, &zc->seqStore, partitions[i], partitions[i+1]);
+ }
+
+ cSizeChunk = ZSTD_compressSeqStore_singleBlock(zc, &currSeqStore,
+ &dRep, &cRep,
+ op, dstCapacity,
+ ip, srcBytes,
+ lastBlockEntireSrc, 1 /* isPartition */);
+ DEBUGLOG(5, "Estimated size: %zu actual size: %zu", ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(&currSeqStore, zc), cSizeChunk);
+ FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!");
+
+ ip += srcBytes;
+ op += cSizeChunk;
+ dstCapacity -= cSizeChunk;
+ cSize += cSizeChunk;
+ currSeqStore = nextSeqStore;
+ assert(cSizeChunk <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize);
+ }
+ /* cRep and dRep may have diverged during the compression. If so, we use the dRep repcodes
+ * for the next block.
+ */
+ ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(repcodes_t));
+ return cSize;
+}
+
+static size_t ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, U32 lastBlock) {
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ U32 nbSeq;
+ size_t cSize;
+ DEBUGLOG(4, "ZSTD_compressBlock_splitBlock");
+
+ { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
+ FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
+ if (bss == ZSTDbss_noCompress) {
+ if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+ cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
+ DEBUGLOG(4, "ZSTD_compressBlock_splitBlock: Nocompress block");
+ return cSize;
+ }
+ nbSeq = (U32)(zc->seqStore.sequences - zc->seqStore.sequencesStart);
+ }
+
+ assert(zc->appliedParams.splitBlocks == 1);
+ cSize = ZSTD_compressBlock_splitBlock_internal(zc, dst, dstCapacity, src, srcSize, lastBlock, nbSeq);
+ FORWARD_IF_ERROR(cSize, "Splitting blocks failed!");
+ return cSize;
}
static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
@@ -2434,18 +3733,25 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
if (zc->seqCollector.collectSequences) {
ZSTD_copyBlockSequences(zc);
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
return 0;
}
/* encode sequences and literals */
- cSize = ZSTD_compressSequences(&zc->seqStore,
+ cSize = ZSTD_entropyCompressSeqStore(&zc->seqStore,
&zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
&zc->appliedParams,
dst, dstCapacity,
srcSize,
- zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
zc->bmi2);
+ if (zc->seqCollector.collectSequences) {
+ ZSTD_copyBlockSequences(zc);
+ return 0;
+ }
+
+
if (frame &&
/* We don't want to emit our first block as a RLE even if it qualifies because
* doing so will cause the decoder (cli only) to throw a "should consume all input error."
@@ -2461,7 +3767,7 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
out:
if (!ZSTD_isError(cSize) && cSize > 1) {
- ZSTD_confirmRepcodesAndEntropyTables(zc);
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
}
/* We check that dictionaries have offset codes available for the first
* block. After the first block, the offcode table might not have large
@@ -2514,7 +3820,7 @@ static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {
- ZSTD_confirmRepcodesAndEntropyTables(zc);
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
return cSize;
}
}
@@ -2554,9 +3860,9 @@ static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
void const* ip,
void const* iend)
{
- if (ZSTD_window_needOverflowCorrection(ms->window, iend)) {
- U32 const maxDist = (U32)1 << params->cParams.windowLog;
- U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
+ U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
+ U32 const maxDist = (U32)1 << params->cParams.windowLog;
+ if (ZSTD_window_needOverflowCorrection(ms->window, cycleLog, maxDist, ms->loadedDictEnd, ip, iend)) {
U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
@@ -2579,7 +3885,7 @@ static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
* Frame is supposed already started (header already produced)
* @return : compressed size, or an error code
*/
-static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
+static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
U32 lastFrameChunk)
@@ -2593,7 +3899,7 @@ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
- DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
+ DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
if (cctx->appliedParams.fParams.checksumFlag && srcSize)
XXH64_update(&cctx->xxhState, src, srcSize);
@@ -2619,6 +3925,10 @@ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed");
assert(cSize > 0);
assert(cSize <= blockSize + ZSTD_blockHeaderSize);
+ } else if (ZSTD_blockSplitterEnabled(&cctx->appliedParams)) {
+ cSize = ZSTD_compressBlock_splitBlock(cctx, op, dstCapacity, ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_splitBlock failed");
+ assert(cSize > 0 || cctx->seqCollector.collectSequences == 1);
} else {
cSize = ZSTD_compressBlock_internal(cctx,
op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
@@ -2673,7 +3983,6 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
"dst buf is too small to fit worst-case frame header size.");
DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
!params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
-
if (params->format == ZSTD_f_zstd1) {
MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
pos = 4;
@@ -2699,6 +4008,26 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
return pos;
}
+/* ZSTD_writeSkippableFrame() :
+ * Writes out a skippable frame with the specified magic number variant (16 are supported),
+ * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data.
+ *
+ * Returns the total number of bytes written, or a ZSTD error code.
+ */
+size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, unsigned magicVariant) {
+ BYTE* op = (BYTE*)dst;
+ RETURN_ERROR_IF(dstCapacity < srcSize + ZSTD_SKIPPABLEHEADERSIZE /* Skippable frame overhead */,
+ dstSize_tooSmall, "Not enough room for skippable frame");
+ RETURN_ERROR_IF(srcSize > (unsigned)0xFFFFFFFF, srcSize_wrong, "Src size too large for skippable frame");
+ RETURN_ERROR_IF(magicVariant > 15, parameter_outOfBound, "Skippable frame magic number variant not supported");
+
+ MEM_writeLE32(op, (U32)(ZSTD_MAGIC_SKIPPABLE_START + magicVariant));
+ MEM_writeLE32(op+4, (U32)srcSize);
+ ZSTD_memcpy(op+8, src, srcSize);
+ return srcSize + ZSTD_SKIPPABLEHEADERSIZE;
+}
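
The skippable frame is simply an 8-byte header (little-endian magic plus payload size) followed by the payload unchanged, which decoders skip over. A usage sketch that stores an application-defined metadata blob this way, assuming the experimental declaration in zstd.h; the wrapper name is illustrative:

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Sketch: prepend a metadata blob as a skippable frame (magic variant 0..15). */
static size_t prependMetadata(void* dst, size_t dstCapacity,
                              const void* meta, size_t metaSize)
{
    return ZSTD_writeSkippableFrame(dst, dstCapacity, meta, metaSize,
                                    0 /* ZSTD_MAGIC_SKIPPABLE_START + 0 */);
}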
+
/* ZSTD_writeLastEmptyBlock() :
* output an empty Block with end-of-frame mark to complete a frame
* @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
@@ -2725,6 +4054,7 @@ size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSe
cctx->externSeqStore.size = nbSeq;
cctx->externSeqStore.capacity = nbSeq;
cctx->externSeqStore.pos = 0;
+ cctx->externSeqStore.posInSequence = 0;
return 0;
}
@@ -2754,11 +4084,12 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
if (!srcSize) return fhSize; /* do not generate an empty block if no input */
- if (!ZSTD_window_update(&ms->window, src, srcSize)) {
+ if (!ZSTD_window_update(&ms->window, src, srcSize, ms->forceNonContiguous)) {
+ ms->forceNonContiguous = 0;
ms->nextToUpdate = ms->window.dictLimit;
}
if (cctx->appliedParams.ldmParams.enableLdm) {
- ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
+ ZSTD_window_update(&cctx->ldmState.window, src, srcSize, /* forceNonContiguous */ 0);
}
if (!frame) {
@@ -2826,59 +4157,86 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
{
const BYTE* ip = (const BYTE*) src;
const BYTE* const iend = ip + srcSize;
+ int const loadLdmDict = params->ldmParams.enableLdm && ls != NULL;
+
+    /* Assert that the ms params match the params we're being given */
+ ZSTD_assertEqualCParams(params->cParams, ms->cParams);
- ZSTD_window_update(&ms->window, src, srcSize);
+ if (srcSize > ZSTD_CHUNKSIZE_MAX) {
+ /* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX.
+ * Dictionaries right at the edge will immediately trigger overflow
+ * correction, but I don't want to insert extra constraints here.
+ */
+ U32 const maxDictSize = ZSTD_CURRENT_MAX - 1;
+ /* We must have cleared our windows when our source is this large. */
+ assert(ZSTD_window_isEmpty(ms->window));
+ if (loadLdmDict)
+ assert(ZSTD_window_isEmpty(ls->window));
+ /* If the dictionary is too large, only load the suffix of the dictionary. */
+ if (srcSize > maxDictSize) {
+ ip = iend - maxDictSize;
+ src = ip;
+ srcSize = maxDictSize;
+ }
+ }
+
+ DEBUGLOG(4, "ZSTD_loadDictionaryContent(): useRowMatchFinder=%d", (int)params->useRowMatchFinder);
+ ZSTD_window_update(&ms->window, src, srcSize, /* forceNonContiguous */ 0);
ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
+ ms->forceNonContiguous = params->deterministicRefPrefix;
- if (params->ldmParams.enableLdm && ls != NULL) {
- ZSTD_window_update(&ls->window, src, srcSize);
+ if (loadLdmDict) {
+ ZSTD_window_update(&ls->window, src, srcSize, /* forceNonContiguous */ 0);
ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
}
- /* Assert that we the ms params match the params we're being given */
- ZSTD_assertEqualCParams(params->cParams, ms->cParams);
-
if (srcSize <= HASH_READ_SIZE) return 0;
- while (iend - ip > HASH_READ_SIZE) {
- size_t const remaining = (size_t)(iend - ip);
- size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX);
- const BYTE* const ichunk = ip + chunk;
-
- ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, ichunk);
+ ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, iend);
- if (params->ldmParams.enableLdm && ls != NULL)
- ZSTD_ldm_fillHashTable(ls, (const BYTE*)src, (const BYTE*)src + srcSize, &params->ldmParams);
+ if (loadLdmDict)
+ ZSTD_ldm_fillHashTable(ls, ip, iend, &params->ldmParams);
- switch(params->cParams.strategy)
- {
- case ZSTD_fast:
- ZSTD_fillHashTable(ms, ichunk, dtlm);
- break;
- case ZSTD_dfast:
- ZSTD_fillDoubleHashTable(ms, ichunk, dtlm);
- break;
-
- case ZSTD_greedy:
- case ZSTD_lazy:
- case ZSTD_lazy2:
- if (chunk >= HASH_READ_SIZE)
- ZSTD_insertAndFindFirstIndex(ms, ichunk-HASH_READ_SIZE);
- break;
-
- case ZSTD_btlazy2: /* we want the dictionary table fully sorted */
- case ZSTD_btopt:
- case ZSTD_btultra:
- case ZSTD_btultra2:
- if (chunk >= HASH_READ_SIZE)
- ZSTD_updateTree(ms, ichunk-HASH_READ_SIZE, ichunk);
- break;
+ switch(params->cParams.strategy)
+ {
+ case ZSTD_fast:
+ ZSTD_fillHashTable(ms, iend, dtlm);
+ break;
+ case ZSTD_dfast:
+ ZSTD_fillDoubleHashTable(ms, iend, dtlm);
+ break;
- default:
- assert(0); /* not possible : not a valid strategy id */
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ assert(srcSize >= HASH_READ_SIZE);
+ if (ms->dedicatedDictSearch) {
+ assert(ms->chainTable != NULL);
+ ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, iend-HASH_READ_SIZE);
+ } else {
+ assert(params->useRowMatchFinder != ZSTD_urm_auto);
+ if (params->useRowMatchFinder == ZSTD_urm_enableRowMatchFinder) {
+ size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog) * sizeof(U16);
+ ZSTD_memset(ms->tagTable, 0, tagTableSize);
+ ZSTD_row_update(ms, iend-HASH_READ_SIZE);
+ DEBUGLOG(4, "Using row-based hash table for lazy dict");
+ } else {
+ ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE);
+ DEBUGLOG(4, "Using chain-based hash table for lazy dict");
+ }
}
+ break;
- ip = ichunk;
+ case ZSTD_btlazy2: /* we want the dictionary table fully sorted */
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ case ZSTD_btultra2:
+ assert(srcSize >= HASH_READ_SIZE);
+ ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
+ break;
+
+ default:
+ assert(0); /* not possible : not a valid strategy id */
}
ms->nextToUpdate = (U32)(iend - ms->window.base);
@@ -2887,22 +4245,28 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
 /* Dictionaries that assign zero probability to symbols that show up cause problems
- when FSE encoding. Refuse dictionaries that assign zero probability to symbols
- that we may encounter during compression.
- NOTE: This behavior is not standard and could be improved in the future. */
-static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {
+ * during FSE encoding. Dictionaries with zero-probability symbols are marked FSE_repeat_check;
+ * only dictionaries with 100% valid symbols can be assumed FSE_repeat_valid.
+ */
+static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
+{
U32 s;
- RETURN_ERROR_IF(dictMaxSymbolValue < maxSymbolValue, dictionary_corrupted, "dict fse tables don't have all symbols");
+ if (dictMaxSymbolValue < maxSymbolValue) {
+ return FSE_repeat_check;
+ }
for (s = 0; s <= maxSymbolValue; ++s) {
- RETURN_ERROR_IF(normalizedCounter[s] == 0, dictionary_corrupted, "dict fse tables don't have all symbols");
+ if (normalizedCounter[s] == 0) {
+ return FSE_repeat_check;
+ }
}
- return 0;
+ return FSE_repeat_valid;
}
size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
- short* offcodeNCount, unsigned* offcodeMaxValue,
const void* const dict, size_t dictSize)
{
+ short offcodeNCount[MaxOff+1];
+ unsigned offcodeMaxValue = MaxOff;
const BYTE* dictPtr = (const BYTE*)dict; /* skip magic num and dict ID */
const BYTE* const dictEnd = dictPtr + dictSize;
dictPtr += 8;
@@ -2924,16 +4288,16 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
}
{ unsigned offcodeLog;
- size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
- /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
/* fill all offset symbols to avoid garbage at end of table */
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
bs->entropy.fse.offcodeCTable,
offcodeNCount, MaxOff, offcodeLog,
workspace, HUF_WORKSPACE_SIZE)),
dictionary_corrupted, "");
+ /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
dictPtr += offcodeHeaderSize;
}
@@ -2942,13 +4306,12 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
- /* Every match length code must have non-zero probability */
- FORWARD_IF_ERROR( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML), "");
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
bs->entropy.fse.matchlengthCTable,
matchlengthNCount, matchlengthMaxValue, matchlengthLog,
workspace, HUF_WORKSPACE_SIZE)),
dictionary_corrupted, "");
+ bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML);
dictPtr += matchlengthHeaderSize;
}
@@ -2957,13 +4320,12 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
- /* Every literal length code must have non-zero probability */
- FORWARD_IF_ERROR( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL), "");
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
bs->entropy.fse.litlengthCTable,
litlengthNCount, litlengthMaxValue, litlengthLog,
workspace, HUF_WORKSPACE_SIZE)),
dictionary_corrupted, "");
+ bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL);
dictPtr += litlengthHeaderSize;
}
@@ -2973,12 +4335,28 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
bs->rep[2] = MEM_readLE32(dictPtr+8);
dictPtr += 12;
+ { size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
+ U32 offcodeMax = MaxOff;
+ if (dictContentSize <= ((U32)-1) - 128 KB) {
+ U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
+ offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
+ }
+ /* All offset values <= dictContentSize + 128 KB must be representable for a valid table */
+ bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff));
+
+ /* All repCodes must be <= dictContentSize and != 0 */
+ { U32 u;
+ for (u=0; u<3; u++) {
+ RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, "");
+ RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
+ } } }
+
return dictPtr - (const BYTE*)dict;
}
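For orientation, a small worked example of the bound computed above; the numbers are illustrative and not taken from the diff:

/* Illustrative arithmetic only, mirroring ZSTD_loadCEntropy above:
 * dictContentSize = 112 KB = 114688 bytes
 * maxOffset       = dictContentSize + 128 KB = 245760
 * offcodeMax      = ZSTD_highbit32(245760) = 17     (2^17 <= 245760 < 2^18)
 * offcode_repeatMode is then derived with ZSTD_dictNCountRepeat() against
 * MIN(17, MaxOff), and each of the three repcodes must be non-zero and
 * <= 114688, otherwise the load fails with dictionary_corrupted. */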
/* Dictionary format :
* See :
- * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
+ * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format
*/
/*! ZSTD_loadZstdDictionary() :
* @return : dictID, or an error code
@@ -2995,42 +4373,23 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
{
const BYTE* dictPtr = (const BYTE*)dict;
const BYTE* const dictEnd = dictPtr + dictSize;
- short offcodeNCount[MaxOff+1];
- unsigned offcodeMaxValue = MaxOff;
size_t dictID;
size_t eSize;
-
ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
assert(dictSize >= 8);
assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
dictID = params->fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr + 4 /* skip magic number */ );
- eSize = ZSTD_loadCEntropy(bs, workspace, offcodeNCount, &offcodeMaxValue, dict, dictSize);
+ eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize);
FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed");
dictPtr += eSize;
- { size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
- U32 offcodeMax = MaxOff;
- if (dictContentSize <= ((U32)-1) - 128 KB) {
- U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
- offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
- }
- /* All offset values <= dictContentSize + 128 KB must be representable */
- FORWARD_IF_ERROR(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)), "");
- /* All repCodes must be <= dictContentSize and != 0*/
- { U32 u;
- for (u=0; u<3; u++) {
- RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, "");
- RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
- } }
-
- bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid;
- bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid;
- bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid;
+ {
+ size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
- return dictID;
}
+ return dictID;
}
/** ZSTD_compress_insertDictionary() :
@@ -3074,7 +4433,7 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
}
#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
-#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6)
+#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)
/*! ZSTD_compressBegin_internal() :
* @return : 0, or an error code */
@@ -3086,6 +4445,10 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
+ size_t const dictContentSize = cdict ? cdict->dictContentSize : dictSize;
+#if ZSTD_TRACE
+ cctx->traceCtx = (ZSTD_trace_compress_begin != NULL) ? ZSTD_trace_compress_begin(cctx) : 0;
+#endif
DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
/* params are supposed to be fully validated at this point */
assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
@@ -3100,13 +4463,14 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
}
- FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, *params, pledgedSrcSize,
+ FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+ dictContentSize,
ZSTDcrp_makeClean, zbuff) , "");
{ size_t const dictID = cdict ?
ZSTD_compress_insertDictionary(
cctx->blockState.prevCBlock, &cctx->blockState.matchState,
&cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
- cdict->dictContentSize, dictContentType, dtlm,
+ cdict->dictContentSize, cdict->dictContentType, dtlm,
cctx->entropyWorkspace)
: ZSTD_compress_insertDictionary(
cctx->blockState.prevCBlock, &cctx->blockState.matchState,
@@ -3115,6 +4479,7 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
assert(dictID <= UINT_MAX);
cctx->dictID = (U32)dictID;
+ cctx->dictContentSize = dictContentSize;
}
return 0;
}
@@ -3143,8 +4508,8 @@ size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
const void* dict, size_t dictSize,
ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
- ZSTD_CCtx_params const cctxParams =
- ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, &params);
+ ZSTD_CCtx_params cctxParams;
+ ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
return ZSTD_compressBegin_advanced_internal(cctx,
dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
NULL /*cdict*/,
@@ -3153,9 +4518,11 @@ size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
{
- ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
- ZSTD_CCtx_params const cctxParams =
- ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, &params);
+ ZSTD_CCtx_params cctxParams;
+ {
+ ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
+ ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
+ }
DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
&cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
@@ -3209,6 +4576,30 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
return op-ostart;
}
+void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
+{
+#if ZSTD_TRACE
+ if (cctx->traceCtx && ZSTD_trace_compress_end != NULL) {
+ int const streaming = cctx->inBuffSize > 0 || cctx->outBuffSize > 0 || cctx->appliedParams.nbWorkers > 0;
+ ZSTD_Trace trace;
+ ZSTD_memset(&trace, 0, sizeof(trace));
+ trace.version = ZSTD_VERSION_NUMBER;
+ trace.streaming = streaming;
+ trace.dictionaryID = cctx->dictID;
+ trace.dictionarySize = cctx->dictContentSize;
+ trace.uncompressedSize = cctx->consumedSrcSize;
+ trace.compressedSize = cctx->producedCSize + extraCSize;
+ trace.params = &cctx->appliedParams;
+ trace.cctx = cctx;
+ ZSTD_trace_compress_end(cctx->traceCtx, &trace);
+ }
+ cctx->traceCtx = 0;
+#else
+ (void)cctx;
+ (void)extraCSize;
+#endif
+}
+
size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
@@ -3231,26 +4622,10 @@ size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
(unsigned)cctx->pledgedSrcSizePlusOne-1,
(unsigned)cctx->consumedSrcSize);
}
+ ZSTD_CCtx_trace(cctx, endResult);
return cSize + endResult;
}
-
-static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const void* dict,size_t dictSize,
- const ZSTD_parameters* params)
-{
- ZSTD_CCtx_params const cctxParams =
- ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, params);
- DEBUGLOG(4, "ZSTD_compress_internal");
- return ZSTD_compress_advanced_internal(cctx,
- dst, dstCapacity,
- src, srcSize,
- dict, dictSize,
- &cctxParams);
-}
-
size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
@@ -3259,11 +4634,12 @@ size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
{
DEBUGLOG(4, "ZSTD_compress_advanced");
FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
- return ZSTD_compress_internal(cctx,
- dst, dstCapacity,
- src, srcSize,
- dict, dictSize,
- &params);
+ ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, ZSTD_NO_CLEVEL);
+ return ZSTD_compress_advanced_internal(cctx,
+ dst, dstCapacity,
+ src, srcSize,
+ dict, dictSize,
+ &cctx->simpleApiParams);
}
/* Internal */
@@ -3287,11 +4663,13 @@ size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
const void* dict, size_t dictSize,
int compressionLevel)
{
- ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0);
- ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, &params);
+ {
+ ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
+ assert(params.fParams.contentSizeFlag == 1);
+ ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT: compressionLevel);
+ }
DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize);
- assert(params.fParams.contentSizeFlag == 1);
- return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctxParams);
+ return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctx->simpleApiParams);
}
size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
@@ -3309,10 +4687,17 @@ size_t ZSTD_compress(void* dst, size_t dstCapacity,
int compressionLevel)
{
size_t result;
+#if ZSTD_COMPRESS_HEAPMODE
+ ZSTD_CCtx* cctx = ZSTD_createCCtx();
+ RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed");
+ result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
+ ZSTD_freeCCtx(cctx);
+#else
ZSTD_CCtx ctxBody;
ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem);
result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
ZSTD_freeCCtxContent(&ctxBody); /* can't free ctxBody itself, as it's on stack; free only heap content */
+#endif
return result;
}
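The ZSTD_COMPRESS_HEAPMODE switch above only decides whether the internal context lives on the stack or the heap; the public entry point is unchanged. A minimal one-shot sketch of that entry point, with illustrative names and abbreviated error handling:

#include <zstd.h>

/* One-shot compression into a caller-provided buffer.
 * dstCap should be at least ZSTD_compressBound(srcSize) to guarantee success. */
static size_t compress_once(void* dst, size_t dstCap,
                            const void* src, size_t srcSize, int level)
{
    size_t const cSize = ZSTD_compress(dst, dstCap, src, srcSize, level);
    if (ZSTD_isError(cSize)) return 0;   /* caller treats 0 as failure */
    return cSize;                        /* compressed size in bytes */
}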
@@ -3328,14 +4713,17 @@ size_t ZSTD_estimateCDictSize_advanced(
DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
+ ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
- + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
+ /* enableDedicatedDictSearch == 1 ensures that CDict estimation will not be too small
+ * in case we are using DDS with row-hash. */
+ + ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_urm_auto, &cParams),
+ /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0)
+ (dictLoadMethod == ZSTD_dlm_byRef ? 0
: ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
}
size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
{
- ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
}
@@ -3353,20 +4741,22 @@ static size_t ZSTD_initCDict_internal(
const void* dictBuffer, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType,
- ZSTD_compressionParameters cParams)
+ ZSTD_CCtx_params params)
{
DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
- assert(!ZSTD_checkCParams(cParams));
- cdict->matchState.cParams = cParams;
+ assert(!ZSTD_checkCParams(params.cParams));
+ cdict->matchState.cParams = params.cParams;
+ cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch;
if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
cdict->dictContent = dictBuffer;
} else {
void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!");
cdict->dictContent = internalBuffer;
- memcpy(internalBuffer, dictBuffer, dictSize);
+ ZSTD_memcpy(internalBuffer, dictBuffer, dictSize);
}
cdict->dictContentSize = dictSize;
+ cdict->dictContentType = dictContentType;
cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);
@@ -3376,18 +4766,16 @@ static size_t ZSTD_initCDict_internal(
FORWARD_IF_ERROR(ZSTD_reset_matchState(
&cdict->matchState,
&cdict->workspace,
- &cParams,
+ &params.cParams,
+ params.useRowMatchFinder,
ZSTDcrp_makeClean,
ZSTDirp_reset,
ZSTD_resetTarget_CDict), "");
/* (Maybe) load the dictionary
* Skips loading the dictionary if it is < 8 bytes.
*/
- { ZSTD_CCtx_params params;
- memset(&params, 0, sizeof(params));
- params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
+ { params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
params.fParams.contentSizeFlag = 1;
- params.cParams = cParams;
{ size_t const dictID = ZSTD_compress_insertDictionary(
&cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
&params, cdict->dictContent, cdict->dictContentSize,
@@ -3401,66 +4789,129 @@ static size_t ZSTD_initCDict_internal(
return 0;
}
-ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
+static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
- ZSTD_dictContentType_e dictContentType,
- ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
+ ZSTD_compressionParameters cParams,
+ ZSTD_useRowMatchFinderMode_e useRowMatchFinder,
+ U32 enableDedicatedDictSearch,
+ ZSTD_customMem customMem)
{
- DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (unsigned)dictContentType);
- if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
{ size_t const workspaceSize =
ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
- ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) +
+ ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, enableDedicatedDictSearch, /* forCCtx */ 0) +
(dictLoadMethod == ZSTD_dlm_byRef ? 0
: ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
- void* const workspace = ZSTD_malloc(workspaceSize, customMem);
+ void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
ZSTD_cwksp ws;
ZSTD_CDict* cdict;
if (!workspace) {
- ZSTD_free(workspace, customMem);
+ ZSTD_customFree(workspace, customMem);
return NULL;
}
- ZSTD_cwksp_init(&ws, workspace, workspaceSize);
+ ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc);
cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
assert(cdict != NULL);
ZSTD_cwksp_move(&cdict->workspace, &ws);
cdict->customMem = customMem;
- cdict->compressionLevel = 0; /* signals advanced API usage */
+ cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */
+ cdict->useRowMatchFinder = useRowMatchFinder;
+ return cdict;
+ }
+}
- if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
- dictBuffer, dictSize,
- dictLoadMethod, dictContentType,
- cParams) )) {
- ZSTD_freeCDict(cdict);
- return NULL;
- }
+ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams,
+ ZSTD_customMem customMem)
+{
+ ZSTD_CCtx_params cctxParams;
+ ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
+ ZSTD_CCtxParams_init(&cctxParams, 0);
+ cctxParams.cParams = cParams;
+ cctxParams.customMem = customMem;
+ return ZSTD_createCDict_advanced2(
+ dictBuffer, dictSize,
+ dictLoadMethod, dictContentType,
+ &cctxParams, customMem);
+}
+
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ const ZSTD_CCtx_params* originalCctxParams,
+ ZSTD_customMem customMem)
+{
+ ZSTD_CCtx_params cctxParams = *originalCctxParams;
+ ZSTD_compressionParameters cParams;
+ ZSTD_CDict* cdict;
- return cdict;
+ DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType);
+ if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+
+ if (cctxParams.enableDedicatedDictSearch) {
+ cParams = ZSTD_dedicatedDictSearch_getCParams(
+ cctxParams.compressionLevel, dictSize);
+ ZSTD_overrideCParams(&cParams, &cctxParams.cParams);
+ } else {
+ cParams = ZSTD_getCParamsFromCCtxParams(
+ &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
+ }
+
+ if (!ZSTD_dedicatedDictSearch_isSupported(&cParams)) {
+ /* Fall back to non-DDSS params */
+ cctxParams.enableDedicatedDictSearch = 0;
+ cParams = ZSTD_getCParamsFromCCtxParams(
+ &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
+ }
+
+ DEBUGLOG(3, "ZSTD_createCDict_advanced2: DDS: %u", cctxParams.enableDedicatedDictSearch);
+ cctxParams.cParams = cParams;
+ cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
+
+ cdict = ZSTD_createCDict_advanced_internal(dictSize,
+ dictLoadMethod, cctxParams.cParams,
+ cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch,
+ customMem);
+
+ if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
+ dict, dictSize,
+ dictLoadMethod, dictContentType,
+ cctxParams) )) {
+ ZSTD_freeCDict(cdict);
+ return NULL;
}
+
+ return cdict;
}
ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
{
- ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
- ZSTD_CDict* cdict = ZSTD_createCDict_advanced(dict, dictSize,
+ ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
+ ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
ZSTD_dlm_byCopy, ZSTD_dct_auto,
cParams, ZSTD_defaultCMem);
if (cdict)
- cdict->compressionLevel = compressionLevel == 0 ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
+ cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
return cdict;
}
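A hedged usage sketch of the digested-dictionary path these constructors serve; the function names are the stable public API, while the helper and its arguments are illustrative:

#include <zstd.h>

/* Reuse one digested dictionary (CDict) across many small inputs. */
static size_t compress_with_cdict(void* dst, size_t dstCap,
                                  const void* src, size_t srcSize,
                                  const void* dictBuf, size_t dictSize)
{
    ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3 /* level */);
    ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
    size_t cSize = 0;
    if (cdict && cctx)
        cSize = ZSTD_compress_usingCDict(cctx, dst, dstCap, src, srcSize, cdict);
    ZSTD_freeCCtx(cctx);    /* both free functions accept NULL */
    ZSTD_freeCDict(cdict);
    return cSize;           /* check with ZSTD_isError() */
}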
ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
{
- ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
- return ZSTD_createCDict_advanced(dict, dictSize,
+ ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
+ ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
ZSTD_dlm_byRef, ZSTD_dct_auto,
cParams, ZSTD_defaultCMem);
+ if (cdict)
+ cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
+ return cdict;
}
size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
@@ -3470,7 +4921,7 @@ size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
ZSTD_cwksp_free(&cdict->workspace, cMem);
if (!cdictInWorkspace) {
- ZSTD_free(cdict, cMem);
+ ZSTD_customFree(cdict, cMem);
}
return 0;
}
@@ -3496,19 +4947,22 @@ const ZSTD_CDict* ZSTD_initStaticCDict(
ZSTD_dictContentType_e dictContentType,
ZSTD_compressionParameters cParams)
{
- size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
+ ZSTD_useRowMatchFinderMode_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_urm_auto, &cParams);
+ /* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */
+ size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0);
size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
+ (dictLoadMethod == ZSTD_dlm_byRef ? 0
: ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
+ ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
+ matchStateSize;
ZSTD_CDict* cdict;
+ ZSTD_CCtx_params params;
if ((size_t)workspace & 7) return NULL; /* 8-aligned */
{
ZSTD_cwksp ws;
- ZSTD_cwksp_init(&ws, workspace, workspaceSize);
+ ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
if (cdict == NULL) return NULL;
ZSTD_cwksp_move(&cdict->workspace, &ws);
@@ -3518,10 +4972,15 @@ const ZSTD_CDict* ZSTD_initStaticCDict(
(unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
if (workspaceSize < neededSize) return NULL;
+ ZSTD_CCtxParams_init(&params, 0);
+ params.cParams = cParams;
+ params.useRowMatchFinder = useRowMatchFinder;
+ cdict->useRowMatchFinder = useRowMatchFinder;
+
if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
dict, dictSize,
dictLoadMethod, dictContentType,
- cParams) ))
+ params) ))
return NULL;
return cdict;
@@ -3533,61 +4992,98 @@ ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
return cdict->matchState.cParams;
}
-/* ZSTD_compressBegin_usingCDict_advanced() :
- * cdict must be != NULL */
-size_t ZSTD_compressBegin_usingCDict_advanced(
+/*! ZSTD_getDictID_fromCDict() :
+ * Provides the dictID of the dictionary loaded into `cdict`.
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict)
+{
+ if (cdict==NULL) return 0;
+ return cdict->dictID;
+}
+
+/* ZSTD_compressBegin_usingCDict_internal() :
+ * Implementation of various ZSTD_compressBegin_usingCDict* functions.
+ */
+static size_t ZSTD_compressBegin_usingCDict_internal(
ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
- DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
+ ZSTD_CCtx_params cctxParams;
+ DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_internal");
RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!");
- { ZSTD_CCtx_params params = cctx->requestedParams;
+ /* Initialize the cctxParams from the cdict */
+ {
+ ZSTD_parameters params;
+ params.fParams = fParams;
params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
|| pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
|| pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
- || cdict->compressionLevel == 0 )
- && (params.attachDictPref != ZSTD_dictForceLoad) ?
+ || cdict->compressionLevel == 0 ) ?
ZSTD_getCParamsFromCDict(cdict)
: ZSTD_getCParams(cdict->compressionLevel,
pledgedSrcSize,
cdict->dictContentSize);
- /* Increase window log to fit the entire dictionary and source if the
- * source size is known. Limit the increase to 19, which is the
- * window log for compression level 1 with the largest source size.
- */
- if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
- U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
- U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
- params.cParams.windowLog = MAX(params.cParams.windowLog, limitedSrcLog);
- }
- params.fParams = fParams;
- return ZSTD_compressBegin_internal(cctx,
- NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
- cdict,
- &params, pledgedSrcSize,
- ZSTDb_not_buffered);
+ ZSTD_CCtxParams_init_internal(&cctxParams, &params, cdict->compressionLevel);
+ }
+ /* Increase window log to fit the entire dictionary and source if the
+ * source size is known. Limit the increase to 19, which is the
+ * window log for compression level 1 with the largest source size.
+ */
+ if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+ U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
+ U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
+ cctxParams.cParams.windowLog = MAX(cctxParams.cParams.windowLog, limitedSrcLog);
}
+ return ZSTD_compressBegin_internal(cctx,
+ NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
+ cdict,
+ &cctxParams, pledgedSrcSize,
+ ZSTDb_not_buffered);
+}
+
+
+/* ZSTD_compressBegin_usingCDict_advanced() :
+ * This function is DEPRECATED.
+ * cdict must be != NULL */
+size_t ZSTD_compressBegin_usingCDict_advanced(
+ ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
+ ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
+{
+ return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, pledgedSrcSize);
}
/* ZSTD_compressBegin_usingCDict() :
- * pledgedSrcSize=0 means "unknown"
- * if pledgedSrcSize>0, it will enable contentSizeFlag */
+ * cdict must be != NULL */
size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
- DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
- return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
+ return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
}
-size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
+/*! ZSTD_compress_usingCDict_internal():
+ * Implementation of various ZSTD_compress_usingCDict* functions.
+ */
+static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
{
- FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */
+ FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */
return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}
+/*! ZSTD_compress_usingCDict_advanced():
+ * This function is DEPRECATED.
+ */
+size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
+{
+ return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
+}
+
/*! ZSTD_compress_usingCDict() :
* Compression using a digested Dictionary.
* Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
@@ -3599,7 +5095,7 @@ size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
const ZSTD_CDict* cdict)
{
ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
- return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
+ return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
}
@@ -3640,32 +5136,12 @@ size_t ZSTD_CStreamOutSize(void)
return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
}
-static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
- const void* const dict, size_t const dictSize, ZSTD_dictContentType_e const dictContentType,
- const ZSTD_CDict* const cdict,
- ZSTD_CCtx_params params, unsigned long long const pledgedSrcSize)
+static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
{
- DEBUGLOG(4, "ZSTD_resetCStream_internal");
- /* Finalize the compression parameters */
- params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize);
- /* params are supposed to be fully validated at this point */
- assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
- assert(!((dict) && (cdict))); /* either dict or cdict, not both */
-
- FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
- dict, dictSize, dictContentType, ZSTD_dtlm_fast,
- cdict,
- &params, pledgedSrcSize,
- ZSTDb_buffered) , "");
-
- cctx->inToCompress = 0;
- cctx->inBuffPos = 0;
- cctx->inBuffTarget = cctx->blockSize
- + (cctx->blockSize == pledgedSrcSize); /* for small input: avoid automatic flush on reaching end of block, since it would require to add a 3-bytes null block to end frame */
- cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
- cctx->streamStage = zcss_load;
- cctx->frameEnded = 0;
- return 0; /* ready to go */
+ if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))
+ return ZSTD_cpm_attachDict;
+ else
+ return ZSTD_cpm_noAttachDict;
}
/* ZSTD_resetCStream():
@@ -3749,7 +5225,7 @@ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
- zcs->requestedParams = ZSTD_assignParamsToCCtxParams(&zcs->requestedParams, &params);
+ ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &params);
FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
return 0;
}
@@ -3815,12 +5291,17 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
/* check expectations */
DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
- assert(zcs->inBuff != NULL);
- assert(zcs->inBuffSize > 0);
- assert(zcs->outBuff != NULL);
- assert(zcs->outBuffSize > 0);
+ if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
+ assert(zcs->inBuff != NULL);
+ assert(zcs->inBuffSize > 0);
+ }
+ if (zcs->appliedParams.outBufferMode == ZSTD_bm_buffered) {
+ assert(zcs->outBuff != NULL);
+ assert(zcs->outBuffSize > 0);
+ }
assert(output->pos <= output->size);
assert(input->pos <= input->size);
+ assert((U32)flushMode <= (U32)ZSTD_e_end);
while (someMoreWork) {
switch(zcs->streamStage)
@@ -3830,7 +5311,8 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
case zcss_load:
if ( (flushMode == ZSTD_e_end)
- && ((size_t)(oend-op) >= ZSTD_compressBound(iend-ip)) /* enough dstCapacity */
+ && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip) /* Enough output space */
+ || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) /* OR we are allowed to return dstSizeTooSmall */
&& (zcs->inBuffPos == 0) ) {
/* shortcut to compression pass directly into output buffer */
size_t const cSize = ZSTD_compressEnd(zcs,
@@ -3843,8 +5325,9 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
someMoreWork = 0; break;
}
- /* complete loading into inBuffer */
- { size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
+ /* complete loading into inBuffer in buffered mode */
+ if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
+ size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
size_t const loaded = ZSTD_limitCopy(
zcs->inBuff + zcs->inBuffPos, toLoad,
ip, iend-ip);
@@ -3864,31 +5347,49 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
}
/* compress current block (note : this stage cannot be stopped in the middle) */
DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
- { void* cDst;
+ { int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered);
+ void* cDst;
size_t cSize;
- size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
size_t oSize = oend-op;
- unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
- if (oSize >= ZSTD_compressBound(iSize))
+ size_t const iSize = inputBuffered
+ ? zcs->inBuffPos - zcs->inToCompress
+ : MIN((size_t)(iend - ip), zcs->blockSize);
+ if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)
cDst = op; /* compress into output buffer, to skip flush stage */
else
cDst = zcs->outBuff, oSize = zcs->outBuffSize;
- cSize = lastBlock ?
- ZSTD_compressEnd(zcs, cDst, oSize,
- zcs->inBuff + zcs->inToCompress, iSize) :
- ZSTD_compressContinue(zcs, cDst, oSize,
- zcs->inBuff + zcs->inToCompress, iSize);
- FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
- zcs->frameEnded = lastBlock;
- /* prepare next block */
- zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
- if (zcs->inBuffTarget > zcs->inBuffSize)
- zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
- DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
- (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
- if (!lastBlock)
- assert(zcs->inBuffTarget <= zcs->inBuffSize);
- zcs->inToCompress = zcs->inBuffPos;
+ if (inputBuffered) {
+ unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
+ cSize = lastBlock ?
+ ZSTD_compressEnd(zcs, cDst, oSize,
+ zcs->inBuff + zcs->inToCompress, iSize) :
+ ZSTD_compressContinue(zcs, cDst, oSize,
+ zcs->inBuff + zcs->inToCompress, iSize);
+ FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
+ zcs->frameEnded = lastBlock;
+ /* prepare next block */
+ zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
+ if (zcs->inBuffTarget > zcs->inBuffSize)
+ zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
+ DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
+ (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
+ if (!lastBlock)
+ assert(zcs->inBuffTarget <= zcs->inBuffSize);
+ zcs->inToCompress = zcs->inBuffPos;
+ } else {
+ unsigned const lastBlock = (ip + iSize == iend);
+ assert(flushMode == ZSTD_e_end /* Already validated */);
+ cSize = lastBlock ?
+ ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) :
+ ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize);
+ /* Consume the input prior to error checking to mirror buffered mode. */
+ if (iSize > 0)
+ ip += iSize;
+ FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
+ zcs->frameEnded = lastBlock;
+ if (lastBlock)
+ assert(ip == iend);
+ }
if (cDst == op) { /* no need to flush */
op += cSize;
if (zcs->frameEnded) {
@@ -3905,6 +5406,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
/* fall-through */
case zcss_flush:
DEBUGLOG(5, "flush stage");
+ assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered);
{ size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
@@ -3959,6 +5461,135 @@ size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuf
return ZSTD_nextInputSizeHint_MTorST(zcs);
}
+/* After a compression call set the expected input/output buffer.
+ * This is validated at the start of the next compression call.
+ */
+static void ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input)
+{
+ if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
+ cctx->expectedInBuffer = *input;
+ }
+ if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
+ cctx->expectedOutBufferSize = output->size - output->pos;
+ }
+}
+
+/* Validate that the input/output buffers match the expectations set by
+ * ZSTD_setBufferExpectations.
+ */
+static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx,
+ ZSTD_outBuffer const* output,
+ ZSTD_inBuffer const* input,
+ ZSTD_EndDirective endOp)
+{
+ if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
+ ZSTD_inBuffer const expect = cctx->expectedInBuffer;
+ if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size)
+ RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!");
+ if (endOp != ZSTD_e_end)
+ RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!");
+ }
+ if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
+ size_t const outBufferSize = output->size - output->pos;
+ if (cctx->expectedOutBufferSize != outBufferSize)
+ RETURN_ERROR(dstBuffer_wrong, "ZSTD_c_stableOutBuffer enabled but output size differs!");
+ }
+ return 0;
+}
+
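A minimal sketch of the stable-buffer mode these checks protect, assuming the experimental parameters ZSTD_c_stableInBuffer and ZSTD_c_stableOutBuffer from zstd's static-linking-only section; buffer names are illustrative:

#define ZSTD_STATIC_LINKING_ONLY  /* assumed necessary for the stable-buffer parameter names */
#include <zstd.h>

/* With stable buffers the whole input is presented in a single ZSTD_e_end call
 * and neither buffer may move or resize between calls (enforced above). */
static size_t compress_stable_buffers(void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_inBuffer  in  = { src, srcSize, 0 };
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    size_t remaining;
    if (cctx == NULL) return 0;
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableInBuffer, 1);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableOutBuffer, 1);
    remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
    ZSTD_freeCCtx(cctx);
    if (ZSTD_isError(remaining) || remaining != 0) return 0;  /* error, or dst too small */
    return out.pos;   /* compressed size */
}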
+static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
+ ZSTD_EndDirective endOp,
+ size_t inSize) {
+ ZSTD_CCtx_params params = cctx->requestedParams;
+ ZSTD_prefixDict const prefixDict = cctx->prefixDict;
+ FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */
+ ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */
+ assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */
+ if (cctx->cdict && !cctx->localDict.cdict) {
+ /* Let the cdict's compression level take priority over the requested params.
+ * But do not take the cdict's compression level if the "cdict" is actually a localDict
+ * generated from ZSTD_initLocalDict().
+ */
+ params.compressionLevel = cctx->cdict->compressionLevel;
+ }
+ DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
+ if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-fix pledgedSrcSize */
+ {
+ size_t const dictSize = prefixDict.dict
+ ? prefixDict.dictSize
+ : (cctx->cdict ? cctx->cdict->dictContentSize : 0);
+ ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
+ params.cParams = ZSTD_getCParamsFromCCtxParams(
+ &params, cctx->pledgedSrcSizePlusOne-1,
+ dictSize, mode);
+ }
+
+ if (ZSTD_CParams_shouldEnableLdm(&params.cParams)) {
+ /* Enable LDM by default for optimal parser and window size >= 128MB */
+ DEBUGLOG(4, "LDM enabled by default (window size >= 128MB, strategy >= btopt)");
+ params.ldmParams.enableLdm = 1;
+ }
+
+ if (ZSTD_CParams_useBlockSplitter(&params.cParams)) {
+ DEBUGLOG(4, "Block splitter enabled by default (window size >= 128K, strategy >= btopt)");
+ params.splitBlocks = 1;
+ }
+
+ params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, &params.cParams);
+
+#ifdef ZSTD_MULTITHREAD
+ if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {
+ params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */
+ }
+ if (params.nbWorkers > 0) {
+#if ZSTD_TRACE
+ cctx->traceCtx = (ZSTD_trace_compress_begin != NULL) ? ZSTD_trace_compress_begin(cctx) : 0;
+#endif
+ /* mt context creation */
+ if (cctx->mtctx == NULL) {
+ DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u",
+ params.nbWorkers);
+ cctx->mtctx = ZSTDMT_createCCtx_advanced((U32)params.nbWorkers, cctx->customMem, cctx->pool);
+ RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation, "NULL pointer!");
+ }
+ /* mt compression */
+ DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
+ FORWARD_IF_ERROR( ZSTDMT_initCStream_internal(
+ cctx->mtctx,
+ prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
+ cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) , "");
+ cctx->dictID = cctx->cdict ? cctx->cdict->dictID : 0;
+ cctx->dictContentSize = cctx->cdict ? cctx->cdict->dictContentSize : prefixDict.dictSize;
+ cctx->consumedSrcSize = 0;
+ cctx->producedCSize = 0;
+ cctx->streamStage = zcss_load;
+ cctx->appliedParams = params;
+ } else
+#endif
+ { U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1;
+ assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+ FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
+ prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dtlm_fast,
+ cctx->cdict,
+ &params, pledgedSrcSize,
+ ZSTDb_buffered) , "");
+ assert(cctx->appliedParams.nbWorkers == 0);
+ cctx->inToCompress = 0;
+ cctx->inBuffPos = 0;
+ if (cctx->appliedParams.inBufferMode == ZSTD_bm_buffered) {
+ /* for small input: avoid automatic flush on reaching end of block, since
+ * it would require adding a 3-byte null block to end the frame
+ */
+ cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize);
+ } else {
+ cctx->inBuffTarget = 0;
+ }
+ cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
+ cctx->streamStage = zcss_load;
+ cctx->frameEnded = 0;
+ }
+ return 0;
+}
size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
ZSTD_outBuffer* output,
@@ -3967,82 +5598,69 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
{
DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
/* check conditions */
- RETURN_ERROR_IF(output->pos > output->size, GENERIC, "invalid buffer");
- RETURN_ERROR_IF(input->pos > input->size, GENERIC, "invalid buffer");
- assert(cctx!=NULL);
+ RETURN_ERROR_IF(output->pos > output->size, dstSize_tooSmall, "invalid output buffer");
+ RETURN_ERROR_IF(input->pos > input->size, srcSize_wrong, "invalid input buffer");
+ RETURN_ERROR_IF((U32)endOp > (U32)ZSTD_e_end, parameter_outOfBound, "invalid endDirective");
+ assert(cctx != NULL);
/* transparent initialization stage */
if (cctx->streamStage == zcss_init) {
- ZSTD_CCtx_params params = cctx->requestedParams;
- ZSTD_prefixDict const prefixDict = cctx->prefixDict;
- FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */
- memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */
- assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */
- DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
- if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = input->size + 1; /* auto-fix pledgedSrcSize */
- params.cParams = ZSTD_getCParamsFromCCtxParams(
- &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);
-
-
-#ifdef ZSTD_MULTITHREAD
- if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {
- params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */
- }
- if (params.nbWorkers > 0) {
- /* mt context creation */
- if (cctx->mtctx == NULL) {
- DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u",
- params.nbWorkers);
- cctx->mtctx = ZSTDMT_createCCtx_advanced((U32)params.nbWorkers, cctx->customMem);
- RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation, "NULL pointer!");
- }
- /* mt compression */
- DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
- FORWARD_IF_ERROR( ZSTDMT_initCStream_internal(
- cctx->mtctx,
- prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
- cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) , "");
- cctx->streamStage = zcss_load;
- cctx->appliedParams.nbWorkers = params.nbWorkers;
- } else
-#endif
- { FORWARD_IF_ERROR( ZSTD_resetCStream_internal(cctx,
- prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
- cctx->cdict,
- params, cctx->pledgedSrcSizePlusOne-1) , "");
- assert(cctx->streamStage == zcss_load);
- assert(cctx->appliedParams.nbWorkers == 0);
- } }
+ FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed");
+ ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */
+ }
/* end of transparent initialization stage */
+ FORWARD_IF_ERROR(ZSTD_checkBufferStability(cctx, output, input, endOp), "invalid buffers");
/* compression stage */
#ifdef ZSTD_MULTITHREAD
if (cctx->appliedParams.nbWorkers > 0) {
- int const forceMaxProgress = (endOp == ZSTD_e_flush || endOp == ZSTD_e_end);
size_t flushMin;
- assert(forceMaxProgress || endOp == ZSTD_e_continue /* Protection for a new flush type */);
if (cctx->cParamsChanged) {
ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);
cctx->cParamsChanged = 0;
}
- do {
+ for (;;) {
+ size_t const ipos = input->pos;
+ size_t const opos = output->pos;
flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
+ cctx->consumedSrcSize += (U64)(input->pos - ipos);
+ cctx->producedCSize += (U64)(output->pos - opos);
if ( ZSTD_isError(flushMin)
|| (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
+ if (flushMin == 0)
+ ZSTD_CCtx_trace(cctx, 0);
ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
}
FORWARD_IF_ERROR(flushMin, "ZSTDMT_compressStream_generic failed");
- } while (forceMaxProgress && flushMin != 0 && output->pos < output->size);
+
+ if (endOp == ZSTD_e_continue) {
+ /* We only require some progress with ZSTD_e_continue, not maximal progress.
+ * We're done if we've consumed or produced any bytes, or either buffer is
+ * full.
+ */
+ if (input->pos != ipos || output->pos != opos || input->pos == input->size || output->pos == output->size)
+ break;
+ } else {
+ assert(endOp == ZSTD_e_flush || endOp == ZSTD_e_end);
+ /* We require maximal progress. We're done when the flush is complete or the
+ * output buffer is full.
+ */
+ if (flushMin == 0 || output->pos == output->size)
+ break;
+ }
+ }
DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic");
/* Either we don't require maximum forward progress, we've finished the
* flush, or we are out of output space.
*/
- assert(!forceMaxProgress || flushMin == 0 || output->pos == output->size);
+ assert(endOp == ZSTD_e_continue || flushMin == 0 || output->pos == output->size);
+ ZSTD_setBufferExpectations(cctx, output, input);
return flushMin;
}
#endif
FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , "");
DEBUGLOG(5, "completed ZSTD_compressStream2");
+ ZSTD_setBufferExpectations(cctx, output, input);
return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
}
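For context, a conventional streaming loop over the endOp contract enforced above: ZSTD_e_continue only guarantees some forward progress, while ZSTD_e_end must be repeated until the call returns 0. This is a sketch with abbreviated I/O handling, not part of the patch:

#include <stdio.h>
#include <zstd.h>

static int stream_compress(FILE* fin, FILE* fout)
{
    char inBuf[1 << 15];
    char outBuf[1 << 15];
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t readSz;
    if (cctx == NULL) return -1;
    do {
        ZSTD_inBuffer input;
        ZSTD_EndDirective mode;
        int done = 0;
        readSz = fread(inBuf, 1, sizeof(inBuf), fin);
        mode = (readSz < sizeof(inBuf)) ? ZSTD_e_end : ZSTD_e_continue;
        input.src = inBuf; input.size = readSz; input.pos = 0;
        do {
            ZSTD_outBuffer output = { outBuf, sizeof(outBuf), 0 };
            size_t const ret = ZSTD_compressStream2(cctx, &output, &input, mode);
            if (ZSTD_isError(ret)) { ZSTD_freeCCtx(cctx); return -1; }
            fwrite(outBuf, 1, output.pos, fout);
            /* e_continue: done once this chunk is consumed;
             * e_end: done only when the frame epilogue is fully flushed (ret == 0) */
            done = (mode == ZSTD_e_end) ? (ret == 0) : (input.pos == input.size);
        } while (!done);
    } while (readSz == sizeof(inBuf));
    ZSTD_freeCCtx(cctx);
    return 0;
}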
@@ -4065,14 +5683,22 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
+ ZSTD_bufferMode_e const originalInBufferMode = cctx->requestedParams.inBufferMode;
+ ZSTD_bufferMode_e const originalOutBufferMode = cctx->requestedParams.outBufferMode;
DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize);
ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
+ /* Enable stable input/output buffers. */
+ cctx->requestedParams.inBufferMode = ZSTD_bm_stable;
+ cctx->requestedParams.outBufferMode = ZSTD_bm_stable;
{ size_t oPos = 0;
size_t iPos = 0;
size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
dst, dstCapacity, &oPos,
src, srcSize, &iPos,
ZSTD_e_end);
+ /* Reset to the original values. */
+ cctx->requestedParams.inBufferMode = originalInBufferMode;
+ cctx->requestedParams.outBufferMode = originalOutBufferMode;
FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
if (result != 0) { /* compression not completed, due to lack of output space */
assert(oPos == dstCapacity);
@@ -4083,6 +5709,409 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx,
}
}
+typedef struct {
+ U32 idx; /* Index in array of ZSTD_Sequence */
+ U32 posInSequence; /* Position within sequence at idx */
+ size_t posInSrc; /* Number of bytes given by sequences provided so far */
+} ZSTD_sequencePosition;
+
+/* Returns a ZSTD error code if sequence is not valid */
+static size_t ZSTD_validateSequence(U32 offCode, U32 matchLength,
+ size_t posInSrc, U32 windowLog, size_t dictSize, U32 minMatch) {
+ size_t offsetBound;
+ U32 windowSize = 1 << windowLog;
+ * posInSrc represents the amount of data the decoder would decode up to this point.
+ * As long as the amount of data decoded is less than or equal to window size, offsets may be
+ * larger than the total length of output decoded in order to reference the dict, even larger than
+ * window size. After output surpasses windowSize, we're limited to windowSize offsets again.
+ */
+ offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
+ RETURN_ERROR_IF(offCode > offsetBound + ZSTD_REP_MOVE, corruption_detected, "Offset too large!");
+ RETURN_ERROR_IF(matchLength < minMatch, corruption_detected, "Matchlength too small");
+ return 0;
+}
+
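A worked instance of the bound above, with illustrative numbers:

/* windowLog = 20 -> windowSize = 1 MB; posInSrc = 4096, dictSize = 16384.
 * posInSrc <= windowSize, so offsetBound = 4096 + 16384 = 20480;
 * an offCode above 20480 + ZSTD_REP_MOVE is rejected as corruption_detected,
 * as is any matchLength below cParams.minMatch. */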
+/* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
+static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) {
+ U32 offCode = rawOffset + ZSTD_REP_MOVE;
+ U32 repCode = 0;
+
+ if (!ll0 && rawOffset == rep[0]) {
+ repCode = 1;
+ } else if (rawOffset == rep[1]) {
+ repCode = 2 - ll0;
+ } else if (rawOffset == rep[2]) {
+ repCode = 3 - ll0;
+ } else if (ll0 && rawOffset == rep[0] - 1) {
+ repCode = 3;
+ }
+ if (repCode) {
+ /* ZSTD_storeSeq expects a number in the range [0, 2] to represent a repcode */
+ offCode = repCode - 1;
+ }
+ return offCode;
+}
+
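A worked example of the repcode mapping above, with illustrative values:

/* rep = {8, 4, 16}
 *   rawOffset = 4,  ll0 = 0  -> matches rep[1]     -> repCode = 2 -> offCode = 1
 *   rawOffset = 7,  ll0 = 1  -> matches rep[0] - 1 -> repCode = 3 -> offCode = 2
 *   rawOffset = 100          -> no repcode match   -> offCode = 100 + ZSTD_REP_MOVE */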
+/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
+ * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
+ */
+static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize) {
+ U32 idx = seqPos->idx;
+ BYTE const* ip = (BYTE const*)(src);
+ const BYTE* const iend = ip + blockSize;
+ repcodes_t updatedRepcodes;
+ U32 dictSize;
+ U32 litLength;
+ U32 matchLength;
+ U32 ll0;
+ U32 offCode;
+
+ if (cctx->cdict) {
+ dictSize = (U32)cctx->cdict->dictContentSize;
+ } else if (cctx->prefixDict.dict) {
+ dictSize = (U32)cctx->prefixDict.dictSize;
+ } else {
+ dictSize = 0;
+ }
+ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ for (; (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0) && idx < inSeqsSize; ++idx) {
+ litLength = inSeqs[idx].litLength;
+ matchLength = inSeqs[idx].matchLength;
+ ll0 = litLength == 0;
+ offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
+ updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
+
+ DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
+ if (cctx->appliedParams.validateSequences) {
+ seqPos->posInSrc += litLength + matchLength;
+ FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
+ cctx->appliedParams.cParams.windowLog, dictSize,
+ cctx->appliedParams.cParams.minMatch),
+ "Sequence validation failed");
+ }
+ RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
+ "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
+ ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
+ ip += matchLength + litLength;
+ }
+ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
+
+ if (inSeqs[idx].litLength) {
+ DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength);
+ ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength);
+ ip += inSeqs[idx].litLength;
+ seqPos->posInSrc += inSeqs[idx].litLength;
+ }
+ RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!");
+ seqPos->idx = idx+1;
+ return 0;
+}
+
+/* Returns the number of bytes to move the current read position back by. Only non-zero
+ * if we ended up splitting a sequence. Otherwise, it may return a ZSTD error if something
+ * went wrong.
+ *
+ * This function will attempt to scan through blockSize bytes represented by the sequences
+ * in inSeqs, storing any (partial) sequences.
+ *
+ * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
+ * avoid splitting a match, or to avoid splitting a match such that it would produce a match
+ * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
+ */
+static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize) {
+ U32 idx = seqPos->idx;
+ U32 startPosInSequence = seqPos->posInSequence;
+ U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;
+ size_t dictSize;
+ BYTE const* ip = (BYTE const*)(src);
+ BYTE const* iend = ip + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */
+ repcodes_t updatedRepcodes;
+ U32 bytesAdjustment = 0;
+ U32 finalMatchSplit = 0;
+ U32 litLength;
+ U32 matchLength;
+ U32 rawOffset;
+ U32 offCode;
+
+ if (cctx->cdict) {
+ dictSize = cctx->cdict->dictContentSize;
+ } else if (cctx->prefixDict.dict) {
+ dictSize = cctx->prefixDict.dictSize;
+ } else {
+ dictSize = 0;
+ }
+ DEBUGLOG(5, "ZSTD_copySequencesToSeqStore: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
+ DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
+ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
+ const ZSTD_Sequence currSeq = inSeqs[idx];
+ litLength = currSeq.litLength;
+ matchLength = currSeq.matchLength;
+ rawOffset = currSeq.offset;
+
+ /* Modify the sequence depending on where endPosInSequence lies */
+ if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) {
+ if (startPosInSequence >= litLength) {
+ startPosInSequence -= litLength;
+ litLength = 0;
+ matchLength -= startPosInSequence;
+ } else {
+ litLength -= startPosInSequence;
+ }
+ /* Move to the next sequence */
+ endPosInSequence -= currSeq.litLength + currSeq.matchLength;
+ startPosInSequence = 0;
+ idx++;
+ } else {
+ /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence
+ does not reach the end of the match. So, we have to split the sequence */
+ DEBUGLOG(6, "Require a split: diff: %u, idx: %u PIS: %u",
+ currSeq.litLength + currSeq.matchLength - endPosInSequence, idx, endPosInSequence);
+ if (endPosInSequence > litLength) {
+ U32 firstHalfMatchLength;
+ litLength = startPosInSequence >= litLength ? 0 : litLength - startPosInSequence;
+ firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength;
+ if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch) {
+ /* Only ever split the match if it is larger than the block size */
+ U32 secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence;
+ if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) {
+ /* Move the endPosInSequence backward so that it creates a match of minMatch length */
+ endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
+ bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
+ firstHalfMatchLength -= bytesAdjustment;
+ }
+ matchLength = firstHalfMatchLength;
+ /* Flag that we split the last match - after storing the sequence, exit the loop,
+ but keep the value of endPosInSequence */
+ finalMatchSplit = 1;
+ } else {
+ /* Move the position in sequence backwards so that we don't split the match, and break to store
+ * the last literals. We use the original currSeq.litLength as a marker for where endPosInSequence
+ * should go. We prefer to do this whenever it is not necessary to split the match, or if doing so
+ * would cause the first half of the match to be too small
+ */
+ bytesAdjustment = endPosInSequence - currSeq.litLength;
+ endPosInSequence = currSeq.litLength;
+ break;
+ }
+ } else {
+ /* This sequence ends inside the literals, break to store the last literals */
+ break;
+ }
+ }
+ /* Check if this offset can be represented with a repcode */
+ { U32 ll0 = (litLength == 0);
+ offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0);
+ updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
+ }
+
+ if (cctx->appliedParams.validateSequences) {
+ seqPos->posInSrc += litLength + matchLength;
+ FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
+ cctx->appliedParams.cParams.windowLog, dictSize,
+ cctx->appliedParams.cParams.minMatch),
+ "Sequence validation failed");
+ }
+ DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
+ RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
+ "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
+ ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
+ ip += matchLength + litLength;
+ }
+ DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
+ assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);
+ seqPos->idx = idx;
+ seqPos->posInSequence = endPosInSequence;
+ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
+
+ iend -= bytesAdjustment;
+ if (ip != iend) {
+ /* Store any last literals */
+ U32 lastLLSize = (U32)(iend - ip);
+ assert(ip <= iend);
+ DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize);
+ ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize);
+ seqPos->posInSrc += lastLLSize;
+ }
+
+ return bytesAdjustment;
+}
+
+typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize);
+static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) {
+ ZSTD_sequenceCopier sequenceCopier = NULL;
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode));
+ if (mode == ZSTD_sf_explicitBlockDelimiters) {
+ return ZSTD_copySequencesToSeqStoreExplicitBlockDelim;
+ } else if (mode == ZSTD_sf_noBlockDelimiters) {
+ return ZSTD_copySequencesToSeqStoreNoBlockDelim;
+ }
+ assert(sequenceCopier != NULL);
+ return sequenceCopier;
+}
+
+/* Compress, block-by-block, all of the sequences given.
+ *
+ * Returns the cumulative size of all compressed blocks (including their headers), or a ZSTD error code.
+ */
+static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* src, size_t srcSize) {
+ size_t cSize = 0;
+ U32 lastBlock;
+ size_t blockSize;
+ size_t compressedSeqsSize;
+ size_t remaining = srcSize;
+ ZSTD_sequencePosition seqPos = {0, 0, 0};
+
+ BYTE const* ip = (BYTE const*)src;
+ BYTE* op = (BYTE*)dst;
+ ZSTD_sequenceCopier sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
+
+ DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
+ /* Special case: empty frame */
+ if (remaining == 0) {
+ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1);
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "No room for empty frame block header");
+ MEM_writeLE32(op, cBlockHeader24);
+ op += ZSTD_blockHeaderSize;
+ dstCapacity -= ZSTD_blockHeaderSize;
+ cSize += ZSTD_blockHeaderSize;
+ }
+
+ while (remaining) {
+ size_t cBlockSize;
+ size_t additionalByteAdjustment;
+ lastBlock = remaining <= cctx->blockSize;
+ blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize;
+ ZSTD_resetSeqStore(&cctx->seqStore);
+ DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize);
+
+ additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize);
+ FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy");
+ blockSize -= additionalByteAdjustment;
+
+ /* If blocks are too small, emit as a nocompress block */
+ if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
+ cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
+ DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize);
+ cSize += cBlockSize;
+ ip += blockSize;
+ op += cBlockSize;
+ remaining -= blockSize;
+ dstCapacity -= cBlockSize;
+ continue;
+ }
+
+ compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore,
+ &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
+ &cctx->appliedParams,
+ op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
+ blockSize,
+ cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ cctx->bmi2);
+ FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
+ DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize);
+
+ if (!cctx->isFirstBlock &&
+ ZSTD_maybeRLE(&cctx->seqStore) &&
+ ZSTD_isRLE((BYTE const*)src, srcSize)) {
+            /* We don't want to emit our first block as an RLE even if it qualifies, because
+             * doing so will cause the decoder (CLI only) to throw a "should consume all input" error.
+             * This is only an issue for zstd <= v1.4.3.
+ */
+ compressedSeqsSize = 1;
+ }
+
+ if (compressedSeqsSize == 0) {
+ /* ZSTD_noCompressBlock writes the block header as well */
+ cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
+ DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize);
+ } else if (compressedSeqsSize == 1) {
+ cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed");
+ DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize);
+ } else {
+ U32 cBlockHeader;
+ /* Error checking and repcodes update */
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState);
+ if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+            /* Write block header into beginning of block */
+ cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
+ MEM_writeLE24(op, cBlockHeader);
+ cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
+ DEBUGLOG(4, "Writing out compressed block, size: %zu", cBlockSize);
+ }
+
+ cSize += cBlockSize;
+ DEBUGLOG(4, "cSize running total: %zu", cSize);
+
+ if (lastBlock) {
+ break;
+ } else {
+ ip += blockSize;
+ op += cBlockSize;
+ remaining -= blockSize;
+ dstCapacity -= cBlockSize;
+ cctx->isFirstBlock = 0;
+ }
+ }
+
+ return cSize;
+}
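For reference, the header written by MEM_writeLE24() above follows the standard zstd block header layout (RFC 8878): bit 0 is Last_Block, bits 1-2 are Block_Type (0 raw, 1 RLE, 2 compressed), and bits 3-23 are Block_Size, stored little-endian in three bytes. A small self-contained sketch, with helper names of our own choosing:

#include <stdint.h>
#include <stdio.h>

/* Write the 3-byte zstd block header: lastBlock bit, 2-bit type, 21-bit size, little-endian. */
static void writeBlockHeaderLE24(uint8_t out[3], int lastBlock, unsigned blockType, uint32_t blockSize)
{
    uint32_t const header = (uint32_t)lastBlock + (blockType << 1) + (blockSize << 3);
    out[0] = (uint8_t)(header & 0xFF);
    out[1] = (uint8_t)((header >> 8) & 0xFF);
    out[2] = (uint8_t)((header >> 16) & 0xFF);
}

int main(void)
{
    uint8_t hdr[3];
    writeBlockHeaderLE24(hdr, 1, 2, 1234);   /* last compressed block, 1234 bytes of payload */
    printf("%02X %02X %02X\n", hdr[0], hdr[1], hdr[2]);   /* prints: 95 26 00 */
    return 0;
}

For a last compressed block of 1234 bytes this prints 95 26 00, matching the expression used in the hunk above.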
+
+size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* src, size_t srcSize) {
+ BYTE* op = (BYTE*)dst;
+ size_t cSize = 0;
+ size_t compressedBlocksSize = 0;
+ size_t frameHeaderSize = 0;
+
+ /* Transparent initialization stage, same as compressStream2() */
+ DEBUGLOG(3, "ZSTD_compressSequences()");
+ assert(cctx != NULL);
+ FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed");
+ /* Begin writing output, starting with frame header */
+ frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID);
+ op += frameHeaderSize;
+ dstCapacity -= frameHeaderSize;
+ cSize += frameHeaderSize;
+ if (cctx->appliedParams.fParams.checksumFlag && srcSize) {
+ XXH64_update(&cctx->xxhState, src, srcSize);
+ }
+ /* cSize includes block header size and compressed sequences size */
+ compressedBlocksSize = ZSTD_compressSequences_internal(cctx,
+ op, dstCapacity,
+ inSeqs, inSeqsSize,
+ src, srcSize);
+ FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!");
+ cSize += compressedBlocksSize;
+ dstCapacity -= compressedBlocksSize;
+
+ if (cctx->appliedParams.fParams.checksumFlag) {
+ U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
+ DEBUGLOG(4, "Write checksum : %08X", (unsigned)checksum);
+ MEM_writeLE32((char*)dst + cSize, checksum);
+ cSize += 4;
+ }
+
+ DEBUGLOG(3, "Final compressed size: %zu", cSize);
+ return cSize;
+}
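ZSTD_compressSequences() is the new public entry point added by this update. The following is a rough usage sketch only, assuming the experimental declarations in zstd.h (ZSTD_STATIC_LINKING_ONLY) for ZSTD_Sequence, ZSTD_c_blockDelimiters, and ZSTD_c_validateSequences; the sequence values are hand-written rather than produced by a real match finder.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char src[] = "abcabcabcabcabcabc";
    size_t const srcSize = sizeof(src) - 1;   /* 18 bytes */

    /* One sequence: 3 literals, then a 15-byte match at distance 3,
     * followed by an all-zero sequence acting as the block delimiter. */
    ZSTD_Sequence seqs[2];
    memset(seqs, 0, sizeof(seqs));
    seqs[0].offset = 3; seqs[0].litLength = 3; seqs[0].matchLength = 15;

    {   ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        char dst[256];
        size_t cSize;
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1);
        cSize = ZSTD_compressSequences(cctx, dst, sizeof(dst), seqs, 2, src, srcSize);
        if (ZSTD_isError(cSize)) printf("error: %s\n", ZSTD_getErrorName(cSize));
        else printf("compressed %zu -> %zu bytes\n", srcSize, cSize);
        ZSTD_freeCCtx(cctx);
    }
    return 0;
}

With explicit block delimiters, the litLength/matchLength sums of the provided sequences (including the delimiter) must add up to srcSize, which the hand-written values above satisfy.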
+
/*====== Finalize ======*/
/*! ZSTD_flushStream() :
@@ -4115,6 +6144,7 @@ size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
#define ZSTD_MAX_CLEVEL 22
int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
+int ZSTD_defaultCLevel(void) { return ZSTD_CLEVEL_DEFAULT; }
static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
{ /* "default" - for any srcSize > 256 KB */
@@ -4223,25 +6253,110 @@ static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEV
},
};
+static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize)
+{
+ ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict);
+ switch (cParams.strategy) {
+ case ZSTD_fast:
+ case ZSTD_dfast:
+ break;
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ cParams.hashLog += ZSTD_LAZY_DDSS_BUCKET_LOG;
+ break;
+ case ZSTD_btlazy2:
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ case ZSTD_btultra2:
+ break;
+ }
+ return cParams;
+}
+
+static int ZSTD_dedicatedDictSearch_isSupported(
+ ZSTD_compressionParameters const* cParams)
+{
+ return (cParams->strategy >= ZSTD_greedy)
+ && (cParams->strategy <= ZSTD_lazy2)
+ && (cParams->hashLog > cParams->chainLog)
+ && (cParams->chainLog <= 24);
+}
+
+/**
+ * Reverses the adjustment applied to cparams when enabling dedicated dict
+ * search. This is used to recover the params set to be used in the working
+ * context. (Otherwise, those tables would also grow.)
+ */
+static void ZSTD_dedicatedDictSearch_revertCParams(
+ ZSTD_compressionParameters* cParams) {
+ switch (cParams->strategy) {
+ case ZSTD_fast:
+ case ZSTD_dfast:
+ break;
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG;
+ if (cParams->hashLog < ZSTD_HASHLOG_MIN) {
+ cParams->hashLog = ZSTD_HASHLOG_MIN;
+ }
+ break;
+ case ZSTD_btlazy2:
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ case ZSTD_btultra2:
+ break;
+ }
+}
+
+static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
+{
+ switch (mode) {
+ case ZSTD_cpm_unknown:
+ case ZSTD_cpm_noAttachDict:
+ case ZSTD_cpm_createCDict:
+ break;
+ case ZSTD_cpm_attachDict:
+ dictSize = 0;
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ { int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN;
+ size_t const addedSize = unknown && dictSize > 0 ? 500 : 0;
+ return unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize;
+ }
+}
+
/*! ZSTD_getCParams_internal() :
* @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
* Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.
- * Use dictSize == 0 for unknown or unused. */
-static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
+ * Use dictSize == 0 for unknown or unused.
+ * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */
+static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
{
- int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN;
- size_t const addedSize = unknown && dictSize > 0 ? 500 : 0;
- U64 const rSize = unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize;
+ U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
- int row = compressionLevel;
+ int row;
DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel);
+
+ /* row */
if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
- if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */
- if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
+ else if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */
+ else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
+ else row = compressionLevel;
+
{ ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
- if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel); /* acceleration factor */
+ DEBUGLOG(5, "ZSTD_getCParams_internal selected tableID: %u row: %u strat: %u", tableID, row, (U32)cp.strategy);
+ /* acceleration factor */
+ if (compressionLevel < 0) {
+ int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel);
+ cp.targetLength = (unsigned)(-clampedCompressionLevel);
+ }
/* refine parameters based on srcSize & dictSize */
- return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize);
+ return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode);
}
 }
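As an aside, the table/row selection above reduces to a couple of comparisons. Below is a standalone sketch with invented helper names; the defaultLevel of 3 mirrors what we believe ZSTD_CLEVEL_DEFAULT is in this release, and 22 mirrors ZSTD_MAX_CLEVEL from earlier in this file.

#include <stdio.h>

/* Which of the four parameter tables applies, by effective source-size bucket. */
static unsigned selectTableID(unsigned long long rSize)
{
    return (rSize <= (256u << 10)) + (rSize <= (128u << 10)) + (rSize <= (16u << 10));
}

/* Which row within the table: 0 for negative (fast) levels, clamped at maxLevel. */
static int selectRow(int level, int maxLevel, int defaultLevel)
{
    if (level == 0) return defaultLevel;
    if (level < 0) return 0;
    return level > maxLevel ? maxLevel : level;
}

int main(void)
{
    printf("1 MB   -> table %u\n", selectTableID(1u << 20));   /* 0 : "default"  */
    printf("200 KB -> table %u\n", selectTableID(200u << 10)); /* 1 : <= 256 KB  */
    printf("64 KB  -> table %u\n", selectTableID(64u << 10));  /* 2 : <= 128 KB  */
    printf("8 KB   -> table %u\n", selectTableID(8u << 10));   /* 3 : <= 16 KB   */
    printf("level -5 -> row %d (targetLength becomes 5)\n", selectRow(-5, 22, 3));
    printf("level 40 -> row %d\n", selectRow(40, 22, 3));
    return 0;
}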
@@ -4251,18 +6366,18 @@ static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel,
ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
{
if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
- return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize);
+ return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
}
/*! ZSTD_getParams() :
* same idea as ZSTD_getCParams()
* @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
* Fields of `ZSTD_frameParameters` are set to default values */
-static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
+static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) {
ZSTD_parameters params;
- ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize);
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
- memset(&params, 0, sizeof(params));
+ ZSTD_memset(&params, 0, sizeof(params));
params.cParams = cParams;
params.fParams.contentSizeFlag = 1;
return params;
@@ -4274,5 +6389,5 @@ static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned lo
* Fields of `ZSTD_frameParameters` are set to default values */
ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
- return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize);
+ return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
}
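A hedged usage sketch for the public wrappers above, including the newly added ZSTD_defaultCLevel(). It assumes these declarations are available under ZSTD_STATIC_LINKING_ONLY in this vendored copy's zstd.h; the field names follow ZSTD_compressionParameters and ZSTD_parameters as declared there.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdio.h>

int main(void)
{
    /* srcSizeHint == 0 is treated as "unknown", per the wrappers above. */
    ZSTD_compressionParameters const cp = ZSTD_getCParams(ZSTD_defaultCLevel(), 100 * 1024, 0);
    ZSTD_parameters const p = ZSTD_getParams(19, 0 /* unknown */, 64 * 1024 /* dictSize */);

    printf("default level: %d (range %d..%d)\n",
           ZSTD_defaultCLevel(), ZSTD_minCLevel(), ZSTD_maxCLevel());
    printf("wlog=%u clog=%u hlog=%u slog=%u minMatch=%u tlen=%u strat=%d\n",
           cp.windowLog, cp.chainLog, cp.hashLog, cp.searchLog,
           cp.minMatch, cp.targetLength, (int)cp.strategy);
    printf("contentSizeFlag=%d\n", p.fParams.contentSizeFlag);
    return 0;
}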
diff --git a/Utilities/cmzstd/lib/compress/zstd_compress_internal.h b/Utilities/cmzstd/lib/compress/zstd_compress_internal.h
index db73f6ce21..3b04fd09f6 100644
--- a/Utilities/cmzstd/lib/compress/zstd_compress_internal.h
+++ b/Utilities/cmzstd/lib/compress/zstd_compress_internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -28,7 +28,6 @@
extern "C" {
#endif
-
/*-*************************************
* Constants
***************************************/
@@ -64,7 +63,7 @@ typedef struct {
} ZSTD_localDict;
typedef struct {
- U32 CTable[HUF_CTABLE_SIZE_U32(255)];
+ HUF_CElt CTable[HUF_CTABLE_SIZE_U32(255)];
HUF_repeat repeatMode;
} ZSTD_hufCTables_t;
@@ -82,12 +81,76 @@ typedef struct {
ZSTD_fseCTables_t fse;
} ZSTD_entropyCTables_t;
+/***********************************************
+* Entropy buffer statistics structs and funcs *
+***********************************************/
+/** ZSTD_hufCTablesMetadata_t :
+ * Stores Literals Block Type for a super-block in hType, and
+ * huffman tree description in hufDesBuffer.
+ * hufDesSize refers to the size of huffman tree description in bytes.
+ * This metadata is populated in ZSTD_buildBlockEntropyStats_literals() */
typedef struct {
- U32 off;
- U32 len;
+ symbolEncodingType_e hType;
+ BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
+ size_t hufDesSize;
+} ZSTD_hufCTablesMetadata_t;
+
+/** ZSTD_fseCTablesMetadata_t :
+ * Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and
+ * fse tables in fseTablesBuffer.
+ * fseTablesSize refers to the size of fse tables in bytes.
+ * This metadata is populated in ZSTD_buildBlockEntropyStats_sequences() */
+typedef struct {
+ symbolEncodingType_e llType;
+ symbolEncodingType_e ofType;
+ symbolEncodingType_e mlType;
+ BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE];
+ size_t fseTablesSize;
+  size_t lastCountSize; /* This is to account for a bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
+} ZSTD_fseCTablesMetadata_t;
+
+typedef struct {
+ ZSTD_hufCTablesMetadata_t hufMetadata;
+ ZSTD_fseCTablesMetadata_t fseMetadata;
+} ZSTD_entropyCTablesMetadata_t;
+
+/** ZSTD_buildBlockEntropyStats() :
+ * Builds entropy for the block.
+ * @return : 0 on success or error code */
+size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize);
+
+/*********************************
+* Compression internals structs *
+*********************************/
+
+typedef struct {
+ U32 off; /* Offset code (offset + ZSTD_REP_MOVE) for the match */
+ U32 len; /* Raw length of match */
} ZSTD_match_t;
typedef struct {
+ U32 offset; /* Offset of sequence */
+ U32 litLength; /* Length of literals prior to match */
+ U32 matchLength; /* Raw length of match */
+} rawSeq;
+
+typedef struct {
+ rawSeq* seq; /* The start of the sequences */
+ size_t pos; /* The index in seq where reading stopped. pos <= size. */
+ size_t posInSequence; /* The position within the sequence at seq[pos] where reading
+ stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */
+ size_t size; /* The number of sequences. <= capacity. */
+ size_t capacity; /* The capacity starting from `seq` pointer */
+} rawSeqStore_t;
+
+UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};
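Aside: the pos/posInSequence cursor pair added to rawSeqStore_t is easiest to see with a toy consumer. The sketch below uses its own struct and function names and is not the library's code; it only illustrates the invariant posInSequence <= litLength + matchLength and how the cursor advances across sequence boundaries.

#include <stddef.h>
#include <stdio.h>

typedef struct { unsigned offset, litLength, matchLength; } Seq;
typedef struct { const Seq* seq; size_t pos, posInSequence, size; } SeqCursor;

/* Consume `bytes` of source, advancing pos/posInSequence like the real reader would. */
static size_t advance(SeqCursor* c, size_t bytes)
{
    size_t consumed = 0;
    while (bytes > 0 && c->pos < c->size) {
        size_t const seqLen = c->seq[c->pos].litLength + c->seq[c->pos].matchLength;
        size_t const left   = seqLen - c->posInSequence;
        size_t const step   = bytes < left ? bytes : left;
        c->posInSequence += step;
        consumed += step;
        bytes -= step;
        if (c->posInSequence == seqLen) { c->pos++; c->posInSequence = 0; }
    }
    return consumed;
}

int main(void)
{
    Seq const seqs[2] = { {8, 4, 10}, {16, 2, 20} };   /* 14 bytes, then 22 bytes */
    SeqCursor c = { seqs, 0, 0, 2 };
    printf("consumed %zu, pos=%zu posInSequence=%zu\n", advance(&c, 20), c.pos, c.posInSequence);
    return 0;
}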
+
+typedef struct {
int price;
U32 off;
U32 mlen;
@@ -125,14 +188,21 @@ typedef struct {
} ZSTD_compressedBlockState_t;
typedef struct {
- BYTE const* nextSrc; /* next block here to continue on current prefix */
- BYTE const* base; /* All regular indexes relative to this position */
- BYTE const* dictBase; /* extDict indexes relative to this position */
- U32 dictLimit; /* below that point, need extDict */
- U32 lowLimit; /* below that point, no more valid data */
+ BYTE const* nextSrc; /* next block here to continue on current prefix */
+ BYTE const* base; /* All regular indexes relative to this position */
+ BYTE const* dictBase; /* extDict indexes relative to this position */
+ U32 dictLimit; /* below that point, need extDict */
+ U32 lowLimit; /* below that point, no more valid data */
+ U32 nbOverflowCorrections; /* Number of times overflow correction has run since
+ * ZSTD_window_init(). Useful for debugging coredumps
+ * and for ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY.
+ */
} ZSTD_window_t;
typedef struct ZSTD_matchState_t ZSTD_matchState_t;
+
+#define ZSTD_ROW_HASH_CACHE_SIZE 8 /* Size of prefetching hash cache for row-based matchfinder */
+
struct ZSTD_matchState_t {
ZSTD_window_t window; /* State for window round buffer management */
U32 loadedDictEnd; /* index of end of dictionary, within context's referential.
@@ -144,12 +214,24 @@ struct ZSTD_matchState_t {
*/
U32 nextToUpdate; /* index from which to continue table update */
U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */
+
+ U32 rowHashLog; /* For row-based matchfinder: Hashlog based on nb of rows in the hashTable.*/
+ U16* tagTable; /* For row-based matchFinder: A row-based table containing the hashes and head index. */
+ U32 hashCache[ZSTD_ROW_HASH_CACHE_SIZE]; /* For row-based matchFinder: a cache of hashes to improve speed */
+
U32* hashTable;
U32* hashTable3;
U32* chainTable;
+
+ U32 forceNonContiguous; /* Non-zero if we should force non-contiguous load for the next window update. */
+
+ int dedicatedDictSearch; /* Indicates whether this matchState is using the
+ * dedicated dictionary search structure.
+ */
optState_t opt; /* optimal parser state */
const ZSTD_matchState_t* dictMatchState;
ZSTD_compressionParameters cParams;
+ const rawSeqStore_t* ldmSeqStore;
};
typedef struct {
@@ -164,12 +246,21 @@ typedef struct {
} ldmEntry_t;
typedef struct {
+ BYTE const* split;
+ U32 hash;
+ U32 checksum;
+ ldmEntry_t* bucket;
+} ldmMatchCandidate_t;
+
+#define LDM_BATCH_SIZE 64
+
+typedef struct {
ZSTD_window_t window; /* State for the window round buffer management */
ldmEntry_t* hashTable;
U32 loadedDictEnd;
BYTE* bucketOffsets; /* Next position in bucket to insert entry */
- U64 hashPower; /* Used to compute the rolling hash.
- * Depends on ldmParams.minMatchLength */
+ size_t splitIndices[LDM_BATCH_SIZE];
+ ldmMatchCandidate_t matchCandidates[LDM_BATCH_SIZE];
} ldmState_t;
typedef struct {
@@ -182,19 +273,6 @@ typedef struct {
} ldmParams_t;
typedef struct {
- U32 offset;
- U32 litLength;
- U32 matchLength;
-} rawSeq;
-
-typedef struct {
- rawSeq* seq; /* The start of the sequences */
- size_t pos; /* The position where reading stopped. <= size. */
- size_t size; /* The number of sequences. <= capacity. */
- size_t capacity; /* The capacity starting from `seq` pointer */
-} rawSeqStore_t;
-
-typedef struct {
int collectSequences;
ZSTD_Sequence* seqStart;
size_t seqIndex;
@@ -228,17 +306,52 @@ struct ZSTD_CCtx_params_s {
/* Long distance matching parameters */
ldmParams_t ldmParams;
+ /* Dedicated dict search algorithm trigger */
+ int enableDedicatedDictSearch;
+
+ /* Input/output buffer modes */
+ ZSTD_bufferMode_e inBufferMode;
+ ZSTD_bufferMode_e outBufferMode;
+
+ /* Sequence compression API */
+ ZSTD_sequenceFormat_e blockDelimiters;
+ int validateSequences;
+
+ /* Block splitting */
+ int splitBlocks;
+
+ /* Param for deciding whether to use row-based matchfinder */
+ ZSTD_useRowMatchFinderMode_e useRowMatchFinder;
+
+ /* Always load a dictionary in ext-dict mode (not prefix mode)? */
+ int deterministicRefPrefix;
+
/* Internal use, for createCCtxParams() and freeCCtxParams() only */
ZSTD_customMem customMem;
}; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
+#define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2))
+#define ENTROPY_WORKSPACE_SIZE (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE)
+
+/**
+ * Indicates whether this compression proceeds directly from user-provided
+ * source buffer to user-provided destination buffer (ZSTDb_not_buffered), or
+ * whether the context needs to buffer the input/output (ZSTDb_buffered).
+ */
+typedef enum {
+ ZSTDb_not_buffered,
+ ZSTDb_buffered
+} ZSTD_buffered_policy_e;
+
struct ZSTD_CCtx_s {
ZSTD_compressionStage_e stage;
int cParamsChanged; /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
ZSTD_CCtx_params requestedParams;
ZSTD_CCtx_params appliedParams;
+ ZSTD_CCtx_params simpleApiParams; /* Param storage used by the simple API - not sticky. Must only be used in top-level simple API functions for storage. */
U32 dictID;
+ size_t dictContentSize;
ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
size_t blockSize;
@@ -247,6 +360,7 @@ struct ZSTD_CCtx_s {
unsigned long long producedCSize;
XXH64_state_t xxhState;
ZSTD_customMem customMem;
+ ZSTD_threadPool* pool;
size_t staticSize;
SeqCollector seqCollector;
int isFirstBlock;
@@ -258,7 +372,10 @@ struct ZSTD_CCtx_s {
size_t maxNbLdmSequences;
rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
ZSTD_blockState_t blockState;
- U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
+ U32* entropyWorkspace; /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */
+
+    /* Whether we are streaming or not */
+ ZSTD_buffered_policy_e bufferedPolicy;
/* streaming */
char* inBuff;
@@ -273,6 +390,10 @@ struct ZSTD_CCtx_s {
ZSTD_cStreamStage streamStage;
U32 frameEnded;
+ /* Stable in/out buffer verification */
+ ZSTD_inBuffer expectedInBuffer;
+ size_t expectedOutBufferSize;
+
/* Dictionary */
ZSTD_localDict localDict;
const ZSTD_CDict* cdict;
@@ -282,17 +403,46 @@ struct ZSTD_CCtx_s {
#ifdef ZSTD_MULTITHREAD
ZSTDMT_CCtx* mtctx;
#endif
+
+ /* Tracing */
+#if ZSTD_TRACE
+ ZSTD_TraceCtx traceCtx;
+#endif
};
typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
-typedef enum { ZSTD_noDict = 0, ZSTD_extDict = 1, ZSTD_dictMatchState = 2 } ZSTD_dictMode_e;
-
+typedef enum {
+ ZSTD_noDict = 0,
+ ZSTD_extDict = 1,
+ ZSTD_dictMatchState = 2,
+ ZSTD_dedicatedDictSearch = 3
+} ZSTD_dictMode_e;
+
+typedef enum {
+ ZSTD_cpm_noAttachDict = 0, /* Compression with ZSTD_noDict or ZSTD_extDict.
+ * In this mode we use both the srcSize and the dictSize
+ * when selecting and adjusting parameters.
+ */
+ ZSTD_cpm_attachDict = 1, /* Compression with ZSTD_dictMatchState or ZSTD_dedicatedDictSearch.
+ * In this mode we only take the srcSize into account when selecting
+ * and adjusting parameters.
+ */
+ ZSTD_cpm_createCDict = 2, /* Creating a CDict.
+ * In this mode we take both the source size and the dictionary size
+ * into account when selecting and adjusting the parameters.
+ */
+ ZSTD_cpm_unknown = 3, /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams.
+ * We don't know what these parameters are for. We default to the legacy
+ * behavior of taking both the source size and the dict size into account
+ * when selecting and adjusting parameters.
+ */
+} ZSTD_cParamMode_e;
typedef size_t (*ZSTD_blockCompressor) (
ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode);
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_useRowMatchFinderMode_e rowMatchfinderMode, ZSTD_dictMode_e dictMode);
MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
@@ -345,7 +495,7 @@ MEM_STATIC repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 con
newReps.rep[1] = rep[0];
newReps.rep[0] = currentOffset;
} else { /* repCode == 0 */
- memcpy(&newReps, rep, sizeof(newReps));
+ ZSTD_memcpy(&newReps, rep, sizeof(newReps));
}
}
return newReps;
@@ -372,7 +522,7 @@ MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const voi
RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
dstSize_tooSmall, "dst buf too small for uncompressed block");
MEM_writeLE24(dst, cBlockHeader24);
- memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
+ ZSTD_memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
return ZSTD_blockHeaderSize + srcSize;
}
@@ -469,8 +619,8 @@ void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* litera
/* literal Length */
if (litLength>0xFFFF) {
- assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
- seqStorePtr->longLengthID = 1;
+ assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
+ seqStorePtr->longLengthType = ZSTD_llt_literalLength;
seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
}
seqStorePtr->sequences[0].litLength = (U16)litLength;
@@ -480,8 +630,8 @@ void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* litera
/* match Length */
if (mlBase>0xFFFF) {
- assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
- seqStorePtr->longLengthID = 2;
+ assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
+ seqStorePtr->longLengthType = ZSTD_llt_matchLength;
seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
}
seqStorePtr->sequences[0].matchLength = (U16)mlBase;
@@ -498,8 +648,12 @@ static unsigned ZSTD_NbCommonBytes (size_t val)
if (MEM_isLittleEndian()) {
if (MEM_64bits()) {
# if defined(_MSC_VER) && defined(_WIN64)
- unsigned long r = 0;
- return _BitScanForward64( &r, (U64)val ) ? (unsigned)(r >> 3) : 0;
+# if STATIC_BMI2
+ return _tzcnt_u64(val) >> 3;
+# else
+ unsigned long r = 0;
+ return _BitScanForward64( &r, (U64)val ) ? (unsigned)(r >> 3) : 0;
+# endif
# elif defined(__GNUC__) && (__GNUC__ >= 4)
return (__builtin_ctzll((U64)val) >> 3);
# else
@@ -530,8 +684,12 @@ static unsigned ZSTD_NbCommonBytes (size_t val)
} else { /* Big Endian CPU */
if (MEM_64bits()) {
# if defined(_MSC_VER) && defined(_WIN64)
- unsigned long r = 0;
- return _BitScanReverse64( &r, val ) ? (unsigned)(r >> 3) : 0;
+# if STATIC_BMI2
+ return _lzcnt_u64(val) >> 3;
+# else
+ unsigned long r = 0;
+ return _BitScanReverse64(&r, (U64)val) ? (unsigned)(r >> 3) : 0;
+# endif
# elif defined(__GNUC__) && (__GNUC__ >= 4)
return (__builtin_clzll(val) >> 3);
# else
@@ -626,7 +784,8 @@ static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
-MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
+MEM_STATIC FORCE_INLINE_ATTR
+size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
{
switch(mls)
{
@@ -723,6 +882,13 @@ MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
window->dictLimit = end;
}
+MEM_STATIC U32 ZSTD_window_isEmpty(ZSTD_window_t const window)
+{
+ return window.dictLimit == 1 &&
+ window.lowLimit == 1 &&
+ (window.nextSrc - window.base) == 1;
+}
+
/**
* ZSTD_window_hasExtDict():
* Returns non-zero if the window has a non-empty extDict.
@@ -742,20 +908,74 @@ MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
return ZSTD_window_hasExtDict(ms->window) ?
ZSTD_extDict :
ms->dictMatchState != NULL ?
- ZSTD_dictMatchState :
+ (ms->dictMatchState->dedicatedDictSearch ? ZSTD_dedicatedDictSearch : ZSTD_dictMatchState) :
ZSTD_noDict;
}
+/* Defining this macro to non-zero tells zstd to run the overflow correction
+ * code much more frequently. This is very inefficient, and should only be
+ * used for tests and fuzzers.
+ */
+#ifndef ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY
+# ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+# define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 1
+# else
+# define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 0
+# endif
+#endif
+
+/**
+ * ZSTD_window_canOverflowCorrect():
+ * Returns non-zero if the indices are large enough for overflow correction
+ * to work correctly without impacting compression ratio.
+ */
+MEM_STATIC U32 ZSTD_window_canOverflowCorrect(ZSTD_window_t const window,
+ U32 cycleLog,
+ U32 maxDist,
+ U32 loadedDictEnd,
+ void const* src)
+{
+ U32 const cycleSize = 1u << cycleLog;
+ U32 const curr = (U32)((BYTE const*)src - window.base);
+ U32 const minIndexToOverflowCorrect = cycleSize + MAX(maxDist, cycleSize);
+
+ /* Adjust the min index to backoff the overflow correction frequency,
+ * so we don't waste too much CPU in overflow correction. If this
+ * computation overflows we don't really care, we just need to make
+ * sure it is at least minIndexToOverflowCorrect.
+ */
+ U32 const adjustment = window.nbOverflowCorrections + 1;
+ U32 const adjustedIndex = MAX(minIndexToOverflowCorrect * adjustment,
+ minIndexToOverflowCorrect);
+ U32 const indexLargeEnough = curr > adjustedIndex;
+
+ /* Only overflow correct early if the dictionary is invalidated already,
+ * so we don't hurt compression ratio.
+ */
+ U32 const dictionaryInvalidated = curr > maxDist + loadedDictEnd;
+
+ return indexLargeEnough && dictionaryInvalidated;
+}
+
/**
* ZSTD_window_needOverflowCorrection():
* Returns non-zero if the indices are getting too large and need overflow
* protection.
*/
MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
+ U32 cycleLog,
+ U32 maxDist,
+ U32 loadedDictEnd,
+ void const* src,
void const* srcEnd)
{
- U32 const current = (U32)((BYTE const*)srcEnd - window.base);
- return current > ZSTD_CURRENT_MAX;
+ U32 const curr = (U32)((BYTE const*)srcEnd - window.base);
+ if (ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY) {
+ if (ZSTD_window_canOverflowCorrect(window, cycleLog, maxDist, loadedDictEnd, src)) {
+ return 1;
+ }
+ }
+ return curr > ZSTD_CURRENT_MAX;
}
/**
@@ -766,7 +986,6 @@ MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
*
* The least significant cycleLog bits of the indices must remain the same,
* which may be 0. Every index up to maxDist in the past must be valid.
- * NOTE: (maxDist & cycleMask) must be zero.
*/
MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
U32 maxDist, void const* src)
@@ -790,17 +1009,25 @@ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
* 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
* windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
*/
- U32 const cycleMask = (1U << cycleLog) - 1;
- U32 const current = (U32)((BYTE const*)src - window->base);
- U32 const currentCycle0 = current & cycleMask;
+ U32 const cycleSize = 1u << cycleLog;
+ U32 const cycleMask = cycleSize - 1;
+ U32 const curr = (U32)((BYTE const*)src - window->base);
+ U32 const currentCycle0 = curr & cycleMask;
/* Exclude zero so that newCurrent - maxDist >= 1. */
- U32 const currentCycle1 = currentCycle0 == 0 ? (1U << cycleLog) : currentCycle0;
- U32 const newCurrent = currentCycle1 + maxDist;
- U32 const correction = current - newCurrent;
- assert((maxDist & cycleMask) == 0);
- assert(current > newCurrent);
- /* Loose bound, should be around 1<<29 (see above) */
- assert(correction > 1<<28);
+ U32 const currentCycle1 = currentCycle0 == 0 ? cycleSize : currentCycle0;
+ U32 const newCurrent = currentCycle1 + MAX(maxDist, cycleSize);
+ U32 const correction = curr - newCurrent;
+ /* maxDist must be a power of two so that:
+ * (newCurrent & cycleMask) == (curr & cycleMask)
+ * This is required to not corrupt the chains / binary tree.
+ */
+ assert((maxDist & (maxDist - 1)) == 0);
+ assert((curr & cycleMask) == (newCurrent & cycleMask));
+ assert(curr > newCurrent);
+ if (!ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY) {
+ /* Loose bound, should be around 1<<29 (see above) */
+ assert(correction > 1<<28);
+ }
window->base += correction;
window->dictBase += correction;
@@ -816,6 +1043,8 @@ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
assert(window->lowLimit <= newCurrent);
assert(window->dictLimit <= newCurrent);
+ ++window->nbOverflowCorrections;
+
DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
window->lowLimit);
return correction;
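For intuition, the correction arithmetic above can be checked with concrete numbers. The standalone sketch below uses invented example values (cycleLog 20, windowLog 23, an index around 768 MB) and simply reproduces the computation to show that the low cycleLog bits are preserved and every index within maxDist stays valid.

#include <stdio.h>

int main(void)
{
    unsigned const cycleLog  = 20;                 /* chain-table cycle: 1 MB  */
    unsigned const cycleSize = 1u << cycleLog;
    unsigned const cycleMask = cycleSize - 1;
    unsigned const maxDist   = 1u << 23;           /* windowLog = 23 -> 8 MB   */
    unsigned const curr      = 0x30000005u;        /* current index, ~768 MB in */

    unsigned const currentCycle0 = curr & cycleMask;
    unsigned const currentCycle1 = currentCycle0 == 0 ? cycleSize : currentCycle0;
    unsigned const maxBase       = maxDist > cycleSize ? maxDist : cycleSize;
    unsigned const newCurrent    = currentCycle1 + maxBase;
    unsigned const correction    = curr - newCurrent;

    printf("newCurrent = 0x%X, correction = 0x%X\n", newCurrent, correction);
    printf("low bits preserved: %d\n", (curr & cycleMask) == (newCurrent & cycleMask));
    return 0;
}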
@@ -919,12 +1148,13 @@ ZSTD_checkDictValidity(const ZSTD_window_t* window,
}
MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
- memset(window, 0, sizeof(*window));
+ ZSTD_memset(window, 0, sizeof(*window));
window->base = (BYTE const*)"";
window->dictBase = (BYTE const*)"";
window->dictLimit = 1; /* start from 1, so that 1st position is valid */
window->lowLimit = 1; /* it ensures first and later CCtx usages compress the same */
window->nextSrc = window->base + 1; /* see issue #1241 */
+ window->nbOverflowCorrections = 0;
}
/**
@@ -935,7 +1165,8 @@ MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
* Returns non-zero if the segment is contiguous.
*/
MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
- void const* src, size_t srcSize)
+ void const* src, size_t srcSize,
+ int forceNonContiguous)
{
BYTE const* const ip = (BYTE const*)src;
U32 contiguous = 1;
@@ -945,7 +1176,7 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
assert(window->base != NULL);
assert(window->dictBase != NULL);
/* Check if blocks follow each other */
- if (src != window->nextSrc) {
+ if (src != window->nextSrc || forceNonContiguous) {
/* not contiguous */
size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
@@ -973,12 +1204,16 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
/**
* Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.
*/
-MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 current, unsigned windowLog)
+MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
{
U32 const maxDistance = 1U << windowLog;
U32 const lowestValid = ms->window.lowLimit;
- U32 const withinWindow = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;
+ U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
U32 const isDictionary = (ms->loadedDictEnd != 0);
+ /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary
+ * is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't
+ * valid for the entire block. So this check is sufficient to find the lowest valid match index.
+ */
U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
return matchLowest;
}
@@ -986,12 +1221,15 @@ MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 current
/**
* Returns the lowest allowed match index in the prefix.
*/
-MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 current, unsigned windowLog)
+MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
{
U32 const maxDistance = 1U << windowLog;
U32 const lowestValid = ms->window.dictLimit;
- U32 const withinWindow = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;
+ U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
U32 const isDictionary = (ms->loadedDictEnd != 0);
+ /* When computing the lowest prefix index we need to take the dictionary into account to handle
+ * the edge case where the dictionary and the source are contiguous in memory.
+ */
U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
return matchLowest;
}
@@ -1045,7 +1283,6 @@ MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
* assumptions : magic number supposed already checked
* and dictSize >= 8 */
size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
- short* offcodeNCount, unsigned* offcodeMaxValue,
const void* const dict, size_t dictSize);
void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);
@@ -1061,7 +1298,7 @@ void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);
* Note: srcSizeHint == 0 means 0!
*/
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
- const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize);
+ const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
/*! ZSTD_initCStream_internal() :
* Private use only. Init streaming operation.
@@ -1122,4 +1359,9 @@ size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSe
* condition for correct operation : hashLog > 1 */
U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);
+/** ZSTD_CCtx_trace() :
+ * Trace the end of a compression call.
+ */
+void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize);
+
#endif /* ZSTD_COMPRESS_H */
diff --git a/Utilities/cmzstd/lib/compress/zstd_compress_literals.c b/Utilities/cmzstd/lib/compress/zstd_compress_literals.c
index 17e7168d89..008337bb1b 100644
--- a/Utilities/cmzstd/lib/compress/zstd_compress_literals.c
+++ b/Utilities/cmzstd/lib/compress/zstd_compress_literals.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -15,7 +15,7 @@
size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
- BYTE* const ostart = (BYTE* const)dst;
+ BYTE* const ostart = (BYTE*)dst;
U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");
@@ -35,14 +35,14 @@ size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src,
assert(0);
}
- memcpy(ostart + flSize, src, srcSize);
+ ZSTD_memcpy(ostart + flSize, src, srcSize);
DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
return srcSize + flSize;
}
size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
- BYTE* const ostart = (BYTE* const)dst;
+ BYTE* const ostart = (BYTE*)dst;
U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
(void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */
@@ -86,7 +86,7 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
disableLiteralCompression, (U32)srcSize);
/* Prepare nextEntropy assuming reusing the existing table */
- memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
if (disableLiteralCompression)
return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
@@ -117,12 +117,12 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
}
}
- if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
- memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) {
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
}
if (cLitSize==1) {
- memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
}
diff --git a/Utilities/cmzstd/lib/compress/zstd_compress_literals.h b/Utilities/cmzstd/lib/compress/zstd_compress_literals.h
index 8b08705743..9904c0cd30 100644
--- a/Utilities/cmzstd/lib/compress/zstd_compress_literals.h
+++ b/Utilities/cmzstd/lib/compress/zstd_compress_literals.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/Utilities/cmzstd/lib/compress/zstd_compress_sequences.c b/Utilities/cmzstd/lib/compress/zstd_compress_sequences.c
index f9f8097c83..611eabdcbb 100644
--- a/Utilities/cmzstd/lib/compress/zstd_compress_sequences.c
+++ b/Utilities/cmzstd/lib/compress/zstd_compress_sequences.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -51,6 +51,19 @@ static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
}
/**
+ * Returns true if we should use ncount=-1, and false if we should
+ * use ncount=1 for low probability symbols instead.
+ */
+static unsigned ZSTD_useLowProbCount(size_t const nbSeq)
+{
+ /* Heuristic: This should cover most blocks <= 16K and
+ * start to fade out after 16K to about 32K depending on
+ * comprssibility.
+ */
+     * compressibility.
+}
+
+/**
* Returns the cost in bytes of encoding the normalized count header.
* Returns an error if any of the helper functions return an error.
*/
@@ -60,7 +73,7 @@ static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
BYTE wksp[FSE_NCOUNTBOUND];
S16 norm[MaxSeq + 1];
const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
- FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max), "");
+ FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max, ZSTD_useLowProbCount(nbSeq)), "");
return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
}
@@ -72,6 +85,8 @@ static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t
{
unsigned cost = 0;
unsigned s;
+
+ assert(total > 0);
for (s = 0; s <= max; ++s) {
unsigned norm = (unsigned)((256 * count[s]) / total);
if (count[s] != 0 && norm == 0)
@@ -219,6 +234,11 @@ ZSTD_selectEncodingType(
return set_compressed;
}
+typedef struct {
+ S16 norm[MaxSeq + 1];
+ U32 wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(MaxSeq, MaxFSELog)];
+} ZSTD_BuildCTableWksp;
+
size_t
ZSTD_buildCTable(void* dst, size_t dstCapacity,
FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
@@ -239,13 +259,13 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
*op = codeTable[0];
return 1;
case set_repeat:
- memcpy(nextCTable, prevCTable, prevCTableSize);
+ ZSTD_memcpy(nextCTable, prevCTable, prevCTableSize);
return 0;
case set_basic:
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), ""); /* note : could be pre-calculated */
return 0;
case set_compressed: {
- S16 norm[MaxSeq + 1];
+ ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace;
size_t nbSeq_1 = nbSeq;
const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
if (count[codeTable[nbSeq-1]] > 1) {
@@ -253,10 +273,12 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
nbSeq_1--;
}
assert(nbSeq_1 > 1);
- FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max), "");
- { size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
+ assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp));
+ (void)entropyWorkspaceSize;
+ FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "");
+ { size_t const NCountSize = FSE_writeNCount(op, oend - op, wksp->norm, max, tableLog); /* overflow protected */
FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
- FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, entropyWorkspace, entropyWorkspaceSize), "");
+ FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "");
return NCountSize;
}
}
diff --git a/Utilities/cmzstd/lib/compress/zstd_compress_sequences.h b/Utilities/cmzstd/lib/compress/zstd_compress_sequences.h
index 68c6f9a5ac..7991364c2f 100644
--- a/Utilities/cmzstd/lib/compress/zstd_compress_sequences.h
+++ b/Utilities/cmzstd/lib/compress/zstd_compress_sequences.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/Utilities/cmzstd/lib/compress/zstd_compress_superblock.c b/Utilities/cmzstd/lib/compress/zstd_compress_superblock.c
index b693866c0a..e4e45069bc 100644
--- a/Utilities/cmzstd/lib/compress/zstd_compress_superblock.c
+++ b/Utilities/cmzstd/lib/compress/zstd_compress_superblock.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -15,288 +15,10 @@
#include "../common/zstd_internal.h" /* ZSTD_getSequenceLength */
#include "hist.h" /* HIST_countFast_wksp */
-#include "zstd_compress_internal.h"
+#include "zstd_compress_internal.h" /* ZSTD_[huf|fse|entropy]CTablesMetadata_t */
#include "zstd_compress_sequences.h"
#include "zstd_compress_literals.h"
-/*-*************************************
-* Superblock entropy buffer structs
-***************************************/
-/** ZSTD_hufCTablesMetadata_t :
- * Stores Literals Block Type for a super-block in hType, and
- * huffman tree description in hufDesBuffer.
- * hufDesSize refers to the size of huffman tree description in bytes.
- * This metadata is populated in ZSTD_buildSuperBlockEntropy_literal() */
-typedef struct {
- symbolEncodingType_e hType;
- BYTE hufDesBuffer[500]; /* TODO give name to this value */
- size_t hufDesSize;
-} ZSTD_hufCTablesMetadata_t;
-
-/** ZSTD_fseCTablesMetadata_t :
- * Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and
- * fse tables in fseTablesBuffer.
- * fseTablesSize refers to the size of fse tables in bytes.
- * This metadata is populated in ZSTD_buildSuperBlockEntropy_sequences() */
-typedef struct {
- symbolEncodingType_e llType;
- symbolEncodingType_e ofType;
- symbolEncodingType_e mlType;
- BYTE fseTablesBuffer[500]; /* TODO give name to this value */
- size_t fseTablesSize;
- size_t lastCountSize; /* This is to account for bug in 1.3.4. More detail in ZSTD_compressSubBlock_sequences() */
-} ZSTD_fseCTablesMetadata_t;
-
-typedef struct {
- ZSTD_hufCTablesMetadata_t hufMetadata;
- ZSTD_fseCTablesMetadata_t fseMetadata;
-} ZSTD_entropyCTablesMetadata_t;
-
-
-/** ZSTD_buildSuperBlockEntropy_literal() :
- * Builds entropy for the super-block literals.
- * Stores literals block type (raw, rle, compressed, repeat) and
- * huffman description table to hufMetadata.
- * @return : size of huffman description table or error code */
-static size_t ZSTD_buildSuperBlockEntropy_literal(void* const src, size_t srcSize,
- const ZSTD_hufCTables_t* prevHuf,
- ZSTD_hufCTables_t* nextHuf,
- ZSTD_hufCTablesMetadata_t* hufMetadata,
- const int disableLiteralsCompression,
- void* workspace, size_t wkspSize)
-{
- BYTE* const wkspStart = (BYTE*)workspace;
- BYTE* const wkspEnd = wkspStart + wkspSize;
- BYTE* const countWkspStart = wkspStart;
- unsigned* const countWksp = (unsigned*)workspace;
- const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
- BYTE* const nodeWksp = countWkspStart + countWkspSize;
- const size_t nodeWkspSize = wkspEnd-nodeWksp;
- unsigned maxSymbolValue = 255;
- unsigned huffLog = HUF_TABLELOG_DEFAULT;
- HUF_repeat repeat = prevHuf->repeatMode;
-
- DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_literal (srcSize=%zu)", srcSize);
-
- /* Prepare nextEntropy assuming reusing the existing table */
- memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
-
- if (disableLiteralsCompression) {
- DEBUGLOG(5, "set_basic - disabled");
- hufMetadata->hType = set_basic;
- return 0;
- }
-
- /* small ? don't even attempt compression (speed opt) */
-# define COMPRESS_LITERALS_SIZE_MIN 63
- { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
- if (srcSize <= minLitSize) {
- DEBUGLOG(5, "set_basic - too small");
- hufMetadata->hType = set_basic;
- return 0;
- }
- }
-
- /* Scan input and build symbol stats */
- { size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize);
- FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
- if (largest == srcSize) {
- DEBUGLOG(5, "set_rle");
- hufMetadata->hType = set_rle;
- return 0;
- }
- if (largest <= (srcSize >> 7)+4) {
- DEBUGLOG(5, "set_basic - no gain");
- hufMetadata->hType = set_basic;
- return 0;
- }
- }
-
- /* Validate the previous Huffman table */
- if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
- repeat = HUF_repeat_none;
- }
-
- /* Build Huffman Tree */
- memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
- huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
- { size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
- maxSymbolValue, huffLog,
- nodeWksp, nodeWkspSize);
- FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
- huffLog = (U32)maxBits;
- { /* Build and write the CTable */
- size_t const newCSize = HUF_estimateCompressedSize(
- (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
- size_t const hSize = HUF_writeCTable(
- hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
- (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog);
- /* Check against repeating the previous CTable */
- if (repeat != HUF_repeat_none) {
- size_t const oldCSize = HUF_estimateCompressedSize(
- (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
- if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
- DEBUGLOG(5, "set_repeat - smaller");
- memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
- hufMetadata->hType = set_repeat;
- return 0;
- }
- }
- if (newCSize + hSize >= srcSize) {
- DEBUGLOG(5, "set_basic - no gains");
- memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
- hufMetadata->hType = set_basic;
- return 0;
- }
- DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
- hufMetadata->hType = set_compressed;
- nextHuf->repeatMode = HUF_repeat_check;
- return hSize;
- }
- }
-}
-
-/** ZSTD_buildSuperBlockEntropy_sequences() :
- * Builds entropy for the super-block sequences.
- * Stores symbol compression modes and fse table to fseMetadata.
- * @return : size of fse tables or error code */
-static size_t ZSTD_buildSuperBlockEntropy_sequences(seqStore_t* seqStorePtr,
- const ZSTD_fseCTables_t* prevEntropy,
- ZSTD_fseCTables_t* nextEntropy,
- const ZSTD_CCtx_params* cctxParams,
- ZSTD_fseCTablesMetadata_t* fseMetadata,
- void* workspace, size_t wkspSize)
-{
- BYTE* const wkspStart = (BYTE*)workspace;
- BYTE* const wkspEnd = wkspStart + wkspSize;
- BYTE* const countWkspStart = wkspStart;
- unsigned* const countWksp = (unsigned*)workspace;
- const size_t countWkspSize = (MaxSeq + 1) * sizeof(unsigned);
- BYTE* const cTableWksp = countWkspStart + countWkspSize;
- const size_t cTableWkspSize = wkspEnd-cTableWksp;
- ZSTD_strategy const strategy = cctxParams->cParams.strategy;
- FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
- FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
- FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
- const BYTE* const ofCodeTable = seqStorePtr->ofCode;
- const BYTE* const llCodeTable = seqStorePtr->llCode;
- const BYTE* const mlCodeTable = seqStorePtr->mlCode;
- size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
- BYTE* const ostart = fseMetadata->fseTablesBuffer;
- BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
- BYTE* op = ostart;
-
- assert(cTableWkspSize >= (1 << MaxFSELog) * sizeof(FSE_FUNCTION_TYPE));
- DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_sequences (nbSeq=%zu)", nbSeq);
- memset(workspace, 0, wkspSize);
-
- fseMetadata->lastCountSize = 0;
- /* convert length/distances into codes */
- ZSTD_seqToCodes(seqStorePtr);
- /* build CTable for Literal Lengths */
- { U32 LLtype;
- unsigned max = MaxLL;
- size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, llCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
- DEBUGLOG(5, "Building LL table");
- nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
- LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
- countWksp, max, mostFrequent, nbSeq,
- LLFSELog, prevEntropy->litlengthCTable,
- LL_defaultNorm, LL_defaultNormLog,
- ZSTD_defaultAllowed, strategy);
- assert(set_basic < set_compressed && set_rle < set_compressed);
- assert(!(LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
- { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
- countWksp, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
- prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable),
- cTableWksp, cTableWkspSize);
- FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
- if (LLtype == set_compressed)
- fseMetadata->lastCountSize = countSize;
- op += countSize;
- fseMetadata->llType = (symbolEncodingType_e) LLtype;
- } }
- /* build CTable for Offsets */
- { U32 Offtype;
- unsigned max = MaxOff;
- size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, ofCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
- /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
- ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
- DEBUGLOG(5, "Building OF table");
- nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
- Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
- countWksp, max, mostFrequent, nbSeq,
- OffFSELog, prevEntropy->offcodeCTable,
- OF_defaultNorm, OF_defaultNormLog,
- defaultPolicy, strategy);
- assert(!(Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
- { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
- countWksp, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
- prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable),
- cTableWksp, cTableWkspSize);
- FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
- if (Offtype == set_compressed)
- fseMetadata->lastCountSize = countSize;
- op += countSize;
- fseMetadata->ofType = (symbolEncodingType_e) Offtype;
- } }
- /* build CTable for MatchLengths */
- { U32 MLtype;
- unsigned max = MaxML;
- size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, mlCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
- DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
- nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
- MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
- countWksp, max, mostFrequent, nbSeq,
- MLFSELog, prevEntropy->matchlengthCTable,
- ML_defaultNorm, ML_defaultNormLog,
- ZSTD_defaultAllowed, strategy);
- assert(!(MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
- { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
- countWksp, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
- prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable),
- cTableWksp, cTableWkspSize);
- FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
- if (MLtype == set_compressed)
- fseMetadata->lastCountSize = countSize;
- op += countSize;
- fseMetadata->mlType = (symbolEncodingType_e) MLtype;
- } }
- assert((size_t) (op-ostart) <= sizeof(fseMetadata->fseTablesBuffer));
- return op-ostart;
-}
-
-
-/** ZSTD_buildSuperBlockEntropy() :
- * Builds entropy for the super-block.
- * @return : 0 on success or error code */
-static size_t
-ZSTD_buildSuperBlockEntropy(seqStore_t* seqStorePtr,
- const ZSTD_entropyCTables_t* prevEntropy,
- ZSTD_entropyCTables_t* nextEntropy,
- const ZSTD_CCtx_params* cctxParams,
- ZSTD_entropyCTablesMetadata_t* entropyMetadata,
- void* workspace, size_t wkspSize)
-{
- size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart;
- DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy");
- entropyMetadata->hufMetadata.hufDesSize =
- ZSTD_buildSuperBlockEntropy_literal(seqStorePtr->litStart, litSize,
- &prevEntropy->huf, &nextEntropy->huf,
- &entropyMetadata->hufMetadata,
- ZSTD_disableLiteralsCompression(cctxParams),
- workspace, wkspSize);
- FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildSuperBlockEntropy_literal failed");
- entropyMetadata->fseMetadata.fseTablesSize =
- ZSTD_buildSuperBlockEntropy_sequences(seqStorePtr,
- &prevEntropy->fse, &nextEntropy->fse,
- cctxParams,
- &entropyMetadata->fseMetadata,
- workspace, wkspSize);
- FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildSuperBlockEntropy_sequences failed");
- return 0;
-}
-
/** ZSTD_compressSubBlock_literal() :
* Compresses literals section for a sub-block.
* When we have to write the Huffman table we will sometimes choose a header
@@ -304,7 +26,7 @@ ZSTD_buildSuperBlockEntropy(seqStore_t* seqStorePtr,
* before we know the table size + compressed size, so we have a bound on the
* table size. If we guessed incorrectly, we fall back to uncompressed literals.
*
- * We write the header when writeEntropy=1 and set entropyWrriten=1 when we succeeded
+ * We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
* in writing the header, otherwise it is set to 0.
*
* hufMetadata->hType has literals block type info.
@@ -348,7 +70,7 @@ static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);
if (writeEntropy && hufMetadata->hType == set_compressed) {
- memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
+ ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
op += hufMetadata->hufDesSize;
cLitSize += hufMetadata->hufDesSize;
DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
@@ -474,7 +196,7 @@ static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables
const U32 MLtype = fseMetadata->mlType;
DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize);
*seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
- memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
+ ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
op += fseMetadata->fseTablesSize;
} else {
const U32 repeat = set_repeat;
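[Editor's sketch, not part of the patch: the seqHead byte written in the hunk above packs the three 2-bit encoding types into one header byte, assuming the usual symbolEncodingType_e values set_basic=0, set_rle=1, set_compressed=2, set_repeat=3.]

#include <assert.h>

int main(void) {
    enum { set_basic = 0, set_rle = 1, set_compressed = 2, set_repeat = 3 };
    unsigned const LLtype = set_compressed, Offtype = set_basic, MLtype = set_rle;
    /* same packing as *seqHead above: LL in bits 7-6, OF in bits 5-4, ML in bits 3-2 */
    unsigned char const seqHead = (unsigned char)((LLtype << 6) + (Offtype << 4) + (MLtype << 2));
    assert((seqHead >> 6)        == LLtype);
    assert(((seqHead >> 4) & 3u) == Offtype);
    assert(((seqHead >> 2) & 3u) == MLtype);
    return 0;
}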
@@ -603,7 +325,7 @@ static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
const BYTE* codeTable, unsigned maxCode,
size_t nbSeq, const FSE_CTable* fseCTable,
const U32* additionalBits,
- short const* defaultNorm, U32 defaultNormLog,
+ short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
void* workspace, size_t wkspSize)
{
unsigned* const countWksp = (unsigned*)workspace;
@@ -615,7 +337,11 @@ static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */
if (type == set_basic) {
- cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max);
+ /* We selected this encoding type, so it must be valid. */
+ assert(max <= defaultMax);
+ cSymbolTypeSizeEstimateInBits = max <= defaultMax
+ ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max)
+ : ERROR(GENERIC);
} else if (type == set_rle) {
cSymbolTypeSizeEstimateInBits = 0;
} else if (type == set_compressed || type == set_repeat) {
@@ -639,19 +365,20 @@ static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
void* workspace, size_t wkspSize,
int writeEntropy)
{
- size_t sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
+ size_t const sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
size_t cSeqSizeEstimate = 0;
+ if (nbSeq == 0) return sequencesSectionHeaderSize;
cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
nbSeq, fseTables->offcodeCTable, NULL,
- OF_defaultNorm, OF_defaultNormLog,
+ OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
workspace, wkspSize);
cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,
nbSeq, fseTables->litlengthCTable, LL_bits,
- LL_defaultNorm, LL_defaultNormLog,
+ LL_defaultNorm, LL_defaultNormLog, MaxLL,
workspace, wkspSize);
cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,
nbSeq, fseTables->matchlengthCTable, ML_bits,
- ML_defaultNorm, ML_defaultNormLog,
+ ML_defaultNorm, ML_defaultNormLog, MaxML,
workspace, wkspSize);
if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
return cSeqSizeEstimate + sequencesSectionHeaderSize;
@@ -790,7 +517,7 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
} while (!lastSequence);
if (writeLitEntropy) {
DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
- memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
+ ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
}
if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
/* If we haven't written our entropy tables, then we've violated our contract and
@@ -809,11 +536,11 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
if (sp < send) {
seqDef const* seq;
repcodes_t rep;
- memcpy(&rep, prevCBlock->rep, sizeof(rep));
+ ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
for (seq = sstart; seq < sp; ++seq) {
rep = ZSTD_updateRep(rep.rep, seq->offset - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
}
- memcpy(nextCBlock->rep, &rep, sizeof(rep));
+ ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
}
}
DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed");
@@ -826,12 +553,12 @@ size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
unsigned lastBlock) {
ZSTD_entropyCTablesMetadata_t entropyMetadata;
- FORWARD_IF_ERROR(ZSTD_buildSuperBlockEntropy(&zc->seqStore,
+ FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(&zc->seqStore,
&zc->blockState.prevCBlock->entropy,
&zc->blockState.nextCBlock->entropy,
&zc->appliedParams,
&entropyMetadata,
- zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
return ZSTD_compressSubBlock_multi(&zc->seqStore,
zc->blockState.prevCBlock,
@@ -841,5 +568,5 @@ size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
dst, dstCapacity,
src, srcSize,
zc->bmi2, lastBlock,
- zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
}
diff --git a/Utilities/cmzstd/lib/compress/zstd_compress_superblock.h b/Utilities/cmzstd/lib/compress/zstd_compress_superblock.h
index 07f4cb1dc6..176f9b106f 100644
--- a/Utilities/cmzstd/lib/compress/zstd_compress_superblock.h
+++ b/Utilities/cmzstd/lib/compress/zstd_compress_superblock.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/Utilities/cmzstd/lib/compress/zstd_cwksp.h b/Utilities/cmzstd/lib/compress/zstd_cwksp.h
index a25c9263b7..2656d26ca2 100644
--- a/Utilities/cmzstd/lib/compress/zstd_cwksp.h
+++ b/Utilities/cmzstd/lib/compress/zstd_cwksp.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -35,6 +35,10 @@ extern "C" {
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif
+
+/* Set our tables and aligneds to align by 64 bytes */
+#define ZSTD_CWKSP_ALIGNMENT_BYTES 64
+
/*-*************************************
* Structures
***************************************/
@@ -45,6 +49,16 @@ typedef enum {
} ZSTD_cwksp_alloc_phase_e;
/**
+ * Used to describe whether the workspace is statically allocated (and will not
+ * necessarily ever be freed), or if it's dynamically allocated and we can
+ * expect a well-formed caller to free this.
+ */
+typedef enum {
+ ZSTD_cwksp_dynamic_alloc,
+ ZSTD_cwksp_static_alloc
+} ZSTD_cwksp_static_alloc_e;
+
+/**
* Zstd fits all its internal datastructures into a single continuous buffer,
* so that it only needs to perform a single OS allocation (or so that a buffer
* can be provided to it and it can perform no allocations at all). This buffer
@@ -92,7 +106,7 @@ typedef enum {
*
* - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
* so that literally everything fits in a single buffer. Note: if present,
- * this must be the first object in the workspace, since ZSTD_free{CCtx,
+ * this must be the first object in the workspace, since ZSTD_customFree{CCtx,
* CDict}() rely on a pointer comparison to see whether one or two frees are
* required.
*
@@ -107,10 +121,11 @@ typedef enum {
* - Tables: these are any of several different datastructures (hash tables,
* chain tables, binary trees) that all respect a common format: they are
* uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
- * Their sizes depend on the cparams.
+ * Their sizes depend on the cparams. These tables are 64-byte aligned.
*
* - Aligned: these buffers are used for various purposes that require 4 byte
- * alignment, but don't require any initialization before they're used.
+ * alignment, but don't require any initialization before they're used. These
+ * buffers are each aligned to 64 bytes.
*
* - Buffers: these buffers are used for various purposes that don't require
* any alignment or initialization before they're used. This means they can
@@ -123,8 +138,7 @@ typedef enum {
*
* 1. Objects
* 2. Buffers
- * 3. Aligned
- * 4. Tables
+ * 3. Aligned/Tables
*
* Attempts to reserve objects of different types out of order will fail.
*/
@@ -137,9 +151,10 @@ typedef struct {
void* tableValidEnd;
void* allocStart;
- int allocFailed;
+ BYTE allocFailed;
int workspaceOversizedDuration;
ZSTD_cwksp_alloc_phase_e phase;
+ ZSTD_cwksp_static_alloc_e isStatic;
} ZSTD_cwksp;
/*-*************************************
@@ -176,39 +191,123 @@ MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
* Since tables aren't currently redzoned, you don't need to call through this
* to figure out how much space you need for the matchState tables. Everything
* else is though.
+ *
+ * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
*/
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ if (size == 0)
+ return 0;
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#else
return size;
#endif
}
-MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
+/**
+ * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
+ * Used to determine the number of bytes required for a given "aligned".
+ */
+MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
+ return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
+}
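[Editor's sketch, not part of the patch: the rounding performed by ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES) above; round_up_64 is a hypothetical stand-in for that call.]

#include <assert.h>
#include <stddef.h>

static size_t round_up_64(size_t size) {
    return (size + 63) & ~(size_t)63;   /* nearest multiple of 64 that is >= size */
}

int main(void) {
    assert(round_up_64(0)  == 0);
    assert(round_up_64(1)  == 64);
    assert(round_up_64(64) == 64);
    assert(round_up_64(65) == 128);
    return 0;
}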
+
+/**
+ * Returns the amount of additional space the cwksp must allocate
+ * for internal purposes (currently only alignment).
+ */
+MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
+ /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
+ * to align the beginning of tables section, as well as another n_2=[0, 63] bytes
+ * to align the beginning of the aligned section.
+ *
+ * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
+ * aligneds being sized in multiples of 64 bytes.
+ */
+ size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES;
+ return slackSpace;
+}
+
+
+/**
+ * Return the number of additional bytes required to align a pointer to the given number of bytes.
+ * alignBytes must be a power of two.
+ */
+MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
+ size_t const alignBytesMask = alignBytes - 1;
+ size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
+ assert((alignBytes & alignBytesMask) == 0);
+ assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES);
+ return bytes;
+}
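[Editor's sketch, not part of the patch: the mask arithmetic used by ZSTD_cwksp_bytes_to_align_ptr above, applied to a plain char buffer; bytes_to_align is a hypothetical local copy of that helper.]

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static size_t bytes_to_align(const void* ptr, size_t alignBytes) {
    size_t const mask = alignBytes - 1;                       /* alignBytes is a power of two */
    return (alignBytes - ((uintptr_t)ptr & mask)) & mask;     /* always in [0, alignBytes-1] */
}

int main(void) {
    char buf[256];
    char* p = buf;
    p += bytes_to_align(p, 64);
    assert(((uintptr_t)p & 63) == 0);   /* p is now 64-byte aligned */
    assert((size_t)(p - buf) < 64);     /* at most 63 bytes were skipped */
    return 0;
}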
+
+/**
+ * Internal function. Do not use directly.
+ * Reserves the given number of bytes within the aligned/buffer segment of the wksp, which
+ * counts from the end of the wksp. (as opposed to the object/table segment)
+ *
+ * Returns a pointer to the beginning of that space.
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes) {
+ void* const alloc = (BYTE*)ws->allocStart - bytes;
+ void* const bottom = ws->tableEnd;
+ DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
+ alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ assert(alloc >= bottom);
+ if (alloc < bottom) {
+ DEBUGLOG(4, "cwksp: alloc failed!");
+ ws->allocFailed = 1;
+ return NULL;
+ }
+ if (alloc < ws->tableValidEnd) {
+ ws->tableValidEnd = alloc;
+ }
+ ws->allocStart = alloc;
+ return alloc;
+}
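[Editor's sketch, not part of the patch: the buffer/aligned region is carved from the end of the workspace and grows down toward tableEnd, as the helper above does; a minimal byte-arena model of that policy.]

#include <assert.h>
#include <stddef.h>

typedef struct { unsigned char* bottom; unsigned char* allocStart; } arena;

static void* arena_reserve_from_end(arena* a, size_t bytes) {
    if ((size_t)(a->allocStart - a->bottom) < bytes) return NULL;  /* would collide with tables/objects */
    a->allocStart -= bytes;                                        /* new top of the free region */
    return a->allocStart;
}

int main(void) {
    unsigned char buf[128];
    arena a = { buf, buf + sizeof(buf) };
    void* p1 = arena_reserve_from_end(&a, 32);   /* ends up at buf + 96 */
    void* p2 = arena_reserve_from_end(&a, 96);   /* ends up at buf */
    void* p3 = arena_reserve_from_end(&a, 1);    /* no space left */
    assert(p1 == buf + 96 && p2 == buf && p3 == NULL);
    return 0;
}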
+
+/**
+ * Moves the cwksp to the next phase, and does any necessary allocations.
+ * Returns 0 on success, or a zstd error code
+ */
+MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase(
ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
assert(phase >= ws->phase);
if (phase > ws->phase) {
+ /* Going from allocating objects to allocating buffers */
if (ws->phase < ZSTD_cwksp_alloc_buffers &&
phase >= ZSTD_cwksp_alloc_buffers) {
ws->tableValidEnd = ws->objectEnd;
}
+
+ /* Going from allocating buffers to allocating aligneds/tables */
if (ws->phase < ZSTD_cwksp_alloc_aligned &&
phase >= ZSTD_cwksp_alloc_aligned) {
- /* If unaligned allocations down from a too-large top have left us
- * unaligned, we need to realign our alloc ptr. Technically, this
- * can consume space that is unaccounted for in the neededSpace
- * calculation. However, I believe this can only happen when the
- * workspace is too large, and specifically when it is too large
- * by a larger margin than the space that will be consumed. */
- /* TODO: cleaner, compiler warning friendly way to do this??? */
- ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
- if (ws->allocStart < ws->tableValidEnd) {
- ws->tableValidEnd = ws->allocStart;
+ { /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
+ size_t const bytesToAlign =
+ ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
+ DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign);
+ ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */
+ RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
+ memory_allocation, "aligned phase - alignment initial allocation failed!");
+ }
+ { /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
+ void* const alloc = ws->objectEnd;
+ size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
+ void* const end = (BYTE*)alloc + bytesToAlign;
+ DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
+ RETURN_ERROR_IF(end > ws->workspaceEnd, memory_allocation,
+ "table phase - alignment initial allocation failed!");
+ ws->objectEnd = end;
+ ws->tableEnd = end;
+ ws->tableValidEnd = end;
}
}
ws->phase = phase;
+ ZSTD_cwksp_assert_internal_consistency(ws);
}
+ return 0;
}
/**
@@ -224,34 +323,26 @@ MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
MEM_STATIC void* ZSTD_cwksp_reserve_internal(
ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
void* alloc;
- void* bottom = ws->tableEnd;
- ZSTD_cwksp_internal_advance_phase(ws, phase);
- alloc = (BYTE *)ws->allocStart - bytes;
+ if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
+ return NULL;
+ }
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* over-reserve space */
- alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+ bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif
- DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
- alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
- ZSTD_cwksp_assert_internal_consistency(ws);
- assert(alloc >= bottom);
- if (alloc < bottom) {
- DEBUGLOG(4, "cwksp: alloc failed!");
- ws->allocFailed = 1;
- return NULL;
- }
- if (alloc < ws->tableValidEnd) {
- ws->tableValidEnd = alloc;
- }
- ws->allocStart = alloc;
+ alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
* either side. */
- alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
- __asan_unpoison_memory_region(alloc, bytes);
+ if (alloc) {
+ alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+ __asan_unpoison_memory_region(alloc, bytes);
+ }
+ }
#endif
return alloc;
@@ -265,28 +356,36 @@ MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
}
/**
- * Reserves and returns memory sized on and aligned on sizeof(unsigned).
+ * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
*/
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
- assert((bytes & (sizeof(U32)-1)) == 0);
- return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
+ void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
+ ZSTD_cwksp_alloc_aligned);
+ assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+ return ptr;
}
/**
- * Aligned on sizeof(unsigned). These buffers have the special property that
+ * Aligned on 64 bytes. These buffers have the special property that
* their values remain constrained, allowing us to re-use them without
* memset()-ing them.
*/
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
- void* alloc = ws->tableEnd;
- void* end = (BYTE *)alloc + bytes;
- void* top = ws->allocStart;
+ void* alloc;
+ void* end;
+ void* top;
+
+ if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
+ return NULL;
+ }
+ alloc = ws->tableEnd;
+ end = (BYTE *)alloc + bytes;
+ top = ws->allocStart;
DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
assert((bytes & (sizeof(U32)-1)) == 0);
- ZSTD_cwksp_internal_advance_phase(ws, phase);
ZSTD_cwksp_assert_internal_consistency(ws);
assert(end <= top);
if (end > top) {
@@ -296,10 +395,14 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
}
ws->tableEnd = end;
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
- __asan_unpoison_memory_region(alloc, bytes);
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+ __asan_unpoison_memory_region(alloc, bytes);
+ }
#endif
+ assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
+ assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
return alloc;
}
@@ -311,7 +414,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
void* alloc = ws->objectEnd;
void* end = (BYTE*)alloc + roundedBytes;
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* over-reserve space */
end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif
@@ -332,11 +435,13 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
ws->tableEnd = end;
ws->tableValidEnd = end;
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
* either side. */
alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
- __asan_unpoison_memory_region(alloc, bytes);
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+ __asan_unpoison_memory_region(alloc, bytes);
+ }
#endif
return alloc;
@@ -345,7 +450,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
-#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
/* To validate that the table re-use logic is sound, and that we don't
* access table space that we haven't cleaned, we re-"poison" the table
* space every time we mark it dirty. */
@@ -380,7 +485,7 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
assert(ws->tableValidEnd >= ws->objectEnd);
assert(ws->tableValidEnd <= ws->allocStart);
if (ws->tableValidEnd < ws->tableEnd) {
- memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
+ ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
}
ZSTD_cwksp_mark_tables_clean(ws);
}
@@ -392,8 +497,12 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
DEBUGLOG(4, "cwksp: clearing tables!");
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
- {
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ /* We don't do this when the workspace is statically allocated, because
+ * when that is the case, we have no capability to hook into the end of the
+ * workspace's lifecycle to unpoison the memory.
+ */
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
__asan_poison_memory_region(ws->objectEnd, size);
}
@@ -410,7 +519,7 @@ MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
DEBUGLOG(4, "cwksp: clearing!");
-#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
/* To validate that the context re-use logic is sound, and that we don't
* access stuff that this compression hasn't initialized, we re-"poison"
* the workspace (or at least the non-static, non-table parts of it)
@@ -421,8 +530,12 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
}
#endif
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
- {
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ /* We don't do this when the workspace is statically allocated, because
+ * when that is the case, we have no capability to hook into the end of the
+ * workspace's lifecycle to unpoison the memory.
+ */
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
__asan_poison_memory_region(ws->objectEnd, size);
}
@@ -442,7 +555,7 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
* Any existing values in the workspace are ignored (the previously managed
* buffer, if present, must be separately freed).
*/
-MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
+MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
ws->workspace = start;
@@ -450,39 +563,45 @@ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
ws->objectEnd = ws->workspace;
ws->tableValidEnd = ws->objectEnd;
ws->phase = ZSTD_cwksp_alloc_objects;
+ ws->isStatic = isStatic;
ZSTD_cwksp_clear(ws);
ws->workspaceOversizedDuration = 0;
ZSTD_cwksp_assert_internal_consistency(ws);
}
MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
- void* workspace = ZSTD_malloc(size, customMem);
+ void* workspace = ZSTD_customMalloc(size, customMem);
DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
- ZSTD_cwksp_init(ws, workspace, size);
+ ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
return 0;
}
MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
void *ptr = ws->workspace;
DEBUGLOG(4, "cwksp: freeing workspace");
- memset(ws, 0, sizeof(ZSTD_cwksp));
- ZSTD_free(ptr, customMem);
+ ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
+ ZSTD_customFree(ptr, customMem);
}
/**
* Moves the management of a workspace from one cwksp to another. The src cwksp
- * is left in an invalid state (src must be re-init()'ed before its used again).
+ * is left in an invalid state (src must be re-init()'ed before it's used again).
*/
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
*dst = *src;
- memset(src, 0, sizeof(ZSTD_cwksp));
+ ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
}
MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}
+MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
+ + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
+}
+
MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
return ws->allocFailed;
}
@@ -491,6 +610,24 @@ MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
* Functions Checking Free Space
***************************************/
+/* ZSTD_cwksp_estimated_space_within_bounds() :
+ * Returns whether the estimated space needed for a wksp is within an acceptable limit of the
+ * actual amount of space used.
+ */
+MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws,
+ size_t const estimatedSpace, int resizedWorkspace) {
+ if (resizedWorkspace) {
+ /* Resized/newly allocated wksp should have exact bounds */
+ return ZSTD_cwksp_used(ws) == estimatedSpace;
+ } else {
+ /* Due to alignment, when reusing a workspace, we can actually consume 63 fewer or more bytes
+ * than estimatedSpace. See the comments in zstd_cwksp.h for details.
+ */
+ return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63);
+ }
+}
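[Editor's sketch, not part of the patch: the bound checked above, as a standalone predicate; exact for a freshly sized workspace, within 63 bytes either way for a reused one.]

#include <assert.h>
#include <stddef.h>

static int within_alignment_slack(size_t used, size_t estimated, int resizedWorkspace) {
    if (resizedWorkspace)
        return used == estimated;                                /* fresh workspace: exact match */
    return used >= estimated - 63 && used <= estimated + 63;     /* reused workspace: +/- 63 bytes */
}

int main(void) {
    assert( within_alignment_slack(1000, 1000, 1));
    assert(!within_alignment_slack(1001, 1000, 1));
    assert( within_alignment_slack(1063, 1000, 0));
    assert(!within_alignment_slack(1064, 1000, 0));
    return 0;
}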
+
+
MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}
diff --git a/Utilities/cmzstd/lib/compress/zstd_double_fast.c b/Utilities/cmzstd/lib/compress/zstd_double_fast.c
index 27eed66cfe..d0d3a784dd 100644
--- a/Utilities/cmzstd/lib/compress/zstd_double_fast.c
+++ b/Utilities/cmzstd/lib/compress/zstd_double_fast.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -31,15 +31,15 @@ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
* is empty.
*/
for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
- U32 const current = (U32)(ip - base);
+ U32 const curr = (U32)(ip - base);
U32 i;
for (i = 0; i < fastHashFillStep; ++i) {
size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
if (i == 0)
- hashSmall[smHash] = current + i;
+ hashSmall[smHash] = curr + i;
if (i == 0 || hashLarge[lgHash] == 0)
- hashLarge[lgHash] = current + i;
+ hashLarge[lgHash] = curr + i;
/* Only load extra positions for ZSTD_dtlm_full */
if (dtlm == ZSTD_dtlm_fast)
break;
@@ -108,9 +108,9 @@ size_t ZSTD_compressBlock_doubleFast_generic(
/* init */
ip += (dictAndPrefixLength == 0);
if (dictMode == ZSTD_noDict) {
- U32 const current = (U32)(ip - base);
- U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
- U32 const maxRep = current - windowLow;
+ U32 const curr = (U32)(ip - base);
+ U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
+ U32 const maxRep = curr - windowLow;
if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
}
@@ -129,17 +129,17 @@ size_t ZSTD_compressBlock_doubleFast_generic(
size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
- U32 const current = (U32)(ip-base);
+ U32 const curr = (U32)(ip-base);
U32 const matchIndexL = hashLong[h2];
U32 matchIndexS = hashSmall[h];
const BYTE* matchLong = base + matchIndexL;
const BYTE* match = base + matchIndexS;
- const U32 repIndex = current + 1 - offset_1;
+ const U32 repIndex = curr + 1 - offset_1;
const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
&& repIndex < prefixLowestIndex) ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
- hashLong[h2] = hashSmall[h] = current; /* update hash tables */
+ hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
/* check dictMatchState repcode */
if (dictMode == ZSTD_dictMatchState
@@ -177,7 +177,7 @@ size_t ZSTD_compressBlock_doubleFast_generic(
if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
- offset = (U32)(current - dictMatchIndexL - dictIndexDelta);
+ offset = (U32)(curr - dictMatchIndexL - dictIndexDelta);
while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
goto _match_found;
} }
@@ -209,7 +209,7 @@ _search_next_long:
size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
U32 const matchIndexL3 = hashLong[hl3];
const BYTE* matchL3 = base + matchIndexL3;
- hashLong[hl3] = current + 1;
+ hashLong[hl3] = curr + 1;
/* check prefix long +1 match */
if (matchIndexL3 > prefixLowestIndex) {
@@ -228,7 +228,7 @@ _search_next_long:
if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
ip++;
- offset = (U32)(current + 1 - dictMatchIndexL3 - dictIndexDelta);
+ offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta);
while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
goto _match_found;
} } }
@@ -236,7 +236,7 @@ _search_next_long:
/* if no long +1 match, explore the short match we found */
if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) {
mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
- offset = (U32)(current - matchIndexS);
+ offset = (U32)(curr - matchIndexS);
while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
} else {
mLength = ZSTD_count(ip+4, match+4, iend) + 4;
@@ -260,7 +260,7 @@ _match_stored:
if (ip <= ilimit) {
/* Complementary insertion */
/* done after iLimit test, as candidates could be > iend-8 */
- { U32 const indexToInsert = current+2;
+ { U32 const indexToInsert = curr+2;
hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
@@ -401,15 +401,15 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
const BYTE* matchLong = matchLongBase + matchLongIndex;
- const U32 current = (U32)(ip-base);
- const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */
+ const U32 curr = (U32)(ip-base);
+ const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
size_t mLength;
- hashSmall[hSmall] = hashLong[hLong] = current; /* update hash table */
+ hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */
if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
- & (repIndex > dictStartIndex))
+ & (offset_1 < curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
@@ -421,7 +421,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
U32 offset;
mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
- offset = current - matchLongIndex;
+ offset = curr - matchLongIndex;
while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
offset_2 = offset_1;
offset_1 = offset;
@@ -433,19 +433,19 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
const BYTE* match3 = match3Base + matchIndex3;
U32 offset;
- hashLong[h3] = current + 1;
+ hashLong[h3] = curr + 1;
if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
ip++;
- offset = current+1 - matchIndex3;
+ offset = curr+1 - matchIndex3;
while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
} else {
const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
- offset = current - matchIndex;
+ offset = curr - matchIndex;
while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
}
offset_2 = offset_1;
@@ -464,7 +464,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
if (ip <= ilimit) {
/* Complementary insertion */
/* done after iLimit test, as candidates could be > iend-8 */
- { U32 const indexToInsert = current+2;
+ { U32 const indexToInsert = curr+2;
hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
@@ -477,7 +477,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
U32 const repIndex2 = current2 - offset_2;
const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
- & (repIndex2 > dictStartIndex))
+ & (offset_2 < current2 - dictStartIndex))
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
diff --git a/Utilities/cmzstd/lib/compress/zstd_double_fast.h b/Utilities/cmzstd/lib/compress/zstd_double_fast.h
index 14d944d69b..e16b7b03a3 100644
--- a/Utilities/cmzstd/lib/compress/zstd_double_fast.h
+++ b/Utilities/cmzstd/lib/compress/zstd_double_fast.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/Utilities/cmzstd/lib/compress/zstd_fast.c b/Utilities/cmzstd/lib/compress/zstd_fast.c
index 85a3a7a91e..4edc04dccd 100644
--- a/Utilities/cmzstd/lib/compress/zstd_fast.c
+++ b/Utilities/cmzstd/lib/compress/zstd_fast.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -29,16 +29,16 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
* Insert the other positions if their hash entry is empty.
*/
for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
- U32 const current = (U32)(ip - base);
+ U32 const curr = (U32)(ip - base);
size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
- hashTable[hash0] = current;
+ hashTable[hash0] = curr;
if (dtlm == ZSTD_dtlm_fast) continue;
/* Only load extra positions for ZSTD_dtlm_full */
{ U32 p;
for (p = 1; p < fastHashFillStep; ++p) {
size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
if (hashTable[hash] == 0) { /* not yet filled */
- hashTable[hash] = current + p;
+ hashTable[hash] = curr + p;
} } } }
}
@@ -72,9 +72,9 @@ ZSTD_compressBlock_fast_generic(
DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
ip0 += (ip0 == prefixStart);
ip1 = ip0 + 1;
- { U32 const current = (U32)(ip0 - base);
- U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
- U32 const maxRep = current - windowLow;
+ { U32 const curr = (U32)(ip0 - base);
+ U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
+ U32 const maxRep = curr - windowLow;
if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
}
@@ -242,7 +242,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
assert(endIndex - prefixStartIndex <= maxDistance);
(void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */
- /* ensure there will be no no underflow
+ /* ensure there will be no underflow
* when translating a dict index into a local index */
assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
@@ -258,14 +258,14 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
size_t mLength;
size_t const h = ZSTD_hashPtr(ip, hlog, mls);
- U32 const current = (U32)(ip-base);
+ U32 const curr = (U32)(ip-base);
U32 const matchIndex = hashTable[h];
const BYTE* match = base + matchIndex;
- const U32 repIndex = current + 1 - offset_1;
+ const U32 repIndex = curr + 1 - offset_1;
const BYTE* repMatch = (repIndex < prefixStartIndex) ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
- hashTable[h] = current; /* update hash table */
+ hashTable[h] = curr; /* update hash table */
if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
@@ -284,7 +284,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
continue;
} else {
/* found a dict match */
- U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);
+ U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta);
mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
while (((ip>anchor) & (dictMatch>dictStart))
&& (ip[-1] == dictMatch[-1])) {
@@ -316,8 +316,8 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
if (ip <= ilimit) {
/* Fill Table */
- assert(base+current+2 > istart); /* check base overflow */
- hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2; /* here because current+2 could be > iend-8 */
+ assert(base+curr+2 > istart); /* check base overflow */
+ hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; /* here because curr+2 could be > iend-8 */
hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
/* check immediate repcode */
@@ -410,15 +410,15 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
const U32 matchIndex = hashTable[h];
const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
const BYTE* match = matchBase + matchIndex;
- const U32 current = (U32)(ip-base);
- const U32 repIndex = current + 1 - offset_1;
+ const U32 curr = (U32)(ip-base);
+ const U32 repIndex = curr + 1 - offset_1;
const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
- hashTable[h] = current; /* update hash table */
- DEBUGLOG(7, "offset_1 = %u , current = %u", offset_1, current);
- assert(offset_1 <= current +1); /* check repIndex */
+ hashTable[h] = curr; /* update hash table */
+ DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr);
- if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
+ if ( ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */
+ & (offset_1 < curr+1 - dictStartIndex) ) /* note: we are searching at curr+1 */
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
@@ -435,7 +435,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
}
{ const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
- U32 const offset = current - matchIndex;
+ U32 const offset = curr - matchIndex;
size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
offset_2 = offset_1; offset_1 = offset; /* update offset history */
@@ -446,14 +446,14 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
if (ip <= ilimit) {
/* Fill Table */
- hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;
+ hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;
hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
/* check immediate repcode */
while (ip <= ilimit) {
U32 const current2 = (U32)(ip-base);
U32 const repIndex2 = current2 - offset_2;
const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
- if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex)) /* intentional overflow */
+ if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 < curr - dictStartIndex)) /* intentional overflow */
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
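[Editor's note with a sketch, not part of the patch: with repIndex = curr + 1 - offset_1, the old test (repIndex > dictStartIndex) and the new test (offset_1 < curr + 1 - dictStartIndex) select the same candidates whenever offset_1 <= curr + 1; the rewritten form additionally rejects the case where repIndex would underflow.]

#include <assert.h>
#include <stdint.h>

int main(void) {
    uint32_t const dictStartIndex = 100;
    uint32_t curr, offset_1;
    for (curr = dictStartIndex; curr < dictStartIndex + 64; curr++) {
        for (offset_1 = 1; offset_1 <= curr + 1; offset_1++) {
            uint32_t const repIndex = curr + 1 - offset_1;
            int const oldCheck = repIndex > dictStartIndex;
            int const newCheck = offset_1 < curr + 1 - dictStartIndex;
            assert(oldCheck == newCheck);            /* equivalent in the non-wrapping range */
        }
    }
    {   /* offset_1 too large: repIndex wraps around, only the new form rejects it */
        uint32_t const curr2 = dictStartIndex + 5, offset = curr2 + 10;
        uint32_t const repIndex = curr2 + 1 - offset;            /* wraps to a huge value */
        assert(repIndex > dictStartIndex);                       /* old check would pass */
        assert(!(offset < curr2 + 1 - dictStartIndex));          /* new check rejects */
    }
    return 0;
}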
diff --git a/Utilities/cmzstd/lib/compress/zstd_fast.h b/Utilities/cmzstd/lib/compress/zstd_fast.h
index cf6aaa8e67..0d4a0c1090 100644
--- a/Utilities/cmzstd/lib/compress/zstd_fast.h
+++ b/Utilities/cmzstd/lib/compress/zstd_fast.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/Utilities/cmzstd/lib/compress/zstd_lazy.c b/Utilities/cmzstd/lib/compress/zstd_lazy.c
index 4cf5c88b53..3d523e8472 100644
--- a/Utilities/cmzstd/lib/compress/zstd_lazy.c
+++ b/Utilities/cmzstd/lib/compress/zstd_lazy.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -58,11 +58,11 @@ ZSTD_updateDUBT(ZSTD_matchState_t* ms,
/** ZSTD_insertDUBT1() :
* sort one already inserted but unsorted position
- * assumption : current >= btlow == (current - btmask)
+ * assumption : curr >= btlow == (curr - btmask)
* doesn't fail */
static void
ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
- U32 current, const BYTE* inputEnd,
+ U32 curr, const BYTE* inputEnd,
U32 nbCompares, U32 btLow,
const ZSTD_dictMode_e dictMode)
{
@@ -74,41 +74,41 @@ ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
const BYTE* const base = ms->window.base;
const BYTE* const dictBase = ms->window.dictBase;
const U32 dictLimit = ms->window.dictLimit;
- const BYTE* const ip = (current>=dictLimit) ? base + current : dictBase + current;
- const BYTE* const iend = (current>=dictLimit) ? inputEnd : dictBase + dictLimit;
+ const BYTE* const ip = (curr>=dictLimit) ? base + curr : dictBase + curr;
+ const BYTE* const iend = (curr>=dictLimit) ? inputEnd : dictBase + dictLimit;
const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const prefixStart = base + dictLimit;
const BYTE* match;
- U32* smallerPtr = bt + 2*(current&btMask);
+ U32* smallerPtr = bt + 2*(curr&btMask);
U32* largerPtr = smallerPtr + 1;
U32 matchIndex = *smallerPtr; /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
U32 dummy32; /* to be nullified at the end */
U32 const windowValid = ms->window.lowLimit;
U32 const maxDistance = 1U << cParams->windowLog;
- U32 const windowLow = (current - windowValid > maxDistance) ? current - maxDistance : windowValid;
+ U32 const windowLow = (curr - windowValid > maxDistance) ? curr - maxDistance : windowValid;
DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
- current, dictLimit, windowLow);
- assert(current >= btLow);
+ curr, dictLimit, windowLow);
+ assert(curr >= btLow);
assert(ip < iend); /* condition for ZSTD_count */
while (nbCompares-- && (matchIndex > windowLow)) {
U32* const nextPtr = bt + 2*(matchIndex & btMask);
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
- assert(matchIndex < current);
+ assert(matchIndex < curr);
/* note : all candidates are now supposed sorted,
* but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK
* when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */
if ( (dictMode != ZSTD_extDict)
|| (matchIndex+matchLength >= dictLimit) /* both in current segment*/
- || (current < dictLimit) /* both in extDict */) {
+ || (curr < dictLimit) /* both in extDict */) {
const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
|| (matchIndex+matchLength >= dictLimit)) ?
base : dictBase;
assert( (matchIndex+matchLength >= dictLimit) /* might be wrong if extDict is incorrectly set to 0 */
- || (current < dictLimit) );
+ || (curr < dictLimit) );
match = mBase + matchIndex;
matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
} else {
@@ -119,7 +119,7 @@ ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
}
DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
- current, matchIndex, (U32)matchLength);
+ curr, matchIndex, (U32)matchLength);
if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
@@ -168,7 +168,7 @@ ZSTD_DUBT_findBetterDictMatch (
const BYTE* const base = ms->window.base;
const BYTE* const prefixStart = base + ms->window.dictLimit;
- U32 const current = (U32)(ip-base);
+ U32 const curr = (U32)(ip-base);
const BYTE* const dictBase = dms->window.base;
const BYTE* const dictEnd = dms->window.nextSrc;
U32 const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
@@ -195,10 +195,10 @@ ZSTD_DUBT_findBetterDictMatch (
if (matchLength > bestLength) {
U32 matchIndex = dictMatchIndex + dictIndexDelta;
- if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
+ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
- current, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + current - matchIndex, dictMatchIndex, matchIndex);
- bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
+ curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + curr - matchIndex, dictMatchIndex, matchIndex);
+ bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
}
if (ip+matchLength == iend) { /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
break; /* drop, to guarantee consistency (miss a little bit of compression) */
@@ -218,9 +218,9 @@ ZSTD_DUBT_findBetterDictMatch (
}
if (bestLength >= MINMATCH) {
- U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
+ U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
- current, (U32)bestLength, (U32)*offsetPtr, mIndex);
+ curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
}
return bestLength;
@@ -241,13 +241,13 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
U32 matchIndex = hashTable[h];
const BYTE* const base = ms->window.base;
- U32 const current = (U32)(ip-base);
- U32 const windowLow = ZSTD_getLowestMatchIndex(ms, current, cParams->windowLog);
+ U32 const curr = (U32)(ip-base);
+ U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
U32* const bt = ms->chainTable;
U32 const btLog = cParams->chainLog - 1;
U32 const btMask = (1 << btLog) - 1;
- U32 const btLow = (btMask >= current) ? 0 : current - btMask;
+ U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
U32 const unsortLimit = MAX(btLow, windowLow);
U32* nextCandidate = bt + 2*(matchIndex&btMask);
@@ -256,8 +256,9 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
U32 nbCandidates = nbCompares;
U32 previousCandidate = 0;
- DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", current);
+ DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", curr);
assert(ip <= iend-8); /* required for h calculation */
+ assert(dictMode != ZSTD_dedicatedDictSearch);
/* reach end of unsorted candidates list */
while ( (matchIndex > unsortLimit)
@@ -299,14 +300,14 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
const U32 dictLimit = ms->window.dictLimit;
const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const prefixStart = base + dictLimit;
- U32* smallerPtr = bt + 2*(current&btMask);
- U32* largerPtr = bt + 2*(current&btMask) + 1;
- U32 matchEndIdx = current + 8 + 1;
+ U32* smallerPtr = bt + 2*(curr&btMask);
+ U32* largerPtr = bt + 2*(curr&btMask) + 1;
+ U32 matchEndIdx = curr + 8 + 1;
U32 dummy32; /* to be nullified at the end */
size_t bestLength = 0;
matchIndex = hashTable[h];
- hashTable[h] = current; /* Update Hash Table */
+ hashTable[h] = curr; /* Update Hash Table */
while (nbCompares-- && (matchIndex > windowLow)) {
U32* const nextPtr = bt + 2*(matchIndex & btMask);
@@ -326,8 +327,8 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
if (matchLength > bestLength) {
if (matchLength > matchEndIdx - matchIndex)
matchEndIdx = matchIndex + (U32)matchLength;
- if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
- bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
+ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
+ bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
if (dictMode == ZSTD_dictMatchState) {
nbCompares = 0; /* in addition to avoiding checking any
@@ -363,12 +364,12 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
mls, dictMode);
}
- assert(matchEndIdx > current+8); /* ensure nextToUpdate is increased */
+ assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */
ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
if (bestLength >= MINMATCH) {
- U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
+ U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
- current, (U32)bestLength, (U32)*offsetPtr, mIndex);
+ curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
}
return bestLength;
}
@@ -437,6 +438,220 @@ static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
}
}
+/***********************************
+* Dedicated dict search
+***********************************/
+
+void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip)
+{
+ const BYTE* const base = ms->window.base;
+ U32 const target = (U32)(ip - base);
+ U32* const hashTable = ms->hashTable;
+ U32* const chainTable = ms->chainTable;
+ U32 const chainSize = 1 << ms->cParams.chainLog;
+ U32 idx = ms->nextToUpdate;
+ U32 const minChain = chainSize < target ? target - chainSize : idx;
+ U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG;
+ U32 const cacheSize = bucketSize - 1;
+ U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize;
+ U32 const chainLimit = chainAttempts > 255 ? 255 : chainAttempts;
+
+ /* We know the hashtable is oversized by a factor of `bucketSize`.
+ * We are going to temporarily pretend `bucketSize == 1`, keeping only a
+ * single entry. We will use the rest of the space to construct a temporary
+ * chaintable.
+ */
+ U32 const hashLog = ms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
+ U32* const tmpHashTable = hashTable;
+ U32* const tmpChainTable = hashTable + ((size_t)1 << hashLog);
+ U32 const tmpChainSize = ((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog;
+ U32 const tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx;
+ U32 hashIdx;
+
+ assert(ms->cParams.chainLog <= 24);
+ assert(ms->cParams.hashLog > ms->cParams.chainLog);
+ assert(idx != 0);
+ assert(tmpMinChain <= minChain);
+
+ /* fill conventional hash table and conventional chain table */
+ for ( ; idx < target; idx++) {
+ U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch);
+ if (idx >= tmpMinChain) {
+ tmpChainTable[idx - tmpMinChain] = hashTable[h];
+ }
+ tmpHashTable[h] = idx;
+ }
+
+ /* sort chains into ddss chain table */
+ {
+ U32 chainPos = 0;
+ for (hashIdx = 0; hashIdx < (1U << hashLog); hashIdx++) {
+ U32 count;
+ U32 countBeyondMinChain = 0;
+ U32 i = tmpHashTable[hashIdx];
+ for (count = 0; i >= tmpMinChain && count < cacheSize; count++) {
+ /* skip through the chain to the first position that won't be
+ * in the hash cache bucket */
+ if (i < minChain) {
+ countBeyondMinChain++;
+ }
+ i = tmpChainTable[i - tmpMinChain];
+ }
+ if (count == cacheSize) {
+ for (count = 0; count < chainLimit;) {
+ if (i < minChain) {
+ if (!i || ++countBeyondMinChain > cacheSize) {
+ /* only allow pulling `cacheSize` number of entries
+ * into the cache or chainTable beyond `minChain`,
+ * to replace the entries pulled out of the
+ * chainTable into the cache. This lets us reach
+ * back further without increasing the total number
+ * of entries in the chainTable, guaranteeing the
+ * DDSS chain table will fit into the space
+ * allocated for the regular one. */
+ break;
+ }
+ }
+ chainTable[chainPos++] = i;
+ count++;
+ if (i < tmpMinChain) {
+ break;
+ }
+ i = tmpChainTable[i - tmpMinChain];
+ }
+ } else {
+ count = 0;
+ }
+ if (count) {
+ tmpHashTable[hashIdx] = ((chainPos - count) << 8) + count;
+ } else {
+ tmpHashTable[hashIdx] = 0;
+ }
+ }
+ assert(chainPos <= chainSize); /* I believe this is guaranteed... */
+ }
+
+ /* move chain pointers into the last entry of each hash bucket */
+ for (hashIdx = (1 << hashLog); hashIdx; ) {
+ U32 const bucketIdx = --hashIdx << ZSTD_LAZY_DDSS_BUCKET_LOG;
+ U32 const chainPackedPointer = tmpHashTable[hashIdx];
+ U32 i;
+ for (i = 0; i < cacheSize; i++) {
+ hashTable[bucketIdx + i] = 0;
+ }
+ hashTable[bucketIdx + bucketSize - 1] = chainPackedPointer;
+ }
+
+ /* fill the buckets of the hash table */
+ for (idx = ms->nextToUpdate; idx < target; idx++) {
+ U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch)
+ << ZSTD_LAZY_DDSS_BUCKET_LOG;
+ U32 i;
+ /* Shift hash cache down 1. */
+ for (i = cacheSize - 1; i; i--)
+ hashTable[h + i] = hashTable[h + i - 1];
+ hashTable[h] = idx;
+ }
+
+ ms->nextToUpdate = target;
+}
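Each DDSS hash bucket ends with a packed chain descriptor: the value written above as ((chainPos - count) << 8) + count keeps the chain's starting index in the upper 24 bits (hence the assert that chainLog <= 24) and its length, at most 255 entries, in the low byte; ZSTD_dedicatedDictSearch_lazy_search() below decodes it with ">> 8" and "& 0xFF". A minimal sketch of that convention, using hypothetical helper names:

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t U32;   /* stand-in for zstd's U32 */

    /* Pack a DDSS chain descriptor: start index in the high 24 bits,
     * length (at most 255 entries) in the low 8 bits. */
    U32 DDS_packChain(U32 chainStart, U32 chainLength)
    {
        assert(chainStart < (1u << 24) && chainLength <= 255);
        return (chainStart << 8) + chainLength;
    }

    /* Decode the descriptor stored in the last slot of a hash bucket,
     * mirroring the chainPackedPointer reads in the search function. */
    void DDS_unpackChain(U32 packed, U32* chainIndex, U32* chainLength)
    {
        *chainIndex  = packed >> 8;
        *chainLength = packed & 0xFF;
    }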
+
+/* Returns the longest match length found in the dedicated dict search structure.
+ * If none are longer than the argument ml, then ml will be returned.
+ */
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nbAttempts,
+ const ZSTD_matchState_t* const dms,
+ const BYTE* const ip, const BYTE* const iLimit,
+ const BYTE* const prefixStart, const U32 curr,
+ const U32 dictLimit, const size_t ddsIdx) {
+ const U32 ddsLowestIndex = dms->window.dictLimit;
+ const BYTE* const ddsBase = dms->window.base;
+ const BYTE* const ddsEnd = dms->window.nextSrc;
+ const U32 ddsSize = (U32)(ddsEnd - ddsBase);
+ const U32 ddsIndexDelta = dictLimit - ddsSize;
+ const U32 bucketSize = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG);
+ const U32 bucketLimit = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1;
+ U32 ddsAttempt;
+ U32 matchIndex;
+
+ for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) {
+ PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]);
+ }
+
+ {
+ U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
+ U32 const chainIndex = chainPackedPointer >> 8;
+
+ PREFETCH_L1(&dms->chainTable[chainIndex]);
+ }
+
+ for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) {
+ size_t currentMl=0;
+ const BYTE* match;
+ matchIndex = dms->hashTable[ddsIdx + ddsAttempt];
+ match = ddsBase + matchIndex;
+
+ if (!matchIndex) {
+ return ml;
+ }
+
+ /* guaranteed by table construction */
+ (void)ddsLowestIndex;
+ assert(matchIndex >= ddsLowestIndex);
+ assert(match+4 <= ddsEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) {
+ /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
+ }
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE;
+ if (ip+currentMl == iLimit) {
+ /* best possible, avoids read overflow on next attempt */
+ return ml;
+ }
+ }
+ }
+
+ {
+ U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
+ U32 chainIndex = chainPackedPointer >> 8;
+ U32 const chainLength = chainPackedPointer & 0xFF;
+ U32 const chainAttempts = nbAttempts - ddsAttempt;
+ U32 const chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts;
+ U32 chainAttempt;
+
+ for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) {
+ PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]);
+ }
+
+ for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) {
+ size_t currentMl=0;
+ const BYTE* match;
+ matchIndex = dms->chainTable[chainIndex];
+ match = ddsBase + matchIndex;
+
+ /* guaranteed by table construction */
+ assert(matchIndex >= ddsLowestIndex);
+ assert(match+4 <= ddsEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) {
+ /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
+ }
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE;
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+ }
+ }
+ return ml;
+}
/* *********************************
@@ -446,7 +661,7 @@ static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
/* Update chains up to ip (excluded)
Assumption : always within prefix (i.e. not within extDict) */
-static U32 ZSTD_insertAndFindFirstIndex_internal(
+FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
ZSTD_matchState_t* ms,
const ZSTD_compressionParameters* const cParams,
const BYTE* ip, U32 const mls)
@@ -475,7 +690,6 @@ U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
}
-
/* inlining is important to hardwire a hot branch (template emulation) */
FORCE_INLINE_TEMPLATE
size_t ZSTD_HcFindBestMatch_generic (
@@ -493,20 +707,33 @@ size_t ZSTD_HcFindBestMatch_generic (
const U32 dictLimit = ms->window.dictLimit;
const BYTE* const prefixStart = base + dictLimit;
const BYTE* const dictEnd = dictBase + dictLimit;
- const U32 current = (U32)(ip-base);
+ const U32 curr = (U32)(ip-base);
const U32 maxDistance = 1U << cParams->windowLog;
const U32 lowestValid = ms->window.lowLimit;
- const U32 withinMaxDistance = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;
+ const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
const U32 isDictionary = (ms->loadedDictEnd != 0);
const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
- const U32 minChain = current > chainSize ? current - chainSize : 0;
+ const U32 minChain = curr > chainSize ? curr - chainSize : 0;
U32 nbAttempts = 1U << cParams->searchLog;
size_t ml=4-1;
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch
+ ? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
+ const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch
+ ? ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
+
+ U32 matchIndex;
+
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ const U32* entry = &dms->hashTable[ddsIdx];
+ PREFETCH_L1(entry);
+ }
+
/* HC4 match finder */
- U32 matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
+ matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
- for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
+ for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) {
size_t currentMl=0;
if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
const BYTE* const match = base + matchIndex;
@@ -523,7 +750,7 @@ size_t ZSTD_HcFindBestMatch_generic (
/* save best solution */
if (currentMl > ml) {
ml = currentMl;
- *offsetPtr = current - matchIndex + ZSTD_REP_MOVE;
+ *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
}
@@ -531,8 +758,10 @@ size_t ZSTD_HcFindBestMatch_generic (
matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
}
- if (dictMode == ZSTD_dictMatchState) {
- const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts, dms,
+ ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
+ } else if (dictMode == ZSTD_dictMatchState) {
const U32* const dmsChainTable = dms->chainTable;
const U32 dmsChainSize = (1 << dms->cParams.chainLog);
const U32 dmsChainMask = dmsChainSize - 1;
@@ -545,7 +774,7 @@ size_t ZSTD_HcFindBestMatch_generic (
matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];
- for ( ; (matchIndex>dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
+ for ( ; (matchIndex>=dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
size_t currentMl=0;
const BYTE* const match = dmsBase + matchIndex;
assert(match+4 <= dmsEnd);
@@ -555,11 +784,12 @@ size_t ZSTD_HcFindBestMatch_generic (
/* save best solution */
if (currentMl > ml) {
ml = currentMl;
- *offsetPtr = current - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
+ *offsetPtr = curr - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
}
if (matchIndex <= dmsMinChain) break;
+
matchIndex = dmsChainTable[matchIndex & dmsChainMask];
}
}
@@ -600,6 +830,22 @@ static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS (
}
+static size_t ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS (
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
+{
+ switch(ms->cParams.minMatch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dedicatedDictSearch);
+ case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dedicatedDictSearch);
+ case 7 :
+ case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dedicatedDictSearch);
+ }
+}
+
+
FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
ZSTD_matchState_t* ms,
const BYTE* ip, const BYTE* const iLimit,
@@ -615,11 +861,657 @@ FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
}
}
+/* *********************************
+* (SIMD) Row-based matchfinder
+***********************************/
+/* Constants for row-based hash */
+#define ZSTD_ROW_HASH_TAG_OFFSET 1 /* byte offset of hashes in the match state's tagTable from the beginning of a row */
+#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */
+#define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1)
+
+#define ZSTD_ROW_HASH_CACHE_MASK (ZSTD_ROW_HASH_CACHE_SIZE - 1)
+
+typedef U32 ZSTD_VecMask; /* Clarifies when we are interacting with a U32 representing a mask of matches */
+
+#if !defined(ZSTD_NO_INTRINSICS) && defined(__SSE2__) /* SIMD SSE version */
+
+#include <emmintrin.h>
+typedef __m128i ZSTD_Vec128;
+
+/* Returns a 128-bit container with 128-bits from src */
+static ZSTD_Vec128 ZSTD_Vec128_read(const void* const src) {
+ return _mm_loadu_si128((ZSTD_Vec128 const*)src);
+}
+
+/* Returns a ZSTD_Vec128 with the byte "val" packed 16 times */
+static ZSTD_Vec128 ZSTD_Vec128_set8(BYTE val) {
+ return _mm_set1_epi8((char)val);
+}
+
+/* Compares x and y byte by byte, then collapses the resulting 128-bit mask
+ * into a 32-bit mask built from the MSB of each byte.
+ */
+static ZSTD_VecMask ZSTD_Vec128_cmpMask8(ZSTD_Vec128 x, ZSTD_Vec128 y) {
+ return (ZSTD_VecMask)_mm_movemask_epi8(_mm_cmpeq_epi8(x, y));
+}
+
+typedef struct {
+ __m128i fst;
+ __m128i snd;
+} ZSTD_Vec256;
+
+static ZSTD_Vec256 ZSTD_Vec256_read(const void* const ptr) {
+ ZSTD_Vec256 v;
+ v.fst = ZSTD_Vec128_read(ptr);
+ v.snd = ZSTD_Vec128_read((ZSTD_Vec128 const*)ptr + 1);
+ return v;
+}
+
+static ZSTD_Vec256 ZSTD_Vec256_set8(BYTE val) {
+ ZSTD_Vec256 v;
+ v.fst = ZSTD_Vec128_set8(val);
+ v.snd = ZSTD_Vec128_set8(val);
+ return v;
+}
+
+static ZSTD_VecMask ZSTD_Vec256_cmpMask8(ZSTD_Vec256 x, ZSTD_Vec256 y) {
+ ZSTD_VecMask fstMask;
+ ZSTD_VecMask sndMask;
+ fstMask = ZSTD_Vec128_cmpMask8(x.fst, y.fst);
+ sndMask = ZSTD_Vec128_cmpMask8(x.snd, y.snd);
+ return fstMask | (sndMask << 16);
+}
+
+#elif !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON) /* SIMD ARM NEON Version */
+
+#include <arm_neon.h>
+typedef uint8x16_t ZSTD_Vec128;
+
+static ZSTD_Vec128 ZSTD_Vec128_read(const void* const src) {
+ return vld1q_u8((const BYTE* const)src);
+}
+
+static ZSTD_Vec128 ZSTD_Vec128_set8(BYTE val) {
+ return vdupq_n_u8(val);
+}
+
+/* Mimics '_mm_movemask_epi8()' from SSE */
+static U32 ZSTD_vmovmaskq_u8(ZSTD_Vec128 val) {
+ /* Shift out everything but the MSB bits in each byte */
+ uint16x8_t highBits = vreinterpretq_u16_u8(vshrq_n_u8(val, 7));
+ /* Merge the even lanes together with vsra (right shift and add) */
+ uint32x4_t paired16 = vreinterpretq_u32_u16(vsraq_n_u16(highBits, highBits, 7));
+ uint64x2_t paired32 = vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14));
+ uint8x16_t paired64 = vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28));
+ /* Extract the low 8 bits from each lane, merge */
+ return vgetq_lane_u8(paired64, 0) | ((U32)vgetq_lane_u8(paired64, 8) << 8);
+}
+
+static ZSTD_VecMask ZSTD_Vec128_cmpMask8(ZSTD_Vec128 x, ZSTD_Vec128 y) {
+ return (ZSTD_VecMask)ZSTD_vmovmaskq_u8(vceqq_u8(x, y));
+}
+
+typedef struct {
+ uint8x16_t fst;
+ uint8x16_t snd;
+} ZSTD_Vec256;
+
+static ZSTD_Vec256 ZSTD_Vec256_read(const void* const ptr) {
+ ZSTD_Vec256 v;
+ v.fst = ZSTD_Vec128_read(ptr);
+ v.snd = ZSTD_Vec128_read((ZSTD_Vec128 const*)ptr + 1);
+ return v;
+}
+
+static ZSTD_Vec256 ZSTD_Vec256_set8(BYTE val) {
+ ZSTD_Vec256 v;
+ v.fst = ZSTD_Vec128_set8(val);
+ v.snd = ZSTD_Vec128_set8(val);
+ return v;
+}
+
+static ZSTD_VecMask ZSTD_Vec256_cmpMask8(ZSTD_Vec256 x, ZSTD_Vec256 y) {
+ ZSTD_VecMask fstMask;
+ ZSTD_VecMask sndMask;
+ fstMask = ZSTD_Vec128_cmpMask8(x.fst, y.fst);
+ sndMask = ZSTD_Vec128_cmpMask8(x.snd, y.snd);
+ return fstMask | (sndMask << 16);
+}
+
+#else /* Scalar fallback version */
+
+#define VEC128_NB_SIZE_T (16 / sizeof(size_t))
+typedef struct {
+ size_t vec[VEC128_NB_SIZE_T];
+} ZSTD_Vec128;
+
+static ZSTD_Vec128 ZSTD_Vec128_read(const void* const src) {
+ ZSTD_Vec128 ret;
+ ZSTD_memcpy(ret.vec, src, VEC128_NB_SIZE_T*sizeof(size_t));
+ return ret;
+}
+
+static ZSTD_Vec128 ZSTD_Vec128_set8(BYTE val) {
+ ZSTD_Vec128 ret = { {0} };
+ int startBit = sizeof(size_t) * 8 - 8;
+ for (;startBit >= 0; startBit -= 8) {
+ unsigned j = 0;
+ for (;j < VEC128_NB_SIZE_T; ++j) {
+ ret.vec[j] |= ((size_t)val << startBit);
+ }
+ }
+ return ret;
+}
+
+/* Compare x to y, byte by byte, generating a "matches" bitfield */
+static ZSTD_VecMask ZSTD_Vec128_cmpMask8(ZSTD_Vec128 x, ZSTD_Vec128 y) {
+ ZSTD_VecMask res = 0;
+ unsigned i = 0;
+ unsigned l = 0;
+ for (; i < VEC128_NB_SIZE_T; ++i) {
+ const size_t cmp1 = x.vec[i];
+ const size_t cmp2 = y.vec[i];
+ unsigned j = 0;
+ for (; j < sizeof(size_t); ++j, ++l) {
+ if (((cmp1 >> j*8) & 0xFF) == ((cmp2 >> j*8) & 0xFF)) {
+ res |= ((U32)1 << (j+i*sizeof(size_t)));
+ }
+ }
+ }
+ return res;
+}
+
+#define VEC256_NB_SIZE_T 2*VEC128_NB_SIZE_T
+typedef struct {
+ size_t vec[VEC256_NB_SIZE_T];
+} ZSTD_Vec256;
+
+static ZSTD_Vec256 ZSTD_Vec256_read(const void* const src) {
+ ZSTD_Vec256 ret;
+ ZSTD_memcpy(ret.vec, src, VEC256_NB_SIZE_T*sizeof(size_t));
+ return ret;
+}
+
+static ZSTD_Vec256 ZSTD_Vec256_set8(BYTE val) {
+ ZSTD_Vec256 ret = { {0} };
+ int startBit = sizeof(size_t) * 8 - 8;
+ for (;startBit >= 0; startBit -= 8) {
+ unsigned j = 0;
+ for (;j < VEC256_NB_SIZE_T; ++j) {
+ ret.vec[j] |= ((size_t)val << startBit);
+ }
+ }
+ return ret;
+}
+
+/* Compare x to y, byte by byte, generating a "matches" bitfield */
+static ZSTD_VecMask ZSTD_Vec256_cmpMask8(ZSTD_Vec256 x, ZSTD_Vec256 y) {
+ ZSTD_VecMask res = 0;
+ unsigned i = 0;
+ unsigned l = 0;
+ for (; i < VEC256_NB_SIZE_T; ++i) {
+ const size_t cmp1 = x.vec[i];
+ const size_t cmp2 = y.vec[i];
+ unsigned j = 0;
+ for (; j < sizeof(size_t); ++j, ++l) {
+ if (((cmp1 >> j*8) & 0xFF) == ((cmp2 >> j*8) & 0xFF)) {
+ res |= ((U32)1 << (j+i*sizeof(size_t)));
+ }
+ }
+ }
+ return res;
+}
+
+#endif /* !defined(ZSTD_NO_INTRINSICS) && defined(__SSE2__) */
+
+/* ZSTD_VecMask_next():
+ * Starting from the LSB, returns the idx of the next non-zero bit.
+ * Basically counting the nb of trailing zeroes.
+ */
+static U32 ZSTD_VecMask_next(ZSTD_VecMask val) {
+# if defined(_MSC_VER) /* Visual */
+ unsigned long r=0;
+ return _BitScanForward(&r, val) ? (U32)r : 0;
+# elif defined(__GNUC__) && (__GNUC__ >= 3)
+ return (U32)__builtin_ctz(val);
+# else
+ /* Software ctz version: http://graphics.stanford.edu/~seander/bithacks.html#ZerosOnRightMultLookup */
+ static const U32 multiplyDeBruijnBitPosition[32] =
+ {
+ 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
+ 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
+ };
+ return multiplyDeBruijnBitPosition[((U32)((val & -(int)val) * 0x077CB531U)) >> 27];
+# endif
+}
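ZSTD_RowFindBestMatch_generic() below consumes such a mask one candidate at a time: read the index of the lowest set bit, then clear that bit with matches &= (matches - 1). A small self-contained sketch of that iteration pattern, with a portable stand-in for the ctz step:

    #include <stdint.h>
    #include <stdio.h>

    /* Portable count-trailing-zeros for a non-zero 32-bit mask. */
    static unsigned ctz32(uint32_t v)
    {
        unsigned n = 0;
        while ((v & 1) == 0) { v >>= 1; n++; }
        return n;
    }

    int main(void)
    {
        uint32_t matches = 0x8412;               /* bits 1, 4, 10, 15 set */
        while (matches != 0) {
            unsigned const pos = ctz32(matches); /* next candidate slot */
            printf("candidate at row position %u\n", pos);
            matches &= (matches - 1);            /* clear the bit just used */
        }
        return 0;                                /* visits 1, 4, 10, 15 */
    }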
+
+/* ZSTD_VecMask_rotateRight():
+ * Rotates a bitfield to the right by "rotation" bits.
+ * If the rotation is greater than totalBits, the returned mask is 0.
+ */
+FORCE_INLINE_TEMPLATE ZSTD_VecMask
+ZSTD_VecMask_rotateRight(ZSTD_VecMask mask, U32 const rotation, U32 const totalBits) {
+ if (rotation == 0)
+ return mask;
+ switch (totalBits) {
+ default:
+ assert(0);
+ case 16:
+ return (mask >> rotation) | (U16)(mask << (16 - rotation));
+ case 32:
+ return (mask >> rotation) | (U32)(mask << (32 - rotation));
+ }
+}
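As a worked example, take a 16-entry row with head = 3 and raw tag matches at physical positions 3 and 9, i.e. mask = 0x0208. Rotating right by 3 gives (0x0208 >> 3) | (U16)(0x0208 << 13) = 0x0041, so bits 0 and 6 are set; the search loop later recovers the physical slots as (head + bit) & rowMask = 3 and 9. After this rotation, bit 0 always corresponds to the most recently inserted entry in the row, so candidates are visited newest-first (smallest match distance first).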
+
+/* ZSTD_row_nextIndex():
+ * Returns the next index to insert at within a tagTable row, and updates the "head"
+ * value to reflect the update. Essentially cycles backwards from [0, {entries per row})
+ */
+FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextIndex(BYTE* const tagRow, U32 const rowMask) {
+ U32 const next = (*tagRow - 1) & rowMask;
+ *tagRow = (BYTE)next;
+ return next;
+}
+
+/* ZSTD_isAligned():
+ * Checks that a pointer is aligned to "align" bytes which must be a power of 2.
+ */
+MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) {
+ assert((align & (align - 1)) == 0);
+ return (((size_t)ptr) & (align - 1)) == 0;
+}
+
+/* ZSTD_row_prefetch():
+ * Performs prefetching for the hashTable and tagTable at a given row.
+ */
+FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, U16 const* tagTable, U32 const relRow, U32 const rowLog) {
+ PREFETCH_L1(hashTable + relRow);
+ if (rowLog == 5) {
+ PREFETCH_L1(hashTable + relRow + 16);
+ }
+ PREFETCH_L1(tagTable + relRow);
+ assert(rowLog == 4 || rowLog == 5);
+ assert(ZSTD_isAligned(hashTable + relRow, 64)); /* prefetched hash row always 64-byte aligned */
+ assert(ZSTD_isAligned(tagTable + relRow, (size_t)1 << rowLog)); /* prefetched tagRow sits on a multiple of 32 or 64 bytes */
+}
+
+/* ZSTD_row_fillHashCache():
+ * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries,
+ * but not beyond iLimit.
+ */
+static void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base,
+ U32 const rowLog, U32 const mls,
+ U32 idx, const BYTE* const iLimit)
+{
+ U32 const* const hashTable = ms->hashTable;
+ U16 const* const tagTable = ms->tagTable;
+ U32 const hashLog = ms->rowHashLog;
+ U32 const maxElemsToPrefetch = (base + idx) > iLimit ? 0 : (U32)(iLimit - (base + idx) + 1);
+ U32 const lim = idx + MIN(ZSTD_ROW_HASH_CACHE_SIZE, maxElemsToPrefetch);
+
+ for (; idx < lim; ++idx) {
+ U32 const hash = (U32)ZSTD_hashPtr(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+ U32 const row = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
+ ms->hashCache[idx & ZSTD_ROW_HASH_CACHE_MASK] = hash;
+ }
+
+ DEBUGLOG(6, "ZSTD_row_fillHashCache(): [%u %u %u %u %u %u %u %u]", ms->hashCache[0], ms->hashCache[1],
+ ms->hashCache[2], ms->hashCache[3], ms->hashCache[4],
+ ms->hashCache[5], ms->hashCache[6], ms->hashCache[7]);
+}
+
+/* ZSTD_row_nextCachedHash():
+ * Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at
+ * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable.
+ */
+FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
+ U16 const* tagTable, BYTE const* base,
+ U32 idx, U32 const hashLog,
+ U32 const rowLog, U32 const mls)
+{
+ U32 const newHash = (U32)ZSTD_hashPtr(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+ U32 const row = (newHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
+ { U32 const hash = cache[idx & ZSTD_ROW_HASH_CACHE_MASK];
+ cache[idx & ZSTD_ROW_HASH_CACHE_MASK] = newHash;
+ return hash;
+ }
+}
+
+/* ZSTD_row_update_internal():
+ * Inserts the byte at ip into the appropriate position in the hash table.
+ * Determines the relative row, and the position within the {16, 32} entry row to insert at.
+ */
+FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip,
+ U32 const mls, U32 const rowLog,
+ U32 const rowMask, U32 const useCache)
+{
+ U32* const hashTable = ms->hashTable;
+ U16* const tagTable = ms->tagTable;
+ U32 const hashLog = ms->rowHashLog;
+ const BYTE* const base = ms->window.base;
+ const U32 target = (U32)(ip - base);
+ U32 idx = ms->nextToUpdate;
+
+ DEBUGLOG(6, "ZSTD_row_update_internal(): nextToUpdate=%u, current=%u", idx, target);
+ for (; idx < target; ++idx) {
+ U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, idx, hashLog, rowLog, mls)
+ : (U32)ZSTD_hashPtr(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+ U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ U32* const row = hashTable + relRow;
+ BYTE* tagRow = (BYTE*)(tagTable + relRow); /* Though tagTable is laid out as a table of U16, each tag is only 1 byte.
+ Explicit cast allows us to get exact desired position within each row */
+ U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
+
+ assert(hash == ZSTD_hashPtr(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls));
+ ((BYTE*)tagRow)[pos + ZSTD_ROW_HASH_TAG_OFFSET] = hash & ZSTD_ROW_HASH_TAG_MASK;
+ row[pos] = idx;
+ }
+ ms->nextToUpdate = target;
+}
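Concretely, with rowLog = 4 each tag row spans 16 U16 slots = 32 bytes once reinterpreted as bytes: byte 0 is the "head" index of the circular buffer and bytes 1..16 (starting at ZSTD_ROW_HASH_TAG_OFFSET) hold the sixteen 1-byte tags, leaving 15 bytes unused; with rowLog = 5 a row spans 64 bytes, holding the head byte plus 32 tags. The matching hashTable row holds the 16 or 32 full U32 match indices at the same relRow offset.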
+
+/* ZSTD_row_update():
+ * External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary
+ * processing.
+ */
+void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) {
+ const U32 rowLog = ms->cParams.searchLog < 5 ? 4 : 5;
+ const U32 rowMask = (1u << rowLog) - 1;
+ const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */);
+
+ DEBUGLOG(5, "ZSTD_row_update(), rowLog=%u", rowLog);
+ ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* dont use cache */);
+}
+
+/* Returns a ZSTD_VecMask (U32) that has the nth bit set to 1 if the newly-computed "tag" matches
+ * the hash at the nth position in a row of the tagTable.
+ */
+FORCE_INLINE_TEMPLATE
+ZSTD_VecMask ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head, const U32 rowEntries) {
+ ZSTD_VecMask matches = 0;
+ if (rowEntries == 16) {
+ ZSTD_Vec128 hashes = ZSTD_Vec128_read(tagRow + ZSTD_ROW_HASH_TAG_OFFSET);
+ ZSTD_Vec128 expandedTags = ZSTD_Vec128_set8(tag);
+ matches = ZSTD_Vec128_cmpMask8(hashes, expandedTags);
+ } else if (rowEntries == 32) {
+ ZSTD_Vec256 hashes = ZSTD_Vec256_read(tagRow + ZSTD_ROW_HASH_TAG_OFFSET);
+ ZSTD_Vec256 expandedTags = ZSTD_Vec256_set8(tag);
+ matches = ZSTD_Vec256_cmpMask8(hashes, expandedTags);
+ } else {
+ assert(0);
+ }
+ /* Each row is a circular buffer beginning at the value of "head". So we must rotate the "matches" bitfield
+ to match up with the actual layout of the entries within the hashTable */
+ return ZSTD_VecMask_rotateRight(matches, head, rowEntries);
+}
+
+/* The high-level approach of the SIMD row based match finder is as follows:
+ * - Figure out where to insert the new entry:
+ * - Generate a hash from a byte along with an additional 1-byte "short hash". The additional byte is our "tag"
+ * - The hashTable is effectively split into groups or "rows" of 16 or 32 entries of U32, and the hash determines
+ * which row to insert into.
+ * - Determine the correct position within the row to insert the entry into. Each row of 16 or 32 can
+ * be considered as a circular buffer with a "head" index that resides in the tagTable.
+ * - Also insert the "tag" into the equivalent row and position in the tagTable.
+ * - Note: The tagTable has 17 or 33 1-byte entries per row, due to 16 or 32 tags, and 1 "head" entry.
+ * The 17 or 33 entry rows are spaced out to occur every 32 or 64 bytes, respectively,
+ * for alignment/performance reasons, leaving some bytes unused.
+ * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte "short hash" and
+ * generate a bitfield that we can cycle through to check the collisions in the hash table.
+ * - Pick the longest match.
+ */
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_RowFindBestMatch_generic (
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 mls, const ZSTD_dictMode_e dictMode,
+ const U32 rowLog)
+{
+ U32* const hashTable = ms->hashTable;
+ U16* const tagTable = ms->tagTable;
+ U32* const hashCache = ms->hashCache;
+ const U32 hashLog = ms->rowHashLog;
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const U32 curr = (U32)(ip-base);
+ const U32 maxDistance = 1U << cParams->windowLog;
+ const U32 lowestValid = ms->window.lowLimit;
+ const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
+ const U32 isDictionary = (ms->loadedDictEnd != 0);
+ const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
+ const U32 rowEntries = (1U << rowLog);
+ const U32 rowMask = rowEntries - 1;
+ const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */
+ U32 nbAttempts = 1U << cappedSearchLog;
+ size_t ml=4-1;
+
+ /* DMS/DDS variables that may be referenced later */
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ size_t ddsIdx;
+ U32 ddsExtraAttempts; /* cctx hash tables are limited in searches, but allow extra searches into DDS */
+ U32 dmsTag;
+ U32* dmsRow;
+ BYTE* dmsTagRow;
+
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ const U32 ddsHashLog = dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
+ { /* Prefetch DDS hashtable entry */
+ ddsIdx = ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG;
+ PREFETCH_L1(&dms->hashTable[ddsIdx]);
+ }
+ ddsExtraAttempts = cParams->searchLog > rowLog ? 1U << (cParams->searchLog - rowLog) : 0;
+ }
+
+ if (dictMode == ZSTD_dictMatchState) {
+ /* Prefetch DMS rows */
+ U32* const dmsHashTable = dms->hashTable;
+ U16* const dmsTagTable = dms->tagTable;
+ U32 const dmsHash = (U32)ZSTD_hashPtr(ip, dms->rowHashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+ U32 const dmsRelRow = (dmsHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ dmsTag = dmsHash & ZSTD_ROW_HASH_TAG_MASK;
+ dmsTagRow = (BYTE*)(dmsTagTable + dmsRelRow);
+ dmsRow = dmsHashTable + dmsRelRow;
+ ZSTD_row_prefetch(dmsHashTable, dmsTagTable, dmsRelRow, rowLog);
+ }
+
+ /* Update the hashTable and tagTable up to (but not including) ip */
+ ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 /* useCache */);
+ { /* Get the hash for ip, compute the appropriate row */
+ U32 const hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls);
+ U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ U32 const tag = hash & ZSTD_ROW_HASH_TAG_MASK;
+ U32* const row = hashTable + relRow;
+ BYTE* tagRow = (BYTE*)(tagTable + relRow);
+ U32 const head = *tagRow & rowMask;
+ U32 matchBuffer[32 /* maximum nb entries per row */];
+ size_t numMatches = 0;
+ size_t currMatch = 0;
+ ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, head, rowEntries);
+
+ /* Cycle through the matches and prefetch */
+ for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) {
+ U32 const matchPos = (head + ZSTD_VecMask_next(matches)) & rowMask;
+ U32 const matchIndex = row[matchPos];
+ assert(numMatches < rowEntries);
+ if (matchIndex < lowLimit)
+ break;
+ if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
+ PREFETCH_L1(base + matchIndex);
+ } else {
+ PREFETCH_L1(dictBase + matchIndex);
+ }
+ matchBuffer[numMatches++] = matchIndex;
+ }
+
+ /* Speed opt: insert current byte into hashtable too. This allows us to avoid one iteration of the loop
+ in ZSTD_row_update_internal() at the next search. */
+ {
+ U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
+ tagRow[pos + ZSTD_ROW_HASH_TAG_OFFSET] = (BYTE)tag;
+ row[pos] = ms->nextToUpdate++;
+ }
+
+ /* Return the longest match */
+ for (; currMatch < numMatches; ++currMatch) {
+ U32 const matchIndex = matchBuffer[currMatch];
+ size_t currentMl=0;
+ assert(matchIndex < curr);
+ assert(matchIndex >= lowLimit);
+
+ if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
+ const BYTE* const match = base + matchIndex;
+ assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
+ if (match[ml] == ip[ml]) /* potentially better */
+ currentMl = ZSTD_count(ip, match, iLimit);
+ } else {
+ const BYTE* const match = dictBase + matchIndex;
+ assert(match+4 <= dictEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
+ }
+
+ /* Save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+ }
+ }
+
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts + ddsExtraAttempts, dms,
+ ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
+ } else if (dictMode == ZSTD_dictMatchState) {
+ /* TODO: Measure and potentially add prefetching to DMS */
+ const U32 dmsLowestIndex = dms->window.dictLimit;
+ const BYTE* const dmsBase = dms->window.base;
+ const BYTE* const dmsEnd = dms->window.nextSrc;
+ const U32 dmsSize = (U32)(dmsEnd - dmsBase);
+ const U32 dmsIndexDelta = dictLimit - dmsSize;
+
+ { U32 const head = *dmsTagRow & rowMask;
+ U32 matchBuffer[32 /* maximum nb row entries */];
+ size_t numMatches = 0;
+ size_t currMatch = 0;
+ ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, head, rowEntries);
+
+ for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) {
+ U32 const matchPos = (head + ZSTD_VecMask_next(matches)) & rowMask;
+ U32 const matchIndex = dmsRow[matchPos];
+ if (matchIndex < dmsLowestIndex)
+ break;
+ PREFETCH_L1(dmsBase + matchIndex);
+ matchBuffer[numMatches++] = matchIndex;
+ }
+
+ /* Return the longest match */
+ for (; currMatch < numMatches; ++currMatch) {
+ U32 const matchIndex = matchBuffer[currMatch];
+ size_t currentMl=0;
+ assert(matchIndex >= dmsLowestIndex);
+ assert(matchIndex < curr);
+
+ { const BYTE* const match = dmsBase + matchIndex;
+ assert(match+4 <= dmsEnd);
+ if (MEM_read32(match) == MEM_read32(ip))
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
+ }
+
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = curr - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
+ if (ip+currentMl == iLimit) break;
+ }
+ }
+ }
+ }
+ return ml;
+}
+
+/* Inlining is important to hardwire a hot branch (template emulation) */
+FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_selectMLS (
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ const ZSTD_dictMode_e dictMode, size_t* offsetPtr, const U32 rowLog)
+{
+ switch(ms->cParams.minMatch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_RowFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, dictMode, rowLog);
+ case 5 : return ZSTD_RowFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, dictMode, rowLog);
+ case 7 :
+ case 6 : return ZSTD_RowFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, dictMode, rowLog);
+ }
+}
+
+FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_selectRowLog (
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
+{
+ const U32 cappedSearchLog = MIN(ms->cParams.searchLog, 5);
+ switch(cappedSearchLog)
+ {
+ default :
+ case 4 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_noDict, offsetPtr, 4);
+ case 5 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_noDict, offsetPtr, 5);
+ }
+}
+
+FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_dictMatchState_selectRowLog(
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
+{
+ const U32 cappedSearchLog = MIN(ms->cParams.searchLog, 5);
+ switch(cappedSearchLog)
+ {
+ default :
+ case 4 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_dictMatchState, offsetPtr, 4);
+ case 5 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_dictMatchState, offsetPtr, 5);
+ }
+}
+
+FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_dedicatedDictSearch_selectRowLog(
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
+{
+ const U32 cappedSearchLog = MIN(ms->cParams.searchLog, 5);
+ switch(cappedSearchLog)
+ {
+ default :
+ case 4 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_dedicatedDictSearch, offsetPtr, 4);
+ case 5 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_dedicatedDictSearch, offsetPtr, 5);
+ }
+}
+
+FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_extDict_selectRowLog (
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
+{
+ const U32 cappedSearchLog = MIN(ms->cParams.searchLog, 5);
+ switch(cappedSearchLog)
+ {
+ default :
+ case 4 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_extDict, offsetPtr, 4);
+ case 5 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_extDict, offsetPtr, 5);
+ }
+}
+
/* *******************************
* Common parser - lazy strategy
*********************************/
-typedef enum { search_hashChain, search_binaryTree } searchMethod_e;
+typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e;
FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_lazy_generic(
@@ -633,53 +1525,85 @@ ZSTD_compressBlock_lazy_generic(
const BYTE* ip = istart;
const BYTE* anchor = istart;
const BYTE* const iend = istart + srcSize;
- const BYTE* const ilimit = iend - 8;
+ const BYTE* const ilimit = searchMethod == search_rowHash ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
const BYTE* const base = ms->window.base;
const U32 prefixLowestIndex = ms->window.dictLimit;
const BYTE* const prefixLowest = base + prefixLowestIndex;
+ const U32 rowLog = ms->cParams.searchLog < 5 ? 4 : 5;
typedef size_t (*searchMax_f)(
ZSTD_matchState_t* ms,
const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
- searchMax_f const searchMax = dictMode == ZSTD_dictMatchState ?
- (searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS
- : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) :
- (searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_selectMLS
- : ZSTD_HcFindBestMatch_selectMLS);
+
+ /**
+ * This table is indexed first by the four ZSTD_dictMode_e values, and then
+ * by the three searchMethod_e values. NULLs are placed for configurations
+ * that should never occur (extDict modes go to the other implementation
+ * below and there is no DDSS for binary tree search yet).
+ */
+ const searchMax_f searchFuncs[4][3] = {
+ {
+ ZSTD_HcFindBestMatch_selectMLS,
+ ZSTD_BtFindBestMatch_selectMLS,
+ ZSTD_RowFindBestMatch_selectRowLog
+ },
+ {
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ ZSTD_HcFindBestMatch_dictMatchState_selectMLS,
+ ZSTD_BtFindBestMatch_dictMatchState_selectMLS,
+ ZSTD_RowFindBestMatch_dictMatchState_selectRowLog
+ },
+ {
+ ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS,
+ NULL,
+ ZSTD_RowFindBestMatch_dedicatedDictSearch_selectRowLog
+ }
+ };
+
+ searchMax_f const searchMax = searchFuncs[dictMode][(int)searchMethod];
U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
+ const int isDMS = dictMode == ZSTD_dictMatchState;
+ const int isDDS = dictMode == ZSTD_dedicatedDictSearch;
+ const int isDxS = isDMS || isDDS;
const ZSTD_matchState_t* const dms = ms->dictMatchState;
- const U32 dictLowestIndex = dictMode == ZSTD_dictMatchState ?
- dms->window.dictLimit : 0;
- const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ?
- dms->window.base : NULL;
- const BYTE* const dictLowest = dictMode == ZSTD_dictMatchState ?
- dictBase + dictLowestIndex : NULL;
- const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ?
- dms->window.nextSrc : NULL;
- const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ?
+ const U32 dictLowestIndex = isDxS ? dms->window.dictLimit : 0;
+ const BYTE* const dictBase = isDxS ? dms->window.base : NULL;
+ const BYTE* const dictLowest = isDxS ? dictBase + dictLowestIndex : NULL;
+ const BYTE* const dictEnd = isDxS ? dms->window.nextSrc : NULL;
+ const U32 dictIndexDelta = isDxS ?
prefixLowestIndex - (U32)(dictEnd - dictBase) :
0;
const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest));
- DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u)", (U32)dictMode);
+ assert(searchMax != NULL);
- /* init */
+ DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u) (searchFunc=%u)", (U32)dictMode, (U32)searchMethod);
ip += (dictAndPrefixLength == 0);
if (dictMode == ZSTD_noDict) {
- U32 const current = (U32)(ip - base);
- U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, ms->cParams.windowLog);
- U32 const maxRep = current - windowLow;
+ U32 const curr = (U32)(ip - base);
+ U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog);
+ U32 const maxRep = curr - windowLow;
if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
}
- if (dictMode == ZSTD_dictMatchState) {
+ if (isDxS) {
/* dictMatchState repCode checks don't currently handle repCode == 0
* disabling. */
assert(offset_1 <= dictAndPrefixLength);
assert(offset_2 <= dictAndPrefixLength);
}
+ if (searchMethod == search_rowHash) {
+ ZSTD_row_fillHashCache(ms, base, rowLog,
+ MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */),
+ ms->nextToUpdate, ilimit);
+ }
+
/* Match Loop */
#if defined(__GNUC__) && defined(__x86_64__)
/* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
@@ -693,9 +1617,9 @@ ZSTD_compressBlock_lazy_generic(
const BYTE* start=ip+1;
/* check repCode */
- if (dictMode == ZSTD_dictMatchState) {
+ if (isDxS) {
const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
- const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
+ const BYTE* repMatch = ((dictMode == ZSTD_dictMatchState || dictMode == ZSTD_dedicatedDictSearch)
&& repIndex < prefixLowestIndex) ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
@@ -736,7 +1660,7 @@ ZSTD_compressBlock_lazy_generic(
if ((mlRep >= 4) && (gain2 > gain1))
matchLength = mlRep, offset = 0, start = ip;
}
- if (dictMode == ZSTD_dictMatchState) {
+ if (isDxS) {
const U32 repIndex = (U32)(ip - base) - offset_1;
const BYTE* repMatch = repIndex < prefixLowestIndex ?
dictBase + (repIndex - dictIndexDelta) :
@@ -771,7 +1695,7 @@ ZSTD_compressBlock_lazy_generic(
if ((mlRep >= 4) && (gain2 > gain1))
matchLength = mlRep, offset = 0, start = ip;
}
- if (dictMode == ZSTD_dictMatchState) {
+ if (isDxS) {
const U32 repIndex = (U32)(ip - base) - offset_1;
const BYTE* repMatch = repIndex < prefixLowestIndex ?
dictBase + (repIndex - dictIndexDelta) :
@@ -809,7 +1733,7 @@ ZSTD_compressBlock_lazy_generic(
&& (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) ) /* only search for offset within prefix */
{ start--; matchLength++; }
}
- if (dictMode == ZSTD_dictMatchState) {
+ if (isDxS) {
U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
@@ -825,12 +1749,11 @@ _storeSequence:
}
/* check immediate repcode */
- if (dictMode == ZSTD_dictMatchState) {
+ if (isDxS) {
while (ip <= ilimit) {
U32 const current2 = (U32)(ip-base);
U32 const repIndex = current2 - offset_2;
- const BYTE* repMatch = dictMode == ZSTD_dictMatchState
- && repIndex < prefixLowestIndex ?
+ const BYTE* repMatch = repIndex < prefixLowestIndex ?
dictBase - dictIndexDelta + repIndex :
base + repIndex;
if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
@@ -925,6 +1848,92 @@ size_t ZSTD_compressBlock_greedy_dictMatchState(
}
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
+}
+
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
+}
+
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
+}
+
+/* Row-based matchfinder */
+size_t ZSTD_compressBlock_lazy2_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_lazy_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_greedy_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_greedy_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState);
+}
+
+
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dedicatedDictSearch);
+}
+
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch);
+}
+
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
+}
+
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_lazy_extDict_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore,
@@ -936,7 +1945,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
const BYTE* ip = istart;
const BYTE* anchor = istart;
const BYTE* const iend = istart + srcSize;
- const BYTE* const ilimit = iend - 8;
+ const BYTE* const ilimit = searchMethod == search_rowHash ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
const BYTE* const base = ms->window.base;
const U32 dictLimit = ms->window.dictLimit;
const BYTE* const prefixStart = base + dictLimit;
@@ -944,18 +1953,28 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const dictStart = dictBase + ms->window.lowLimit;
const U32 windowLog = ms->cParams.windowLog;
+ const U32 rowLog = ms->cParams.searchLog < 5 ? 4 : 5;
typedef size_t (*searchMax_f)(
ZSTD_matchState_t* ms,
const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
- searchMax_f searchMax = searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
-
+ const searchMax_f searchFuncs[3] = {
+ ZSTD_HcFindBestMatch_extDict_selectMLS,
+ ZSTD_BtFindBestMatch_extDict_selectMLS,
+ ZSTD_RowFindBestMatch_extDict_selectRowLog
+ };
+ searchMax_f searchMax = searchFuncs[(int)searchMethod];
U32 offset_1 = rep[0], offset_2 = rep[1];
- DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic");
+ DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod);
/* init */
ip += (ip == prefixStart);
+ if (searchMethod == search_rowHash) {
+ ZSTD_row_fillHashCache(ms, base, rowLog,
+ MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */),
+ ms->nextToUpdate, ilimit);
+ }
/* Match Loop */
#if defined(__GNUC__) && defined(__x86_64__)
@@ -968,14 +1987,15 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
size_t matchLength=0;
size_t offset=0;
const BYTE* start=ip+1;
- U32 current = (U32)(ip-base);
+ U32 curr = (U32)(ip-base);
/* check repCode */
- { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, current+1, windowLog);
- const U32 repIndex = (U32)(current+1 - offset_1);
+ { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr+1, windowLog);
+ const U32 repIndex = (U32)(curr+1 - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
- if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */
+ if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow */
+ & (offset_1 < curr+1 - windowLow) ) /* note: we are searching at curr+1 */
if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
/* repcode detected we should take it */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
@@ -999,14 +2019,15 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
if (depth>=1)
while (ip<ilimit) {
ip ++;
- current++;
+ curr++;
/* check repCode */
if (offset) {
- const U32 windowLow = ZSTD_getLowestMatchIndex(ms, current, windowLog);
- const U32 repIndex = (U32)(current - offset_1);
+ const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
+ const U32 repIndex = (U32)(curr - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
- if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */
+ if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ & (offset_1 < curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
if (MEM_read32(ip) == MEM_read32(repMatch)) {
/* repcode detected */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
@@ -1030,14 +2051,15 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
/* let's find an even better one */
if ((depth==2) && (ip<ilimit)) {
ip ++;
- current++;
+ curr++;
/* check repCode */
if (offset) {
- const U32 windowLow = ZSTD_getLowestMatchIndex(ms, current, windowLog);
- const U32 repIndex = (U32)(current - offset_1);
+ const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
+ const U32 repIndex = (U32)(curr - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
- if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */
+ if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ & (offset_1 < curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
if (MEM_read32(ip) == MEM_read32(repMatch)) {
/* repcode detected */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
@@ -1083,7 +2105,8 @@ _storeSequence:
const U32 repIndex = repCurrent - offset_2;
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
- if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */
+ if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ & (offset_2 < repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
if (MEM_read32(ip) == MEM_read32(repMatch)) {
/* repcode detected we should take it */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
@@ -1136,3 +2159,26 @@ size_t ZSTD_compressBlock_btlazy2_extDict(
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
}
+
+size_t ZSTD_compressBlock_greedy_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0);
+}
+
+size_t ZSTD_compressBlock_lazy_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1);
+}
+
+size_t ZSTD_compressBlock_lazy2_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2);
+}
diff --git a/Utilities/cmzstd/lib/compress/zstd_lazy.h b/Utilities/cmzstd/lib/compress/zstd_lazy.h
index 581936f03b..150f7b390b 100644
--- a/Utilities/cmzstd/lib/compress/zstd_lazy.h
+++ b/Utilities/cmzstd/lib/compress/zstd_lazy.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -17,7 +17,18 @@ extern "C" {
#include "zstd_compress_internal.h"
+/**
+ * Dedicated Dictionary Search Structure bucket log. In the
+ * ZSTD_dedicatedDictSearch mode, the hashTable has
+ * 2 ** ZSTD_LAZY_DDSS_BUCKET_LOG entries in each bucket, rather than just
+ * one.
+ */
+#define ZSTD_LAZY_DDSS_BUCKET_LOG 2
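For example, with this bucket log of 2 each DDSS hash bucket holds 2^2 = 4 U32 entries: the first 3 cache the most recent positions for that hash, and the last stores a packed pointer into the chain table (start index in the high 24 bits, length in the low 8), as built by ZSTD_dedicatedDictSearch_lazy_loadDictionary().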
+
U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
+void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip);
+
+void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip);
void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */
@@ -33,6 +44,15 @@ size_t ZSTD_compressBlock_lazy(
size_t ZSTD_compressBlock_greedy(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btlazy2_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
@@ -46,6 +66,34 @@ size_t ZSTD_compressBlock_lazy_dictMatchState(
size_t ZSTD_compressBlock_greedy_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
@@ -56,9 +104,19 @@ size_t ZSTD_compressBlock_lazy_extDict(
size_t ZSTD_compressBlock_lazy2_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btlazy2_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
+
#if defined (__cplusplus)
}
diff --git a/Utilities/cmzstd/lib/compress/zstd_ldm.c b/Utilities/cmzstd/lib/compress/zstd_ldm.c
index 8c47948358..fa4ebeabd7 100644
--- a/Utilities/cmzstd/lib/compress/zstd_ldm.c
+++ b/Utilities/cmzstd/lib/compress/zstd_ldm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,13 +11,126 @@
#include "zstd_ldm.h"
#include "../common/debug.h"
+#include "../common/xxhash.h"
#include "zstd_fast.h" /* ZSTD_fillHashTable() */
#include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */
+#include "zstd_ldm_geartab.h"
#define LDM_BUCKET_SIZE_LOG 3
#define LDM_MIN_MATCH_LENGTH 64
#define LDM_HASH_RLOG 7
-#define LDM_HASH_CHAR_OFFSET 10
+
+typedef struct {
+ U64 rolling;
+ U64 stopMask;
+} ldmRollingHashState_t;
+
+/** ZSTD_ldm_gear_init():
+ *
+ * Initializes the rolling hash state such that it will honor the
+ * settings in params. */
+static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
+{
+ unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
+ unsigned hashRateLog = params->hashRateLog;
+
+ state->rolling = ~(U32)0;
+
+ /* The choice of the splitting criterion is subject to two conditions:
+ * 1. it has to trigger on average every 2^(hashRateLog) bytes;
+ * 2. ideally, it has to depend on a window of minMatchLength bytes.
+ *
+ * In the gear hash algorithm, bit n depends on the last n bytes;
+ * so in order to obtain a good quality splitting criterion it is
+ * preferable to use bits with high weight.
+ *
+ * To match condition 1 we use a mask with hashRateLog bits set
+ * and, because of the previous remark, we make sure these bits
+ * have the highest possible weight while still respecting
+ * condition 2.
+ */
+ if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
+ state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
+ } else {
+ /* In this degenerate case we simply honor the hash rate. */
+ state->stopMask = ((U64)1 << hashRateLog) - 1;
+ }
+}
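As a worked example, with LDM_MIN_MATCH_LENGTH = 64 (so maxBitsInMask = 64) and an assumed hashRateLog of 7, the mask becomes ((1 << 7) - 1) << 57 = 0xFE00000000000000: a split triggers whenever bits 57..63 of the rolling hash are all zero, which happens on average once every 2^7 = 128 bytes (condition 1), and those high bits depend on roughly the last 64 bytes of input (condition 2).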
+
+/** ZSTD_ldm_gear_reset()
+ * Feeds [data, data + minMatchLength) into the hash without registering any
+ * splits. This effectively resets the hash state. This is used when skipping
+ * over data, either at the beginning of a block, or skipping sections.
+ */
+static void ZSTD_ldm_gear_reset(ldmRollingHashState_t* state,
+ BYTE const* data, size_t minMatchLength)
+{
+ U64 hash = state->rolling;
+ size_t n = 0;
+
+#define GEAR_ITER_ONCE() do { \
+ hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
+ n += 1; \
+ } while (0)
+ while (n + 3 < minMatchLength) {
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ }
+ while (n < minMatchLength) {
+ GEAR_ITER_ONCE();
+ }
+#undef GEAR_ITER_ONCE
+}
+
+/** ZSTD_ldm_gear_feed():
+ *
+ * Registers in the splits array all the split points found in the first
+ * size bytes following the data pointer. This function terminates when
+ * either all the data has been processed or LDM_BATCH_SIZE splits are
+ * present in the splits array.
+ *
+ * Precondition: The splits array must not be full.
+ * Returns: The number of bytes processed. */
+static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
+ BYTE const* data, size_t size,
+ size_t* splits, unsigned* numSplits)
+{
+ size_t n;
+ U64 hash, mask;
+
+ hash = state->rolling;
+ mask = state->stopMask;
+ n = 0;
+
+#define GEAR_ITER_ONCE() do { \
+ hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
+ n += 1; \
+ if (UNLIKELY((hash & mask) == 0)) { \
+ splits[*numSplits] = n; \
+ *numSplits += 1; \
+ if (*numSplits == LDM_BATCH_SIZE) \
+ goto done; \
+ } \
+ } while (0)
+
+ while (n + 3 < size) {
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ }
+ while (n < size) {
+ GEAR_ITER_ONCE();
+ }
+
+#undef GEAR_ITER_ONCE
+
+done:
+ state->rolling = hash;
+ return n;
+}
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
ZSTD_compressionParameters const* cParams)
@@ -27,13 +140,6 @@ void ZSTD_ldm_adjustParameters(ldmParams_t* params,
DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
- if (cParams->strategy >= ZSTD_btopt) {
- /* Get out of the way of the optimal parser */
- U32 const minMatch = MAX(cParams->targetLength, params->minMatchLength);
- assert(minMatch >= ZSTD_LDM_MINMATCH_MIN);
- assert(minMatch <= ZSTD_LDM_MINMATCH_MAX);
- params->minMatchLength = minMatch;
- }
if (params->hashLog == 0) {
params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
assert(params->hashLog <= ZSTD_HASHLOG_MAX);
@@ -61,41 +167,6 @@ size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0;
}
-/** ZSTD_ldm_getSmallHash() :
- * numBits should be <= 32
- * If numBits==0, returns 0.
- * @return : the most significant numBits of value. */
-static U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits)
-{
- assert(numBits <= 32);
- return numBits == 0 ? 0 : (U32)(value >> (64 - numBits));
-}
-
-/** ZSTD_ldm_getChecksum() :
- * numBitsToDiscard should be <= 32
- * @return : the next most significant 32 bits after numBitsToDiscard */
-static U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard)
-{
- assert(numBitsToDiscard <= 32);
- return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF;
-}
-
-/** ZSTD_ldm_getTag() ;
- * Given the hash, returns the most significant numTagBits bits
- * after (32 + hbits) bits.
- *
- * If there are not enough bits remaining, return the last
- * numTagBits bits. */
-static U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits)
-{
- assert(numTagBits < 32 && hbits <= 32);
- if (32 - hbits < numTagBits) {
- return hash & (((U32)1 << numTagBits) - 1);
- } else {
- return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1);
- }
-}
-
/** ZSTD_ldm_getBucket() :
* Returns a pointer to the start of the bucket associated with hash. */
static ldmEntry_t* ZSTD_ldm_getBucket(
@@ -110,38 +181,12 @@ static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
size_t const hash, const ldmEntry_t entry,
ldmParams_t const ldmParams)
{
- BYTE* const bucketOffsets = ldmState->bucketOffsets;
- *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + bucketOffsets[hash]) = entry;
- bucketOffsets[hash]++;
- bucketOffsets[hash] &= ((U32)1 << ldmParams.bucketSizeLog) - 1;
-}
+ BYTE* const pOffset = ldmState->bucketOffsets + hash;
+ unsigned const offset = *pOffset;
+
+ *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
+ *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
-/** ZSTD_ldm_makeEntryAndInsertByTag() :
- *
- * Gets the small hash, checksum, and tag from the rollingHash.
- *
- * If the tag matches (1 << ldmParams.hashRateLog)-1, then
- * creates an ldmEntry from the offset, and inserts it into the hash table.
- *
- * hBits is the length of the small hash, which is the most significant hBits
- * of rollingHash. The checksum is the next 32 most significant bits, followed
- * by ldmParams.hashRateLog bits that make up the tag. */
-static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,
- U64 const rollingHash,
- U32 const hBits,
- U32 const offset,
- ldmParams_t const ldmParams)
-{
- U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashRateLog);
- U32 const tagMask = ((U32)1 << ldmParams.hashRateLog) - 1;
- if (tag == tagMask) {
- U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits);
- U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
- ldmEntry_t entry;
- entry.offset = offset;
- entry.checksum = checksum;
- ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams);
- }
}
/** ZSTD_ldm_countBackwardsMatch() :
@@ -150,10 +195,10 @@ static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,
* We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
static size_t ZSTD_ldm_countBackwardsMatch(
const BYTE* pIn, const BYTE* pAnchor,
- const BYTE* pMatch, const BYTE* pBase)
+ const BYTE* pMatch, const BYTE* pMatchBase)
{
size_t matchLength = 0;
- while (pIn > pAnchor && pMatch > pBase && pIn[-1] == pMatch[-1]) {
+ while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
pIn--;
pMatch--;
matchLength++;
@@ -161,6 +206,27 @@ static size_t ZSTD_ldm_countBackwardsMatch(
return matchLength;
}
+/** ZSTD_ldm_countBackwardsMatch_2segments() :
+ * Returns the number of bytes that match backwards from pMatch,
+ * even with the backwards match spanning 2 different segments.
+ *
+ * On reaching `pMatchBase`, start counting from mEnd */
+static size_t ZSTD_ldm_countBackwardsMatch_2segments(
+ const BYTE* pIn, const BYTE* pAnchor,
+ const BYTE* pMatch, const BYTE* pMatchBase,
+ const BYTE* pExtDictStart, const BYTE* pExtDictEnd)
+{
+ size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
+ if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) {
+ /* If backwards match is entirely in the extDict or prefix, immediately return */
+ return matchLength;
+ }
+ DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength);
+ matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart);
+ DEBUGLOG(7, "final backwards match length = %zu", matchLength);
+ return matchLength;
+}
+
/** ZSTD_ldm_fillFastTables() :
*
* Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
@@ -198,43 +264,42 @@ static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
return 0;
}
-/** ZSTD_ldm_fillLdmHashTable() :
- *
- * Fills hashTable from (lastHashed + 1) to iend (non-inclusive).
- * lastHash is the rolling hash that corresponds to lastHashed.
- *
- * Returns the rolling hash corresponding to position iend-1. */
-static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state,
- U64 lastHash, const BYTE* lastHashed,
- const BYTE* iend, const BYTE* base,
- U32 hBits, ldmParams_t const ldmParams)
-{
- U64 rollingHash = lastHash;
- const BYTE* cur = lastHashed + 1;
-
- while (cur < iend) {
- rollingHash = ZSTD_rollingHash_rotate(rollingHash, cur[-1],
- cur[ldmParams.minMatchLength-1],
- state->hashPower);
- ZSTD_ldm_makeEntryAndInsertByTag(state,
- rollingHash, hBits,
- (U32)(cur - base), ldmParams);
- ++cur;
- }
- return rollingHash;
-}
-
void ZSTD_ldm_fillHashTable(
- ldmState_t* state, const BYTE* ip,
+ ldmState_t* ldmState, const BYTE* ip,
const BYTE* iend, ldmParams_t const* params)
{
+ U32 const minMatchLength = params->minMatchLength;
+ U32 const hBits = params->hashLog - params->bucketSizeLog;
+ BYTE const* const base = ldmState->window.base;
+ BYTE const* const istart = ip;
+ ldmRollingHashState_t hashState;
+ size_t* const splits = ldmState->splitIndices;
+ unsigned numSplits;
+
DEBUGLOG(5, "ZSTD_ldm_fillHashTable");
- if ((size_t)(iend - ip) >= params->minMatchLength) {
- U64 startingHash = ZSTD_rollingHash_compute(ip, params->minMatchLength);
- ZSTD_ldm_fillLdmHashTable(
- state, startingHash, ip, iend - params->minMatchLength, state->window.base,
- params->hashLog - params->bucketSizeLog,
- *params);
+
+ ZSTD_ldm_gear_init(&hashState, params);
+ while (ip < iend) {
+ size_t hashed;
+ unsigned n;
+
+ numSplits = 0;
+ hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);
+
+ for (n = 0; n < numSplits; n++) {
+ if (ip + splits[n] >= istart + minMatchLength) {
+ BYTE const* const split = ip + splits[n] - minMatchLength;
+ U64 const xxhash = XXH64(split, minMatchLength, 0);
+ U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
+ ldmEntry_t entry;
+
+ entry.offset = (U32)(split - base);
+ entry.checksum = (U32)(xxhash >> 32);
+ ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
+ }
+ }
+
+ ip += hashed;
}
}
@@ -246,10 +311,10 @@ void ZSTD_ldm_fillHashTable(
* (after a long match, only update tables a limited amount). */
static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
{
- U32 const current = (U32)(anchor - ms->window.base);
- if (current > ms->nextToUpdate + 1024) {
+ U32 const curr = (U32)(anchor - ms->window.base);
+ if (curr > ms->nextToUpdate + 1024) {
ms->nextToUpdate =
- current - MIN(512, current - ms->nextToUpdate - 1024);
+ curr - MIN(512, curr - ms->nextToUpdate - 1024);
}
}
@@ -260,11 +325,8 @@ static size_t ZSTD_ldm_generateSequences_internal(
/* LDM parameters */
int const extDict = ZSTD_window_hasExtDict(ldmState->window);
U32 const minMatchLength = params->minMatchLength;
- U64 const hashPower = ldmState->hashPower;
+ U32 const entsPerBucket = 1U << params->bucketSizeLog;
U32 const hBits = params->hashLog - params->bucketSizeLog;
- U32 const ldmBucketSize = 1U << params->bucketSizeLog;
- U32 const hashRateLog = params->hashRateLog;
- U32 const ldmTagMask = (1U << params->hashRateLog) - 1;
/* Prefix and extDict parameters */
U32 const dictLimit = ldmState->window.dictLimit;
U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
@@ -276,45 +338,69 @@ static size_t ZSTD_ldm_generateSequences_internal(
/* Input bounds */
BYTE const* const istart = (BYTE const*)src;
BYTE const* const iend = istart + srcSize;
- BYTE const* const ilimit = iend - MAX(minMatchLength, HASH_READ_SIZE);
+ BYTE const* const ilimit = iend - HASH_READ_SIZE;
/* Input positions */
BYTE const* anchor = istart;
BYTE const* ip = istart;
- /* Rolling hash */
- BYTE const* lastHashed = NULL;
- U64 rollingHash = 0;
-
- while (ip <= ilimit) {
- size_t mLength;
- U32 const current = (U32)(ip - base);
- size_t forwardMatchLength = 0, backwardMatchLength = 0;
- ldmEntry_t* bestEntry = NULL;
- if (ip != istart) {
- rollingHash = ZSTD_rollingHash_rotate(rollingHash, lastHashed[0],
- lastHashed[minMatchLength],
- hashPower);
- } else {
- rollingHash = ZSTD_rollingHash_compute(ip, minMatchLength);
+ /* Rolling hash state */
+ ldmRollingHashState_t hashState;
+ /* Arrays for staged-processing */
+ size_t* const splits = ldmState->splitIndices;
+ ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
+ unsigned numSplits;
+
+ if (srcSize < minMatchLength)
+ return iend - anchor;
+
+ /* Initialize the rolling hash state with the first minMatchLength bytes */
+ ZSTD_ldm_gear_init(&hashState, params);
+ ZSTD_ldm_gear_reset(&hashState, ip, minMatchLength);
+ ip += minMatchLength;
+
+ while (ip < ilimit) {
+ size_t hashed;
+ unsigned n;
+
+ numSplits = 0;
+ hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip,
+ splits, &numSplits);
+
+ for (n = 0; n < numSplits; n++) {
+ BYTE const* const split = ip + splits[n] - minMatchLength;
+ U64 const xxhash = XXH64(split, minMatchLength, 0);
+ U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
+
+ candidates[n].split = split;
+ candidates[n].hash = hash;
+ candidates[n].checksum = (U32)(xxhash >> 32);
+ candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
+ PREFETCH_L1(candidates[n].bucket);
}
- lastHashed = ip;
- /* Do not insert and do not look for a match */
- if (ZSTD_ldm_getTag(rollingHash, hBits, hashRateLog) != ldmTagMask) {
- ip++;
- continue;
- }
+ for (n = 0; n < numSplits; n++) {
+ size_t forwardMatchLength = 0, backwardMatchLength = 0,
+ bestMatchLength = 0, mLength;
+ U32 offset;
+ BYTE const* const split = candidates[n].split;
+ U32 const checksum = candidates[n].checksum;
+ U32 const hash = candidates[n].hash;
+ ldmEntry_t* const bucket = candidates[n].bucket;
+ ldmEntry_t const* cur;
+ ldmEntry_t const* bestEntry = NULL;
+ ldmEntry_t newEntry;
+
+ newEntry.offset = (U32)(split - base);
+ newEntry.checksum = checksum;
+
+ /* If a split point would generate a sequence overlapping with
+ * the previous one, we merely register it in the hash table and
+ * move on */
+ if (split < anchor) {
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+ continue;
+ }
- /* Get the best entry and compute the match lengths */
- {
- ldmEntry_t* const bucket =
- ZSTD_ldm_getBucket(ldmState,
- ZSTD_ldm_getSmallHash(rollingHash, hBits),
- *params);
- ldmEntry_t* cur;
- size_t bestMatchLength = 0;
- U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
-
- for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) {
+ for (cur = bucket; cur < bucket + entsPerBucket; cur++) {
size_t curForwardMatchLength, curBackwardMatchLength,
curTotalMatchLength;
if (cur->checksum != checksum || cur->offset <= lowestIndex) {
@@ -328,30 +414,23 @@ static size_t ZSTD_ldm_generateSequences_internal(
cur->offset < dictLimit ? dictEnd : iend;
BYTE const* const lowMatchPtr =
cur->offset < dictLimit ? dictStart : lowPrefixPtr;
-
- curForwardMatchLength = ZSTD_count_2segments(
- ip, pMatch, iend,
- matchEnd, lowPrefixPtr);
+ curForwardMatchLength =
+ ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr);
if (curForwardMatchLength < minMatchLength) {
continue;
}
- curBackwardMatchLength =
- ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch,
- lowMatchPtr);
- curTotalMatchLength = curForwardMatchLength +
- curBackwardMatchLength;
+ curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(
+ split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
} else { /* !extDict */
BYTE const* const pMatch = base + cur->offset;
- curForwardMatchLength = ZSTD_count(ip, pMatch, iend);
+ curForwardMatchLength = ZSTD_count(split, pMatch, iend);
if (curForwardMatchLength < minMatchLength) {
continue;
}
curBackwardMatchLength =
- ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch,
- lowPrefixPtr);
- curTotalMatchLength = curForwardMatchLength +
- curBackwardMatchLength;
+ ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr);
}
+ curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength;
if (curTotalMatchLength > bestMatchLength) {
bestMatchLength = curTotalMatchLength;
@@ -360,57 +439,54 @@ static size_t ZSTD_ldm_generateSequences_internal(
bestEntry = cur;
}
}
- }
-
- /* No match found -- continue searching */
- if (bestEntry == NULL) {
- ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash,
- hBits, current,
- *params);
- ip++;
- continue;
- }
- /* Match found */
- mLength = forwardMatchLength + backwardMatchLength;
- ip -= backwardMatchLength;
+ /* No match found -- insert an entry into the hash table
+ * and process the next candidate match */
+ if (bestEntry == NULL) {
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+ continue;
+ }
- {
- /* Store the sequence:
- * ip = current - backwardMatchLength
- * The match is at (bestEntry->offset - backwardMatchLength)
- */
- U32 const matchIndex = bestEntry->offset;
- U32 const offset = current - matchIndex;
- rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
-
- /* Out of sequence storage */
- if (rawSeqStore->size == rawSeqStore->capacity)
- return ERROR(dstSize_tooSmall);
- seq->litLength = (U32)(ip - anchor);
- seq->matchLength = (U32)mLength;
- seq->offset = offset;
- rawSeqStore->size++;
- }
+ /* Match found */
+ offset = (U32)(split - base) - bestEntry->offset;
+ mLength = forwardMatchLength + backwardMatchLength;
+ {
+ rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
+
+ /* Out of sequence storage */
+ if (rawSeqStore->size == rawSeqStore->capacity)
+ return ERROR(dstSize_tooSmall);
+ seq->litLength = (U32)(split - backwardMatchLength - anchor);
+ seq->matchLength = (U32)mLength;
+ seq->offset = offset;
+ rawSeqStore->size++;
+ }
- /* Insert the current entry into the hash table */
- ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
- (U32)(lastHashed - base),
- *params);
+ /* Insert the current entry into the hash table --- it must be
+ * done after the previous block to avoid clobbering bestEntry */
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
- assert(ip + backwardMatchLength == lastHashed);
+ anchor = split + forwardMatchLength;
- /* Fill the hash table from lastHashed+1 to ip+mLength*/
- /* Heuristic: don't need to fill the entire table at end of block */
- if (ip + mLength <= ilimit) {
- rollingHash = ZSTD_ldm_fillLdmHashTable(
- ldmState, rollingHash, lastHashed,
- ip + mLength, base, hBits, *params);
- lastHashed = ip + mLength - 1;
+ /* If we find a match that ends after the data that we've hashed
+ * then we have a repeating, overlapping, pattern. E.g. all zeros.
+ * If one repetition of the pattern matches our `stopMask` then all
+ * repetitions will. We don't need to insert them all into our table,
+ * only the first one. So skip over overlapping matches.
+ * This is a major speed boost (20x) for compressing a single byte
+ * repeated, when that byte ends up in the table.
+ */
+ if (anchor > ip + hashed) {
+ ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength);
+ /* Continue the outer loop at anchor (ip + hashed == anchor). */
+ ip = anchor - hashed;
+ break;
+ }
}
- ip += mLength;
- anchor = ip;
+
+ ip += hashed;
}
+
return iend - anchor;
}
@@ -459,7 +535,7 @@ size_t ZSTD_ldm_generateSequences(
assert(chunkStart < iend);
/* 1. Perform overflow correction if necessary. */
- if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) {
+ if (ZSTD_window_needOverflowCorrection(ldmState->window, 0, maxDist, ldmState->loadedDictEnd, chunkStart, chunkEnd)) {
U32 const ldmHSize = 1U << params->hashLog;
U32 const correction = ZSTD_window_correctOverflow(
&ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
@@ -562,14 +638,32 @@ static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
return sequence;
}
+void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
+ U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
+ while (currPos && rawSeqStore->pos < rawSeqStore->size) {
+ rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
+ if (currPos >= currSeq.litLength + currSeq.matchLength) {
+ currPos -= currSeq.litLength + currSeq.matchLength;
+ rawSeqStore->pos++;
+ } else {
+ rawSeqStore->posInSequence = currPos;
+ break;
+ }
+ }
+ if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
+ rawSeqStore->posInSequence = 0;
+ }
+}
+
size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_useRowMatchFinderMode_e useRowMatchFinder,
void const* src, size_t srcSize)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
unsigned const minMatch = cParams->minMatch;
ZSTD_blockCompressor const blockCompressor =
- ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms));
+ ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms));
/* Input bounds */
BYTE const* const istart = (BYTE const*)src;
BYTE const* const iend = istart + srcSize;
@@ -577,9 +671,18 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
BYTE const* ip = istart;
DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
+ /* If using opt parser, use LDMs only as candidates rather than always accepting them */
+ if (cParams->strategy >= ZSTD_btopt) {
+ size_t lastLLSize;
+ ms->ldmSeqStore = rawSeqStore;
+ lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
+ ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
+ return lastLLSize;
+ }
+
assert(rawSeqStore->pos <= rawSeqStore->size);
assert(rawSeqStore->size <= rawSeqStore->capacity);
- /* Loop through each sequence and apply the block compressor to the lits */
+ /* Loop through each sequence and apply the block compressor to the literals */
while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
/* maybeSplitSequence updates rawSeqStore->pos */
rawSeq const sequence = maybeSplitSequence(rawSeqStore,
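The zstd_ldm.c hunks above replace the old rolling-hash/tag scheme with a gear hash: each byte shifts the hash left by one and adds a table entry, and a position is a split point when the masked high bits are all zero, so splits occur on average every 2^hashRateLog bytes. The following is a minimal stand-alone sketch of that loop; gearTab, the parameter values, and the random input are illustrative stand-ins, not the real ZSTD_ldm_gearTab or LDM defaults.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for ZSTD_ldm_gearTab: any fixed table of random
 * 64-bit values serves for the sketch. */
static uint64_t gearTab[256];

int main(void)
{
    const unsigned hashRateLog = 7;        /* split on average every 2^7 bytes */
    const unsigned maxBitsInMask = 64;     /* assume minMatchLength >= 64 */
    /* hashRateLog bits set at the top of the usable window,
     * mirroring ZSTD_ldm_gear_init() */
    const uint64_t stopMask =
        (((uint64_t)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);

    uint64_t hash = ~(uint64_t)0;
    unsigned char data[1 << 16];
    size_t n, nbSplits = 0;

    srand(0);
    for (n = 0; n < sizeof(data); n++) data[n] = (unsigned char)rand();
    for (n = 0; n < 256; n++)
        gearTab[n] = ((uint64_t)rand() << 32) ^ (uint64_t)rand();

    for (n = 0; n < sizeof(data); n++) {
        /* one gear step: shift the window and mix in the next byte */
        hash = (hash << 1) + gearTab[data[n]];
        if ((hash & stopMask) == 0)        /* split point found */
            nbSplits++;
    }
    /* expect roughly sizeof(data) >> hashRateLog splits, i.e. ~512 */
    printf("splits: %zu (expected ~%zu)\n", nbSplits, sizeof(data) >> hashRateLog);
    return 0;
}

Because bit n of the gear hash depends on the last n bytes, putting the mask bits at the top of the usable window (as ZSTD_ldm_gear_init does) makes the split decision depend on roughly minMatchLength bytes of context, which is the second condition stated in the comment above.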
diff --git a/Utilities/cmzstd/lib/compress/zstd_ldm.h b/Utilities/cmzstd/lib/compress/zstd_ldm.h
index 229ea05a9e..393466fa9f 100644
--- a/Utilities/cmzstd/lib/compress/zstd_ldm.h
+++ b/Utilities/cmzstd/lib/compress/zstd_ldm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -66,6 +66,7 @@ size_t ZSTD_ldm_generateSequences(
*/
size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_useRowMatchFinderMode_e useRowMatchFinder,
void const* src, size_t srcSize);
/**
@@ -73,11 +74,17 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
*
* Skip past `srcSize` bytes worth of sequences in `rawSeqStore`.
* Avoids emitting matches less than `minMatch` bytes.
- * Must be called for data with is not passed to ZSTD_ldm_blockCompress().
+ * Must be called for data that is not passed to ZSTD_ldm_blockCompress().
*/
void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
U32 const minMatch);
+/* ZSTD_ldm_skipRawSeqStoreBytes():
+ * Moves forward in rawSeqStore by nbBytes, updating fields 'pos' and 'posInSequence'.
+ * Not to be used in conjunction with ZSTD_ldm_skipSequences().
+ * Must be called for data that is not passed to ZSTD_ldm_blockCompress().
+ */
+void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes);
/** ZSTD_ldm_getTableSize() :
* Estimate the space needed for long distance matching tables or 0 if LDM is
diff --git a/Utilities/cmzstd/lib/compress/zstd_ldm_geartab.h b/Utilities/cmzstd/lib/compress/zstd_ldm_geartab.h
new file mode 100644
index 0000000000..e5c24d856b
--- /dev/null
+++ b/Utilities/cmzstd/lib/compress/zstd_ldm_geartab.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_LDM_GEARTAB_H
+#define ZSTD_LDM_GEARTAB_H
+
+static U64 ZSTD_ldm_gearTab[256] = {
+ 0xf5b8f72c5f77775c, 0x84935f266b7ac412, 0xb647ada9ca730ccc,
+ 0xb065bb4b114fb1de, 0x34584e7e8c3a9fd0, 0x4e97e17c6ae26b05,
+ 0x3a03d743bc99a604, 0xcecd042422c4044f, 0x76de76c58524259e,
+ 0x9c8528f65badeaca, 0x86563706e2097529, 0x2902475fa375d889,
+ 0xafb32a9739a5ebe6, 0xce2714da3883e639, 0x21eaf821722e69e,
+ 0x37b628620b628, 0x49a8d455d88caf5, 0x8556d711e6958140,
+ 0x4f7ae74fc605c1f, 0x829f0c3468bd3a20, 0x4ffdc885c625179e,
+ 0x8473de048a3daf1b, 0x51008822b05646b2, 0x69d75d12b2d1cc5f,
+ 0x8c9d4a19159154bc, 0xc3cc10f4abbd4003, 0xd06ddc1cecb97391,
+ 0xbe48e6e7ed80302e, 0x3481db31cee03547, 0xacc3f67cdaa1d210,
+ 0x65cb771d8c7f96cc, 0x8eb27177055723dd, 0xc789950d44cd94be,
+ 0x934feadc3700b12b, 0x5e485f11edbdf182, 0x1e2e2a46fd64767a,
+ 0x2969ca71d82efa7c, 0x9d46e9935ebbba2e, 0xe056b67e05e6822b,
+ 0x94d73f55739d03a0, 0xcd7010bdb69b5a03, 0x455ef9fcd79b82f4,
+ 0x869cb54a8749c161, 0x38d1a4fa6185d225, 0xb475166f94bbe9bb,
+ 0xa4143548720959f1, 0x7aed4780ba6b26ba, 0xd0ce264439e02312,
+ 0x84366d746078d508, 0xa8ce973c72ed17be, 0x21c323a29a430b01,
+ 0x9962d617e3af80ee, 0xab0ce91d9c8cf75b, 0x530e8ee6d19a4dbc,
+ 0x2ef68c0cf53f5d72, 0xc03a681640a85506, 0x496e4e9f9c310967,
+ 0x78580472b59b14a0, 0x273824c23b388577, 0x66bf923ad45cb553,
+ 0x47ae1a5a2492ba86, 0x35e304569e229659, 0x4765182a46870b6f,
+ 0x6cbab625e9099412, 0xddac9a2e598522c1, 0x7172086e666624f2,
+ 0xdf5003ca503b7837, 0x88c0c1db78563d09, 0x58d51865acfc289d,
+ 0x177671aec65224f1, 0xfb79d8a241e967d7, 0x2be1e101cad9a49a,
+ 0x6625682f6e29186b, 0x399553457ac06e50, 0x35dffb4c23abb74,
+ 0x429db2591f54aade, 0xc52802a8037d1009, 0x6acb27381f0b25f3,
+ 0xf45e2551ee4f823b, 0x8b0ea2d99580c2f7, 0x3bed519cbcb4e1e1,
+ 0xff452823dbb010a, 0x9d42ed614f3dd267, 0x5b9313c06257c57b,
+ 0xa114b8008b5e1442, 0xc1fe311c11c13d4b, 0x66e8763ea34c5568,
+ 0x8b982af1c262f05d, 0xee8876faaa75fbb7, 0x8a62a4d0d172bb2a,
+ 0xc13d94a3b7449a97, 0x6dbbba9dc15d037c, 0xc786101f1d92e0f1,
+ 0xd78681a907a0b79b, 0xf61aaf2962c9abb9, 0x2cfd16fcd3cb7ad9,
+ 0x868c5b6744624d21, 0x25e650899c74ddd7, 0xba042af4a7c37463,
+ 0x4eb1a539465a3eca, 0xbe09dbf03b05d5ca, 0x774e5a362b5472ba,
+ 0x47a1221229d183cd, 0x504b0ca18ef5a2df, 0xdffbdfbde2456eb9,
+ 0x46cd2b2fbee34634, 0xf2aef8fe819d98c3, 0x357f5276d4599d61,
+ 0x24a5483879c453e3, 0x88026889192b4b9, 0x28da96671782dbec,
+ 0x4ef37c40588e9aaa, 0x8837b90651bc9fb3, 0xc164f741d3f0e5d6,
+ 0xbc135a0a704b70ba, 0x69cd868f7622ada, 0xbc37ba89e0b9c0ab,
+ 0x47c14a01323552f6, 0x4f00794bacee98bb, 0x7107de7d637a69d5,
+ 0x88af793bb6f2255e, 0xf3c6466b8799b598, 0xc288c616aa7f3b59,
+ 0x81ca63cf42fca3fd, 0x88d85ace36a2674b, 0xd056bd3792389e7,
+ 0xe55c396c4e9dd32d, 0xbefb504571e6c0a6, 0x96ab32115e91e8cc,
+ 0xbf8acb18de8f38d1, 0x66dae58801672606, 0x833b6017872317fb,
+ 0xb87c16f2d1c92864, 0xdb766a74e58b669c, 0x89659f85c61417be,
+ 0xc8daad856011ea0c, 0x76a4b565b6fe7eae, 0xa469d085f6237312,
+ 0xaaf0365683a3e96c, 0x4dbb746f8424f7b8, 0x638755af4e4acc1,
+ 0x3d7807f5bde64486, 0x17be6d8f5bbb7639, 0x903f0cd44dc35dc,
+ 0x67b672eafdf1196c, 0xa676ff93ed4c82f1, 0x521d1004c5053d9d,
+ 0x37ba9ad09ccc9202, 0x84e54d297aacfb51, 0xa0b4b776a143445,
+ 0x820d471e20b348e, 0x1874383cb83d46dc, 0x97edeec7a1efe11c,
+ 0xb330e50b1bdc42aa, 0x1dd91955ce70e032, 0xa514cdb88f2939d5,
+ 0x2791233fd90db9d3, 0x7b670a4cc50f7a9b, 0x77c07d2a05c6dfa5,
+ 0xe3778b6646d0a6fa, 0xb39c8eda47b56749, 0x933ed448addbef28,
+ 0xaf846af6ab7d0bf4, 0xe5af208eb666e49, 0x5e6622f73534cd6a,
+ 0x297daeca42ef5b6e, 0x862daef3d35539a6, 0xe68722498f8e1ea9,
+ 0x981c53093dc0d572, 0xfa09b0bfbf86fbf5, 0x30b1e96166219f15,
+ 0x70e7d466bdc4fb83, 0x5a66736e35f2a8e9, 0xcddb59d2b7c1baef,
+ 0xd6c7d247d26d8996, 0xea4e39eac8de1ba3, 0x539c8bb19fa3aff2,
+ 0x9f90e4c5fd508d8, 0xa34e5956fbaf3385, 0x2e2f8e151d3ef375,
+ 0x173691e9b83faec1, 0xb85a8d56bf016379, 0x8382381267408ae3,
+ 0xb90f901bbdc0096d, 0x7c6ad32933bcec65, 0x76bb5e2f2c8ad595,
+ 0x390f851a6cf46d28, 0xc3e6064da1c2da72, 0xc52a0c101cfa5389,
+ 0xd78eaf84a3fbc530, 0x3781b9e2288b997e, 0x73c2f6dea83d05c4,
+ 0x4228e364c5b5ed7, 0x9d7a3edf0da43911, 0x8edcfeda24686756,
+ 0x5e7667a7b7a9b3a1, 0x4c4f389fa143791d, 0xb08bc1023da7cddc,
+ 0x7ab4be3ae529b1cc, 0x754e6132dbe74ff9, 0x71635442a839df45,
+ 0x2f6fb1643fbe52de, 0x961e0a42cf7a8177, 0xf3b45d83d89ef2ea,
+ 0xee3de4cf4a6e3e9b, 0xcd6848542c3295e7, 0xe4cee1664c78662f,
+ 0x9947548b474c68c4, 0x25d73777a5ed8b0b, 0xc915b1d636b7fc,
+ 0x21c2ba75d9b0d2da, 0x5f6b5dcf608a64a1, 0xdcf333255ff9570c,
+ 0x633b922418ced4ee, 0xc136dde0b004b34a, 0x58cc83b05d4b2f5a,
+ 0x5eb424dda28e42d2, 0x62df47369739cd98, 0xb4e0b42485e4ce17,
+ 0x16e1f0c1f9a8d1e7, 0x8ec3916707560ebf, 0x62ba6e2df2cc9db3,
+ 0xcbf9f4ff77d83a16, 0x78d9d7d07d2bbcc4, 0xef554ce1e02c41f4,
+ 0x8d7581127eccf94d, 0xa9b53336cb3c8a05, 0x38c42c0bf45c4f91,
+ 0x640893cdf4488863, 0x80ec34bc575ea568, 0x39f324f5b48eaa40,
+ 0xe9d9ed1f8eff527f, 0x9224fc058cc5a214, 0xbaba00b04cfe7741,
+ 0x309a9f120fcf52af, 0xa558f3ec65626212, 0x424bec8b7adabe2f,
+ 0x41622513a6aea433, 0xb88da2d5324ca798, 0xd287733b245528a4,
+ 0x9a44697e6d68aec3, 0x7b1093be2f49bb28, 0x50bbec632e3d8aad,
+ 0x6cd90723e1ea8283, 0x897b9e7431b02bf3, 0x219efdcb338a7047,
+ 0x3b0311f0a27c0656, 0xdb17bf91c0db96e7, 0x8cd4fd6b4e85a5b2,
+ 0xfab071054ba6409d, 0x40d6fe831fa9dfd9, 0xaf358debad7d791e,
+ 0xeb8d0e25a65e3e58, 0xbbcbd3df14e08580, 0xcf751f27ecdab2b,
+ 0x2b4da14f2613d8f4
+};
+
+#endif /* ZSTD_LDM_GEARTAB_H */
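Once the gear hash flags a split, ZSTD_ldm_fillHashTable and ZSTD_ldm_generateSequences_internal above fingerprint the minMatchLength bytes ending at the split with XXH64 and carve the 64-bit digest into a bucket index (the low hashLog - bucketSizeLog bits) and a 32-bit checksum (the top 32 bits). A small sketch of that carving; hashLog, bucketSizeLog, and the digest value are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the hash/checksum split used when inserting ldmEntry_t records;
 * the concrete numbers here are illustrative only. */
int main(void)
{
    const unsigned hashLog = 20;
    const unsigned bucketSizeLog = 3;                 /* 8 entries per bucket */
    const unsigned hBits = hashLog - bucketSizeLog;   /* bits used as bucket index */

    uint64_t xxhash = 0x9e3779b97f4a7c15ULL;          /* stand-in XXH64 digest */

    uint32_t hash     = (uint32_t)(xxhash & (((uint32_t)1 << hBits) - 1));
    uint32_t checksum = (uint32_t)(xxhash >> 32);

    /* A bucket holds 2^bucketSizeLog ldmEntry_t slots; the checksum is stored
     * next to the offset so lookups can reject false bucket hits cheaply. */
    printf("bucket index = %u (of %u buckets), checksum = 0x%08x\n",
           (unsigned)hash, 1u << hBits, (unsigned)checksum);
    return 0;
}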
diff --git a/Utilities/cmzstd/lib/compress/zstd_opt.c b/Utilities/cmzstd/lib/compress/zstd_opt.c
index 36fff050cf..402a7e5c76 100644
--- a/Utilities/cmzstd/lib/compress/zstd_opt.c
+++ b/Utilities/cmzstd/lib/compress/zstd_opt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
+ * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -386,32 +386,32 @@ static U32 ZSTD_insertBt1(
const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const prefixStart = base + dictLimit;
const BYTE* match;
- const U32 current = (U32)(ip-base);
- const U32 btLow = btMask >= current ? 0 : current - btMask;
- U32* smallerPtr = bt + 2*(current&btMask);
+ const U32 curr = (U32)(ip-base);
+ const U32 btLow = btMask >= curr ? 0 : curr - btMask;
+ U32* smallerPtr = bt + 2*(curr&btMask);
U32* largerPtr = smallerPtr + 1;
U32 dummy32; /* to be nullified at the end */
U32 const windowLow = ms->window.lowLimit;
- U32 matchEndIdx = current+8+1;
+ U32 matchEndIdx = curr+8+1;
size_t bestLength = 8;
U32 nbCompares = 1U << cParams->searchLog;
#ifdef ZSTD_C_PREDICT
- U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0);
- U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1);
+ U32 predictedSmall = *(bt + 2*((curr-1)&btMask) + 0);
+ U32 predictedLarge = *(bt + 2*((curr-1)&btMask) + 1);
predictedSmall += (predictedSmall>0);
predictedLarge += (predictedLarge>0);
#endif /* ZSTD_C_PREDICT */
- DEBUGLOG(8, "ZSTD_insertBt1 (%u)", current);
+ DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr);
assert(ip <= iend-8); /* required for h calculation */
- hashTable[h] = current; /* Update Hash Table */
+ hashTable[h] = curr; /* Update Hash Table */
assert(windowLow > 0);
while (nbCompares-- && (matchIndex >= windowLow)) {
U32* const nextPtr = bt + 2*(matchIndex & btMask);
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
- assert(matchIndex < current);
+ assert(matchIndex < curr);
#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */
const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */
@@ -474,8 +474,8 @@ static U32 ZSTD_insertBt1(
*smallerPtr = *largerPtr = 0;
{ U32 positions = 0;
if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384)); /* speed optimization */
- assert(matchEndIdx > current + 8);
- return MAX(positions, matchEndIdx - (current + 8));
+ assert(matchEndIdx > curr + 8);
+ return MAX(positions, matchEndIdx - (curr + 8));
}
}
@@ -519,7 +519,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
const BYTE* const base = ms->window.base;
- U32 const current = (U32)(ip-base);
+ U32 const curr = (U32)(ip-base);
U32 const hashLog = cParams->hashLog;
U32 const minMatch = (mls==3) ? 3 : 4;
U32* const hashTable = ms->hashTable;
@@ -533,12 +533,12 @@ U32 ZSTD_insertBtAndGetAllMatches (
U32 const dictLimit = ms->window.dictLimit;
const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const prefixStart = base + dictLimit;
- U32 const btLow = (btMask >= current) ? 0 : current - btMask;
- U32 const windowLow = ZSTD_getLowestMatchIndex(ms, current, cParams->windowLog);
+ U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
+ U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
U32 const matchLow = windowLow ? windowLow : 1;
- U32* smallerPtr = bt + 2*(current&btMask);
- U32* largerPtr = bt + 2*(current&btMask) + 1;
- U32 matchEndIdx = current+8+1; /* farthest referenced position of any match => detects repetitive patterns */
+ U32* smallerPtr = bt + 2*(curr&btMask);
+ U32* largerPtr = bt + 2*(curr&btMask) + 1;
+ U32 matchEndIdx = curr+8+1; /* farthest referenced position of any match => detects repetitive patterns */
U32 dummy32; /* to be nullified at the end */
U32 mnum = 0;
U32 nbCompares = 1U << cParams->searchLog;
@@ -557,7 +557,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
U32 const dmsBtLow = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;
size_t bestLength = lengthToBeat-1;
- DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", current);
+ DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", curr);
/* check repCode */
assert(ll0 <= 1); /* necessarily 1 or 0 */
@@ -565,29 +565,29 @@ U32 ZSTD_insertBtAndGetAllMatches (
U32 repCode;
for (repCode = ll0; repCode < lastR; repCode++) {
U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
- U32 const repIndex = current - repOffset;
+ U32 const repIndex = curr - repOffset;
U32 repLen = 0;
- assert(current >= dictLimit);
- if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < current-dictLimit) { /* equivalent to `current > repIndex >= dictLimit` */
+ assert(curr >= dictLimit);
+ if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < curr-dictLimit) { /* equivalent to `curr > repIndex >= dictLimit` */
/* We must validate the repcode offset because when we're using a dictionary the
* valid offset range shrinks when the dictionary goes out of bounds.
*/
if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) {
repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
}
- } else { /* repIndex < dictLimit || repIndex >= current */
+ } else { /* repIndex < dictLimit || repIndex >= curr */
const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
dmsBase + repIndex - dmsIndexDelta :
dictBase + repIndex;
- assert(current >= windowLow);
+ assert(curr >= windowLow);
if ( dictMode == ZSTD_extDict
- && ( ((repOffset-1) /*intentional overflow*/ < current - windowLow) /* equivalent to `current > repIndex >= windowLow` */
+ && ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow) /* equivalent to `curr > repIndex >= windowLow` */
& (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
&& (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
}
if (dictMode == ZSTD_dictMatchState
- && ( ((repOffset-1) /*intentional overflow*/ < current - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `current > repIndex >= dmsLowLimit` */
+ && ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `curr > repIndex >= dmsLowLimit` */
& ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
&& (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
@@ -609,7 +609,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
if ((mls == 3) /*static*/ && (bestLength < mls)) {
U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip);
if ((matchIndex3 >= matchLow)
- & (current - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
+ & (curr - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
size_t mlen;
if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
const BYTE* const match = base + matchIndex3;
@@ -624,26 +624,26 @@ U32 ZSTD_insertBtAndGetAllMatches (
DEBUGLOG(8, "found small match with hlog3, of length %u",
(U32)mlen);
bestLength = mlen;
- assert(current > matchIndex3);
+ assert(curr > matchIndex3);
assert(mnum==0); /* no prior solution */
- matches[0].off = (current - matchIndex3) + ZSTD_REP_MOVE;
+ matches[0].off = (curr - matchIndex3) + ZSTD_REP_MOVE;
matches[0].len = (U32)mlen;
mnum = 1;
if ( (mlen > sufficient_len) |
(ip+mlen == iLimit) ) { /* best possible length */
- ms->nextToUpdate = current+1; /* skip insertion */
+ ms->nextToUpdate = curr+1; /* skip insertion */
return 1;
} } }
/* no dictMatchState lookup: dicts don't have a populated HC3 table */
}
- hashTable[h] = current; /* Update Hash Table */
+ hashTable[h] = curr; /* Update Hash Table */
while (nbCompares-- && (matchIndex >= matchLow)) {
U32* const nextPtr = bt + 2*(matchIndex & btMask);
const BYTE* match;
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
- assert(current > matchIndex);
+ assert(curr > matchIndex);
if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
assert(matchIndex+matchLength >= dictLimit); /* ensure the condition is correct when !extDict */
@@ -660,12 +660,12 @@ U32 ZSTD_insertBtAndGetAllMatches (
if (matchLength > bestLength) {
DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
- (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE);
+ (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
assert(matchEndIdx > matchIndex);
if (matchLength > matchEndIdx - matchIndex)
matchEndIdx = matchIndex + (U32)matchLength;
bestLength = matchLength;
- matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;
+ matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
matches[mnum].len = (U32)matchLength;
mnum++;
if ( (matchLength > ZSTD_OPT_NUM)
@@ -708,11 +708,11 @@ U32 ZSTD_insertBtAndGetAllMatches (
if (matchLength > bestLength) {
matchIndex = dictMatchIndex + dmsIndexDelta;
DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
- (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE);
+ (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
if (matchLength > matchEndIdx - matchIndex)
matchEndIdx = matchIndex + (U32)matchLength;
bestLength = matchLength;
- matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;
+ matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
matches[mnum].len = (U32)matchLength;
mnum++;
if ( (matchLength > ZSTD_OPT_NUM)
@@ -733,7 +733,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
}
}
- assert(matchEndIdx > current+8);
+ assert(matchEndIdx > curr+8);
ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
return mnum;
}
@@ -764,6 +764,140 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
}
}
+/*************************
+* LDM helper functions *
+*************************/
+
+/* Struct containing info needed to make decision about ldm inclusion */
+typedef struct {
+ rawSeqStore_t seqStore; /* External match candidates store for this block */
+ U32 startPosInBlock; /* Start position of the current match candidate */
+ U32 endPosInBlock; /* End position of the current match candidate */
+ U32 offset; /* Offset of the match candidate */
+} ZSTD_optLdm_t;
+
+/* ZSTD_optLdm_skipRawSeqStoreBytes():
+ * Moves forward in rawSeqStore by nbBytes, which will update the fields 'pos' and 'posInSequence'.
+ */
+static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
+ U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
+ while (currPos && rawSeqStore->pos < rawSeqStore->size) {
+ rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
+ if (currPos >= currSeq.litLength + currSeq.matchLength) {
+ currPos -= currSeq.litLength + currSeq.matchLength;
+ rawSeqStore->pos++;
+ } else {
+ rawSeqStore->posInSequence = currPos;
+ break;
+ }
+ }
+ if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
+ rawSeqStore->posInSequence = 0;
+ }
+}
+
+/* ZSTD_opt_getNextMatchAndUpdateSeqStore():
+ * Calculates the beginning and end of the next match in the current block.
+ * Updates 'pos' and 'posInSequence' of the ldmSeqStore.
+ */
+static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
+ U32 blockBytesRemaining) {
+ rawSeq currSeq;
+ U32 currBlockEndPos;
+ U32 literalsBytesRemaining;
+ U32 matchBytesRemaining;
+
+ /* Setting match end position to MAX to ensure we never use an LDM during this block */
+ if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
+ optLdm->startPosInBlock = UINT_MAX;
+ optLdm->endPosInBlock = UINT_MAX;
+ return;
+ }
+ /* Calculate appropriate bytes left in matchLength and litLength after adjusting
+ based on ldmSeqStore->posInSequence */
+ currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos];
+ assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength);
+ currBlockEndPos = currPosInBlock + blockBytesRemaining;
+ literalsBytesRemaining = (optLdm->seqStore.posInSequence < currSeq.litLength) ?
+ currSeq.litLength - (U32)optLdm->seqStore.posInSequence :
+ 0;
+ matchBytesRemaining = (literalsBytesRemaining == 0) ?
+ currSeq.matchLength - ((U32)optLdm->seqStore.posInSequence - currSeq.litLength) :
+ currSeq.matchLength;
+
+ /* If there are more literal bytes than bytes remaining in block, no ldm is possible */
+ if (literalsBytesRemaining >= blockBytesRemaining) {
+ optLdm->startPosInBlock = UINT_MAX;
+ optLdm->endPosInBlock = UINT_MAX;
+ ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining);
+ return;
+ }
+
+ /* Matches may be < MINMATCH by this process. In that case, we will reject them
+ when we are deciding whether or not to add the ldm */
+ optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining;
+ optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining;
+ optLdm->offset = currSeq.offset;
+
+ if (optLdm->endPosInBlock > currBlockEndPos) {
+ /* Match ends after the block ends, we can't use the whole match */
+ optLdm->endPosInBlock = currBlockEndPos;
+ ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock);
+ } else {
+ /* Consume nb of bytes equal to size of sequence left */
+ ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, literalsBytesRemaining + matchBytesRemaining);
+ }
+}
+
+/* ZSTD_optLdm_maybeAddMatch():
+ * Adds a match if it's long enough, based on its 'matchStartPosInBlock'
+ * and 'matchEndPosInBlock', into 'matches'. Maintains the correct ordering of 'matches'
+ */
+static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
+ ZSTD_optLdm_t* optLdm, U32 currPosInBlock) {
+ U32 posDiff = currPosInBlock - optLdm->startPosInBlock;
+ /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */
+ U32 candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
+ U32 candidateOffCode = optLdm->offset + ZSTD_REP_MOVE;
+
+ /* Ensure that current block position is not outside of the match */
+ if (currPosInBlock < optLdm->startPosInBlock
+ || currPosInBlock >= optLdm->endPosInBlock
+ || candidateMatchLength < MINMATCH) {
+ return;
+ }
+
+ if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) {
+ DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u",
+ candidateOffCode, candidateMatchLength, currPosInBlock);
+ matches[*nbMatches].len = candidateMatchLength;
+ matches[*nbMatches].off = candidateOffCode;
+ (*nbMatches)++;
+ }
+}
+
+/* ZSTD_optLdm_processMatchCandidate():
+ * Wrapper function to update ldm seq store and call ldm functions as necessary.
+ */
+static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_t* matches, U32* nbMatches,
+ U32 currPosInBlock, U32 remainingBytes) {
+ if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
+ return;
+ }
+
+ if (currPosInBlock >= optLdm->endPosInBlock) {
+ if (currPosInBlock > optLdm->endPosInBlock) {
+ /* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily
+ * at the end of a match from the ldm seq store, and will often be some bytes
+ * beyond matchEndPosInBlock. As such, we need to correct for these "overshoots"
+ */
+ U32 posOvershoot = currPosInBlock - optLdm->endPosInBlock;
+ ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot);
+ }
+ ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes);
+ }
+ ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock);
+}
/*-*******************************
* Optimal parser
@@ -817,6 +951,11 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
ZSTD_optimal_t* const opt = optStatePtr->priceTable;
ZSTD_match_t* const matches = optStatePtr->matchTable;
ZSTD_optimal_t lastSequence;
+ ZSTD_optLdm_t optLdm;
+
+ optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore;
+ optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0;
+ ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip));
/* init */
DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
@@ -832,7 +971,9 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
/* find first match */
{ U32 const litlen = (U32)(ip - anchor);
U32 const ll0 = !litlen;
- U32 const nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch);
+ U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch);
+ ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
+ (U32)(ip-istart), (U32)(iend - ip));
if (!nbMatches) { ip++; continue; }
/* initialize opt[0] */
@@ -925,9 +1066,9 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
if (opt[cur].mlen != 0) {
U32 const prev = cur - opt[cur].mlen;
repcodes_t newReps = ZSTD_updateRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
- memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
+ ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
} else {
- memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
+ ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
}
/* last match must start at a minimum distance of 8 from oend */
@@ -945,8 +1086,12 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
U32 const previousPrice = opt[cur].price;
U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
- U32 const nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch);
+ U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch);
U32 matchNb;
+
+ ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
+ (U32)(inr-istart), (U32)(iend-inr));
+
if (!nbMatches) {
DEBUGLOG(7, "rPos:%u : no match found", cur);
continue;
@@ -1010,9 +1155,9 @@ _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
*/
if (lastSequence.mlen != 0) {
repcodes_t reps = ZSTD_updateRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
- memcpy(rep, &reps, sizeof(reps));
+ ZSTD_memcpy(rep, &reps, sizeof(reps));
} else {
- memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
+ ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
}
{ U32 const storeEnd = cur + 1;
@@ -1110,7 +1255,7 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
const void* src, size_t srcSize)
{
U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */
- memcpy(tmpRep, rep, sizeof(tmpRep));
+ ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));
DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
assert(ms->opt.litLengthSum == 0); /* first block */
@@ -1143,7 +1288,7 @@ size_t ZSTD_compressBlock_btultra2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
- U32 const current = (U32)((const BYTE*)src - ms->window.base);
+ U32 const curr = (U32)((const BYTE*)src - ms->window.base);
DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);
/* 2-pass strategy:
@@ -1158,7 +1303,7 @@ size_t ZSTD_compressBlock_btultra2(
if ( (ms->opt.litLengthSum==0) /* first block */
&& (seqStore->sequences == seqStore->sequencesStart) /* no ldm */
&& (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */
- && (current == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */
+ && (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */
&& (srcSize > ZSTD_PREDEF_THRESHOLD)
) {
ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
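The new optLdm helpers in zstd_opt.c keep a byte-granular cursor (pos, posInSequence) into the raw LDM sequence store and advance it as the optimal parser walks the block, so LDM matches become candidates rather than forced sequences. Below is a self-contained sketch of that cursor advance, mirroring ZSTD_optLdm_skipRawSeqStoreBytes; the seq_t and seqCursor_t types and the sample sequences are simplified stand-ins for rawSeq and rawSeqStore_t.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t litLength, matchLength; } seq_t;

typedef struct {
    const seq_t* seq;
    size_t pos;            /* index of the current sequence */
    size_t posInSequence;  /* bytes already consumed inside seq[pos] */
    size_t size;
} seqCursor_t;

/* Advance the cursor by nbBytes, as ZSTD_optLdm_skipRawSeqStoreBytes() does */
static void skipBytes(seqCursor_t* c, size_t nbBytes)
{
    uint32_t currPos = (uint32_t)(c->posInSequence + nbBytes);
    while (currPos && c->pos < c->size) {
        seq_t s = c->seq[c->pos];
        if (currPos >= s.litLength + s.matchLength) {
            currPos -= s.litLength + s.matchLength;  /* consume whole sequence */
            c->pos++;
        } else {
            c->posInSequence = currPos;              /* stop inside a sequence */
            break;
        }
    }
    if (currPos == 0 || c->pos == c->size)
        c->posInSequence = 0;
}

int main(void)
{
    const seq_t seqs[2] = { { 10, 90 }, { 5, 200 } };   /* 100 + 205 bytes */
    seqCursor_t c = { seqs, 0, 0, 2 };

    skipBytes(&c, 120);   /* skips all of seq[0], lands 20 bytes into seq[1] */
    printf("pos=%zu posInSequence=%zu\n", c.pos, c.posInSequence);
    return 0;
}

With this cursor, ZSTD_optLdm_processMatchCandidate can refresh the current candidate whenever the parser position passes its end, which is why ZSTD_ldm_blockCompress now hands the whole rawSeqStore to the block compressor for strategies >= ZSTD_btopt instead of forcing each LDM sequence into the output.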
diff --git a/Utilities/cmzstd/lib/compress/zstd_opt.h b/Utilities/cmzstd/lib/compress/zstd_opt.h
index 9aba8a9018..627255f53d 100644
--- a/Utilities/cmzstd/lib/compress/zstd_opt.h
+++ b/Utilities/cmzstd/lib/compress/zstd_opt.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/Utilities/cmzstd/lib/compress/zstdmt_compress.c b/Utilities/cmzstd/lib/compress/zstdmt_compress.c
index 1e3c8fdbee..22aa3e1245 100644
--- a/Utilities/cmzstd/lib/compress/zstdmt_compress.c
+++ b/Utilities/cmzstd/lib/compress/zstdmt_compress.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -20,8 +20,7 @@
/* ====== Dependencies ====== */
-#include <string.h> /* memcpy, memset */
-#include <limits.h> /* INT_MAX, UINT_MAX */
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset, INT_MAX, UINT_MAX */
#include "../common/mem.h" /* MEM_STATIC */
#include "../common/pool.h" /* threadpool */
#include "../common/threading.h" /* mutex */
@@ -106,11 +105,11 @@ typedef struct ZSTDMT_bufferPool_s {
static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbWorkers, ZSTD_customMem cMem)
{
unsigned const maxNbBuffers = 2*nbWorkers + 3;
- ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_calloc(
+ ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_customCalloc(
sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
if (bufPool==NULL) return NULL;
if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) {
- ZSTD_free(bufPool, cMem);
+ ZSTD_customFree(bufPool, cMem);
return NULL;
}
bufPool->bufferSize = 64 KB;
@@ -127,10 +126,10 @@ static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
if (!bufPool) return; /* compatibility with free on NULL */
for (u=0; u<bufPool->totalBuffers; u++) {
DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start);
- ZSTD_free(bufPool->bTable[u].start, bufPool->cMem);
+ ZSTD_customFree(bufPool->bTable[u].start, bufPool->cMem);
}
ZSTD_pthread_mutex_destroy(&bufPool->poolMutex);
- ZSTD_free(bufPool, bufPool->cMem);
+ ZSTD_customFree(bufPool, bufPool->cMem);
}
/* only works at initialization, not during compression */
@@ -201,13 +200,13 @@ static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
}
/* size conditions not respected : scratch this buffer, create new one */
DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing");
- ZSTD_free(buf.start, bufPool->cMem);
+ ZSTD_customFree(buf.start, bufPool->cMem);
}
ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
/* create new buffer */
DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer");
{ buffer_t buffer;
- void* const start = ZSTD_malloc(bSize, bufPool->cMem);
+ void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
buffer.start = start; /* note : start can be NULL if malloc fails ! */
buffer.capacity = (start==NULL) ? 0 : bSize;
if (start==NULL) {
@@ -229,13 +228,13 @@ static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer)
{
size_t const bSize = bufPool->bufferSize;
if (buffer.capacity < bSize) {
- void* const start = ZSTD_malloc(bSize, bufPool->cMem);
+ void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
buffer_t newBuffer;
newBuffer.start = start;
newBuffer.capacity = start == NULL ? 0 : bSize;
if (start != NULL) {
assert(newBuffer.capacity >= buffer.capacity);
- memcpy(newBuffer.start, buffer.start, buffer.capacity);
+ ZSTD_memcpy(newBuffer.start, buffer.start, buffer.capacity);
DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize);
return newBuffer;
}
@@ -261,14 +260,12 @@ static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
/* Reached bufferPool capacity (should not happen) */
DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing ");
- ZSTD_free(buf.start, bufPool->cMem);
+ ZSTD_customFree(buf.start, bufPool->cMem);
}
/* ===== Seq Pool Wrapper ====== */
-static rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0};
-
typedef ZSTDMT_bufferPool ZSTDMT_seqPool;
static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)
@@ -278,7 +275,7 @@ static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)
static rawSeqStore_t bufferToSeq(buffer_t buffer)
{
- rawSeqStore_t seq = {NULL, 0, 0, 0};
+ rawSeqStore_t seq = kNullRawSeqStore;
seq.seq = (rawSeq*)buffer.start;
seq.capacity = buffer.capacity / sizeof(rawSeq);
return seq;
@@ -354,7 +351,7 @@ static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
for (cid=0; cid<pool->totalCCtx; cid++)
ZSTD_freeCCtx(pool->cctx[cid]); /* note : compatible with free on NULL */
ZSTD_pthread_mutex_destroy(&pool->poolMutex);
- ZSTD_free(pool, pool->cMem);
+ ZSTD_customFree(pool, pool->cMem);
}
/* ZSTDMT_createCCtxPool() :
@@ -362,12 +359,12 @@ static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers,
ZSTD_customMem cMem)
{
- ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc(
+ ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_customCalloc(
sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem);
assert(nbWorkers > 0);
if (!cctxPool) return NULL;
if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
- ZSTD_free(cctxPool, cMem);
+ ZSTD_customFree(cctxPool, cMem);
return NULL;
}
cctxPool->cMem = cMem;
@@ -475,10 +472,8 @@ ZSTDMT_serialState_reset(serialState_t* serialState,
ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
assert(params.ldmParams.hashRateLog < 32);
- serialState->ldmState.hashPower =
- ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
} else {
- memset(&params.ldmParams, 0, sizeof(params.ldmParams));
+ ZSTD_memset(&params.ldmParams, 0, sizeof(params.ldmParams));
}
serialState->nextJobID = 0;
if (params.fParams.checksumFlag)
@@ -489,35 +484,35 @@ ZSTDMT_serialState_reset(serialState_t* serialState,
size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);
unsigned const bucketLog =
params.ldmParams.hashLog - params.ldmParams.bucketSizeLog;
- size_t const bucketSize = (size_t)1 << bucketLog;
unsigned const prevBucketLog =
serialState->params.ldmParams.hashLog -
serialState->params.ldmParams.bucketSizeLog;
+ size_t const numBuckets = (size_t)1 << bucketLog;
/* Size the seq pool tables */
ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize));
/* Reset the window */
ZSTD_window_init(&serialState->ldmState.window);
/* Resize tables and output space if necessary. */
if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) {
- ZSTD_free(serialState->ldmState.hashTable, cMem);
- serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_malloc(hashSize, cMem);
+ ZSTD_customFree(serialState->ldmState.hashTable, cMem);
+ serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc(hashSize, cMem);
}
if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) {
- ZSTD_free(serialState->ldmState.bucketOffsets, cMem);
- serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_malloc(bucketSize, cMem);
+ ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
+ serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_customMalloc(numBuckets, cMem);
}
if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets)
return 1;
/* Zero the tables */
- memset(serialState->ldmState.hashTable, 0, hashSize);
- memset(serialState->ldmState.bucketOffsets, 0, bucketSize);
+ ZSTD_memset(serialState->ldmState.hashTable, 0, hashSize);
+ ZSTD_memset(serialState->ldmState.bucketOffsets, 0, numBuckets);
/* Update window state and fill hash table with dict */
serialState->ldmState.loadedDictEnd = 0;
if (dictSize > 0) {
if (dictContentType == ZSTD_dct_rawContent) {
BYTE const* const dictEnd = (const BYTE*)dict + dictSize;
- ZSTD_window_update(&serialState->ldmState.window, dict, dictSize);
+ ZSTD_window_update(&serialState->ldmState.window, dict, dictSize, /* forceNonContiguous */ 0);
ZSTD_ldm_fillHashTable(&serialState->ldmState, (const BYTE*)dict, dictEnd, &params.ldmParams);
serialState->ldmState.loadedDictEnd = params.forceWindow ? 0 : (U32)(dictEnd - serialState->ldmState.window.base);
} else {
@@ -537,7 +532,7 @@ ZSTDMT_serialState_reset(serialState_t* serialState,
static int ZSTDMT_serialState_init(serialState_t* serialState)
{
int initError = 0;
- memset(serialState, 0, sizeof(*serialState));
+ ZSTD_memset(serialState, 0, sizeof(*serialState));
initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL);
initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL);
initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL);
@@ -552,8 +547,8 @@ static void ZSTDMT_serialState_free(serialState_t* serialState)
ZSTD_pthread_cond_destroy(&serialState->cond);
ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex);
ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond);
- ZSTD_free(serialState->ldmState.hashTable, cMem);
- ZSTD_free(serialState->ldmState.bucketOffsets, cMem);
+ ZSTD_customFree(serialState->ldmState.hashTable, cMem);
+ ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
}
static void ZSTDMT_serialState_update(serialState_t* serialState,
@@ -574,7 +569,7 @@ static void ZSTDMT_serialState_update(serialState_t* serialState,
assert(seqStore.seq != NULL && seqStore.pos == 0 &&
seqStore.size == 0 && seqStore.capacity > 0);
assert(src.size <= serialState->params.jobSize);
- ZSTD_window_update(&serialState->ldmState.window, src.start, src.size);
+ ZSTD_window_update(&serialState->ldmState.window, src.start, src.size, /* forceNonContiguous */ 0);
error = ZSTD_ldm_generateSequences(
&serialState->ldmState, &seqStore,
&serialState->params.ldmParams, src.start, src.size);
@@ -686,6 +681,8 @@ static void ZSTDMT_compressionJob(void* jobDescription)
if (job->jobID != 0) jobParams.fParams.checksumFlag = 0;
/* Don't run LDM for the chunks, since we handle it externally */
jobParams.ldmParams.enableLdm = 0;
+ /* Correct nbWorkers to 0. */
+ jobParams.nbWorkers = 0;
/* init */
@@ -698,6 +695,10 @@ static void ZSTDMT_compressionJob(void* jobDescription)
{ size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
}
+ if (!job->firstJob) {
+ size_t const err = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_deterministicRefPrefix, 0);
+ if (ZSTD_isError(err)) JOB_ERROR(err);
+ }
{ size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
ZSTD_dtlm_fast,
@@ -753,6 +754,13 @@ static void ZSTDMT_compressionJob(void* jobDescription)
if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
lastCBlockSize = cSize;
} }
+ if (!job->firstJob) {
+ /* Double check that we don't have an ext-dict, because then our
+ * repcode invalidation doesn't work.
+ */
+ assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
+ }
+ ZSTD_CCtx_trace(cctx, 0);
_endJob:
ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize);
@@ -820,7 +828,6 @@ struct ZSTDMT_CCtx_s {
roundBuff_t roundBuff;
serialState_t serial;
rsyncState_t rsync;
- unsigned singleBlockingThread;
unsigned jobIDMask;
unsigned doneJobID;
unsigned nextJobID;
@@ -832,6 +839,7 @@ struct ZSTDMT_CCtx_s {
ZSTD_customMem cMem;
ZSTD_CDict* cdictLocal;
const ZSTD_CDict* cdict;
+ unsigned providedFactory: 1;
};
static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem)
@@ -842,7 +850,7 @@ static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZS
ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex);
ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond);
}
- ZSTD_free(jobTable, cMem);
+ ZSTD_customFree(jobTable, cMem);
}
/* ZSTDMT_allocJobsTable()
@@ -854,7 +862,7 @@ static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_custom
U32 const nbJobs = 1 << nbJobsLog2;
U32 jobNb;
ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*)
- ZSTD_calloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem);
+ ZSTD_customCalloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem);
int initError = 0;
if (jobTable==NULL) return NULL;
*nbJobsPtr = nbJobs;
@@ -885,12 +893,12 @@ static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {
/* ZSTDMT_CCtxParam_setNbWorkers():
* Internal use only */
-size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
+static size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
{
return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers);
}
-MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem)
+MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool)
{
ZSTDMT_CCtx* mtctx;
U32 nbJobs = nbWorkers + 2;
@@ -903,12 +911,19 @@ MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers,
/* invalid custom allocator */
return NULL;
- mtctx = (ZSTDMT_CCtx*) ZSTD_calloc(sizeof(ZSTDMT_CCtx), cMem);
+ mtctx = (ZSTDMT_CCtx*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtx), cMem);
if (!mtctx) return NULL;
ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
mtctx->cMem = cMem;
mtctx->allJobsCompleted = 1;
- mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem);
+ if (pool != NULL) {
+ mtctx->factory = pool;
+ mtctx->providedFactory = 1;
+ }
+ else {
+ mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem);
+ mtctx->providedFactory = 0;
+ }
mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem);
assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0); /* ensure nbJobs is a power of 2 */
mtctx->jobIDMask = nbJobs - 1;
@@ -925,22 +940,18 @@ MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers,
return mtctx;
}
-ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
+ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool)
{
#ifdef ZSTD_MULTITHREAD
- return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem);
+ return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem, pool);
#else
(void)nbWorkers;
(void)cMem;
+ (void)pool;
return NULL;
#endif
}
-ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers)
-{
- return ZSTDMT_createCCtx_advanced(nbWorkers, ZSTD_defaultCMem);
-}
-
/* ZSTDMT_releaseAllJobResources() :
* note : ensure all workers are killed first ! */
@@ -957,7 +968,7 @@ static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
/* Clear the job description, but keep the mutex/cond */
- memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID]));
+ ZSTD_memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID]));
mtctx->jobs[jobID].job_mutex = mutex;
mtctx->jobs[jobID].job_cond = cond;
}
@@ -984,7 +995,8 @@ static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx)
size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
{
if (mtctx==NULL) return 0; /* compatible with free on NULL */
- POOL_free(mtctx->factory); /* stop and free worker threads */
+ if (!mtctx->providedFactory)
+ POOL_free(mtctx->factory); /* stop and free worker threads */
ZSTDMT_releaseAllJobResources(mtctx); /* release job resources into pools first */
ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
ZSTDMT_freeBufferPool(mtctx->bufPool);
@@ -993,8 +1005,8 @@ size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
ZSTDMT_serialState_free(&mtctx->serial);
ZSTD_freeCDict(mtctx->cdictLocal);
if (mtctx->roundBuff.buffer)
- ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem);
- ZSTD_free(mtctx, mtctx->cMem);
+ ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem);
+ ZSTD_customFree(mtctx, mtctx->cMem);
return 0;
}
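The new providedFactory flag lets the context borrow an externally owned ZSTD_threadPool: when a pool is passed in, ZSTDMT_freeCCtx above deliberately skips POOL_free and leaves the pool to its owner. From the public side this corresponds to the experimental thread-pool sharing API in zstd.h (static-linking-only section); a hedged usage sketch, error handling omitted:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    static void share_pool_example(void)
    {
        ZSTD_threadPool* const pool = ZSTD_createThreadPool(4);  /* 4 worker threads */
        ZSTD_CCtx* const c1 = ZSTD_createCCtx();
        ZSTD_CCtx* const c2 = ZSTD_createCCtx();
        ZSTD_CCtx_refThreadPool(c1, pool);   /* both contexts reuse the same workers */
        ZSTD_CCtx_refThreadPool(c2, pool);
        ZSTD_CCtx_setParameter(c1, ZSTD_c_nbWorkers, 4);
        ZSTD_CCtx_setParameter(c2, ZSTD_c_nbWorkers, 4);
        /* ... compress with c1 and c2 ... */
        ZSTD_freeCCtx(c1);                   /* does not free the shared pool */
        ZSTD_freeCCtx(c2);
        ZSTD_freeThreadPool(pool);           /* the owner frees it last */
    }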
@@ -1011,65 +1023,6 @@ size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
+ mtctx->roundBuff.capacity;
}
-/* Internal only */
-size_t
-ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params,
- ZSTDMT_parameter parameter,
- int value)
-{
- DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter");
- switch(parameter)
- {
- case ZSTDMT_p_jobSize :
- DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %i", value);
- return ZSTD_CCtxParams_setParameter(params, ZSTD_c_jobSize, value);
- case ZSTDMT_p_overlapLog :
- DEBUGLOG(4, "ZSTDMT_p_overlapLog : %i", value);
- return ZSTD_CCtxParams_setParameter(params, ZSTD_c_overlapLog, value);
- case ZSTDMT_p_rsyncable :
- DEBUGLOG(4, "ZSTD_p_rsyncable : %i", value);
- return ZSTD_CCtxParams_setParameter(params, ZSTD_c_rsyncable, value);
- default :
- return ERROR(parameter_unsupported);
- }
-}
-
-size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value)
-{
- DEBUGLOG(4, "ZSTDMT_setMTCtxParameter");
- return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value);
-}
-
-size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value)
-{
- switch (parameter) {
- case ZSTDMT_p_jobSize:
- return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_jobSize, value);
- case ZSTDMT_p_overlapLog:
- return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_overlapLog, value);
- case ZSTDMT_p_rsyncable:
- return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_rsyncable, value);
- default:
- return ERROR(parameter_unsupported);
- }
-}
-
-/* Sets parameters relevant to the compression job,
- * initializing others to default values. */
-static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(const ZSTD_CCtx_params* params)
-{
- ZSTD_CCtx_params jobParams = *params;
- /* Clear parameters related to multithreading */
- jobParams.forceWindow = 0;
- jobParams.nbWorkers = 0;
- jobParams.jobSize = 0;
- jobParams.overlapLog = 0;
- jobParams.rsyncable = 0;
- memset(&jobParams.ldmParams, 0, sizeof(ldmParams_t));
- memset(&jobParams.customMem, 0, sizeof(ZSTD_customMem));
- return jobParams;
-}
-
/* ZSTDMT_resize() :
* @return : error code if fails, 0 on success */
@@ -1098,7 +1051,7 @@ void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_p
DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)",
compressionLevel);
mtctx->params.compressionLevel = compressionLevel;
- { ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, 0);
+ { ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
cParams.windowLog = saved_wlog;
mtctx->params.cParams = cParams;
}
@@ -1185,8 +1138,8 @@ static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)
if (params->ldmParams.enableLdm) {
/* In Long Range Mode, the windowLog is typically oversized.
* In which case, it's preferable to determine the jobSize
- * based on chainLog instead. */
- jobLog = MAX(21, params->cParams.chainLog + 4);
+ * based on cycleLog instead. */
+ jobLog = MAX(21, ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy) + 3);
} else {
jobLog = MAX(20, params->cParams.windowLog + 2);
}
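ZSTD_cycleLog accounts for binary-tree strategies (btlazy2 and up) cycling their tables one bit later than hash-chain strategies, so the LDM job size now tracks the real table-cycling period rather than the raw chainLog. For reference, the helper is roughly the following (a sketch; the actual definition lives in zstd_compress.c):

    U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
    {
        U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);  /* +1 bit for bt* strategies */
        return hashLog + btScale;
    }

For example, chainLog = 24 with ZSTD_btopt gives cycleLog = 25 and jobLog = MAX(21, 25 + 3) = 28.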
@@ -1240,174 +1193,6 @@ static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params)
return (ovLog==0) ? 0 : (size_t)1 << ovLog;
}
-static unsigned
-ZSTDMT_computeNbJobs(const ZSTD_CCtx_params* params, size_t srcSize, unsigned nbWorkers)
-{
- assert(nbWorkers>0);
- { size_t const jobSizeTarget = (size_t)1 << ZSTDMT_computeTargetJobLog(params);
- size_t const jobMaxSize = jobSizeTarget << 2;
- size_t const passSizeMax = jobMaxSize * nbWorkers;
- unsigned const multiplier = (unsigned)(srcSize / passSizeMax) + 1;
- unsigned const nbJobsLarge = multiplier * nbWorkers;
- unsigned const nbJobsMax = (unsigned)(srcSize / jobSizeTarget) + 1;
- unsigned const nbJobsSmall = MIN(nbJobsMax, nbWorkers);
- return (multiplier>1) ? nbJobsLarge : nbJobsSmall;
-} }
-
-/* ZSTDMT_compress_advanced_internal() :
- * This is a blocking function : it will only give back control to caller after finishing its compression job.
- */
-static size_t
-ZSTDMT_compress_advanced_internal(
- ZSTDMT_CCtx* mtctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const ZSTD_CDict* cdict,
- ZSTD_CCtx_params params)
-{
- ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(&params);
- size_t const overlapSize = ZSTDMT_computeOverlapSize(&params);
- unsigned const nbJobs = ZSTDMT_computeNbJobs(&params, srcSize, params.nbWorkers);
- size_t const proposedJobSize = (srcSize + (nbJobs-1)) / nbJobs;
- size_t const avgJobSize = (((proposedJobSize-1) & 0x1FFFF) < 0x7FFF) ? proposedJobSize + 0xFFFF : proposedJobSize; /* avoid too small last block */
- const char* const srcStart = (const char*)src;
- size_t remainingSrcSize = srcSize;
- unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? nbJobs : (unsigned)(dstCapacity / ZSTD_compressBound(avgJobSize)); /* presumes avgJobSize >= 256 KB, which should be the case */
- size_t frameStartPos = 0, dstBufferPos = 0;
- assert(jobParams.nbWorkers == 0);
- assert(mtctx->cctxPool->totalCCtx == params.nbWorkers);
-
- params.jobSize = (U32)avgJobSize;
- DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: nbJobs=%2u (rawSize=%u bytes; fixedSize=%u) ",
- nbJobs, (U32)proposedJobSize, (U32)avgJobSize);
-
- if ((nbJobs==1) | (params.nbWorkers<=1)) { /* fallback to single-thread mode : this is a blocking invocation anyway */
- ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];
- DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: fallback to single-thread mode");
- if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams);
- return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, &jobParams);
- }
-
- assert(avgJobSize >= 256 KB); /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */
- ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgJobSize) );
- /* LDM doesn't even try to load the dictionary in single-ingestion mode */
- if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize, NULL, 0, ZSTD_dct_auto))
- return ERROR(memory_allocation);
-
- FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbJobs) , ""); /* only expands if necessary */
-
- { unsigned u;
- for (u=0; u<nbJobs; u++) {
- size_t const jobSize = MIN(remainingSrcSize, avgJobSize);
- size_t const dstBufferCapacity = ZSTD_compressBound(jobSize);
- buffer_t const dstAsBuffer = { (char*)dst + dstBufferPos, dstBufferCapacity };
- buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : g_nullBuffer;
- size_t dictSize = u ? overlapSize : 0;
-
- mtctx->jobs[u].prefix.start = srcStart + frameStartPos - dictSize;
- mtctx->jobs[u].prefix.size = dictSize;
- mtctx->jobs[u].src.start = srcStart + frameStartPos;
- mtctx->jobs[u].src.size = jobSize; assert(jobSize > 0); /* avoid job.src.size == 0 */
- mtctx->jobs[u].consumed = 0;
- mtctx->jobs[u].cSize = 0;
- mtctx->jobs[u].cdict = (u==0) ? cdict : NULL;
- mtctx->jobs[u].fullFrameSize = srcSize;
- mtctx->jobs[u].params = jobParams;
- /* do not calculate checksum within sections, but write it in header for first section */
- mtctx->jobs[u].dstBuff = dstBuffer;
- mtctx->jobs[u].cctxPool = mtctx->cctxPool;
- mtctx->jobs[u].bufPool = mtctx->bufPool;
- mtctx->jobs[u].seqPool = mtctx->seqPool;
- mtctx->jobs[u].serial = &mtctx->serial;
- mtctx->jobs[u].jobID = u;
- mtctx->jobs[u].firstJob = (u==0);
- mtctx->jobs[u].lastJob = (u==nbJobs-1);
-
- DEBUGLOG(5, "ZSTDMT_compress_advanced_internal: posting job %u (%u bytes)", u, (U32)jobSize);
- DEBUG_PRINTHEX(6, mtctx->jobs[u].prefix.start, 12);
- POOL_add(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[u]);
-
- frameStartPos += jobSize;
- dstBufferPos += dstBufferCapacity;
- remainingSrcSize -= jobSize;
- } }
-
- /* collect result */
- { size_t error = 0, dstPos = 0;
- unsigned jobID;
- for (jobID=0; jobID<nbJobs; jobID++) {
- DEBUGLOG(5, "waiting for job %u ", jobID);
- ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
- while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
- DEBUGLOG(5, "waiting for jobCompleted signal from job %u", jobID);
- ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
- }
- ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
- DEBUGLOG(5, "ready to write job %u ", jobID);
-
- { size_t const cSize = mtctx->jobs[jobID].cSize;
- if (ZSTD_isError(cSize)) error = cSize;
- if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall);
- if (jobID) { /* note : job 0 is written directly at dst, which is correct position */
- if (!error)
- memmove((char*)dst + dstPos, mtctx->jobs[jobID].dstBuff.start, cSize); /* may overlap when job compressed within dst */
- if (jobID >= compressWithinDst) { /* job compressed into its own buffer, which must be released */
- DEBUGLOG(5, "releasing buffer %u>=%u", jobID, compressWithinDst);
- ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
- } }
- mtctx->jobs[jobID].dstBuff = g_nullBuffer;
- mtctx->jobs[jobID].cSize = 0;
- dstPos += cSize ;
- }
- } /* for (jobID=0; jobID<nbJobs; jobID++) */
-
- DEBUGLOG(4, "checksumFlag : %u ", params.fParams.checksumFlag);
- if (params.fParams.checksumFlag) {
- U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
- if (dstPos + 4 > dstCapacity) {
- error = ERROR(dstSize_tooSmall);
- } else {
- DEBUGLOG(4, "writing checksum : %08X \n", checksum);
- MEM_writeLE32((char*)dst + dstPos, checksum);
- dstPos += 4;
- } }
-
- if (!error) DEBUGLOG(4, "compressed size : %u ", (U32)dstPos);
- return error ? error : dstPos;
- }
-}
-
-size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const ZSTD_CDict* cdict,
- ZSTD_parameters params,
- int overlapLog)
-{
- ZSTD_CCtx_params cctxParams = mtctx->params;
- cctxParams.cParams = params.cParams;
- cctxParams.fParams = params.fParams;
- assert(ZSTD_OVERLAPLOG_MIN <= overlapLog && overlapLog <= ZSTD_OVERLAPLOG_MAX);
- cctxParams.overlapLog = overlapLog;
- return ZSTDMT_compress_advanced_internal(mtctx,
- dst, dstCapacity,
- src, srcSize,
- cdict, cctxParams);
-}
-
-
-size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- int compressionLevel)
-{
- ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);
- int const overlapLog = ZSTDMT_overlapLog_default(params.cParams.strategy);
- params.fParams.contentSizeFlag = 1;
- return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapLog);
-}
-
-
/* ====================================== */
/* ======= Streaming API ======= */
/* ====================================== */
@@ -1432,16 +1217,6 @@ size_t ZSTDMT_initCStream_internal(
if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX;
- mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN); /* do not trigger multi-threading when srcSize is too small */
- if (mtctx->singleBlockingThread) {
- ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(&params);
- DEBUGLOG(5, "ZSTDMT_initCStream_internal: switch to single blocking thread mode");
- assert(singleThreadParams.nbWorkers == 0);
- return ZSTD_initCStream_internal(mtctx->cctxPool->cctx[0],
- dict, dictSize, cdict,
- &singleThreadParams, pledgedSrcSize);
- }
-
DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers);
if (mtctx->allJobsCompleted == 0) { /* previous compression not correctly finished */
@@ -1475,9 +1250,8 @@ size_t ZSTDMT_initCStream_internal(
if (params.rsyncable) {
/* Aim for the targetsectionSize as the average job size. */
- U32 const jobSizeMB = (U32)(mtctx->targetSectionSize >> 20);
- U32 const rsyncBits = ZSTD_highbit32(jobSizeMB) + 20;
- assert(jobSizeMB >= 1);
+ U32 const jobSizeKB = (U32)(mtctx->targetSectionSize >> 10);
+ U32 const rsyncBits = (assert(jobSizeKB >= 1), ZSTD_highbit32(jobSizeKB) + 10);
DEBUGLOG(4, "rsyncLog = %u", rsyncBits);
mtctx->rsync.hash = 0;
mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1;
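Switching from MB to KB granularity keeps this computation valid now that ZSTDMT_JOBSIZE_MIN drops to 512 KB (see the header change below), where the old jobSizeMB could have been 0. Worked example with illustrative numbers: targetSectionSize = 2 MiB gives jobSizeKB = 2048, ZSTD_highbit32(2048) = 11, so rsyncBits = 21 and hitMask = 2^21 - 1; a synchronization point then fires on average once every 2^21 bytes, matching the 2 MiB job-size target.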
@@ -1504,8 +1278,8 @@ size_t ZSTDMT_initCStream_internal(
size_t const capacity = MAX(windowSize, sectionsSize) + slackSize;
if (mtctx->roundBuff.capacity < capacity) {
if (mtctx->roundBuff.buffer)
- ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem);
- mtctx->roundBuff.buffer = (BYTE*)ZSTD_malloc(capacity, mtctx->cMem);
+ ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem);
+ mtctx->roundBuff.buffer = (BYTE*)ZSTD_customMalloc(capacity, mtctx->cMem);
if (mtctx->roundBuff.buffer == NULL) {
mtctx->roundBuff.capacity = 0;
return ERROR(memory_allocation);
@@ -1530,53 +1304,6 @@ size_t ZSTDMT_initCStream_internal(
return 0;
}
-size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
- const void* dict, size_t dictSize,
- ZSTD_parameters params,
- unsigned long long pledgedSrcSize)
-{
- ZSTD_CCtx_params cctxParams = mtctx->params; /* retrieve sticky params */
- DEBUGLOG(4, "ZSTDMT_initCStream_advanced (pledgedSrcSize=%u)", (U32)pledgedSrcSize);
- cctxParams.cParams = params.cParams;
- cctxParams.fParams = params.fParams;
- return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, ZSTD_dct_auto, NULL,
- cctxParams, pledgedSrcSize);
-}
-
-size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
- const ZSTD_CDict* cdict,
- ZSTD_frameParameters fParams,
- unsigned long long pledgedSrcSize)
-{
- ZSTD_CCtx_params cctxParams = mtctx->params;
- if (cdict==NULL) return ERROR(dictionary_wrong); /* method incompatible with NULL cdict */
- cctxParams.cParams = ZSTD_getCParamsFromCDict(cdict);
- cctxParams.fParams = fParams;
- return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, ZSTD_dct_auto, cdict,
- cctxParams, pledgedSrcSize);
-}
-
-
-/* ZSTDMT_resetCStream() :
- * pledgedSrcSize can be zero == unknown (for the time being)
- * prefer using ZSTD_CONTENTSIZE_UNKNOWN,
- * as `0` might mean "empty" in the future */
-size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize)
-{
- if (!pledgedSrcSize) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
- return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, 0, mtctx->params,
- pledgedSrcSize);
-}
-
-size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel) {
- ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0);
- ZSTD_CCtx_params cctxParams = mtctx->params; /* retrieve sticky params */
- DEBUGLOG(4, "ZSTDMT_initCStream (cLevel=%i)", compressionLevel);
- cctxParams.cParams = params.cParams;
- cctxParams.fParams = params.fParams;
- return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN);
-}
-
/* ZSTDMT_writeLastEmptyBlock()
* Write a single empty block with an end-of-frame to finish a frame.
@@ -1740,7 +1467,7 @@ static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, u
assert(cSize >= mtctx->jobs[wJobID].dstFlushed);
assert(mtctx->jobs[wJobID].dstBuff.start != NULL);
if (toFlush > 0) {
- memcpy((char*)output->dst + output->pos,
+ ZSTD_memcpy((char*)output->dst + output->pos,
(const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed,
toFlush);
}
@@ -1894,7 +1621,7 @@ static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
return 0;
}
ZSTDMT_waitForLdmComplete(mtctx, buffer);
- memmove(start, mtctx->inBuff.prefix.start, prefixSize);
+ ZSTD_memmove(start, mtctx->inBuff.prefix.start, prefixSize);
mtctx->inBuff.prefix.start = start;
mtctx->roundBuff.pos = prefixSize;
}
@@ -1968,6 +1695,16 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
pos = 0;
prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
+ if ((hash & hitMask) == hitMask) {
+ /* We're already at a sync point so don't load any more until
+ * we're able to flush this sync point.
+ * This likely happened because the job table was full so we
+ * couldn't add our job.
+ */
+ syncPoint.toLoad = 0;
+ syncPoint.flush = 1;
+ return syncPoint;
+ }
} else {
/* We don't have enough bytes buffered to initialize the hash, but
* we know we have at least RSYNC_LENGTH bytes total.
@@ -2022,34 +1759,11 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
assert(output->pos <= output->size);
assert(input->pos <= input->size);
- if (mtctx->singleBlockingThread) { /* delegate to single-thread (synchronous) */
- return ZSTD_compressStream2(mtctx->cctxPool->cctx[0], output, input, endOp);
- }
-
if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
/* current frame being ended. Only flush/end are allowed */
return ERROR(stage_wrong);
}
- /* single-pass shortcut (note : synchronous-mode) */
- if ( (!mtctx->params.rsyncable) /* rsyncable mode is disabled */
- && (mtctx->nextJobID == 0) /* just started */
- && (mtctx->inBuff.filled == 0) /* nothing buffered */
- && (!mtctx->jobReady) /* no job already created */
- && (endOp == ZSTD_e_end) /* end order */
- && (output->size - output->pos >= ZSTD_compressBound(input->size - input->pos)) ) { /* enough space in dst */
- size_t const cSize = ZSTDMT_compress_advanced_internal(mtctx,
- (char*)output->dst + output->pos, output->size - output->pos,
- (const char*)input->src + input->pos, input->size - input->pos,
- mtctx->cdict, mtctx->params);
- if (ZSTD_isError(cSize)) return cSize;
- input->pos = input->size;
- output->pos += cSize;
- mtctx->allJobsCompleted = 1;
- mtctx->frameEnded = 1;
- return 0;
- }
-
/* fill input buffer */
if ( (!mtctx->jobReady)
&& (input->size > input->pos) ) { /* support NULL input */
@@ -2072,13 +1786,21 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize);
DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u",
(U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize);
- memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad);
+ ZSTD_memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad);
input->pos += syncPoint.toLoad;
mtctx->inBuff.filled += syncPoint.toLoad;
forwardInputProgress = syncPoint.toLoad>0;
}
- if ((input->pos < input->size) && (endOp == ZSTD_e_end))
- endOp = ZSTD_e_flush; /* can't end now : not all input consumed */
+ }
+ if ((input->pos < input->size) && (endOp == ZSTD_e_end)) {
+ /* Can't end yet because the input is not fully consumed.
+ * We are in one of these cases:
+ * - mtctx->inBuff is NULL & empty: we couldn't get an input buffer so don't create a new job.
+ * - We filled the input buffer: flush this job but don't end the frame.
+ * - We hit a synchronization point: flush this job but don't end the frame.
+ */
+ assert(mtctx->inBuff.filled == 0 || mtctx->inBuff.filled == mtctx->targetSectionSize || mtctx->params.rsyncable);
+ endOp = ZSTD_e_flush;
}
if ( (mtctx->jobReady)
@@ -2097,47 +1819,3 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
return remainingToFlush;
}
}
-
-
-size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
-{
- FORWARD_IF_ERROR( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) , "");
-
- /* recommended next input size : fill current input buffer */
- return mtctx->targetSectionSize - mtctx->inBuff.filled; /* note : could be zero when input buffer is fully filled and no more availability to create new job */
-}
-
-
-static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_EndDirective endFrame)
-{
- size_t const srcSize = mtctx->inBuff.filled;
- DEBUGLOG(5, "ZSTDMT_flushStream_internal");
-
- if ( mtctx->jobReady /* one job ready for a worker to pick up */
- || (srcSize > 0) /* still some data within input buffer */
- || ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) { /* need a last 0-size block to end frame */
- DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)",
- (U32)srcSize, (U32)endFrame);
- FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) , "");
- }
-
- /* check if there is any data available to flush */
- return ZSTDMT_flushProduced(mtctx, output, 1 /* blockToFlush */, endFrame);
-}
-
-
-size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
-{
- DEBUGLOG(5, "ZSTDMT_flushStream");
- if (mtctx->singleBlockingThread)
- return ZSTD_flushStream(mtctx->cctxPool->cctx[0], output);
- return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_flush);
-}
-
-size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
-{
- DEBUGLOG(4, "ZSTDMT_endStream");
- if (mtctx->singleBlockingThread)
- return ZSTD_endStream(mtctx->cctxPool->cctx[0], output);
- return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_end);
-}
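With the blocking ZSTDMT_compress* entry points and the legacy streaming initializers removed, multithreaded compression is reachable only through the regular public API. A minimal sketch of the supported route (stable zstd API; error checks abbreviated):

    #include <zstd.h>

    static size_t compress_mt(void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        size_t cSize;
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);   /* >0 routes through the ZSTDMT backend */
        cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
        ZSTD_freeCCtx(cctx);
        return cSize;                                         /* check with ZSTD_isError() */
    }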
diff --git a/Utilities/cmzstd/lib/compress/zstdmt_compress.h b/Utilities/cmzstd/lib/compress/zstdmt_compress.h
index 89914eb7f8..2fee2ec745 100644
--- a/Utilities/cmzstd/lib/compress/zstdmt_compress.h
+++ b/Utilities/cmzstd/lib/compress/zstdmt_compress.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -19,113 +19,57 @@
/* Note : This is an internal API.
* These APIs used to be exposed with ZSTDLIB_API,
* because it used to be the only way to invoke MT compression.
- * Now, it's recommended to use ZSTD_compress2 and ZSTD_compressStream2()
- * instead.
- *
- * If you depend on these APIs and can't switch, then define
- * ZSTD_LEGACY_MULTITHREADED_API when making the dynamic library.
- * However, we may completely remove these functions in a future
- * release, so please switch soon.
+ * Now, you must use ZSTD_compress2 and ZSTD_compressStream2() instead.
*
* This API requires ZSTD_MULTITHREAD to be defined during compilation,
* otherwise ZSTDMT_createCCtx*() will fail.
*/
-#ifdef ZSTD_LEGACY_MULTITHREADED_API
-# define ZSTDMT_API ZSTDLIB_API
-#else
-# define ZSTDMT_API
-#endif
-
/* === Dependencies === */
-#include <stddef.h> /* size_t */
+#include "../common/zstd_deps.h" /* size_t */
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */
#include "../zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */
/* === Constants === */
-#ifndef ZSTDMT_NBWORKERS_MAX
-# define ZSTDMT_NBWORKERS_MAX 200
+#ifndef ZSTDMT_NBWORKERS_MAX /* a different value can be selected at compile time */
+# define ZSTDMT_NBWORKERS_MAX ((sizeof(void*)==4) /*32-bit*/ ? 64 : 256)
#endif
-#ifndef ZSTDMT_JOBSIZE_MIN
-# define ZSTDMT_JOBSIZE_MIN (1 MB)
+#ifndef ZSTDMT_JOBSIZE_MIN /* a different value can be selected at compile time */
+# define ZSTDMT_JOBSIZE_MIN (512 KB)
#endif
#define ZSTDMT_JOBLOG_MAX (MEM_32bits() ? 29 : 30)
#define ZSTDMT_JOBSIZE_MAX (MEM_32bits() ? (512 MB) : (1024 MB))
+/* ========================================================
+ * === Private interface, for use by ZSTD_compress.c ===
+ * === Not exposed in libzstd. Never invoke directly ===
+ * ======================================================== */
+
/* === Memory management === */
typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */
-ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers);
-/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */
-ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers,
- ZSTD_customMem cMem);
-ZSTDMT_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);
-
-ZSTDMT_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);
-
-
-/* === Simple one-pass compression function === */
-
-ZSTDMT_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- int compressionLevel);
-
+ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers,
+ ZSTD_customMem cMem,
+ ZSTD_threadPool *pool);
+size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);
+size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);
/* === Streaming functions === */
-ZSTDMT_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
-ZSTDMT_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize); /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */
-
-ZSTDMT_API size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx);
-ZSTDMT_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
-
-ZSTDMT_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
-ZSTDMT_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
-
-
-/* === Advanced functions and parameters === */
-
-ZSTDMT_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const ZSTD_CDict* cdict,
- ZSTD_parameters params,
- int overlapLog);
-
-ZSTDMT_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
- const void* dict, size_t dictSize, /* dict can be released after init, a local copy is preserved within zcs */
- ZSTD_parameters params,
- unsigned long long pledgedSrcSize); /* pledgedSrcSize is optional and can be zero == unknown */
-
-ZSTDMT_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
- const ZSTD_CDict* cdict,
- ZSTD_frameParameters fparams,
- unsigned long long pledgedSrcSize); /* note : zero means empty */
-
-/* ZSTDMT_parameter :
- * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
-typedef enum {
- ZSTDMT_p_jobSize, /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */
- ZSTDMT_p_overlapLog, /* Each job may reload a part of previous job to enhance compression ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
- ZSTDMT_p_rsyncable /* Enables rsyncable mode. */
-} ZSTDMT_parameter;
-
-/* ZSTDMT_setMTCtxParameter() :
- * allow setting individual parameters, one at a time, among a list of enums defined in ZSTDMT_parameter.
- * The function must be called typically after ZSTD_createCCtx() but __before ZSTDMT_init*() !__
- * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions.
- * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
-ZSTDMT_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value);
-
-/* ZSTDMT_getMTCtxParameter() :
- * Query the ZSTDMT_CCtx for a parameter value.
- * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
-ZSTDMT_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value);
+size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx);
+/*! ZSTDMT_initCStream_internal() :
+ * Private use only. Init streaming operation.
+ * expects params to be valid.
+ * must receive dict, or cdict, or none, but not both.
+ * @return : 0, or an error code */
+size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
+ const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);
/*! ZSTDMT_compressStream_generic() :
* Combines ZSTDMT_compressStream() with optional ZSTDMT_flushStream() or ZSTDMT_endStream()
@@ -134,16 +78,10 @@ ZSTDMT_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter
* 0 if fully flushed
* or an error code
* note : needs to be init using any ZSTD_initCStream*() variant */
-ZSTDMT_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
- ZSTD_outBuffer* output,
- ZSTD_inBuffer* input,
- ZSTD_EndDirective endOp);
-
-
-/* ========================================================
- * === Private interface, for use by ZSTD_compress.c ===
- * === Not exposed in libzstd. Never invoke directly ===
- * ======================================================== */
+size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective endOp);
/*! ZSTDMT_toFlushNow()
* Tell how many bytes are ready to be flushed immediately.
@@ -153,15 +91,6 @@ ZSTDMT_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
* therefore flushing is limited by speed of oldest job. */
size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx);
-/*! ZSTDMT_CCtxParam_setMTCtxParameter()
- * like ZSTDMT_setMTCtxParameter(), but into a ZSTD_CCtx_Params */
-size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, int value);
-
-/*! ZSTDMT_CCtxParam_setNbWorkers()
- * Set nbWorkers, and clamp it.
- * Also reset jobSize and overlapLog */
-size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers);
-
/*! ZSTDMT_updateCParams_whileCompressing() :
* Updates only a selected set of compression parameters, to remain compatible with current frame.
* New parameters will be applied to next compression job. */
@@ -174,17 +103,6 @@ void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_p
ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx);
-/*! ZSTDMT_initCStream_internal() :
- * Private use only. Init streaming operation.
- * expects params to be valid.
- * must receive dict, or cdict, or none, but not both.
- * @return : 0, or an error code */
-size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
- const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
- const ZSTD_CDict* cdict,
- ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);
-
-
#if defined (__cplusplus)
}
#endif
diff --git a/Utilities/cmzstd/lib/decompress/huf_decompress.c b/Utilities/cmzstd/lib/decompress/huf_decompress.c
index 68293a1309..b93c9a003b 100644
--- a/Utilities/cmzstd/lib/decompress/huf_decompress.c
+++ b/Utilities/cmzstd/lib/decompress/huf_decompress.c
@@ -1,7 +1,7 @@
/* ******************************************************************
* huff0 huffman decoder,
* part of Finite State Entropy library
- * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -15,7 +15,7 @@
/* **************************************************************
* Dependencies
****************************************************************/
-#include <string.h> /* memcpy, memset */
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */
#include "../common/compiler.h"
#include "../common/bitstream.h" /* BIT_* */
#include "../common/fse.h" /* to compress headers */
@@ -103,7 +103,7 @@ typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved;
static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
{
DTableDesc dtd;
- memcpy(&dtd, table, sizeof(dtd));
+ ZSTD_memcpy(&dtd, table, sizeof(dtd));
return dtd;
}
@@ -115,29 +115,51 @@ static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
/*-***************************/
typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1; /* single-symbol decoding */
+/**
+ * Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at
+ * a time.
+ */
+static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) {
+ U64 D4;
+ if (MEM_isLittleEndian()) {
+ D4 = symbol + (nbBits << 8);
+ } else {
+ D4 = (symbol << 8) + nbBits;
+ }
+ D4 *= 0x0001000100010001ULL;
+ return D4;
+}
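Each HUF_DEltX1 is two bytes ({byte, nbBits}), so four identical entries fit in one U64: the 16-bit pattern is built in the endianness-appropriate byte order, and multiplying by 0x0001000100010001 replicates it into all four lanes so MEM_write64 can stamp four table slots at once. Illustrative values: on a little-endian machine, symbol = 0x41 and nbBits = 5 give the pattern 0x0541, hence D4 = 0x0541054105410541.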
+
+typedef struct {
+ U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];
+ U32 rankStart[HUF_TABLELOG_ABSOLUTEMAX + 1];
+ U32 statsWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
+ BYTE symbols[HUF_SYMBOLVALUE_MAX + 1];
+ BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
+} HUF_ReadDTableX1_Workspace;
+
+
size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
{
+ return HUF_readDTableX1_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0);
+}
+
+size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2)
+{
U32 tableLog = 0;
U32 nbSymbols = 0;
size_t iSize;
void* const dtPtr = DTable + 1;
HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr;
+ HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace;
- U32* rankVal;
- BYTE* huffWeight;
- size_t spaceUsed32 = 0;
-
- rankVal = (U32 *)workSpace + spaceUsed32;
- spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
- huffWeight = (BYTE *)((U32 *)workSpace + spaceUsed32);
- spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
-
- if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge);
+ DEBUG_STATIC_ASSERT(HUF_DECOMPRESS_WORKSPACE_SIZE >= sizeof(*wksp));
+ if (sizeof(*wksp) > wkspSize) return ERROR(tableLog_tooLarge);
DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
- /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */
+ /* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */
- iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
+ iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2);
if (HUF_isError(iSize)) return iSize;
/* Table header */
@@ -145,52 +167,117 @@ size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize
if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
dtd.tableType = 0;
dtd.tableLog = (BYTE)tableLog;
- memcpy(DTable, &dtd, sizeof(dtd));
+ ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
}
- /* Calculate starting value for each rank */
- { U32 n, nextRankStart = 0;
- for (n=1; n<tableLog+1; n++) {
- U32 const current = nextRankStart;
- nextRankStart += (rankVal[n] << (n-1));
- rankVal[n] = current;
- } }
+ /* Compute symbols and rankStart given rankVal:
+ *
+ * rankVal already contains the number of values of each weight.
+ *
+ * symbols contains the symbols ordered by weight. First are the rankVal[0]
+ * weight 0 symbols, followed by the rankVal[1] weight 1 symbols, and so on.
+ * symbols[0] is filled (but unused) to avoid a branch.
+ *
+ * rankStart contains the offset where each rank belongs in the DTable.
+ * rankStart[0] is not filled because there are no entries in the table for
+ * weight 0.
+ */
+ {
+ int n;
+ int nextRankStart = 0;
+ int const unroll = 4;
+ int const nLimit = (int)nbSymbols - unroll + 1;
+ for (n=0; n<(int)tableLog+1; n++) {
+ U32 const curr = nextRankStart;
+ nextRankStart += wksp->rankVal[n];
+ wksp->rankStart[n] = curr;
+ }
+ for (n=0; n < nLimit; n += unroll) {
+ int u;
+ for (u=0; u < unroll; ++u) {
+ size_t const w = wksp->huffWeight[n+u];
+ wksp->symbols[wksp->rankStart[w]++] = (BYTE)(n+u);
+ }
+ }
+ for (; n < (int)nbSymbols; ++n) {
+ size_t const w = wksp->huffWeight[n];
+ wksp->symbols[wksp->rankStart[w]++] = (BYTE)n;
+ }
+ }
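The loop above is effectively a counting sort of symbols by weight, unrolled four at a time. Illustrative run: with nbSymbols = 4 and huffWeight = {1, 2, 1, 0}, rankVal counts to {1, 2, 1} for weights 0..2, so rankStart becomes {0, 1, 3}; the second pass then writes symbols = {3, 0, 2, 1}, i.e. the lone weight-0 symbol first, then the weight-1 symbols, then the weight-2 symbol.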
- /* fill DTable */
- { U32 n;
- size_t const nEnd = nbSymbols;
- for (n=0; n<nEnd; n++) {
- size_t const w = huffWeight[n];
- size_t const length = (1 << w) >> 1;
- size_t const uStart = rankVal[w];
- size_t const uEnd = uStart + length;
- size_t u;
- HUF_DEltX1 D;
- D.byte = (BYTE)n;
- D.nbBits = (BYTE)(tableLog + 1 - w);
- rankVal[w] = (U32)uEnd;
- if (length < 4) {
- /* Use length in the loop bound so the compiler knows it is short. */
- for (u = 0; u < length; ++u)
- dt[uStart + u] = D;
- } else {
- /* Unroll the loop 4 times, we know it is a power of 2. */
- for (u = uStart; u < uEnd; u += 4) {
- dt[u + 0] = D;
- dt[u + 1] = D;
- dt[u + 2] = D;
- dt[u + 3] = D;
- } } } }
+ /* fill DTable
+ * We fill all entries of each weight in order.
+ * That way length is a constant for each iteration of the outer loop.
+ * We can switch based on the length to a different inner loop which is
+ * optimized for that particular case.
+ */
+ {
+ U32 w;
+ int symbol=wksp->rankVal[0];
+ int rankStart=0;
+ for (w=1; w<tableLog+1; ++w) {
+ int const symbolCount = wksp->rankVal[w];
+ int const length = (1 << w) >> 1;
+ int uStart = rankStart;
+ BYTE const nbBits = (BYTE)(tableLog + 1 - w);
+ int s;
+ int u;
+ switch (length) {
+ case 1:
+ for (s=0; s<symbolCount; ++s) {
+ HUF_DEltX1 D;
+ D.byte = wksp->symbols[symbol + s];
+ D.nbBits = nbBits;
+ dt[uStart] = D;
+ uStart += 1;
+ }
+ break;
+ case 2:
+ for (s=0; s<symbolCount; ++s) {
+ HUF_DEltX1 D;
+ D.byte = wksp->symbols[symbol + s];
+ D.nbBits = nbBits;
+ dt[uStart+0] = D;
+ dt[uStart+1] = D;
+ uStart += 2;
+ }
+ break;
+ case 4:
+ for (s=0; s<symbolCount; ++s) {
+ U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
+ MEM_write64(dt + uStart, D4);
+ uStart += 4;
+ }
+ break;
+ case 8:
+ for (s=0; s<symbolCount; ++s) {
+ U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
+ MEM_write64(dt + uStart, D4);
+ MEM_write64(dt + uStart + 4, D4);
+ uStart += 8;
+ }
+ break;
+ default:
+ for (s=0; s<symbolCount; ++s) {
+ U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
+ for (u=0; u < length; u += 16) {
+ MEM_write64(dt + uStart + u + 0, D4);
+ MEM_write64(dt + uStart + u + 4, D4);
+ MEM_write64(dt + uStart + u + 8, D4);
+ MEM_write64(dt + uStart + u + 12, D4);
+ }
+ assert(u == length);
+ uStart += length;
+ }
+ break;
+ }
+ symbol += symbolCount;
+ rankStart += symbolCount * length;
+ }
+ }
return iSize;
}
-size_t HUF_readDTableX1(HUF_DTable* DTable, const void* src, size_t srcSize)
-{
- U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
- return HUF_readDTableX1_wksp(DTable, src, srcSize,
- workSpace, sizeof(workSpace));
-}
-
FORCE_INLINE_TEMPLATE BYTE
HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
{
@@ -389,20 +476,6 @@ size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
}
-size_t HUF_decompress1X1_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize)
-{
- U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
- return HUF_decompress1X1_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
- workSpace, sizeof(workSpace));
-}
-
-size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
- HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX);
- return HUF_decompress1X1_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
-}
-
size_t HUF_decompress4X1_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
@@ -419,8 +492,7 @@ static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size
{
const BYTE* ip = (const BYTE*) cSrc;
- size_t const hSize = HUF_readDTableX1_wksp (dctx, cSrc, cSrcSize,
- workSpace, wkspSize);
+ size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
@@ -436,18 +508,6 @@ size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
}
-size_t HUF_decompress4X1_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
- U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
- return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
- workSpace, sizeof(workSpace));
-}
-size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
- HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX);
- return HUF_decompress4X1_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
-}
-
#endif /* HUF_FORCE_DECOMPRESS_X2 */
@@ -468,13 +528,15 @@ typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed,
const U32* rankValOrigin, const int minWeight,
const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
- U32 nbBitsBaseline, U16 baseSeq)
+ U32 nbBitsBaseline, U16 baseSeq, U32* wksp, size_t wkspSize)
{
HUF_DEltX2 DElt;
- U32 rankVal[HUF_TABLELOG_MAX + 1];
+ U32* rankVal = wksp;
+ assert(wkspSize >= HUF_TABLELOG_MAX + 1);
+ (void)wkspSize;
/* get pre-calculated rankVal */
- memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+ ZSTD_memcpy(rankVal, rankValOrigin, sizeof(U32) * (HUF_TABLELOG_MAX + 1));
/* fill skipped values */
if (minWeight>1) {
@@ -509,14 +571,18 @@ static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 co
static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
const sortedSymbol_t* sortedList, const U32 sortedListSize,
const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
- const U32 nbBitsBaseline)
+ const U32 nbBitsBaseline, U32* wksp, size_t wkspSize)
{
- U32 rankVal[HUF_TABLELOG_MAX + 1];
+ U32* rankVal = wksp;
const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
const U32 minBits = nbBitsBaseline - maxWeight;
U32 s;
- memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+ assert(wkspSize >= HUF_TABLELOG_MAX + 1);
+ wksp += HUF_TABLELOG_MAX + 1;
+ wkspSize -= HUF_TABLELOG_MAX + 1;
+
+ ZSTD_memcpy(rankVal, rankValOrigin, sizeof(U32) * (HUF_TABLELOG_MAX + 1));
/* fill DTable */
for (s=0; s<sortedListSize; s++) {
@@ -534,7 +600,7 @@ static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits,
rankValOrigin[nbBits], minWeight,
sortedList+sortedRank, sortedListSize-sortedRank,
- nbBitsBaseline, symbol);
+ nbBitsBaseline, symbol, wksp, wkspSize);
} else {
HUF_DEltX2 DElt;
MEM_writeLE16(&(DElt.sequence), symbol);
@@ -548,6 +614,15 @@ static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
}
}
+typedef struct {
+ rankValCol_t rankVal[HUF_TABLELOG_MAX];
+ U32 rankStats[HUF_TABLELOG_MAX + 1];
+ U32 rankStart0[HUF_TABLELOG_MAX + 2];
+ sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1];
+ BYTE weightList[HUF_SYMBOLVALUE_MAX + 1];
+ U32 calleeWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
+} HUF_ReadDTableX2_Workspace;
+
size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
const void* src, size_t srcSize,
void* workSpace, size_t wkspSize)
@@ -560,48 +635,33 @@ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
U32 *rankStart;
- rankValCol_t* rankVal;
- U32* rankStats;
- U32* rankStart0;
- sortedSymbol_t* sortedSymbol;
- BYTE* weightList;
- size_t spaceUsed32 = 0;
-
- rankVal = (rankValCol_t *)((U32 *)workSpace + spaceUsed32);
- spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2;
- rankStats = (U32 *)workSpace + spaceUsed32;
- spaceUsed32 += HUF_TABLELOG_MAX + 1;
- rankStart0 = (U32 *)workSpace + spaceUsed32;
- spaceUsed32 += HUF_TABLELOG_MAX + 2;
- sortedSymbol = (sortedSymbol_t *)workSpace + (spaceUsed32 * sizeof(U32)) / sizeof(sortedSymbol_t);
- spaceUsed32 += HUF_ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2;
- weightList = (BYTE *)((U32 *)workSpace + spaceUsed32);
- spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
-
- if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge);
-
- rankStart = rankStart0 + 1;
- memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));
+ HUF_ReadDTableX2_Workspace* const wksp = (HUF_ReadDTableX2_Workspace*)workSpace;
+
+ if (sizeof(*wksp) > wkspSize) return ERROR(GENERIC);
+
+ rankStart = wksp->rankStart0 + 1;
+ ZSTD_memset(wksp->rankStats, 0, sizeof(wksp->rankStats));
+ ZSTD_memset(wksp->rankStart0, 0, sizeof(wksp->rankStart0));
DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
- /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */
+ /* ZSTD_memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */
- iSize = HUF_readStats(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
+ iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), /* bmi2 */ 0);
if (HUF_isError(iSize)) return iSize;
/* check result */
if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
/* find maxWeight */
- for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
+ for (maxW = tableLog; wksp->rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
/* Get start index of each weight */
{ U32 w, nextRankStart = 0;
for (w=1; w<maxW+1; w++) {
- U32 current = nextRankStart;
- nextRankStart += rankStats[w];
- rankStart[w] = current;
+ U32 curr = nextRankStart;
+ nextRankStart += wksp->rankStats[w];
+ rankStart[w] = curr;
}
rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
sizeOfSort = nextRankStart;
@@ -610,57 +670,51 @@ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
/* sort symbols by weight */
{ U32 s;
for (s=0; s<nbSymbols; s++) {
- U32 const w = weightList[s];
+ U32 const w = wksp->weightList[s];
U32 const r = rankStart[w]++;
- sortedSymbol[r].symbol = (BYTE)s;
- sortedSymbol[r].weight = (BYTE)w;
+ wksp->sortedSymbol[r].symbol = (BYTE)s;
+ wksp->sortedSymbol[r].weight = (BYTE)w;
}
rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
}
/* Build rankVal */
- { U32* const rankVal0 = rankVal[0];
+ { U32* const rankVal0 = wksp->rankVal[0];
{ int const rescale = (maxTableLog-tableLog) - 1; /* tableLog <= maxTableLog */
U32 nextRankVal = 0;
U32 w;
for (w=1; w<maxW+1; w++) {
- U32 current = nextRankVal;
- nextRankVal += rankStats[w] << (w+rescale);
- rankVal0[w] = current;
+ U32 curr = nextRankVal;
+ nextRankVal += wksp->rankStats[w] << (w+rescale);
+ rankVal0[w] = curr;
} }
{ U32 const minBits = tableLog+1 - maxW;
U32 consumed;
for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
- U32* const rankValPtr = rankVal[consumed];
+ U32* const rankValPtr = wksp->rankVal[consumed];
U32 w;
for (w = 1; w < maxW+1; w++) {
rankValPtr[w] = rankVal0[w] >> consumed;
} } } }
HUF_fillDTableX2(dt, maxTableLog,
- sortedSymbol, sizeOfSort,
- rankStart0, rankVal, maxW,
- tableLog+1);
+ wksp->sortedSymbol, sizeOfSort,
+ wksp->rankStart0, wksp->rankVal, maxW,
+ tableLog+1,
+ wksp->calleeWksp, sizeof(wksp->calleeWksp) / sizeof(U32));
dtd.tableLog = (BYTE)maxTableLog;
dtd.tableType = 1;
- memcpy(DTable, &dtd, sizeof(dtd));
+ ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
return iSize;
}
-size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize)
-{
- U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
- return HUF_readDTableX2_wksp(DTable, src, srcSize,
- workSpace, sizeof(workSpace));
-}
-
FORCE_INLINE_TEMPLATE U32
HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
{
size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
- memcpy(op, dt+val, 2);
+ ZSTD_memcpy(op, dt+val, 2);
BIT_skipBits(DStream, dt[val].nbBits);
return dt[val].length;
}
@@ -669,7 +723,7 @@ FORCE_INLINE_TEMPLATE U32
HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
{
size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
- memcpy(op, dt+val, 1);
+ ZSTD_memcpy(op, dt+val, 1);
if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
else {
if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
@@ -890,20 +944,6 @@ size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
}
-size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize)
-{
- U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
- return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
- workSpace, sizeof(workSpace));
-}
-
-size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
- HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
- return HUF_decompress1X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
-}
-
size_t HUF_decompress4X2_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
@@ -937,20 +977,6 @@ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
}
-size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize)
-{
- U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
- return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
- workSpace, sizeof(workSpace));
-}
-
-size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
- HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
- return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
-}
-
#endif /* HUF_FORCE_DECOMPRESS_X1 */
@@ -1051,67 +1077,6 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
}
-typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
-
-size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
- static const decompressionAlgo decompress[2] = { HUF_decompress4X1, HUF_decompress4X2 };
-#endif
-
- /* validation checks */
- if (dstSize == 0) return ERROR(dstSize_tooSmall);
- if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
- if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
- if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
-
- { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
- (void)algoNb;
- assert(algoNb == 0);
- return HUF_decompress4X1(dst, dstSize, cSrc, cSrcSize);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
- (void)algoNb;
- assert(algoNb == 1);
- return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);
-#else
- return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
-#endif
- }
-}
-
-size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
- /* validation checks */
- if (dstSize == 0) return ERROR(dstSize_tooSmall);
- if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
- if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
- if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
-
- { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
- (void)algoNb;
- assert(algoNb == 0);
- return HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
- (void)algoNb;
- assert(algoNb == 1);
- return HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);
-#else
- return algoNb ? HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
- HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
-#endif
- }
-}
-
-size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
- U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
- return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
- workSpace, sizeof(workSpace));
-}
-
-
size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
size_t dstSize, const void* cSrc,
size_t cSrcSize, void* workSpace,
@@ -1145,8 +1110,8 @@ size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
- if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
- if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+ if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
{ U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
#if defined(HUF_FORCE_DECOMPRESS_X1)
@@ -1168,14 +1133,6 @@ size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
}
}
-size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize)
-{
- U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
- return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
- workSpace, sizeof(workSpace));
-}
-
size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
{
@@ -1199,7 +1156,7 @@ size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstS
{
const BYTE* ip = (const BYTE*) cSrc;
- size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize);
+ size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
@@ -1246,3 +1203,149 @@ size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t ds
#endif
}
}
+
+#ifndef ZSTD_NO_UNUSED_FUNCTIONS
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_readDTableX1(HUF_DTable* DTable, const void* src, size_t srcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_readDTableX1_wksp(DTable, src, srcSize,
+ workSpace, sizeof(workSpace));
+}
+
+size_t HUF_decompress1X1_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_decompress1X1_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
+ workSpace, sizeof(workSpace));
+}
+
+size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX);
+ return HUF_decompress1X1_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
+}
+#endif
+
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_readDTableX2_wksp(DTable, src, srcSize,
+ workSpace, sizeof(workSpace));
+}
+
+size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
+ workSpace, sizeof(workSpace));
+}
+
+size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
+ return HUF_decompress1X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+#endif
+
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress4X1_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+ workSpace, sizeof(workSpace));
+}
+size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX);
+ return HUF_decompress4X1_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+#endif
+
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+ workSpace, sizeof(workSpace));
+}
+
+size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
+ return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+#endif
+
+typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+
+size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
+ static const decompressionAlgo decompress[2] = { HUF_decompress4X1, HUF_decompress4X2 };
+#endif
+
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
+ if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)algoNb;
+ assert(algoNb == 0);
+ return HUF_decompress4X1(dst, dstSize, cSrc, cSrcSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)algoNb;
+ assert(algoNb == 1);
+ return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);
+#else
+ return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
+#endif
+ }
+}
+
+size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
+ if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)algoNb;
+ assert(algoNb == 0);
+ return HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)algoNb;
+ assert(algoNb == 1);
+ return HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);
+#else
+ return algoNb ? HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
+ HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+#endif
+ }
+}
+
+size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+ workSpace, sizeof(workSpace));
+}
+
+size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+ workSpace, sizeof(workSpace));
+}
+#endif
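Context note: the plain entry points removed earlier in this file and re-added here only under ZSTD_NO_UNUSED_FUNCTIONS were thin wrappers that allocated a stack workspace and forwarded to the corresponding *_wksp variants. A minimal caller-side sketch of the equivalent pattern, assuming HUF_STATIC_LINKING_ONLY exposes the workspace API and HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (this is illustration, not part of the patch):

    #define HUF_STATIC_LINKING_ONLY   /* assumption: exposes the *_wksp API and workspace size */
    #include "huf.h"

    /* Decode one Huffman-compressed block, supplying the scratch space explicitly,
     * much like the removed HUF_decompress4X_hufOnly() wrapper used to do. */
    static size_t decode_huf_block(void* dst, size_t dstSize,
                                   const void* cSrc, size_t cSrcSize)
    {
        HUF_CREATE_STATIC_DTABLEX2(dtable, HUF_TABLELOG_MAX);    /* decoding table on the stack */
        U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];        /* scratch for table construction */
        return HUF_decompress4X_hufOnly_wksp(dtable, dst, dstSize,
                                             cSrc, cSrcSize,
                                             workSpace, sizeof(workSpace));
    }
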
diff --git a/Utilities/cmzstd/lib/decompress/zstd_ddict.c b/Utilities/cmzstd/lib/decompress/zstd_ddict.c
index c8cb8ecc95..ce335477b3 100644
--- a/Utilities/cmzstd/lib/decompress/zstd_ddict.c
+++ b/Utilities/cmzstd/lib/decompress/zstd_ddict.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -14,7 +14,7 @@
/*-*******************************************************
* Dependencies
*********************************************************/
-#include <string.h> /* memcpy, memmove, memset */
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
#include "../common/cpu.h" /* bmi2 */
#include "../common/mem.h" /* low level memory routines */
#define FSE_STATIC_LINKING_ONLY
@@ -127,11 +127,11 @@ static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
ddict->dictContent = dict;
if (!dict) dictSize = 0;
} else {
- void* const internalBuffer = ZSTD_malloc(dictSize, ddict->cMem);
+ void* const internalBuffer = ZSTD_customMalloc(dictSize, ddict->cMem);
ddict->dictBuffer = internalBuffer;
ddict->dictContent = internalBuffer;
if (!internalBuffer) return ERROR(memory_allocation);
- memcpy(internalBuffer, dict, dictSize);
+ ZSTD_memcpy(internalBuffer, dict, dictSize);
}
ddict->dictSize = dictSize;
ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
@@ -147,9 +147,9 @@ ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
ZSTD_dictContentType_e dictContentType,
ZSTD_customMem customMem)
{
- if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
- { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
+ { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_customMalloc(sizeof(ZSTD_DDict), customMem);
if (ddict == NULL) return NULL;
ddict->cMem = customMem;
{ size_t const initResult = ZSTD_initDDict_internal(ddict,
@@ -198,7 +198,7 @@ const ZSTD_DDict* ZSTD_initStaticDDict(
if ((size_t)sBuffer & 7) return NULL; /* 8-aligned */
if (sBufferSize < neededSpace) return NULL;
if (dictLoadMethod == ZSTD_dlm_byCopy) {
- memcpy(ddict+1, dict, dictSize); /* local copy */
+ ZSTD_memcpy(ddict+1, dict, dictSize); /* local copy */
dict = ddict+1;
}
if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
@@ -213,8 +213,8 @@ size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
{
if (ddict==NULL) return 0; /* support free on NULL */
{ ZSTD_customMem const cMem = ddict->cMem;
- ZSTD_free(ddict->dictBuffer, cMem);
- ZSTD_free(ddict, cMem);
+ ZSTD_customFree(ddict->dictBuffer, cMem);
+ ZSTD_customFree(ddict, cMem);
return 0;
}
}
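Context note: the ZSTD_malloc/ZSTD_free to ZSTD_customMalloc/ZSTD_customFree renames keep DDict allocations routed through the caller's ZSTD_customMem hooks, and the rewritten XOR check still rejects allocators where only one of the two hooks is supplied. A small usage sketch, assuming the advanced DDict constructor is exposed via ZSTD_STATIC_LINKING_ONLY:

    #define ZSTD_STATIC_LINKING_ONLY   /* assumption: ZSTD_createDDict_advanced / ZSTD_customMem are advanced API */
    #include <stdlib.h>
    #include "zstd.h"

    static void* my_alloc(void* opaque, size_t size) { (void)opaque; return malloc(size); }
    static void  my_free (void* opaque, void* addr)  { (void)opaque; free(addr); }

    static ZSTD_DDict* create_ddict_with_custom_allocator(const void* dict, size_t dictSize)
    {
        /* Both hooks set: passes the (!customAlloc) ^ (!customFree) validation.
         * Setting only one of them would make ZSTD_createDDict_advanced return NULL. */
        ZSTD_customMem const cmem = { my_alloc, my_free, NULL };
        return ZSTD_createDDict_advanced(dict, dictSize,
                                         ZSTD_dlm_byCopy, ZSTD_dct_auto, cmem);
    }
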
diff --git a/Utilities/cmzstd/lib/decompress/zstd_ddict.h b/Utilities/cmzstd/lib/decompress/zstd_ddict.h
index af307efd3d..bd03268b50 100644
--- a/Utilities/cmzstd/lib/decompress/zstd_ddict.h
+++ b/Utilities/cmzstd/lib/decompress/zstd_ddict.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -15,7 +15,7 @@
/*-*******************************************************
* Dependencies
*********************************************************/
-#include <stddef.h> /* size_t */
+#include "../common/zstd_deps.h" /* size_t */
#include "../zstd.h" /* ZSTD_DDict, and several public functions */
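Context note: across these files the libc includes are replaced by the project's own zstd_deps.h shim, which is why every memcpy/memmove/memset in the surrounding hunks becomes ZSTD_memcpy/ZSTD_memmove/ZSTD_memset. The shim itself is not part of this excerpt; only as an assumed sketch, it amounts to thin macro forwards along these lines:

    /* Assumed shape of lib/common/zstd_deps.h (not shown in this diff); the real
     * header also offers compiler-builtin variants and opt-in malloc/IO groups. */
    #include <string.h>                              /* memcpy, memmove, memset */
    #define ZSTD_memcpy(d, s, n)   memcpy((d), (s), (n))
    #define ZSTD_memmove(d, s, n)  memmove((d), (s), (n))
    #define ZSTD_memset(p, v, n)   memset((p), (v), (n))

The indirection lets freestanding builds substitute their own definitions without touching each translation unit.
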
diff --git a/Utilities/cmzstd/lib/decompress/zstd_decompress.c b/Utilities/cmzstd/lib/decompress/zstd_decompress.c
index be5c7cfc33..910bc034c0 100644
--- a/Utilities/cmzstd/lib/decompress/zstd_decompress.c
+++ b/Utilities/cmzstd/lib/decompress/zstd_decompress.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -55,13 +55,14 @@
/*-*******************************************************
* Dependencies
*********************************************************/
-#include <string.h> /* memcpy, memmove, memset */
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
#include "../common/cpu.h" /* bmi2 */
#include "../common/mem.h" /* low level memory routines */
#define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
+#include "../common/xxhash.h" /* XXH64_reset, XXH64_update, XXH64_digest, XXH64 */
#include "../common/zstd_internal.h" /* blockProperties_t */
#include "zstd_decompress_internal.h" /* ZSTD_DCtx */
#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
@@ -72,6 +73,144 @@
#endif
+
+/*************************************
+ * Multiple DDicts Hashset internals *
+ *************************************/
+
+#define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4
+#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3 /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float.
+ * Currently, that means a 0.75 load factor.
+ * So, if count * COUNT_MULT / size * SIZE_MULT != 0, then we've exceeded
+ * the load factor of the ddict hash set.
+ */
+
+#define DDICT_HASHSET_TABLE_BASE_SIZE 64
+#define DDICT_HASHSET_RESIZE_FACTOR 2
+
+/* Hash function to determine starting position of dict insertion within the table
+ * Returns an index between [0, hashSet->ddictPtrTableSize]
+ */
+static size_t ZSTD_DDictHashSet_getIndex(const ZSTD_DDictHashSet* hashSet, U32 dictID) {
+ const U64 hash = XXH64(&dictID, sizeof(U32), 0);
+ /* DDict ptr table size is a multiple of 2, use size - 1 as mask to get index within [0, hashSet->ddictPtrTableSize) */
+ return hash & (hashSet->ddictPtrTableSize - 1);
+}
+
+/* Adds DDict to a hashset without resizing it.
+ * If inserting a DDict with a dictID that already exists in the set, replaces the one in the set.
+ * Returns 0 if successful, or a zstd error code if something went wrong.
+ */
+static size_t ZSTD_DDictHashSet_emplaceDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict) {
+ const U32 dictID = ZSTD_getDictID_fromDDict(ddict);
+ size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
+ const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
+ RETURN_ERROR_IF(hashSet->ddictPtrCount == hashSet->ddictPtrTableSize, GENERIC, "Hash set is full!");
+ DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
+ while (hashSet->ddictPtrTable[idx] != NULL) {
+ /* Replace existing ddict if inserting ddict with same dictID */
+ if (ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]) == dictID) {
+ DEBUGLOG(4, "DictID already exists, replacing rather than adding");
+ hashSet->ddictPtrTable[idx] = ddict;
+ return 0;
+ }
+ idx &= idxRangeMask;
+ idx++;
+ }
+ DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
+ hashSet->ddictPtrTable[idx] = ddict;
+ hashSet->ddictPtrCount++;
+ return 0;
+}
+
+/* Expands hash table by factor of DDICT_HASHSET_RESIZE_FACTOR and
+ * rehashes all values, allocates new table, frees old table.
+ * Returns 0 on success, otherwise a zstd error code.
+ */
+static size_t ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
+ size_t newTableSize = hashSet->ddictPtrTableSize * DDICT_HASHSET_RESIZE_FACTOR;
+ const ZSTD_DDict** newTable = (const ZSTD_DDict**)ZSTD_customCalloc(sizeof(ZSTD_DDict*) * newTableSize, customMem);
+ const ZSTD_DDict** oldTable = hashSet->ddictPtrTable;
+ size_t oldTableSize = hashSet->ddictPtrTableSize;
+ size_t i;
+
+ DEBUGLOG(4, "Expanding DDict hash table! Old size: %zu new size: %zu", oldTableSize, newTableSize);
+ RETURN_ERROR_IF(!newTable, memory_allocation, "Expanded hashset allocation failed!");
+ hashSet->ddictPtrTable = newTable;
+ hashSet->ddictPtrTableSize = newTableSize;
+ hashSet->ddictPtrCount = 0;
+ for (i = 0; i < oldTableSize; ++i) {
+ if (oldTable[i] != NULL) {
+ FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, oldTable[i]), "");
+ }
+ }
+ ZSTD_customFree((void*)oldTable, customMem);
+ DEBUGLOG(4, "Finished re-hash");
+ return 0;
+}
+
+/* Fetches a DDict with the given dictID
+ * Returns the ZSTD_DDict* with the requested dictID. If it doesn't exist, then returns NULL.
+ */
+static const ZSTD_DDict* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, U32 dictID) {
+ size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
+ const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
+ DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
+ for (;;) {
+ size_t currDictID = ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]);
+ if (currDictID == dictID || currDictID == 0) {
+ /* currDictID == 0 implies a NULL ddict entry */
+ break;
+ } else {
+ idx &= idxRangeMask; /* Goes to start of table when we reach the end */
+ idx++;
+ }
+ }
+ DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
+ return hashSet->ddictPtrTable[idx];
+}
+
+/* Allocates space for and returns a ddict hash set
+ * The hash set's ZSTD_DDict* table has all values automatically set to NULL to begin with.
+ * Returns NULL if allocation failed.
+ */
+static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) {
+ ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc(sizeof(ZSTD_DDictHashSet), customMem);
+ DEBUGLOG(4, "Allocating new hash set");
+ ret->ddictPtrTable = (const ZSTD_DDict**)ZSTD_customCalloc(DDICT_HASHSET_TABLE_BASE_SIZE * sizeof(ZSTD_DDict*), customMem);
+ ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE;
+ ret->ddictPtrCount = 0;
+ if (!ret || !ret->ddictPtrTable) {
+ return NULL;
+ }
+ return ret;
+}
+
+/* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself.
+ * Note: The ZSTD_DDict* within the table are NOT freed.
+ */
+static void ZSTD_freeDDictHashSet(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
+ DEBUGLOG(4, "Freeing ddict hash set");
+ if (hashSet && hashSet->ddictPtrTable) {
+ ZSTD_customFree((void*)hashSet->ddictPtrTable, customMem);
+ }
+ if (hashSet) {
+ ZSTD_customFree(hashSet, customMem);
+ }
+}
+
+/* Public function: Adds a DDict into the ZSTD_DDictHashSet, possibly triggering a resize of the hash set.
+ * Returns 0 on success, or a ZSTD error.
+ */
+static size_t ZSTD_DDictHashSet_addDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict, ZSTD_customMem customMem) {
+ DEBUGLOG(4, "Adding dict ID: %u to hashset with - Count: %zu Tablesize: %zu", ZSTD_getDictID_fromDDict(ddict), hashSet->ddictPtrCount, hashSet->ddictPtrTableSize);
+ if (hashSet->ddictPtrCount * DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT / hashSet->ddictPtrTableSize * DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT != 0) {
+ FORWARD_IF_ERROR(ZSTD_DDictHashSet_expand(hashSet, customMem), "");
+ }
+ FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, ddict), "");
+ return 0;
+}
+
/*-*************************************************************
* Context management
***************************************************************/
@@ -94,11 +233,19 @@ static size_t ZSTD_startingInputLength(ZSTD_format_e format)
return startingInputLength;
}
+static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx)
+{
+ assert(dctx->streamStage == zdss_init);
+ dctx->format = ZSTD_f_zstd1;
+ dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
+ dctx->outBufferMode = ZSTD_bm_buffered;
+ dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum;
+ dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict;
+}
+
static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
{
- dctx->format = ZSTD_f_zstd1; /* ZSTD_decompressBegin() invokes ZSTD_startingInputLength() with argument dctx->format */
dctx->staticSize = 0;
- dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
dctx->ddict = NULL;
dctx->ddictLocal = NULL;
dctx->dictEnd = NULL;
@@ -113,7 +260,8 @@ static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
dctx->noForwardProgress = 0;
dctx->oversizedDuration = 0;
dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
- dctx->outBufferMode = ZSTD_obm_buffered;
+ dctx->ddictSet = NULL;
+ ZSTD_DCtx_resetParameters(dctx);
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
dctx->dictContentEndForFuzzing = NULL;
#endif
@@ -134,9 +282,9 @@ ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
{
- if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
- { ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_malloc(sizeof(*dctx), customMem);
+ { ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_customMalloc(sizeof(*dctx), customMem);
if (!dctx) return NULL;
dctx->customMem = customMem;
ZSTD_initDCtx_internal(dctx);
@@ -164,13 +312,17 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx");
{ ZSTD_customMem const cMem = dctx->customMem;
ZSTD_clearDict(dctx);
- ZSTD_free(dctx->inBuff, cMem);
+ ZSTD_customFree(dctx->inBuff, cMem);
dctx->inBuff = NULL;
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
if (dctx->legacyContext)
ZSTD_freeLegacyStreamContext(dctx->legacyContext, dctx->previousLegacyVersion);
#endif
- ZSTD_free(dctx, cMem);
+ if (dctx->ddictSet) {
+ ZSTD_freeDDictHashSet(dctx->ddictSet, cMem);
+ dctx->ddictSet = NULL;
+ }
+ ZSTD_customFree(dctx, cMem);
return 0;
}
}
@@ -179,7 +331,30 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
{
size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx);
- memcpy(dstDCtx, srcDCtx, toCopy); /* no need to copy workspace */
+ ZSTD_memcpy(dstDCtx, srcDCtx, toCopy); /* no need to copy workspace */
+}
+
+/* Given a dctx with a digested frame params, re-selects the correct ZSTD_DDict based on
+ * the requested dict ID from the frame. If there exists a reference to the correct ZSTD_DDict, then
+ * accordingly sets the ddict to be used to decompress the frame.
+ *
+ * If no DDict is found, then no action is taken, and the ZSTD_DCtx::ddict remains as-is.
+ *
+ * ZSTD_d_refMultipleDDicts must be enabled for this function to be called.
+ */
+static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx* dctx) {
+ assert(dctx->refMultipleDDicts && dctx->ddictSet);
+ DEBUGLOG(4, "Adjusting DDict based on requested dict ID from frame");
+ if (dctx->ddict) {
+ const ZSTD_DDict* frameDDict = ZSTD_DDictHashSet_getDDict(dctx->ddictSet, dctx->fParams.dictID);
+ if (frameDDict) {
+ DEBUGLOG(4, "DDict found!");
+ ZSTD_clearDict(dctx);
+ dctx->dictID = dctx->fParams.dictID;
+ dctx->ddict = frameDDict;
+ dctx->dictUses = ZSTD_use_indefinitely;
+ }
+ }
}
@@ -246,7 +421,7 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
const BYTE* ip = (const BYTE*)src;
size_t const minInputSize = ZSTD_startingInputLength(format);
- memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzer do not understand that zfhPtr is only going to be read only if return value is zero, since they are 2 different signals */
+ ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzer do not understand that zfhPtr is only going to be read only if return value is zero, since they are 2 different signals */
if (srcSize < minInputSize) return minInputSize;
RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");
@@ -256,7 +431,7 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
/* skippable frame */
if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */
- memset(zfhPtr, 0, sizeof(*zfhPtr));
+ ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));
zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
zfhPtr->frameType = ZSTD_skippableFrame;
return 0;
@@ -433,12 +608,19 @@ unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
/** ZSTD_decodeFrameHeader() :
* `headerSize` must be the size provided by ZSTD_frameHeaderSize().
+ * If multiple DDict references are enabled, also will choose the correct DDict to use.
* @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
{
size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
if (ZSTD_isError(result)) return result; /* invalid header */
RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small");
+
+ /* Reference DDict requested by frame if dctx references multiple ddicts */
+ if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts && dctx->ddictSet) {
+ ZSTD_DCtx_selectFrameDDict(dctx);
+ }
+
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
/* Skip the dictID check in fuzzing mode, because it makes the search
* harder.
@@ -446,7 +628,9 @@ static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t he
RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),
dictionary_wrong, "");
#endif
- if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
+ dctx->validateChecksum = (dctx->fParams.checksumFlag && !dctx->forceIgnoreChecksum) ? 1 : 0;
+ if (dctx->validateChecksum) XXH64_reset(&dctx->xxhState, 0);
+ dctx->processedCSize += headerSize;
return 0;
}
@@ -461,7 +645,7 @@ static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret)
static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)
{
ZSTD_frameSizeInfo frameSizeInfo;
- memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));
+ ZSTD_memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
if (ZSTD_isLegacy(src, srcSize))
@@ -516,7 +700,7 @@ static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize
ip += 4;
}
- frameSizeInfo.compressedSize = ip - ipstart;
+ frameSizeInfo.compressedSize = (size_t)(ip - ipstart);
frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN)
? zfh.frameContentSize
: nbBlocks * zfh.blockSizeMax;
@@ -569,7 +753,7 @@ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
{
DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize);
- ZSTD_checkContinuity(dctx, blockStart);
+ ZSTD_checkContinuity(dctx, blockStart, blockSize);
dctx->previousDstEnd = (const char*)blockStart + blockSize;
return blockSize;
}
@@ -579,12 +763,12 @@ static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_copyRawBlock");
+ RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, "");
if (dst == NULL) {
if (srcSize == 0) return 0;
RETURN_ERROR(dstBuffer_null, "");
}
- RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, "");
- memcpy(dst, src, srcSize);
+ ZSTD_memcpy(dst, src, srcSize);
return srcSize;
}
@@ -592,15 +776,41 @@ static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
BYTE b,
size_t regenSize)
{
+ RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, "");
if (dst == NULL) {
if (regenSize == 0) return 0;
RETURN_ERROR(dstBuffer_null, "");
}
- RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, "");
- memset(dst, b, regenSize);
+ ZSTD_memset(dst, b, regenSize);
return regenSize;
}
+static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming)
+{
+#if ZSTD_TRACE
+ if (dctx->traceCtx && ZSTD_trace_decompress_end != NULL) {
+ ZSTD_Trace trace;
+ ZSTD_memset(&trace, 0, sizeof(trace));
+ trace.version = ZSTD_VERSION_NUMBER;
+ trace.streaming = streaming;
+ if (dctx->ddict) {
+ trace.dictionaryID = ZSTD_getDictID_fromDDict(dctx->ddict);
+ trace.dictionarySize = ZSTD_DDict_dictSize(dctx->ddict);
+ trace.dictionaryIsCold = dctx->ddictIsCold;
+ }
+ trace.uncompressedSize = (size_t)uncompressedSize;
+ trace.compressedSize = (size_t)compressedSize;
+ trace.dctx = dctx;
+ ZSTD_trace_decompress_end(dctx->traceCtx, &trace);
+ }
+#else
+ (void)dctx;
+ (void)uncompressedSize;
+ (void)compressedSize;
+ (void)streaming;
+#endif
+}
+
/*! ZSTD_decompressFrame() :
* @dctx must be properly initialized
@@ -610,8 +820,9 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void** srcPtr, size_t *srcSizePtr)
{
- const BYTE* ip = (const BYTE*)(*srcPtr);
- BYTE* const ostart = (BYTE* const)dst;
+ const BYTE* const istart = (const BYTE*)(*srcPtr);
+ const BYTE* ip = istart;
+ BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = dstCapacity != 0 ? ostart + dstCapacity : ostart;
BYTE* op = ostart;
size_t remainingSrcSize = *srcSizePtr;
@@ -647,13 +858,13 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
switch(blockProperties.blockType)
{
case bt_compressed:
- decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize, /* frame */ 1);
+ decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1);
break;
case bt_raw :
- decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize);
+ decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize);
break;
case bt_rle :
- decodedSize = ZSTD_setRleBlock(op, oend-op, *ip, blockProperties.origSize);
+ decodedSize = ZSTD_setRleBlock(op, (size_t)(oend-op), *ip, blockProperties.origSize);
break;
case bt_reserved :
default:
@@ -661,7 +872,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
}
if (ZSTD_isError(decodedSize)) return decodedSize;
- if (dctx->fParams.checksumFlag)
+ if (dctx->validateChecksum)
XXH64_update(&dctx->xxhState, op, decodedSize);
if (decodedSize != 0)
op += decodedSize;
@@ -676,19 +887,21 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
corruption_detected, "");
}
if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
- U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
- U32 checkRead;
RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong, "");
- checkRead = MEM_readLE32(ip);
- RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, "");
+ if (!dctx->forceIgnoreChecksum) {
+ U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
+ U32 checkRead;
+ checkRead = MEM_readLE32(ip);
+ RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, "");
+ }
ip += 4;
remainingSrcSize -= 4;
}
-
+ ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0);
/* Allow caller to get size read */
*srcPtr = ip;
*srcSizePtr = remainingSrcSize;
- return op-ostart;
+ return (size_t)(op-ostart);
}
static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
@@ -721,7 +934,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize);
if (ZSTD_isError(decodedSize)) return decodedSize;
- assert(decodedSize <=- dstCapacity);
+ assert(decodedSize <= dstCapacity);
dst = (BYTE*)dst + decodedSize;
dstCapacity -= decodedSize;
@@ -753,7 +966,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
* use this in all cases but ddict */
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize), "");
}
- ZSTD_checkContinuity(dctx, dst);
+ ZSTD_checkContinuity(dctx, dst, dstCapacity);
{ const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
&src, &srcSize);
@@ -761,15 +974,13 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
(ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
&& (moreThan1Frame==1),
srcSize_wrong,
- "at least one frame successfully completed, but following "
- "bytes are garbage: it's more likely to be a srcSize error, "
- "specifying more bytes than compressed size of frame(s). This "
- "error message replaces ERROR(prefix_unknown), which would be "
- "confusing, as the first header is actually correct. Note that "
- "one could be unlucky, it might be a corruption error instead, "
- "happening right at the place where we expect zstd magic "
- "bytes. But this is _much_ less likely than a srcSize field "
- "error.");
+ "At least one frame successfully completed, "
+ "but following bytes are garbage: "
+ "it's more likely to be a srcSize error, "
+ "specifying more input bytes than size of frame(s). "
+ "Note: one could be unlucky, it might be a corruption error instead, "
+ "happening right at the place where we expect zstd magic bytes. "
+ "But this is _much_ less likely than a srcSize field error.");
if (ZSTD_isError(res)) return res;
assert(res <= dstCapacity);
if (res != 0)
@@ -781,7 +992,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed");
- return (BYTE*)dst - (BYTE*)dststart;
+ return (size_t)((BYTE*)dst - (BYTE*)dststart);
}
size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
@@ -890,7 +1101,9 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
/* Sanity check */
RETURN_ERROR_IF(srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize), srcSize_wrong, "not allowed");
- if (dstCapacity) ZSTD_checkContinuity(dctx, dst);
+ ZSTD_checkContinuity(dctx, dst, dstCapacity);
+
+ dctx->processedCSize += srcSize;
switch (dctx->stage)
{
@@ -899,21 +1112,21 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
if (dctx->format == ZSTD_f_zstd1) { /* allows header */
assert(srcSize >= ZSTD_FRAMEIDSIZE); /* to read skippable magic number */
if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
- memcpy(dctx->headerBuffer, src, srcSize);
+ ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize; /* remaining to load to get full skippable frame header */
dctx->stage = ZSTDds_decodeSkippableHeader;
return 0;
} }
dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);
if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;
- memcpy(dctx->headerBuffer, src, srcSize);
+ ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
dctx->expected = dctx->headerSize - srcSize;
dctx->stage = ZSTDds_decodeFrameHeader;
return 0;
case ZSTDds_decodeFrameHeader:
assert(src != NULL);
- memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
+ ZSTD_memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize), "");
dctx->expected = ZSTD_blockHeaderSize;
dctx->stage = ZSTDds_decodeBlockHeader;
@@ -977,7 +1190,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum");
DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
dctx->decodedSize += rSize;
- if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
+ if (dctx->validateChecksum) XXH64_update(&dctx->xxhState, dst, rSize);
dctx->previousDstEnd = (char*)dst + rSize;
/* Stay on the same stage until we are finished streaming the block. */
@@ -995,6 +1208,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
dctx->expected = 4;
dctx->stage = ZSTDds_checkChecksum;
} else {
+ ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1);
dctx->expected = 0; /* ends here */
dctx->stage = ZSTDds_getFrameHeaderSize;
}
@@ -1007,10 +1221,14 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
case ZSTDds_checkChecksum:
assert(srcSize == 4); /* guaranteed by dctx->expected */
- { U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
- U32 const check32 = MEM_readLE32(src);
- DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
- RETURN_ERROR_IF(check32 != h32, checksum_wrong, "");
+ {
+ if (dctx->validateChecksum) {
+ U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
+ U32 const check32 = MEM_readLE32(src);
+ DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
+ RETURN_ERROR_IF(check32 != h32, checksum_wrong, "");
+ }
+ ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1);
dctx->expected = 0;
dctx->stage = ZSTDds_getFrameHeaderSize;
return 0;
@@ -1019,7 +1237,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
case ZSTDds_decodeSkippableHeader:
assert(src != NULL);
assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE);
- memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */
+ ZSTD_memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */
dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected can grow seriously large, beyond local buffer size */
dctx->stage = ZSTDds_skipFrame;
return 0;
@@ -1075,7 +1293,7 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
workspace, workspaceSize);
#else
size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
- dictPtr, dictEnd - dictPtr,
+ dictPtr, (size_t)(dictEnd - dictPtr),
workspace, workspaceSize);
#endif
RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, "");
@@ -1084,40 +1302,46 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
{ short offcodeNCount[MaxOff+1];
unsigned offcodeMaxValue = MaxOff, offcodeLog;
- size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr));
RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted, "");
RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
ZSTD_buildFSETable( entropy->OFTable,
offcodeNCount, offcodeMaxValue,
OF_base, OF_bits,
- offcodeLog);
+ offcodeLog,
+ entropy->workspace, sizeof(entropy->workspace),
+ /* bmi2 */0);
dictPtr += offcodeHeaderSize;
}
{ short matchlengthNCount[MaxML+1];
unsigned matchlengthMaxValue = MaxML, matchlengthLog;
- size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted, "");
RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
ZSTD_buildFSETable( entropy->MLTable,
matchlengthNCount, matchlengthMaxValue,
ML_base, ML_bits,
- matchlengthLog);
+ matchlengthLog,
+ entropy->workspace, sizeof(entropy->workspace),
+ /* bmi2 */ 0);
dictPtr += matchlengthHeaderSize;
}
{ short litlengthNCount[MaxLL+1];
unsigned litlengthMaxValue = MaxLL, litlengthLog;
- size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted, "");
RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
ZSTD_buildFSETable( entropy->LLTable,
litlengthNCount, litlengthMaxValue,
LL_base, LL_bits,
- litlengthLog);
+ litlengthLog,
+ entropy->workspace, sizeof(entropy->workspace),
+ /* bmi2 */ 0);
dictPtr += litlengthHeaderSize;
}
@@ -1131,7 +1355,7 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
entropy->rep[i] = rep;
} }
- return dictPtr - (const BYTE*)dict;
+ return (size_t)(dictPtr - (const BYTE*)dict);
}
static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
@@ -1158,8 +1382,12 @@ static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict
size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
{
assert(dctx != NULL);
+#if ZSTD_TRACE
+ dctx->traceCtx = (ZSTD_trace_decompress_begin != NULL) ? ZSTD_trace_decompress_begin(dctx) : 0;
+#endif
dctx->expected = ZSTD_startingInputLength(dctx->format); /* dctx->format must be properly set */
dctx->stage = ZSTDds_getFrameHeaderSize;
+ dctx->processedCSize = 0;
dctx->decodedSize = 0;
dctx->previousDstEnd = NULL;
dctx->prefixStart = NULL;
@@ -1170,7 +1398,7 @@ size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
dctx->dictID = 0;
dctx->bType = bt_reserved;
ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
- memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
+ ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
dctx->LLTptr = dctx->entropy.LLTable;
dctx->MLTptr = dctx->entropy.MLTable;
dctx->OFTptr = dctx->entropy.OFTable;
@@ -1373,6 +1601,16 @@ size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
if (ddict) {
dctx->ddict = ddict;
dctx->dictUses = ZSTD_use_indefinitely;
+ if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts) {
+ if (dctx->ddictSet == NULL) {
+ dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem);
+ if (!dctx->ddictSet) {
+ RETURN_ERROR(memory_allocation, "Failed to allocate memory for hash set!");
+ }
+ }
+ assert(!dctx->staticSize); /* Impossible: ddictSet cannot have been allocated if static dctx */
+ FORWARD_IF_ERROR(ZSTD_DDictHashSet_addDDict(dctx->ddictSet, ddict, dctx->customMem), "");
+ }
}
return 0;
}
@@ -1394,7 +1632,7 @@ size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
{
- return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, format);
+ return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, (int)format);
}
ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
@@ -1411,8 +1649,16 @@ ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
return bounds;
case ZSTD_d_stableOutBuffer:
- bounds.lowerBound = (int)ZSTD_obm_buffered;
- bounds.upperBound = (int)ZSTD_obm_stable;
+ bounds.lowerBound = (int)ZSTD_bm_buffered;
+ bounds.upperBound = (int)ZSTD_bm_stable;
+ return bounds;
+ case ZSTD_d_forceIgnoreChecksum:
+ bounds.lowerBound = (int)ZSTD_d_validateChecksum;
+ bounds.upperBound = (int)ZSTD_d_ignoreChecksum;
+ return bounds;
+ case ZSTD_d_refMultipleDDicts:
+ bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict;
+ bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts;
return bounds;
default:;
}
@@ -1436,6 +1682,29 @@ static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value)
RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound, ""); \
}
+size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value)
+{
+ switch (param) {
+ case ZSTD_d_windowLogMax:
+ *value = (int)ZSTD_highbit32((U32)dctx->maxWindowSize);
+ return 0;
+ case ZSTD_d_format:
+ *value = (int)dctx->format;
+ return 0;
+ case ZSTD_d_stableOutBuffer:
+ *value = (int)dctx->outBufferMode;
+ return 0;
+ case ZSTD_d_forceIgnoreChecksum:
+ *value = (int)dctx->forceIgnoreChecksum;
+ return 0;
+ case ZSTD_d_refMultipleDDicts:
+ *value = (int)dctx->refMultipleDDicts;
+ return 0;
+ default:;
+ }
+ RETURN_ERROR(parameter_unsupported, "");
+}
+
size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value)
{
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
@@ -1451,7 +1720,18 @@ size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value
return 0;
case ZSTD_d_stableOutBuffer:
CHECK_DBOUNDS(ZSTD_d_stableOutBuffer, value);
- dctx->outBufferMode = (ZSTD_outBufferMode_e)value;
+ dctx->outBufferMode = (ZSTD_bufferMode_e)value;
+ return 0;
+ case ZSTD_d_forceIgnoreChecksum:
+ CHECK_DBOUNDS(ZSTD_d_forceIgnoreChecksum, value);
+ dctx->forceIgnoreChecksum = (ZSTD_forceIgnoreChecksum_e)value;
+ return 0;
+ case ZSTD_d_refMultipleDDicts:
+ CHECK_DBOUNDS(ZSTD_d_refMultipleDDicts, value);
+ if (dctx->staticSize != 0) {
+ RETURN_ERROR(parameter_unsupported, "Static dctx does not support multiple DDicts!");
+ }
+ dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value;
return 0;
default:;
}
@@ -1469,8 +1749,7 @@ size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
|| (reset == ZSTD_reset_session_and_parameters) ) {
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
ZSTD_clearDict(dctx);
- dctx->format = ZSTD_f_zstd1;
- dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
+ ZSTD_DCtx_resetParameters(dctx);
}
return 0;
}
@@ -1524,7 +1803,7 @@ static void ZSTD_DCtx_updateOversizedDuration(ZSTD_DStream* zds, size_t const ne
{
if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize))
zds->oversizedDuration++;
- else
+ else
zds->oversizedDuration = 0;
}
@@ -1538,7 +1817,7 @@ static size_t ZSTD_checkOutBuffer(ZSTD_DStream const* zds, ZSTD_outBuffer const*
{
ZSTD_outBuffer const expect = zds->expectedOutBuffer;
/* No requirement when ZSTD_obm_stable is not enabled. */
- if (zds->outBufferMode != ZSTD_obm_stable)
+ if (zds->outBufferMode != ZSTD_bm_stable)
return 0;
/* Any buffer is allowed in zdss_init, this must be the same for every other call until
* the context is reset.
@@ -1548,7 +1827,7 @@ static size_t ZSTD_checkOutBuffer(ZSTD_DStream const* zds, ZSTD_outBuffer const*
/* The buffer must match our expectation exactly. */
if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size)
return 0;
- RETURN_ERROR(dstBuffer_wrong, "ZSTD_obm_stable enabled but output differs!");
+ RETURN_ERROR(dstBuffer_wrong, "ZSTD_d_stableOutBuffer enabled but output differs!");
}
/* Calls ZSTD_decompressContinue() with the right parameters for ZSTD_decompressStream()
@@ -1560,7 +1839,7 @@ static size_t ZSTD_decompressContinueStream(
ZSTD_DStream* zds, char** op, char* oend,
void const* src, size_t srcSize) {
int const isSkipFrame = ZSTD_isSkipFrame(zds);
- if (zds->outBufferMode == ZSTD_obm_buffered) {
+ if (zds->outBufferMode == ZSTD_bm_buffered) {
size_t const dstSize = isSkipFrame ? 0 : zds->outBuffSize - zds->outStart;
size_t const decodedSize = ZSTD_decompressContinue(zds,
zds->outBuff + zds->outStart, dstSize, src, srcSize);
@@ -1573,14 +1852,14 @@ static size_t ZSTD_decompressContinueStream(
}
} else {
/* Write directly into the output buffer */
- size_t const dstSize = isSkipFrame ? 0 : oend - *op;
+ size_t const dstSize = isSkipFrame ? 0 : (size_t)(oend - *op);
size_t const decodedSize = ZSTD_decompressContinue(zds, *op, dstSize, src, srcSize);
FORWARD_IF_ERROR(decodedSize, "");
*op += decodedSize;
/* Flushing is not needed. */
zds->streamStage = zdss_read;
assert(*op <= oend);
- assert(zds->outBufferMode == ZSTD_obm_stable);
+ assert(zds->outBufferMode == ZSTD_bm_stable);
}
return 0;
}
@@ -1635,6 +1914,9 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
} }
#endif
{ size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
+ if (zds->refMultipleDDicts && zds->ddictSet) {
+ ZSTD_DCtx_selectFrameDDict(zds);
+ }
DEBUGLOG(5, "header size : %u", (U32)hSize);
if (ZSTD_isError(hSize)) {
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
@@ -1663,14 +1945,14 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
assert(iend >= ip);
if (toLoad > remainingInput) { /* not enough input to load full header */
if (remainingInput > 0) {
- memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput);
+ ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput);
zds->lhSize += remainingInput;
}
input->pos = input->size;
return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
}
assert(ip != NULL);
- memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
+ ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
break;
} }
@@ -1678,10 +1960,10 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
&& zds->fParams.frameType != ZSTD_skippableFrame
&& (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
- size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart);
+ size_t const cSize = ZSTD_findFrameCompressedSize(istart, (size_t)(iend-istart));
if (cSize <= (size_t)(iend-istart)) {
/* shortcut : using single-pass mode */
- size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, ZSTD_getDDict(zds));
+ size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds));
if (ZSTD_isError(decompressedSize)) return decompressedSize;
DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
ip = istart + cSize;
@@ -1693,7 +1975,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
} }
/* Check output buffer is large enough for ZSTD_odm_stable. */
- if (zds->outBufferMode == ZSTD_obm_stable
+ if (zds->outBufferMode == ZSTD_bm_stable
&& zds->fParams.frameType != ZSTD_skippableFrame
&& zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
&& (U64)(size_t)(oend-op) < zds->fParams.frameContentSize) {
@@ -1723,7 +2005,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
/* Adapt buffer sizes to frame header instructions */
{ size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
- size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_obm_buffered
+ size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_bm_buffered
? ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize)
: 0;
@@ -1731,7 +2013,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
{ int const tooSmall = (zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize);
int const tooLarge = ZSTD_DCtx_isOversizedTooLong(zds);
-
+
if (tooSmall || tooLarge) {
size_t const bufferSize = neededInBuffSize + neededOutBuffSize;
DEBUGLOG(4, "inBuff : from %u to %u",
@@ -1745,10 +2027,10 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
bufferSize > zds->staticSize - sizeof(ZSTD_DCtx),
memory_allocation, "");
} else {
- ZSTD_free(zds->inBuff, zds->customMem);
+ ZSTD_customFree(zds->inBuff, zds->customMem);
zds->inBuffSize = 0;
zds->outBuffSize = 0;
- zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem);
+ zds->inBuff = (char*)ZSTD_customMalloc(bufferSize, zds->customMem);
RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation, "");
}
zds->inBuffSize = neededInBuffSize;
@@ -1760,7 +2042,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
case zdss_read:
DEBUGLOG(5, "stage zdss_read");
- { size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip);
+ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip));
DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize);
if (neededInSize==0) { /* end of frame */
zds->streamStage = zdss_init;
@@ -1790,7 +2072,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos,
corruption_detected,
"should never happen");
- loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip);
+ loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip));
}
ip += loadedSize;
zds->inPos += loadedSize;
@@ -1804,7 +2086,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
}
case zdss_flush:
{ size_t const toFlushSize = zds->outEnd - zds->outStart;
- size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize);
+ size_t const flushedSize = ZSTD_limitCopy(op, (size_t)(oend-op), zds->outBuff + zds->outStart, toFlushSize);
op += flushedSize;
zds->outStart += flushedSize;
if (flushedSize == toFlushSize) { /* flush completed */
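Context note: taken together, the zstd_decompress.c hunks above add two decoder parameters (ZSTD_d_forceIgnoreChecksum and ZSTD_d_refMultipleDDicts), a ZSTD_DCtx_getParameter() accessor, and the DDict hash set that lets one DCtx hold several dictionaries and pick the right one from the frame header's dictID. A hedged usage sketch, assuming the experimental parameter names from zstd.h's ZSTD_STATIC_LINKING_ONLY section:

    #define ZSTD_STATIC_LINKING_ONLY   /* assumption: ZSTD_d_refMultipleDDicts is an experimental dParameter */
    #include "zstd.h"

    /* Decompress one frame while several DDicts are attached; the frame header's
     * dictID selects the matching DDict from the context's hash set. */
    static size_t decompress_with_dict_pool(ZSTD_DCtx* dctx,
                                            const ZSTD_DDict* const* ddicts, size_t nbDDicts,
                                            void* dst, size_t dstCapacity,
                                            const void* src, size_t srcSize)
    {
        size_t i;
        /* Must be set while the stream is still in zdss_init, before any DDict is referenced. */
        size_t const r = ZSTD_DCtx_setParameter(dctx, ZSTD_d_refMultipleDDicts,
                                                ZSTD_rmd_refMultipleDDicts);
        if (ZSTD_isError(r)) return r;
        for (i = 0; i < nbDDicts; i++) {
            size_t const err = ZSTD_DCtx_refDDict(dctx, ddicts[i]);   /* inserted into the hash set */
            if (ZSTD_isError(err)) return err;
        }
        return ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
    }

Note that the parameter is rejected for statically allocated contexts, matching the RETURN_ERROR added to ZSTD_DCtx_setParameter above.
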
diff --git a/Utilities/cmzstd/lib/decompress/zstd_decompress_block.c b/Utilities/cmzstd/lib/decompress/zstd_decompress_block.c
index ad3b3d8dbd..349dcdc333 100644
--- a/Utilities/cmzstd/lib/decompress/zstd_decompress_block.c
+++ b/Utilities/cmzstd/lib/decompress/zstd_decompress_block.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -14,7 +14,7 @@
/*-*******************************************************
* Dependencies
*********************************************************/
-#include <string.h> /* memcpy, memmove, memset */
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
#include "../common/compiler.h" /* prefetch */
#include "../common/cpu.h" /* bmi2 */
#include "../common/mem.h" /* low level memory routines */
@@ -44,7 +44,7 @@
/*_*******************************************************
* Memory operations
**********************************************************/
-static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+static void ZSTD_copy4(void* dst, const void* src) { ZSTD_memcpy(dst, src, 4); }
/*-*************************************************************
@@ -166,7 +166,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
dctx->litSize = litSize;
dctx->litEntropy = 1;
if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
- memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+ ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
return litCSize + lhSize;
}
@@ -191,10 +191,10 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, "");
- memcpy(dctx->litBuffer, istart+lhSize, litSize);
+ ZSTD_memcpy(dctx->litBuffer, istart+lhSize, litSize);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
- memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+ ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
return lhSize+litSize;
}
/* direct reference into compressed stream */
@@ -223,7 +223,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
break;
}
RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
- memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
+ ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
return lhSize+1;
@@ -236,7 +236,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
/* Default FSE distribution tables.
* These are pre-calculated FSE decoding tables using default distributions as defined in specification :
- * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#default-distributions
+ * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#default-distributions
* They were generated programmatically with following method :
* - start from default distributions, present in /lib/common/zstd_internal.h
* - generate tables normally, using ZSTD_buildFSETable()
@@ -364,23 +364,26 @@ static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddB
* generate FSE decoding table for one symbol (ll, ml or off)
* cannot fail if input is valid =>
* all inputs are presumed validated at this stage */
-void
-ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
+FORCE_INLINE_TEMPLATE
+void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
const short* normalizedCounter, unsigned maxSymbolValue,
const U32* baseValue, const U32* nbAdditionalBits,
- unsigned tableLog)
+ unsigned tableLog, void* wksp, size_t wkspSize)
{
ZSTD_seqSymbol* const tableDecode = dt+1;
- U16 symbolNext[MaxSeq+1];
-
U32 const maxSV1 = maxSymbolValue + 1;
U32 const tableSize = 1 << tableLog;
- U32 highThreshold = tableSize-1;
+
+ U16* symbolNext = (U16*)wksp;
+ BYTE* spread = (BYTE*)(symbolNext + MaxSeq + 1);
+ U32 highThreshold = tableSize - 1;
+
/* Sanity Checks */
assert(maxSymbolValue <= MaxSeq);
assert(tableLog <= MaxFSELog);
-
+ assert(wkspSize >= ZSTD_BUILD_FSE_TABLE_WKSP_SIZE);
+ (void)wkspSize;
/* Init, lay down lowprob symbols */
{ ZSTD_seqSymbol_header DTableH;
DTableH.tableLog = tableLog;
@@ -396,16 +399,69 @@ ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
assert(normalizedCounter[s]>=0);
symbolNext[s] = (U16)normalizedCounter[s];
} } }
- memcpy(dt, &DTableH, sizeof(DTableH));
+ ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
}
/* Spread symbols */
- { U32 const tableMask = tableSize-1;
+ assert(tableSize <= 512);
+ /* Specialized symbol spreading for the case when there are
+ * no low probability (-1 count) symbols. When compressing
+     * small blocks we avoid low probability symbols so that this
+     * case is hit, since header decoding speed matters more.
+ */
+ if (highThreshold == tableSize - 1) {
+ size_t const tableMask = tableSize-1;
+ size_t const step = FSE_TABLESTEP(tableSize);
+ /* First lay down the symbols in order.
+ * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
+ * misses since small blocks generally have small table logs, so nearly
+ * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
+ * our buffer to handle the over-write.
+ */
+ {
+ U64 const add = 0x0101010101010101ull;
+ size_t pos = 0;
+ U64 sv = 0;
+ U32 s;
+ for (s=0; s<maxSV1; ++s, sv += add) {
+ int i;
+ int const n = normalizedCounter[s];
+ MEM_write64(spread + pos, sv);
+ for (i = 8; i < n; i += 8) {
+ MEM_write64(spread + pos + i, sv);
+ }
+ pos += n;
+ }
+ }
+ /* Now we spread those positions across the table.
+     * The benefit of doing it in two stages is that we avoid the
+     * variable-size inner loop, which caused lots of branch misses.
+     * Now we can run through all the positions without any branch misses.
+     * We unroll the loop twice, since that is what empirically worked best.
+ */
+ {
+ size_t position = 0;
+ size_t s;
+ size_t const unroll = 2;
+ assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
+ for (s = 0; s < (size_t)tableSize; s += unroll) {
+ size_t u;
+ for (u = 0; u < unroll; ++u) {
+ size_t const uPosition = (position + (u * step)) & tableMask;
+ tableDecode[uPosition].baseValue = spread[s + u];
+ }
+ position = (position + (unroll * step)) & tableMask;
+ }
+ assert(position == 0);
+ }
+ } else {
+ U32 const tableMask = tableSize-1;
U32 const step = FSE_TABLESTEP(tableSize);
U32 s, position = 0;
for (s=0; s<maxSV1; s++) {
int i;
- for (i=0; i<normalizedCounter[s]; i++) {
+ int const n = normalizedCounter[s];
+ for (i=0; i<n; i++) {
tableDecode[position].baseValue = s;
position = (position + step) & tableMask;
while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
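The fast path added above replaces the data-dependent inner loop of the classic FSE spread with two regular passes. A self-contained sketch of the same idea (names and bounds here are illustrative; it assumes counts are non-negative and sum exactly to tableSize, a power of two of at least 32 so the stride is odd):

    #include <stdint.h>
    #include <stddef.h>

    #define TABLE_STEP(size) (((size) >> 1) + ((size) >> 3) + 3)  /* same formula as FSE_TABLESTEP */

    static void spread_symbols(uint8_t* table, size_t tableSize,
                               const int16_t* counts, unsigned maxSymbol1)
    {
        uint8_t spread[512];                    /* tableSize <= 512, as asserted above */
        size_t const mask = tableSize - 1;
        size_t const step = TABLE_STEP(tableSize);
        size_t pos = 0, i, position = 0;
        unsigned s;
        /* Stage 1: lay the symbols out contiguously, one byte per table cell
         * (the patch does this 8 bytes at a time with MEM_write64). */
        for (s = 0; s < maxSymbol1; ++s) {
            int c;
            for (c = 0; c < counts[s]; ++c) spread[pos++] = (uint8_t)s;
        }
        /* Stage 2: scatter with a stride that is coprime to the table size,
         * so each slot is visited exactly once and the loop trip count no
         * longer depends on the per-symbol counts. */
        for (i = 0; i < tableSize; ++i) {
            table[position] = spread[i];
            position = (position + step) & mask;
        }
    }

The branch-miss saving comes from stage 2 having a fixed trip count, which is why the patch only takes this path when there are no -1 (low-probability) counts to special-case.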
@@ -414,7 +470,8 @@ ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
}
/* Build Decoding table */
- { U32 u;
+ {
+ U32 u;
for (u=0; u<tableSize; u++) {
U32 const symbol = tableDecode[u].baseValue;
U32 const nextState = symbolNext[symbol]++;
@@ -423,7 +480,46 @@ ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
assert(nbAdditionalBits[symbol] < 255);
tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];
tableDecode[u].baseValue = baseValue[symbol];
- } }
+ }
+ }
+}
+
+/* Avoids the FORCE_INLINE of the _body() function. */
+static void ZSTD_buildFSETable_body_default(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U32* nbAdditionalBits,
+ unsigned tableLog, void* wksp, size_t wkspSize)
+{
+ ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
+ baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
+}
+
+#if DYNAMIC_BMI2
+TARGET_ATTRIBUTE("bmi2") static void ZSTD_buildFSETable_body_bmi2(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U32* nbAdditionalBits,
+ unsigned tableLog, void* wksp, size_t wkspSize)
+{
+ ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
+ baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
+}
+#endif
+
+void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U32* nbAdditionalBits,
+ unsigned tableLog, void* wksp, size_t wkspSize, int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ ZSTD_buildFSETable_body_bmi2(dt, normalizedCounter, maxSymbolValue,
+ baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
+ return;
+ }
+#endif
+ (void)bmi2;
+ ZSTD_buildFSETable_body_default(dt, normalizedCounter, maxSymbolValue,
+ baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
}
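ZSTD_buildFSETable() now follows zstd's usual DYNAMIC_BMI2 dispatch idiom: one force-inlined body, compiled once per target attribute, selected at runtime. A generic sketch of the pattern (GCC/Clang on x86-64 assumed; all names are hypothetical):

    #include <stddef.h>

    #if defined(__GNUC__) && defined(__x86_64__)
    #  define MY_DYNAMIC_BMI2 1
    #else
    #  define MY_DYNAMIC_BMI2 0
    #endif

    /* The shared body is always inlined into each wrapper, so the compiler
     * can re-specialize it for the wrapper's target attribute. */
    static inline __attribute__((always_inline))
    size_t work_body(const unsigned* in, size_t n)
    {
        size_t acc = 0, i;
        for (i = 0; i < n; ++i) acc += in[i];   /* stand-in for the real kernel */
        return acc;
    }

    static size_t work_default(const unsigned* in, size_t n) { return work_body(in, n); }

    #if MY_DYNAMIC_BMI2
    __attribute__((target("bmi2")))
    static size_t work_bmi2(const unsigned* in, size_t n) { return work_body(in, n); }
    #endif

    size_t work(const unsigned* in, size_t n, int cpuHasBmi2)
    {
    #if MY_DYNAMIC_BMI2
        if (cpuHasBmi2) return work_bmi2(in, n);
    #endif
        (void)cpuHasBmi2;
        return work_default(in, n);
    }

In the patch the bmi2 flag comes from dctx->bmi2, which is set once from a CPUID probe when the context is initialized.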
@@ -435,7 +531,8 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
const void* src, size_t srcSize,
const U32* baseValue, const U32* nbAdditionalBits,
const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
- int ddictIsCold, int nbSeq)
+ int ddictIsCold, int nbSeq, U32* wksp, size_t wkspSize,
+ int bmi2)
{
switch(type)
{
@@ -467,7 +564,7 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected, "");
RETURN_ERROR_IF(tableLog > maxLog, corruption_detected, "");
- ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);
+ ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize, bmi2);
*DTablePtr = DTableSpace;
return headerSize;
}
@@ -480,7 +577,7 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
const void* src, size_t srcSize)
{
- const BYTE* const istart = (const BYTE* const)src;
+ const BYTE* const istart = (const BYTE*)src;
const BYTE* const iend = istart + srcSize;
const BYTE* ip = istart;
int nbSeq;
@@ -499,7 +596,8 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
if (nbSeq > 0x7F) {
if (nbSeq == 0xFF) {
RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "");
- nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
+ nbSeq = MEM_readLE16(ip) + LONGNBSEQ;
+ ip+=2;
} else {
RETURN_ERROR_IF(ip >= iend, srcSize_wrong, "");
nbSeq = ((nbSeq-0x80)<<8) + *ip++;
@@ -520,7 +618,9 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
ip, iend-ip,
LL_base, LL_bits,
LL_defaultDTable, dctx->fseEntropy,
- dctx->ddictIsCold, nbSeq);
+ dctx->ddictIsCold, nbSeq,
+ dctx->workspace, sizeof(dctx->workspace),
+ dctx->bmi2);
RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed");
ip += llhSize;
}
@@ -530,7 +630,9 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
ip, iend-ip,
OF_base, OF_bits,
OF_defaultDTable, dctx->fseEntropy,
- dctx->ddictIsCold, nbSeq);
+ dctx->ddictIsCold, nbSeq,
+ dctx->workspace, sizeof(dctx->workspace),
+ dctx->bmi2);
RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed");
ip += ofhSize;
}
@@ -540,7 +642,9 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
ip, iend-ip,
ML_base, ML_bits,
ML_defaultDTable, dctx->fseEntropy,
- dctx->ddictIsCold, nbSeq);
+ dctx->ddictIsCold, nbSeq,
+ dctx->workspace, sizeof(dctx->workspace),
+ dctx->bmi2);
RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed");
ip += mlhSize;
}
@@ -554,7 +658,6 @@ typedef struct {
size_t litLength;
size_t matchLength;
size_t offset;
- const BYTE* match;
} seq_t;
typedef struct {
@@ -568,9 +671,6 @@ typedef struct {
ZSTD_fseState stateOffb;
ZSTD_fseState stateML;
size_t prevOffset[ZSTD_REP_NUM];
- const BYTE* prefixStart;
- const BYTE* dictEnd;
- size_t pos;
} seqState_t;
/*! ZSTD_overlapCopy8() :
@@ -686,12 +786,12 @@ size_t ZSTD_execSequenceEnd(BYTE* op,
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
match = dictEnd - (prefixStart-match);
if (match + sequence.matchLength <= dictEnd) {
- memmove(oLitEnd, match, sequence.matchLength);
+ ZSTD_memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
{ size_t const length1 = dictEnd - match;
- memmove(oLitEnd, match, length1);
+ ZSTD_memmove(oLitEnd, match, length1);
op = oLitEnd + length1;
sequence.matchLength -= length1;
match = prefixStart;
@@ -752,12 +852,12 @@ size_t ZSTD_execSequence(BYTE* op,
RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, "");
match = dictEnd + (match - prefixStart);
if (match + sequence.matchLength <= dictEnd) {
- memmove(oLitEnd, match, sequence.matchLength);
+ ZSTD_memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
{ size_t const length1 = dictEnd - match;
- memmove(oLitEnd, match, length1);
+ ZSTD_memmove(oLitEnd, match, length1);
op = oLitEnd + length1;
sequence.matchLength -= length1;
match = prefixStart;
@@ -832,10 +932,9 @@ ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, ZSTD
: 0)
typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
-typedef enum { ZSTD_p_noPrefetch=0, ZSTD_p_prefetch=1 } ZSTD_prefetch_e;
FORCE_INLINE_TEMPLATE seq_t
-ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, const ZSTD_prefetch_e prefetch)
+ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
{
seq_t seq;
ZSTD_seqSymbol const llDInfo = seqState->stateLL.table[seqState->stateLL.state];
@@ -910,14 +1009,6 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, c
DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
(U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
- if (prefetch == ZSTD_p_prefetch) {
- size_t const pos = seqState->pos + seq.litLength;
- const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
- seq.match = matchBase + pos - seq.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
- * No consequence though : no memory access will occur, offset is only used for prefetching */
- seqState->pos = pos + seq.matchLength;
- }
-
/* ANS state update
* gcc-9.0.0 does 2.5% worse with ZSTD_updateFseStateWithDInfo().
* clang-9.2.0 does 7% worse with ZSTD_updateFseState().
@@ -948,7 +1039,7 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, c
}
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
-static int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd)
+MEM_STATIC int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd)
{
size_t const windowSize = dctx->fParams.windowSize;
/* No dictionary used. */
@@ -969,6 +1060,7 @@ MEM_STATIC void ZSTD_assertValidSequence(
seq_t const seq,
BYTE const* prefixStart, BYTE const* virtualStart)
{
+#if DEBUGLEVEL >= 1
size_t const windowSize = dctx->fParams.windowSize;
size_t const sequenceSize = seq.litLength + seq.matchLength;
BYTE const* const oLitEnd = op + seq.litLength;
@@ -986,6 +1078,9 @@ MEM_STATIC void ZSTD_assertValidSequence(
/* Offset must be within our window. */
assert(seq.offset <= windowSize);
}
+#else
+ (void)dctx, (void)op, (void)oend, (void)seq, (void)prefixStart, (void)virtualStart;
+#endif
}
#endif
@@ -1000,7 +1095,7 @@ ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
- BYTE* const ostart = (BYTE* const)dst;
+ BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + maxDstSize;
BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr;
@@ -1014,7 +1109,6 @@ ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
/* Regen sequences */
if (nbSeq) {
seqState_t seqState;
- size_t error = 0;
dctx->fseEntropy = 1;
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
RETURN_ERROR_IF(
@@ -1048,13 +1142,14 @@ ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
* If you see most cycles served out of the DSB you've hit the good case.
* If it is pretty even then you may be in an okay case.
*
- * I've been able to reproduce this issue on the following CPUs:
+ * This issue has been reproduced on the following CPUs:
* - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9
* Use Instruments->Counters to get DSB/MITE cycles.
* I never got performance swings, but I was able to
* go from the good case of mostly DSB to half of the
* cycles served from MITE.
* - Coffeelake: Intel i9-9900k
+ * - Coffeelake: Intel i7-9700k
*
* I haven't been able to reproduce the instability or DSB misses on any
* of the following CPUS:
@@ -1067,33 +1162,35 @@ ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
*
* https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4
*/
+ __asm__(".p2align 6");
+ __asm__("nop");
__asm__(".p2align 5");
__asm__("nop");
+# if __GNUC__ >= 9
+ /* better for gcc-9 and gcc-10, worse for clang and gcc-8 */
+ __asm__(".p2align 3");
+# else
__asm__(".p2align 4");
+# endif
#endif
for ( ; ; ) {
- seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_noPrefetch);
+ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
#endif
+ if (UNLIKELY(ZSTD_isError(oneSeqSize)))
+ return oneSeqSize;
DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+ op += oneSeqSize;
+ if (UNLIKELY(!--nbSeq))
+ break;
BIT_reloadDStream(&(seqState.DStream));
- /* gcc and clang both don't like early returns in this loop.
- * gcc doesn't like early breaks either.
- * Instead save an error and report it at the end.
- * When there is an error, don't increment op, so we don't
- * overwrite.
- */
- if (UNLIKELY(ZSTD_isError(oneSeqSize))) error = oneSeqSize;
- else op += oneSeqSize;
- if (UNLIKELY(!--nbSeq)) break;
}
/* check if reached exact end */
DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
- if (ZSTD_isError(error)) return error;
RETURN_ERROR_IF(nbSeq, corruption_detected, "");
RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
/* save reps for next block */
@@ -1104,7 +1201,7 @@ ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
{ size_t const lastLLSize = litEnd - litPtr;
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
if (op != NULL) {
- memcpy(op, litPtr, lastLLSize);
+ ZSTD_memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
}
}
@@ -1124,6 +1221,24 @@ ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_prefetchMatch(size_t prefetchPos, seq_t const sequence,
+ const BYTE* const prefixStart, const BYTE* const dictEnd)
+{
+ prefetchPos += sequence.litLength;
+ { const BYTE* const matchBase = (sequence.offset > prefetchPos) ? dictEnd : prefixStart;
+ const BYTE* const match = matchBase + prefetchPos - sequence.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
+ * No consequence though : memory address is only used for prefetching, not for dereferencing */
+ PREFETCH_L1(match); PREFETCH_L1(match+CACHELINE_SIZE); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
+ }
+ return prefetchPos + sequence.matchLength;
+}
+
+/* This decoding function employs prefetching
+ * to reduce latency impact of cache misses.
+ * It's generally employed when block contains a significant portion of long-distance matches
+ * or when coupled with a "cold" dictionary */
FORCE_INLINE_TEMPLATE size_t
ZSTD_decompressSequencesLong_body(
ZSTD_DCtx* dctx,
@@ -1134,7 +1249,7 @@ ZSTD_decompressSequencesLong_body(
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
- BYTE* const ostart = (BYTE* const)dst;
+ BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + maxDstSize;
BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr;
@@ -1146,18 +1261,17 @@ ZSTD_decompressSequencesLong_body(
/* Regen sequences */
if (nbSeq) {
-#define STORED_SEQS 4
+#define STORED_SEQS 8
#define STORED_SEQS_MASK (STORED_SEQS-1)
-#define ADVANCED_SEQS 4
+#define ADVANCED_SEQS STORED_SEQS
seq_t sequences[STORED_SEQS];
int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
seqState_t seqState;
int seqNb;
+ size_t prefetchPos = (size_t)(op-prefixStart); /* track position relative to prefixStart */
+
dctx->fseEntropy = 1;
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
- seqState.prefixStart = prefixStart;
- seqState.pos = (size_t)(op-prefixStart);
- seqState.dictEnd = dictEnd;
assert(dst != NULL);
assert(iend >= ip);
RETURN_ERROR_IF(
@@ -1169,21 +1283,23 @@ ZSTD_decompressSequencesLong_body(
/* prepare in advance */
for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
- sequences[seqNb] = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);
- PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
+ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+ prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
+ sequences[seqNb] = sequence;
}
RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected, "");
/* decode and decompress */
for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
- seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);
+ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
#endif
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
- PREFETCH_L1(sequence.match); PREFETCH_L1(sequence.match + sequence.matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
+
+ prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
sequences[seqNb & STORED_SEQS_MASK] = sequence;
op += oneSeqSize;
}
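The prefetch bookkeeping that used to live inside seqState_t (prefixStart, dictEnd, pos) is now carried by the local prefetchPos and the ZSTD_prefetchMatch() helper, and the pipeline depth grew from 4 to 8 stored sequences. A self-contained sketch of the pipeline shape (the stand-in functions below are trivial stubs, not zstd APIs):

    #include <stddef.h>

    #define STORED 8   /* ring size, power of two, mirrors STORED_SEQS */

    typedef struct { size_t litLength, matchLength, offset; } my_seq_t;

    /* Trivial stand-ins for ZSTD_decodeSequence(), ZSTD_prefetchMatch() and
     * ZSTD_execSequence(); only the pipeline structure matters here. */
    static my_seq_t decode_one(void)                       { my_seq_t s = {1, 4, 1}; return s; }
    static size_t   prefetch_match(size_t pos, my_seq_t s) { return pos + s.litLength + s.matchLength; }
    static size_t   execute_one(my_seq_t s)                { return s.litLength + s.matchLength; }

    static void decode_pipelined(int nbSeq)
    {
        my_seq_t ring[STORED];
        size_t prefetchPos = 0;
        int n;
        int const advance = nbSeq < STORED ? nbSeq : STORED;
        for (n = 0; n < advance; ++n) {            /* fill the pipeline */
            ring[n & (STORED-1)] = decode_one();
            prefetchPos = prefetch_match(prefetchPos, ring[n & (STORED-1)]);
        }
        for (; n < nbSeq; ++n) {                   /* steady state */
            my_seq_t const fresh = decode_one();
            prefetchPos = prefetch_match(prefetchPos, fresh);
            /* Execute the sequence decoded `advance` iterations earlier; its
             * match was prefetched long enough ago to be in cache by now. */
            (void)execute_one(ring[(n - advance) & (STORED-1)]);
            ring[n & (STORED-1)] = fresh;
        }
        for (n = nbSeq - advance; n < nbSeq; ++n)  /* drain the pipeline */
            (void)execute_one(ring[n & (STORED-1)]);
    }

Tracking only a relative prefetchPos (distance from prefixStart) is enough because the computed address is used purely for PREFETCH_L1 and never dereferenced, as the comment in ZSTD_prefetchMatch() notes.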
@@ -1209,7 +1325,7 @@ ZSTD_decompressSequencesLong_body(
{ size_t const lastLLSize = litEnd - litPtr;
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
if (op != NULL) {
- memcpy(op, litPtr, lastLLSize);
+ ZSTD_memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
}
}
@@ -1409,9 +1525,9 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
}
-void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
+void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize)
{
- if (dst != dctx->previousDstEnd) { /* not contiguous */
+ if (dst != dctx->previousDstEnd && dstSize > 0) { /* not contiguous */
dctx->dictEnd = dctx->previousDstEnd;
dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
dctx->prefixStart = dst;
@@ -1425,7 +1541,7 @@ size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
const void* src, size_t srcSize)
{
size_t dSize;
- ZSTD_checkContinuity(dctx, dst);
+ ZSTD_checkContinuity(dctx, dst, dstCapacity);
dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
dctx->previousDstEnd = (char*)dst + dSize;
return dSize;
diff --git a/Utilities/cmzstd/lib/decompress/zstd_decompress_block.h b/Utilities/cmzstd/lib/decompress/zstd_decompress_block.h
index bf39b7350c..049a0cd84c 100644
--- a/Utilities/cmzstd/lib/decompress/zstd_decompress_block.h
+++ b/Utilities/cmzstd/lib/decompress/zstd_decompress_block.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -15,7 +15,7 @@
/*-*******************************************************
* Dependencies
*********************************************************/
-#include <stddef.h> /* size_t */
+#include "../common/zstd_deps.h" /* size_t */
#include "../zstd.h" /* DCtx, and some public functions */
#include "../common/zstd_internal.h" /* blockProperties_t, and some public functions */
#include "zstd_decompress_internal.h" /* ZSTD_seqSymbol */
@@ -48,12 +48,15 @@ size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
* this function must be called with valid parameters only
* (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.)
* in which case it cannot fail.
+ * The workspace must be 4-byte aligned and at least ZSTD_BUILD_FSE_TABLE_WKSP_SIZE bytes, which is
+ * defined in zstd_decompress_internal.h.
* Internal use only.
*/
void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
const short* normalizedCounter, unsigned maxSymbolValue,
const U32* baseValue, const U32* nbAdditionalBits,
- unsigned tableLog);
+ unsigned tableLog, void* wksp, size_t wkspSize,
+ int bmi2);
#endif /* ZSTD_DEC_BLOCK_H */
diff --git a/Utilities/cmzstd/lib/decompress/zstd_decompress_internal.h b/Utilities/cmzstd/lib/decompress/zstd_decompress_internal.h
index 9ad96c5548..ebda0c9031 100644
--- a/Utilities/cmzstd/lib/decompress/zstd_decompress_internal.h
+++ b/Utilities/cmzstd/lib/decompress/zstd_decompress_internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -27,26 +27,26 @@
/*-*******************************************************
* Constants
*********************************************************/
-static const U32 LL_base[MaxLL+1] = {
+static UNUSED_ATTR const U32 LL_base[MaxLL+1] = {
0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15,
16, 18, 20, 22, 24, 28, 32, 40,
48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
0x2000, 0x4000, 0x8000, 0x10000 };
-static const U32 OF_base[MaxOff+1] = {
+static UNUSED_ATTR const U32 OF_base[MaxOff+1] = {
0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D,
0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD,
0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
-static const U32 OF_bits[MaxOff+1] = {
+static UNUSED_ATTR const U32 OF_bits[MaxOff+1] = {
0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31 };
-static const U32 ML_base[MaxML+1] = {
+static UNUSED_ATTR const U32 ML_base[MaxML+1] = {
3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26,
@@ -73,12 +73,16 @@ static const U32 ML_base[MaxML+1] = {
#define SEQSYMBOL_TABLE_SIZE(log) (1 + (1 << (log)))
+#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE (sizeof(S16) * (MaxSeq + 1) + (1u << MaxFSELog) + sizeof(U64))
+#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 ((ZSTD_BUILD_FSE_TABLE_WKSP_SIZE + sizeof(U32) - 1) / sizeof(U32))
+
typedef struct {
ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)]; /* Note : Space reserved for FSE Tables */
ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)]; /* is also used as temporary workspace while building hufTable during DDict creation */
ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)]; /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
U32 rep[ZSTD_REP_NUM];
+ U32 workspace[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];
} ZSTD_entropyDTables_t;
typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
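For a sense of scale, the new per-context workspace is small. Plugging in the usual zstd constants (MaxSeq == 52 and MaxFSELog == 9 from zstd_internal.h; treat these as recalled values rather than quotations):

    sizeof(S16) * (MaxSeq + 1)  =  2 * 53  = 106 bytes   (symbolNext[])
    (1u << MaxFSELog)           =            512 bytes   (spread[])
    sizeof(U64)                 =              8 bytes   (slack for the 8-byte over-writes)
    total                       =            626 bytes   ->  157 U32 words after rounding up

so moving the FSE build scratch off the stack into ZSTD_entropyDTables_t costs well under a kilobyte per context.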
@@ -95,10 +99,12 @@ typedef enum {
ZSTD_use_once = 1 /* Use the dictionary once and set to ZSTD_dont_use */
} ZSTD_dictUses_e;
-typedef enum {
- ZSTD_obm_buffered = 0, /* Buffer the output */
- ZSTD_obm_stable = 1 /* ZSTD_outBuffer is stable */
-} ZSTD_outBufferMode_e;
+/* Hashset for storing references to multiple ZSTD_DDict within ZSTD_DCtx */
+typedef struct {
+ const ZSTD_DDict** ddictPtrTable;
+ size_t ddictPtrTableSize;
+ size_t ddictPtrCount;
+} ZSTD_DDictHashSet;
struct ZSTD_DCtx_s
{
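The new ZSTD_DDictHashSet lets one DCtx hold references to several digested dictionaries and pick the right one by dictID at frame-header time. A hedged usage sketch, assuming the experimental ZSTD_d_refMultipleDDicts parameter and ZSTD_rmd_refMultipleDDicts enumerator that accompany this field in zstd.h (verify the exact names against the bundled header):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    static void ref_two_dicts(ZSTD_DCtx* dctx,
                              const ZSTD_DDict* d1, const ZSTD_DDict* d2)
    {
        ZSTD_DCtx_setParameter(dctx, ZSTD_d_refMultipleDDicts, ZSTD_rmd_refMultipleDDicts);
        /* Each referenced DDict lands in the hash set; at decode time the
         * frame's dictID selects the matching entry. */
        ZSTD_DCtx_refDDict(dctx, d1);
        ZSTD_DCtx_refDDict(dctx, d2);
    }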
@@ -114,6 +120,7 @@ struct ZSTD_DCtx_s
const void* dictEnd; /* end of previous segment */
size_t expected;
ZSTD_frameHeader fParams;
+ U64 processedCSize;
U64 decodedSize;
blockType_e bType; /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
ZSTD_dStage stage;
@@ -122,6 +129,8 @@ struct ZSTD_DCtx_s
XXH64_state_t xxhState;
size_t headerSize;
ZSTD_format_e format;
+ ZSTD_forceIgnoreChecksum_e forceIgnoreChecksum; /* User specified: if == 1, will ignore checksums in compressed frame. Default == 0 */
+ U32 validateChecksum; /* if == 1, will validate checksum. Is == 1 if (fParams.checksumFlag == 1) and (forceIgnoreChecksum == 0). */
const BYTE* litPtr;
ZSTD_customMem customMem;
size_t litSize;
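forceIgnoreChecksum records the user's request and validateChecksum is the derived per-frame decision (checksum present and not ignored). From the API side this is driven by the experimental ZSTD_d_forceIgnoreChecksum parameter; the snippet below is a hedged sketch, not taken from this patch:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    static void skip_checksum_validation(ZSTD_DCtx* dctx)
    {
        /* With this set, validateChecksum stays 0 even for frames whose
         * header carries a checksum, so the XXH64 verification is skipped. */
        ZSTD_DCtx_setParameter(dctx, ZSTD_d_forceIgnoreChecksum, ZSTD_d_ignoreChecksum);
    }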
@@ -135,6 +144,8 @@ struct ZSTD_DCtx_s
U32 dictID;
int ddictIsCold; /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */
ZSTD_dictUses_e dictUses;
+ ZSTD_DDictHashSet* ddictSet; /* Hash set for multiple ddicts */
+ ZSTD_refMultipleDDicts_e refMultipleDDicts; /* User specified: if == 1, will allow references to multiple DDicts. Default == 0 (disabled) */
/* streaming */
ZSTD_dStreamStage streamStage;
@@ -152,7 +163,7 @@ struct ZSTD_DCtx_s
U32 legacyVersion;
U32 hostageByte;
int noForwardProgress;
- ZSTD_outBufferMode_e outBufferMode;
+ ZSTD_bufferMode_e outBufferMode;
ZSTD_outBuffer expectedOutBuffer;
/* workspace */
@@ -165,6 +176,11 @@ struct ZSTD_DCtx_s
void const* dictContentBeginForFuzzing;
void const* dictContentEndForFuzzing;
#endif
+
+ /* Tracing */
+#if ZSTD_TRACE
+ ZSTD_TraceCtx traceCtx;
+#endif
}; /* typedef'd to ZSTD_DCtx within "zstd.h" */
@@ -183,7 +199,7 @@ size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
* If yes, do nothing (continue on current segment).
* If not, classify previous segment as "external dictionary", and start a new segment.
* This function cannot fail. */
-void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst);
+void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize);
#endif /* ZSTD_DECOMPRESS_INTERNAL_H */
diff --git a/Utilities/cmzstd/lib/deprecated/zbuff.h b/Utilities/cmzstd/lib/deprecated/zbuff.h
index 03cb14a039..b83ea0fed5 100644
--- a/Utilities/cmzstd/lib/deprecated/zbuff.h
+++ b/Utilities/cmzstd/lib/deprecated/zbuff.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/Utilities/cmzstd/lib/deprecated/zbuff_common.c b/Utilities/cmzstd/lib/deprecated/zbuff_common.c
index 579bc4df14..e7d01a0818 100644
--- a/Utilities/cmzstd/lib/deprecated/zbuff_common.c
+++ b/Utilities/cmzstd/lib/deprecated/zbuff_common.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/Utilities/cmzstd/lib/deprecated/zbuff_compress.c b/Utilities/cmzstd/lib/deprecated/zbuff_compress.c
index 2d20b13775..2e72267356 100644
--- a/Utilities/cmzstd/lib/deprecated/zbuff_compress.c
+++ b/Utilities/cmzstd/lib/deprecated/zbuff_compress.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/Utilities/cmzstd/lib/deprecated/zbuff_decompress.c b/Utilities/cmzstd/lib/deprecated/zbuff_decompress.c
index d3c49e84b8..d73c0f35fa 100644
--- a/Utilities/cmzstd/lib/deprecated/zbuff_decompress.c
+++ b/Utilities/cmzstd/lib/deprecated/zbuff_decompress.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/Utilities/cmzstd/lib/dictBuilder/cover.c b/Utilities/cmzstd/lib/dictBuilder/cover.c
index da54ef15f2..8364444d15 100644
--- a/Utilities/cmzstd/lib/dictBuilder/cover.c
+++ b/Utilities/cmzstd/lib/dictBuilder/cover.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -26,47 +26,57 @@
#include <string.h> /* memset */
#include <time.h> /* clock */
+#ifndef ZDICT_STATIC_LINKING_ONLY
+# define ZDICT_STATIC_LINKING_ONLY
+#endif
+
#include "../common/mem.h" /* read */
#include "../common/pool.h"
#include "../common/threading.h"
-#include "cover.h"
#include "../common/zstd_internal.h" /* includes zstd.h */
-#ifndef ZDICT_STATIC_LINKING_ONLY
-#define ZDICT_STATIC_LINKING_ONLY
-#endif
-#include "zdict.h"
+#include "../zdict.h"
+#include "cover.h"
/*-*************************************
* Constants
***************************************/
#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
-#define DEFAULT_SPLITPOINT 1.0
+#define COVER_DEFAULT_SPLITPOINT 1.0
/*-*************************************
* Console display
***************************************/
+#ifndef LOCALDISPLAYLEVEL
static int g_displayLevel = 2;
+#endif
+#undef DISPLAY
#define DISPLAY(...) \
{ \
fprintf(stderr, __VA_ARGS__); \
fflush(stderr); \
}
+#undef LOCALDISPLAYLEVEL
#define LOCALDISPLAYLEVEL(displayLevel, l, ...) \
if (displayLevel >= l) { \
DISPLAY(__VA_ARGS__); \
} /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */
+#undef DISPLAYLEVEL
#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)
+#ifndef LOCALDISPLAYUPDATE
+static const clock_t g_refreshRate = CLOCKS_PER_SEC * 15 / 100;
+static clock_t g_time = 0;
+#endif
+#undef LOCALDISPLAYUPDATE
#define LOCALDISPLAYUPDATE(displayLevel, l, ...) \
if (displayLevel >= l) { \
- if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) { \
+ if ((clock() - g_time > g_refreshRate) || (displayLevel >= 4)) { \
g_time = clock(); \
DISPLAY(__VA_ARGS__); \
} \
}
+#undef DISPLAYUPDATE
#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)
-static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
-static clock_t g_time = 0;
/*-*************************************
* Hash table
@@ -120,9 +130,9 @@ static int COVER_map_init(COVER_map_t *map, U32 size) {
/**
* Internal hash function
*/
-static const U32 prime4bytes = 2654435761U;
+static const U32 COVER_prime4bytes = 2654435761U;
static U32 COVER_map_hash(COVER_map_t *map, U32 key) {
- return (key * prime4bytes) >> (32 - map->sizeLog);
+ return (key * COVER_prime4bytes) >> (32 - map->sizeLog);
}
/**
@@ -215,7 +225,7 @@ typedef struct {
} COVER_ctx_t;
/* We need a global context for qsort... */
-static COVER_ctx_t *g_ctx = NULL;
+static COVER_ctx_t *g_coverCtx = NULL;
/*-*************************************
* Helper functions
@@ -258,11 +268,11 @@ static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) {
/**
* Same as COVER_cmp() except ties are broken by pointer value
- * NOTE: g_ctx must be set to call this function. A global is required because
+ * NOTE: g_coverCtx must be set to call this function. A global is required because
* qsort doesn't take an opaque pointer.
*/
-static int COVER_strict_cmp(const void *lp, const void *rp) {
- int result = COVER_cmp(g_ctx, lp, rp);
+static int WIN_CDECL COVER_strict_cmp(const void *lp, const void *rp) {
+ int result = COVER_cmp(g_coverCtx, lp, rp);
if (result == 0) {
result = lp < rp ? -1 : 1;
}
@@ -271,8 +281,8 @@ static int COVER_strict_cmp(const void *lp, const void *rp) {
/**
* Faster version for d <= 8.
*/
-static int COVER_strict_cmp8(const void *lp, const void *rp) {
- int result = COVER_cmp8(g_ctx, lp, rp);
+static int WIN_CDECL COVER_strict_cmp8(const void *lp, const void *rp) {
+ int result = COVER_cmp8(g_coverCtx, lp, rp);
if (result == 0) {
result = lp < rp ? -1 : 1;
}
@@ -603,7 +613,7 @@ static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
/* qsort doesn't take an opaque pointer, so pass as a global.
* On OpenBSD qsort() is not guaranteed to be stable, their mergesort() is.
*/
- g_ctx = ctx;
+ g_coverCtx = ctx;
#if defined(__OpenBSD__)
mergesort(ctx->suffix, ctx->suffixSize, sizeof(U32),
(ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
@@ -946,7 +956,7 @@ void COVER_dictSelectionFree(COVER_dictSelection_t selection){
free(selection.dictContent);
}
-COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent,
+COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBufferCapacity,
size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples,
size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize) {
@@ -954,8 +964,8 @@ COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent,
size_t largestCompressed = 0;
BYTE* customDictContentEnd = customDictContent + dictContentSize;
- BYTE * largestDictbuffer = (BYTE *)malloc(dictContentSize);
- BYTE * candidateDictBuffer = (BYTE *)malloc(dictContentSize);
+ BYTE * largestDictbuffer = (BYTE *)malloc(dictBufferCapacity);
+ BYTE * candidateDictBuffer = (BYTE *)malloc(dictBufferCapacity);
double regressionTolerance = ((double)params.shrinkDictMaxRegression / 100.0) + 1.00;
if (!largestDictbuffer || !candidateDictBuffer) {
@@ -967,7 +977,7 @@ COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent,
/* Initial dictionary size and compressed size */
memcpy(largestDictbuffer, customDictContent, dictContentSize);
dictContentSize = ZDICT_finalizeDictionary(
- largestDictbuffer, dictContentSize, customDictContent, dictContentSize,
+ largestDictbuffer, dictBufferCapacity, customDictContent, dictContentSize,
samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);
if (ZDICT_isError(dictContentSize)) {
@@ -1001,7 +1011,7 @@ COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent,
while (dictContentSize < largestDict) {
memcpy(candidateDictBuffer, largestDictbuffer, largestDict);
dictContentSize = ZDICT_finalizeDictionary(
- candidateDictBuffer, dictContentSize, customDictContentEnd - dictContentSize, dictContentSize,
+ candidateDictBuffer, dictBufferCapacity, customDictContentEnd - dictContentSize, dictContentSize,
samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);
if (ZDICT_isError(dictContentSize)) {
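Passing dictBufferCapacity (rather than the shrinking dictContentSize) matches the public contract of ZDICT_finalizeDictionary(), whose second argument is the full destination capacity. For reference, its prototype as recalled from zdict.h (a recollection, not a quotation):

    size_t ZDICT_finalizeDictionary(void* dstDictBuffer, size_t maxDictSize,
                                    const void* dictContent, size_t dictContentSize,
                                    const void* samplesBuffer, const size_t* samplesSizes,
                                    unsigned nbSamples, ZDICT_params_t parameters);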
@@ -1053,18 +1063,19 @@ typedef struct COVER_tryParameters_data_s {
* This function is thread safe if zstd is compiled with multithreaded support.
* It takes its parameters as an *OWNING* opaque pointer to support threading.
*/
-static void COVER_tryParameters(void *opaque) {
+static void COVER_tryParameters(void *opaque)
+{
/* Save parameters as local variables */
- COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t *)opaque;
+ COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t*)opaque;
const COVER_ctx_t *const ctx = data->ctx;
const ZDICT_cover_params_t parameters = data->parameters;
size_t dictBufferCapacity = data->dictBufferCapacity;
size_t totalCompressedSize = ERROR(GENERIC);
/* Allocate space for hash table, dict, and freqs */
COVER_map_t activeDmers;
- BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity);
+ BYTE* const dict = (BYTE*)malloc(dictBufferCapacity);
COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC));
- U32 *freqs = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
+ U32* const freqs = (U32*)malloc(ctx->suffixSize * sizeof(U32));
if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
goto _cleanup;
@@ -1079,7 +1090,7 @@ static void COVER_tryParameters(void *opaque) {
{
const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
dictBufferCapacity, parameters);
- selection = COVER_selectDict(dict + tail, dictBufferCapacity - tail,
+ selection = COVER_selectDict(dict + tail, dictBufferCapacity, dictBufferCapacity - tail,
ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets,
totalCompressedSize);
@@ -1094,19 +1105,18 @@ _cleanup:
free(data);
COVER_map_destroy(&activeDmers);
COVER_dictSelectionFree(selection);
- if (freqs) {
- free(freqs);
- }
+ free(freqs);
}
ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
- void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
- const size_t *samplesSizes, unsigned nbSamples,
- ZDICT_cover_params_t *parameters) {
+ void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer,
+ const size_t* samplesSizes, unsigned nbSamples,
+ ZDICT_cover_params_t* parameters)
+{
/* constants */
const unsigned nbThreads = parameters->nbThreads;
const double splitPoint =
- parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint;
+ parameters->splitPoint <= 0.0 ? COVER_DEFAULT_SPLITPOINT : parameters->splitPoint;
const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
diff --git a/Utilities/cmzstd/lib/dictBuilder/cover.h b/Utilities/cmzstd/lib/dictBuilder/cover.h
index f2aa0e35e3..1aacdddd6f 100644
--- a/Utilities/cmzstd/lib/dictBuilder/cover.h
+++ b/Utilities/cmzstd/lib/dictBuilder/cover.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, Facebook, Inc.
+ * Copyright (c) Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -8,6 +8,10 @@
* You may select, at your option, one of the above-listed licenses.
*/
+#ifndef ZDICT_STATIC_LINKING_ONLY
+# define ZDICT_STATIC_LINKING_ONLY
+#endif
+
#include <stdio.h> /* fprintf */
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memset */
@@ -16,10 +20,7 @@
#include "../common/pool.h"
#include "../common/threading.h"
#include "../common/zstd_internal.h" /* includes zstd.h */
-#ifndef ZDICT_STATIC_LINKING_ONLY
-#define ZDICT_STATIC_LINKING_ONLY
-#endif
-#include "zdict.h"
+#include "../zdict.h"
/**
* COVER_best_t is used for two purposes:
@@ -152,6 +153,6 @@ void COVER_dictSelectionFree(COVER_dictSelection_t selection);
* smallest dictionary within a specified regression of the compressed size
* from the largest dictionary.
*/
- COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent,
+ COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBufferCapacity,
size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples,
size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize);
diff --git a/Utilities/cmzstd/lib/dictBuilder/divsufsort.c b/Utilities/cmzstd/lib/dictBuilder/divsufsort.c
index ead9220442..a2870fb3ba 100644
--- a/Utilities/cmzstd/lib/dictBuilder/divsufsort.c
+++ b/Utilities/cmzstd/lib/dictBuilder/divsufsort.c
@@ -1576,7 +1576,7 @@ note:
/* Construct the inverse suffix array of type B* suffixes using trsort. */
trsort(ISAb, SA, m, 1);
- /* Set the sorted order of tyoe B* suffixes. */
+ /* Set the sorted order of type B* suffixes. */
for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) {
for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { }
if(0 <= i) {
diff --git a/Utilities/cmzstd/lib/dictBuilder/fastcover.c b/Utilities/cmzstd/lib/dictBuilder/fastcover.c
index 485c333b54..ed789f92f4 100644
--- a/Utilities/cmzstd/lib/dictBuilder/fastcover.c
+++ b/Utilities/cmzstd/lib/dictBuilder/fastcover.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, Facebook, Inc.
+ * Copyright (c) Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -16,15 +16,17 @@
#include <string.h> /* memset */
#include <time.h> /* clock */
+#ifndef ZDICT_STATIC_LINKING_ONLY
+# define ZDICT_STATIC_LINKING_ONLY
+#endif
+
#include "../common/mem.h" /* read */
#include "../common/pool.h"
#include "../common/threading.h"
-#include "cover.h"
#include "../common/zstd_internal.h" /* includes zstd.h */
-#ifndef ZDICT_STATIC_LINKING_ONLY
-#define ZDICT_STATIC_LINKING_ONLY
-#endif
-#include "zdict.h"
+#include "../compress/zstd_compress_internal.h" /* ZSTD_hash*() */
+#include "../zdict.h"
+#include "cover.h"
/*-*************************************
@@ -33,7 +35,7 @@
#define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
#define FASTCOVER_MAX_F 31
#define FASTCOVER_MAX_ACCEL 10
-#define DEFAULT_SPLITPOINT 0.75
+#define FASTCOVER_DEFAULT_SPLITPOINT 0.75
#define DEFAULT_F 20
#define DEFAULT_ACCEL 1
@@ -41,50 +43,50 @@
/*-*************************************
* Console display
***************************************/
+#ifndef LOCALDISPLAYLEVEL
static int g_displayLevel = 2;
+#endif
+#undef DISPLAY
#define DISPLAY(...) \
{ \
fprintf(stderr, __VA_ARGS__); \
fflush(stderr); \
}
+#undef LOCALDISPLAYLEVEL
#define LOCALDISPLAYLEVEL(displayLevel, l, ...) \
if (displayLevel >= l) { \
DISPLAY(__VA_ARGS__); \
} /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */
+#undef DISPLAYLEVEL
#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)
+#ifndef LOCALDISPLAYUPDATE
+static const clock_t g_refreshRate = CLOCKS_PER_SEC * 15 / 100;
+static clock_t g_time = 0;
+#endif
+#undef LOCALDISPLAYUPDATE
#define LOCALDISPLAYUPDATE(displayLevel, l, ...) \
if (displayLevel >= l) { \
- if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) { \
+ if ((clock() - g_time > g_refreshRate) || (displayLevel >= 4)) { \
g_time = clock(); \
DISPLAY(__VA_ARGS__); \
} \
}
+#undef DISPLAYUPDATE
#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)
-static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
-static clock_t g_time = 0;
/*-*************************************
* Hash Functions
***************************************/
-static const U64 prime6bytes = 227718039650203ULL;
-static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; }
-static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
-
-static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
-static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
-static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
-
-
/**
- * Hash the d-byte value pointed to by p and mod 2^f
+ * Hash the d-byte value pointed to by p and mod 2^f into the frequency vector
*/
-static size_t FASTCOVER_hashPtrToIndex(const void* p, U32 h, unsigned d) {
+static size_t FASTCOVER_hashPtrToIndex(const void* p, U32 f, unsigned d) {
if (d == 6) {
- return ZSTD_hash6Ptr(p, h) & ((1 << h) - 1);
+ return ZSTD_hash6Ptr(p, f);
}
- return ZSTD_hash8Ptr(p, h) & ((1 << h) - 1);
+ return ZSTD_hash8Ptr(p, f);
}
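FASTCOVER now reuses the multiplicative hashes from zstd_compress_internal.h instead of keeping its own copies. The formulas are unchanged; restated as a standalone helper (constants copied from the lines removed above):

    #include <stdint.h>
    #include <stddef.h>

    static const uint64_t prime6bytes = 227718039650203ULL;
    static const uint64_t prime8bytes = 0xCF1BBCDCB7A56463ULL;

    /* Multiplicative hash of the low 6 bytes (or all 8 bytes) of u into f
     * bits, i.e. an index into the 2^f-entry frequency array. */
    static size_t hash_dmer(uint64_t u, unsigned f, unsigned d)
    {
        if (d == 6) return (size_t)(((u << 16) * prime6bytes) >> (64 - f));
        return (size_t)((u * prime8bytes) >> (64 - f));
    }

Because the hash already produces exactly f bits, the old `& ((1 << f) - 1)` mask was redundant, which is why the call sites above drop it.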
@@ -461,20 +463,20 @@ typedef struct FASTCOVER_tryParameters_data_s {
* This function is thread safe if zstd is compiled with multithreaded support.
* It takes its parameters as an *OWNING* opaque pointer to support threading.
*/
-static void FASTCOVER_tryParameters(void *opaque)
+static void FASTCOVER_tryParameters(void* opaque)
{
/* Save parameters as local variables */
- FASTCOVER_tryParameters_data_t *const data = (FASTCOVER_tryParameters_data_t *)opaque;
+ FASTCOVER_tryParameters_data_t *const data = (FASTCOVER_tryParameters_data_t*)opaque;
const FASTCOVER_ctx_t *const ctx = data->ctx;
const ZDICT_cover_params_t parameters = data->parameters;
size_t dictBufferCapacity = data->dictBufferCapacity;
size_t totalCompressedSize = ERROR(GENERIC);
/* Initialize array to keep track of frequency of dmer within activeSegment */
- U16* segmentFreqs = (U16 *)calloc(((U64)1 << ctx->f), sizeof(U16));
+ U16* segmentFreqs = (U16*)calloc(((U64)1 << ctx->f), sizeof(U16));
/* Allocate space for hash table, dict, and freqs */
- BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity);
+ BYTE *const dict = (BYTE*)malloc(dictBufferCapacity);
COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC));
- U32 *freqs = (U32*) malloc(((U64)1 << ctx->f) * sizeof(U32));
+ U32* freqs = (U32*) malloc(((U64)1 << ctx->f) * sizeof(U32));
if (!segmentFreqs || !dict || !freqs) {
DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
goto _cleanup;
@@ -486,7 +488,7 @@ static void FASTCOVER_tryParameters(void *opaque)
parameters, segmentFreqs);
const unsigned nbFinalizeSamples = (unsigned)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100);
- selection = COVER_selectDict(dict + tail, dictBufferCapacity - tail,
+ selection = COVER_selectDict(dict + tail, dictBufferCapacity, dictBufferCapacity - tail,
ctx->samples, ctx->samplesSizes, nbFinalizeSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets,
totalCompressedSize);
@@ -617,7 +619,7 @@ ZDICT_optimizeTrainFromBuffer_fastCover(
/* constants */
const unsigned nbThreads = parameters->nbThreads;
const double splitPoint =
- parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint;
+ parameters->splitPoint <= 0.0 ? FASTCOVER_DEFAULT_SPLITPOINT : parameters->splitPoint;
const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
diff --git a/Utilities/cmzstd/lib/dictBuilder/zdict.c b/Utilities/cmzstd/lib/dictBuilder/zdict.c
index 6d0b04231b..459cbe4d19 100644
--- a/Utilities/cmzstd/lib/dictBuilder/zdict.c
+++ b/Utilities/cmzstd/lib/dictBuilder/zdict.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -23,9 +23,13 @@
/* Unix Large Files support (>4GB) */
#define _FILE_OFFSET_BITS 64
#if (defined(__sun__) && (!defined(__LP64__))) /* Sun Solaris 32-bits requires specific definitions */
+# ifndef _LARGEFILE_SOURCE
# define _LARGEFILE_SOURCE
+# endif
#elif ! defined(__LP64__) /* No point defining Large file for 64 bit */
+# ifndef _LARGEFILE64_SOURCE
# define _LARGEFILE64_SOURCE
+# endif
#endif
@@ -37,18 +41,19 @@
#include <stdio.h> /* fprintf, fopen, ftello64 */
#include <time.h> /* clock */
+#ifndef ZDICT_STATIC_LINKING_ONLY
+# define ZDICT_STATIC_LINKING_ONLY
+#endif
+#define HUF_STATIC_LINKING_ONLY
+
#include "../common/mem.h" /* read */
#include "../common/fse.h" /* FSE_normalizeCount, FSE_writeNCount */
-#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h" /* HUF_buildCTable, HUF_writeCTable */
#include "../common/zstd_internal.h" /* includes zstd.h */
#include "../common/xxhash.h" /* XXH64 */
-#include "divsufsort.h"
-#ifndef ZDICT_STATIC_LINKING_ONLY
-# define ZDICT_STATIC_LINKING_ONLY
-#endif
-#include "zdict.h"
#include "../compress/zstd_compress_internal.h" /* ZSTD_loadCEntropy() */
+#include "../zdict.h"
+#include "divsufsort.h"
/*-*************************************
@@ -62,14 +67,15 @@
#define NOISELENGTH 32
-static const int g_compressionLevel_default = 3;
static const U32 g_selectivity_default = 9;
/*-*************************************
* Console display
***************************************/
+#undef DISPLAY
#define DISPLAY(...) { fprintf(stderr, __VA_ARGS__); fflush( stderr ); }
+#undef DISPLAYLEVEL
#define DISPLAYLEVEL(l, ...) if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */
static clock_t ZDICT_clockSpan(clock_t nPrevious) { return clock() - nPrevious; }
@@ -105,20 +111,17 @@ size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictSize)
size_t headerSize;
if (dictSize <= 8 || MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return ERROR(dictionary_corrupted);
- { unsigned offcodeMaxValue = MaxOff;
- ZSTD_compressedBlockState_t* bs = (ZSTD_compressedBlockState_t*)malloc(sizeof(ZSTD_compressedBlockState_t));
+ { ZSTD_compressedBlockState_t* bs = (ZSTD_compressedBlockState_t*)malloc(sizeof(ZSTD_compressedBlockState_t));
U32* wksp = (U32*)malloc(HUF_WORKSPACE_SIZE);
- short* offcodeNCount = (short*)malloc((MaxOff+1)*sizeof(short));
- if (!bs || !wksp || !offcodeNCount) {
+ if (!bs || !wksp) {
headerSize = ERROR(memory_allocation);
} else {
ZSTD_reset_compressedBlockState(bs);
- headerSize = ZSTD_loadCEntropy(bs, wksp, offcodeNCount, &offcodeMaxValue, dictBuffer, dictSize);
+ headerSize = ZSTD_loadCEntropy(bs, wksp, dictBuffer, dictSize);
}
free(bs);
free(wksp);
- free(offcodeNCount);
}
return headerSize;
@@ -532,6 +535,7 @@ static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,
clock_t displayClock = 0;
clock_t const refreshRate = CLOCKS_PER_SEC * 3 / 10;
+# undef DISPLAYUPDATE
# define DISPLAYUPDATE(l, ...) if (notificationLevel>=l) { \
if (ZDICT_clockSpan(displayClock) > refreshRate) \
{ displayClock = clock(); DISPLAY(__VA_ARGS__); \
@@ -706,7 +710,7 @@ static void ZDICT_flatLit(unsigned* countLit)
#define OFFCODE_MAX 30 /* only applicable to first block */
static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
- unsigned compressionLevel,
+ int compressionLevel,
const void* srcBuffer, const size_t* fileSizes, unsigned nbFiles,
const void* dictBuffer, size_t dictBufferSize,
unsigned notificationLevel)
@@ -741,7 +745,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
memset(repOffset, 0, sizeof(repOffset));
repOffset[1] = repOffset[4] = repOffset[8] = 1;
memset(bestRepOffset, 0, sizeof(bestRepOffset));
- if (compressionLevel==0) compressionLevel = g_compressionLevel_default;
+ if (compressionLevel==0) compressionLevel = ZSTD_CLEVEL_DEFAULT;
params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize);
esr.dict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dlm_byRef, ZSTD_dct_rawContent, params.cParams, ZSTD_defaultCMem);
@@ -786,7 +790,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
/* note : the result of this phase should be used to better appreciate the impact on statistics */
total=0; for (u=0; u<=offcodeMax; u++) total+=offcodeCount[u];
- errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax);
+ errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax, /* useLowProbCount */ 1);
if (FSE_isError(errorCode)) {
eSize = errorCode;
DISPLAYLEVEL(1, "FSE_normalizeCount error with offcodeCount \n");
@@ -795,7 +799,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
Offlog = (U32)errorCode;
total=0; for (u=0; u<=MaxML; u++) total+=matchLengthCount[u];
- errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, MaxML);
+ errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, MaxML, /* useLowProbCount */ 1);
if (FSE_isError(errorCode)) {
eSize = errorCode;
DISPLAYLEVEL(1, "FSE_normalizeCount error with matchLengthCount \n");
@@ -804,7 +808,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
mlLog = (U32)errorCode;
total=0; for (u=0; u<=MaxLL; u++) total+=litLengthCount[u];
- errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, MaxLL);
+ errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, MaxLL, /* useLowProbCount */ 1);
if (FSE_isError(errorCode)) {
eSize = errorCode;
DISPLAYLEVEL(1, "FSE_normalizeCount error with litLengthCount \n");
@@ -893,7 +897,7 @@ size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
size_t hSize;
#define HBUFFSIZE 256 /* should prove large enough for all entropy headers */
BYTE header[HBUFFSIZE];
- int const compressionLevel = (params.compressionLevel == 0) ? g_compressionLevel_default : params.compressionLevel;
+ int const compressionLevel = (params.compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : params.compressionLevel;
U32 const notificationLevel = params.notificationLevel;
/* check conditions */
@@ -939,7 +943,7 @@ static size_t ZDICT_addEntropyTablesFromBuffer_advanced(
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
ZDICT_params_t params)
{
- int const compressionLevel = (params.compressionLevel == 0) ? g_compressionLevel_default : params.compressionLevel;
+ int const compressionLevel = (params.compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : params.compressionLevel;
U32 const notificationLevel = params.notificationLevel;
size_t hSize = 8;
@@ -968,16 +972,11 @@ static size_t ZDICT_addEntropyTablesFromBuffer_advanced(
return MIN(dictBufferCapacity, hSize+dictContentSize);
}
-/* Hidden declaration for dbio.c */
-size_t ZDICT_trainFromBuffer_unsafe_legacy(
- void* dictBuffer, size_t maxDictSize,
- const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
- ZDICT_legacy_params_t params);
/*! ZDICT_trainFromBuffer_unsafe_legacy() :
-* Warning : `samplesBuffer` must be followed by noisy guard band.
+* Warning : `samplesBuffer` must be followed by noisy guard band !!!
* @return : size of dictionary, or an error code which can be tested with ZDICT_isError()
*/
-size_t ZDICT_trainFromBuffer_unsafe_legacy(
+static size_t ZDICT_trainFromBuffer_unsafe_legacy(
void* dictBuffer, size_t maxDictSize,
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
ZDICT_legacy_params_t params)
@@ -1114,8 +1113,8 @@ size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
memset(&params, 0, sizeof(params));
params.d = 8;
params.steps = 4;
- /* Default to level 6 since no compression level information is available */
- params.zParams.compressionLevel = 3;
+ /* Use default level since no compression level information is available */
+ params.zParams.compressionLevel = ZSTD_CLEVEL_DEFAULT;
#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=1)
params.zParams.notificationLevel = DEBUGLEVEL;
#endif
diff --git a/Utilities/cmzstd/lib/dictBuilder/zdict.h b/Utilities/cmzstd/lib/zdict.h
index ff2e77faf7..75b05dbf43 100644
--- a/Utilities/cmzstd/lib/dictBuilder/zdict.h
+++ b/Utilities/cmzstd/lib/zdict.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -36,6 +36,145 @@ extern "C" {
# define ZDICTLIB_API ZDICTLIB_VISIBILITY
#endif
+/*******************************************************************************
+ * Zstd dictionary builder
+ *
+ * FAQ
+ * ===
+ * Why should I use a dictionary?
+ * ------------------------------
+ *
+ * Zstd can use dictionaries to improve compression ratio of small data.
+ * Traditionally small files don't compress well because there is very little
+ * repetition in a single sample, since it is small. But, if you are compressing
+ * many similar files, like a bunch of JSON records that share the same
+ * structure, you can train a dictionary ahead of time on some samples of
+ * these files. Then, zstd can use the dictionary to find repetitions that are
+ * present across samples. This can vastly improve compression ratio.
+ *
+ * When is a dictionary useful?
+ * ----------------------------
+ *
+ * Dictionaries are useful when compressing many small files that are similar.
+ * The larger a file is, the less benefit a dictionary will have. Generally,
+ * we don't expect dictionary compression to be effective past 100KB. And the
+ * smaller a file is, the more we would expect the dictionary to help.
+ *
+ * How do I use a dictionary?
+ * --------------------------
+ *
+ * Simply pass the dictionary to the zstd compressor with
+ * `ZSTD_CCtx_loadDictionary()`. The same dictionary must then be passed to
+ * the decompressor, using `ZSTD_DCtx_loadDictionary()`. There are other
+ * more advanced functions that allow selecting some options, see zstd.h for
+ * complete documentation.
+ *
+ * What is a zstd dictionary?
+ * --------------------------
+ *
+ * A zstd dictionary has two pieces: Its header, and its content. The header
+ * contains a magic number, the dictionary ID, and entropy tables. These
+ * entropy tables allow zstd to save on header costs in the compressed file,
+ * which really matters for small data. The content is just bytes, which are
+ * repeated content that is common across many samples.
+ *
+ * What is a raw content dictionary?
+ * ---------------------------------
+ *
+ * A raw content dictionary is just bytes. It doesn't have a zstd dictionary
+ * header, a dictionary ID, or entropy tables. Any buffer is a valid raw
+ * content dictionary.
+ *
+ * How do I train a dictionary?
+ * ----------------------------
+ *
+ * Gather samples from your use case. These samples should be similar to each
+ * other. If you have several use cases, you could try to train one dictionary
+ * per use case.
+ *
+ * Pass those samples to `ZDICT_trainFromBuffer()` and that will train your
+ * dictionary. There are a few advanced versions of this function, but this
+ * is a great starting point. If you want to further tune your dictionary
+ * you could try `ZDICT_optimizeTrainFromBuffer_cover()`. If that is too slow
+ * you can try `ZDICT_optimizeTrainFromBuffer_fastCover()`.
+ *
+ * If the dictionary training function fails, that is likely because you
+ * either passed too few samples, or a dictionary would not be effective
+ * for your data. Look at the messages that the dictionary trainer printed,
+ * if it doesn't say too few samples, then a dictionary would not be effective.
+ *
+ * How large should my dictionary be?
+ * ----------------------------------
+ *
+ * A reasonable dictionary size, the `dictBufferCapacity`, is about 100KB.
+ * The zstd CLI defaults to a 110KB dictionary. You likely don't need a
+ * dictionary larger than that. But, most use cases can get away with a
+ * smaller dictionary. The advanced dictionary builders can automatically
+ * shrink the dictionary for you, and select the smallest size that
+ * doesn't hurt compression ratio too much. See the `shrinkDict` parameter.
+ * A smaller dictionary can save memory, and potentially speed up
+ * compression.
+ *
+ * How many samples should I provide to the dictionary builder?
+ * ------------------------------------------------------------
+ *
+ * We generally recommend passing ~100x the size of the dictionary
+ * in samples. A few thousand should suffice. Having too few samples
+ * can hurt the dictionary's effectiveness. Having more samples will
+ * only improve the dictionary's effectiveness. But having too many
+ * samples can slow down the dictionary builder.
+ *
+ * How do I determine if a dictionary will be effective?
+ * -----------------------------------------------------
+ *
+ * Simply train a dictionary and try it out. You can use zstd's built in
+ * benchmarking tool to test the dictionary effectiveness.
+ *
+ * # Benchmark levels 1-3 without a dictionary
+ * zstd -b1e3 -r /path/to/my/files
+ * # Benchmark levels 1-3 with a dictionary
+ * zstd -b1e3 -r /path/to/my/files -D /path/to/my/dictionary
+ *
+ * When should I retrain a dictionary?
+ * -----------------------------------
+ *
+ * You should retrain a dictionary when its effectiveness drops. Dictionary
+ * effectiveness drops as the data you are compressing changes. Generally, we do
+ * expect dictionaries to "decay" over time, as your data changes, but the rate
+ * at which they decay depends on your use case. Internally, we regularly
+ * retrain dictionaries, and if the new dictionary performs significantly
+ * better than the old dictionary, we will ship the new dictionary.
+ *
+ * I have a raw content dictionary, how do I turn it into a zstd dictionary?
+ * -------------------------------------------------------------------------
+ *
+ * If you have a raw content dictionary, e.g. by manually constructing it, or
+ * using a third-party dictionary builder, you can turn it into a zstd
+ * dictionary by using `ZDICT_finalizeDictionary()`. You'll also have to
+ * provide some samples of the data. It will add the zstd header to the
+ * raw content, which contains a dictionary ID and entropy tables, which
+ * will improve compression ratio, and allow zstd to write the dictionary ID
+ * into the frame, if you so choose.
+ *
+ * Do I have to use zstd's dictionary builder?
+ * -------------------------------------------
+ *
+ * No! You can construct dictionary content however you please, it is just
+ * bytes. It will always be valid as a raw content dictionary. If you want
+ * a zstd dictionary, which can improve compression ratio, use
+ * `ZDICT_finalizeDictionary()`.
+ *
+ * What is the attack surface of a zstd dictionary?
+ * ------------------------------------------------
+ *
+ * Zstd is heavily fuzz tested, including loading fuzzed dictionaries, so
+ * zstd should never crash, or access out-of-bounds memory no matter what
+ * the dictionary is. However, if an attacker can control the dictionary
+ * during decompression, they can cause zstd to generate arbitrary bytes,
+ * just like if they controlled the compressed data.
+ *
+ ******************************************************************************/
+
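The FAQ above condenses into a short training-and-usage sketch. This is illustrative only: the sample arrays, the ~100 KB dictionary capacity, and the reduced error handling are assumptions, not part of the header.

    #include <zstd.h>
    #include <zdict.h>

    /* Sketch: train a dictionary from samples, then compress one buffer with it.
     * samples/sampleSizes/nbSamples are assumed to be prepared by the caller. */
    size_t train_and_compress(void* dict, size_t dictCapacity,          /* e.g. a ~100 KB buffer */
                              const void* samples, const size_t* sampleSizes, unsigned nbSamples,
                              void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize)
    {
        size_t const dictSize = ZDICT_trainFromBuffer(dict, dictCapacity,
                                                      samples, sampleSizes, nbSamples);
        if (ZDICT_isError(dictSize)) return dictSize;

        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);   /* decompression must load the same dictionary */
        size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
        ZSTD_freeCCtx(cctx);
        return cSize;                                     /* test with ZSTD_isError() */
    }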
/*! ZDICT_trainFromBuffer():
* Train a dictionary from an array of samples.
@@ -64,7 +203,14 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCap
typedef struct {
int compressionLevel; /*< optimize for a specific zstd compression level; 0 means default */
unsigned notificationLevel; /*< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */
- unsigned dictID; /*< force dictID value; 0 means auto mode (32-bits random value) */
+ unsigned dictID; /*< force dictID value; 0 means auto mode (32-bits random value)
+ * NOTE: The zstd format reserves some dictionary IDs for future use.
+ * You may use them in private settings, but be warned that they
+ * may be used by zstd in a public dictionary registry in the future.
+ * These dictionary IDs are:
+ * - low range : <= 32767
+ * - high range : >= (2^31)
+ */
} ZDICT_params_t;
/*! ZDICT_finalizeDictionary():
@@ -264,10 +410,11 @@ typedef struct {
* Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0.
*/
ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy(
- void *dictBuffer, size_t dictBufferCapacity,
- const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
+ void* dictBuffer, size_t dictBufferCapacity,
+ const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
ZDICT_legacy_params_t parameters);
+
/* Deprecation warnings */
/* It is generally possible to disable deprecation warnings from compiler,
for example with -Wno-deprecated-declarations for gcc
@@ -279,7 +426,7 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy(
# define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
# define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API
-# elif (ZDICT_GCC_VERSION >= 405) || defined(__clang__)
+# elif defined(__clang__) || (ZDICT_GCC_VERSION >= 405)
# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message)))
# elif (ZDICT_GCC_VERSION >= 301)
# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated))
diff --git a/Utilities/cmzstd/lib/zstd.h b/Utilities/cmzstd/lib/zstd.h
index 8c6fc6ae90..4651e6c4dc 100644
--- a/Utilities/cmzstd/lib/zstd.h
+++ b/Utilities/cmzstd/lib/zstd.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -71,17 +71,22 @@ extern "C" {
/*------ Version ------*/
#define ZSTD_VERSION_MAJOR 1
-#define ZSTD_VERSION_MINOR 4
-#define ZSTD_VERSION_RELEASE 5
-
+#define ZSTD_VERSION_MINOR 5
+#define ZSTD_VERSION_RELEASE 0
#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
-ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< to check runtime library version */
+
+/*! ZSTD_versionNumber() :
+ * Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE). */
+ZSTDLIB_API unsigned ZSTD_versionNumber(void);
#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
#define ZSTD_QUOTE(str) #str
#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)
#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
-ZSTDLIB_API const char* ZSTD_versionString(void); /* requires v1.3.0+ */
+
+/*! ZSTD_versionString() :
+ * Return runtime library version, like "1.4.5". Requires v1.3.0+. */
+ZSTDLIB_API const char* ZSTD_versionString(void);
/* *************************************
* Default constant
@@ -104,7 +109,6 @@ ZSTDLIB_API const char* ZSTD_versionString(void); /* requires v1.3.0+ */
#define ZSTD_BLOCKSIZE_MAX (1<<ZSTD_BLOCKSIZELOG_MAX)
-
/***************************************
* Simple API
***************************************/
@@ -161,7 +165,7 @@ ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t
* @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
-/*! ZSTD_findFrameCompressedSize() :
+/*! ZSTD_findFrameCompressedSize() : Requires v1.4.0+
* `src` should point to the start of a ZSTD frame or skippable frame.
* `srcSize` must be >= first frame size
* @return : the compressed size of the first frame starting at `src`,
@@ -175,8 +179,9 @@ ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize)
ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */
-ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed */
+ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */
ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */
+ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */
/***************************************
@@ -194,7 +199,7 @@ ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compres
*/
typedef struct ZSTD_CCtx_s ZSTD_CCtx;
ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
-ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx);
+ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* accept NULL pointer */
/*! ZSTD_compressCCtx() :
* Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
@@ -217,7 +222,7 @@ ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
* Use one context per thread for parallel execution. */
typedef struct ZSTD_DCtx_s ZSTD_DCtx;
ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);
-ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); /* accept NULL pointer */
/*! ZSTD_decompressDCtx() :
* Same as ZSTD_decompress(),
@@ -229,9 +234,9 @@ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
const void* src, size_t srcSize);
-/***************************************
-* Advanced compression API
-***************************************/
+/*********************************************
+* Advanced compression API (Requires v1.4.0+)
+**********************************************/
/* API design :
* Parameters are pushed one by one into an existing context,
@@ -261,7 +266,6 @@ typedef enum { ZSTD_fast=1,
Only the order (from fast to strong) is guaranteed */
} ZSTD_strategy;
-
typedef enum {
/* compression parameters
@@ -327,14 +331,15 @@ typedef enum {
* The higher the value of selected strategy, the more complex it is,
* resulting in stronger and slower compression.
* Special: value 0 means "use default strategy". */
-
/* LDM mode parameters */
ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching.
* This parameter is designed to improve compression ratio
* for large inputs, by finding large matches at long distance.
* It increases memory usage and window size.
* Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB
- * except when expressly set to a different value. */
+ * except when expressly set to a different value.
+ * Note: will be enabled by default if ZSTD_c_windowLog >= 128 MB and
+ * compression strategy >= ZSTD_btopt (== compression level 16+) */
ZSTD_c_ldmHashLog=161, /* Size of the table for long distance matching, as a power of 2.
* Larger values increase memory usage and compression ratio,
* but decrease compression speed.
@@ -365,20 +370,24 @@ typedef enum {
ZSTD_c_dictIDFlag=202, /* When applicable, dictionary's ID is written into frame header (default:1) */
/* multi-threading parameters */
- /* These parameters are only useful if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD).
- * They return an error otherwise. */
+ /* These parameters are only active if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD).
+ * Otherwise, trying to set any value other than the default (0) will be a no-op and return an error (a probe sketch follows this enum).
+ * In a situation where it's unknown if the linked library supports multi-threading or not,
+ * setting ZSTD_c_nbWorkers to any value >= 1 and consulting the return value provides a quick way to check this property.
+ */
ZSTD_c_nbWorkers=400, /* Select how many threads will be spawned to compress in parallel.
- * When nbWorkers >= 1, triggers asynchronous mode when used with ZSTD_compressStream*() :
+ * When nbWorkers >= 1, triggers asynchronous mode when invoking ZSTD_compressStream*() :
 * ZSTD_compressStream*() consumes input and flushes output if possible, but immediately gives back control to caller,
- * while compression work is performed in parallel, within worker threads.
+ * while compression is performed in parallel, within worker thread(s).
* (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end :
* in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call).
* More workers improve speed, but also increase memory usage.
- * Default value is `0`, aka "single-threaded mode" : no worker is spawned, compression is performed inside Caller's thread, all invocations are blocking */
+ * Default value is `0`, aka "single-threaded mode" : no worker is spawned,
+ * compression is performed inside Caller's thread, and all invocations are blocking */
ZSTD_c_jobSize=401, /* Size of a compression job. This value is enforced only when nbWorkers >= 1.
* Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.
* 0 means default, which is dynamically determined based on compression parameters.
- * Job size must be a minimum of overlap size, or 1 MB, whichever is largest.
+ * Job size must be at least the overlap size, or ZSTDMT_JOBSIZE_MIN (= 512 KB), whichever is larger.
* The minimum size is automatically and transparently enforced. */
ZSTD_c_overlapLog=402, /* Control the overlap size, as a fraction of window size.
* The overlap size is an amount of data reloaded from previous job at the beginning of a new job.
@@ -403,6 +412,13 @@ typedef enum {
* ZSTD_c_literalCompressionMode
* ZSTD_c_targetCBlockSize
* ZSTD_c_srcSizeHint
+ * ZSTD_c_enableDedicatedDictSearch
+ * ZSTD_c_stableInBuffer
+ * ZSTD_c_stableOutBuffer
+ * ZSTD_c_blockDelimiters
+ * ZSTD_c_validateSequences
+ * ZSTD_c_splitBlocks
+ * ZSTD_c_useRowMatchFinder
* Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
* note : never ever use experimentalParam? names directly;
* also, the enums values themselves are unstable and can still change.
@@ -413,7 +429,15 @@ typedef enum {
ZSTD_c_experimentalParam4=1001,
ZSTD_c_experimentalParam5=1002,
ZSTD_c_experimentalParam6=1003,
- ZSTD_c_experimentalParam7=1004
+ ZSTD_c_experimentalParam7=1004,
+ ZSTD_c_experimentalParam8=1005,
+ ZSTD_c_experimentalParam9=1006,
+ ZSTD_c_experimentalParam10=1007,
+ ZSTD_c_experimentalParam11=1008,
+ ZSTD_c_experimentalParam12=1009,
+ ZSTD_c_experimentalParam13=1010,
+ ZSTD_c_experimentalParam14=1011,
+ ZSTD_c_experimentalParam15=1012
} ZSTD_cParameter;
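As noted in the multi-threading parameters comment above, the return value of setting ZSTD_c_nbWorkers doubles as a runtime probe for multi-threading support. A minimal sketch; the fallback to the single-threaded default is an illustrative choice:

    /* Sketch: probe whether the linked libzstd was built with ZSTD_MULTITHREAD. */
    int zstd_supports_multithreading(ZSTD_CCtx* cctx)
    {
        size_t const ret = ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 1);
        if (ZSTD_isError(ret)) {
            ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 0);  /* stay in single-threaded mode */
            return 0;
        }
        return 1;
    }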
typedef struct {
@@ -498,9 +522,9 @@ ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx,
const void* src, size_t srcSize);
-/***************************************
-* Advanced decompression API
-***************************************/
+/***********************************************
+* Advanced decompression API (Requires v1.4.0+)
+************************************************/
/* The advanced API pushes parameters one by one into an existing DCtx context.
* Parameters are sticky, and remain valid for all following frames
@@ -524,11 +548,15 @@ typedef enum {
* At the time of this writing, they include :
* ZSTD_d_format
* ZSTD_d_stableOutBuffer
+ * ZSTD_d_forceIgnoreChecksum
+ * ZSTD_d_refMultipleDDicts
* Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
* note : never ever use experimentalParam? names directly
*/
ZSTD_d_experimentalParam1=1000,
- ZSTD_d_experimentalParam2=1001
+ ZSTD_d_experimentalParam2=1001,
+ ZSTD_d_experimentalParam3=1002,
+ ZSTD_d_experimentalParam4=1003
} ZSTD_dParameter;
@@ -642,7 +670,7 @@ typedef ZSTD_CCtx ZSTD_CStream; /**< CCtx and CStream are now effectively same
/* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
/*===== ZSTD_CStream management functions =====*/
ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
-ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
+ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs); /* accept NULL pointer */
/*===== Streaming compression functions =====*/
typedef enum {
@@ -658,14 +686,15 @@ typedef enum {
: note : multithreaded compression will block to flush as much output as possible. */
} ZSTD_EndDirective;
-/*! ZSTD_compressStream2() :
+/*! ZSTD_compressStream2() : Requires v1.4.0+
* Behaves about the same as ZSTD_compressStream, with additional control on end directive.
* - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
* - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)
* - output->pos must be <= dstCapacity, input->pos must be <= srcSize
* - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
+ * - endOp must be a valid directive
* - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
- * - When nbWorkers>=1, function is non-blocking : it just acquires a copy of input, and distributes jobs to internal worker threads, flush whatever is available,
+ * - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flushes to output whatever is available,
* and then immediately returns, just indicating that there is some data remaining to be flushed.
 * The function nonetheless guarantees forward progress : it will return only after it reads or writes at least 1+ byte.
* - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.
@@ -703,11 +732,11 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output
/* *****************************************************************************
- * This following is a legacy streaming API.
+ * This following is a legacy streaming API, available since v1.0+ .
* It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
* It is redundant, but remains fully supported.
- * Advanced parameters and dictionary compression can only be used through the
- * new API.
+ * Streaming in combination with advanced parameters and dictionary compression
+ * can only be used through the new API.
******************************************************************************/
/*!
@@ -762,7 +791,7 @@ typedef ZSTD_DCtx ZSTD_DStream; /**< DCtx and DStream are now effectively same
/* For compatibility with versions <= v1.2.0, prefer differentiating them. */
/*===== ZSTD_DStream management functions =====*/
ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
-ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);
+ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds); /* accept NULL pointer */
/*===== Streaming decompression functions =====*/
@@ -785,7 +814,7 @@ ZSTDLIB_API size_t ZSTD_DStreamOutSize(void); /*!< recommended size for output
/*! ZSTD_compress_usingDict() :
* Compression at an explicit compression level using a Dictionary.
* A dictionary can be any arbitrary data segment (also called a prefix),
- * or a buffer with specified information (see dictBuilder/zdict.h).
+ * or a buffer with specified information (see zdict.h).
* Note : This function loads the dictionary, resulting in significant startup delay.
* It's intended for a dictionary used only once.
* Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
@@ -828,7 +857,8 @@ ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize
int compressionLevel);
/*! ZSTD_freeCDict() :
- * Function frees memory allocated by ZSTD_createCDict(). */
+ * Function frees memory allocated by ZSTD_createCDict().
+ * If a NULL pointer is passed, no operation is performed. */
ZSTDLIB_API size_t ZSTD_freeCDict(ZSTD_CDict* CDict);
/*! ZSTD_compress_usingCDict() :
@@ -850,7 +880,8 @@ typedef struct ZSTD_DDict_s ZSTD_DDict;
ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
/*! ZSTD_freeDDict() :
- * Function frees memory allocated with ZSTD_createDDict() */
+ * Function frees memory allocated with ZSTD_createDDict()
+ * If a NULL pointer is passed, no operation is performed. */
ZSTDLIB_API size_t ZSTD_freeDDict(ZSTD_DDict* ddict);
/*! ZSTD_decompress_usingDDict() :
@@ -866,19 +897,25 @@ ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
* Dictionary helper functions
*******************************/
-/*! ZSTD_getDictID_fromDict() :
+/*! ZSTD_getDictID_fromDict() : Requires v1.4.0+
* Provides the dictID stored within dictionary.
* if @return == 0, the dictionary is not conformant with Zstandard specification.
* It can still be loaded, but as a content-only dictionary. */
ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
-/*! ZSTD_getDictID_fromDDict() :
+/*! ZSTD_getDictID_fromCDict() : Requires v1.5.0+
+ * Provides the dictID of the dictionary loaded into `cdict`.
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict);
+
+/*! ZSTD_getDictID_fromDDict() : Requires v1.4.0+
* Provides the dictID of the dictionary loaded into `ddict`.
* If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
* Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
-/*! ZSTD_getDictID_fromFrame() :
+/*! ZSTD_getDictID_fromFrame() : Requires v1.4.0+
 * Provides the dictID required to decompress the frame stored within `src`.
* If @return == 0, the dictID could not be decoded.
* This could for one of the following reasons :
@@ -892,7 +929,7 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
/*******************************************************************************
- * Advanced dictionary and prefix API
+ * Advanced dictionary and prefix API (Requires v1.4.0+)
*
* This API allows dictionaries to be used with ZSTD_compress2(),
* ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
@@ -901,7 +938,7 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
******************************************************************************/
-/*! ZSTD_CCtx_loadDictionary() :
+/*! ZSTD_CCtx_loadDictionary() : Requires v1.4.0+
* Create an internal CDict from `dict` buffer.
* Decompression will have to use same dictionary.
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
@@ -920,11 +957,11 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
* to precisely select how dictionary content must be interpreted. */
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
-/*! ZSTD_CCtx_refCDict() :
+/*! ZSTD_CCtx_refCDict() : Requires v1.4.0+
* Reference a prepared dictionary, to be used for all next compressed frames.
* Note that compression parameters are enforced from within CDict,
* and supersede any compression parameter previously set within CCtx.
- * The parameters ignored are labled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
+ * The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
* The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.
* The dictionary will remain valid for future compressed frames using same CCtx.
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
@@ -934,7 +971,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, s
* Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */
ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
-/*! ZSTD_CCtx_refPrefix() :
+/*! ZSTD_CCtx_refPrefix() : Requires v1.4.0+
* Reference a prefix (single-usage dictionary) for next compressed frame.
* A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).
* Decompression will need same prefix to properly regenerate data.
@@ -955,7 +992,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
const void* prefix, size_t prefixSize);
-/*! ZSTD_DCtx_loadDictionary() :
+/*! ZSTD_DCtx_loadDictionary() : Requires v1.4.0+
* Create an internal DDict from dict buffer,
* to be used to decompress next frames.
* The dictionary remains valid for all future frames, until explicitly invalidated.
@@ -972,9 +1009,16 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
*/
ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
-/*! ZSTD_DCtx_refDDict() :
+/*! ZSTD_DCtx_refDDict() : Requires v1.4.0+
* Reference a prepared dictionary, to be used to decompress next frames.
* The dictionary remains active for decompression of future frames using same DCtx.
+ *
+ * If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function
+ * will store the DDict references in a table, and the DDict used for decompression
+ * will be determined at decompression time, as per the dict ID in the frame.
+ * The memory for the table is allocated on the first call to refDDict, and can be
+ * freed with ZSTD_freeDCtx().
+ *
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
* Note 1 : Currently, only one dictionary can be managed.
* Referencing a new dictionary effectively "discards" any previous one.
@@ -983,7 +1027,7 @@ ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, s
*/
ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
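A sketch of the multiple-DDict mode described above. ZSTD_d_refMultipleDDicts is experimental (ZSTD_STATIC_LINKING_ONLY), and dctx, ddictA, ddictB and the buffers are assumed to exist already:

    /* Sketch: let the DCtx select the dictionary per frame, using the frame's dictID. */
    ZSTD_DCtx_setParameter(dctx, ZSTD_d_refMultipleDDicts, ZSTD_rmd_refMultipleDDicts);
    ZSTD_DCtx_refDDict(dctx, ddictA);   /* references accumulate in an internal table... */
    ZSTD_DCtx_refDDict(dctx, ddictB);   /* ...and the matching one is picked at decompression time */
    ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeDCtx(dctx);                /* also releases the DDict reference table */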
-/*! ZSTD_DCtx_refPrefix() :
+/*! ZSTD_DCtx_refPrefix() : Requires v1.4.0+
* Reference a prefix (single-usage dictionary) to decompress next frame.
* This is the reverse operation of ZSTD_CCtx_refPrefix(),
* and must use the same prefix as the one used during compression.
@@ -1004,7 +1048,7 @@ ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx,
/* === Memory management === */
-/*! ZSTD_sizeof_*() :
+/*! ZSTD_sizeof_*() : Requires v1.4.0+
* These functions give the _current_ memory usage of selected object.
* Note that object memory usage can evolve (increase or decrease) over time. */
ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
@@ -1029,6 +1073,28 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
+/* Deprecation warnings :
+ * Should these warnings be a problem, it is generally possible to disable them,
+ * typically with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual.
+ * Otherwise, it's also possible to define ZSTD_DISABLE_DEPRECATE_WARNINGS.
+ */
+#ifdef ZSTD_DISABLE_DEPRECATE_WARNINGS
+# define ZSTD_DEPRECATED(message) ZSTDLIB_API /* disable deprecation warnings */
+#else
+# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
+# define ZSTD_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_API
+# elif (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) || defined(__clang__)
+# define ZSTD_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated(message)))
+# elif defined(__GNUC__) && (__GNUC__ >= 3)
+# define ZSTD_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated))
+# elif defined(_MSC_VER)
+# define ZSTD_DEPRECATED(message) ZSTDLIB_API __declspec(deprecated(message))
+# else
+# pragma message("WARNING: You need to implement ZSTD_DEPRECATED for this compiler")
+# define ZSTD_DEPRECATED(message) ZSTDLIB_API
+# endif
+#endif /* ZSTD_DISABLE_DEPRECATE_WARNINGS */
+
/****************************************************************************************
* experimental API (static linking only)
****************************************************************************************
@@ -1100,21 +1166,40 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;
typedef struct {
- unsigned int matchPos; /* Match pos in dst */
- /* If seqDef.offset > 3, then this is seqDef.offset - 3
- * If seqDef.offset < 3, then this is the corresponding repeat offset
- * But if seqDef.offset < 3 and litLength == 0, this is the
- * repeat offset before the corresponding repeat offset
- * And if seqDef.offset == 3 and litLength == 0, this is the
- * most recent repeat offset - 1
- */
- unsigned int offset;
- unsigned int litLength; /* Literal length */
- unsigned int matchLength; /* Match length */
- /* 0 when seq not rep and seqDef.offset otherwise
- * when litLength == 0 this will be <= 4, otherwise <= 3 like normal
- */
- unsigned int rep;
+ unsigned int offset; /* The offset of the match. (NOT the same as the offset code)
+ * If offset == 0 and matchLength == 0, this sequence represents the last
+ * literals in the block of litLength size.
+ */
+
+ unsigned int litLength; /* Literal length of the sequence. */
+ unsigned int matchLength; /* Match length of the sequence. */
+
+ /* Note: Users of this API may provide a sequence with matchLength == litLength == offset == 0.
+ * In this case, we will treat the sequence as a marker for a block boundary.
+ */
+
+ unsigned int rep; /* Represents which repeat offset is represented by the field 'offset'.
+ * Ranges from [0, 3].
+ *
+ * Repeat offsets are essentially previous offsets from previous sequences sorted in
+ * recency order. For more detail, see doc/zstd_compression_format.md
+ *
+ * If rep == 0, then 'offset' does not contain a repeat offset.
+ * If rep > 0:
+ * If litLength != 0:
+ * rep == 1 --> offset == repeat_offset_1
+ * rep == 2 --> offset == repeat_offset_2
+ * rep == 3 --> offset == repeat_offset_3
+ * If litLength == 0:
+ * rep == 1 --> offset == repeat_offset_2
+ * rep == 2 --> offset == repeat_offset_3
+ * rep == 3 --> offset == repeat_offset_1 - 1
+ *
+ * Note: This field is optional. ZSTD_generateSequences() will calculate the value of
+ * 'rep', but repeat offsets do not necessarily need to be calculated from an external
+ * sequence provider's perspective. For example, ZSTD_compressSequences() does not
+ * use this 'rep' field at all (as of now).
+ */
} ZSTD_Sequence;
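As a concrete illustration of the delimiter convention above, the last entry of a block, when explicit delimiters are in use, might look like this (the literal count is arbitrary):

    /* Sketch: a block delimiter carrying 17 trailing literals and no match. */
    ZSTD_Sequence lastOfBlock;
    lastOfBlock.offset      = 0;    /* offset == 0 ... */
    lastOfBlock.matchLength = 0;    /* ... and matchLength == 0 marks the delimiter */
    lastOfBlock.litLength   = 17;   /* literals remaining at the end of the block */
    lastOfBlock.rep         = 0;    /* no repeat offset involved */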
typedef struct {
@@ -1157,6 +1242,18 @@ typedef enum {
} ZSTD_format_e;
typedef enum {
+ /* Note: this enum controls ZSTD_d_forceIgnoreChecksum */
+ ZSTD_d_validateChecksum = 0,
+ ZSTD_d_ignoreChecksum = 1
+} ZSTD_forceIgnoreChecksum_e;
+
+typedef enum {
+ /* Note: this enum controls ZSTD_d_refMultipleDDicts */
+ ZSTD_rmd_refSingleDDict = 0,
+ ZSTD_rmd_refMultipleDDicts = 1
+} ZSTD_refMultipleDDicts_e;
+
+typedef enum {
/* Note: this enum and the behavior it controls are effectively internal
* implementation details of the compressor. They are expected to continue
* to evolve and should be considered only in the context of extremely
@@ -1204,6 +1301,11 @@ typedef enum {
ZSTD_lcm_uncompressed = 2 /**< Always emit uncompressed literals. */
} ZSTD_literalCompressionMode_e;
+typedef enum {
+ ZSTD_urm_auto = 0, /* Automatically determine whether or not we use row matchfinder */
+ ZSTD_urm_disableRowMatchFinder = 1, /* Never use row matchfinder */
+ ZSTD_urm_enableRowMatchFinder = 2 /* Always use row matchfinder when applicable */
+} ZSTD_useRowMatchFinderMode_e;
/***************************************
* Frame size functions
@@ -1237,7 +1339,7 @@ ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t
* `srcSize` must be the _exact_ size of this series
* (i.e. there should be a frame boundary at `src + srcSize`)
* @return : - upper-bound for the decompressed size of all data in all successive frames
- * - if an error occured: ZSTD_CONTENTSIZE_ERROR
+ * - if an error occurred: ZSTD_CONTENTSIZE_ERROR
*
* note 1 : an error can occur if `src` contains an invalid or incorrectly formatted frame.
* note 2 : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.
@@ -1253,14 +1355,91 @@ ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcS
* or an error code (if srcSize is too small) */
ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
-/*! ZSTD_getSequences() :
- * Extract sequences from the sequence store
+typedef enum {
+ ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
+ ZSTD_sf_explicitBlockDelimiters = 1 /* Representation of ZSTD_Sequence contains explicit block delimiters */
+} ZSTD_sequenceFormat_e;
+
+/*! ZSTD_generateSequences() :
+ * Generate sequences using ZSTD_compress2, given a source buffer.
+ *
+ * Each block will end with a dummy sequence
+ * with offset == 0, matchLength == 0, and litLength == length of last literals.
+ * litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0)
+ * simply acts as a block delimiter.
+ *
* zc can be used to insert custom compression params.
* This function invokes ZSTD_compress2
- * @return : number of sequences extracted
+ *
+ * The output of this function can be fed into ZSTD_compressSequences() with CCtx
+ * setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters
+ * @return : number of sequences generated
+ */
+
+ZSTDLIB_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
+ size_t outSeqsSize, const void* src, size_t srcSize);
+
+/*! ZSTD_mergeBlockDelimiters() :
+ * Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals
+ * by merging them into the literals of the next sequence.
+ *
+ * As such, the final generated result has no explicit representation of block boundaries,
+ * and the final last literals segment is not represented in the sequences.
+ *
+ * The output of this function can be fed into ZSTD_compressSequences() with CCtx
+ * setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters
+ * @return : number of sequences left after merging
+ */
+ZSTDLIB_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);
+
+/*! ZSTD_compressSequences() :
+ * Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst.
+ * If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.)
+ * The entire source is compressed into a single frame.
+ *
+ * The compression behavior changes based on cctx params. In particular:
+ * If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain
+ * no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on
+ * the block size derived from the cctx, and sequences may be split. This is the default setting.
+ *
+ * If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain
+ * block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
+ *
+ * If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined
+ * behavior. If ZSTD_c_validateSequences == 1, then if sequence is invalid (see doc/zstd_compression_format.md for
+ * specifics regarding offset/matchlength requirements) then the function will bail out and return an error.
+ *
+ * In addition to the two adjustable experimental params, there are other important cctx params.
+ * - ZSTD_c_minMatch MUST be set to a value less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.
+ * - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression.
+ * - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset
+ * is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md
+ *
+ * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused.
+ * Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly,
+ * and cannot emit an RLE block that disagrees with the repcode history
+ * @return : final compressed size or a ZSTD error.
*/
-ZSTDLIB_API size_t ZSTD_getSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
- size_t outSeqsSize, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* src, size_t srcSize);
+
+
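Putting the three functions above together, a round trip might look like the following sketch. Everything here is experimental (ZSTD_STATIC_LINKING_ONLY), the sequence-array capacity is a guessed generous bound, and most error handling is elided:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>
    #include <stdlib.h>

    /* Sketch: extract sequences from src, then re-compress them with explicit block delimiters. */
    size_t recompress_from_sequences(void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize)
    {
        size_t const seqCapacity = srcSize / 3 + 64;   /* assumption: ample room for this sketch */
        ZSTD_Sequence* const seqs = (ZSTD_Sequence*)malloc(seqCapacity * sizeof(ZSTD_Sequence));
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();

        size_t const nbSeqs = ZSTD_generateSequences(cctx, seqs, seqCapacity, src, srcSize);
        if (ZSTD_isError(nbSeqs)) { ZSTD_freeCCtx(cctx); free(seqs); return nbSeqs; }

        /* The generated array still contains block delimiters, so declare them explicitly. */
        ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1);   /* fail cleanly on bad sequences */
        size_t const cSize = ZSTD_compressSequences(cctx, dst, dstCapacity, seqs, nbSeqs, src, srcSize);

        ZSTD_freeCCtx(cctx);
        free(seqs);
        return cSize;   /* test with ZSTD_isError() */
    }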
+/*! ZSTD_writeSkippableFrame() :
+ * Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.
+ *
+ * Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,
+ * ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.
+ * As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so
+ * the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
+ *
+ * Returns an error if destination buffer is not large enough, if the source size is not representable
+ * with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).
+ *
+ * @return : number of bytes written or a ZSTD error.
+ */
+ZSTDLIB_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, unsigned magicVariant);
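For instance, a skippable frame can carry application metadata in front of an ordinary compressed frame; a decoder that does not understand it simply skips it. The magic variant 0 and the payload below are illustrative:

    /* Sketch: metadata as a skippable frame, followed by a regular zstd frame. */
    const char meta[] = "app metadata";
    size_t const skipSize = ZSTD_writeSkippableFrame(dst, dstCapacity, meta, sizeof(meta),
                                                     0 /* ZSTD_MAGIC_SKIPPABLE_START + 0 */);
    if (!ZSTD_isError(skipSize)) {
        size_t const cSize = ZSTD_compress((char*)dst + skipSize, dstCapacity - skipSize,
                                           src, srcSize, ZSTD_CLEVEL_DEFAULT);
        /* total output is skipSize + cSize (check cSize with ZSTD_isError()) */
    }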
/***************************************
@@ -1372,7 +1551,11 @@ ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict(
typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
typedef void (*ZSTD_freeFunction) (void* opaque, void* address);
typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
-static ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */
+static
+#ifdef __GNUC__
+__attribute__((__unused__))
+#endif
+ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */
ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
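A sketch of plugging a custom allocator into ZSTD_customMem; my_arena, my_arena_alloc and my_arena_free are hypothetical application-side names:

    /* Sketch: route zstd's internal allocations through an application allocator. */
    static void* my_zstd_alloc(void* opaque, size_t size) { return my_arena_alloc(opaque, size); }   /* hypothetical */
    static void  my_zstd_free (void* opaque, void* addr)  { my_arena_free(opaque, addr); }           /* hypothetical */

    ZSTD_customMem const mem = { my_zstd_alloc, my_zstd_free, &my_arena };
    /* inside some setup function: */
    ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(mem);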
@@ -1385,12 +1568,38 @@ ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictS
ZSTD_compressionParameters cParams,
ZSTD_customMem customMem);
-ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
- ZSTD_dictLoadMethod_e dictLoadMethod,
- ZSTD_dictContentType_e dictContentType,
- ZSTD_customMem customMem);
+/* ! Thread pool :
+ * These prototypes make it possible to share a thread pool among multiple compression contexts.
+ * This can limit resources for applications with multiple threads where each one uses
+ * a threaded compression mode (via ZSTD_c_nbWorkers parameter).
+ * ZSTD_createThreadPool creates a new thread pool with a given number of threads.
+ * Note that the lifetime of such pool must exist while being used.
+ * ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value
+ * to use an internal thread pool).
+ * ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer.
+ */
+typedef struct POOL_ctx_s ZSTD_threadPool;
+ZSTDLIB_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads);
+ZSTDLIB_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool); /* accept NULL pointer */
+ZSTDLIB_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool);
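A sketch of sharing one pool between two contexts, per the note above; the thread pool API sits in the static-linking-only section, and the worker counts are arbitrary:

    /* Sketch: two compression contexts drawing workers from one shared pool. */
    ZSTD_threadPool* const pool = ZSTD_createThreadPool(4);
    ZSTD_CCtx* const cctxA = ZSTD_createCCtx();
    ZSTD_CCtx* const cctxB = ZSTD_createCCtx();
    ZSTD_CCtx_refThreadPool(cctxA, pool);
    ZSTD_CCtx_refThreadPool(cctxB, pool);
    ZSTD_CCtx_setParameter(cctxA, ZSTD_c_nbWorkers, 2);   /* jobs from both contexts run on the shared pool */
    ZSTD_CCtx_setParameter(cctxB, ZSTD_c_nbWorkers, 2);
    /* ... compress with cctxA / cctxB ... */
    ZSTD_freeCCtx(cctxA);
    ZSTD_freeCCtx(cctxB);
    ZSTD_freeThreadPool(pool);   /* the pool must outlive every context that references it */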
+/*
+ * This API is temporary and is expected to change or disappear in the future!
+ */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_customMem customMem);
+
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_customMem customMem);
+
/***************************************
* Advanced compression functions
@@ -1430,18 +1639,20 @@ ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParame
/*! ZSTD_compress_advanced() :
* Note : this function is now DEPRECATED.
* It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
- * This prototype will be marked as deprecated and generate compilation warning on reaching v1.5.x */
-ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
+ * This prototype will generate compilation warnings. */
+ZSTD_DEPRECATED("use ZSTD_compress2")
+size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize,
ZSTD_parameters params);
/*! ZSTD_compress_usingCDict_advanced() :
- * Note : this function is now REDUNDANT.
+ * Note : this function is now DEPRECATED.
* It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
- * This prototype will be marked as deprecated and generate compilation warning in some future version */
-ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
+ * This prototype will generate compilation warnings. */
+ZSTD_DEPRECATED("use ZSTD_compress2 with ZSTD_CCtx_loadDictionary")
+size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict,
@@ -1503,7 +1714,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* pre
/* Controls how the literals are compressed (default is auto).
* The value must be of type ZSTD_literalCompressionMode_e.
- * See ZSTD_literalCompressionMode_t enum definition for details.
+ * See ZSTD_literalCompressionMode_e enum definition for details.
*/
#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5
@@ -1518,12 +1729,189 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* pre
* but compression ratio may regress significantly if guess considerably underestimates */
#define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7
+/* Controls whether the new and experimental "dedicated dictionary search
+ * structure" can be used. This feature is still rough around the edges, be
+ * prepared for surprising behavior!
+ *
+ * How to use it:
+ *
+ * When using a CDict, whether to use this feature or not is controlled at
+ * CDict creation, and it must be set in a CCtxParams set passed into that
+ * construction (via ZSTD_createCDict_advanced2()). A compression will then
+ * use the feature or not based on how the CDict was constructed; the value of
+ * this param, set in the CCtx, will have no effect.
+ *
+ * However, when a dictionary buffer is passed into a CCtx, such as via
+ * ZSTD_CCtx_loadDictionary(), this param can be set on the CCtx to control
+ * whether the CDict that is created internally can use the feature or not.
+ *
+ * What it does:
+ *
+ * Normally, the internal data structures of the CDict are analogous to what
+ * would be stored in a CCtx after compressing the contents of a dictionary.
+ * To an approximation, a compression using a dictionary can then use those
+ * data structures to simply continue what is effectively a streaming
+ * compression where the simulated compression of the dictionary left off.
+ * Which is to say, the search structures in the CDict are normally the same
+ * format as in the CCtx.
+ *
+ * It is possible to do better, since the CDict is not like a CCtx: the search
+ * structures are written once during CDict creation, and then are only read
+ * after that, while the search structures in the CCtx are both read and
+ * written as the compression goes along. This means we can choose a search
+ * structure for the dictionary that is read-optimized.
+ *
+ * This feature enables the use of that different structure.
+ *
+ * Note that some of the members of the ZSTD_compressionParameters struct have
+ * different semantics and constraints in the dedicated search structure. It is
+ * highly recommended that you simply set a compression level in the CCtxParams
+ * you pass into the CDict creation call, and avoid messing with the cParams
+ * directly.
+ *
+ * Effects:
+ *
+ * This will only have any effect when the selected ZSTD_strategy
+ * implementation supports this feature. Currently, that's limited to
+ * ZSTD_greedy, ZSTD_lazy, and ZSTD_lazy2.
+ *
+ * Note that this means that the CDict tables can no longer be copied into the
+ * CCtx, so the dict attachment mode ZSTD_dictForceCopy will no longer be
+ * useable. The dictionary can only be attached or reloaded.
+ *
+ * In general, you should expect compression to be faster--sometimes very much
+ * so--and CDict creation to be slightly slower. Eventually, we will probably
+ * make this mode the default.
+ */
+#define ZSTD_c_enableDedicatedDictSearch ZSTD_c_experimentalParam8
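Following the CDict-creation route described above, a sketch using ZSTD_createCDict_advanced2(); the level, load method, and surrounding variables (dictBuffer, dictSize, cctx) are assumptions for illustration:

    /* Sketch: build a CDict that uses the read-optimized (dedicated) search structure. */
    ZSTD_CCtx_params* const cctxParams = ZSTD_createCCtxParams();
    ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_compressionLevel, 12);        /* a lazy-family level */
    ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_enableDedicatedDictSearch, 1);

    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced2(dictBuffer, dictSize,
                                                         ZSTD_dlm_byCopy, ZSTD_dct_auto,
                                                         cctxParams, ZSTD_defaultCMem);
    ZSTD_freeCCtxParams(cctxParams);
    ZSTD_CCtx_refCDict(cctx, cdict);   /* compressions referencing cdict now use the dedicated structure */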
+
+/* ZSTD_c_stableInBuffer
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Tells the compressor that the ZSTD_inBuffer will ALWAYS be the same
+ * between calls, except for the modifications that zstd makes to pos (the
+ * caller must not modify pos). This is checked by the compressor, and
+ * compression will fail if it ever changes. This means the only flush
+ * mode that makes sense is ZSTD_e_end, so zstd will error if ZSTD_e_end
+ * is not used. The data in the ZSTD_inBuffer in the range [src, src + pos)
+ * MUST not be modified during compression or you will get data corruption.
+ *
+ * When this flag is enabled zstd won't allocate an input window buffer,
+ * because the user guarantees it can reference the ZSTD_inBuffer until
+ * the frame is complete. But, it will still allocate an output buffer
+ * large enough to fit a block (see ZSTD_c_stableOutBuffer). This will also
+ * avoid the memcpy() from the input buffer to the input window buffer.
+ *
+ * NOTE: ZSTD_compressStream2() will error if ZSTD_e_end is not used.
+ * That means this flag cannot be used with ZSTD_compressStream().
+ *
+ * NOTE: So long as the ZSTD_inBuffer always points to valid memory, using
+ * this flag is ALWAYS memory safe, and will never access out-of-bounds
+ * memory. However, compression WILL fail if you violate the preconditions.
+ *
+ * WARNING: The data in the ZSTD_inBuffer in the range [src, src + pos) MUST
+ * not be modified during compression or you will get data corruption. This
+ * is because zstd needs to reference data in the ZSTD_inBuffer to find
+ * matches. Normally zstd maintains its own window buffer for this purpose,
+ * but passing this flag tells zstd to use the user provided buffer.
+ */
+#define ZSTD_c_stableInBuffer ZSTD_c_experimentalParam9
+
+/* ZSTD_c_stableOutBuffer
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Tells the compressor that the ZSTD_outBuffer will not be resized between
+ * calls. Specifically: (out.size - out.pos) will never grow. This gives the
+ * compressor the freedom to say: If the compressed data doesn't fit in the
+ * output buffer then return ZSTD_error_dstSizeTooSmall. This allows us to
+ * always compress directly into the output buffer, instead of compressing
+ * into an internal buffer and copying to the output buffer.
+ *
+ * When this flag is enabled zstd won't allocate an output buffer, because
+ * it can write directly to the ZSTD_outBuffer. It will still allocate the
+ * input window buffer (see ZSTD_c_stableInBuffer).
+ *
+ * Zstd will check that (out.size - out.pos) never grows and return an error
+ * if it does. While not strictly necessary, this should prevent surprises.
+ */
+#define ZSTD_c_stableOutBuffer ZSTD_c_experimentalParam10
+
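A combined sketch of the stable-buffer mode described by the two parameters above, assuming the whole input is available up front, dst was sized with ZSTD_compressBound(), and cctx/src/dst already exist:

    /* Sketch: single-shot streaming compression over caller-owned, pinned buffers. */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableInBuffer, 1);    /* src stays valid and unmodified */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableOutBuffer, 1);   /* (out.size - out.pos) never grows */

    ZSTD_inBuffer  in  = { src, srcSize, 0 };
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };              /* e.g. ZSTD_compressBound(srcSize) */
    size_t const remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);   /* only ZSTD_e_end is valid here */
    /* remaining == 0 on success; out.pos is the compressed size */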
+/* ZSTD_c_blockDelimiters
+ * Default is 0 == ZSTD_sf_noBlockDelimiters.
+ *
+ * For use with sequence compression API: ZSTD_compressSequences().
+ *
+ * Designates whether or not the given array of ZSTD_Sequence contains block delimiters
+ * and last literals, which are defined as sequences with offset == 0 and matchLength == 0.
+ * See the definition of ZSTD_Sequence for more specifics.
+ */
+#define ZSTD_c_blockDelimiters ZSTD_c_experimentalParam11
+
+/* ZSTD_c_validateSequences
+ * Default is 0 == disabled. Set to 1 to enable sequence validation.
+ *
+ * For use with sequence compression API: ZSTD_compressSequences().
+ * Designates whether or not we validate sequences provided to ZSTD_compressSequences()
+ * during function execution.
+ *
+ * Without validation, providing a sequence that does not conform to the zstd spec will cause
+ * undefined behavior, and may produce a corrupted block.
+ *
+ * With validation enabled, if a sequence is invalid (see doc/zstd_compression_format.md for
+ * specifics regarding offset/matchlength requirements) then the function will bail out and
+ * return an error.
+ *
+ */
+#define ZSTD_c_validateSequences ZSTD_c_experimentalParam12
+
+/* ZSTD_c_splitBlocks
+ * Default is 0 == disabled. Set to 1 to enable block splitting.
+ *
+ * Will attempt to split blocks in order to improve compression ratio at the cost of speed.
+ */
+#define ZSTD_c_splitBlocks ZSTD_c_experimentalParam13
+
+/* ZSTD_c_useRowMatchFinder
+ * Default is ZSTD_urm_auto.
+ * Controlled with ZSTD_useRowMatchFinderMode_e enum.
+ *
+ * By default, in ZSTD_urm_auto, when finalizing the compression parameters, the library
+ * will decide at runtime whether to use the row-based matchfinder based on support for SIMD
+ * instructions as well as the windowLog.
+ *
+ * Set to ZSTD_urm_disableRowMatchFinder to never use row-based matchfinder.
+ * Set to ZSTD_urm_enableRowMatchFinder to force usage of row-based matchfinder.
+ */
+#define ZSTD_c_useRowMatchFinder ZSTD_c_experimentalParam14
+
+/* ZSTD_c_deterministicRefPrefix
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Zstd produces different results for prefix compression when the prefix is
+ * directly adjacent to the data about to be compressed vs. when it isn't.
+ * This is because zstd detects that the two buffers are contiguous and it can
+ * use a more efficient match finding algorithm. However, this produces different
+ * results than when the two buffers are non-contiguous. This flag forces zstd
+ * to always load the prefix in non-contiguous mode, even if it happens to be
+ * adjacent to the data, to guarantee determinism.
+ *
+ * If you really care about determinism when using a dictionary or prefix,
+ * like when doing delta compression, you should select this option. It comes
+ * at a speed penalty of about ~2.5% if the dictionary and data happened to be
+ * contiguous, and is free if they weren't contiguous. We don't expect that
+ * intentionally making the dictionary and data contiguous will be worth the
+ * cost to memcpy() the data.
+ */
+#define ZSTD_c_deterministicRefPrefix ZSTD_c_experimentalParam15
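
For instance, a delta-compression helper (illustrative name, experimental API) could enable the flag before referencing the prefix, trading a small slowdown in the contiguous case for deterministic output:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    size_t delta_compress(ZSTD_CCtx* cctx,
                          void* dst, size_t dstCapacity,
                          const void* ref, size_t refSize,
                          const void* src, size_t srcSize)
    {
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_deterministicRefPrefix, 1);
        ZSTD_CCtx_refPrefix(cctx, ref, refSize);   /* prefix applies to the next frame only */
        return ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    }
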
+
/*! ZSTD_CCtx_getParameter() :
* Get the requested compression parameter value, selected by enum ZSTD_cParameter,
* and store it into int* value.
* @return : 0, or an error code (which can be tested with ZSTD_isError()).
*/
-ZSTDLIB_API size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);
+ZSTDLIB_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);
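
A short sketch of reading a parameter back (the function lives behind ZSTD_STATIC_LINKING_ONLY; helper name illustrative):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>
    #include <stdio.h>

    void print_level(const ZSTD_CCtx* cctx)
    {
        int level = 0;
        size_t const err = ZSTD_CCtx_getParameter(cctx, ZSTD_c_compressionLevel, &level);
        if (!ZSTD_isError(err)) printf("compression level: %d\n", level);
    }
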
/*! ZSTD_CCtx_params :
@@ -1538,13 +1926,13 @@ ZSTDLIB_API size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param
* These parameters will be applied to
* all subsequent frames.
* - ZSTD_compressStream2() : Do compression using the CCtx.
- * - ZSTD_freeCCtxParams() : Free the memory.
+ * - ZSTD_freeCCtxParams() : Free the memory, accept NULL pointer.
*
* This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams()
* for static allocation of CCtx for single-threaded compression.
*/
ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
-ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);
+ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params); /* accept NULL pointer */
/*! ZSTD_CCtxParams_reset() :
* Reset params to default values.
@@ -1563,11 +1951,13 @@ ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compre
*/
ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
-/*! ZSTD_CCtxParams_setParameter() :
+/*! ZSTD_CCtxParams_setParameter() : Requires v1.4.0+
* Similar to ZSTD_CCtx_setParameter.
* Set one compression parameter, selected by enum ZSTD_cParameter.
- * Parameters must be applied to a ZSTD_CCtx using ZSTD_CCtx_setParametersUsingCCtxParams().
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Parameters must be applied to a ZSTD_CCtx using
+ * ZSTD_CCtx_setParametersUsingCCtxParams().
+ * @result : a code representing success or failure (which can be tested with
+ * ZSTD_isError()).
*/
ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
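
Putting the workflow described above together, a minimal sketch (experimental API; the chosen parameters and helper name are illustrative):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    size_t apply_staged_params(ZSTD_CCtx* cctx)
    {
        ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
        size_t err;
        if (params == NULL) return (size_t)-1;   /* illustrative failure value */
        ZSTD_CCtxParams_setParameter(params, ZSTD_c_compressionLevel, 19);
        ZSTD_CCtxParams_setParameter(params, ZSTD_c_checksumFlag, 1);
        err = ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);
        ZSTD_freeCCtxParams(params);             /* accepts NULL, as documented above */
        return err;
    }
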
@@ -1576,7 +1966,7 @@ ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_c
* Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
*/
-ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
+ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
/*! ZSTD_CCtx_setParametersUsingCCtxParams() :
* Apply a set of ZSTD_CCtx_params to the compression context.
@@ -1647,6 +2037,13 @@ ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* pre
*/
ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
+/*! ZSTD_DCtx_getParameter() :
+ * Get the requested decompression parameter value, selected by enum ZSTD_dParameter,
+ * and store it into int* value.
+ * @return : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value);
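
A short sketch of querying a decompression parameter (experimental API; helper name illustrative):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    int get_window_log_max(ZSTD_DCtx* dctx)
    {
        int windowLogMax = 0;
        size_t const err = ZSTD_DCtx_getParameter(dctx, ZSTD_d_windowLogMax, &windowLogMax);
        return ZSTD_isError(err) ? -1 : windowLogMax;
    }
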
+
/* ZSTD_d_format
* experimental parameter,
* allowing selection between ZSTD_format_e input compression formats
@@ -1684,12 +2081,49 @@ ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowS
*/
#define ZSTD_d_stableOutBuffer ZSTD_d_experimentalParam2
+/* ZSTD_d_forceIgnoreChecksum
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Tells the decompressor to skip checksum validation during decompression, regardless
+ * of whether checksumming was specified during compression. This offers some
+ * slight performance benefits, and may be useful for debugging.
+ * Param has values of type ZSTD_forceIgnoreChecksum_e.
+ */
+#define ZSTD_d_forceIgnoreChecksum ZSTD_d_experimentalParam3
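
A minimal sketch; the enum value ZSTD_d_ignoreChecksum is assumed from ZSTD_forceIgnoreChecksum_e and should be verified against the shipped header:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    void disable_checksum_verification(ZSTD_DCtx* dctx)
    {
        /* Decompression will no longer fail on checksum mismatches. */
        ZSTD_DCtx_setParameter(dctx, ZSTD_d_forceIgnoreChecksum, ZSTD_d_ignoreChecksum);
    }
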
+
+/* ZSTD_d_refMultipleDDicts
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * If enabled and dctx is allocated on the heap, then additional memory will be allocated
+ * to store references to multiple ZSTD_DDict. That is, multiple calls of ZSTD_DCtx_refDDict()
+ * using a given ZSTD_DCtx, rather than overwriting the previous DDict reference, will instead
+ * store all references. At decompression time, the appropriate dictID is selected
+ * from the set of DDicts based on the dictID in the frame.
+ *
+ * Usage is simply calling ZSTD_DCtx_refDDict() on multiple dict buffers.
+ *
+ * Param has values of type ZSTD_refMultipleDDicts_e.
+ *
+ * WARNING: Enabling this parameter and calling ZSTD_DCtx_refDDict() will trigger memory
+ * allocation for the hash table. ZSTD_freeDCtx() also frees this memory.
+ * Memory is allocated as per ZSTD_DCtx::customMem.
+ *
+ * Although this function allocates memory for the table, the user is still responsible for
+ * memory management of the underlying ZSTD_DDict* themselves.
+ */
+#define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4
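
A minimal sketch of attaching several prepared dictionaries so that the frame's dictID selects the right one; the enum value ZSTD_rmd_refMultipleDDicts is assumed from ZSTD_refMultipleDDicts_e and should be verified against the shipped header:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    void attach_ddicts(ZSTD_DCtx* dctx, const ZSTD_DDict* const* ddicts, size_t nbDDicts)
    {
        size_t i;
        ZSTD_DCtx_setParameter(dctx, ZSTD_d_refMultipleDDicts, ZSTD_rmd_refMultipleDDicts);
        for (i = 0; i < nbDDicts; i++)
            ZSTD_DCtx_refDDict(dctx, ddicts[i]);   /* each call adds to the set */
    }
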
+
+
/*! ZSTD_DCtx_setFormat() :
+ * This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter().
* Instruct the decoder context about what kind of data to decode next.
* This instruction is mandatory to decode data without a fully-formed header,
 * such as ZSTD_f_zstd1_magicless for example.
* @return : 0, or an error code (which can be tested using ZSTD_isError()). */
-ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
+ZSTD_DEPRECATED("use ZSTD_DCtx_setParameter() instead")
+size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
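
The recommended replacement, sketched (experimental API; helper name illustrative):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    void expect_magicless_frames(ZSTD_DCtx* dctx)
    {
        ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, ZSTD_f_zstd1_magicless);
    }
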
/*! ZSTD_decompressStream_simpleArgs() :
* Same as ZSTD_decompressStream(),
@@ -1711,8 +2145,9 @@ ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs (
********************************************************************/
/*===== Advanced Streaming compression functions =====*/
-/**! ZSTD_initCStream_srcSize() :
- * This function is deprecated, and equivalent to:
+
+/*! ZSTD_initCStream_srcSize() :
+ * This function is DEPRECATED, and equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
* ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
@@ -1721,15 +2156,15 @@ ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs (
* pledgedSrcSize must be correct. If it is not known at init time, use
* ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
* "0" also disables frame content size field. It may be enabled in the future.
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ * This prototype will generate compilation warnings.
*/
-ZSTDLIB_API size_t
-ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
int compressionLevel,
unsigned long long pledgedSrcSize);
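
The equivalent call sequence listed in the comment above, written out as a helper (illustrative name):

    #include <zstd.h>

    size_t init_cstream_srcsize_modern(ZSTD_CStream* zcs, int level,
                                       unsigned long long pledgedSrcSize)
    {
        size_t err;
        err = ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
        if (ZSTD_isError(err)) return err;
        err = ZSTD_CCtx_refCDict(zcs, NULL);   /* clear any dictionary */
        if (ZSTD_isError(err)) return err;
        err = ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, level);
        if (ZSTD_isError(err)) return err;
        return ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
    }
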
-/**! ZSTD_initCStream_usingDict() :
- * This function is deprecated, and is equivalent to:
+/*! ZSTD_initCStream_usingDict() :
+ * This function is DEPRECATED, and is equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
* ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
@@ -1738,15 +2173,15 @@ ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
* dict == NULL or dictSize < 8, in which case no dict is used.
* Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if
* it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ * This prototype will generate compilation warnings.
*/
-ZSTDLIB_API size_t
-ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
const void* dict, size_t dictSize,
int compressionLevel);
-/**! ZSTD_initCStream_advanced() :
- * This function is deprecated, and is approximately equivalent to:
+/*! ZSTD_initCStream_advanced() :
+ * This function is DEPRECATED, and is approximately equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* // Pseudocode: Set each zstd parameter and leave the rest as-is.
* for ((param, value) : params) {
@@ -1758,25 +2193,26 @@ ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
* dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.
* pledgedSrcSize must be correct.
* If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ * This prototype will generate compilation warnings.
*/
-ZSTDLIB_API size_t
-ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
const void* dict, size_t dictSize,
ZSTD_parameters params,
unsigned long long pledgedSrcSize);
-/**! ZSTD_initCStream_usingCDict() :
- * This function is deprecated, and equivalent to:
+/*! ZSTD_initCStream_usingCDict() :
+ * This function is DEPRECATED, and equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* ZSTD_CCtx_refCDict(zcs, cdict);
- *
+ *
* note : cdict will just be referenced, and must outlive compression session
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ * This prototype will generate compilation warnings.
*/
-ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
+size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
-/**! ZSTD_initCStream_usingCDict_advanced() :
+/*! ZSTD_initCStream_usingCDict_advanced() :
* This function is DEPRECATED, and is approximately equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
@@ -1789,18 +2225,21 @@ ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDi
* same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
* pledgedSrcSize must be correct. If srcSize is not known at init time, use
* value ZSTD_CONTENTSIZE_UNKNOWN.
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ * This prototype will generate compilation warnings.
*/
-ZSTDLIB_API size_t
-ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
+size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
const ZSTD_CDict* cdict,
ZSTD_frameParameters fParams,
unsigned long long pledgedSrcSize);
/*! ZSTD_resetCStream() :
- * This function is deprecated, and is equivalent to:
+ * This function is DEPRECATED, and is equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ * Note: ZSTD_resetCStream() interprets pledgedSrcSize == 0 as ZSTD_CONTENTSIZE_UNKNOWN, but
+ * ZSTD_CCtx_setPledgedSrcSize() does not do the same, so ZSTD_CONTENTSIZE_UNKNOWN must be
+ * explicitly specified.
*
* start a new frame, using same parameters from previous frame.
* This is typically useful to skip dictionary loading stage, since it will re-use it in-place.
@@ -1810,9 +2249,10 @@ ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
* For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
 * but it will change to mean "empty" in a future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
* @return : 0, or an error code (which can be tested using ZSTD_isError())
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ * This prototype will generate compilation warnings.
*/
-ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
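
A sketch of the replacement that preserves the old "0 means unknown" convention explicitly, as the note above requires (illustrative helper name):

    #include <zstd.h>

    size_t reset_cstream_modern(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
    {
        size_t const err = ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
        if (ZSTD_isError(err)) return err;
        if (pledgedSrcSize == 0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
        return ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
    }
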
typedef struct {
@@ -1849,7 +2289,8 @@ ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
/*===== Advanced Streaming decompression functions =====*/
-/**
+
+/*!
* This function is deprecated, and is equivalent to:
*
* ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
@@ -1860,7 +2301,7 @@ ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
*/
ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
-/**
+/*!
* This function is deprecated, and is equivalent to:
*
* ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
@@ -1871,7 +2312,7 @@ ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dic
*/
ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
-/**
+/*!
* This function is deprecated, and is equivalent to:
*
* ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
@@ -1898,8 +2339,7 @@ ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
ZSTD_CCtx object can be re-used multiple times within successive compression operations.
Start by initializing a context.
- Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression,
- or ZSTD_compressBegin_advanced(), for finer parameter control.
+ Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression.
It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx()
Then, consume your input using ZSTD_compressContinue().
@@ -1924,16 +2364,18 @@ ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
/*===== Buffer-less streaming compression functions =====*/
ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
-ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
-ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-
-/*-
+/* ZSTD_compressBegin_advanced() and ZSTD_compressBegin_usingCDict_advanced() are now DEPRECATED and will generate a compiler warning. */
+ZSTD_DEPRECATED("use advanced API to access custom parameters")
+size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTD_DEPRECATED("use advanced API to access custom parameters")
+size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
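
A minimal sketch of the begin/continue/end sequence from the buffer-less streaming notes above, for a source split into two chunks (prior input must remain accessible and unmodified up to the window size; error checks trimmed; helper name and level illustrative):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    size_t bufferless_compress_two_chunks(ZSTD_CCtx* cctx,
                                          void* dst, size_t dstCapacity,
                                          const void* chunk1, size_t size1,
                                          const void* chunk2, size_t size2)
    {
        size_t pos = 0, r;
        ZSTD_compressBegin(cctx, 3 /* compression level */);
        r = ZSTD_compressContinue(cctx, (char*)dst + pos, dstCapacity - pos, chunk1, size1);
        if (ZSTD_isError(r)) return r;
        pos += r;
        r = ZSTD_compressEnd(cctx, (char*)dst + pos, dstCapacity - pos, chunk2, size2);
        if (ZSTD_isError(r)) return r;
        return pos + r;   /* total compressed frame size */
    }
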
+/**
Buffer-less streaming decompression (synchronous mode)
A ZSTD_DCtx object is required to track streaming operations.
diff --git a/Utilities/cmzstd/lib/common/zstd_errors.h b/Utilities/cmzstd/lib/zstd_errors.h
index 998398e7e5..fa3686b772 100644
--- a/Utilities/cmzstd/lib/common/zstd_errors.h
+++ b/Utilities/cmzstd/lib/zstd_errors.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -77,6 +77,7 @@ typedef enum {
ZSTD_error_frameIndex_tooLarge = 100,
ZSTD_error_seekableIO = 102,
ZSTD_error_dstBuffer_wrong = 104,
+ ZSTD_error_srcBuffer_wrong = 105,
ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
} ZSTD_ErrorCode;