zstd: Update to upstream version 1.3.1
It is now dual-licensed BSD-3-Clause and GPL-2.0; we use the former. The PATENTS file is no longer applicable \o/ Also add zstd to COPYRIGHT.txt.

Commit: c3ab9eb590 (parent: 560fc0f199)
COPYRIGHT.txt:

@@ -321,6 +321,11 @@ Comment: zlib
 Copyright: 1995-2017, Jean-loup Gailly and Mark Adler
 License: Zlib
 
+Files: ./thirdparty/zstd/
+Comment: Zstandard
+Copyright: 2016-2017, Facebook, Inc.
+License: BSD-3-Clause
+
 License: Apache-2.0

@@ -372,9 +377,9 @@ License: BSD-3-clause
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.
 .
-3. Neither the name of the University nor the names of its contributors
-   may be used to endorse or promote products derived from this software
-   without specific prior written permission.
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
 .
 THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
thirdparty/README.md (vendored, 7 lines changed):

@@ -369,13 +369,14 @@ Files extracted from upstream source:
 
 - all .c and .h files
 
 
 ## zstd
 
 - Upstream: https://github.com/facebook/zstd
-- Version: 1.3.0
+- Version: 1.3.1
 - License: BSD-3-Clause
 
 Files extracted from upstream source:
 
-- all .c and .h under lib/
-- README.md, LICENSE, PATENTS
+- lib/{common/,compress/,decompress/,zstd.h}
+- README.md, LICENSE
thirdparty/zstd/PATENTS (vendored, 33 lines removed):

@@ -1,33 +0,0 @@ (file deleted; its previous content follows)

Additional Grant of Patent Rights Version 2

"Software" means the Zstandard software distributed by Facebook, Inc.

Facebook, Inc. ("Facebook") hereby grants to each recipient of the Software
("you") a perpetual, worldwide, royalty-free, non-exclusive, irrevocable
(subject to the termination provision below) license under any Necessary
Claims, to make, have made, use, sell, offer to sell, import, and otherwise
transfer the Software. For avoidance of doubt, no license is granted under
Facebook's rights in any patent claims that are infringed by (i) modifications
to the Software made by you or any third party or (ii) the Software in
combination with any software or other technology.

The license granted hereunder will terminate, automatically and without notice,
if you (or any of your subsidiaries, corporate affiliates or agents) initiate
directly or indirectly, or take a direct financial interest in, any Patent
Assertion: (i) against Facebook or any of its subsidiaries or corporate
affiliates, (ii) against any party if such Patent Assertion arises in whole or
in part from any software, technology, product or service of Facebook or any of
its subsidiaries or corporate affiliates, or (iii) against any party relating
to the Software. Notwithstanding the foregoing, if Facebook or any of its
subsidiaries or corporate affiliates files a lawsuit alleging patent
infringement against you in the first instance, and you respond by filing a
patent infringement counterclaim in that lawsuit against that party that is
unrelated to the Software, the license granted hereunder will not terminate
under section (i) of this paragraph due to such counterclaim.

A "Necessary Claim" is a claim of a patent owned by Facebook that is
necessarily infringed by the Software standing alone.

A "Patent Assertion" is any lawsuit or other action alleging direct, indirect,
or contributory infringement or inducement to infringe any patent, including a
cross-claim or counterclaim.
thirdparty/zstd/README.md (vendored, 153 lines removed):

@@ -1,153 +0,0 @@ (upstream README removed from the vendored copy; its previous content follows)
__Zstandard__, or `zstd` as short version, is a fast lossless compression algorithm,
targeting real-time compression scenarios at zlib-level and better compression ratios.

It is provided as an open-source BSD-licensed **C** library,
and a command line utility producing and decoding `.zst` and `.gz` files.
For other programming languages,
you can consult a list of known ports on [Zstandard homepage](http://www.zstd.net/#other-languages).

| dev branch status |
|-------------------|
| [![Build Status][travisDevBadge]][travisLink] [![Build status][AppveyorDevBadge]][AppveyorLink] [![Build status][CircleDevBadge]][CircleLink] |

[travisDevBadge]: https://travis-ci.org/facebook/zstd.svg?branch=dev "Continuous Integration test suite"
[travisLink]: https://travis-ci.org/facebook/zstd
[AppveyorDevBadge]: https://ci.appveyor.com/api/projects/status/xt38wbdxjk5mrbem/branch/dev?svg=true "Windows test suite"
[AppveyorLink]: https://ci.appveyor.com/project/YannCollet/zstd-p0yf0
[CircleDevBadge]: https://circleci.com/gh/facebook/zstd/tree/dev.svg?style=shield "Short test suite"
[CircleLink]: https://circleci.com/gh/facebook/zstd

As a reference, several fast compression algorithms were tested and compared
on a server running Linux Debian (`Linux version 4.8.0-1-amd64`),
with a Core i7-6700K CPU @ 4.0GHz,
using [lzbench], an open-source in-memory benchmark by @inikep
compiled with GCC 6.3.0,
on the [Silesia compression corpus].

[lzbench]: https://github.com/inikep/lzbench
[Silesia compression corpus]: http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia

| Compressor name   | Ratio | Compression | Decompress. |
| ----------------- | ----- | ----------- | ----------- |
| **zstd 1.1.3 -1** | 2.877 |  430 MB/s   |  1110 MB/s  |
| zlib 1.2.8 -1     | 2.743 |  110 MB/s   |   400 MB/s  |
| brotli 0.5.2 -0   | 2.708 |  400 MB/s   |   430 MB/s  |
| quicklz 1.5.0 -1  | 2.238 |  550 MB/s   |   710 MB/s  |
| lzo1x 2.09 -1     | 2.108 |  650 MB/s   |   830 MB/s  |
| lz4 1.7.5         | 2.101 |  720 MB/s   |  3600 MB/s  |
| snappy 1.1.3      | 2.091 |  500 MB/s   |  1650 MB/s  |
| lzf 3.6 -1        | 2.077 |  400 MB/s   |   860 MB/s  |

[zlib]: http://www.zlib.net/
[LZ4]: http://www.lz4.org/

Zstd can also offer stronger compression ratios at the cost of compression speed.
Speed vs Compression trade-off is configurable by small increments. Decompression speed is preserved and remains roughly the same at all settings, a property shared by most LZ compression algorithms, such as [zlib] or lzma.

The following tests were run
on a server running Linux Debian (`Linux version 4.8.0-1-amd64`)
with a Core i7-6700K CPU @ 4.0GHz,
using [lzbench], an open-source in-memory benchmark by @inikep
compiled with GCC 6.3.0,
on the [Silesia compression corpus].

Compression Speed vs Ratio | Decompression Speed
---------------------------|--------------------
![Compression Speed vs Ratio](doc/images/Cspeed4.png "Compression Speed vs Ratio") | ![Decompression Speed](doc/images/Dspeed4.png "Decompression Speed")

Several algorithms can produce higher compression ratios, but at slower speeds, falling outside of the graph.
For a larger picture including very slow modes, [click on this link](doc/images/DCspeed5.png).
### The case for Small Data compression

Previous charts provide results applicable to typical file and stream scenarios (several MB). Small data comes with different perspectives.

The smaller the amount of data to compress, the more difficult it is to compress. This problem is common to all compression algorithms, and the reason is that compression algorithms learn from past data how to compress future data. But at the beginning of a new data set, there is no "past" to build upon.

To solve this situation, Zstd offers a __training mode__, which can be used to tune the algorithm for a selected type of data.
Training Zstandard is achieved by providing it with a few samples (one file per sample). The result of this training is stored in a file called "dictionary", which must be loaded before compression and decompression.
Using this dictionary, the compression ratio achievable on small data improves dramatically.

The following example uses the `github-users` [sample set](https://github.com/facebook/zstd/releases/tag/v1.1.3), created from the [github public API](https://developer.github.com/v3/users/#get-all-users).
It consists of roughly 10K records weighing about 1KB each.

Compression Ratio | Compression Speed | Decompression Speed
------------------|-------------------|--------------------
![Compression Ratio](doc/images/dict-cr.png "Compression Ratio") | ![Compression Speed](doc/images/dict-cs.png "Compression Speed") | ![Decompression Speed](doc/images/dict-ds.png "Decompression Speed")

These compression gains are achieved while simultaneously providing _faster_ compression and decompression speeds.

Training works if there is some correlation in a family of small data samples. The more data-specific a dictionary is, the more efficient it is (there is no _universal dictionary_).
Hence, deploying one dictionary per type of data will provide the greatest benefits.
Dictionary gains are mostly effective in the first few KB. Then, the compression algorithm will gradually use previously decoded content to better compress the rest of the file.
#### Dictionary compression How To:

1) Create the dictionary

`zstd --train FullPathToTrainingSet/* -o dictionaryName`

2) Compress with the dictionary

`zstd -D dictionaryName FILE`

3) Decompress with the dictionary

`zstd -D dictionaryName --decompress FILE.zst`

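As an aside to the CLI steps above, the same workflow exists at the library level. This is only a hedged sketch of the upstream C API (not part of this commit, and `zdict.h` is not among the files Godot extracts per thirdparty/README.md); function names below are the upstream ones, everything else is illustrative.

```c
/* Sketch: library-level counterpart of the CLI dictionary workflow.
 * Assumes a full upstream zstd build providing zstd.h and zdict.h. */
#include <stddef.h>
#include <zstd.h>
#include <zdict.h>

/* 1) Train a dictionary from samples laid out back-to-back in one buffer. */
static size_t make_dict(void* dictBuf, size_t dictCap,
                        const void* samples, const size_t* sampleSizes, unsigned nbSamples)
{
    return ZDICT_trainFromBuffer(dictBuf, dictCap, samples, sampleSizes, nbSamples);
}

/* 2) Compress one small record with the trained dictionary. */
static size_t compress_with_dict(void* dst, size_t dstCap,
                                 const void* src, size_t srcSize,
                                 const void* dict, size_t dictSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t const r = ZSTD_compress_usingDict(cctx, dst, dstCap, src, srcSize,
                                             dict, dictSize, 3 /* compression level */);
    ZSTD_freeCCtx(cctx);
    return r;  /* check with ZSTD_isError() */
}
```

Decompression mirrors step 2 with `ZSTD_decompress_usingDict()` on a `ZSTD_DCtx`.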
### Build

Once you have the repository cloned, there are multiple ways provided to build Zstandard.

#### Makefile

If your system is compatible with a standard `make` (or `gmake`) binary generator,
you can simply run it at the root directory.
It will generate `zstd` in the root directory.

Other available options include:
- `make install` : create and install the zstd binary, library and man page
- `make test` : create and run `zstd` and test tools on the local platform

#### cmake

A `cmake` project generator is provided within `build/cmake`.
It can generate Makefiles or other build scripts
to create the `zstd` binary, and the `libzstd` dynamic and static libraries.

#### Meson

A Meson project is provided within `contrib/meson`.

#### Visual Studio (Windows)

Going into the `build` directory, you will find additional possibilities:
- Projects for Visual Studio 2005, 2008 and 2010
  + The VS2010 project is compatible with VS2012, VS2013 and VS2015
- Automated build scripts for the Visual compiler by @KrzysFR, in `build/VS_scripts`,
  which will build the `zstd` CLI and `libzstd` library without any need to open a Visual Studio solution.
### Status

Zstandard is currently deployed within Facebook. It is used daily to compress and decompress very large amounts of data in multiple formats and use cases.
Zstandard is considered safe for production environments.

### License

Zstandard is [BSD-licensed](LICENSE). We also provide an [additional patent grant](PATENTS).

### Contributing

The "dev" branch is the one where all contributions will be merged before reaching "master".
If you plan to propose a patch, please commit into the "dev" branch or its own feature branch.
Direct commits to "master" are not permitted.
For more information, please read [CONTRIBUTING](CONTRIBUTING.md).

### Miscellaneous

Zstd's entropy stage is provided by [Huff0 and FSE, from the Finite State Entropy library](https://github.com/Cyan4973/FiniteStateEntropy).
thirdparty/zstd/common/bitstream.h (vendored, 81 lines changed; removed lines are shown before their replacements in each hunk):

@@ -80,9 +80,9 @@ extern "C" {
*  bitStream encoding API (write forward)
********************************************/
/* bitStream can mix input from multiple sources.
* A critical property of these streams is that they encode and decode in **reverse** direction.
* So the first bit sequence you add will be the last to be read, like a LIFO stack.
*/
* A critical property of these streams is that they encode and decode in **reverse** direction.
* So the first bit sequence you add will be the last to be read, like a LIFO stack.
*/
typedef struct
{
size_t bitContainer;

@@ -203,7 +203,7 @@ static const unsigned BIT_mask[] = { 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F,
/*! BIT_initCStream() :
*  `dstCapacity` must be > sizeof(size_t)
*  @return : 0 if success,
otherwise an error code (can be tested using ERR_isError() ) */
*            otherwise an error code (can be tested using ERR_isError()) */
MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
void* startPtr, size_t dstCapacity)
{

@@ -217,8 +217,8 @@ MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
}

/*! BIT_addBits() :
can add up to 26 bits into `bitC`.
Does not check for register overflow ! */
*  can add up to 26 bits into `bitC`.
*  Note : does not check for register overflow ! */
MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
size_t value, unsigned nbBits)
{

@@ -268,7 +268,7 @@ MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)

/*! BIT_closeCStream() :
*  @return : size of CStream, in bytes,
or 0 if it could not fit into dstBuffer */
*            or 0 if it could not fit into dstBuffer */
MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
{
BIT_addBitsFast(bitC, 1, 1);   /* endMark */

@@ -279,14 +279,14 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)

/*-********************************************************
* bitStream decoding
*  bitStream decoding
**********************************************************/
/*! BIT_initDStream() :
*  Initialize a BIT_DStream_t.
*  `bitD` : a pointer to an already allocated BIT_DStream_t structure.
*  `srcSize` must be the *exact* size of the bitStream, in bytes.
*  @return : size of stream (== srcSize) or an errorCode if a problem is detected
*/
*  Initialize a BIT_DStream_t.
*  `bitD` : a pointer to an already allocated BIT_DStream_t structure.
*  `srcSize` must be the *exact* size of the bitStream, in bytes.
*  @return : size of stream (== srcSize), or an errorCode if a problem is detected
*/
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }

@@ -305,29 +305,30 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
        /* fall-through */
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
        /* fall-through */

case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
        /* fall-through */
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
        /* fall-through */

case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
        /* fall-through */
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
        /* fall-through */

case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
        /* fall-through */
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
        /* fall-through */

case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
        /* fall-through */
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
        /* fall-through */

case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
        /* fall-through */
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
        /* fall-through */

default: break;
default: break;
}
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
  bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
  if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */
}
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
  bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
  if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
}

@@ -363,9 +364,8 @@ MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
* local register is not modified.
* On 32-bits, maxNbBits==24.
* On 64-bits, maxNbBits==56.
* @return : value extracted
*/
MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
* @return : value extracted */
MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
{
#if defined(__BMI__) && defined(__GNUC__)   /* experimental; fails if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8 */
return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);

@@ -392,8 +392,7 @@ MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
/*! BIT_readBits() :
*  Read (consume) next n bits from local register and update.
*  Pay attention to not read more than nbBits contained into local register.
*  @return : extracted value.
*/
*  @return : extracted value. */
MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
{
size_t const value = BIT_lookBits(bitD, nbBits);

@@ -402,7 +401,7 @@ MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
}

/*! BIT_readBitsFast() :
*  unsafe version; only works only if nbBits >= 1 */
*  unsafe version; only works only if nbBits >= 1 */
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
{
size_t const value = BIT_lookBitsFast(bitD, nbBits);

@@ -412,10 +411,10 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
}

/*! BIT_reloadDStream() :
*  Refill `bitD` from buffer previously set in BIT_initDStream() .
*  This function is safe, it guarantees it will not read beyond src buffer.
*  @return : status of `BIT_DStream_t` internal register.
   if status == BIT_DStream_unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */
*  Refill `bitD` from buffer previously set in BIT_initDStream() .
*  This function is safe, it guarantees it will not read beyond src buffer.
*  @return : status of `BIT_DStream_t` internal register.
*            when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* overflow detected, like end of stream */

@@ -446,8 +445,8 @@ MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
}

/*! BIT_endOfDStream() :
*  @return Tells if DStream has exactly reached its end (all bits consumed).
*/
*  @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
*/
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
{
return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
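The hunks above only reword the doc comments, but the contract they document (write forward, read in reverse, like a LIFO stack) is easy to miss. The following is a hedged sketch, not zstd code: it uses the internal `MEM_STATIC` helpers whose signatures appear in the diff, from a hypothetical translation unit inside the library that includes `bitstream.h` directly.

```c
/* Illustrative round-trip showing the encode-forward / read-backward contract. */
#include <stddef.h>
#include "bitstream.h"   /* internal header; pulls in mem.h and error_private.h */

static void bitstream_roundtrip_sketch(void)
{
    char buf[16];
    BIT_CStream_t c;
    BIT_DStream_t d;

    if (ERR_isError(BIT_initCStream(&c, buf, sizeof(buf)))) return;
    BIT_addBits(&c, 0x5,  3);   /* written first...            */
    BIT_addBits(&c, 0x2A, 6);   /* ...written second           */
    BIT_flushBits(&c);
    {   size_t const csize = BIT_closeCStream(&c);   /* 0 => did not fit in dst */
        if (csize == 0) return;
        if (ERR_isError(BIT_initDStream(&d, buf, csize))) return;
        /* values come back in reverse order of writing */
        {   size_t const second = BIT_readBits(&d, 6);   /* 0x2A */
            size_t const first  = BIT_readBits(&d, 3);   /* 0x5  */
            (void)second; (void)first;
            /* BIT_endOfDStream(&d) reports 1 once every bit is consumed */
        }
    }
}
```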
thirdparty/zstd/common/compiler.h (vendored, new file, 85 lines):

@@ -0,0 +1,85 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
*/

#ifndef ZSTD_COMPILER_H
#define ZSTD_COMPILER_H

/*-*******************************************************
* Compiler specifics
*********************************************************/
/* force inlining */
#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#  define INLINE_KEYWORD inline
#else
#  define INLINE_KEYWORD
#endif

#if defined(__GNUC__)
#  define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
#  define FORCE_INLINE_ATTR __forceinline
#else
#  define FORCE_INLINE_ATTR
#endif

/**
 * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
 * parameters. They must be inlined for the compiler to elimininate the constant
 * branches.
 */
#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
/**
 * HINT_INLINE is used to help the compiler generate better code. It is *not*
 * used for "templates", so it can be tweaked based on the compilers
 * performance.
 *
 * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
 * always_inline attribute.
 *
 * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline
 * attribute.
 */
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
#  define HINT_INLINE static INLINE_KEYWORD
#else
#  define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
#endif

/* force no inlining */
#ifdef _MSC_VER
#  define FORCE_NOINLINE static __declspec(noinline)
#else
#  ifdef __GNUC__
#    define FORCE_NOINLINE static __attribute__((__noinline__))
#  else
#    define FORCE_NOINLINE static
#  endif
#endif

/* prefetch */
#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))   /* _mm_prefetch() is not defined outside of x86/x64 */
#  include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
#  define PREFETCH(ptr)   _mm_prefetch((const char*)ptr, _MM_HINT_T0)
#elif defined(__GNUC__)
#  define PREFETCH(ptr)   __builtin_prefetch(ptr, 0, 0)
#else
#  define PREFETCH(ptr)   /* disabled */
#endif

/* disable warnings */
#ifdef _MSC_VER    /* Visual Studio */
#  include <intrin.h>                    /* For Visual 2005 */
#  pragma warning(disable : 4100)        /* disable: C4100: unreferenced formal parameter */
#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
#  pragma warning(disable : 4204)        /* disable: C4204: non-constant aggregate initializer */
#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
#endif

#endif /* ZSTD_COMPILER_H */
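The "C template" idea behind `FORCE_INLINE_TEMPLATE` is what the later hunks rely on (e.g. `FSE_decompress_usingDTable_generic(..., const unsigned fast)` in fse_decompress.c). Here is a hedged sketch of the pattern; the function names are illustrative only, not part of zstd.

```c
/* Sketch: a generic worker takes a constant flag and is force-inlined into
 * thin wrappers, so the compiler can fold away the flag's branch entirely. */
#include <stddef.h>
#include "compiler.h"

FORCE_INLINE_TEMPLATE size_t sum_generic(const unsigned char* src, size_t size, const unsigned fast)
{
    size_t acc = 0;
    size_t i;
    for (i = 0; i < size; i++) {
        if (fast) acc += src[i];                     /* unchecked path */
        else      acc += src[i] ? src[i] : 1u;       /* checked path   */
    }
    return acc;
}

/* Two entry points; `fast` is a compile-time constant in each instantiation. */
static size_t sum_fast(const unsigned char* s, size_t n) { return sum_generic(s, n, 1); }
static size_t sum_safe(const unsigned char* s, size_t n) { return sum_generic(s, n, 0); }
```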
thirdparty/zstd/common/error_private.c (vendored, 18 lines changed; removed lines are shown before their replacements in each hunk):

@@ -1,10 +1,10 @@
/**
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
*/

/* The purpose of this file is to have a single list of error strings embedded in binary */

@@ -20,19 +20,17 @@ const char* ERR_getErrorString(ERR_enum code)
case PREFIX(GENERIC): return "Error (generic)";
case PREFIX(prefix_unknown): return "Unknown frame descriptor";
case PREFIX(version_unsupported): return "Version not supported";
case PREFIX(parameter_unknown): return "Unknown parameter type";
case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
case PREFIX(frameParameter_unsupportedBy32bits): return "Frame parameter unsupported in 32-bits mode";
case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
case PREFIX(compressionParameter_unsupported): return "Compression parameter is not supported";
case PREFIX(compressionParameter_outOfBound): return "Compression parameter is out of bound";
case PREFIX(corruption_detected): return "Corrupted block detected";
case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
case PREFIX(parameter_unsupported): return "Unsupported parameter";
case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
case PREFIX(init_missing): return "Context should be init first";
case PREFIX(memory_allocation): return "Allocation error : not enough memory";
case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
case PREFIX(srcSize_wrong): return "Src size is incorrect";
case PREFIX(corruption_detected): return "Corrupted block detected";
case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
thirdparty/zstd/common/error_private.h (vendored, 8 lines changed; removed lines are shown before their replacements):

@@ -1,10 +1,10 @@
/**
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
*/

/* Note : this module is expected to remain private, do not expose it */
thirdparty/zstd/common/fse.h (vendored, 16 lines changed; removed lines are shown before their replacements in each hunk):

@@ -31,13 +31,14 @@
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef FSE_H
#define FSE_H

#if defined (__cplusplus)
extern "C" {
#endif

#ifndef FSE_H
#define FSE_H


/*-*****************************************
* Dependencies

@@ -297,8 +298,10 @@ FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<
If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
*/

#endif  /* FSE_H */

#ifdef FSE_STATIC_LINKING_ONLY
#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
#define FSE_H_FSE_STATIC_LINKING_ONLY

/* *** Dependency *** */
#include "bitstream.h"

@@ -381,6 +384,11 @@ size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog);
/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */

typedef enum {
    FSE_repeat_none,   /**< Cannot use the previous table */
    FSE_repeat_check,  /**< Can use the previous table but it must be checked */
    FSE_repeat_valid   /**< Can use the previous table and it is asumed to be valid */
} FSE_repeat;

/* *****************************************
* FSE symbol compression API

@@ -694,5 +702,3 @@ MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
#if defined (__cplusplus)
}
#endif

#endif  /* FSE_H */
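The guard restructuring above (a dedicated `FSE_H_FSE_STATIC_LINKING_ONLY` guard, and `#endif /* FSE_H */` moved ahead of the static-only section) exists so a translation unit can opt into the unstable static-linking API even after fse.h was already included without it. This mirrors exactly what fse_decompress.c does in this same commit; a minimal sketch:

```c
/* Sketch of the static-linking-only include pattern (same idea for huf.h). */
#include "fse.h"                    /* public API only */

#define FSE_STATIC_LINKING_ONLY     /* opt in to the unstable, static-only API */
#include "fse.h"                    /* now also exposes the DTable helpers, FSE_repeat, ... */
```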
thirdparty/zstd/common/fse_decompress.c (vendored, 25 lines changed; removed lines are shown before their replacements in each hunk):

@@ -33,35 +33,16 @@
****************************************************************** */


/* **************************************************************
*  Compiler specifics
****************************************************************/
#ifdef _MSC_VER    /* Visual Studio */
#  define FORCE_INLINE static __forceinline
#  include <intrin.h>                    /* For Visual 2005 */
#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
#else
#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#    ifdef __GNUC__
#      define FORCE_INLINE static inline __attribute__((always_inline))
#    else
#      define FORCE_INLINE static inline
#    endif
#  else
#    define FORCE_INLINE static
#  endif /* __STDC_VERSION__ */
#endif


/* **************************************************************
*  Includes
****************************************************************/
#include <stdlib.h>     /* malloc, free, qsort */
#include <string.h>     /* memcpy, memset */
#include "bitstream.h"
#include "compiler.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#include "error_private.h"


/* **************************************************************

@@ -216,7 +197,7 @@ size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
return 0;
}

FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const FSE_DTable* dt, const unsigned fast)
thirdparty/zstd/common/huf.h (vendored, 11 lines changed; removed lines are shown before their replacements in each hunk):

@@ -31,13 +31,13 @@
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef HUF_H_298734234
#define HUF_H_298734234

#if defined (__cplusplus)
extern "C" {
#endif

#ifndef HUF_H_298734234
#define HUF_H_298734234

/* *** Dependencies *** */
#include <stddef.h>    /* size_t */

@@ -124,6 +124,7 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, const
#define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10)
#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))

#endif   /* HUF_H_298734234 */

/* ******************************************************************
*  WARNING !!

@@ -132,7 +133,8 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, const
*  because they are not guaranteed to remain stable in the future.
*  Only consider them in association with static linking.
*******************************************************************/
#ifdef HUF_STATIC_LINKING_ONLY
#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
#define HUF_H_HUF_STATIC_LINKING_ONLY

/* *** Dependencies *** */
#include "mem.h"   /* U32 */

@@ -295,9 +297,6 @@ size_t HUF_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* c

#endif /* HUF_STATIC_LINKING_ONLY */


#if defined (__cplusplus)
}
#endif

#endif   /* HUF_H_298734234 */
thirdparty/zstd/common/mem.h (vendored, 12 lines changed; removed lines are shown before their replacements in each hunk):

@@ -1,10 +1,10 @@
/**
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
*/

#ifndef MEM_H_MODULE

@@ -110,7 +110,7 @@ Only use if no other choice to achieve best performance on target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
MEM_STATIC U64 MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }
MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }

@@ -131,7 +131,7 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }
MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
MEM_STATIC U64 MEM_readST(const void* ptr) { return ((const unalign*)ptr)->st; }
MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalign*)ptr)->st; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
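The change here is purely a return-type fix: `MEM_readST()` reads `sizeof(size_t)` bytes, so on 32-bit targets declaring it as returning `U64` mismatched what was actually read. A hedged, standalone sketch of the idea (not zstd code):

```c
/* Sketch: a size_t-wide unaligned read must also be declared as size_t,
 * since size_t is 4 bytes on 32-bit targets and 8 bytes on 64-bit ones. */
#include <stddef.h>
#include <string.h>

static size_t readST_sketch(const void* p)   /* mirrors MEM_readST's corrected signature */
{
    size_t v;
    memcpy(&v, p, sizeof(v));   /* one of the safe unaligned-access strategies mem.h offers */
    return v;
}
```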
thirdparty/zstd/common/pool.c (vendored, 88 lines changed; removed lines are shown before their replacements in each hunk):

@@ -1,10 +1,10 @@
/**
* Copyright (c) 2016-present, Facebook, Inc.
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
*/


@@ -39,6 +39,12 @@ struct POOL_ctx_s {
size_t queueHead;
size_t queueTail;
size_t queueSize;

/* The number of threads working on jobs */
size_t numThreadsBusy;
/* Indicates if the queue is empty */
int queueEmpty;

/* The mutex protects the queue */
pthread_mutex_t queueMutex;
/* Condition variable for pushers to wait on when the queue is full */

@@ -60,30 +66,41 @@ static void* POOL_thread(void* opaque) {
for (;;) {
    /* Lock the mutex and wait for a non-empty queue or until shutdown */
    pthread_mutex_lock(&ctx->queueMutex);
    while (ctx->queueHead == ctx->queueTail && !ctx->shutdown) {

    while (ctx->queueEmpty && !ctx->shutdown) {
        pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
    }
    /* empty => shutting down: so stop */
    if (ctx->queueHead == ctx->queueTail) {
    if (ctx->queueEmpty) {
        pthread_mutex_unlock(&ctx->queueMutex);
        return opaque;
    }
    /* Pop a job off the queue */
    {   POOL_job const job = ctx->queue[ctx->queueHead];
        ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
        ctx->numThreadsBusy++;
        ctx->queueEmpty = ctx->queueHead == ctx->queueTail;
        /* Unlock the mutex, signal a pusher, and run the job */
        pthread_mutex_unlock(&ctx->queueMutex);
        pthread_cond_signal(&ctx->queuePushCond);

        job.function(job.opaque);
    }
}

    /* If the intended queue size was 0, signal after finishing job */
    if (ctx->queueSize == 1) {
        pthread_mutex_lock(&ctx->queueMutex);
        ctx->numThreadsBusy--;
        pthread_mutex_unlock(&ctx->queueMutex);
        pthread_cond_signal(&ctx->queuePushCond);
}   }
}  /* for (;;) */
/* Unreachable */
}

POOL_ctx *POOL_create(size_t numThreads, size_t queueSize) {
POOL_ctx *ctx;
/* Check the parameters */
if (!numThreads || !queueSize) { return NULL; }
if (!numThreads) { return NULL; }
/* Allocate the context and zero initialize */
ctx = (POOL_ctx *)calloc(1, sizeof(POOL_ctx));
if (!ctx) { return NULL; }

@@ -92,15 +109,17 @@ POOL_ctx *POOL_create(size_t numThreads, size_t queueSize) {
 * and full queues.
 */
ctx->queueSize = queueSize + 1;
ctx->queue = (POOL_job *)malloc(ctx->queueSize * sizeof(POOL_job));
ctx->queue = (POOL_job*) malloc(ctx->queueSize * sizeof(POOL_job));
ctx->queueHead = 0;
ctx->queueTail = 0;
pthread_mutex_init(&ctx->queueMutex, NULL);
pthread_cond_init(&ctx->queuePushCond, NULL);
pthread_cond_init(&ctx->queuePopCond, NULL);
ctx->numThreadsBusy = 0;
ctx->queueEmpty = 1;
(void)pthread_mutex_init(&ctx->queueMutex, NULL);
(void)pthread_cond_init(&ctx->queuePushCond, NULL);
(void)pthread_cond_init(&ctx->queuePopCond, NULL);
ctx->shutdown = 0;
/* Allocate space for the thread handles */
ctx->threads = (pthread_t *)malloc(numThreads * sizeof(pthread_t));
ctx->threads = (pthread_t*)malloc(numThreads * sizeof(pthread_t));
ctx->numThreads = 0;
/* Check for errors */
if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }

@@ -153,22 +172,37 @@ size_t POOL_sizeof(POOL_ctx *ctx) {
    + ctx->numThreads * sizeof(pthread_t);
}

void POOL_add(void *ctxVoid, POOL_function function, void *opaque) {
POOL_ctx *ctx = (POOL_ctx *)ctxVoid;
/**
 * Returns 1 if the queue is full and 0 otherwise.
 *
 * If the queueSize is 1 (the pool was created with an intended queueSize of 0),
 * then a queue is empty if there is a thread free and no job is waiting.
 */
static int isQueueFull(POOL_ctx const* ctx) {
    if (ctx->queueSize > 1) {
        return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
    } else {
        return ctx->numThreadsBusy == ctx->numThreads ||
               !ctx->queueEmpty;
    }
}

void POOL_add(void* ctxVoid, POOL_function function, void *opaque) {
POOL_ctx* const ctx = (POOL_ctx*)ctxVoid;
if (!ctx) { return; }

pthread_mutex_lock(&ctx->queueMutex);
{   POOL_job const job = {function, opaque};

    /* Wait until there is space in the queue for the new job */
    size_t newTail = (ctx->queueTail + 1) % ctx->queueSize;
    while (ctx->queueHead == newTail && !ctx->shutdown) {
    while (isQueueFull(ctx) && !ctx->shutdown) {
        pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
        newTail = (ctx->queueTail + 1) % ctx->queueSize;
    }
    /* The queue is still going => there is space */
    if (!ctx->shutdown) {
        ctx->queueEmpty = 0;
        ctx->queue[ctx->queueTail] = job;
        ctx->queueTail = newTail;
        ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
    }
}
pthread_mutex_unlock(&ctx->queueMutex);

@@ -183,22 +217,22 @@ struct POOL_ctx_s {
    int data;
};

POOL_ctx *POOL_create(size_t numThreads, size_t queueSize) {
POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
    (void)numThreads;
    (void)queueSize;
    return (POOL_ctx *)malloc(sizeof(POOL_ctx));
    return (POOL_ctx*)malloc(sizeof(POOL_ctx));
}

void POOL_free(POOL_ctx *ctx) {
    if (ctx) free(ctx);
void POOL_free(POOL_ctx* ctx) {
    free(ctx);
}

void POOL_add(void *ctx, POOL_function function, void *opaque) {
void POOL_add(void* ctx, POOL_function function, void* opaque) {
    (void)ctx;
    function(opaque);
}

size_t POOL_sizeof(POOL_ctx *ctx) {
size_t POOL_sizeof(POOL_ctx* ctx) {
    if (ctx==NULL) return 0;  /* supports sizeof NULL */
    return sizeof(*ctx);
}
thirdparty/zstd/common/pool.h (vendored, 20 lines changed; removed lines are shown before their replacements in each hunk):

@@ -1,11 +1,12 @@
/**
* Copyright (c) 2016-present, Facebook, Inc.
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
*/

#ifndef POOL_H
#define POOL_H

@@ -19,11 +20,10 @@ extern "C" {
typedef struct POOL_ctx_s POOL_ctx;

/*! POOL_create() :
    Create a thread pool with at most `numThreads` threads.
    `numThreads` must be at least 1.
    The maximum number of queued jobs before blocking is `queueSize`.
    `queueSize` must be at least 1.
    @return : The POOL_ctx pointer on success else NULL.
 *  Create a thread pool with at most `numThreads` threads.
 *  `numThreads` must be at least 1.
 *  The maximum number of queued jobs before blocking is `queueSize`.
 *  @return : POOL_ctx pointer on success, else NULL.
*/
POOL_ctx *POOL_create(size_t numThreads, size_t queueSize);
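For context on what the pool.c/pool.h hunks above change (queue-size-0 support, `numThreadsBusy`, `isQueueFull()`), here is a hedged usage sketch of the internal API they describe. The job struct and helper names are illustrative; only `POOL_create`, `POOL_add`, and `POOL_free` are from the header shown above.

```c
/* Sketch: submit a few jobs to the internal thread pool and shut it down. */
#include <stdio.h>
#include "pool.h"

typedef struct { int id; } MyJob;

static void runJob(void* opaque)              /* matches the POOL_function callback shape */
{
    MyJob* const job = (MyJob*)opaque;
    printf("running job %d\n", job->id);
}

static void pool_demo(void)
{
    MyJob jobs[8];
    int i;
    POOL_ctx* const pool = POOL_create(4 /* threads */, 8 /* queued jobs before blocking */);
    if (pool == NULL) return;
    for (i = 0; i < 8; i++) { jobs[i].id = i; POOL_add(pool, runJob, &jobs[i]); }
    POOL_free(pool);   /* shuts the pool down and joins its worker threads */
}
```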
thirdparty/zstd/common/threading.h (vendored, 11 lines changed; removed lines are shown before their replacements in each hunk):

@@ -1,4 +1,3 @@

/**
 * Copyright (c) 2016 Tino Reichardt
 * All rights reserved.

@@ -42,14 +41,14 @@ extern "C" {

/* mutex */
#define pthread_mutex_t           CRITICAL_SECTION
#define pthread_mutex_init(a,b)   InitializeCriticalSection((a))
#define pthread_mutex_init(a,b)   (InitializeCriticalSection((a)), 0)
#define pthread_mutex_destroy(a)  DeleteCriticalSection((a))
#define pthread_mutex_lock(a)     EnterCriticalSection((a))
#define pthread_mutex_unlock(a)   LeaveCriticalSection((a))

/* condition variable */
#define pthread_cond_t            CONDITION_VARIABLE
#define pthread_cond_init(a, b)   InitializeConditionVariable((a))
#define pthread_cond_init(a, b)   (InitializeConditionVariable((a)), 0)
#define pthread_cond_destroy(a)   /* No delete */
#define pthread_cond_wait(a, b)   SleepConditionVariableCS((a), (b), INFINITE)
#define pthread_cond_signal(a)    WakeConditionVariable((a))

@@ -80,14 +79,14 @@ int _pthread_join(pthread_t* thread, void** value_ptr);
#else  /* ZSTD_MULTITHREAD not defined */
/* No multithreading support */

#define pthread_mutex_t int   /* #define rather than typedef, as sometimes pthread support is implicit, resulting in duplicated symbols */
#define pthread_mutex_init(a,b)
#define pthread_mutex_t int   /* #define rather than typedef, because sometimes pthread support is implicit, resulting in duplicated symbols */
#define pthread_mutex_init(a,b)   ((void)a, 0)
#define pthread_mutex_destroy(a)
#define pthread_mutex_lock(a)
#define pthread_mutex_unlock(a)

#define pthread_cond_t int
#define pthread_cond_init(a,b)
#define pthread_cond_init(a,b)    ((void)a, 0)
#define pthread_cond_destroy(a)
#define pthread_cond_wait(a,b)
#define pthread_cond_signal(a)
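The point of appending `, 0` (and `((void)a, 0)` in the no-thread stubs) is that the shims now evaluate to an `int` like real `pthread_mutex_init`/`pthread_cond_init`, so callers can use one error-checking idiom on every backend. A minimal sketch of the calling pattern this enables (the function name is illustrative):

```c
/* Sketch: the same init-and-check idiom now compiles against real pthreads,
 * the Windows CRITICAL_SECTION shims, and the single-threaded stubs. */
#include "threading.h"

static int init_lock(pthread_mutex_t* m)
{
    if (pthread_mutex_init(m, NULL)) return 1;   /* real pthreads may fail; shims yield 0 */
    return 0;
}
```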
thirdparty/zstd/common/xxhash.c (vendored, 50 lines changed; removed lines are shown before their replacements in each hunk):

@@ -113,19 +113,25 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp
/* *************************************
*  Compiler Specific Options
***************************************/
#ifdef _MSC_VER    /* Visual Studio */
#  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
#  define FORCE_INLINE static __forceinline
#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#  define INLINE_KEYWORD inline
#else
#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#    ifdef __GNUC__
#      define FORCE_INLINE static inline __attribute__((always_inline))
#    else
#      define FORCE_INLINE static inline
#    endif
#  else
#    define FORCE_INLINE static
#  endif /* __STDC_VERSION__ */
#  define INLINE_KEYWORD
#endif

#if defined(__GNUC__)
#  define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
#  define FORCE_INLINE_ATTR __forceinline
#else
#  define FORCE_INLINE_ATTR
#endif

#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR


#ifdef _MSC_VER
#  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
#endif


@@ -248,7 +254,7 @@ typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
*****************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;

FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));

@@ -256,7 +262,7 @@ FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_a
        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
}

FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}

@@ -266,7 +272,7 @@ static U32 XXH_readBE32(const void* ptr)
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}

FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));

@@ -274,7 +280,7 @@ FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_a
        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
}

FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}

@@ -335,7 +341,7 @@ static U32 XXH32_round(U32 seed, U32 input)
    return seed;
}

FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;

@@ -435,7 +441,7 @@ static U64 XXH64_mergeRound(U64 acc, U64 val)
    return acc;
}

FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

@@ -584,7 +590,7 @@ XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long
}


FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

@@ -654,7 +660,7 @@ XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void*


FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem32;
    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;

@@ -704,7 +710,7 @@ XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)

/* **** XXH64 **** */

FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

@@ -771,7 +777,7 @@ XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void*


FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem64;
    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
thirdparty/zstd/common/zstd_common.c (vendored, 8 lines changed; removed lines are shown before their replacements):

@@ -1,10 +1,10 @@
/**
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
*/
62
thirdparty/zstd/common/zstd_errors.h
vendored
62
thirdparty/zstd/common/zstd_errors.h
vendored
@ -1,10 +1,10 @@
|
||||
/**
|
||||
/*
|
||||
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under the BSD-style license found in the
|
||||
* LICENSE file in the root directory of this source tree. An additional grant
|
||||
* of patent rights can be found in the PATENTS file in the same directory.
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
* in the COPYING file in the root directory of this source tree).
|
||||
*/
|
||||
|
||||
#ifndef ZSTD_ERRORS_H_398273423
|
||||
@@ -37,43 +37,41 @@ extern "C" {
/*-****************************************
* error codes list
* note : this API is still considered unstable
* it should not be used with a dynamic library
* and shall not be used with a dynamic library.
* only static linking is allowed
******************************************/
typedef enum {
ZSTD_error_no_error,
ZSTD_error_GENERIC,
ZSTD_error_prefix_unknown,
ZSTD_error_version_unsupported,
ZSTD_error_parameter_unknown,
ZSTD_error_frameParameter_unsupported,
ZSTD_error_frameParameter_unsupportedBy32bits,
ZSTD_error_frameParameter_windowTooLarge,
ZSTD_error_compressionParameter_unsupported,
ZSTD_error_compressionParameter_outOfBound,
ZSTD_error_init_missing,
ZSTD_error_memory_allocation,
ZSTD_error_stage_wrong,
ZSTD_error_dstSize_tooSmall,
ZSTD_error_srcSize_wrong,
ZSTD_error_corruption_detected,
ZSTD_error_checksum_wrong,
ZSTD_error_tableLog_tooLarge,
ZSTD_error_maxSymbolValue_tooLarge,
ZSTD_error_maxSymbolValue_tooSmall,
ZSTD_error_dictionary_corrupted,
ZSTD_error_dictionary_wrong,
ZSTD_error_dictionaryCreation_failed,
ZSTD_error_frameIndex_tooLarge,
ZSTD_error_seekableIO,
ZSTD_error_maxCode
ZSTD_error_no_error = 0,
ZSTD_error_GENERIC = 1,
ZSTD_error_prefix_unknown = 10,
ZSTD_error_version_unsupported = 12,
ZSTD_error_frameParameter_unsupported = 14,
ZSTD_error_frameParameter_windowTooLarge = 16,
ZSTD_error_corruption_detected = 20,
ZSTD_error_checksum_wrong = 22,
ZSTD_error_dictionary_corrupted = 30,
ZSTD_error_dictionary_wrong = 32,
ZSTD_error_dictionaryCreation_failed = 34,
ZSTD_error_parameter_unsupported = 40,
ZSTD_error_parameter_outOfBound = 42,
ZSTD_error_tableLog_tooLarge = 44,
ZSTD_error_maxSymbolValue_tooLarge = 46,
ZSTD_error_maxSymbolValue_tooSmall = 48,
ZSTD_error_stage_wrong = 60,
ZSTD_error_init_missing = 62,
ZSTD_error_memory_allocation = 64,
ZSTD_error_dstSize_tooSmall = 70,
ZSTD_error_srcSize_wrong = 72,
ZSTD_error_frameIndex_tooLarge = 100,
ZSTD_error_seekableIO = 102,
ZSTD_error_maxCode = 120 /* never EVER use this value directly, it may change in future versions! Use ZSTD_isError() instead */
} ZSTD_ErrorCode;

/*! ZSTD_getErrorCode() :
convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
which can be used to compare with enum list published above */
ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code);
ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */

#if defined (__cplusplus)
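The error enumeration now carries explicit, stable values, but callers are still expected to go through ZSTD_isError() and ZSTD_getErrorCode() rather than comparing a raw size_t result against the enum (and ZSTD_error_maxCode explicitly must never be used directly). A minimal sketch of that pattern, assuming only the public zstd.h and zstd_errors.h headers from lib/ are on the include path; buffer management is elided:

/* Map a zstd return value onto the error enum above. */
#include <stdio.h>
#include "zstd.h"
#include "zstd_errors.h"

int compress_or_report(void* dst, size_t dstCapacity,
                       const void* src, size_t srcSize)
{
    size_t const r = ZSTD_compress(dst, dstCapacity, src, srcSize, 3);
    if (ZSTD_isError(r)) {
        ZSTD_ErrorCode const code = ZSTD_getErrorCode(r);
        fprintf(stderr, "zstd failed: %s (%s)\n",
                ZSTD_getErrorName(r), ZSTD_getErrorString(code));
        if (code == ZSTD_error_dstSize_tooSmall) {
            /* caller could retry with a buffer of ZSTD_compressBound(srcSize) */
        }
        return -1;
    }
    return 0;   /* r is the compressed size */
}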
107 thirdparty/zstd/common/zstd_internal.h vendored
@@ -1,55 +1,28 @@
/**
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
*/

#ifndef ZSTD_CCOMMON_H_MODULE
#define ZSTD_CCOMMON_H_MODULE

/*-*******************************************************
* Compiler specifics
*********************************************************/
#ifdef _MSC_VER /* Visual Studio */
# define FORCE_INLINE static __forceinline
# include <intrin.h> /* For Visual 2005 */
# pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
# pragma warning(disable : 4324) /* disable: C4324: padded structure */
#else
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# ifdef __GNUC__
# define FORCE_INLINE static inline __attribute__((always_inline))
# else
# define FORCE_INLINE static inline
# endif
# else
# define FORCE_INLINE static
# endif /* __STDC_VERSION__ */
#endif

#ifdef _MSC_VER
# define FORCE_NOINLINE static __declspec(noinline)
#else
# ifdef __GNUC__
# define FORCE_NOINLINE static __attribute__((__noinline__))
# else
# define FORCE_NOINLINE static
# endif
#endif


/*-*************************************
* Dependencies
***************************************/
#include "compiler.h"
#include "mem.h"
#include "error_private.h"
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#ifndef XXH_STATIC_LINKING_ONLY
# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
#endif
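For context on the FORCE_INLINE_TEMPLATE renames seen throughout this update (the per-compiler definitions now live in compiler.h): the macro exists so that one generic function body can be force-inlined into thin wrappers that pass compile-time constants, letting the compiler fold the constant away and emit a specialized copy per wrapper, which is how the XXH64_digest_endian-style readers are used. A sketch of that pattern with hypothetical names (read32_endian and its wrappers are not zstd API):

#include <stdint.h>
#include <string.h>

#if defined(_MSC_VER)
#  define FORCE_INLINE_TEMPLATE static __forceinline
#elif defined(__GNUC__)
#  define FORCE_INLINE_TEMPLATE static inline __attribute__((always_inline))
#else
#  define FORCE_INLINE_TEMPLATE static inline
#endif

typedef enum { littleEndian = 0, bigEndian = 1 } endianess_e;

FORCE_INLINE_TEMPLATE uint32_t read32_endian(const void* p, endianess_e e)
{
    uint32_t v;
    memcpy(&v, p, sizeof(v));
    if (e == bigEndian) {                 /* branch folds away inside each wrapper */
        v = (v >> 24) | ((v >> 8) & 0x0000FF00u)
          | ((v << 8) & 0x00FF0000u) | (v << 24);
    }
    return v;
}

static uint32_t read32_le(const void* p) { return read32_endian(p, littleEndian); }
static uint32_t read32_be(const void* p) { return read32_endian(p, bigEndian); }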
@@ -211,20 +184,6 @@ MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd) /* s
*********************************************/
typedef struct ZSTD_stats_s ZSTD_stats_t;

typedef struct {
U32 off;
U32 len;
} ZSTD_match_t;

typedef struct {
U32 price;
U32 off;
U32 mlen;
U32 litlen;
U32 rep[ZSTD_REP_NUM];
} ZSTD_optimal_t;


typedef struct seqDef_s {
U32 offset;
U16 litLength;
@@ -242,13 +201,31 @@ typedef struct {
BYTE* ofCode;
U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
U32 longLengthPos;
/* opt */
ZSTD_optimal_t* priceTable;
ZSTD_match_t* matchTable;
U32* matchLengthFreq;
U32* litLengthFreq;
U32 rep[ZSTD_REP_NUM];
U32 repToConfirm[ZSTD_REP_NUM];
} seqStore_t;

typedef struct {
U32 off;
U32 len;
} ZSTD_match_t;

typedef struct {
U32 price;
U32 off;
U32 mlen;
U32 litlen;
U32 rep[ZSTD_REP_NUM];
} ZSTD_optimal_t;

typedef struct {
U32* litFreq;
U32* litLengthFreq;
U32* matchLengthFreq;
U32* offCodeFreq;
ZSTD_match_t* matchTable;
ZSTD_optimal_t* priceTable;

U32 matchLengthSum;
U32 matchSum;
U32 litLengthSum;
@@ -264,7 +241,19 @@ typedef struct {
U32 cachedPrice;
U32 cachedLitLength;
const BYTE* cachedLiterals;
} seqStore_t;
} optState_t;

typedef struct {
U32 hufCTable[HUF_CTABLE_SIZE_U32(255)];
FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
U32 workspace[HUF_WORKSPACE_SIZE_U32];
HUF_repeat hufCTable_repeatMode;
FSE_repeat offcode_repeatMode;
FSE_repeat matchlength_repeatMode;
FSE_repeat litlength_repeatMode;
} ZSTD_entropyCTables_t;

const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);
void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);
@@ -338,9 +327,9 @@ typedef struct {
} blockProperties_t;

/*! ZSTD_getcBlockSize() :
* Provides the size of compressed block from block header `src` */
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
blockProperties_t* bpPtr);
* Provides the size of compressed block from block header `src` */
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
blockProperties_t* bpPtr);


#endif /* ZSTD_CCOMMON_H_MODULE */
26 thirdparty/zstd/compress/fse_compress.c vendored
@@ -32,27 +32,6 @@
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */

/* **************************************************************
* Compiler specifics
****************************************************************/
#ifdef _MSC_VER /* Visual Studio */
# define FORCE_INLINE static __forceinline
# include <intrin.h> /* For Visual 2005 */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
#else
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# ifdef __GNUC__
# define FORCE_INLINE static inline __attribute__((always_inline))
# else
# define FORCE_INLINE static inline
# endif
# else
# define FORCE_INLINE static
# endif /* __STDC_VERSION__ */
#endif


/* **************************************************************
* Includes
****************************************************************/
@@ -60,13 +39,16 @@
#include <string.h> /* memcpy, memset */
#include <stdio.h> /* printf (debug) */
#include "bitstream.h"
#include "compiler.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#include "error_private.h"


/* **************************************************************
* Error Management
****************************************************************/
#define FSE_isError ERR_isError
#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */


@@ -781,7 +763,7 @@ size_t FSE_compress_usingCTable (void* dst, size_t dstSize,

size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }

#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return f
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }

/* FSE_compress_wksp() :
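The CHECK_V_F change above is a behavioral fix rather than a cleanup: the macro argument f is a full call expression, so `return f` evaluated the failing call a second time, while `return e` returns the already-stored result. A stand-alone illustration, with a hypothetical failingStep() and a simplified ERR_isError stand-in (not the library's real error test):

#include <stdio.h>
#include <stddef.h>

#define ERR_isError(code) ((code) > (size_t)-100)   /* illustrative stand-in only */

#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
#define CHECK_F(f)      { CHECK_V_F(_var_err__, f); }

static int g_calls = 0;

static size_t failingStep(void)
{
    g_calls++;                 /* with `return f`, this counter would reach 2 on failure */
    return (size_t)-1;         /* an error code */
}

static size_t doWork(void)
{
    CHECK_F( failingStep() );  /* early-returns the stored error exactly once */
    return 0;
}

int main(void)
{
    size_t const r = doWork();
    printf("result=%zu calls=%d\n", r, g_calls);   /* calls == 1 */
    return 0;
}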
7 thirdparty/zstd/compress/huf_compress.c vendored
@@ -50,13 +50,15 @@
#include "fse.h" /* header compression */
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#include "error_private.h"


/* **************************************************************
* Error Management
****************************************************************/
#define HUF_isError ERR_isError
#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return f
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }


@@ -436,7 +438,7 @@ static void HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt*

size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }

#define HUF_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
#define HUF_FLUSHBITS(s) BIT_flushBits(s)

#define HUF_FLUSHBITS_1(stream) \
if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)
@@ -451,7 +453,6 @@ size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, si
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;
size_t n;
const unsigned fast = (dstSize >= HUF_BLOCKBOUND(srcSize));
BIT_CStream_t bitC;

/* init */
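Dropping the `fast` selection means HUF_FLUSHBITS always uses the bounds-checked flush; the fast flavour skips the end-of-buffer clamp and is only legal when the destination is provably large enough, which is what the removed HUF_BLOCKBOUND test established. A simplified sketch of the distinction, using a toy stream type rather than the real BIT_CStream_t internals (little-endian host assumed, bitPos kept below 64):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef struct {
    uint64_t bitContainer;
    unsigned bitPos;          /* bits currently accumulated in bitContainer */
    char* ptr;
    char* endPtr;             /* last position where a full container write still fits */
} toyStream_t;

static void toyFlushFast(toyStream_t* s)          /* no bounds check */
{
    size_t const nbBytes = s->bitPos >> 3;
    memcpy(s->ptr, &s->bitContainer, sizeof(s->bitContainer));
    s->ptr += nbBytes;
    s->bitPos &= 7;
    s->bitContainer >>= nbBytes * 8;
}

static void toyFlush(toyStream_t* s)              /* safe: clamps to endPtr */
{
    size_t const nbBytes = s->bitPos >> 3;
    memcpy(s->ptr, &s->bitContainer, sizeof(s->bitContainer));
    s->ptr += nbBytes;
    if (s->ptr > s->endPtr) s->ptr = s->endPtr;   /* overrun stays detectable by the caller */
    s->bitPos &= 7;
    s->bitContainer >>= nbBytes * 8;
}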
622 thirdparty/zstd/compress/zstd_compress.c vendored
File diff suppressed because it is too large
234 thirdparty/zstd/compress/zstd_opt.h vendored
@@ -1,10 +1,10 @@
/**
* Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
*/
@ -22,173 +22,173 @@
|
||||
/*-*************************************
|
||||
* Price functions for optimal parser
|
||||
***************************************/
|
||||
FORCE_INLINE void ZSTD_setLog2Prices(seqStore_t* ssPtr)
|
||||
static void ZSTD_setLog2Prices(optState_t* optPtr)
|
||||
{
|
||||
ssPtr->log2matchLengthSum = ZSTD_highbit32(ssPtr->matchLengthSum+1);
|
||||
ssPtr->log2litLengthSum = ZSTD_highbit32(ssPtr->litLengthSum+1);
|
||||
ssPtr->log2litSum = ZSTD_highbit32(ssPtr->litSum+1);
|
||||
ssPtr->log2offCodeSum = ZSTD_highbit32(ssPtr->offCodeSum+1);
|
||||
ssPtr->factor = 1 + ((ssPtr->litSum>>5) / ssPtr->litLengthSum) + ((ssPtr->litSum<<1) / (ssPtr->litSum + ssPtr->matchSum));
|
||||
optPtr->log2matchLengthSum = ZSTD_highbit32(optPtr->matchLengthSum+1);
|
||||
optPtr->log2litLengthSum = ZSTD_highbit32(optPtr->litLengthSum+1);
|
||||
optPtr->log2litSum = ZSTD_highbit32(optPtr->litSum+1);
|
||||
optPtr->log2offCodeSum = ZSTD_highbit32(optPtr->offCodeSum+1);
|
||||
optPtr->factor = 1 + ((optPtr->litSum>>5) / optPtr->litLengthSum) + ((optPtr->litSum<<1) / (optPtr->litSum + optPtr->matchSum));
|
||||
}
|
||||
|
||||
|
||||
MEM_STATIC void ZSTD_rescaleFreqs(seqStore_t* ssPtr, const BYTE* src, size_t srcSize)
|
||||
static void ZSTD_rescaleFreqs(optState_t* optPtr, const BYTE* src, size_t srcSize)
|
||||
{
|
||||
unsigned u;
|
||||
|
||||
ssPtr->cachedLiterals = NULL;
|
||||
ssPtr->cachedPrice = ssPtr->cachedLitLength = 0;
|
||||
ssPtr->staticPrices = 0;
|
||||
optPtr->cachedLiterals = NULL;
|
||||
optPtr->cachedPrice = optPtr->cachedLitLength = 0;
|
||||
optPtr->staticPrices = 0;
|
||||
|
||||
if (ssPtr->litLengthSum == 0) {
|
||||
if (srcSize <= 1024) ssPtr->staticPrices = 1;
|
||||
if (optPtr->litLengthSum == 0) {
|
||||
if (srcSize <= 1024) optPtr->staticPrices = 1;
|
||||
|
||||
assert(ssPtr->litFreq!=NULL);
|
||||
assert(optPtr->litFreq!=NULL);
|
||||
for (u=0; u<=MaxLit; u++)
|
||||
ssPtr->litFreq[u] = 0;
|
||||
optPtr->litFreq[u] = 0;
|
||||
for (u=0; u<srcSize; u++)
|
||||
ssPtr->litFreq[src[u]]++;
|
||||
optPtr->litFreq[src[u]]++;
|
||||
|
||||
ssPtr->litSum = 0;
|
||||
ssPtr->litLengthSum = MaxLL+1;
|
||||
ssPtr->matchLengthSum = MaxML+1;
|
||||
ssPtr->offCodeSum = (MaxOff+1);
|
||||
ssPtr->matchSum = (ZSTD_LITFREQ_ADD<<Litbits);
|
||||
optPtr->litSum = 0;
|
||||
optPtr->litLengthSum = MaxLL+1;
|
||||
optPtr->matchLengthSum = MaxML+1;
|
||||
optPtr->offCodeSum = (MaxOff+1);
|
||||
optPtr->matchSum = (ZSTD_LITFREQ_ADD<<Litbits);
|
||||
|
||||
for (u=0; u<=MaxLit; u++) {
|
||||
ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u]>>ZSTD_FREQ_DIV);
|
||||
ssPtr->litSum += ssPtr->litFreq[u];
|
||||
optPtr->litFreq[u] = 1 + (optPtr->litFreq[u]>>ZSTD_FREQ_DIV);
|
||||
optPtr->litSum += optPtr->litFreq[u];
|
||||
}
|
||||
for (u=0; u<=MaxLL; u++)
|
||||
ssPtr->litLengthFreq[u] = 1;
|
||||
optPtr->litLengthFreq[u] = 1;
|
||||
for (u=0; u<=MaxML; u++)
|
||||
ssPtr->matchLengthFreq[u] = 1;
|
||||
optPtr->matchLengthFreq[u] = 1;
|
||||
for (u=0; u<=MaxOff; u++)
|
||||
ssPtr->offCodeFreq[u] = 1;
|
||||
optPtr->offCodeFreq[u] = 1;
|
||||
} else {
|
||||
ssPtr->matchLengthSum = 0;
|
||||
ssPtr->litLengthSum = 0;
|
||||
ssPtr->offCodeSum = 0;
|
||||
ssPtr->matchSum = 0;
|
||||
ssPtr->litSum = 0;
|
||||
optPtr->matchLengthSum = 0;
|
||||
optPtr->litLengthSum = 0;
|
||||
optPtr->offCodeSum = 0;
|
||||
optPtr->matchSum = 0;
|
||||
optPtr->litSum = 0;
|
||||
|
||||
for (u=0; u<=MaxLit; u++) {
|
||||
ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u]>>(ZSTD_FREQ_DIV+1));
|
||||
ssPtr->litSum += ssPtr->litFreq[u];
|
||||
optPtr->litFreq[u] = 1 + (optPtr->litFreq[u]>>(ZSTD_FREQ_DIV+1));
|
||||
optPtr->litSum += optPtr->litFreq[u];
|
||||
}
|
||||
for (u=0; u<=MaxLL; u++) {
|
||||
ssPtr->litLengthFreq[u] = 1 + (ssPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1));
|
||||
ssPtr->litLengthSum += ssPtr->litLengthFreq[u];
|
||||
optPtr->litLengthFreq[u] = 1 + (optPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1));
|
||||
optPtr->litLengthSum += optPtr->litLengthFreq[u];
|
||||
}
|
||||
for (u=0; u<=MaxML; u++) {
|
||||
ssPtr->matchLengthFreq[u] = 1 + (ssPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV);
|
||||
ssPtr->matchLengthSum += ssPtr->matchLengthFreq[u];
|
||||
ssPtr->matchSum += ssPtr->matchLengthFreq[u] * (u + 3);
|
||||
optPtr->matchLengthFreq[u] = 1 + (optPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV);
|
||||
optPtr->matchLengthSum += optPtr->matchLengthFreq[u];
|
||||
optPtr->matchSum += optPtr->matchLengthFreq[u] * (u + 3);
|
||||
}
|
||||
ssPtr->matchSum *= ZSTD_LITFREQ_ADD;
|
||||
optPtr->matchSum *= ZSTD_LITFREQ_ADD;
|
||||
for (u=0; u<=MaxOff; u++) {
|
||||
ssPtr->offCodeFreq[u] = 1 + (ssPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV);
|
||||
ssPtr->offCodeSum += ssPtr->offCodeFreq[u];
|
||||
optPtr->offCodeFreq[u] = 1 + (optPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV);
|
||||
optPtr->offCodeSum += optPtr->offCodeFreq[u];
|
||||
}
|
||||
}
|
||||
|
||||
ZSTD_setLog2Prices(ssPtr);
|
||||
ZSTD_setLog2Prices(optPtr);
|
||||
}
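All of the price functions renamed in this hunk reduce to the same model: the approximate cost of a symbol, in bits, is log2(totalCount) minus log2(symbolCount), with ZSTD_highbit32 acting as a cheap integer log2 and the log2*Sum fields caching the totals so only the per-symbol term is recomputed. A stand-alone sketch of that idea with toy names (not the zstd API):

#include <stdint.h>

static unsigned highbit32(uint32_t v)     /* position of the highest set bit */
{
    unsigned n = 0;
    while (v >>= 1) n++;
    return n;
}

typedef struct {
    uint32_t litFreq[256];
    uint32_t litSum;
    uint32_t log2litSum;      /* cached by the setLog2Prices step */
} toyPrices_t;

static void toySetLog2Prices(toyPrices_t* p)
{
    p->log2litSum = highbit32(p->litSum + 1);
}

/* approximate cost, in bits, of emitting `length` literals */
static uint32_t toyLiteralPrice(const toyPrices_t* p, const uint8_t* lit, uint32_t length)
{
    uint32_t price = length * p->log2litSum;
    uint32_t u;
    for (u = 0; u < length; u++)
        price -= highbit32(p->litFreq[lit[u]] + 1);
    return price;
}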
|
||||
|
||||
|
||||
FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t* ssPtr, U32 litLength, const BYTE* literals)
|
||||
static U32 ZSTD_getLiteralPrice(optState_t* optPtr, U32 litLength, const BYTE* literals)
|
||||
{
|
||||
U32 price, u;
|
||||
|
||||
if (ssPtr->staticPrices)
|
||||
if (optPtr->staticPrices)
|
||||
return ZSTD_highbit32((U32)litLength+1) + (litLength*6);
|
||||
|
||||
if (litLength == 0)
|
||||
return ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[0]+1);
|
||||
return optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[0]+1);
|
||||
|
||||
/* literals */
|
||||
if (ssPtr->cachedLiterals == literals) {
|
||||
U32 const additional = litLength - ssPtr->cachedLitLength;
|
||||
const BYTE* literals2 = ssPtr->cachedLiterals + ssPtr->cachedLitLength;
|
||||
price = ssPtr->cachedPrice + additional * ssPtr->log2litSum;
|
||||
if (optPtr->cachedLiterals == literals) {
|
||||
U32 const additional = litLength - optPtr->cachedLitLength;
|
||||
const BYTE* literals2 = optPtr->cachedLiterals + optPtr->cachedLitLength;
|
||||
price = optPtr->cachedPrice + additional * optPtr->log2litSum;
|
||||
for (u=0; u < additional; u++)
|
||||
price -= ZSTD_highbit32(ssPtr->litFreq[literals2[u]]+1);
|
||||
ssPtr->cachedPrice = price;
|
||||
ssPtr->cachedLitLength = litLength;
|
||||
price -= ZSTD_highbit32(optPtr->litFreq[literals2[u]]+1);
|
||||
optPtr->cachedPrice = price;
|
||||
optPtr->cachedLitLength = litLength;
|
||||
} else {
|
||||
price = litLength * ssPtr->log2litSum;
|
||||
price = litLength * optPtr->log2litSum;
|
||||
for (u=0; u < litLength; u++)
|
||||
price -= ZSTD_highbit32(ssPtr->litFreq[literals[u]]+1);
|
||||
price -= ZSTD_highbit32(optPtr->litFreq[literals[u]]+1);
|
||||
|
||||
if (litLength >= 12) {
|
||||
ssPtr->cachedLiterals = literals;
|
||||
ssPtr->cachedPrice = price;
|
||||
ssPtr->cachedLitLength = litLength;
|
||||
optPtr->cachedLiterals = literals;
|
||||
optPtr->cachedPrice = price;
|
||||
optPtr->cachedLitLength = litLength;
|
||||
}
|
||||
}
|
||||
|
||||
/* literal Length */
|
||||
{ const BYTE LL_deltaCode = 19;
|
||||
const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
|
||||
price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode]+1);
|
||||
price += LL_bits[llCode] + optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[llCode]+1);
|
||||
}
|
||||
|
||||
return price;
|
||||
}
|
||||
|
||||
|
||||
FORCE_INLINE U32 ZSTD_getPrice(seqStore_t* seqStorePtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength, const int ultra)
|
||||
FORCE_INLINE_TEMPLATE U32 ZSTD_getPrice(optState_t* optPtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength, const int ultra)
|
||||
{
|
||||
/* offset */
|
||||
U32 price;
|
||||
BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1);
|
||||
|
||||
if (seqStorePtr->staticPrices)
|
||||
return ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + ZSTD_highbit32((U32)matchLength+1) + 16 + offCode;
|
||||
if (optPtr->staticPrices)
|
||||
return ZSTD_getLiteralPrice(optPtr, litLength, literals) + ZSTD_highbit32((U32)matchLength+1) + 16 + offCode;
|
||||
|
||||
price = offCode + seqStorePtr->log2offCodeSum - ZSTD_highbit32(seqStorePtr->offCodeFreq[offCode]+1);
|
||||
price = offCode + optPtr->log2offCodeSum - ZSTD_highbit32(optPtr->offCodeFreq[offCode]+1);
|
||||
if (!ultra && offCode >= 20) price += (offCode-19)*2;
|
||||
|
||||
/* match Length */
|
||||
{ const BYTE ML_deltaCode = 36;
|
||||
const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
|
||||
price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode]+1);
|
||||
price += ML_bits[mlCode] + optPtr->log2matchLengthSum - ZSTD_highbit32(optPtr->matchLengthFreq[mlCode]+1);
|
||||
}
|
||||
|
||||
return price + ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + seqStorePtr->factor;
|
||||
return price + ZSTD_getLiteralPrice(optPtr, litLength, literals) + optPtr->factor;
|
||||
}
|
||||
|
||||
|
||||
MEM_STATIC void ZSTD_updatePrice(seqStore_t* seqStorePtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength)
|
||||
static void ZSTD_updatePrice(optState_t* optPtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength)
|
||||
{
|
||||
U32 u;
|
||||
|
||||
/* literals */
|
||||
seqStorePtr->litSum += litLength*ZSTD_LITFREQ_ADD;
|
||||
optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
|
||||
for (u=0; u < litLength; u++)
|
||||
seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
|
||||
optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
|
||||
|
||||
/* literal Length */
|
||||
{ const BYTE LL_deltaCode = 19;
|
||||
const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
|
||||
seqStorePtr->litLengthFreq[llCode]++;
|
||||
seqStorePtr->litLengthSum++;
|
||||
optPtr->litLengthFreq[llCode]++;
|
||||
optPtr->litLengthSum++;
|
||||
}
|
||||
|
||||
/* match offset */
|
||||
{ BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1);
|
||||
seqStorePtr->offCodeSum++;
|
||||
seqStorePtr->offCodeFreq[offCode]++;
|
||||
optPtr->offCodeSum++;
|
||||
optPtr->offCodeFreq[offCode]++;
|
||||
}
|
||||
|
||||
/* match Length */
|
||||
{ const BYTE ML_deltaCode = 36;
|
||||
const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
|
||||
seqStorePtr->matchLengthFreq[mlCode]++;
|
||||
seqStorePtr->matchLengthSum++;
|
||||
optPtr->matchLengthFreq[mlCode]++;
|
||||
optPtr->matchLengthSum++;
|
||||
}
|
||||
|
||||
ZSTD_setLog2Prices(seqStorePtr);
|
||||
ZSTD_setLog2Prices(optPtr);
|
||||
}
|
||||
|
||||
|
||||
@ -203,7 +203,7 @@ MEM_STATIC void ZSTD_updatePrice(seqStore_t* seqStorePtr, U32 litLength, const B
|
||||
|
||||
|
||||
/* function safe only for comparisons */
|
||||
MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
|
||||
static U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
|
||||
{
|
||||
switch (length)
|
||||
{
|
||||
@ -219,7 +219,7 @@ MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
|
||||
|
||||
/* Update hashTable3 up to ip (excluded)
|
||||
Assumption : always within prefix (i.e. not within extDict) */
|
||||
FORCE_INLINE
|
||||
static
|
||||
U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_CCtx* zc, const BYTE* ip)
|
||||
{
|
||||
U32* const hashTable3 = zc->hashTable3;
|
||||
@ -412,11 +412,12 @@ static U32 ZSTD_BtGetAllMatches_selectMLS_extDict (
|
||||
/*-*******************************
|
||||
* Optimal parser
|
||||
*********************************/
|
||||
FORCE_INLINE
|
||||
FORCE_INLINE_TEMPLATE
|
||||
void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
|
||||
const void* src, size_t srcSize, const int ultra)
|
||||
{
|
||||
seqStore_t* seqStorePtr = &(ctx->seqStore);
|
||||
optState_t* optStatePtr = &(ctx->optState);
|
||||
const BYTE* const istart = (const BYTE*)src;
|
||||
const BYTE* ip = istart;
|
||||
const BYTE* anchor = istart;
|
||||
@ -430,16 +431,16 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
|
||||
const U32 mls = ctx->appliedParams.cParams.searchLength;
|
||||
const U32 minMatch = (ctx->appliedParams.cParams.searchLength == 3) ? 3 : 4;
|
||||
|
||||
ZSTD_optimal_t* opt = seqStorePtr->priceTable;
|
||||
ZSTD_match_t* matches = seqStorePtr->matchTable;
|
||||
ZSTD_optimal_t* opt = optStatePtr->priceTable;
|
||||
ZSTD_match_t* matches = optStatePtr->matchTable;
|
||||
const BYTE* inr;
|
||||
U32 offset, rep[ZSTD_REP_NUM];
|
||||
|
||||
/* init */
|
||||
ctx->nextToUpdate3 = ctx->nextToUpdate;
|
||||
ZSTD_rescaleFreqs(seqStorePtr, (const BYTE*)src, srcSize);
|
||||
ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize);
|
||||
ip += (ip==prefixStart);
|
||||
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=ctx->rep[i]; }
|
||||
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=seqStorePtr->rep[i]; }
|
||||
|
||||
/* Match Loop */
|
||||
while (ip < ilimit) {
|
||||
@ -462,7 +463,7 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
|
||||
}
|
||||
best_off = i - (ip == anchor);
|
||||
do {
|
||||
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
|
||||
price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
|
||||
if (mlen > last_pos || price < opt[mlen].price)
|
||||
SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
|
||||
mlen--;
|
||||
@ -487,7 +488,7 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
|
||||
mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
|
||||
best_mlen = matches[u].len;
|
||||
while (mlen <= best_mlen) {
|
||||
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
if (mlen > last_pos || price < opt[mlen].price)
|
||||
SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */
|
||||
mlen++;
|
||||
@ -507,12 +508,12 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
|
||||
if (opt[cur-1].mlen == 1) {
|
||||
litlen = opt[cur-1].litlen + 1;
|
||||
if (cur > litlen) {
|
||||
price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-litlen);
|
||||
price = opt[cur - litlen].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-litlen);
|
||||
} else
|
||||
price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
|
||||
price = ZSTD_getLiteralPrice(optStatePtr, litlen, anchor);
|
||||
} else {
|
||||
litlen = 1;
|
||||
price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-1);
|
||||
price = opt[cur - 1].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-1);
|
||||
}
|
||||
|
||||
if (cur > last_pos || price <= opt[cur].price)
|
||||
@ -554,12 +555,12 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
|
||||
if (opt[cur].mlen == 1) {
|
||||
litlen = opt[cur].litlen;
|
||||
if (cur > litlen) {
|
||||
price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra);
|
||||
price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra);
|
||||
} else
|
||||
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
|
||||
price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
|
||||
} else {
|
||||
litlen = 0;
|
||||
price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
|
||||
price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
|
||||
}
|
||||
|
||||
if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
|
||||
@ -586,12 +587,12 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
|
||||
if (opt[cur].mlen == 1) {
|
||||
litlen = opt[cur].litlen;
|
||||
if (cur > litlen)
|
||||
price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
else
|
||||
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
} else {
|
||||
litlen = 0;
|
||||
price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
}
|
||||
|
||||
if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
|
||||
@ -645,13 +646,13 @@ _storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
|
||||
if (litLength==0) offset--;
|
||||
}
|
||||
|
||||
ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
|
||||
ZSTD_updatePrice(optStatePtr, litLength, anchor, offset, mlen-MINMATCH);
|
||||
ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
|
||||
anchor = ip = ip + mlen;
|
||||
} } /* for (cur=0; cur < last_pos; ) */
|
||||
|
||||
/* Save reps for next block */
|
||||
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; }
|
||||
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) seqStorePtr->repToConfirm[i] = rep[i]; }
|
||||
|
||||
/* Last Literals */
|
||||
{ size_t const lastLLSize = iend - anchor;
|
||||
@ -661,11 +662,12 @@ _storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
|
||||
}
|
||||
|
||||
|
||||
FORCE_INLINE
|
||||
FORCE_INLINE_TEMPLATE
|
||||
void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
|
||||
const void* src, size_t srcSize, const int ultra)
|
||||
{
|
||||
seqStore_t* seqStorePtr = &(ctx->seqStore);
|
||||
optState_t* optStatePtr = &(ctx->optState);
|
||||
const BYTE* const istart = (const BYTE*)src;
|
||||
const BYTE* ip = istart;
|
||||
const BYTE* anchor = istart;
|
||||
@ -683,16 +685,16 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
|
||||
const U32 mls = ctx->appliedParams.cParams.searchLength;
|
||||
const U32 minMatch = (ctx->appliedParams.cParams.searchLength == 3) ? 3 : 4;
|
||||
|
||||
ZSTD_optimal_t* opt = seqStorePtr->priceTable;
|
||||
ZSTD_match_t* matches = seqStorePtr->matchTable;
|
||||
ZSTD_optimal_t* opt = optStatePtr->priceTable;
|
||||
ZSTD_match_t* matches = optStatePtr->matchTable;
|
||||
const BYTE* inr;
|
||||
|
||||
/* init */
|
||||
U32 offset, rep[ZSTD_REP_NUM];
|
||||
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=ctx->rep[i]; }
|
||||
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=seqStorePtr->rep[i]; }
|
||||
|
||||
ctx->nextToUpdate3 = ctx->nextToUpdate;
|
||||
ZSTD_rescaleFreqs(seqStorePtr, (const BYTE*)src, srcSize);
|
||||
ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize);
|
||||
ip += (ip==prefixStart);
|
||||
|
||||
/* Match Loop */
|
||||
@ -726,7 +728,7 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
|
||||
best_off = i - (ip==anchor);
|
||||
litlen = opt[0].litlen;
|
||||
do {
|
||||
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
|
||||
price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
|
||||
if (mlen > last_pos || price < opt[mlen].price)
|
||||
SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
|
||||
mlen--;
|
||||
@ -756,7 +758,7 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
|
||||
best_mlen = matches[u].len;
|
||||
litlen = opt[0].litlen;
|
||||
while (mlen <= best_mlen) {
|
||||
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
if (mlen > last_pos || price < opt[mlen].price)
|
||||
SET_PRICE(mlen, mlen, matches[u].off, litlen, price);
|
||||
mlen++;
|
||||
@ -773,12 +775,12 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
|
||||
if (opt[cur-1].mlen == 1) {
|
||||
litlen = opt[cur-1].litlen + 1;
|
||||
if (cur > litlen) {
|
||||
price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-litlen);
|
||||
price = opt[cur - litlen].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-litlen);
|
||||
} else
|
||||
price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
|
||||
price = ZSTD_getLiteralPrice(optStatePtr, litlen, anchor);
|
||||
} else {
|
||||
litlen = 1;
|
||||
price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-1);
|
||||
price = opt[cur - 1].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-1);
|
||||
}
|
||||
|
||||
if (cur > last_pos || price <= opt[cur].price)
|
||||
@ -826,12 +828,12 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
|
||||
if (opt[cur].mlen == 1) {
|
||||
litlen = opt[cur].litlen;
|
||||
if (cur > litlen) {
|
||||
price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra);
|
||||
price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra);
|
||||
} else
|
||||
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
|
||||
price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
|
||||
} else {
|
||||
litlen = 0;
|
||||
price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
|
||||
price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
|
||||
}
|
||||
|
||||
if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
|
||||
@ -858,12 +860,12 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
|
||||
if (opt[cur].mlen == 1) {
|
||||
litlen = opt[cur].litlen;
|
||||
if (cur > litlen)
|
||||
price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
else
|
||||
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
} else {
|
||||
litlen = 0;
|
||||
price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra);
|
||||
}
|
||||
|
||||
if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
|
||||
@ -918,13 +920,13 @@ _storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
|
||||
if (litLength==0) offset--;
|
||||
}
|
||||
|
||||
ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
|
||||
ZSTD_updatePrice(optStatePtr, litLength, anchor, offset, mlen-MINMATCH);
|
||||
ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
|
||||
anchor = ip = ip + mlen;
|
||||
} } /* for (cur=0; cur < last_pos; ) */
|
||||
|
||||
/* Save reps for next block */
|
||||
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; }
|
||||
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) seqStorePtr->repToConfirm[i] = rep[i]; }
|
||||
|
||||
/* Last Literals */
|
||||
{ size_t lastLLSize = iend - anchor;
|
||||
|
377 thirdparty/zstd/compress/zstdmt_compress.c vendored
@@ -1,15 +1,16 @@
/**
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
*/


/* ====== Tuning parameters ====== */
#define ZSTDMT_NBTHREADS_MAX 128
#define ZSTDMT_NBTHREADS_MAX 256
#define ZSTDMT_OVERLAPLOG_DEFAULT 6
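For context on the raised thread limit: the multi-threaded path is driven through the ZSTDMT_* entry points declared in zstdmt_compress.h. A minimal one-shot use, assuming that header is available and nbThreads stays within ZSTDMT_NBTHREADS_MAX (now 256); treat this as a sketch rather than canonical usage:

#include <stddef.h>
#include "zstdmt_compress.h"
#include "zstd.h"           /* ZSTD_isError */

static size_t mtCompress(void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize,
                         unsigned nbThreads, int level)
{
    ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(nbThreads);
    size_t cSize;
    if (mtctx == NULL) return 0;
    cSize = ZSTDMT_compressCCtx(mtctx, dst, dstCapacity, src, srcSize, level);
    ZSTDMT_freeCCtx(mtctx);
    return ZSTD_isError(cSize) ? 0 : cSize;   /* 0 signals failure in this sketch */
}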
|
||||
|
||||
|
||||
/* ====== Compiler specifics ====== */
|
||||
@ -73,6 +74,7 @@ static unsigned long long GetCurrentClockTimeMicroseconds(void)
|
||||
|
||||
|
||||
/* ===== Buffer Pool ===== */
|
||||
/* a single Buffer Pool can be invoked from multiple threads in parallel */
|
||||
|
||||
typedef struct buffer_s {
|
||||
void* start;
|
||||
@ -82,6 +84,8 @@ typedef struct buffer_s {
|
||||
static const buffer_t g_nullBuffer = { NULL, 0 };
|
||||
|
||||
typedef struct ZSTDMT_bufferPool_s {
|
||||
pthread_mutex_t poolMutex;
|
||||
size_t bufferSize;
|
||||
unsigned totalBuffers;
|
||||
unsigned nbBuffers;
|
||||
ZSTD_customMem cMem;
|
||||
@ -90,10 +94,15 @@ typedef struct ZSTDMT_bufferPool_s {
|
||||
|
||||
static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbThreads, ZSTD_customMem cMem)
|
||||
{
|
||||
unsigned const maxNbBuffers = 2*nbThreads + 2;
|
||||
unsigned const maxNbBuffers = 2*nbThreads + 3;
|
||||
ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_calloc(
|
||||
sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
|
||||
if (bufPool==NULL) return NULL;
|
||||
if (pthread_mutex_init(&bufPool->poolMutex, NULL)) {
|
||||
ZSTD_free(bufPool, cMem);
|
||||
return NULL;
|
||||
}
|
||||
bufPool->bufferSize = 64 KB;
|
||||
bufPool->totalBuffers = maxNbBuffers;
|
||||
bufPool->nbBuffers = 0;
|
||||
bufPool->cMem = cMem;
|
||||
@ -106,6 +115,7 @@ static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
|
||||
if (!bufPool) return; /* compatibility with free on NULL */
|
||||
for (u=0; u<bufPool->totalBuffers; u++)
|
||||
ZSTD_free(bufPool->bTable[u].start, bufPool->cMem);
|
||||
pthread_mutex_destroy(&bufPool->poolMutex);
|
||||
ZSTD_free(bufPool, bufPool->cMem);
|
||||
}
|
||||
|
||||
@ -116,65 +126,85 @@ static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
|
||||
+ (bufPool->totalBuffers - 1) * sizeof(buffer_t);
|
||||
unsigned u;
|
||||
size_t totalBufferSize = 0;
|
||||
pthread_mutex_lock(&bufPool->poolMutex);
|
||||
for (u=0; u<bufPool->totalBuffers; u++)
|
||||
totalBufferSize += bufPool->bTable[u].size;
|
||||
pthread_mutex_unlock(&bufPool->poolMutex);
|
||||
|
||||
return poolSize + totalBufferSize;
|
||||
}
|
||||
|
||||
/** ZSTDMT_getBuffer() :
|
||||
* assumption : invocation from main thread only ! */
|
||||
static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* pool, size_t bSize)
|
||||
static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* bufPool, size_t bSize)
|
||||
{
|
||||
if (pool->nbBuffers) { /* try to use an existing buffer */
|
||||
buffer_t const buf = pool->bTable[--(pool->nbBuffers)];
|
||||
bufPool->bufferSize = bSize;
|
||||
}
|
||||
|
||||
/** ZSTDMT_getBuffer() :
|
||||
* assumption : bufPool must be valid */
|
||||
static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
|
||||
{
|
||||
size_t const bSize = bufPool->bufferSize;
|
||||
DEBUGLOG(5, "ZSTDMT_getBuffer");
|
||||
pthread_mutex_lock(&bufPool->poolMutex);
|
||||
if (bufPool->nbBuffers) { /* try to use an existing buffer */
|
||||
buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)];
|
||||
size_t const availBufferSize = buf.size;
|
||||
if ((availBufferSize >= bSize) & (availBufferSize <= 10*bSize))
|
||||
if ((availBufferSize >= bSize) & (availBufferSize <= 10*bSize)) {
|
||||
/* large enough, but not too much */
|
||||
pthread_mutex_unlock(&bufPool->poolMutex);
|
||||
return buf;
|
||||
}
|
||||
/* size conditions not respected : scratch this buffer, create new one */
|
||||
ZSTD_free(buf.start, pool->cMem);
|
||||
DEBUGLOG(5, "existing buffer does not meet size conditions => freeing");
|
||||
ZSTD_free(buf.start, bufPool->cMem);
|
||||
}
|
||||
pthread_mutex_unlock(&bufPool->poolMutex);
|
||||
/* create new buffer */
|
||||
DEBUGLOG(5, "create a new buffer");
|
||||
{ buffer_t buffer;
|
||||
void* const start = ZSTD_malloc(bSize, pool->cMem);
|
||||
if (start==NULL) bSize = 0;
|
||||
void* const start = ZSTD_malloc(bSize, bufPool->cMem);
|
||||
buffer.start = start; /* note : start can be NULL if malloc fails ! */
|
||||
buffer.size = bSize;
|
||||
buffer.size = (start==NULL) ? 0 : bSize;
|
||||
return buffer;
|
||||
}
|
||||
}
|
||||
|
||||
/* store buffer for later re-use, up to pool capacity */
|
||||
static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* pool, buffer_t buf)
|
||||
static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
|
||||
{
|
||||
if (buf.start == NULL) return; /* release on NULL */
|
||||
if (pool->nbBuffers < pool->totalBuffers) {
|
||||
pool->bTable[pool->nbBuffers++] = buf; /* store for later re-use */
|
||||
if (buf.start == NULL) return; /* compatible with release on NULL */
|
||||
DEBUGLOG(5, "ZSTDMT_releaseBuffer");
|
||||
pthread_mutex_lock(&bufPool->poolMutex);
|
||||
if (bufPool->nbBuffers < bufPool->totalBuffers) {
|
||||
bufPool->bTable[bufPool->nbBuffers++] = buf; /* stored for later use */
|
||||
pthread_mutex_unlock(&bufPool->poolMutex);
|
||||
return;
|
||||
}
|
||||
pthread_mutex_unlock(&bufPool->poolMutex);
|
||||
/* Reached bufferPool capacity (should not happen) */
|
||||
ZSTD_free(buf.start, pool->cMem);
|
||||
DEBUGLOG(5, "buffer pool capacity reached => freeing ");
|
||||
ZSTD_free(buf.start, bufPool->cMem);
|
||||
}
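The buffer pool is now safe to call from worker threads: every access to the free list is taken under poolMutex, and a recycled buffer is only reused when its size lies between the target bufferSize and ten times that size. A minimal sketch of that locking discipline with a simplified toyPool_t (not the ZSTDMT_bufferPool API, fixed capacity to keep it short):

#include <pthread.h>
#include <stdlib.h>

typedef struct { void* start; size_t size; } toyBuffer_t;

typedef struct {
    pthread_mutex_t lock;
    size_t bufferSize;       /* current target size, cf. ZSTDMT_setBufferSize */
    unsigned nbFree;
    toyBuffer_t slot[8];
} toyPool_t;

static toyBuffer_t toyPool_get(toyPool_t* pool)
{
    toyBuffer_t buf = { NULL, 0 };
    size_t const bSize = pool->bufferSize;
    pthread_mutex_lock(&pool->lock);
    if (pool->nbFree) {
        buf = pool->slot[--pool->nbFree];
        if ((buf.size >= bSize) && (buf.size <= 10 * bSize)) {
            pthread_mutex_unlock(&pool->lock);
            return buf;                      /* large enough, but not wastefully so */
        }
        free(buf.start);                     /* wrong size: discard and allocate anew */
    }
    pthread_mutex_unlock(&pool->lock);
    buf.start = malloc(bSize);               /* allocation kept outside the critical section */
    buf.size  = (buf.start == NULL) ? 0 : bSize;
    return buf;
}

static void toyPool_release(toyPool_t* pool, toyBuffer_t buf)
{
    if (buf.start == NULL) return;
    pthread_mutex_lock(&pool->lock);
    if (pool->nbFree < 8) {
        pool->slot[pool->nbFree++] = buf;    /* keep for later reuse */
        pthread_mutex_unlock(&pool->lock);
        return;
    }
    pthread_mutex_unlock(&pool->lock);
    free(buf.start);                         /* pool full: hand it back to malloc */
}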
|
||||
|
||||
|
||||
/* ===== CCtx Pool ===== */
|
||||
/* a single CCtx Pool can be invoked from multiple threads in parallel */
|
||||
|
||||
typedef struct {
|
||||
pthread_mutex_t poolMutex;
|
||||
unsigned totalCCtx;
|
||||
unsigned availCCtx;
|
||||
ZSTD_customMem cMem;
|
||||
ZSTD_CCtx* cctx[1]; /* variable size */
|
||||
} ZSTDMT_CCtxPool;
|
||||
|
||||
/* assumption : CCtxPool invocation only from main thread */
|
||||
|
||||
/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */
|
||||
static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
|
||||
{
|
||||
unsigned u;
|
||||
for (u=0; u<pool->totalCCtx; u++)
|
||||
ZSTD_freeCCtx(pool->cctx[u]); /* note : compatible with free on NULL */
|
||||
pthread_mutex_destroy(&pool->poolMutex);
|
||||
ZSTD_free(pool, pool->cMem);
|
||||
}
|
||||
|
||||
@ -186,6 +216,10 @@ static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbThreads,
|
||||
ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc(
|
||||
sizeof(ZSTDMT_CCtxPool) + (nbThreads-1)*sizeof(ZSTD_CCtx*), cMem);
|
||||
if (!cctxPool) return NULL;
|
||||
if (pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
|
||||
ZSTD_free(cctxPool, cMem);
|
||||
return NULL;
|
||||
}
|
||||
cctxPool->cMem = cMem;
|
||||
cctxPool->totalCCtx = nbThreads;
|
||||
cctxPool->availCCtx = 1; /* at least one cctx for single-thread mode */
|
||||
@ -198,50 +232,57 @@ static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbThreads,
|
||||
/* only works during initialization phase, not during compression */
|
||||
static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
|
||||
{
|
||||
unsigned const nbThreads = cctxPool->totalCCtx;
|
||||
size_t const poolSize = sizeof(*cctxPool)
|
||||
+ (nbThreads-1)*sizeof(ZSTD_CCtx*);
|
||||
unsigned u;
|
||||
size_t totalCCtxSize = 0;
|
||||
for (u=0; u<nbThreads; u++)
|
||||
totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]);
|
||||
|
||||
return poolSize + totalCCtxSize;
|
||||
pthread_mutex_lock(&cctxPool->poolMutex);
|
||||
{ unsigned const nbThreads = cctxPool->totalCCtx;
|
||||
size_t const poolSize = sizeof(*cctxPool)
|
||||
+ (nbThreads-1)*sizeof(ZSTD_CCtx*);
|
||||
unsigned u;
|
||||
size_t totalCCtxSize = 0;
|
||||
for (u=0; u<nbThreads; u++) {
|
||||
totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]);
|
||||
}
|
||||
pthread_mutex_unlock(&cctxPool->poolMutex);
|
||||
return poolSize + totalCCtxSize;
|
||||
}
|
||||
}
|
||||
|
||||
static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* pool)
|
||||
static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool)
|
||||
{
|
||||
if (pool->availCCtx) {
|
||||
pool->availCCtx--;
|
||||
return pool->cctx[pool->availCCtx];
|
||||
}
|
||||
return ZSTD_createCCtx(); /* note : can be NULL, when creation fails ! */
|
||||
DEBUGLOG(5, "ZSTDMT_getCCtx");
|
||||
pthread_mutex_lock(&cctxPool->poolMutex);
|
||||
if (cctxPool->availCCtx) {
|
||||
cctxPool->availCCtx--;
|
||||
{ ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx];
|
||||
pthread_mutex_unlock(&cctxPool->poolMutex);
|
||||
return cctx;
|
||||
} }
|
||||
pthread_mutex_unlock(&cctxPool->poolMutex);
|
||||
DEBUGLOG(5, "create one more CCtx");
|
||||
return ZSTD_createCCtx_advanced(cctxPool->cMem); /* note : can be NULL, when creation fails ! */
|
||||
}
|
||||
|
||||
static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
|
||||
{
|
||||
if (cctx==NULL) return; /* compatibility with release on NULL */
|
||||
pthread_mutex_lock(&pool->poolMutex);
|
||||
if (pool->availCCtx < pool->totalCCtx)
|
||||
pool->cctx[pool->availCCtx++] = cctx;
|
||||
else
|
||||
else {
|
||||
/* pool overflow : should not happen, since totalCCtx==nbThreads */
|
||||
DEBUGLOG(5, "CCtx pool overflow : free cctx");
|
||||
ZSTD_freeCCtx(cctx);
|
||||
}
|
||||
pthread_mutex_unlock(&pool->poolMutex);
|
||||
}
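Falling back to ZSTD_createCCtx_advanced(cctxPool->cMem) instead of plain ZSTD_createCCtx() keeps overflow contexts on the caller's allocator. For reference, a custom allocator is a ZSTD_customMem triple (customAlloc, customFree, opaque) declared in zstd.h behind ZSTD_STATIC_LINKING_ONLY; a minimal sketch with a hypothetical byte-counting tracker:

#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"
#include <stdlib.h>

typedef struct { size_t allocated; } allocTracker;

static void* trackedAlloc(void* opaque, size_t size)
{
    allocTracker* const t = (allocTracker*)opaque;
    t->allocated += size;                 /* count bytes handed out */
    return malloc(size);
}

static void trackedFree(void* opaque, void* address)
{
    (void)opaque;                         /* this sketch does not track bytes on free */
    free(address);
}

int main(void)
{
    allocTracker tracker = { 0 };
    ZSTD_customMem const cMem = { trackedAlloc, trackedFree, &tracker };
    ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cMem);
    /* ... use cctx ... */
    ZSTD_freeCCtx(cctx);
    return 0;
}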
|
||||
|
||||
|
||||
/* ===== Thread worker ===== */
|
||||
|
||||
typedef struct {
|
||||
buffer_t buffer;
|
||||
size_t filled;
|
||||
} inBuff_t;
|
||||
|
||||
typedef struct {
|
||||
ZSTD_CCtx* cctx;
|
||||
buffer_t src;
|
||||
const void* srcStart;
|
||||
size_t srcSize;
|
||||
size_t dictSize;
|
||||
size_t srcSize;
|
||||
buffer_t dstBuff;
|
||||
size_t cSize;
|
||||
size_t dstFlushed;
|
||||
@ -253,6 +294,8 @@ typedef struct {
|
||||
pthread_cond_t* jobCompleted_cond;
|
||||
ZSTD_parameters params;
|
||||
const ZSTD_CDict* cdict;
|
||||
ZSTDMT_CCtxPool* cctxPool;
|
||||
ZSTDMT_bufferPool* bufPool;
|
||||
unsigned long long fullFrameSize;
|
||||
} ZSTDMT_jobDescription;
|
||||
|
||||
@ -260,37 +303,56 @@ typedef struct {
|
||||
void ZSTDMT_compressChunk(void* jobDescription)
|
||||
{
|
||||
ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
|
||||
ZSTD_CCtx* cctx = ZSTDMT_getCCtx(job->cctxPool);
|
||||
const void* const src = (const char*)job->srcStart + job->dictSize;
|
||||
buffer_t const dstBuff = job->dstBuff;
|
||||
buffer_t dstBuff = job->dstBuff;
|
||||
DEBUGLOG(5, "job (first:%u) (last:%u) : dictSize %u, srcSize %u",
|
||||
job->firstChunk, job->lastChunk, (U32)job->dictSize, (U32)job->srcSize);
|
||||
|
||||
if (cctx==NULL) {
|
||||
job->cSize = ERROR(memory_allocation);
|
||||
goto _endJob;
|
||||
}
|
||||
|
||||
if (dstBuff.start == NULL) {
|
||||
dstBuff = ZSTDMT_getBuffer(job->bufPool);
|
||||
if (dstBuff.start==NULL) {
|
||||
job->cSize = ERROR(memory_allocation);
|
||||
goto _endJob;
|
||||
}
|
||||
job->dstBuff = dstBuff;
|
||||
}
|
||||
|
||||
if (job->cdict) { /* should only happen for first segment */
|
||||
size_t const initError = ZSTD_compressBegin_usingCDict_advanced(job->cctx, job->cdict, job->params.fParams, job->fullFrameSize);
|
||||
size_t const initError = ZSTD_compressBegin_usingCDict_advanced(cctx, job->cdict, job->params.fParams, job->fullFrameSize);
|
||||
DEBUGLOG(5, "using CDict");
|
||||
if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; }
|
||||
} else { /* srcStart points at reloaded section */
|
||||
if (!job->firstChunk) job->params.fParams.contentSizeFlag = 0; /* ensure no srcSize control */
|
||||
{ size_t const dictModeError = ZSTD_setCCtxParameter(job->cctx, ZSTD_p_forceRawDict, 1); /* Force loading dictionary in "content-only" mode (no header analysis) */
|
||||
size_t const initError = ZSTD_compressBegin_advanced(job->cctx, job->srcStart, job->dictSize, job->params, job->fullFrameSize);
|
||||
{ size_t const dictModeError = ZSTD_setCCtxParameter(cctx, ZSTD_p_forceRawDict, 1); /* Force loading dictionary in "content-only" mode (no header analysis) */
|
||||
size_t const initError = ZSTD_compressBegin_advanced(cctx, job->srcStart, job->dictSize, job->params, job->fullFrameSize);
|
||||
if (ZSTD_isError(initError) || ZSTD_isError(dictModeError)) { job->cSize = initError; goto _endJob; }
|
||||
ZSTD_setCCtxParameter(job->cctx, ZSTD_p_forceWindow, 1);
|
||||
ZSTD_setCCtxParameter(cctx, ZSTD_p_forceWindow, 1);
|
||||
} }
|
||||
if (!job->firstChunk) { /* flush and overwrite frame header when it's not first segment */
|
||||
size_t const hSize = ZSTD_compressContinue(job->cctx, dstBuff.start, dstBuff.size, src, 0);
|
||||
size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.size, src, 0);
|
||||
if (ZSTD_isError(hSize)) { job->cSize = hSize; goto _endJob; }
|
||||
ZSTD_invalidateRepCodes(job->cctx);
|
||||
ZSTD_invalidateRepCodes(cctx);
|
||||
}
|
||||
|
||||
DEBUGLOG(5, "Compressing : ");
|
||||
DEBUG_PRINTHEX(4, job->srcStart, 12);
|
||||
job->cSize = (job->lastChunk) ?
|
||||
ZSTD_compressEnd (job->cctx, dstBuff.start, dstBuff.size, src, job->srcSize) :
|
||||
ZSTD_compressContinue(job->cctx, dstBuff.start, dstBuff.size, src, job->srcSize);
|
||||
ZSTD_compressEnd (cctx, dstBuff.start, dstBuff.size, src, job->srcSize) :
|
||||
ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.size, src, job->srcSize);
|
||||
DEBUGLOG(5, "compressed %u bytes into %u bytes (first:%u) (last:%u)",
|
||||
(unsigned)job->srcSize, (unsigned)job->cSize, job->firstChunk, job->lastChunk);
|
||||
DEBUGLOG(5, "dstBuff.size : %u ; => %s", (U32)dstBuff.size, ZSTD_getErrorName(job->cSize));
|
||||
|
||||
_endJob:
|
||||
ZSTDMT_releaseCCtx(job->cctxPool, cctx);
|
||||
ZSTDMT_releaseBuffer(job->bufPool, job->src);
|
||||
job->src = g_nullBuffer; job->srcStart = NULL;
|
||||
PTHREAD_MUTEX_LOCK(job->jobCompleted_mutex);
|
||||
job->jobCompleted = 1;
|
||||
job->jobScanned = 0;
|
||||
@ -303,15 +365,19 @@ _endJob:
|
||||
/* ===== Multi-threaded compression ===== */
|
||||
/* ------------------------------------------ */
|
||||
|
||||
typedef struct {
|
||||
buffer_t buffer;
|
||||
size_t filled;
|
||||
} inBuff_t;
|
||||
|
||||
struct ZSTDMT_CCtx_s {
|
||||
POOL_ctx* factory;
|
||||
ZSTDMT_jobDescription* jobs;
|
||||
ZSTDMT_bufferPool* buffPool;
|
||||
ZSTDMT_bufferPool* bufPool;
|
||||
ZSTDMT_CCtxPool* cctxPool;
|
||||
pthread_mutex_t jobCompleted_mutex;
|
||||
pthread_cond_t jobCompleted_cond;
|
||||
size_t targetSectionSize;
|
||||
size_t marginSize;
|
||||
size_t inBuffSize;
|
||||
size_t dictSize;
|
||||
size_t targetDictSize;
|
||||
@ -324,7 +390,7 @@ struct ZSTDMT_CCtx_s {
|
||||
unsigned nextJobID;
|
||||
unsigned frameEnded;
|
||||
unsigned allJobsCompleted;
|
||||
unsigned overlapRLog;
|
||||
unsigned overlapLog;
|
||||
unsigned long long frameContentSize;
|
||||
size_t sectionSize;
|
||||
ZSTD_customMem cMem;
|
||||
@ -347,7 +413,8 @@ ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbThreads, ZSTD_customMem cMem)
|
||||
U32 nbJobs = nbThreads + 2;
|
||||
DEBUGLOG(3, "ZSTDMT_createCCtx_advanced");
|
||||
|
||||
if ((nbThreads < 1) | (nbThreads > ZSTDMT_NBTHREADS_MAX)) return NULL;
|
||||
if (nbThreads < 1) return NULL;
|
||||
nbThreads = MIN(nbThreads , ZSTDMT_NBTHREADS_MAX);
|
||||
if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL))
|
||||
/* invalid custom allocator */
|
||||
return NULL;
|
||||
@ -358,18 +425,24 @@ ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbThreads, ZSTD_customMem cMem)
|
||||
mtctx->nbThreads = nbThreads;
|
||||
mtctx->allJobsCompleted = 1;
|
||||
mtctx->sectionSize = 0;
|
||||
mtctx->overlapRLog = 3;
|
||||
mtctx->factory = POOL_create(nbThreads, 1);
|
||||
mtctx->overlapLog = ZSTDMT_OVERLAPLOG_DEFAULT;
|
||||
mtctx->factory = POOL_create(nbThreads, 0);
|
||||
mtctx->jobs = ZSTDMT_allocJobsTable(&nbJobs, cMem);
|
||||
mtctx->jobIDMask = nbJobs - 1;
|
||||
mtctx->buffPool = ZSTDMT_createBufferPool(nbThreads, cMem);
|
||||
mtctx->bufPool = ZSTDMT_createBufferPool(nbThreads, cMem);
|
||||
mtctx->cctxPool = ZSTDMT_createCCtxPool(nbThreads, cMem);
|
||||
if (!mtctx->factory | !mtctx->jobs | !mtctx->buffPool | !mtctx->cctxPool) {
|
||||
if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool) {
|
||||
ZSTDMT_freeCCtx(mtctx);
|
||||
return NULL;
|
||||
}
|
||||
if (pthread_mutex_init(&mtctx->jobCompleted_mutex, NULL)) {
|
||||
ZSTDMT_freeCCtx(mtctx);
|
||||
return NULL;
|
||||
}
|
||||
if (pthread_cond_init(&mtctx->jobCompleted_cond, NULL)) {
|
||||
ZSTDMT_freeCCtx(mtctx);
|
||||
return NULL;
|
||||
}
|
||||
pthread_mutex_init(&mtctx->jobCompleted_mutex, NULL); /* Todo : check init function return */
|
||||
pthread_cond_init(&mtctx->jobCompleted_cond, NULL);
|
||||
DEBUGLOG(3, "mt_cctx created, for %u threads", nbThreads);
|
||||
return mtctx;
|
||||
}
|
||||
@ -386,15 +459,13 @@ static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
|
||||
unsigned jobID;
|
||||
DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
|
||||
for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
|
||||
ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[jobID].dstBuff);
|
||||
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
|
||||
mtctx->jobs[jobID].dstBuff = g_nullBuffer;
|
||||
ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[jobID].src);
|
||||
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].src);
|
||||
mtctx->jobs[jobID].src = g_nullBuffer;
|
||||
ZSTDMT_releaseCCtx(mtctx->cctxPool, mtctx->jobs[jobID].cctx);
|
||||
mtctx->jobs[jobID].cctx = NULL;
|
||||
}
|
||||
memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription));
|
||||
ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->inBuff.buffer);
|
||||
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->inBuff.buffer);
|
||||
mtctx->inBuff.buffer = g_nullBuffer;
|
||||
mtctx->allJobsCompleted = 1;
|
||||
}
|
||||
@ -404,7 +475,7 @@ size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
|
||||
if (mtctx==NULL) return 0; /* compatible with free on NULL */
|
||||
POOL_free(mtctx->factory);
|
||||
if (!mtctx->allJobsCompleted) ZSTDMT_releaseAllJobResources(mtctx); /* stop workers first */
|
||||
ZSTDMT_freeBufferPool(mtctx->buffPool); /* release job resources into pools first */
|
||||
ZSTDMT_freeBufferPool(mtctx->bufPool); /* release job resources into pools first */
|
||||
ZSTD_free(mtctx->jobs, mtctx->cMem);
|
||||
ZSTDMT_freeCCtxPool(mtctx->cctxPool);
|
||||
ZSTD_freeCDict(mtctx->cdictLocal);
|
||||
@ -418,11 +489,11 @@ size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
|
||||
{
|
||||
if (mtctx == NULL) return 0; /* supports sizeof NULL */
|
||||
return sizeof(*mtctx)
|
||||
+ POOL_sizeof(mtctx->factory)
|
||||
+ ZSTDMT_sizeof_bufferPool(mtctx->buffPool)
|
||||
+ (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription)
|
||||
+ ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool)
|
||||
+ ZSTD_sizeof_CDict(mtctx->cdictLocal);
|
||||
+ POOL_sizeof(mtctx->factory)
|
||||
+ ZSTDMT_sizeof_bufferPool(mtctx->bufPool)
|
||||
+ (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription)
|
||||
+ ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool)
|
||||
+ ZSTD_sizeof_CDict(mtctx->cdictLocal);
|
||||
}
|
||||
|
||||
size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter, unsigned value)
|
||||
@ -434,10 +505,10 @@ size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter,
|
||||
return 0;
|
||||
case ZSTDMT_p_overlapSectionLog :
|
||||
DEBUGLOG(5, "ZSTDMT_p_overlapSectionLog : %u", value);
|
||||
mtctx->overlapRLog = (value >= 9) ? 0 : 9 - value;
|
||||
mtctx->overlapLog = (value >= 9) ? 9 : value;
|
||||
return 0;
|
||||
default :
|
||||
return ERROR(compressionParameter_unsupported);
|
||||
return ERROR(parameter_unsupported);
|
||||
}
|
||||
}
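The setter now stores overlapLog directly (clamped to 9) and the conversion to an overlap size happens at compression time: overlapRLog = 9 - overlapLog, then overlapSize = 1 << (windowLog - overlapRLog). A small stand-alone recomputation mirroring those expressions, with example values that are illustrative only:

#include <stdio.h>
#include <stddef.h>

static size_t overlapSizeOf(unsigned overlapLog, unsigned windowLog)
{
    unsigned const overlapRLog = (overlapLog > 9) ? 0 : 9 - overlapLog;
    return (overlapRLog >= 9) ? 0 : (size_t)1 << (windowLog - overlapRLog);
}

int main(void)
{
    /* default overlapLog 6 with a 23-bit window: 1 << (23-3) = 1 MiB,
     * i.e. one eighth of the 8 MiB window; overlapLog 9 reloads the full window. */
    printf("%zu\n", overlapSizeOf(6, 23));   /* 1048576 */
    printf("%zu\n", overlapSizeOf(9, 23));   /* 8388608 */
    printf("%zu\n", overlapSizeOf(0, 23));   /* 0 : no overlap */
    return 0;
}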
|
||||
|
||||
@ -459,12 +530,13 @@ static unsigned computeNbChunks(size_t srcSize, unsigned windowLog, unsigned nbT
|
||||
|
||||
|
||||
size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
|
||||
void* dst, size_t dstCapacity,
|
||||
const void* src, size_t srcSize,
|
||||
const ZSTD_CDict* cdict,
|
||||
ZSTD_parameters const params,
|
||||
unsigned overlapRLog)
|
||||
void* dst, size_t dstCapacity,
|
||||
const void* src, size_t srcSize,
|
||||
const ZSTD_CDict* cdict,
|
||||
ZSTD_parameters const params,
|
||||
unsigned overlapLog)
|
||||
{
|
||||
unsigned const overlapRLog = (overlapLog>9) ? 0 : 9-overlapLog;
|
||||
size_t const overlapSize = (overlapRLog>=9) ? 0 : (size_t)1 << (params.cParams.windowLog - overlapRLog);
|
||||
unsigned nbChunks = computeNbChunks(srcSize, params.cParams.windowLog, mtctx->nbThreads);
|
||||
size_t const proposedChunkSize = (srcSize + (nbChunks-1)) / nbChunks;
|
||||
@ -473,6 +545,7 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
|
||||
size_t remainingSrcSize = srcSize;
|
||||
unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? nbChunks : (unsigned)(dstCapacity / ZSTD_compressBound(avgChunkSize)); /* presumes avgChunkSize >= 256 KB, which should be the case */
|
||||
size_t frameStartPos = 0, dstBufferPos = 0;
|
||||
XXH64_state_t xxh64;
|
||||
|
||||
DEBUGLOG(4, "nbChunks : %2u (chunkSize : %u bytes) ", nbChunks, (U32)avgChunkSize);
|
||||
if (nbChunks==1) { /* fallback to single-thread mode */
|
||||
@ -480,7 +553,9 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
|
||||
if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, params.fParams);
|
||||
return ZSTD_compress_advanced(cctx, dst, dstCapacity, src, srcSize, NULL, 0, params);
|
||||
}
|
||||
assert(avgChunkSize >= 256 KB); /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), which is useful to avoid allocating extra buffers */
|
||||
assert(avgChunkSize >= 256 KB); /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), which is required for compressWithinDst */
|
||||
ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgChunkSize) );
|
||||
XXH64_reset(&xxh64, 0);
|
||||
|
||||
if (nbChunks > mtctx->jobIDMask+1) { /* enlarge job table */
|
||||
U32 nbJobs = nbChunks;
|
||||
@ -496,17 +571,10 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
|
||||
size_t const chunkSize = MIN(remainingSrcSize, avgChunkSize);
|
||||
size_t const dstBufferCapacity = ZSTD_compressBound(chunkSize);
|
||||
buffer_t const dstAsBuffer = { (char*)dst + dstBufferPos, dstBufferCapacity };
|
||||
buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : ZSTDMT_getBuffer(mtctx->buffPool, dstBufferCapacity);
|
||||
ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(mtctx->cctxPool);
|
||||
buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : g_nullBuffer;
|
||||
size_t dictSize = u ? overlapSize : 0;
|
||||
|
||||
if ((cctx==NULL) || (dstBuffer.start==NULL)) {
|
||||
mtctx->jobs[u].cSize = ERROR(memory_allocation); /* job result */
|
||||
mtctx->jobs[u].jobCompleted = 1;
|
||||
nbChunks = u+1; /* only wait and free u jobs, instead of initially expected nbChunks ones */
|
||||
break; /* let's wait for previous jobs to complete, but don't start new ones */
|
||||
}
|
||||
|
||||
mtctx->jobs[u].src = g_nullBuffer;
|
||||
mtctx->jobs[u].srcStart = srcStart + frameStartPos - dictSize;
|
||||
mtctx->jobs[u].dictSize = dictSize;
|
||||
mtctx->jobs[u].srcSize = chunkSize;
|
||||
@ -516,13 +584,18 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
|
||||
/* do not calculate checksum within sections, but write it in header for first section */
|
||||
if (u!=0) mtctx->jobs[u].params.fParams.checksumFlag = 0;
|
||||
mtctx->jobs[u].dstBuff = dstBuffer;
|
||||
mtctx->jobs[u].cctx = cctx;
|
||||
mtctx->jobs[u].cctxPool = mtctx->cctxPool;
|
||||
mtctx->jobs[u].bufPool = mtctx->bufPool;
|
||||
mtctx->jobs[u].firstChunk = (u==0);
|
||||
mtctx->jobs[u].lastChunk = (u==nbChunks-1);
|
||||
mtctx->jobs[u].jobCompleted = 0;
|
||||
mtctx->jobs[u].jobCompleted_mutex = &mtctx->jobCompleted_mutex;
|
||||
mtctx->jobs[u].jobCompleted_cond = &mtctx->jobCompleted_cond;
|
||||
|
||||
if (params.fParams.checksumFlag) {
|
||||
XXH64_update(&xxh64, srcStart + frameStartPos, chunkSize);
|
||||
}
|
||||
|
||||
DEBUGLOG(5, "posting job %u (%u bytes)", u, (U32)chunkSize);
|
||||
DEBUG_PRINTHEX(6, mtctx->jobs[u].srcStart, 12);
|
||||
POOL_add(mtctx->factory, ZSTDMT_compressChunk, &mtctx->jobs[u]);
|
||||
@ -533,8 +606,8 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
|
||||
} }
|
||||
|
||||
/* collect result */
|
||||
{ unsigned chunkID;
|
||||
size_t error = 0, dstPos = 0;
|
||||
{ size_t error = 0, dstPos = 0;
|
||||
unsigned chunkID;
|
||||
for (chunkID=0; chunkID<nbChunks; chunkID++) {
|
||||
DEBUGLOG(5, "waiting for chunk %u ", chunkID);
|
||||
PTHREAD_MUTEX_LOCK(&mtctx->jobCompleted_mutex);
|
||||
@ -545,8 +618,6 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
|
||||
pthread_mutex_unlock(&mtctx->jobCompleted_mutex);
|
||||
DEBUGLOG(5, "ready to write chunk %u ", chunkID);
|
||||
|
||||
ZSTDMT_releaseCCtx(mtctx->cctxPool, mtctx->jobs[chunkID].cctx);
|
||||
mtctx->jobs[chunkID].cctx = NULL;
|
||||
mtctx->jobs[chunkID].srcStart = NULL;
|
||||
{ size_t const cSize = mtctx->jobs[chunkID].cSize;
|
||||
if (ZSTD_isError(cSize)) error = cSize;
|
||||
@ -556,13 +627,25 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
|
||||
memmove((char*)dst + dstPos, mtctx->jobs[chunkID].dstBuff.start, cSize); /* may overlap when chunk compressed within dst */
|
||||
if (chunkID >= compressWithinDst) { /* chunk compressed into its own buffer, which must be released */
|
||||
DEBUGLOG(5, "releasing buffer %u>=%u", chunkID, compressWithinDst);
|
||||
ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[chunkID].dstBuff);
|
||||
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[chunkID].dstBuff);
|
||||
}
|
||||
mtctx->jobs[chunkID].dstBuff = g_nullBuffer;
|
||||
}
|
||||
dstPos += cSize ;
|
||||
}
|
||||
}
|
||||
} /* for (chunkID=0; chunkID<nbChunks; chunkID++) */
|
||||
|
||||
DEBUGLOG(4, "checksumFlag : %u ", params.fParams.checksumFlag);
|
||||
if (params.fParams.checksumFlag) {
|
||||
U32 const checksum = (U32)XXH64_digest(&xxh64);
|
||||
if (dstPos + 4 > dstCapacity) {
|
||||
error = ERROR(dstSize_tooSmall);
|
||||
} else {
|
||||
DEBUGLOG(4, "writing checksum : %08X \n", checksum);
|
||||
MEM_writeLE32((char*)dst + dstPos, checksum);
|
||||
dstPos += 4;
|
||||
} }
|
||||
|
||||
if (!error) DEBUGLOG(4, "compressed size : %u ", (U32)dstPos);
|
||||
return error ? error : dstPos;
|
||||
}
|
||||
@ -574,10 +657,10 @@ size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
|
||||
const void* src, size_t srcSize,
|
||||
int compressionLevel)
|
||||
{
|
||||
U32 const overlapRLog = (compressionLevel >= ZSTD_maxCLevel()) ? 0 : 3;
|
||||
U32 const overlapLog = (compressionLevel >= ZSTD_maxCLevel()) ? 9 : ZSTDMT_OVERLAPLOG_DEFAULT;
|
||||
ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);
|
||||
params.fParams.contentSizeFlag = 1;
|
||||
return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapRLog);
|
||||
return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapLog);
|
||||
}
|
||||
|
||||
|
||||
@ -615,8 +698,8 @@ size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
|
||||
if (zcs->nbThreads==1) {
|
||||
DEBUGLOG(4, "single thread mode");
|
||||
return ZSTD_initCStream_internal(zcs->cctxPool->cctx[0],
|
||||
dict, dictSize, cdict,
|
||||
params, pledgedSrcSize);
|
||||
dict, dictSize, cdict,
|
||||
params, pledgedSrcSize);
|
||||
}
|
||||
|
||||
if (zcs->allJobsCompleted == 0) { /* previous compression not correctly finished */
|
||||
@ -642,18 +725,16 @@ size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
|
||||
zcs->cdict = cdict;
|
||||
}
|
||||
|
||||
zcs->targetDictSize = (zcs->overlapRLog>=9) ? 0 : (size_t)1 << (zcs->params.cParams.windowLog - zcs->overlapRLog);
|
||||
DEBUGLOG(4, "overlapRLog : %u ", zcs->overlapRLog);
|
||||
zcs->targetDictSize = (zcs->overlapLog==0) ? 0 : (size_t)1 << (zcs->params.cParams.windowLog - (9 - zcs->overlapLog));
|
||||
DEBUGLOG(4, "overlapLog : %u ", zcs->overlapLog);
|
||||
DEBUGLOG(4, "overlap Size : %u KB", (U32)(zcs->targetDictSize>>10));
|
||||
zcs->targetSectionSize = zcs->sectionSize ? zcs->sectionSize : (size_t)1 << (zcs->params.cParams.windowLog + 2);
|
||||
zcs->targetSectionSize = MAX(ZSTDMT_SECTION_SIZE_MIN, zcs->targetSectionSize);
|
||||
zcs->targetSectionSize = MAX(zcs->targetDictSize, zcs->targetSectionSize);
|
||||
DEBUGLOG(4, "Section Size : %u KB", (U32)(zcs->targetSectionSize>>10));
|
||||
zcs->marginSize = zcs->targetSectionSize >> 2;
|
||||
zcs->inBuffSize = zcs->targetDictSize + zcs->targetSectionSize + zcs->marginSize;
|
||||
zcs->inBuff.buffer = ZSTDMT_getBuffer(zcs->buffPool, zcs->inBuffSize);
|
||||
if (zcs->inBuff.buffer.start == NULL) return ERROR(memory_allocation);
|
||||
zcs->inBuff.filled = 0;
|
||||
zcs->inBuffSize = zcs->targetDictSize + zcs->targetSectionSize;
|
||||
ZSTDMT_setBufferSize(zcs->bufPool, MAX(zcs->inBuffSize, ZSTD_compressBound(zcs->targetSectionSize)) );
|
||||
zcs->inBuff.buffer = g_nullBuffer;
|
||||
zcs->dictSize = 0;
|
||||
zcs->doneJobID = 0;
|
||||
zcs->nextJobID = 0;
|
||||
@ -664,8 +745,9 @@ size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
|
||||
}
|
||||
|
||||
size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
|
||||
const void* dict, size_t dictSize,
|
||||
ZSTD_parameters params, unsigned long long pledgedSrcSize)
|
||||
const void* dict, size_t dictSize,
|
||||
ZSTD_parameters params,
|
||||
unsigned long long pledgedSrcSize)
|
||||
{
|
||||
DEBUGLOG(5, "ZSTDMT_initCStream_advanced");
|
||||
return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, NULL, params, pledgedSrcSize);
|
||||
@ -701,19 +783,8 @@ size_t ZSTDMT_initCStream(ZSTDMT_CCtx* zcs, int compressionLevel) {
|
||||
|
||||
static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsigned endFrame)
|
||||
{
|
||||
size_t const dstBufferCapacity = ZSTD_compressBound(srcSize);
|
||||
buffer_t const dstBuffer = ZSTDMT_getBuffer(zcs->buffPool, dstBufferCapacity);
|
||||
ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(zcs->cctxPool);
|
||||
unsigned const jobID = zcs->nextJobID & zcs->jobIDMask;
|
||||
|
||||
if ((cctx==NULL) || (dstBuffer.start==NULL)) {
|
||||
zcs->jobs[jobID].jobCompleted = 1;
|
||||
zcs->nextJobID++;
|
||||
ZSTDMT_waitForAllJobsCompleted(zcs);
|
||||
ZSTDMT_releaseAllJobResources(zcs);
|
||||
return ERROR(memory_allocation);
|
||||
}
|
||||
|
||||
DEBUGLOG(4, "preparing job %u to compress %u bytes with %u preload ",
|
||||
zcs->nextJobID, (U32)srcSize, (U32)zcs->dictSize);
|
||||
zcs->jobs[jobID].src = zcs->inBuff.buffer;
|
||||
@ -726,8 +797,9 @@ static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsi
|
||||
if (zcs->nextJobID) zcs->jobs[jobID].params.fParams.checksumFlag = 0;
|
||||
zcs->jobs[jobID].cdict = zcs->nextJobID==0 ? zcs->cdict : NULL;
|
||||
zcs->jobs[jobID].fullFrameSize = zcs->frameContentSize;
|
||||
zcs->jobs[jobID].dstBuff = dstBuffer;
|
||||
zcs->jobs[jobID].cctx = cctx;
|
||||
zcs->jobs[jobID].dstBuff = g_nullBuffer;
|
||||
zcs->jobs[jobID].cctxPool = zcs->cctxPool;
|
||||
zcs->jobs[jobID].bufPool = zcs->bufPool;
|
||||
zcs->jobs[jobID].firstChunk = (zcs->nextJobID==0);
|
||||
zcs->jobs[jobID].lastChunk = endFrame;
|
||||
zcs->jobs[jobID].jobCompleted = 0;
|
||||
@ -735,11 +807,13 @@ static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsi
|
||||
zcs->jobs[jobID].jobCompleted_mutex = &zcs->jobCompleted_mutex;
|
||||
zcs->jobs[jobID].jobCompleted_cond = &zcs->jobCompleted_cond;
|
||||
|
||||
if (zcs->params.fParams.checksumFlag)
|
||||
XXH64_update(&zcs->xxhState, (const char*)zcs->inBuff.buffer.start + zcs->dictSize, srcSize);
|
||||
|
||||
/* get a new buffer for next input */
|
||||
if (!endFrame) {
|
||||
size_t const newDictSize = MIN(srcSize + zcs->dictSize, zcs->targetDictSize);
|
||||
DEBUGLOG(5, "ZSTDMT_createCompressionJob::endFrame = %u", endFrame);
|
||||
zcs->inBuff.buffer = ZSTDMT_getBuffer(zcs->buffPool, zcs->inBuffSize);
|
||||
zcs->inBuff.buffer = ZSTDMT_getBuffer(zcs->bufPool);
|
||||
if (zcs->inBuff.buffer.start == NULL) { /* not enough memory to allocate next input buffer */
|
||||
zcs->jobs[jobID].jobCompleted = 1;
|
||||
zcs->nextJobID++;
|
||||
@ -747,26 +821,20 @@ static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsi
|
||||
ZSTDMT_releaseAllJobResources(zcs);
|
||||
return ERROR(memory_allocation);
|
||||
}
|
||||
DEBUGLOG(5, "inBuff currently filled to %u", (U32)zcs->inBuff.filled);
|
||||
zcs->inBuff.filled -= srcSize + zcs->dictSize - newDictSize;
|
||||
DEBUGLOG(5, "new job : inBuff filled to %u, with %u dict and %u src",
|
||||
(U32)zcs->inBuff.filled, (U32)newDictSize,
|
||||
(U32)(zcs->inBuff.filled - newDictSize));
|
||||
memmove(zcs->inBuff.buffer.start,
|
||||
(const char*)zcs->jobs[jobID].srcStart + zcs->dictSize + srcSize - newDictSize,
|
||||
zcs->inBuff.filled);
|
||||
DEBUGLOG(5, "new inBuff pre-filled");
|
||||
zcs->dictSize = newDictSize;
|
||||
} else { /* if (endFrame==1) */
|
||||
DEBUGLOG(5, "ZSTDMT_createCompressionJob::endFrame = %u", endFrame);
|
||||
zcs->inBuff.buffer = g_nullBuffer;
|
||||
zcs->inBuff.filled = 0;
|
||||
zcs->dictSize = 0;
|
||||
zcs->frameEnded = 1;
|
||||
if (zcs->nextJobID == 0)
|
||||
if (zcs->nextJobID == 0) {
|
||||
/* single chunk exception : checksum is calculated directly within worker thread */
|
||||
zcs->params.fParams.checksumFlag = 0;
|
||||
}
|
||||
} }
|
||||
|
||||
DEBUGLOG(4, "posting job %u : %u bytes (end:%u) (note : doneJob = %u=>%u)",
|
||||
zcs->nextJobID,
|
||||
@ -804,11 +872,8 @@ static size_t ZSTDMT_flushNextJob(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsi
|
||||
ZSTDMT_releaseAllJobResources(zcs);
|
||||
return job.cSize;
|
||||
}
|
||||
ZSTDMT_releaseCCtx(zcs->cctxPool, job.cctx);
|
||||
zcs->jobs[wJobID].cctx = NULL;
|
||||
DEBUGLOG(5, "zcs->params.fParams.checksumFlag : %u ", zcs->params.fParams.checksumFlag);
|
||||
if (zcs->params.fParams.checksumFlag) {
|
||||
XXH64_update(&zcs->xxhState, (const char*)job.srcStart + job.dictSize, job.srcSize);
|
||||
if (zcs->frameEnded && (zcs->doneJobID+1 == zcs->nextJobID)) { /* write checksum at end of last section */
|
||||
U32 const checksum = (U32)XXH64_digest(&zcs->xxhState);
|
||||
DEBUGLOG(5, "writing checksum : %08X \n", checksum);
|
||||
@ -816,9 +881,6 @@ static size_t ZSTDMT_flushNextJob(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsi
|
||||
job.cSize += 4;
|
||||
zcs->jobs[wJobID].cSize += 4;
|
||||
} }
|
||||
ZSTDMT_releaseBuffer(zcs->buffPool, job.src);
|
||||
zcs->jobs[wJobID].srcStart = NULL;
|
||||
zcs->jobs[wJobID].src = g_nullBuffer;
|
||||
zcs->jobs[wJobID].jobScanned = 1;
|
||||
}
|
||||
{ size_t const toWrite = MIN(job.cSize - job.dstFlushed, output->size - output->pos);
|
||||
@ -828,7 +890,7 @@ static size_t ZSTDMT_flushNextJob(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsi
|
||||
job.dstFlushed += toWrite;
|
||||
}
|
||||
if (job.dstFlushed == job.cSize) { /* output buffer fully flushed => move to next one */
|
||||
ZSTDMT_releaseBuffer(zcs->buffPool, job.dstBuff);
|
||||
ZSTDMT_releaseBuffer(zcs->bufPool, job.dstBuff);
|
||||
zcs->jobs[wJobID].dstBuff = g_nullBuffer;
|
||||
zcs->jobs[wJobID].jobCompleted = 0;
|
||||
zcs->doneJobID++;
|
||||
@ -852,18 +914,18 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
|
||||
ZSTD_inBuffer* input,
|
||||
ZSTD_EndDirective endOp)
|
||||
{
|
||||
size_t const newJobThreshold = mtctx->dictSize + mtctx->targetSectionSize + mtctx->marginSize;
|
||||
size_t const newJobThreshold = mtctx->dictSize + mtctx->targetSectionSize;
|
||||
assert(output->pos <= output->size);
|
||||
assert(input->pos <= input->size);
|
||||
if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
|
||||
/* current frame being ended. Only flush/end are allowed. Or start new frame with init */
|
||||
return ERROR(stage_wrong);
|
||||
}
|
||||
if (mtctx->nbThreads==1) {
|
||||
if (mtctx->nbThreads==1) { /* delegate to single-thread (synchronous) */
|
||||
return ZSTD_compressStream_generic(mtctx->cctxPool->cctx[0], output, input, endOp);
|
||||
}
|
||||
|
||||
/* single-pass shortcut (note : this is blocking-mode) */
|
||||
/* single-pass shortcut (note : this is synchronous-mode) */
|
||||
if ( (mtctx->nextJobID==0) /* just started */
|
||||
&& (mtctx->inBuff.filled==0) /* nothing buffered */
|
||||
&& (endOp==ZSTD_e_end) /* end order */
|
||||
@ -871,24 +933,29 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
|
||||
size_t const cSize = ZSTDMT_compress_advanced(mtctx,
|
||||
(char*)output->dst + output->pos, output->size - output->pos,
|
||||
(const char*)input->src + input->pos, input->size - input->pos,
|
||||
mtctx->cdict, mtctx->params, mtctx->overlapRLog);
|
||||
mtctx->cdict, mtctx->params, mtctx->overlapLog);
|
||||
if (ZSTD_isError(cSize)) return cSize;
|
||||
input->pos = input->size;
|
||||
output->pos += cSize;
|
||||
ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->inBuff.buffer); /* was allocated in initStream */
|
||||
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->inBuff.buffer); /* was allocated in initStream */
|
||||
mtctx->allJobsCompleted = 1;
|
||||
mtctx->frameEnded = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* fill input buffer */
|
||||
if ((input->src) && (mtctx->inBuff.buffer.start)) { /* support NULL input */
|
||||
size_t const toLoad = MIN(input->size - input->pos, mtctx->inBuffSize - mtctx->inBuff.filled);
|
||||
DEBUGLOG(2, "inBuff:%08X; inBuffSize=%u; ToCopy=%u", (U32)(size_t)mtctx->inBuff.buffer.start, (U32)mtctx->inBuffSize, (U32)toLoad);
|
||||
memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, toLoad);
|
||||
input->pos += toLoad;
|
||||
mtctx->inBuff.filled += toLoad;
|
||||
}
|
||||
if (input->size > input->pos) { /* support NULL input */
|
||||
if (mtctx->inBuff.buffer.start == NULL) {
|
||||
mtctx->inBuff.buffer = ZSTDMT_getBuffer(mtctx->bufPool);
|
||||
if (mtctx->inBuff.buffer.start == NULL) return ERROR(memory_allocation);
|
||||
mtctx->inBuff.filled = 0;
|
||||
}
|
||||
{ size_t const toLoad = MIN(input->size - input->pos, mtctx->inBuffSize - mtctx->inBuff.filled);
|
||||
DEBUGLOG(5, "inBuff:%08X; inBuffSize=%u; ToCopy=%u", (U32)(size_t)mtctx->inBuff.buffer.start, (U32)mtctx->inBuffSize, (U32)toLoad);
|
||||
memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, toLoad);
|
||||
input->pos += toLoad;
|
||||
mtctx->inBuff.filled += toLoad;
|
||||
} }
|
||||
|
||||
if ( (mtctx->inBuff.filled >= newJobThreshold) /* filled enough : let's compress */
|
||||
&& (mtctx->nextJobID <= mtctx->doneJobID + mtctx->jobIDMask) ) { /* avoid overwriting job round buffer */
|
||||
|
thirdparty/zstd/compress/zstdmt_compress.h  (vendored, 19 changes)
@ -1,10 +1,10 @@
/**
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 */

#ifndef ZSTDMT_COMPRESS_H
@ -15,10 +15,11 @@
#endif


/* Note : All prototypes defined in this file are labelled experimental.
 * No guarantee of API continuity is provided on any of them.
 * In fact, the expectation is that these prototypes will be replaced
 * by ZSTD_compress_generic() API in the near future */
/* Note : This is an internal API.
 * Some methods are still exposed (ZSTDLIB_API),
 * because it used to be the only way to invoke MT compression.
 * Now, it's recommended to use ZSTD_compress_generic() instead.
 * These methods will stop being exposed in a future version */

/* === Dependencies === */
#include <stddef.h> /* size_t */
@ -67,7 +68,7 @@ ZSTDLIB_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
|
||||
const void* src, size_t srcSize,
|
||||
const ZSTD_CDict* cdict,
|
||||
ZSTD_parameters const params,
|
||||
unsigned overlapRLog);
|
||||
unsigned overlapLog);
|
||||
|
||||
ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
|
||||
const void* dict, size_t dictSize, /* dict can be released after init, a local copy is preserved within zcs */
|
||||
|
thirdparty/zstd/decompress/huf_decompress.c  (vendored, 32 changes)
@ -32,38 +32,22 @@
|
||||
- Public forum : https://groups.google.com/forum/#!forum/lz4c
|
||||
****************************************************************** */
|
||||
|
||||
/* **************************************************************
|
||||
* Compiler specifics
|
||||
****************************************************************/
|
||||
#ifdef _MSC_VER /* Visual Studio */
|
||||
# define FORCE_INLINE static __forceinline
|
||||
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
|
||||
#else
|
||||
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
|
||||
# ifdef __GNUC__
|
||||
# define FORCE_INLINE static inline __attribute__((always_inline))
|
||||
# else
|
||||
# define FORCE_INLINE static inline
|
||||
# endif
|
||||
# else
|
||||
# define FORCE_INLINE static
|
||||
# endif /* __STDC_VERSION__ */
|
||||
#endif
|
||||
|
||||
|
||||
/* **************************************************************
|
||||
* Dependencies
|
||||
****************************************************************/
|
||||
#include <string.h> /* memcpy, memset */
|
||||
#include "bitstream.h" /* BIT_* */
|
||||
#include "compiler.h"
|
||||
#include "fse.h" /* header compression */
|
||||
#define HUF_STATIC_LINKING_ONLY
|
||||
#include "huf.h"
|
||||
#include "error_private.h"
|
||||
|
||||
|
||||
/* **************************************************************
|
||||
* Error Management
|
||||
****************************************************************/
|
||||
#define HUF_isError ERR_isError
|
||||
#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
|
||||
|
||||
|
||||
@ -180,7 +164,7 @@ static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, con
|
||||
if (MEM_64bits()) \
|
||||
HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
|
||||
|
||||
FORCE_INLINE size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
|
||||
HINT_INLINE size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
|
||||
{
|
||||
BYTE* const pStart = p;
|
||||
|
||||
@ -639,7 +623,7 @@ static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DE
|
||||
if (MEM_64bits()) \
|
||||
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
|
||||
|
||||
FORCE_INLINE size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
|
||||
HINT_INLINE size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
|
||||
{
|
||||
BYTE* const pStart = p;
|
||||
|
||||
@ -917,11 +901,11 @@ static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, qu
|
||||
* Tells which decoder is likely to decode faster,
|
||||
* based on a set of pre-determined metrics.
|
||||
* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
|
||||
* Assumption : 0 < cSrcSize < dstSize <= 128 KB */
|
||||
* Assumption : 0 < cSrcSize, dstSize <= 128 KB */
|
||||
U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
|
||||
{
|
||||
/* decoder timing evaluation */
|
||||
U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
|
||||
U32 const Q = cSrcSize >= dstSize ? 15 : (U32)(cSrcSize * 16 / dstSize); /* Q < 16 */
|
||||
U32 const D256 = (U32)(dstSize >> 8);
|
||||
U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
|
||||
U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
|
||||
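(Editor's note, not part of the commit: a minimal illustration of the quantization step changed above. The old code computed Q = cSrcSize * 16 / dstSize and relied on cSrcSize < dstSize; the new code pins Q to 15 whenever cSrcSize >= dstSize, so Q always stays below 16. For example, cSrcSize = 48 KB and dstSize = 96 KB still give Q = 48*16/96 = 8.)

    /* Sketch mirroring the new quantization rule in the hunk above. */
    #include <stddef.h>

    static unsigned huf_quantize_ratio(size_t cSrcSize, size_t dstSize)
    {
        return (cSrcSize >= dstSize) ? 15 : (unsigned)(cSrcSize * 16 / dstSize);
    }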
@ -977,7 +961,7 @@ size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
|
||||
{
|
||||
/* validation checks */
|
||||
if (dstSize == 0) return ERROR(dstSize_tooSmall);
|
||||
if ((cSrcSize >= dstSize) || (cSrcSize <= 1)) return ERROR(corruption_detected); /* invalid */
|
||||
if (cSrcSize == 0) return ERROR(corruption_detected);
|
||||
|
||||
{ U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
|
||||
return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize):
|
||||
|
thirdparty/zstd/decompress/zstd_decompress.c  (vendored, 203 changes)
@ -1,10 +1,10 @@
|
||||
/**
|
||||
/*
|
||||
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under the BSD-style license found in the
|
||||
* LICENSE file in the root directory of this source tree. An additional grant
|
||||
* of patent rights can be found in the PATENTS file in the same directory.
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
* in the COPYING file in the root directory of this source tree).
|
||||
*/
|
||||
|
||||
|
||||
@ -53,15 +53,6 @@
|
||||
# include "zstd_legacy.h"
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) /* _mm_prefetch() is not defined outside of x86/x64 */
|
||||
# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
|
||||
# define ZSTD_PREFETCH(ptr) _mm_prefetch((const char*)ptr, _MM_HINT_T0)
|
||||
#elif defined(__GNUC__)
|
||||
# define ZSTD_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 0)
|
||||
#else
|
||||
# define ZSTD_PREFETCH(ptr) /* disabled */
|
||||
#endif
|
||||
|
||||
|
||||
/*-*************************************
|
||||
* Errors
|
||||
@ -95,7 +86,7 @@ typedef struct {
|
||||
HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
|
||||
U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
|
||||
U32 rep[ZSTD_REP_NUM];
|
||||
} ZSTD_entropyTables_t;
|
||||
} ZSTD_entropyDTables_t;
|
||||
|
||||
struct ZSTD_DCtx_s
|
||||
{
|
||||
@ -103,7 +94,7 @@ struct ZSTD_DCtx_s
|
||||
const FSE_DTable* MLTptr;
|
||||
const FSE_DTable* OFTptr;
|
||||
const HUF_DTable* HUFptr;
|
||||
ZSTD_entropyTables_t entropy;
|
||||
ZSTD_entropyDTables_t entropy;
|
||||
const void* previousDstEnd; /* detect continuity */
|
||||
const void* base; /* start of current segment */
|
||||
const void* vBase; /* virtual start of previous segment if it was just before current one */
|
||||
@ -304,15 +295,18 @@ size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t src
|
||||
return ZSTD_skippableHeaderSize; /* magic number + frame length */
|
||||
memset(zfhPtr, 0, sizeof(*zfhPtr));
|
||||
zfhPtr->frameContentSize = MEM_readLE32((const char *)src + 4);
|
||||
zfhPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */
|
||||
zfhPtr->frameType = ZSTD_skippableFrame;
|
||||
zfhPtr->windowSize = 0;
|
||||
return 0;
|
||||
}
|
||||
return ERROR(prefix_unknown);
|
||||
}
|
||||
|
||||
/* ensure there is enough `srcSize` to fully read/decode frame header */
|
||||
{ size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize);
|
||||
if (srcSize < fhsize) return fhsize; }
|
||||
{ size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize);
|
||||
if (srcSize < fhsize) return fhsize;
|
||||
zfhPtr->headerSize = (U32)fhsize;
|
||||
}
|
||||
|
||||
{ BYTE const fhdByte = ip[4];
|
||||
size_t pos = 5;
|
||||
@ -320,24 +314,23 @@ size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t src
|
||||
U32 const checksumFlag = (fhdByte>>2)&1;
|
||||
U32 const singleSegment = (fhdByte>>5)&1;
|
||||
U32 const fcsID = fhdByte>>6;
|
||||
U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;
|
||||
U32 windowSize = 0;
|
||||
U64 windowSize = 0;
|
||||
U32 dictID = 0;
|
||||
U64 frameContentSize = 0;
|
||||
U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
|
||||
if ((fhdByte & 0x08) != 0)
|
||||
return ERROR(frameParameter_unsupported); /* reserved bits, must be zero */
|
||||
return ERROR(frameParameter_unsupported); /* reserved bits, must be zero */
|
||||
|
||||
if (!singleSegment) {
|
||||
BYTE const wlByte = ip[pos++];
|
||||
U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
|
||||
if (windowLog > ZSTD_WINDOWLOG_MAX)
|
||||
return ERROR(frameParameter_windowTooLarge);
|
||||
windowSize = (1U << windowLog);
|
||||
windowSize = (1ULL << windowLog);
|
||||
windowSize += (windowSize >> 3) * (wlByte&7);
|
||||
}
|
||||
|
||||
switch(dictIDSizeCode)
|
||||
{
|
||||
default: /* impossible */
|
||||
default: assert(0); /* impossible */
|
||||
case 0 : break;
|
||||
case 1 : dictID = ip[pos]; pos++; break;
|
||||
case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
|
||||
@ -345,14 +338,15 @@ size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t src
|
||||
}
|
||||
switch(fcsID)
|
||||
{
|
||||
default: /* impossible */
|
||||
default: assert(0); /* impossible */
|
||||
case 0 : if (singleSegment) frameContentSize = ip[pos]; break;
|
||||
case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
|
||||
case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
|
||||
case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
|
||||
}
|
||||
if (!windowSize) windowSize = (U32)frameContentSize;
|
||||
if (windowSize > windowSizeMax) return ERROR(frameParameter_windowTooLarge);
|
||||
if (singleSegment) windowSize = frameContentSize;
|
||||
|
||||
zfhPtr->frameType = ZSTD_frame;
|
||||
zfhPtr->frameContentSize = frameContentSize;
|
||||
zfhPtr->windowSize = windowSize;
|
||||
zfhPtr->dictID = dictID;
|
||||
@ -362,10 +356,10 @@ size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t src
|
||||
}
|
||||
|
||||
/** ZSTD_getFrameContentSize() :
|
||||
* compatible with legacy mode
|
||||
* @return : decompressed size of the single frame pointed to be `src` if known, otherwise
|
||||
* - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
|
||||
* - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
|
||||
* compatible with legacy mode
|
||||
* @return : decompressed size of the single frame pointed to be `src` if known, otherwise
|
||||
* - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
|
||||
* - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
|
||||
unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
|
||||
{
|
||||
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
|
||||
@ -374,17 +368,14 @@ unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
|
||||
return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret;
|
||||
}
|
||||
#endif
|
||||
{ ZSTD_frameHeader fParams;
|
||||
if (ZSTD_getFrameHeader(&fParams, src, srcSize) != 0) return ZSTD_CONTENTSIZE_ERROR;
|
||||
if (fParams.windowSize == 0) {
|
||||
/* Either skippable or empty frame, size == 0 either way */
|
||||
{ ZSTD_frameHeader zfh;
|
||||
if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
|
||||
return ZSTD_CONTENTSIZE_ERROR;
|
||||
if (zfh.frameType == ZSTD_skippableFrame) {
|
||||
return 0;
|
||||
} else if (fParams.frameContentSize != 0) {
|
||||
return fParams.frameContentSize;
|
||||
} else {
|
||||
return ZSTD_CONTENTSIZE_UNKNOWN;
|
||||
}
|
||||
}
|
||||
return zfh.frameContentSize;
|
||||
} }
|
||||
}
|
||||
|
||||
/** ZSTD_findDecompressedSize() :
|
||||
@ -442,7 +433,8 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
|
||||
* compatible with legacy mode
|
||||
* @return : decompressed size if known, 0 otherwise
|
||||
note : 0 can mean any of the following :
|
||||
- decompressed size is not present within frame header
|
||||
- frame content is empty
|
||||
- decompressed size field is not present in frame header
|
||||
- frame header unknown / not supported
|
||||
- frame header not complete (`srcSize` too small) */
|
||||
unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
|
||||
@ -460,7 +452,8 @@ static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t he
|
||||
size_t const result = ZSTD_getFrameHeader(&(dctx->fParams), src, headerSize);
|
||||
if (ZSTD_isError(result)) return result; /* invalid header */
|
||||
if (result>0) return ERROR(srcSize_wrong); /* headerSize too small */
|
||||
if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID)) return ERROR(dictionary_wrong);
|
||||
if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID))
|
||||
return ERROR(dictionary_wrong);
|
||||
if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
|
||||
return 0;
|
||||
}
|
||||
@ -951,7 +944,7 @@ static seq_t ZSTD_decodeSequence(seqState_t* seqState)
|
||||
}
|
||||
|
||||
|
||||
FORCE_INLINE
|
||||
HINT_INLINE
|
||||
size_t ZSTD_execSequence(BYTE* op,
|
||||
BYTE* const oend, seq_t sequence,
|
||||
const BYTE** litPtr, const BYTE* const litLimit,
|
||||
@ -1095,7 +1088,7 @@ static size_t ZSTD_decompressSequences(
|
||||
}
|
||||
|
||||
|
||||
FORCE_INLINE seq_t ZSTD_decodeSequenceLong_generic(seqState_t* seqState, int const longOffsets)
|
||||
FORCE_INLINE_TEMPLATE seq_t ZSTD_decodeSequenceLong_generic(seqState_t* seqState, int const longOffsets)
|
||||
{
|
||||
seq_t seq;
|
||||
|
||||
@ -1195,7 +1188,7 @@ static seq_t ZSTD_decodeSequenceLong(seqState_t* seqState, unsigned const window
|
||||
}
|
||||
}
|
||||
|
||||
FORCE_INLINE
|
||||
HINT_INLINE
|
||||
size_t ZSTD_execSequenceLong(BYTE* op,
|
||||
BYTE* const oend, seq_t sequence,
|
||||
const BYTE** litPtr, const BYTE* const litLimit,
|
||||
@ -1331,7 +1324,7 @@ static size_t ZSTD_decompressSequencesLong(
|
||||
seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, windowSize32);
|
||||
size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
|
||||
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
|
||||
ZSTD_PREFETCH(sequence.match);
|
||||
PREFETCH(sequence.match);
|
||||
sequences[seqNb&STOSEQ_MASK] = sequence;
|
||||
op += oneSeqSize;
|
||||
}
|
||||
@ -1433,28 +1426,26 @@ size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t len
|
||||
size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
|
||||
{
|
||||
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
|
||||
if (ZSTD_isLegacy(src, srcSize)) return ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
|
||||
if (ZSTD_isLegacy(src, srcSize))
|
||||
return ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
|
||||
#endif
|
||||
if (srcSize >= ZSTD_skippableHeaderSize &&
|
||||
(MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
|
||||
if ( (srcSize >= ZSTD_skippableHeaderSize)
|
||||
&& (MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START ) {
|
||||
return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + 4);
|
||||
} else {
|
||||
const BYTE* ip = (const BYTE*)src;
|
||||
const BYTE* const ipstart = ip;
|
||||
size_t remainingSize = srcSize;
|
||||
ZSTD_frameHeader fParams;
|
||||
ZSTD_frameHeader zfh;
|
||||
|
||||
size_t const headerSize = ZSTD_frameHeaderSize(ip, remainingSize);
|
||||
if (ZSTD_isError(headerSize)) return headerSize;
|
||||
|
||||
/* Frame Header */
|
||||
{ size_t const ret = ZSTD_getFrameHeader(&fParams, ip, remainingSize);
|
||||
/* Extract Frame Header */
|
||||
{ size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
|
||||
if (ZSTD_isError(ret)) return ret;
|
||||
if (ret > 0) return ERROR(srcSize_wrong);
|
||||
}
|
||||
|
||||
ip += headerSize;
|
||||
remainingSize -= headerSize;
|
||||
ip += zfh.headerSize;
|
||||
remainingSize -= zfh.headerSize;
|
||||
|
||||
/* Loop on each block */
|
||||
while (1) {
|
||||
@ -1462,7 +1453,8 @@ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
|
||||
size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
|
||||
if (ZSTD_isError(cBlockSize)) return cBlockSize;
|
||||
|
||||
if (ZSTD_blockHeaderSize + cBlockSize > remainingSize) return ERROR(srcSize_wrong);
|
||||
if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
|
||||
return ERROR(srcSize_wrong);
|
||||
|
||||
ip += ZSTD_blockHeaderSize + cBlockSize;
|
||||
remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
|
||||
@ -1470,7 +1462,7 @@ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
|
||||
if (blockProperties.lastBlock) break;
|
||||
}
|
||||
|
||||
if (fParams.checksumFlag) { /* Frame content checksum */
|
||||
if (zfh.checksumFlag) { /* Final frame content checksum */
|
||||
if (remainingSize < 4) return ERROR(srcSize_wrong);
|
||||
ip += 4;
|
||||
remainingSize -= 4;
|
||||
@ -1483,8 +1475,8 @@ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
|
||||
/*! ZSTD_decompressFrame() :
|
||||
* @dctx must be properly initialized */
|
||||
static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
|
||||
void* dst, size_t dstCapacity,
|
||||
const void** srcPtr, size_t *srcSizePtr)
|
||||
void* dst, size_t dstCapacity,
|
||||
const void** srcPtr, size_t *srcSizePtr)
|
||||
{
|
||||
const BYTE* ip = (const BYTE*)(*srcPtr);
|
||||
BYTE* const ostart = (BYTE* const)dst;
|
||||
@ -1493,13 +1485,15 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
|
||||
size_t remainingSize = *srcSizePtr;
|
||||
|
||||
/* check */
|
||||
if (remainingSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
|
||||
if (remainingSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize)
|
||||
return ERROR(srcSize_wrong);
|
||||
|
||||
/* Frame Header */
|
||||
{ size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix);
|
||||
if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
|
||||
if (remainingSize < frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
|
||||
CHECK_F(ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize));
|
||||
if (remainingSize < frameHeaderSize+ZSTD_blockHeaderSize)
|
||||
return ERROR(srcSize_wrong);
|
||||
CHECK_F( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );
|
||||
ip += frameHeaderSize; remainingSize -= frameHeaderSize;
|
||||
}
|
||||
|
||||
@ -1531,14 +1525,15 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
|
||||
}
|
||||
|
||||
if (ZSTD_isError(decodedSize)) return decodedSize;
|
||||
if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, op, decodedSize);
|
||||
if (dctx->fParams.checksumFlag)
|
||||
XXH64_update(&dctx->xxhState, op, decodedSize);
|
||||
op += decodedSize;
|
||||
ip += cBlockSize;
|
||||
remainingSize -= cBlockSize;
|
||||
if (blockProperties.lastBlock) break;
|
||||
}
|
||||
|
||||
if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
|
||||
if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
|
||||
U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
|
||||
U32 checkRead;
|
||||
if (remainingSize<4) return ERROR(checksum_wrong);
|
||||
@ -1560,17 +1555,13 @@ static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict);
|
||||
static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
|
||||
void* dst, size_t dstCapacity,
|
||||
const void* src, size_t srcSize,
|
||||
const void *dict, size_t dictSize,
|
||||
const void* dict, size_t dictSize,
|
||||
const ZSTD_DDict* ddict)
|
||||
{
|
||||
void* const dststart = dst;
|
||||
assert(dict==NULL || ddict==NULL); /* either dict or ddict set, not both */
|
||||
|
||||
if (ddict) {
|
||||
if (dict) {
|
||||
/* programmer error, these two cases should be mutually exclusive */
|
||||
return ERROR(GENERIC);
|
||||
}
|
||||
|
||||
dict = ZSTD_DDictDictContent(ddict);
|
||||
dictSize = ZSTD_DDictDictSize(ddict);
|
||||
}
|
||||
@ -1583,7 +1574,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
|
||||
size_t decodedSize;
|
||||
size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
|
||||
if (ZSTD_isError(frameSize)) return frameSize;
|
||||
/* legacy support is incompatible with static dctx */
|
||||
/* legacy support is not compatible with static dctx */
|
||||
if (dctx->staticSize) return ERROR(memory_allocation);
|
||||
|
||||
decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize);
|
||||
@ -1606,16 +1597,13 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
|
||||
return ERROR(srcSize_wrong);
|
||||
skippableSize = MEM_readLE32((const BYTE *)src + 4) +
|
||||
ZSTD_skippableHeaderSize;
|
||||
if (srcSize < skippableSize) {
|
||||
return ERROR(srcSize_wrong);
|
||||
}
|
||||
if (srcSize < skippableSize) return ERROR(srcSize_wrong);
|
||||
|
||||
src = (const BYTE *)src + skippableSize;
|
||||
srcSize -= skippableSize;
|
||||
continue;
|
||||
} else {
|
||||
return ERROR(prefix_unknown);
|
||||
}
|
||||
return ERROR(prefix_unknown);
|
||||
}
|
||||
|
||||
if (ddict) {
|
||||
@ -1631,12 +1619,11 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
|
||||
{ const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
|
||||
&src, &srcSize);
|
||||
if (ZSTD_isError(res)) return res;
|
||||
/* don't need to bounds check this, ZSTD_decompressFrame will have
|
||||
* already */
|
||||
/* no need to bound check, ZSTD_decompressFrame already has */
|
||||
dst = (BYTE*)dst + res;
|
||||
dstCapacity -= res;
|
||||
}
|
||||
}
|
||||
} /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
|
||||
|
||||
if (srcSize) return ERROR(srcSize_wrong); /* input not entirely consumed */
|
||||
|
||||
@ -1735,7 +1722,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
|
||||
return 0;
|
||||
}
|
||||
dctx->expected = 0; /* not necessary to copy more */
|
||||
|
||||
/* fall-through */
|
||||
case ZSTDds_decodeFrameHeader:
|
||||
assert(src != NULL);
|
||||
memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
|
||||
@ -1846,7 +1833,7 @@ static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dict
|
||||
/* ZSTD_loadEntropy() :
|
||||
* dict : must point at beginning of a valid zstd dictionary
|
||||
* @return : size of entropy tables read */
|
||||
static size_t ZSTD_loadEntropy(ZSTD_entropyTables_t* entropy, const void* const dict, size_t const dictSize)
|
||||
static size_t ZSTD_loadEntropy(ZSTD_entropyDTables_t* entropy, const void* const dict, size_t const dictSize)
|
||||
{
|
||||
const BYTE* dictPtr = (const BYTE*)dict;
|
||||
const BYTE* const dictEnd = dictPtr + dictSize;
|
||||
@ -1924,8 +1911,9 @@ static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict
|
||||
|
||||
size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
|
||||
{
|
||||
CHECK_F(ZSTD_decompressBegin(dctx));
|
||||
if (dict && dictSize) CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted);
|
||||
CHECK_F( ZSTD_decompressBegin(dctx) );
|
||||
if (dict && dictSize)
|
||||
CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1936,7 +1924,7 @@ struct ZSTD_DDict_s {
|
||||
void* dictBuffer;
|
||||
const void* dictContent;
|
||||
size_t dictSize;
|
||||
ZSTD_entropyTables_t entropy;
|
||||
ZSTD_entropyDTables_t entropy;
|
||||
U32 dictID;
|
||||
U32 entropyPresent;
|
||||
ZSTD_customMem cMem;
|
||||
@ -1954,7 +1942,7 @@ static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict)
|
||||
|
||||
size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dstDCtx, const ZSTD_DDict* ddict)
|
||||
{
|
||||
CHECK_F(ZSTD_decompressBegin(dstDCtx));
|
||||
CHECK_F( ZSTD_decompressBegin(dstDCtx) );
|
||||
if (ddict) { /* support begin on NULL */
|
||||
dstDCtx->dictID = ddict->dictID;
|
||||
dstDCtx->base = ddict->dictContent;
|
||||
@ -2135,7 +2123,7 @@ unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
|
||||
* ZSTD_getFrameHeader(), which will provide a more precise error code. */
|
||||
unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
|
||||
{
|
||||
ZSTD_frameHeader zfp = { 0 , 0 , 0 , 0 };
|
||||
ZSTD_frameHeader zfp = { 0, 0, ZSTD_frame, 0, 0, 0 };
|
||||
size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
|
||||
if (ZSTD_isError(hError)) return 0;
|
||||
return zfp.dictID;
|
||||
@ -2230,7 +2218,7 @@ size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds,
|
||||
{
|
||||
switch(paramType)
|
||||
{
|
||||
default : return ERROR(parameter_unknown);
|
||||
default : return ERROR(parameter_unsupported);
|
||||
case DStream_p_maxWindowSize : zds->maxWindowSize = paramValue ? paramValue : (U32)(-1); break;
|
||||
}
|
||||
return 0;
|
||||
@ -2247,16 +2235,19 @@ size_t ZSTD_estimateDStreamSize(size_t windowSize)
|
||||
size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
|
||||
size_t const inBuffSize = blockSize; /* no block can be larger */
|
||||
size_t const outBuffSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
|
||||
return sizeof(ZSTD_DStream) + ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
|
||||
return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
|
||||
}
|
||||
|
||||
ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
|
||||
{
|
||||
ZSTD_frameHeader fh;
|
||||
size_t const err = ZSTD_getFrameHeader(&fh, src, srcSize);
|
||||
U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;
|
||||
ZSTD_frameHeader zfh;
|
||||
size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
|
||||
if (ZSTD_isError(err)) return err;
|
||||
if (err>0) return ERROR(srcSize_wrong);
|
||||
return ZSTD_estimateDStreamSize(fh.windowSize);
|
||||
if (zfh.windowSize > windowSizeMax)
|
||||
return ERROR(frameParameter_windowTooLarge);
|
||||
return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
|
||||
}
|
||||
|
||||
|
||||
@ -2307,16 +2298,14 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
|
||||
size_t const dictSize = zds->ddict ? zds->ddict->dictSize : 0;
|
||||
/* legacy support is incompatible with static dctx */
|
||||
if (zds->staticSize) return ERROR(memory_allocation);
|
||||
CHECK_F(ZSTD_initLegacyStream(&zds->legacyContext, zds->previousLegacyVersion, legacyVersion,
|
||||
dict, dictSize));
|
||||
CHECK_F(ZSTD_initLegacyStream(&zds->legacyContext,
|
||||
zds->previousLegacyVersion, legacyVersion,
|
||||
dict, dictSize));
|
||||
zds->legacyVersion = zds->previousLegacyVersion = legacyVersion;
|
||||
return ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input);
|
||||
} else {
|
||||
return hSize; /* error */
|
||||
return ZSTD_decompressLegacyStream(zds->legacyContext, legacyVersion, output, input);
|
||||
}
|
||||
#else
|
||||
return hSize;
|
||||
#endif
|
||||
return hSize; /* error */
|
||||
}
|
||||
if (hSize != 0) { /* need more input */
|
||||
size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */
|
||||
@ -2367,8 +2356,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
|
||||
if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge);
|
||||
|
||||
/* Adapt buffer sizes to frame header instructions */
|
||||
{ size_t const blockSize = MIN(zds->fParams.windowSize, ZSTD_BLOCKSIZE_MAX);
|
||||
size_t const neededOutSize = zds->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
|
||||
{ size_t const blockSize = (size_t)(MIN(zds->fParams.windowSize, ZSTD_BLOCKSIZE_MAX));
|
||||
size_t const neededOutSize = (size_t)(zds->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2);
|
||||
zds->blockSize = blockSize;
|
||||
if ((zds->inBuffSize < blockSize) || (zds->outBuffSize < neededOutSize)) {
|
||||
size_t const bufferSize = blockSize + neededOutSize;
|
||||
@ -2393,7 +2382,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
|
||||
zds->outBuffSize = neededOutSize;
|
||||
} }
|
||||
zds->streamStage = zdss_read;
|
||||
/* pass-through */
|
||||
/* fall-through */
|
||||
|
||||
case zdss_read:
|
||||
DEBUGLOG(5, "stage zdss_read");
|
||||
@ -2418,8 +2407,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
|
||||
} }
|
||||
if (ip==iend) { someMoreWork = 0; break; } /* no more input */
|
||||
zds->streamStage = zdss_load;
|
||||
/* pass-through */
|
||||
|
||||
/* fall-through */
|
||||
case zdss_load:
|
||||
{ size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
|
||||
size_t const toLoad = neededInSize - zds->inPos; /* should always be <= remaining space within inBuff */
|
||||
@ -2441,8 +2429,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
|
||||
zds->outEnd = zds->outStart + decodedSize;
|
||||
} }
|
||||
zds->streamStage = zdss_flush;
|
||||
/* pass-through */
|
||||
|
||||
/* fall-through */
|
||||
case zdss_flush:
|
||||
{ size_t const toFlushSize = zds->outEnd - zds->outStart;
|
||||
size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize);
|
||||
|
thirdparty/zstd/zstd.h  (vendored, 30 changes)
@ -2,11 +2,10 @@
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 */

#if defined (__cplusplus)
extern "C" {
#endif
@ -59,7 +58,7 @@ extern "C" {
/*------ Version ------*/
#define ZSTD_VERSION_MAJOR 1
#define ZSTD_VERSION_MINOR 3
#define ZSTD_VERSION_RELEASE 0
#define ZSTD_VERSION_RELEASE 1

#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< useful to check dll version */
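(Editor's note, not part of the commit: the bump from 1.3.0 to 1.3.1 changes ZSTD_VERSION_NUMBER from 10300 to 10301. A minimal sanity check that the linked library matches the header, using only the macro and function shown above:)

    #include <assert.h>
    #include "zstd.h"

    static void check_zstd_version(void)
    {
        /* 1*100*100 + 3*100 + 1 == 10301 for v1.3.1 */
        assert(ZSTD_versionNumber() == ZSTD_VERSION_NUMBER);
    }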
@ -425,13 +424,6 @@ typedef struct {
|
||||
ZSTD_frameParameters fParams;
|
||||
} ZSTD_parameters;
|
||||
|
||||
typedef struct {
|
||||
unsigned long long frameContentSize;
|
||||
size_t windowSize;
|
||||
unsigned dictID;
|
||||
unsigned checksumFlag;
|
||||
} ZSTD_frameHeader;
|
||||
|
||||
/*= Custom memory allocation functions */
|
||||
typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
|
||||
typedef void (*ZSTD_freeFunction) (void* opaque, void* address);
|
||||
@ -809,7 +801,6 @@ ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstC
|
||||
ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
|
||||
|
||||
|
||||
|
||||
/*-
|
||||
Buffer-less streaming decompression (synchronous mode)
|
||||
|
||||
@ -874,6 +865,15 @@ ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapaci
 */

/*===== Buffer-less streaming decompression functions =====*/
typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
typedef struct {
    unsigned long long frameContentSize; /* ZSTD_CONTENTSIZE_UNKNOWN means this field is not available. 0 means "empty" */
    unsigned long long windowSize;       /* can be very large, up to <= frameContentSize */
    ZSTD_frameType_e frameType;          /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
    unsigned headerSize;
    unsigned dictID;
    unsigned checksumFlag;
} ZSTD_frameHeader;
ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
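(Editor's note, not part of the commit: a small sketch of how the reworked ZSTD_frameHeader can be consumed, assuming these declarations sit in the ZSTD_STATIC_LINKING_ONLY section of zstd.h and that `src` points at the start of a frame. A return value of 0 means the header was fully decoded, a positive value means more input is needed, and errors are reported through ZSTD_isError().)

    #define ZSTD_STATIC_LINKING_ONLY   /* assumed requirement for the advanced API */
    #include <stdio.h>
    #include "zstd.h"

    static void print_frame_info(const void* src, size_t srcSize)
    {
        ZSTD_frameHeader zfh;
        size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
        if (ZSTD_isError(ret)) { printf("not a zstd frame\n"); return; }
        if (ret > 0) { printf("incomplete header, need %u bytes\n", (unsigned)ret); return; }
        if (zfh.frameType == ZSTD_skippableFrame) {
            printf("skippable frame, %llu bytes of skippable content\n", zfh.frameContentSize);
        } else {
            printf("zstd frame: headerSize=%u windowSize=%llu contentSize=%llu checksum=%u\n",
                   zfh.headerSize, zfh.windowSize, zfh.frameContentSize, zfh.checksumFlag);
        }
    }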
@ -953,7 +953,9 @@ typedef enum {
|
||||
* Special: value 0 means "do not change strategy". */
|
||||
|
||||
/* frame parameters */
|
||||
ZSTD_p_contentSizeFlag=200, /* Content size is written into frame header _whenever known_ (default:1) */
|
||||
ZSTD_p_contentSizeFlag=200, /* Content size is written into frame header _whenever known_ (default:1)
|
||||
* note that content size must be known at the beginning,
|
||||
* it is sent using ZSTD_CCtx_setPledgedSrcSize() */
|
||||
ZSTD_p_checksumFlag, /* A 32-bits checksum of content is written at end of frame (default:0) */
|
||||
ZSTD_p_dictIDFlag, /* When applicable, dictID of dictionary is provided in frame header (default:1) */
|
||||
|