diff --git a/.circleci/config.yml b/.circleci/config.yml
index c63473785..65bb3c227 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -7,6 +7,8 @@ jobs:
     # preinstalled to reduce installation time.
     docker:
       - image: fbopensource/zstd-circleci-primary:0.0.1
+    # TODO: Re-enable aarch64 build:
+    # make aarch64build && make clean
     steps:
       - checkout
       - run:
@@ -14,12 +16,11 @@
         command: |
           ./tests/test-license.py
           cc -v; CFLAGS="-O0 -Werror -pedantic" make all && make clean
-          make c99build ; make clean
-          make c11build ; make clean
-          make aarch64build ; make clean
-          make -j regressiontest; make clean
-          make shortest ; make clean
-          make cxxtest ; make clean
+          make c99build && make clean
+          make c11build && make clean
+          make -j regressiontest&& make clean
+          make shortest && make clean
+          make cxxtest && make clean
   # the second half of the jobs are in this test
   short-tests-1:
     docker:
@@ -84,31 +85,10 @@ workflows:
   commit:
     jobs:
       # Run the tests in parallel
-      - short-tests-0:
-          filters:
-            tags:
-              only: /.*/
-      - short-tests-1:
-          filters:
-            tags:
-              only: /.*/
-      # Create a branch called regression and set it to dev to force a
-      # regression test run
-      - regression-test:
-          filters:
-            branches:
-              only:
-                - regression
-      # Only run on release tags.
-      - publish-github-release:
-          requires:
-            - short-tests-0
-            - short-tests-1
-          filters:
-            branches:
-              ignore: /.*/
-            tags:
-              only: /^v\d+\.\d+\.\d+$/
+      - short-tests-0
+      - short-tests-1
+      - regression-test
+
   nightly:
     triggers:
       - schedule:
@@ -120,7 +100,7 @@
             - dev
             - master
     jobs:
-      # Run daily long regression tests
+      # Run daily regression tests
       - regression-test
diff --git a/.github/workflows/dev-long-tests.yml b/.github/workflows/dev-long-tests.yml
new file mode 100644
index 000000000..3967d186f
--- /dev/null
+++ b/.github/workflows/dev-long-tests.yml
@@ -0,0 +1,233 @@
+name: dev-long-tests
+# Tests longer than 10mn
+
+concurrency:
+  group: long-${{ github.ref }}
+  cancel-in-progress: true
+
+on:
+  pull_request:
+    branches: [ dev, release, actionsTest ]
+
+jobs:
+  # lasts ~24mn
+  make-test:
+    runs-on: ubuntu-latest
+    env:
+      DEVNULLRIGHTS: 1
+      READFROMBLOCKDEVICE: 1
+    steps:
+    - uses: actions/checkout@v2
+    - name: make test
+      run: make test
+
+  # lasts ~26mn
+  make-test-osx:
+    runs-on: macos-latest
+    steps:
+    - uses: actions/checkout@v2
+    - name: OS-X test
+      run: make test # make -c lib all doesn't work because of the fact that it's not a tty
+
+  no-intrinsics-fuzztest:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v2
+    - name: no intrinsics fuzztest
+      run: MOREFLAGS="-DZSTD_NO_INTRINSICS" make -C tests fuzztest
+
+  tsan-zstreamtest:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v2
+    - name: thread sanitizer zstreamtest
+      run: CC=clang ZSTREAM_TESTTIME=-T3mn make tsan-test-zstream
+
+  # lasts ~15mn
+  tsan-fuzztest:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v2
+    - name: thread sanitizer fuzztest
+      run: CC=clang make tsan-fuzztest
+
+  # lasts ~23mn
+  gcc-8-asan-ubsan-testzstd:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v2
+    - name: gcc-8 + ASan + UBSan + Test Zstd
+      run: |
+        sudo apt-get -qqq update
+        make gcc8install
+        CC=gcc-8 make -j uasan-test-zstd
+
+  visual-2015:
+    # only GH actions windows-2016 contains VS 2015
+    runs-on: windows-2016
+    strategy:
+      matrix:
+        platform: [x64, Win32]
configuration: [Debug, Release] + steps: + - uses: actions/checkout@v2 + - name: Add MSBuild to PATH + uses: microsoft/setup-msbuild@v1.0.2 + - name: Build + working-directory: ${{env.GITHUB_WORKSPACE}} + run: > + msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v140 + /t:Clean,Build /p:Platform=${{matrix.platform}} /p:Configuration=${{matrix.configuration}} + + minimal-decompressor-macros: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: minimal decompressor macros + run: | + make clean && make -j all ZSTD_LIB_MINIFY=1 MOREFLAGS="-Werror" + make clean && make check ZSTD_LIB_MINIFY=1 MOREFLAGS="-Werror" + make clean && make -j all MOREFLAGS="-Werror -DHUF_FORCE_DECOMPRESS_X1 -DZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT" + make clean && make check MOREFLAGS="-Werror -DHUF_FORCE_DECOMPRESS_X1 -DZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT" + make clean && make -j all MOREFLAGS="-Werror -DHUF_FORCE_DECOMPRESS_X2 -DZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG" + make clean && make check MOREFLAGS="-Werror -DHUF_FORCE_DECOMPRESS_X2 -DZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG" + make clean && make -j all MOREFLAGS="-Werror -DZSTD_NO_INLINE -DZSTD_STRIP_ERROR_STRINGS" + make clean && make check MOREFLAGS="-Werror -DZSTD_NO_INLINE -DZSTD_STRIP_ERROR_STRINGS" + + dynamic-bmi2: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: dynamic bmi2 tests + run: | + make clean && make -j check MOREFLAGS="-O0 -Werror -mbmi2" + make clean && make -j check MOREFLAGS="-O0 -Werror -DDYNAMIC_BMI2=1" + make clean && make -j check MOREFLAGS="-O0 -Werror -DDYNAMIC_BMI2=1 -mbmi2" + make clean && make -j check MOREFLAGS="-O0 -Werror -DDYNAMIC_BMI2=0" + make clean && make -j check MOREFLAGS="-O0 -Werror -DDYNAMIC_BMI2=0 -mbmi2" + + test-variants: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: make all variants & validate + run: | + make -j -C programs allVariants MOREFLAGS=-O0 + ./tests/test-variants.sh + + + qemu-consistency: + name: QEMU ${{ matrix.name }} + runs-on: ubuntu-latest + strategy: + fail-fast: false # 'false' means Don't stop matrix workflows even if some matrix failed. 
+ matrix: + include: [ + { name: ARM, xcc_pkg: gcc-arm-linux-gnueabi, xcc: arm-linux-gnueabi-gcc, xemu_pkg: qemu-system-arm, xemu: qemu-arm-static }, + { name: ARM64, xcc_pkg: gcc-aarch64-linux-gnu, xcc: aarch64-linux-gnu-gcc, xemu_pkg: qemu-system-arm, xemu: qemu-aarch64-static }, + { name: PPC, xcc_pkg: gcc-powerpc-linux-gnu, xcc: powerpc-linux-gnu-gcc, xemu_pkg: qemu-system-ppc, xemu: qemu-ppc-static }, + { name: PPC64LE, xcc_pkg: gcc-powerpc64le-linux-gnu, xcc: powerpc64le-linux-gnu-gcc, xemu_pkg: qemu-system-ppc, xemu: qemu-ppc64le-static }, + { name: S390X, xcc_pkg: gcc-s390x-linux-gnu, xcc: s390x-linux-gnu-gcc, xemu_pkg: qemu-system-s390x, xemu: qemu-s390x-static }, + { name: MIPS, xcc_pkg: gcc-mips-linux-gnu, xcc: mips-linux-gnu-gcc, xemu_pkg: qemu-system-mips, xemu: qemu-mips-static }, + { name: M68K, xcc_pkg: gcc-m68k-linux-gnu, xcc: m68k-linux-gnu-gcc, xemu_pkg: qemu-system-m68k, xemu: qemu-m68k-static }, + ] + env: # Set environment variables + XCC: ${{ matrix.xcc }} + XEMU: ${{ matrix.xemu }} + steps: + - uses: actions/checkout@v2 # https://github.com/actions/checkout + - name: apt update & install + run: | + sudo apt-get update + sudo apt-get install gcc-multilib g++-multilib qemu-utils qemu-user-static + sudo apt-get install ${{ matrix.xcc_pkg }} ${{ matrix.xemu_pkg }} + - name: Environment info + run: | + echo && which $XCC + echo && $XCC --version + echo && $XCC -v # Show built-in specs + echo && which $XEMU + echo && $XEMU --version + - name: ARM + if: ${{ matrix.name == 'ARM' }} + run: | + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check + - name: ARM64 + if: ${{ matrix.name == 'ARM64' }} + run: | + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check + - name: PPC + if: ${{ matrix.name == 'PPC' }} + run: | + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check + - name: PPC64LE + if: ${{ matrix.name == 'PPC64LE' }} + run: | + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check + - name: S390X + if: ${{ matrix.name == 'S390X' }} + run: | + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check + - name: MIPS + if: ${{ matrix.name == 'MIPS' }} + run: | + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check + - name: M68K + if: ${{ matrix.name == 'M68K' }} + continue-on-error: true # disable reporting errors (alignment issues) + run: | + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check + + mingw-short-test: + runs-on: windows-latest + strategy: + fail-fast: false + matrix: + include: [ + { compiler: gcc, platform: x64, script: "CFLAGS=-Werror make -j allzstd DEBUGLEVEL=2"}, + { compiler: gcc, platform: x86, script: "CFLAGS=-Werror make -j allzstd"}, + { compiler: clang, platform: x64, script: "CFLAGS='--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion' make -j allzstd V=1"}, + ] + steps: + - uses: actions/checkout@v2 + - name: Mingw short test + run: | + ECHO "Building ${{matrix.compiler}} ${{matrix.platform}}" + $env:PATH_ORIGINAL = $env:PATH + $env:PATH_MINGW32 = "C:\msys64\mingw32\bin" + $env:PATH_MINGW64 = "C:\msys64\mingw64\bin" + COPY C:\msys64\usr\bin\make.exe C:\msys64\mingw32\bin\make.exe + COPY C:\msys64\usr\bin\make.exe C:\msys64\mingw64\bin\make.exe + IF ("${{matrix.platform}}" -eq "x64") + { + $env:PATH = $env:PATH_MINGW64 + ";" + $env:PATH_ORIGINAL + } + ELSEIF ("${{matrix.platform}}" -eq "x86") + { + $env:PATH = $env:PATH_MINGW32 + ";" + $env:PATH_ORIGINAL + } + make -v + sh -c "${{matrix.compiler}} -v" + $env:CC = "${{matrix.compiler}}" + sh -c "${{matrix.script}}" + ECHO "Testing 
${{matrix.compiler}} ${{matrix.platform}}" + make clean + make check + + + visual-runtime-tests: + runs-on: windows-latest + strategy: + matrix: + platform: [x64, Win32] + configuration: [Release] + steps: + - uses: actions/checkout@v2 + - name: Add MSBuild to PATH + uses: microsoft/setup-msbuild@v1.0.2 + - name: Build and run tests + working-directory: ${{env.GITHUB_WORKSPACE}} + env: + ZSTD_BIN: ./zstd.exe + DATAGEN_BIN: ./datagen.exe + # See https://docs.microsoft.com/visualstudio/msbuild/msbuild-command-line-reference + run: | + msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v142 /t:Clean,Build /p:Platform=${{matrix.platform}} /p:Configuration=${{matrix.configuration}} + COPY build\VS2010\bin\${{matrix.platform}}_${{matrix.configuration}}\*.exe tests\ + CD tests + sh -e playTests.sh + DIR + .\fuzzer.exe -T2m + +# This test currently fails on Github Actions specifically. +# Possible reason : TTY emulation. +# Note that the same test works fine locally and on travisCI. +# This will have to be fixed before transferring the test to GA. +# versions-compatibility: +# runs-on: ubuntu-latest +# steps: +# - uses: actions/checkout@v2 +# - name: Versions Compatibility Test +# run: | +# make -C tests versionsTest + + +# For reference : icc tests +# icc tests are currently failing on Github Actions, likely to issues during installation stage +# To be fixed later +# +# icc: +# name: icc-check +# runs-on: ubuntu-latest +# steps: +# - name: install icc +# run: | +# export DEBIAN_FRONTEND=noninteractive +# sudo apt-get -qqq update +# sudo apt-get install -y wget build-essential pkg-config cmake ca-certificates gnupg +# sudo wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB +# sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB +# sudo add-apt-repository "deb https://apt.repos.intel.com/oneapi all main" +# sudo apt-get update +# sudo apt-get install -y intel-basekit intel-hpckit +# - uses: actions/checkout@v2 +# - name: make check +# run: | +# make CC=/opt/intel/oneapi/compiler/latest/linux/bin/intel64/icc check diff --git a/.github/workflows/generic-dev.yml b/.github/workflows/generic-dev.yml deleted file mode 100644 index ae4ee7e9f..000000000 --- a/.github/workflows/generic-dev.yml +++ /dev/null @@ -1,225 +0,0 @@ -name: generic-dev - -on: - pull_request: - branches: [ dev, release, actionsTest ] - -jobs: - -# Dev PR jobs that still have to be migrated from travis -# -# versionTag (only on release tags) -# valgrindTest (keeps failing for some reason. 
need investigation) -# staticAnalyze (need trusty so need self-hosted) -# pcc-fuzz: (need trusty so need self-hosted) -# min-decomp-macros (flakey) -# -# setting up self-hosted is pretty straightforward, but -# I need admins permissions to the repo for that it looks like -# So I'm tabling that for now -# -# The release branch exclusive jobs will be in a separate -# workflow file (the osx tests and meson build that is) - - benchmarking: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: make benchmarking - run: make benchmarking - - test: - runs-on: ubuntu-latest - env: - DEVNULLRIGHTS: 1 - READFROMBLOCKDEVICE: 1 - steps: - - uses: actions/checkout@v2 - - name: make test - run: make test - - check-32bit: # designed to catch https://github.com/facebook/zstd/issues/2428 - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: make check on 32-bit - run: | - sudo apt update - APT_PACKAGES="gcc-multilib" make apt-install - CFLAGS="-m32 -O1 -fstack-protector" make check V=1 - - gcc-7-libzstd: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: gcc-7 + libzstdmt compilation - run: | - make gcc7install - CC=gcc-7 CFLAGS=-Werror make -j all - make clean - LDFLAGS=-Wl,--no-undefined make -C lib libzstd-mt - - # candidate test (to check) : underlink test - # LDFLAGS=-Wl,--no-undefined : will make the linker fail if dll is underlinked - - gcc-8-asan-ubsan-testzstd: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: gcc-8 + ASan + UBSan + Test Zstd - run: | - make gcc8install - CC=gcc-8 CFLAGS="-Werror" make -j all - make clean - CC=gcc-8 make -j uasan-test-zstd git push origin ``` @@ -104,7 +104,7 @@ Our contribution process works in three main stages: issue at hand, then please indicate this by requesting that an issue be closed by commenting. * Just because your changes have been merged does not mean the topic or larger issue is complete. Remember that the change must make it to an official zstd release for it to be meaningful. We recommend - that contributers track the activity on their pull request and corresponding issue(s) page(s) until + that contributors track the activity on their pull request and corresponding issue(s) page(s) until their change makes it to the next release of zstd. Users will often discover bugs in your code or suggest ways to refine and improve your initial changes even after the pull request is merged. @@ -270,15 +270,15 @@ for level 1 compression on Zstd. Typically this means, you have identified a sec code that you think can be made to run faster. The first thing you will want to do is make sure that the piece of code is actually taking up -a notable amount of time to run. It is usually not worth optimzing something which accounts for less than +a notable amount of time to run. It is usually not worth optimizing something which accounts for less than 0.0001% of the total running time. Luckily, there are tools to help with this. Profilers will let you see how much time your code spends inside a particular function. -If your target code snippit is only part of a function, it might be worth trying to -isolate that snippit by moving it to its own function (this is usually not necessary but +If your target code snippet is only part of a function, it might be worth trying to +isolate that snippet by moving it to its own function (this is usually not necessary but might be). -Most profilers (including the profilers dicusssed below) will generate a call graph of -functions for you. 
Your goal will be to find your function of interest in this call grapch +Most profilers (including the profilers discussed below) will generate a call graph of +functions for you. Your goal will be to find your function of interest in this call graph and then inspect the time spent inside of it. You might also want to to look at the annotated assembly which most profilers will provide you with. @@ -301,16 +301,16 @@ $ zstd -b1 -i5 # this will run for 5 seconds 5. Once you run your benchmarking script, switch back over to instruments and attach your process to the time profiler. You can do this by: * Clicking on the `All Processes` drop down in the top left of the toolbar. - * Selecting your process from the dropdown. In my case, it is just going to be labled + * Selecting your process from the dropdown. In my case, it is just going to be labeled `zstd` * Hitting the bright red record circle button on the top left of the toolbar -6. You profiler will now start collecting metrics from your bencharking script. Once +6. You profiler will now start collecting metrics from your benchmarking script. Once you think you have collected enough samples (usually this is the case after 3 seconds of recording), stop your profiler. 7. Make sure that in toolbar of the bottom window, `profile` is selected. 8. You should be able to see your call graph. * If you don't see the call graph or an incomplete call graph, make sure you have compiled - zstd and your benchmarking scripg using debug flags. On mac and linux, this just means + zstd and your benchmarking script using debug flags. On mac and linux, this just means you will have to supply the `-g` flag alone with your build script. You might also have to provide the `-fno-omit-frame-pointer` flag 9. Dig down the graph to find your function call and then inspect it by double clicking @@ -329,7 +329,7 @@ Some general notes on perf: counter statistics. Perf uses a high resolution timer and this is likely one of the first things your team will run when assessing your PR. * Perf has a long list of hardware counters that can be viewed with `perf --list`. -When measuring optimizations, something worth trying is to make sure the handware +When measuring optimizations, something worth trying is to make sure the hardware counters you expect to be impacted by your change are in fact being so. For example, if you expect the L1 cache misses to decrease with your change, you can look at the counter `L1-dcache-load-misses` @@ -368,7 +368,7 @@ Follow these steps to link travis-ci with your github fork of zstd TODO ### appveyor -Follow these steps to link circle-ci with your girhub fork of zstd +Follow these steps to link circle-ci with your github fork of zstd 1. Make sure you are logged into your github account 2. 
Go to https://www.appveyor.com/ diff --git a/Makefile b/Makefile index c1908f0a1..77bb4ce87 100644 --- a/Makefile +++ b/Makefile @@ -148,7 +148,7 @@ clean: #------------------------------------------------------------------------------ # make install is validated only for Linux, macOS, Hurd and some BSD targets #------------------------------------------------------------------------------ -ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD DragonFly NetBSD MSYS_NT Haiku)) +ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD DragonFly NetBSD MSYS_NT Haiku AIX)) HOST_OS = POSIX @@ -217,7 +217,7 @@ armbuild: clean CC=arm-linux-gnueabi-gcc CFLAGS="-Werror" $(MAKE) allzstd aarch64build: clean - CC=aarch64-linux-gnu-gcc CFLAGS="-Werror" $(MAKE) allzstd + CC=aarch64-linux-gnu-gcc CFLAGS="-Werror -O0" $(MAKE) allzstd ppcbuild: clean CC=powerpc-linux-gnu-gcc CFLAGS="-m32 -Wno-attributes -Werror" $(MAKE) -j allzstd @@ -381,23 +381,23 @@ cmakebuild: c89build: clean $(CC) -v - CFLAGS="-std=c89 -Werror" $(MAKE) allmost # will fail, due to missing support for `long long` + CFLAGS="-std=c89 -Werror -O0" $(MAKE) allmost # will fail, due to missing support for `long long` gnu90build: clean $(CC) -v - CFLAGS="-std=gnu90 -Werror" $(MAKE) allmost + CFLAGS="-std=gnu90 -Werror -O0" $(MAKE) allmost c99build: clean $(CC) -v - CFLAGS="-std=c99 -Werror" $(MAKE) allmost + CFLAGS="-std=c99 -Werror -O0" $(MAKE) allmost gnu99build: clean $(CC) -v - CFLAGS="-std=gnu99 -Werror" $(MAKE) allmost + CFLAGS="-std=gnu99 -Werror -O0" $(MAKE) allmost c11build: clean $(CC) -v - CFLAGS="-std=c11 -Werror" $(MAKE) allmost + CFLAGS="-std=c11 -Werror -O0" $(MAKE) allmost bmix64build: clean $(CC) -v @@ -416,5 +416,5 @@ bmi32build: clean staticAnalyze: SCANBUILD ?= scan-build staticAnalyze: $(CC) -v - CC=$(CC) CPPFLAGS=-g $(SCANBUILD) --status-bugs -v $(MAKE) allzstd examples contrib + CC=$(CC) CPPFLAGS=-g $(SCANBUILD) --status-bugs -v $(MAKE) zstd endif diff --git a/Package.swift b/Package.swift new file mode 100644 index 000000000..75a5a0b33 --- /dev/null +++ b/Package.swift @@ -0,0 +1,36 @@ +// swift-tools-version:5.0 +// The swift-tools-version declares the minimum version of Swift required to build this package. + +import PackageDescription + +let package = Package( + name: "zstd", + platforms: [ + .macOS(.v10_10), .iOS(.v9), .tvOS(.v9) + ], + products: [ + // Products define the executables and libraries a package produces, and make them visible to other packages. + .library( + name: "libzstd", + targets: [ "libzstd" ]) + ], + dependencies: [ + // Dependencies declare other packages that this package depends on. + // .package(url: /* package url */, from: "1.0.0"), + ], + targets: [ + // Targets are the basic building blocks of a package. A target can define a module or a test suite. + // Targets can depend on other targets in this package, and on products in packages this package depends on. + .target( + name: "libzstd", + path: "lib", + sources: [ "common", "compress", "decompress", "dictBuilder" ], + publicHeadersPath: "modulemap", + cSettings: [ + .headerSearchPath(".") + ]) + ], + swiftLanguageVersions: [.v5], + cLanguageStandard: .gnu11, + cxxLanguageStandard: .gnucxx14 +) diff --git a/README.md b/README.md index dcca7662d..44cce47ee 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,8 @@ __Zstandard__, or `zstd` as short version, is a fast lossless compression algori targeting real-time compression scenarios at zlib-level and better compression ratios. 
It's backed by a very fast entropy stage, provided by [Huff0 and FSE library](https://github.com/Cyan4973/FiniteStateEntropy). -The project is provided as an open-source dual [BSD](LICENSE) and [GPLv2](COPYING) licensed **C** library, +Zstandard's format is stable and documented in [RFC8878](https://datatracker.ietf.org/doc/html/rfc8878). Multiple independent implementations are already available. +This repository represents the reference implementation, provided as an open-source dual [BSD](LICENSE) and [GPLv2](COPYING) licensed **C** library, and a command line utility producing and decoding `.zst`, `.gz`, `.xz` and `.lz4` files. Should your project require another programming language, a list of known ports and bindings is provided on [Zstandard homepage](http://www.zstd.net/#other-languages). @@ -17,8 +18,8 @@ a list of known ports and bindings is provided on [Zstandard homepage](http://ww [![Build status][CirrusDevBadge]][CirrusLink] [![Fuzzing Status][OSSFuzzBadge]][OSSFuzzLink] -[travisDevBadge]: https://travis-ci.org/facebook/zstd.svg?branch=dev "Continuous Integration test suite" -[travisLink]: https://travis-ci.org/facebook/zstd +[travisDevBadge]: https://api.travis-ci.com/facebook/zstd.svg?branch=dev "Continuous Integration test suite" +[travisLink]: https://travis-ci.com/facebook/zstd [AppveyorDevBadge]: https://ci.appveyor.com/api/projects/status/xt38wbdxjk5mrbem/branch/dev?svg=true "Windows test suite" [AppveyorLink]: https://ci.appveyor.com/project/YannCollet/zstd-p0yf0 [CircleDevBadge]: https://circleci.com/gh/facebook/zstd/tree/dev.svg?style=shield "Short test suite" diff --git a/appveyor.yml b/appveyor.yml index c6ab78688..c58ef91a1 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -23,34 +23,6 @@ SCRIPT: "make allzstd MOREFLAGS=-static" ARTIFACT: "true" BUILD: "true" - - COMPILER: "clang" - HOST: "mingw" - PLATFORM: "x64" - SCRIPT: "MOREFLAGS='--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion' make -j allzstd V=1" - BUILD: "true" - - - COMPILER: "gcc" - HOST: "mingw" - PLATFORM: "x64" - SCRIPT: "" - TEST: "cmake" - - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "x64" - CONFIGURATION: "Debug" - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "Win32" - CONFIGURATION: "Debug" - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "x64" - CONFIGURATION: "Release" - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "Win32" - CONFIGURATION: "Release" - COMPILER: "clang-cl" HOST: "cmake-visual" @@ -113,56 +85,6 @@ appveyor PushArtifact zstd-win-release-%PLATFORM%.zip ) ) - - if [%HOST%]==[visual] ( - ECHO *** && - ECHO *** Building Visual Studio 2008 %PLATFORM%\%CONFIGURATION% in %APPVEYOR_BUILD_FOLDER% && - ECHO *** && - msbuild "build\VS2008\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v90 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2008\bin\%PLATFORM%\%CONFIGURATION%\*.exe && - MD5sum build/VS2008/bin/%PLATFORM%/%CONFIGURATION%/*.exe && - COPY build\VS2008\bin\%PLATFORM%\%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2008_%PLATFORM%_%CONFIGURATION%.exe && - ECHO *** && - ECHO *** Building Visual Studio 2010 %PLATFORM%\%CONFIGURATION% && - ECHO *** && - msbuild "build\VS2010\zstd.sln" %ADDITIONALPARAM% /m /verbosity:minimal /property:PlatformToolset=v100 /p:ForceImportBeforeCppTargets=%APPVEYOR_BUILD_FOLDER%\build\VS2010\CompileAsCpp.props /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program 
Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - msbuild "build\VS2010\zstd.sln" %ADDITIONALPARAM% /m /verbosity:minimal /property:PlatformToolset=v100 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2010_%PLATFORM%_%CONFIGURATION%.exe && - ECHO *** && - ECHO *** Building Visual Studio 2012 %PLATFORM%\%CONFIGURATION% && - ECHO *** && - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v110 /p:ForceImportBeforeCppTargets=%APPVEYOR_BUILD_FOLDER%\build\VS2010\CompileAsCpp.props /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v110 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2012_%PLATFORM%_%CONFIGURATION%.exe && - ECHO *** && - ECHO *** Building Visual Studio 2013 %PLATFORM%\%CONFIGURATION% && - ECHO *** && - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v120 /p:ForceImportBeforeCppTargets=%APPVEYOR_BUILD_FOLDER%\build\VS2010\CompileAsCpp.props /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v120 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2013_%PLATFORM%_%CONFIGURATION%.exe && - ECHO *** && - ECHO *** Building Visual Studio 2015 %PLATFORM%\%CONFIGURATION% && - ECHO *** && - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v140 /p:ForceImportBeforeCppTargets=%APPVEYOR_BUILD_FOLDER%\build\VS2010\CompileAsCpp.props /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v140 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum 
build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2015_%PLATFORM%_%CONFIGURATION%.exe && - COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe tests\ - ) - if [%HOST%]==[cmake-visual] ( ECHO *** && ECHO *** Building %CMAKE_GENERATOR% ^(%CMAKE_GENERATOR_TOOLSET%^) %PLATFORM%\%CONFIGURATION% && @@ -186,18 +108,6 @@ cd ..\..\.. && make clean ) - - SET "FUZZERTEST=-T30s" - - if [%HOST%]==[visual] if [%CONFIGURATION%]==[Release] ( - CD tests && - SET ZSTD_BIN=./zstd.exe&& - SET DATAGEN_BIN=./datagen.exe&& - sh -e playTests.sh --test-large-data && - fullbench.exe -i1 && - fullbench.exe -i1 -P0 && - fuzzer_VS2012_%PLATFORM%_Release.exe %FUZZERTEST% && - fuzzer_VS2013_%PLATFORM%_Release.exe %FUZZERTEST% && - fuzzer_VS2015_%PLATFORM%_Release.exe %FUZZERTEST% - ) # The following tests are for regular pushes @@ -208,38 +118,26 @@ version: 1.0.{build} environment: matrix: + - COMPILER: "visual" + HOST: "visual" + PLATFORM: "x64" + CONFIGURATION: "Debug" + - COMPILER: "visual" + HOST: "visual" + PLATFORM: "Win32" + CONFIGURATION: "Debug" + - COMPILER: "visual" + HOST: "visual" + PLATFORM: "x64" + CONFIGURATION: "Release" + - COMPILER: "visual" + HOST: "visual" + PLATFORM: "Win32" + CONFIGURATION: "Release" + - COMPILER: "gcc" HOST: "cygwin" PLATFORM: "x64" - - COMPILER: "gcc" - HOST: "mingw" - PLATFORM: "x64" - SCRIPT: "CFLAGS=-Werror make -j allzstd DEBUGLEVEL=2" - - COMPILER: "gcc" - HOST: "mingw" - PLATFORM: "x86" - SCRIPT: "CFLAGS=-Werror make -j allzstd" - - COMPILER: "clang" - HOST: "mingw" - PLATFORM: "x64" - SCRIPT: "CFLAGS='--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion' make -j allzstd V=1" - - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "x64" - CONFIGURATION: "Debug" - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "Win32" - CONFIGURATION: "Debug" - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "x64" - CONFIGURATION: "Release" - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "Win32" - CONFIGURATION: "Release" - COMPILER: "clang-cl" HOST: "cmake-visual" @@ -260,12 +158,6 @@ cmake,^ make ) - - if [%HOST%]==[mingw] ( - SET "PATH_MINGW32=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin" && - SET "PATH_MINGW64=C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin" && - COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin\make.exe && - COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin\make.exe - ) - IF [%HOST%]==[visual] IF [%PLATFORM%]==[x64] ( SET ADDITIONALPARAM=/p:LibraryPath="C:\Program Files\Microsoft SDKs\Windows\v7.1\lib\x64;c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\lib\amd64;C:\Program Files (x86)\Microsoft Visual Studio 10.0\;C:\Program Files (x86)\Microsoft Visual Studio 10.0\lib\amd64;" ) @@ -283,30 +175,6 @@ ctest -V -L Medium; " ) - - if [%HOST%]==[mingw] ( - ( if [%PLATFORM%]==[x64] ( - SET "PATH=%PATH_MINGW64%;%PATH_ORIGINAL%" - ) else if [%PLATFORM%]==[x86] ( - SET "PATH=%PATH_MINGW32%;%PATH_ORIGINAL%" - ) ) && - make -v && - sh -c "%COMPILER% -v" && - set "CC=%COMPILER%" && - sh -c "%SCRIPT%" - ) - - if [%HOST%]==[visual] ( - ECHO *** && - ECHO *** Building Visual Studio 2015 %PLATFORM%\%CONFIGURATION% && - ECHO *** && - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v140 /p:ForceImportBeforeCppTargets=%APPVEYOR_BUILD_FOLDER%\build\VS2010\CompileAsCpp.props /t:Clean,Build /p:Platform=%PLATFORM% 
/p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v140 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && - COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2015_%PLATFORM%_%CONFIGURATION%.exe && - COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe tests\ - ) - if [%HOST%]==[cmake-visual] ( ECHO *** && ECHO *** Building %CMAKE_GENERATOR% ^(%CMAKE_GENERATOR_TOOLSET%^) %PLATFORM%\%CONFIGURATION% && @@ -316,12 +184,22 @@ POPD && ECHO *** ) + - if [%HOST%]==[visual] ( + ECHO *** && + ECHO *** Building Visual Studio 2012 %PLATFORM%\%CONFIGURATION% && + ECHO *** && + msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v110 /p:ForceImportBeforeCppTargets=%APPVEYOR_BUILD_FOLDER%\build\VS2010\CompileAsCpp.props /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && + DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && + msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v110 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && + DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe + ) test_script: - ECHO Testing %COMPILER% %PLATFORM% %CONFIGURATION% + - SET "FUZZERTEST=-T10s" - if [%HOST%]==[mingw] ( set "CC=%COMPILER%" && make clean && make check - ) + ) \ No newline at end of file diff --git a/build/VS_scripts/build.generic.cmd b/build/VS_scripts/build.generic.cmd index a7ca4d067..b24e6ed4b 100644 --- a/build/VS_scripts/build.generic.cmd +++ b/build/VS_scripts/build.generic.cmd @@ -19,10 +19,10 @@ GOTO build :display_help echo Syntax: build.generic.cmd msbuild_version msbuild_platform msbuild_configuration msbuild_toolset -echo msbuild_version: VS installed version (VS2012, VS2013, VS2015, VS2017, ...) +echo msbuild_version: VS installed version (VS2012, VS2013, VS2015, VS2017, VS2019, ...) echo msbuild_platform: Platform (x64 or Win32) echo msbuild_configuration: VS configuration (Release or Debug) -echo msbuild_toolset: Platform Toolset (v100, v110, v120, v140, v141) +echo msbuild_toolset: Platform Toolset (v100, v110, v120, v140, v141, v142, ...) 
EXIT /B 1 @@ -43,6 +43,16 @@ IF %msbuild_version% == VS2017 ( IF EXIST %msbuild_vs2017enterprise% SET msbuild=%msbuild_vs2017enterprise% ) +:: VS2019 +SET msbuild_vs2019community="%programfiles(x86)%\Microsoft Visual Studio\2019\Community\MSBuild\Current\Bin\MSBuild.exe" +SET msbuild_vs2019professional="%programfiles(x86)%\Microsoft Visual Studio\2019\Professional\MSBuild\Current\Bin\MSBuild.exe" +SET msbuild_vs2019enterprise="%programfiles(x86)%\Microsoft Visual Studio\2019\Enterprise\MSBuild\Current\Bin\MSBuild.exe" +IF %msbuild_version% == VS2019 ( + IF EXIST %msbuild_vs2019community% SET msbuild=%msbuild_vs2019community% + IF EXIST %msbuild_vs2019professional% SET msbuild=%msbuild_vs2019professional% + IF EXIST %msbuild_vs2019enterprise% SET msbuild=%msbuild_vs2019enterprise% +) + SET project="%~p0\..\VS2010\zstd.sln" SET msbuild_params=/verbosity:minimal /nologo /t:Clean,Build /p:Platform=%msbuild_platform% /p:Configuration=%msbuild_configuration% diff --git a/build/cmake/contrib/pzstd/CMakeLists.txt b/build/cmake/contrib/pzstd/CMakeLists.txt index 5c30a91b1..27af86c88 100644 --- a/build/cmake/contrib/pzstd/CMakeLists.txt +++ b/build/cmake/contrib/pzstd/CMakeLists.txt @@ -21,10 +21,16 @@ add_executable(pzstd ${PROGRAMS_DIR}/util.c ${PZSTD_DIR}/main.cpp ${PZSTD_DIR}/O set_property(TARGET pzstd APPEND PROPERTY COMPILE_DEFINITIONS "NDEBUG") set_property(TARGET pzstd APPEND PROPERTY COMPILE_OPTIONS "-Wno-shadow") +if (ZSTD_BUILD_SHARED) + set(ZSTD_LIB libzstd_shared) +else() + set(ZSTD_LIB libzstd_static) +endif() + set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) if (CMAKE_USE_PTHREADS_INIT) - target_link_libraries(pzstd libzstd_shared ${CMAKE_THREAD_LIBS_INIT}) + target_link_libraries(pzstd ${ZSTD_LIB} ${CMAKE_THREAD_LIBS_INIT}) else() message(SEND_ERROR "ZSTD currently does not support thread libraries other than pthreads") endif() diff --git a/build/cmake/lib/CMakeLists.txt b/build/cmake/lib/CMakeLists.txt index 5f756652e..7ba469330 100644 --- a/build/cmake/lib/CMakeLists.txt +++ b/build/cmake/lib/CMakeLists.txt @@ -7,7 +7,7 @@ # in the COPYING file in the root directory of this source tree). 
# ################################################################ -project(libzstd C) +project(libzstd C ASM) set(CMAKE_INCLUDE_CURRENT_DIR TRUE) option(ZSTD_BUILD_STATIC "BUILD STATIC LIBRARIES" ON) @@ -22,7 +22,7 @@ include_directories(${LIBRARY_DIR} ${LIBRARY_DIR}/common) file(GLOB CommonSources ${LIBRARY_DIR}/common/*.c) file(GLOB CompressSources ${LIBRARY_DIR}/compress/*.c) -file(GLOB DecompressSources ${LIBRARY_DIR}/decompress/*.c) +file(GLOB DecompressSources ${LIBRARY_DIR}/decompress/*.c ${LIBRARY_DIR}/decompress/*.S) file(GLOB DictBuilderSources ${LIBRARY_DIR}/dictBuilder/*.c) set(Sources @@ -106,7 +106,7 @@ if (MSVC) endif () # With MSVC static library needs to be renamed to avoid conflict with import library -if (MSVC) +if (MSVC OR (WIN32 AND CMAKE_CXX_COMPILER_ID STREQUAL "Clang")) set(STATIC_LIBRARY_BASE_NAME zstd_static) else () set(STATIC_LIBRARY_BASE_NAME zstd) @@ -168,6 +168,7 @@ install(TARGETS ${library_targets} ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}" LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}" RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}" + BUNDLE DESTINATION "${CMAKE_INSTALL_BINDIR}" ) # uninstall target diff --git a/build/cmake/programs/CMakeLists.txt b/build/cmake/programs/CMakeLists.txt index f1d127746..490030783 100644 --- a/build/cmake/programs/CMakeLists.txt +++ b/build/cmake/programs/CMakeLists.txt @@ -37,7 +37,9 @@ target_link_libraries(zstd ${PROGRAMS_ZSTD_LINK_TARGET}) if (CMAKE_SYSTEM_NAME MATCHES "(Solaris|SunOS)") target_link_libraries(zstd rt) endif () -install(TARGETS zstd RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}") +install(TARGETS zstd + RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}" + BUNDLE DESTINATION "${CMAKE_INSTALL_BINDIR}") if (UNIX) add_custom_target(zstdcat ALL ${CMAKE_COMMAND} -E create_symlink zstd zstdcat DEPENDS zstd COMMENT "Creating zstdcat symlink") diff --git a/build/meson/contrib/pzstd/meson.build b/build/meson/contrib/pzstd/meson.build index dcf2136db..2c47999fa 100644 --- a/build/meson/contrib/pzstd/meson.build +++ b/build/meson/contrib/pzstd/meson.build @@ -18,7 +18,7 @@ pzstd_sources = [join_paths(zstd_rootdir, 'programs/util.c'), join_paths(zstd_rootdir, 'contrib/pzstd/SkippableFrame.cpp')] pzstd = executable('pzstd', pzstd_sources, - cpp_args: [ '-DNDEBUG', '-Wno-shadow', '-pedantic', '-Wno-deprecated-declarations' ], + cpp_args: [ '-DNDEBUG', '-Wno-shadow', '-Wno-deprecated-declarations' ], include_directories: pzstd_includes, dependencies: [ libzstd_dep, thread_dep ], install: true) diff --git a/build/meson/lib/meson.build b/build/meson/lib/meson.build index 5cc9fee86..1f4f8c25d 100644 --- a/build/meson/lib/meson.build +++ b/build/meson/lib/meson.build @@ -37,6 +37,7 @@ libzstd_sources = [join_paths(zstd_rootdir, 'lib/common/entropy_common.c'), join_paths(zstd_rootdir, 'lib/compress/zstd_opt.c'), join_paths(zstd_rootdir, 'lib/compress/zstd_ldm.c'), join_paths(zstd_rootdir, 'lib/decompress/huf_decompress.c'), + join_paths(zstd_rootdir, 'lib/decompress/huf_decompress_amd64.S'), join_paths(zstd_rootdir, 'lib/decompress/zstd_decompress.c'), join_paths(zstd_rootdir, 'lib/decompress/zstd_decompress_block.c'), join_paths(zstd_rootdir, 'lib/decompress/zstd_ddict.c'), @@ -108,6 +109,7 @@ libzstd = library('zstd', libzstd_sources, include_directories: libzstd_includes, c_args: libzstd_c_args, + gnu_symbol_visibility: 'hidden', dependencies: libzstd_deps, install: true, version: zstd_libversion) diff --git a/build/meson/meson.build b/build/meson/meson.build index 2a425b2fa..0c29a7621 100644 --- a/build/meson/meson.build +++ 
b/build/meson/meson.build @@ -14,10 +14,14 @@ project('zstd', default_options : [ 'c_std=gnu99', 'cpp_std=c++11', - 'buildtype=release' + 'buildtype=release', + 'warning_level=3', + # -Wdocumentation does not actually pass, nor do the test binaries, + # so this isn't safe + #'werror=true' ], version: 'DUMMY', - meson_version: '>=0.47.0') + meson_version: '>=0.48.0') cc = meson.get_compiler('c') cxx = meson.get_compiler('cpp') @@ -106,10 +110,8 @@ use_lz4 = lz4_dep.found() add_project_arguments('-DXXH_NAMESPACE=ZSTD_', language: ['c']) if [compiler_gcc, compiler_clang].contains(cc_id) - common_warning_flags = [ '-Wextra', '-Wundef', '-Wshadow', '-Wcast-align', '-Wcast-qual' ] + common_warning_flags = [ '-Wundef', '-Wshadow', '-Wcast-align', '-Wcast-qual' ] if cc_id == compiler_clang - # Should use Meson's own --werror build option - #common_warning_flags += '-Werror' common_warning_flags += ['-Wconversion', '-Wno-sign-conversion', '-Wdocumentation'] endif cc_compile_flags = cc.get_supported_arguments(common_warning_flags + ['-Wstrict-prototypes']) diff --git a/build/meson/meson_options.txt b/build/meson/meson_options.txt index 90a81c539..accf3fa10 100644 --- a/build/meson/meson_options.txt +++ b/build/meson/meson_options.txt @@ -10,7 +10,7 @@ # Read guidelines from https://wiki.gnome.org/Initiatives/GnomeGoals/MesonPorting -option('legacy_level', type: 'integer', min: 0, max: 7, value: '5', +option('legacy_level', type: 'integer', min: 0, max: 7, value: 5, description: 'Support any legacy format: 7 to 1 for v0.7+ to v0.1+') option('debug_level', type: 'integer', min: 0, max: 9, value: 1, description: 'Enable run-time debug. See lib/common/debug.h') diff --git a/build/meson/programs/meson.build b/build/meson/programs/meson.build index d255627cd..4181030c2 100644 --- a/build/meson/programs/meson.build +++ b/build/meson/programs/meson.build @@ -18,7 +18,9 @@ zstd_programs_sources = [join_paths(zstd_rootdir, 'programs/zstdcli.c'), join_paths(zstd_rootdir, 'programs/benchzstd.c'), join_paths(zstd_rootdir, 'programs/datagen.c'), join_paths(zstd_rootdir, 'programs/dibio.c'), - join_paths(zstd_rootdir, 'programs/zstdcli_trace.c')] + join_paths(zstd_rootdir, 'programs/zstdcli_trace.c'), + # needed due to use of private symbol + -fvisibility=hidden + join_paths(zstd_rootdir, 'lib/common/xxhash.c')] zstd_c_args = libzstd_debug_cflags if use_multi_thread diff --git a/build/meson/tests/meson.build b/build/meson/tests/meson.build index 1b233630a..14f45982a 100644 --- a/build/meson/tests/meson.build +++ b/build/meson/tests/meson.build @@ -29,64 +29,62 @@ ZSTDRTTEST = ['--test-large-data'] test_includes = [ include_directories(join_paths(zstd_rootdir, 'programs')) ] -datagen_sources = [join_paths(zstd_rootdir, 'programs/datagen.c'), - join_paths(zstd_rootdir, 'tests/datagencli.c')] +testcommon_sources = [join_paths(zstd_rootdir, 'programs/datagen.c'), + join_paths(zstd_rootdir, 'programs/util.c'), + join_paths(zstd_rootdir, 'programs/timefn.c'), + join_paths(zstd_rootdir, 'programs/benchfn.c'), + join_paths(zstd_rootdir, 'programs/benchzstd.c')] + +testcommon = static_library('testcommon', + testcommon_sources, + # needed due to use of private symbol + -fvisibility=hidden + objects: libzstd.extract_all_objects(recursive: false)) + +testcommon_dep = declare_dependency(link_with: testcommon, + dependencies: libzstd_deps, + include_directories: libzstd_includes) + +datagen_sources = [join_paths(zstd_rootdir, 'tests/datagencli.c')] datagen = executable('datagen', datagen_sources, c_args: [ '-DNDEBUG' ], 
include_directories: test_includes, - dependencies: libzstd_dep, + dependencies: testcommon_dep, install: false) -fullbench_sources = [join_paths(zstd_rootdir, 'programs/datagen.c'), - join_paths(zstd_rootdir, 'programs/util.c'), - join_paths(zstd_rootdir, 'programs/timefn.c'), - join_paths(zstd_rootdir, 'programs/benchfn.c'), - join_paths(zstd_rootdir, 'programs/benchzstd.c'), - join_paths(zstd_rootdir, 'tests/fullbench.c')] +fullbench_sources = [join_paths(zstd_rootdir, 'tests/fullbench.c')] fullbench = executable('fullbench', fullbench_sources, include_directories: test_includes, - dependencies: libzstd_dep, + dependencies: testcommon_dep, install: false) -fuzzer_sources = [join_paths(zstd_rootdir, 'programs/datagen.c'), - join_paths(zstd_rootdir, 'programs/util.c'), - join_paths(zstd_rootdir, 'programs/timefn.c'), - join_paths(zstd_rootdir, 'tests/fuzzer.c')] +fuzzer_sources = [join_paths(zstd_rootdir, 'tests/fuzzer.c')] fuzzer = executable('fuzzer', fuzzer_sources, include_directories: test_includes, - dependencies: [ libzstd_dep, thread_dep ], + dependencies: [ testcommon_dep, thread_dep ], install: false) -zstreamtest_sources = [join_paths(zstd_rootdir, 'programs/datagen.c'), - join_paths(zstd_rootdir, 'programs/util.c'), - join_paths(zstd_rootdir, 'programs/timefn.c'), - join_paths(zstd_rootdir, 'tests/seqgen.c'), +zstreamtest_sources = [join_paths(zstd_rootdir, 'tests/seqgen.c'), join_paths(zstd_rootdir, 'tests/zstreamtest.c')] zstreamtest = executable('zstreamtest', zstreamtest_sources, include_directories: test_includes, - dependencies: libzstd_dep, + dependencies: testcommon_dep, install: false) -paramgrill_sources = [join_paths(zstd_rootdir, 'programs/benchfn.c'), - join_paths(zstd_rootdir, 'programs/timefn.c'), - join_paths(zstd_rootdir, 'programs/benchzstd.c'), - join_paths(zstd_rootdir, 'programs/datagen.c'), - join_paths(zstd_rootdir, 'programs/util.c'), - join_paths(zstd_rootdir, 'tests/paramgrill.c')] +paramgrill_sources = [join_paths(zstd_rootdir, 'tests/paramgrill.c')] paramgrill = executable('paramgrill', paramgrill_sources, include_directories: test_includes, - dependencies: [ libzstd_dep, libm_dep ], + dependencies: [ testcommon_dep, libm_dep ], install: false) roundTripCrash_sources = [join_paths(zstd_rootdir, 'tests/roundTripCrash.c')] roundTripCrash = executable('roundTripCrash', roundTripCrash_sources, - dependencies: [ libzstd_dep ], + dependencies: [ testcommon_dep ], install: false) longmatch_sources = [join_paths(zstd_rootdir, 'tests/longmatch.c')] @@ -111,18 +109,14 @@ if 0 < legacy_level and legacy_level <= 4 install: false) endif -decodecorpus_sources = [join_paths(zstd_rootdir, 'programs/util.c'), - join_paths(zstd_rootdir, 'programs/timefn.c'), - join_paths(zstd_rootdir, 'tests/decodecorpus.c')] +decodecorpus_sources = [join_paths(zstd_rootdir, 'tests/decodecorpus.c')] decodecorpus = executable('decodecorpus', decodecorpus_sources, include_directories: test_includes, - dependencies: [ libzstd_dep, libm_dep ], + dependencies: [ testcommon_dep, libm_dep ], install: false) -poolTests_sources = [join_paths(zstd_rootdir, 'programs/util.c'), - join_paths(zstd_rootdir, 'programs/timefn.c'), - join_paths(zstd_rootdir, 'tests/poolTests.c'), +poolTests_sources = [join_paths(zstd_rootdir, 'tests/poolTests.c'), join_paths(zstd_rootdir, 'lib/common/pool.c'), join_paths(zstd_rootdir, 'lib/common/threading.c'), join_paths(zstd_rootdir, 'lib/common/zstd_common.c'), @@ -130,7 +124,7 @@ poolTests_sources = [join_paths(zstd_rootdir, 'programs/util.c'), poolTests = 
executable('poolTests', poolTests_sources, include_directories: test_includes, - dependencies: [ libzstd_dep, thread_dep ], + dependencies: [ testcommon_dep, thread_dep ], install: false) checkTag_sources = [join_paths(zstd_rootdir, 'tests/checkTag.c')] @@ -186,10 +180,6 @@ test('test-zstream-1', zstreamtest, args: ['-v', ZSTREAM_TESTTIME] + FUZZER_FLAGS, timeout: 240) -test('test-zstream-2', - zstreamtest, - args: ['-mt', '-t1', ZSTREAM_TESTTIME] + FUZZER_FLAGS, - timeout: 120) test('test-zstream-3', zstreamtest, args: ['--newapi', '-t1', ZSTREAM_TESTTIME] + FUZZER_FLAGS, diff --git a/build/single_file_libs/zstd-in.c b/build/single_file_libs/zstd-in.c index 1b27953a6..733dcb757 100644 --- a/build/single_file_libs/zstd-in.c +++ b/build/single_file_libs/zstd-in.c @@ -25,7 +25,7 @@ * Note: MEM_MODULE stops xxhash redefining BYTE, U16, etc., which are also * defined in mem.h (breaking C99 compatibility). * - * Note: the undefs for xxHash allow Zstd's implementation to coinside with with + * Note: the undefs for xxHash allow Zstd's implementation to coincide with with * standalone xxHash usage (with global defines). * * Note: multithreading is enabled for all platforms apart from Emscripten. @@ -43,6 +43,8 @@ #define ZSTD_MULTITHREAD #endif #define ZSTD_TRACE 0 +/* TODO: Can't amalgamate ASM function */ +#define HUF_DISABLE_ASM 1 /* Include zstd_deps.h first with all the options we need enabled. */ #define ZSTD_DEPS_NEED_MALLOC diff --git a/build/single_file_libs/zstddeclib-in.c b/build/single_file_libs/zstddeclib-in.c index 019d9c260..cbf70c619 100644 --- a/build/single_file_libs/zstddeclib-in.c +++ b/build/single_file_libs/zstddeclib-in.c @@ -25,7 +25,7 @@ * Note: MEM_MODULE stops xxhash redefining BYTE, U16, etc., which are also * defined in mem.h (breaking C99 compatibility). * - * Note: the undefs for xxHash allow Zstd's implementation to coinside with with + * Note: the undefs for xxHash allow Zstd's implementation to coincide with with * standalone xxHash usage (with global defines). */ #define DEBUGLEVEL 0 @@ -39,6 +39,8 @@ #define ZSTD_LEGACY_SUPPORT 0 #define ZSTD_STRIP_ERROR_STRINGS #define ZSTD_TRACE 0 +/* TODO: Can't amalgamate ASM function */ +#define HUF_DISABLE_ASM 1 /* Include zstd_deps.h first with all the options we need enabled. 
*/ #define ZSTD_DEPS_NEED_MALLOC diff --git a/contrib/freestanding_lib/freestanding.py b/contrib/freestanding_lib/freestanding.py index 197168721..cd9d63774 100755 --- a/contrib/freestanding_lib/freestanding.py +++ b/contrib/freestanding_lib/freestanding.py @@ -27,7 +27,6 @@ SKIPPED_FILES = [ "common/pool.h", "common/threading.c", "common/threading.h", - "common/zstd_trace.c", "common/zstd_trace.h", "compress/zstdmt_compress.h", "compress/zstdmt_compress.c", @@ -461,7 +460,8 @@ class Freestanding(object): print(*args, **kwargs) def _copy_file(self, lib_path): - if not (lib_path.endswith(".c") or lib_path.endswith(".h")): + suffixes = [".c", ".h", ".S"] + if not any((lib_path.endswith(suffix) for suffix in suffixes)): return if lib_path in SKIPPED_FILES: self._log(f"\tSkipping file: {lib_path}") diff --git a/contrib/linux-kernel/Makefile b/contrib/linux-kernel/Makefile index c391df7c0..47a431740 100644 --- a/contrib/linux-kernel/Makefile +++ b/contrib/linux-kernel/Makefile @@ -35,11 +35,12 @@ libzstd: -DXXH_STATIC_LINKING_ONLY \ -DMEM_FORCE_MEMORY_ACCESS=0 \ -D__GNUC__ \ + -D__linux__=1 \ -DSTATIC_BMI2=0 \ -DZSTD_ADDRESS_SANITIZER=0 \ -DZSTD_MEMORY_SANITIZER=0 \ + -DZSTD_DATAFLOW_SANITIZER=0 \ -DZSTD_COMPRESS_HEAPMODE=1 \ - -UZSTD_NO_INLINE \ -UNO_PREFETCH \ -U__cplusplus \ -UZSTD_DLL_EXPORT \ @@ -50,9 +51,11 @@ libzstd: -U_WIN32 \ -RZSTDLIB_VISIBILITY= \ -RZSTDERRORLIB_VISIBILITY= \ + -RZSTD_FALLTHROUGH=fallthrough \ -DZSTD_HAVE_WEAK_SYMBOLS=0 \ -DZSTD_TRACE=0 \ - -DZSTD_NO_TRACE + -DZSTD_NO_TRACE \ + -DZSTD_LINUX_KERNEL mv linux/lib/zstd/zstd.h linux/include/linux/zstd_lib.h mv linux/lib/zstd/zstd_errors.h linux/include/linux/ cp linux_zstd.h linux/include/linux/zstd.h @@ -86,10 +89,17 @@ import-upstream: rm $(LINUX)/lib/zstd/common/xxhash.* rm $(LINUX)/lib/zstd/compress/zstdmt_* +DEBUGFLAGS= -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \ + -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \ + -Wstrict-prototypes -Wundef -Wpointer-arith \ + -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \ + -Wredundant-decls -Wmissing-prototypes -Wc++-compat \ + -Wimplicit-fallthrough + .PHONY: test test: libzstd - $(MAKE) -C test run-test CFLAGS="-O3 $(CFLAGS)" -j + $(MAKE) -C test run-test CFLAGS="-O3 $(CFLAGS) $(DEBUGFLAGS) -Werror" -j .PHONY: clean clean: - $(RM) -rf linux + $(RM) -rf linux test/test test/static_test diff --git a/contrib/linux-kernel/decompress_sources.h b/contrib/linux-kernel/decompress_sources.h index f35bef03e..a2aefe8f2 100644 --- a/contrib/linux-kernel/decompress_sources.h +++ b/contrib/linux-kernel/decompress_sources.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ /* * Copyright (c) Facebook, Inc. * All rights reserved. @@ -21,6 +21,11 @@ #include "common/error_private.c" #include "common/fse_decompress.c" #include "common/zstd_common.c" +/* + * Disable the ASM Huffman implementation because we need to + * include all the sources. + */ +#define HUF_DISABLE_ASM 1 #include "decompress/huf_decompress.c" #include "decompress/zstd_ddict.c" #include "decompress/zstd_decompress.c" diff --git a/contrib/linux-kernel/linux.mk b/contrib/linux-kernel/linux.mk index 19485e3cc..f6f3a8983 100644 --- a/contrib/linux-kernel/linux.mk +++ b/contrib/linux-kernel/linux.mk @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause # ################################################################ # Copyright (c) Facebook, Inc. # All rights reserved. 
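Several hunks in this area follow the same pattern: `HUF_DISABLE_ASM` is defined before the Huffman decoder source is pulled in (`zstd-in.c`, `zstddeclib-in.c`, `decompress_sources.h`), so builds that compile everything into a single translation unit fall back to the portable C decoder instead of the new x86-64 assembly path. A minimal, self-contained sketch of that compile-time gating — only the macro name comes from the patch; `huf_decoder_backend` is a hypothetical stand-in for the real decoder entry points:

```c
#include <stdio.h>

/* Sketch of the HUF_DISABLE_ASM gating used above. Only the macro name is
 * taken from the patch; huf_decoder_backend() is a hypothetical stand-in
 * for the real decoder entry points. */
#define HUF_DISABLE_ASM 1

#if defined(HUF_DISABLE_ASM) && (HUF_DISABLE_ASM == 1)
static const char* huf_decoder_backend(void) { return "portable C"; }
#else
static const char* huf_decoder_backend(void) { return "x86-64 assembly"; }
#endif

int main(void)
{
    printf("Huffman decoder backend: %s\n", huf_decoder_backend());
    return 0;
}
```

The ordering matters: the macro has to be visible before `huf_decompress.c` is included, which is exactly how the `decompress_sources.h` hunk above arranges it.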
@@ -11,7 +11,7 @@ obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o -ccflags-y += -O3 +ccflags-y += -Wno-error=deprecated-declarations zstd_compress-y := \ zstd_compress_module.o \ @@ -41,6 +41,7 @@ zstd_decompress-y := \ common/fse_decompress.o \ common/zstd_common.o \ decompress/huf_decompress.o \ + decompress/huf_decompress_amd64.o \ decompress/zstd_ddict.o \ decompress/zstd_decompress.o \ decompress/zstd_decompress_block.o \ diff --git a/contrib/linux-kernel/linux_zstd.h b/contrib/linux-kernel/linux_zstd.h index 446ecabcd..113408eef 100644 --- a/contrib/linux-kernel/linux_zstd.h +++ b/contrib/linux-kernel/linux_zstd.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ /* * Copyright (c) Yann Collet, Facebook, Inc. * All rights reserved. diff --git a/contrib/linux-kernel/mem.h b/contrib/linux-kernel/mem.h index 4b5db5756..dcdd586a9 100644 --- a/contrib/linux-kernel/mem.h +++ b/contrib/linux-kernel/mem.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ /* * Copyright (c) Yann Collet, Facebook, Inc. * All rights reserved. diff --git a/contrib/linux-kernel/test/Makefile b/contrib/linux-kernel/test/Makefile index 2908839fd..dc76a5f40 100644 --- a/contrib/linux-kernel/test/Makefile +++ b/contrib/linux-kernel/test/Makefile @@ -18,9 +18,13 @@ CPPFLAGS += -DZSTD_ASAN_DONT_POISON_WORKSPACE LINUX_ZSTD_MODULE := $(wildcard $(LINUX_ZSTDLIB)/*.c) LINUX_ZSTD_COMMON := $(wildcard $(LINUX_ZSTDLIB)/common/*.c) LINUX_ZSTD_COMPRESS := $(wildcard $(LINUX_ZSTDLIB)/compress/*.c) -LINUX_ZSTD_DECOMPRESS := $(wildcard $(LINUX_ZSTDLIB)/decompress/*.c) +LINUX_ZSTD_DECOMPRESS := $(wildcard $(LINUX_ZSTDLIB)/decompress/*.c $(LINUX_ZSTDLIB)/decompress/*.S) LINUX_ZSTD_FILES := $(LINUX_ZSTD_MODULE) $(LINUX_ZSTD_COMMON) $(LINUX_ZSTD_COMPRESS) $(LINUX_ZSTD_DECOMPRESS) -LINUX_ZSTD_OBJECTS := $(LINUX_ZSTD_FILES:.c=.o) +LINUX_ZSTD_OBJECTS0 := $(LINUX_ZSTD_FILES:.c=.o) +LINUX_ZSTD_OBJECTS := $(LINUX_ZSTD_OBJECTS0:.S=.o) + +%.o: %.S + $(CC) -c $(CPPFLAGS) $(CFLAGS) $^ -o $@ liblinuxzstd.a: $(LINUX_ZSTD_OBJECTS) $(AR) $(ARFLAGS) $@ $^ diff --git a/contrib/linux-kernel/test/include/asm/unaligned.h b/contrib/linux-kernel/test/include/asm/unaligned.h index 02c2d74f3..86ec4ca38 100644 --- a/contrib/linux-kernel/test/include/asm/unaligned.h +++ b/contrib/linux-kernel/test/include/asm/unaligned.h @@ -20,6 +20,7 @@ static unsigned _isLittleEndian(void) { const union { uint32_t u; uint8_t c[4]; } one = { 1 }; assert(_IS_LITTLE_ENDIAN == one.c[0]); + (void)one; return _IS_LITTLE_ENDIAN; } diff --git a/contrib/linux-kernel/test/include/linux/compiler.h b/contrib/linux-kernel/test/include/linux/compiler.h index ea3422ee3..de43edb69 100644 --- a/contrib/linux-kernel/test/include/linux/compiler.h +++ b/contrib/linux-kernel/test/include/linux/compiler.h @@ -18,4 +18,6 @@ #define noinline __attribute__((noinline)) #endif +#define fallthrough __attribute__((__fallthrough__)) + #endif diff --git a/contrib/linux-kernel/test/include/linux/xxhash.h b/contrib/linux-kernel/test/include/linux/xxhash.h index 0a43bb275..7e92a706e 100644 --- a/contrib/linux-kernel/test/include/linux/xxhash.h +++ b/contrib/linux-kernel/test/include/linux/xxhash.h @@ -124,11 +124,10 @@ XXH_API uint64_t xxh64(const void *input, size_t length, uint64_t seed); static inline unsigned long xxhash(const void *input, size_t length, uint64_t seed) { -#if BITS_PER_LONG == 64 - return xxh64(input, 
length, seed); -#else - return xxh32(input, length, seed); -#endif + if (sizeof(size_t) == 8) + return xxh64(input, length, seed); + else + return xxh32(input, length, seed); } /*-**************************** diff --git a/contrib/linux-kernel/test/macro-test.sh b/contrib/linux-kernel/test/macro-test.sh index c688ac03b..9ea84aa66 100755 --- a/contrib/linux-kernel/test/macro-test.sh +++ b/contrib/linux-kernel/test/macro-test.sh @@ -36,9 +36,9 @@ test_not_present "ZSTD_NO_INTRINSICS" test_not_present "ZSTD_NO_UNUSED_FUNCTIONS" test_not_present "ZSTD_LEGACY_SUPPORT" test_not_present "STATIC_BMI2" -test_not_present "ZSTD_NO_INLINE" test_not_present "ZSTD_DLL_EXPORT" test_not_present "ZSTD_DLL_IMPORT" test_not_present "__ICCARM__" test_not_present "_MSC_VER" test_not_present "_WIN32" +test_not_present "__linux__" diff --git a/contrib/linux-kernel/test/static_test.c b/contrib/linux-kernel/test/static_test.c index 50c594c77..d2b8b5a32 100644 --- a/contrib/linux-kernel/test/static_test.c +++ b/contrib/linux-kernel/test/static_test.c @@ -28,17 +28,19 @@ static const char kEmptyZstdFrame[] = { 0x28, 0xb5, 0x2f, 0xfd, 0x24, 0x00, 0x01, 0x00, 0x00, 0x99, 0xe9, 0xd8, 0x51 }; -static void test_decompress_unzstd() { +static void test_decompress_unzstd(void) { fprintf(stderr, "Testing decompress unzstd... "); { size_t const wkspSize = zstd_dctx_workspace_bound(); void* wksp = malloc(wkspSize); - CONTROL(wksp != NULL); ZSTD_DCtx* dctx = zstd_init_dctx(wksp, wkspSize); + CONTROL(wksp != NULL); CONTROL(dctx != NULL); - size_t const dSize = zstd_decompress_dctx(dctx, NULL, 0, kEmptyZstdFrame, sizeof(kEmptyZstdFrame)); - CONTROL(!zstd_is_error(dSize)); - CONTROL(dSize == 0); + { + size_t const dSize = zstd_decompress_dctx(dctx, NULL, 0, kEmptyZstdFrame, sizeof(kEmptyZstdFrame)); + CONTROL(!zstd_is_error(dSize)); + CONTROL(dSize == 0); + } free(wksp); } fprintf(stderr, "Ok\n"); diff --git a/contrib/linux-kernel/test/test.c b/contrib/linux-kernel/test/test.c index 9064be793..6cd1730bb 100644 --- a/contrib/linux-kernel/test/test.c +++ b/contrib/linux-kernel/test/test.c @@ -30,15 +30,15 @@ typedef struct { size_t compSize; } test_data_t; -test_data_t create_test_data(void) { +static test_data_t create_test_data(void) { test_data_t data; data.dataSize = 128 * 1024; - data.data = malloc(data.dataSize); + data.data = (char*)malloc(data.dataSize); CONTROL(data.data != NULL); - data.data2 = malloc(data.dataSize); + data.data2 = (char*)malloc(data.dataSize); CONTROL(data.data2 != NULL); data.compSize = zstd_compress_bound(data.dataSize); - data.comp = malloc(data.compSize); + data.comp = (char*)malloc(data.compSize); CONTROL(data.comp != NULL); memset(data.data, 0, data.dataSize); return data; @@ -54,26 +54,27 @@ static void free_test_data(test_data_t const *data) { #define MAX(a, b) ((a) > (b) ? (a) : (b)) static void test_btrfs(test_data_t const *data) { - fprintf(stderr, "testing btrfs use cases... "); size_t const size = MIN(data->dataSize, 128 * 1024); + fprintf(stderr, "testing btrfs use cases... 
"); for (int level = -1; level < 16; ++level) { zstd_parameters params = zstd_get_params(level, size); - CONTROL(params.cParams.windowLog <= 17); size_t const workspaceSize = MAX(zstd_cstream_workspace_bound(¶ms.cParams), zstd_dstream_workspace_bound(size)); void *workspace = malloc(workspaceSize); - CONTROL(workspace != NULL); char const *ip = data->data; char const *iend = ip + size; char *op = data->comp; char *oend = op + data->compSize; + + CONTROL(params.cParams.windowLog <= 17); + CONTROL(workspace != NULL); { zstd_cstream *cctx = zstd_init_cstream(¶ms, size, workspace, workspaceSize); - CONTROL(cctx != NULL); zstd_out_buffer out = {NULL, 0, 0}; zstd_in_buffer in = {NULL, 0, 0}; + CONTROL(cctx != NULL); for (;;) { if (in.pos == in.size) { in.src = ip; @@ -108,9 +109,9 @@ static void test_btrfs(test_data_t const *data) { oend = op + size; { zstd_dstream *dctx = zstd_init_dstream(1ULL << params.cParams.windowLog, workspace, workspaceSize); - CONTROL(dctx != NULL); zstd_out_buffer out = {NULL, 0, 0}; zstd_in_buffer in = {NULL, 0, 0}; + CONTROL(dctx != NULL); for (;;) { if (in.pos == in.size) { in.src = ip; @@ -125,15 +126,16 @@ static void test_btrfs(test_data_t const *data) { out.pos = 0; op += out.size; } - - size_t const ret = zstd_decompress_stream(dctx, &out, &in); - CONTROL(!zstd_is_error(ret)); - if (ret == 0) { - break; + { + size_t const ret = zstd_decompress_stream(dctx, &out, &in); + CONTROL(!zstd_is_error(ret)); + if (ret == 0) { + break; + } } } } - CONTROL(op - data->data2 == data->dataSize); + CONTROL((size_t)(op - data->data2) == data->dataSize); CONTROL(!memcmp(data->data, data->data2, data->dataSize)); free(workspace); } @@ -141,14 +143,14 @@ static void test_btrfs(test_data_t const *data) { } static void test_decompress_unzstd(test_data_t const *data) { - fprintf(stderr, "Testing decompress unzstd... "); size_t cSize; + fprintf(stderr, "Testing decompress unzstd... "); { zstd_parameters params = zstd_get_params(19, 0); size_t const wkspSize = zstd_cctx_workspace_bound(¶ms.cParams); void* wksp = malloc(wkspSize); - CONTROL(wksp != NULL); zstd_cctx* cctx = zstd_init_cctx(wksp, wkspSize); + CONTROL(wksp != NULL); CONTROL(cctx != NULL); cSize = zstd_compress_cctx(cctx, data->comp, data->compSize, data->data, data->dataSize, ¶ms); CONTROL(!zstd_is_error(cSize)); @@ -157,19 +159,21 @@ static void test_decompress_unzstd(test_data_t const *data) { { size_t const wkspSize = zstd_dctx_workspace_bound(); void* wksp = malloc(wkspSize); - CONTROL(wksp != NULL); zstd_dctx* dctx = zstd_init_dctx(wksp, wkspSize); + CONTROL(wksp != NULL); CONTROL(dctx != NULL); - size_t const dSize = zstd_decompress_dctx(dctx, data->data2, data->dataSize, data->comp, cSize); - CONTROL(!zstd_is_error(dSize)); - CONTROL(dSize == data->dataSize); + { + size_t const dSize = zstd_decompress_dctx(dctx, data->data2, data->dataSize, data->comp, cSize); + CONTROL(!zstd_is_error(dSize)); + CONTROL(dSize == data->dataSize); + } CONTROL(!memcmp(data->data, data->data2, data->dataSize)); free(wksp); } fprintf(stderr, "Ok\n"); } -static void test_f2fs() { +static void test_f2fs(void) { fprintf(stderr, "testing f2fs uses... 
"); CONTROL(zstd_min_clevel() < 0); CONTROL(zstd_max_clevel() == 22); @@ -182,7 +186,7 @@ static void __attribute__((noinline)) use(void *x) { asm volatile("" : "+r"(x)); } -static void __attribute__((noinline)) set_stack() { +static void __attribute__((noinline)) set_stack(void) { char stack[8192]; g_stack = stack; @@ -190,14 +194,16 @@ static void __attribute__((noinline)) set_stack() { use(g_stack); } -static void __attribute__((noinline)) check_stack() { +static void __attribute__((noinline)) check_stack(void) { size_t cleanStack = 0; while (cleanStack < 8192 && g_stack[cleanStack] == 0x33) { ++cleanStack; } - size_t const stackSize = 8192 - cleanStack; - fprintf(stderr, "Maximum stack size: %zu\n", stackSize); - CONTROL(stackSize <= 2048 + 512); + { + size_t const stackSize = 8192 - cleanStack; + fprintf(stderr, "Maximum stack size: %zu\n", stackSize); + CONTROL(stackSize <= 2048 + 512); + } } static void test_stack_usage(test_data_t const *data) { diff --git a/contrib/linux-kernel/zstd_compress_module.c b/contrib/linux-kernel/zstd_compress_module.c index 37d08ff43..65548a4bb 100644 --- a/contrib/linux-kernel/zstd_compress_module.c +++ b/contrib/linux-kernel/zstd_compress_module.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0-only +// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause /* * Copyright (c) Facebook, Inc. * All rights reserved. @@ -17,6 +17,43 @@ #include "common/zstd_deps.h" #include "common/zstd_internal.h" +#define ZSTD_FORWARD_IF_ERR(ret) \ + do { \ + size_t const __ret = (ret); \ + if (ZSTD_isError(__ret)) \ + return __ret; \ + } while (0) + +static size_t zstd_cctx_init(zstd_cctx *cctx, const zstd_parameters *parameters, + unsigned long long pledged_src_size) +{ + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_reset( + cctx, ZSTD_reset_session_and_parameters)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setPledgedSrcSize( + cctx, pledged_src_size)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_windowLog, parameters->cParams.windowLog)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_hashLog, parameters->cParams.hashLog)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_chainLog, parameters->cParams.chainLog)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_searchLog, parameters->cParams.searchLog)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_minMatch, parameters->cParams.minMatch)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_targetLength, parameters->cParams.targetLength)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_strategy, parameters->cParams.strategy)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_contentSizeFlag, parameters->fParams.contentSizeFlag)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_checksumFlag, parameters->fParams.checksumFlag)); + ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter( + cctx, ZSTD_c_dictIDFlag, !parameters->fParams.noDictIDFlag)); + return 0; +} + int zstd_min_clevel(void) { return ZSTD_minCLevel(); @@ -59,7 +96,8 @@ EXPORT_SYMBOL(zstd_init_cctx); size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity, const void *src, size_t src_size, const zstd_parameters *parameters) { - return ZSTD_compress_advanced(cctx, dst, dst_capacity, src, src_size, NULL, 0, *parameters); + ZSTD_FORWARD_IF_ERR(zstd_cctx_init(cctx, parameters, src_size)); + return ZSTD_compress2(cctx, dst, dst_capacity, src, src_size); } EXPORT_SYMBOL(zstd_compress_cctx); @@ -73,7 +111,6 @@ zstd_cstream *zstd_init_cstream(const zstd_parameters 
*parameters, unsigned long long pledged_src_size, void *workspace, size_t workspace_size) { zstd_cstream *cstream; - size_t ret; if (workspace == NULL) return NULL; @@ -86,8 +123,7 @@ zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters, if (pledged_src_size == 0) pledged_src_size = ZSTD_CONTENTSIZE_UNKNOWN; - ret = ZSTD_initCStream_advanced(cstream, NULL, 0, *parameters, pledged_src_size); - if (ZSTD_isError(ret)) + if (ZSTD_isError(zstd_cctx_init(cstream, parameters, pledged_src_size))) return NULL; return cstream; diff --git a/contrib/linux-kernel/zstd_decompress_module.c b/contrib/linux-kernel/zstd_decompress_module.c index 15005cdb9..f4ed952ed 100644 --- a/contrib/linux-kernel/zstd_decompress_module.c +++ b/contrib/linux-kernel/zstd_decompress_module.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0-only +// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause /* * Copyright (c) Facebook, Inc. * All rights reserved. diff --git a/contrib/linux-kernel/zstd_deps.h b/contrib/linux-kernel/zstd_deps.h index 853b72426..7a5bf4483 100644 --- a/contrib/linux-kernel/zstd_deps.h +++ b/contrib/linux-kernel/zstd_deps.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ /* * Copyright (c) Facebook, Inc. * All rights reserved. diff --git a/contrib/pzstd/Makefile b/contrib/pzstd/Makefile index 25265e79a..3d930cca9 100644 --- a/contrib/pzstd/Makefile +++ b/contrib/pzstd/Makefile @@ -57,19 +57,6 @@ LD_COMMAND = $(CXX) $^ $(ALL_LDFLAGS) $(LIBS) -pthread -o $@ CC_COMMAND = $(CC) $(DEPFLAGS) $(ALL_CFLAGS) -c $< -o $@ CXX_COMMAND = $(CXX) $(DEPFLAGS) $(ALL_CXXFLAGS) -c $< -o $@ -# Get a list of all zstd files so we rebuild the static library when we need to -ZSTDCOMMON_FILES := $(wildcard $(ZSTDDIR)/common/*.c) \ - $(wildcard $(ZSTDDIR)/common/*.h) -ZSTDCOMP_FILES := $(wildcard $(ZSTDDIR)/compress/*.c) \ - $(wildcard $(ZSTDDIR)/compress/*.h) -ZSTDDECOMP_FILES := $(wildcard $(ZSTDDIR)/decompress/*.c) \ - $(wildcard $(ZSTDDIR)/decompress/*.h) -ZSTDPROG_FILES := $(wildcard $(PROGDIR)/*.c) \ - $(wildcard $(PROGDIR)/*.h) -ZSTD_FILES := $(wildcard $(ZSTDDIR)/*.h) \ - $(ZSTDDECOMP_FILES) $(ZSTDCOMMON_FILES) $(ZSTDCOMP_FILES) \ - $(ZSTDPROG_FILES) - # List all the pzstd source files so we can determine their dependencies PZSTD_SRCS := $(wildcard *.cpp) PZSTD_TESTS := $(wildcard test/*.cpp) @@ -189,7 +176,8 @@ roundtrip: test/RoundTripTest$(EXT) # Use the static library that zstd builds for simplicity and # so we get the compiler options correct -$(ZSTDDIR)/libzstd.a: $(ZSTD_FILES) +.PHONY: $(ZSTDDIR)/libzstd.a +$(ZSTDDIR)/libzstd.a: CFLAGS="$(ALL_CFLAGS)" LDFLAGS="$(ALL_LDFLAGS)" $(MAKE) -C $(ZSTDDIR) libzstd.a # Rules to build the tests diff --git a/contrib/pzstd/Options.cpp b/contrib/pzstd/Options.cpp index 37292221b..90f9d571f 100644 --- a/contrib/pzstd/Options.cpp +++ b/contrib/pzstd/Options.cpp @@ -87,7 +87,7 @@ void usage() { std::fprintf(stderr, " -V, --version : display version number and exit\n"); std::fprintf(stderr, " -v, --verbose : verbose mode; specify multiple times to increase log level (default:2)\n"); std::fprintf(stderr, " -q, --quiet : suppress warnings; specify twice to suppress errors too\n"); - std::fprintf(stderr, " -c, --stdout : force write to standard output, even if it is the console\n"); + std::fprintf(stderr, " -c, --stdout : write to standard output (even if it is the console)\n"); #ifdef UTIL_HAS_CREATEFILELIST std::fprintf(stderr, " -r : operate recursively on directories\n"); #endif diff --git 
a/contrib/pzstd/utils/Buffer.h b/contrib/pzstd/utils/Buffer.h index f69c3b4d9..d17ad2f2c 100644 --- a/contrib/pzstd/utils/Buffer.h +++ b/contrib/pzstd/utils/Buffer.h @@ -45,7 +45,7 @@ class Buffer { : buffer_(buffer), range_(data) {} Buffer(Buffer&&) = default; - Buffer& operator=(Buffer&&) & = default; + Buffer& operator=(Buffer&&) = default; /** * Splits the data into two pieces: [begin, begin + n), [begin + n, end). diff --git a/contrib/pzstd/utils/Range.h b/contrib/pzstd/utils/Range.h index fedb5d786..6a850ad4e 100644 --- a/contrib/pzstd/utils/Range.h +++ b/contrib/pzstd/utils/Range.h @@ -6,7 +6,7 @@ * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). */ - + /** * A subset of `folly/Range.h`. * All code copied verbatim modulo formatting @@ -83,8 +83,8 @@ class Range { Range(const Range&) = default; Range(Range&&) = default; - Range& operator=(const Range&) & = default; - Range& operator=(Range&&) & = default; + Range& operator=(const Range&) = default; + Range& operator=(Range&&) = default; constexpr size_type size() const { return e_ - b_; diff --git a/doc/educational_decoder/zstd_decompress.c b/doc/educational_decoder/zstd_decompress.c index 62e6f0dd4..936407086 100644 --- a/doc/educational_decoder/zstd_decompress.c +++ b/doc/educational_decoder/zstd_decompress.c @@ -2145,7 +2145,7 @@ static void FSE_init_dtable(FSE_dtable *const dtable, // "All remaining symbols are sorted in their natural order. Starting from // symbol 0 and table position 0, each symbol gets attributed as many cells - // as its probability. Cell allocation is spreaded, not linear." + // as its probability. Cell allocation is spread, not linear." // Place the rest in the table const u16 step = (size >> 1) + (size >> 3) + 3; const u16 mask = size - 1; diff --git a/doc/zstd_compression_format.md b/doc/zstd_compression_format.md index 5c7deb9c4..fc09bd553 100644 --- a/doc/zstd_compression_format.md +++ b/doc/zstd_compression_format.md @@ -1124,7 +1124,7 @@ These symbols define a full state reset, reading `Accuracy_Log` bits. Then, all remaining symbols, sorted in natural order, are allocated cells. Starting from symbol `0` (if it exists), and table position `0`, each symbol gets allocated as many cells as its probability. -Cell allocation is spreaded, not linear : +Cell allocation is spread, not linear : each successor position follows this rule : ``` @@ -1669,7 +1669,7 @@ or at least provide a meaningful error code explaining for which reason it canno Version changes --------------- -- 0.3.7 : clarifications for Repeat_Offsets +- 0.3.7 : clarifications for Repeat_Offsets, matching RFC8878 - 0.3.6 : clarifications for Dictionary_ID - 0.3.5 : clarifications for Block_Maximum_Size - 0.3.4 : clarifications for FSE decoding table diff --git a/doc/zstd_manual.html b/doc/zstd_manual.html index 010f10a01..7d2e1c6ca 100644 --- a/doc/zstd_manual.html +++ b/doc/zstd_manual.html @@ -1,10 +1,10 @@ -zstd 1.5.0 Manual +zstd 1.5.1 Manual -
-<h1>zstd 1.5.0 Manual</h1>
+<h1>zstd 1.5.1 Manual</h1>
Contents

    @@ -40,7 +40,7 @@ functions. The library supports regular compression levels from 1 up to ZSTD_maxCLevel(), - which is currently 22. Levels >= 20, labeled `--ultra`, should be used with + which is 22 in most cases. Levels >= 20, labeled `--ultra`, should be used with caution, as they require more memory. The library also offers negative compression levels, which extend the range of speed vs. ratio preferences. The lower the level, the faster the speed (at the cost of compression). @@ -357,7 +357,7 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); /* accept NULL pointer */ * ZSTD_c_stableOutBuffer * ZSTD_c_blockDelimiters * ZSTD_c_validateSequences - * ZSTD_c_splitBlocks + * ZSTD_c_useBlockSplitter * ZSTD_c_useRowMatchFinder * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. * note : never ever use experimentalParam? names directly; @@ -803,7 +803,7 @@ size_t ZSTD_freeDStream(ZSTD_DStream* zds); /* accept NULL pointer */
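To make the negative-level range mentioned above concrete, here is a minimal sketch (not part of this patch) using the advanced API; the buffer names and the chosen level -5 are illustrative, and error handling is reduced to returning the zstd result code:

    #include <assert.h>
    #include <zstd.h>

    /* Compress `src` with a negative (speed-oriented) level; -5 is illustrative.
     * Any level down to ZSTD_minCLevel() is accepted; lower = faster, larger output. */
    size_t compress_fast(void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        size_t cSize;
        assert(cctx != NULL);   /* sketch: no real error handling */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, -5);
        cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
        ZSTD_freeCCtx(cctx);
        return cSize;           /* check with ZSTD_isError() */
    }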

    Advanced dictionary and prefix API (Requires v1.4.0+)

      This API allows dictionaries to be used with ZSTD_compress2(),
    - ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
    + ZSTD_compressStream2(), and ZSTD_decompressDCtx(). Dictionaries are sticky, and
      only reset when the context is reset with ZSTD_reset_parameters or
      ZSTD_reset_session_and_parameters. Prefixes are single-use.
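As an illustration of the sticky behaviour described above (a sketch, not part of this patch; `dict`, `in1`, `in2` and their sizes are assumed to be supplied by the caller):

    #include <zstd.h>

    /* Load a dictionary once, then reuse it for several ZSTD_compress2() calls.
     * It stays attached until the context is reset with
     * ZSTD_reset_session_and_parameters (or replaced by another dictionary). */
    size_t compress_two(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity,
                        const void* in1, size_t sz1,
                        const void* in2, size_t sz2,
                        const void* dict, size_t dictSize)
    {
        size_t r = ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);
        if (ZSTD_isError(r)) return r;
        r = ZSTD_compress2(cctx, dst, dstCapacity, in1, sz1);
        if (ZSTD_isError(r)) return r;
        return ZSTD_compress2(cctx, dst, dstCapacity, in2, sz2);  /* same dictionary, no reload */
    }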
     
    @@ -1072,10 +1072,14 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); } ZSTD_literalCompressionMode_e;

    typedef enum {
    -  ZSTD_urm_auto = 0,                   /* Automatically determine whether or not we use row matchfinder */
    -  ZSTD_urm_disableRowMatchFinder = 1,  /* Never use row matchfinder */
    -  ZSTD_urm_enableRowMatchFinder = 2    /* Always use row matchfinder when applicable */
    -} ZSTD_useRowMatchFinderMode_e;
    +  /* Note: This enum controls features which are conditionally beneficial. Zstd typically will make a final
    +   * decision on whether or not to enable the feature (ZSTD_ps_auto), but setting the switch to ZSTD_ps_enable
    +   * or ZSTD_ps_disable forces the feature on or off.
    +   */
    +  ZSTD_ps_auto = 0,         /* Let the library automatically determine whether the feature shall be enabled */
    +  ZSTD_ps_enable = 1,       /* Force-enable the feature */
    +  ZSTD_ps_disable = 2       /* Do not use the feature */
    +} ZSTD_paramSwitch_e;
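For illustration (not part of this patch), a parameter that takes these switch values can be forced on through the advanced API; ZSTD_c_useRowMatchFinder is an experimental parameter, so ZSTD_STATIC_LINKING_ONLY is assumed here:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Override the library's automatic choice (ZSTD_ps_auto) for the row-based
     * match finder; ZSTD_ps_disable would force it off instead. */
    static void force_row_match_finder(ZSTD_CCtx* cctx)
    {
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_useRowMatchFinder, ZSTD_ps_enable);
    }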
     

    Frame size functions

    
     
    @@ -1205,6 +1209,25 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
      
     


    +
    size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant,
    +                                const void* src, size_t srcSize);
    +

    Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer. + + The parameter magicVariant will receive the magicVariant that was supplied when the frame was written, + i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested + in the magicVariant. + + Returns an error if destination buffer is not large enough, or if the frame is not skippable. + + @return : number of bytes written or a ZSTD error. + +


    + +
    unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
    +

    Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame. + +
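A minimal sketch combining the two prototypes above (not part of this patch; both functions are experimental, so ZSTD_STATIC_LINKING_ONLY is assumed):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <stdio.h>
    #include <zstd.h>

    /* If `frame` holds a skippable frame, copy its payload into `dst`
     * and report its size and magic variant. */
    static void dump_skippable(const void* frame, size_t frameSize,
                               void* dst, size_t dstCapacity)
    {
        if (ZSTD_isSkippableFrame(frame, frameSize)) {
            unsigned magicVariant;  /* receives magicNumber - ZSTD_MAGIC_SKIPPABLE_START */
            size_t const n = ZSTD_readSkippableFrame(dst, dstCapacity, &magicVariant,
                                                     frame, frameSize);
            if (!ZSTD_isError(n))
                printf("skippable frame: %zu payload bytes, variant %u\n", n, magicVariant);
        }
    }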


    +

    Memory management

    
     
     
    size_t ZSTD_estimateCCtxSize(int compressionLevel);
    @@ -1303,6 +1326,21 @@ ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL };  /**< this con
      
     


    +
    typedef struct POOL_ctx_s ZSTD_threadPool;
    +ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads);
    +void ZSTD_freeThreadPool (ZSTD_threadPool* pool);  /* accept NULL pointer */
    +size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool);
    +

    These prototypes make it possible to share a thread pool among multiple compression contexts. + This can limit resources for applications with multiple threads where each one uses + a threaded compression mode (via ZSTD_c_nbWorkers parameter). + ZSTD_createThreadPool creates a new thread pool with a given number of threads. + Note that such a pool must remain valid for as long as it is being used. + ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value + to use an internal thread pool). + ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer. + +
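A sketch of how these prototypes combine (not part of this patch; assumes ZSTD_STATIC_LINKING_ONLY and a multithreaded build of the library):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Two contexts draw their workers from one shared pool. */
    static void shared_pool_demo(void)
    {
        ZSTD_threadPool* const pool = ZSTD_createThreadPool(4);   /* 4 worker threads */
        ZSTD_CCtx* const c1 = ZSTD_createCCtx();
        ZSTD_CCtx* const c2 = ZSTD_createCCtx();

        ZSTD_CCtx_refThreadPool(c1, pool);
        ZSTD_CCtx_refThreadPool(c2, pool);
        /* nbWorkers still has to be > 0 for the pool to actually be used. */
        ZSTD_CCtx_setParameter(c1, ZSTD_c_nbWorkers, 4);
        ZSTD_CCtx_setParameter(c2, ZSTD_c_nbWorkers, 4);

        /* ... compress with c1 / c2 as usual ... */

        ZSTD_freeCCtx(c1);
        ZSTD_freeCCtx(c2);
        ZSTD_freeThreadPool(pool);   /* the pool must outlive every context that references it */
    }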


    +

    Advanced compression functions

    
     
     
    ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
    @@ -1594,7 +1632,7 @@ size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
     

    This function is DEPRECATED, and equivalent to: ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); ZSTD_CCtx_refCDict(zcs, cdict); - + note : cdict will just be referenced, and must outlive compression session This prototype will generate compilation warnings. diff --git a/examples/streaming_compression.c b/examples/streaming_compression.c index e20bcde99..87cb887de 100644 --- a/examples/streaming_compression.c +++ b/examples/streaming_compression.c @@ -15,9 +15,12 @@ #include // presumes zstd library is installed #include "common.h" // Helper functions, CHECK(), and CHECK_ZSTD() - -static void compressFile_orDie(const char* fname, const char* outName, int cLevel) +static void compressFile_orDie(const char* fname, const char* outName, int cLevel, + int nbThreads) { + fprintf (stderr, "Starting compression of %s with level %d, using %d threads\n", + fname, cLevel, nbThreads); + /* Open the input and output files. */ FILE* const fin = fopen_orDie(fname, "rb"); FILE* const fout = fopen_orDie(outName, "wb"); @@ -39,7 +42,7 @@ static void compressFile_orDie(const char* fname, const char* outName, int cLeve */ CHECK_ZSTD( ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, cLevel) ); CHECK_ZSTD( ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1) ); - ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4); + ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, nbThreads); /* This loop read from the input file, compresses that entire chunk, * and writes all output produced to the output file. @@ -106,17 +109,30 @@ int main(int argc, const char** argv) { const char* const exeName = argv[0]; - if (argc!=2) { + if (argc < 2) { printf("wrong arguments\n"); printf("usage:\n"); - printf("%s FILE\n", exeName); + printf("%s FILE [LEVEL] [THREADS]\n", exeName); return 1; } + int cLevel = 1; + int nbThreads = 4; + + if (argc >= 3) { + cLevel = atoi (argv[2]); + CHECK(cLevel != 0, "can't parse LEVEL!"); + } + + if (argc >= 4) { + nbThreads = atoi (argv[3]); + CHECK(nbThreads != 0, "can't parse THREADS!"); + } + const char* const inFilename = argv[1]; char* const outFilename = createOutFilename_orDie(inFilename); - compressFile_orDie(inFilename, outFilename, 1); + compressFile_orDie(inFilename, outFilename, cLevel, nbThreads); free(outFilename); /* not strictly required, since program execution stops there, * but some static analyzer main complain otherwise */ diff --git a/examples/streaming_compression_thread_pool.c b/examples/streaming_compression_thread_pool.c index 5a6551baa..471ca8631 100644 --- a/examples/streaming_compression_thread_pool.c +++ b/examples/streaming_compression_thread_pool.c @@ -28,8 +28,10 @@ typedef struct compress_args static void *compressFile_orDie(void *data) { + const int nbThreads = 16; + compress_args_t *args = (compress_args_t *)data; - fprintf (stderr, "Starting compression of %s with level %d\n", args->fname, args->cLevel); + fprintf (stderr, "Starting compression of %s with level %d, using %d threads\n", args->fname, args->cLevel, nbThreads); /* Open the input and output files. 
*/ FILE* const fin = fopen_orDie(args->fname, "rb"); FILE* const fout = fopen_orDie(args->outName, "wb"); @@ -56,7 +58,7 @@ static void *compressFile_orDie(void *data) */ CHECK_ZSTD( ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, args->cLevel) ); CHECK_ZSTD( ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1) ); - ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 16); + ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, nbThreads); /* This loop read from the input file, compresses that entire chunk, * and writes all output produced to the output file. diff --git a/lib/Makefile b/lib/Makefile index e258dea8a..934c2b681 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -100,18 +100,7 @@ ZSTD_LIB_DECOMPRESSION ?= 1 ZSTD_LIB_DICTBUILDER ?= 1 ZSTD_LIB_DEPRECATED ?= 0 -# Legacy support -ZSTD_LEGACY_SUPPORT ?= 5 -ZSTD_LEGACY_MULTITHREADED_API ?= 0 - -# Build size optimizations -HUF_FORCE_DECOMPRESS_X1 ?= 0 -HUF_FORCE_DECOMPRESS_X2 ?= 0 -ZSTD_FORCE_DECOMPRESS_SHORT ?= 0 -ZSTD_FORCE_DECOMPRESS_LONG ?= 0 -ZSTD_NO_INLINE ?= 0 -ZSTD_STRIP_ERROR_STRINGS ?= 0 - +# Input variables for libzstd.mk ifeq ($(ZSTD_LIB_COMPRESSION), 0) ZSTD_LIB_DICTBUILDER = 0 ZSTD_LIB_DEPRECATED = 0 @@ -122,86 +111,46 @@ ifeq ($(ZSTD_LIB_DECOMPRESSION), 0) ZSTD_LIB_DEPRECATED = 0 endif +include libzstd.mk + +ZSTD_FILES := $(ZSTD_COMMON_FILES) $(ZSTD_LEGACY_FILES) + ifneq ($(ZSTD_LIB_COMPRESSION), 0) - ZSTD_FILES += $(ZSTDCOMP_FILES) + ZSTD_FILES += $(ZSTD_COMPRESS_FILES) endif ifneq ($(ZSTD_LIB_DECOMPRESSION), 0) - ZSTD_FILES += $(ZSTDDECOMP_FILES) + ZSTD_FILES += $(ZSTD_DECOMPRESS_FILES) endif ifneq ($(ZSTD_LIB_DEPRECATED), 0) - ZSTD_FILES += $(ZDEPR_FILES) + ZSTD_FILES += $(ZSTD_DEPRECATED_FILES) endif ifneq ($(ZSTD_LIB_DICTBUILDER), 0) - ZSTD_FILES += $(ZDICT_FILES) + ZSTD_FILES += $(ZSTD_DICTBUILDER_FILES) endif -ifneq ($(HUF_FORCE_DECOMPRESS_X1), 0) - CFLAGS += -DHUF_FORCE_DECOMPRESS_X1 -endif - -ifneq ($(HUF_FORCE_DECOMPRESS_X2), 0) - CFLAGS += -DHUF_FORCE_DECOMPRESS_X2 -endif - -ifneq ($(ZSTD_FORCE_DECOMPRESS_SHORT), 0) - CFLAGS += -DZSTD_FORCE_DECOMPRESS_SHORT -endif - -ifneq ($(ZSTD_FORCE_DECOMPRESS_LONG), 0) - CFLAGS += -DZSTD_FORCE_DECOMPRESS_LONG -endif - -ifneq ($(ZSTD_NO_INLINE), 0) - CFLAGS += -DZSTD_NO_INLINE -endif - -ifneq ($(ZSTD_STRIP_ERROR_STRINGS), 0) - CFLAGS += -DZSTD_STRIP_ERROR_STRINGS -endif - -ifneq ($(ZSTD_LEGACY_MULTITHREADED_API), 0) - CFLAGS += -DZSTD_LEGACY_MULTITHREADED_API -endif - -ifneq ($(ZSTD_LEGACY_SUPPORT), 0) -ifeq ($(shell test $(ZSTD_LEGACY_SUPPORT) -lt 8; echo $$?), 0) - ZSTD_FILES += $(shell ls legacy/*.c | $(GREP) 'v0[$(ZSTD_LEGACY_SUPPORT)-7]') -endif -endif -CPPFLAGS += -DZSTD_LEGACY_SUPPORT=$(ZSTD_LEGACY_SUPPORT) - ZSTD_LOCAL_SRC := $(notdir $(ZSTD_FILES)) -ZSTD_LOCAL_OBJ := $(ZSTD_LOCAL_SRC:.c=.o) +ZSTD_LOCAL_OBJ0 := $(ZSTD_LOCAL_SRC:.c=.o) +ZSTD_LOCAL_OBJ := $(ZSTD_LOCAL_OBJ0:.S=.o) -ZSTD_SUBDIR := common compress decompress dictBuilder legacy deprecated -vpath %.c $(ZSTD_SUBDIR) +VERSION := $(ZSTD_VERSION) -UNAME := $(shell uname) +# Note: by default, the static library is built single-threaded and dynamic library is built +# multi-threaded. It is possible to force multi or single threaded builds by appending +# -mt or -nomt to the build target (like lib-mt for multi-threaded, lib-nomt for single-threaded). 
+.PHONY: default +default: lib-release -ifndef BUILD_DIR -ifeq ($(UNAME), Darwin) - ifeq ($(shell md5 < /dev/null > /dev/null; echo $$?), 0) - HASH ?= md5 - endif -else ifeq ($(UNAME), FreeBSD) - HASH ?= gmd5sum -else ifeq ($(UNAME), NetBSD) - HASH ?= md5 -n -else ifeq ($(UNAME), OpenBSD) - HASH ?= md5 +CPPFLAGS_DYNLIB = -DZSTD_MULTITHREAD # dynamic library build defaults to multi-threaded +LDFLAGS_DYNLIB = -pthread +CPPFLAGS_STATLIB = # static library build defaults to single-threaded + + +ifeq ($(findstring GCC,$(CCVER)),GCC) +decompress/zstd_decompress_block.o : CFLAGS+=-fno-tree-vectorize endif -HASH ?= md5sum - -HASH_DIR = conf_$(shell echo $(CC) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) $(ZSTD_FILES) | $(HASH) | cut -f 1 -d " " ) -HAVE_HASH :=$(shell echo 1 | $(HASH) > /dev/null && echo 1 || echo 0) -ifeq ($(HAVE_HASH),0) - $(info warning : could not find HASH ($(HASH)), needed to differentiate builds using different flags) - BUILD_DIR := obj/generic_noconf -endif -endif # BUILD_DIR # macOS linker doesn't support -soname, and use different extension @@ -212,19 +161,16 @@ ifeq ($(UNAME), Darwin) SHARED_EXT_VER = $(LIBVER).$(SHARED_EXT) SONAME_FLAGS = -install_name $(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR) -compatibility_version $(LIBVER_MAJOR) -current_version $(LIBVER) else - SONAME_FLAGS = -Wl,-soname=libzstd.$(SHARED_EXT).$(LIBVER_MAJOR) + ifeq ($(UNAME), AIX) + SONAME_FLAGS = + else + SONAME_FLAGS = -Wl,-soname=libzstd.$(SHARED_EXT).$(LIBVER_MAJOR) + endif SHARED_EXT = so SHARED_EXT_MAJOR = $(SHARED_EXT).$(LIBVER_MAJOR) SHARED_EXT_VER = $(SHARED_EXT).$(LIBVER) endif -SET_CACHE_DIRECTORY = \ - +$(MAKE) --no-print-directory $@ \ - BUILD_DIR=obj/$(HASH_DIR) \ - CPPFLAGS="$(CPPFLAGS)" \ - CFLAGS="$(CFLAGS)" \ - LDFLAGS="$(LDFLAGS)" - .PHONY: all all: lib @@ -233,6 +179,13 @@ all: lib .PHONY: libzstd.a # must be run every time libzstd.a: CPPFLAGS += $(CPPFLAGS_STATLIB) +SET_CACHE_DIRECTORY = \ + +$(MAKE) --no-print-directory $@ \ + BUILD_DIR=obj/$(HASH_DIR) \ + CPPFLAGS="$(CPPFLAGS)" \ + CFLAGS="$(CFLAGS)" \ + LDFLAGS="$(LDFLAGS)" + ifndef BUILD_DIR # determine BUILD_DIR from compilation flags @@ -343,6 +296,14 @@ $(ZSTD_STATLIB_DIR)/%.o : %.c $(ZSTD_STATLIB_DIR)/%.d | $(ZSTD_STATLIB_DIR) @echo CC $@ $(COMPILE.c) $(DEPFLAGS) $(ZSTD_STATLIB_DIR)/$*.d $(OUTPUT_OPTION) $< +$(ZSTD_DYNLIB_DIR)/%.o : %.S | $(ZSTD_DYNLIB_DIR) + @echo AS $@ + $(COMPILE.c) $(OUTPUT_OPTION) $< + +$(ZSTD_STATLIB_DIR)/%.o : %.S | $(ZSTD_STATLIB_DIR) + @echo AS $@ + $(COMPILE.c) $(OUTPUT_OPTION) $< + MKDIR ?= mkdir $(BUILD_DIR) $(ZSTD_DYNLIB_DIR) $(ZSTD_STATLIB_DIR): $(MKDIR) -p $@ @@ -374,7 +335,7 @@ clean: #----------------------------------------------------------------------------- # make install is validated only for below listed environments #----------------------------------------------------------------------------- -ifneq (,$(filter $(UNAME),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS Haiku)) +ifneq (,$(filter $(UNAME),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS Haiku AIX)) all: libzstd.pc diff --git a/lib/README.md b/lib/README.md index f781ac57e..4c9d8f059 100644 --- a/lib/README.md +++ b/lib/README.md @@ -125,7 +125,7 @@ The file structure is designed to make this selection manually achievable for an `ZSTD_getErrorName` (implied by `ZSTD_LIB_MINIFY`). 
Finally, when integrating into your application, make sure you're doing link- - time optimation and unused symbol garbage collection (via some combination of, + time optimization and unused symbol garbage collection (via some combination of, e.g., `-flto`, `-ffat-lto-objects`, `-fuse-linker-plugin`, `-ffunction-sections`, `-fdata-sections`, `-fmerge-all-constants`, `-Wl,--gc-sections`, `-Wl,-z,norelro`, and an archiver that understands @@ -155,6 +155,12 @@ The file structure is designed to make this selection manually achievable for an - The build macro `ZSTD_NO_INTRINSICS` can be defined to disable all explicit intrinsics. Compiler builtins are still used. +- The build macro `ZSTD_DECODER_INTERNAL_BUFFER` can be set to control + the amount of extra memory used during decompression to store literals. + This defaults to 64kB. Reducing this value reduces the memory footprint of + `ZSTD_DCtx` decompression contexts, + but might also result in a small decompression speed cost. + #### Windows : using MinGW+MSYS to create DLL diff --git a/lib/common/bitstream.h b/lib/common/bitstream.h index 2e5a933ad..84b6062ff 100644 --- a/lib/common/bitstream.h +++ b/lib/common/bitstream.h @@ -143,10 +143,16 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val) { # if defined(_MSC_VER) /* Visual */ # if STATIC_BMI2 == 1 - return _lzcnt_u32(val) ^ 31; + return _lzcnt_u32(val) ^ 31; # else - unsigned long r = 0; - return _BitScanReverse(&r, val) ? (unsigned)r : 0; + if (val != 0) { + unsigned long r; + _BitScanReverse(&r, val); + return (unsigned)r; + } else { + /* Should not reach this code path */ + __assume(0); + } # endif # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ return __builtin_clz (val) ^ 31; @@ -293,22 +299,22 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si switch(srcSize) { case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16); - /* fall-through */ + ZSTD_FALLTHROUGH; case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24); - /* fall-through */ + ZSTD_FALLTHROUGH; case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32); - /* fall-through */ + ZSTD_FALLTHROUGH; case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; - /* fall-through */ + ZSTD_FALLTHROUGH; case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; - /* fall-through */ + ZSTD_FALLTHROUGH; case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; - /* fall-through */ + ZSTD_FALLTHROUGH; default: break; } @@ -332,7 +338,16 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 c U32 const regMask = sizeof(bitContainer)*8 - 1; /* if start > regMask, bitstream is corrupted, and result is undefined */ assert(nbBits < BIT_MASK_SIZE); + /* x86 transform & ((1 << nbBits) - 1) to bzhi instruction, it is better + * than accessing memory. When bmi2 instruction is not present, we consider + * such cpus old (pre-Haswell, 2013) and their performance is not of that + * importance. 
+ */ +#if defined(__x86_64__) || defined(_M_X86) + return (bitContainer >> (start & regMask)) & ((((U64)1) << nbBits) - 1); +#else return (bitContainer >> (start & regMask)) & BIT_mask[nbBits]; +#endif } MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) diff --git a/lib/common/compiler.h b/lib/common/compiler.h index a951d0ade..ea5fe2f47 100644 --- a/lib/common/compiler.h +++ b/lib/common/compiler.h @@ -40,7 +40,7 @@ /** On MSVC qsort requires that functions passed into it use the __cdecl calling conversion(CC). - This explictly marks such functions as __cdecl so that the code will still compile + This explicitly marks such functions as __cdecl so that the code will still compile if a CC other than __cdecl has been made the default. */ #if defined(_MSC_VER) @@ -101,6 +101,13 @@ # define TARGET_ATTRIBUTE(target) #endif +/* Target attribute for BMI2 dynamic dispatch. + * Enable lzcnt, bmi, and bmi2. + * We test for bmi1 & bmi2. lzcnt is included in bmi1. + */ +#define BMI2_TARGET_ATTRIBUTE TARGET_ATTRIBUTE("lzcnt,bmi,bmi2") + + /* Enable runtime BMI2 dispatch based on the CPU. * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default. */ @@ -108,7 +115,7 @@ #if ((defined(__clang__) && __has_attribute(__target__)) \ || (defined(__GNUC__) \ && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \ - && (defined(__x86_64__) || defined(_M_X86)) \ + && (defined(__x86_64__) || defined(_M_X64)) \ && !defined(__BMI2__) # define DYNAMIC_BMI2 1 #else @@ -150,8 +157,9 @@ } /* vectorization - * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax */ -#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__) + * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax, + * and some compilers, like Intel ICC and MCST LCC, do not support it at all. */ +#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__) && !defined(__LCC__) # if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5) # define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize"))) # else @@ -197,6 +205,22 @@ #define STATIC_BMI2 0 #endif +/* compile time determination of SIMD support */ +#if !defined(ZSTD_NO_INTRINSICS) +# if defined(__SSE2__) || defined(_M_AMD64) || (defined (_M_IX86) && defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) +# define ZSTD_ARCH_X86_SSE2 +# endif +# if defined(__ARM_NEON) || defined(_M_ARM64) +# define ZSTD_ARCH_ARM_NEON +# endif +# +# if defined(ZSTD_ARCH_X86_SSE2) +# include +# elif defined(ZSTD_ARCH_ARM_NEON) +# include +# endif +#endif + /* compat. with non-clang compilers */ #ifndef __has_builtin # define __has_builtin(x) 0 @@ -207,6 +231,39 @@ # define __has_feature(x) 0 #endif +/* C-language Attributes are added in C23. */ +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute) +# define ZSTD_HAS_C_ATTRIBUTE(x) __has_c_attribute(x) +#else +# define ZSTD_HAS_C_ATTRIBUTE(x) 0 +#endif + +/* Only use C++ attributes in C++. Some compilers report support for C++ + * attributes when compiling with C. + */ +#if defined(__cplusplus) && defined(__has_cpp_attribute) +# define ZSTD_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +#else +# define ZSTD_HAS_CPP_ATTRIBUTE(x) 0 +#endif + +/* Define ZSTD_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute. 
+ * - C23: https://en.cppreference.com/w/c/language/attributes/fallthrough + * - CPP17: https://en.cppreference.com/w/cpp/language/attributes/fallthrough + * - Else: __attribute__((__fallthrough__)) + */ +#ifndef ZSTD_FALLTHROUGH +# if ZSTD_HAS_C_ATTRIBUTE(fallthrough) +# define ZSTD_FALLTHROUGH [[fallthrough]] +# elif ZSTD_HAS_CPP_ATTRIBUTE(fallthrough) +# define ZSTD_FALLTHROUGH [[fallthrough]] +# elif __has_attribute(__fallthrough__) +# define ZSTD_FALLTHROUGH __attribute__((__fallthrough__)) +# else +# define ZSTD_FALLTHROUGH +# endif +#endif + /* detects whether we are being compiled under msan */ #ifndef ZSTD_MEMORY_SANITIZER # if __has_feature(memory_sanitizer) @@ -216,6 +273,15 @@ # endif #endif +/* detects whether we are being compiled undef dfsan */ +#ifndef ZSTD_DATAFLOW_SANITIZER +# if __has_feature(dataflow_sanitizer) +# define ZSTD_DATAFLOW_SANITIZER 1 +# else +# define ZSTD_DATAFLOW_SANITIZER 0 +# endif +#endif + #if ZSTD_MEMORY_SANITIZER /* Not all platforms that support msan provide sanitizers/msan_interface.h. * We therefore declare the functions we need ourselves, rather than trying to diff --git a/lib/common/entropy_common.c b/lib/common/entropy_common.c index 41cd69566..4229b40c5 100644 --- a/lib/common/entropy_common.c +++ b/lib/common/entropy_common.c @@ -43,8 +43,14 @@ static U32 FSE_ctz(U32 val) assert(val != 0); { # if defined(_MSC_VER) /* Visual */ - unsigned long r=0; - return _BitScanForward(&r, val) ? (unsigned)r : 0; + if (val != 0) { + unsigned long r; + _BitScanForward(&r, val); + return (unsigned)r; + } else { + /* Should not reach this code path */ + __assume(0); + } # elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */ return __builtin_ctz(val); # elif defined(__ICCARM__) /* IAR Intrinsic */ @@ -217,7 +223,7 @@ static size_t FSE_readNCount_body_default( } #if DYNAMIC_BMI2 -TARGET_ATTRIBUTE("bmi2") static size_t FSE_readNCount_body_bmi2( +BMI2_TARGET_ATTRIBUTE static size_t FSE_readNCount_body_bmi2( short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, const void* headerBuffer, size_t hbSize) { @@ -299,7 +305,7 @@ HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats, ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32)); weightTotal = 0; { U32 n; for (n=0; n= HUF_TABLELOG_MAX) return ERROR(corruption_detected); + if (huffWeight[n] > HUF_TABLELOG_MAX) return ERROR(corruption_detected); rankStats[huffWeight[n]]++; weightTotal += (1 << huffWeight[n]) >> 1; } } @@ -337,7 +343,7 @@ static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* r } #if DYNAMIC_BMI2 -static TARGET_ATTRIBUTE("bmi2") size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats, +static BMI2_TARGET_ATTRIBUTE size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize, void* workSpace, size_t wkspSize) diff --git a/lib/common/error_private.h b/lib/common/error_private.h index 6d8b9f776..007d81066 100644 --- a/lib/common/error_private.h +++ b/lib/common/error_private.h @@ -22,6 +22,8 @@ extern "C" { * Dependencies ******************************************/ #include "../zstd_errors.h" /* enum list */ +#include "compiler.h" +#include "debug.h" #include "zstd_deps.h" /* size_t */ @@ -73,6 +75,83 @@ ERR_STATIC const char* ERR_getErrorName(size_t code) return ERR_getErrorString(ERR_getErrorCode(code)); } +/** + * Ignore: this is an internal helper. 
+ * + * This is a helper function to help force C99-correctness during compilation. + * Under strict compilation modes, variadic macro arguments can't be empty. + * However, variadic function arguments can be. Using a function therefore lets + * us statically check that at least one (string) argument was passed, + * independent of the compilation flags. + */ +static INLINE_KEYWORD UNUSED_ATTR +void _force_has_format_string(const char *format, ...) { + (void)format; +} + +/** + * Ignore: this is an internal helper. + * + * We want to force this function invocation to be syntactically correct, but + * we don't want to force runtime evaluation of its arguments. + */ +#define _FORCE_HAS_FORMAT_STRING(...) \ + if (0) { \ + _force_has_format_string(__VA_ARGS__); \ + } + +#define ERR_QUOTE(str) #str + +/** + * Return the specified error if the condition evaluates to true. + * + * In debug modes, prints additional information. + * In order to do that (particularly, printing the conditional that failed), + * this can't just wrap RETURN_ERROR(). + */ +#define RETURN_ERROR_IF(cond, err, ...) \ + if (cond) { \ + RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \ + __FILE__, __LINE__, ERR_QUOTE(cond), ERR_QUOTE(ERROR(err))); \ + _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ + RAWLOG(3, ": " __VA_ARGS__); \ + RAWLOG(3, "\n"); \ + return ERROR(err); \ + } + +/** + * Unconditionally return the specified error. + * + * In debug modes, prints additional information. + */ +#define RETURN_ERROR(err, ...) \ + do { \ + RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \ + __FILE__, __LINE__, ERR_QUOTE(ERROR(err))); \ + _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ + RAWLOG(3, ": " __VA_ARGS__); \ + RAWLOG(3, "\n"); \ + return ERROR(err); \ + } while(0); + +/** + * If the provided expression evaluates to an error code, returns that error code. + * + * In debug modes, prints additional information. + */ +#define FORWARD_IF_ERROR(err, ...) \ + do { \ + size_t const err_code = (err); \ + if (ERR_isError(err_code)) { \ + RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \ + __FILE__, __LINE__, ERR_QUOTE(err), ERR_getErrorName(err_code)); \ + _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ + RAWLOG(3, ": " __VA_ARGS__); \ + RAWLOG(3, "\n"); \ + return err_code; \ + } \ + } while(0); + #if defined (__cplusplus) } #endif diff --git a/lib/common/fse.h b/lib/common/fse.h index 19dd4febc..714bfd3e7 100644 --- a/lib/common/fse.h +++ b/lib/common/fse.h @@ -336,8 +336,9 @@ size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue); /* FSE_buildCTable_wksp() : * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). * `wkspSize` must be >= `FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)` of `unsigned`. + * See FSE_buildCTable_wksp() for breakdown of workspace usage. 
*/ -#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (maxSymbolValue + 2 + (1ull << (tableLog - 2))) +#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (((maxSymbolValue + 2) + (1ull << (tableLog)))/2 + sizeof(U64)/sizeof(U32) /* additional 8 bytes for potential table overwrite */) #define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)) size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); diff --git a/lib/common/fse_decompress.c b/lib/common/fse_decompress.c index f4ff58fa0..a5a358015 100644 --- a/lib/common/fse_decompress.c +++ b/lib/common/fse_decompress.c @@ -365,7 +365,7 @@ static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, co } #if DYNAMIC_BMI2 -TARGET_ATTRIBUTE("bmi2") static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize) +BMI2_TARGET_ATTRIBUTE static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize) { return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1); } diff --git a/lib/common/huf.h b/lib/common/huf.h index 3d47ced03..85518481e 100644 --- a/lib/common/huf.h +++ b/lib/common/huf.h @@ -89,9 +89,9 @@ HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity, /** HUF_compress4X_wksp() : * Same as HUF_compress2(), but uses externally allocated `workSpace`. - * `workspace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */ -#define HUF_WORKSPACE_SIZE ((6 << 10) + 256) -#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32)) + * `workspace` must be at least as large as HUF_WORKSPACE_SIZE */ +#define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */) +#define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64)) HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, @@ -116,11 +116,11 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, /* *** Constants *** */ -#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */ +#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */ #define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */ #define HUF_SYMBOLVALUE_MAX 255 -#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ +#define HUF_TABLELOG_ABSOLUTEMAX 12 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ #if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX) # error "HUF_TABLELOG_MAX is too large !" #endif @@ -136,15 +136,11 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, /* static allocation of HUF's Compression Table */ /* this is a private definition, just exposed for allocation and strict aliasing purpose. 
never EVER access its members directly */ -struct HUF_CElt_s { - U16 val; - BYTE nbBits; -}; /* typedef'd to HUF_CElt */ -typedef struct HUF_CElt_s HUF_CElt; /* consider it an incomplete type */ -#define HUF_CTABLE_SIZE_U32(maxSymbolValue) ((maxSymbolValue)+1) /* Use tables of U32, for proper alignment */ -#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32)) +typedef size_t HUF_CElt; /* consider it an incomplete type */ +#define HUF_CTABLE_SIZE_ST(maxSymbolValue) ((maxSymbolValue)+2) /* Use tables of size_t, for proper alignment */ +#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_ST(maxSymbolValue) * sizeof(size_t)) #define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \ - HUF_CElt name[HUF_CTABLE_SIZE_U32(maxSymbolValue)] /* no final ; */ + HUF_CElt name[HUF_CTABLE_SIZE_ST(maxSymbolValue)] /* no final ; */ /* static allocation of HUF's DTable */ typedef U32 HUF_DTable; @@ -194,6 +190,7 @@ size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSym size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog); size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize); size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); +size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2); size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); @@ -206,12 +203,13 @@ typedef enum { * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. * If it uses hufTable it does not modify hufTable or repeat. * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. - * If preferRepeat then the old table will always be used if valid. */ + * If preferRepeat then the old table will always be used if valid. + * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */ size_t HUF_compress4X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2); + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible); /** HUF_buildCTable_wksp() : * Same as HUF_buildCTable(), but using externally allocated scratch buffer. 
@@ -249,11 +247,10 @@ size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, * Loading a CTable saved with HUF_writeCTable() */ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights); -/** HUF_getNbBits() : +/** HUF_getNbBitsFromCTable() : * Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX - * Note 1 : is not inlined, as HUF_CElt definition is private - * Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */ -U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue); + * Note 1 : is not inlined, as HUF_CElt definition is private */ +U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue); /* * HUF_decompress() does the following: @@ -305,18 +302,20 @@ size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* c /* ====================== */ size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); -size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ +size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U64 U64 */ size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); +size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2); /** HUF_compress1X_repeat() : * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. * If it uses hufTable it does not modify hufTable or repeat. * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. - * If preferRepeat then the old table will always be used if valid. */ + * If preferRepeat then the old table will always be used if valid. 
+ * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */ size_t HUF_compress1X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2); + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible); size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */ #ifndef HUF_FORCE_DECOMPRESS_X1 @@ -354,6 +353,9 @@ size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t ds #ifndef HUF_FORCE_DECOMPRESS_X2 size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2); #endif +#ifndef HUF_FORCE_DECOMPRESS_X1 +size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2); +#endif #endif /* HUF_STATIC_LINKING_ONLY */ diff --git a/lib/common/mem.h b/lib/common/mem.h index 9f3b81ab9..1c61b7e52 100644 --- a/lib/common/mem.h +++ b/lib/common/mem.h @@ -153,8 +153,22 @@ MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } MEM_STATIC unsigned MEM_isLittleEndian(void) { +#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) + return 1; +#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) + return 0; +#elif defined(__clang__) && __LITTLE_ENDIAN__ + return 1; +#elif defined(__clang__) && __BIG_ENDIAN__ + return 0; +#elif defined(_MSC_VER) && (_M_AMD64 || _M_IX86) + return 1; +#elif defined(__DMC__) && defined(_M_IX86) + return 1; +#else const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ return one.c[0]; +#endif } #if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) diff --git a/lib/common/zstd_internal.h b/lib/common/zstd_internal.h index 68252e987..9073df422 100644 --- a/lib/common/zstd_internal.h +++ b/lib/common/zstd_internal.h @@ -19,10 +19,8 @@ /*-************************************* * Dependencies ***************************************/ -#if !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON) -#include -#endif #include "compiler.h" +#include "cpu.h" #include "mem.h" #include "debug.h" /* assert, DEBUGLOG, RAWLOG, g_debuglevel */ #include "error_private.h" @@ -60,81 +58,7 @@ extern "C" { #undef MAX #define MIN(a,b) ((a)<(b) ? (a) : (b)) #define MAX(a,b) ((a)>(b) ? (a) : (b)) - -/** - * Ignore: this is an internal helper. - * - * This is a helper function to help force C99-correctness during compilation. - * Under strict compilation modes, variadic macro arguments can't be empty. - * However, variadic function arguments can be. Using a function therefore lets - * us statically check that at least one (string) argument was passed, - * independent of the compilation flags. - */ -static INLINE_KEYWORD UNUSED_ATTR -void _force_has_format_string(const char *format, ...) { - (void)format; -} - -/** - * Ignore: this is an internal helper. - * - * We want to force this function invocation to be syntactically correct, but - * we don't want to force runtime evaluation of its arguments. - */ -#define _FORCE_HAS_FORMAT_STRING(...) 
\ - if (0) { \ - _force_has_format_string(__VA_ARGS__); \ - } - -/** - * Return the specified error if the condition evaluates to true. - * - * In debug modes, prints additional information. - * In order to do that (particularly, printing the conditional that failed), - * this can't just wrap RETURN_ERROR(). - */ -#define RETURN_ERROR_IF(cond, err, ...) \ - if (cond) { \ - RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \ - __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \ - _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ - RAWLOG(3, ": " __VA_ARGS__); \ - RAWLOG(3, "\n"); \ - return ERROR(err); \ - } - -/** - * Unconditionally return the specified error. - * - * In debug modes, prints additional information. - */ -#define RETURN_ERROR(err, ...) \ - do { \ - RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \ - __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \ - _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ - RAWLOG(3, ": " __VA_ARGS__); \ - RAWLOG(3, "\n"); \ - return ERROR(err); \ - } while(0); - -/** - * If the provided expression evaluates to an error code, returns that error code. - * - * In debug modes, prints additional information. - */ -#define FORWARD_IF_ERROR(err, ...) \ - do { \ - size_t const err_code = (err); \ - if (ERR_isError(err_code)) { \ - RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \ - __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \ - _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ - RAWLOG(3, ": " __VA_ARGS__); \ - RAWLOG(3, "\n"); \ - return err_code; \ - } \ - } while(0); +#define BOUNDED(min,val,max) (MAX(min,MIN(val,max))) /*-************************************* @@ -247,19 +171,25 @@ static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG; * Shared functions to include for inlining *********************************************/ static void ZSTD_copy8(void* dst, const void* src) { -#if !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON) +#if defined(ZSTD_ARCH_ARM_NEON) vst1_u8((uint8_t*)dst, vld1_u8((const uint8_t*)src)); #else ZSTD_memcpy(dst, src, 8); #endif } - #define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; } + +/* Need to use memmove here since the literal buffer can now be located within + the dst buffer. In circumstances where the op "catches up" to where the + literal buffer is, there can be partial overlaps in this call on the final + copy if the literal is being shifted by less than 16 bytes. */ static void ZSTD_copy16(void* dst, const void* src) { -#if !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON) +#if defined(ZSTD_ARCH_ARM_NEON) vst1q_u8((uint8_t*)dst, vld1q_u8((const uint8_t*)src)); +#elif defined(ZSTD_ARCH_X86_SSE2) + _mm_storeu_si128((__m128i*)dst, _mm_loadu_si128((const __m128i*)src)); #else - ZSTD_memcpy(dst, src, 16); + ZSTD_memmove(dst, src, 16); #endif } #define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; } @@ -288,8 +218,6 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e BYTE* op = (BYTE*)dst; BYTE* const oend = op + length; - assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff <= -WILDCOPY_VECLEN)); - if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) { /* Handle short offset copies. */ do { @@ -436,8 +364,14 @@ MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus # if STATIC_BMI2 == 1 return _lzcnt_u32(val)^31; # else - unsigned long r=0; - return _BitScanReverse(&r, val) ? 
(unsigned)r : 0; + if (val != 0) { + unsigned long r; + _BitScanReverse(&r, val); + return (unsigned)r; + } else { + /* Should not reach this code path */ + __assume(0); + } # endif # elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */ return __builtin_clz (val) ^ 31; @@ -456,6 +390,63 @@ MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus } } +/** + * Counts the number of trailing zeros of a `size_t`. + * Most compilers should support CTZ as a builtin. A backup + * implementation is provided if the builtin isn't supported, but + * it may not be terribly efficient. + */ +MEM_STATIC unsigned ZSTD_countTrailingZeros(size_t val) +{ + if (MEM_64bits()) { +# if defined(_MSC_VER) && defined(_WIN64) +# if STATIC_BMI2 + return _tzcnt_u64(val); +# else + if (val != 0) { + unsigned long r; + _BitScanForward64(&r, (U64)val); + return (unsigned)r; + } else { + /* Should not reach this code path */ + __assume(0); + } +# endif +# elif defined(__GNUC__) && (__GNUC__ >= 4) + return __builtin_ctzll((U64)val); +# else + static const int DeBruijnBytePos[64] = { 0, 1, 2, 7, 3, 13, 8, 19, + 4, 25, 14, 28, 9, 34, 20, 56, + 5, 17, 26, 54, 15, 41, 29, 43, + 10, 31, 38, 35, 21, 45, 49, 57, + 63, 6, 12, 18, 24, 27, 33, 55, + 16, 53, 40, 42, 30, 37, 44, 48, + 62, 11, 23, 32, 52, 39, 36, 47, + 61, 22, 51, 46, 60, 50, 59, 58 }; + return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; +# endif + } else { /* 32 bits */ +# if defined(_MSC_VER) + if (val != 0) { + unsigned long r; + _BitScanForward(&r, (U32)val); + return (unsigned)r; + } else { + /* Should not reach this code path */ + __assume(0); + } +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return __builtin_ctz((U32)val); +# else + static const int DeBruijnBytePos[32] = { 0, 1, 28, 2, 29, 14, 24, 3, + 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, + 26, 12, 18, 6, 11, 5, 10, 9 }; + return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; +# endif + } +} + /* ZSTD_invalidateRepCodes() : * ensures next compression will not use repcodes from previous block. @@ -482,6 +473,14 @@ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, const void* src, size_t srcSize); +/** + * @returns true iff the CPU supports dynamic BMI2 dispatch. + */ +MEM_STATIC int ZSTD_cpuSupportsBmi2(void) +{ + ZSTD_cpuid_t cpuid = ZSTD_cpuid(); + return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid); +} #if defined (__cplusplus) } diff --git a/lib/common/zstd_trace.h b/lib/common/zstd_trace.h index 2da564077..f9121f7d8 100644 --- a/lib/common/zstd_trace.h +++ b/lib/common/zstd_trace.h @@ -17,10 +17,19 @@ extern "C" { #include -/* weak symbol support */ -#if !defined(ZSTD_HAVE_WEAK_SYMBOLS) && defined(__GNUC__) && \ +/* weak symbol support + * For now, enable conservatively: + * - Only GNUC + * - Only ELF + * - Only x86-64 and i386 + * Also, explicitly disable on platforms known not to work so they aren't + * forgotten in the future. 
+ */ +#if !defined(ZSTD_HAVE_WEAK_SYMBOLS) && \ + defined(__GNUC__) && defined(__ELF__) && \ + (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86)) && \ !defined(__APPLE__) && !defined(_WIN32) && !defined(__MINGW32__) && \ - !defined(__CYGWIN__) + !defined(__CYGWIN__) && !defined(_AIX) # define ZSTD_HAVE_WEAK_SYMBOLS 1 #else # define ZSTD_HAVE_WEAK_SYMBOLS 0 diff --git a/lib/compress/clevels.h b/lib/compress/clevels.h new file mode 100644 index 000000000..38622f1a1 --- /dev/null +++ b/lib/compress/clevels.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_CLEVELS_H +#define ZSTD_CLEVELS_H + +#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */ +#include "../zstd.h" + +/*-===== Pre-defined compression levels =====-*/ + +#define ZSTD_MAX_CLEVEL 22 +#define ZSTD_MAX_32BIT_CLEVEL 21 + +#ifdef __GNUC__ +__attribute__((__unused__)) +#endif + +static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = { +{ /* "default" - for any srcSize > 256 KB */ + /* W, C, H, S, L, TL, strat */ + { 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */ + { 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */ + { 20, 15, 16, 1, 6, 0, ZSTD_fast }, /* level 2 */ + { 21, 16, 17, 1, 5, 0, ZSTD_dfast }, /* level 3 */ + { 21, 18, 18, 1, 5, 0, ZSTD_dfast }, /* level 4 */ + { 21, 18, 19, 4, 5, 2, ZSTD_greedy }, /* level 5 */ + { 21, 19, 20, 5, 5, 4, ZSTD_greedy }, /* level 6 */ + { 21, 19, 20, 4, 5, 8, ZSTD_lazy }, /* level 7 */ + { 21, 19, 20, 5, 5, 16, ZSTD_lazy }, /* level 8 */ + { 21, 20, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 9 */ + { 22, 21, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */ + { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 11 */ + { 22, 21, 22, 6, 5, 32, ZSTD_lazy2 }, /* level 12 */ + { 22, 22, 22, 4, 5, 32, ZSTD_btlazy2 }, /* level 13 */ + { 22, 22, 23, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */ + { 22, 23, 23, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */ + { 22, 22, 22, 5, 5, 48, ZSTD_btopt }, /* level 16 */ + { 23, 23, 22, 5, 4, 64, ZSTD_btopt }, /* level 17 */ + { 23, 23, 22, 6, 3, 64, ZSTD_btultra }, /* level 18 */ + { 23, 24, 22, 7, 3,256, ZSTD_btultra2}, /* level 19 */ + { 25, 25, 23, 7, 3,256, ZSTD_btultra2}, /* level 20 */ + { 26, 26, 24, 7, 3,512, ZSTD_btultra2}, /* level 21 */ + { 27, 27, 25, 9, 3,999, ZSTD_btultra2}, /* level 22 */ +}, +{ /* for srcSize <= 256 KB */ + /* W, C, H, S, L, T, strat */ + { 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ + { 18, 13, 14, 1, 6, 0, ZSTD_fast }, /* level 1 */ + { 18, 14, 14, 1, 5, 0, ZSTD_dfast }, /* level 2 */ + { 18, 16, 16, 1, 4, 0, ZSTD_dfast }, /* level 3 */ + { 18, 16, 17, 3, 5, 2, ZSTD_greedy }, /* level 4.*/ + { 18, 17, 18, 5, 5, 2, ZSTD_greedy }, /* level 5.*/ + { 18, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6.*/ + { 18, 18, 19, 4, 4, 4, ZSTD_lazy }, /* level 7 */ + { 18, 18, 19, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ + { 18, 18, 19, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ + { 18, 18, 19, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ + { 18, 18, 19, 5, 4, 12, ZSTD_btlazy2 }, /* level 11.*/ + { 18, 19, 19, 7, 4, 12, ZSTD_btlazy2 }, /* level 12.*/ + { 18, 18, 19, 4, 4, 16, ZSTD_btopt }, /* level 13 */ + { 18, 18, 
19, 4, 3, 32, ZSTD_btopt }, /* level 14.*/ + { 18, 18, 19, 6, 3,128, ZSTD_btopt }, /* level 15.*/ + { 18, 19, 19, 6, 3,128, ZSTD_btultra }, /* level 16.*/ + { 18, 19, 19, 8, 3,256, ZSTD_btultra }, /* level 17.*/ + { 18, 19, 19, 6, 3,128, ZSTD_btultra2}, /* level 18.*/ + { 18, 19, 19, 8, 3,256, ZSTD_btultra2}, /* level 19.*/ + { 18, 19, 19, 10, 3,512, ZSTD_btultra2}, /* level 20.*/ + { 18, 19, 19, 12, 3,512, ZSTD_btultra2}, /* level 21.*/ + { 18, 19, 19, 13, 3,999, ZSTD_btultra2}, /* level 22.*/ +}, +{ /* for srcSize <= 128 KB */ + /* W, C, H, S, L, T, strat */ + { 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ + { 17, 12, 13, 1, 6, 0, ZSTD_fast }, /* level 1 */ + { 17, 13, 15, 1, 5, 0, ZSTD_fast }, /* level 2 */ + { 17, 15, 16, 2, 5, 0, ZSTD_dfast }, /* level 3 */ + { 17, 17, 17, 2, 4, 0, ZSTD_dfast }, /* level 4 */ + { 17, 16, 17, 3, 4, 2, ZSTD_greedy }, /* level 5 */ + { 17, 16, 17, 3, 4, 4, ZSTD_lazy }, /* level 6 */ + { 17, 16, 17, 3, 4, 8, ZSTD_lazy2 }, /* level 7 */ + { 17, 16, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ + { 17, 16, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ + { 17, 16, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ + { 17, 17, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 11 */ + { 17, 18, 17, 7, 4, 12, ZSTD_btlazy2 }, /* level 12 */ + { 17, 18, 17, 3, 4, 12, ZSTD_btopt }, /* level 13.*/ + { 17, 18, 17, 4, 3, 32, ZSTD_btopt }, /* level 14.*/ + { 17, 18, 17, 6, 3,256, ZSTD_btopt }, /* level 15.*/ + { 17, 18, 17, 6, 3,128, ZSTD_btultra }, /* level 16.*/ + { 17, 18, 17, 8, 3,256, ZSTD_btultra }, /* level 17.*/ + { 17, 18, 17, 10, 3,512, ZSTD_btultra }, /* level 18.*/ + { 17, 18, 17, 5, 3,256, ZSTD_btultra2}, /* level 19.*/ + { 17, 18, 17, 7, 3,512, ZSTD_btultra2}, /* level 20.*/ + { 17, 18, 17, 9, 3,512, ZSTD_btultra2}, /* level 21.*/ + { 17, 18, 17, 11, 3,999, ZSTD_btultra2}, /* level 22.*/ +}, +{ /* for srcSize <= 16 KB */ + /* W, C, H, S, L, T, strat */ + { 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ + { 14, 14, 15, 1, 5, 0, ZSTD_fast }, /* level 1 */ + { 14, 14, 15, 1, 4, 0, ZSTD_fast }, /* level 2 */ + { 14, 14, 15, 2, 4, 0, ZSTD_dfast }, /* level 3 */ + { 14, 14, 14, 4, 4, 2, ZSTD_greedy }, /* level 4 */ + { 14, 14, 14, 3, 4, 4, ZSTD_lazy }, /* level 5.*/ + { 14, 14, 14, 4, 4, 8, ZSTD_lazy2 }, /* level 6 */ + { 14, 14, 14, 6, 4, 8, ZSTD_lazy2 }, /* level 7 */ + { 14, 14, 14, 8, 4, 8, ZSTD_lazy2 }, /* level 8.*/ + { 14, 15, 14, 5, 4, 8, ZSTD_btlazy2 }, /* level 9.*/ + { 14, 15, 14, 9, 4, 8, ZSTD_btlazy2 }, /* level 10.*/ + { 14, 15, 14, 3, 4, 12, ZSTD_btopt }, /* level 11.*/ + { 14, 15, 14, 4, 3, 24, ZSTD_btopt }, /* level 12.*/ + { 14, 15, 14, 5, 3, 32, ZSTD_btultra }, /* level 13.*/ + { 14, 15, 15, 6, 3, 64, ZSTD_btultra }, /* level 14.*/ + { 14, 15, 15, 7, 3,256, ZSTD_btultra }, /* level 15.*/ + { 14, 15, 15, 5, 3, 48, ZSTD_btultra2}, /* level 16.*/ + { 14, 15, 15, 6, 3,128, ZSTD_btultra2}, /* level 17.*/ + { 14, 15, 15, 7, 3,256, ZSTD_btultra2}, /* level 18.*/ + { 14, 15, 15, 8, 3,256, ZSTD_btultra2}, /* level 19.*/ + { 14, 15, 15, 8, 3,512, ZSTD_btultra2}, /* level 20.*/ + { 14, 15, 15, 9, 3,512, ZSTD_btultra2}, /* level 21.*/ + { 14, 15, 15, 10, 3,999, ZSTD_btultra2}, /* level 22.*/ +}, +}; + + + +#endif /* ZSTD_CLEVELS_H */ diff --git a/lib/compress/fse_compress.c b/lib/compress/fse_compress.c index b4297ec88..5547b4ac0 100644 --- a/lib/compress/fse_compress.c +++ b/lib/compress/fse_compress.c @@ -75,13 +75,14 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? 
tableSize>>1 : 1) ;
     FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
     U32 const step = FSE_TABLESTEP(tableSize);
+    U32 const maxSV1 = maxSymbolValue+1;
-    U32* cumul = (U32*)workSpace;
-    FSE_FUNCTION_TYPE* tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSymbolValue + 2));
+    U16* cumul = (U16*)workSpace;   /* size = maxSV1 */
+    FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSV1+1));  /* size = tableSize */
     U32 highThreshold = tableSize-1;
-    if ((size_t)workSpace & 3) return ERROR(GENERIC); /* Must be 4 byte aligned */
+    assert(((size_t)workSpace & 1) == 0);  /* Must be 2 bytes-aligned */
     if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge);
     /* CTable header */
     tableU16[-2] = (U16) tableLog;
@@ -98,20 +99,61 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
     /* symbol start positions */
     {   U32 u;
         cumul[0] = 0;
-        for (u=1; u <= maxSymbolValue+1; u++) {
+        for (u=1; u <= maxSV1; u++) {
             if (normalizedCounter[u-1]==-1) {  /* Low proba symbol */
                 cumul[u] = cumul[u-1] + 1;
                 tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
             } else {
-                cumul[u] = cumul[u-1] + normalizedCounter[u-1];
+                assert(normalizedCounter[u-1] >= 0);
+                cumul[u] = cumul[u-1] + (U16)normalizedCounter[u-1];
+                assert(cumul[u] >= cumul[u-1]);  /* no overflow */
         }   }
-        cumul[maxSymbolValue+1] = tableSize+1;
+        cumul[maxSV1] = (U16)(tableSize+1);
     }
     /* Spread symbols */
-    {   U32 position = 0;
+    if (highThreshold == tableSize - 1) {
+        /* Case for no low prob count symbols. Lay down 8 bytes at a time
+         * to reduce branch misses since we are operating on a small block
+         */
+        BYTE* const spread = tableSymbol + tableSize; /* size = tableSize + 8 (may write beyond tableSize) */
+        {   U64 const add = 0x0101010101010101ull;
+            size_t pos = 0;
+            U64 sv = 0;
+            U32 s;
+            for (s=0; s<maxSV1; ++s, sv += add) {
+                int i;
+                int const n = normalizedCounter[s];
+                MEM_write64(spread + pos, sv);
+                for (i = 8; i < n; i += 8) {
+                    MEM_write64(spread + pos + i, sv);
+                }
+                assert(n>=0);
+                pos += (size_t)n;
+            }
+        }
+        /* Spread symbols across the table. Lack of lowprob symbols means that
+         * we don't need variable sized inner loop, so we can unroll the loop and
+         * reduce branch misses.
+         */
+        {   size_t position = 0;
+            size_t s;
+            size_t const unroll = 2; /* Experimentally determined optimal unroll */
+            assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
+            for (s = 0; s < (size_t)tableSize; s += unroll) {
+                size_t u;
+                for (u = 0; u < unroll; ++u) {
+                    size_t const uPosition = (position + (u * step)) & tableMask;
+                    tableSymbol[uPosition] = spread[s + u];
+                }
+                position = (position + (unroll * step)) & tableMask;
+            }
+            assert(position == 0);   /* Must have initialized all positions */
+        }
+    } else {
+        U32 position = 0;
         U32 symbol;
-        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
+        for (symbol=0; symbol<maxSV1; symbol++) {
             int nbOccurrences;
             int const freq = normalizedCounter[symbol];
             for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
                 tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
                 position = (position + step) & tableMask;
                 while (position > highThreshold)
                     position = (position + step) & tableMask;   /* Low proba area */
         }   }
-        assert(position==0);  /* Must have initialized all positions */
     }
@@ -144,16 +185,17 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
             case -1:
             case  1:
                 symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
-                symbolTT[s].deltaFindState = total - 1;
+                symbolTT[s].deltaFindState = (int)(total - 1);
                 total ++;
                 break;
             default :
-                {   U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1);
-                    U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
+                assert(normalizedCounter[s] > 1);
+                {   U32 const maxBitsOut = tableLog - BIT_highbit32 ((U32)normalizedCounter[s]-1);
+                    U32 const minStatePlus = (U32)normalizedCounter[s] << maxBitsOut;
                     symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
-                    symbolTT[s].deltaFindState = total - normalizedCounter[s];
-                    total += normalizedCounter[s];
+                    symbolTT[s].deltaFindState = (int)(total - (unsigned)normalizedCounter[s]);
+                    total += (unsigned)normalizedCounter[s];
     }   }   }   }
 #if 0  /* debug : symbol costs */
@@ -164,32 +206,26 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
                     symbol, normalizedCounter[symbol],
                     FSE_getMaxNbBits(symbolTT, symbol),
                     (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
-        }
-    }
+    }   }
 #endif
     return 0;
 }
-#ifndef ZSTD_NO_UNUSED_FUNCTIONS
-size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
-{
-    FSE_FUNCTION_TYPE tableSymbol[FSE_MAX_TABLESIZE];   /* memset() is not necessary, even if static analyzer complain about it */
-    return FSE_buildCTable_wksp(ct, normalizedCounter, maxSymbolValue, tableLog, tableSymbol, sizeof(tableSymbol));
-}
-#endif
-
 #ifndef FSE_COMMONDEFS_ONLY
-
 /*-**************************************************************
 *  FSE NCount encoding
 ****************************************************************/
 size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
 {
-    size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
+    size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog
+                                   + 4 /* bitCount initialized at 4 */
+                                   + 2 /* first two symbols may use one additional bit each */) / 8)
+                                   + 1 /* round up to whole nb bytes */
+                                   + 2 /* additional two bytes for bitstream flush */;
     return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND;  /* maxSymbolValue==0 ?
use default */ } diff --git a/lib/compress/huf_compress.c b/lib/compress/huf_compress.c index 485906e67..41a414b5c 100644 --- a/lib/compress/huf_compress.c +++ b/lib/compress/huf_compress.c @@ -53,6 +53,28 @@ unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxS /* ******************************************************* * HUF : Huffman block compression *********************************************************/ +#define HUF_WORKSPACE_MAX_ALIGNMENT 8 + +static void* HUF_alignUpWorkspace(void* workspace, size_t* workspaceSizePtr, size_t align) +{ + size_t const mask = align - 1; + size_t const rem = (size_t)workspace & mask; + size_t const add = (align - rem) & mask; + BYTE* const aligned = (BYTE*)workspace + add; + assert((align & (align - 1)) == 0); /* pow 2 */ + assert(align <= HUF_WORKSPACE_MAX_ALIGNMENT); + if (*workspaceSizePtr >= add) { + assert(add < align); + assert(((size_t)aligned & mask) == 0); + *workspaceSizePtr -= add; + return aligned; + } else { + *workspaceSizePtr = 0; + return NULL; + } +} + + /* HUF_compressWeights() : * Same as FSE_compress(), but dedicated to huff0's weights compression. * The use case needs much less stack memory. @@ -75,7 +97,7 @@ static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightT unsigned maxSymbolValue = HUF_TABLELOG_MAX; U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER; - HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)workspace; + HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, sizeof(U32)); if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC); @@ -106,6 +128,40 @@ static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightT return (size_t)(op-ostart); } +static size_t HUF_getNbBits(HUF_CElt elt) +{ + return elt & 0xFF; +} + +static size_t HUF_getNbBitsFast(HUF_CElt elt) +{ + return elt; +} + +static size_t HUF_getValue(HUF_CElt elt) +{ + return elt & ~0xFF; +} + +static size_t HUF_getValueFast(HUF_CElt elt) +{ + return elt; +} + +static void HUF_setNbBits(HUF_CElt* elt, size_t nbBits) +{ + assert(nbBits <= HUF_TABLELOG_ABSOLUTEMAX); + *elt = nbBits; +} + +static void HUF_setValue(HUF_CElt* elt, size_t value) +{ + size_t const nbBits = HUF_getNbBits(*elt); + if (nbBits > 0) { + assert((value >> nbBits) == 0); + *elt |= value << (sizeof(HUF_CElt) * 8 - nbBits); + } +} typedef struct { HUF_CompressWeightsWksp wksp; @@ -117,9 +173,10 @@ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize) { + HUF_CElt const* const ct = CTable + 1; BYTE* op = (BYTE*)dst; U32 n; - HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)workspace; + HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, sizeof(U32)); /* check conditions */ if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC); @@ -130,9 +187,10 @@ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, for (n=1; nbitsToWeight[n] = (BYTE)(huffLog + 1 - n); for (n=0; nhuffWeight[n] = wksp->bitsToWeight[CTable[n].nbBits]; + wksp->huffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])]; /* attempt weights compression by FSE */ + if (maxDstSize < 1) return ERROR(dstSize_tooSmall); { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) ); if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */ op[0] = 
(BYTE)hSize; @@ -166,6 +224,7 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */ U32 tableLog = 0; U32 nbSymbols = 0; + HUF_CElt* const ct = CTable + 1; /* get symbol weights */ CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize)); @@ -175,6 +234,8 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall); + CTable[0] = tableLog; + /* Prepare base value per rank */ { U32 n, nextRankStart = 0; for (n=1; n<=tableLog; n++) { @@ -186,13 +247,13 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void /* fill nbBits */ { U32 n; for (n=0; nn=tableLog+1 */ U16 valPerRank[HUF_TABLELOG_MAX+2] = {0}; - { U32 n; for (n=0; n>= 1; } } /* assign value within rank, symbol order */ - { U32 n; for (n=0; n huffNode[i-1].count) { + return 0; + } + } + return 1; +} + +/* Insertion sort by descending order */ +HINT_INLINE void HUF_insertionSort(nodeElt huffNode[], int const low, int const high) { + int i; + int const size = high-low+1; + huffNode += low; + for (i = 1; i < size; ++i) { + nodeElt const key = huffNode[i]; + int j = i - 1; + while (j >= 0 && huffNode[j].count < key.count) { + huffNode[j + 1] = huffNode[j]; + j--; + } + huffNode[j + 1] = key; + } +} + +/* Pivot helper function for quicksort. */ +static int HUF_quickSortPartition(nodeElt arr[], int const low, int const high) { + /* Simply select rightmost element as pivot. "Better" selectors like + * median-of-three don't experimentally appear to have any benefit. + */ + U32 const pivot = arr[high].count; + int i = low - 1; + int j = low; + for ( ; j < high; j++) { + if (arr[j].count > pivot) { + i++; + HUF_swapNodes(&arr[i], &arr[j]); + } + } + HUF_swapNodes(&arr[i + 1], &arr[high]); + return i + 1; +} + +/* Classic quicksort by descending with partially iterative calls + * to reduce worst case callstack size. + */ +static void HUF_simpleQuickSort(nodeElt arr[], int low, int high) { + int const kInsertionSortThreshold = 8; + if (high - low < kInsertionSortThreshold) { + HUF_insertionSort(arr, low, high); + return; + } + while (low < high) { + int const idx = HUF_quickSortPartition(arr, low, high); + if (idx - low < high - idx) { + HUF_simpleQuickSort(arr, low, idx - 1); + low = idx + 1; + } else { + HUF_simpleQuickSort(arr, idx + 1, high); + high = idx - 1; + } + } +} + /** * HUF_sort(): * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order. + * This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket. * * @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled. * Must have (maxSymbolValue + 1) entries. @@ -390,44 +547,52 @@ typedef struct { * @param[in] maxSymbolValue Maximum symbol value. * @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries. 
*/ -static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition) -{ - int n; - int const maxSymbolValue1 = (int)maxSymbolValue + 1; +static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSymbolValue, rankPos rankPosition[]) { + U32 n; + U32 const maxSymbolValue1 = maxSymbolValue+1; /* Compute base and set curr to base. - * For symbol s let lowerRank = BIT_highbit32(count[n]+1) and rank = lowerRank + 1. - * Then 2^lowerRank <= count[n]+1 <= 2^rank. + * For symbol s let lowerRank = HUF_getIndex(count[n]) and rank = lowerRank + 1. + * See HUF_getIndex to see bucketing strategy. * We attribute each symbol to lowerRank's base value, because we want to know where * each rank begins in the output, so for rank R we want to count ranks R+1 and above. */ ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE); for (n = 0; n < maxSymbolValue1; ++n) { - U32 lowerRank = BIT_highbit32(count[n] + 1); + U32 lowerRank = HUF_getIndex(count[n]); + assert(lowerRank < RANK_POSITION_TABLE_SIZE - 1); rankPosition[lowerRank].base++; } + assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0); + /* Set up the rankPosition table */ for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) { rankPosition[n-1].base += rankPosition[n].base; rankPosition[n-1].curr = rankPosition[n-1].base; } - /* Sort */ + + /* Insert each symbol into their appropriate bucket, setting up rankPosition table. */ for (n = 0; n < maxSymbolValue1; ++n) { U32 const c = count[n]; - U32 const r = BIT_highbit32(c+1) + 1; - U32 pos = rankPosition[r].curr++; - /* Insert into the correct position in the rank. - * We have at most 256 symbols, so this insertion should be fine. - */ - while ((pos > rankPosition[r].base) && (c > huffNode[pos-1].count)) { - huffNode[pos] = huffNode[pos-1]; - pos--; - } + U32 const r = HUF_getIndex(c) + 1; + U32 const pos = rankPosition[r].curr++; + assert(pos < maxSymbolValue1); huffNode[pos].count = c; huffNode[pos].byte = (BYTE)n; } -} + /* Sort each bucket. */ + for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) { + U32 const bucketSize = rankPosition[n].curr-rankPosition[n].base; + U32 const bucketStartIdx = rankPosition[n].base; + if (bucketSize > 1) { + assert(bucketStartIdx < maxSymbolValue1); + HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize-1); + } + } + + assert(HUF_isSorted(huffNode, maxSymbolValue1)); +} /** HUF_buildCTable_wksp() : * Same as HUF_buildCTable(), but using externally allocated scratch buffer. 
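/* Illustrative aside (not part of this patch): how the sorting pieces above combine.
 * HUF_sort() buckets symbols by HUF_getIndex(count), then sorts only the buckets above
 * RANK_POSITION_DISTINCT_COUNT_CUTOFF with HUF_simpleQuickSort(). The standalone sketch
 * below mirrors that sort on a plain array of counts -- insertion sort under a small
 * threshold, rightmost-pivot partition, recursion into the smaller half only so the call
 * stack stays O(log n). Names here are illustrative, not zstd API.
 */
#if 0
static void example_swap(U32* a, U32* b) { U32 const t = *a; *a = *b; *b = t; }

static void example_sortDescending(U32 arr[], int low, int high)
{
    int const kInsertionSortThreshold = 8;
    if (high - low < kInsertionSortThreshold) {
        int i;
        for (i = low + 1; i <= high; ++i) {          /* insertion sort, descending */
            U32 const key = arr[i];
            int j = i - 1;
            while (j >= low && arr[j] < key) { arr[j + 1] = arr[j]; j--; }
            arr[j + 1] = key;
        }
        return;
    }
    while (low < high) {
        U32 const pivot = arr[high];                  /* rightmost element as pivot */
        int i = low - 1;
        int j;
        for (j = low; j < high; j++) {
            if (arr[j] > pivot) { i++; example_swap(&arr[i], &arr[j]); }
        }
        example_swap(&arr[i + 1], &arr[high]);        /* pivot lands at index i+1 */
        if ((i + 1) - low < high - (i + 1)) {
            example_sortDescending(arr, low, i);      /* recurse into the smaller side */
            low = i + 2;                              /* keep iterating on the larger side */
        } else {
            example_sortDescending(arr, i + 2, high);
            high = i;
        }
    }
}
#endif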
@@ -490,6 +655,7 @@ static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue) */ static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits) { + HUF_CElt* const ct = CTable + 1; /* fill result into ctable (val, nbBits) */ int n; U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; @@ -505,20 +671,20 @@ static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, i min >>= 1; } } for (n=0; nhuffNodeTbl; nodeElt* const huffNode = huffNode0+1; int nonNullRank; /* safety checks */ - if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ if (wkspSize < sizeof(HUF_buildCTable_wksp_tables)) return ERROR(workSpace_tooSmall); if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT; @@ -536,96 +702,334 @@ size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbo maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits); if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ - HUF_buildCTableFromTree(tree, huffNode, nonNullRank, maxSymbolValue, maxNbBits); + HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits); return maxNbBits; } size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { + HUF_CElt const* ct = CTable + 1; size_t nbBits = 0; int s; for (s = 0; s <= (int)maxSymbolValue; ++s) { - nbBits += CTable[s].nbBits * count[s]; + nbBits += HUF_getNbBits(ct[s]) * count[s]; } return nbBits >> 3; } int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { + HUF_CElt const* ct = CTable + 1; int bad = 0; int s; for (s = 0; s <= (int)maxSymbolValue; ++s) { - bad |= (count[s] != 0) & (CTable[s].nbBits == 0); + bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0); } return !bad; } size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); } -FORCE_INLINE_TEMPLATE void -HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable) +/** HUF_CStream_t: + * Huffman uses its own BIT_CStream_t implementation. + * There are three major differences from BIT_CStream_t: + * 1. HUF_addBits() takes a HUF_CElt (size_t) which is + * the pair (nbBits, value) in the format: + * format: + * - Bits [0, 4) = nbBits + * - Bits [4, 64 - nbBits) = 0 + * - Bits [64 - nbBits, 64) = value + * 2. The bitContainer is built from the upper bits and + * right shifted. E.g. to add a new value of N bits + * you right shift the bitContainer by N, then or in + * the new value into the N upper bits. + * 3. The bitstream has two bit containers. You can add + * bits to the second container and merge them into + * the first container. + */ + +#define HUF_BITS_IN_CONTAINER (sizeof(size_t) * 8) + +typedef struct { + size_t bitContainer[2]; + size_t bitPos[2]; + + BYTE* startPtr; + BYTE* ptr; + BYTE* endPtr; +} HUF_CStream_t; + +/**! HUF_initCStream(): + * Initializes the bitstream. + * @returns 0 or an error code. + */ +static size_t HUF_initCStream(HUF_CStream_t* bitC, + void* startPtr, size_t dstCapacity) { - BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits); + ZSTD_memset(bitC, 0, sizeof(*bitC)); + bitC->startPtr = (BYTE*)startPtr; + bitC->ptr = bitC->startPtr; + bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer[0]); + if (dstCapacity <= sizeof(bitC->bitContainer[0])) return ERROR(dstSize_tooSmall); + return 0; } -#define HUF_FLUSHBITS(s) BIT_flushBits(s) +/*! 
HUF_addBits(): + * Adds the symbol stored in HUF_CElt elt to the bitstream. + * + * @param elt The element we're adding. This is a (nbBits, value) pair. + * See the HUF_CStream_t docs for the format. + * @param idx Insert into the bitstream at this idx. + * @param kFast This is a template parameter. If the bitstream is guaranteed + * to have at least 4 unused bits after this call it may be 1, + * otherwise it must be 0. HUF_addBits() is faster when fast is set. + */ +FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int idx, int kFast) +{ + assert(idx <= 1); + assert(HUF_getNbBits(elt) <= HUF_TABLELOG_ABSOLUTEMAX); + /* This is efficient on x86-64 with BMI2 because shrx + * only reads the low 6 bits of the register. The compiler + * knows this and elides the mask. When fast is set, + * every operation can use the same value loaded from elt. + */ + bitC->bitContainer[idx] >>= HUF_getNbBits(elt); + bitC->bitContainer[idx] |= kFast ? HUF_getValueFast(elt) : HUF_getValue(elt); + /* We only read the low 8 bits of bitC->bitPos[idx] so it + * doesn't matter that the high bits have noise from the value. + */ + bitC->bitPos[idx] += HUF_getNbBitsFast(elt); + assert((bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER); + /* The last 4-bits of elt are dirty if fast is set, + * so we must not be overwriting bits that have already been + * inserted into the bit container. + */ +#if DEBUGLEVEL >= 1 + { + size_t const nbBits = HUF_getNbBits(elt); + size_t const dirtyBits = nbBits == 0 ? 0 : BIT_highbit32((U32)nbBits) + 1; + (void)dirtyBits; + /* Middle bits are 0. */ + assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0); + /* We didn't overwrite any bits in the bit container. */ + assert(!kFast || (bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER); + (void)dirtyBits; + } +#endif +} -#define HUF_FLUSHBITS_1(stream) \ - if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream) +FORCE_INLINE_TEMPLATE void HUF_zeroIndex1(HUF_CStream_t* bitC) +{ + bitC->bitContainer[1] = 0; + bitC->bitPos[1] = 0; +} + +/*! HUF_mergeIndex1() : + * Merges the bit container @ index 1 into the bit container @ index 0 + * and zeros the bit container @ index 1. + */ +FORCE_INLINE_TEMPLATE void HUF_mergeIndex1(HUF_CStream_t* bitC) +{ + assert((bitC->bitPos[1] & 0xFF) < HUF_BITS_IN_CONTAINER); + bitC->bitContainer[0] >>= (bitC->bitPos[1] & 0xFF); + bitC->bitContainer[0] |= bitC->bitContainer[1]; + bitC->bitPos[0] += bitC->bitPos[1]; + assert((bitC->bitPos[0] & 0xFF) <= HUF_BITS_IN_CONTAINER); +} + +/*! HUF_flushBits() : +* Flushes the bits in the bit container @ index 0. +* +* @post bitPos will be < 8. +* @param kFast If kFast is set then we must know a-priori that +* the bit container will not overflow. +*/ +FORCE_INLINE_TEMPLATE void HUF_flushBits(HUF_CStream_t* bitC, int kFast) +{ + /* The upper bits of bitPos are noisy, so we must mask by 0xFF. */ + size_t const nbBits = bitC->bitPos[0] & 0xFF; + size_t const nbBytes = nbBits >> 3; + /* The top nbBits bits of bitContainer are the ones we need. */ + size_t const bitContainer = bitC->bitContainer[0] >> (HUF_BITS_IN_CONTAINER - nbBits); + /* Mask bitPos to account for the bytes we consumed. 
*/ + bitC->bitPos[0] &= 7; + assert(nbBits > 0); + assert(nbBits <= sizeof(bitC->bitContainer[0]) * 8); + assert(bitC->ptr <= bitC->endPtr); + MEM_writeLEST(bitC->ptr, bitContainer); + bitC->ptr += nbBytes; + assert(!kFast || bitC->ptr <= bitC->endPtr); + if (!kFast && bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr; + /* bitContainer doesn't need to be modified because the leftover + * bits are already the top bitPos bits. And we don't care about + * noise in the lower values. + */ +} + +/*! HUF_endMark() + * @returns The Huffman stream end mark: A 1-bit value = 1. + */ +static HUF_CElt HUF_endMark(void) +{ + HUF_CElt endMark; + HUF_setNbBits(&endMark, 1); + HUF_setValue(&endMark, 1); + return endMark; +} + +/*! HUF_closeCStream() : + * @return Size of CStream, in bytes, + * or 0 if it could not fit into dstBuffer */ +static size_t HUF_closeCStream(HUF_CStream_t* bitC) +{ + HUF_addBits(bitC, HUF_endMark(), /* idx */ 0, /* kFast */ 0); + HUF_flushBits(bitC, /* kFast */ 0); + { + size_t const nbBits = bitC->bitPos[0] & 0xFF; + if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ + return (bitC->ptr - bitC->startPtr) + (nbBits > 0); + } +} + +FORCE_INLINE_TEMPLATE void +HUF_encodeSymbol(HUF_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable, int idx, int fast) +{ + HUF_addBits(bitCPtr, CTable[symbol], idx, fast); +} + +FORCE_INLINE_TEMPLATE void +HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC, + const BYTE* ip, size_t srcSize, + const HUF_CElt* ct, + int kUnroll, int kFastFlush, int kLastFast) +{ + /* Join to kUnroll */ + int n = (int)srcSize; + int rem = n % kUnroll; + if (rem > 0) { + for (; rem > 0; --rem) { + HUF_encodeSymbol(bitC, ip[--n], ct, 0, /* fast */ 0); + } + HUF_flushBits(bitC, kFastFlush); + } + assert(n % kUnroll == 0); + + /* Join to 2 * kUnroll */ + if (n % (2 * kUnroll)) { + int u; + for (u = 1; u < kUnroll; ++u) { + HUF_encodeSymbol(bitC, ip[n - u], ct, 0, 1); + } + HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, 0, kLastFast); + HUF_flushBits(bitC, kFastFlush); + n -= kUnroll; + } + assert(n % (2 * kUnroll) == 0); + + for (; n>0; n-= 2 * kUnroll) { + /* Encode kUnroll symbols into the bitstream @ index 0. */ + int u; + for (u = 1; u < kUnroll; ++u) { + HUF_encodeSymbol(bitC, ip[n - u], ct, /* idx */ 0, /* fast */ 1); + } + HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, /* idx */ 0, /* fast */ kLastFast); + HUF_flushBits(bitC, kFastFlush); + /* Encode kUnroll symbols into the bitstream @ index 1. + * This allows us to start filling the bit container + * without any data dependencies. + */ + HUF_zeroIndex1(bitC); + for (u = 1; u < kUnroll; ++u) { + HUF_encodeSymbol(bitC, ip[n - kUnroll - u], ct, /* idx */ 1, /* fast */ 1); + } + HUF_encodeSymbol(bitC, ip[n - kUnroll - kUnroll], ct, /* idx */ 1, /* fast */ kLastFast); + /* Merge bitstream @ index 1 into the bitstream @ index 0 */ + HUF_mergeIndex1(bitC); + HUF_flushBits(bitC, kFastFlush); + } + assert(n == 0); + +} + +/** + * Returns a tight upper bound on the output space needed by Huffman + * with 8 bytes buffer to handle over-writes. If the output is at least + * this large we don't need to do bounds checks during Huffman encoding. 
+ */ +static size_t HUF_tightCompressBound(size_t srcSize, size_t tableLog) +{ + return ((srcSize * tableLog) >> 3) + 8; +} -#define HUF_FLUSHBITS_2(stream) \ - if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream) FORCE_INLINE_TEMPLATE size_t HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { + U32 const tableLog = (U32)CTable[0]; + HUF_CElt const* ct = CTable + 1; const BYTE* ip = (const BYTE*) src; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstSize; BYTE* op = ostart; - size_t n; - BIT_CStream_t bitC; + HUF_CStream_t bitC; /* init */ if (dstSize < 8) return 0; /* not enough space to compress */ - { size_t const initErr = BIT_initCStream(&bitC, op, (size_t)(oend-op)); + { size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op)); if (HUF_isError(initErr)) return 0; } - n = srcSize & ~3; /* join to mod 4 */ - switch (srcSize & 3) - { - case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable); - HUF_FLUSHBITS_2(&bitC); - /* fall-through */ - case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable); - HUF_FLUSHBITS_1(&bitC); - /* fall-through */ - case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable); - HUF_FLUSHBITS(&bitC); - /* fall-through */ - case 0 : /* fall-through */ - default: break; + if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11) + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ MEM_32bits() ? 2 : 4, /* kFast */ 0, /* kLastFast */ 0); + else { + if (MEM_32bits()) { + switch (tableLog) { + case 11: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 0); + break; + case 10: ZSTD_FALLTHROUGH; + case 9: ZSTD_FALLTHROUGH; + case 8: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 1); + break; + case 7: ZSTD_FALLTHROUGH; + default: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 3, /* kFastFlush */ 1, /* kLastFast */ 1); + break; + } + } else { + switch (tableLog) { + case 11: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 0); + break; + case 10: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 1); + break; + case 9: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 6, /* kFastFlush */ 1, /* kLastFast */ 0); + break; + case 8: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 7, /* kFastFlush */ 1, /* kLastFast */ 0); + break; + case 7: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 8, /* kFastFlush */ 1, /* kLastFast */ 0); + break; + case 6: ZSTD_FALLTHROUGH; + default: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 9, /* kFastFlush */ 1, /* kLastFast */ 1); + break; + } + } } + assert(bitC.ptr <= bitC.endPtr); - for (; n>0; n-=4) { /* note : n&3==0 at this stage */ - HUF_encodeSymbol(&bitC, ip[n- 1], CTable); - HUF_FLUSHBITS_1(&bitC); - HUF_encodeSymbol(&bitC, ip[n- 2], CTable); - HUF_FLUSHBITS_2(&bitC); - HUF_encodeSymbol(&bitC, ip[n- 3], CTable); - HUF_FLUSHBITS_1(&bitC); - HUF_encodeSymbol(&bitC, ip[n- 4], CTable); - HUF_FLUSHBITS(&bitC); - } - - return BIT_closeCStream(&bitC); + return HUF_closeCStream(&bitC); 
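    /* Illustrative aside (not part of this patch): HUF_addBits() above reduces to
     * "shift the container down by nbBits, then OR the pre-shifted value into the
     * top bits", and HUF_flushBits() keeps only the top bitPos bits. A worked example
     * with two toy symbols (3-bit code 101, then 4-bit code 1001) on a single
     * container, ignoring the kFast variant; local names are illustrative only.
     */
#if 0
    {   U64 container = 0;
        unsigned bitPos = 0;
        /* element layout, as documented above: low byte = nbBits, top nbBits bits = value */
        U64 const eltA = 3u | ((U64)0x5 << (64 - 3));   /* 3-bit code 101 */
        U64 const eltB = 4u | ((U64)0x9 << (64 - 4));   /* 4-bit code 1001 */
        container >>= (eltA & 0xFF); container |= eltA & ~(U64)0xFF; bitPos += (unsigned)(eltA & 0xFF);
        container >>= (eltB & 0xFF); container |= eltB & ~(U64)0xFF; bitPos += (unsigned)(eltB & 0xFF);
        /* flushing keeps the top bitPos bits; the first symbol added ends up in the low bits */
        assert(bitPos == 7);
        assert((container >> (64 - bitPos)) == 0x4D);   /* binary 1001_101 : B above A */
    }
#endif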
} #if DYNAMIC_BMI2 -static TARGET_ATTRIBUTE("bmi2") size_t +static BMI2_TARGET_ATTRIBUTE size_t HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) @@ -667,9 +1071,13 @@ HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { - return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); + return HUF_compress1X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); } +size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2) +{ + return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2); +} static size_t HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, @@ -689,8 +1097,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, assert(op <= oend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); - if (cSize==0) return 0; - assert(cSize <= 65535); + if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart, (U16)cSize); op += cSize; } @@ -698,8 +1105,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, ip += segmentSize; assert(op <= oend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); - if (cSize==0) return 0; - assert(cSize <= 65535); + if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart+2, (U16)cSize); op += cSize; } @@ -707,8 +1113,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, ip += segmentSize; assert(op <= oend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); - if (cSize==0) return 0; - assert(cSize <= 65535); + if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart+4, (U16)cSize); op += cSize; } @@ -717,7 +1122,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, assert(op <= oend); assert(ip <= iend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) ); - if (cSize==0) return 0; + if (cSize == 0 || cSize > 65535) return 0; op += cSize; } @@ -726,7 +1131,12 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { - return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); + return HUF_compress4X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); +} + +size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2) +{ + return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2); } typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e; @@ -750,35 +1160,38 @@ static size_t HUF_compressCTable_internal( typedef struct { unsigned count[HUF_SYMBOLVALUE_MAX + 1]; - HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1]; + HUF_CElt CTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)]; union { HUF_buildCTable_wksp_tables buildCTable_wksp; HUF_WriteCTableWksp writeCTable_wksp; + U32 hist_wksp[HIST_WKSP_SIZE_U32]; } wksps; } HUF_compress_tables_t; +#define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096 +#define 
SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */ + /* HUF_compress_internal() : * `workSpace_align4` must be aligned on 4-bytes boundaries, - * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U32 unsigned */ + * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */ static size_t HUF_compress_internal (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, HUF_nbStreams_e nbStreams, - void* workSpace_align4, size_t wkspSize, + void* workSpace, size_t wkspSize, HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat, - const int bmi2) + const int bmi2, unsigned suspectUncompressible) { - HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace_align4; + HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, sizeof(size_t)); BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstSize; BYTE* op = ostart; - HUF_STATIC_ASSERT(sizeof(*table) <= HUF_WORKSPACE_SIZE); - assert(((size_t)workSpace_align4 & 3) == 0); /* must be aligned on 4-bytes boundaries */ + HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE); /* checks & inits */ - if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall); + if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall); if (!srcSize) return 0; /* Uncompressed */ if (!dstSize) return 0; /* cannot fit anything within dst budget */ if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */ @@ -794,8 +1207,23 @@ HUF_compress_internal (void* dst, size_t dstSize, nbStreams, oldHufTable, bmi2); } + /* If uncompressible data is suspected, do a smaller sampling first */ + DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2); + if (suspectUncompressible && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) { + size_t largestTotal = 0; + { unsigned maxSymbolValueBegin = maxSymbolValue; + CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) ); + largestTotal += largestBegin; + } + { unsigned maxSymbolValueEnd = maxSymbolValue; + CHECK_V_F(largestEnd, HIST_count_simple (table->count, &maxSymbolValueEnd, (const BYTE*)src + srcSize - SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) ); + largestTotal += largestEnd; + } + if (largestTotal <= ((2 * SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) >> 7)+4) return 0; /* heuristic : probably not compressible enough */ + } + /* Scan input and build symbol stats */ - { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace_align4, wkspSize) ); + { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->wksps.hist_wksp, sizeof(table->wksps.hist_wksp)) ); if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */ } @@ -820,9 +1248,12 @@ HUF_compress_internal (void* dst, size_t dstSize, &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp)); CHECK_F(maxBits); huffLog = (U32)maxBits; - /* Zero unused symbols in CTable, so we can check it for validity */ - ZSTD_memset(table->CTable + (maxSymbolValue + 1), 0, - sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt))); + } + /* Zero unused symbols in CTable, so we can 
check it for validity */ + { + size_t const ctableSize = HUF_CTABLE_SIZE_ST(maxSymbolValue); + size_t const unusedSize = sizeof(table->CTable) - ctableSize * sizeof(HUF_CElt); + ZSTD_memset(table->CTable + ctableSize, 0, unusedSize); } /* Write table description header */ @@ -859,19 +1290,20 @@ size_t HUF_compress1X_wksp (void* dst, size_t dstSize, return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_singleStream, workSpace, wkspSize, - NULL, NULL, 0, 0 /*bmi2*/); + NULL, NULL, 0, 0 /*bmi2*/, 0); } size_t HUF_compress1X_repeat (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2) + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, + int bmi2, unsigned suspectUncompressible) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_singleStream, workSpace, wkspSize, hufTable, - repeat, preferRepeat, bmi2); + repeat, preferRepeat, bmi2, suspectUncompressible); } /* HUF_compress4X_repeat(): @@ -885,22 +1317,23 @@ size_t HUF_compress4X_wksp (void* dst, size_t dstSize, return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_fourStreams, workSpace, wkspSize, - NULL, NULL, 0, 0 /*bmi2*/); + NULL, NULL, 0, 0 /*bmi2*/, 0); } /* HUF_compress4X_repeat(): * compress input using 4 streams. + * consider skipping quickly * re-use an existing huffman compression table */ size_t HUF_compress4X_repeat (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2) + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_fourStreams, workSpace, wkspSize, - hufTable, repeat, preferRepeat, bmi2); + hufTable, repeat, preferRepeat, bmi2, suspectUncompressible); } #ifndef ZSTD_NO_UNUSED_FUNCTIONS @@ -918,7 +1351,7 @@ size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog) { - unsigned workSpace[HUF_WORKSPACE_SIZE_U32]; + U64 workSpace[HUF_WORKSPACE_SIZE_U64]; return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); } @@ -926,7 +1359,7 @@ size_t HUF_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog) { - unsigned workSpace[HUF_WORKSPACE_SIZE_U32]; + U64 workSpace[HUF_WORKSPACE_SIZE_U64]; return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); } diff --git a/lib/compress/zstd_compress.c b/lib/compress/zstd_compress.c index 7edd94183..b4807fbd8 100644 --- a/lib/compress/zstd_compress.c +++ b/lib/compress/zstd_compress.c @@ -12,7 +12,6 @@ * Dependencies ***************************************/ #include "../common/zstd_deps.h" /* INT_MAX, ZSTD_memset, ZSTD_memcpy */ -#include "../common/cpu.h" #include "../common/mem.h" #include "hist.h" /* HIST_countFast_wksp */ #define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */ @@ -42,6 +41,18 @@ # define ZSTD_COMPRESS_HEAPMODE 0 #endif +/*! + * ZSTD_HASHLOG3_MAX : + * Maximum size of the hash table dedicated to find 3-bytes matches, + * in log format, aka 17 => 1 << 17 == 128Ki positions. 
+ * This structure is only used in zstd_opt. + * Since allocation is centralized for all strategies, it has to be known here. + * The actual (selected) size of the hash table is then stored in ZSTD_matchState_t.hashLog3, + * so that zstd_opt.c doesn't need to know about this constant. + */ +#ifndef ZSTD_HASHLOG3_MAX +# define ZSTD_HASHLOG3_MAX 17 +#endif /*-************************************* * Helper functions @@ -72,10 +83,10 @@ struct ZSTD_CDict_s { ZSTD_customMem customMem; U32 dictID; int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */ - ZSTD_useRowMatchFinderMode_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use - * row-based matchfinder. Unless the cdict is reloaded, we will use - * the same greedy/lazy matchfinder at compression time. - */ + ZSTD_paramSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use + * row-based matchfinder. Unless the cdict is reloaded, we will use + * the same greedy/lazy matchfinder at compression time. + */ }; /* typedef'd to ZSTD_CDict within "zstd.h" */ ZSTD_CCtx* ZSTD_createCCtx(void) @@ -88,7 +99,7 @@ static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager) assert(cctx != NULL); ZSTD_memset(cctx, 0, sizeof(*cctx)); cctx->customMem = memManager; - cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); + cctx->bmi2 = ZSTD_cpuSupportsBmi2(); { size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters); assert(!ZSTD_isError(err)); (void)err; @@ -214,35 +225,42 @@ static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) { /* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder * for this compression. */ -static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_useRowMatchFinderMode_e mode) { - assert(mode != ZSTD_urm_auto); - return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_urm_enableRowMatchFinder); +static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_paramSwitch_e mode) { + assert(mode != ZSTD_ps_auto); + return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_ps_enable); } -/* Returns row matchfinder usage enum given an initial mode and cParams */ -static ZSTD_useRowMatchFinderMode_e ZSTD_resolveRowMatchFinderMode(ZSTD_useRowMatchFinderMode_e mode, - const ZSTD_compressionParameters* const cParams) { -#if !defined(ZSTD_NO_INTRINSICS) && (defined(__SSE2__) || defined(__ARM_NEON)) +/* Returns row matchfinder usage given an initial mode and cParams */ +static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e mode, + const ZSTD_compressionParameters* const cParams) { +#if defined(ZSTD_ARCH_X86_SSE2) || defined(ZSTD_ARCH_ARM_NEON) int const kHasSIMD128 = 1; #else int const kHasSIMD128 = 0; #endif - if (mode != ZSTD_urm_auto) return mode; /* if requested enabled, but no SIMD, we still will use row matchfinder */ - mode = ZSTD_urm_disableRowMatchFinder; + if (mode != ZSTD_ps_auto) return mode; /* if requested enabled, but no SIMD, we still will use row matchfinder */ + mode = ZSTD_ps_disable; if (!ZSTD_rowMatchFinderSupported(cParams->strategy)) return mode; if (kHasSIMD128) { - if (cParams->windowLog > 14) mode = ZSTD_urm_enableRowMatchFinder; + if (cParams->windowLog > 14) mode = ZSTD_ps_enable; } else { - if (cParams->windowLog > 17) mode = ZSTD_urm_enableRowMatchFinder; + if (cParams->windowLog > 17) mode = ZSTD_ps_enable; } return mode; } +/* Returns block splitter usage 
(generally speaking, when using slower/stronger compression modes) */ +static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode, + const ZSTD_compressionParameters* const cParams) { + if (mode != ZSTD_ps_auto) return mode; + return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17) ? ZSTD_ps_enable : ZSTD_ps_disable; +} + /* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */ static int ZSTD_allocateChainTable(const ZSTD_strategy strategy, - const ZSTD_useRowMatchFinderMode_e useRowMatchFinder, + const ZSTD_paramSwitch_e useRowMatchFinder, const U32 forDDSDict) { - assert(useRowMatchFinder != ZSTD_urm_auto); + assert(useRowMatchFinder != ZSTD_ps_auto); /* We always should allocate a chaintable if we are allocating a matchstate for a DDS dictionary matchstate. * We do not allocate a chaintable if we are using ZSTD_fast, or are using the row-based matchfinder. */ @@ -253,16 +271,10 @@ static int ZSTD_allocateChainTable(const ZSTD_strategy strategy, * enable long distance matching (wlog >= 27, strategy >= btopt). * Returns 0 otherwise. */ -static U32 ZSTD_CParams_shouldEnableLdm(const ZSTD_compressionParameters* const cParams) { - return cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27; -} - -/* Returns 1 if compression parameters are such that we should - * enable blockSplitter (wlog >= 17, strategy >= btopt). - * Returns 0 otherwise. - */ -static U32 ZSTD_CParams_useBlockSplitter(const ZSTD_compressionParameters* const cParams) { - return cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17; +static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode, + const ZSTD_compressionParameters* const cParams) { + if (mode != ZSTD_ps_auto) return mode; + return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? 
ZSTD_ps_enable : ZSTD_ps_disable; } static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams( @@ -274,20 +286,13 @@ static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams( cctxParams.cParams = cParams; /* Adjust advanced params according to cParams */ - if (ZSTD_CParams_shouldEnableLdm(&cParams)) { - DEBUGLOG(4, "ZSTD_makeCCtxParamsFromCParams(): Including LDM into cctx params"); - cctxParams.ldmParams.enableLdm = 1; - /* LDM is enabled by default for optimal parser and window size >= 128MB */ + cctxParams.ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams.ldmParams.enableLdm, &cParams); + if (cctxParams.ldmParams.enableLdm == ZSTD_ps_enable) { ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams); assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog); assert(cctxParams.ldmParams.hashRateLog < 32); } - - if (ZSTD_CParams_useBlockSplitter(&cParams)) { - DEBUGLOG(4, "ZSTD_makeCCtxParamsFromCParams(): Including block splitting into cctx params"); - cctxParams.splitBlocks = 1; - } - + cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams); cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams); assert(!ZSTD_checkCParams(cParams)); return cctxParams; @@ -348,7 +353,10 @@ static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_par */ cctxParams->compressionLevel = compressionLevel; cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, ¶ms->cParams); - DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d", cctxParams->useRowMatchFinder); + cctxParams->useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->useBlockSplitter, ¶ms->cParams); + cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, ¶ms->cParams); + DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d", + cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter, cctxParams->ldmParams.enableLdm); } size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params) @@ -518,9 +526,9 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) return bounds; case ZSTD_c_literalCompressionMode: - ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed); - bounds.lowerBound = ZSTD_lcm_auto; - bounds.upperBound = ZSTD_lcm_uncompressed; + ZSTD_STATIC_ASSERT(ZSTD_ps_auto < ZSTD_ps_enable && ZSTD_ps_enable < ZSTD_ps_disable); + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; return bounds; case ZSTD_c_targetCBlockSize: @@ -549,14 +557,14 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) bounds.upperBound = 1; return bounds; - case ZSTD_c_splitBlocks: - bounds.lowerBound = 0; - bounds.upperBound = 1; + case ZSTD_c_useBlockSplitter: + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; return bounds; case ZSTD_c_useRowMatchFinder: - bounds.lowerBound = (int)ZSTD_urm_auto; - bounds.upperBound = (int)ZSTD_urm_enableRowMatchFinder; + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; return bounds; case ZSTD_c_deterministicRefPrefix: @@ -625,7 +633,7 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) case ZSTD_c_stableOutBuffer: case ZSTD_c_blockDelimiters: case ZSTD_c_validateSequences: - case ZSTD_c_splitBlocks: + case ZSTD_c_useBlockSplitter: case ZSTD_c_useRowMatchFinder: case 
ZSTD_c_deterministicRefPrefix: default: @@ -680,7 +688,7 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value) case ZSTD_c_stableOutBuffer: case ZSTD_c_blockDelimiters: case ZSTD_c_validateSequences: - case ZSTD_c_splitBlocks: + case ZSTD_c_useBlockSplitter: case ZSTD_c_useRowMatchFinder: case ZSTD_c_deterministicRefPrefix: break; @@ -780,7 +788,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, } case ZSTD_c_literalCompressionMode : { - const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value; + const ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value; BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm); CCtxParams->literalCompressionMode = lcm; return CCtxParams->literalCompressionMode; @@ -835,7 +843,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, return CCtxParams->enableDedicatedDictSearch; case ZSTD_c_enableLongDistanceMatching : - CCtxParams->ldmParams.enableLdm = (value!=0); + CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value; return CCtxParams->ldmParams.enableLdm; case ZSTD_c_ldmHashLog : @@ -857,8 +865,8 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, return CCtxParams->ldmParams.bucketSizeLog; case ZSTD_c_ldmHashRateLog : - RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN, - parameter_outOfBound, "Param out of bounds!"); + if (value!=0) /* 0 ==> default */ + BOUNDCHECK(ZSTD_c_ldmHashRateLog, value); CCtxParams->ldmParams.hashRateLog = value; return CCtxParams->ldmParams.hashRateLog; @@ -894,14 +902,14 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, CCtxParams->validateSequences = value; return CCtxParams->validateSequences; - case ZSTD_c_splitBlocks: - BOUNDCHECK(ZSTD_c_splitBlocks, value); - CCtxParams->splitBlocks = value; - return CCtxParams->splitBlocks; + case ZSTD_c_useBlockSplitter: + BOUNDCHECK(ZSTD_c_useBlockSplitter, value); + CCtxParams->useBlockSplitter = (ZSTD_paramSwitch_e)value; + return CCtxParams->useBlockSplitter; case ZSTD_c_useRowMatchFinder: BOUNDCHECK(ZSTD_c_useRowMatchFinder, value); - CCtxParams->useRowMatchFinder = (ZSTD_useRowMatchFinderMode_e)value; + CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value; return CCtxParams->useRowMatchFinder; case ZSTD_c_deterministicRefPrefix: @@ -1032,8 +1040,8 @@ size_t ZSTD_CCtxParams_getParameter( case ZSTD_c_validateSequences : *value = (int)CCtxParams->validateSequences; break; - case ZSTD_c_splitBlocks : - *value = (int)CCtxParams->splitBlocks; + case ZSTD_c_useBlockSplitter : + *value = (int)CCtxParams->useBlockSplitter; break; case ZSTD_c_useRowMatchFinder : *value = (int)CCtxParams->useRowMatchFinder; @@ -1324,7 +1332,7 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, break; case ZSTD_cpm_createCDict: /* Assume a small source size when creating a dictionary - * with an unkown source size. + * with an unknown source size. 
*/ if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN) srcSize = minSrcSize; @@ -1398,7 +1406,7 @@ ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( srcSizeHint = CCtxParams->srcSizeHint; } cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode); - if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG; + if (CCtxParams->ldmParams.enableLdm == ZSTD_ps_enable) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG; ZSTD_overrideCParams(&cParams, &CCtxParams->cParams); assert(!ZSTD_checkCParams(cParams)); /* srcSizeHint == 0 means 0 */ @@ -1407,7 +1415,7 @@ ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( static size_t ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams, - const ZSTD_useRowMatchFinderMode_e useRowMatchFinder, + const ZSTD_paramSwitch_e useRowMatchFinder, const U32 enableDedicatedDictSearch, const U32 forCCtx) { @@ -1440,7 +1448,7 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams, /* tables are guaranteed to be sized in multiples of 64 bytes (or 16 uint32_t) */ ZSTD_STATIC_ASSERT(ZSTD_HASHLOG_MIN >= 4 && ZSTD_WINDOWLOG_MIN >= 4 && ZSTD_CHAINLOG_MIN >= 4); - assert(useRowMatchFinder != ZSTD_urm_auto); + assert(useRowMatchFinder != ZSTD_ps_auto); DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u", (U32)chainSize, (U32)hSize, (U32)h3Size); @@ -1451,12 +1459,12 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal( const ZSTD_compressionParameters* cParams, const ldmParams_t* ldmParams, const int isStatic, - const ZSTD_useRowMatchFinderMode_e useRowMatchFinder, + const ZSTD_paramSwitch_e useRowMatchFinder, const size_t buffInSize, const size_t buffOutSize, const U64 pledgedSrcSize) { - size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << cParams->windowLog), pledgedSrcSize)); + size_t const windowSize = (size_t) BOUNDED(1ULL, 1ULL << cParams->windowLog, pledgedSrcSize); size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); U32 const divider = (cParams->minMatch==3) ? 3 : 4; size_t const maxNbSeq = blockSize / divider; @@ -1469,7 +1477,7 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal( size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams); size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize); - size_t const ldmSeqSpace = ldmParams->enableLdm ? + size_t const ldmSeqSpace = ldmParams->enableLdm == ZSTD_ps_enable ? ZSTD_cwksp_aligned_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0; @@ -1496,8 +1504,8 @@ size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params) { ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); - ZSTD_useRowMatchFinderMode_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, - &cParams); + ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, + &cParams); RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only."); /* estimateCCtxSize is for one-shot compression. 
So no buffers should @@ -1514,9 +1522,9 @@ size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams) /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */ size_t noRowCCtxSize; size_t rowCCtxSize; - initialParams.useRowMatchFinder = ZSTD_urm_disableRowMatchFinder; + initialParams.useRowMatchFinder = ZSTD_ps_disable; noRowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams); - initialParams.useRowMatchFinder = ZSTD_urm_enableRowMatchFinder; + initialParams.useRowMatchFinder = ZSTD_ps_enable; rowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams); return MAX(noRowCCtxSize, rowCCtxSize); } else { @@ -1561,7 +1569,7 @@ size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params) size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered) ? ZSTD_compressBound(blockSize) + 1 : 0; - ZSTD_useRowMatchFinderMode_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, ¶ms->cParams); + ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, ¶ms->cParams); return ZSTD_estimateCCtxSize_usingCCtxParams_internal( &cParams, ¶ms->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize, @@ -1576,9 +1584,9 @@ size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams) /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */ size_t noRowCCtxSize; size_t rowCCtxSize; - initialParams.useRowMatchFinder = ZSTD_urm_disableRowMatchFinder; + initialParams.useRowMatchFinder = ZSTD_ps_disable; noRowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams); - initialParams.useRowMatchFinder = ZSTD_urm_enableRowMatchFinder; + initialParams.useRowMatchFinder = ZSTD_ps_enable; rowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams); return MAX(noRowCCtxSize, rowCCtxSize); } else { @@ -1713,7 +1721,7 @@ static size_t ZSTD_reset_matchState(ZSTD_matchState_t* ms, ZSTD_cwksp* ws, const ZSTD_compressionParameters* cParams, - const ZSTD_useRowMatchFinderMode_e useRowMatchFinder, + const ZSTD_paramSwitch_e useRowMatchFinder, const ZSTD_compResetPolicy_e crp, const ZSTD_indexResetPolicy_e forceResetIndex, const ZSTD_resetTarget_e forWho) @@ -1728,7 +1736,7 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms, size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0; DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset); - assert(useRowMatchFinder != ZSTD_urm_auto); + assert(useRowMatchFinder != ZSTD_ps_auto); if (forceResetIndex == ZSTDirp_reset) { ZSTD_window_init(&ms->window); ZSTD_cwksp_mark_tables_dirty(ws); @@ -1774,8 +1782,8 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms, if (ms->tagTable) ZSTD_memset(ms->tagTable, 0, tagTableSize); } { /* Switch to 32-entry rows if searchLog is 5 (or more) */ - U32 const rowLog = cParams->searchLog < 5 ? 
4 : 5; - assert(cParams->hashLog > rowLog); + U32 const rowLog = BOUNDED(4, cParams->searchLog, 6); + assert(cParams->hashLog >= rowLog); ms->rowHashLog = cParams->hashLog - rowLog; } } @@ -1824,8 +1832,8 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, ZSTD_buffered_policy_e const zbuff) { ZSTD_cwksp* const ws = &zc->workspace; - DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d", - (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder); + DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d", + (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->useBlockSplitter); assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams))); zc->isFirstBlock = 1; @@ -1836,8 +1844,10 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, zc->appliedParams = *params; params = &zc->appliedParams; - assert(params->useRowMatchFinder != ZSTD_urm_auto); - if (params->ldmParams.enableLdm) { + assert(params->useRowMatchFinder != ZSTD_ps_auto); + assert(params->useBlockSplitter != ZSTD_ps_auto); + assert(params->ldmParams.enableLdm != ZSTD_ps_auto); + if (params->ldmParams.enableLdm == ZSTD_ps_enable) { /* Adjust long distance matching parameters */ ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, ¶ms->cParams); assert(params->ldmParams.hashLog >= params->ldmParams.bucketSizeLog); @@ -1937,7 +1947,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize); /* ldm bucketOffsets table */ - if (params->ldmParams.enableLdm) { + if (params->ldmParams.enableLdm == ZSTD_ps_enable) { /* TODO: avoid memset? */ size_t const numBuckets = ((size_t)1) << (params->ldmParams.hashLog - @@ -1964,7 +1974,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, ZSTD_resetTarget_CCtx), ""); /* ldm hash table */ - if (params->ldmParams.enableLdm) { + if (params->ldmParams.enableLdm == ZSTD_ps_enable) { /* TODO: avoid memset? */ size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog; zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t)); @@ -1976,8 +1986,8 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, zc->ldmState.loadedDictEnd = 0; } - assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace, resizeWorkspace)); DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws)); + assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace, resizeWorkspace)); zc->initialized = 1; @@ -2115,7 +2125,7 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx, } ZSTD_cwksp_mark_tables_dirty(&cctx->workspace); - assert(params.useRowMatchFinder != ZSTD_urm_auto); + assert(params.useRowMatchFinder != ZSTD_ps_auto); /* copy tables */ { size_t const chainSize = ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0 /* DDS guaranteed disabled */) @@ -2209,8 +2219,12 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, { ZSTD_CCtx_params params = dstCCtx->requestedParams; /* Copy only compression parameters related to tables. 
*/ params.cParams = srcCCtx->appliedParams.cParams; - assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_urm_auto); + assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_ps_auto); + assert(srcCCtx->appliedParams.useBlockSplitter != ZSTD_ps_auto); + assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_ps_auto); params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder; + params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter; + params.ldmParams = srcCCtx->appliedParams.ldmParams; params.fParams = fParams; ZSTD_resetCCtx_internal(dstCCtx, ¶ms, pledgedSrcSize, /* loadedDictSize */ 0, @@ -2296,6 +2310,8 @@ ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerVa int const nbRows = (int)size / ZSTD_ROWSIZE; int cellNb = 0; int rowNb; + /* Protect special index values < ZSTD_WINDOW_START_INDEX. */ + U32 const reducerThreshold = reducerValue + ZSTD_WINDOW_START_INDEX; assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */ assert(size < (1U<<31)); /* can be casted to int */ @@ -2315,12 +2331,17 @@ ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerVa for (rowNb=0 ; rowNb < nbRows ; rowNb++) { int column; for (column=0; columnsplitBlocks); - return (cctxParams->splitBlocks != 0); + DEBUGLOG(5, "ZSTD_blockSplitterEnabled (useBlockSplitter=%d)", cctxParams->useBlockSplitter); + assert(cctxParams->useBlockSplitter != ZSTD_ps_auto); + return (cctxParams->useBlockSplitter == ZSTD_ps_enable); } /* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types @@ -2546,6 +2569,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, * compresses both literals and sequences * Returns compressed size of block, or a zstd error. 
*/ +#define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20 MEM_STATIC size_t ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, const ZSTD_entropyCTables_t* prevEntropy, @@ -2580,15 +2604,19 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, /* Compress literals */ { const BYTE* const literals = seqStorePtr->litStart; + size_t const numSequences = seqStorePtr->sequences - seqStorePtr->sequencesStart; + size_t const numLiterals = seqStorePtr->lit - seqStorePtr->litStart; + /* Base suspicion of uncompressibility on ratio of literals to sequences */ + unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO); size_t const litSize = (size_t)(seqStorePtr->lit - literals); size_t const cSize = ZSTD_compressLiterals( &prevEntropy->huf, &nextEntropy->huf, cctxParams->cParams.strategy, - ZSTD_disableLiteralsCompression(cctxParams), + ZSTD_literalsCompressionIsDisabled(cctxParams), op, dstCapacity, literals, litSize, entropyWorkspace, entropyWkspSize, - bmi2); + bmi2, suspectUncompressible); FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed"); assert(cSize <= dstCapacity); op += cSize; @@ -2693,7 +2721,7 @@ ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr, /* ZSTD_selectBlockCompressor() : * Not static, but internal use only (used by long distance matcher) * assumption : strat is a valid strategy */ -ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_useRowMatchFinderMode_e useRowMatchFinder, ZSTD_dictMode_e dictMode) +ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode) { static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = { { ZSTD_compressBlock_fast /* default for 0 */, @@ -2758,7 +2786,7 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_useRow ZSTD_compressBlock_lazy2_dedicatedDictSearch_row } }; DEBUGLOG(4, "Selecting a row-based matchfinder"); - assert(useRowMatchFinder != ZSTD_urm_auto); + assert(useRowMatchFinder != ZSTD_ps_auto); selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_greedy]; } else { selectedCompressor = blockCompressor[(int)dictMode][(int)strat]; @@ -2825,7 +2853,7 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i]; } if (zc->externSeqStore.pos < zc->externSeqStore.size) { - assert(!zc->appliedParams.ldmParams.enableLdm); + assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable); /* Updates ldmSeqStore.pos */ lastLLSize = ZSTD_ldm_blockCompress(&zc->externSeqStore, @@ -2834,7 +2862,7 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) zc->appliedParams.useRowMatchFinder, src, srcSize); assert(zc->externSeqStore.pos <= zc->externSeqStore.size); - } else if (zc->appliedParams.ldmParams.enableLdm) { + } else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) { rawSeqStore_t ldmSeqStore = kNullRawSeqStore; ldmSeqStore.seq = zc->ldmSequences; @@ -3027,7 +3055,7 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi const ZSTD_hufCTables_t* prevHuf, ZSTD_hufCTables_t* nextHuf, ZSTD_hufCTablesMetadata_t* hufMetadata, - const int disableLiteralsCompression, + const int literalsCompressionIsDisabled, void* workspace, size_t wkspSize) { BYTE* const wkspStart = (BYTE*)workspace; @@ -3045,7 +3073,7 @@ static size_t 
ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi /* Prepare nextEntropy assuming reusing the existing table */ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - if (disableLiteralsCompression) { + if (literalsCompressionIsDisabled) { DEBUGLOG(5, "set_basic - disabled"); hufMetadata->hType = set_basic; return 0; @@ -3192,7 +3220,7 @@ size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr, ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize, &prevEntropy->huf, &nextEntropy->huf, &entropyMetadata->hufMetadata, - ZSTD_disableLiteralsCompression(cctxParams), + ZSTD_literalsCompressionIsDisabled(cctxParams), workspace, wkspSize); FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed"); entropyMetadata->fseMetadata.fseTablesSize = @@ -3319,19 +3347,20 @@ static size_t ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize, * * Returns the estimated compressed size of the seqStore, or a zstd error. */ -static size_t ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, const ZSTD_CCtx* zc) { - ZSTD_entropyCTablesMetadata_t entropyMetadata; +static size_t ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc) { + ZSTD_entropyCTablesMetadata_t* entropyMetadata = &zc->blockSplitCtx.entropyMetadata; + DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()"); FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, - &entropyMetadata, + entropyMetadata, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), ""); return ZSTD_estimateBlockSize(seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart), seqStore->ofCode, seqStore->llCode, seqStore->mlCode, (size_t)(seqStore->sequences - seqStore->sequencesStart), - &zc->blockState.nextCBlock->entropy, &entropyMetadata, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE, - (int)(entropyMetadata.hufMetadata.hType == set_compressed), 1); + &zc->blockState.nextCBlock->entropy, entropyMetadata, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE, + (int)(entropyMetadata->hufMetadata.hType == set_compressed), 1); } /* Returns literals bytes represented in a seqStore */ @@ -3474,6 +3503,7 @@ static size_t ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const if (isPartition) ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart)); + RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "Block header doesn't fit"); cSeqsSize = ZSTD_entropyCompressSeqStore(seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, @@ -3499,9 +3529,6 @@ static size_t ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const return 0; } - if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; - if (cSeqsSize == 0) { cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock); FORWARD_IF_ERROR(cSize, "Nocompress block failed"); @@ -3518,6 +3545,10 @@ static size_t ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const cSize = ZSTD_blockHeaderSize + cSeqsSize; DEBUGLOG(4, "Writing out compressed block, size: %zu", cSize); } + + if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) + 
zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; + return cSize; } @@ -3528,7 +3559,6 @@ typedef struct { } seqStoreSplits; #define MIN_SEQUENCES_BLOCK_SPLITTING 300 -#define MAX_NB_SPLITS 196 /* Helper function to perform the recursive search for block splits. * Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half. @@ -3539,29 +3569,31 @@ typedef struct { * In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING). * In practice, recursion depth usually doesn't go beyond 4. * - * Furthermore, the number of splits is capped by MAX_NB_SPLITS. At MAX_NB_SPLITS == 196 with the current existing blockSize + * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize * maximum of 128 KB, this value is actually impossible to reach. */ static void ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx, - const ZSTD_CCtx* zc, const seqStore_t* origSeqStore) { - seqStore_t fullSeqStoreChunk; - seqStore_t firstHalfSeqStore; - seqStore_t secondHalfSeqStore; + ZSTD_CCtx* zc, const seqStore_t* origSeqStore) { + seqStore_t* fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk; + seqStore_t* firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore; + seqStore_t* secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore; size_t estimatedOriginalSize; size_t estimatedFirstHalfSize; size_t estimatedSecondHalfSize; size_t midIdx = (startIdx + endIdx)/2; - if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= MAX_NB_SPLITS) { + if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= ZSTD_MAX_NB_BLOCK_SPLITS) { + DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences"); return; } - ZSTD_deriveSeqStoreChunk(&fullSeqStoreChunk, origSeqStore, startIdx, endIdx); - ZSTD_deriveSeqStoreChunk(&firstHalfSeqStore, origSeqStore, startIdx, midIdx); - ZSTD_deriveSeqStoreChunk(&secondHalfSeqStore, origSeqStore, midIdx, endIdx); - estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(&fullSeqStoreChunk, zc); - estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(&firstHalfSeqStore, zc); - estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(&secondHalfSeqStore, zc); - DEBUGLOG(5, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu", + DEBUGLOG(4, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx); + ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx); + ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx); + ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx); + estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullSeqStoreChunk, zc); + estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalfSeqStore, zc); + estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalfSeqStore, zc); + DEBUGLOG(4, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu", estimatedOriginalSize, estimatedFirstHalfSize, estimatedSecondHalfSize); if (ZSTD_isError(estimatedOriginalSize) || ZSTD_isError(estimatedFirstHalfSize) || ZSTD_isError(estimatedSecondHalfSize)) { return; @@ -3601,12 +3633,12 @@ static size_t 
ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, s size_t cSize = 0; const BYTE* ip = (const BYTE*)src; BYTE* op = (BYTE*)dst; - U32 partitions[MAX_NB_SPLITS]; size_t i = 0; size_t srcBytesTotal = 0; + U32* partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */ + seqStore_t* nextSeqStore = &zc->blockSplitCtx.nextSeqStore; + seqStore_t* currSeqStore = &zc->blockSplitCtx.currSeqStore; size_t numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq); - seqStore_t nextSeqStore; - seqStore_t currSeqStore; /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history * may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two @@ -3626,6 +3658,7 @@ static size_t ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, s repcodes_t cRep; ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t)); ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t)); + ZSTD_memset(nextSeqStore, 0, sizeof(seqStore_t)); DEBUGLOG(4, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, @@ -3643,36 +3676,36 @@ static size_t ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, s return cSizeSingleBlock; } - ZSTD_deriveSeqStoreChunk(&currSeqStore, &zc->seqStore, 0, partitions[0]); + ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]); for (i = 0; i <= numSplits; ++i) { size_t srcBytes; size_t cSizeChunk; U32 const lastPartition = (i == numSplits); U32 lastBlockEntireSrc = 0; - srcBytes = ZSTD_countSeqStoreLiteralsBytes(&currSeqStore) + ZSTD_countSeqStoreMatchBytes(&currSeqStore); + srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore); srcBytesTotal += srcBytes; if (lastPartition) { /* This is the final partition, need to account for possible last literals */ srcBytes += blockSize - srcBytesTotal; lastBlockEntireSrc = lastBlock; } else { - ZSTD_deriveSeqStoreChunk(&nextSeqStore, &zc->seqStore, partitions[i], partitions[i+1]); + ZSTD_deriveSeqStoreChunk(nextSeqStore, &zc->seqStore, partitions[i], partitions[i+1]); } - cSizeChunk = ZSTD_compressSeqStore_singleBlock(zc, &currSeqStore, + cSizeChunk = ZSTD_compressSeqStore_singleBlock(zc, currSeqStore, &dRep, &cRep, op, dstCapacity, ip, srcBytes, lastBlockEntireSrc, 1 /* isPartition */); - DEBUGLOG(5, "Estimated size: %zu actual size: %zu", ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(&currSeqStore, zc), cSizeChunk); + DEBUGLOG(5, "Estimated size: %zu actual size: %zu", ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk); FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!"); ip += srcBytes; op += cSizeChunk; dstCapacity -= cSizeChunk; cSize += cSizeChunk; - currSeqStore = nextSeqStore; + *currSeqStore = *nextSeqStore; assert(cSizeChunk <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize); } /* cRep and dRep may have diverged during the compression. 
If so, we use the dRep repcodes @@ -3690,6 +3723,7 @@ static size_t ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc, U32 nbSeq; size_t cSize; DEBUGLOG(4, "ZSTD_compressBlock_splitBlock"); + assert(zc->appliedParams.useBlockSplitter == ZSTD_ps_enable); { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize); FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed"); @@ -3704,7 +3738,6 @@ static size_t ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc, nbSeq = (U32)(zc->seqStore.sequences - zc->seqStore.sequencesStart); } - assert(zc->appliedParams.splitBlocks == 1); cSize = ZSTD_compressBlock_splitBlock_internal(zc, dst, dstCapacity, src, srcSize, lastBlock, nbSeq); FORWARD_IF_ERROR(cSize, "Splitting blocks failed!"); return cSize; @@ -3746,12 +3779,6 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */, zc->bmi2); - if (zc->seqCollector.collectSequences) { - ZSTD_copyBlockSequences(zc); - return 0; - } - - if (frame && /* We don't want to emit our first block as a RLE even if it qualifies because * doing so will cause the decoder (cli only) to throw a "should consume all input error." @@ -3915,6 +3942,7 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx, ZSTD_overflowCorrectIfNeeded( ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize); ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState); + ZSTD_window_enforceMaxDist(&ms->window, ip, maxDist, &ms->loadedDictEnd, &ms->dictMatchState); /* Ensure hash/chain table insertion resumes no sooner than lowlimit */ if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit; @@ -3991,7 +4019,9 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity, if (!singleSegment) op[pos++] = windowLogByte; switch(dictIDSizeCode) { - default: assert(0); /* impossible */ + default: + assert(0); /* impossible */ + ZSTD_FALLTHROUGH; case 0 : break; case 1 : op[pos] = (BYTE)(dictID); pos++; break; case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break; @@ -3999,7 +4029,9 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity, } switch(fcsCode) { - default: assert(0); /* impossible */ + default: + assert(0); /* impossible */ + ZSTD_FALLTHROUGH; case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break; case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break; case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break; @@ -4047,7 +4079,7 @@ size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSe { RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong, "wrong cctx stage"); - RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm, + RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable, parameter_unsupported, "incompatible with ldm"); cctx->externSeqStore.seq = seq; @@ -4088,7 +4120,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx, ms->forceNonContiguous = 0; ms->nextToUpdate = ms->window.dictLimit; } - if (cctx->appliedParams.ldmParams.enableLdm) { + if (cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) { ZSTD_window_update(&cctx->ldmState.window, src, srcSize, /* forceNonContiguous */ 0); } @@ -4157,7 +4189,7 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, { const BYTE* ip = (const BYTE*) src; const BYTE* const iend = ip + srcSize; - int const loadLdmDict = params->ldmParams.enableLdm && ls != NULL; + int const loadLdmDict = 
params->ldmParams.enableLdm == ZSTD_ps_enable && ls != NULL; /* Assert that we the ms params match the params we're being given */ ZSTD_assertEqualCParams(params->cParams, ms->cParams); @@ -4214,8 +4246,8 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, assert(ms->chainTable != NULL); ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, iend-HASH_READ_SIZE); } else { - assert(params->useRowMatchFinder != ZSTD_urm_auto); - if (params->useRowMatchFinder == ZSTD_urm_enableRowMatchFinder) { + assert(params->useRowMatchFinder != ZSTD_ps_auto); + if (params->useRowMatchFinder == ZSTD_ps_enable) { size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog) * sizeof(U16); ZSTD_memset(ms->tagTable, 0, tagTableSize); ZSTD_row_update(ms, iend-HASH_READ_SIZE); @@ -4715,7 +4747,7 @@ size_t ZSTD_estimateCDictSize_advanced( + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) /* enableDedicatedDictSearch == 1 ensures that CDict estimation will not be too small * in case we are using DDS with row-hash. */ - + ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_urm_auto, &cParams), + + ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams), /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *)))); @@ -4792,7 +4824,7 @@ static size_t ZSTD_initCDict_internal( static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_compressionParameters cParams, - ZSTD_useRowMatchFinderMode_e useRowMatchFinder, + ZSTD_paramSwitch_e useRowMatchFinder, U32 enableDedicatedDictSearch, ZSTD_customMem customMem) { @@ -4947,7 +4979,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict( ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams) { - ZSTD_useRowMatchFinderMode_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_urm_auto, &cParams); + ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams); /* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */ size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0); size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) @@ -5403,7 +5435,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, zcs->outBuffFlushedSize = 0; zcs->streamStage = zcss_flush; /* pass-through to flush stage */ } - /* fall-through */ + ZSTD_FALLTHROUGH; case zcss_flush: DEBUGLOG(5, "flush stage"); assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered); @@ -5524,17 +5556,8 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, dictSize, mode); } - if (ZSTD_CParams_shouldEnableLdm(¶ms.cParams)) { - /* Enable LDM by default for optimal parser and window size >= 128MB */ - DEBUGLOG(4, "LDM enabled by default (window size >= 128MB, strategy >= btopt)"); - params.ldmParams.enableLdm = 1; - } - - if (ZSTD_CParams_useBlockSplitter(¶ms.cParams)) { - DEBUGLOG(4, "Block splitter enabled by default (window size >= 128K, strategy >= btopt)"); - params.splitBlocks = 1; - } - + params.useBlockSplitter = ZSTD_resolveBlockSplitterMode(params.useBlockSplitter, ¶ms.cParams); + params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, ¶ms.cParams); params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, ¶ms.cParams); 
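
Note on the pattern above: with this change every tri-state compression option (LDM, block splitter, row matchfinder, literal compression) is carried as a ZSTD_paramSwitch_e and collapsed from ZSTD_ps_auto into a concrete enable/disable decision once, before parameters are applied; downstream code then asserts it never sees ZSTD_ps_auto. The following is a condensed sketch of that resolution step, not the library's exact code — the enum's numeric values are an assumption here (the patch only asserts auto < enable < disable), and the thresholds quoted are the ones visible in the hunks above.

    /* Assumed numeric values; the patch only guarantees auto < enable < disable. */
    typedef enum { ZSTD_ps_auto = 0, ZSTD_ps_enable = 1, ZSTD_ps_disable = 2 } ZSTD_paramSwitch_e;

    /* Keep an explicit user choice; otherwise decide from the compression parameters. */
    ZSTD_paramSwitch_e resolve_switch(ZSTD_paramSwitch_e mode, int autoCondition)
    {
        if (mode != ZSTD_ps_auto) return mode;   /* user forced enable or disable */
        return autoCondition ? ZSTD_ps_enable : ZSTD_ps_disable;
    }

    /* Auto conditions used by the resolvers in this patch:
     *   LDM            : strategy >= ZSTD_btopt && windowLog >= 27  (window >= 128 MB)
     *   block splitter : strategy >= ZSTD_btopt && windowLog >= 17  (window >= 128 KB)
     * e.g.  enableLdm = resolve_switch(enableLdm,
     *                       cParams.strategy >= ZSTD_btopt && cParams.windowLog >= 27);
     */
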
#ifdef ZSTD_MULTITHREAD @@ -6140,119 +6163,12 @@ size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) /*-===== Pre-defined compression levels =====-*/ +#include "clevels.h" -#define ZSTD_MAX_CLEVEL 22 -int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; } +int ZSTD_maxCLevel(void) { return MEM_32bits() ? ZSTD_MAX_32BIT_CLEVEL : ZSTD_MAX_CLEVEL; } int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; } int ZSTD_defaultCLevel(void) { return ZSTD_CLEVEL_DEFAULT; } -static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = { -{ /* "default" - for any srcSize > 256 KB */ - /* W, C, H, S, L, TL, strat */ - { 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */ - { 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */ - { 20, 15, 16, 1, 6, 0, ZSTD_fast }, /* level 2 */ - { 21, 16, 17, 1, 5, 0, ZSTD_dfast }, /* level 3 */ - { 21, 18, 18, 1, 5, 0, ZSTD_dfast }, /* level 4 */ - { 21, 18, 19, 2, 5, 2, ZSTD_greedy }, /* level 5 */ - { 21, 19, 19, 3, 5, 4, ZSTD_greedy }, /* level 6 */ - { 21, 19, 19, 3, 5, 8, ZSTD_lazy }, /* level 7 */ - { 21, 19, 19, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */ - { 21, 19, 20, 4, 5, 16, ZSTD_lazy2 }, /* level 9 */ - { 22, 20, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */ - { 22, 21, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 11 */ - { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */ - { 22, 21, 22, 5, 5, 32, ZSTD_btlazy2 }, /* level 13 */ - { 22, 22, 23, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */ - { 22, 23, 23, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */ - { 22, 22, 22, 5, 5, 48, ZSTD_btopt }, /* level 16 */ - { 23, 23, 22, 5, 4, 64, ZSTD_btopt }, /* level 17 */ - { 23, 23, 22, 6, 3, 64, ZSTD_btultra }, /* level 18 */ - { 23, 24, 22, 7, 3,256, ZSTD_btultra2}, /* level 19 */ - { 25, 25, 23, 7, 3,256, ZSTD_btultra2}, /* level 20 */ - { 26, 26, 24, 7, 3,512, ZSTD_btultra2}, /* level 21 */ - { 27, 27, 25, 9, 3,999, ZSTD_btultra2}, /* level 22 */ -}, -{ /* for srcSize <= 256 KB */ - /* W, C, H, S, L, T, strat */ - { 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ - { 18, 13, 14, 1, 6, 0, ZSTD_fast }, /* level 1 */ - { 18, 14, 14, 1, 5, 0, ZSTD_dfast }, /* level 2 */ - { 18, 16, 16, 1, 4, 0, ZSTD_dfast }, /* level 3 */ - { 18, 16, 17, 2, 5, 2, ZSTD_greedy }, /* level 4.*/ - { 18, 18, 18, 3, 5, 2, ZSTD_greedy }, /* level 5.*/ - { 18, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6.*/ - { 18, 18, 19, 4, 4, 4, ZSTD_lazy }, /* level 7 */ - { 18, 18, 19, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ - { 18, 18, 19, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ - { 18, 18, 19, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ - { 18, 18, 19, 5, 4, 12, ZSTD_btlazy2 }, /* level 11.*/ - { 18, 19, 19, 7, 4, 12, ZSTD_btlazy2 }, /* level 12.*/ - { 18, 18, 19, 4, 4, 16, ZSTD_btopt }, /* level 13 */ - { 18, 18, 19, 4, 3, 32, ZSTD_btopt }, /* level 14.*/ - { 18, 18, 19, 6, 3,128, ZSTD_btopt }, /* level 15.*/ - { 18, 19, 19, 6, 3,128, ZSTD_btultra }, /* level 16.*/ - { 18, 19, 19, 8, 3,256, ZSTD_btultra }, /* level 17.*/ - { 18, 19, 19, 6, 3,128, ZSTD_btultra2}, /* level 18.*/ - { 18, 19, 19, 8, 3,256, ZSTD_btultra2}, /* level 19.*/ - { 18, 19, 19, 10, 3,512, ZSTD_btultra2}, /* level 20.*/ - { 18, 19, 19, 12, 3,512, ZSTD_btultra2}, /* level 21.*/ - { 18, 19, 19, 13, 3,999, ZSTD_btultra2}, /* level 22.*/ -}, -{ /* for srcSize <= 128 KB */ - /* W, C, H, S, L, T, strat */ - { 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ - { 17, 12, 13, 1, 6, 0, ZSTD_fast }, /* level 1 */ - { 17, 13, 15, 1, 5, 0, ZSTD_fast }, /* level 2 */ - { 17, 15, 16, 2, 5, 0, 
ZSTD_dfast }, /* level 3 */ - { 17, 17, 17, 2, 4, 0, ZSTD_dfast }, /* level 4 */ - { 17, 16, 17, 3, 4, 2, ZSTD_greedy }, /* level 5 */ - { 17, 17, 17, 3, 4, 4, ZSTD_lazy }, /* level 6 */ - { 17, 17, 17, 3, 4, 8, ZSTD_lazy2 }, /* level 7 */ - { 17, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ - { 17, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ - { 17, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ - { 17, 17, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 11 */ - { 17, 18, 17, 7, 4, 12, ZSTD_btlazy2 }, /* level 12 */ - { 17, 18, 17, 3, 4, 12, ZSTD_btopt }, /* level 13.*/ - { 17, 18, 17, 4, 3, 32, ZSTD_btopt }, /* level 14.*/ - { 17, 18, 17, 6, 3,256, ZSTD_btopt }, /* level 15.*/ - { 17, 18, 17, 6, 3,128, ZSTD_btultra }, /* level 16.*/ - { 17, 18, 17, 8, 3,256, ZSTD_btultra }, /* level 17.*/ - { 17, 18, 17, 10, 3,512, ZSTD_btultra }, /* level 18.*/ - { 17, 18, 17, 5, 3,256, ZSTD_btultra2}, /* level 19.*/ - { 17, 18, 17, 7, 3,512, ZSTD_btultra2}, /* level 20.*/ - { 17, 18, 17, 9, 3,512, ZSTD_btultra2}, /* level 21.*/ - { 17, 18, 17, 11, 3,999, ZSTD_btultra2}, /* level 22.*/ -}, -{ /* for srcSize <= 16 KB */ - /* W, C, H, S, L, T, strat */ - { 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ - { 14, 14, 15, 1, 5, 0, ZSTD_fast }, /* level 1 */ - { 14, 14, 15, 1, 4, 0, ZSTD_fast }, /* level 2 */ - { 14, 14, 15, 2, 4, 0, ZSTD_dfast }, /* level 3 */ - { 14, 14, 14, 4, 4, 2, ZSTD_greedy }, /* level 4 */ - { 14, 14, 14, 3, 4, 4, ZSTD_lazy }, /* level 5.*/ - { 14, 14, 14, 4, 4, 8, ZSTD_lazy2 }, /* level 6 */ - { 14, 14, 14, 6, 4, 8, ZSTD_lazy2 }, /* level 7 */ - { 14, 14, 14, 8, 4, 8, ZSTD_lazy2 }, /* level 8.*/ - { 14, 15, 14, 5, 4, 8, ZSTD_btlazy2 }, /* level 9.*/ - { 14, 15, 14, 9, 4, 8, ZSTD_btlazy2 }, /* level 10.*/ - { 14, 15, 14, 3, 4, 12, ZSTD_btopt }, /* level 11.*/ - { 14, 15, 14, 4, 3, 24, ZSTD_btopt }, /* level 12.*/ - { 14, 15, 14, 5, 3, 32, ZSTD_btultra }, /* level 13.*/ - { 14, 15, 15, 6, 3, 64, ZSTD_btultra }, /* level 14.*/ - { 14, 15, 15, 7, 3,256, ZSTD_btultra }, /* level 15.*/ - { 14, 15, 15, 5, 3, 48, ZSTD_btultra2}, /* level 16.*/ - { 14, 15, 15, 6, 3,128, ZSTD_btultra2}, /* level 17.*/ - { 14, 15, 15, 7, 3,256, ZSTD_btultra2}, /* level 18.*/ - { 14, 15, 15, 8, 3,256, ZSTD_btultra2}, /* level 19.*/ - { 14, 15, 15, 8, 3,512, ZSTD_btultra2}, /* level 20.*/ - { 14, 15, 15, 9, 3,512, ZSTD_btultra2}, /* level 21.*/ - { 14, 15, 15, 10, 3,999, ZSTD_btultra2}, /* level 22.*/ -}, -}; - static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize) { ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict); @@ -6345,7 +6261,7 @@ static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, /* row */ if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */ else if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */ - else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL; + else if (compressionLevel > ZSTD_maxCLevel()) row = ZSTD_maxCLevel(); else row = compressionLevel; { ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row]; diff --git a/lib/compress/zstd_compress_internal.h b/lib/compress/zstd_compress_internal.h index 3b04fd09f..7360ee8e2 100644 --- a/lib/compress/zstd_compress_internal.h +++ b/lib/compress/zstd_compress_internal.h @@ -63,7 +63,7 @@ typedef struct { } ZSTD_localDict; typedef struct { - HUF_CElt CTable[HUF_CTABLE_SIZE_U32(255)]; + HUF_CElt 
CTable[HUF_CTABLE_SIZE_ST(255)]; HUF_repeat repeatMode; } ZSTD_hufCTables_t; @@ -179,7 +179,7 @@ typedef struct { U32 offCodeSumBasePrice; /* to compare to log2(offreq) */ ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */ const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */ - ZSTD_literalCompressionMode_e literalCompressionMode; + ZSTD_paramSwitch_e literalCompressionMode; } optState_t; typedef struct { @@ -199,6 +199,8 @@ typedef struct { */ } ZSTD_window_t; +#define ZSTD_WINDOW_START_INDEX 2 + typedef struct ZSTD_matchState_t ZSTD_matchState_t; #define ZSTD_ROW_HASH_CACHE_SIZE 8 /* Size of prefetching hash cache for row-based matchfinder */ @@ -264,7 +266,7 @@ typedef struct { } ldmState_t; typedef struct { - U32 enableLdm; /* 1 if enable long distance matching */ + ZSTD_paramSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */ U32 hashLog; /* Log size of hashTable */ U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */ U32 minMatchLength; /* Minimum match length */ @@ -295,7 +297,7 @@ struct ZSTD_CCtx_params_s { * There is no guarantee that hint is close to actual source size */ ZSTD_dictAttachPref_e attachDictPref; - ZSTD_literalCompressionMode_e literalCompressionMode; + ZSTD_paramSwitch_e literalCompressionMode; /* Multithreading: used to pass parameters to mtctx */ int nbWorkers; @@ -318,10 +320,10 @@ struct ZSTD_CCtx_params_s { int validateSequences; /* Block splitting */ - int splitBlocks; + ZSTD_paramSwitch_e useBlockSplitter; /* Param for deciding whether to use row-based matchfinder */ - ZSTD_useRowMatchFinderMode_e useRowMatchFinder; + ZSTD_paramSwitch_e useRowMatchFinder; /* Always load a dictionary in ext-dict mode (not prefix mode)? */ int deterministicRefPrefix; @@ -343,6 +345,22 @@ typedef enum { ZSTDb_buffered } ZSTD_buffered_policy_e; +/** + * Struct that contains all elements of block splitter that should be allocated + * in a wksp. + */ +#define ZSTD_MAX_NB_BLOCK_SPLITS 196 +typedef struct { + seqStore_t fullSeqStoreChunk; + seqStore_t firstHalfSeqStore; + seqStore_t secondHalfSeqStore; + seqStore_t currSeqStore; + seqStore_t nextSeqStore; + + U32 partitions[ZSTD_MAX_NB_BLOCK_SPLITS]; + ZSTD_entropyCTablesMetadata_t entropyMetadata; +} ZSTD_blockSplitCtx; + struct ZSTD_CCtx_s { ZSTD_compressionStage_e stage; int cParamsChanged; /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. 
*/ @@ -374,7 +392,7 @@ struct ZSTD_CCtx_s { ZSTD_blockState_t blockState; U32* entropyWorkspace; /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */ - /* Wether we are streaming or not */ + /* Whether we are streaming or not */ ZSTD_buffered_policy_e bufferedPolicy; /* streaming */ @@ -408,6 +426,9 @@ struct ZSTD_CCtx_s { #if ZSTD_TRACE ZSTD_TraceCtx traceCtx; #endif + + /* Workspace for block splitter */ + ZSTD_blockSplitCtx blockSplitCtx; }; typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e; @@ -442,7 +463,7 @@ typedef enum { typedef size_t (*ZSTD_blockCompressor) ( ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_useRowMatchFinderMode_e rowMatchfinderMode, ZSTD_dictMode_e dictMode); +ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode); MEM_STATIC U32 ZSTD_LLcode(U32 litLength) @@ -549,17 +570,17 @@ MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat) return (srcSize >> minlog) + 2; } -MEM_STATIC int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams) +MEM_STATIC int ZSTD_literalsCompressionIsDisabled(const ZSTD_CCtx_params* cctxParams) { switch (cctxParams->literalCompressionMode) { - case ZSTD_lcm_huffman: + case ZSTD_ps_enable: return 0; - case ZSTD_lcm_uncompressed: + case ZSTD_ps_disable: return 1; default: assert(0 /* impossible: pre-validated */); - /* fall-through */ - case ZSTD_lcm_auto: + ZSTD_FALLTHROUGH; + case ZSTD_ps_auto: return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0); } } @@ -651,8 +672,14 @@ static unsigned ZSTD_NbCommonBytes (size_t val) # if STATIC_BMI2 return _tzcnt_u64(val) >> 3; # else - unsigned long r = 0; - return _BitScanForward64( &r, (U64)val ) ? (unsigned)(r >> 3) : 0; + if (val != 0) { + unsigned long r; + _BitScanForward64(&r, (U64)val); + return (unsigned)(r >> 3); + } else { + /* Should not reach this code path */ + __assume(0); + } # endif # elif defined(__GNUC__) && (__GNUC__ >= 4) return (__builtin_ctzll((U64)val) >> 3); @@ -669,8 +696,14 @@ static unsigned ZSTD_NbCommonBytes (size_t val) # endif } else { /* 32 bits */ # if defined(_MSC_VER) - unsigned long r=0; - return _BitScanForward( &r, (U32)val ) ? (unsigned)(r >> 3) : 0; + if (val != 0) { + unsigned long r; + _BitScanForward(&r, (U32)val); + return (unsigned)(r >> 3); + } else { + /* Should not reach this code path */ + __assume(0); + } # elif defined(__GNUC__) && (__GNUC__ >= 3) return (__builtin_ctz((U32)val) >> 3); # else @@ -687,8 +720,14 @@ static unsigned ZSTD_NbCommonBytes (size_t val) # if STATIC_BMI2 return _lzcnt_u64(val) >> 3; # else - unsigned long r = 0; - return _BitScanReverse64(&r, (U64)val) ? (unsigned)(r >> 3) : 0; + if (val != 0) { + unsigned long r; + _BitScanReverse64(&r, (U64)val); + return (unsigned)(r >> 3); + } else { + /* Should not reach this code path */ + __assume(0); + } # endif # elif defined(__GNUC__) && (__GNUC__ >= 4) return (__builtin_clzll(val) >> 3); @@ -702,8 +741,14 @@ static unsigned ZSTD_NbCommonBytes (size_t val) # endif } else { /* 32 bits */ # if defined(_MSC_VER) - unsigned long r = 0; - return _BitScanReverse( &r, (unsigned long)val ) ? 
(unsigned)(r >> 3) : 0; + if (val != 0) { + unsigned long r; + _BitScanReverse(&r, (unsigned long)val); + return (unsigned)(r >> 3); + } else { + /* Should not reach this code path */ + __assume(0); + } # elif defined(__GNUC__) && (__GNUC__ >= 3) return (__builtin_clz((U32)val) >> 3); # else @@ -884,9 +929,9 @@ MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window) MEM_STATIC U32 ZSTD_window_isEmpty(ZSTD_window_t const window) { - return window.dictLimit == 1 && - window.lowLimit == 1 && - (window.nextSrc - window.base) == 1; + return window.dictLimit == ZSTD_WINDOW_START_INDEX && + window.lowLimit == ZSTD_WINDOW_START_INDEX && + (window.nextSrc - window.base) == ZSTD_WINDOW_START_INDEX; } /** @@ -937,7 +982,9 @@ MEM_STATIC U32 ZSTD_window_canOverflowCorrect(ZSTD_window_t const window, { U32 const cycleSize = 1u << cycleLog; U32 const curr = (U32)((BYTE const*)src - window.base); - U32 const minIndexToOverflowCorrect = cycleSize + MAX(maxDist, cycleSize); + U32 const minIndexToOverflowCorrect = cycleSize + + MAX(maxDist, cycleSize) + + ZSTD_WINDOW_START_INDEX; /* Adjust the min index to backoff the overflow correction frequency, * so we don't waste too much CPU in overflow correction. If this @@ -1012,10 +1059,14 @@ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog, U32 const cycleSize = 1u << cycleLog; U32 const cycleMask = cycleSize - 1; U32 const curr = (U32)((BYTE const*)src - window->base); - U32 const currentCycle0 = curr & cycleMask; - /* Exclude zero so that newCurrent - maxDist >= 1. */ - U32 const currentCycle1 = currentCycle0 == 0 ? cycleSize : currentCycle0; - U32 const newCurrent = currentCycle1 + MAX(maxDist, cycleSize); + U32 const currentCycle = curr & cycleMask; + /* Ensure newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX. */ + U32 const currentCycleCorrection = currentCycle < ZSTD_WINDOW_START_INDEX + ? MAX(cycleSize, ZSTD_WINDOW_START_INDEX) + : 0; + U32 const newCurrent = currentCycle + + currentCycleCorrection + + MAX(maxDist, cycleSize); U32 const correction = curr - newCurrent; /* maxDist must be a power of two so that: * (newCurrent & cycleMask) == (curr & cycleMask) @@ -1031,14 +1082,20 @@ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog, window->base += correction; window->dictBase += correction; - if (window->lowLimit <= correction) window->lowLimit = 1; - else window->lowLimit -= correction; - if (window->dictLimit <= correction) window->dictLimit = 1; - else window->dictLimit -= correction; + if (window->lowLimit < correction + ZSTD_WINDOW_START_INDEX) { + window->lowLimit = ZSTD_WINDOW_START_INDEX; + } else { + window->lowLimit -= correction; + } + if (window->dictLimit < correction + ZSTD_WINDOW_START_INDEX) { + window->dictLimit = ZSTD_WINDOW_START_INDEX; + } else { + window->dictLimit -= correction; + } /* Ensure we can still reference the full window. */ assert(newCurrent >= maxDist); - assert(newCurrent - maxDist >= 1); + assert(newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX); /* Ensure that lowLimit and dictLimit didn't underflow. 
*/ assert(window->lowLimit <= newCurrent); assert(window->dictLimit <= newCurrent); @@ -1149,11 +1206,12 @@ ZSTD_checkDictValidity(const ZSTD_window_t* window, MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) { ZSTD_memset(window, 0, sizeof(*window)); - window->base = (BYTE const*)""; - window->dictBase = (BYTE const*)""; - window->dictLimit = 1; /* start from 1, so that 1st position is valid */ - window->lowLimit = 1; /* it ensures first and later CCtx usages compress the same */ - window->nextSrc = window->base + 1; /* see issue #1241 */ + window->base = (BYTE const*)" "; + window->dictBase = (BYTE const*)" "; + ZSTD_STATIC_ASSERT(ZSTD_DUBT_UNSORTED_MARK < ZSTD_WINDOW_START_INDEX); /* Start above ZSTD_DUBT_UNSORTED_MARK */ + window->dictLimit = ZSTD_WINDOW_START_INDEX; /* start from >0, so that 1st position is valid */ + window->lowLimit = ZSTD_WINDOW_START_INDEX; /* it ensures first and later CCtx usages compress the same */ + window->nextSrc = window->base + ZSTD_WINDOW_START_INDEX; /* see issue #1241 */ window->nbOverflowCorrections = 0; } @@ -1206,15 +1264,15 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window, */ MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog) { - U32 const maxDistance = 1U << windowLog; - U32 const lowestValid = ms->window.lowLimit; - U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid; - U32 const isDictionary = (ms->loadedDictEnd != 0); + U32 const maxDistance = 1U << windowLog; + U32 const lowestValid = ms->window.lowLimit; + U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid; + U32 const isDictionary = (ms->loadedDictEnd != 0); /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary * is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't * valid for the entire block. So this check is sufficient to find the lowest valid match index. */ - U32 const matchLowest = isDictionary ? lowestValid : withinWindow; + U32 const matchLowest = isDictionary ? 
lowestValid : withinWindow; return matchLowest; } diff --git a/lib/compress/zstd_compress_literals.c b/lib/compress/zstd_compress_literals.c index 008337bb1..52b0a8059 100644 --- a/lib/compress/zstd_compress_literals.c +++ b/lib/compress/zstd_compress_literals.c @@ -73,7 +73,8 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, void* dst, size_t dstCapacity, const void* src, size_t srcSize, void* entropyWorkspace, size_t entropyWorkspaceSize, - const int bmi2) + const int bmi2, + unsigned suspectUncompressible) { size_t const minGain = ZSTD_minGain(srcSize, strategy); size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); @@ -105,11 +106,11 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, HUF_compress1X_repeat( ostart+lhSize, dstCapacity-lhSize, src, srcSize, HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize, - (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) : + (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible) : HUF_compress4X_repeat( ostart+lhSize, dstCapacity-lhSize, src, srcSize, HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize, - (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2); + (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible); if (repeat != HUF_repeat_none) { /* reused the existing table */ DEBUGLOG(5, "Reusing previous huffman table"); diff --git a/lib/compress/zstd_compress_literals.h b/lib/compress/zstd_compress_literals.h index 9904c0cd3..9775fb97c 100644 --- a/lib/compress/zstd_compress_literals.h +++ b/lib/compress/zstd_compress_literals.h @@ -18,12 +18,14 @@ size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize); +/* If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, ZSTD_hufCTables_t* nextHuf, ZSTD_strategy strategy, int disableLiteralCompression, void* dst, size_t dstCapacity, const void* src, size_t srcSize, void* entropyWorkspace, size_t entropyWorkspaceSize, - const int bmi2); + const int bmi2, + unsigned suspectUncompressible); #endif /* ZSTD_COMPRESS_LITERALS_H */ diff --git a/lib/compress/zstd_compress_sequences.c b/lib/compress/zstd_compress_sequences.c index 611eabdcb..fa31e6e94 100644 --- a/lib/compress/zstd_compress_sequences.c +++ b/lib/compress/zstd_compress_sequences.c @@ -275,10 +275,11 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity, assert(nbSeq_1 > 1); assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp)); (void)entropyWorkspaceSize; - FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), ""); - { size_t const NCountSize = FSE_writeNCount(op, oend - op, wksp->norm, max, tableLog); /* overflow protected */ + FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "FSE_normalizeCount failed"); + assert(oend >= op); + { size_t const NCountSize = FSE_writeNCount(op, (size_t)(oend - op), wksp->norm, max, tableLog); /* overflow protected */ FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed"); - FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), ""); + FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), 
"FSE_buildCTable_wksp failed"); return NCountSize; } } @@ -398,7 +399,7 @@ ZSTD_encodeSequences_default( #if DYNAMIC_BMI2 -static TARGET_ATTRIBUTE("bmi2") size_t +static BMI2_TARGET_ATTRIBUTE size_t ZSTD_encodeSequences_bmi2( void* dst, size_t dstCapacity, FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, diff --git a/lib/compress/zstd_compress_superblock.c b/lib/compress/zstd_compress_superblock.c index e4e45069b..bcbe158b7 100644 --- a/lib/compress/zstd_compress_superblock.c +++ b/lib/compress/zstd_compress_superblock.c @@ -132,6 +132,7 @@ static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* const seqDef* sp = sstart; size_t matchLengthSum = 0; size_t litLengthSum = 0; + (void)(litLengthSum); /* suppress unused variable warning on some environments */ while (send-sp > 0) { ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp); litLengthSum += seqLen.litLength; @@ -474,7 +475,7 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr, /* I think there is an optimization opportunity here. * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful * since it recalculates estimate from scratch. - * For example, it would recount literal distribution and symbol codes everytime. + * For example, it would recount literal distribution and symbol codes every time. */ cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount, &nextCBlock->entropy, entropyMetadata, diff --git a/lib/compress/zstd_cwksp.h b/lib/compress/zstd_cwksp.h index 2656d26ca..7ba90262d 100644 --- a/lib/compress/zstd_cwksp.h +++ b/lib/compress/zstd_cwksp.h @@ -219,7 +219,7 @@ MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) { MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) { /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes * to align the beginning of tables section, as well as another n_2=[0, 63] bytes - * to align the beginning of the aligned secion. + * to align the beginning of the aligned section. * * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and * aligneds being sized in multiples of 64 bytes. 
diff --git a/lib/compress/zstd_double_fast.c b/lib/compress/zstd_double_fast.c index d0d3a784d..b9393b6a4 100644 --- a/lib/compress/zstd_double_fast.c +++ b/lib/compress/zstd_double_fast.c @@ -48,10 +48,216 @@ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, FORCE_INLINE_TEMPLATE -size_t ZSTD_compressBlock_doubleFast_generic( +size_t ZSTD_compressBlock_doubleFast_noDict_generic( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize, U32 const mls /* template */) +{ + ZSTD_compressionParameters const* cParams = &ms->cParams; + U32* const hashLong = ms->hashTable; + const U32 hBitsL = cParams->hashLog; + U32* const hashSmall = ms->chainTable; + const U32 hBitsS = cParams->chainLog; + const BYTE* const base = ms->window.base; + const BYTE* const istart = (const BYTE*)src; + const BYTE* anchor = istart; + const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); + /* presumes that, if there is a dictionary, it must be using Attach mode */ + const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); + const BYTE* const prefixLowest = base + prefixLowestIndex; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - HASH_READ_SIZE; + U32 offset_1=rep[0], offset_2=rep[1]; + U32 offsetSaved = 0; + + size_t mLength; + U32 offset; + U32 curr; + + /* how many positions to search before increasing step size */ + const size_t kStepIncr = 1 << kSearchStrength; + /* the position at which to increment the step size if no match is found */ + const BYTE* nextStep; + size_t step; /* the current step size */ + + size_t hl0; /* the long hash at ip */ + size_t hl1; /* the long hash at ip1 */ + + U32 idxl0; /* the long match index for ip */ + U32 idxl1; /* the long match index for ip1 */ + + const BYTE* matchl0; /* the long match for ip */ + const BYTE* matchs0; /* the short match for ip */ + const BYTE* matchl1; /* the long match for ip1 */ + + const BYTE* ip = istart; /* the current position */ + const BYTE* ip1; /* the next position */ + + DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic"); + + /* init */ + ip += ((ip - prefixLowest) == 0); + { + U32 const current = (U32)(ip - base); + U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog); + U32 const maxRep = current - windowLow; + if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; + } + + /* Outer Loop: one iteration per match found and stored */ + while (1) { + step = 1; + nextStep = ip + kStepIncr; + ip1 = ip + step; + + if (ip1 > ilimit) { + goto _cleanup; + } + + hl0 = ZSTD_hashPtr(ip, hBitsL, 8); + idxl0 = hashLong[hl0]; + matchl0 = base + idxl0; + + /* Inner Loop: one iteration per search / position */ + do { + const size_t hs0 = ZSTD_hashPtr(ip, hBitsS, mls); + const U32 idxs0 = hashSmall[hs0]; + curr = (U32)(ip-base); + matchs0 = base + idxs0; + + hashLong[hl0] = hashSmall[hs0] = curr; /* update hash tables */ + + /* check noDict repcode */ + if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { + mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; + ip++; + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH); + goto _match_stored; + } + + hl1 = ZSTD_hashPtr(ip1, hBitsL, 8); + + if (idxl0 > prefixLowestIndex) { + /* check prefix long match */ + if (MEM_read64(matchl0) == MEM_read64(ip)) { + mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8; + offset = (U32)(ip-matchl0); + while 
(((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; } /* catch up */ + goto _match_found; + } + } + + idxl1 = hashLong[hl1]; + matchl1 = base + idxl1; + + if (idxs0 > prefixLowestIndex) { + /* check prefix short match */ + if (MEM_read32(matchs0) == MEM_read32(ip)) { + goto _search_next_long; + } + } + + if (ip1 >= nextStep) { + PREFETCH_L1(ip1 + 64); + PREFETCH_L1(ip1 + 128); + step++; + nextStep += kStepIncr; + } + ip = ip1; + ip1 += step; + + hl0 = hl1; + idxl0 = idxl1; + matchl0 = matchl1; + #if defined(__aarch64__) + PREFETCH_L1(ip+256); + #endif + } while (ip1 <= ilimit); + +_cleanup: + /* save reps for next block */ + rep[0] = offset_1 ? offset_1 : offsetSaved; + rep[1] = offset_2 ? offset_2 : offsetSaved; + + /* Return the last literals size */ + return (size_t)(iend - anchor); + +_search_next_long: + + /* check prefix long +1 match */ + if (idxl1 > prefixLowestIndex) { + if (MEM_read64(matchl1) == MEM_read64(ip1)) { + ip = ip1; + mLength = ZSTD_count(ip+8, matchl1+8, iend) + 8; + offset = (U32)(ip-matchl1); + while (((ip>anchor) & (matchl1>prefixLowest)) && (ip[-1] == matchl1[-1])) { ip--; matchl1--; mLength++; } /* catch up */ + goto _match_found; + } + } + + /* if no long +1 match, explore the short match we found */ + mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4; + offset = (U32)(ip - matchs0); + while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* catch up */ + + /* fall-through */ + +_match_found: /* requires ip, offset, mLength */ + offset_2 = offset_1; + offset_1 = offset; + + if (step < 4) { + /* It is unsafe to write this value back to the hashtable when ip1 is + * greater than or equal to the new ip we will have after we're done + * processing this match. Rather than perform that test directly + * (ip1 >= ip + mLength), which costs speed in practice, we do a simpler + * more predictable test. The minmatch even if we take a short match is + * 4 bytes, so as long as step, the distance between ip and ip1 + * (initially) is less than 4, we know ip1 < new ip. */ + hashLong[hl1] = (U32)(ip1 - base); + } + + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + +_match_stored: + /* match found */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Complementary insertion */ + /* done after iLimit test, as candidates could be > iend-8 */ + { U32 const indexToInsert = curr+2; + hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert; + hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); + hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert; + hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base); + } + + /* check immediate repcode */ + while ( (ip <= ilimit) + && ( (offset_2>0) + & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { + /* store sequence */ + size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; + U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */ + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base); + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base); + ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH); + ip += rLength; + anchor = ip; + continue; /* faster when present ... (?) 
*/ + } + } + } +} + + +FORCE_INLINE_TEMPLATE +size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, - U32 const mls /* template */, ZSTD_dictMode_e const dictMode) + U32 const mls /* template */) { ZSTD_compressionParameters const* cParams = &ms->cParams; U32* const hashLong = ms->hashTable; @@ -72,54 +278,30 @@ size_t ZSTD_compressBlock_doubleFast_generic( U32 offsetSaved = 0; const ZSTD_matchState_t* const dms = ms->dictMatchState; - const ZSTD_compressionParameters* const dictCParams = - dictMode == ZSTD_dictMatchState ? - &dms->cParams : NULL; - const U32* const dictHashLong = dictMode == ZSTD_dictMatchState ? - dms->hashTable : NULL; - const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ? - dms->chainTable : NULL; - const U32 dictStartIndex = dictMode == ZSTD_dictMatchState ? - dms->window.dictLimit : 0; - const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ? - dms->window.base : NULL; - const BYTE* const dictStart = dictMode == ZSTD_dictMatchState ? - dictBase + dictStartIndex : NULL; - const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ? - dms->window.nextSrc : NULL; - const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ? - prefixLowestIndex - (U32)(dictEnd - dictBase) : - 0; - const U32 dictHBitsL = dictMode == ZSTD_dictMatchState ? - dictCParams->hashLog : hBitsL; - const U32 dictHBitsS = dictMode == ZSTD_dictMatchState ? - dictCParams->chainLog : hBitsS; + const ZSTD_compressionParameters* const dictCParams = &dms->cParams; + const U32* const dictHashLong = dms->hashTable; + const U32* const dictHashSmall = dms->chainTable; + const U32 dictStartIndex = dms->window.dictLimit; + const BYTE* const dictBase = dms->window.base; + const BYTE* const dictStart = dictBase + dictStartIndex; + const BYTE* const dictEnd = dms->window.nextSrc; + const U32 dictIndexDelta = prefixLowestIndex - (U32)(dictEnd - dictBase); + const U32 dictHBitsL = dictCParams->hashLog; + const U32 dictHBitsS = dictCParams->chainLog; const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictStart)); - DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic"); - - assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState); + DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_dictMatchState_generic"); /* if a dictionary is attached, it must be within window range */ - if (dictMode == ZSTD_dictMatchState) { - assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex); - } + assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex); /* init */ ip += (dictAndPrefixLength == 0); - if (dictMode == ZSTD_noDict) { - U32 const curr = (U32)(ip - base); - U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog); - U32 const maxRep = curr - windowLow; - if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; - if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; - } - if (dictMode == ZSTD_dictMatchState) { - /* dictMatchState repCode checks don't currently handle repCode == 0 - * disabling. */ - assert(offset_1 <= dictAndPrefixLength); - assert(offset_2 <= dictAndPrefixLength); - } + + /* dictMatchState repCode checks don't currently handle repCode == 0 + * disabling. 
*/ + assert(offset_1 <= dictAndPrefixLength); + assert(offset_2 <= dictAndPrefixLength); /* Main Search Loop */ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ @@ -135,15 +317,13 @@ size_t ZSTD_compressBlock_doubleFast_generic( const BYTE* matchLong = base + matchIndexL; const BYTE* match = base + matchIndexS; const U32 repIndex = curr + 1 - offset_1; - const BYTE* repMatch = (dictMode == ZSTD_dictMatchState - && repIndex < prefixLowestIndex) ? + const BYTE* repMatch = (repIndex < prefixLowestIndex) ? dictBase + (repIndex - dictIndexDelta) : base + repIndex; hashLong[h2] = hashSmall[h] = curr; /* update hash tables */ - /* check dictMatchState repcode */ - if (dictMode == ZSTD_dictMatchState - && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) + /* check repcode */ + if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; @@ -152,15 +332,6 @@ size_t ZSTD_compressBlock_doubleFast_generic( goto _match_stored; } - /* check noDict repcode */ - if ( dictMode == ZSTD_noDict - && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) { - mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; - ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH); - goto _match_stored; - } - if (matchIndexL > prefixLowestIndex) { /* check prefix long match */ if (MEM_read64(matchLong) == MEM_read64(ip)) { @@ -169,7 +340,7 @@ size_t ZSTD_compressBlock_doubleFast_generic( while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ goto _match_found; } - } else if (dictMode == ZSTD_dictMatchState) { + } else { /* check dictMatchState long match */ U32 const dictMatchIndexL = dictHashLong[dictHL]; const BYTE* dictMatchL = dictBase + dictMatchIndexL; @@ -187,7 +358,7 @@ size_t ZSTD_compressBlock_doubleFast_generic( if (MEM_read32(match) == MEM_read32(ip)) { goto _search_next_long; } - } else if (dictMode == ZSTD_dictMatchState) { + } else { /* check dictMatchState short match */ U32 const dictMatchIndexS = dictHashSmall[dictHS]; match = dictBase + dictMatchIndexS; @@ -220,7 +391,7 @@ _search_next_long: while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */ goto _match_found; } - } else if (dictMode == ZSTD_dictMatchState) { + } else { /* check dict long +1 match */ U32 const dictMatchIndexL3 = dictHashLong[dictHLNext]; const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3; @@ -234,7 +405,7 @@ _search_next_long: } } } /* if no long +1 match, explore the short match we found */ - if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) { + if (matchIndexS < prefixLowestIndex) { mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4; offset = (U32)(curr - matchIndexS); while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ @@ -244,8 +415,6 @@ _search_next_long: while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ } - /* fall-through */ - _match_found: offset_2 = offset_1; offset_1 = offset; @@ -268,43 +437,27 @@ _match_stored: } /* check immediate repcode */ - if (dictMode == ZSTD_dictMatchState) { - 
while (ip <= ilimit) { - U32 const current2 = (U32)(ip-base); - U32 const repIndex2 = current2 - offset_2; - const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState - && repIndex2 < prefixLowestIndex ? - dictBase + repIndex2 - dictIndexDelta : - base + repIndex2; - if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) - && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { - const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend; - size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4; - U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH); - hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; - hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; - ip += repLength2; - anchor = ip; - continue; - } - break; - } } - - if (dictMode == ZSTD_noDict) { - while ( (ip <= ilimit) - && ( (offset_2>0) - & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { - /* store sequence */ - size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; - U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */ - hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base); - hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base); - ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH); - ip += rLength; + while (ip <= ilimit) { + U32 const current2 = (U32)(ip-base); + U32 const repIndex2 = current2 - offset_2; + const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ? + dictBase + repIndex2 - dictIndexDelta : + base + repIndex2; + if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { + const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend; + size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4; + U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH); + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; + ip += repLength2; anchor = ip; - continue; /* faster when present ... (?) 
*/ - } } } + continue; + } + break; + } + } } /* while (ip < ilimit) */ /* save reps for next block */ @@ -315,6 +468,24 @@ _match_stored: return (size_t)(iend - anchor); } +#define ZSTD_GEN_DFAST_FN(dictMode, mls) \ + static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls( \ + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \ + void const* src, size_t srcSize) \ + { \ + return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \ + } + +ZSTD_GEN_DFAST_FN(noDict, 4) +ZSTD_GEN_DFAST_FN(noDict, 5) +ZSTD_GEN_DFAST_FN(noDict, 6) +ZSTD_GEN_DFAST_FN(noDict, 7) + +ZSTD_GEN_DFAST_FN(dictMatchState, 4) +ZSTD_GEN_DFAST_FN(dictMatchState, 5) +ZSTD_GEN_DFAST_FN(dictMatchState, 6) +ZSTD_GEN_DFAST_FN(dictMatchState, 7) + size_t ZSTD_compressBlock_doubleFast( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], @@ -325,13 +496,13 @@ size_t ZSTD_compressBlock_doubleFast( { default: /* includes case 3 */ case 4 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict); + return ZSTD_compressBlock_doubleFast_noDict_4(ms, seqStore, rep, src, srcSize); case 5 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict); + return ZSTD_compressBlock_doubleFast_noDict_5(ms, seqStore, rep, src, srcSize); case 6 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict); + return ZSTD_compressBlock_doubleFast_noDict_6(ms, seqStore, rep, src, srcSize); case 7 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict); + return ZSTD_compressBlock_doubleFast_noDict_7(ms, seqStore, rep, src, srcSize); } } @@ -345,13 +516,13 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState( { default: /* includes case 3 */ case 4 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState); + return ZSTD_compressBlock_doubleFast_dictMatchState_4(ms, seqStore, rep, src, srcSize); case 5 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState); + return ZSTD_compressBlock_doubleFast_dictMatchState_5(ms, seqStore, rep, src, srcSize); case 6 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState); + return ZSTD_compressBlock_doubleFast_dictMatchState_6(ms, seqStore, rep, src, srcSize); case 7 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState); + return ZSTD_compressBlock_doubleFast_dictMatchState_7(ms, seqStore, rep, src, srcSize); } } @@ -387,7 +558,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( /* if extDict is invalidated due to maxDistance, switch to "regular" variant */ if (prefixStartIndex == dictStartIndex) - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, mls, ZSTD_noDict); + return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize); /* Search Loop */ while (ip < ilimit) { /* < instead of <=, because (ip+1) */ @@ -409,7 +580,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */ if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */ - & (offset_1 < curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */ + & (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 
*/ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4; @@ -477,7 +648,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( U32 const repIndex2 = current2 - offset_2; const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */ - & (offset_2 < current2 - dictStartIndex)) + & (offset_2 <= current2 - dictStartIndex)) && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; @@ -500,6 +671,10 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( return (size_t)(iend - anchor); } +ZSTD_GEN_DFAST_FN(extDict, 4) +ZSTD_GEN_DFAST_FN(extDict, 5) +ZSTD_GEN_DFAST_FN(extDict, 6) +ZSTD_GEN_DFAST_FN(extDict, 7) size_t ZSTD_compressBlock_doubleFast_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], @@ -510,12 +685,12 @@ size_t ZSTD_compressBlock_doubleFast_extDict( { default: /* includes case 3 */ case 4 : - return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4); + return ZSTD_compressBlock_doubleFast_extDict_4(ms, seqStore, rep, src, srcSize); case 5 : - return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5); + return ZSTD_compressBlock_doubleFast_extDict_5(ms, seqStore, rep, src, srcSize); case 6 : - return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6); + return ZSTD_compressBlock_doubleFast_extDict_6(ms, seqStore, rep, src, srcSize); case 7 : - return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7); + return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize); } } diff --git a/lib/compress/zstd_fast.c b/lib/compress/zstd_fast.c index 4edc04dcc..87de17f81 100644 --- a/lib/compress/zstd_fast.c +++ b/lib/compress/zstd_fast.c @@ -43,8 +43,54 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms, } +/** + * If you squint hard enough (and ignore repcodes), the search operation at any + * given position is broken into 4 stages: + * + * 1. Hash (map position to hash value via input read) + * 2. Lookup (map hash val to index via hashtable read) + * 3. Load (map index to value at that position via input read) + * 4. Compare + * + * Each of these steps involves a memory read at an address which is computed + * from the previous step. This means these steps must be sequenced and their + * latencies are cumulative. + * + * Rather than do 1->2->3->4 sequentially for a single position before moving + * onto the next, this implementation interleaves these operations across the + * next few positions: + * + * R = Repcode Read & Compare + * H = Hash + * T = Table Lookup + * M = Match Read & Compare + * + * Pos | Time --> + * ----+------------------- + * N | ... M + * N+1 | ... TM + * N+2 | R H T M + * N+3 | H TM + * N+4 | R H T M + * N+5 | H ... + * N+6 | R ... + * + * This is very much analogous to the pipelining of execution in a CPU. And just + * like a CPU, we have to dump the pipeline when we find a match (i.e., take a + * branch). 
+ * + * When this happens, we throw away our current state, and do the following prep + * to re-enter the loop: + * + * Pos | Time --> + * ----+------------------- + * N | H T + * N+1 | H + * + * This is also the work we do at the beginning to enter the loop initially. + */ FORCE_INLINE_TEMPLATE size_t -ZSTD_compressBlock_fast_generic( +ZSTD_compressBlock_fast_noDict_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls) @@ -53,135 +99,229 @@ ZSTD_compressBlock_fast_generic( U32* const hashTable = ms->hashTable; U32 const hlog = cParams->hashLog; /* support stepSize of 0 */ - size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1; + size_t const stepSize = cParams->targetLength + !(cParams->targetLength); const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; - /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */ - const BYTE* ip0 = istart; - const BYTE* ip1; - const BYTE* anchor = istart; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); const BYTE* const prefixStart = base + prefixStartIndex; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; - U32 offset_1=rep[0], offset_2=rep[1]; + + const BYTE* anchor = istart; + const BYTE* ip0 = istart; + const BYTE* ip1; + const BYTE* ip2; + const BYTE* ip3; + U32 current0; + + U32 rep_offset1 = rep[0]; + U32 rep_offset2 = rep[1]; U32 offsetSaved = 0; - /* init */ + size_t hash0; /* hash for ip0 */ + size_t hash1; /* hash for ip1 */ + U32 idx; /* match idx for ip0 */ + U32 mval; /* src value at match idx */ + + U32 offcode; + const BYTE* match0; + size_t mLength; + + size_t step; + const BYTE* nextStep; + const size_t kStepIncr = (1 << (kSearchStrength - 1)); + DEBUGLOG(5, "ZSTD_compressBlock_fast_generic"); ip0 += (ip0 == prefixStart); - ip1 = ip0 + 1; { U32 const curr = (U32)(ip0 - base); U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog); U32 const maxRep = curr - windowLow; - if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; - if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; + if (rep_offset2 > maxRep) offsetSaved = rep_offset2, rep_offset2 = 0; + if (rep_offset1 > maxRep) offsetSaved = rep_offset1, rep_offset1 = 0; } - /* Main Search Loop */ -#ifdef __INTEL_COMPILER - /* From intel 'The vector pragma indicates that the loop should be - * vectorized if it is legal to do so'. 
Can be used together with - * #pragma ivdep (but have opted to exclude that because intel - * warns against using it).*/ - #pragma vector always -#endif - while (ip1 < ilimit) { /* < instead of <=, because check at ip0+2 */ - size_t mLength; - BYTE const* ip2 = ip0 + 2; - size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls); - U32 const val0 = MEM_read32(ip0); - size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls); - U32 const val1 = MEM_read32(ip1); - U32 const current0 = (U32)(ip0-base); - U32 const current1 = (U32)(ip1-base); - U32 const matchIndex0 = hashTable[h0]; - U32 const matchIndex1 = hashTable[h1]; - BYTE const* repMatch = ip2 - offset_1; - const BYTE* match0 = base + matchIndex0; - const BYTE* match1 = base + matchIndex1; - U32 offcode; + /* start each op */ +_start: /* Requires: ip0 */ -#if defined(__aarch64__) - PREFETCH_L1(ip0+256); -#endif + step = stepSize; + nextStep = ip0 + kStepIncr; - hashTable[h0] = current0; /* update hash table */ - hashTable[h1] = current1; /* update hash table */ + /* calculate positions, ip0 - anchor == 0, so we skip step calc */ + ip1 = ip0 + stepSize; + ip2 = ip1 + stepSize; + ip3 = ip2 + stepSize; - assert(ip0 + 1 == ip1); + if (ip3 >= ilimit) { + goto _cleanup; + } - if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) { - mLength = (ip2[-1] == repMatch[-1]) ? 1 : 0; - ip0 = ip2 - mLength; - match0 = repMatch - mLength; - mLength += 4; + hash0 = ZSTD_hashPtr(ip0, hlog, mls); + hash1 = ZSTD_hashPtr(ip1, hlog, mls); + + idx = hashTable[hash0]; + + do { + /* load repcode match for ip[2]*/ + const U32 rval = MEM_read32(ip2 - rep_offset1); + + /* write back hash table entry */ + current0 = (U32)(ip0 - base); + hashTable[hash0] = current0; + + /* check repcode at ip[2] */ + if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) { + ip0 = ip2; + match0 = ip0 - rep_offset1; + mLength = ip0[-1] == match0[-1]; + ip0 -= mLength; + match0 -= mLength; offcode = 0; + mLength += 4; goto _match; } - if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) { - /* found a regular match */ + + /* load match for ip[0] */ + if (idx >= prefixStartIndex) { + mval = MEM_read32(base + idx); + } else { + mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */ + } + + /* check match at ip[0] */ + if (MEM_read32(ip0) == mval) { + /* found a match! */ goto _offset; } - if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) { - /* found a regular match after one literal */ - ip0 = ip1; - match0 = match1; + + /* lookup ip[1] */ + idx = hashTable[hash1]; + + /* hash ip[2] */ + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + + /* advance to next positions */ + ip0 = ip1; + ip1 = ip2; + ip2 = ip3; + + /* write back hash table entry */ + current0 = (U32)(ip0 - base); + hashTable[hash0] = current0; + + /* load match for ip[0] */ + if (idx >= prefixStartIndex) { + mval = MEM_read32(base + idx); + } else { + mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */ + } + + /* check match at ip[0] */ + if (MEM_read32(ip0) == mval) { + /* found a match! 
*/ goto _offset; } - { size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize; - assert(step >= 2); - ip0 += step; - ip1 += step; - continue; + + /* lookup ip[1] */ + idx = hashTable[hash1]; + + /* hash ip[2] */ + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + + /* calculate step */ + if (ip2 >= nextStep) { + PREFETCH_L1(ip1 + 64); + PREFETCH_L1(ip1 + 128); + step++; + nextStep += kStepIncr; } -_offset: /* Requires: ip0, match0 */ - /* Compute the offset code */ - offset_2 = offset_1; - offset_1 = (U32)(ip0-match0); - offcode = offset_1 + ZSTD_REP_MOVE; - mLength = 4; - /* Count the backwards match length */ - while (((ip0>anchor) & (match0>prefixStart)) - && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */ -_match: /* Requires: ip0, match0, offcode */ - /* Count the forward length */ - mLength += ZSTD_count(ip0+mLength, match0+mLength, iend); - ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH); - /* match found */ - ip0 += mLength; - anchor = ip0; + /* advance to next positions */ + ip0 = ip1; + ip1 = ip2; + ip2 = ip2 + step; + ip3 = ip2 + step; + } while (ip3 < ilimit); - if (ip0 <= ilimit) { - /* Fill Table */ - assert(base+current0+2 > istart); /* check base overflow */ - hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */ - hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base); - - if (offset_2 > 0) { /* offset_2==0 means offset_2 is invalidated */ - while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) { - /* store sequence */ - size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4; - { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */ - hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base); - ip0 += rLength; - ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH); - anchor = ip0; - continue; /* faster when present (confirmed on gcc-8) ... (?) */ - } } } - ip1 = ip0 + 1; - } +_cleanup: + /* Note that there are probably still a couple positions we could search. + * However, it seems to be a meaningful performance hit to try to search + * them. So let's not. */ /* save reps for next block */ - rep[0] = offset_1 ? offset_1 : offsetSaved; - rep[1] = offset_2 ? offset_2 : offsetSaved; + rep[0] = rep_offset1 ? rep_offset1 : offsetSaved; + rep[1] = rep_offset2 ? rep_offset2 : offsetSaved; /* Return the last literals size */ return (size_t)(iend - anchor); + +_offset: /* Requires: ip0, idx */ + + /* Compute the offset code. */ + match0 = base + idx; + rep_offset2 = rep_offset1; + rep_offset1 = (U32)(ip0-match0); + offcode = rep_offset1 + ZSTD_REP_MOVE; + mLength = 4; + + /* Count the backwards match length. */ + while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) { + ip0--; + match0--; + mLength++; + } + +_match: /* Requires: ip0, match0, offcode */ + + /* Count the forward length. */ + mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend); + + ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength - MINMATCH); + + ip0 += mLength; + anchor = ip0; + + /* write next hash table entry */ + if (ip1 < ip0) { + hashTable[hash1] = (U32)(ip1 - base); + } + + /* Fill table and check for immediate repcode. 
*/ + if (ip0 <= ilimit) { + /* Fill Table */ + assert(base+current0+2 > istart); /* check base overflow */ + hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */ + hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base); + + if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */ + while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) { + /* store sequence */ + size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4; + { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */ + hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base); + ip0 += rLength; + ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH); + anchor = ip0; + continue; /* faster when present (confirmed on gcc-8) ... (?) */ + } } } + + goto _start; } +#define ZSTD_GEN_FAST_FN(dictMode, mls) \ + static size_t ZSTD_compressBlock_fast_##dictMode##_##mls( \ + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \ + void const* src, size_t srcSize) \ + { \ + return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \ + } + +ZSTD_GEN_FAST_FN(noDict, 4) +ZSTD_GEN_FAST_FN(noDict, 5) +ZSTD_GEN_FAST_FN(noDict, 6) +ZSTD_GEN_FAST_FN(noDict, 7) size_t ZSTD_compressBlock_fast( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], @@ -193,13 +333,13 @@ size_t ZSTD_compressBlock_fast( { default: /* includes case 3 */ case 4 : - return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4); + return ZSTD_compressBlock_fast_noDict_4(ms, seqStore, rep, src, srcSize); case 5 : - return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5); + return ZSTD_compressBlock_fast_noDict_5(ms, seqStore, rep, src, srcSize); case 6 : - return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6); + return ZSTD_compressBlock_fast_noDict_6(ms, seqStore, rep, src, srcSize); case 7 : - return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7); + return ZSTD_compressBlock_fast_noDict_7(ms, seqStore, rep, src, srcSize); } } @@ -351,6 +491,12 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( return (size_t)(iend - anchor); } + +ZSTD_GEN_FAST_FN(dictMatchState, 4) +ZSTD_GEN_FAST_FN(dictMatchState, 5) +ZSTD_GEN_FAST_FN(dictMatchState, 6) +ZSTD_GEN_FAST_FN(dictMatchState, 7) + size_t ZSTD_compressBlock_fast_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) @@ -361,13 +507,13 @@ size_t ZSTD_compressBlock_fast_dictMatchState( { default: /* includes case 3 */ case 4 : - return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4); + return ZSTD_compressBlock_fast_dictMatchState_4(ms, seqStore, rep, src, srcSize); case 5 : - return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5); + return ZSTD_compressBlock_fast_dictMatchState_5(ms, seqStore, rep, src, srcSize); case 6 : - return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6); + return ZSTD_compressBlock_fast_dictMatchState_6(ms, seqStore, rep, src, srcSize); case 7 : - return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7); + return ZSTD_compressBlock_fast_dictMatchState_7(ms, seqStore, rep, src, srcSize); } } @@ -402,7 +548,7 @@ static size_t 
ZSTD_compressBlock_fast_extDict_generic( /* switch to "regular" variant if extDict is invalidated due to maxDistance */ if (prefixStartIndex == dictStartIndex) - return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls); + return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize); /* Search Loop */ while (ip < ilimit) { /* < instead of <=, because (ip+1) */ @@ -418,7 +564,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic( DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr); if ( ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ - & (offset_1 < curr+1 - dictStartIndex) ) /* note: we are searching at curr+1 */ + & (offset_1 <= curr+1 - dictStartIndex) ) /* note: we are searching at curr+1 */ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4; @@ -453,7 +599,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic( U32 const current2 = (U32)(ip-base); U32 const repIndex2 = current2 - offset_2; const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; - if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 < curr - dictStartIndex)) /* intentional overflow */ + if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 <= curr - dictStartIndex)) /* intentional overflow */ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; @@ -475,6 +621,10 @@ static size_t ZSTD_compressBlock_fast_extDict_generic( return (size_t)(iend - anchor); } +ZSTD_GEN_FAST_FN(extDict, 4) +ZSTD_GEN_FAST_FN(extDict, 5) +ZSTD_GEN_FAST_FN(extDict, 6) +ZSTD_GEN_FAST_FN(extDict, 7) size_t ZSTD_compressBlock_fast_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], @@ -485,12 +635,12 @@ size_t ZSTD_compressBlock_fast_extDict( { default: /* includes case 3 */ case 4 : - return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4); + return ZSTD_compressBlock_fast_extDict_4(ms, seqStore, rep, src, srcSize); case 5 : - return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5); + return ZSTD_compressBlock_fast_extDict_5(ms, seqStore, rep, src, srcSize); case 6 : - return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6); + return ZSTD_compressBlock_fast_extDict_6(ms, seqStore, rep, src, srcSize); case 7 : - return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7); + return ZSTD_compressBlock_fast_extDict_7(ms, seqStore, rep, src, srcSize); } } diff --git a/lib/compress/zstd_lazy.c b/lib/compress/zstd_lazy.c index 3d523e847..d6354be7f 100644 --- a/lib/compress/zstd_lazy.c +++ b/lib/compress/zstd_lazy.c @@ -61,7 +61,7 @@ ZSTD_updateDUBT(ZSTD_matchState_t* ms, * assumption : curr >= btlow == (curr - btmask) * doesn't fail */ static void -ZSTD_insertDUBT1(ZSTD_matchState_t* ms, +ZSTD_insertDUBT1(const ZSTD_matchState_t* ms, U32 curr, const BYTE* inputEnd, U32 nbCompares, U32 btLow, const ZSTD_dictMode_e dictMode) @@ -93,7 +93,7 @@ ZSTD_insertDUBT1(ZSTD_matchState_t* ms, assert(curr >= btLow); assert(ip < iend); /* condition for ZSTD_count */ - while (nbCompares-- && (matchIndex > windowLow)) { + for (; nbCompares && (matchIndex > windowLow); 
--nbCompares) { U32* const nextPtr = bt + 2*(matchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ assert(matchIndex < curr); @@ -151,7 +151,7 @@ ZSTD_insertDUBT1(ZSTD_matchState_t* ms, static size_t ZSTD_DUBT_findBetterDictMatch ( - ZSTD_matchState_t* ms, + const ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iend, size_t* offsetPtr, size_t bestLength, @@ -185,7 +185,7 @@ ZSTD_DUBT_findBetterDictMatch ( (void)dictMode; assert(dictMode == ZSTD_dictMatchState); - while (nbCompares-- && (dictMatchIndex > dictLowLimit)) { + for (; nbCompares && (dictMatchIndex > dictLowLimit); --nbCompares) { U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ const BYTE* match = dictBase + dictMatchIndex; @@ -309,7 +309,7 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, matchIndex = hashTable[h]; hashTable[h] = curr; /* Update Hash Table */ - while (nbCompares-- && (matchIndex > windowLow)) { + for (; nbCompares && (matchIndex > windowLow); --nbCompares) { U32* const nextPtr = bt + 2*(matchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ const BYTE* match; @@ -357,6 +357,7 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, *smallerPtr = *largerPtr = 0; + assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */ if (dictMode == ZSTD_dictMatchState && nbCompares) { bestLength = ZSTD_DUBT_findBetterDictMatch( ms, ip, iend, @@ -390,54 +391,6 @@ ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms, return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode); } - -static size_t -ZSTD_BtFindBestMatch_selectMLS ( ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - switch(ms->cParams.minMatch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict); - case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict); - case 7 : - case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict); - } -} - - -static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - switch(ms->cParams.minMatch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState); - case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState); - case 7 : - case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState); - } -} - - -static size_t ZSTD_BtFindBestMatch_extDict_selectMLS ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - switch(ms->cParams.minMatch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict); - case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict); - case 7 : - case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict); - } -} - /*********************************** * Dedicated dict search ***********************************/ @@ -450,7 +403,7 @@ void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const B U32* const chainTable = ms->chainTable; U32 const chainSize = 1 << 
ms->cParams.chainLog; U32 idx = ms->nextToUpdate; - U32 const minChain = chainSize < target ? target - chainSize : idx; + U32 const minChain = chainSize < target - idx ? target - chainSize : idx; U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG; U32 const cacheSize = bucketSize - 1; U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize; @@ -692,7 +645,7 @@ U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) { /* inlining is important to hardwire a hot branch (template emulation) */ FORCE_INLINE_TEMPLATE -size_t ZSTD_HcFindBestMatch_generic ( +size_t ZSTD_HcFindBestMatch( ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iLimit, size_t* offsetPtr, @@ -758,6 +711,7 @@ size_t ZSTD_HcFindBestMatch_generic ( matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask); } + assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */ if (dictMode == ZSTD_dedicatedDictSearch) { ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts, dms, ip, iLimit, prefixStart, curr, dictLimit, ddsIdx); @@ -797,310 +751,80 @@ size_t ZSTD_HcFindBestMatch_generic ( return ml; } - -FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - switch(ms->cParams.minMatch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict); - case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict); - case 7 : - case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict); - } -} - - -static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - switch(ms->cParams.minMatch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState); - case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState); - case 7 : - case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState); - } -} - - -static size_t ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - switch(ms->cParams.minMatch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dedicatedDictSearch); - case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dedicatedDictSearch); - case 7 : - case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dedicatedDictSearch); - } -} - - -FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - switch(ms->cParams.minMatch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict); - case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict); - case 7 : - case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict); - } -} - /* ********************************* * (SIMD) Row-based matchfinder ***********************************/ /* Constants for row-based hash */ -#define ZSTD_ROW_HASH_TAG_OFFSET 1 /* byte offset of hashes in the match state's tagTable from the 
beginning of a row */ -#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */ +#define ZSTD_ROW_HASH_TAG_OFFSET 16 /* byte offset of hashes in the match state's tagTable from the beginning of a row */ +#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */ #define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1) +#define ZSTD_ROW_HASH_MAX_ENTRIES 64 /* absolute maximum number of entries per row, for all configurations */ #define ZSTD_ROW_HASH_CACHE_MASK (ZSTD_ROW_HASH_CACHE_SIZE - 1) -typedef U32 ZSTD_VecMask; /* Clarifies when we are interacting with a U32 representing a mask of matches */ - -#if !defined(ZSTD_NO_INTRINSICS) && defined(__SSE2__) /* SIMD SSE version */ - -#include -typedef __m128i ZSTD_Vec128; - -/* Returns a 128-bit container with 128-bits from src */ -static ZSTD_Vec128 ZSTD_Vec128_read(const void* const src) { - return _mm_loadu_si128((ZSTD_Vec128 const*)src); -} - -/* Returns a ZSTD_Vec128 with the byte "val" packed 16 times */ -static ZSTD_Vec128 ZSTD_Vec128_set8(BYTE val) { - return _mm_set1_epi8((char)val); -} - -/* Do byte-by-byte comparison result of x and y. Then collapse 128-bit resultant mask - * into a 32-bit mask that is the MSB of each byte. - * */ -static ZSTD_VecMask ZSTD_Vec128_cmpMask8(ZSTD_Vec128 x, ZSTD_Vec128 y) { - return (ZSTD_VecMask)_mm_movemask_epi8(_mm_cmpeq_epi8(x, y)); -} - -typedef struct { - __m128i fst; - __m128i snd; -} ZSTD_Vec256; - -static ZSTD_Vec256 ZSTD_Vec256_read(const void* const ptr) { - ZSTD_Vec256 v; - v.fst = ZSTD_Vec128_read(ptr); - v.snd = ZSTD_Vec128_read((ZSTD_Vec128 const*)ptr + 1); - return v; -} - -static ZSTD_Vec256 ZSTD_Vec256_set8(BYTE val) { - ZSTD_Vec256 v; - v.fst = ZSTD_Vec128_set8(val); - v.snd = ZSTD_Vec128_set8(val); - return v; -} - -static ZSTD_VecMask ZSTD_Vec256_cmpMask8(ZSTD_Vec256 x, ZSTD_Vec256 y) { - ZSTD_VecMask fstMask; - ZSTD_VecMask sndMask; - fstMask = ZSTD_Vec128_cmpMask8(x.fst, y.fst); - sndMask = ZSTD_Vec128_cmpMask8(x.snd, y.snd); - return fstMask | (sndMask << 16); -} - -#elif !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON) /* SIMD ARM NEON Version */ - -#include -typedef uint8x16_t ZSTD_Vec128; - -static ZSTD_Vec128 ZSTD_Vec128_read(const void* const src) { - return vld1q_u8((const BYTE* const)src); -} - -static ZSTD_Vec128 ZSTD_Vec128_set8(BYTE val) { - return vdupq_n_u8(val); -} - -/* Mimics '_mm_movemask_epi8()' from SSE */ -static U32 ZSTD_vmovmaskq_u8(ZSTD_Vec128 val) { - /* Shift out everything but the MSB bits in each byte */ - uint16x8_t highBits = vreinterpretq_u16_u8(vshrq_n_u8(val, 7)); - /* Merge the even lanes together with vsra (right shift and add) */ - uint32x4_t paired16 = vreinterpretq_u32_u16(vsraq_n_u16(highBits, highBits, 7)); - uint64x2_t paired32 = vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14)); - uint8x16_t paired64 = vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28)); - /* Extract the low 8 bits from each lane, merge */ - return vgetq_lane_u8(paired64, 0) | ((U32)vgetq_lane_u8(paired64, 8) << 8); -} - -static ZSTD_VecMask ZSTD_Vec128_cmpMask8(ZSTD_Vec128 x, ZSTD_Vec128 y) { - return (ZSTD_VecMask)ZSTD_vmovmaskq_u8(vceqq_u8(x, y)); -} - -typedef struct { - uint8x16_t fst; - uint8x16_t snd; -} ZSTD_Vec256; - -static ZSTD_Vec256 ZSTD_Vec256_read(const void* const ptr) { - ZSTD_Vec256 v; - v.fst = ZSTD_Vec128_read(ptr); - v.snd = ZSTD_Vec128_read((ZSTD_Vec128 const*)ptr + 1); - return v; -} - -static ZSTD_Vec256 ZSTD_Vec256_set8(BYTE val) { - ZSTD_Vec256 v; - v.fst = ZSTD_Vec128_set8(val); - v.snd = 
ZSTD_Vec128_set8(val); - return v; -} - -static ZSTD_VecMask ZSTD_Vec256_cmpMask8(ZSTD_Vec256 x, ZSTD_Vec256 y) { - ZSTD_VecMask fstMask; - ZSTD_VecMask sndMask; - fstMask = ZSTD_Vec128_cmpMask8(x.fst, y.fst); - sndMask = ZSTD_Vec128_cmpMask8(x.snd, y.snd); - return fstMask | (sndMask << 16); -} - -#else /* Scalar fallback version */ - -#define VEC128_NB_SIZE_T (16 / sizeof(size_t)) -typedef struct { - size_t vec[VEC128_NB_SIZE_T]; -} ZSTD_Vec128; - -static ZSTD_Vec128 ZSTD_Vec128_read(const void* const src) { - ZSTD_Vec128 ret; - ZSTD_memcpy(ret.vec, src, VEC128_NB_SIZE_T*sizeof(size_t)); - return ret; -} - -static ZSTD_Vec128 ZSTD_Vec128_set8(BYTE val) { - ZSTD_Vec128 ret = { {0} }; - int startBit = sizeof(size_t) * 8 - 8; - for (;startBit >= 0; startBit -= 8) { - unsigned j = 0; - for (;j < VEC128_NB_SIZE_T; ++j) { - ret.vec[j] |= ((size_t)val << startBit); - } - } - return ret; -} - -/* Compare x to y, byte by byte, generating a "matches" bitfield */ -static ZSTD_VecMask ZSTD_Vec128_cmpMask8(ZSTD_Vec128 x, ZSTD_Vec128 y) { - ZSTD_VecMask res = 0; - unsigned i = 0; - unsigned l = 0; - for (; i < VEC128_NB_SIZE_T; ++i) { - const size_t cmp1 = x.vec[i]; - const size_t cmp2 = y.vec[i]; - unsigned j = 0; - for (; j < sizeof(size_t); ++j, ++l) { - if (((cmp1 >> j*8) & 0xFF) == ((cmp2 >> j*8) & 0xFF)) { - res |= ((U32)1 << (j+i*sizeof(size_t))); - } - } - } - return res; -} - -#define VEC256_NB_SIZE_T 2*VEC128_NB_SIZE_T -typedef struct { - size_t vec[VEC256_NB_SIZE_T]; -} ZSTD_Vec256; - -static ZSTD_Vec256 ZSTD_Vec256_read(const void* const src) { - ZSTD_Vec256 ret; - ZSTD_memcpy(ret.vec, src, VEC256_NB_SIZE_T*sizeof(size_t)); - return ret; -} - -static ZSTD_Vec256 ZSTD_Vec256_set8(BYTE val) { - ZSTD_Vec256 ret = { {0} }; - int startBit = sizeof(size_t) * 8 - 8; - for (;startBit >= 0; startBit -= 8) { - unsigned j = 0; - for (;j < VEC256_NB_SIZE_T; ++j) { - ret.vec[j] |= ((size_t)val << startBit); - } - } - return ret; -} - -/* Compare x to y, byte by byte, generating a "matches" bitfield */ -static ZSTD_VecMask ZSTD_Vec256_cmpMask8(ZSTD_Vec256 x, ZSTD_Vec256 y) { - ZSTD_VecMask res = 0; - unsigned i = 0; - unsigned l = 0; - for (; i < VEC256_NB_SIZE_T; ++i) { - const size_t cmp1 = x.vec[i]; - const size_t cmp2 = y.vec[i]; - unsigned j = 0; - for (; j < sizeof(size_t); ++j, ++l) { - if (((cmp1 >> j*8) & 0xFF) == ((cmp2 >> j*8) & 0xFF)) { - res |= ((U32)1 << (j+i*sizeof(size_t))); - } - } - } - return res; -} - -#endif /* !defined(ZSTD_NO_INTRINSICS) && defined(__SSE2__) */ +typedef U64 ZSTD_VecMask; /* Clarifies when we are interacting with a U64 representing a mask of matches */ /* ZSTD_VecMask_next(): * Starting from the LSB, returns the idx of the next non-zero bit. * Basically counting the nb of trailing zeroes. */ static U32 ZSTD_VecMask_next(ZSTD_VecMask val) { -# if defined(_MSC_VER) /* Visual */ - unsigned long r=0; - return _BitScanForward(&r, val) ? 
(U32)r : 0; -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (U32)__builtin_ctz(val); + assert(val != 0); +# if defined(_MSC_VER) && defined(_WIN64) + if (val != 0) { + unsigned long r; + _BitScanForward64(&r, val); + return (U32)(r); + } else { + /* Should not reach this code path */ + __assume(0); + } +# elif (defined(__GNUC__) && ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4)))) + if (sizeof(size_t) == 4) { + U32 mostSignificantWord = (U32)(val >> 32); + U32 leastSignificantWord = (U32)val; + if (leastSignificantWord == 0) { + return 32 + (U32)__builtin_ctz(mostSignificantWord); + } else { + return (U32)__builtin_ctz(leastSignificantWord); + } + } else { + return (U32)__builtin_ctzll(val); + } # else - /* Software ctz version: http://graphics.stanford.edu/~seander/bithacks.html#ZerosOnRightMultLookup */ - static const U32 multiplyDeBruijnBitPosition[32] = - { - 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, - 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 - }; - return multiplyDeBruijnBitPosition[((U32)((v & -(int)v) * 0x077CB531U)) >> 27]; + /* Software ctz version: http://aggregate.org/MAGIC/#Trailing%20Zero%20Count + * and: https://stackoverflow.com/questions/2709430/count-number-of-bits-in-a-64-bit-long-big-integer + */ + val = ~val & (val - 1ULL); /* Lowest set bit mask */ + val = val - ((val >> 1) & 0x5555555555555555); + val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL); + return (U32)((((val + (val >> 4)) & 0xF0F0F0F0F0F0F0FULL) * 0x101010101010101ULL) >> 56); # endif } -/* ZSTD_VecMask_rotateRight(): - * Rotates a bitfield to the right by "rotation" bits. - * If the rotation is greater than totalBits, the returned mask is 0. +/* ZSTD_rotateRight_*(): + * Rotates a bitfield to the right by "count" bits. 
+ * https://en.wikipedia.org/w/index.php?title=Circular_shift&oldid=991635599#Implementing_circular_shifts */ -FORCE_INLINE_TEMPLATE ZSTD_VecMask -ZSTD_VecMask_rotateRight(ZSTD_VecMask mask, U32 const rotation, U32 const totalBits) { - if (rotation == 0) - return mask; - switch (totalBits) { - default: - assert(0); - case 16: - return (mask >> rotation) | (U16)(mask << (16 - rotation)); - case 32: - return (mask >> rotation) | (U32)(mask << (32 - rotation)); - } +FORCE_INLINE_TEMPLATE +U64 ZSTD_rotateRight_U64(U64 const value, U32 count) { + assert(count < 64); + count &= 0x3F; /* for fickle pattern recognition */ + return (value >> count) | (U64)(value << ((0U - count) & 0x3F)); +} + +FORCE_INLINE_TEMPLATE +U32 ZSTD_rotateRight_U32(U32 const value, U32 count) { + assert(count < 32); + count &= 0x1F; /* for fickle pattern recognition */ + return (value >> count) | (U32)(value << ((0U - count) & 0x1F)); +} + +FORCE_INLINE_TEMPLATE +U16 ZSTD_rotateRight_U16(U16 const value, U32 count) { + assert(count < 16); + count &= 0x0F; /* for fickle pattern recognition */ + return (value >> count) | (U16)(value << ((0U - count) & 0x0F)); } /* ZSTD_row_nextIndex(): @@ -1126,20 +850,24 @@ MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) { */ FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, U16 const* tagTable, U32 const relRow, U32 const rowLog) { PREFETCH_L1(hashTable + relRow); - if (rowLog == 5) { + if (rowLog >= 5) { PREFETCH_L1(hashTable + relRow + 16); + /* Note: prefetching more of the hash table does not appear to be beneficial for 128-entry rows */ } PREFETCH_L1(tagTable + relRow); - assert(rowLog == 4 || rowLog == 5); + if (rowLog == 6) { + PREFETCH_L1(tagTable + relRow + 32); + } + assert(rowLog == 4 || rowLog == 5 || rowLog == 6); assert(ZSTD_isAligned(hashTable + relRow, 64)); /* prefetched hash row always 64-byte aligned */ - assert(ZSTD_isAligned(tagTable + relRow, (size_t)1 << rowLog)); /* prefetched tagRow sits on a multiple of 32 or 64 bytes */ + assert(ZSTD_isAligned(tagTable + relRow, (size_t)1 << rowLog)); /* prefetched tagRow sits on correct multiple of bytes (32,64,128) */ } /* ZSTD_row_fillHashCache(): * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries, * but not beyond iLimit. */ -static void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base, +FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base, U32 const rowLog, U32 const mls, U32 idx, const BYTE* const iLimit) { @@ -1179,35 +907,65 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTab } } -/* ZSTD_row_update_internal(): - * Inserts the byte at ip into the appropriate position in the hash table. - * Determines the relative row, and the position within the {16, 32} entry row to insert at. +/* ZSTD_row_update_internalImpl(): + * Updates the hash table with positions starting from updateStartIdx until updateEndIdx. 
*/ -FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip, - U32 const mls, U32 const rowLog, - U32 const rowMask, U32 const useCache) +FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms, + U32 updateStartIdx, U32 const updateEndIdx, + U32 const mls, U32 const rowLog, + U32 const rowMask, U32 const useCache) { U32* const hashTable = ms->hashTable; U16* const tagTable = ms->tagTable; U32 const hashLog = ms->rowHashLog; const BYTE* const base = ms->window.base; - const U32 target = (U32)(ip - base); - U32 idx = ms->nextToUpdate; - DEBUGLOG(6, "ZSTD_row_update_internal(): nextToUpdate=%u, current=%u", idx, target); - for (; idx < target; ++idx) { - U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, idx, hashLog, rowLog, mls) - : (U32)ZSTD_hashPtr(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls); + DEBUGLOG(6, "ZSTD_row_update_internalImpl(): updateStartIdx=%u, updateEndIdx=%u", updateStartIdx, updateEndIdx); + for (; updateStartIdx < updateEndIdx; ++updateStartIdx) { + U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls) + : (U32)ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls); U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; U32* const row = hashTable + relRow; BYTE* tagRow = (BYTE*)(tagTable + relRow); /* Though tagTable is laid out as a table of U16, each tag is only 1 byte. Explicit cast allows us to get exact desired position within each row */ U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask); - assert(hash == ZSTD_hashPtr(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls)); + assert(hash == ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls)); ((BYTE*)tagRow)[pos + ZSTD_ROW_HASH_TAG_OFFSET] = hash & ZSTD_ROW_HASH_TAG_MASK; - row[pos] = idx; + row[pos] = updateStartIdx; } +} + +/* ZSTD_row_update_internal(): + * Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate. + * Skips sections of long matches as is necessary. + */ +FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip, + U32 const mls, U32 const rowLog, + U32 const rowMask, U32 const useCache) +{ + U32 idx = ms->nextToUpdate; + const BYTE* const base = ms->window.base; + const U32 target = (U32)(ip - base); + const U32 kSkipThreshold = 384; + const U32 kMaxMatchStartPositionsToUpdate = 96; + const U32 kMaxMatchEndPositionsToUpdate = 32; + + if (useCache) { + /* Only skip positions when using hash cache, i.e. + * if we are loading a dict, don't skip anything. + * If we decide to skip, then we only update a set number + * of positions at the beginning and end of the match. + */ + if (UNLIKELY(target - idx > kSkipThreshold)) { + U32 const bound = idx + kMaxMatchStartPositionsToUpdate; + ZSTD_row_update_internalImpl(ms, idx, bound, mls, rowLog, rowMask, useCache); + idx = target - kMaxMatchEndPositionsToUpdate; + ZSTD_row_fillHashCache(ms, base, rowLog, mls, idx, ip+1); + } + } + assert(target >= idx); + ZSTD_row_update_internalImpl(ms, idx, target, mls, rowLog, rowMask, useCache); ms->nextToUpdate = target; } @@ -1216,7 +974,7 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const * processing. */ void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) { - const U32 rowLog = ms->cParams.searchLog < 5 ? 
4 : 5; + const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6); const U32 rowMask = (1u << rowLog) - 1; const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */); @@ -1226,24 +984,131 @@ void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) { /* Returns a ZSTD_VecMask (U32) that has the nth bit set to 1 if the newly-computed "tag" matches * the hash at the nth position in a row of the tagTable. - */ + * Each row is a circular buffer beginning at the value of "head". So we must rotate the "matches" bitfield + * to match up with the actual layout of the entries within the hashTable */ FORCE_INLINE_TEMPLATE ZSTD_VecMask ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head, const U32 rowEntries) { - ZSTD_VecMask matches = 0; + const BYTE* const src = tagRow + ZSTD_ROW_HASH_TAG_OFFSET; + assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64); + assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES); +#if defined(ZSTD_ARCH_X86_SSE2) if (rowEntries == 16) { - ZSTD_Vec128 hashes = ZSTD_Vec128_read(tagRow + ZSTD_ROW_HASH_TAG_OFFSET); - ZSTD_Vec128 expandedTags = ZSTD_Vec128_set8(tag); - matches = ZSTD_Vec128_cmpMask8(hashes, expandedTags); + const __m128i chunk = _mm_loadu_si128((const __m128i*)(const void*)src); + const __m128i equalMask = _mm_cmpeq_epi8(chunk, _mm_set1_epi8(tag)); + const U16 matches = (U16)_mm_movemask_epi8(equalMask); + return ZSTD_rotateRight_U16(matches, head); } else if (rowEntries == 32) { - ZSTD_Vec256 hashes = ZSTD_Vec256_read(tagRow + ZSTD_ROW_HASH_TAG_OFFSET); - ZSTD_Vec256 expandedTags = ZSTD_Vec256_set8(tag); - matches = ZSTD_Vec256_cmpMask8(hashes, expandedTags); - } else { - assert(0); + const __m128i chunk0 = _mm_loadu_si128((const __m128i*)(const void*)&src[0]); + const __m128i chunk1 = _mm_loadu_si128((const __m128i*)(const void*)&src[16]); + const __m128i equalMask0 = _mm_cmpeq_epi8(chunk0, _mm_set1_epi8(tag)); + const __m128i equalMask1 = _mm_cmpeq_epi8(chunk1, _mm_set1_epi8(tag)); + const U32 lo = (U32)_mm_movemask_epi8(equalMask0); + const U32 hi = (U32)_mm_movemask_epi8(equalMask1); + return ZSTD_rotateRight_U32((hi << 16) | lo, head); + } else { /* rowEntries == 64 */ + const __m128i chunk0 = _mm_loadu_si128((const __m128i*)(const void*)&src[0]); + const __m128i chunk1 = _mm_loadu_si128((const __m128i*)(const void*)&src[16]); + const __m128i chunk2 = _mm_loadu_si128((const __m128i*)(const void*)&src[32]); + const __m128i chunk3 = _mm_loadu_si128((const __m128i*)(const void*)&src[48]); + const __m128i comparisonMask = _mm_set1_epi8(tag); + const __m128i equalMask0 = _mm_cmpeq_epi8(chunk0, comparisonMask); + const __m128i equalMask1 = _mm_cmpeq_epi8(chunk1, comparisonMask); + const __m128i equalMask2 = _mm_cmpeq_epi8(chunk2, comparisonMask); + const __m128i equalMask3 = _mm_cmpeq_epi8(chunk3, comparisonMask); + const U64 mask0 = (U64)_mm_movemask_epi8(equalMask0); + const U64 mask1 = (U64)_mm_movemask_epi8(equalMask1); + const U64 mask2 = (U64)_mm_movemask_epi8(equalMask2); + const U64 mask3 = (U64)_mm_movemask_epi8(equalMask3); + return ZSTD_rotateRight_U64((mask3 << 48) | (mask2 << 32) | (mask1 << 16) | mask0, head); } - /* Each row is a circular buffer beginning at the value of "head". 
So we must rotate the "matches" bitfield - to match up with the actual layout of the entries within the hashTable */ - return ZSTD_VecMask_rotateRight(matches, head, rowEntries); +#else +# if defined(ZSTD_ARCH_ARM_NEON) + if (MEM_isLittleEndian()) { + if (rowEntries == 16) { + const uint8x16_t chunk = vld1q_u8(src); + const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag))); + const uint16x8_t t0 = vshlq_n_u16(equalMask, 7); + const uint32x4_t t1 = vreinterpretq_u32_u16(vsriq_n_u16(t0, t0, 14)); + const uint64x2_t t2 = vreinterpretq_u64_u32(vshrq_n_u32(t1, 14)); + const uint8x16_t t3 = vreinterpretq_u8_u64(vsraq_n_u64(t2, t2, 28)); + const U16 hi = (U16)vgetq_lane_u8(t3, 8); + const U16 lo = (U16)vgetq_lane_u8(t3, 0); + return ZSTD_rotateRight_U16((hi << 8) | lo, head); + } else if (rowEntries == 32) { + const uint16x8x2_t chunk = vld2q_u16((const U16*)(const void*)src); + const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]); + const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]); + const uint8x16_t equalMask0 = vceqq_u8(chunk0, vdupq_n_u8(tag)); + const uint8x16_t equalMask1 = vceqq_u8(chunk1, vdupq_n_u8(tag)); + const int8x8_t pack0 = vqmovn_s16(vreinterpretq_s16_u8(equalMask0)); + const int8x8_t pack1 = vqmovn_s16(vreinterpretq_s16_u8(equalMask1)); + const uint8x8_t t0 = vreinterpret_u8_s8(pack0); + const uint8x8_t t1 = vreinterpret_u8_s8(pack1); + const uint8x8_t t2 = vsri_n_u8(t1, t0, 2); + const uint8x8x2_t t3 = vuzp_u8(t2, t0); + const uint8x8_t t4 = vsri_n_u8(t3.val[1], t3.val[0], 4); + const U32 matches = vget_lane_u32(vreinterpret_u32_u8(t4), 0); + return ZSTD_rotateRight_U32(matches, head); + } else { /* rowEntries == 64 */ + const uint8x16x4_t chunk = vld4q_u8(src); + const uint8x16_t dup = vdupq_n_u8(tag); + const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup); + const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup); + const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup); + const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup); + + const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1); + const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1); + const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2); + const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4); + const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4); + const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0); + return ZSTD_rotateRight_U64(matches, head); + } + } +# endif + { /* SWAR */ + const size_t chunkSize = sizeof(size_t); + const size_t shiftAmount = ((chunkSize * 8) - chunkSize); + const size_t xFF = ~((size_t)0); + const size_t x01 = xFF / 0xFF; + const size_t x80 = x01 << 7; + const size_t splatChar = tag * x01; + ZSTD_VecMask matches = 0; + int i = rowEntries - chunkSize; + assert((sizeof(size_t) == 4) || (sizeof(size_t) == 8)); + if (MEM_isLittleEndian()) { /* runtime check so have two loops */ + const size_t extractMagic = (xFF / 0x7F) >> chunkSize; + do { + size_t chunk = MEM_readST(&src[i]); + chunk ^= splatChar; + chunk = (((chunk | x80) - x01) | chunk) & x80; + matches <<= chunkSize; + matches |= (chunk * extractMagic) >> shiftAmount; + i -= chunkSize; + } while (i >= 0); + } else { /* big endian: reverse bits during extraction */ + const size_t msb = xFF ^ (xFF >> 1); + const size_t extractMagic = (msb / 0x1FF) | msb; + do { + size_t chunk = MEM_readST(&src[i]); + chunk ^= splatChar; + chunk = (((chunk | x80) - x01) | chunk) & x80; + matches <<= chunkSize; + matches |= ((chunk >> 7) * extractMagic) >> shiftAmount; + i -= chunkSize; + } while (i >= 0); + } + matches = ~matches; + if 
(rowEntries == 16) { + return ZSTD_rotateRight_U16((U16)matches, head); + } else if (rowEntries == 32) { + return ZSTD_rotateRight_U32((U32)matches, head); + } else { + return ZSTD_rotateRight_U64((U64)matches, head); + } + } +#endif } /* The high-level approach of the SIMD row based match finder is as follows: @@ -1262,7 +1127,7 @@ ZSTD_VecMask ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, con * - Pick the longest match. */ FORCE_INLINE_TEMPLATE -size_t ZSTD_RowFindBestMatch_generic ( +size_t ZSTD_RowFindBestMatch( ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iLimit, size_t* offsetPtr, @@ -1293,11 +1158,13 @@ size_t ZSTD_RowFindBestMatch_generic ( /* DMS/DDS variables that may be referenced laster */ const ZSTD_matchState_t* const dms = ms->dictMatchState; - size_t ddsIdx; - U32 ddsExtraAttempts; /* cctx hash tables are limited in searches, but allow extra searches into DDS */ - U32 dmsTag; - U32* dmsRow; - BYTE* dmsTagRow; + + /* Initialize the following variables to satisfy static analyzer */ + size_t ddsIdx = 0; + U32 ddsExtraAttempts = 0; /* cctx hash tables are limited in searches, but allow extra searches into DDS */ + U32 dmsTag = 0; + U32* dmsRow = NULL; + BYTE* dmsTagRow = NULL; if (dictMode == ZSTD_dedicatedDictSearch) { const U32 ddsHashLog = dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG; @@ -1329,7 +1196,7 @@ size_t ZSTD_RowFindBestMatch_generic ( U32* const row = hashTable + relRow; BYTE* tagRow = (BYTE*)(tagTable + relRow); U32 const head = *tagRow & rowMask; - U32 matchBuffer[32 /* maximum nb entries per row */]; + U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES]; size_t numMatches = 0; size_t currMatch = 0; ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, head, rowEntries); @@ -1385,6 +1252,7 @@ size_t ZSTD_RowFindBestMatch_generic ( } } + assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. 
*/ if (dictMode == ZSTD_dedicatedDictSearch) { ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts + ddsExtraAttempts, dms, ip, iLimit, prefixStart, curr, dictLimit, ddsIdx); @@ -1397,7 +1265,7 @@ size_t ZSTD_RowFindBestMatch_generic ( const U32 dmsIndexDelta = dictLimit - dmsSize; { U32 const head = *dmsTagRow & rowMask; - U32 matchBuffer[32 /* maximum nb row entries */]; + U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES]; size_t numMatches = 0; size_t currMatch = 0; ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, head, rowEntries); @@ -1435,84 +1303,168 @@ size_t ZSTD_RowFindBestMatch_generic ( return ml; } -/* Inlining is important to hardwire a hot branch (template emulation) */ -FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_selectMLS ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - const ZSTD_dictMode_e dictMode, size_t* offsetPtr, const U32 rowLog) -{ - switch(ms->cParams.minMatch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_RowFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, dictMode, rowLog); - case 5 : return ZSTD_RowFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, dictMode, rowLog); - case 7 : - case 6 : return ZSTD_RowFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, dictMode, rowLog); - } -} -FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_selectRowLog ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - const U32 cappedSearchLog = MIN(ms->cParams.searchLog, 5); - switch(cappedSearchLog) - { - default : - case 4 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_noDict, offsetPtr, 4); - case 5 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_noDict, offsetPtr, 5); - } -} +typedef size_t (*searchMax_f)( + ZSTD_matchState_t* ms, + const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr); -FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_dictMatchState_selectRowLog( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - const U32 cappedSearchLog = MIN(ms->cParams.searchLog, 5); - switch(cappedSearchLog) - { - default : - case 4 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_dictMatchState, offsetPtr, 4); - case 5 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_dictMatchState, offsetPtr, 5); - } -} +/** + * This struct contains the functions necessary for lazy to search. + * Currently, that is only searchMax. However, it is still valuable to have the + * VTable because this makes it easier to add more functions to the VTable later. + * + * TODO: The start of the search function involves loading and calculating a + * bunch of constants from the ZSTD_matchState_t. These computations could be + * done in an initialization function, and saved somewhere in the match state. + * Then we could pass a pointer to the saved state instead of the match state, + * and avoid duplicate computations. + * + * TODO: Move the match re-winding into searchMax. This improves compression + * ratio, and unlocks further simplifications with the next TODO. + * + * TODO: Try moving the repcode search into searchMax. After the re-winding + * and repcode search are in searchMax, there is no more logic in the match + * finder loop that requires knowledge about the dictMode. So we should be + * able to avoid force inlining it, and we can join the extDict loop with + * the single segment loop. 
It should go in searchMax instead of its own + * function to avoid having multiple virtual function calls per search. + */ +typedef struct { + searchMax_f searchMax; +} ZSTD_LazyVTable; -FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_dedicatedDictSearch_selectRowLog( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - const U32 cappedSearchLog = MIN(ms->cParams.searchLog, 5); - switch(cappedSearchLog) - { - default : - case 4 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_dedicatedDictSearch, offsetPtr, 4); - case 5 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_dedicatedDictSearch, offsetPtr, 5); - } -} +#define GEN_ZSTD_BT_VTABLE(dictMode, mls, ...) \ + static size_t ZSTD_BtFindBestMatch_##dictMode##_##mls( \ + ZSTD_matchState_t* ms, \ + const BYTE* ip, const BYTE* const iLimit, \ + size_t* offsetPtr) \ + { \ + assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \ + } \ + static const ZSTD_LazyVTable ZSTD_BtVTable_##dictMode##_##mls = { \ + ZSTD_BtFindBestMatch_##dictMode##_##mls \ + }; -FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_extDict_selectRowLog ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - const U32 cappedSearchLog = MIN(ms->cParams.searchLog, 5); - switch(cappedSearchLog) - { - default : - case 4 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_extDict, offsetPtr, 4); - case 5 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_extDict, offsetPtr, 5); - } -} +#define GEN_ZSTD_HC_VTABLE(dictMode, mls, ...) \ + static size_t ZSTD_HcFindBestMatch_##dictMode##_##mls( \ + ZSTD_matchState_t* ms, \ + const BYTE* ip, const BYTE* const iLimit, \ + size_t* offsetPtr) \ + { \ + assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \ + } \ + static const ZSTD_LazyVTable ZSTD_HcVTable_##dictMode##_##mls = { \ + ZSTD_HcFindBestMatch_##dictMode##_##mls \ + }; +#define GEN_ZSTD_ROW_VTABLE(dictMode, mls, rowLog) \ + static size_t ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog( \ + ZSTD_matchState_t* ms, \ + const BYTE* ip, const BYTE* const iLimit, \ + size_t* offsetPtr) \ + { \ + assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ + assert(MAX(4, MIN(6, ms->cParams.searchLog)) == rowLog); \ + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode, rowLog); \ + } \ + static const ZSTD_LazyVTable ZSTD_RowVTable_##dictMode##_##mls##_##rowLog = { \ + ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog \ + }; + +#define ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls) \ + X(dictMode, mls, 4) \ + X(dictMode, mls, 5) \ + X(dictMode, mls, 6) + +#define ZSTD_FOR_EACH_MLS_ROWLOG(X, dictMode) \ + ZSTD_FOR_EACH_ROWLOG(X, dictMode, 4) \ + ZSTD_FOR_EACH_ROWLOG(X, dictMode, 5) \ + ZSTD_FOR_EACH_ROWLOG(X, dictMode, 6) + +#define ZSTD_FOR_EACH_MLS(X, dictMode) \ + X(dictMode, 4) \ + X(dictMode, 5) \ + X(dictMode, 6) + +#define ZSTD_FOR_EACH_DICT_MODE(X, ...) 
\ + X(__VA_ARGS__, noDict) \ + X(__VA_ARGS__, extDict) \ + X(__VA_ARGS__, dictMatchState) \ + X(__VA_ARGS__, dedicatedDictSearch) + +/* Generate Row VTables for each combination of (dictMode, mls, rowLog) */ +ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_VTABLE) +/* Generate Binary Tree VTables for each combination of (dictMode, mls) */ +ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_VTABLE) +/* Generate Hash Chain VTables for each combination of (dictMode, mls) */ +ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_VTABLE) + +#define GEN_ZSTD_BT_VTABLE_ARRAY(dictMode) \ + { \ + &ZSTD_BtVTable_##dictMode##_4, \ + &ZSTD_BtVTable_##dictMode##_5, \ + &ZSTD_BtVTable_##dictMode##_6 \ + } + +#define GEN_ZSTD_HC_VTABLE_ARRAY(dictMode) \ + { \ + &ZSTD_HcVTable_##dictMode##_4, \ + &ZSTD_HcVTable_##dictMode##_5, \ + &ZSTD_HcVTable_##dictMode##_6 \ + } + +#define GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, mls) \ + { \ + &ZSTD_RowVTable_##dictMode##_##mls##_4, \ + &ZSTD_RowVTable_##dictMode##_##mls##_5, \ + &ZSTD_RowVTable_##dictMode##_##mls##_6 \ + } + +#define GEN_ZSTD_ROW_VTABLE_ARRAY(dictMode) \ + { \ + GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, 4), \ + GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, 5), \ + GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, 6) \ + } + +#define GEN_ZSTD_VTABLE_ARRAY(X) \ + { \ + X(noDict), \ + X(extDict), \ + X(dictMatchState), \ + X(dedicatedDictSearch) \ + } /* ******************************* * Common parser - lazy strategy *********************************/ typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e; + +static ZSTD_LazyVTable const* ZSTD_selectLazyVTable(ZSTD_matchState_t const* ms, searchMethod_e searchMethod, ZSTD_dictMode_e dictMode) +{ + /* Fill the Hc/Bt VTable arrays with the right functions for the (dictMode, mls) combination. */ + ZSTD_LazyVTable const* const hcVTables[4][3] = GEN_ZSTD_VTABLE_ARRAY(GEN_ZSTD_HC_VTABLE_ARRAY); + ZSTD_LazyVTable const* const btVTables[4][3] = GEN_ZSTD_VTABLE_ARRAY(GEN_ZSTD_BT_VTABLE_ARRAY); + /* Fill the Row VTable array with the right functions for the (dictMode, mls, rowLog) combination. */ + ZSTD_LazyVTable const* const rowVTables[4][3][3] = GEN_ZSTD_VTABLE_ARRAY(GEN_ZSTD_ROW_VTABLE_ARRAY); + + U32 const mls = MAX(4, MIN(6, ms->cParams.minMatch)); + U32 const rowLog = MAX(4, MIN(6, ms->cParams.searchLog)); + switch (searchMethod) { + case search_hashChain: + return hcVTables[dictMode][mls - 4]; + case search_binaryTree: + return btVTables[dictMode][mls - 4]; + case search_rowHash: + return rowVTables[dictMode][mls - 4][rowLog - 4]; + default: + return NULL; + } +} + FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_lazy_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, @@ -1531,9 +1483,6 @@ ZSTD_compressBlock_lazy_generic( const BYTE* const prefixLowest = base + prefixLowestIndex; const U32 rowLog = ms->cParams.searchLog < 5 ? 4 : 5; - typedef size_t (*searchMax_f)( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr); /** * This table is indexed first by the four ZSTD_dictMode_e values, and then @@ -1541,30 +1490,8 @@ ZSTD_compressBlock_lazy_generic( * that should never occur (extDict modes go to the other implementation * below and there is no DDSS for binary tree search yet). 
*/ - const searchMax_f searchFuncs[4][3] = { - { - ZSTD_HcFindBestMatch_selectMLS, - ZSTD_BtFindBestMatch_selectMLS, - ZSTD_RowFindBestMatch_selectRowLog - }, - { - NULL, - NULL, - NULL - }, - { - ZSTD_HcFindBestMatch_dictMatchState_selectMLS, - ZSTD_BtFindBestMatch_dictMatchState_selectMLS, - ZSTD_RowFindBestMatch_dictMatchState_selectRowLog - }, - { - ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS, - NULL, - ZSTD_RowFindBestMatch_dedicatedDictSearch_selectRowLog - } - }; - searchMax_f const searchMax = searchFuncs[dictMode][(int)searchMethod]; + searchMax_f const searchMax = ZSTD_selectLazyVTable(ms, searchMethod, dictMode)->searchMax; U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0; const int isDMS = dictMode == ZSTD_dictMatchState; @@ -1955,15 +1882,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( const U32 windowLog = ms->cParams.windowLog; const U32 rowLog = ms->cParams.searchLog < 5 ? 4 : 5; - typedef size_t (*searchMax_f)( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr); - const searchMax_f searchFuncs[3] = { - ZSTD_HcFindBestMatch_extDict_selectMLS, - ZSTD_BtFindBestMatch_extDict_selectMLS, - ZSTD_RowFindBestMatch_extDict_selectRowLog - }; - searchMax_f searchMax = searchFuncs[(int)searchMethod]; + searchMax_f const searchMax = ZSTD_selectLazyVTable(ms, searchMethod, ZSTD_extDict)->searchMax; U32 offset_1 = rep[0], offset_2 = rep[1]; DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod); @@ -1995,7 +1914,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow */ - & (offset_1 < curr+1 - windowLow) ) /* note: we are searching at curr+1 */ + & (offset_1 <= curr+1 - windowLow) ) /* note: we are searching at curr+1 */ if (MEM_read32(ip+1) == MEM_read32(repMatch)) { /* repcode detected we should take it */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; @@ -2010,7 +1929,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( matchLength = ml2, start = ip, offset=offsetFound; } - if (matchLength < 4) { + if (matchLength < 4) { ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */ continue; } @@ -2027,7 +1946,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */ - & (offset_1 < curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ + & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; @@ -2059,7 +1978,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( const BYTE* const repBase = repIndex < dictLimit ? 
dictBase : base; const BYTE* const repMatch = repBase + repIndex; if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */ - & (offset_1 < curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ + & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; @@ -2106,7 +2025,7 @@ _storeSequence: const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */ - & (offset_2 < repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ + & (offset_2 <= repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected we should take it */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; diff --git a/lib/compress/zstd_ldm.c b/lib/compress/zstd_ldm.c index fa4ebeabd..19b99f278 100644 --- a/lib/compress/zstd_ldm.c +++ b/lib/compress/zstd_ldm.c @@ -159,12 +159,12 @@ size_t ZSTD_ldm_getTableSize(ldmParams_t params) size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog); size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize) + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t)); - return params.enableLdm ? totalSize : 0; + return params.enableLdm == ZSTD_ps_enable ? totalSize : 0; } size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize) { - return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0; + return params.enableLdm == ZSTD_ps_enable ? (maxChunkSize / params.minMatchLength) : 0; } /** ZSTD_ldm_getBucket() : @@ -478,7 +478,7 @@ static size_t ZSTD_ldm_generateSequences_internal( */ if (anchor > ip + hashed) { ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength); - /* Continue the outter loop at anchor (ip + hashed == anchor). */ + /* Continue the outer loop at anchor (ip + hashed == anchor). 
*/ ip = anchor - hashed; break; } @@ -657,7 +657,7 @@ void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) { size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - ZSTD_useRowMatchFinderMode_e useRowMatchFinder, + ZSTD_paramSwitch_e useRowMatchFinder, void const* src, size_t srcSize) { const ZSTD_compressionParameters* const cParams = &ms->cParams; diff --git a/lib/compress/zstd_ldm.h b/lib/compress/zstd_ldm.h index 393466fa9..4e68dbf52 100644 --- a/lib/compress/zstd_ldm.h +++ b/lib/compress/zstd_ldm.h @@ -66,7 +66,7 @@ size_t ZSTD_ldm_generateSequences( */ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - ZSTD_useRowMatchFinderMode_e useRowMatchFinder, + ZSTD_paramSwitch_e useRowMatchFinder, void const* src, size_t srcSize); /** diff --git a/lib/compress/zstd_ldm_geartab.h b/lib/compress/zstd_ldm_geartab.h index e5c24d856..647f865be 100644 --- a/lib/compress/zstd_ldm_geartab.h +++ b/lib/compress/zstd_ldm_geartab.h @@ -11,7 +11,10 @@ #ifndef ZSTD_LDM_GEARTAB_H #define ZSTD_LDM_GEARTAB_H -static U64 ZSTD_ldm_gearTab[256] = { +#include "../common/compiler.h" /* UNUSED_ATTR */ +#include "../common/mem.h" /* U64 */ + +static UNUSED_ATTR const U64 ZSTD_ldm_gearTab[256] = { 0xf5b8f72c5f77775c, 0x84935f266b7ac412, 0xb647ada9ca730ccc, 0xb065bb4b114fb1de, 0x34584e7e8c3a9fd0, 0x4e97e17c6ae26b05, 0x3a03d743bc99a604, 0xcecd042422c4044f, 0x76de76c58524259e, diff --git a/lib/compress/zstd_opt.c b/lib/compress/zstd_opt.c index 402a7e5c7..0488589ff 100644 --- a/lib/compress/zstd_opt.c +++ b/lib/compress/zstd_opt.c @@ -8,13 +8,26 @@ * You may select, at your option, one of the above-listed licenses. */ +/** + * Disable inlining for the optimal parser for the kernel build. + * It is unlikely to be used in the kernel, and where it is used + * latency shouldn't matter because it is very slow to begin with. + * We prefer a ~180KB binary size win over faster optimal parsing. + * + * TODO(https://github.com/facebook/zstd/issues/2862): + * Improve the code size of the optimal parser in general, so we + * don't need this hack for the kernel build. 
+ */ +#ifdef ZSTD_LINUX_KERNEL +#define ZSTD_NO_INLINE 1 +#endif + #include "zstd_compress_internal.h" #include "hist.h" #include "zstd_opt.h" #define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */ -#define ZSTD_FREQ_DIV 4 /* log factor when using previous stats to init next stats */ #define ZSTD_MAX_PRICE (1<<30) #define ZSTD_PREDEF_THRESHOLD 1024 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */ @@ -24,11 +37,11 @@ * Price functions for optimal parser ***************************************/ -#if 0 /* approximation at bit level */ +#if 0 /* approximation at bit level (for tests) */ # define BITCOST_ACCURACY 0 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) -# define WEIGHT(stat) ((void)opt, ZSTD_bitWeight(stat)) -#elif 0 /* fractional bit accuracy */ +# define WEIGHT(stat, opt) ((void)opt, ZSTD_bitWeight(stat)) +#elif 0 /* fractional bit accuracy (for tests) */ # define BITCOST_ACCURACY 8 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) # define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat)) @@ -66,7 +79,7 @@ MEM_STATIC double ZSTD_fCost(U32 price) static int ZSTD_compressedLiterals(optState_t const* const optPtr) { - return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed; + return optPtr->literalCompressionMode != ZSTD_ps_disable; } static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel) @@ -79,25 +92,46 @@ static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel) } -/* ZSTD_downscaleStat() : - * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus) - * return the resulting sum of elements */ -static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus) +static U32 sum_u32(const unsigned table[], size_t nbElts) +{ + size_t n; + U32 total = 0; + for (n=0; n 0 && ZSTD_FREQ_DIV+malus < 31); + DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)", (unsigned)lastEltIndex+1, (unsigned)shift); + assert(shift < 30); for (s=0; s> (ZSTD_FREQ_DIV+malus)); + table[s] = 1 + (table[s] >> shift); sum += table[s]; } return sum; } +/* ZSTD_scaleStats() : + * reduce all elements in table is sum too large + * return the resulting sum of elements */ +static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget) +{ + U32 const prevsum = sum_u32(table, lastEltIndex+1); + U32 const factor = prevsum >> logTarget; + DEBUGLOG(5, "ZSTD_scaleStats (nbElts=%u, target=%u)", (unsigned)lastEltIndex+1, (unsigned)logTarget); + assert(logTarget < 30); + if (factor <= 1) return prevsum; + return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor)); +} + /* ZSTD_rescaleFreqs() : * if first block (detected by optPtr->litLengthSum == 0) : init statistics * take hints from dictionary if there is one - * or init from zero, using src for literals stats, or flat 1 for match symbols + * and init from zero if there is none, + * using src for literals stats, and baseline stats for sequence symbols * otherwise downscale existing stats, to be used as seed for next block. */ static void @@ -126,7 +160,7 @@ ZSTD_rescaleFreqs(optState_t* const optPtr, optPtr->litSum = 0; for (lit=0; lit<=MaxLit; lit++) { U32 const scaleLog = 11; /* scale to 2K */ - U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit); + U32 const bitCost = HUF_getNbBitsFromCTable(optPtr->symbolCosts->huf.CTable, lit); assert(bitCost <= scaleLog); optPtr->litFreq[lit] = bitCost ? 
1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/; optPtr->litSum += optPtr->litFreq[lit]; @@ -174,14 +208,18 @@ ZSTD_rescaleFreqs(optState_t* const optPtr, if (compressedLiterals) { unsigned lit = MaxLit; HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */ - optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1); + optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8); } - { unsigned ll; - for (ll=0; ll<=MaxLL; ll++) - optPtr->litLengthFreq[ll] = 1; + { unsigned const baseLLfreqs[MaxLL+1] = { + 4, 2, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1 + }; + ZSTD_memcpy(optPtr->litLengthFreq, baseLLfreqs, sizeof(baseLLfreqs)); optPtr->litLengthSum = sum_u32(baseLLfreqs, MaxLL+1); } - optPtr->litLengthSum = MaxLL+1; { unsigned ml; for (ml=0; ml<=MaxML; ml++) @@ -189,21 +227,25 @@ ZSTD_rescaleFreqs(optState_t* const optPtr, } optPtr->matchLengthSum = MaxML+1; - { unsigned of; - for (of=0; of<=MaxOff; of++) - optPtr->offCodeFreq[of] = 1; + { unsigned const baseOFCfreqs[MaxOff+1] = { + 6, 2, 1, 1, 2, 3, 4, 4, + 4, 3, 2, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1 + }; + ZSTD_memcpy(optPtr->offCodeFreq, baseOFCfreqs, sizeof(baseOFCfreqs)); optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1); } - optPtr->offCodeSum = MaxOff+1; + } } else { /* new block : re-use previous statistics, scaled down */ if (compressedLiterals) - optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1); - optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0); - optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0); - optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0); + optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, MaxLit, 12); + optPtr->litLengthSum = ZSTD_scaleStats(optPtr->litLengthFreq, MaxLL, 11); + optPtr->matchLengthSum = ZSTD_scaleStats(optPtr->matchLengthFreq, MaxML, 11); + optPtr->offCodeSum = ZSTD_scaleStats(optPtr->offCodeFreq, MaxOff, 11); } ZSTD_setBasePrices(optPtr, optLevel); @@ -338,7 +380,7 @@ MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length) /* Update hashTable3 up to ip (excluded) Assumption : always within prefix (i.e. not within extDict) */ -static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms, +static U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms, U32* nextToUpdate3, const BYTE* const ip) { @@ -364,11 +406,13 @@ static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms, * Binary Tree search ***************************************/ /** ZSTD_insertBt1() : add one or multiple positions to tree. - * ip : assumed <= iend-8 . + * @param ip assumed <= iend-8 . + * @param target The target of ZSTD_updateTree_internal() - we are filling to this position * @return : nb of positions added */ static U32 ZSTD_insertBt1( - ZSTD_matchState_t* ms, + const ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iend, + U32 const target, U32 const mls, const int extDict) { const ZSTD_compressionParameters* const cParams = &ms->cParams; @@ -391,7 +435,10 @@ static U32 ZSTD_insertBt1( U32* smallerPtr = bt + 2*(curr&btMask); U32* largerPtr = smallerPtr + 1; U32 dummy32; /* to be nullified at the end */ - U32 const windowLow = ms->window.lowLimit; + /* windowLow is based on target because + * we only need positions that will be in the window at the end of the tree update. 
+ */ + U32 const windowLow = ZSTD_getLowestMatchIndex(ms, target, cParams->windowLog); U32 matchEndIdx = curr+8+1; size_t bestLength = 8; U32 nbCompares = 1U << cParams->searchLog; @@ -404,11 +451,12 @@ static U32 ZSTD_insertBt1( DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr); + assert(curr <= target); assert(ip <= iend-8); /* required for h calculation */ hashTable[h] = curr; /* Update Hash Table */ assert(windowLow > 0); - while (nbCompares-- && (matchIndex >= windowLow)) { + for (; nbCompares && (matchIndex >= windowLow); --nbCompares) { U32* const nextPtr = bt + 2*(matchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ assert(matchIndex < curr); @@ -492,7 +540,7 @@ void ZSTD_updateTree_internal( idx, target, dictMode); while(idx < target) { - U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict); + U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, target, mls, dictMode == ZSTD_extDict); assert(idx < (U32)(idx + forward)); idx += forward; } @@ -635,11 +683,11 @@ U32 ZSTD_insertBtAndGetAllMatches ( return 1; } } } /* no dictMatchState lookup: dicts don't have a populated HC3 table */ - } + } /* if (mls == 3) */ hashTable[h] = curr; /* Update Hash Table */ - while (nbCompares-- && (matchIndex >= matchLow)) { + for (; nbCompares && (matchIndex >= matchLow); --nbCompares) { U32* const nextPtr = bt + 2*(matchIndex & btMask); const BYTE* match; size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ @@ -672,8 +720,7 @@ U32 ZSTD_insertBtAndGetAllMatches ( | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) { if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */ break; /* drop, to preserve bt consistency (miss a little bit of compression) */ - } - } + } } if (match[matchLength] < ip[matchLength]) { /* match smaller than current */ @@ -692,12 +739,13 @@ U32 ZSTD_insertBtAndGetAllMatches ( *smallerPtr = *largerPtr = 0; + assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. 
*/ if (dictMode == ZSTD_dictMatchState && nbCompares) { size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls); U32 dictMatchIndex = dms->hashTable[dmsH]; const U32* const dmsBt = dms->chainTable; commonLengthSmaller = commonLengthLarger = 0; - while (nbCompares-- && (dictMatchIndex > dmsLowLimit)) { + for (; nbCompares && (dictMatchIndex > dmsLowLimit); --nbCompares) { const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ const BYTE* match = dmsBase + dictMatchIndex; @@ -718,8 +766,7 @@ U32 ZSTD_insertBtAndGetAllMatches ( if ( (matchLength > ZSTD_OPT_NUM) | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) { break; /* drop, to guarantee consistency (miss a little bit of compression) */ - } - } + } } if (dictMatchIndex <= dmsBtLow) { break; } /* beyond tree size, stop the search */ if (match[matchLength] < ip[matchLength]) { @@ -729,9 +776,7 @@ U32 ZSTD_insertBtAndGetAllMatches ( /* match is larger than current */ commonLengthLarger = matchLength; dictMatchIndex = nextPtr[0]; - } - } - } + } } } /* if (dictMode == ZSTD_dictMatchState) */ assert(matchEndIdx > curr+8); ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */ @@ -893,17 +938,17 @@ static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_ */ U32 posOvershoot = currPosInBlock - optLdm->endPosInBlock; ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot); - } + } ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes); } ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock); } + /*-******************************* * Optimal parser *********************************/ - static U32 ZSTD_totalLen(ZSTD_optimal_t sol) { return sol.litlen + sol.mlen; @@ -985,7 +1030,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, * in every price. We include the literal length to avoid negative * prices when we subtract the previous literal length. */ - opt[0].price = ZSTD_litLengthPrice(litlen, optStatePtr, optLevel); + opt[0].price = (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel); /* large match -> immediate encoding */ { U32 const maxML = matches[nbMatches-1].len; @@ -1005,7 +1050,8 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, } } /* set prices for first matches starting position == 0 */ - { U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel); + assert(opt[0].price >= 0); + { U32 const literalsPrice = (U32)opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel); U32 pos; U32 matchNb; for (pos = 1; pos < minMatch; pos++) { @@ -1022,7 +1068,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, opt[pos].mlen = pos; opt[pos].off = offset; opt[pos].litlen = litlen; - opt[pos].price = sequencePrice; + opt[pos].price = (int)sequencePrice; } } last_pos = pos-1; } @@ -1037,9 +1083,9 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, /* Fix current position with one literal if cheaper */ { U32 const litlen = (opt[cur-1].mlen == 0) ? 
opt[cur-1].litlen + 1 : 1; int const price = opt[cur-1].price - + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel) - + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel) - - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel); + + (int)ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel) + + (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel) + - (int)ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel); assert(price < 1000000000); /* overflow check */ if (price <= opt[cur].price) { DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)", @@ -1082,9 +1128,10 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */ } + assert(opt[cur].price >= 0); { U32 const ll0 = (opt[cur].mlen != 0); U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0; - U32 const previousPrice = opt[cur].price; + U32 const previousPrice = (U32)opt[cur].price; U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel); U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch); U32 matchNb; @@ -1124,7 +1171,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */ U32 const pos = cur + mlen; - int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel); + int const price = (int)basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel); if ((pos > last_pos) || (price < opt[pos].price)) { DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)", @@ -1220,28 +1267,7 @@ size_t ZSTD_compressBlock_btopt( } -/* used in 2-pass strategy */ -static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus) -{ - U32 s, sum=0; - assert(ZSTD_FREQ_DIV+bonus >= 0); - for (s=0; slitSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0); - optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0); - optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0); - optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0); -} /* ZSTD_initStats_ultra(): * make a first compression pass, just to seed stats with more accurate starting values. 
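For context on the zstd_opt.c hunks above: the fixed ZSTD_FREQ_DIV shift and the ZSTD_upscaleStat()/ZSTD_upscaleStats() "re-inforcement" pass are replaced by a target-driven rescale, in which a block's statistics are only shrunk when their sum exceeds roughly 2^logTarget. The following is a minimal standalone sketch of that policy, not the library code itself: the names sum_u32/downscaleStats/scaleStats mirror the new helpers, highbit32() stands in for ZSTD_highbit32(), DEBUGLOG calls are omitted, and the frequencies in main() are made-up demo values.

    #include <assert.h>
    #include <stdio.h>

    typedef unsigned U32;

    /* stand-in for ZSTD_highbit32(): position of the highest set bit (v > 0) */
    static U32 highbit32(U32 v) { U32 r = 0; assert(v > 0); while (v >>= 1) r++; return r; }

    static U32 sum_u32(const unsigned table[], size_t nbElts)
    {
        size_t n;
        U32 total = 0;
        for (n = 0; n < nbElts; n++) total += table[n];
        return total;
    }

    /* shrink every count by 2^shift, keeping each symbol >= 1 so it stays representable */
    static U32 downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift)
    {
        U32 s, sum = 0;
        assert(shift < 30);
        for (s = 0; s < lastEltIndex + 1; s++) {
            table[s] = 1 + (table[s] >> shift);
            sum += table[s];
        }
        return sum;
    }

    /* rescale only when the table's sum exceeds ~2^logTarget (the new, adaptive policy) */
    static U32 scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget)
    {
        U32 const prevsum = sum_u32(table, lastEltIndex + 1);
        U32 const factor  = prevsum >> logTarget;
        assert(logTarget < 30);
        if (factor <= 1) return prevsum;   /* already at or below target: keep stats as-is */
        return downscaleStats(table, lastEltIndex, highbit32(factor));
    }

    int main(void)
    {
        /* hypothetical literal-length frequencies, just to exercise the helpers */
        unsigned freq[4] = { 40000, 20000, 6000, 1000 };
        printf("rescaled sum = %u\n", scaleStats(freq, 3, 11));   /* target ~2K total */
        return 0;
    }

With logTarget around 11 or 12, as the hunks above use for litSum and the sequence tables, statistics carried over from the previous block are left untouched whenever they are already small, which is the behavioural change this part of the patch is after.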
@@ -1272,8 +1298,6 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms, ms->window.lowLimit = ms->window.dictLimit; ms->nextToUpdate = ms->window.dictLimit; - /* re-inforce weight of collected statistics */ - ZSTD_upscaleStats(&ms->opt); } size_t ZSTD_compressBlock_btultra( diff --git a/lib/compress/zstdmt_compress.c b/lib/compress/zstdmt_compress.c index 22aa3e124..2aec41dda 100644 --- a/lib/compress/zstdmt_compress.c +++ b/lib/compress/zstdmt_compress.c @@ -467,7 +467,7 @@ ZSTDMT_serialState_reset(serialState_t* serialState, ZSTD_dictContentType_e dictContentType) { /* Adjust parameters */ - if (params.ldmParams.enableLdm) { + if (params.ldmParams.enableLdm == ZSTD_ps_enable) { DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10); ZSTD_ldm_adjustParameters(¶ms.ldmParams, ¶ms.cParams); assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog); @@ -478,7 +478,7 @@ ZSTDMT_serialState_reset(serialState_t* serialState, serialState->nextJobID = 0; if (params.fParams.checksumFlag) XXH64_reset(&serialState->xxhState, 0); - if (params.ldmParams.enableLdm) { + if (params.ldmParams.enableLdm == ZSTD_ps_enable) { ZSTD_customMem cMem = params.customMem; unsigned const hashLog = params.ldmParams.hashLog; size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t); @@ -564,7 +564,7 @@ static void ZSTDMT_serialState_update(serialState_t* serialState, /* A future job may error and skip our job */ if (serialState->nextJobID == jobID) { /* It is now our turn, do any processing necessary */ - if (serialState->params.ldmParams.enableLdm) { + if (serialState->params.ldmParams.enableLdm == ZSTD_ps_enable) { size_t error; assert(seqStore.seq != NULL && seqStore.pos == 0 && seqStore.size == 0 && seqStore.capacity > 0); @@ -594,7 +594,7 @@ static void ZSTDMT_serialState_update(serialState_t* serialState, if (seqStore.size > 0) { size_t const err = ZSTD_referenceExternalSequences( jobCCtx, seqStore.seq, seqStore.size); - assert(serialState->params.ldmParams.enableLdm); + assert(serialState->params.ldmParams.enableLdm == ZSTD_ps_enable); assert(!ZSTD_isError(err)); (void)err; } @@ -672,7 +672,7 @@ static void ZSTDMT_compressionJob(void* jobDescription) if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation)); job->dstBuff = dstBuff; /* this value can be read in ZSTDMT_flush, when it copies the whole job */ } - if (jobParams.ldmParams.enableLdm && rawSeqStore.seq == NULL) + if (jobParams.ldmParams.enableLdm == ZSTD_ps_enable && rawSeqStore.seq == NULL) JOB_ERROR(ERROR(memory_allocation)); /* Don't compute the checksum for chunks, since we compute it externally, @@ -680,7 +680,7 @@ static void ZSTDMT_compressionJob(void* jobDescription) */ if (job->jobID != 0) jobParams.fParams.checksumFlag = 0; /* Don't run LDM for the chunks, since we handle it externally */ - jobParams.ldmParams.enableLdm = 0; + jobParams.ldmParams.enableLdm = ZSTD_ps_disable; /* Correct nbWorkers to 0. */ jobParams.nbWorkers = 0; @@ -807,6 +807,15 @@ typedef struct { static const roundBuff_t kNullRoundBuff = {NULL, 0, 0}; #define RSYNC_LENGTH 32 +/* Don't create chunks smaller than the zstd block size. + * This stops us from regressing compression ratio too much, + * and ensures our output fits in ZSTD_compressBound(). + * + * If this is shrunk < ZSTD_BLOCKSIZELOG_MIN then + * ZSTD_COMPRESSBOUND() will need to be updated. 
+ */ +#define RSYNC_MIN_BLOCK_LOG ZSTD_BLOCKSIZELOG_MAX +#define RSYNC_MIN_BLOCK_SIZE (1<ldmParams.enableLdm) { + if (params->ldmParams.enableLdm == ZSTD_ps_enable) { /* In Long Range Mode, the windowLog is typically oversized. * In which case, it's preferable to determine the jobSize * based on cycleLog instead. */ @@ -1179,7 +1188,7 @@ static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params) int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy); int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog); assert(0 <= overlapRLog && overlapRLog <= 8); - if (params->ldmParams.enableLdm) { + if (params->ldmParams.enableLdm == ZSTD_ps_enable) { /* In Long Range Mode, the windowLog is typically oversized. * In which case, it's preferable to determine the jobSize * based on chainLog instead. @@ -1252,6 +1261,9 @@ size_t ZSTDMT_initCStream_internal( /* Aim for the targetsectionSize as the average job size. */ U32 const jobSizeKB = (U32)(mtctx->targetSectionSize >> 10); U32 const rsyncBits = (assert(jobSizeKB >= 1), ZSTD_highbit32(jobSizeKB) + 10); + /* We refuse to create jobs < RSYNC_MIN_BLOCK_SIZE bytes, so make sure our + * expected job size is at least 4x larger. */ + assert(rsyncBits >= RSYNC_MIN_BLOCK_LOG + 2); DEBUGLOG(4, "rsyncLog = %u", rsyncBits); mtctx->rsync.hash = 0; mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1; @@ -1263,7 +1275,7 @@ size_t ZSTDMT_initCStream_internal( ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize)); { /* If ldm is enabled we need windowSize space. */ - size_t const windowSize = mtctx->params.ldmParams.enableLdm ? (1U << mtctx->params.cParams.windowLog) : 0; + size_t const windowSize = mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable ? (1U << mtctx->params.cParams.windowLog) : 0; /* Two buffers of slack, plus extra space for the overlap * This is the minimum slack that LDM works with. One extra because * flush might waste up to targetSectionSize-1 bytes. Another extra @@ -1575,7 +1587,7 @@ static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window) static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer) { - if (mtctx->params.ldmParams.enableLdm) { + if (mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable) { ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex; DEBUGLOG(5, "ZSTDMT_waitForLdmComplete"); DEBUGLOG(5, "source [0x%zx, 0x%zx)", @@ -1678,6 +1690,11 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) if (!mtctx->params.rsyncable) /* Rsync is disabled. */ return syncPoint; + if (mtctx->inBuff.filled + input.size - input.pos < RSYNC_MIN_BLOCK_SIZE) + /* We don't emit synchronization points if it would produce too small blocks. + * We don't have enough input to find a synchronization point, so don't look. + */ + return syncPoint; if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH) /* Not enough to compute the hash. * We will miss any synchronization points in this RSYNC_LENGTH byte @@ -1688,10 +1705,28 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) */ return syncPoint; /* Initialize the loop variables. */ - if (mtctx->inBuff.filled >= RSYNC_LENGTH) { - /* We have enough bytes buffered to initialize the hash. + if (mtctx->inBuff.filled < RSYNC_MIN_BLOCK_SIZE) { + /* We don't need to scan the first RSYNC_MIN_BLOCK_SIZE positions + * because they can't possibly be a sync point. So we can start + * part way through the input buffer. 
+ */ + pos = RSYNC_MIN_BLOCK_SIZE - mtctx->inBuff.filled; + if (pos >= RSYNC_LENGTH) { + prev = istart + pos - RSYNC_LENGTH; + hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH); + } else { + assert(mtctx->inBuff.filled >= RSYNC_LENGTH); + prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH; + hash = ZSTD_rollingHash_compute(prev + pos, (RSYNC_LENGTH - pos)); + hash = ZSTD_rollingHash_append(hash, istart, pos); + } + } else { + /* We have enough bytes buffered to initialize the hash, + * and are have processed enough bytes to find a sync point. * Start scanning at the beginning of the input. */ + assert(mtctx->inBuff.filled >= RSYNC_MIN_BLOCK_SIZE); + assert(RSYNC_MIN_BLOCK_SIZE >= RSYNC_LENGTH); pos = 0; prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH; hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH); @@ -1705,16 +1740,6 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) syncPoint.flush = 1; return syncPoint; } - } else { - /* We don't have enough bytes buffered to initialize the hash, but - * we know we have at least RSYNC_LENGTH bytes total. - * Start scanning after the first RSYNC_LENGTH bytes less the bytes - * already buffered. - */ - pos = RSYNC_LENGTH - mtctx->inBuff.filled; - prev = (BYTE const*)mtctx->inBuff.buffer.start - pos; - hash = ZSTD_rollingHash_compute(mtctx->inBuff.buffer.start, mtctx->inBuff.filled); - hash = ZSTD_rollingHash_append(hash, istart, pos); } /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll * through the input. If we hit a synchronization point, then cut the @@ -1726,8 +1751,9 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) */ for (; pos < syncPoint.toLoad; ++pos) { BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH]; - /* if (pos >= RSYNC_LENGTH) assert(ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); */ + assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower); + assert(mtctx->inBuff.filled + pos >= RSYNC_MIN_BLOCK_SIZE); if ((hash & hitMask) == hitMask) { syncPoint.toLoad = pos + 1; syncPoint.flush = 1; diff --git a/lib/decompress/huf_decompress.c b/lib/decompress/huf_decompress.c index b93c9a003..dd6c466c7 100644 --- a/lib/decompress/huf_decompress.c +++ b/lib/decompress/huf_decompress.c @@ -22,6 +22,13 @@ #define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "../common/error_private.h" +#include "../common/zstd_internal.h" + +/* ************************************************************** +* Constants +****************************************************************/ + +#define HUF_DECODER_FAST_TABLELOG 11 /* ************************************************************** * Macros @@ -36,6 +43,54 @@ #error "Cannot force the use of the X1 and X2 decoders at the same time!" #endif +/* Only use assembly on Linux / MacOS. + * Disable when MSAN is enabled. + */ +#if defined(__linux__) || defined(__linux) || defined(__APPLE__) +# if ZSTD_MEMORY_SANITIZER +# define HUF_ASM_SUPPORTED 0 +# elif ZSTD_DATAFLOW_SANITIZER +# define HUF_ASM_SUPPORTED 0 +# else +# define HUF_ASM_SUPPORTED 1 +# endif +#else +# define HUF_ASM_SUPPORTED 0 +#endif + +/* HUF_DISABLE_ASM: Disables all ASM implementations. 
*/ +#if !defined(HUF_DISABLE_ASM) && \ + HUF_ASM_SUPPORTED && \ + defined(__x86_64__) && (DYNAMIC_BMI2 || defined(__BMI2__)) +# define HUF_ENABLE_ASM_X86_64_BMI2 1 +#else +# define HUF_ENABLE_ASM_X86_64_BMI2 0 +#endif + +#if HUF_ENABLE_ASM_X86_64_BMI2 && DYNAMIC_BMI2 +# define HUF_ASM_X86_64_BMI2_ATTRS BMI2_TARGET_ATTRIBUTE +#else +# define HUF_ASM_X86_64_BMI2_ATTRS +#endif + +#ifdef __cplusplus +# define HUF_EXTERN_C extern "C" +#else +# define HUF_EXTERN_C +#endif +#define HUF_ASM_DECL HUF_EXTERN_C + +#if DYNAMIC_BMI2 || (HUF_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)) +# define HUF_NEED_BMI2_FUNCTION 1 +#else +# define HUF_NEED_BMI2_FUNCTION 0 +#endif + +#if !(HUF_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)) +# define HUF_NEED_DEFAULT_FUNCTION 1 +#else +# define HUF_NEED_DEFAULT_FUNCTION 0 +#endif /* ************************************************************** * Error Management @@ -65,7 +120,7 @@ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ } \ \ - static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2( \ + static BMI2_TARGET_ATTRIBUTE size_t fn##_bmi2( \ void* dst, size_t dstSize, \ const void* cSrc, size_t cSrcSize, \ const HUF_DTable* DTable) \ @@ -107,13 +162,146 @@ static DTableDesc HUF_getDTableDesc(const HUF_DTable* table) return dtd; } +#if HUF_ENABLE_ASM_X86_64_BMI2 + +static size_t HUF_initDStream(BYTE const* ip) { + BYTE const lastByte = ip[7]; + size_t const bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; + size_t const value = MEM_readLEST(ip) | 1; + assert(bitsConsumed <= 8); + return value << bitsConsumed; +} +typedef struct { + BYTE const* ip[4]; + BYTE* op[4]; + U64 bits[4]; + void const* dt; + BYTE const* ilimit; + BYTE* oend; + BYTE const* iend[4]; +} HUF_DecompressAsmArgs; + +/** + * Initializes args for the asm decoding loop. + * @returns 0 on success + * 1 if the fallback implementation should be used. + * Or an error code on failure. + */ +static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, size_t dstSize, void const* src, size_t srcSize, const HUF_DTable* DTable) +{ + void const* dt = DTable + 1; + U32 const dtLog = HUF_getDTableDesc(DTable).tableLog; + + const BYTE* const ilimit = (const BYTE*)src + 6 + 8; + + BYTE* const oend = (BYTE*)dst + dstSize; + + /* We're assuming x86-64 BMI2 - assure that this is the case. */ + assert(MEM_isLittleEndian() && !MEM_32bits()); + + /* strict minimum : jump table + 1 byte per stream */ + if (srcSize < 10) + return ERROR(corruption_detected); + + /* Must have at least 8 bytes per stream because we don't handle initializing smaller bit containers. + * If table log is not correct at this point, fallback to the old decoder. + * On small inputs we don't have enough data to trigger the fast loop, so use the old decoder. + */ + if (dtLog != HUF_DECODER_FAST_TABLELOG) + return 1; + + /* Read the jump table. */ + { + const BYTE* const istart = (const BYTE*)src; + size_t const length1 = MEM_readLE16(istart); + size_t const length2 = MEM_readLE16(istart+2); + size_t const length3 = MEM_readLE16(istart+4); + size_t const length4 = srcSize - (length1 + length2 + length3 + 6); + args->iend[0] = istart + 6; /* jumpTable */ + args->iend[1] = args->iend[0] + length1; + args->iend[2] = args->iend[1] + length2; + args->iend[3] = args->iend[2] + length3; + + /* HUF_initDStream() requires this, and this small of an input + * won't benefit from the ASM loop anyways. + * length1 must be >= 16 so that ip[0] >= ilimit before the loop + * starts. 
+ */ + if (length1 < 16 || length2 < 8 || length3 < 8 || length4 < 8) + return 1; + if (length4 > srcSize) return ERROR(corruption_detected); /* overflow */ + } + /* ip[] contains the position that is currently loaded into bits[]. */ + args->ip[0] = args->iend[1] - sizeof(U64); + args->ip[1] = args->iend[2] - sizeof(U64); + args->ip[2] = args->iend[3] - sizeof(U64); + args->ip[3] = (BYTE const*)src + srcSize - sizeof(U64); + + /* op[] contains the output pointers. */ + args->op[0] = (BYTE*)dst; + args->op[1] = args->op[0] + (dstSize+3)/4; + args->op[2] = args->op[1] + (dstSize+3)/4; + args->op[3] = args->op[2] + (dstSize+3)/4; + + /* No point to call the ASM loop for tiny outputs. */ + if (args->op[3] >= oend) + return 1; + + /* bits[] is the bit container. + * It is read from the MSB down to the LSB. + * It is shifted left as it is read, and zeros are + * shifted in. After the lowest valid bit a 1 is + * set, so that CountTrailingZeros(bits[]) can be used + * to count how many bits we've consumed. + */ + args->bits[0] = HUF_initDStream(args->ip[0]); + args->bits[1] = HUF_initDStream(args->ip[1]); + args->bits[2] = HUF_initDStream(args->ip[2]); + args->bits[3] = HUF_initDStream(args->ip[3]); + + /* If ip[] >= ilimit, it is guaranteed to be safe to + * reload bits[]. It may be beyond its section, but is + * guaranteed to be valid (>= istart). + */ + args->ilimit = ilimit; + + args->oend = oend; + args->dt = dt; + + return 0; +} + +static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressAsmArgs const* args, int stream, BYTE* segmentEnd) +{ + /* Validate that we haven't overwritten. */ + if (args->op[stream] > segmentEnd) + return ERROR(corruption_detected); + /* Validate that we haven't read beyond iend[]. + * Note that ip[] may be < iend[] because the MSB is + * the next bit to read, and we may have consumed 100% + * of the stream, so down to iend[i] - 8 is valid. + */ + if (args->ip[stream] < args->iend[stream] - 8) + return ERROR(corruption_detected); + + /* Construct the BIT_DStream_t. */ + bit->bitContainer = MEM_readLE64(args->ip[stream]); + bit->bitsConsumed = ZSTD_countTrailingZeros((size_t)args->bits[stream]); + bit->start = (const char*)args->iend[0]; + bit->limitPtr = bit->start + sizeof(size_t); + bit->ptr = (const char*)args->ip[stream]; + + return 0; +} +#endif + #ifndef HUF_FORCE_DECOMPRESS_X2 /*-***************************/ /* single-symbol decoding */ /*-***************************/ -typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1; /* single-symbol decoding */ +typedef struct { BYTE nbBits; BYTE byte; } HUF_DEltX1; /* single-symbol decoding */ /** * Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at @@ -122,14 +310,44 @@ typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1; /* single-symbol decodi static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) { U64 D4; if (MEM_isLittleEndian()) { - D4 = symbol + (nbBits << 8); - } else { D4 = (symbol << 8) + nbBits; + } else { + D4 = symbol + (nbBits << 8); } D4 *= 0x0001000100010001ULL; return D4; } +/** + * Increase the tableLog to targetTableLog and rescales the stats. + * If tableLog > targetTableLog this is a no-op. + * @returns New tableLog + */ +static U32 HUF_rescaleStats(BYTE* huffWeight, U32* rankVal, U32 nbSymbols, U32 tableLog, U32 targetTableLog) +{ + if (tableLog > targetTableLog) + return tableLog; + if (tableLog < targetTableLog) { + U32 const scale = targetTableLog - tableLog; + U32 s; + /* Increase the weight for all non-zero probability symbols by scale. 
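The bit containers set up by HUF_initDStream are self-describing: a sentinel 1 is OR'd in below the last valid bit, the container is consumed from the MSB down by shifting left, and the count of trailing zeros therefore always equals the number of bits consumed, which is what HUF_initRemainingDStream reads back with ZSTD_countTrailingZeros. A portable sketch of that trick, with loop-based helpers standing in for the intrinsics:

    #include <stdint.h>

    static int highbit8(unsigned b)   /* position of the highest set bit; b must be non-zero */
    {
        int n = 0;
        while (b >>= 1) ++n;
        return n;
    }

    /* Load the last 8 bytes of a stream into a self-describing container:
     * bits are consumed from the MSB down by shifting left, and a sentinel 1
     * below the last valid bit makes ctz(container) equal the bits consumed. */
    static uint64_t bits_init(const unsigned char* ip /* last 8 bytes of the stream */)
    {
        uint64_t v = 0;
        int i;
        int const pad = ip[7] ? 8 - highbit8(ip[7]) : 0;   /* padding zeros + end-of-stream marker */
        for (i = 0; i < 8; ++i) v |= (uint64_t)ip[i] << (8 * i);   /* little-endian load */
        v |= 1;                                            /* plant the sentinel */
        return v << pad;
    }

    static int bits_consumed(uint64_t container)           /* what ZSTD_countTrailingZeros recovers */
    {
        int n = 0;
        if (container == 0) return 64;                     /* sentinel shifted out: fully consumed */
        while (!(container & 1)) { container >>= 1; ++n; }
        return n;
    }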
*/ + for (s = 0; s < nbSymbols; ++s) { + huffWeight[s] += (BYTE)((huffWeight[s] == 0) ? 0 : scale); + } + /* Update rankVal to reflect the new weights. + * All weights except 0 get moved to weight + scale. + * Weights [1, scale] are empty. + */ + for (s = targetTableLog; s > scale; --s) { + rankVal[s] = rankVal[s - scale]; + } + for (s = scale; s > 0; --s) { + rankVal[s] = 0; + } + } + return targetTableLog; +} + typedef struct { U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; U32 rankStart[HUF_TABLELOG_ABSOLUTEMAX + 1]; @@ -162,8 +380,12 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2); if (HUF_isError(iSize)) return iSize; + /* Table header */ { DTableDesc dtd = HUF_getDTableDesc(DTable); + U32 const maxTableLog = dtd.maxTableLog + 1; + U32 const targetTableLog = MIN(maxTableLog, HUF_DECODER_FAST_TABLELOG); + tableLog = HUF_rescaleStats(wksp->huffWeight, wksp->rankVal, nbSymbols, tableLog, targetTableLog); if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */ dtd.tableType = 0; dtd.tableLog = (BYTE)tableLog; @@ -207,7 +429,7 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr /* fill DTable * We fill all entries of each weight in order. - * That way length is a constant for each iteration of the outter loop. + * That way length is a constant for each iteration of the outer loop. * We can switch based on the length to a different inner loop which is * optimized for that particular case. */ @@ -304,11 +526,15 @@ HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, cons BYTE* const pStart = p; /* up to 4 symbols at a time */ - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) { - HUF_DECODE_SYMBOLX1_2(p, bitDPtr); - HUF_DECODE_SYMBOLX1_1(p, bitDPtr); - HUF_DECODE_SYMBOLX1_2(p, bitDPtr); - HUF_DECODE_SYMBOLX1_0(p, bitDPtr); + if ((pEnd - p) > 3) { + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) { + HUF_DECODE_SYMBOLX1_2(p, bitDPtr); + HUF_DECODE_SYMBOLX1_1(p, bitDPtr); + HUF_DECODE_SYMBOLX1_2(p, bitDPtr); + HUF_DECODE_SYMBOLX1_0(p, bitDPtr); + } + } else { + BIT_reloadDStream(bitDPtr); } /* [0-3] symbols remaining */ @@ -388,33 +614,36 @@ HUF_decompress4X1_usingDTable_internal_body( U32 endSignal = 1; if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); CHECK_F( BIT_initDStream(&bitD4, istart4, length4) ); /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */ - for ( ; (endSignal) & (op4 < olimit) ; ) { - HUF_DECODE_SYMBOLX1_2(op1, &bitD1); - HUF_DECODE_SYMBOLX1_2(op2, &bitD2); - HUF_DECODE_SYMBOLX1_2(op3, &bitD3); - HUF_DECODE_SYMBOLX1_2(op4, &bitD4); - HUF_DECODE_SYMBOLX1_1(op1, &bitD1); - HUF_DECODE_SYMBOLX1_1(op2, &bitD2); - HUF_DECODE_SYMBOLX1_1(op3, &bitD3); - HUF_DECODE_SYMBOLX1_1(op4, &bitD4); - HUF_DECODE_SYMBOLX1_2(op1, &bitD1); - HUF_DECODE_SYMBOLX1_2(op2, &bitD2); - HUF_DECODE_SYMBOLX1_2(op3, &bitD3); - HUF_DECODE_SYMBOLX1_2(op4, &bitD4); - HUF_DECODE_SYMBOLX1_0(op1, &bitD1); - HUF_DECODE_SYMBOLX1_0(op2, &bitD2); - 
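HUF_rescaleStats works because a symbol of weight w in a table of log size L is decoded in (L + 1 - w) bits, so adding `scale` to both the table log and every non-zero weight leaves all code lengths untouched while growing the table to the HUF_DECODER_FAST_TABLELOG size the fast loop expects. A small standalone demonstration of that identity (the concrete values are chosen for illustration only):

    #include <stdio.h>

    int main(void)
    {
        unsigned const tableLog = 8, targetTableLog = 11;   /* e.g. rescale an 8-bit table to 11 bits */
        unsigned const scale = targetTableLog - tableLog;
        unsigned w;
        for (w = 1; w <= tableLog; ++w) {
            unsigned const nbBitsBefore = tableLog + 1 - w;                  /* code length at weight w */
            unsigned const nbBitsAfter  = targetTableLog + 1 - (w + scale);  /* same symbol after rescale */
            printf("w=%u -> w'=%u : %u bits == %u bits\n",
                   w, w + scale, nbBitsBefore, nbBitsAfter);
        }
        return 0;
    }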
HUF_DECODE_SYMBOLX1_0(op3, &bitD3); - HUF_DECODE_SYMBOLX1_0(op4, &bitD4); - endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished; - endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished; - endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished; - endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished; + if ((size_t)(oend - op4) >= sizeof(size_t)) { + for ( ; (endSignal) & (op4 < olimit) ; ) { + HUF_DECODE_SYMBOLX1_2(op1, &bitD1); + HUF_DECODE_SYMBOLX1_2(op2, &bitD2); + HUF_DECODE_SYMBOLX1_2(op3, &bitD3); + HUF_DECODE_SYMBOLX1_2(op4, &bitD4); + HUF_DECODE_SYMBOLX1_1(op1, &bitD1); + HUF_DECODE_SYMBOLX1_1(op2, &bitD2); + HUF_DECODE_SYMBOLX1_1(op3, &bitD3); + HUF_DECODE_SYMBOLX1_1(op4, &bitD4); + HUF_DECODE_SYMBOLX1_2(op1, &bitD1); + HUF_DECODE_SYMBOLX1_2(op2, &bitD2); + HUF_DECODE_SYMBOLX1_2(op3, &bitD3); + HUF_DECODE_SYMBOLX1_2(op4, &bitD4); + HUF_DECODE_SYMBOLX1_0(op1, &bitD1); + HUF_DECODE_SYMBOLX1_0(op2, &bitD2); + HUF_DECODE_SYMBOLX1_0(op3, &bitD3); + HUF_DECODE_SYMBOLX1_0(op4, &bitD4); + endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished; + } } /* check corruption */ @@ -440,6 +669,79 @@ HUF_decompress4X1_usingDTable_internal_body( } } +#if HUF_NEED_BMI2_FUNCTION +static BMI2_TARGET_ATTRIBUTE +size_t HUF_decompress4X1_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc, + size_t cSrcSize, HUF_DTable const* DTable) { + return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); +} +#endif + +#if HUF_NEED_DEFAULT_FUNCTION +static +size_t HUF_decompress4X1_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc, + size_t cSrcSize, HUF_DTable const* DTable) { + return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); +} +#endif + +#if HUF_ENABLE_ASM_X86_64_BMI2 + +HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args); + +static HUF_ASM_X86_64_BMI2_ATTRS +size_t +HUF_decompress4X1_usingDTable_internal_bmi2_asm( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + void const* dt = DTable + 1; + const BYTE* const iend = (const BYTE*)cSrc + 6; + BYTE* const oend = (BYTE*)dst + dstSize; + HUF_DecompressAsmArgs args; + { + size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); + FORWARD_IF_ERROR(ret, "Failed to init asm args"); + if (ret != 0) + return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); + } + + assert(args.ip[0] >= args.ilimit); + HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(&args); + + /* Our loop guarantees that ip[] >= ilimit and that we haven't + * overwritten any op[]. + */ + assert(args.ip[0] >= iend); + assert(args.ip[1] >= iend); + assert(args.ip[2] >= iend); + assert(args.ip[3] >= iend); + assert(args.op[3] <= oend); + (void)iend; + + /* finish bit streams one by one. 
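The 4X1 body above, like the other 4-stream decoders, splits the output into four segments of ceil(dstSize/4) bytes, which is why the new `opStart4 > oend` check matters: for very small outputs the start of the fourth segment can already fall past the buffer. A sketch of the layout (split_output is an illustrative helper, not a zstd function):

    #include <stddef.h>

    static void split_output(unsigned char* dst, size_t dstSize, unsigned char* op[4])
    {
        size_t const segmentSize = (dstSize + 3) / 4;   /* only the last segment may be shorter */
        op[0] = dst;
        op[1] = op[0] + segmentSize;
        op[2] = op[1] + segmentSize;
        op[3] = op[2] + segmentSize;
        /* The decoder rejects the block if op[3] already points past dst + dstSize:
         * with four interleaved streams there must be room for stream 4 to write. */
    }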
*/ + { + size_t const segmentSize = (dstSize+3) / 4; + BYTE* segmentEnd = (BYTE*)dst; + int i; + for (i = 0; i < 4; ++i) { + BIT_DStream_t bit; + if (segmentSize <= (size_t)(oend - segmentEnd)) + segmentEnd += segmentSize; + else + segmentEnd = oend; + FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption"); + /* Decompress and validate that we've produced exactly the expected length. */ + args.op[i] += HUF_decodeStreamX1(args.op[i], &bit, segmentEnd, (HUF_DEltX1 const*)dt, HUF_DECODER_FAST_TABLELOG); + if (args.op[i] != segmentEnd) return ERROR(corruption_detected); + } + } + + /* decoded size */ + return dstSize; +} +#endif /* HUF_ENABLE_ASM_X86_64_BMI2 */ typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize, const void *cSrc, @@ -447,8 +749,28 @@ typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize, const HUF_DTable *DTable); HUF_DGEN(HUF_decompress1X1_usingDTable_internal) -HUF_DGEN(HUF_decompress4X1_usingDTable_internal) +static size_t HUF_decompress4X1_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc, + size_t cSrcSize, HUF_DTable const* DTable, int bmi2) +{ +#if DYNAMIC_BMI2 + if (bmi2) { +# if HUF_ENABLE_ASM_X86_64_BMI2 + return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); +# else + return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); +# endif + } +#else + (void)bmi2; +#endif + +#if HUF_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__) + return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); +#else + return HUF_decompress4X1_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable); +#endif +} size_t HUF_decompress1X1_usingDTable( @@ -518,106 +840,226 @@ size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, /* *************************/ typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2; /* double-symbols decoding */ -typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t; +typedef struct { BYTE symbol; } sortedSymbol_t; typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1]; typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX]; +/** + * Constructs a HUF_DEltX2 in a U32. + */ +static U32 HUF_buildDEltX2U32(U32 symbol, U32 nbBits, U32 baseSeq, int level) +{ + U32 seq; + DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, sequence) == 0); + DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, nbBits) == 2); + DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, length) == 3); + DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U32)); + if (MEM_isLittleEndian()) { + seq = level == 1 ? symbol : (baseSeq + (symbol << 8)); + return seq + (nbBits << 16) + ((U32)level << 24); + } else { + seq = level == 1 ? (symbol << 8) : ((baseSeq << 8) + symbol); + return (seq << 16) + (nbBits << 8) + (U32)level; + } +} + +/** + * Constructs a HUF_DEltX2. + */ +static HUF_DEltX2 HUF_buildDEltX2(U32 symbol, U32 nbBits, U32 baseSeq, int level) +{ + HUF_DEltX2 DElt; + U32 const val = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level); + DEBUG_STATIC_ASSERT(sizeof(DElt) == sizeof(val)); + ZSTD_memcpy(&DElt, &val, sizeof(val)); + return DElt; +} + +/** + * Constructs 2 HUF_DEltX2s and packs them into a U64. + */ +static U64 HUF_buildDEltX2U64(U32 symbol, U32 nbBits, U16 baseSeq, int level) +{ + U32 DElt = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level); + return (U64)DElt + ((U64)DElt << 32); +} + +/** + * Fills the DTable rank with all the symbols from [begin, end) that are each + * nbBits long. 
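The new HUF_decompress4X1_usingDTable_internal above is a three-way dispatcher: the BMI2 assembly loop when the runtime flag says BMI2 is available, the C body compiled with BMI2 attributes as a fallback, and the plain C body otherwise. The shape of that dispatch, with illustrative function-pointer names rather than zstd symbols:

    #include <stddef.h>

    typedef size_t (*decode_fn)(void* dst, size_t dstCapacity, const void* src, size_t srcSize);

    /* decode_asm / decode_bmi2 / decode_default stand in for the three bodies above;
     * callers are expected to pass valid pointers for the paths they enable. */
    static size_t decode_dispatch(void* dst, size_t dstCapacity, const void* src, size_t srcSize,
                                  int cpuHasBmi2, int asmEnabled,
                                  decode_fn decode_asm, decode_fn decode_bmi2, decode_fn decode_default)
    {
        if (cpuHasBmi2)
            return (asmEnabled ? decode_asm : decode_bmi2)(dst, dstCapacity, src, srcSize);
        return decode_default(dst, dstCapacity, src, srcSize);
    }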
+ * + * @param DTableRank The start of the rank in the DTable. + * @param begin The first symbol to fill (inclusive). + * @param end The last symbol to fill (exclusive). + * @param nbBits Each symbol is nbBits long. + * @param tableLog The table log. + * @param baseSeq If level == 1 { 0 } else { the first level symbol } + * @param level The level in the table. Must be 1 or 2. + */ +static void HUF_fillDTableX2ForWeight( + HUF_DEltX2* DTableRank, + sortedSymbol_t const* begin, sortedSymbol_t const* end, + U32 nbBits, U32 tableLog, + U16 baseSeq, int const level) +{ + U32 const length = 1U << ((tableLog - nbBits) & 0x1F /* quiet static-analyzer */); + const sortedSymbol_t* ptr; + assert(level >= 1 && level <= 2); + switch (length) { + case 1: + for (ptr = begin; ptr != end; ++ptr) { + HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level); + *DTableRank++ = DElt; + } + break; + case 2: + for (ptr = begin; ptr != end; ++ptr) { + HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level); + DTableRank[0] = DElt; + DTableRank[1] = DElt; + DTableRank += 2; + } + break; + case 4: + for (ptr = begin; ptr != end; ++ptr) { + U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); + ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2)); + DTableRank += 4; + } + break; + case 8: + for (ptr = begin; ptr != end; ++ptr) { + U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); + ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2)); + DTableRank += 8; + } + break; + default: + for (ptr = begin; ptr != end; ++ptr) { + U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); + HUF_DEltX2* const DTableRankEnd = DTableRank + length; + for (; DTableRank != DTableRankEnd; DTableRank += 8) { + ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2)); + } + } + break; + } +} /* HUF_fillDTableX2Level2() : * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */ -static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed, - const U32* rankValOrigin, const int minWeight, - const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, - U32 nbBitsBaseline, U16 baseSeq, U32* wksp, size_t wkspSize) +static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 targetLog, const U32 consumedBits, + const U32* rankVal, const int minWeight, const int maxWeight1, + const sortedSymbol_t* sortedSymbols, U32 const* rankStart, + U32 nbBitsBaseline, U16 baseSeq) { - HUF_DEltX2 DElt; - U32* rankVal = wksp; - - assert(wkspSize >= HUF_TABLELOG_MAX + 1); - (void)wkspSize; - /* get pre-calculated rankVal */ - ZSTD_memcpy(rankVal, rankValOrigin, sizeof(U32) * (HUF_TABLELOG_MAX + 1)); - - /* fill skipped values */ + /* Fill skipped values (all positions up to rankVal[minWeight]). + * These are positions only get a single symbol because the combined weight + * is too large. 
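HUF_fillDTableX2ForWeight spreads each symbol over `1 << (tableLog - nbBits)` consecutive cells and, for the common power-of-two lengths, writes two packed 4-byte entries per 8-byte memcpy. A reduced sketch of that fill pattern (fill_entries is illustrative; duplicating the same 32-bit value in both halves makes the store endian-neutral):

    #include <stdint.h>
    #include <string.h>

    /* Fill `length` consecutive 4-byte table entries with the same value.
     * `length` is assumed to be a power of two, as in the DTable ranks. */
    static void fill_entries(uint32_t* table, size_t length, uint32_t entry)
    {
        uint64_t const pair = (uint64_t)entry | ((uint64_t)entry << 32);
        size_t i;
        if (length == 1) { table[0] = entry; return; }
        for (i = 0; i < length; i += 2)
            memcpy(table + i, &pair, sizeof(pair));   /* one unaligned-safe 8-byte store covers 2 cells */
    }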
+ */ if (minWeight>1) { - U32 i, skipSize = rankVal[minWeight]; - MEM_writeLE16(&(DElt.sequence), baseSeq); - DElt.nbBits = (BYTE)(consumed); - DElt.length = 1; - for (i = 0; i < skipSize; i++) - DTable[i] = DElt; + U32 const length = 1U << ((targetLog - consumedBits) & 0x1F /* quiet static-analyzer */); + U64 const DEltX2 = HUF_buildDEltX2U64(baseSeq, consumedBits, /* baseSeq */ 0, /* level */ 1); + int const skipSize = rankVal[minWeight]; + assert(length > 1); + assert((U32)skipSize < length); + switch (length) { + case 2: + assert(skipSize == 1); + ZSTD_memcpy(DTable, &DEltX2, sizeof(DEltX2)); + break; + case 4: + assert(skipSize <= 4); + ZSTD_memcpy(DTable + 0, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTable + 2, &DEltX2, sizeof(DEltX2)); + break; + default: + { + int i; + for (i = 0; i < skipSize; i += 8) { + ZSTD_memcpy(DTable + i + 0, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTable + i + 2, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTable + i + 4, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTable + i + 6, &DEltX2, sizeof(DEltX2)); + } + } + } } - /* fill DTable */ - { U32 s; for (s=0; s= 1 */ - - rankVal[weight] += length; - } } + /* Fill each of the second level symbols by weight. */ + { + int w; + for (w = minWeight; w < maxWeight1; ++w) { + int const begin = rankStart[w]; + int const end = rankStart[w+1]; + U32 const nbBits = nbBitsBaseline - w; + U32 const totalBits = nbBits + consumedBits; + HUF_fillDTableX2ForWeight( + DTable + rankVal[w], + sortedSymbols + begin, sortedSymbols + end, + totalBits, targetLog, + baseSeq, /* level */ 2); + } + } } - static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog, - const sortedSymbol_t* sortedList, const U32 sortedListSize, + const sortedSymbol_t* sortedList, const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, - const U32 nbBitsBaseline, U32* wksp, size_t wkspSize) + const U32 nbBitsBaseline) { - U32* rankVal = wksp; + U32* const rankVal = rankValOrigin[0]; const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ const U32 minBits = nbBitsBaseline - maxWeight; - U32 s; + int w; + int const wEnd = (int)maxWeight + 1; - assert(wkspSize >= HUF_TABLELOG_MAX + 1); - wksp += HUF_TABLELOG_MAX + 1; - wkspSize -= HUF_TABLELOG_MAX + 1; + /* Fill DTable in order of weight. */ + for (w = 1; w < wEnd; ++w) { + int const begin = (int)rankStart[w]; + int const end = (int)rankStart[w+1]; + U32 const nbBits = nbBitsBaseline - w; - ZSTD_memcpy(rankVal, rankValOrigin, sizeof(U32) * (HUF_TABLELOG_MAX + 1)); - - /* fill DTable */ - for (s=0; s= minBits) { /* enough room for a second symbol */ - U32 sortedRank; + if (targetLog-nbBits >= minBits) { + /* Enough room for a second symbol. */ + int start = rankVal[w]; + U32 const length = 1U << ((targetLog - nbBits) & 0x1F /* quiet static-analyzer */); int minWeight = nbBits + scaleLog; + int s; if (minWeight < 1) minWeight = 1; - sortedRank = rankStart[minWeight]; - HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits, - rankValOrigin[nbBits], minWeight, - sortedList+sortedRank, sortedListSize-sortedRank, - nbBitsBaseline, symbol, wksp, wkspSize); + /* Fill the DTable for every symbol of weight w. + * These symbols get at least 1 second symbol. 
+ */ + for (s = begin; s != end; ++s) { + HUF_fillDTableX2Level2( + DTable + start, targetLog, nbBits, + rankValOrigin[nbBits], minWeight, wEnd, + sortedList, rankStart, + nbBitsBaseline, sortedList[s].symbol); + start += length; + } } else { - HUF_DEltX2 DElt; - MEM_writeLE16(&(DElt.sequence), symbol); - DElt.nbBits = (BYTE)(nbBits); - DElt.length = 1; - { U32 const end = start + length; - U32 u; - for (u = start; u < end; u++) DTable[u] = DElt; - } } - rankVal[weight] += length; + /* Only a single symbol. */ + HUF_fillDTableX2ForWeight( + DTable + rankVal[w], + sortedList + begin, sortedList + end, + nbBits, targetLog, + /* baseSeq */ 0, /* level */ 1); + } } } typedef struct { rankValCol_t rankVal[HUF_TABLELOG_MAX]; U32 rankStats[HUF_TABLELOG_MAX + 1]; - U32 rankStart0[HUF_TABLELOG_MAX + 2]; + U32 rankStart0[HUF_TABLELOG_MAX + 3]; sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1]; BYTE weightList[HUF_SYMBOLVALUE_MAX + 1]; U32 calleeWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32]; @@ -627,9 +1069,16 @@ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize) { - U32 tableLog, maxW, sizeOfSort, nbSymbols; + return HUF_readDTableX2_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0); +} + +size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, + const void* src, size_t srcSize, + void* workSpace, size_t wkspSize, int bmi2) +{ + U32 tableLog, maxW, nbSymbols; DTableDesc dtd = HUF_getDTableDesc(DTable); - U32 const maxTableLog = dtd.maxTableLog; + U32 maxTableLog = dtd.maxTableLog; size_t iSize; void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */ HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr; @@ -647,11 +1096,12 @@ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); /* ZSTD_memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... 
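Sorting symbols by weight in HUF_readDTableX2_wksp is a counting sort: one histogram pass, a prefix sum that turns counts into rank offsets, then a scatter pass that places each symbol at rankStart[weight]++. A self-contained sketch (MAX_WEIGHT and sort_by_weight are illustrative names; weights are assumed to be <= MAX_WEIGHT):

    #include <stddef.h>

    #define MAX_WEIGHT 12   /* illustrative bound; zstd uses HUF_TABLELOG_MAX */

    static void sort_by_weight(const unsigned char* weight, unsigned nbSymbols,
                               unsigned char* sorted /* nbSymbols slots */,
                               unsigned rankStart[MAX_WEIGHT + 2])
    {
        unsigned s, w;
        for (w = 0; w < MAX_WEIGHT + 2; ++w) rankStart[w] = 0;
        for (s = 0; s < nbSymbols; ++s) rankStart[weight[s] + 1]++;            /* histogram, shifted by one */
        for (w = 1; w < MAX_WEIGHT + 2; ++w) rankStart[w] += rankStart[w - 1]; /* prefix sum: start of each weight */
        for (s = 0; s < nbSymbols; ++s)
            sorted[rankStart[weight[s]]++] = (unsigned char)s;                 /* scatter into rank order */
        /* After the scatter, rankStart[w] points one past the last weight-w symbol,
         * i.e. at the start of weight w+1 -- the same convention the patch uses
         * when it records rankStart[maxW+1]. */
    }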
*/ - iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), /* bmi2 */ 0); + iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), bmi2); if (HUF_isError(iSize)) return iSize; /* check result */ if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ + if (tableLog <= HUF_DECODER_FAST_TABLELOG && maxTableLog > HUF_DECODER_FAST_TABLELOG) maxTableLog = HUF_DECODER_FAST_TABLELOG; /* find maxWeight */ for (maxW = tableLog; wksp->rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */ @@ -664,7 +1114,7 @@ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, rankStart[w] = curr; } rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/ - sizeOfSort = nextRankStart; + rankStart[maxW+1] = nextRankStart; } /* sort symbols by weight */ @@ -673,7 +1123,6 @@ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, U32 const w = wksp->weightList[s]; U32 const r = rankStart[w]++; wksp->sortedSymbol[r].symbol = (BYTE)s; - wksp->sortedSymbol[r].weight = (BYTE)w; } rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */ } @@ -698,10 +1147,9 @@ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, } } } } HUF_fillDTableX2(dt, maxTableLog, - wksp->sortedSymbol, sizeOfSort, + wksp->sortedSymbol, wksp->rankStart0, wksp->rankVal, maxW, - tableLog+1, - wksp->calleeWksp, sizeof(wksp->calleeWksp) / sizeof(U32)); + tableLog+1); dtd.tableLog = (BYTE)maxTableLog; dtd.tableType = 1; @@ -714,7 +1162,7 @@ FORCE_INLINE_TEMPLATE U32 HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog) { size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ - ZSTD_memcpy(op, dt+val, 2); + ZSTD_memcpy(op, &dt[val].sequence, 2); BIT_skipBits(DStream, dt[val].nbBits); return dt[val].length; } @@ -723,15 +1171,17 @@ FORCE_INLINE_TEMPLATE U32 HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog) { size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ - ZSTD_memcpy(op, dt+val, 1); - if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits); - else { + ZSTD_memcpy(op, &dt[val].sequence, 1); + if (dt[val].length==1) { + BIT_skipBits(DStream, dt[val].nbBits); + } else { if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) { BIT_skipBits(DStream, dt[val].nbBits); if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) /* ugly hack; works only because it's the last symbol. 
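Each X2 lookup decodes one or two bytes at once: peek dtLog bits, copy the 2-byte `sequence` field (the real code writes only a single byte for the very last output position), consume `nbBits`, and advance the output by `length`. A hedged sketch of one decode step, with a left-shifting container standing in for BIT_lookBitsFast/BIT_skipBits and dtLog assumed to be >= 1:

    #include <stdint.h>
    #include <string.h>

    /* Mirrors the layout of HUF_DEltX2: up to two decoded bytes, the number of
     * bits the entry consumes, and how many of the two bytes are valid (1 or 2). */
    typedef struct { uint16_t sequence; uint8_t nbBits; uint8_t length; } DEltX2;

    static size_t decode_one_x2(unsigned char* op, uint64_t* container,
                                const DEltX2* dt, unsigned dtLog /* >= 1 */)
    {
        size_t const val = (size_t)(*container >> (64 - dtLog));  /* peek the top dtLog bits */
        memcpy(op, &dt[val].sequence, 2);   /* copy both bytes; only `length` of them are valid */
        *container <<= dt[val].nbBits;      /* consume the entry's bits */
        return dt[val].length;              /* advance output by 1 or 2 */
    }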
Note : can't easily extract nbBits from just this symbol */ DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); - } } + } + } return 1; } @@ -753,19 +1203,37 @@ HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, BYTE* const pStart = p; /* up to 8 symbols at a time */ - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) { - HUF_DECODE_SYMBOLX2_2(p, bitDPtr); - HUF_DECODE_SYMBOLX2_1(p, bitDPtr); - HUF_DECODE_SYMBOLX2_2(p, bitDPtr); - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + if ((size_t)(pEnd - p) >= sizeof(bitDPtr->bitContainer)) { + if (dtLog <= 11 && MEM_64bits()) { + /* up to 10 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-9)) { + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + } + } else { + /* up to 8 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) { + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_1(p, bitDPtr); + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + } + } + } else { + BIT_reloadDStream(bitDPtr); } /* closer to end : up to 2 symbols at a time */ - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2)) - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + if ((size_t)(pEnd - p) >= 2) { + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2)) + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); - while (p <= pEnd-2) - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ + while (p <= pEnd-2) + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ + } if (p < pEnd) p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog); @@ -799,7 +1267,6 @@ HUF_decompress1X2_usingDTable_internal_body( /* decoded size */ return dstSize; } - FORCE_INLINE_TEMPLATE size_t HUF_decompress4X2_usingDTable_internal_body( void* dst, size_t dstSize, @@ -841,57 +1308,60 @@ HUF_decompress4X2_usingDTable_internal_body( U32 const dtLog = dtd.tableLog; if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); CHECK_F( BIT_initDStream(&bitD4, istart4, length4) ); /* 16-32 symbols per loop (4-8 symbols per stream) */ - for ( ; (endSignal) & (op4 < olimit); ) { + if ((size_t)(oend - op4) >= sizeof(size_t)) { + for ( ; (endSignal) & (op4 < olimit); ) { #if defined(__clang__) && (defined(__x86_64__) || defined(__i386__)) - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_1(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_0(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_1(op2, &bitD2); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_0(op2, &bitD2); - endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished; - endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished; - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_1(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_0(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - HUF_DECODE_SYMBOLX2_1(op4, &bitD4); - 
HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - HUF_DECODE_SYMBOLX2_0(op4, &bitD4); - endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished; - endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished; + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_1(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_0(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_1(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_0(op2, &bitD2); + endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished; + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_1(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_0(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_1(op4, &bitD4); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_0(op4, &bitD4); + endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished; #else - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - HUF_DECODE_SYMBOLX2_1(op1, &bitD1); - HUF_DECODE_SYMBOLX2_1(op2, &bitD2); - HUF_DECODE_SYMBOLX2_1(op3, &bitD3); - HUF_DECODE_SYMBOLX2_1(op4, &bitD4); - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - HUF_DECODE_SYMBOLX2_0(op1, &bitD1); - HUF_DECODE_SYMBOLX2_0(op2, &bitD2); - HUF_DECODE_SYMBOLX2_0(op3, &bitD3); - HUF_DECODE_SYMBOLX2_0(op4, &bitD4); - endSignal = (U32)LIKELY( - (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished) - & (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished) - & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished) - & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished)); + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_1(op1, &bitD1); + HUF_DECODE_SYMBOLX2_1(op2, &bitD2); + HUF_DECODE_SYMBOLX2_1(op3, &bitD3); + HUF_DECODE_SYMBOLX2_1(op4, &bitD4); + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_0(op1, &bitD1); + HUF_DECODE_SYMBOLX2_0(op2, &bitD2); + HUF_DECODE_SYMBOLX2_0(op3, &bitD3); + HUF_DECODE_SYMBOLX2_0(op4, &bitD4); + endSignal = (U32)LIKELY((U32) + (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished) + & (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished) + & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished) + & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished)); #endif + } } /* check corruption */ @@ -915,8 +1385,99 @@ HUF_decompress4X2_usingDTable_internal_body( } } +#if HUF_NEED_BMI2_FUNCTION +static BMI2_TARGET_ATTRIBUTE +size_t HUF_decompress4X2_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc, + size_t cSrcSize, HUF_DTable const* DTable) { + return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); +} +#endif + +#if HUF_NEED_DEFAULT_FUNCTION +static +size_t HUF_decompress4X2_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc, + size_t cSrcSize, HUF_DTable const* DTable) { + return HUF_decompress4X2_usingDTable_internal_body(dst, 
dstSize, cSrc, cSrcSize, DTable); +} +#endif + +#if HUF_ENABLE_ASM_X86_64_BMI2 + +HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args); + +static HUF_ASM_X86_64_BMI2_ATTRS size_t +HUF_decompress4X2_usingDTable_internal_bmi2_asm( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) { + void const* dt = DTable + 1; + const BYTE* const iend = (const BYTE*)cSrc + 6; + BYTE* const oend = (BYTE*)dst + dstSize; + HUF_DecompressAsmArgs args; + { + size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); + FORWARD_IF_ERROR(ret, "Failed to init asm args"); + if (ret != 0) + return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); + } + + assert(args.ip[0] >= args.ilimit); + HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(&args); + + /* note : op4 already verified within main loop */ + assert(args.ip[0] >= iend); + assert(args.ip[1] >= iend); + assert(args.ip[2] >= iend); + assert(args.ip[3] >= iend); + assert(args.op[3] <= oend); + (void)iend; + + /* finish bitStreams one by one */ + { + size_t const segmentSize = (dstSize+3) / 4; + BYTE* segmentEnd = (BYTE*)dst; + int i; + for (i = 0; i < 4; ++i) { + BIT_DStream_t bit; + if (segmentSize <= (size_t)(oend - segmentEnd)) + segmentEnd += segmentSize; + else + segmentEnd = oend; + FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption"); + args.op[i] += HUF_decodeStreamX2(args.op[i], &bit, segmentEnd, (HUF_DEltX2 const*)dt, HUF_DECODER_FAST_TABLELOG); + if (args.op[i] != segmentEnd) + return ERROR(corruption_detected); + } + } + + /* decoded size */ + return dstSize; +} +#endif /* HUF_ENABLE_ASM_X86_64_BMI2 */ + +static size_t HUF_decompress4X2_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc, + size_t cSrcSize, HUF_DTable const* DTable, int bmi2) +{ +#if DYNAMIC_BMI2 + if (bmi2) { +# if HUF_ENABLE_ASM_X86_64_BMI2 + return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); +# else + return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); +# endif + } +#else + (void)bmi2; +#endif + +#if HUF_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__) + return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); +#else + return HUF_decompress4X2_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable); +#endif +} + HUF_DGEN(HUF_decompress1X2_usingDTable_internal) -HUF_DGEN(HUF_decompress4X2_usingDTable_internal) size_t HUF_decompress1X2_usingDTable( void* dst, size_t dstSize, @@ -1025,25 +1586,25 @@ size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, #if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2) typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; -static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = +static const algo_time_t algoTime[16 /* Quantization */][2 /* single, double */] = { /* single, double, quad */ - {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */ - {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */ - {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */ - {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */ - {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */ - {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */ - {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */ - {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% 
*/ - {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */ - {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */ - {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */ - {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */ - {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */ - {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */ - {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */ - {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */ + {{0,0}, {1,1}}, /* Q==0 : impossible */ + {{0,0}, {1,1}}, /* Q==1 : impossible */ + {{ 150,216}, { 381,119}}, /* Q == 2 : 12-18% */ + {{ 170,205}, { 514,112}}, /* Q == 3 : 18-25% */ + {{ 177,199}, { 539,110}}, /* Q == 4 : 25-32% */ + {{ 197,194}, { 644,107}}, /* Q == 5 : 32-38% */ + {{ 221,192}, { 735,107}}, /* Q == 6 : 38-44% */ + {{ 256,189}, { 881,106}}, /* Q == 7 : 44-50% */ + {{ 359,188}, {1167,109}}, /* Q == 8 : 50-56% */ + {{ 582,187}, {1570,114}}, /* Q == 9 : 56-62% */ + {{ 688,187}, {1712,122}}, /* Q ==10 : 62-69% */ + {{ 825,186}, {1965,136}}, /* Q ==11 : 69-75% */ + {{ 976,185}, {2131,150}}, /* Q ==12 : 75-81% */ + {{1180,186}, {2070,175}}, /* Q ==13 : 81-87% */ + {{1377,185}, {1731,202}}, /* Q ==14 : 87-93% */ + {{1412,185}, {1695,202}}, /* Q ==15 : 93-99% */ }; #endif @@ -1070,7 +1631,7 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize) U32 const D256 = (U32)(dstSize >> 8); U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256); U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256); - DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, to reduce cache eviction */ + DTime1 += DTime1 >> 5; /* small advantage to algorithm using less memory, to reduce cache eviction */ return DTime1 < DTime0; } #endif diff --git a/lib/decompress/huf_decompress_amd64.S b/lib/decompress/huf_decompress_amd64.S new file mode 100644 index 000000000..cebe2f321 --- /dev/null +++ b/lib/decompress/huf_decompress_amd64.S @@ -0,0 +1,578 @@ +#if !defined(HUF_DISABLE_ASM) && defined(__x86_64__) + +/* Stack marking + * ref: https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart + */ +#if defined(__linux__) && defined(__ELF__) +.section .note.GNU-stack,"",%progbits +#endif + +/* Calling convention: + * + * %rdi contains the first argument: HUF_DecompressAsmArgs*. + * %rbp isn't maintained (no frame pointer). + * %rsp contains the stack pointer that grows down. + * No red-zone is assumed, only addresses >= %rsp are used. + * All register contents are preserved. + * + * TODO: Support Windows calling convention. + */ + +.global HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop +.global HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop +.global _HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop +.global _HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop +.text + +/* Sets up register mappings for clarity. + * op[], bits[], dtable & ip[0] each get their own register. + * ip[1,2,3] & olimit alias var[]. + * %rax is a scratch register. + */ + +#define op0 rsi +#define op1 rbx +#define op2 rcx +#define op3 rdi + +#define ip0 r8 +#define ip1 r9 +#define ip2 r10 +#define ip3 r11 + +#define bits0 rbp +#define bits1 rdx +#define bits2 r12 +#define bits3 r13 +#define dtable r14 +#define olimit r15 + +/* var[] aliases ip[1,2,3] & olimit + * ip[1,2,3] are saved every iteration. + * olimit is only used in compute_olimit. 
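HUF_selectDecoder estimates each decoder's cost as a fixed table-build time plus a per-256-bytes decode time and, with this patch, only biases the choice toward the single-symbol decoder by 1/32 instead of 1/8. A sketch of that heuristic; the algo_time values a caller would pass are placeholders, not the retuned table above:

    #include <stddef.h>

    typedef struct { unsigned tableTime; unsigned decode256Time; } algo_time;

    /* Returns nonzero when the double-symbol (X2) decoder is predicted to be faster. */
    static int select_double_symbol(size_t dstSize, size_t cSrcSize,
                                    algo_time x1, algo_time x2)
    {
        unsigned const D256 = (unsigned)(dstSize >> 8);
        unsigned const t1 = x1.tableTime + x1.decode256Time * D256;
        unsigned t2 = x2.tableTime + x2.decode256Time * D256;
        (void)cSrcSize;   /* in zstd, the cSrcSize/dstSize ratio picks which table row to use */
        t2 += t2 >> 5;    /* small bias toward the smaller X1 table, to reduce cache eviction */
        return t2 < t1;
    }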
+ */ +#define var0 r15 +#define var1 r9 +#define var2 r10 +#define var3 r11 + +/* 32-bit var registers */ +#define vard0 r15d +#define vard1 r9d +#define vard2 r10d +#define vard3 r11d + +/* Helper macro: args if idx != 4. */ +#define IF_NOT_4_0(...) __VA_ARGS__ +#define IF_NOT_4_1(...) __VA_ARGS__ +#define IF_NOT_4_2(...) __VA_ARGS__ +#define IF_NOT_4_3(...) __VA_ARGS__ +#define IF_NOT_4_4(...) +#define IF_NOT_4_(idx, ...) IF_NOT_4_##idx(__VA_ARGS__) +#define IF_NOT_4(idx, ...) IF_NOT_4_(idx, __VA_ARGS__) + +/* Calls X(N) for each stream 0, 1, 2, 3. */ +#define FOR_EACH_STREAM(X) \ + X(0); \ + X(1); \ + X(2); \ + X(3) + +/* Calls X(N, idx) for each stream 0, 1, 2, 3. */ +#define FOR_EACH_STREAM_WITH_INDEX(X, idx) \ + X(0, idx); \ + X(1, idx); \ + X(2, idx); \ + X(3, idx) + +/* Define both _HUF_* & HUF_* symbols because MacOS + * C symbols are prefixed with '_' & Linux symbols aren't. + */ +_HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop: +HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop: + /* Save all registers - even if they are callee saved for simplicity. */ + push %rax + push %rbx + push %rcx + push %rdx + push %rbp + push %rsi + push %rdi + push %r8 + push %r9 + push %r10 + push %r11 + push %r12 + push %r13 + push %r14 + push %r15 + + /* Read HUF_DecompressAsmArgs* args from %rax */ + movq %rdi, %rax + movq 0(%rax), %ip0 + movq 8(%rax), %ip1 + movq 16(%rax), %ip2 + movq 24(%rax), %ip3 + movq 32(%rax), %op0 + movq 40(%rax), %op1 + movq 48(%rax), %op2 + movq 56(%rax), %op3 + movq 64(%rax), %bits0 + movq 72(%rax), %bits1 + movq 80(%rax), %bits2 + movq 88(%rax), %bits3 + movq 96(%rax), %dtable + push %rax /* argument */ + push 104(%rax) /* ilimit */ + push 112(%rax) /* oend */ + push %olimit /* olimit space */ + + subq $24, %rsp + +.L_4X1_compute_olimit: + /* Computes how many iterations we can do safely + * %r15, %rax may be clobbered + * rbx, rdx must be saved + * op3 & ip0 mustn't be clobbered + */ + movq %rbx, 0(%rsp) + movq %rdx, 8(%rsp) + + movq 32(%rsp), %rax /* rax = oend */ + subq %op3, %rax /* rax = oend - op3 */ + + /* r15 = (oend - op3) / 5 */ + movabsq $-3689348814741910323, %rdx + mulq %rdx + movq %rdx, %r15 + shrq $2, %r15 + + movq %ip0, %rax /* rax = ip0 */ + movq 40(%rsp), %rdx /* rdx = ilimit */ + subq %rdx, %rax /* rax = ip0 - ilimit */ + movq %rax, %rbx /* rbx = ip0 - ilimit */ + + /* rdx = (ip0 - ilimit) / 7 */ + movabsq $2635249153387078803, %rdx + mulq %rdx + subq %rdx, %rbx + shrq %rbx + addq %rbx, %rdx + shrq $2, %rdx + + /* r15 = min(%rdx, %r15) */ + cmpq %rdx, %r15 + cmova %rdx, %r15 + + /* r15 = r15 * 5 */ + leaq (%r15, %r15, 4), %r15 + + /* olimit = op3 + r15 */ + addq %op3, %olimit + + movq 8(%rsp), %rdx + movq 0(%rsp), %rbx + + /* If (op3 + 20 > olimit) */ + movq %op3, %rax /* rax = op3 */ + addq $20, %rax /* rax = op3 + 20 */ + cmpq %rax, %olimit /* op3 + 20 > olimit */ + jb .L_4X1_exit + + /* If (ip1 < ip0) go to exit */ + cmpq %ip0, %ip1 + jb .L_4X1_exit + + /* If (ip2 < ip1) go to exit */ + cmpq %ip1, %ip2 + jb .L_4X1_exit + + /* If (ip3 < ip2) go to exit */ + cmpq %ip2, %ip3 + jb .L_4X1_exit + +/* Reads top 11 bits from bits[n] + * Loads dt[bits[n]] into var[n] + */ +#define GET_NEXT_DELT(n) \ + movq $53, %var##n; \ + shrxq %var##n, %bits##n, %var##n; \ + movzwl (%dtable,%var##n,2),%vard##n + +/* var[n] must contain the DTable entry computed with GET_NEXT_DELT + * Moves var[n] to %rax + * bits[n] <<= var[n] & 63 + * op[n][idx] = %rax >> 8 + * %ah is a way to access bits [8, 16) of %rax + */ +#define DECODE_FROM_DELT(n, idx) \ + movq %var##n, 
%rax; \ + shlxq %var##n, %bits##n, %bits##n; \ + movb %ah, idx(%op##n) + +/* Assumes GET_NEXT_DELT has been called. + * Calls DECODE_FROM_DELT then GET_NEXT_DELT if n < 4 + */ +#define DECODE(n, idx) \ + DECODE_FROM_DELT(n, idx); \ + IF_NOT_4(idx, GET_NEXT_DELT(n)) + +/* // ctz & nbBytes is stored in bits[n] + * // nbBits is stored in %rax + * ctz = CTZ[bits[n]] + * nbBits = ctz & 7 + * nbBytes = ctz >> 3 + * op[n] += 5 + * ip[n] -= nbBytes + * // Note: x86-64 is little-endian ==> no bswap + * bits[n] = MEM_readST(ip[n]) | 1 + * bits[n] <<= nbBits + */ +#define RELOAD_BITS(n) \ + bsfq %bits##n, %bits##n; \ + movq %bits##n, %rax; \ + andq $7, %rax; \ + shrq $3, %bits##n; \ + leaq 5(%op##n), %op##n; \ + subq %bits##n, %ip##n; \ + movq (%ip##n), %bits##n; \ + orq $1, %bits##n; \ + shlx %rax, %bits##n, %bits##n + + /* Store clobbered variables on the stack */ + movq %olimit, 24(%rsp) + movq %ip1, 0(%rsp) + movq %ip2, 8(%rsp) + movq %ip3, 16(%rsp) + + /* Call GET_NEXT_DELT for each stream */ + FOR_EACH_STREAM(GET_NEXT_DELT) + + .p2align 6 + +.L_4X1_loop_body: + /* Decode 5 symbols in each of the 4 streams (20 total) + * Must have called GET_NEXT_DELT for each stream + */ + FOR_EACH_STREAM_WITH_INDEX(DECODE, 0) + FOR_EACH_STREAM_WITH_INDEX(DECODE, 1) + FOR_EACH_STREAM_WITH_INDEX(DECODE, 2) + FOR_EACH_STREAM_WITH_INDEX(DECODE, 3) + FOR_EACH_STREAM_WITH_INDEX(DECODE, 4) + + /* Load ip[1,2,3] from stack (var[] aliases them) + * ip[] is needed for RELOAD_BITS + * Each will be stored back to the stack after RELOAD + */ + movq 0(%rsp), %ip1 + movq 8(%rsp), %ip2 + movq 16(%rsp), %ip3 + + /* Reload each stream & fetch the next table entry + * to prepare for the next iteration + */ + RELOAD_BITS(0) + GET_NEXT_DELT(0) + + RELOAD_BITS(1) + movq %ip1, 0(%rsp) + GET_NEXT_DELT(1) + + RELOAD_BITS(2) + movq %ip2, 8(%rsp) + GET_NEXT_DELT(2) + + RELOAD_BITS(3) + movq %ip3, 16(%rsp) + GET_NEXT_DELT(3) + + /* If op3 < olimit: continue the loop */ + cmp %op3, 24(%rsp) + ja .L_4X1_loop_body + + /* Reload ip[1,2,3] from stack */ + movq 0(%rsp), %ip1 + movq 8(%rsp), %ip2 + movq 16(%rsp), %ip3 + + /* Re-compute olimit */ + jmp .L_4X1_compute_olimit + +#undef GET_NEXT_DELT +#undef DECODE_FROM_DELT +#undef DECODE +#undef RELOAD_BITS +.L_4X1_exit: + addq $24, %rsp + + /* Restore stack (oend & olimit) */ + pop %rax /* olimit */ + pop %rax /* oend */ + pop %rax /* ilimit */ + pop %rax /* arg */ + + /* Save ip / op / bits */ + movq %ip0, 0(%rax) + movq %ip1, 8(%rax) + movq %ip2, 16(%rax) + movq %ip3, 24(%rax) + movq %op0, 32(%rax) + movq %op1, 40(%rax) + movq %op2, 48(%rax) + movq %op3, 56(%rax) + movq %bits0, 64(%rax) + movq %bits1, 72(%rax) + movq %bits2, 80(%rax) + movq %bits3, 88(%rax) + + /* Restore registers */ + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %r11 + pop %r10 + pop %r9 + pop %r8 + pop %rdi + pop %rsi + pop %rbp + pop %rdx + pop %rcx + pop %rbx + pop %rax + ret + +_HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop: +HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop: + /* Save all registers - even if they are callee saved for simplicity. 
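RELOAD_BITS is the asm counterpart of the sentinel-bit trick: count the container's trailing zeros to get the total bits consumed, split that into whole bytes (move the stream pointer backwards, since the Huffman streams are read back-to-front) and leftover bits (pre-shift the refilled container). A C rendering, under the assumption that the pointer stays at or above ilimit so the 8-byte load is safe, with a portable loop in place of bsf:

    #include <stdint.h>

    static const unsigned char* reload_bits(const unsigned char* ip, uint64_t* bits)
    {
        uint64_t b = *bits;      /* the sentinel guarantees b != 0 here */
        int ctz = 0, nbBits, nbBytes, i;
        while (!(b & 1)) { b >>= 1; ++ctz; }   /* total bits consumed since the last reload */
        nbBits  = ctz & 7;       /* bits consumed within the boundary byte */
        nbBytes = ctz >> 3;      /* whole bytes consumed */
        ip -= nbBytes;           /* streams are read backwards: back up by the bytes consumed */
        b = 0;
        for (i = 0; i < 8; ++i) b |= (uint64_t)ip[i] << (8 * i);   /* little-endian reload */
        *bits = (b | 1) << nbBits;                                  /* re-arm the sentinel */
        return ip;
    }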
*/ + push %rax + push %rbx + push %rcx + push %rdx + push %rbp + push %rsi + push %rdi + push %r8 + push %r9 + push %r10 + push %r11 + push %r12 + push %r13 + push %r14 + push %r15 + + movq %rdi, %rax + movq 0(%rax), %ip0 + movq 8(%rax), %ip1 + movq 16(%rax), %ip2 + movq 24(%rax), %ip3 + movq 32(%rax), %op0 + movq 40(%rax), %op1 + movq 48(%rax), %op2 + movq 56(%rax), %op3 + movq 64(%rax), %bits0 + movq 72(%rax), %bits1 + movq 80(%rax), %bits2 + movq 88(%rax), %bits3 + movq 96(%rax), %dtable + push %rax /* argument */ + push %rax /* olimit */ + push 104(%rax) /* ilimit */ + + movq 112(%rax), %rax + push %rax /* oend3 */ + + movq %op3, %rax + push %rax /* oend2 */ + + movq %op2, %rax + push %rax /* oend1 */ + + movq %op1, %rax + push %rax /* oend0 */ + + /* Scratch space */ + subq $8, %rsp + +.L_4X2_compute_olimit: + /* Computes how many iterations we can do safely + * %r15, %rax may be clobbered + * rdx must be saved + * op[1,2,3,4] & ip0 mustn't be clobbered + */ + movq %rdx, 0(%rsp) + + /* We can consume up to 7 input bytes each iteration. */ + movq %ip0, %rax /* rax = ip0 */ + movq 40(%rsp), %rdx /* rdx = ilimit */ + subq %rdx, %rax /* rax = ip0 - ilimit */ + movq %rax, %r15 /* r15 = ip0 - ilimit */ + + /* rdx = rax / 7 */ + movabsq $2635249153387078803, %rdx + mulq %rdx + subq %rdx, %r15 + shrq %r15 + addq %r15, %rdx + shrq $2, %rdx + + /* r15 = (ip0 - ilimit) / 7 */ + movq %rdx, %r15 + + movabsq $-3689348814741910323, %rdx + movq 8(%rsp), %rax /* rax = oend0 */ + subq %op0, %rax /* rax = oend0 - op0 */ + mulq %rdx + shrq $3, %rdx /* rdx = rax / 10 */ + + /* r15 = min(%rdx, %r15) */ + cmpq %rdx, %r15 + cmova %rdx, %r15 + + movabsq $-3689348814741910323, %rdx + movq 16(%rsp), %rax /* rax = oend1 */ + subq %op1, %rax /* rax = oend1 - op1 */ + mulq %rdx + shrq $3, %rdx /* rdx = rax / 10 */ + + /* r15 = min(%rdx, %r15) */ + cmpq %rdx, %r15 + cmova %rdx, %r15 + + movabsq $-3689348814741910323, %rdx + movq 24(%rsp), %rax /* rax = oend2 */ + subq %op2, %rax /* rax = oend2 - op2 */ + mulq %rdx + shrq $3, %rdx /* rdx = rax / 10 */ + + /* r15 = min(%rdx, %r15) */ + cmpq %rdx, %r15 + cmova %rdx, %r15 + + movabsq $-3689348814741910323, %rdx + movq 32(%rsp), %rax /* rax = oend3 */ + subq %op3, %rax /* rax = oend3 - op3 */ + mulq %rdx + shrq $3, %rdx /* rdx = rax / 10 */ + + /* r15 = min(%rdx, %r15) */ + cmpq %rdx, %r15 + cmova %rdx, %r15 + + /* olimit = op3 + 5 * r15 */ + movq %r15, %rax + leaq (%op3, %rax, 4), %olimit + addq %rax, %olimit + + movq 0(%rsp), %rdx + + /* If (op3 + 10 > olimit) */ + movq %op3, %rax /* rax = op3 */ + addq $10, %rax /* rax = op3 + 10 */ + cmpq %rax, %olimit /* op3 + 10 > olimit */ + jb .L_4X2_exit + + /* If (ip1 < ip0) go to exit */ + cmpq %ip0, %ip1 + jb .L_4X2_exit + + /* If (ip2 < ip1) go to exit */ + cmpq %ip1, %ip2 + jb .L_4X2_exit + + /* If (ip3 < ip2) go to exit */ + cmpq %ip2, %ip3 + jb .L_4X2_exit + +#define DECODE(n, idx) \ + movq %bits##n, %rax; \ + shrq $53, %rax; \ + movzwl 0(%dtable,%rax,4),%r8d; \ + movzbl 2(%dtable,%rax,4),%r15d; \ + movzbl 3(%dtable,%rax,4),%eax; \ + movw %r8w, (%op##n); \ + shlxq %r15, %bits##n, %bits##n; \ + addq %rax, %op##n + +#define RELOAD_BITS(n) \ + bsfq %bits##n, %bits##n; \ + movq %bits##n, %rax; \ + shrq $3, %bits##n; \ + andq $7, %rax; \ + subq %bits##n, %ip##n; \ + movq (%ip##n), %bits##n; \ + orq $1, %bits##n; \ + shlxq %rax, %bits##n, %bits##n + + + movq %olimit, 48(%rsp) + + .p2align 6 + +.L_4X2_loop_body: + /* We clobber r8, so store it on the stack */ + movq %r8, 0(%rsp) + + /* Decode 5 symbols from each of the 4 
streams (20 symbols total). */ + FOR_EACH_STREAM_WITH_INDEX(DECODE, 0) + FOR_EACH_STREAM_WITH_INDEX(DECODE, 1) + FOR_EACH_STREAM_WITH_INDEX(DECODE, 2) + FOR_EACH_STREAM_WITH_INDEX(DECODE, 3) + FOR_EACH_STREAM_WITH_INDEX(DECODE, 4) + + /* Reload r8 */ + movq 0(%rsp), %r8 + + FOR_EACH_STREAM(RELOAD_BITS) + + cmp %op3, 48(%rsp) + ja .L_4X2_loop_body + jmp .L_4X2_compute_olimit + +#undef DECODE +#undef RELOAD_BITS +.L_4X2_exit: + addq $8, %rsp + /* Restore stack (oend & olimit) */ + pop %rax /* oend0 */ + pop %rax /* oend1 */ + pop %rax /* oend2 */ + pop %rax /* oend3 */ + pop %rax /* ilimit */ + pop %rax /* olimit */ + pop %rax /* arg */ + + /* Save ip / op / bits */ + movq %ip0, 0(%rax) + movq %ip1, 8(%rax) + movq %ip2, 16(%rax) + movq %ip3, 24(%rax) + movq %op0, 32(%rax) + movq %op1, 40(%rax) + movq %op2, 48(%rax) + movq %op3, 56(%rax) + movq %bits0, 64(%rax) + movq %bits1, 72(%rax) + movq %bits2, 80(%rax) + movq %bits3, 88(%rax) + + /* Restore registers */ + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %r11 + pop %r10 + pop %r9 + pop %r8 + pop %rdi + pop %rsi + pop %rbp + pop %rdx + pop %rcx + pop %rbx + pop %rax + ret + +#endif diff --git a/lib/decompress/zstd_decompress.c b/lib/decompress/zstd_decompress.c index 910bc034c..0031e98cf 100644 --- a/lib/decompress/zstd_decompress.c +++ b/lib/decompress/zstd_decompress.c @@ -56,7 +56,6 @@ * Dependencies *********************************************************/ #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */ -#include "../common/cpu.h" /* bmi2 */ #include "../common/mem.h" /* low level memory routines */ #define FSE_STATIC_LINKING_ONLY #include "../common/fse.h" @@ -177,12 +176,15 @@ static const ZSTD_DDict* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) { ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc(sizeof(ZSTD_DDictHashSet), customMem); DEBUGLOG(4, "Allocating new hash set"); + if (!ret) + return NULL; ret->ddictPtrTable = (const ZSTD_DDict**)ZSTD_customCalloc(DDICT_HASHSET_TABLE_BASE_SIZE * sizeof(ZSTD_DDict*), customMem); - ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE; - ret->ddictPtrCount = 0; - if (!ret || !ret->ddictPtrTable) { + if (!ret->ddictPtrTable) { + ZSTD_customFree(ret, customMem); return NULL; } + ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE; + ret->ddictPtrCount = 0; return ret; } @@ -255,11 +257,15 @@ static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx) dctx->inBuffSize = 0; dctx->outBuffSize = 0; dctx->streamStage = zdss_init; +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) dctx->legacyContext = NULL; dctx->previousLegacyVersion = 0; +#endif dctx->noForwardProgress = 0; dctx->oversizedDuration = 0; - dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); +#if DYNAMIC_BMI2 + dctx->bmi2 = ZSTD_cpuSupportsBmi2(); +#endif dctx->ddictSet = NULL; ZSTD_DCtx_resetParameters(dctx); #ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION @@ -280,8 +286,7 @@ ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize) return dctx; } -ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem) -{ +static ZSTD_DCtx* ZSTD_createDCtx_internal(ZSTD_customMem customMem) { if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL; { ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_customMalloc(sizeof(*dctx), customMem); @@ -292,10 +297,15 @@ ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem) } } +ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem) +{ + return 
ZSTD_createDCtx_internal(customMem); +} + ZSTD_DCtx* ZSTD_createDCtx(void) { DEBUGLOG(3, "ZSTD_createDCtx"); - return ZSTD_createDCtx_advanced(ZSTD_defaultCMem); + return ZSTD_createDCtx_internal(ZSTD_defaultCMem); } static void ZSTD_clearDict(ZSTD_DCtx* dctx) @@ -380,6 +390,19 @@ unsigned ZSTD_isFrame(const void* buffer, size_t size) return 0; } +/*! ZSTD_isSkippableFrame() : + * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame. + * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. + */ +unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size) +{ + if (size < ZSTD_FRAMEIDSIZE) return 0; + { U32 const magic = MEM_readLE32(buffer); + if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1; + } + return 0; +} + /** ZSTD_frameHeaderSize_internal() : * srcSize must be large enough to reach header size fields. * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless. @@ -466,7 +489,9 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s } switch(dictIDSizeCode) { - default: assert(0); /* impossible */ + default: + assert(0); /* impossible */ + ZSTD_FALLTHROUGH; case 0 : break; case 1 : dictID = ip[pos]; pos++; break; case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break; @@ -474,7 +499,9 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s } switch(fcsID) { - default: assert(0); /* impossible */ + default: + assert(0); /* impossible */ + ZSTD_FALLTHROUGH; case 0 : if (singleSegment) frameContentSize = ip[pos]; break; case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break; case 2 : frameContentSize = MEM_readLE32(ip+pos); break; @@ -503,7 +530,6 @@ size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t src return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1); } - /** ZSTD_getFrameContentSize() : * compatible with legacy mode * @return : decompressed size of the single frame pointed to be `src` if known, otherwise @@ -544,6 +570,37 @@ static size_t readSkippableFrameSize(void const* src, size_t srcSize) } } +/*! ZSTD_readSkippableFrame() : + * Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer. + * + * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written, + * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested + * in the magicVariant. + * + * Returns an error if destination buffer is not large enough, or if the frame is not skippable. + * + * @return : number of bytes written or a ZSTD error. 
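A short usage sketch for the skippable-frame helpers introduced above; it assumes ZSTD_isSkippableFrame and ZSTD_readSkippableFrame are exposed through zstd.h's experimental section (hence ZSTD_STATIC_LINKING_ONLY), which is an assumption about the headers rather than something shown in this hunk:

    #define ZSTD_STATIC_LINKING_ONLY   /* assumed: experimental declarations */
    #include <stdio.h>
    #include <zstd.h>

    /* Reads the payload of a skippable frame at `src`, if any.
     * Returns the payload size, or 0 if the frame is not skippable or on error. */
    static size_t read_skippable(void* dst, size_t dstCapacity, unsigned* magicVariant,
                                 const void* src, size_t srcSize)
    {
        size_t r;
        if (!ZSTD_isSkippableFrame(src, srcSize)) return 0;
        r = ZSTD_readSkippableFrame(dst, dstCapacity, magicVariant, src, srcSize);
        if (ZSTD_isError(r)) {
            fprintf(stderr, "skippable frame: %s\n", ZSTD_getErrorName(r));
            return 0;
        }
        return r;   /* number of payload bytes copied into dst */
    }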
+ */ +ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant, + const void* src, size_t srcSize) +{ + U32 const magicNumber = MEM_readLE32(src); + size_t skippableFrameSize = readSkippableFrameSize(src, srcSize); + size_t skippableContentSize = skippableFrameSize - ZSTD_SKIPPABLEHEADERSIZE; + + /* check input validity */ + RETURN_ERROR_IF(!ZSTD_isSkippableFrame(src, srcSize), frameParameter_unsupported, ""); + RETURN_ERROR_IF(skippableFrameSize < ZSTD_SKIPPABLEHEADERSIZE || skippableFrameSize > srcSize, srcSize_wrong, ""); + RETURN_ERROR_IF(skippableContentSize > dstCapacity, dstSize_tooSmall, ""); + + /* deliver payload */ + if (skippableContentSize > 0 && dst != NULL) + ZSTD_memcpy(dst, (const BYTE *)src + ZSTD_SKIPPABLEHEADERSIZE, skippableContentSize); + if (magicVariant != NULL) + *magicVariant = magicNumber - ZSTD_MAGIC_SKIPPABLE_START; + return skippableContentSize; +} + /** ZSTD_findDecompressedSize() : * compatible with legacy mode * `srcSize` must be the exact length of some number of ZSTD compressed and/or @@ -858,7 +915,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, switch(blockProperties.blockType) { case bt_compressed: - decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1); + decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1, not_streaming); break; case bt_raw : decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize); @@ -1009,7 +1066,7 @@ static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx) switch (dctx->dictUses) { default: assert(0 /* Impossible */); - /* fall-through */ + ZSTD_FALLTHROUGH; case ZSTD_dont_use: ZSTD_clearDict(dctx); return NULL; @@ -1031,7 +1088,7 @@ size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t sr { #if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1) size_t regenSize; - ZSTD_DCtx* const dctx = ZSTD_createDCtx(); + ZSTD_DCtx* const dctx = ZSTD_createDCtx_internal(ZSTD_defaultCMem); RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!"); regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize); ZSTD_freeDCtx(dctx); @@ -1065,7 +1122,7 @@ static size_t ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx* dctx, size_t return dctx->expected; if (dctx->bType != bt_raw) return dctx->expected; - return MIN(MAX(inputSize, 1), dctx->expected); + return BOUNDED(1, inputSize, dctx->expected); } ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) { @@ -1073,7 +1130,9 @@ ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) { { default: /* should not happen */ assert(0); + ZSTD_FALLTHROUGH; case ZSTDds_getFrameHeaderSize: + ZSTD_FALLTHROUGH; case ZSTDds_decodeFrameHeader: return ZSTDnit_frameHeader; case ZSTDds_decodeBlockHeader: @@ -1085,6 +1144,7 @@ ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) { case ZSTDds_checkChecksum: return ZSTDnit_checksum; case ZSTDds_decodeSkippableHeader: + ZSTD_FALLTHROUGH; case ZSTDds_skipFrame: return ZSTDnit_skippableFrame; } @@ -1168,7 +1228,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c { case bt_compressed: DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed"); - rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1); + rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1, is_streaming); dctx->expected = 0; /* Streaming not supported */ break; case bt_raw : @@ -1493,7 +1553,7 @@ 
size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, ZSTD_DStream* ZSTD_createDStream(void) { DEBUGLOG(3, "ZSTD_createDStream"); - return ZSTD_createDStream_advanced(ZSTD_defaultCMem); + return ZSTD_createDCtx_internal(ZSTD_defaultCMem); } ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize) @@ -1503,7 +1563,7 @@ ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize) ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem) { - return ZSTD_createDCtx_advanced(customMem); + return ZSTD_createDCtx_internal(customMem); } size_t ZSTD_freeDStream(ZSTD_DStream* zds) @@ -1763,7 +1823,8 @@ size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx) size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize) { size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX); - unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2); + /* space is needed to store the litbuffer after the output of a given block without stomping the extDict of a previous run, as well as to cover both windows against wildcopy*/ + unsigned long long const neededRBSize = windowSize + blockSize + ZSTD_BLOCKSIZE_MAX + (WILDCOPY_OVERLENGTH * 2); unsigned long long const neededSize = MIN(frameContentSize, neededRBSize); size_t const minRBSize = (size_t) neededSize; RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize, @@ -1897,10 +1958,12 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB DEBUGLOG(5, "stage zdss_init => transparent reset "); zds->streamStage = zdss_loadHeader; zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) zds->legacyVersion = 0; +#endif zds->hostageByte = 0; zds->expectedOutBuffer = *output; - /* fall-through */ + ZSTD_FALLTHROUGH; case zdss_loadHeader : DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip)); @@ -2038,7 +2101,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB zds->outBuffSize = neededOutBuffSize; } } } zds->streamStage = zdss_read; - /* fall-through */ + ZSTD_FALLTHROUGH; case zdss_read: DEBUGLOG(5, "stage zdss_read"); @@ -2057,7 +2120,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB } } if (ip==iend) { someMoreWork = 0; break; } /* no more input */ zds->streamStage = zdss_load; - /* fall-through */ + ZSTD_FALLTHROUGH; case zdss_load: { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds); diff --git a/lib/decompress/zstd_decompress_block.c b/lib/decompress/zstd_decompress_block.c index 349dcdc33..e548844ce 100644 --- a/lib/decompress/zstd_decompress_block.c +++ b/lib/decompress/zstd_decompress_block.c @@ -69,15 +69,56 @@ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, } } +/* Allocate buffer for literals, either overlapping current dst, or split between dst and litExtraBuffer, or stored entirely within litExtraBuffer */ +static void ZSTD_allocateLiteralsBuffer(ZSTD_DCtx* dctx, void* const dst, const size_t dstCapacity, const size_t litSize, + const streaming_operation streaming, const size_t expectedWriteSize, const unsigned splitImmediately) +{ + if (streaming == not_streaming && dstCapacity > ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH + litSize + WILDCOPY_OVERLENGTH) + { + /* room for litbuffer to fit without read faulting */ + dctx->litBuffer = (BYTE*)dst + ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH; + dctx->litBufferEnd = dctx->litBuffer + 
litSize; + dctx->litBufferLocation = ZSTD_in_dst; + } + else if (litSize > ZSTD_LITBUFFEREXTRASIZE) + { + /* won't fit in litExtraBuffer, so it will be split between end of dst and extra buffer */ + if (splitImmediately) { + /* won't fit in litExtraBuffer, so it will be split between end of dst and extra buffer */ + dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH; + dctx->litBufferEnd = dctx->litBuffer + litSize - ZSTD_LITBUFFEREXTRASIZE; + } + else { + /* initially this will be stored entirely in dst during huffman decoding, it will partially shifted to litExtraBuffer after */ + dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize; + dctx->litBufferEnd = (BYTE*)dst + expectedWriteSize; + } + dctx->litBufferLocation = ZSTD_split; + } + else + { + /* fits entirely within litExtraBuffer, so no split is necessary */ + dctx->litBuffer = dctx->litExtraBuffer; + dctx->litBufferEnd = dctx->litBuffer + litSize; + dctx->litBufferLocation = ZSTD_not_in_dst; + } +} /* Hidden declaration for fullbench */ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, - const void* src, size_t srcSize); + const void* src, size_t srcSize, + void* dst, size_t dstCapacity, const streaming_operation streaming); /*! ZSTD_decodeLiteralsBlock() : + * Where it is possible to do so without being stomped by the output during decompression, the literals block will be stored + * in the dstBuffer. If there is room to do so, it will be stored in full in the excess dst space after where the current + * block will be output. Otherwise it will be stored at the end of the current dst blockspace, with a small portion being + * stored in dctx->litExtraBuffer to help keep it "ahead" of the current output write. + * * @return : nb of bytes read from src (< srcSize ) * note : symbol not declared but exposed for fullbench */ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, - const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */ + const void* src, size_t srcSize, /* note : srcSize < BLOCKSIZE */ + void* dst, size_t dstCapacity, const streaming_operation streaming) { DEBUGLOG(5, "ZSTD_decodeLiteralsBlock"); RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, ""); @@ -90,7 +131,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, case set_repeat: DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block"); RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, ""); - /* fall-through */ + ZSTD_FALLTHROUGH; case set_compressed: RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3"); @@ -99,6 +140,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, U32 const lhlCode = (istart[0] >> 2) & 3; U32 const lhc = MEM_readLE32(istart); size_t hufSuccess; + size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity); switch(lhlCode) { case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */ @@ -121,8 +163,11 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, litCSize = (lhc >> 22) + ((size_t)istart[4] << 10); break; } + RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, ""); RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, ""); + RETURN_ERROR_IF(expectedWriteSize < litSize , dstSize_tooSmall, ""); + ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 0); /* prefetch huffman table if 
cold */ if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) { @@ -133,11 +178,11 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, if (singleStream) { hufSuccess = HUF_decompress1X_usingDTable_bmi2( dctx->litBuffer, litSize, istart+lhSize, litCSize, - dctx->HUFptr, dctx->bmi2); + dctx->HUFptr, ZSTD_DCtx_get_bmi2(dctx)); } else { hufSuccess = HUF_decompress4X_usingDTable_bmi2( dctx->litBuffer, litSize, istart+lhSize, litCSize, - dctx->HUFptr, dctx->bmi2); + dctx->HUFptr, ZSTD_DCtx_get_bmi2(dctx)); } } else { if (singleStream) { @@ -150,15 +195,22 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, - sizeof(dctx->workspace), dctx->bmi2); + sizeof(dctx->workspace), ZSTD_DCtx_get_bmi2(dctx)); #endif } else { hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, - sizeof(dctx->workspace), dctx->bmi2); + sizeof(dctx->workspace), ZSTD_DCtx_get_bmi2(dctx)); } } + if (dctx->litBufferLocation == ZSTD_split) + { + ZSTD_memcpy(dctx->litExtraBuffer, dctx->litBufferEnd - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE); + ZSTD_memmove(dctx->litBuffer + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH, dctx->litBuffer, litSize - ZSTD_LITBUFFEREXTRASIZE); + dctx->litBuffer += ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH; + dctx->litBufferEnd -= WILDCOPY_OVERLENGTH; + } RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, ""); @@ -166,13 +218,13 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, dctx->litSize = litSize; dctx->litEntropy = 1; if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable; - ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return litCSize + lhSize; } case set_basic: { size_t litSize, lhSize; U32 const lhlCode = ((istart[0]) >> 2) & 3; + size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity); switch(lhlCode) { case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ @@ -189,23 +241,36 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, break; } + RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); + RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, ""); + ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1); if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */ RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, ""); - ZSTD_memcpy(dctx->litBuffer, istart+lhSize, litSize); + if (dctx->litBufferLocation == ZSTD_split) + { + ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize - ZSTD_LITBUFFEREXTRASIZE); + ZSTD_memcpy(dctx->litExtraBuffer, istart + lhSize + litSize - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE); + } + else + { + ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize); + } dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; - ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return lhSize+litSize; } /* direct reference into compressed stream */ dctx->litPtr = istart+lhSize; dctx->litSize = litSize; + dctx->litBufferEnd = dctx->litPtr + litSize; + dctx->litBufferLocation = ZSTD_not_in_dst; return lhSize+litSize; } case set_rle: { U32 const lhlCode = ((istart[0]) >> 2) & 3; size_t litSize, lhSize; + size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity); switch(lhlCode) { case 0: 
case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ @@ -222,8 +287,19 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4"); break; } + RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, ""); - ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH); + RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, ""); + ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1); + if (dctx->litBufferLocation == ZSTD_split) + { + ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize - ZSTD_LITBUFFEREXTRASIZE); + ZSTD_memset(dctx->litExtraBuffer, istart[lhSize], ZSTD_LITBUFFEREXTRASIZE); + } + else + { + ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize); + } dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; return lhSize+1; @@ -495,7 +571,7 @@ static void ZSTD_buildFSETable_body_default(ZSTD_seqSymbol* dt, } #if DYNAMIC_BMI2 -TARGET_ATTRIBUTE("bmi2") static void ZSTD_buildFSETable_body_bmi2(ZSTD_seqSymbol* dt, +BMI2_TARGET_ATTRIBUTE static void ZSTD_buildFSETable_body_bmi2(ZSTD_seqSymbol* dt, const short* normalizedCounter, unsigned maxSymbolValue, const U32* baseValue, const U32* nbAdditionalBits, unsigned tableLog, void* wksp, size_t wkspSize) @@ -620,7 +696,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, LL_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq, dctx->workspace, sizeof(dctx->workspace), - dctx->bmi2); + ZSTD_DCtx_get_bmi2(dctx)); RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed"); ip += llhSize; } @@ -632,7 +708,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, OF_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq, dctx->workspace, sizeof(dctx->workspace), - dctx->bmi2); + ZSTD_DCtx_get_bmi2(dctx)); RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed"); ip += ofhSize; } @@ -644,7 +720,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, ML_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq, dctx->workspace, sizeof(dctx->workspace), - dctx->bmi2); + ZSTD_DCtx_get_bmi2(dctx)); RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed"); ip += mlhSize; } @@ -713,7 +789,7 @@ HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) { * - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart. * The src buffer must be before the dst buffer. */ -static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) { +static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) { ptrdiff_t const diff = op - ip; BYTE* const oend = op + length; @@ -729,6 +805,7 @@ static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_ /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. 
*/ assert(length >= 8); ZSTD_overlapCopy8(&op, &ip, diff); + length -= 8; assert(op - ip >= 8); assert(op <= oend); } @@ -743,12 +820,35 @@ static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_ assert(oend > oend_w); ZSTD_wildcopy(op, ip, oend_w - op, ovtype); ip += oend_w - op; - op = oend_w; + op += oend_w - op; } /* Handle the leftovers. */ while (op < oend) *op++ = *ip++; } +/* ZSTD_safecopyDstBeforeSrc(): + * This version allows overlap with dst before src, or handles the non-overlap case with dst after src + * Kept separate from more common ZSTD_safecopy case to avoid performance impact to the safecopy common case */ +static void ZSTD_safecopyDstBeforeSrc(BYTE* op, BYTE const* ip, ptrdiff_t length) { + ptrdiff_t const diff = op - ip; + BYTE* const oend = op + length; + + if (length < 8 || diff > -8) { + /* Handle short lengths, close overlaps, and dst not before src. */ + while (op < oend) *op++ = *ip++; + return; + } + + if (op <= oend - WILDCOPY_OVERLENGTH && diff < -WILDCOPY_VECLEN) { + ZSTD_wildcopy(op, ip, oend - WILDCOPY_OVERLENGTH - op, ZSTD_no_overlap); + ip += oend - WILDCOPY_OVERLENGTH - op; + op += oend - WILDCOPY_OVERLENGTH - op; + } + + /* Handle the leftovers. */ + while (op < oend) *op++ = *ip++; +} + /* ZSTD_execSequenceEnd(): * This version handles cases that are near the end of the output buffer. It requires * more careful checks to make sure there is no overflow. By separating out these hard @@ -759,9 +859,9 @@ static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_ */ FORCE_NOINLINE size_t ZSTD_execSequenceEnd(BYTE* op, - BYTE* const oend, seq_t sequence, - const BYTE** litPtr, const BYTE* const litLimit, - const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) + BYTE* const oend, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) { BYTE* const oLitEnd = op + sequence.litLength; size_t const sequenceLength = sequence.litLength + sequence.matchLength; @@ -784,27 +884,76 @@ size_t ZSTD_execSequenceEnd(BYTE* op, if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { /* offset beyond prefix */ RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, ""); - match = dictEnd - (prefixStart-match); + match = dictEnd - (prefixStart - match); if (match + sequence.matchLength <= dictEnd) { ZSTD_memmove(oLitEnd, match, sequence.matchLength); return sequenceLength; } /* span extDict & currentPrefixSegment */ { size_t const length1 = dictEnd - match; - ZSTD_memmove(oLitEnd, match, length1); - op = oLitEnd + length1; - sequence.matchLength -= length1; - match = prefixStart; - } } + ZSTD_memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = prefixStart; + } + } + ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst); + return sequenceLength; +} + +/* ZSTD_execSequenceEndSplitLitBuffer(): + * This version is intended to be used during instances where the litBuffer is still split. It is kept separate to avoid performance impact for the good case. 
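Taken together, ZSTD_allocateLiteralsBuffer() above makes a three-way placement decision for the current block's literals, and the enlarged ZSTD_decodingBufferSize_min() earlier in this patch reserves the extra ZSTD_BLOCKSIZE_MAX needed when that buffer lands inside dst (per the comment in that hunk). The stand-alone sketch below mirrors the decision order with local stand-ins; BLOCKSIZE_MAX, OVERLENGTH, LITBUFFER_EXTRA and lit_location are illustrative names, not zstd identifiers:

    #include <stddef.h>

    #define BLOCKSIZE_MAX    (128 * 1024)  /* stands in for ZSTD_BLOCKSIZE_MAX */
    #define OVERLENGTH       32            /* stands in for WILDCOPY_OVERLENGTH */
    #define LITBUFFER_EXTRA  (64 * 1024)   /* stands in for ZSTD_LITBUFFEREXTRASIZE */

    typedef enum { LIT_IN_DST, LIT_SPLIT, LIT_IN_EXTRA } lit_location;

    /* Same ordering of checks as ZSTD_allocateLiteralsBuffer(): prefer dst,
     * then a split, and fall back to the fixed scratch buffer for small sizes. */
    static lit_location choose_lit_location(size_t dstCapacity, size_t litSize, int streaming)
    {
        if (!streaming && dstCapacity > BLOCKSIZE_MAX + OVERLENGTH + litSize + OVERLENGTH)
            return LIT_IN_DST;    /* enough spare room after the block's own output window */
        if (litSize > LITBUFFER_EXTRA)
            return LIT_SPLIT;     /* head stays in dst, tail moves to the scratch buffer */
        return LIT_IN_EXTRA;      /* small enough to live entirely in litExtraBuffer */
    }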
+ */ +FORCE_NOINLINE +size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op, + BYTE* const oend, const BYTE* const oend_w, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) +{ + BYTE* const oLitEnd = op + sequence.litLength; + size_t const sequenceLength = sequence.litLength + sequence.matchLength; + const BYTE* const iLitEnd = *litPtr + sequence.litLength; + const BYTE* match = oLitEnd - sequence.offset; + + + /* bounds checks : careful of address space overflow in 32-bit mode */ + RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer"); + RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer"); + assert(op < op + sequenceLength); + assert(oLitEnd < op + sequenceLength); + + /* copy literals */ + RETURN_ERROR_IF(op > *litPtr && op < *litPtr + sequence.litLength, dstSize_tooSmall, "output should not catch up to and overwrite literal buffer"); + ZSTD_safecopyDstBeforeSrc(op, *litPtr, sequence.litLength); + op = oLitEnd; + *litPtr = iLitEnd; + + /* copy Match */ + if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { + /* offset beyond prefix */ + RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, ""); + match = dictEnd - (prefixStart - match); + if (match + sequence.matchLength <= dictEnd) { + ZSTD_memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } + /* span extDict & currentPrefixSegment */ + { size_t const length1 = dictEnd - match; + ZSTD_memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = prefixStart; + } + } ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst); return sequenceLength; } HINT_INLINE size_t ZSTD_execSequence(BYTE* op, - BYTE* const oend, seq_t sequence, - const BYTE** litPtr, const BYTE* const litLimit, - const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) + BYTE* const oend, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) { BYTE* const oLitEnd = op + sequence.litLength; size_t const sequenceLength = sequence.litLength + sequence.matchLength; @@ -813,6 +962,98 @@ size_t ZSTD_execSequence(BYTE* op, const BYTE* const iLitEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; + assert(op != NULL /* Precondition */); + assert(oend_w < oend /* No underflow */); + /* Handle edge cases in a slow path: + * - Read beyond end of literals + * - Match end is within WILDCOPY_OVERLIMIT of oend + * - 32-bit mode and the match length overflows + */ + if (UNLIKELY( + iLitEnd > litLimit || + oMatchEnd > oend_w || + (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH))) + return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd); + + /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */ + assert(op <= oLitEnd /* No overflow */); + assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */); + assert(oMatchEnd <= oend /* No underflow */); + assert(iLitEnd <= litLimit /* Literal length is in bounds */); + assert(oLitEnd <= oend_w /* Can wildcopy literals */); + assert(oMatchEnd <= oend_w /* Can wildcopy matches */); + + /* Copy Literals: + * Split out litLength 
<= 16 since it is nearly always true. +1.6% on gcc-9. + * We likely don't need the full 32-byte wildcopy. + */ + assert(WILDCOPY_OVERLENGTH >= 16); + ZSTD_copy16(op, (*litPtr)); + if (UNLIKELY(sequence.litLength > 16)) { + ZSTD_wildcopy(op + 16, (*litPtr) + 16, sequence.litLength - 16, ZSTD_no_overlap); + } + op = oLitEnd; + *litPtr = iLitEnd; /* update for next sequence */ + + /* Copy Match */ + if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { + /* offset beyond prefix -> go into extDict */ + RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, ""); + match = dictEnd + (match - prefixStart); + if (match + sequence.matchLength <= dictEnd) { + ZSTD_memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } + /* span extDict & currentPrefixSegment */ + { size_t const length1 = dictEnd - match; + ZSTD_memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = prefixStart; + } + } + /* Match within prefix of 1 or more bytes */ + assert(op <= oMatchEnd); + assert(oMatchEnd <= oend_w); + assert(match >= prefixStart); + assert(sequence.matchLength >= 1); + + /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy + * without overlap checking. + */ + if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) { + /* We bet on a full wildcopy for matches, since we expect matches to be + * longer than literals (in general). In silesia, ~10% of matches are longer + * than 16 bytes. + */ + ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap); + return sequenceLength; + } + assert(sequence.offset < WILDCOPY_VECLEN); + + /* Copy 8 bytes and spread the offset to be >= 8. */ + ZSTD_overlapCopy8(&op, &match, sequence.offset); + + /* If the match length is > 8 bytes, then continue with the wildcopy. 
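ZSTD_execSequence() above keeps the common case fast: a 16-byte copy covers most literal runs, matches with offset >= WILDCOPY_VECLEN are wildcopied without overlap checks, and small offsets first go through ZSTD_overlapCopy8() so the remaining bytes can be copied with an offset of at least 8. The toy program below (illustrative only, not the vectorized zstd path) shows the underlying constraint: an overlapping match must behave as if produced byte by byte in decode order, e.g. offset 1 keeps re-reading the byte the copy just wrote.

    #include <stdio.h>

    int main(void)
    {
        char buf[32] = "abc";                      /* already-decoded prefix */
        size_t op = 3;                             /* current write position */
        size_t const matchLength = 8, offset = 1;  /* offset 1 repeats the last byte */
        size_t i;

        for (i = 0; i < matchLength; i++, op++)
            buf[op] = buf[op - offset];            /* overlap is well-defined byte by byte */

        buf[op] = '\0';
        printf("%s\n", buf);                       /* prints "abccccccccc" */
        return 0;
    }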
*/ + if (sequence.matchLength > 8) { + assert(op < oMatchEnd); + ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8, ZSTD_overlap_src_before_dst); + } + return sequenceLength; +} + +HINT_INLINE +size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op, + BYTE* const oend, const BYTE* const oend_w, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) +{ + BYTE* const oLitEnd = op + sequence.litLength; + size_t const sequenceLength = sequence.litLength + sequence.matchLength; + BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ + const BYTE* const iLitEnd = *litPtr + sequence.litLength; + const BYTE* match = oLitEnd - sequence.offset; + assert(op != NULL /* Precondition */); assert(oend_w < oend /* No underflow */); /* Handle edge cases in a slow path: @@ -824,7 +1065,7 @@ size_t ZSTD_execSequence(BYTE* op, iLitEnd > litLimit || oMatchEnd > oend_w || (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH))) - return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd); + return ZSTD_execSequenceEndSplitLitBuffer(op, oend, oend_w, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd); /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */ assert(op <= oLitEnd /* No overflow */); @@ -892,6 +1133,7 @@ size_t ZSTD_execSequence(BYTE* op, return sequenceLength; } + static void ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt) { @@ -905,20 +1147,10 @@ ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqS } FORCE_INLINE_TEMPLATE void -ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD) +ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, U16 nextState, U32 nbBits) { - ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state]; - U32 const nbBits = DInfo.nbBits; size_t const lowBits = BIT_readBits(bitD, nbBits); - DStatePtr->state = DInfo.nextState + lowBits; -} - -FORCE_INLINE_TEMPLATE void -ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, ZSTD_seqSymbol const DInfo) -{ - U32 const nbBits = DInfo.nbBits; - size_t const lowBits = BIT_readBits(bitD, nbBits); - DStatePtr->state = DInfo.nextState + lowBits; + DStatePtr->state = nextState + lowBits; } /* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum @@ -937,102 +1169,100 @@ FORCE_INLINE_TEMPLATE seq_t ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets) { seq_t seq; - ZSTD_seqSymbol const llDInfo = seqState->stateLL.table[seqState->stateLL.state]; - ZSTD_seqSymbol const mlDInfo = seqState->stateML.table[seqState->stateML.state]; - ZSTD_seqSymbol const ofDInfo = seqState->stateOffb.table[seqState->stateOffb.state]; - U32 const llBase = llDInfo.baseValue; - U32 const mlBase = mlDInfo.baseValue; - U32 const ofBase = ofDInfo.baseValue; - BYTE const llBits = llDInfo.nbAdditionalBits; - BYTE const mlBits = mlDInfo.nbAdditionalBits; - BYTE const ofBits = ofDInfo.nbAdditionalBits; - BYTE const totalBits = llBits+mlBits+ofBits; + const ZSTD_seqSymbol* const llDInfo = seqState->stateLL.table + seqState->stateLL.state; + const ZSTD_seqSymbol* const mlDInfo = seqState->stateML.table + seqState->stateML.state; + const ZSTD_seqSymbol* const ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state; + seq.matchLength = mlDInfo->baseValue; + 
seq.litLength = llDInfo->baseValue; + { U32 const ofBase = ofDInfo->baseValue; + BYTE const llBits = llDInfo->nbAdditionalBits; + BYTE const mlBits = mlDInfo->nbAdditionalBits; + BYTE const ofBits = ofDInfo->nbAdditionalBits; + BYTE const totalBits = llBits+mlBits+ofBits; - /* sequence */ - { size_t offset; - if (ofBits > 1) { - ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); - ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); - assert(ofBits <= MaxOff); - if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) { - U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed); - offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); - BIT_reloadDStream(&seqState->DStream); - if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits); - assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */ - } else { - offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ - if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); - } - seqState->prevOffset[2] = seqState->prevOffset[1]; - seqState->prevOffset[1] = seqState->prevOffset[0]; - seqState->prevOffset[0] = offset; - } else { - U32 const ll0 = (llBase == 0); - if (LIKELY((ofBits == 0))) { - if (LIKELY(!ll0)) - offset = seqState->prevOffset[0]; - else { - offset = seqState->prevOffset[1]; - seqState->prevOffset[1] = seqState->prevOffset[0]; - seqState->prevOffset[0] = offset; + U16 const llNext = llDInfo->nextState; + U16 const mlNext = mlDInfo->nextState; + U16 const ofNext = ofDInfo->nextState; + U32 const llnbBits = llDInfo->nbBits; + U32 const mlnbBits = mlDInfo->nbBits; + U32 const ofnbBits = ofDInfo->nbBits; + /* + * As gcc has better branch and block analyzers, sometimes it is only + * valuable to mark likelyness for clang, it gives around 3-4% of + * performance. + */ + + /* sequence */ + { size_t offset; + #if defined(__clang__) + if (LIKELY(ofBits > 1)) { + #else + if (ofBits > 1) { + #endif + ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); + ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); + assert(ofBits <= MaxOff); + if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) { + U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed); + offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); + BIT_reloadDStream(&seqState->DStream); + if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits); + assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */ + } else { + offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); } + seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset; } else { - offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1); - { size_t temp = (offset==3) ? 
seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; - temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ - if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; - seqState->prevOffset[1] = seqState->prevOffset[0]; - seqState->prevOffset[0] = offset = temp; - } } } - seq.offset = offset; - } - - seq.matchLength = mlBase; - if (mlBits > 0) - seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/); - - if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32)) - BIT_reloadDStream(&seqState->DStream); - if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog))) - BIT_reloadDStream(&seqState->DStream); - /* Ensure there are enough bits to read the rest of data in 64-bit mode. */ - ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); - - seq.litLength = llBase; - if (llBits > 0) - seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/); - - if (MEM_32bits()) - BIT_reloadDStream(&seqState->DStream); - - DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u", - (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); - - /* ANS state update - * gcc-9.0.0 does 2.5% worse with ZSTD_updateFseStateWithDInfo(). - * clang-9.2.0 does 7% worse with ZSTD_updateFseState(). - * Naturally it seems like ZSTD_updateFseStateWithDInfo() should be the - * better option, so it is the default for other compilers. But, if you - * measure that it is worse, please put up a pull request. - */ - { -#if defined(__GNUC__) && !defined(__clang__) - const int kUseUpdateFseState = 1; -#else - const int kUseUpdateFseState = 0; -#endif - if (kUseUpdateFseState) { - ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ - ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ - if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ - ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ - } else { - ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llDInfo); /* <= 9 bits */ - ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlDInfo); /* <= 9 bits */ - if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ - ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofDInfo); /* <= 8 bits */ + U32 const ll0 = (llDInfo->baseValue == 0); + if (LIKELY((ofBits == 0))) { + offset = seqState->prevOffset[ll0]; + seqState->prevOffset[1] = seqState->prevOffset[!ll0]; + seqState->prevOffset[0] = offset; + } else { + offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1); + { size_t temp = (offset==3) ? 
seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; + temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ + if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset = temp; + } } } + seq.offset = offset; } + + #if defined(__clang__) + if (UNLIKELY(mlBits > 0)) + #else + if (mlBits > 0) + #endif + seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/); + + if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32)) + BIT_reloadDStream(&seqState->DStream); + if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog))) + BIT_reloadDStream(&seqState->DStream); + /* Ensure there are enough bits to read the rest of data in 64-bit mode. */ + ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); + + #if defined(__clang__) + if (UNLIKELY(llBits > 0)) + #else + if (llBits > 0) + #endif + seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/); + + if (MEM_32bits()) + BIT_reloadDStream(&seqState->DStream); + + DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u", + (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); + + ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); /* <= 9 bits */ + ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); /* <= 9 bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ + ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); /* <= 8 bits */ } return seq; @@ -1085,9 +1315,11 @@ MEM_STATIC void ZSTD_assertValidSequence( #endif #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG + + FORCE_INLINE_TEMPLATE size_t DONT_VECTORIZE -ZSTD_decompressSequences_body( ZSTD_DCtx* dctx, +ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, @@ -1099,11 +1331,11 @@ ZSTD_decompressSequences_body( ZSTD_DCtx* dctx, BYTE* const oend = ostart + maxDstSize; BYTE* op = ostart; const BYTE* litPtr = dctx->litPtr; - const BYTE* const litEnd = litPtr + dctx->litSize; + const BYTE* litBufferEnd = dctx->litBufferEnd; const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart); const BYTE* const vBase = (const BYTE*) (dctx->virtualStart); const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); - DEBUGLOG(5, "ZSTD_decompressSequences_body"); + DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer"); (void)frame; /* Regen sequences */ @@ -1124,55 +1356,237 @@ ZSTD_decompressSequences_body( ZSTD_DCtx* dctx, BIT_DStream_endOfBuffer < BIT_DStream_completed && BIT_DStream_completed < BIT_DStream_overflow); + /* decompress without overrunning litPtr begins */ + { + seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset); + /* Align the decompression loop to 32 + 16 bytes. + * + * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression + * speed swings based on the alignment of the decompression loop. This + * performance swing is caused by parts of the decompression loop falling + * out of the DSB. The entire decompression loop should fit in the DSB, + * when it can't we get much worse performance. 
You can measure if you've + * hit the good case or the bad case with this perf command for some + * compressed file test.zst: + * + * perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \ + * -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst + * + * If you see most cycles served out of the MITE you've hit the bad case. + * If you see most cycles served out of the DSB you've hit the good case. + * If it is pretty even then you may be in an okay case. + * + * This issue has been reproduced on the following CPUs: + * - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9 + * Use Instruments->Counters to get DSB/MITE cycles. + * I never got performance swings, but I was able to + * go from the good case of mostly DSB to half of the + * cycles served from MITE. + * - Coffeelake: Intel i9-9900k + * - Coffeelake: Intel i7-9700k + * + * I haven't been able to reproduce the instability or DSB misses on any + * of the following CPUS: + * - Haswell + * - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GH + * - Skylake + * + * Alignment is done for each of the three major decompression loops: + * - ZSTD_decompressSequences_bodySplitLitBuffer - presplit section of the literal buffer + * - ZSTD_decompressSequences_bodySplitLitBuffer - postsplit section of the literal buffer + * - ZSTD_decompressSequences_body + * Alignment choices are made to minimize large swings on bad cases and influence on performance + * from changes external to this code, rather than to overoptimize on the current commit. + * + * If you are seeing performance stability this script can help test. + * It tests on 4 commits in zstd where I saw performance change. + * + * https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4 + */ #if defined(__GNUC__) && defined(__x86_64__) - /* Align the decompression loop to 32 + 16 bytes. - * - * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression - * speed swings based on the alignment of the decompression loop. This - * performance swing is caused by parts of the decompression loop falling - * out of the DSB. The entire decompression loop should fit in the DSB, - * when it can't we get much worse performance. You can measure if you've - * hit the good case or the bad case with this perf command for some - * compressed file test.zst: - * - * perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \ - * -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst - * - * If you see most cycles served out of the MITE you've hit the bad case. - * If you see most cycles served out of the DSB you've hit the good case. - * If it is pretty even then you may be in an okay case. - * - * This issue has been reproduced on the following CPUs: - * - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9 - * Use Instruments->Counters to get DSB/MITE cycles. - * I never got performance swings, but I was able to - * go from the good case of mostly DSB to half of the - * cycles served from MITE. - * - Coffeelake: Intel i9-9900k - * - Coffeelake: Intel i7-9700k - * - * I haven't been able to reproduce the instability or DSB misses on any - * of the following CPUS: - * - Haswell - * - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GH - * - Skylake - * - * If you are seeing performance stability this script can help test. - * It tests on 4 commits in zstd where I saw performance change. 
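One functional detail of the rewritten ZSTD_decodeSequence() above is the repeat-offset path: when the offset code carries no extra bits, the offset comes from the stored repcode history, and ll0 (literal length == 0) selects which entry to use. A minimal sketch of that branch, with a plain array standing in for seqState->prevOffset:

    #include <stddef.h>

    /* Mirrors the ofBits == 0 branch above:
     *   offset = prevOffset[ll0]; prevOffset[1] = prevOffset[!ll0]; prevOffset[0] = offset; */
    static size_t pick_repeat_offset(size_t prevOffset[3], int ll0)
    {
        size_t const offset = prevOffset[ll0];  /* ll0 == 1 selects the second repcode */
        prevOffset[1] = prevOffset[!ll0];
        prevOffset[0] = offset;
        return offset;
    }

This is behaviorally equivalent to the removed if/else on ll0, just expressed as an index so the compiler can avoid a branch.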
- * - * https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4 - */ - __asm__(".p2align 6"); - __asm__("nop"); - __asm__(".p2align 5"); - __asm__("nop"); -# if __GNUC__ >= 9 - /* better for gcc-9 and gcc-10, worse for clang and gcc-8 */ - __asm__(".p2align 3"); -# else - __asm__(".p2align 4"); + __asm__(".p2align 6"); +# if __GNUC__ >= 7 + /* good for gcc-7, gcc-9, and gcc-11 */ + __asm__("nop"); + __asm__(".p2align 5"); + __asm__("nop"); + __asm__(".p2align 4"); +# if __GNUC__ == 8 || __GNUC__ == 10 + /* good for gcc-8 and gcc-10 */ + __asm__("nop"); + __asm__(".p2align 3"); +# endif # endif #endif + + /* Handle the initial state where litBuffer is currently split between dst and litExtraBuffer */ + for (; litPtr + sequence.litLength <= dctx->litBufferEnd; ) { + size_t const oneSeqSize = ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence.litLength - WILDCOPY_OVERLENGTH, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); +#endif + if (UNLIKELY(ZSTD_isError(oneSeqSize))) + return oneSeqSize; + DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); + op += oneSeqSize; + if (UNLIKELY(!--nbSeq)) + break; + BIT_reloadDStream(&(seqState.DStream)); + sequence = ZSTD_decodeSequence(&seqState, isLongOffset); + } + + /* If there are more sequences, they will need to read literals from litExtraBuffer; copy over the remainder from dst and update litPtr and litEnd */ + if (nbSeq > 0) { + const size_t leftoverLit = dctx->litBufferEnd - litPtr; + if (leftoverLit) + { + RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer"); + ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit); + sequence.litLength -= leftoverLit; + op += leftoverLit; + } + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; + dctx->litBufferLocation = ZSTD_not_in_dst; + { + size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); +#endif + if (UNLIKELY(ZSTD_isError(oneSeqSize))) + return oneSeqSize; + DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); + op += oneSeqSize; + if (--nbSeq) + BIT_reloadDStream(&(seqState.DStream)); + } + } + } + + if (nbSeq > 0) /* there is remaining lit from extra buffer */ + { + +#if defined(__GNUC__) && defined(__x86_64__) + __asm__(".p2align 6"); + __asm__("nop"); +# if __GNUC__ != 7 + /* worse for gcc-7 better for gcc-8, gcc-9, and gcc-10 and clang */ + __asm__(".p2align 4"); + __asm__("nop"); + __asm__(".p2align 3"); +# elif __GNUC__ >= 11 + __asm__(".p2align 3"); +# else + __asm__(".p2align 5"); + __asm__("nop"); + __asm__(".p2align 3"); +# endif +#endif + + for (; ; ) { + seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset); + size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, 
prefixStart, vBase); +#endif + if (UNLIKELY(ZSTD_isError(oneSeqSize))) + return oneSeqSize; + DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); + op += oneSeqSize; + if (UNLIKELY(!--nbSeq)) + break; + BIT_reloadDStream(&(seqState.DStream)); + } + } + + /* check if reached exact end */ + DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer: after decode loop, remaining nbSeq : %i", nbSeq); + RETURN_ERROR_IF(nbSeq, corruption_detected, ""); + RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, ""); + /* save reps for next block */ + { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); } + } + + /* last literal segment */ + if (dctx->litBufferLocation == ZSTD_split) /* split hasn't been reached yet, first get dst then copy litExtraBuffer */ + { + size_t const lastLLSize = litBufferEnd - litPtr; + RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, ""); + if (op != NULL) { + ZSTD_memmove(op, litPtr, lastLLSize); + op += lastLLSize; + } + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; + dctx->litBufferLocation = ZSTD_not_in_dst; + } + { size_t const lastLLSize = litBufferEnd - litPtr; + RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); + if (op != NULL) { + ZSTD_memcpy(op, litPtr, lastLLSize); + op += lastLLSize; + } + } + + return op-ostart; +} + +FORCE_INLINE_TEMPLATE size_t +DONT_VECTORIZE +ZSTD_decompressSequences_body(ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset, + const int frame) +{ + const BYTE* ip = (const BYTE*)seqStart; + const BYTE* const iend = ip + seqSize; + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = dctx->litBufferLocation == ZSTD_not_in_dst ? 
ostart + maxDstSize : dctx->litBuffer; + BYTE* op = ostart; + const BYTE* litPtr = dctx->litPtr; + const BYTE* const litEnd = litPtr + dctx->litSize; + const BYTE* const prefixStart = (const BYTE*)(dctx->prefixStart); + const BYTE* const vBase = (const BYTE*)(dctx->virtualStart); + const BYTE* const dictEnd = (const BYTE*)(dctx->dictEnd); + DEBUGLOG(5, "ZSTD_decompressSequences_body"); + (void)frame; + + /* Regen sequences */ + if (nbSeq) { + seqState_t seqState; + dctx->fseEntropy = 1; + { U32 i; for (i = 0; i < ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } + RETURN_ERROR_IF( + ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend - ip)), + corruption_detected, ""); + ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); + ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); + ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); + assert(dst != NULL); + + ZSTD_STATIC_ASSERT( + BIT_DStream_unfinished < BIT_DStream_completed && + BIT_DStream_endOfBuffer < BIT_DStream_completed && + BIT_DStream_completed < BIT_DStream_overflow); + +#if defined(__GNUC__) && defined(__x86_64__) + __asm__(".p2align 6"); + __asm__("nop"); +# if __GNUC__ >= 7 + __asm__(".p2align 5"); + __asm__("nop"); + __asm__(".p2align 3"); +# else + __asm__(".p2align 4"); + __asm__("nop"); + __asm__(".p2align 3"); +# endif +#endif + for ( ; ; ) { seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset); size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd); @@ -1218,6 +1632,16 @@ ZSTD_decompressSequences_default(ZSTD_DCtx* dctx, { return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } + +static size_t +ZSTD_decompressSequencesSplitLitBuffer_default(ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset, + const int frame) +{ + return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); +} #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT @@ -1250,10 +1674,10 @@ ZSTD_decompressSequencesLong_body( const BYTE* ip = (const BYTE*)seqStart; const BYTE* const iend = ip + seqSize; BYTE* const ostart = (BYTE*)dst; - BYTE* const oend = ostart + maxDstSize; + BYTE* const oend = dctx->litBufferLocation == ZSTD_in_dst ? 
dctx->litBuffer : ostart + maxDstSize; BYTE* op = ostart; const BYTE* litPtr = dctx->litPtr; - const BYTE* const litEnd = litPtr + dctx->litSize; + const BYTE* litBufferEnd = dctx->litBufferEnd; const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart); const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart); const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); @@ -1289,32 +1713,94 @@ ZSTD_decompressSequencesLong_body( } RETURN_ERROR_IF(seqNblitBufferLocation == ZSTD_split && litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength > dctx->litBufferEnd) + { + /* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */ + const size_t leftoverLit = dctx->litBufferEnd - litPtr; + if (leftoverLit) + { + RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer"); + ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit); + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength -= leftoverLit; + op += leftoverLit; + } + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; + dctx->litBufferLocation = ZSTD_not_in_dst; + oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart); +#endif + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + + prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); + sequences[seqNb & STORED_SEQS_MASK] = sequence; + op += oneSeqSize; + } + else + { + /* lit buffer is either wholly contained in first or second split, or not split at all*/ + oneSeqSize = dctx->litBufferLocation == ZSTD_split ? 
+ ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength - WILDCOPY_OVERLENGTH, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) : + ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart); +#endif + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + + prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); + sequences[seqNb & STORED_SEQS_MASK] = sequence; + op += oneSeqSize; + } } RETURN_ERROR_IF(seqNblitBufferLocation == ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd) + { + const size_t leftoverLit = dctx->litBufferEnd - litPtr; + if (leftoverLit) + { + RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer"); + ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit); + sequence->litLength -= leftoverLit; + op += leftoverLit; + } + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; + dctx->litBufferLocation = ZSTD_not_in_dst; + { + size_t const oneSeqSize = ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) - assert(!ZSTD_isError(oneSeqSize)); - if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart); + assert(!ZSTD_isError(oneSeqSize)); + if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart); #endif - if (ZSTD_isError(oneSeqSize)) return oneSeqSize; - op += oneSeqSize; + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + op += oneSeqSize; + } + } + else + { + size_t const oneSeqSize = dctx->litBufferLocation == ZSTD_split ? 
+ ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence->litLength - WILDCOPY_OVERLENGTH, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) : + ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart); +#endif + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + op += oneSeqSize; + } } /* save reps for next block */ @@ -1322,10 +1808,21 @@ ZSTD_decompressSequencesLong_body( } /* last literal segment */ - { size_t const lastLLSize = litEnd - litPtr; + if (dctx->litBufferLocation == ZSTD_split) /* first deplete literal buffer in dst, then copy litExtraBuffer */ + { + size_t const lastLLSize = litBufferEnd - litPtr; + RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, ""); + if (op != NULL) { + ZSTD_memmove(op, litPtr, lastLLSize); + op += lastLLSize; + } + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; + } + { size_t const lastLLSize = litBufferEnd - litPtr; RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); if (op != NULL) { - ZSTD_memcpy(op, litPtr, lastLLSize); + ZSTD_memmove(op, litPtr, lastLLSize); op += lastLLSize; } } @@ -1349,7 +1846,7 @@ ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx, #if DYNAMIC_BMI2 #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG -static TARGET_ATTRIBUTE("bmi2") size_t +static BMI2_TARGET_ATTRIBUTE size_t DONT_VECTORIZE ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, @@ -1359,10 +1856,20 @@ ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx, { return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } +static BMI2_TARGET_ATTRIBUTE size_t +DONT_VECTORIZE +ZSTD_decompressSequencesSplitLitBuffer_bmi2(ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset, + const int frame) +{ + return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); +} #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT -static TARGET_ATTRIBUTE("bmi2") size_t +static BMI2_TARGET_ATTRIBUTE size_t ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, @@ -1391,11 +1898,25 @@ ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, { DEBUGLOG(5, "ZSTD_decompressSequences"); #if DYNAMIC_BMI2 - if (dctx->bmi2) { + if (ZSTD_DCtx_get_bmi2(dctx)) { return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif - return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); + return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); +} +static size_t +ZSTD_decompressSequencesSplitLitBuffer(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset, + const int frame) +{ + DEBUGLOG(5, "ZSTD_decompressSequencesSplitLitBuffer"); +#if DYNAMIC_BMI2 + if (ZSTD_DCtx_get_bmi2(dctx)) { + return ZSTD_decompressSequencesSplitLitBuffer_bmi2(dctx, 
dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); + } +#endif + return ZSTD_decompressSequencesSplitLitBuffer_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ @@ -1415,7 +1936,7 @@ ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx, { DEBUGLOG(5, "ZSTD_decompressSequencesLong"); #if DYNAMIC_BMI2 - if (dctx->bmi2) { + if (ZSTD_DCtx_get_bmi2(dctx)) { return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif @@ -1456,7 +1977,7 @@ ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable) size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, - const void* src, size_t srcSize, const int frame) + const void* src, size_t srcSize, const int frame, const streaming_operation streaming) { /* blockType == blockCompressed */ const BYTE* ip = (const BYTE*)src; /* isLongOffset must be true if there are long offsets. @@ -1471,7 +1992,7 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, ""); /* Decode literals section */ - { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); + { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, streaming); DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize); if (ZSTD_isError(litCSize)) return litCSize; ip += litCSize; @@ -1519,7 +2040,10 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG /* else */ - return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame); + if (dctx->litBufferLocation == ZSTD_split) + return ZSTD_decompressSequencesSplitLitBuffer(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame); + else + return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame); #endif } } @@ -1542,7 +2066,7 @@ size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, { size_t dSize; ZSTD_checkContinuity(dctx, dst, dstCapacity); - dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0); + dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0, not_streaming); dctx->previousDstEnd = (char*)dst + dSize; return dSize; } diff --git a/lib/decompress/zstd_decompress_block.h b/lib/decompress/zstd_decompress_block.h index 049a0cd84..4828d6a9f 100644 --- a/lib/decompress/zstd_decompress_block.h +++ b/lib/decompress/zstd_decompress_block.h @@ -33,6 +33,12 @@ */ + /* Streaming state is used to inform allocation of the literal buffer */ +typedef enum { + not_streaming = 0, + is_streaming = 1 +} streaming_operation; + /* ZSTD_decompressBlock_internal() : * decompress block, starting at `src`, * into destination buffer `dst`. 
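The streaming_operation flag declared above is what lets the decoder decide whether dst can host the literal buffer: based on the call sites changed in this patch, single-shot frame decompression and ZSTD_decompressBlock() pass not_streaming, while ZSTD_decompressContinue() passes is_streaming. The hypothetical wrapper below (illustrative only, not part of the patch) shows the single-shot call shape:

    /* Illustrative wrapper: decode one lone compressed block in a single shot. */
    static size_t decompress_single_block(ZSTD_DCtx* dctx,
                                          void* dst, size_t dstCapacity,
                                          const void* src, size_t srcSize)
    {
        /* frame = 0      : the block is not part of a frame tracked by dctx
         * not_streaming  : dst is expected to hold the whole regenerated block */
        return ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize,
                                             /* frame */ 0, not_streaming);
    }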
@@ -41,7 +47,7 @@ */ size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, - const void* src, size_t srcSize, const int frame); + const void* src, size_t srcSize, const int frame, const streaming_operation streaming); /* ZSTD_buildFSETable() : * generate FSE decoding table for one symbol (ll, ml or off) diff --git a/lib/decompress/zstd_decompress_internal.h b/lib/decompress/zstd_decompress_internal.h index ebda0c903..51dc34729 100644 --- a/lib/decompress/zstd_decompress_internal.h +++ b/lib/decompress/zstd_decompress_internal.h @@ -20,7 +20,7 @@ * Dependencies *********************************************************/ #include "../common/mem.h" /* BYTE, U16, U32 */ -#include "../common/zstd_internal.h" /* ZSTD_seqSymbol */ +#include "../common/zstd_internal.h" /* constants : MaxLL, MaxML, MaxOff, LLFSELog, etc. */ @@ -106,6 +106,22 @@ typedef struct { size_t ddictPtrCount; } ZSTD_DDictHashSet; +#ifndef ZSTD_DECODER_INTERNAL_BUFFER +# define ZSTD_DECODER_INTERNAL_BUFFER (1 << 16) +#endif + +#define ZSTD_LBMIN 64 +#define ZSTD_LBMAX (128 << 10) + +/* extra buffer, compensates when dst is not large enough to store litBuffer */ +#define ZSTD_LITBUFFEREXTRASIZE BOUNDED(ZSTD_LBMIN, ZSTD_DECODER_INTERNAL_BUFFER, ZSTD_LBMAX) + +typedef enum { + ZSTD_not_in_dst = 0, /* Stored entirely within litExtraBuffer */ + ZSTD_in_dst = 1, /* Stored entirely within dst (in memory after current output write) */ + ZSTD_split = 2 /* Split between litExtraBuffer and dst */ +} ZSTD_litLocation_e; + struct ZSTD_DCtx_s { const ZSTD_seqSymbol* LLTptr; @@ -136,7 +152,9 @@ struct ZSTD_DCtx_s size_t litSize; size_t rleSize; size_t staticSize; +#if DYNAMIC_BMI2 != 0 int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */ +#endif /* dictionary */ ZSTD_DDict* ddictLocal; @@ -158,16 +176,21 @@ struct ZSTD_DCtx_s size_t outStart; size_t outEnd; size_t lhSize; +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) void* legacyContext; U32 previousLegacyVersion; U32 legacyVersion; +#endif U32 hostageByte; int noForwardProgress; ZSTD_bufferMode_e outBufferMode; ZSTD_outBuffer expectedOutBuffer; /* workspace */ - BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH]; + BYTE* litBuffer; + const BYTE* litBufferEnd; + ZSTD_litLocation_e litBufferLocation; + BYTE litExtraBuffer[ZSTD_LITBUFFEREXTRASIZE + WILDCOPY_OVERLENGTH]; /* literal buffer can be split between storage within dst and within this scratch buffer */ BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; size_t oversizedDuration; @@ -183,6 +206,14 @@ struct ZSTD_DCtx_s #endif }; /* typedef'd to ZSTD_DCtx within "zstd.h" */ +MEM_STATIC int ZSTD_DCtx_get_bmi2(const struct ZSTD_DCtx_s *dctx) { +#if DYNAMIC_BMI2 != 0 + return dctx->bmi2; +#else + (void)dctx; + return 0; +#endif +} /*-******************************************************* * Shared internal functions diff --git a/lib/dictBuilder/cover.c b/lib/dictBuilder/cover.c index 8364444d1..028802a1b 100644 --- a/lib/dictBuilder/cover.c +++ b/lib/dictBuilder/cover.c @@ -40,6 +40,13 @@ /*-************************************* * Constants ***************************************/ +/** +* There are 32bit indexes used to ref samples, so limit samples size to 4GB +* on 64bit builds. +* For 32bit builds we choose 1 GB. +* Most 32bit platforms have 2GB user-mode addressable space and we allocate a large +* contiguous buffer, so 1GB is already a high limit. +*/ #define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? 
((unsigned)-1) : ((unsigned)1 GB)) #define COVER_DEFAULT_SPLITPOINT 1.0 @@ -47,7 +54,7 @@ * Console display ***************************************/ #ifndef LOCALDISPLAYLEVEL -static int g_displayLevel = 2; +static int g_displayLevel = 0; #endif #undef DISPLAY #define DISPLAY(...) \ @@ -735,7 +742,7 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( COVER_map_t activeDmers; parameters.splitPoint = 1.0; /* Initialize global data */ - g_displayLevel = parameters.zParams.notificationLevel; + g_displayLevel = (int)parameters.zParams.notificationLevel; /* Checks */ if (!COVER_checkParameters(parameters, dictBufferCapacity)) { DISPLAYLEVEL(1, "Cover parameters incorrect\n"); diff --git a/lib/dictBuilder/fastcover.c b/lib/dictBuilder/fastcover.c index ed789f92f..3352859ad 100644 --- a/lib/dictBuilder/fastcover.c +++ b/lib/dictBuilder/fastcover.c @@ -32,6 +32,13 @@ /*-************************************* * Constants ***************************************/ +/** +* There are 32bit indexes used to ref samples, so limit samples size to 4GB +* on 64bit builds. +* For 32bit builds we choose 1 GB. +* Most 32bit platforms have 2GB user-mode addressable space and we allocate a large +* contiguous buffer, so 1GB is already a high limit. +*/ #define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB)) #define FASTCOVER_MAX_F 31 #define FASTCOVER_MAX_ACCEL 10 @@ -44,7 +51,7 @@ * Console display ***************************************/ #ifndef LOCALDISPLAYLEVEL -static int g_displayLevel = 2; +static int g_displayLevel = 0; #endif #undef DISPLAY #define DISPLAY(...) \ @@ -549,7 +556,7 @@ ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity, ZDICT_cover_params_t coverParams; FASTCOVER_accel_t accelParams; /* Initialize global data */ - g_displayLevel = parameters.zParams.notificationLevel; + g_displayLevel = (int)parameters.zParams.notificationLevel; /* Assign splitPoint and f if not provided */ parameters.splitPoint = 1.0; parameters.f = parameters.f == 0 ? DEFAULT_F : parameters.f; @@ -632,7 +639,7 @@ ZDICT_optimizeTrainFromBuffer_fastCover( const unsigned accel = parameters->accel == 0 ? 
DEFAULT_ACCEL : parameters->accel; const unsigned shrinkDict = 0; /* Local variables */ - const int displayLevel = parameters->zParams.notificationLevel; + const int displayLevel = (int)parameters->zParams.notificationLevel; unsigned iteration = 1; unsigned d; unsigned k; @@ -716,7 +723,7 @@ ZDICT_optimizeTrainFromBuffer_fastCover( data->parameters.splitPoint = splitPoint; data->parameters.steps = kSteps; data->parameters.shrinkDict = shrinkDict; - data->parameters.zParams.notificationLevel = g_displayLevel; + data->parameters.zParams.notificationLevel = (unsigned)g_displayLevel; /* Check the parameters */ if (!FASTCOVER_checkParameters(data->parameters, dictBufferCapacity, data->ctx->f, accel)) { diff --git a/lib/dictBuilder/zdict.c b/lib/dictBuilder/zdict.c index 459cbe4d1..8b8b381ed 100644 --- a/lib/dictBuilder/zdict.c +++ b/lib/dictBuilder/zdict.c @@ -135,22 +135,32 @@ static unsigned ZDICT_NbCommonBytes (size_t val) if (MEM_isLittleEndian()) { if (MEM_64bits()) { # if defined(_MSC_VER) && defined(_WIN64) - unsigned long r = 0; - _BitScanForward64( &r, (U64)val ); - return (unsigned)(r>>3); + if (val != 0) { + unsigned long r; + _BitScanForward64(&r, (U64)val); + return (unsigned)(r >> 3); + } else { + /* Should not reach this code path */ + __assume(0); + } # elif defined(__GNUC__) && (__GNUC__ >= 3) - return (__builtin_ctzll((U64)val) >> 3); + return (unsigned)(__builtin_ctzll((U64)val) >> 3); # else static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; # endif } else { /* 32 bits */ # if defined(_MSC_VER) - unsigned long r=0; - _BitScanForward( &r, (U32)val ); - return (unsigned)(r>>3); + if (val != 0) { + unsigned long r; + _BitScanForward(&r, (U32)val); + return (unsigned)(r >> 3); + } else { + /* Should not reach this code path */ + __assume(0); + } # elif defined(__GNUC__) && (__GNUC__ >= 3) - return (__builtin_ctz((U32)val) >> 3); + return (unsigned)(__builtin_ctz((U32)val) >> 3); # else static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; @@ -159,11 +169,16 @@ static unsigned ZDICT_NbCommonBytes (size_t val) } else { /* Big Endian CPU */ if (MEM_64bits()) { # if defined(_MSC_VER) && defined(_WIN64) - unsigned long r = 0; - _BitScanReverse64( &r, val ); - return (unsigned)(r>>3); + if (val != 0) { + unsigned long r; + _BitScanReverse64(&r, val); + return (unsigned)(r >> 3); + } else { + /* Should not reach this code path */ + __assume(0); + } # elif defined(__GNUC__) && (__GNUC__ >= 3) - return (__builtin_clzll(val) >> 3); + return (unsigned)(__builtin_clzll(val) >> 3); # else unsigned r; const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ @@ -174,11 +189,16 @@ static unsigned ZDICT_NbCommonBytes (size_t val) # endif } else { /* 32 bits */ # if defined(_MSC_VER) - unsigned long r = 0; - _BitScanReverse( &r, (unsigned long)val ); - return (unsigned)(r>>3); + if (val != 0) { + unsigned long r; + _BitScanReverse(&r, (unsigned long)val); + return (unsigned)(r >> 3); + } else { + /* Should not reach this code path */ + __assume(0); + } # elif defined(__GNUC__) && (__GNUC__ >= 3) - return 
(__builtin_clz((U32)val) >> 3); + return (unsigned)(__builtin_clz((U32)val) >> 3); # else unsigned r; if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } @@ -235,7 +255,7 @@ static dictItem ZDICT_analyzePos( U32 savings[LLIMIT] = {0}; const BYTE* b = (const BYTE*)buffer; size_t maxLength = LLIMIT; - size_t pos = suffix[start]; + size_t pos = (size_t)suffix[start]; U32 end = start; dictItem solution; @@ -369,7 +389,7 @@ static dictItem ZDICT_analyzePos( savings[i] = savings[i-1] + (lengthList[i] * (i-3)); DISPLAYLEVEL(4, "Selected dict at position %u, of length %u : saves %u (ratio: %.2f) \n", - (unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / maxLength); + (unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / (double)maxLength); solution.pos = (U32)pos; solution.length = (U32)maxLength; @@ -379,7 +399,7 @@ static dictItem ZDICT_analyzePos( { U32 id; for (id=start; id= elt.pos) && (table[u].pos < elt.pos)) { /* overlap, existing < new */ /* append */ - int const addedLength = (int)eltEnd - (table[u].pos + table[u].length); + int const addedLength = (int)eltEnd - (int)(table[u].pos + table[u].length); table[u].savings += elt.length / 8; /* rough approx bonus */ if (addedLength > 0) { /* otherwise, elt fully included into existing */ table[u].length += addedLength; @@ -766,6 +786,13 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, pos += fileSizes[u]; } + if (notificationLevel >= 4) { + /* writeStats */ + DISPLAYLEVEL(4, "Offset Code Frequencies : \n"); + for (u=0; u<=offcodeMax; u++) { + DISPLAYLEVEL(4, "%2u :%7u \n", u, offcodeCount[u]); + } } + /* analyze, build stats, starting with literals */ { size_t maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog); if (HUF_isError(maxNbBits)) { @@ -872,7 +899,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, MEM_writeLE32(dstPtr+8, bestRepOffset[2].offset); #else /* at this stage, we don't use the result of "most common first offset", - as the impact of statistics is not properly evaluated */ + * as the impact of statistics is not properly evaluated */ MEM_writeLE32(dstPtr+0, repStartValue[0]); MEM_writeLE32(dstPtr+4, repStartValue[1]); MEM_writeLE32(dstPtr+8, repStartValue[2]); @@ -888,6 +915,17 @@ _cleanup: } +/** + * @returns the maximum repcode value + */ +static U32 ZDICT_maxRep(U32 const reps[ZSTD_REP_NUM]) +{ + U32 maxRep = reps[0]; + int r; + for (r = 1; r < ZSTD_REP_NUM; ++r) + maxRep = MAX(maxRep, reps[r]); + return maxRep; +} size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity, const void* customDictContent, size_t dictContentSize, @@ -899,11 +937,13 @@ size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity, BYTE header[HBUFFSIZE]; int const compressionLevel = (params.compressionLevel == 0) ? 
                                                        ZSTD_CLEVEL_DEFAULT : params.compressionLevel;
     U32 const notificationLevel = params.notificationLevel;
+    /* The final dictionary content must be at least as large as the largest repcode */
+    size_t const minContentSize = (size_t)ZDICT_maxRep(repStartValue);
+    size_t paddingSize;
     /* check conditions */
     DEBUGLOG(4, "ZDICT_finalizeDictionary");
     if (dictBufferCapacity < dictContentSize) return ERROR(dstSize_tooSmall);
-    if (dictContentSize < ZDICT_CONTENTSIZE_MIN) return ERROR(srcSize_wrong);
     if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) return ERROR(dstSize_tooSmall);
     /* dictionary header */
@@ -927,12 +967,43 @@ size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
         hSize += eSize;
     }
-    /* copy elements in final buffer ; note : src and dst buffer can overlap */
-    if (hSize + dictContentSize > dictBufferCapacity) dictContentSize = dictBufferCapacity - hSize;
-    {   size_t const dictSize = hSize + dictContentSize;
-        char* dictEnd = (char*)dictBuffer + dictSize;
-        memmove(dictEnd - dictContentSize, customDictContent, dictContentSize);
-        memcpy(dictBuffer, header, hSize);
+    /* Shrink the content size if it doesn't fit in the buffer */
+    if (hSize + dictContentSize > dictBufferCapacity) {
+        dictContentSize = dictBufferCapacity - hSize;
+    }
+
+    /* Pad the dictionary content with zeros if it is too small */
+    if (dictContentSize < minContentSize) {
+        RETURN_ERROR_IF(hSize + minContentSize > dictBufferCapacity, dstSize_tooSmall,
+                        "dictBufferCapacity too small to fit max repcode");
+        paddingSize = minContentSize - dictContentSize;
+    } else {
+        paddingSize = 0;
+    }
+
+    {
+        size_t const dictSize = hSize + paddingSize + dictContentSize;
+
+        /* The dictionary consists of the header, optional padding, and the content.
+         * The padding comes before the content because the "best" position in the
+         * dictionary is the last byte.
+         */
+        BYTE* const outDictHeader = (BYTE*)dictBuffer;
+        BYTE* const outDictPadding = outDictHeader + hSize;
+        BYTE* const outDictContent = outDictPadding + paddingSize;
+
+        assert(dictSize <= dictBufferCapacity);
+        assert(outDictContent + dictContentSize == (BYTE*)dictBuffer + dictSize);
+
+        /* First copy the customDictContent into its final location.
+         * `customDictContent` and `dictBuffer` may overlap, so we must
+         * do this before any other writes into the output buffer.
+         * Then copy the header & padding into the output buffer.
+         */
+        memmove(outDictContent, customDictContent, dictContentSize);
+        memcpy(outDictHeader, header, hSize);
+        memset(outDictPadding, 0, paddingSize);
+        return dictSize;
     }
 }
diff --git a/lib/legacy/zstd_v01.c b/lib/legacy/zstd_v01.c
index 7ab554797..6be9c8105 100644
--- a/lib/legacy/zstd_v01.c
+++ b/lib/legacy/zstd_v01.c
@@ -343,8 +343,7 @@ FORCE_INLINE unsigned FSE_highbit32 (U32 val)
 {
 #   if defined(_MSC_VER)   /* Visual */
     unsigned long r;
-    _BitScanReverse ( &r, val );
-    return (unsigned) r;
+    return _BitScanReverse(&r, val) ? (unsigned)r : 0;
 #   elif defined(__GNUC__) && (GCC_VERSION >= 304)   /* GCC Intrinsic */
     return __builtin_clz (val) ^ 31;
 #   else   /* Software version */
diff --git a/lib/legacy/zstd_v02.c b/lib/legacy/zstd_v02.c
index 89fdc7169..f26b99bc8 100644
--- a/lib/legacy/zstd_v02.c
+++ b/lib/legacy/zstd_v02.c
@@ -353,9 +353,8 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
 MEM_STATIC unsigned BIT_highbit32 (U32 val)
 {
 #   if defined(_MSC_VER)   /* Visual */
-    unsigned long r=0;
-    _BitScanReverse ( &r, val );
-    return (unsigned) r;
+    unsigned long r;
+    return _BitScanReverse(&r, val) ?
(unsigned)r : 0; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ return __builtin_clz (val) ^ 31; # else /* Software version */ diff --git a/lib/legacy/zstd_v03.c b/lib/legacy/zstd_v03.c index 5262d515a..744bde515 100644 --- a/lib/legacy/zstd_v03.c +++ b/lib/legacy/zstd_v03.c @@ -356,9 +356,8 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); MEM_STATIC unsigned BIT_highbit32 (U32 val) { # if defined(_MSC_VER) /* Visual */ - unsigned long r=0; - _BitScanReverse ( &r, val ); - return (unsigned) r; + unsigned long r; + return _BitScanReverse(&r, val) ? (unsigned)r : 0; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ return __builtin_clz (val) ^ 31; # else /* Software version */ diff --git a/lib/legacy/zstd_v04.c b/lib/legacy/zstd_v04.c index bee1b99dd..4d336b59f 100644 --- a/lib/legacy/zstd_v04.c +++ b/lib/legacy/zstd_v04.c @@ -627,9 +627,8 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); MEM_STATIC unsigned BIT_highbit32 (U32 val) { # if defined(_MSC_VER) /* Visual */ - unsigned long r=0; - _BitScanReverse ( &r, val ); - return (unsigned) r; + unsigned long r; + return _BitScanReverse(&r, val) ? (unsigned)r : 0; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ return __builtin_clz (val) ^ 31; # else /* Software version */ diff --git a/lib/legacy/zstd_v05.c b/lib/legacy/zstd_v05.c index eb8966bb4..bc293661e 100644 --- a/lib/legacy/zstd_v05.c +++ b/lib/legacy/zstd_v05.c @@ -756,9 +756,8 @@ MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits); MEM_STATIC unsigned BITv05_highbit32 (U32 val) { # if defined(_MSC_VER) /* Visual */ - unsigned long r=0; - _BitScanReverse ( &r, val ); - return (unsigned) r; + unsigned long r; + return _BitScanReverse(&r, val) ? (unsigned)r : 0; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ return __builtin_clz (val) ^ 31; # else /* Software version */ diff --git a/lib/legacy/zstd_v06.c b/lib/legacy/zstd_v06.c index fcb16d4d8..7d6a35922 100644 --- a/lib/legacy/zstd_v06.c +++ b/lib/legacy/zstd_v06.c @@ -860,9 +860,8 @@ MEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, unsigned nbBits); MEM_STATIC unsigned BITv06_highbit32 ( U32 val) { # if defined(_MSC_VER) /* Visual */ - unsigned long r=0; - _BitScanReverse ( &r, val ); - return (unsigned) r; + unsigned long r; + return _BitScanReverse(&r, val) ? (unsigned)r : 0; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ return __builtin_clz (val) ^ 31; # else /* Software version */ diff --git a/lib/legacy/zstd_v07.c b/lib/legacy/zstd_v07.c index 0d0e46609..1449ef3ab 100644 --- a/lib/legacy/zstd_v07.c +++ b/lib/legacy/zstd_v07.c @@ -530,9 +530,8 @@ MEM_STATIC size_t BITv07_readBitsFast(BITv07_DStream_t* bitD, unsigned nbBits); MEM_STATIC unsigned BITv07_highbit32 (U32 val) { # if defined(_MSC_VER) /* Visual */ - unsigned long r=0; - _BitScanReverse ( &r, val ); - return (unsigned) r; + unsigned long r; + return _BitScanReverse(&r, val) ? (unsigned)r : 0; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ return __builtin_clz (val) ^ 31; # else /* Software version */ diff --git a/lib/libzstd.mk b/lib/libzstd.mk new file mode 100644 index 000000000..45ee1ce62 --- /dev/null +++ b/lib/libzstd.mk @@ -0,0 +1,185 @@ +# ################################################################ +# Copyright (c) Yann Collet, Facebook, Inc. +# All rights reserved. 
+# +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree). +# You may select, at your option, one of the above-listed licenses. +# ################################################################ + +################################################################## +# Input Variables +################################################################## + +# Zstd lib directory +LIBZSTD ?= ./ + +# Legacy support +ZSTD_LEGACY_SUPPORT ?= 5 +ZSTD_LEGACY_MULTITHREADED_API ?= 0 + +# Build size optimizations +HUF_FORCE_DECOMPRESS_X1 ?= 0 +HUF_FORCE_DECOMPRESS_X2 ?= 0 +ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT ?= 0 +ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG ?= 0 +ZSTD_NO_INLINE ?= 0 +ZSTD_STRIP_ERROR_STRINGS ?= 0 + +# Assembly support +ZSTD_NO_ASM ?= 0 + +################################################################## +# libzstd helpers +################################################################## + +# Make 4.3 doesn't support '\#' anymore (https://lwn.net/Articles/810071/) +NUM_SYMBOL := \# + +# define silent mode as default (verbose mode with V=1 or VERBOSE=1) +$(V)$(VERBOSE).SILENT: + +# When cross-compiling from linux to windows, +# one might need to specify TARGET_SYSTEM as "Windows." +# Building from Fedora fails without it. +# (but Ubuntu and Debian don't need to set anything) +TARGET_SYSTEM ?= $(OS) + +# Version numbers +LIBVER_SRC := $(LIBZSTD)/zstd.h +LIBVER_MAJOR_SCRIPT:=`sed -n '/define ZSTD_VERSION_MAJOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)` +LIBVER_MINOR_SCRIPT:=`sed -n '/define ZSTD_VERSION_MINOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)` +LIBVER_PATCH_SCRIPT:=`sed -n '/define ZSTD_VERSION_RELEASE/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)` +LIBVER_SCRIPT:= $(LIBVER_MAJOR_SCRIPT).$(LIBVER_MINOR_SCRIPT).$(LIBVER_PATCH_SCRIPT) +LIBVER_MAJOR := $(shell echo $(LIBVER_MAJOR_SCRIPT)) +LIBVER_MINOR := $(shell echo $(LIBVER_MINOR_SCRIPT)) +LIBVER_PATCH := $(shell echo $(LIBVER_PATCH_SCRIPT)) +LIBVER := $(shell echo $(LIBVER_SCRIPT)) +CCVER := $(shell $(CC) --version) +ZSTD_VERSION?= $(LIBVER) + +# ZSTD_LIB_MINIFY is a helper variable that +# configures a bunch of other variables to space-optimized defaults. +ZSTD_LIB_MINIFY ?= 0 +ifneq ($(ZSTD_LIB_MINIFY), 0) + HAVE_CC_OZ ?= $(shell echo "" | $(CC) -Oz -x c -c - -o /dev/null 2> /dev/null && echo 1 || echo 0) + ZSTD_LEGACY_SUPPORT ?= 0 + ZSTD_LIB_DEPRECATED ?= 0 + HUF_FORCE_DECOMPRESS_X1 ?= 1 + ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT ?= 1 + ZSTD_NO_INLINE ?= 1 + ZSTD_STRIP_ERROR_STRINGS ?= 1 +ifneq ($(HAVE_CC_OZ), 0) + # Some compilers (clang) support an even more space-optimized setting. 
+ CFLAGS += -Oz +else + CFLAGS += -Os +endif + CFLAGS += -fno-stack-protector -fomit-frame-pointer -fno-ident \ + -DDYNAMIC_BMI2=0 -DNDEBUG +else + CFLAGS += -O3 +endif + +DEBUGLEVEL ?= 0 +CPPFLAGS += -DXXH_NAMESPACE=ZSTD_ -DDEBUGLEVEL=$(DEBUGLEVEL) +ifeq ($(TARGET_SYSTEM),Windows_NT) # MinGW assumed + CPPFLAGS += -D__USE_MINGW_ANSI_STDIO # compatibility with %zu formatting +endif +DEBUGFLAGS= -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \ + -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \ + -Wstrict-prototypes -Wundef -Wpointer-arith \ + -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \ + -Wredundant-decls -Wmissing-prototypes -Wc++-compat +CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS) +LDFLAGS += $(MOREFLAGS) +FLAGS = $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) + +HAVE_COLORNEVER = $(shell echo a | grep --color=never a > /dev/null 2> /dev/null && echo 1 || echo 0) +GREP_OPTIONS ?= +ifeq ($HAVE_COLORNEVER, 1) + GREP_OPTIONS += --color=never +endif +GREP = grep $(GREP_OPTIONS) +SED_ERE_OPT ?= -E + +ZSTD_COMMON_FILES := $(sort $(wildcard $(LIBZSTD)/common/*.c)) +ZSTD_COMPRESS_FILES := $(sort $(wildcard $(LIBZSTD)/compress/*.c)) +ZSTD_DECOMPRESS_FILES := $(sort $(wildcard $(LIBZSTD)/decompress/*.c)) +ZSTD_DICTBUILDER_FILES := $(sort $(wildcard $(LIBZSTD)/dictBuilder/*.c)) +ZSTD_DEPRECATED_FILES := $(sort $(wildcard $(LIBZSTD)/deprecated/*.c)) +ZSTD_LEGACY_FILES := + +ZSTD_DECOMPRESS_AMD64_ASM_FILES := $(sort $(wildcard $(LIBZSTD)/decompress/*_amd64.S)) + +ifneq ($(ZSTD_NO_ASM), 0) + CPPFLAGS += -DHUF_DISABLE_ASM +else + # Unconditionally add the ASM files they are disabled by + # macros in the .S file. + ZSTD_DECOMPRESS_FILES += $(ZSTD_DECOMPRESS_AMD64_ASM_FILES) +endif + +ifneq ($(HUF_FORCE_DECOMPRESS_X1), 0) + CFLAGS += -DHUF_FORCE_DECOMPRESS_X1 +endif + +ifneq ($(HUF_FORCE_DECOMPRESS_X2), 0) + CFLAGS += -DHUF_FORCE_DECOMPRESS_X2 +endif + +ifneq ($(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT), 0) + CFLAGS += -DZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT +endif + +ifneq ($(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG), 0) + CFLAGS += -DZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG +endif + +ifneq ($(ZSTD_NO_INLINE), 0) + CFLAGS += -DZSTD_NO_INLINE +endif + +ifneq ($(ZSTD_STRIP_ERROR_STRINGS), 0) + CFLAGS += -DZSTD_STRIP_ERROR_STRINGS +endif + +ifneq ($(ZSTD_LEGACY_MULTITHREADED_API), 0) + CFLAGS += -DZSTD_LEGACY_MULTITHREADED_API +endif + +ifneq ($(ZSTD_LEGACY_SUPPORT), 0) +ifeq ($(shell test $(ZSTD_LEGACY_SUPPORT) -lt 8; echo $$?), 0) + ZSTD_LEGACY_FILES += $(shell ls $(LIBZSTD)/legacy/*.c | $(GREP) 'v0[$(ZSTD_LEGACY_SUPPORT)-7]') +endif +endif +CPPFLAGS += -DZSTD_LEGACY_SUPPORT=$(ZSTD_LEGACY_SUPPORT) + +UNAME := $(shell uname) + +ifndef BUILD_DIR +ifeq ($(UNAME), Darwin) + ifeq ($(shell md5 < /dev/null > /dev/null; echo $$?), 0) + HASH ?= md5 + endif +else ifeq ($(UNAME), FreeBSD) + HASH ?= gmd5sum +else ifeq ($(UNAME), NetBSD) + HASH ?= md5 -n +else ifeq ($(UNAME), OpenBSD) + HASH ?= md5 +endif +HASH ?= md5sum + +HASH_DIR = conf_$(shell echo $(CC) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) $(ZSTD_FILES) | $(HASH) | cut -f 1 -d " " ) +HAVE_HASH :=$(shell echo 1 | $(HASH) > /dev/null && echo 1 || echo 0) +ifeq ($(HAVE_HASH),0) + $(info warning : could not find HASH ($(HASH)), needed to differentiate builds using different flags) + BUILD_DIR := obj/generic_noconf +endif +endif # BUILD_DIR + +ZSTD_SUBDIR := $(LIBZSTD)/common $(LIBZSTD)/compress $(LIBZSTD)/decompress $(LIBZSTD)/dictBuilder $(LIBZSTD)/legacy $(LIBZSTD)/deprecated +vpath %.c $(ZSTD_SUBDIR) +vpath %.S $(ZSTD_SUBDIR) diff --git 
a/lib/modulemap/module.modulemap b/lib/modulemap/module.modulemap
new file mode 100644
index 000000000..eeda69885
--- /dev/null
+++ b/lib/modulemap/module.modulemap
@@ -0,0 +1,4 @@
+module libzstd [extern_c] {
+    header "../zstd.h"
+    export *
+}
diff --git a/lib/zdict.h b/lib/zdict.h
index 75b05dbf4..f1e139a40 100644
--- a/lib/zdict.h
+++ b/lib/zdict.h
@@ -46,7 +46,7 @@ extern "C" {
 *
 * Zstd can use dictionaries to improve compression ratio of small data.
 * Traditionally small files don't compress well because there is very little
-* repetion in a single sample, since it is small. But, if you are compressing
+* repetition in a single sample, since it is small. But, if you are compressing
 * many similar files, like a bunch of JSON records that share the same
 * structure, you can train a dictionary on ahead of time on some samples of
 * these files. Then, zstd can use the dictionary to find repetitions that are
@@ -132,7 +132,7 @@ extern "C" {
 *
 * # Benchmark levels 1-3 without a dictionary
 * zstd -b1e3 -r /path/to/my/files
-* # Benchmark levels 1-3 with a dictioanry
+* # Benchmark levels 1-3 with a dictionary
 * zstd -b1e3 -r /path/to/my/files -D /path/to/my/dictionary
 *
 * When should I retrain a dictionary?
@@ -237,7 +237,6 @@ typedef struct {
 * is presumed that the most profitable content is at the end of the dictionary,
 * since that is the cheapest to reference.
 *
-* `dictContentSize` must be >= ZDICT_CONTENTSIZE_MIN bytes.
 * `maxDictSize` must be >= max(dictContentSize, ZSTD_DICTSIZE_MIN).
 *
 * @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`),
@@ -272,8 +271,9 @@ ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode);
 * Use them only in association with static linking.
 * ==================================================================================== */
-#define ZDICT_CONTENTSIZE_MIN 128
 #define ZDICT_DICTSIZE_MIN 256
+/* Deprecated: Remove in v1.6.0 */
+#define ZDICT_CONTENTSIZE_MIN 128
 /*! ZDICT_cover_params_t:
 * k and d are the only required parameters.
diff --git a/lib/zstd.h b/lib/zstd.h
index c3f642d47..66ec125d0 100644
--- a/lib/zstd.h
+++ b/lib/zstd.h
@@ -74,7 +74,7 @@ extern "C" {
 /*------ Version ------*/
 #define ZSTD_VERSION_MAJOR 1
 #define ZSTD_VERSION_MINOR 5
-#define ZSTD_VERSION_RELEASE 0
+#define ZSTD_VERSION_RELEASE 1
 #define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
 /*! ZSTD_versionNumber() :
@@ -249,7 +249,7 @@ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
 *
 * It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
 *
-* This API supercedes all other "advanced" API entry points in the experimental section.
+* This API supersedes all other "advanced" API entry points in the experimental section.
 * In the future, we expect to remove from experimental API entry points which are redundant with this API.
 */
@@ -419,7 +419,7 @@ typedef enum {
 * ZSTD_c_stableOutBuffer
 * ZSTD_c_blockDelimiters
 * ZSTD_c_validateSequences
-* ZSTD_c_splitBlocks
+* ZSTD_c_useBlockSplitter
 * ZSTD_c_useRowMatchFinder
 * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
 * note : never ever use experimentalParam? names directly;
@@ -934,7 +934,7 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
 * Advanced dictionary and prefix API (Requires v1.4.0+)
 *
 * This API allows dictionaries to be used with ZSTD_compress2(),
-* ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
+* ZSTD_compressStream2(), and ZSTD_decompressDCtx(). Dictionaries are sticky, and
 * only reset with the context is reset with ZSTD_reset_parameters or
 * ZSTD_reset_session_and_parameters. Prefixes are single-use.
 ******************************************************************************/
@@ -1170,9 +1170,6 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
 #define ZSTD_SRCSIZEHINT_MIN 0
 #define ZSTD_SRCSIZEHINT_MAX INT_MAX
-/* internal */
-#define ZSTD_HASHLOG3_MAX 17
 /* --- Advanced types --- */
@@ -1315,10 +1312,14 @@ typedef enum {
 } ZSTD_literalCompressionMode_e;
 typedef enum {
-  ZSTD_urm_auto = 0,                   /* Automatically determine whether or not we use row matchfinder */
-  ZSTD_urm_disableRowMatchFinder = 1,  /* Never use row matchfinder */
-  ZSTD_urm_enableRowMatchFinder = 2    /* Always use row matchfinder when applicable */
-} ZSTD_useRowMatchFinderMode_e;
+  /* Note: This enum controls features which are conditionally beneficial. Zstd typically will make a final
+   * decision on whether or not to enable the feature (ZSTD_ps_auto), but setting the switch to ZSTD_ps_enable
+   * or ZSTD_ps_disable allow for a force enable/disable the feature.
+   */
+  ZSTD_ps_auto = 0,         /* Let the library automatically determine whether the feature shall be enabled */
+  ZSTD_ps_enable = 1,       /* Force-enable the feature */
+  ZSTD_ps_disable = 2       /* Do not use the feature */
+} ZSTD_paramSwitch_e;
 /***************************************
 * Frame size functions
@@ -1454,6 +1455,26 @@ ZSTDLIB_STATIC_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* ds
 ZSTDLIB_STATIC_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
                                             const void* src, size_t srcSize, unsigned magicVariant);
+/*! ZSTD_readSkippableFrame() :
+ * Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer.
+ *
+ * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written,
+ * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested
+ * in the magicVariant.
+ *
+ * Returns an error if destination buffer is not large enough, or if the frame is not skippable.
+ *
+ * @return : number of bytes written or a ZSTD error.
+ */
+ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant,
+                                            const void* src, size_t srcSize);
+
+/*! ZSTD_isSkippableFrame() :
+ * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame.
+ */
+ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
+
+
 /***************************************
 * Memory management
@@ -1581,15 +1602,15 @@ ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_
                                                   ZSTD_compressionParameters cParams,
                                                   ZSTD_customMem customMem);
-/* ! Thread pool :
- * These prototypes make it possible to share a thread pool among multiple compression contexts.
- * This can limit resources for applications with multiple threads where each one uses
- * a threaded compression mode (via ZSTD_c_nbWorkers parameter).
- * ZSTD_createThreadPool creates a new thread pool with a given number of threads.
- * Note that the lifetime of such pool must exist while being used.
- * ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value
- * to use an internal thread pool).
- * ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer.
+/*!
Thread pool : + * These prototypes make it possible to share a thread pool among multiple compression contexts. + * This can limit resources for applications with multiple threads where each one uses + * a threaded compression mode (via ZSTD_c_nbWorkers parameter). + * ZSTD_createThreadPool creates a new thread pool with a given number of threads. + * Note that the lifetime of such pool must exist while being used. + * ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value + * to use an internal thread pool). + * ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer. */ typedef struct POOL_ctx_s ZSTD_threadPool; ZSTDLIB_STATIC_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads); @@ -1725,9 +1746,15 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo * See the comments on that enum for an explanation of the feature. */ #define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4 -/* Controls how the literals are compressed (default is auto). - * The value must be of type ZSTD_literalCompressionMode_e. - * See ZSTD_literalCompressionMode_e enum definition for details. +/* Controlled with ZSTD_paramSwitch_e enum. + * Default is ZSTD_ps_auto. + * Set to ZSTD_ps_disable to never compress literals. + * Set to ZSTD_ps_enable to always compress literals. (Note: uncompressed literals + * may still be emitted if huffman is not beneficial to use.) + * + * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use + * literals compression based on the compression parameters - specifically, + * negative compression levels do not use literal compression. */ #define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5 @@ -1790,7 +1817,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo * * Note that this means that the CDict tables can no longer be copied into the * CCtx, so the dict attachment mode ZSTD_dictForceCopy will no longer be - * useable. The dictionary can only be attached or reloaded. + * usable. The dictionary can only be attached or reloaded. * * In general, you should expect compression to be faster--sometimes very much * so--and CDict creation to be slightly slower. Eventually, we will probably @@ -1879,23 +1906,26 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo */ #define ZSTD_c_validateSequences ZSTD_c_experimentalParam12 -/* ZSTD_c_splitBlocks - * Default is 0 == disabled. Set to 1 to enable block splitting. +/* ZSTD_c_useBlockSplitter + * Controlled with ZSTD_paramSwitch_e enum. + * Default is ZSTD_ps_auto. + * Set to ZSTD_ps_disable to never use block splitter. + * Set to ZSTD_ps_enable to always use block splitter. * - * Will attempt to split blocks in order to improve compression ratio at the cost of speed. + * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use + * block splitting based on the compression parameters. */ -#define ZSTD_c_splitBlocks ZSTD_c_experimentalParam13 +#define ZSTD_c_useBlockSplitter ZSTD_c_experimentalParam13 /* ZSTD_c_useRowMatchFinder - * Default is ZSTD_urm_auto. - * Controlled with ZSTD_useRowMatchFinderMode_e enum. + * Controlled with ZSTD_paramSwitch_e enum. + * Default is ZSTD_ps_auto. + * Set to ZSTD_ps_disable to never use row-based matchfinder. + * Set to ZSTD_ps_enable to force usage of row-based matchfinder. 
* - * By default, in ZSTD_urm_auto, when finalizing the compression parameters, the library - * will decide at runtime whether to use the row-based matchfinder based on support for SIMD - * instructions as well as the windowLog. - * - * Set to ZSTD_urm_disableRowMatchFinder to never use row-based matchfinder. - * Set to ZSTD_urm_enableRowMatchFinder to force usage of row-based matchfinder. + * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use + * the row-based matchfinder based on support for SIMD instructions and the window log. + * Note that this only pertains to compression strategies: greedy, lazy, and lazy2 */ #define ZSTD_c_useRowMatchFinder ZSTD_c_experimentalParam14 @@ -2218,7 +2248,7 @@ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, * This function is DEPRECATED, and equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_refCDict(zcs, cdict); - * + * * note : cdict will just be referenced, and must outlive compression session * This prototype will generate compilation warnings. */ diff --git a/programs/Makefile b/programs/Makefile index 599fb02fa..fa557e6f9 100644 --- a/programs/Makefile +++ b/programs/Makefile @@ -9,7 +9,7 @@ # ########################################################################## # zstd : Command Line Utility, supporting gzip-like arguments # zstd32 : Same as zstd, but forced to compile in 32-bits mode -# zstd_nolegacy : zstd without support of decompression of legacy versions +# zstd-nolegacy : zstd without support of decompression of legacy versions # zstd-small : minimal zstd without dictionary builder and benchmark # zstd-compress : compressor-only version of zstd # zstd-decompress : decompressor-only version of zstd @@ -18,31 +18,9 @@ .PHONY: default default: zstd-release -# silent mode by default; verbose can be triggered by V=1 or VERBOSE=1 -$(V)$(VERBOSE).SILENT: +LIBZSTD := ../lib - -ZSTDDIR := ../lib - -# Version numbers -LIBVER_SRC := $(ZSTDDIR)/zstd.h -LIBVER_MAJOR_SCRIPT:=`sed -n '/define ZSTD_VERSION_MAJOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)` -LIBVER_MINOR_SCRIPT:=`sed -n '/define ZSTD_VERSION_MINOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)` -LIBVER_PATCH_SCRIPT:=`sed -n '/define ZSTD_VERSION_RELEASE/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)` -LIBVER_SCRIPT:= $(LIBVER_MAJOR_SCRIPT).$(LIBVER_MINOR_SCRIPT).$(LIBVER_PATCH_SCRIPT) -LIBVER_MAJOR := $(shell echo $(LIBVER_MAJOR_SCRIPT)) -LIBVER_MINOR := $(shell echo $(LIBVER_MINOR_SCRIPT)) -LIBVER_PATCH := $(shell echo $(LIBVER_PATCH_SCRIPT)) -LIBVER := $(shell echo $(LIBVER_SCRIPT)) - -ZSTD_VERSION = $(LIBVER) - -HAVE_COLORNEVER = $(shell echo a | grep --color=never a > /dev/null 2> /dev/null && echo 1 || echo 0) -GREP_OPTIONS ?= -ifeq ($HAVE_COLORNEVER, 1) - GREP_OPTIONS += --color=never -endif -GREP = grep $(GREP_OPTIONS) +include $(LIBZSTD)/libzstd.mk ifeq ($(shell $(CC) -v 2>&1 | $(GREP) -c "gcc version "), 1) ALIGN_LOOP = -falign-loops=32 @@ -50,78 +28,25 @@ else ALIGN_LOOP = endif -DEBUGLEVEL ?= 0 -CPPFLAGS += -DXXH_NAMESPACE=ZSTD_ -DDEBUGLEVEL=$(DEBUGLEVEL) -ifeq ($(OS),Windows_NT) # MinGW assumed - CPPFLAGS += -D__USE_MINGW_ANSI_STDIO # compatibility with %zu formatting -endif -CFLAGS ?= -O3 -DEBUGFLAGS+=-Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \ - -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \ - -Wstrict-prototypes -Wundef -Wpointer-arith \ - -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \ - -Wredundant-decls -Wmissing-prototypes -Wc++-compat -CFLAGS += 
$(DEBUGFLAGS) -CPPFLAGS += $(MOREFLAGS) -LDFLAGS += $(MOREFLAGS) -FLAGS = $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) - -ZSTDLIB_COMMON := $(ZSTDDIR)/common -ZSTDLIB_COMPRESS := $(ZSTDDIR)/compress -ZSTDLIB_DECOMPRESS := $(ZSTDDIR)/decompress -ZDICT_DIR := $(ZSTDDIR)/dictBuilder -ZSTDLEGACY_DIR := $(ZSTDDIR)/legacy - -vpath %.c $(ZSTDLIB_COMMON) $(ZSTDLIB_COMPRESS) $(ZSTDLIB_DECOMPRESS) $(ZDICT_DIR) $(ZSTDLEGACY_DIR) - -ZSTDLIB_COMMON_C := $(wildcard $(ZSTDLIB_COMMON)/*.c) -ZSTDLIB_COMPRESS_C := $(wildcard $(ZSTDLIB_COMPRESS)/*.c) -ZSTDLIB_DECOMPRESS_C := $(wildcard $(ZSTDLIB_DECOMPRESS)/*.c) -ZSTDLIB_CORE_SRC := $(ZSTDLIB_DECOMPRESS_C) $(ZSTDLIB_COMMON_C) $(ZSTDLIB_COMPRESS_C) -ZDICT_SRC := $(wildcard $(ZDICT_DIR)/*.c) - -ZSTD_LEGACY_SUPPORT ?= 5 -ZSTDLEGACY_SRC := -ifneq ($(ZSTD_LEGACY_SUPPORT), 0) -ifeq ($(shell test $(ZSTD_LEGACY_SUPPORT) -lt 8; echo $$?), 0) - ZSTDLEGACY_SRC += $(shell ls $(ZSTDLEGACY_DIR)/*.c | $(GREP) 'v0[$(ZSTD_LEGACY_SUPPORT)-7]') -endif -endif +ZSTDLIB_COMMON_SRC := $(sort $(ZSTD_COMMON_FILES)) +ZSTDLIB_COMPRESS_SRC := $(sort $(ZSTD_COMPRESS_FILES)) +ZSTDLIB_DECOMPRESS_SRC := $(sort $(ZSTD_DECOMPRESS_FILES)) +ZSTDLIB_CORE_SRC := $(sort $(ZSTD_DECOMPRESS_FILES) $(ZSTD_COMMON_FILES) $(ZSTD_COMPRESS_FILES)) +ZDICT_SRC := $(sort $(ZSTD_DICTBUILDER_FILES)) +ZSTDLEGACY_SRC := $(sort $(ZSTD_LEGACY_FILES)) # Sort files in alphabetical order for reproducible builds ZSTDLIB_FULL_SRC = $(sort $(ZSTDLIB_CORE_SRC) $(ZSTDLEGACY_SRC) $(ZDICT_SRC)) ZSTDLIB_LOCAL_SRC = $(notdir $(ZSTDLIB_FULL_SRC)) -ZSTDLIB_LOCAL_OBJ := $(ZSTDLIB_LOCAL_SRC:.c=.o) +ZSTDLIB_LOCAL_OBJ0 := $(ZSTDLIB_LOCAL_SRC:.c=.o) +ZSTDLIB_LOCAL_OBJ := $(ZSTDLIB_LOCAL_OBJ0:.S=.o) ZSTD_CLI_SRC := $(wildcard *.c) ZSTD_CLI_OBJ := $(ZSTD_CLI_SRC:.c=.o) ZSTD_ALL_SRC = $(ZSTDLIB_LOCAL_SRC) $(ZSTD_CLI_SRC) -ZSTD_ALL_OBJ := $(ZSTD_ALL_SRC:.c=.o) - -UNAME := $(shell uname) - -ifndef BUILD_DIR -ifeq ($(UNAME), Darwin) - ifeq ($(shell md5 < /dev/null > /dev/null; echo $$?), 0) - HASH ?= md5 - endif -else ifeq ($(UNAME), FreeBSD) - HASH ?= gmd5sum -else ifeq ($(UNAME), NetBSD) - HASH ?= md5 -n -else ifeq ($(UNAME), OpenBSD) - HASH ?= md5 -endif -HASH ?= md5sum -HAVE_HASH :=$(shell echo 1 | $(HASH) > /dev/null && echo 1 || echo 0) - -HASH_DIR = conf_$(shell echo $(CC) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) $(LDLIBS) $(ZSTD_FILES) | $(HASH) | cut -f 1 -d " ") -ifeq ($(HAVE_HASH),0) - $(info warning : could not find HASH ($(HASH)), needed to differentiate builds using different flags) - BUILD_DIR := obj/generic_noconf -endif -endif # BUILD_DIR +ZSTD_ALL_OBJ0 := $(ZSTD_ALL_SRC:.c=.o) +ZSTD_ALL_OBJ := $(ZSTD_ALL_OBJ0:.S=.o) # Define *.exe as extension for Windows systems ifneq (,$(filter Windows%,$(OS))) @@ -139,9 +64,6 @@ endif VOID = /dev/null -# Make 4.3 doesn't support '\#' anymore (https://lwn.net/Articles/810071/) -NUM_SYMBOL := \# - # thread detection NO_THREAD_MSG := ==> no threads, building without multithreading support HAVE_PTHREAD := $(shell printf '$(NUM_SYMBOL)include \nint main(void) { return 0; }' > have_pthread.c && $(CC) $(FLAGS) -o have_pthread$(EXT) have_pthread.c -pthread 2> $(VOID) && rm have_pthread$(EXT) && echo 1 || echo 0; rm have_pthread.c) @@ -156,7 +78,7 @@ endif # zlib detection NO_ZLIB_MSG := ==> no zlib, building zstd without .gz support -HAVE_ZLIB := $(shell printf '$(NUM_SYMBOL)include \nint main(void) { return 0; }' > have_zlib.c && $(CC) $(FLAGS) -o have_zlib$(EXT) have_zlib.c -lz 2> $(VOID) && rm have_zlib$(EXT) && echo 1 || echo 0; rm have_zlib.c) +HAVE_ZLIB ?= $(shell printf '$(NUM_SYMBOL)include \nint 
main(void) { return 0; }' > have_zlib.c && $(CC) $(FLAGS) -o have_zlib$(EXT) have_zlib.c -lz 2> $(VOID) && rm have_zlib$(EXT) && echo 1 || echo 0; rm have_zlib.c) ifeq ($(HAVE_ZLIB), 1) ZLIB_MSG := ==> building zstd with .gz compression support ZLIBCPP = -DZSTD_GZCOMPRESS -DZSTD_GZDECOMPRESS @@ -167,7 +89,7 @@ endif # lzma detection NO_LZMA_MSG := ==> no liblzma, building zstd without .xz/.lzma support -HAVE_LZMA := $(shell printf '$(NUM_SYMBOL)include \nint main(void) { return 0; }' > have_lzma.c && $(CC) $(FLAGS) -o have_lzma$(EXT) have_lzma.c -llzma 2> $(VOID) && rm have_lzma$(EXT) && echo 1 || echo 0; rm have_lzma.c) +HAVE_LZMA ?= $(shell printf '$(NUM_SYMBOL)include \nint main(void) { return 0; }' > have_lzma.c && $(CC) $(FLAGS) -o have_lzma$(EXT) have_lzma.c -llzma 2> $(VOID) && rm have_lzma$(EXT) && echo 1 || echo 0; rm have_lzma.c) ifeq ($(HAVE_LZMA), 1) LZMA_MSG := ==> building zstd with .xz/.lzma compression support LZMACPP = -DZSTD_LZMACOMPRESS -DZSTD_LZMADECOMPRESS @@ -178,7 +100,7 @@ endif # lz4 detection NO_LZ4_MSG := ==> no liblz4, building zstd without .lz4 support -HAVE_LZ4 := $(shell printf '$(NUM_SYMBOL)include \n$(NUM_SYMBOL)include \nint main(void) { return 0; }' > have_lz4.c && $(CC) $(FLAGS) -o have_lz4$(EXT) have_lz4.c -llz4 2> $(VOID) && rm have_lz4$(EXT) && echo 1 || echo 0; rm have_lz4.c) +HAVE_LZ4 ?= $(shell printf '$(NUM_SYMBOL)include \n$(NUM_SYMBOL)include \nint main(void) { return 0; }' > have_lz4.c && $(CC) $(FLAGS) -o have_lz4$(EXT) have_lz4.c -llz4 2> $(VOID) && rm have_lz4$(EXT) && echo 1 || echo 0; rm have_lz4.c) ifeq ($(HAVE_LZ4), 1) LZ4_MSG := ==> building zstd with .lz4 compression support LZ4CPP = -DZSTD_LZ4COMPRESS -DZSTD_LZ4DECOMPRESS @@ -212,7 +134,7 @@ SET_CACHE_DIRECTORY = \ all: zstd .PHONY: allVariants -allVariants: zstd zstd-compress zstd-decompress zstd-small zstd-nolegacy zstd-dictBuilder +allVariants: zstd zstd-compress zstd-decompress zstd-small zstd-frugal zstd-nolegacy zstd-dictBuilder .PHONY: zstd # must always be run zstd : CPPFLAGS += $(THREAD_CPP) $(ZLIBCPP) $(LZMACPP) $(LZ4CPP) @@ -276,6 +198,7 @@ zstd32 : $(ZSTDLIB_FULL_SRC) $(ZSTD_CLI_SRC) ## zstd-nolegacy: same scope as zstd, with just support of legacy formats removed zstd-nolegacy : LDFLAGS += $(THREAD_LD) $(ZLIBLD) $(LZMALD) $(LZ4LD) $(DEBUGFLAGS_LD) +zstd-nolegacy : CPPFLAGS += -UZSTD_LEGACY_SUPPORT -DZSTD_LEGACY_SUPPORT=0 zstd-nolegacy : $(ZSTDLIB_CORE_SRC) $(ZDICT_SRC) $(ZSTD_CLI_OBJ) $(CC) $(FLAGS) $^ -o $@$(EXT) $(LDFLAGS) @@ -299,7 +222,7 @@ zstd-noxz : zstd ## zstd-dll: zstd executable linked to dynamic library libzstd (must have same version) .PHONY: zstd-dll -zstd-dll : LDFLAGS+= -L$(ZSTDDIR) +zstd-dll : LDFLAGS+= -L$(LIBZSTD) zstd-dll : LDLIBS += -lzstd zstd-dll : ZSTDLIB_LOCAL_SRC = xxhash.c zstd-dll : zstd @@ -323,18 +246,17 @@ zstd-pgo : ## zstd-small: minimal target, supporting only zstd compression and decompression. no bench. no legacy. no other format. 
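# Illustrative usage sketch (an assumption, not part of this patch): because the
# detection variables above are now assigned with `?=` rather than `:=`, a build
# can pre-seed or disable a probe from the environment or the make command line,
# for example:
#
#    make zstd HAVE_ZLIB=0 HAVE_LZMA=0 HAVE_LZ4=1
#
# A value supplied this way means the corresponding $(shell ...) compile test is
# never evaluated, which can help in cross-compilation setups where the probe
# would otherwise test the host toolchain instead of the target.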
zstd-small: CFLAGS = -Os -s zstd-frugal zstd-small: $(ZSTDLIB_CORE_SRC) zstdcli.c util.c timefn.c fileio.c - $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NOTRACE $^ -o $@$(EXT) + $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NOTRACE -UZSTD_LEGACY_SUPPORT -DZSTD_LEGACY_SUPPORT=0 $^ -o $@$(EXT) -zstd-decompress: $(ZSTDLIB_COMMON_C) $(ZSTDLIB_DECOMPRESS_C) zstdcli.c util.c timefn.c fileio.c - $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NOCOMPRESS -DZSTD_NOTRACE $^ -o $@$(EXT) +zstd-decompress: $(ZSTDLIB_COMMON_SRC) $(ZSTDLIB_DECOMPRESS_SRC) zstdcli.c util.c timefn.c fileio.c + $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NOCOMPRESS -DZSTD_NOTRACE -UZSTD_LEGACY_SUPPORT -DZSTD_LEGACY_SUPPORT=0 $^ -o $@$(EXT) -zstd-compress: $(ZSTDLIB_COMMON_C) $(ZSTDLIB_COMPRESS_C) zstdcli.c util.c timefn.c fileio.c - $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NODECOMPRESS -DZSTD_NOTRACE $^ -o $@$(EXT) +zstd-compress: $(ZSTDLIB_COMMON_SRC) $(ZSTDLIB_COMPRESS_SRC) zstdcli.c util.c timefn.c fileio.c + $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NODECOMPRESS -DZSTD_NOTRACE -UZSTD_LEGACY_SUPPORT -DZSTD_LEGACY_SUPPORT=0 $^ -o $@$(EXT) ## zstd-dictBuilder: executable supporting dictionary creation and compression (only) -zstd-dictBuilder: CPPFLAGS += -DZSTD_NOBENCH -DZSTD_NODECOMPRESS -DZSTD_NOTRACE -zstd-dictBuilder: $(ZSTDLIB_COMMON_C) $(ZSTDLIB_COMPRESS_C) $(ZDICT_SRC) zstdcli.c util.c timefn.c fileio.c dibio.c - $(CC) $(FLAGS) $^ -o $@$(EXT) +zstd-dictBuilder: $(ZSTDLIB_COMMON_SRC) $(ZSTDLIB_COMPRESS_SRC) $(ZDICT_SRC) zstdcli.c util.c timefn.c fileio.c dibio.c + $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODECOMPRESS -DZSTD_NOTRACE $^ -o $@$(EXT) zstdmt: zstd ln -sf zstd zstdmt @@ -398,6 +320,10 @@ $(BUILD_DIR)/%.o : %.c $(BUILD_DIR)/%.d | $(BUILD_DIR) @echo CC $@ $(COMPILE.c) $(DEPFLAGS) $(BUILD_DIR)/$*.d $(OUTPUT_OPTION) $< +$(BUILD_DIR)/%.o : %.S | $(BUILD_DIR) + @echo AS $@ + $(COMPILE.c) $(OUTPUT_OPTION) $< + MKDIR ?= mkdir $(BUILD_DIR): ; $(MKDIR) -p $@ @@ -411,7 +337,7 @@ include $(wildcard $(DEPFILES)) #----------------------------------------------------------------------------- # make install is validated only for Linux, macOS, BSD, Hurd and Solaris targets #----------------------------------------------------------------------------- -ifneq (,$(filter $(UNAME),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS Haiku)) +ifneq (,$(filter $(UNAME),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS Haiku AIX)) HAVE_COLORNEVER = $(shell echo a | egrep --color=never a > /dev/null 2> /dev/null && echo 1 || echo 0) EGREP_OPTIONS ?= diff --git a/programs/README.md b/programs/README.md index 7fd710420..5570f90c3 100644 --- a/programs/README.md +++ b/programs/README.md @@ -156,7 +156,7 @@ Arguments : Advanced arguments : -V : display Version number and exit - -c : force write to standard output, even if it is the console + -c : write to standard output (even if it is the console) -v : verbose mode; specify multiple times to increase verbosity -q : suppress warnings; specify twice to suppress errors too --no-progress : do not display the progress counter @@ -172,6 +172,7 @@ Advanced compression arguments : --long[=#]: enable long distance matching with given window log (default: 27) --fast[=#]: switch to very fast compression levels (default: 1) --adapt : dynamically adapt compression level to I/O conditions +--patch-from=FILE : specify the file to be used as a reference point for zstd's diff engine -T# : spawns # compression threads (default: 1, 
0==# cores) -B# : select size of each job (default: 0==automatic) --single-thread : use a single thread for both I/O and compression (result slightly different than -T1) diff --git a/programs/benchzstd.c b/programs/benchzstd.c index 49c03490d..1e4d717d1 100644 --- a/programs/benchzstd.c +++ b/programs/benchzstd.c @@ -70,6 +70,8 @@ static const size_t maxMemory = (sizeof(size_t)==4) ? #define DISPLAY(...) { fprintf(stderr, __VA_ARGS__); fflush(NULL); } #define DISPLAYLEVEL(l, ...) if (displayLevel>=l) { DISPLAY(__VA_ARGS__); } /* 0 : no display; 1: errors; 2 : + result + interaction + warnings; 3 : + progression; 4 : + information */ +#define OUTPUT(...) { fprintf(stdout, __VA_ARGS__); fflush(NULL); } +#define OUTPUTLEVEL(l, ...) if (displayLevel>=l) { OUTPUT(__VA_ARGS__); } /* ************************************* @@ -129,7 +131,7 @@ BMK_advancedParams_t BMK_initAdvancedParams(void) { 0, /* ldmHashLog */ 0, /* ldmBuckSizeLog */ 0, /* ldmHashRateLog */ - ZSTD_lcm_auto, /* literalCompressionMode */ + ZSTD_ps_auto, /* literalCompressionMode */ 0 /* useRowMatchFinder */ }; return res; @@ -181,7 +183,7 @@ BMK_initCCtx(ZSTD_CCtx* ctx, CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_minMatch, (int)comprParams->minMatch)); CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_targetLength, (int)comprParams->targetLength)); CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_literalCompressionMode, (int)adv->literalCompressionMode)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_strategy, comprParams->strategy)); + CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_strategy, (int)comprParams->strategy)); CHECK_Z(ZSTD_CCtx_loadDictionary(ctx, dictBuffer, dictBufferSize)); } @@ -371,10 +373,7 @@ BMK_benchMemAdvancedNoAlloc( if (adv->mode == BMK_decodeOnly) { cSizes[nbBlocks] = thisBlockSize; benchResult.cSize = thisBlockSize; - } - } - } - } + } } } } /* warming up `compressedBuffer` */ if (adv->mode == BMK_decodeOnly) { @@ -429,8 +428,9 @@ BMK_benchMemAdvancedNoAlloc( dctxprep.dictBuffer = dictBuffer; dctxprep.dictBufferSize = dictBufferSize; - DISPLAYLEVEL(2, "\r%70s\r", ""); /* blank line */ - DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->\r", marks[markNb], displayName, (unsigned)srcSize); + OUTPUTLEVEL(2, "\r%70s\r", ""); /* blank line */ + assert(srcSize < UINT_MAX); + OUTPUTLEVEL(2, "%2s-%-17.17s :%10u -> \r", marks[markNb], displayName, (unsigned)srcSize); while (!(compressionCompleted && decompressionCompleted)) { if (!compressionCompleted) { @@ -442,7 +442,7 @@ BMK_benchMemAdvancedNoAlloc( { BMK_runTime_t const cResult = BMK_extract_runTime(cOutcome); cSize = cResult.sumOfReturn; - ratio = (double)srcSize / cSize; + ratio = (double)srcSize / (double)cSize; { BMK_benchResult_t newResult; newResult.cSpeed = (U64)((double)srcSize * TIMELOOP_NANOSEC / cResult.nanoSecPerRun); benchResult.cSize = cSize; @@ -451,11 +451,12 @@ BMK_benchMemAdvancedNoAlloc( } } { int const ratioAccuracy = (ratio < 10.) ? 3 : 2; - DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.*f),%6.*f MB/s\r", + assert(cSize < UINT_MAX); + OUTPUTLEVEL(2, "%2s-%-17.17s :%10u ->%10u (x%5.*f), %6.*f MB/s \r", marks[markNb], displayName, (unsigned)srcSize, (unsigned)cSize, ratioAccuracy, ratio, - benchResult.cSpeed < (10 MB) ? 2 : 1, (double)benchResult.cSpeed / MB_UNIT); + benchResult.cSpeed < (10 * MB_UNIT) ? 2 : 1, (double)benchResult.cSpeed / MB_UNIT); } compressionCompleted = BMK_isCompleted_TimedFn(timeStateCompress); } @@ -474,11 +475,11 @@ BMK_benchMemAdvancedNoAlloc( } { int const ratioAccuracy = (ratio < 10.) ? 
3 : 2; - DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.*f),%6.*f MB/s ,%6.1f MB/s \r", + OUTPUTLEVEL(2, "%2s-%-17.17s :%10u ->%10u (x%5.*f), %6.*f MB/s, %6.1f MB/s\r", marks[markNb], displayName, (unsigned)srcSize, (unsigned)cSize, ratioAccuracy, ratio, - benchResult.cSpeed < (10 MB) ? 2 : 1, (double)benchResult.cSpeed / MB_UNIT, + benchResult.cSpeed < (10 * MB_UNIT) ? 2 : 1, (double)benchResult.cSpeed / MB_UNIT, (double)benchResult.dSpeed / MB_UNIT); } decompressionCompleted = BMK_isCompleted_TimedFn(timeStateDecompress); @@ -515,7 +516,7 @@ BMK_benchMemAdvancedNoAlloc( DISPLAY("%02X ", ((const BYTE*)srcBuffer)[u+n]); DISPLAY(" \n"); DISPLAY("decode: "); - for (n=lowest; n>0; n++) + for (n=lowest; n>0; n--) DISPLAY("%02X ", resultBuffer[u-n]); DISPLAY(" :%02X: ", resultBuffer[u]); for (n=1; n<3; n++) @@ -535,13 +536,13 @@ BMK_benchMemAdvancedNoAlloc( double const cSpeed = (double)benchResult.cSpeed / MB_UNIT; double const dSpeed = (double)benchResult.dSpeed / MB_UNIT; if (adv->additionalParam) { - DISPLAY("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s (param=%d)\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName, adv->additionalParam); + OUTPUT("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s (param=%d)\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName, adv->additionalParam); } else { - DISPLAY("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName); + OUTPUT("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName); } } - DISPLAYLEVEL(2, "%2i#\n", cLevel); + OUTPUTLEVEL(2, "%2i#\n", cLevel); } /* Bench */ benchResult.cMem = (1ULL << (comprParams->windowLog)) + ZSTD_sizeof_CCtx(cctx); @@ -670,7 +671,7 @@ static BMK_benchOutcome_t BMK_benchCLevel(const void* srcBuffer, size_t benchedS } if (displayLevel == 1 && !adv->additionalParam) /* --quiet mode */ - DISPLAY("bench %s %s: input %u bytes, %u seconds, %u KB blocks\n", + OUTPUT("bench %s %s: input %u bytes, %u seconds, %u KB blocks\n", ZSTD_VERSION_STRING, ZSTD_GIT_COMMIT_STRING, (unsigned)benchedSize, adv->nbSeconds, (unsigned)(adv->blockSize>>10)); @@ -760,7 +761,7 @@ static int BMK_loadFiles(void* buffer, size_t bufferSize, } { FILE* const f = fopen(fileNamesTable[n], "rb"); if (f==NULL) RETURN_ERROR_INT(10, "impossible to open file %s", fileNamesTable[n]); - DISPLAYLEVEL(2, "Loading %s... \r", fileNamesTable[n]); + OUTPUTLEVEL(2, "Loading %s... 
\r", fileNamesTable[n]); if (fileSize > bufferSize-pos) fileSize = bufferSize-pos, nbFiles=n; /* buffer too small - stop after this file */ { size_t const readSize = fread(((char*)buffer)+pos, 1, (size_t)fileSize, f); if (readSize != (size_t)fileSize) RETURN_ERROR_INT(11, "could not read %s", fileNamesTable[n]); @@ -797,6 +798,10 @@ BMK_benchOutcome_t BMK_benchFilesAdvanced( RETURN_ERROR(15, BMK_benchOutcome_t, "Invalid Compression Level"); } + if (totalSizeToLoad == UTIL_FILESIZE_UNKNOWN) { + RETURN_ERROR(9, BMK_benchOutcome_t, "Error loading files"); + } + fileSizes = (size_t*)calloc(nbFiles, sizeof(size_t)); if (!fileSizes) RETURN_ERROR(12, BMK_benchOutcome_t, "not enough memory for fileSizes"); diff --git a/programs/benchzstd.h b/programs/benchzstd.h index 9b40dcc29..11ac85da7 100644 --- a/programs/benchzstd.h +++ b/programs/benchzstd.h @@ -116,7 +116,7 @@ typedef struct { int ldmHashLog; int ldmBucketSizeLog; int ldmHashRateLog; - ZSTD_literalCompressionMode_e literalCompressionMode; + ZSTD_paramSwitch_e literalCompressionMode; int useRowMatchFinder; /* use row-based matchfinder if possible */ } BMK_advancedParams_t; diff --git a/programs/dibio.c b/programs/dibio.c index d6c9f6d9e..e7fb905ec 100644 --- a/programs/dibio.c +++ b/programs/dibio.c @@ -49,6 +49,7 @@ static const size_t g_maxMemory = (sizeof(size_t) == 4) ? (2 GB - 64 MB) : ((size_t)(512 MB) << sizeof(size_t)); #define NOISELENGTH 32 +#define MAX_SAMPLES_SIZE (2 GB) /* training dataset limited to 2GB */ /*-************************************* @@ -88,6 +89,15 @@ static UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER; #undef MIN #define MIN(a,b) ((a) < (b) ? (a) : (b)) +/** + Returns the size of a file. + If error returns -1. +*/ +static S64 DiB_getFileSize (const char * fileName) +{ + U64 const fileSize = UTIL_getFileSize(fileName); + return (fileSize == UTIL_FILESIZE_UNKNOWN) ? -1 : (S64)fileSize; +} /* ******************************************************** * File related operations @@ -101,47 +111,67 @@ static UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER; * *bufferSizePtr is modified, it provides the amount data loaded within buffer. * sampleSizes is filled with the size of each sample. 
*/ -static unsigned DiB_loadFiles(void* buffer, size_t* bufferSizePtr, - size_t* sampleSizes, unsigned sstSize, - const char** fileNamesTable, unsigned nbFiles, size_t targetChunkSize, - unsigned displayLevel) +static int DiB_loadFiles( + void* buffer, size_t* bufferSizePtr, + size_t* sampleSizes, int sstSize, + const char** fileNamesTable, int nbFiles, + size_t targetChunkSize, int displayLevel ) { char* const buff = (char*)buffer; - size_t pos = 0; - unsigned nbLoadedChunks = 0, fileIndex; + size_t totalDataLoaded = 0; + int nbSamplesLoaded = 0; + int fileIndex = 0; + FILE * f = NULL; - for (fileIndex=0; fileIndex *bufferSizePtr-pos) break; - { size_t const readSize = fread(buff+pos, 1, toLoad, f); - if (readSize != toLoad) EXM_THROW(11, "Pb reading %s", fileName); - pos += readSize; - sampleSizes[nbLoadedChunks++] = toLoad; - remainingToLoad -= targetChunkSize; - if (nbLoadedChunks == sstSize) { /* no more space left in sampleSizes table */ - fileIndex = nbFiles; /* stop there */ + assert(targetChunkSize <= SAMPLESIZE_MAX); + + while ( nbSamplesLoaded < sstSize && fileIndex < nbFiles ) { + size_t fileDataLoaded; + S64 const fileSize = DiB_getFileSize(fileNamesTable[fileIndex]); + if (fileSize <= 0) /* skip if zero-size or file error */ + continue; + + f = fopen( fileNamesTable[fileIndex], "rb"); + if (f == NULL) + EXM_THROW(10, "zstd: dictBuilder: %s %s ", fileNamesTable[fileIndex], strerror(errno)); + DISPLAYUPDATE(2, "Loading %s... \r", fileNamesTable[fileIndex]); + + /* Load the first chunk of data from the file */ + fileDataLoaded = targetChunkSize > 0 ? + (size_t)MIN(fileSize, (S64)targetChunkSize) : + (size_t)MIN(fileSize, SAMPLESIZE_MAX ); + if (totalDataLoaded + fileDataLoaded > *bufferSizePtr) + break; + if (fread( buff+totalDataLoaded, 1, fileDataLoaded, f ) != fileDataLoaded) + EXM_THROW(11, "Pb reading %s", fileNamesTable[fileIndex]); + sampleSizes[nbSamplesLoaded++] = fileDataLoaded; + totalDataLoaded += fileDataLoaded; + + /* If file-chunking is enabled, load the rest of the file as more samples */ + if (targetChunkSize > 0) { + while( (S64)fileDataLoaded < fileSize && nbSamplesLoaded < sstSize ) { + size_t const chunkSize = MIN((size_t)(fileSize-fileDataLoaded), targetChunkSize); + if (totalDataLoaded + chunkSize > *bufferSizePtr) /* buffer is full */ break; - } - if (toLoad < targetChunkSize) { - fseek(f, (long)(targetChunkSize - toLoad), SEEK_CUR); - } } } - fclose(f); + + if (fread( buff+totalDataLoaded, 1, chunkSize, f ) != chunkSize) + EXM_THROW(11, "Pb reading %s", fileNamesTable[fileIndex]); + sampleSizes[nbSamplesLoaded++] = chunkSize; + totalDataLoaded += chunkSize; + fileDataLoaded += chunkSize; + } + } + fileIndex += 1; + fclose(f); f = NULL; } + if (f != NULL) + fclose(f); + DISPLAYLEVEL(2, "\r%79s\r", ""); - *bufferSizePtr = pos; - DISPLAYLEVEL(4, "loaded : %u KB \n", (unsigned)(pos >> 10)) - return nbLoadedChunks; + DISPLAYLEVEL(4, "Loaded %d KB total training data, %d nb samples \n", + (int)(totalDataLoaded / (1 KB)), nbSamplesLoaded ); + *bufferSizePtr = totalDataLoaded; + return nbSamplesLoaded; } #define DiB_rotl32(x,r) ((x << r) | (x >> (32 - r))) @@ -223,11 +253,10 @@ static void DiB_saveDict(const char* dictFileName, if (n!=0) EXM_THROW(5, "%s : flush error", dictFileName) } } - typedef struct { - U64 totalSizeToLoad; - unsigned oneSampleTooLarge; - unsigned nbSamples; + S64 totalSizeToLoad; + int nbSamples; + int oneSampleTooLarge; } fileStats; /*! 
DiB_fileStats() : @@ -235,46 +264,87 @@ typedef struct { * provides the amount of data to be loaded and the resulting nb of samples. * This is useful primarily for allocation purpose => sample buffer, and sample sizes table. */ -static fileStats DiB_fileStats(const char** fileNamesTable, unsigned nbFiles, size_t chunkSize, unsigned displayLevel) +static fileStats DiB_fileStats(const char** fileNamesTable, int nbFiles, size_t chunkSize, int displayLevel) { fileStats fs; - unsigned n; + int n; memset(&fs, 0, sizeof(fs)); + + // We assume that if chunking is requested, the chunk size is < SAMPLESIZE_MAX + assert( chunkSize <= SAMPLESIZE_MAX ); + for (n=0; n 2*SAMPLESIZE_MAX); - fs.nbSamples += nbSamples; + S64 const fileSize = DiB_getFileSize(fileNamesTable[n]); + // TODO: is there a minimum sample size? What if the file is 1-byte? + if (fileSize == 0) { + DISPLAYLEVEL(3, "Sample file '%s' has zero size, skipping...\n", fileNamesTable[n]); + continue; + } + + /* the case where we are breaking up files in sample chunks */ + if (chunkSize > 0) + { + // TODO: is there a minimum sample size? Can we have a 1-byte sample? + fs.nbSamples += (int)((fileSize + chunkSize-1) / chunkSize); + fs.totalSizeToLoad += fileSize; + } + else { + /* the case where one file is one sample */ + if (fileSize > SAMPLESIZE_MAX) { + /* flag excessively large sample files */ + fs.oneSampleTooLarge |= (fileSize > 2*SAMPLESIZE_MAX); + + /* Limit to the first SAMPLESIZE_MAX (128kB) of the file */ + DISPLAYLEVEL(3, "Sample file '%s' is too large, limiting to %d KB", + fileNamesTable[n], SAMPLESIZE_MAX / (1 KB)); + } + fs.nbSamples += 1; + fs.totalSizeToLoad += MIN(fileSize, SAMPLESIZE_MAX); + } } - DISPLAYLEVEL(4, "Preparing to load : %u KB \n", (unsigned)(fs.totalSizeToLoad >> 10)); + DISPLAYLEVEL(4, "Found training data %d files, %d KB, %d samples\n", nbFiles, (int)(fs.totalSizeToLoad / (1 KB)), fs.nbSamples); return fs; } - -int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize, - const char** fileNamesTable, unsigned nbFiles, size_t chunkSize, +int DiB_trainFromFiles(const char* dictFileName, size_t maxDictSize, + const char** fileNamesTable, int nbFiles, size_t chunkSize, ZDICT_legacy_params_t* params, ZDICT_cover_params_t* coverParams, ZDICT_fastCover_params_t* fastCoverParams, int optimize) { - unsigned const displayLevel = params ? params->zParams.notificationLevel : - coverParams ? coverParams->zParams.notificationLevel : - fastCoverParams ? fastCoverParams->zParams.notificationLevel : - 0; /* should never happen */ + fileStats fs; + size_t* sampleSizes; /* vector of sample sizes. Each sample can be up to SAMPLESIZE_MAX */ + int nbSamplesLoaded; /* nb of samples effectively loaded in srcBuffer */ + size_t loadedSize; /* total data loaded in srcBuffer for all samples */ + void* srcBuffer /* contiguous buffer with training data/samples */; void* const dictBuffer = malloc(maxDictSize); - fileStats const fs = DiB_fileStats(fileNamesTable, nbFiles, chunkSize, displayLevel); - size_t* const sampleSizes = (size_t*)malloc(fs.nbSamples * sizeof(size_t)); - size_t const memMult = params ? MEMMULT : - coverParams ? COVER_MEMMULT: - FASTCOVER_MEMMULT; - size_t const maxMem = DiB_findMaxMem(fs.totalSizeToLoad * memMult) / memMult; - size_t loadedSize = (size_t) MIN ((unsigned long long)maxMem, fs.totalSizeToLoad); - void* const srcBuffer = malloc(loadedSize+NOISELENGTH); int result = 0; + int const displayLevel = params ? params->zParams.notificationLevel : + coverParams ? 
coverParams->zParams.notificationLevel : + fastCoverParams ? fastCoverParams->zParams.notificationLevel : 0; + + /* Shuffle input files before we start assessing how much sample data to load. The purpose of the shuffle is to pick random samples when the sample + set is larger than what we can load in memory. */ + DISPLAYLEVEL(3, "Shuffling input files\n"); + DiB_shuffle(fileNamesTable, nbFiles); + + /* Figure out how much sample data to load with how many samples */ + fs = DiB_fileStats(fileNamesTable, nbFiles, chunkSize, displayLevel); + + { + int const memMult = params ? MEMMULT : + coverParams ? COVER_MEMMULT: + FASTCOVER_MEMMULT; + size_t const maxMem = DiB_findMaxMem(fs.totalSizeToLoad * memMult) / memMult; + /* Limit the size of the training data to the free memory */ + /* Limit the size of the training data to 2GB */ + /* TODO: there is opportunity to stop DiB_fileStats() early when the data limit is reached */ + loadedSize = (size_t)MIN( MIN((S64)maxMem, fs.totalSizeToLoad), MAX_SAMPLES_SIZE ); + srcBuffer = malloc(loadedSize+NOISELENGTH); + sampleSizes = (size_t*)malloc(fs.nbSamples * sizeof(size_t)); + } + /* Checks */ if ((!sampleSizes) || (!srcBuffer) || (!dictBuffer)) EXM_THROW(12, "not enough memory for DiB_trainFiles"); /* should not happen */ @@ -289,31 +359,32 @@ int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize, DISPLAYLEVEL(2, "! Alternatively, split files into fixed-size blocks representative of samples, with -B# \n"); EXM_THROW(14, "nb of samples too low"); /* we now clearly forbid this case */ } - if (fs.totalSizeToLoad < (unsigned long long)maxDictSize * 8) { + if (fs.totalSizeToLoad < (S64)maxDictSize * 8) { DISPLAYLEVEL(2, "! Warning : data size of samples too small for target dictionary size \n"); DISPLAYLEVEL(2, "! 
Samples should be about 100x larger than target dictionary size \n"); } /* init */ - if (loadedSize < fs.totalSizeToLoad) - DISPLAYLEVEL(1, "Not enough memory; training on %u MB only...\n", (unsigned)(loadedSize >> 20)); + if ((S64)loadedSize < fs.totalSizeToLoad) + DISPLAYLEVEL(1, "Training samples set too large (%u MB); training on %u MB only...\n", + (unsigned)(fs.totalSizeToLoad / (1 MB)), + (unsigned)(loadedSize / (1 MB))); /* Load input buffer */ - DISPLAYLEVEL(3, "Shuffling input files\n"); - DiB_shuffle(fileNamesTable, nbFiles); - - DiB_loadFiles(srcBuffer, &loadedSize, sampleSizes, fs.nbSamples, fileNamesTable, nbFiles, chunkSize, displayLevel); + nbSamplesLoaded = DiB_loadFiles( + srcBuffer, &loadedSize, sampleSizes, fs.nbSamples, fileNamesTable, + nbFiles, chunkSize, displayLevel); { size_t dictSize; if (params) { DiB_fillNoise((char*)srcBuffer + loadedSize, NOISELENGTH); /* guard band, for end of buffer condition */ dictSize = ZDICT_trainFromBuffer_legacy(dictBuffer, maxDictSize, - srcBuffer, sampleSizes, fs.nbSamples, + srcBuffer, sampleSizes, nbSamplesLoaded, *params); } else if (coverParams) { if (optimize) { dictSize = ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, maxDictSize, - srcBuffer, sampleSizes, fs.nbSamples, + srcBuffer, sampleSizes, nbSamplesLoaded, coverParams); if (!ZDICT_isError(dictSize)) { unsigned splitPercentage = (unsigned)(coverParams->splitPoint * 100); @@ -322,13 +393,13 @@ int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize, } } else { dictSize = ZDICT_trainFromBuffer_cover(dictBuffer, maxDictSize, srcBuffer, - sampleSizes, fs.nbSamples, *coverParams); + sampleSizes, nbSamplesLoaded, *coverParams); } } else { assert(fastCoverParams != NULL); if (optimize) { dictSize = ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, maxDictSize, - srcBuffer, sampleSizes, fs.nbSamples, + srcBuffer, sampleSizes, nbSamplesLoaded, fastCoverParams); if (!ZDICT_isError(dictSize)) { unsigned splitPercentage = (unsigned)(fastCoverParams->splitPoint * 100); @@ -338,7 +409,7 @@ int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize, } } else { dictSize = ZDICT_trainFromBuffer_fastCover(dictBuffer, maxDictSize, srcBuffer, - sampleSizes, fs.nbSamples, *fastCoverParams); + sampleSizes, nbSamplesLoaded, *fastCoverParams); } } if (ZDICT_isError(dictSize)) { diff --git a/programs/dibio.h b/programs/dibio.h index f65ed9b8e..03ec80e59 100644 --- a/programs/dibio.h +++ b/programs/dibio.h @@ -31,8 +31,8 @@ `parameters` is optional and can be provided with values set to 0, meaning "default". @return : 0 == ok. Any other : error. 
*/ -int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize, - const char** fileNamesTable, unsigned nbFiles, size_t chunkSize, +int DiB_trainFromFiles(const char* dictFileName, size_t maxDictSize, + const char** fileNamesTable, int nbFiles, size_t chunkSize, ZDICT_legacy_params_t* params, ZDICT_cover_params_t* coverParams, ZDICT_fastCover_params_t* fastCoverParams, int optimize); diff --git a/programs/fileio.c b/programs/fileio.c index 5693ac399..ef10f8501 100644 --- a/programs/fileio.c +++ b/programs/fileio.c @@ -320,7 +320,7 @@ struct FIO_prefs_s { size_t targetCBlockSize; int srcSizeHint; int testMode; - ZSTD_literalCompressionMode_e literalCompressionMode; + ZSTD_paramSwitch_e literalCompressionMode; /* IO preferences */ U32 removeSrcFile; @@ -392,7 +392,7 @@ FIO_prefs_t* FIO_createPreferences(void) ret->targetCBlockSize = 0; ret->srcSizeHint = 0; ret->testMode = 0; - ret->literalCompressionMode = ZSTD_lcm_auto; + ret->literalCompressionMode = ZSTD_ps_auto; ret->excludeCompressedFiles = 0; ret->allowBlockDevices = 0; return ret; @@ -510,7 +510,7 @@ void FIO_setTestMode(FIO_prefs_t* const prefs, int testMode) { void FIO_setLiteralCompressionMode( FIO_prefs_t* const prefs, - ZSTD_literalCompressionMode_e mode) { + ZSTD_paramSwitch_e mode) { prefs->literalCompressionMode = mode; } @@ -889,26 +889,25 @@ static int FIO_removeMultiFilesWarning(FIO_ctx_t* const fCtx, const FIO_prefs_t* if (fCtx->nbFilesTotal > 1 && !prefs->overwrite) { if (g_display_prefs.displayLevel <= displayLevelCutoff) { if (prefs->removeSrcFile) { - DISPLAYLEVEL(1, "zstd: Aborting... not deleting files and processing into dst: %s", outFileName); + DISPLAYLEVEL(1, "zstd: Aborting... not deleting files and processing into dst: %s\n", outFileName); error = 1; } } else { if (!strcmp(outFileName, stdoutmark)) { - DISPLAYLEVEL(2, "zstd: WARNING: all input files will be processed and concatenated into stdout. "); + DISPLAYLEVEL(2, "zstd: WARNING: all input files will be processed and concatenated into stdout. \n"); } else { - DISPLAYLEVEL(2, "zstd: WARNING: all input files will be processed and concatenated into a single output file: %s ", outFileName); + DISPLAYLEVEL(2, "zstd: WARNING: all input files will be processed and concatenated into a single output file: %s \n", outFileName); } - DISPLAYLEVEL(2, "\nThe concatenated output CANNOT regenerate the original directory tree. ") + DISPLAYLEVEL(2, "The concatenated output CANNOT regenerate the original directory tree. \n") if (prefs->removeSrcFile) { if (fCtx->hasStdoutOutput) { - DISPLAYLEVEL(1, "\nAborting. Use -f if you really want to delete the files and output to stdout"); + DISPLAYLEVEL(1, "Aborting. Use -f if you really want to delete the files and output to stdout\n"); error = 1; } else { error = g_display_prefs.displayLevel > displayLevelCutoff && UTIL_requireUserConfirmation("This is a destructive operation. Proceed? (y/n): ", "Aborting...", "yY", fCtx->hasStdinInput); } } } - DISPLAY("\n"); } return error; } @@ -962,7 +961,7 @@ static void FIO_adjustParamsForPatchFromMode(FIO_prefs_t* const prefs, DISPLAYLEVEL(1, "- Use --single-thread mode in the zstd cli\n"); DISPLAYLEVEL(1, "- Set a larger targetLength (eg. --zstd=targetLength=4096)\n"); DISPLAYLEVEL(1, "- Set a larger chainLog (eg. 
--zstd=chainLog=%u)\n", ZSTD_CHAINLOG_MAX); - DISPLAYLEVEL(1, "Also consdier playing around with searchLog and hashLog\n"); + DISPLAYLEVEL(1, "Also consider playing around with searchLog and hashLog\n"); } } @@ -1332,6 +1331,7 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx, FILE* const dstFile = ress.dstFile; U64 compressedfilesize = 0; ZSTD_EndDirective directive = ZSTD_e_continue; + U64 pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* stats */ ZSTD_frameProgression previous_zfp_update = { 0, 0, 0, 0, 0, 0 }; @@ -1342,16 +1342,31 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx, unsigned inputPresented = 0; unsigned inputBlocked = 0; unsigned lastJobID = 0; + UTIL_HumanReadableSize_t const file_hrs = UTIL_makeHumanReadableSize(fileSize); DISPLAYLEVEL(6, "compression using zstd format \n"); /* init */ if (fileSize != UTIL_FILESIZE_UNKNOWN) { + pledgedSrcSize = fileSize; CHECK(ZSTD_CCtx_setPledgedSrcSize(ress.cctx, fileSize)); } else if (prefs->streamSrcSize > 0) { /* unknown source size; use the declared stream size */ + pledgedSrcSize = prefs->streamSrcSize; CHECK( ZSTD_CCtx_setPledgedSrcSize(ress.cctx, prefs->streamSrcSize) ); } + + { + int windowLog; + UTIL_HumanReadableSize_t windowSize; + CHECK(ZSTD_CCtx_getParameter(ress.cctx, ZSTD_c_windowLog, &windowLog)); + if (windowLog == 0) { + const ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, fileSize, 0); + windowLog = cParams.windowLog; + } + windowSize = UTIL_makeHumanReadableSize(MAX(1ULL, MIN(1ULL << windowLog, pledgedSrcSize))); + DISPLAYLEVEL(4, "Decompression will require %.*f%s of memory\n", windowSize.precision, windowSize.value, windowSize.suffix); + } (void)srcFileName; /* Main compression loop */ @@ -1395,14 +1410,17 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx, if (READY_FOR_UPDATE()) { ZSTD_frameProgression const zfp = ZSTD_getFrameProgression(ress.cctx); double const cShare = (double)zfp.produced / (double)(zfp.consumed + !zfp.consumed/*avoid div0*/) * 100; + UTIL_HumanReadableSize_t const buffered_hrs = UTIL_makeHumanReadableSize(zfp.ingested - zfp.consumed); + UTIL_HumanReadableSize_t const consumed_hrs = UTIL_makeHumanReadableSize(zfp.consumed); + UTIL_HumanReadableSize_t const produced_hrs = UTIL_makeHumanReadableSize(zfp.produced); /* display progress notifications */ if (g_display_prefs.displayLevel >= 3) { - DISPLAYUPDATE(3, "\r(L%i) Buffered :%4u MB - Consumed :%4u MB - Compressed :%4u MB => %.2f%% ", + DISPLAYUPDATE(3, "\r(L%i) Buffered :%6.*f%4s - Consumed :%6.*f%4s - Compressed :%6.*f%4s => %.2f%% ", compressionLevel, - (unsigned)((zfp.ingested - zfp.consumed) >> 20), - (unsigned)(zfp.consumed >> 20), - (unsigned)(zfp.produced >> 20), + buffered_hrs.precision, buffered_hrs.value, buffered_hrs.suffix, + consumed_hrs.precision, consumed_hrs.value, consumed_hrs.suffix, + produced_hrs.precision, produced_hrs.value, produced_hrs.suffix, cShare ); } else if (g_display_prefs.displayLevel >= 2 || g_display_prefs.progressSetting == FIO_ps_always) { /* Require level 2 or forcibly displayed progress counter for summarized updates */ @@ -1419,10 +1437,10 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx, fCtx->currFileIdx+1, fCtx->nbFilesTotal, (int)(18-srcFileNameSize), srcFileName); } } - DISPLAYLEVEL(1, "Read : %2u ", (unsigned)(zfp.consumed >> 20)); + DISPLAYLEVEL(1, "Read:%6.*f%4s ", consumed_hrs.precision, consumed_hrs.value, consumed_hrs.suffix); if (fileSize != UTIL_FILESIZE_UNKNOWN) - DISPLAYLEVEL(2, "/ %2u ", (unsigned)(fileSize >> 20)); - DISPLAYLEVEL(1, "MB ==> %2.f%%", cShare); + DISPLAYLEVEL(2, 
"/%6.*f%4s", file_hrs.precision, file_hrs.value, file_hrs.suffix); + DISPLAYLEVEL(1, " ==> %2.f%%", cShare); DELAY_NEXT_UPDATE(); } @@ -1592,16 +1610,20 @@ FIO_compressFilename_internal(FIO_ctx_t* const fCtx, if (g_display_prefs.displayLevel >= 2 && !fCtx->hasStdoutOutput && (g_display_prefs.displayLevel >= 3 || fCtx->nbFilesTotal <= 1)) { + UTIL_HumanReadableSize_t hr_isize = UTIL_makeHumanReadableSize((U64) readsize); + UTIL_HumanReadableSize_t hr_osize = UTIL_makeHumanReadableSize((U64) compressedfilesize); if (readsize == 0) { - DISPLAYLEVEL(2,"%-20s : (%6llu => %6llu bytes, %s) \n", + DISPLAYLEVEL(2,"%-20s : (%6.*f%4s => %6.*f%4s, %s) \n", srcFileName, - (unsigned long long)readsize, (unsigned long long) compressedfilesize, + hr_isize.precision, hr_isize.value, hr_isize.suffix, + hr_osize.precision, hr_osize.value, hr_osize.suffix, dstFileName); } else { - DISPLAYLEVEL(2,"%-20s :%6.2f%% (%6llu => %6llu bytes, %s) \n", + DISPLAYLEVEL(2,"%-20s :%6.2f%% (%6.*f%4s => %6.*f%4s, %s) \n", srcFileName, (double)compressedfilesize / (double)readsize * 100, - (unsigned long long)readsize, (unsigned long long) compressedfilesize, + hr_isize.precision, hr_isize.value, hr_isize.suffix, + hr_osize.precision, hr_osize.value, hr_osize.suffix, dstFileName); } } @@ -1638,6 +1660,7 @@ static int FIO_compressFilename_dstFile(FIO_ctx_t* const fCtx, int closeDstFile = 0; int result; stat_t statbuf; + int transferMTime = 0; assert(ress.srcFile != NULL); if (ress.dstFile == NULL) { int dstFilePermissions = DEFAULT_FILE_PERMISSIONS; @@ -1645,6 +1668,7 @@ static int FIO_compressFilename_dstFile(FIO_ctx_t* const fCtx, && UTIL_stat(srcFileName, &statbuf) && UTIL_isRegularFileStat(&statbuf) ) { dstFilePermissions = statbuf.st_mode; + transferMTime = 1; } closeDstFile = 1; @@ -1671,6 +1695,9 @@ static int FIO_compressFilename_dstFile(FIO_ctx_t* const fCtx, DISPLAYLEVEL(1, "zstd: %s: %s \n", dstFileName, strerror(errno)); result=1; } + if (transferMTime) { + UTIL_utime(dstFileName, &statbuf); + } if ( (result != 0) /* operation failure */ && strcmp(dstFileName, stdoutmark) /* special case : don't remove() stdout */ ) { @@ -1754,6 +1781,50 @@ FIO_compressFilename_srcFile(FIO_ctx_t* const fCtx, return result; } +static const char* checked_index(const char* options[], size_t length, size_t index) { + assert(index < length); + // Necessary to avoid warnings since -O3 will omit the above `assert` + (void) length; + return options[index]; +} + +#define INDEX(options, index) checked_index((options), sizeof(options) / sizeof(char*), (index)) + +void FIO_displayCompressionParameters(const FIO_prefs_t* prefs) { + static const char* formatOptions[5] = {ZSTD_EXTENSION, GZ_EXTENSION, XZ_EXTENSION, + LZMA_EXTENSION, LZ4_EXTENSION}; + static const char* sparseOptions[3] = {" --no-sparse", "", " --sparse"}; + static const char* checkSumOptions[3] = {" --no-check", "", " --check"}; + static const char* rowMatchFinderOptions[3] = {"", " --no-row-match-finder", " --row-match-finder"}; + static const char* compressLiteralsOptions[3] = {"", " --compress-literals", " --no-compress-literals"}; + + assert(g_display_prefs.displayLevel >= 4); + + DISPLAY("--format=%s", formatOptions[prefs->compressionType]); + DISPLAY("%s", INDEX(sparseOptions, prefs->sparseFileSupport)); + DISPLAY("%s", prefs->dictIDFlag ? 
"" : " --no-dictID"); + DISPLAY("%s", INDEX(checkSumOptions, prefs->checksumFlag)); + DISPLAY(" --block-size=%d", prefs->blockSize); + if (prefs->adaptiveMode) + DISPLAY(" --adapt=min=%d,max=%d", prefs->minAdaptLevel, prefs->maxAdaptLevel); + DISPLAY("%s", INDEX(rowMatchFinderOptions, prefs->useRowMatchFinder)); + DISPLAY("%s", prefs->rsyncable ? " --rsyncable" : ""); + if (prefs->streamSrcSize) + DISPLAY(" --stream-size=%u", (unsigned) prefs->streamSrcSize); + if (prefs->srcSizeHint) + DISPLAY(" --size-hint=%d", prefs->srcSizeHint); + if (prefs->targetCBlockSize) + DISPLAY(" --target-compressed-block-size=%u", (unsigned) prefs->targetCBlockSize); + DISPLAY("%s", INDEX(compressLiteralsOptions, prefs->literalCompressionMode)); + DISPLAY(" --memory=%u", prefs->memLimit ? prefs->memLimit : 128 MB); + DISPLAY(" --threads=%d", prefs->nbWorkers); + DISPLAY("%s", prefs->excludeCompressedFiles ? " --exclude-compressed" : ""); + DISPLAY(" --%scontent-size", prefs->contentSize ? "" : "no-"); + DISPLAY("\n"); +} + +#undef INDEX + int FIO_compressFilename(FIO_ctx_t* const fCtx, FIO_prefs_t* const prefs, const char* dstFileName, const char* srcFileName, const char* dictFileName, int compressionLevel, ZSTD_compressionParameters comprParams) @@ -1890,10 +1961,15 @@ int FIO_compressMultipleFilenames(FIO_ctx_t* const fCtx, } if (fCtx->nbFilesProcessed >= 1 && fCtx->nbFilesTotal > 1 && fCtx->totalBytesInput != 0) { + UTIL_HumanReadableSize_t hr_isize = UTIL_makeHumanReadableSize((U64) fCtx->totalBytesInput); + UTIL_HumanReadableSize_t hr_osize = UTIL_makeHumanReadableSize((U64) fCtx->totalBytesOutput); + DISPLAYLEVEL(2, "\r%79s\r", ""); - DISPLAYLEVEL(2, "%d files compressed : %.2f%% (%6zu => %6zu bytes)\n", fCtx->nbFilesProcessed, + DISPLAYLEVEL(2, "%3d files compressed :%.2f%% (%6.*f%4s => %6.*f%4s)\n", + fCtx->nbFilesProcessed, (double)fCtx->totalBytesOutput/((double)fCtx->totalBytesInput)*100, - fCtx->totalBytesInput, fCtx->totalBytesOutput); + hr_isize.precision, hr_isize.value, hr_isize.suffix, + hr_osize.precision, hr_osize.value, hr_osize.suffix); } FIO_freeCResources(&ress); @@ -2155,6 +2231,8 @@ FIO_decompressZstdFrame(FIO_ctx_t* const fCtx, dRess_t* ress, FILE* finput, ZSTD_inBuffer inBuff = { ress->srcBuffer, ress->srcBufferLoaded, 0 }; ZSTD_outBuffer outBuff= { ress->dstBuffer, ress->dstBufferSize, 0 }; size_t const readSizeHint = ZSTD_decompressStream(ress->dctx, &outBuff, &inBuff); + const int displayLevel = (!fCtx->hasStdoutOutput || g_display_prefs.progressSetting == FIO_ps_always) ? 1 : 2; + UTIL_HumanReadableSize_t const hrs = UTIL_makeHumanReadableSize(alreadyDecoded+frameSize); if (ZSTD_isError(readSizeHint)) { DISPLAYLEVEL(1, "%s : Decoding error (36) : %s \n", srcFileName, ZSTD_getErrorName(readSizeHint)); @@ -2165,21 +2243,19 @@ FIO_decompressZstdFrame(FIO_ctx_t* const fCtx, dRess_t* ress, FILE* finput, /* Write block */ storedSkips = FIO_fwriteSparse(ress->dstFile, ress->dstBuffer, outBuff.pos, prefs, storedSkips); frameSize += outBuff.pos; - if (!fCtx->hasStdoutOutput || g_display_prefs.progressSetting == FIO_ps_always) { - if (fCtx->nbFilesTotal > 1) { - size_t srcFileNameSize = strlen(srcFileName); - if (srcFileNameSize > 18) { - const char* truncatedSrcFileName = srcFileName + srcFileNameSize - 15; - DISPLAYUPDATE(2, "\rDecompress: %2u/%2u files. Current: ...%s : %u MB... ", - fCtx->currFileIdx+1, fCtx->nbFilesTotal, truncatedSrcFileName, (unsigned)((alreadyDecoded+frameSize)>>20) ); - } else { - DISPLAYUPDATE(2, "\rDecompress: %2u/%2u files. Current: %s : %u MB... 
", - fCtx->currFileIdx+1, fCtx->nbFilesTotal, srcFileName, (unsigned)((alreadyDecoded+frameSize)>>20) ); - } + if (fCtx->nbFilesTotal > 1) { + size_t srcFileNameSize = strlen(srcFileName); + if (srcFileNameSize > 18) { + const char* truncatedSrcFileName = srcFileName + srcFileNameSize - 15; + DISPLAYUPDATE(displayLevel, "\rDecompress: %2u/%2u files. Current: ...%s : %.*f%s... ", + fCtx->currFileIdx+1, fCtx->nbFilesTotal, truncatedSrcFileName, hrs.precision, hrs.value, hrs.suffix); } else { - DISPLAYUPDATE(2, "\r%-20.20s : %u MB... ", - srcFileName, (unsigned)((alreadyDecoded+frameSize)>>20) ); + DISPLAYUPDATE(displayLevel, "\rDecompress: %2u/%2u files. Current: %s : %.*f%s... ", + fCtx->currFileIdx+1, fCtx->nbFilesTotal, srcFileName, hrs.precision, hrs.value, hrs.suffix); } + } else { + DISPLAYUPDATE(displayLevel, "\r%-20.20s : %.*f%s... ", + srcFileName, hrs.precision, hrs.value, hrs.suffix); } if (inBuff.pos > 0) { @@ -2403,9 +2479,11 @@ FIO_decompressLz4Frame(dRess_t* ress, FILE* srcFile, /* Write Block */ if (decodedBytes) { + UTIL_HumanReadableSize_t hrs; storedSkips = FIO_fwriteSparse(ress->dstFile, ress->dstBuffer, decodedBytes, prefs, storedSkips); filesize += decodedBytes; - DISPLAYUPDATE(2, "\rDecompressed : %u MB ", (unsigned)(filesize>>20)); + hrs = UTIL_makeHumanReadableSize(filesize); + DISPLAYUPDATE(2, "\rDecompressed : %.*f%s ", hrs.precision, hrs.value, hrs.suffix); } if (!nextToLoad) break; @@ -2513,10 +2591,10 @@ static int FIO_decompressFrames(FIO_ctx_t* const fCtx, fCtx->totalBytesOutput += (size_t)filesize; DISPLAYLEVEL(2, "\r%79s\r", ""); /* No status message in pipe mode (stdin - stdout) or multi-files mode */ - if (g_display_prefs.displayLevel >= 2) { - if (fCtx->nbFilesTotal <= 1 || g_display_prefs.displayLevel >= 3) { - DISPLAYLEVEL(2, "%-20s: %llu bytes \n", srcFileName, filesize); - } + if ((g_display_prefs.displayLevel >= 2 && fCtx->nbFilesTotal <= 1) || + g_display_prefs.displayLevel >= 3 || + g_display_prefs.progressSetting == FIO_ps_always) { + DISPLAYLEVEL(1, "\r%-20s: %llu bytes \n", srcFileName, filesize); } return 0; @@ -2537,6 +2615,7 @@ static int FIO_decompressDstFile(FIO_ctx_t* const fCtx, int result; stat_t statbuf; int releaseDstFile = 0; + int transferMTime = 0; if ((ress.dstFile == NULL) && (prefs->testMode==0)) { int dstFilePermissions = DEFAULT_FILE_PERMISSIONS; @@ -2544,6 +2623,7 @@ static int FIO_decompressDstFile(FIO_ctx_t* const fCtx, && UTIL_stat(srcFileName, &statbuf) && UTIL_isRegularFileStat(&statbuf) ) { dstFilePermissions = statbuf.st_mode; + transferMTime = 1; } releaseDstFile = 1; @@ -2569,6 +2649,10 @@ static int FIO_decompressDstFile(FIO_ctx_t* const fCtx, result = 1; } + if (transferMTime) { + UTIL_utime(dstFileName, &statbuf); + } + if ( (result != 0) /* operation failure */ && strcmp(dstFileName, stdoutmark) /* special case : don't remove() stdout */ ) { @@ -2973,25 +3057,24 @@ getFileInfo(fileInfo_t* info, const char* srcFileName) static void displayInfo(const char* inFileName, const fileInfo_t* info, int displayLevel) { - unsigned const unit = info->compressedSize < (1 MB) ? (1 KB) : (1 MB); - const char* const unitStr = info->compressedSize < (1 MB) ? 
"KB" : "MB"; - double const windowSizeUnit = (double)info->windowSize / unit; - double const compressedSizeUnit = (double)info->compressedSize / unit; - double const decompressedSizeUnit = (double)info->decompressedSize / unit; + UTIL_HumanReadableSize_t const window_hrs = UTIL_makeHumanReadableSize(info->windowSize); + UTIL_HumanReadableSize_t const compressed_hrs = UTIL_makeHumanReadableSize(info->compressedSize); + UTIL_HumanReadableSize_t const decompressed_hrs = UTIL_makeHumanReadableSize(info->decompressedSize); double const ratio = (info->compressedSize == 0) ? 0 : ((double)info->decompressedSize)/(double)info->compressedSize; const char* const checkString = (info->usesCheck ? "XXH64" : "None"); if (displayLevel <= 2) { if (!info->decompUnavailable) { - DISPLAYOUT("%6d %5d %7.2f %2s %9.2f %2s %5.3f %5s %s\n", + DISPLAYOUT("%6d %5d %6.*f%4s %8.*f%4s %5.3f %5s %s\n", info->numSkippableFrames + info->numActualFrames, info->numSkippableFrames, - compressedSizeUnit, unitStr, decompressedSizeUnit, unitStr, + compressed_hrs.precision, compressed_hrs.value, compressed_hrs.suffix, + decompressed_hrs.precision, decompressed_hrs.value, decompressed_hrs.suffix, ratio, checkString, inFileName); } else { - DISPLAYOUT("%6d %5d %7.2f %2s %5s %s\n", + DISPLAYOUT("%6d %5d %6.*f%4s %5s %s\n", info->numSkippableFrames + info->numActualFrames, info->numSkippableFrames, - compressedSizeUnit, unitStr, + compressed_hrs.precision, compressed_hrs.value, compressed_hrs.suffix, checkString, inFileName); } } else { @@ -2999,15 +3082,15 @@ displayInfo(const char* inFileName, const fileInfo_t* info, int displayLevel) DISPLAYOUT("# Zstandard Frames: %d\n", info->numActualFrames); if (info->numSkippableFrames) DISPLAYOUT("# Skippable Frames: %d\n", info->numSkippableFrames); - DISPLAYOUT("Window Size: %.2f %2s (%llu B)\n", - windowSizeUnit, unitStr, + DISPLAYOUT("Window Size: %.*f%s (%llu B)\n", + window_hrs.precision, window_hrs.value, window_hrs.suffix, (unsigned long long)info->windowSize); - DISPLAYOUT("Compressed Size: %.2f %2s (%llu B)\n", - compressedSizeUnit, unitStr, + DISPLAYOUT("Compressed Size: %.*f%s (%llu B)\n", + compressed_hrs.precision, compressed_hrs.value, compressed_hrs.suffix, (unsigned long long)info->compressedSize); if (!info->decompUnavailable) { - DISPLAYOUT("Decompressed Size: %.2f %2s (%llu B)\n", - decompressedSizeUnit, unitStr, + DISPLAYOUT("Decompressed Size: %.*f%s (%llu B)\n", + decompressed_hrs.precision, decompressed_hrs.value, decompressed_hrs.suffix, (unsigned long long)info->decompressedSize); DISPLAYOUT("Ratio: %.4f\n", ratio); } @@ -3095,24 +3178,23 @@ int FIO_listMultipleFiles(unsigned numFiles, const char** filenameTable, int dis error |= FIO_listFile(&total, filenameTable[u], displayLevel); } } if (numFiles > 1 && displayLevel <= 2) { /* display total */ - unsigned const unit = total.compressedSize < (1 MB) ? (1 KB) : (1 MB); - const char* const unitStr = total.compressedSize < (1 MB) ? "KB" : "MB"; - double const compressedSizeUnit = (double)total.compressedSize / unit; - double const decompressedSizeUnit = (double)total.decompressedSize / unit; + UTIL_HumanReadableSize_t const compressed_hrs = UTIL_makeHumanReadableSize(total.compressedSize); + UTIL_HumanReadableSize_t const decompressed_hrs = UTIL_makeHumanReadableSize(total.decompressedSize); double const ratio = (total.compressedSize == 0) ? 0 : ((double)total.decompressedSize)/(double)total.compressedSize; const char* const checkString = (total.usesCheck ? 
"XXH64" : ""); DISPLAYOUT("----------------------------------------------------------------- \n"); if (total.decompUnavailable) { - DISPLAYOUT("%6d %5d %7.2f %2s %5s %u files\n", + DISPLAYOUT("%6d %5d %6.*f%4s %5s %u files\n", total.numSkippableFrames + total.numActualFrames, total.numSkippableFrames, - compressedSizeUnit, unitStr, + compressed_hrs.precision, compressed_hrs.value, compressed_hrs.suffix, checkString, (unsigned)total.nbFiles); } else { - DISPLAYOUT("%6d %5d %7.2f %2s %9.2f %2s %5.3f %5s %u files\n", + DISPLAYOUT("%6d %5d %6.*f%4s %8.*f%4s %5.3f %5s %u files\n", total.numSkippableFrames + total.numActualFrames, total.numSkippableFrames, - compressedSizeUnit, unitStr, decompressedSizeUnit, unitStr, + compressed_hrs.precision, compressed_hrs.value, compressed_hrs.suffix, + decompressed_hrs.precision, decompressed_hrs.value, decompressed_hrs.suffix, ratio, checkString, (unsigned)total.nbFiles); } } return error; diff --git a/programs/fileio.h b/programs/fileio.h index 9d97ec8bf..61094db83 100644 --- a/programs/fileio.h +++ b/programs/fileio.h @@ -100,7 +100,7 @@ void FIO_setSrcSizeHint(FIO_prefs_t* const prefs, size_t srcSizeHint); void FIO_setTestMode(FIO_prefs_t* const prefs, int testMode); void FIO_setLiteralCompressionMode( FIO_prefs_t* const prefs, - ZSTD_literalCompressionMode_e mode); + ZSTD_paramSwitch_e mode); void FIO_setProgressSetting(FIO_progressSetting_e progressSetting); void FIO_setNotificationLevel(int level); @@ -108,6 +108,7 @@ void FIO_setExcludeCompressedFile(FIO_prefs_t* const prefs, int excludeCompresse void FIO_setAllowBlockDevices(FIO_prefs_t* const prefs, int allowBlockDevices); void FIO_setPatchFromMode(FIO_prefs_t* const prefs, int value); void FIO_setContentSize(FIO_prefs_t* const prefs, int value); +void FIO_displayCompressionParameters(const FIO_prefs_t* prefs); /* FIO_ctx_t functions */ void FIO_setNbFilesTotal(FIO_ctx_t* const fCtx, int value); diff --git a/programs/util.c b/programs/util.c index 8d190c62c..d69b72a37 100644 --- a/programs/util.c +++ b/programs/util.c @@ -159,6 +159,29 @@ int UTIL_chmod(char const* filename, const stat_t* statbuf, mode_t permissions) return chmod(filename, permissions); } +/* set access and modification times */ +int UTIL_utime(const char* filename, const stat_t *statbuf) +{ + int ret; + /* We check that st_mtime is a macro here in order to give us confidence + * that struct stat has a struct timespec st_mtim member. We need this + * check because there are some platforms that claim to be POSIX 2008 + * compliant but which do not have st_mtim... */ +#if (PLATFORM_POSIX_VERSION >= 200809L) && defined(st_mtime) + /* (atime, mtime) */ + struct timespec timebuf[2] = { {0, UTIME_NOW} }; + timebuf[1] = statbuf->st_mtim; + ret = utimensat(AT_FDCWD, filename, timebuf, 0); +#else + struct utimbuf timebuf; + timebuf.actime = time(NULL); + timebuf.modtime = statbuf->st_mtime; + ret = utime(filename, &timebuf); +#endif + errno = 0; + return ret; +} + int UTIL_setFileStat(const char *filename, const stat_t *statbuf) { int res = 0; @@ -168,25 +191,7 @@ int UTIL_setFileStat(const char *filename, const stat_t *statbuf) return -1; /* set access and modification times */ - /* We check that st_mtime is a macro here in order to give us confidence - * that struct stat has a struct timespec st_mtim member. We need this - * check because there are some platforms that claim to be POSIX 2008 - * compliant but which do not have st_mtim... 
*/ -#if (PLATFORM_POSIX_VERSION >= 200809L) && defined(st_mtime) - { - /* (atime, mtime) */ - struct timespec timebuf[2] = { {0, UTIME_NOW} }; - timebuf[1] = statbuf->st_mtim; - res += utimensat(AT_FDCWD, filename, timebuf, 0); - } -#else - { - struct utimbuf timebuf; - timebuf.actime = time(NULL); - timebuf.modtime = statbuf->st_mtime; - res += utime(filename, &timebuf); - } -#endif + res += UTIL_utime(filename, statbuf); #if !defined(_WIN32) res += chown(filename, statbuf->st_uid, statbuf->st_gid); /* Copy ownership */ @@ -303,6 +308,62 @@ U64 UTIL_getFileSizeStat(const stat_t* statbuf) return (U64)statbuf->st_size; } +UTIL_HumanReadableSize_t UTIL_makeHumanReadableSize(U64 size) +{ + UTIL_HumanReadableSize_t hrs; + + if (g_utilDisplayLevel > 3) { + /* In verbose mode, do not scale sizes down, except in the case of + * values that exceed the integral precision of a double. */ + if (size >= (1ull << 53)) { + hrs.value = (double)size / (1ull << 20); + hrs.suffix = " MiB"; + /* At worst, a double representation of a maximal size will be + * accurate to better than tens of kilobytes. */ + hrs.precision = 2; + } else { + hrs.value = (double)size; + hrs.suffix = " B"; + hrs.precision = 0; + } + } else { + /* In regular mode, scale sizes down and use suffixes. */ + if (size >= (1ull << 60)) { + hrs.value = (double)size / (1ull << 60); + hrs.suffix = " EiB"; + } else if (size >= (1ull << 50)) { + hrs.value = (double)size / (1ull << 50); + hrs.suffix = " PiB"; + } else if (size >= (1ull << 40)) { + hrs.value = (double)size / (1ull << 40); + hrs.suffix = " TiB"; + } else if (size >= (1ull << 30)) { + hrs.value = (double)size / (1ull << 30); + hrs.suffix = " GiB"; + } else if (size >= (1ull << 20)) { + hrs.value = (double)size / (1ull << 20); + hrs.suffix = " MiB"; + } else if (size >= (1ull << 10)) { + hrs.value = (double)size / (1ull << 10); + hrs.suffix = " KiB"; + } else { + hrs.value = (double)size; + hrs.suffix = " B"; + } + + if (hrs.value >= 100 || (U64)hrs.value == size) { + hrs.precision = 0; + } else if (hrs.value >= 10) { + hrs.precision = 1; + } else if (hrs.value > 1) { + hrs.precision = 2; + } else { + hrs.precision = 3; + } + } + + return hrs; +} U64 UTIL_getTotalFileSize(const char* const * fileNamesTable, unsigned nbFiles) { @@ -931,7 +992,7 @@ makeUniqueMirroredDestDirs(char** srcDirNames, unsigned nbFile, const char* outD char* prevDirName = srcDirNames[i - 1]; char* currDirName = srcDirNames[i]; - /* note: we alwasy compare trimmed path, i.e.: + /* note: we always compare trimmed path, i.e.: * src dir of "./foo" and "/foo" will be both saved into: * "outDirName/foo/" */ if (!firstIsParentOrSameDirOfSecond(trimPath(prevDirName), @@ -939,7 +1000,7 @@ makeUniqueMirroredDestDirs(char** srcDirNames, unsigned nbFile, const char* outD uniqueDirNr++; /* we need maintain original src dir name instead of trimmed - * dir, so we can retrive the original src dir's mode_t */ + * dir, so we can retrieve the original src dir's mode_t */ uniqueDirNames[uniqueDirNr - 1] = currDirName; } @@ -1048,7 +1109,7 @@ FileNamesTable* UTIL_createFNT_fromROTable(const char** filenames, size_t nbFile /*-**************************************** -* count the number of physical cores +* count the number of cores ******************************************/ #if defined(_WIN32) || defined(WIN32) @@ -1057,10 +1118,26 @@ FileNamesTable* UTIL_createFNT_fromROTable(const char** filenames, size_t nbFile typedef BOOL(WINAPI* LPFN_GLPI)(PSYSTEM_LOGICAL_PROCESSOR_INFORMATION, PDWORD); -int UTIL_countPhysicalCores(void) 
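/* Illustration only (not part of the patch): the UTIL_makeHumanReadableSize() helper
 * added above is what lets the CLI replace its fixed "MB" counters with auto-scaled
 * units. The returned components are passed to a "%.*f%s" format in precision, value,
 * suffix order. The function name printSizeExample() below is hypothetical and exists
 * only for this sketch. */
#include <stdio.h>
#include "util.h"   /* U64, UTIL_HumanReadableSize_t, UTIL_makeHumanReadableSize */

static void printSizeExample(U64 nbBytes)
{
    UTIL_HumanReadableSize_t const hrs = UTIL_makeHumanReadableSize(nbBytes);
    /* e.g. 1234567 bytes prints roughly as "1.18 MiB" at default display levels,
     * and as "1234567 B" when g_utilDisplayLevel > 3 (verbose mode). */
    printf("%.*f%s\n", hrs.precision, hrs.value, hrs.suffix);
}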
+DWORD CountSetBits(ULONG_PTR bitMask) { - static int numPhysicalCores = 0; - if (numPhysicalCores != 0) return numPhysicalCores; + DWORD LSHIFT = sizeof(ULONG_PTR)*8 - 1; + DWORD bitSetCount = 0; + ULONG_PTR bitTest = (ULONG_PTR)1 << LSHIFT; + DWORD i; + + for (i = 0; i <= LSHIFT; ++i) + { + bitSetCount += ((bitMask & bitTest)?1:0); + bitTest/=2; + } + + return bitSetCount; +} + +int UTIL_countCores(int logical) +{ + static int numCores = 0; + if (numCores != 0) return numCores; { LPFN_GLPI glpi; BOOL done = FALSE; @@ -1106,7 +1183,10 @@ int UTIL_countPhysicalCores(void) while (byteOffset + sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION) <= returnLength) { if (ptr->Relationship == RelationProcessorCore) { - numPhysicalCores++; + if (logical) + numCores += CountSetBits(ptr->ProcessorMask); + else + numCores++; } ptr++; @@ -1115,17 +1195,17 @@ int UTIL_countPhysicalCores(void) free(buffer); - return numPhysicalCores; + return numCores; } failed: /* try to fall back on GetSystemInfo */ { SYSTEM_INFO sysinfo; GetSystemInfo(&sysinfo); - numPhysicalCores = sysinfo.dwNumberOfProcessors; - if (numPhysicalCores == 0) numPhysicalCores = 1; /* just in case */ + numCores = sysinfo.dwNumberOfProcessors; + if (numCores == 0) numCores = 1; /* just in case */ } - return numPhysicalCores; + return numCores; } #elif defined(__APPLE__) @@ -1134,24 +1214,24 @@ failed: /* Use apple-provided syscall * see: man 3 sysctl */ -int UTIL_countPhysicalCores(void) +int UTIL_countCores(int logical) { - static S32 numPhysicalCores = 0; /* apple specifies int32_t */ - if (numPhysicalCores != 0) return numPhysicalCores; + static S32 numCores = 0; /* apple specifies int32_t */ + if (numCores != 0) return numCores; { size_t size = sizeof(S32); - int const ret = sysctlbyname("hw.physicalcpu", &numPhysicalCores, &size, NULL, 0); + int const ret = sysctlbyname(logical ? 
"hw.logicalcpu" : "hw.physicalcpu", &numCores, &size, NULL, 0); if (ret != 0) { if (errno == ENOENT) { /* entry not present, fall back on 1 */ - numPhysicalCores = 1; + numCores = 1; } else { - perror("zstd: can't get number of physical cpus"); + perror("zstd: can't get number of cpus"); exit(1); } } - return numPhysicalCores; + return numCores; } } @@ -1160,16 +1240,16 @@ int UTIL_countPhysicalCores(void) /* parse /proc/cpuinfo * siblings / cpu cores should give hyperthreading ratio * otherwise fall back on sysconf */ -int UTIL_countPhysicalCores(void) +int UTIL_countCores(int logical) { - static int numPhysicalCores = 0; + static int numCores = 0; - if (numPhysicalCores != 0) return numPhysicalCores; + if (numCores != 0) return numCores; - numPhysicalCores = (int)sysconf(_SC_NPROCESSORS_ONLN); - if (numPhysicalCores == -1) { + numCores = (int)sysconf(_SC_NPROCESSORS_ONLN); + if (numCores == -1) { /* value not queryable, fall back on 1 */ - return numPhysicalCores = 1; + return numCores = 1; } /* try to determine if there's hyperthreading */ @@ -1183,7 +1263,7 @@ int UTIL_countPhysicalCores(void) if (cpuinfo == NULL) { /* fall back on the sysconf value */ - return numPhysicalCores; + return numCores; } /* assume the cpu cores/siblings values will be constant across all @@ -1216,13 +1296,13 @@ int UTIL_countPhysicalCores(void) ratio = siblings / cpu_cores; } - if (ratio && numPhysicalCores > ratio) { - numPhysicalCores = numPhysicalCores / ratio; + if (ratio && numCores > ratio && !logical) { + numCores = numCores / ratio; } failed: fclose(cpuinfo); - return numPhysicalCores; + return numCores; } } @@ -1233,52 +1313,70 @@ failed: /* Use physical core sysctl when available * see: man 4 smp, man 3 sysctl */ -int UTIL_countPhysicalCores(void) +int UTIL_countCores(int logical) { - static int numPhysicalCores = 0; /* freebsd sysctl is native int sized */ - if (numPhysicalCores != 0) return numPhysicalCores; + static int numCores = 0; /* freebsd sysctl is native int sized */ +#if __FreeBSD_version >= 1300008 + static int perCore = 1; +#endif + if (numCores != 0) return numCores; #if __FreeBSD_version >= 1300008 - { size_t size = sizeof(numPhysicalCores); - int ret = sysctlbyname("kern.smp.cores", &numPhysicalCores, &size, NULL, 0); - if (ret == 0) return numPhysicalCores; + { size_t size = sizeof(numCores); + int ret = sysctlbyname("kern.smp.cores", &numCores, &size, NULL, 0); + if (ret == 0) { + if (logical) { + ret = sysctlbyname("kern.smp.threads_per_core", &perCore, &size, NULL, 0); + /* default to physical cores if logical cannot be read */ + if (ret == 0) + numCores *= perCore; + } + + return numCores; + } if (errno != ENOENT) { - perror("zstd: can't get number of physical cpus"); + perror("zstd: can't get number of cpus"); exit(1); } /* sysctl not present, fall through to older sysconf method */ } +#else + /* suppress unused parameter warning */ + (void) logical; #endif - numPhysicalCores = (int)sysconf(_SC_NPROCESSORS_ONLN); - if (numPhysicalCores == -1) { + numCores = (int)sysconf(_SC_NPROCESSORS_ONLN); + if (numCores == -1) { /* value not queryable, fall back on 1 */ - numPhysicalCores = 1; + numCores = 1; } - return numPhysicalCores; + return numCores; } #elif defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__CYGWIN__) /* Use POSIX sysconf * see: man 3 sysconf */ -int UTIL_countPhysicalCores(void) +int UTIL_countCores(int logical) { - static int numPhysicalCores = 0; + static int numCores = 0; - if (numPhysicalCores != 0) return numPhysicalCores; + /* 
suppress unused parameter warning */ + (void)logical; - numPhysicalCores = (int)sysconf(_SC_NPROCESSORS_ONLN); - if (numPhysicalCores == -1) { + if (numCores != 0) return numCores; + + numCores = (int)sysconf(_SC_NPROCESSORS_ONLN); + if (numCores == -1) { /* value not queryable, fall back on 1 */ - return numPhysicalCores = 1; + return numCores = 1; } - return numPhysicalCores; + return numCores; } #else -int UTIL_countPhysicalCores(void) +int UTIL_countCores(int logical) { /* assume 1 */ return 1; @@ -1286,6 +1384,16 @@ int UTIL_countPhysicalCores(void) #endif +int UTIL_countPhysicalCores(void) +{ + return UTIL_countCores(0); +} + +int UTIL_countLogicalCores(void) +{ + return UTIL_countCores(1); +} + #if defined (__cplusplus) } #endif diff --git a/programs/util.h b/programs/util.h index 24cce4480..add165d57 100644 --- a/programs/util.h +++ b/programs/util.h @@ -64,7 +64,7 @@ extern "C" { # define SET_REALTIME_PRIORITY /* disabled */ # endif -#else /* unknown non-unix operating systen */ +#else /* unknown non-unix operating system */ # define UTIL_sleep(s) /* disabled */ # define UTIL_sleepMilli(milli) /* disabled */ # define SET_REALTIME_PRIORITY /* disabled */ @@ -122,6 +122,7 @@ int UTIL_requireUserConfirmation(const char* prompt, const char* abortMsg, const #define STRDUP(s) strdup(s) #endif + /** * Calls platform's equivalent of stat() on filename and writes info to statbuf. * Returns success (1) or failure (0). @@ -135,6 +136,14 @@ int UTIL_stat(const char* filename, stat_t* statbuf); */ int UTIL_setFileStat(const char* filename, const stat_t* statbuf); +/** + * Set atime to now and mtime to the st_mtim in statbuf. + * + * Directly wraps utime() or utimensat(). Returns -1 on error. + * Does not validate that the filename is valid. + */ +int UTIL_utime(const char* filename, const stat_t *statbuf); + /* * These helpers operate on a pre-populated stat_t, i.e., the result of * calling one of the above functions. @@ -170,6 +179,23 @@ int UTIL_isFIFO(const char* infilename); U64 UTIL_getFileSize(const char* infilename); U64 UTIL_getTotalFileSize(const char* const * fileNamesTable, unsigned nbFiles); +/** + * Take @size in bytes, + * prepare the components to pretty-print it in a scaled way. + * The components in the returned struct should be passed in + * precision, value, suffix order to a "%.*f%s" format string. + * Output policy is sensitive to @g_utilDisplayLevel: + * in verbose mode (@g_utilDisplayLevel >= 4), + * sizes are not scaled down. + */ +typedef struct { + double value; + int precision; + const char* suffix; +} UTIL_HumanReadableSize_t; + +UTIL_HumanReadableSize_t UTIL_makeHumanReadableSize(U64 size); + int UTIL_compareStr(const void *p1, const void *p2); const char* UTIL_getFileExtension(const char* infilename); void UTIL_mirrorSourceFilesDirectories(const char** fileNamesTable, unsigned int nbFiles, const char *outDirName); @@ -275,13 +301,19 @@ void UTIL_refFilename(FileNamesTable* fnt, const char* filename); FileNamesTable* UTIL_createExpandedFNT(const char* const* filenames, size_t nbFilenames, int followLinks); +#if defined(_WIN32) || defined(WIN32) +DWORD CountSetBits(ULONG_PTR bitMask); +#endif /*-**************************************** * System ******************************************/ +int UTIL_countCores(int logical); + int UTIL_countPhysicalCores(void); +int UTIL_countLogicalCores(void); #if defined (__cplusplus) } diff --git a/programs/zstd.1 b/programs/zstd.1 index 861f9380b..3dd8634df 100644 --- a/programs/zstd.1 +++ b/programs/zstd.1 @@ -1,491 +1,340 @@ -. 
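/* Illustration only (not part of the patch): with UTIL_countCores() and the two
 * wrappers declared above, a front-end can pick its thread-count default the way
 * the new --auto-threads={physical,logical} option (documented below) describes.
 * The helper name defaultThreadCount() is hypothetical, used only for this sketch. */
#include "util.h"   /* UTIL_countPhysicalCores, UTIL_countLogicalCores */

static int defaultThreadCount(int useLogical)
{
    /* Both counters fall back to 1 when detection is unavailable, so the result
     * is always a usable value for a -T0 style "use all cores" default. */
    return useLogical ? UTIL_countLogicalCores() : UTIL_countPhysicalCores();
}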
-.TH "ZSTD" "1" "May 2021" "zstd 1.5.0" "User Commands" -. +.TH "ZSTD" "1" "September 2021" "zstd 1.5.1" "User Commands" .SH "NAME" \fBzstd\fR \- zstd, zstdmt, unzstd, zstdcat \- Compress or decompress \.zst files -. .SH "SYNOPSIS" -\fBzstd\fR [\fIOPTIONS\fR] [\-|\fIINPUT\-FILE\fR] [\-o \fIOUTPUT\-FILE\fR] -. +.TS +allbox; +\fBzstd\fR [\fIOPTIONS\fR] [\- \fIINPUT\-FILE\fR] [\-o \fIOUTPUT\-FILE\fR] +.TE .P \fBzstdmt\fR is equivalent to \fBzstd \-T0\fR -. .P \fBunzstd\fR is equivalent to \fBzstd \-d\fR -. .P \fBzstdcat\fR is equivalent to \fBzstd \-dcf\fR -. .SH "DESCRIPTION" \fBzstd\fR is a fast lossless compression algorithm and data compression tool, with command line syntax similar to \fBgzip (1)\fR and \fBxz (1)\fR\. It is based on the \fBLZ77\fR family, with further FSE & huff0 entropy stages\. \fBzstd\fR offers highly configurable compression speed, with fast modes at > 200 MB/s per core, and strong modes nearing lzma compression ratios\. It also features a very fast decoder, with speeds > 500 MB/s per core\. -. .P \fBzstd\fR command line syntax is generally similar to gzip, but features the following differences : -. -.IP "\(bu" 4 +.IP "\[ci]" 4 Source files are preserved by default\. It\'s possible to remove them automatically by using the \fB\-\-rm\fR command\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 When compressing a single file, \fBzstd\fR displays progress notifications and result summary by default\. Use \fB\-q\fR to turn them off\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fBzstd\fR does not accept input from console, but it properly accepts \fBstdin\fR when it\'s not the console\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fBzstd\fR displays a short help page when command line is an error\. Use \fB\-q\fR to turn it off\. -. .IP "" 0 -. .P \fBzstd\fR compresses or decompresses each \fIfile\fR according to the selected operation mode\. If no \fIfiles\fR are given or \fIfile\fR is \fB\-\fR, \fBzstd\fR reads from standard input and writes the processed data to standard output\. \fBzstd\fR will refuse to write compressed data to standard output if it is a terminal : it will display an error message and skip the \fIfile\fR\. Similarly, \fBzstd\fR will refuse to read compressed data from standard input if it is a terminal\. -. .P Unless \fB\-\-stdout\fR or \fB\-o\fR is specified, \fIfiles\fR are written to a new file whose name is derived from the source \fIfile\fR name: -. -.IP "\(bu" 4 +.IP "\[ci]" 4 When compressing, the suffix \fB\.zst\fR is appended to the source filename to get the target filename\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 When decompressing, the \fB\.zst\fR suffix is removed from the source filename to get the target filename -. .IP "" 0 -. .SS "Concatenation with \.zst files" It is possible to concatenate \fB\.zst\fR files as is\. \fBzstd\fR will decompress such files as if they were a single \fB\.zst\fR file\. -. .SH "OPTIONS" -. .SS "Integer suffixes and special values" In most places where an integer argument is expected, an optional suffix is supported to easily indicate large integers\. There must be no space between the integer and the suffix\. -. .TP \fBKiB\fR -Multiply the integer by 1,024 (2^10)\. \fBKi\fR, \fBK\fR, and \fBKB\fR are accepted as synonyms for \fBKiB\fR\. -. +Multiply the integer by 1,024 (2\e^10)\. \fBKi\fR, \fBK\fR, and \fBKB\fR are accepted as synonyms for \fBKiB\fR\. .TP \fBMiB\fR -Multiply the integer by 1,048,576 (2^20)\. \fBMi\fR, \fBM\fR, and \fBMB\fR are accepted as synonyms for \fBMiB\fR\. -. +Multiply the integer by 1,048,576 (2\e^20)\. 
\fBMi\fR, \fBM\fR, and \fBMB\fR are accepted as synonyms for \fBMiB\fR\. .SS "Operation mode" If multiple operation mode options are given, the last one takes effect\. -. .TP \fB\-z\fR, \fB\-\-compress\fR Compress\. This is the default operation mode when no operation mode option is specified and no other operation mode is implied from the command name (for example, \fBunzstd\fR implies \fB\-\-decompress\fR)\. -. .TP \fB\-d\fR, \fB\-\-decompress\fR, \fB\-\-uncompress\fR Decompress\. -. .TP \fB\-t\fR, \fB\-\-test\fR Test the integrity of compressed \fIfiles\fR\. This option is equivalent to \fB\-\-decompress \-\-stdout\fR except that the decompressed data is discarded instead of being written to standard output\. No files are created or removed\. -. .TP \fB\-b#\fR Benchmark file(s) using compression level # -. .TP \fB\-\-train FILEs\fR Use FILEs as a training set to create a dictionary\. The training set should contain a lot of small files (> 100)\. -. .TP \fB\-l\fR, \fB\-\-list\fR Display information related to a zstd compressed file, such as size, ratio, and checksum\. Some of these fields may not be available\. This command can be augmented with the \fB\-v\fR modifier\. -. .SS "Operation modifiers" -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-#\fR: \fB#\fR compression level [1\-19] (default: 3) -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-ultra\fR: unlocks high compression levels 20+ (maximum 22), using a lot more memory\. Note that decompression will also require more memory when using these levels\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-fast[=#]\fR: switch to ultra\-fast compression levels\. If \fB=#\fR is not present, it defaults to \fB1\fR\. The higher the value, the faster the compression speed, at the cost of some compression ratio\. This setting overwrites compression level if one was set previously\. Similarly, if a compression level is set after \fB\-\-fast\fR, it overrides it\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-T#\fR, \fB\-\-threads=#\fR: Compress using \fB#\fR working threads (default: 1)\. If \fB#\fR is 0, attempt to detect and use the number of physical CPU cores\. In all cases, the nb of threads is capped to \fBZSTDMT_NBWORKERS_MAX\fR, which is either 64 in 32\-bit mode, or 256 for 64\-bit environments\. This modifier does nothing if \fBzstd\fR is compiled without multithread support\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-single\-thread\fR: Does not spawn a thread for compression, use a single thread for both I/O and compression\. In this mode, compression is serialized with I/O, which is slightly slower\. (This is different from \fB\-T1\fR, which spawns 1 compression thread in parallel of I/O)\. This mode is the only one available when multithread support is disabled\. Single\-thread mode features lower memory usage\. Final compressed result is slightly different from \fB\-T1\fR\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-\-auto\-threads={physical,logical} (default: physical)\fR: When using a default amount of threads via \fB\-T0\fR, choose the default based on the number of detected physical or logical cores\. +.IP "\[ci]" 4 \fB\-\-adapt[=min=#,max=#]\fR : \fBzstd\fR will dynamically adapt compression level to perceived I/O conditions\. Compression level adaptation can be observed live by using command \fB\-v\fR\. Adaptation can be constrained between supplied \fBmin\fR and \fBmax\fR levels\. The feature works when combined with multi\-threading and \fB\-\-long\fR mode\. It does not work with \fB\-\-single\-thread\fR\. 
It sets window size to 8 MB by default (can be changed manually, see \fBwlog\fR)\. Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible\. \fInote\fR : at the time of this writing, \fB\-\-adapt\fR can remain stuck at low speed when combined with multiple worker threads (>=2)\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-long[=#]\fR: enables long distance matching with \fB#\fR \fBwindowLog\fR, if \fB#\fR is not present, it defaults to \fB27\fR\. This increases the window size (\fBwindowLog\fR) and memory usage for both the compressor and decompressor\. This setting is designed to improve the compression ratio for files with long matches at a large distance\. -. .IP Note: If \fBwindowLog\fR is set to larger than 27, \fB\-\-long=windowLog\fR or \fB\-\-memory=windowSize\fR needs to be passed to the decompressor\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-D DICT\fR: use \fBDICT\fR as Dictionary to compress or decompress FILE(s) -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-patch\-from FILE\fR: Specify the file to be used as a reference point for zstd\'s diff engine\. This is effectively dictionary compression with some convenient parameter selection, namely that windowSize > srcSize\. -. .IP Note: cannot use both this and \-D together\. Note: \fB\-\-long\fR mode will be automatically activated if chainLog < fileLog (fileLog being the windowLog required to cover the whole file)\. You can also manually force it\. Note: for all levels, you can use \-\-patch\-from in \-\-single\-thread mode to improve compression ratio at the cost of speed\. Note: for level 19, you can get increased compression ratio at the cost of speed by specifying \fB\-\-zstd=targetLength=\fR to be something large (i\.e\. 4096), and by setting a large \fB\-\-zstd=chainLog=\fR -. -.IP "\(bu" 4 -\fB\-\-rsyncable\fR : \fBzstd\fR will periodically synchronize the compression state to make the compressed file more rsync\-friendly\. There is a negligible impact to compression ratio, and the faster compression levels will see a small compression speed hit\. This feature does not work with \fB\-\-single\-thread\fR\. You probably don\'t want to use it with long range mode, since it will decrease the effectiveness of the synchronization points, but your milage may vary\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-\-rsyncable\fR : \fBzstd\fR will periodically synchronize the compression state to make the compressed file more rsync\-friendly\. There is a negligible impact to compression ratio, and the faster compression levels will see a small compression speed hit\. This feature does not work with \fB\-\-single\-thread\fR\. You probably don\'t want to use it with long range mode, since it will decrease the effectiveness of the synchronization points, but your mileage may vary\. +.IP "\[ci]" 4 \fB\-C\fR, \fB\-\-[no\-]check\fR: add integrity check computed from uncompressed data (default: enabled) -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-[no\-]content\-size\fR: enable / disable whether or not the original size of the file is placed in the header of the compressed file\. The default option is \-\-content\-size (meaning that the original size will be placed in the header)\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-no\-dictID\fR: do not store dictionary ID within frame header (dictionary compression)\. The decoder will have to rely on implicit knowledge about which dictionary to use, it won\'t be able to check if it\'s correct\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-M#\fR, \fB\-\-memory=#\fR: Set a memory usage limit\. 
By default, Zstandard uses 128 MB for decompression as the maximum amount of memory the decompressor is allowed to use, but you can override this manually if need be in either direction (ie\. you can increase or decrease it)\. -. .IP This is also used during compression when using with \-\-patch\-from=\. In this case, this parameter overrides that maximum size allowed for a dictionary\. (128 MB)\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-stream\-size=#\fR : Sets the pledged source size of input coming from a stream\. This value must be exact, as it will be included in the produced frame header\. Incorrect stream sizes will cause an error\. This information will be used to better optimize compression parameters, resulting in better and potentially faster compression, especially for smaller source sizes\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-size\-hint=#\fR: When handling input from a stream, \fBzstd\fR must guess how large the source size will be when optimizing compression parameters\. If the stream size is relatively small, this guess may be a poor one, resulting in a higher compression ratio than expected\. This feature allows for controlling the guess when needed\. Exact guesses result in better compression ratios\. Overestimates result in slightly degraded compression ratios, while underestimates may result in significant degradation\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-o FILE\fR: save result into \fBFILE\fR -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-f\fR, \fB\-\-force\fR: disable input and output checks\. Allows overwriting existing files, input from console, output to stdout, operating on links, block devices, etc\. -. -.IP "\(bu" 4 -\fB\-c\fR, \fB\-\-stdout\fR: force write to standard output, even if it is the console -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-c\fR, \fB\-\-stdout\fR: write to standard output (even if it is the console) +.IP "\[ci]" 4 \fB\-\-[no\-]sparse\fR: enable / disable sparse FS support, to make files with many zeroes smaller on disk\. Creating sparse files may save disk space and speed up decompression by reducing the amount of disk I/O\. default: enabled when output is into a file, and disabled when output is stdout\. This setting overrides default and can force sparse mode over stdout\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-rm\fR: remove source file(s) after successful compression or decompression\. If used in combination with \-o, will trigger a confirmation prompt (which can be silenced with \-f), as this is a destructive operation\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-k\fR, \fB\-\-keep\fR: keep source file(s) after successful compression or decompression\. This is the default behavior\. -. -.IP "\(bu" 4 -\fB\-r\fR: operate recursively on directories -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-r\fR: operate recursively on directories\. It selects all files in the named directory and all its subdirectories\. This can be useful both to reduce command line typing, and to circumvent shell expansion limitations, when there are a lot of files and naming breaks the maximum size of a command line\. +.IP "\[ci]" 4 \fB\-\-filelist FILE\fR read a list of files to process as content from \fBFILE\fR\. Format is compatible with \fBls\fR output, with one file per line\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-output\-dir\-flat DIR\fR: resulting files are stored into target \fBDIR\fR directory, instead of same directory as origin file\. Be aware that this command can introduce name collision issues, if multiple files, from different directories, end up having the same name\. 
Collision resolution ensures first file with a given name will be present in \fBDIR\fR, while in combination with \fB\-f\fR, the last file will be present instead\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-output\-dir\-mirror DIR\fR: similar to \fB\-\-output\-dir\-flat\fR, the output files are stored underneath target \fBDIR\fR directory, but this option will replicate input directory hierarchy into output \fBDIR\fR\. -. .IP If input directory contains "\.\.", the files in this directory will be ignored\. If input directory is an absolute directory (i\.e\. "/var/tmp/abc"), it will be stored into the "output\-dir/var/tmp/abc"\. If there are multiple input files or directories, name collision resolution will follow the same rules as \fB\-\-output\-dir\-flat\fR\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-format=FORMAT\fR: compress and decompress in other formats\. If compiled with support, zstd can compress to or decompress from other compression algorithm formats\. Possibly available options are \fBzstd\fR, \fBgzip\fR, \fBxz\fR, \fBlzma\fR, and \fBlz4\fR\. If no such format is provided, \fBzstd\fR is the default\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-h\fR/\fB\-H\fR, \fB\-\-help\fR: display help/long help and exit -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-V\fR, \fB\-\-version\fR: display version number and exit\. Advanced : \fB\-vV\fR also displays supported formats\. \fB\-vvV\fR also displays POSIX support\. \fB\-q\fR will only display the version number, suitable for machine reading\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-v\fR, \fB\-\-verbose\fR: verbose mode, display more information -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-q\fR, \fB\-\-quiet\fR: suppress warnings, interactivity, and notifications\. specify twice to suppress errors too\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-no\-progress\fR: do not display the progress bar, but keep all other messages\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-show\-default\-cparams\fR: Shows the default compression parameters that will be used for a particular src file\. If the provided src file is not a regular file (eg\. named pipe), the cli will just output the default parameters\. That is, the parameters that are used when the src size is unknown\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-\fR: All arguments after \fB\-\-\fR are treated as files -. .IP "" 0 -. .SS "Restricted usage of Environment Variables" Using environment variables to set parameters has security implications\. Therefore, this avenue is intentionally restricted\. Only \fBZSTD_CLEVEL\fR and \fBZSTD_NBTHREADS\fR are currently supported\. They set the compression level and number of threads to use during compression, respectively\. -. .P \fBZSTD_CLEVEL\fR can be used to set the level between 1 and 19 (the "normal" range)\. If the value of \fBZSTD_CLEVEL\fR is not a valid integer, it will be ignored with a warning message\. \fBZSTD_CLEVEL\fR just replaces the default compression level (\fB3\fR)\. -. .P \fBZSTD_NBTHREADS\fR can be used to set the number of threads \fBzstd\fR will attempt to use during compression\. If the value of \fBZSTD_NBTHREADS\fR is not a valid unsigned integer, it will be ignored with a warning message\. \fBZSTD_NBTHREADS\fR has a default value of (\fB1\fR), and is capped at ZSTDMT_NBWORKERS_MAX==200\. \fBzstd\fR must be compiled with multithread support for this to have any effect\. -. .P They can both be overridden by corresponding command line arguments: \fB\-#\fR for compression level and \fB\-T#\fR for number of compression threads\. -. 
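To make the precedence rules above concrete, here is a minimal C sketch of reading `ZSTD_CLEVEL` as a soft default that a later `-#` argument overrides. This is an illustration only: the helper name and the exact handling of out-of-range values are assumptions, not the actual `zstdcli.c` implementation.

```c
#include <stdio.h>
#include <stdlib.h>

/* Illustrative sketch (not the real zstdcli.c code): ZSTD_CLEVEL supplies a
 * default compression level, falling back to 3 when the variable is unset or
 * not a valid integer in the 1-19 range described above. */
static int clevel_from_env_or_default(void)
{
    const char* const env = getenv("ZSTD_CLEVEL");
    if (env != NULL) {
        char* end = NULL;
        long const v = strtol(env, &end, 10);
        if (end != env && *end == '\0' && v >= 1 && v <= 19)
            return (int)v;
        fprintf(stderr, "ignoring invalid ZSTD_CLEVEL value: %s\n", env);
    }
    return 3;  /* documented default compression level */
}

int main(void)
{
    int cLevel = clevel_from_env_or_default();
    /* A command-line level such as "-19", parsed afterwards, simply overwrites cLevel. */
    printf("effective compression level: %d\n", cLevel);
    return 0;
}
```

`ZSTD_NBTHREADS` follows the same pattern, with `-T#` on the command line taking precedence.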
.SH "DICTIONARY BUILDER" \fBzstd\fR offers \fIdictionary\fR compression, which greatly improves efficiency on small files and messages\. It\'s possible to train \fBzstd\fR with a set of samples, the result of which is saved into a file called a \fBdictionary\fR\. Then during compression and decompression, reference the same dictionary, using command \fB\-D dictionaryFileName\fR\. Compression of small files similar to the sample set will be greatly improved\. -. .TP \fB\-\-train FILEs\fR -Use FILEs as training set to create a dictionary\. The training set should contain a lot of small files (> 100), and weight typically 100x the target dictionary size (for example, 10 MB for a 100 KB dictionary)\. -. +Use FILEs as training set to create a dictionary\. The training set should contain a lot of small files (> 100), and weight typically 100x the target dictionary size (for example, 10 MB for a 100 KB dictionary)\. \fB\-\-train\fR can be combined with \fB\-r\fR to indicate a directory rather than listing all the files, which can be useful to circumvent shell expansion limits\. .IP -Supports multithreading if \fBzstd\fR is compiled with threading support\. Additional parameters can be specified with \fB\-\-train\-fastcover\fR\. The legacy dictionary builder can be accessed with \fB\-\-train\-legacy\fR\. The cover dictionary builder can be accessed with \fB\-\-train\-cover\fR\. Equivalent to \fB\-\-train\-fastcover=d=8,steps=4\fR\. -. +\fB\-\-train\fR supports multithreading if \fBzstd\fR is compiled with threading support (default)\. Additional parameters can be specified with \fB\-\-train\-fastcover\fR\. The legacy dictionary builder can be accessed with \fB\-\-train\-legacy\fR\. The slower cover dictionary builder can be accessed with \fB\-\-train\-cover\fR\. Default is equivalent to \fB\-\-train\-fastcover=d=8,steps=4\fR\. .TP \fB\-o file\fR Dictionary saved into \fBfile\fR (default name: dictionary)\. -. .TP \fB\-\-maxdict=#\fR Limit dictionary to specified size (default: 112640)\. -. .TP \fB\-#\fR Use \fB#\fR compression level during training (optional)\. Will generate statistics more tuned for selected compression level, resulting in a \fIsmall\fR compression ratio improvement for this level\. -. .TP \fB\-B#\fR -Split input files in blocks of size # (default: no split) -. +Split input files into blocks of size # (default: no split) .TP \fB\-\-dictID=#\fR A dictionary ID is a locally unique ID that a decoder can use to verify it is using the right dictionary\. By default, zstd will create a 4\-bytes random number ID\. It\'s possible to give a precise number instead\. Short numbers have an advantage : an ID < 256 will only need 1 byte in the compressed frame header, and an ID < 65536 will only need 2 bytes\. This compares favorably to 4 bytes default\. However, it\'s up to the dictionary manager to not assign twice the same ID to 2 different dictionaries\. -. .TP \fB\-\-train\-cover[=k#,d=#,steps=#,split=#,shrink[=#]]\fR Select parameters for the default dictionary builder algorithm named cover\. If \fId\fR is not specified, then it tries \fId\fR = 6 and \fId\fR = 8\. If \fIk\fR is not specified, then it tries \fIsteps\fR values in the range [50, 2000]\. If \fIsteps\fR is not specified, then the default value of 40 is used\. If \fIsplit\fR is not specified or split <= 0, then the default value of 100 is used\. Requires that \fId\fR <= \fIk\fR\. If \fIshrink\fR flag is not used, then the default value for \fIshrinkDict\fR of 0 is used\. 
If \fIshrink\fR is not specified, then the default value for \fIshrinkDictMaxRegression\fR of 1 is used\. -. .IP Selects segments of size \fIk\fR with highest score to put in the dictionary\. The score of a segment is computed by the sum of the frequencies of all the subsegments of size \fId\fR\. Generally \fId\fR should be in the range [6, 8], occasionally up to 16, but the algorithm will run faster with d <= \fI8\fR\. Good values for \fIk\fR vary widely based on the input data, but a safe range is [2 * \fId\fR, 2000]\. If \fIsplit\fR is 100, all input samples are used for both training and testing to find optimal \fId\fR and \fIk\fR to build dictionary\. Supports multithreading if \fBzstd\fR is compiled with threading support\. Having \fIshrink\fR enabled takes a truncated dictionary of minimum size and doubles in size until compression ratio of the truncated dictionary is at most \fIshrinkDictMaxRegression%\fR worse than the compression ratio of the largest dictionary\. -. .IP Examples: -. .IP \fBzstd \-\-train\-cover FILEs\fR -. .IP \fBzstd \-\-train\-cover=k=50,d=8 FILEs\fR -. .IP \fBzstd \-\-train\-cover=d=8,steps=500 FILEs\fR -. .IP \fBzstd \-\-train\-cover=k=50 FILEs\fR -. .IP \fBzstd \-\-train\-cover=k=50,split=60 FILEs\fR -. .IP \fBzstd \-\-train\-cover=shrink FILEs\fR -. .IP \fBzstd \-\-train\-cover=shrink=2 FILEs\fR -. .TP \fB\-\-train\-fastcover[=k#,d=#,f=#,steps=#,split=#,accel=#]\fR Same as cover but with extra parameters \fIf\fR and \fIaccel\fR and different default value of split If \fIsplit\fR is not specified, then it tries \fIsplit\fR = 75\. If \fIf\fR is not specified, then it tries \fIf\fR = 20\. Requires that 0 < \fIf\fR < 32\. If \fIaccel\fR is not specified, then it tries \fIaccel\fR = 1\. Requires that 0 < \fIaccel\fR <= 10\. Requires that \fId\fR = 6 or \fId\fR = 8\. -. .IP \fIf\fR is log of size of array that keeps track of frequency of subsegments of size \fId\fR\. The subsegment is hashed to an index in the range [0,2^\fIf\fR \- 1]\. It is possible that 2 different subsegments are hashed to the same index, and they are considered as the same subsegment when computing frequency\. Using a higher \fIf\fR reduces collision but takes longer\. -. .IP Examples: -. .IP \fBzstd \-\-train\-fastcover FILEs\fR -. .IP \fBzstd \-\-train\-fastcover=d=8,f=15,accel=2 FILEs\fR -. .TP \fB\-\-train\-legacy[=selectivity=#]\fR Use legacy dictionary builder algorithm with the given dictionary \fIselectivity\fR (default: 9)\. The smaller the \fIselectivity\fR value, the denser the dictionary, improving its efficiency but reducing its possible maximum size\. \fB\-\-train\-legacy=s=#\fR is also accepted\. -. .IP Examples: -. .IP \fBzstd \-\-train\-legacy FILEs\fR -. .IP \fBzstd \-\-train\-legacy=selectivity=8 FILEs\fR -. .SH "BENCHMARK" -. .TP \fB\-b#\fR benchmark file(s) using compression level # -. .TP \fB\-e#\fR benchmark file(s) using multiple compression levels, from \fB\-b#\fR to \fB\-e#\fR (inclusive) -. .TP \fB\-i#\fR minimum evaluation time, in seconds (default: 3s), benchmark mode only -. .TP \fB\-B#\fR, \fB\-\-block\-size=#\fR cut file(s) into independent blocks of size # (default: no block) -. .TP \fB\-\-priority=rt\fR set process priority to real\-time -. .P \fBOutput Format:\fR CompressionLevel#Filename : IntputSize \-> OutputSize (CompressionRatio), CompressionSpeed, DecompressionSpeed -. .P \fBMethodology:\fR For both compression and decompression speed, the entire input is compressed/decompressed in\-memory to measure speed\. 
A run lasts at least 1 sec, so when files are small, they are compressed/decompressed several times per run, in order to improve measurement accuracy\. -. .SH "ADVANCED COMPRESSION OPTIONS" -. -.SS "\-B#:" -Select the size of each compression job\. This parameter is only available when multi\-threading is enabled\. Each compression job is run in parallel, so this value indirectly impacts the nb of active threads\. Default job size varies depending on compression level (generally \fB4 * windowSize\fR)\. \fB\-B#\fR makes it possible to manually select a custom size\. Note that job size must respect a minimum value which is enforced transparently\. This minimum is either 512 KB, or \fBoverlapSize\fR, whichever is largest\. Different job sizes will lead to (slightly) different compressed frames\. -. +### \-B#: Select the size of each compression job\. This parameter is only available when multi\-threading is enabled\. Each compression job is run in parallel, so this value indirectly impacts the nb of active threads\. Default job size varies depending on compression level (generally \fB4 * windowSize\fR)\. \fB\-B#\fR makes it possible to manually select a custom size\. Note that job size must respect a minimum value which is enforced transparently\. This minimum is either 512 KB, or \fBoverlapSize\fR, whichever is largest\. Different job sizes will lead to (slightly) different compressed frames\. .SS "\-\-zstd[=options]:" \fBzstd\fR provides 22 predefined compression levels\. The selected or default predefined compression level can be changed with advanced compression options\. The \fIoptions\fR are provided as a comma\-separated list\. You may specify only the options you want to change and the rest will be taken from the selected or default compression level\. The list of available \fIoptions\fR: -. .TP \fBstrategy\fR=\fIstrat\fR, \fBstrat\fR=\fIstrat\fR Specify a strategy used by a match finder\. -. .IP There are 9 strategies numbered from 1 to 9, from faster to stronger: 1=ZSTD_fast, 2=ZSTD_dfast, 3=ZSTD_greedy, 4=ZSTD_lazy, 5=ZSTD_lazy2, 6=ZSTD_btlazy2, 7=ZSTD_btopt, 8=ZSTD_btultra, 9=ZSTD_btultra2\. -. .TP \fBwindowLog\fR=\fIwlog\fR, \fBwlog\fR=\fIwlog\fR Specify the maximum number of bits for a match distance\. -. .IP The higher number of increases the chance to find a match which usually improves compression ratio\. It also increases memory requirements for the compressor and decompressor\. The minimum \fIwlog\fR is 10 (1 KiB) and the maximum is 30 (1 GiB) on 32\-bit platforms and 31 (2 GiB) on 64\-bit platforms\. -. .IP Note: If \fBwindowLog\fR is set to larger than 27, \fB\-\-long=windowLog\fR or \fB\-\-memory=windowSize\fR needs to be passed to the decompressor\. -. .TP \fBhashLog\fR=\fIhlog\fR, \fBhlog\fR=\fIhlog\fR Specify the maximum number of bits for a hash table\. -. .IP Bigger hash tables cause less collisions which usually makes compression faster, but requires more memory during compression\. -. .IP The minimum \fIhlog\fR is 6 (64 B) and the maximum is 30 (1 GiB)\. -. .TP \fBchainLog\fR=\fIclog\fR, \fBclog\fR=\fIclog\fR Specify the maximum number of bits for a hash chain or a binary tree\. -. .IP Higher numbers of bits increases the chance to find a match which usually improves compression ratio\. It also slows down compression speed and increases memory requirements for compression\. This option is ignored for the ZSTD_fast strategy\. -. .IP The minimum \fIclog\fR is 6 (64 B) and the maximum is 29 (524 Mib) on 32\-bit platforms and 30 (1 Gib) on 64\-bit platforms\. -. 
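Each `--zstd=` field above has a counterpart in libzstd's advanced-parameter API (`ZSTD_CCtx_setParameter()` with the matching `ZSTD_c_*` enum). The sketch below is not part of zstd itself; it only shows, with error checking trimmed, how the parameters covered so far (strategy, windowLog, hashLog, chainLog) could be set programmatically, using values taken from the level-19-like example at the end of this section.

```c
#include <stddef.h>
#include <zstd.h>

/* Rough API equivalent of "zstd --zstd=wlog=23,clog=23,hlog=22,strat=6".
 * dst should be at least ZSTD_compressBound(srcSize) bytes; real code should
 * check every return value with ZSTD_isError(). */
static size_t compress_with_custom_cparams(void* dst, size_t dstCapacity,
                                           const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t cSize;
    if (cctx == NULL) return (size_t)-1;  /* reported as an error by ZSTD_isError() */

    ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 23);          /* wlog=23 */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_chainLog, 23);           /* clog=23 */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_hashLog, 22);            /* hlog=22 */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_strategy, ZSTD_btlazy2); /* strat=6  */

    cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeCCtx(cctx);
    return cSize;
}
```

The remaining fields described below (slog, mml, tlen, ovlog and the ldm* options) map to `ZSTD_c_searchLog`, `ZSTD_c_minMatch`, `ZSTD_c_targetLength`, `ZSTD_c_overlapLog` and the `ZSTD_c_ldm*` parameters in the same way.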
.TP \fBsearchLog\fR=\fIslog\fR, \fBslog\fR=\fIslog\fR Specify the maximum number of searches in a hash chain or a binary tree using logarithmic scale\. -. .IP More searches increase the chance to find a match which usually increases compression ratio but decreases compression speed\. -. .IP The minimum \fIslog\fR is 1 and the maximum is \'windowLog\' \- 1\. -. .TP \fBminMatch\fR=\fImml\fR, \fBmml\fR=\fImml\fR Specify the minimum searched length of a match in a hash table\. -. .IP Larger search lengths usually decrease compression ratio but improve decompression speed\. -. .IP The minimum \fImml\fR is 3 and the maximum is 7\. -. .TP \fBtargetLength\fR=\fItlen\fR, \fBtlen\fR=\fItlen\fR The impact of this field varies depending on the selected strategy\. -. .IP For ZSTD_btopt, ZSTD_btultra and ZSTD_btultra2, it specifies the minimum match length that causes match finder to stop searching\. A larger \fBtargetLength\fR usually improves compression ratio but decreases compression speed\. For ZSTD_fast, it triggers ultra\-fast mode when > 0\. The value represents the amount of data skipped between match sampling\. Impact is reversed : a larger \fBtargetLength\fR increases compression speed but decreases compression ratio\. -. .IP For all other strategies, this field has no impact\. -. .IP The minimum \fItlen\fR is 0 and the maximum is 128 Kib\. -. .TP \fBoverlapLog\fR=\fIovlog\fR, \fBovlog\fR=\fIovlog\fR Determine \fBoverlapSize\fR, the amount of data reloaded from the previous job\. This parameter is only available when multithreading is enabled\. Reloading more data improves compression ratio, but decreases speed\. -. .IP The minimum \fIovlog\fR is 0, and the maximum is 9\. 1 means "no overlap", hence completely independent jobs\. 9 means "full overlap", meaning up to \fBwindowSize\fR is reloaded from previous job\. Reducing \fIovlog\fR by 1 reduces the reloaded amount by a factor 2\. For example, 8 means "windowSize/2", and 6 means "windowSize/8"\. Value 0 is special and means "default" : \fIovlog\fR is automatically determined by \fBzstd\fR\. In which case, \fIovlog\fR will range from 6 to 9, depending on selected \fIstrat\fR\. -. .TP \fBldmHashLog\fR=\fIlhlog\fR, \fBlhlog\fR=\fIlhlog\fR Specify the maximum size for a hash table used for long distance matching\. -. .IP This option is ignored unless long distance matching is enabled\. -. .IP Bigger hash tables usually improve compression ratio at the expense of more memory during compression and a decrease in compression speed\. -. .IP The minimum \fIlhlog\fR is 6 and the maximum is 30 (default: 20)\. -. .TP \fBldmMinMatch\fR=\fIlmml\fR, \fBlmml\fR=\fIlmml\fR Specify the minimum searched length of a match for long distance matching\. -. .IP This option is ignored unless long distance matching is enabled\. -. .IP Larger/very small values usually decrease compression ratio\. -. .IP The minimum \fIlmml\fR is 4 and the maximum is 4096 (default: 64)\. -. .TP \fBldmBucketSizeLog\fR=\fIlblog\fR, \fBlblog\fR=\fIlblog\fR Specify the size of each bucket for the hash table used for long distance matching\. -. .IP This option is ignored unless long distance matching is enabled\. -. .IP Larger bucket sizes improve collision resolution but decrease compression speed\. -. .IP The minimum \fIlblog\fR is 1 and the maximum is 8 (default: 3)\. -. .TP \fBldmHashRateLog\fR=\fIlhrlog\fR, \fBlhrlog\fR=\fIlhrlog\fR Specify the frequency of inserting entries into the long distance matching hash table\. -. .IP This option is ignored unless long distance matching is enabled\. -.
.IP Larger values will improve compression speed\. Deviating far from the default value will likely result in a decrease in compression ratio\. -. .IP The default value is \fBwlog \- lhlog\fR\. -. .SS "Example" The following parameters sets advanced compression options to something similar to predefined level 19 for files bigger than 256 KB: -. .P \fB\-\-zstd\fR=wlog=23,clog=23,hlog=22,slog=6,mml=3,tlen=48,strat=6 -. .SH "BUGS" Report bugs at: https://github\.com/facebook/zstd/issues -. .SH "AUTHOR" Yann Collet diff --git a/programs/zstd.1.md b/programs/zstd.1.md index ae5092866..ef37fef32 100644 --- a/programs/zstd.1.md +++ b/programs/zstd.1.md @@ -125,6 +125,9 @@ the last one takes effect. This mode is the only one available when multithread support is disabled. Single-thread mode features lower memory usage. Final compressed result is slightly different from `-T1`. +* `--auto-threads={physical,logical} (default: physical)`: + When using a default amount of threads via `-T0`, choose the default based on the number + of detected physical or logical cores. * `--adapt[=min=#,max=#]` : `zstd` will dynamically adapt compression level to perceived I/O conditions. Compression level adaptation can be observed live by using command `-v`. @@ -168,7 +171,7 @@ the last one takes effect. compression speed hit. This feature does not work with `--single-thread`. You probably don't want to use it with long range mode, since it will decrease the effectiveness of - the synchronization points, but your milage may vary. + the synchronization points, but your mileage may vary. * `-C`, `--[no-]check`: add integrity check computed from uncompressed data (default: enabled) * `--[no-]content-size`: @@ -205,7 +208,7 @@ the last one takes effect. disable input and output checks. Allows overwriting existing files, input from console, output to stdout, operating on links, block devices, etc. * `-c`, `--stdout`: - force write to standard output, even if it is the console + write to standard output (even if it is the console) * `--[no-]sparse`: enable / disable sparse FS support, to make files with many zeroes smaller on disk. @@ -221,7 +224,11 @@ the last one takes effect. keep source file(s) after successful compression or decompression. This is the default behavior. * `-r`: - operate recursively on directories + operate recursively on directories. + It selects all files in the named directory and all its subdirectories. + This can be useful both to reduce command line typing, + and to circumvent shell expansion limitations, + when there are a lot of files and naming breaks the maximum size of a command line. * `--filelist FILE` read a list of files to process as content from `FILE`. Format is compatible with `ls` output, with one file per line. @@ -304,12 +311,14 @@ Compression of small files similar to the sample set will be greatly improved. The training set should contain a lot of small files (> 100), and weight typically 100x the target dictionary size (for example, 10 MB for a 100 KB dictionary). + `--train` can be combined with `-r` to indicate a directory rather than listing all the files, + which can be useful to circumvent shell expansion limits. - Supports multithreading if `zstd` is compiled with threading support. + `--train` supports multithreading if `zstd` is compiled with threading support (default). Additional parameters can be specified with `--train-fastcover`. The legacy dictionary builder can be accessed with `--train-legacy`. - The cover dictionary builder can be accessed with `--train-cover`. 
- Equivalent to `--train-fastcover=d=8,steps=4`. + The slower cover dictionary builder can be accessed with `--train-cover`. + Default is equivalent to `--train-fastcover=d=8,steps=4`. * `-o file`: Dictionary saved into `file` (default name: dictionary). * `--maxdict=#`: @@ -319,10 +328,10 @@ Compression of small files similar to the sample set will be greatly improved. Will generate statistics more tuned for selected compression level, resulting in a _small_ compression ratio improvement for this level. * `-B#`: - Split input files in blocks of size # (default: no split) + Split input files into blocks of size # (default: no split) * `--dictID=#`: - A dictionary ID is a locally unique ID that a decoder can use to verify it is - using the right dictionary. + A dictionary ID is a locally unique ID + that a decoder can use to verify it is using the right dictionary. By default, zstd will create a 4-bytes random number ID. It's possible to give a precise number instead. Short numbers have an advantage : an ID < 256 will only need 1 byte in the diff --git a/programs/zstdcli.c b/programs/zstdcli.c index 239aaf406..a3f8ebab7 100644 --- a/programs/zstdcli.c +++ b/programs/zstdcli.c @@ -163,7 +163,7 @@ static void usage_advanced(const char* programName) DISPLAYOUT( "Advanced arguments : \n"); DISPLAYOUT( " -V : display Version number and exit \n"); - DISPLAYOUT( " -c : force write to standard output, even if it is the console \n"); + DISPLAYOUT( " -c : write to standard output (even if it is the console) \n"); DISPLAYOUT( " -v : verbose mode; specify multiple times to increase verbosity \n"); DISPLAYOUT( " -q : suppress warnings; specify twice to suppress errors too \n"); @@ -208,10 +208,12 @@ static void usage_advanced(const char* programName) DISPLAYOUT( "--fast[=#]: switch to very fast compression levels (default: %u) \n", 1); DISPLAYOUT( "--adapt : dynamically adapt compression level to I/O conditions \n"); DISPLAYOUT( "--[no-]row-match-finder : force enable/disable usage of fast row-based matchfinder for greedy, lazy, and lazy2 strategies \n"); + DISPLAYOUT( "--patch-from=FILE : specify the file to be used as a reference point for zstd's diff engine. \n"); # ifdef ZSTD_MULTITHREAD DISPLAYOUT( " -T# : spawns # compression threads (default: 1, 0==# cores) \n"); DISPLAYOUT( " -B# : select size of each job (default: 0==automatic) \n"); DISPLAYOUT( "--single-thread : use a single thread for both I/O and compression (result slightly different than -T1) \n"); + DISPLAYOUT( "--auto-threads={physical,logical} (default: physical} : use either physical cores or logical cores as default when specifying -T0 \n"); DISPLAYOUT( "--rsyncable : compress using a rsync-friendly method (-B sets block size) \n"); # endif DISPLAYOUT( "--exclude-compressed: only compress files that are not already compressed \n"); @@ -353,6 +355,23 @@ static unsigned readU32FromChar(const char** stringPtr) { return result; } +/*! readIntFromChar() : + * @return : signed integer value read from input in `char` format. + * allows and interprets K, KB, KiB, M, MB and MiB suffix. + * Will also modify `*stringPtr`, advancing it to position where it stopped reading. 
+ * Note : function will exit() program if digit sequence overflows */ +static int readIntFromChar(const char** stringPtr) { + static const char errorMsg[] = "error: numeric value overflows 32-bit int"; + int sign = 1; + unsigned result; + if (**stringPtr=='-') { + (*stringPtr)++; + sign = -1; + } + if (readU32FromCharChecked(stringPtr, &result)) { errorOut(errorMsg); } + return (int) result * sign; +} + /*! readSizeTFromCharChecked() : * @return 0 if success, and store the result in *value. * allows and interprets K, KB, KiB, M, MB and MiB suffix. @@ -546,8 +565,8 @@ static ZDICT_fastCover_params_t defaultFastCoverParams(void) static unsigned parseAdaptParameters(const char* stringPtr, int* adaptMinPtr, int* adaptMaxPtr) { for ( ; ;) { - if (longCommandWArg(&stringPtr, "min=")) { *adaptMinPtr = (int)readU32FromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; } - if (longCommandWArg(&stringPtr, "max=")) { *adaptMaxPtr = (int)readU32FromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; } + if (longCommandWArg(&stringPtr, "min=")) { *adaptMinPtr = readIntFromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; } + if (longCommandWArg(&stringPtr, "max=")) { *adaptMaxPtr = readIntFromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; } DISPLAYLEVEL(4, "invalid compression parameter \n"); return 0; } @@ -628,6 +647,48 @@ static void printVersion(void) } } } +#define ZSTD_NB_STRATEGIES 9 +static const char* ZSTD_strategyMap[ZSTD_NB_STRATEGIES + 1] = { "", "ZSTD_fast", + "ZSTD_dfast", "ZSTD_greedy", "ZSTD_lazy", "ZSTD_lazy2", "ZSTD_btlazy2", + "ZSTD_btopt", "ZSTD_btultra", "ZSTD_btultra2"}; + +#ifndef ZSTD_NOCOMPRESS + +static void printDefaultCParams(const char* filename, const char* dictFileName, int cLevel) { + unsigned long long fileSize = UTIL_getFileSize(filename); + const size_t dictSize = dictFileName != NULL ? (size_t)UTIL_getFileSize(dictFileName) : 0; + const ZSTD_compressionParameters cParams = ZSTD_getCParams(cLevel, fileSize, dictSize); + if (fileSize != UTIL_FILESIZE_UNKNOWN) DISPLAY("%s (%u bytes)\n", filename, (unsigned)fileSize); + else DISPLAY("%s (src size unknown)\n", filename); + DISPLAY(" - windowLog : %u\n", cParams.windowLog); + DISPLAY(" - chainLog : %u\n", cParams.chainLog); + DISPLAY(" - hashLog : %u\n", cParams.hashLog); + DISPLAY(" - searchLog : %u\n", cParams.searchLog); + DISPLAY(" - minMatch : %u\n", cParams.minMatch); + DISPLAY(" - targetLength : %u\n", cParams.targetLength); + assert(cParams.strategy < ZSTD_NB_STRATEGIES + 1); + DISPLAY(" - strategy : %s (%u)\n", ZSTD_strategyMap[(int)cParams.strategy], (unsigned)cParams.strategy); +} + +static void printActualCParams(const char* filename, const char* dictFileName, int cLevel, const ZSTD_compressionParameters* cParams) { + unsigned long long fileSize = UTIL_getFileSize(filename); + const size_t dictSize = dictFileName != NULL ? (size_t)UTIL_getFileSize(dictFileName) : 0; + ZSTD_compressionParameters actualCParams = ZSTD_getCParams(cLevel, fileSize, dictSize); + assert(g_displayLevel >= 4); + actualCParams.windowLog = cParams->windowLog == 0 ? actualCParams.windowLog : cParams->windowLog; + actualCParams.chainLog = cParams->chainLog == 0 ? actualCParams.chainLog : cParams->chainLog; + actualCParams.hashLog = cParams->hashLog == 0 ? actualCParams.hashLog : cParams->hashLog; + actualCParams.searchLog = cParams->searchLog == 0 ? 
actualCParams.searchLog : cParams->searchLog; + actualCParams.minMatch = cParams->minMatch == 0 ? actualCParams.minMatch : cParams->minMatch; + actualCParams.targetLength = cParams->targetLength == 0 ? actualCParams.targetLength : cParams->targetLength; + actualCParams.strategy = cParams->strategy == 0 ? actualCParams.strategy : cParams->strategy; + DISPLAY("--zstd=wlog=%d,clog=%d,hlog=%d,slog=%d,mml=%d,tlen=%d,strat=%d\n", + actualCParams.windowLog, actualCParams.chainLog, actualCParams.hashLog, actualCParams.searchLog, + actualCParams.minMatch, actualCParams.targetLength, actualCParams.strategy); +} + +#endif + /* Environment variables for parameter setting */ #define ENV_CLEVEL "ZSTD_CLEVEL" #define ENV_NBTHREADS "ZSTD_NBTHREADS" /* takes lower precedence than directly specifying -T# in the CLI */ @@ -704,11 +765,6 @@ static unsigned init_nbThreads(void) { val32 = readU32FromChar(&__nb); \ } -#define ZSTD_NB_STRATEGIES 9 -static const char* ZSTD_strategyMap[ZSTD_NB_STRATEGIES + 1] = { "", "ZSTD_fast", - "ZSTD_dfast", "ZSTD_greedy", "ZSTD_lazy", "ZSTD_lazy2", "ZSTD_btlazy2", - "ZSTD_btopt", "ZSTD_btultra", "ZSTD_btultra2"}; - typedef enum { zom_compress, zom_decompress, zom_test, zom_bench, zom_train, zom_list } zstd_operation_mode; #define CLEAN_RETURN(i) { operationResult = (i); goto _end; } @@ -722,7 +778,7 @@ typedef enum { zom_compress, zom_decompress, zom_test, zom_bench, zom_train, zom # define MAXCLEVEL ZSTD_maxCLevel() #endif -int main(int const argCount, const char* argv[]) +int main(int argCount, const char* argv[]) { int argNb, followLinks = 0, @@ -743,6 +799,9 @@ int main(int const argCount, const char* argv[]) separateFiles = 0, setRealTimePrio = 0, singleThread = 0, +#ifdef ZSTD_MULTITHREAD + defaultLogicalCores = 0, +#endif showDefaultCParams = 0, ultra=0, contentSize=1; @@ -782,7 +841,7 @@ int main(int const argCount, const char* argv[]) #ifndef ZSTD_NOBENCH BMK_advancedParams_t benchParams = BMK_initAdvancedParams(); #endif - ZSTD_literalCompressionMode_e literalCompressionMode = ZSTD_lcm_auto; + ZSTD_paramSwitch_e literalCompressionMode = ZSTD_ps_auto; /* init */ @@ -878,8 +937,8 @@ int main(int const argCount, const char* argv[]) if (!strcmp(argument, "--format=lz4")) { suffix = LZ4_EXTENSION; FIO_setCompressionType(prefs, FIO_lz4Compression); continue; } #endif if (!strcmp(argument, "--rsyncable")) { rsyncable = 1; continue; } - if (!strcmp(argument, "--compress-literals")) { literalCompressionMode = ZSTD_lcm_huffman; continue; } - if (!strcmp(argument, "--no-compress-literals")) { literalCompressionMode = ZSTD_lcm_uncompressed; continue; } + if (!strcmp(argument, "--compress-literals")) { literalCompressionMode = ZSTD_ps_enable; continue; } + if (!strcmp(argument, "--no-compress-literals")) { literalCompressionMode = ZSTD_ps_disable; continue; } if (!strcmp(argument, "--no-progress")) { FIO_setProgressSetting(FIO_ps_never); continue; } if (!strcmp(argument, "--progress")) { FIO_setProgressSetting(FIO_ps_always); continue; } if (!strcmp(argument, "--exclude-compressed")) { FIO_setExcludeCompressedFile(prefs, 1); continue; } @@ -932,6 +991,15 @@ int main(int const argCount, const char* argv[]) if (longCommandWArg(&argument, "--target-compressed-block-size=")) { targetCBlockSize = readSizeTFromChar(&argument); continue; } if (longCommandWArg(&argument, "--size-hint=")) { srcSizeHint = readSizeTFromChar(&argument); continue; } if (longCommandWArg(&argument, "--output-dir-flat")) { NEXT_FIELD(outDirName); continue; } +#ifdef ZSTD_MULTITHREAD + if 
(longCommandWArg(&argument, "--auto-threads")) { + const char* threadDefault = NULL; + NEXT_FIELD(threadDefault); + if (strcmp(threadDefault, "logical") == 0) + defaultLogicalCores = 1; + continue; + } +#endif #ifdef UTIL_HAS_MIRRORFILELIST if (longCommandWArg(&argument, "--output-dir-mirror")) { NEXT_FIELD(outMirroredDirName); continue; } #endif @@ -1138,15 +1206,21 @@ int main(int const argCount, const char* argv[]) #ifdef ZSTD_MULTITHREAD if ((nbWorkers==0) && (!singleThread)) { /* automatically set # workers based on # of reported cpus */ - nbWorkers = UTIL_countPhysicalCores(); - DISPLAYLEVEL(3, "Note: %d physical core(s) detected \n", nbWorkers); + if (defaultLogicalCores) { + nbWorkers = UTIL_countLogicalCores(); + DISPLAYLEVEL(3, "Note: %d logical core(s) detected \n", nbWorkers); + } else { + nbWorkers = UTIL_countPhysicalCores(); + DISPLAYLEVEL(3, "Note: %d physical core(s) detected \n", nbWorkers); + } } #else (void)singleThread; (void)nbWorkers; #endif -#ifdef UTIL_HAS_CREATEFILELIST g_utilDisplayLevel = g_displayLevel; + +#ifdef UTIL_HAS_CREATEFILELIST if (!followLinks) { unsigned u, fileNamesNb; unsigned const nbFilenames = (unsigned)filenames->tableSize; @@ -1253,18 +1327,18 @@ int main(int const argCount, const char* argv[]) int const optimize = !coverParams.k || !coverParams.d; coverParams.nbThreads = (unsigned)nbWorkers; coverParams.zParams = zParams; - operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenames->fileNames, (unsigned)filenames->tableSize, blockSize, NULL, &coverParams, NULL, optimize); + operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenames->fileNames, (int)filenames->tableSize, blockSize, NULL, &coverParams, NULL, optimize); } else if (dict == fastCover) { int const optimize = !fastCoverParams.k || !fastCoverParams.d; fastCoverParams.nbThreads = (unsigned)nbWorkers; fastCoverParams.zParams = zParams; - operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenames->fileNames, (unsigned)filenames->tableSize, blockSize, NULL, NULL, &fastCoverParams, optimize); + operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenames->fileNames, (int)filenames->tableSize, blockSize, NULL, NULL, &fastCoverParams, optimize); } else { ZDICT_legacy_params_t dictParams; memset(&dictParams, 0, sizeof(dictParams)); dictParams.selectivityLevel = dictSelect; dictParams.zParams = zParams; - operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenames->fileNames, (unsigned)filenames->tableSize, blockSize, &dictParams, NULL, NULL, 0); + operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenames->fileNames, (int)filenames->tableSize, blockSize, &dictParams, NULL, NULL, 0); } #else (void)dictCLevel; (void)dictSelect; (void)dictID; (void)maxDictSize; /* not used when ZSTD_NODICT set */ @@ -1374,25 +1448,18 @@ int main(int const argCount, const char* argv[]) assert(ZSTD_NB_STRATEGIES == strategyBounds.upperBound); (void)strategyBounds; } - if (showDefaultCParams) { + if (showDefaultCParams || g_displayLevel >= 4) { size_t fileNb; for (fileNb = 0; fileNb < (size_t)filenames->tableSize; fileNb++) { - unsigned long long fileSize = UTIL_getFileSize(filenames->fileNames[fileNb]); - const size_t dictSize = dictFileName != NULL ? 
(size_t)UTIL_getFileSize(dictFileName) : 0; - const ZSTD_compressionParameters cParams = ZSTD_getCParams(cLevel, fileSize, dictSize); - if (fileSize != UTIL_FILESIZE_UNKNOWN) DISPLAY("%s (%u bytes)\n", filenames->fileNames[fileNb], (unsigned)fileSize); - else DISPLAY("%s (src size unknown)\n", filenames->fileNames[fileNb]); - DISPLAY(" - windowLog : %u\n", cParams.windowLog); - DISPLAY(" - chainLog : %u\n", cParams.chainLog); - DISPLAY(" - hashLog : %u\n", cParams.hashLog); - DISPLAY(" - searchLog : %u\n", cParams.searchLog); - DISPLAY(" - minMatch : %u\n", cParams.minMatch); - DISPLAY(" - targetLength : %u\n", cParams.targetLength); - assert(cParams.strategy < ZSTD_NB_STRATEGIES + 1); - DISPLAY(" - strategy : %s (%u)\n", ZSTD_strategyMap[(int)cParams.strategy], (unsigned)cParams.strategy); + if (showDefaultCParams) + printDefaultCParams(filenames->fileNames[fileNb], dictFileName, cLevel); + if (g_displayLevel >= 4) + printActualCParams(filenames->fileNames[fileNb], dictFileName, cLevel, &compressionParams); } } + if (g_displayLevel >= 4) + FIO_displayCompressionParameters(prefs); if ((filenames->tableSize==1) && outFileName) operationResult = FIO_compressFilename(fCtx, prefs, outFileName, filenames->fileNames[0], dictFileName, cLevel, compressionParams); else diff --git a/programs/zstdgrep.1 b/programs/zstdgrep.1 index bf96185b7..ae4da3354 100644 --- a/programs/zstdgrep.1 +++ b/programs/zstdgrep.1 @@ -1,23 +1,17 @@ -. -.TH "ZSTDGREP" "1" "May 2021" "zstd 1.5.0" "User Commands" -. +.TH "ZSTDGREP" "1" "September 2021" "zstd 1.5.1" "User Commands" .SH "NAME" \fBzstdgrep\fR \- print lines matching a pattern in zstandard\-compressed files -. .SH "SYNOPSIS" -\fBzstdgrep\fR [\fIgrep\-flags\fR] [\-\-] \fIpattern\fR [\fIfiles\fR \.\.\.] -. +\fBzstdgrep\fR [\fIgrep\-flags\fR] [\-\-] \fIpattern\fR [\fIfiles\fR \|\.\|\.\|\.] .SH "DESCRIPTION" -\fBzstdgrep\fR runs \fBgrep (1)\fR on files or stdin, if no files argument is given, after decompressing them with \fBzstdcat (1)\fR\. -. +\fBzstdgrep\fR runs \fBgrep (1)\fR on files, or \fBstdin\fR if no files argument is given, after decompressing them with \fBzstdcat (1)\fR\. .P The grep\-flags and pattern arguments are passed on to \fBgrep (1)\fR\. If an \fB\-e\fR flag is found in the \fBgrep\-flags\fR, \fBzstdgrep\fR will not look for a pattern argument\. -. +.P +Note that modern \fBgrep\fR alternatives such as \fBripgrep\fR (\fBrg\fR) support \fBzstd\fR\-compressed files out of the box, and can prove better alternatives than \fBzstdgrep\fR notably for unsupported complex pattern searches\. Note though that such alternatives may also feature some minor command line differences\. .SH "EXIT STATUS" In case of missing arguments or missing pattern, 1 will be returned, otherwise 0\. -. .SH "SEE ALSO" \fBzstd (1)\fR -. .SH "AUTHORS" Thomas Klausner \fIwiz@NetBSD\.org\fR diff --git a/programs/zstdgrep.1.md b/programs/zstdgrep.1.md index 363ad4f99..35186a4bf 100644 --- a/programs/zstdgrep.1.md +++ b/programs/zstdgrep.1.md @@ -9,10 +9,14 @@ SYNOPSIS DESCRIPTION ----------- -`zstdgrep` runs `grep (1)` on files or stdin, if no files argument is given, after decompressing them with `zstdcat (1)`. +`zstdgrep` runs `grep (1)` on files, or `stdin` if no files argument is given, after decompressing them with `zstdcat (1)`. The grep-flags and pattern arguments are passed on to `grep (1)`. If an `-e` flag is found in the `grep-flags`, `zstdgrep` will not look for a pattern argument. 
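Stepping back to the `zstdcli.c` refactor a little earlier in this patch: the new `printDefaultCParams()` helper is essentially a thin wrapper around `ZSTD_getCParams()`, which callers can use directly to see what a compression level translates to for a given source size. The sketch below is an illustration of that call, not code from this patch; note that `ZSTD_getCParams()` lives behind `ZSTD_STATIC_LINKING_ONLY` (the experimental API), which `zstdcli.c` already defines.

```c
#include <stdio.h>

#define ZSTD_STATIC_LINKING_ONLY  /* ZSTD_getCParams() is part of the experimental API */
#include <zstd.h>

/* Prints roughly what --show-default-cparams reports: the parameters zstd
 * would pick for a given level and estimated source size, without a dictionary. */
static void show_cparams(int cLevel, unsigned long long srcSize)
{
    ZSTD_compressionParameters const cp = ZSTD_getCParams(cLevel, srcSize, /* dictSize */ 0);
    printf("level %d, srcSize %llu:\n", cLevel, srcSize);
    printf("  windowLog=%u chainLog=%u hashLog=%u searchLog=%u\n",
           cp.windowLog, cp.chainLog, cp.hashLog, cp.searchLog);
    printf("  minMatch=%u targetLength=%u strategy=%u\n",
           cp.minMatch, cp.targetLength, (unsigned)cp.strategy);
}

int main(void)
{
    show_cparams(3, 100 * 1024);   /* default level, ~100 KB source */
    show_cparams(19, 1ULL << 30);  /* high level, ~1 GiB source */
    return 0;
}
```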
+Note that modern `grep` alternatives such as `ripgrep` (`rg`) support `zstd`-compressed files out of the box, +and can prove better alternatives than `zstdgrep` notably for unsupported complex pattern searches. +Note though that such alternatives may also feature some minor command line differences. + EXIT STATUS ----------- In case of missing arguments or missing pattern, 1 will be returned, otherwise 0. diff --git a/programs/zstdless.1 b/programs/zstdless.1 index f08bc1921..22354763b 100644 --- a/programs/zstdless.1 +++ b/programs/zstdless.1 @@ -1,14 +1,9 @@ -. -.TH "ZSTDLESS" "1" "May 2021" "zstd 1.5.0" "User Commands" -. +.TH "ZSTDLESS" "1" "September 2021" "zstd 1.5.1" "User Commands" .SH "NAME" \fBzstdless\fR \- view zstandard\-compressed files -. .SH "SYNOPSIS" -\fBzstdless\fR [\fIflags\fR] [\fIfile\fR \.\.\.] -. +\fBzstdless\fR [\fIflags\fR] [\fIfile\fR \|\.\|\.\|\.] .SH "DESCRIPTION" \fBzstdless\fR runs \fBless (1)\fR on files or stdin, if no files argument is given, after decompressing them with \fBzstdcat (1)\fR\. -. .SH "SEE ALSO" \fBzstd (1)\fR diff --git a/tests/Makefile b/tests/Makefile index 85553007d..bd45451a5 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -1,4 +1,5 @@ -# ################################################################ + + # ################################################################ # Copyright (c) Yann Collet, Facebook, Inc. # All rights reserved. # @@ -19,46 +20,43 @@ # zstreamtest32: Same as zstreamtest, but forced to compile in 32-bits mode # ########################################################################## -ZSTDDIR = ../lib +LIBZSTD = ../lib + +ZSTD_LEGACY_SUPPORT ?= 0 + +DEBUGLEVEL ?= 2 +export DEBUGLEVEL # transmit value to sub-makefiles + +include $(LIBZSTD)/libzstd.mk + +ZSTDDIR = $(LIBZSTD) PRGDIR = ../programs PYTHON ?= python3 TESTARTEFACT := versionsTest -DEBUGLEVEL ?= 2 -export DEBUGLEVEL # transmit value to sub-makefiles -DEBUGFLAGS = -g -DDEBUGLEVEL=$(DEBUGLEVEL) +DEBUGFLAGS += -g -Wno-c++-compat CPPFLAGS += -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \ -I$(ZSTDDIR)/dictBuilder -I$(ZSTDDIR)/deprecated -I$(PRGDIR) \ -DZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY=1 -ifeq ($(OS),Windows_NT) # MinGW assumed -CPPFLAGS += -D__USE_MINGW_ANSI_STDIO # compatibility with %zu formatting -endif -CFLAGS ?= -O3 -CFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \ - -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \ - -Wstrict-prototypes -Wundef \ - -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \ - -Wredundant-decls -Wmissing-prototypes -Wno-deprecated-declarations -CFLAGS += $(DEBUGFLAGS) -CPPFLAGS += $(MOREFLAGS) - -ZSTDCOMMON_FILES := $(ZSTDDIR)/common/*.c -ZSTDCOMP_FILES := $(ZSTDDIR)/compress/*.c -ZSTDDECOMP_FILES := $(ZSTDDIR)/decompress/*.c +ZSTDCOMMON_FILES := $(sort $(ZSTD_COMMON_FILES)) +ZSTDCOMP_FILES := $(sort $(ZSTD_COMPRESS_FILES)) +ZSTDDECOMP_FILES := $(sort $(ZSTD_DECOMPRESS_FILES)) ZSTD_FILES := $(ZSTDDECOMP_FILES) $(ZSTDCOMMON_FILES) $(ZSTDCOMP_FILES) -ZDICT_FILES := $(ZSTDDIR)/dictBuilder/*.c +ZDICT_FILES := $(sort $(ZSTD_DICTBUILDER_FILES)) ZSTD_F1 := $(wildcard $(ZSTD_FILES)) ZSTD_OBJ1 := $(subst $(ZSTDDIR)/common/,zstdm_,$(ZSTD_F1)) ZSTD_OBJ2 := $(subst $(ZSTDDIR)/compress/,zstdc_,$(ZSTD_OBJ1)) ZSTD_OBJ3 := $(subst $(ZSTDDIR)/decompress/,zstdd_,$(ZSTD_OBJ2)) -ZSTD_OBJECTS := $(ZSTD_OBJ3:.c=.o) +ZSTD_OBJ4 := $(ZSTD_OBJ3:.c=.o) +ZSTD_OBJECTS := $(ZSTD_OBJ4:.S=.o) ZSTDMT_OBJ1 := $(subst $(ZSTDDIR)/common/,zstdmt_m_,$(ZSTD_F1)) ZSTDMT_OBJ2 := $(subst 
$(ZSTDDIR)/compress/,zstdmt_c_,$(ZSTDMT_OBJ1)) ZSTDMT_OBJ3 := $(subst $(ZSTDDIR)/decompress/,zstdmt_d_,$(ZSTDMT_OBJ2)) -ZSTDMT_OBJECTS := $(ZSTDMT_OBJ3:.c=.o) +ZSTDMT_OBJ4 := $(ZSTDMT_OBJ3:.c=.o) +ZSTDMT_OBJECTS := $(ZSTDMT_OBJ4:.S=.o) # Define *.exe as extension for Windows systems ifneq (,$(filter Windows%,$(OS))) @@ -119,6 +117,9 @@ zstdc_%.o : $(ZSTDDIR)/compress/%.c zstdd_%.o : $(ZSTDDIR)/decompress/%.c $(CC) -c $(CPPFLAGS) $(CFLAGS) $< -o $@ +zstdd_%.o : $(ZSTDDIR)/decompress/%.S + $(CC) -c $(CPPFLAGS) $(CFLAGS) $< -o $@ + zstdmt%.o : CPPFLAGS += $(MULTITHREAD_CPP) zstdmt_m_%.o : $(ZSTDDIR)/common/%.c @@ -130,6 +131,9 @@ zstdmt_c_%.o : $(ZSTDDIR)/compress/%.c zstdmt_d_%.o : $(ZSTDDIR)/decompress/%.c $(CC) -c $(CPPFLAGS) $(CFLAGS) $< -o $@ +zstdmt_d_%.o : $(ZSTDDIR)/decompress/%.S + $(CC) -c $(CPPFLAGS) $(CFLAGS) $< -o $@ + fullbench32: CPPFLAGS += -m32 fullbench fullbench32 : CPPFLAGS += $(MULTITHREAD_CPP) -Wno-deprecated-declarations fullbench fullbench32 : LDFLAGS += $(MULTITHREAD_LD) @@ -201,7 +205,7 @@ bigdict: $(ZSTDMT_OBJECTS) $(PRGDIR)/datagen.c bigdict.c invalidDictionaries : $(ZSTD_OBJECTS) invalidDictionaries.c -legacy : CPPFLAGS += -I$(ZSTDDIR)/legacy -DZSTD_LEGACY_SUPPORT=4 +legacy : CPPFLAGS += -I$(ZSTDDIR)/legacy -UZSTD_LEGACY_SUPPORT -DZSTD_LEGACY_SUPPORT=4 legacy : $(ZSTD_FILES) $(wildcard $(ZSTDDIR)/legacy/*.c) legacy.c decodecorpus : LDLIBS += -lm @@ -244,7 +248,7 @@ clean: # valgrind tests are validated only for some posix platforms #---------------------------------------------------------------------------------- UNAME := $(shell uname) -ifneq (,$(filter $(UNAME),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS)) +ifneq (,$(filter $(UNAME),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS AIX)) HOST_OS = POSIX valgrindTest: VALGRIND = valgrind --leak-check=full --show-leak-kinds=all --error-exitcode=1 @@ -263,7 +267,6 @@ valgrindTest: zstd datagen fuzzer fullbench endif - ifneq (,$(filter MINGW% MSYS%,$(UNAME))) HOST_OS = MSYS endif diff --git a/tests/README.md b/tests/README.md index cd255e904..c6ffb4095 100644 --- a/tests/README.md +++ b/tests/README.md @@ -56,7 +56,7 @@ optional arguments: --mode MODE 'fastmode', 'onetime', 'current', or 'continuous' (see README.md for details) --dict DICT filename of dictionary to use (when set, this - dictioanry will be used to compress the files provided + dictionary will be used to compress the files provided inside --directory) ``` diff --git a/tests/automated_benchmarking.py b/tests/automated_benchmarking.py index 458bda451..e0c03ec2d 100644 --- a/tests/automated_benchmarking.py +++ b/tests/automated_benchmarking.py @@ -87,7 +87,7 @@ def clone_and_build(build): git clone {github_url} zstd-{user}-{sha} && cd zstd-{user}-{sha} && {checkout_command} - make && + make -j && cd ../ """.format( user=build["user"], @@ -100,7 +100,7 @@ def clone_and_build(build): ) return "zstd-{user}-{sha}/zstd".format(user=build["user"], sha=build["hash"]) else: - os.system("cd ../ && make && cd tests") + os.system("cd ../ && make -j && cd tests") return "../zstd" @@ -112,9 +112,9 @@ def parse_benchmark_output(output): def benchmark_single(executable, level, filename): return parse_benchmark_output(( subprocess.run( - [executable, "-qb{}".format(level), filename], stderr=subprocess.PIPE + [executable, "-qb{}".format(level), filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) - .stderr.decode("utf-8") + .stdout.decode("utf-8") .split(" ") )) @@ -145,7 +145,7 @@ def benchmark(build, filenames, 
levels, iterations): def benchmark_dictionary_single(executable, filenames_directory, dictionary_filename, level, iterations): cspeeds, dspeeds = [], [] for _ in range(iterations): - output = subprocess.run([executable, "-qb{}".format(level), "-D", dictionary_filename, "-r", filenames_directory], stderr=subprocess.PIPE).stderr.decode("utf-8").split(" ") + output = subprocess.run([executable, "-qb{}".format(level), "-D", dictionary_filename, "-r", filenames_directory], stdout=subprocess.PIPE).stdout.decode("utf-8").split(" ") cspeed, dspeed = parse_benchmark_output(output) cspeeds.append(cspeed) dspeeds.append(dspeed) @@ -296,7 +296,7 @@ if __name__ == "__main__": parser.add_argument("--emails", help="email addresses of people who will be alerted upon regression. Only for continuous mode", default=None) parser.add_argument("--frequency", help="specifies the number of seconds to wait before each successive check for new PRs in continuous mode", default=DEFAULT_MAX_API_CALL_FREQUENCY_SEC) parser.add_argument("--mode", help="'fastmode', 'onetime', 'current', or 'continuous' (see README.md for details)", default="current") - parser.add_argument("--dict", help="filename of dictionary to use (when set, this dictioanry will be used to compress the files provided inside --directory)", default=None) + parser.add_argument("--dict", help="filename of dictionary to use (when set, this dictionary will be used to compress the files provided inside --directory)", default=None) args = parser.parse_args() filenames = args.directory diff --git a/tests/decodecorpus.c b/tests/decodecorpus.c index fa6a2d6ed..4f4db8575 100644 --- a/tests/decodecorpus.c +++ b/tests/decodecorpus.c @@ -185,7 +185,7 @@ BYTE SEQUENCE_LLCODE[ZSTD_BLOCKSIZE_MAX]; BYTE SEQUENCE_MLCODE[ZSTD_BLOCKSIZE_MAX]; BYTE SEQUENCE_OFCODE[ZSTD_BLOCKSIZE_MAX]; -unsigned WKSP[HUF_WORKSPACE_SIZE_U32]; +U64 WKSP[HUF_WORKSPACE_SIZE_U64]; typedef struct { size_t contentSize; /* 0 means unknown (unless contentSize == windowSize == 0) */ @@ -199,7 +199,7 @@ typedef struct { int hufInit; /* the distribution used in the previous block for repeat mode */ BYTE hufDist[DISTSIZE]; - HUF_CElt hufTable [256]; + HUF_CElt hufTable [HUF_CTABLE_SIZE_ST(255)]; int fseInit; FSE_CTable offcodeCTable [FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)]; diff --git a/tests/fullbench.c b/tests/fullbench.c index a71117e09..f610fef4c 100644 --- a/tests/fullbench.c +++ b/tests/fullbench.c @@ -123,11 +123,15 @@ static size_t local_ZSTD_decompress(const void* src, size_t srcSize, static ZSTD_DCtx* g_zdc = NULL; #ifndef ZSTD_DLL_IMPORT -extern size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* ctx, const void* src, size_t srcSize); +typedef enum { + not_streaming = 0, + is_streaming = 1 +} streaming_operation; +extern size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* ctx, const void* src, size_t srcSize, void* dst, size_t dstCapacity, const streaming_operation streaming); static size_t local_ZSTD_decodeLiteralsBlock(const void* src, size_t srcSize, void* dst, size_t dstSize, void* buff2) { (void)src; (void)srcSize; (void)dst; (void)dstSize; - return ZSTD_decodeLiteralsBlock(g_zdc, buff2, g_cSize); + return ZSTD_decodeLiteralsBlock(g_zdc, buff2, g_cSize, dst, dstSize, not_streaming); } static size_t local_ZSTD_decodeSeqHeaders(const void* src, size_t srcSize, void* dst, size_t dstSize, void* buff2) @@ -177,7 +181,7 @@ FORCE_NOINLINE size_t ZSTD_decodeLiteralsHeader(ZSTD_DCtx* dctx, void const* src dctx->entropy.hufTable, istart+lhSize, litCSize, dctx->workspace, sizeof(dctx->workspace), - dctx->bmi2); + 
ZSTD_DCtx_get_bmi2(dctx)); #else return HUF_readDTableX2_wksp( dctx->entropy.hufTable, @@ -577,7 +581,7 @@ static int benchMem(unsigned benchNb, ip += ZSTD_blockHeaderSize; /* skip block header */ ZSTD_decompressBegin(g_zdc); CONTROL(iend > ip); - ip += ZSTD_decodeLiteralsBlock(g_zdc, ip, (size_t)(iend-ip)); /* skip literal segment */ + ip += ZSTD_decodeLiteralsBlock(g_zdc, ip, (size_t)(iend-ip), dstBuff, dstBuffSize, not_streaming); /* skip literal segment */ g_cSize = (size_t)(iend-ip); memcpy(dstBuff2, ip, g_cSize); /* copy rest of block (it starts by SeqHeader) */ srcSize = srcSize > 128 KB ? 128 KB : srcSize; /* speed relative to block */ diff --git a/tests/fuzz/.gitignore b/tests/fuzz/.gitignore index 93d935a85..02c2f10be 100644 --- a/tests/fuzz/.gitignore +++ b/tests/fuzz/.gitignore @@ -17,6 +17,7 @@ decompress_dstSize_tooSmall fse_read_ncount sequence_compression_api seekable_roundtrip +huf_round_trip fuzz-*.log rt_lib_* d_lib_* diff --git a/tests/fuzz/Makefile b/tests/fuzz/Makefile index ccb574b79..d733f7cad 100644 --- a/tests/fuzz/Makefile +++ b/tests/fuzz/Makefile @@ -23,6 +23,12 @@ else endif CORPORA_URL_PREFIX:=https://github.com/facebook/zstd/releases/download/fuzz-corpora/ +LIBZSTD = ../../lib +DEBUGLEVEL ?= 2 +ZSTD_LEGACY_SUPPORT ?= 1 + +include $(LIBZSTD)/libzstd.mk + ZSTDDIR = ../../lib PRGDIR = ../../programs CONTRIBDIR = ../../contrib @@ -34,7 +40,7 @@ FUZZ_EXTRA_FLAGS := -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \ -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \ -Wstrict-prototypes -Wundef \ -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \ - -Wredundant-decls \ + -Wredundant-decls -Wno-deprecated-declarations \ -g -fno-omit-frame-pointer FUZZ_CFLAGS := $(FUZZ_EXTRA_FLAGS) $(CFLAGS) FUZZ_CXXFLAGS := $(FUZZ_EXTRA_FLAGS) -std=c++11 $(CXXFLAGS) @@ -50,11 +56,11 @@ FUZZ_SRC := $(PRGDIR)/util.c ./fuzz_helpers.c ./zstd_helpers.c ./fuzz_data_produ SEEKABLE_HEADERS = $(CONTRIBDIR)/seekable_format/zstd_seekable.h SEEKABLE_OBJS = $(CONTRIBDIR)/seekable_format/zstdseek_compress.c $(CONTRIBDIR)/seekable_format/zstdseek_decompress.c -ZSTDCOMMON_SRC := $(ZSTDDIR)/common/*.c -ZSTDCOMP_SRC := $(ZSTDDIR)/compress/*.c -ZSTDDECOMP_SRC := $(ZSTDDIR)/decompress/*.c -ZSTDDICT_SRC := $(ZSTDDIR)/dictBuilder/*.c -ZSTDLEGACY_SRC := $(ZSTDDIR)/legacy/*.c +ZSTDCOMMON_SRC := $(ZSTD_COMMON_FILES) +ZSTDCOMP_SRC := $(ZSTD_COMPRESS_FILES) +ZSTDDECOMP_SRC := $(ZSTD_DECOMPRESS_FILES) +ZSTDDICT_SRC := $(ZSTD_DICTBUILDER_FILES) +ZSTDLEGACY_SRC := $(ZSTD_LEGACY_FILES) FUZZ_SRC := \ $(FUZZ_SRC) \ $(ZSTDDECOMP_SRC) \ @@ -71,7 +77,8 @@ FUZZ_D_OBJ4 := $(subst $(ZSTDDIR)/dictBuilder/,d_lib_dictBuilder_,$(FUZZ_D_OBJ3) FUZZ_D_OBJ5 := $(subst $(ZSTDDIR)/legacy/,d_lib_legacy_,$(FUZZ_D_OBJ4)) FUZZ_D_OBJ6 := $(subst $(PRGDIR)/,d_prg_,$(FUZZ_D_OBJ5)) FUZZ_D_OBJ7 := $(subst $\./,d_fuzz_,$(FUZZ_D_OBJ6)) -FUZZ_DECOMPRESS_OBJ := $(FUZZ_D_OBJ7:.c=.o) +FUZZ_D_OBJ8 := $(FUZZ_D_OBJ7:.c=.o) +FUZZ_DECOMPRESS_OBJ := $(FUZZ_D_OBJ8:.S=.o) FUZZ_RT_OBJ1 := $(subst $(ZSTDDIR)/common/,rt_lib_common_,$(FUZZ_SRC)) FUZZ_RT_OBJ2 := $(subst $(ZSTDDIR)/compress/,rt_lib_compress_,$(FUZZ_RT_OBJ1)) @@ -80,7 +87,8 @@ FUZZ_RT_OBJ4 := $(subst $(ZSTDDIR)/dictBuilder/,rt_lib_dictBuilder_,$(FUZZ_RT_OB FUZZ_RT_OBJ5 := $(subst $(ZSTDDIR)/legacy/,rt_lib_legacy_,$(FUZZ_RT_OBJ4)) FUZZ_RT_OBJ6 := $(subst $(PRGDIR)/,rt_prg_,$(FUZZ_RT_OBJ5)) FUZZ_RT_OBJ7 := $(subst $\./,rt_fuzz_,$(FUZZ_RT_OBJ6)) -FUZZ_ROUND_TRIP_OBJ := $(FUZZ_RT_OBJ7:.c=.o) +FUZZ_RT_OBJ8 := $(FUZZ_RT_OBJ7:.c=.o) +FUZZ_ROUND_TRIP_OBJ := $(FUZZ_RT_OBJ8:.S=.o) 
.PHONY: default all clean cleanall @@ -103,7 +111,9 @@ FUZZ_TARGETS := \ decompress_dstSize_tooSmall \ fse_read_ncount \ sequence_compression_api \ - seekable_roundtrip + seekable_roundtrip \ + huf_round_trip \ + huf_decompress all: libregression.a $(FUZZ_TARGETS) @@ -116,6 +126,9 @@ rt_lib_compress_%.o: $(ZSTDDIR)/compress/%.c rt_lib_decompress_%.o: $(ZSTDDIR)/decompress/%.c $(CC) $(FUZZ_CPPFLAGS) $(FUZZ_CFLAGS) $(FUZZ_ROUND_TRIP_FLAGS) $< -c -o $@ +rt_lib_decompress_%.o: $(ZSTDDIR)/decompress/%.S + $(CC) $(FUZZ_CPPFLAGS) $(FUZZ_CFLAGS) $(FUZZ_ROUND_TRIP_FLAGS) $< -c -o $@ + rt_lib_dictBuilder_%.o: $(ZSTDDIR)/dictBuilder/%.c $(CC) $(FUZZ_CPPFLAGS) $(FUZZ_CFLAGS) $(FUZZ_ROUND_TRIP_FLAGS) $< -c -o $@ @@ -137,6 +150,9 @@ d_lib_compress_%.o: $(ZSTDDIR)/compress/%.c d_lib_decompress_%.o: $(ZSTDDIR)/decompress/%.c $(CC) $(FUZZ_CPPFLAGS) $(FUZZ_CFLAGS) $< -c -o $@ +d_lib_decompress_%.o: $(ZSTDDIR)/decompress/%.S + $(CC) $(FUZZ_CPPFLAGS) $(FUZZ_CFLAGS) $< -c -o $@ + d_lib_dictBuilder_%.o: $(ZSTDDIR)/dictBuilder/%.c $(CC) $(FUZZ_CPPFLAGS) $(FUZZ_CFLAGS) $< -c -o $@ @@ -200,6 +216,12 @@ sequence_compression_api: $(FUZZ_HEADERS) $(FUZZ_ROUND_TRIP_OBJ) rt_fuzz_sequenc seekable_roundtrip: $(FUZZ_HEADERS) $(SEEKABLE_HEADERS) $(FUZZ_ROUND_TRIP_OBJ) $(SEEKABLE_OBJS) rt_fuzz_seekable_roundtrip.o $(CXX) $(FUZZ_TARGET_FLAGS) $(FUZZ_ROUND_TRIP_OBJ) $(SEEKABLE_OBJS) rt_fuzz_seekable_roundtrip.o $(LIB_FUZZING_ENGINE) -o $@ +huf_round_trip: $(FUZZ_HEADERS) $(FUZZ_ROUND_TRIP_OBJ) rt_fuzz_huf_round_trip.o + $(CXX) $(FUZZ_TARGET_FLAGS) $(FUZZ_ROUND_TRIP_OBJ) rt_fuzz_huf_round_trip.o $(LIB_FUZZING_ENGINE) -o $@ + +huf_decompress: $(FUZZ_HEADERS) $(FUZZ_DECOMPRESS_OBJ) d_fuzz_huf_decompress.o + $(CXX) $(FUZZ_TARGET_FLAGS) $(FUZZ_DECOMPRESS_OBJ) d_fuzz_huf_decompress.o $(LIB_FUZZING_ENGINE) -o $@ + libregression.a: $(FUZZ_HEADERS) $(PRGDIR)/util.h $(PRGDIR)/util.c d_fuzz_regression_driver.o $(AR) $(FUZZ_ARFLAGS) $@ d_fuzz_regression_driver.o diff --git a/tests/fuzz/dictionary_round_trip.c b/tests/fuzz/dictionary_round_trip.c index 7bff4bd6c..0b20e8d67 100644 --- a/tests/fuzz/dictionary_round_trip.c +++ b/tests/fuzz/dictionary_round_trip.c @@ -42,8 +42,23 @@ static size_t roundTripTest(void *result, size_t resultCapacity, src, srcSize, dict.buff, dict.size, cLevel); + FUZZ_ZASSERT(cSize); + // Compress a second time and check for determinism + { + size_t const cSize0 = cSize; + XXH64_hash_t const hash0 = XXH64(compressed, cSize, 0); + cSize = ZSTD_compress_usingDict(cctx, + compressed, compressedCapacity, + src, srcSize, + dict.buff, dict.size, + cLevel); + FUZZ_ASSERT(cSize == cSize0); + FUZZ_ASSERT(XXH64(compressed, cSize, 0) == hash0); + } } else { + size_t remainingBytes; dictContentType = FUZZ_dataProducer_uint32Range(producer, 0, 2); + remainingBytes = FUZZ_dataProducer_remainingBytes(producer); FUZZ_setRandomParameters(cctx, srcSize, producer); /* Disable checksum so we can use sizes smaller than compress bound. 
*/ FUZZ_ZASSERT(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 0)); @@ -51,14 +66,29 @@ static size_t roundTripTest(void *result, size_t resultCapacity, FUZZ_ZASSERT(ZSTD_CCtx_refPrefix_advanced( cctx, dict.buff, dict.size, dictContentType)); - else + else FUZZ_ZASSERT(ZSTD_CCtx_loadDictionary_advanced( cctx, dict.buff, dict.size, (ZSTD_dictLoadMethod_e)FUZZ_dataProducer_uint32Range(producer, 0, 1), dictContentType)); cSize = ZSTD_compress2(cctx, compressed, compressedCapacity, src, srcSize); + FUZZ_ZASSERT(cSize); + // Compress a second time and check for determinism + { + size_t const cSize0 = cSize; + XXH64_hash_t const hash0 = XXH64(compressed, cSize, 0); + FUZZ_dataProducer_rollBack(producer, remainingBytes); + FUZZ_setRandomParameters(cctx, srcSize, producer); + FUZZ_ZASSERT(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 0)); + if (refPrefix) + FUZZ_ZASSERT(ZSTD_CCtx_refPrefix_advanced( + cctx, dict.buff, dict.size, + dictContentType)); + cSize = ZSTD_compress2(cctx, compressed, compressedCapacity, src, srcSize); + FUZZ_ASSERT(cSize == cSize0); + FUZZ_ASSERT(XXH64(compressed, cSize, 0) == hash0); + } } - FUZZ_ZASSERT(cSize); if (refPrefix) FUZZ_ZASSERT(ZSTD_DCtx_refPrefix_advanced( dctx, dict.buff, dict.size, diff --git a/tests/fuzz/fuzz.py b/tests/fuzz/fuzz.py index d8dfa7782..0c56cccb2 100755 --- a/tests/fuzz/fuzz.py +++ b/tests/fuzz/fuzz.py @@ -63,6 +63,8 @@ TARGET_INFO = { 'fse_read_ncount': TargetInfo(InputType.RAW_DATA), 'sequence_compression_api': TargetInfo(InputType.RAW_DATA), 'seekable_roundtrip': TargetInfo(InputType.RAW_DATA), + 'huf_round_trip': TargetInfo(InputType.RAW_DATA), + 'huf_decompress': TargetInfo(InputType.RAW_DATA), } TARGETS = list(TARGET_INFO.keys()) ALL_TARGETS = TARGETS + ['all'] diff --git a/tests/fuzz/fuzz_data_producer.c b/tests/fuzz/fuzz_data_producer.c index beb0155cf..eae8ee4b6 100644 --- a/tests/fuzz/fuzz_data_producer.c +++ b/tests/fuzz/fuzz_data_producer.c @@ -66,6 +66,12 @@ size_t FUZZ_dataProducer_remainingBytes(FUZZ_dataProducer_t *producer){ return producer->size; } +void FUZZ_dataProducer_rollBack(FUZZ_dataProducer_t *producer, size_t remainingBytes) +{ + FUZZ_ASSERT(remainingBytes >= producer->size); + producer->size = remainingBytes; +} + int FUZZ_dataProducer_empty(FUZZ_dataProducer_t *producer) { return producer->size == 0; } diff --git a/tests/fuzz/fuzz_data_producer.h b/tests/fuzz/fuzz_data_producer.h index 045aaff83..62771a9f8 100644 --- a/tests/fuzz/fuzz_data_producer.h +++ b/tests/fuzz/fuzz_data_producer.h @@ -49,6 +49,9 @@ int32_t FUZZ_dataProducer_int32Range(FUZZ_dataProducer_t *producer, /* Returns the size of the remaining bytes of data in the producer */ size_t FUZZ_dataProducer_remainingBytes(FUZZ_dataProducer_t *producer); +/* Rolls back the data producer state to have remainingBytes remaining */ +void FUZZ_dataProducer_rollBack(FUZZ_dataProducer_t *producer, size_t remainingBytes); + /* Returns true if the data producer is out of bytes */ int FUZZ_dataProducer_empty(FUZZ_dataProducer_t *producer); diff --git a/tests/fuzz/huf_decompress.c b/tests/fuzz/huf_decompress.c new file mode 100644 index 000000000..fea09fc93 --- /dev/null +++ b/tests/fuzz/huf_decompress.c @@ -0,0 +1,64 @@ +/* + * Copyright (c) Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). 
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/**
+ * This fuzz target reads a HUF decoding table (X1 or X2) from the raw input,
+ * then decompresses that same input with the 1-stream or 4-stream decoder.
+ */
+
+#define HUF_STATIC_LINKING_ONLY
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "common/cpu.h"
+#include "common/huf.h"
+#include "fuzz_helpers.h"
+#include "fuzz_data_producer.h"
+
+int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
+{
+    FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
+    /* Select random parameters: #streams, X1 or X2 decoding, bmi2 */
+    int const streams = FUZZ_dataProducer_int32Range(producer, 0, 1);
+    int const symbols = FUZZ_dataProducer_int32Range(producer, 0, 1);
+    int const bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()) && FUZZ_dataProducer_int32Range(producer, 0, 1);
+    /* Select a random dBufSize - it may be too small */
+    size_t const dBufSize = FUZZ_dataProducer_uint32Range(producer, 0, 8 * size + 500);
+    size_t const maxTableLog = FUZZ_dataProducer_uint32Range(producer, 1, HUF_TABLELOG_MAX);
+    HUF_DTable* dt = (HUF_DTable*)FUZZ_malloc(HUF_DTABLE_SIZE(maxTableLog) * sizeof(HUF_DTable));
+    size_t const wkspSize = HUF_WORKSPACE_SIZE;
+    void* wksp = FUZZ_malloc(wkspSize);
+    void* dBuf = FUZZ_malloc(dBufSize);
+    dt[0] = maxTableLog * 0x01000001;
+    size = FUZZ_dataProducer_remainingBytes(producer);
+
+    if (symbols == 0) {
+        size_t const err = HUF_readDTableX1_wksp_bmi2(dt, src, size, wksp, wkspSize, bmi2);
+        if (ZSTD_isError(err))
+            goto _out;
+    } else {
+        size_t const err = HUF_readDTableX2_wksp_bmi2(dt, src, size, wksp, wkspSize, bmi2);
+        if (ZSTD_isError(err))
+            goto _out;
+    }
+    if (streams == 0)
+        HUF_decompress1X_usingDTable_bmi2(dBuf, dBufSize, src, size, dt, bmi2);
+    else
+        HUF_decompress4X_usingDTable_bmi2(dBuf, dBufSize, src, size, dt, bmi2);
+
+_out:
+    free(dt);
+    free(wksp);
+    free(dBuf);
+    FUZZ_dataProducer_free(producer);
+    return 0;
+}
diff --git a/tests/fuzz/huf_round_trip.c b/tests/fuzz/huf_round_trip.c
new file mode 100644
index 000000000..0e26ca9b5
--- /dev/null
+++ b/tests/fuzz/huf_round_trip.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/**
+ * This fuzz target performs a HUF round-trip test (compress & decompress),
+ * compares the result with the original, and calls abort() on corruption.
+ */
+
+#define HUF_STATIC_LINKING_ONLY
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "common/cpu.h"
+#include "compress/hist.h"
+#include "common/huf.h"
+#include "fuzz_helpers.h"
+#include "fuzz_data_producer.h"
+
+static size_t adjustTableLog(size_t tableLog, size_t maxSymbol)
+{
+    size_t const alphabetSize = maxSymbol + 1;
+    size_t minTableLog = BIT_highbit32(alphabetSize) + 1;
+    if ((alphabetSize & (alphabetSize - 1)) != 0) {
+        ++minTableLog;
+    }
+    assert(minTableLog <= 9);
+    if (tableLog < minTableLog)
+        return minTableLog;
+    else
+        return tableLog;
+}
+
+int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
+{
+    FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
+    /* Select random parameters: #streams, X1 or X2 decoding, bmi2 */
+    int const streams = FUZZ_dataProducer_int32Range(producer, 0, 1);
+    int const symbols = FUZZ_dataProducer_int32Range(producer, 0, 1);
+    int const bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()) && FUZZ_dataProducer_int32Range(producer, 0, 1);
+    /* Select a random cBufSize - it may be too small */
+    size_t const cBufSize = FUZZ_dataProducer_uint32Range(producer, 0, 4 * size);
+    /* Select a random tableLog - we'll adjust it up later */
+    size_t tableLog = FUZZ_dataProducer_uint32Range(producer, 1, 12);
+    size_t const kMaxSize = 256 * 1024;
+    size = FUZZ_dataProducer_remainingBytes(producer);
+    if (size > kMaxSize)
+        size = kMaxSize;
+
+    if (size <= 1) {
+        FUZZ_dataProducer_free(producer);
+        return 0;
+    }
+
+    uint32_t maxSymbol = 255;
+
+    U32 count[256];
+    size_t const mostFrequent = HIST_count(count, &maxSymbol, src, size);
+    FUZZ_ZASSERT(mostFrequent);
+    if (mostFrequent == size) {
+        /* RLE */
+        FUZZ_dataProducer_free(producer);
+        return 0;
+
+    }
+    FUZZ_ASSERT(maxSymbol <= 255);
+    tableLog = adjustTableLog(tableLog, maxSymbol);
+
+    size_t const wkspSize = HUF_WORKSPACE_SIZE;
+    void* wksp = FUZZ_malloc(wkspSize);
+    void* rBuf = FUZZ_malloc(size);
+    void* cBuf = FUZZ_malloc(cBufSize);
+    HUF_CElt* ct = (HUF_CElt*)FUZZ_malloc(HUF_CTABLE_SIZE(maxSymbol));
+    HUF_DTable* dt = (HUF_DTable*)FUZZ_malloc(HUF_DTABLE_SIZE(tableLog) * sizeof(HUF_DTable));
+    dt[0] = tableLog * 0x01000001;
+
+    tableLog = HUF_optimalTableLog(tableLog, size, maxSymbol);
+    FUZZ_ASSERT(tableLog <= 12);
+    tableLog = HUF_buildCTable_wksp(ct, count, maxSymbol, tableLog, wksp, wkspSize);
+    FUZZ_ZASSERT(tableLog);
+    size_t const tableSize = HUF_writeCTable_wksp(cBuf, cBufSize, ct, maxSymbol, tableLog, wksp, wkspSize);
+    if (ERR_isError(tableSize)) {
+        /* Errors on uncompressible data or cBufSize too small */
+        goto _out;
+    }
+    FUZZ_ZASSERT(tableSize);
+    if (symbols == 0) {
+        FUZZ_ZASSERT(HUF_readDTableX1_wksp_bmi2(dt, cBuf, tableSize, wksp, wkspSize, bmi2));
+    } else {
+        size_t const ret = HUF_readDTableX2_wksp(dt, cBuf, tableSize, wksp, wkspSize);
+        if (ERR_getErrorCode(ret) == ZSTD_error_tableLog_tooLarge) {
+            FUZZ_ZASSERT(HUF_readDTableX1_wksp_bmi2(dt, cBuf, tableSize, wksp, wkspSize, bmi2));
+        } else {
+            FUZZ_ZASSERT(ret);
+        }
+    }
+
+    size_t cSize;
+    size_t rSize;
+    if (streams == 0) {
+        cSize = HUF_compress1X_usingCTable_bmi2(cBuf, cBufSize, src, size, ct, bmi2);
+        FUZZ_ZASSERT(cSize);
+        if (cSize != 0)
+            rSize = HUF_decompress1X_usingDTable_bmi2(rBuf, size, cBuf, cSize, dt, bmi2);
+    } else {
+        cSize = HUF_compress4X_usingCTable_bmi2(cBuf, cBufSize, src, size, ct, bmi2);
+        FUZZ_ZASSERT(cSize);
+        if (cSize != 0)
+            rSize = HUF_decompress4X_usingDTable_bmi2(rBuf, size, cBuf, cSize, dt, bmi2);
+    }
+    if (cSize != 0) {
+        FUZZ_ZASSERT(rSize);
+        FUZZ_ASSERT_MSG(rSize == size,
"Incorrect regenerated size"); + FUZZ_ASSERT_MSG(!FUZZ_memcmp(src, rBuf, size), "Corruption!"); + } +_out: + free(rBuf); + free(cBuf); + free(ct); + free(dt); + free(wksp); + FUZZ_dataProducer_free(producer); + return 0; +} diff --git a/tests/fuzz/simple_round_trip.c b/tests/fuzz/simple_round_trip.c index c9fac26c5..9da986bc9 100644 --- a/tests/fuzz/simple_round_trip.c +++ b/tests/fuzz/simple_round_trip.c @@ -35,16 +35,36 @@ static size_t roundTripTest(void *result, size_t resultCapacity, size_t dSize; int targetCBlockSize = 0; if (FUZZ_dataProducer_uint32Range(producer, 0, 1)) { + size_t const remainingBytes = FUZZ_dataProducer_remainingBytes(producer); FUZZ_setRandomParameters(cctx, srcSize, producer); cSize = ZSTD_compress2(cctx, compressed, compressedCapacity, src, srcSize); + FUZZ_ZASSERT(cSize); FUZZ_ZASSERT(ZSTD_CCtx_getParameter(cctx, ZSTD_c_targetCBlockSize, &targetCBlockSize)); + // Compress a second time and check for determinism + { + size_t const cSize0 = cSize; + XXH64_hash_t const hash0 = XXH64(compressed, cSize, 0); + FUZZ_dataProducer_rollBack(producer, remainingBytes); + FUZZ_setRandomParameters(cctx, srcSize, producer); + cSize = ZSTD_compress2(cctx, compressed, compressedCapacity, src, srcSize); + FUZZ_ASSERT(cSize == cSize0); + FUZZ_ASSERT(XXH64(compressed, cSize, 0) == hash0); + } } else { - int const cLevel = FUZZ_dataProducer_int32Range(producer, kMinClevel, kMaxClevel); - + int const cLevel = FUZZ_dataProducer_int32Range(producer, kMinClevel, kMaxClevel); cSize = ZSTD_compressCCtx( cctx, compressed, compressedCapacity, src, srcSize, cLevel); + FUZZ_ZASSERT(cSize); + // Compress a second time and check for determinism + { + size_t const cSize0 = cSize; + XXH64_hash_t const hash0 = XXH64(compressed, cSize, 0); + cSize = ZSTD_compressCCtx( + cctx, compressed, compressedCapacity, src, srcSize, cLevel); + FUZZ_ASSERT(cSize == cSize0); + FUZZ_ASSERT(XXH64(compressed, cSize, 0) == hash0); + } } - FUZZ_ZASSERT(cSize); dSize = ZSTD_decompressDCtx(dctx, result, resultCapacity, compressed, cSize); FUZZ_ZASSERT(dSize); /* When superblock is enabled make sure we don't expand the block more than expected. 
diff --git a/tests/fuzz/zstd_helpers.c b/tests/fuzz/zstd_helpers.c index 4d889deeb..0fbf3bed7 100644 --- a/tests/fuzz/zstd_helpers.c +++ b/tests/fuzz/zstd_helpers.c @@ -96,7 +96,7 @@ void FUZZ_setRandomParameters(ZSTD_CCtx *cctx, size_t srcSize, FUZZ_dataProducer setRand(cctx, ZSTD_c_forceMaxWindow, 0, 1, producer); setRand(cctx, ZSTD_c_literalCompressionMode, 0, 2, producer); setRand(cctx, ZSTD_c_forceAttachDict, 0, 2, producer); - setRand(cctx, ZSTD_c_splitBlocks, 0, 1, producer); + setRand(cctx, ZSTD_c_useBlockSplitter, 0, 2, producer); setRand(cctx, ZSTD_c_deterministicRefPrefix, 0, 1, producer); if (FUZZ_dataProducer_uint32Range(producer, 0, 1) == 0) { setRand(cctx, ZSTD_c_srcSizeHint, ZSTD_SRCSIZEHINT_MIN, 2 * srcSize, producer); diff --git a/tests/fuzzer.c b/tests/fuzzer.c index 1ea65210d..e2eedbcea 100644 --- a/tests/fuzzer.c +++ b/tests/fuzzer.c @@ -42,7 +42,7 @@ #include "util.h" #include "timefn.h" /* SEC_TO_MICRO, UTIL_time_t, UTIL_TIME_INITIALIZER, UTIL_clockSpanMicro, UTIL_getTime */ /* must be included after util.h, due to ERROR macro redefinition issue on Visual Studio */ -#include "zstd_internal.h" /* ZSTD_WORKSPACETOOLARGE_MAXDURATION, ZSTD_WORKSPACETOOLARGE_FACTOR, KB, MB */ +#include "zstd_internal.h" /* ZSTD_WORKSPACETOOLARGE_MAXDURATION, ZSTD_WORKSPACETOOLARGE_FACTOR, KB, MB */ #include "threading.h" /* ZSTD_pthread_create, ZSTD_pthread_join */ @@ -128,7 +128,7 @@ static U32 FUZ_highbit32(U32 v32) #define CHECK_VAR(var, fn) var = fn; if (ZSTD_isError(var)) { DISPLAYLEVEL(1, "%s : fails : %s \n", #fn, ZSTD_getErrorName(var)); goto _output_error; } #define CHECK_NEWV(var, fn) size_t const CHECK_VAR(var, fn) -#define CHECK(fn) { CHECK_NEWV(err, fn); } +#define CHECK(fn) { CHECK_NEWV(__err, fn); } #define CHECKPLUS(var, fn, more) { CHECK_NEWV(var, fn); more; } #define CHECK_OP(op, lhs, rhs) { \ @@ -1528,7 +1528,7 @@ static int basicUnitTests(U32 const seed, double compressibility) DISPLAYLEVEL(3, "test%3i : resize context to full CCtx size : ", testNb++); staticCCtx = ZSTD_initStaticCStream(staticCCtxBuffer, staticCCtxSize); - DISPLAYLEVEL(4, "staticCCtxBuffer = %p, staticCCtx = %p , ", staticCCtxBuffer, staticCCtx); + DISPLAYLEVEL(4, "staticCCtxBuffer = %p, staticCCtx = %p , ", staticCCtxBuffer, (void*)staticCCtx); if (staticCCtx == NULL) goto _output_error; DISPLAYLEVEL(3, "OK \n"); @@ -1718,7 +1718,7 @@ static int basicUnitTests(U32 const seed, double compressibility) DISPLAYLEVEL(3, "test%3i : compress with block splitting : ", testNb++) { ZSTD_CCtx* cctx = ZSTD_createCCtx(); - CHECK( ZSTD_CCtx_setParameter(cctx, ZSTD_c_splitBlocks, 1) ); + CHECK( ZSTD_CCtx_setParameter(cctx, ZSTD_c_useBlockSplitter, ZSTD_ps_enable) ); cSize = ZSTD_compress2(cctx, compressedBuffer, compressedBufferSize, CNBuffer, CNBuffSize); CHECK(cSize); ZSTD_freeCCtx(cctx); @@ -1732,7 +1732,7 @@ static int basicUnitTests(U32 const seed, double compressibility) CHECK( ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 2) ); cSize1 = ZSTD_compress2(cctx, compressedBuffer, compressedBufferSize, CNBuffer, CNBuffSize); CHECK(cSize1); - CHECK( ZSTD_CCtx_setParameter(cctx, ZSTD_c_literalCompressionMode, ZSTD_lcm_uncompressed) ); + CHECK( ZSTD_CCtx_setParameter(cctx, ZSTD_c_literalCompressionMode, ZSTD_ps_disable) ); cSize2 = ZSTD_compress2(cctx, compressedBuffer, compressedBufferSize, CNBuffer, CNBuffSize); CHECK(cSize2); CHECK_LT(cSize1, cSize2); @@ -1830,6 +1830,47 @@ static int basicUnitTests(U32 const seed, double compressibility) if (memcmp(decodedBuffer, CNBuffer, CNBuffSize / 2) != 0) goto 
_output_error; DISPLAYLEVEL(3, "OK \n"); + /* Simple API skippable frame test */ + DISPLAYLEVEL(3, "test%3i : read/write a skippable frame : ", testNb++); + { U32 i; + unsigned readMagic; + unsigned long long receivedSize; + size_t skippableSize; + const U32 skipLen = 129 KB; + char* const skipBuff = (char*)malloc(skipLen); + assert(skipBuff != NULL); + for (i = 0; i < skipLen; i++) + skipBuff[i] = (char) ((seed + i) % 256); + skippableSize = ZSTD_writeSkippableFrame( + compressedBuffer, compressedBufferSize, + skipBuff, skipLen, seed % 15); + CHECK_Z(skippableSize); + CHECK_EQ(1, ZSTD_isSkippableFrame(compressedBuffer, skippableSize)); + receivedSize = ZSTD_readSkippableFrame(decodedBuffer, CNBuffSize, &readMagic, compressedBuffer, skippableSize); + CHECK_EQ(skippableSize, receivedSize + ZSTD_SKIPPABLEHEADERSIZE); + CHECK_EQ(seed % 15, readMagic); + if (memcmp(decodedBuffer, skipBuff, skipLen) != 0) goto _output_error; + + free(skipBuff); + } + DISPLAYLEVEL(3, "OK \n"); + + DISPLAYLEVEL(3, "test%3i : read/write an empty skippable frame : ", testNb++); + { + unsigned readMagic; + unsigned long long receivedSize; + size_t skippableSize; + skippableSize = ZSTD_writeSkippableFrame( + compressedBuffer, compressedBufferSize, + CNBuffer, 0, seed % 15); + CHECK_EQ(ZSTD_SKIPPABLEHEADERSIZE, skippableSize); + CHECK_EQ(1, ZSTD_isSkippableFrame(compressedBuffer, skippableSize)); + receivedSize = ZSTD_readSkippableFrame(NULL, 0, &readMagic, compressedBuffer, skippableSize); + CHECK_EQ(skippableSize, receivedSize + ZSTD_SKIPPABLEHEADERSIZE); + CHECK_EQ(seed % 15, readMagic); + } + DISPLAYLEVEL(3, "OK \n"); + /* Dictionary and CCtx Duplication tests */ { ZSTD_CCtx* const ctxOrig = ZSTD_createCCtx(); ZSTD_CCtx* const ctxDuplicated = ZSTD_createCCtx(); @@ -1915,27 +1956,33 @@ static int basicUnitTests(U32 const seed, double compressibility) } } DISPLAYLEVEL(3, "OK \n"); + /* Note : these tests should be replaced by proper regression tests, + * but existing ones do not focus on small data + dictionary + all levels. + */ if ((int)(compressibility * 100 + 0.1) == FUZ_compressibility_default) { /* test only valid with known input */ size_t const flatdictSize = 22 KB; size_t const contentSize = 9 KB; const void* const dict = (const char*)CNBuffer; const void* const contentStart = (const char*)dict + flatdictSize; /* These upper bounds are generally within a few bytes of the compressed size */ - size_t const target_nodict_cSize[22+1] = { 3840, 3770, 3870, 3830, 3770, - 3770, 3770, 3770, 3750, 3750, - 3742, 3670, 3670, 3660, 3660, - 3660, 3660, 3660, 3660, 3660, - 3660, 3660, 3660 }; - size_t const target_wdict_cSize[22+1] = { 2830, 2890, 2890, 2820, 2940, + size_t target_nodict_cSize[22+1] = { 3840, 3770, 3870, 3830, 3770, + 3770, 3770, 3770, 3750, 3750, + 3742, 3675, 3674, 3665, 3664, + 3663, 3662, 3661, 3660, 3660, + 3660, 3660, 3660 }; + size_t const target_wdict_cSize[22+1] = { 2830, 2896, 2890, 2820, 2940, 2950, 2950, 2925, 2900, 2891, - 2910, 2910, 2910, 2770, 2760, - 2750, 2750, 2750, 2750, 2750, - 2750, 2750, 2750 }; + 2910, 2910, 2910, 2780, 2775, + 2765, 2760, 2755, 2754, 2753, + 2753, 2753, 2753 }; int l = 1; int const maxLevel = ZSTD_maxCLevel(); + /* clevels with strategies that support rowhash on small inputs */ + int rowLevel = 4; + int const rowLevelEnd = 8; DISPLAYLEVEL(3, "test%3i : flat-dictionary efficiency test : \n", testNb++); - assert(maxLevel == 22); + assert(maxLevel == (MEM_32bits() ? 
21 : 22)); RDG_genBuffer(CNBuffer, flatdictSize + contentSize, compressibility, 0., seed); DISPLAYLEVEL(4, "content hash : %016llx; dict hash : %016llx \n", XXH64(contentStart, contentSize, 0), XXH64(dict, flatdictSize, 0)); @@ -1964,6 +2011,27 @@ static int basicUnitTests(U32 const seed, double compressibility) DISPLAYLEVEL(4, "level %i with dictionary : max expected %u >= reached %u \n", l, (unsigned)target_wdict_cSize[l], (unsigned)wdict_cSize); } + /* Compression with ZSTD_compress2 and row match finder force enabled. + * Give some slack for force-enabled row matchfinder since we're on a small input (9KB) + */ + for ( ; rowLevel <= rowLevelEnd; ++rowLevel) target_nodict_cSize[rowLevel] += 5; + for (l=1 ; l <= maxLevel; l++) { + ZSTD_CCtx* const cctx = ZSTD_createCCtx(); + size_t nodict_cSize; + ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, l); + ZSTD_CCtx_setParameter(cctx, ZSTD_c_useRowMatchFinder, ZSTD_ps_enable); + nodict_cSize = ZSTD_compress2(cctx, compressedBuffer, compressedBufferSize, + contentStart, contentSize); + if (nodict_cSize > target_nodict_cSize[l]) { + DISPLAYLEVEL(1, "error : compression with compress2 at level %i worse than expected (%u > %u) \n", + l, (unsigned)nodict_cSize, (unsigned)target_nodict_cSize[l]); + ZSTD_freeCCtx(cctx); + goto _output_error; + } + DISPLAYLEVEL(4, "level %i with compress2 : max expected %u >= reached %u \n", + l, (unsigned)target_nodict_cSize[l], (unsigned)nodict_cSize); + ZSTD_freeCCtx(cctx); + } /* Dict compression with DMS */ for ( l=1 ; l <= maxLevel; l++) { size_t wdict_cSize; @@ -2470,7 +2538,7 @@ static int basicUnitTests(U32 const seed, double compressibility) ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters); CHECK_Z( ZSTD_DCtx_loadDictionary(dctx, dictBuffer, dictSize) ); CHECK_Z( ZSTD_decompressDCtx(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize) ); - /* The dictionary should presist across calls. */ + /* The dictionary should persist across calls. */ CHECK_Z( ZSTD_decompressDCtx(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize) ); /* When we reset the context the dictionary is cleared. */ ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters); @@ -2489,7 +2557,7 @@ static int basicUnitTests(U32 const seed, double compressibility) ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters); CHECK_Z( ZSTD_DCtx_refDDict(dctx, ddict) ); CHECK_Z( ZSTD_decompressDCtx(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize) ); - /* The ddict should presist across calls. */ + /* The ddict should persist across calls. */ CHECK_Z( ZSTD_decompressDCtx(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize) ); /* When we reset the context the ddict is cleared. */ ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters); @@ -3289,6 +3357,23 @@ static int basicUnitTests(U32 const seed, double compressibility) FSE_normalizeCount(norm, tableLog, count, nbSeq, maxSymbolValue, /* useLowProbCount */ 1); } DISPLAYLEVEL(3, "OK \n"); + + DISPLAYLEVEL(3, "test%3i : testing FSE_writeNCount() PR#2779: ", testNb++); + { + size_t const outBufSize = 9; + short const count[11] = {1, 0, 1, 0, 1, 0, 1, 0, 1, 9, 18}; + unsigned const tableLog = 5; + unsigned const maxSymbolValue = 10; + BYTE* outBuf = (BYTE*)malloc(outBufSize*sizeof(BYTE)); + + /* Ensure that this write doesn't write out of bounds, and that + * FSE_writeNCount_generic() is *not* called with writeIsSafe == 1. 
+ */ + FSE_writeNCount(outBuf, outBufSize, count, maxSymbolValue, tableLog); + free(outBuf); + } + DISPLAYLEVEL(3, "OK \n"); + #ifdef ZSTD_MULTITHREAD DISPLAYLEVEL(3, "test%3i : passing wrong full dict should fail on compressStream2 refPrefix ", testNb++); { ZSTD_CCtx* cctx = ZSTD_createCCtx(); @@ -3407,11 +3492,7 @@ static int basicUnitTests(U32 const seed, double compressibility) DISPLAYLEVEL(3, "error! l: %d dict: %zu srcSize: %zu cctx size cpar: %zu, cctx size level: %zu\n", level, dictSize, srcSize, cctxSizeUsingCParams, cctxSizeUsingLevel); goto _output_error; - } - } - } - } - } + } } } } } DISPLAYLEVEL(3, "OK \n"); DISPLAYLEVEL(3, "test%3i : thread pool API tests : \n", testNb++) @@ -3527,8 +3608,7 @@ static int longUnitTests(U32 const seed, double compressibility) DISPLAYLEVEL(3, "OK \n"); DISPLAYLEVEL(3, "longtest%3i : testing ldm no regressions in size for opt parser : ", testNb++); - { - size_t cSizeLdm; + { size_t cSizeLdm; size_t cSizeNoLdm; ZSTD_CCtx* const cctx = ZSTD_createCCtx(); @@ -3606,7 +3686,7 @@ static int longUnitTests(U32 const seed, double compressibility) CHECK(cdict != NULL); CHECK_Z(ZSTD_CCtx_refCDict(cctx, cdict)); - CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_forceAttachDict, attachPref)); + CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_forceAttachDict, (int)attachPref)); cSize = ZSTD_compress2(cctx, compressedBuffer, compressedBufferSize, CNBuffer, CNBuffSize); CHECK_Z(cSize); diff --git a/tests/libzstd_partial_builds.sh b/tests/libzstd_builds.sh similarity index 92% rename from tests/libzstd_partial_builds.sh rename to tests/libzstd_builds.sh index 05dad8f98..f9e1e76c6 100755 --- a/tests/libzstd_partial_builds.sh +++ b/tests/libzstd_builds.sh @@ -23,14 +23,19 @@ mustBeAbsent() { # default compilation : all features enabled - no zbuff $ECHO "testing default library compilation" -CFLAGS= make -C $DIR/../lib libzstd.a > $INTOVOID +CFLAGS= make -C $DIR/../lib libzstd libzstd.a > $INTOVOID nm $DIR/../lib/libzstd.a | $GREP "\.o" > tmplog isPresent "zstd_compress.o" isPresent "zstd_decompress.o" isPresent "zdict.o" isPresent "zstd_v07.o" mustBeAbsent "zbuff_compress.o" -$RM $DIR/../lib/libzstd.a tmplog +$RM tmplog + +# Check that the exec-stack bit isn't set +readelf -lW $DIR/../lib/libzstd.so | $GREP "GNU_STACK" > tmplog +mustBeAbsent "RWE" +$RM $DIR/../lib/libzstd.a $DIR/../lib/libzstd.so* tmplog # compression disabled => also disable zdict $ECHO "testing with compression disabled" diff --git a/tests/paramgrill.c b/tests/paramgrill.c index a0cfa589b..033a10181 100644 --- a/tests/paramgrill.c +++ b/tests/paramgrill.c @@ -2652,7 +2652,7 @@ static int usage_advanced(void) (unsigned)g_timeLimit_s, (double)g_timeLimit_s / 3600); DISPLAY( " -v : Prints Benchmarking output\n"); DISPLAY( " -D : Next argument dictionary file\n"); - DISPLAY( " -s : Seperate Files\n"); + DISPLAY( " -s : Separate Files\n"); return 0; } @@ -2707,7 +2707,7 @@ int main(int argc, const char** argv) const char* dictFileName = NULL; U32 main_pause = 0; int cLevelOpt = 0, cLevelRun = 0; - int seperateFiles = 0; + int separateFiles = 0; double compressibility = COMPRESSIBILITY_DEFAULT; U32 memoTableLog = PARAM_UNSET; constraint_t target = { 0, 0, (U32)-1 }; @@ -2895,7 +2895,7 @@ int main(int argc, const char** argv) case 's': argument++; - seperateFiles = 1; + separateFiles = 1; break; case 'q': @@ -2940,7 +2940,7 @@ int main(int argc, const char** argv) result = benchSample(compressibility, cLevelRun); } } else { - if(seperateFiles) { + if(separateFiles) { for(i = 0; i < argc - filenamesStart; i++) 
{ if (g_optimizer) { result = optimizeForSize(argv+filenamesStart + i, 1, dictFileName, target, paramTarget, cLevelOpt, cLevelRun, memoTableLog); diff --git a/tests/playTests.sh b/tests/playTests.sh index fa748c0cf..6dbfffca2 100755 --- a/tests/playTests.sh +++ b/tests/playTests.sh @@ -124,6 +124,13 @@ case "$UNAME" in Darwin | FreeBSD | OpenBSD | NetBSD) MTIME="stat -f %m" ;; esac +assertSameMTime() { + MT1=$($MTIME "$1") + MT2=$($MTIME "$2") + echo MTIME $MT1 $MT2 + [ "$MT1" = "$MT2" ] || die "mtime on $1 doesn't match mtime on $2 ($MT1 != $MT2)" +} + GET_PERMS="stat -c %a" case "$UNAME" in Darwin | FreeBSD | OpenBSD | NetBSD) GET_PERMS="stat -f %Lp" ;; @@ -159,10 +166,11 @@ if [ -z "${DATAGEN_BIN}" ]; then DATAGEN_BIN="$TESTDIR/datagen" fi -ZSTD_BIN="$EXE_PREFIX$ZSTD_BIN" +# Why was this line here ? Generates a strange ZSTD_BIN when EXE_PREFIX is non empty +# ZSTD_BIN="$EXE_PREFIX$ZSTD_BIN" # assertions -[ -n "$ZSTD_BIN" ] || die "zstd not found at $ZSTD_BIN! \n Please define ZSTD_BIN pointing to the zstd binary. You might also consider rebuilding zstd follwing the instructions in README.md" +[ -n "$ZSTD_BIN" ] || die "zstd not found at $ZSTD_BIN! \n Please define ZSTD_BIN pointing to the zstd binary. You might also consider rebuilding zstd following the instructions in README.md" [ -n "$DATAGEN_BIN" ] || die "datagen not found at $DATAGEN_BIN! \n Please define DATAGEN_BIN pointing to the datagen binary. You might also consider rebuilding zstd tests following the instructions in README.md. " println "\nStarting playTests.sh isWindows=$isWindows EXE_PREFIX='$EXE_PREFIX' ZSTD_BIN='$ZSTD_BIN' DATAGEN_BIN='$DATAGEN_BIN'" @@ -263,6 +271,10 @@ zstd -q -f tmpro println "test: --no-progress flag" zstd tmpro -c --no-progress | zstd -d -f -o "$INTOVOID" --no-progress zstd tmpro -cv --no-progress | zstd -dv -f -o "$INTOVOID" --no-progress +println "test: --progress flag" +zstd tmpro -c | zstd -d -f -o "$INTOVOID" --progress 2>&1 | grep -E "[A-Za-z0-9._ ]+: [0-9]+ bytes" +zstd tmpro -c | zstd -d -f -q -o "$INTOVOID" --progress 2>&1 | grep -E "[A-Za-z0-9._ ]+: [0-9]+ bytes" +zstd tmpro -c | zstd -d -f -v -o "$INTOVOID" 2>&1 | grep -E "[A-Za-z0-9._ ]+: [0-9]+ bytes" rm -f tmpro tmpro.zst println "test: overwrite input file (must fail)" zstd tmp -fo tmp && die "zstd compression overwrote the input file" @@ -325,6 +337,7 @@ zstd --long --rm -r precompressedFilterTestDir # Files should get compressed again without the --exclude-compressed flag. 
test -f precompressedFilterTestDir/input.5.zst.zst test -f precompressedFilterTestDir/input.6.zst.zst +rm -rf precompressedFilterTestDir println "Test completed" @@ -583,6 +596,17 @@ if [ -n "$READFROMBLOCKDEVICE" ] ; then rm -f tmp.img tmp.img.zst tmp.img.copy fi +println "\n===> zstd created file timestamp tests" +datagen > tmp +touch -m -t 200001010000.00 tmp +println "test : copy mtime in file -> file compression " +zstd -f tmp -o tmp.zst +assertSameMTime tmp tmp.zst +println "test : copy mtime in file -> file decompression " +zstd -f -d tmp.zst -o tmp.out +assertSameMTime tmp.zst tmp.out +rm -f tmp + println "\n===> compress multiple files into an output directory, --output-dir-flat" println henlo > tmp1 mkdir tmpInputTestDir @@ -689,6 +713,10 @@ test -f tmp2 test -f tmp3 test -f tmp4 +println "test : survive the list of files with too long filenames (--filelist=FILE)" +datagen -g5M > tmp_badList +zstd -f --filelist=tmp_badList && die "should have failed : file name length is too long" + println "test : survive a list of files which is text garbage (--filelist=FILE)" datagen > tmp_badList zstd -f --filelist=tmp_badList && die "should have failed : list is text garbage" @@ -729,6 +757,13 @@ datagen -g257000 > tmp_files/tmp3 zstd --show-default-cparams -f -r tmp_files rm -rf tmp* +println "test : show compression parameters in verbose mode" +datagen > tmp +zstd -vv tmp 2>&1 | \ +grep -q -E -- "--zstd=wlog=[[:digit:]]+,clog=[[:digit:]]+,hlog=[[:digit:]]+,\ +slog=[[:digit:]]+,mml=[[:digit:]]+,tlen=[[:digit:]]+,strat=[[:digit:]]+" +rm -rf tmp* + println "\n===> Advanced compression parameters " println "Hello world!" | zstd --zstd=windowLog=21, - -o tmp.zst && die "wrong parameters not detected!" println "Hello world!" | zstd --zstd=windowLo=21 - -o tmp.zst && die "wrong parameters not detected!" 
@@ -787,7 +822,7 @@ rm -f ./*.tmp ./*.zstd println "frame concatenation tests completed" -if [ "$isWindows" = false ] && [ "$UNAME" != 'SunOS' ] && [ "$UNAME" != "OpenBSD" ] ; then +if [ "$isWindows" = false ] && [ "$UNAME" != 'SunOS' ] && [ "$UNAME" != "OpenBSD" ] && [ "$UNAME" != "AIX" ]; then println "\n**** flush write error test **** " println "println foo | zstd > /dev/full" @@ -912,8 +947,13 @@ cat tmp | zstd -14 -f --size-hint=5500 | zstd -t # considerably too low println "\n===> dictionary tests " - -println "- test with raw dict (content only) " +println "- Test high/low compressibility corpus training" +datagen -g12M -P90 > tmpCorpusHighCompress +datagen -g12M -P5 > tmpCorpusLowCompress +zstd --train -B2K tmpCorpusHighCompress -o tmpDictHighCompress +zstd --train -B2K tmpCorpusLowCompress -o tmpDictLowCompress +rm -f tmpCorpusHighCompress tmpCorpusLowCompress tmpDictHighCompress tmpDictLowCompress +println "- Test with raw dict (content only) " datagen > tmpDict datagen -g1M | $MD5SUM > tmp1 datagen -g1M | zstd -D tmpDict | zstd -D tmpDict -dvq | $MD5SUM > tmp2 @@ -982,12 +1022,8 @@ zstd -o tmpDict --train "$TESTDIR"/*.c "$PRGDIR"/*.c test -f tmpDict zstd --train "$TESTDIR"/*.c "$PRGDIR"/*.c test -f dictionary -println "- Test dictionary training fails" -echo "000000000000000000000000000000000" > tmpz -zstd --train tmpz tmpz tmpz tmpz tmpz tmpz tmpz tmpz tmpz && die "Dictionary training should fail : source is all zeros" if [ -n "$hasMT" ] then - zstd --train -T0 tmpz tmpz tmpz tmpz tmpz tmpz tmpz tmpz tmpz && die "Dictionary training should fail : source is all zeros" println "- Create dictionary with multithreading enabled" zstd --train -T0 "$TESTDIR"/*.c "$PRGDIR"/*.c -o tmpDict fi @@ -1285,7 +1321,7 @@ println "\n===> tar extension tests " rm -f tmp tmp.tar tmp.tzst tmp.tgz tmp.txz tmp.tlz4 tmp1.zstd datagen > tmp -tar cf tmp.tar tmp +tar -cf tmp.tar tmp zstd tmp.tar -o tmp.tzst rm -f tmp.tar zstd -d tmp.tzst @@ -1293,21 +1329,21 @@ zstd -d tmp.tzst rm -f tmp.tar tmp.tzst if [ $GZIPMODE -eq 1 ]; then - tar czf tmp.tgz tmp + tar -c tmp | gzip > tmp.tgz zstd -d tmp.tgz [ -e tmp.tar ] || die ".tgz failed to decompress to .tar!" rm -f tmp.tar tmp.tgz fi if [ $LZMAMODE -eq 1 ]; then - tar c tmp | zstd --format=xz > tmp.txz + tar -c tmp | zstd --format=xz > tmp.txz zstd -d tmp.txz [ -e tmp.tar ] || die ".txz failed to decompress to .tar!" rm -f tmp.tar tmp.txz fi if [ $LZ4MODE -eq 1 ]; then - tar c tmp | zstd --format=lz4 > tmp.tlz4 + tar -c tmp | zstd --format=lz4 > tmp.tlz4 zstd -d tmp.tlz4 [ -e tmp.tar ] || die ".tlz4 failed to decompress to .tar!" 
rm -f tmp.tar tmp.tlz4 @@ -1347,6 +1383,8 @@ if [ -n "$hasMT" ] then println "\n===> zstdmt round-trip tests " roundTripTest -g4M "1 -T0" + roundTripTest -g4M "1 -T0 --auto-threads=physical" + roundTripTest -g4M "1 -T0 --auto-threads=logical" roundTripTest -g8M "3 -T2" roundTripTest -g8M "19 --long" roundTripTest -g8000K "2 --threads=2" @@ -1441,7 +1479,7 @@ datagen -g0 > tmp5 zstd tmp5 zstd -l tmp5.zst zstd -l tmp5* && die "-l must fail on non-zstd file" -zstd -lv tmp5.zst | grep "Decompressed Size: 0.00 KB (0 B)" # check that 0 size is present in header +zstd -lv tmp5.zst | grep "Decompressed Size: 0 B (0 B)" # check that 0 size is present in header zstd -lv tmp5* && die "-l must fail on non-zstd file" println "\n===> zstd --list/-l test with no content size field " @@ -1500,6 +1538,11 @@ elif [ "$longCSize19wlog23" -gt "$optCSize19wlog23" ]; then exit 1 fi +if [ -n "$CHECK_CONSTRAINED_MEM" ]; then + println "\n===> zsdt constrained memory tests " + # shellcheck disable=SC2039 + (ulimit -Sv 500000 ; datagen -g2M | zstd -22 --single-thread --ultra > /dev/null) +fi if [ "$1" != "--test-large-data" ]; then println "Skipping large data tests" @@ -1515,6 +1558,7 @@ then println "\n===> adaptive mode " roundTripTest -g270000000 " --adapt" roundTripTest -g27000000 " --adapt=min=1,max=4" + roundTripTest -g27000000 " --adapt=min=-2,max=-1" println "===> test: --adapt must fail on incoherent bounds " datagen > tmp zstd -f -vv --adapt=min=10,max=9 tmp && die "--adapt must fail on incoherent bounds" diff --git a/tests/regression/Makefile b/tests/regression/Makefile index d0d7bcf0b..a440c6c94 100644 --- a/tests/regression/Makefile +++ b/tests/regression/Makefile @@ -15,7 +15,7 @@ CURL_LDFLAGS := $(shell curl-config --libs) -pthread PROGDIR := ../../programs LIBDIR := ../../lib -ZSTD_CPPFLAGS := -I$(PROGDIR) -I$(LIBDIR) -I$(LIBDIR)/common +ZSTD_CPPFLAGS := -I$(PROGDIR) -I$(LIBDIR) -I$(LIBDIR)/common -Wno-deprecated-declarations REGRESSION_CFLAGS = $(CFLAGS) $(CURL_CFLAGS) REGRESSION_CPPFLAGS = $(CPPFLAGS) $(ZSTD_CPPFLAGS) @@ -46,6 +46,7 @@ result.o: result.c result.h test.o: test.c data.h config.h method.h $(CC) $(REGRESSION_CFLAGS) $(REGRESSION_CPPFLAGS) $< -c -o $@ +.PHONY: libzstd.a libzstd.a: $(MAKE) -C $(LIBDIR) libzstd.a-mt cp $(LIBDIR)/libzstd.a . 
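
The regression configs below, like the fuzzer and unit-test changes above, move ZSTD_c_literalCompressionMode, ZSTD_c_useRowMatchFinder, and the renamed ZSTD_c_useBlockSplitter (formerly ZSTD_c_splitBlocks) onto the shared ZSTD_paramSwitch_e values ZSTD_ps_auto / ZSTD_ps_enable / ZSTD_ps_disable. A minimal sketch of driving these switches through the advanced API, assuming the experimental declarations (ZSTD_STATIC_LINKING_ONLY) are available; the wrapper function is illustrative:

    #define ZSTD_STATIC_LINKING_ONLY   /* the param-switch controls are experimental API */
    #include <zstd.h>

    /* Compress with the block splitter and row match finder force-enabled
     * and literal compression disabled, via ZSTD_paramSwitch_e values. */
    static size_t compressWithParamSwitches(void* dst, size_t dstCapacity,
                                            const void* src, size_t srcSize)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        size_t cSize;
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_useBlockSplitter, ZSTD_ps_enable);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_useRowMatchFinder, ZSTD_ps_enable);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_literalCompressionMode, ZSTD_ps_disable);
        cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
        ZSTD_freeCCtx(cctx);
        return cSize;   /* may be an error code: check with ZSTD_isError() */
    }
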
diff --git a/tests/regression/config.c b/tests/regression/config.c index 4c66dd150..57cd110c6 100644 --- a/tests/regression/config.c +++ b/tests/regression/config.c @@ -215,7 +215,7 @@ static config_t mt_ldm = { static param_value_t mt_advanced_param_values[] = { {.param = ZSTD_c_nbWorkers, .value = 2}, - {.param = ZSTD_c_literalCompressionMode, .value = ZSTD_lcm_uncompressed}, + {.param = ZSTD_c_literalCompressionMode, .value = ZSTD_ps_disable}, }; static config_t mt_advanced = { @@ -258,7 +258,7 @@ static config_t small_clog = { static param_value_t const uncompressed_literals_param_values[] = { {.param = ZSTD_c_compressionLevel, .value = 3}, - {.param = ZSTD_c_literalCompressionMode, .value = ZSTD_lcm_uncompressed}, + {.param = ZSTD_c_literalCompressionMode, .value = ZSTD_ps_disable}, }; static config_t uncompressed_literals = { @@ -269,7 +269,7 @@ static config_t uncompressed_literals = { static param_value_t const uncompressed_literals_opt_param_values[] = { {.param = ZSTD_c_compressionLevel, .value = 19}, - {.param = ZSTD_c_literalCompressionMode, .value = ZSTD_lcm_uncompressed}, + {.param = ZSTD_c_literalCompressionMode, .value = ZSTD_ps_disable}, }; static config_t uncompressed_literals_opt = { @@ -280,7 +280,7 @@ static config_t uncompressed_literals_opt = { static param_value_t const huffman_literals_param_values[] = { {.param = ZSTD_c_compressionLevel, .value = -1}, - {.param = ZSTD_c_literalCompressionMode, .value = ZSTD_lcm_huffman}, + {.param = ZSTD_c_literalCompressionMode, .value = ZSTD_ps_enable}, }; static config_t huffman_literals = { diff --git a/tests/regression/levels.h b/tests/regression/levels.h index 3b211f8c2..e98209d80 100644 --- a/tests/regression/levels.h +++ b/tests/regression/levels.h @@ -35,21 +35,23 @@ LEVEL(1) LEVEL(3) LEVEL(4) /* ROW_LEVEL triggers the row hash (force enabled and disabled) with different - * dictionary strategies, and 16/32 row entries based on the level/searchLog. - * 1 == disabled, 2 == enabled. + * dictionary strategies, and 16/32/64 row entries based on the level/searchLog. + * 1 == enabled, 2 == disabled. 
*/ ROW_LEVEL(5, 1) -ROW_LEVEL(5, 2) +ROW_LEVEL(5, 2) /* 16-entry rows */ LEVEL(5) LEVEL(6) ROW_LEVEL(7, 1) -ROW_LEVEL(7, 2) +ROW_LEVEL(7, 2) /* 16-entry rows */ LEVEL(7) LEVEL(9) +ROW_LEVEL(11, 1) +ROW_LEVEL(11, 2) /* 32-entry rows */ ROW_LEVEL(12, 1) -ROW_LEVEL(12, 2) +ROW_LEVEL(12, 2) /* 64-entry rows */ LEVEL(13) LEVEL(16) diff --git a/tests/regression/results.csv b/tests/regression/results.csv index b94d5501e..9a19d33cc 100644 --- a/tests/regression/results.csv +++ b/tests/regression/results.csv @@ -1,74 +1,74 @@ Data, Config, Method, Total compressed size -silesia.tar, level -5, compress simple, 6738593 -silesia.tar, level -3, compress simple, 6446372 -silesia.tar, level -1, compress simple, 6186042 -silesia.tar, level 0, compress simple, 4861425 -silesia.tar, level 1, compress simple, 5334885 -silesia.tar, level 3, compress simple, 4861425 -silesia.tar, level 4, compress simple, 4799630 -silesia.tar, level 5, compress simple, 4719256 -silesia.tar, level 6, compress simple, 4677721 -silesia.tar, level 7, compress simple, 4613541 -silesia.tar, level 9, compress simple, 4555426 -silesia.tar, level 13, compress simple, 4491764 -silesia.tar, level 16, compress simple, 4381332 -silesia.tar, level 19, compress simple, 4281605 -silesia.tar, uncompressed literals, compress simple, 4861425 -silesia.tar, uncompressed literals optimal, compress simple, 4281605 -silesia.tar, huffman literals, compress simple, 6186042 -github.tar, level -5, compress simple, 46856 -github.tar, level -3, compress simple, 43754 -github.tar, level -1, compress simple, 42490 -github.tar, level 0, compress simple, 38441 -github.tar, level 1, compress simple, 39265 -github.tar, level 3, compress simple, 38441 -github.tar, level 4, compress simple, 38467 -github.tar, level 5, compress simple, 39693 -github.tar, level 6, compress simple, 39621 -github.tar, level 7, compress simple, 39213 -github.tar, level 9, compress simple, 36758 -github.tar, level 13, compress simple, 35621 -github.tar, level 16, compress simple, 40255 -github.tar, level 19, compress simple, 32837 -github.tar, uncompressed literals, compress simple, 38441 -github.tar, uncompressed literals optimal, compress simple, 32837 -github.tar, huffman literals, compress simple, 42490 -silesia, level -5, compress cctx, 6737607 -silesia, level -3, compress cctx, 6444677 -silesia, level -1, compress cctx, 6178460 -silesia, level 0, compress cctx, 4849552 -silesia, level 1, compress cctx, 5313204 -silesia, level 3, compress cctx, 4849552 -silesia, level 4, compress cctx, 4786970 -silesia, level 5, compress cctx, 4707794 -silesia, level 6, compress cctx, 4666383 -silesia, level 7, compress cctx, 4603381 -silesia, level 9, compress cctx, 4546001 -silesia, level 13, compress cctx, 4482135 -silesia, level 16, compress cctx, 4377465 -silesia, level 19, compress cctx, 4293330 -silesia, long distance mode, compress cctx, 4849552 -silesia, multithreaded, compress cctx, 4849552 -silesia, multithreaded long distance mode, compress cctx, 4849552 -silesia, small window log, compress cctx, 7084179 -silesia, small hash log, compress cctx, 6555021 -silesia, small chain log, compress cctx, 4931148 -silesia, explicit params, compress cctx, 4794479 -silesia, uncompressed literals, compress cctx, 4849552 -silesia, uncompressed literals optimal, compress cctx, 4293330 -silesia, huffman literals, compress cctx, 6178460 -silesia, multithreaded with advanced params, compress cctx, 4849552 -github, level -5, compress cctx, 205285 +silesia.tar, level -5, compress simple, 7359401 
+silesia.tar, level -3, compress simple, 6901672 +silesia.tar, level -1, compress simple, 6182241 +silesia.tar, level 0, compress simple, 4854086 +silesia.tar, level 1, compress simple, 5331946 +silesia.tar, level 3, compress simple, 4854086 +silesia.tar, level 4, compress simple, 4791503 +silesia.tar, level 5, compress simple, 4649987 +silesia.tar, level 6, compress simple, 4616797 +silesia.tar, level 7, compress simple, 4576661 +silesia.tar, level 9, compress simple, 4552381 +silesia.tar, level 13, compress simple, 4502956 +silesia.tar, level 16, compress simple, 4360527 +silesia.tar, level 19, compress simple, 4267266 +silesia.tar, uncompressed literals, compress simple, 4854086 +silesia.tar, uncompressed literals optimal, compress simple, 4267266 +silesia.tar, huffman literals, compress simple, 6182241 +github.tar, level -5, compress simple, 66914 +github.tar, level -3, compress simple, 52127 +github.tar, level -1, compress simple, 42560 +github.tar, level 0, compress simple, 38831 +github.tar, level 1, compress simple, 39200 +github.tar, level 3, compress simple, 38831 +github.tar, level 4, compress simple, 38893 +github.tar, level 5, compress simple, 38366 +github.tar, level 6, compress simple, 38648 +github.tar, level 7, compress simple, 38110 +github.tar, level 9, compress simple, 36760 +github.tar, level 13, compress simple, 35501 +github.tar, level 16, compress simple, 40471 +github.tar, level 19, compress simple, 32134 +github.tar, uncompressed literals, compress simple, 38831 +github.tar, uncompressed literals optimal, compress simple, 32134 +github.tar, huffman literals, compress simple, 42560 +silesia, level -5, compress cctx, 7354675 +silesia, level -3, compress cctx, 6902374 +silesia, level -1, compress cctx, 6177565 +silesia, level 0, compress cctx, 4842075 +silesia, level 1, compress cctx, 5309098 +silesia, level 3, compress cctx, 4842075 +silesia, level 4, compress cctx, 4779186 +silesia, level 5, compress cctx, 4638691 +silesia, level 6, compress cctx, 4605296 +silesia, level 7, compress cctx, 4566984 +silesia, level 9, compress cctx, 4543018 +silesia, level 13, compress cctx, 4493990 +silesia, level 16, compress cctx, 4359864 +silesia, level 19, compress cctx, 4296880 +silesia, long distance mode, compress cctx, 4842075 +silesia, multithreaded, compress cctx, 4842075 +silesia, multithreaded long distance mode, compress cctx, 4842075 +silesia, small window log, compress cctx, 7082951 +silesia, small hash log, compress cctx, 6526141 +silesia, small chain log, compress cctx, 4912197 +silesia, explicit params, compress cctx, 4794052 +silesia, uncompressed literals, compress cctx, 4842075 +silesia, uncompressed literals optimal, compress cctx, 4296880 +silesia, huffman literals, compress cctx, 6177565 +silesia, multithreaded with advanced params, compress cctx, 4842075 +github, level -5, compress cctx, 232315 github, level -5 with dict, compress cctx, 47294 -github, level -3, compress cctx, 190643 +github, level -3, compress cctx, 220760 github, level -3 with dict, compress cctx, 48047 -github, level -1, compress cctx, 175568 +github, level -1, compress cctx, 175468 github, level -1 with dict, compress cctx, 43527 -github, level 0, compress cctx, 136335 +github, level 0, compress cctx, 136332 github, level 0 with dict, compress cctx, 41534 -github, level 1, compress cctx, 142465 +github, level 1, compress cctx, 142365 github, level 1 with dict, compress cctx, 42157 -github, level 3, compress cctx, 136335 +github, level 3, compress cctx, 136332 github, level 3 with dict, 
compress cctx, 41534 github, level 4, compress cctx, 136199 github, level 4 with dict, compress cctx, 41725 @@ -86,231 +86,235 @@ github, level 16, compress github, level 16 with dict, compress cctx, 37568 github, level 19, compress cctx, 134064 github, level 19 with dict, compress cctx, 37567 -github, long distance mode, compress cctx, 141102 -github, multithreaded, compress cctx, 141102 -github, multithreaded long distance mode, compress cctx, 141102 -github, small window log, compress cctx, 141102 +github, long distance mode, compress cctx, 141069 +github, multithreaded, compress cctx, 141069 +github, multithreaded long distance mode, compress cctx, 141069 +github, small window log, compress cctx, 141069 github, small hash log, compress cctx, 138949 github, small chain log, compress cctx, 139242 github, explicit params, compress cctx, 140932 -github, uncompressed literals, compress cctx, 136335 +github, uncompressed literals, compress cctx, 136332 github, uncompressed literals optimal, compress cctx, 134064 -github, huffman literals, compress cctx, 175568 -github, multithreaded with advanced params, compress cctx, 141102 -silesia, level -5, zstdcli, 6737655 -silesia, level -3, zstdcli, 6444725 -silesia, level -1, zstdcli, 6178508 -silesia, level 0, zstdcli, 4849600 -silesia, level 1, zstdcli, 5313252 -silesia, level 3, zstdcli, 4849600 -silesia, level 4, zstdcli, 4787018 -silesia, level 5, zstdcli, 4707842 -silesia, level 6, zstdcli, 4666431 -silesia, level 7, zstdcli, 4603429 -silesia, level 9, zstdcli, 4546049 -silesia, level 13, zstdcli, 4482183 -silesia, level 16, zstdcli, 4360299 -silesia, level 19, zstdcli, 4283285 -silesia, long distance mode, zstdcli, 4840806 -silesia, multithreaded, zstdcli, 4849600 -silesia, multithreaded long distance mode, zstdcli, 4840806 -silesia, small window log, zstdcli, 7095967 +github, huffman literals, compress cctx, 175468 +github, multithreaded with advanced params, compress cctx, 141069 +silesia, level -5, zstdcli, 7354723 +silesia, level -3, zstdcli, 6902422 +silesia, level -1, zstdcli, 6177613 +silesia, level 0, zstdcli, 4842123 +silesia, level 1, zstdcli, 5309146 +silesia, level 3, zstdcli, 4842123 +silesia, level 4, zstdcli, 4779234 +silesia, level 5, zstdcli, 4638739 +silesia, level 6, zstdcli, 4605344 +silesia, level 7, zstdcli, 4567032 +silesia, level 9, zstdcli, 4543066 +silesia, level 13, zstdcli, 4494038 +silesia, level 16, zstdcli, 4359912 +silesia, level 19, zstdcli, 4296928 +silesia, long distance mode, zstdcli, 4833785 +silesia, multithreaded, zstdcli, 4842123 +silesia, multithreaded long distance mode, zstdcli, 4833785 +silesia, small window log, zstdcli, 7095048 silesia, small hash log, zstdcli, 6526189 silesia, small chain log, zstdcli, 4912245 -silesia, explicit params, zstdcli, 4795856 -silesia, uncompressed literals, zstdcli, 5128030 -silesia, uncompressed literals optimal, zstdcli, 4317944 -silesia, huffman literals, zstdcli, 5326316 -silesia, multithreaded with advanced params, zstdcli, 5128030 -silesia.tar, level -5, zstdcli, 6738934 -silesia.tar, level -3, zstdcli, 6448419 -silesia.tar, level -1, zstdcli, 6186912 -silesia.tar, level 0, zstdcli, 4861512 -silesia.tar, level 1, zstdcli, 5336318 -silesia.tar, level 3, zstdcli, 4861512 -silesia.tar, level 4, zstdcli, 4800529 -silesia.tar, level 5, zstdcli, 4720121 -silesia.tar, level 6, zstdcli, 4678661 -silesia.tar, level 7, zstdcli, 4614424 -silesia.tar, level 9, zstdcli, 4556062 -silesia.tar, level 13, zstdcli, 4491768 -silesia.tar, level 16, zstdcli, 4356831 -silesia.tar, 
level 19, zstdcli, 4264491 -silesia.tar, no source size, zstdcli, 4861508 -silesia.tar, long distance mode, zstdcli, 4853226 -silesia.tar, multithreaded, zstdcli, 4861512 -silesia.tar, multithreaded long distance mode, zstdcli, 4853226 -silesia.tar, small window log, zstdcli, 7101576 -silesia.tar, small hash log, zstdcli, 6529290 +silesia, explicit params, zstdcli, 4795432 +silesia, uncompressed literals, zstdcli, 5120614 +silesia, uncompressed literals optimal, zstdcli, 4319566 +silesia, huffman literals, zstdcli, 5326394 +silesia, multithreaded with advanced params, zstdcli, 5120614 +silesia.tar, level -5, zstdcli, 7363866 +silesia.tar, level -3, zstdcli, 6902158 +silesia.tar, level -1, zstdcli, 6182939 +silesia.tar, level 0, zstdcli, 4854164 +silesia.tar, level 1, zstdcli, 5333183 +silesia.tar, level 3, zstdcli, 4854164 +silesia.tar, level 4, zstdcli, 4792352 +silesia.tar, level 5, zstdcli, 4650946 +silesia.tar, level 6, zstdcli, 4618390 +silesia.tar, level 7, zstdcli, 4578719 +silesia.tar, level 9, zstdcli, 4553299 +silesia.tar, level 13, zstdcli, 4502960 +silesia.tar, level 16, zstdcli, 4360531 +silesia.tar, level 19, zstdcli, 4267270 +silesia.tar, no source size, zstdcli, 4854160 +silesia.tar, long distance mode, zstdcli, 4845745 +silesia.tar, multithreaded, zstdcli, 4854164 +silesia.tar, multithreaded long distance mode, zstdcli, 4845745 +silesia.tar, small window log, zstdcli, 7100701 +silesia.tar, small hash log, zstdcli, 6529289 silesia.tar, small chain log, zstdcli, 4917022 -silesia.tar, explicit params, zstdcli, 4821274 -silesia.tar, uncompressed literals, zstdcli, 5129559 -silesia.tar, uncompressed literals optimal, zstdcli, 4307457 -silesia.tar, huffman literals, zstdcli, 5347610 -silesia.tar, multithreaded with advanced params, zstdcli, 5129559 -github, level -5, zstdcli, 207285 +silesia.tar, explicit params, zstdcli, 4820713 +silesia.tar, uncompressed literals, zstdcli, 5122571 +silesia.tar, uncompressed literals optimal, zstdcli, 4310145 +silesia.tar, huffman literals, zstdcli, 5344915 +silesia.tar, multithreaded with advanced params, zstdcli, 5122571 +github, level -5, zstdcli, 234315 github, level -5 with dict, zstdcli, 48718 -github, level -3, zstdcli, 192643 +github, level -3, zstdcli, 222760 github, level -3 with dict, zstdcli, 47395 -github, level -1, zstdcli, 177568 +github, level -1, zstdcli, 177468 github, level -1 with dict, zstdcli, 45170 -github, level 0, zstdcli, 138335 +github, level 0, zstdcli, 138332 github, level 0 with dict, zstdcli, 43148 -github, level 1, zstdcli, 144465 +github, level 1, zstdcli, 144365 github, level 1 with dict, zstdcli, 43682 -github, level 3, zstdcli, 138335 +github, level 3, zstdcli, 138332 github, level 3 with dict, zstdcli, 43148 github, level 4, zstdcli, 138199 github, level 4 with dict, zstdcli, 43251 github, level 5, zstdcli, 137121 github, level 5 with dict, zstdcli, 40728 github, level 6, zstdcli, 137122 -github, level 6 with dict, zstdcli, 40630 +github, level 6 with dict, zstdcli, 40636 github, level 7, zstdcli, 137122 -github, level 7 with dict, zstdcli, 40747 +github, level 7 with dict, zstdcli, 40745 github, level 9, zstdcli, 137122 -github, level 9 with dict, zstdcli, 41338 +github, level 9 with dict, zstdcli, 41393 github, level 13, zstdcli, 136064 -github, level 13 with dict, zstdcli, 41743 +github, level 13 with dict, zstdcli, 41900 github, level 16, zstdcli, 136064 github, level 16 with dict, zstdcli, 39577 github, level 19, zstdcli, 136064 github, level 19 with dict, zstdcli, 39576 -github, long distance mode, 
zstdcli, 138335 -github, multithreaded, zstdcli, 138335 -github, multithreaded long distance mode, zstdcli, 138335 -github, small window log, zstdcli, 138335 +github, long distance mode, zstdcli, 138332 +github, multithreaded, zstdcli, 138332 +github, multithreaded long distance mode, zstdcli, 138332 +github, small window log, zstdcli, 138332 github, small hash log, zstdcli, 137590 github, small chain log, zstdcli, 138341 github, explicit params, zstdcli, 136197 -github, uncompressed literals, zstdcli, 167915 +github, uncompressed literals, zstdcli, 167911 github, uncompressed literals optimal, zstdcli, 159227 -github, huffman literals, zstdcli, 144465 -github, multithreaded with advanced params, zstdcli, 167915 -github.tar, level -5, zstdcli, 46860 -github.tar, level -5 with dict, zstdcli, 44575 -github.tar, level -3, zstdcli, 43758 -github.tar, level -3 with dict, zstdcli, 41451 -github.tar, level -1, zstdcli, 42494 -github.tar, level -1 with dict, zstdcli, 41135 -github.tar, level 0, zstdcli, 38445 +github, huffman literals, zstdcli, 144365 +github, multithreaded with advanced params, zstdcli, 167911 +github.tar, level -5, zstdcli, 66918 +github.tar, level -5 with dict, zstdcli, 51529 +github.tar, level -3, zstdcli, 52131 +github.tar, level -3 with dict, zstdcli, 44246 +github.tar, level -1, zstdcli, 42564 +github.tar, level -1 with dict, zstdcli, 41140 +github.tar, level 0, zstdcli, 38835 github.tar, level 0 with dict, zstdcli, 37999 -github.tar, level 1, zstdcli, 39269 -github.tar, level 1 with dict, zstdcli, 38284 -github.tar, level 3, zstdcli, 38445 +github.tar, level 1, zstdcli, 39204 +github.tar, level 1 with dict, zstdcli, 38288 +github.tar, level 3, zstdcli, 38835 github.tar, level 3 with dict, zstdcli, 37999 -github.tar, level 4, zstdcli, 38471 +github.tar, level 4, zstdcli, 38897 github.tar, level 4 with dict, zstdcli, 37952 -github.tar, level 5, zstdcli, 39697 -github.tar, level 5 with dict, zstdcli, 39032 -github.tar, level 6, zstdcli, 39625 -github.tar, level 6 with dict, zstdcli, 38614 -github.tar, level 7, zstdcli, 39217 -github.tar, level 7 with dict, zstdcli, 37871 -github.tar, level 9, zstdcli, 36762 -github.tar, level 9 with dict, zstdcli, 36641 -github.tar, level 13, zstdcli, 35625 -github.tar, level 13 with dict, zstdcli, 38730 -github.tar, level 16, zstdcli, 40259 -github.tar, level 16 with dict, zstdcli, 33643 -github.tar, level 19, zstdcli, 32841 -github.tar, level 19 with dict, zstdcli, 32899 -github.tar, no source size, zstdcli, 38442 +github.tar, level 5, zstdcli, 38370 +github.tar, level 5 with dict, zstdcli, 39071 +github.tar, level 6, zstdcli, 38652 +github.tar, level 6 with dict, zstdcli, 38638 +github.tar, level 7, zstdcli, 38114 +github.tar, level 7 with dict, zstdcli, 37886 +github.tar, level 9, zstdcli, 36764 +github.tar, level 9 with dict, zstdcli, 36632 +github.tar, level 13, zstdcli, 35505 +github.tar, level 13 with dict, zstdcli, 37134 +github.tar, level 16, zstdcli, 40475 +github.tar, level 16 with dict, zstdcli, 33382 +github.tar, level 19, zstdcli, 32138 +github.tar, level 19 with dict, zstdcli, 32713 +github.tar, no source size, zstdcli, 38832 github.tar, no source size with dict, zstdcli, 38004 -github.tar, long distance mode, zstdcli, 39730 -github.tar, multithreaded, zstdcli, 38445 -github.tar, multithreaded long distance mode, zstdcli, 39730 +github.tar, long distance mode, zstdcli, 40236 +github.tar, multithreaded, zstdcli, 38835 +github.tar, multithreaded long distance mode, zstdcli, 40236 github.tar, small window log, zstdcli, 198544 
 github.tar, small hash log, zstdcli, 129874
 github.tar, small chain log, zstdcli, 41673
-github.tar, explicit params, zstdcli, 41227
-github.tar, uncompressed literals, zstdcli, 41126
-github.tar, uncompressed literals optimal, zstdcli, 35392
-github.tar, huffman literals, zstdcli, 38781
-github.tar, multithreaded with advanced params, zstdcli, 41126
-silesia, level -5, advanced one pass, 6737607
-silesia, level -3, advanced one pass, 6444677
-silesia, level -1, advanced one pass, 6178460
-silesia, level 0, advanced one pass, 4849552
-silesia, level 1, advanced one pass, 5313204
-silesia, level 3, advanced one pass, 4849552
-silesia, level 4, advanced one pass, 4786970
-silesia, level 5 row 1, advanced one pass, 4710236
-silesia, level 5 row 2, advanced one pass, 4707794
-silesia, level 5, advanced one pass, 4707794
-silesia, level 6, advanced one pass, 4666383
-silesia, level 7 row 1, advanced one pass, 4596296
-silesia, level 7 row 2, advanced one pass, 4603381
-silesia, level 7, advanced one pass, 4603381
-silesia, level 9, advanced one pass, 4546001
-silesia, level 12 row 1, advanced one pass, 4519288
-silesia, level 12 row 2, advanced one pass, 4521397
-silesia, level 13, advanced one pass, 4482135
-silesia, level 16, advanced one pass, 4360251
-silesia, level 19, advanced one pass, 4283237
-silesia, no source size, advanced one pass, 4849552
-silesia, long distance mode, advanced one pass, 4840738
-silesia, multithreaded, advanced one pass, 4849552
-silesia, multithreaded long distance mode, advanced one pass, 4840758
-silesia, small window log, advanced one pass, 7095919
+github.tar, explicit params, zstdcli, 41385
+github.tar, uncompressed literals, zstdcli, 41529
+github.tar, uncompressed literals optimal, zstdcli, 35401
+github.tar, huffman literals, zstdcli, 38857
+github.tar, multithreaded with advanced params, zstdcli, 41529
+silesia, level -5, advanced one pass, 7354675
+silesia, level -3, advanced one pass, 6902374
+silesia, level -1, advanced one pass, 6177565
+silesia, level 0, advanced one pass, 4842075
+silesia, level 1, advanced one pass, 5309098
+silesia, level 3, advanced one pass, 4842075
+silesia, level 4, advanced one pass, 4779186
+silesia, level 5 row 1, advanced one pass, 4638691
+silesia, level 5 row 2, advanced one pass, 4640752
+silesia, level 5, advanced one pass, 4638691
+silesia, level 6, advanced one pass, 4605296
+silesia, level 7 row 1, advanced one pass, 4566984
+silesia, level 7 row 2, advanced one pass, 4564868
+silesia, level 7, advanced one pass, 4566984
+silesia, level 9, advanced one pass, 4543018
+silesia, level 11 row 1, advanced one pass, 4521323
+silesia, level 11 row 2, advanced one pass, 4519288
+silesia, level 12 row 1, advanced one pass, 4505046
+silesia, level 12 row 2, advanced one pass, 4503116
+silesia, level 13, advanced one pass, 4493990
+silesia, level 16, advanced one pass, 4359864
+silesia, level 19, advanced one pass, 4296880
+silesia, no source size, advanced one pass, 4842075
+silesia, long distance mode, advanced one pass, 4833710
+silesia, multithreaded, advanced one pass, 4842075
+silesia, multithreaded long distance mode, advanced one pass, 4833737
+silesia, small window log, advanced one pass, 7095000
 silesia, small hash log, advanced one pass, 6526141
 silesia, small chain log, advanced one pass, 4912197
-silesia, explicit params, advanced one pass, 4795856
-silesia, uncompressed literals, advanced one pass, 5127982
-silesia, uncompressed literals optimal, advanced one pass, 4317896
-silesia, huffman literals, advanced one pass, 5326268
-silesia, multithreaded with advanced params, advanced one pass, 5127982
-silesia.tar, level -5, advanced one pass, 6738593
-silesia.tar, level -3, advanced one pass, 6446372
-silesia.tar, level -1, advanced one pass, 6186042
-silesia.tar, level 0, advanced one pass, 4861425
-silesia.tar, level 1, advanced one pass, 5334885
-silesia.tar, level 3, advanced one pass, 4861425
-silesia.tar, level 4, advanced one pass, 4799630
-silesia.tar, level 5 row 1, advanced one pass, 4722324
-silesia.tar, level 5 row 2, advanced one pass, 4719256
-silesia.tar, level 5, advanced one pass, 4719256
-silesia.tar, level 6, advanced one pass, 4677721
-silesia.tar, level 7 row 1, advanced one pass, 4606715
-silesia.tar, level 7 row 2, advanced one pass, 4613541
-silesia.tar, level 7, advanced one pass, 4613541
-silesia.tar, level 9, advanced one pass, 4555426
-silesia.tar, level 12 row 1, advanced one pass, 4529459
-silesia.tar, level 12 row 2, advanced one pass, 4530256
-silesia.tar, level 13, advanced one pass, 4491764
-silesia.tar, level 16, advanced one pass, 4356827
-silesia.tar, level 19, advanced one pass, 4264487
-silesia.tar, no source size, advanced one pass, 4861425
-silesia.tar, long distance mode, advanced one pass, 4847754
-silesia.tar, multithreaded, advanced one pass, 4861508
-silesia.tar, multithreaded long distance mode, advanced one pass, 4853222
-silesia.tar, small window log, advanced one pass, 7101530
-silesia.tar, small hash log, advanced one pass, 6529232
+silesia, explicit params, advanced one pass, 4795432
+silesia, uncompressed literals, advanced one pass, 5120566
+silesia, uncompressed literals optimal, advanced one pass, 4319518
+silesia, huffman literals, advanced one pass, 5326346
+silesia, multithreaded with advanced params, advanced one pass, 5120566
+silesia.tar, level -5, advanced one pass, 7359401
+silesia.tar, level -3, advanced one pass, 6901672
+silesia.tar, level -1, advanced one pass, 6182241
+silesia.tar, level 0, advanced one pass, 4854086
+silesia.tar, level 1, advanced one pass, 5331946
+silesia.tar, level 3, advanced one pass, 4854086
+silesia.tar, level 4, advanced one pass, 4791503
+silesia.tar, level 5 row 1, advanced one pass, 4649987
+silesia.tar, level 5 row 2, advanced one pass, 4652862
+silesia.tar, level 5, advanced one pass, 4649987
+silesia.tar, level 6, advanced one pass, 4616797
+silesia.tar, level 7 row 1, advanced one pass, 4576661
+silesia.tar, level 7 row 2, advanced one pass, 4575393
+silesia.tar, level 7, advanced one pass, 4576661
+silesia.tar, level 9, advanced one pass, 4552381
+silesia.tar, level 11 row 1, advanced one pass, 4530241
+silesia.tar, level 11 row 2, advanced one pass, 4529461
+silesia.tar, level 12 row 1, advanced one pass, 4514432
+silesia.tar, level 12 row 2, advanced one pass, 4513604
+silesia.tar, level 13, advanced one pass, 4502956
+silesia.tar, level 16, advanced one pass, 4360527
+silesia.tar, level 19, advanced one pass, 4267266
+silesia.tar, no source size, advanced one pass, 4854086
+silesia.tar, long distance mode, advanced one pass, 4840452
+silesia.tar, multithreaded, advanced one pass, 4854160
+silesia.tar, multithreaded long distance mode, advanced one pass, 4845741
+silesia.tar, small window log, advanced one pass, 7100655
+silesia.tar, small hash log, advanced one pass, 6529231
 silesia.tar, small chain log, advanced one pass, 4917041
-silesia.tar, explicit params, advanced one pass, 4807380
-silesia.tar, uncompressed literals, advanced one pass, 5129458
-silesia.tar, uncompressed literals optimal, advanced one pass, 4307453
-silesia.tar, huffman literals, advanced one pass, 5347335
-silesia.tar, multithreaded with advanced params, advanced one pass, 5129555
-github, level -5, advanced one pass, 205285
+silesia.tar, explicit params, advanced one pass, 4806855
+silesia.tar, uncompressed literals, advanced one pass, 5122473
+silesia.tar, uncompressed literals optimal, advanced one pass, 4310141
+silesia.tar, huffman literals, advanced one pass, 5344545
+silesia.tar, multithreaded with advanced params, advanced one pass, 5122567
+github, level -5, advanced one pass, 232315
 github, level -5 with dict, advanced one pass, 46718
-github, level -3, advanced one pass, 190643
+github, level -3, advanced one pass, 220760
 github, level -3 with dict, advanced one pass, 45395
-github, level -1, advanced one pass, 175568
+github, level -1, advanced one pass, 175468
 github, level -1 with dict, advanced one pass, 43170
-github, level 0, advanced one pass, 136335
+github, level 0, advanced one pass, 136332
 github, level 0 with dict, advanced one pass, 41148
 github, level 0 with dict dms, advanced one pass, 41148
 github, level 0 with dict dds, advanced one pass, 41148
 github, level 0 with dict copy, advanced one pass, 41124
 github, level 0 with dict load, advanced one pass, 42252
-github, level 1, advanced one pass, 142465
+github, level 1, advanced one pass, 142365
 github, level 1 with dict, advanced one pass, 41682
 github, level 1 with dict dms, advanced one pass, 41682
 github, level 1 with dict dds, advanced one pass, 41682
 github, level 1 with dict copy, advanced one pass, 41674
 github, level 1 with dict load, advanced one pass, 43755
-github, level 3, advanced one pass, 136335
+github, level 3, advanced one pass, 136332
 github, level 3 with dict, advanced one pass, 41148
 github, level 3 with dict dms, advanced one pass, 41148
 github, level 3 with dict dds, advanced one pass, 41148
@@ -322,16 +326,16 @@ github, level 4 with dict dms, advanced
 github, level 4 with dict dds, advanced one pass, 41251
 github, level 4 with dict copy, advanced one pass, 41216
 github, level 4 with dict load, advanced one pass, 41159
-github, level 5 row 1, advanced one pass, 135121
-github, level 5 row 1 with dict dms, advanced one pass, 38938
-github, level 5 row 1 with dict dds, advanced one pass, 38732
-github, level 5 row 1 with dict copy, advanced one pass, 38934
-github, level 5 row 1 with dict load, advanced one pass, 40725
-github, level 5 row 2, advanced one pass, 134584
-github, level 5 row 2 with dict dms, advanced one pass, 38758
-github, level 5 row 2 with dict dds, advanced one pass, 38728
-github, level 5 row 2 with dict copy, advanced one pass, 38759
-github, level 5 row 2 with dict load, advanced one pass, 41518
+github, level 5 row 1, advanced one pass, 134584
+github, level 5 row 1 with dict dms, advanced one pass, 38758
+github, level 5 row 1 with dict dds, advanced one pass, 38728
+github, level 5 row 1 with dict copy, advanced one pass, 38759
+github, level 5 row 1 with dict load, advanced one pass, 41518
+github, level 5 row 2, advanced one pass, 135121
+github, level 5 row 2 with dict dms, advanced one pass, 38938
+github, level 5 row 2 with dict dds, advanced one pass, 38732
+github, level 5 row 2 with dict copy, advanced one pass, 38934
+github, level 5 row 2 with dict load, advanced one pass, 40725
 github, level 5, advanced one pass, 135121
 github, level 5 with dict, advanced one pass, 38758
 github, level 5 with dict dms, advanced one pass, 38758
@@ -341,31 +345,41 @@ github, level 5 with dict load, advanced
 github, level 6, advanced one pass, 135122
 github, level 6 with dict, advanced one pass, 38671
 github, level 6 with dict dms, advanced one pass, 38671
-github, level 6 with dict dds, advanced one pass, 38630
+github, level 6 with dict dds, advanced one pass, 38636
 github, level 6 with dict copy, advanced one pass, 38669
 github, level 6 with dict load, advanced one pass, 40695
-github, level 7 row 1, advanced one pass, 135122
-github, level 7 row 1 with dict dms, advanced one pass, 38771
-github, level 7 row 1 with dict dds, advanced one pass, 38771
-github, level 7 row 1 with dict copy, advanced one pass, 38745
-github, level 7 row 1 with dict load, advanced one pass, 40695
-github, level 7 row 2, advanced one pass, 134584
-github, level 7 row 2 with dict dms, advanced one pass, 38758
-github, level 7 row 2 with dict dds, advanced one pass, 38747
-github, level 7 row 2 with dict copy, advanced one pass, 38755
-github, level 7 row 2 with dict load, advanced one pass, 41030
+github, level 7 row 1, advanced one pass, 134584
+github, level 7 row 1 with dict dms, advanced one pass, 38758
+github, level 7 row 1 with dict dds, advanced one pass, 38745
+github, level 7 row 1 with dict copy, advanced one pass, 38755
+github, level 7 row 1 with dict load, advanced one pass, 43154
+github, level 7 row 2, advanced one pass, 135122
+github, level 7 row 2 with dict dms, advanced one pass, 38860
+github, level 7 row 2 with dict dds, advanced one pass, 38766
+github, level 7 row 2 with dict copy, advanced one pass, 38834
+github, level 7 row 2 with dict load, advanced one pass, 40695
 github, level 7, advanced one pass, 135122
 github, level 7 with dict, advanced one pass, 38758
 github, level 7 with dict dms, advanced one pass, 38758
-github, level 7 with dict dds, advanced one pass, 38747
+github, level 7 with dict dds, advanced one pass, 38745
 github, level 7 with dict copy, advanced one pass, 38755
 github, level 7 with dict load, advanced one pass, 40695
 github, level 9, advanced one pass, 135122
 github, level 9 with dict, advanced one pass, 39437
 github, level 9 with dict dms, advanced one pass, 39437
-github, level 9 with dict dds, advanced one pass, 39338
+github, level 9 with dict dds, advanced one pass, 39393
 github, level 9 with dict copy, advanced one pass, 39398
 github, level 9 with dict load, advanced one pass, 41710
+github, level 11 row 1, advanced one pass, 135119
+github, level 11 row 1 with dict dms, advanced one pass, 39671
+github, level 11 row 1 with dict dds, advanced one pass, 39671
+github, level 11 row 1 with dict copy, advanced one pass, 39651
+github, level 11 row 1 with dict load, advanced one pass, 41360
+github, level 11 row 2, advanced one pass, 135119
+github, level 11 row 2 with dict dms, advanced one pass, 39671
+github, level 11 row 2 with dict dds, advanced one pass, 39671
+github, level 11 row 2 with dict copy, advanced one pass, 39651
+github, level 11 row 2 with dict load, advanced one pass, 41360
 github, level 12 row 1, advanced one pass, 134180
 github, level 12 row 1 with dict dms, advanced one pass, 39677
 github, level 12 row 1 with dict dds, advanced one pass, 39677
@@ -377,9 +391,9 @@ github, level 12 row 2 with dict dds, advanced
 github, level 12 row 2 with dict copy, advanced one pass, 39677
 github, level 12 row 2 with dict load, advanced one pass, 41166
 github, level 13, advanced one pass, 134064
-github, level 13 with dict, advanced one pass, 39743
-github, level 13 with dict dms, advanced one pass, 39743
-github, level 13 with dict dds, advanced one pass, 39743
+github, level 13 with dict, advanced one pass, 39900
+github, level 13 with dict dms, advanced one pass, 39900
+github, level 13 with dict dds, advanced one pass, 39900
 github, level 13 with dict copy, advanced one pass, 39948
 github, level 13 with dict load, advanced one pass, 42626
 github, level 16, advanced one pass, 134064
@@ -394,217 +408,231 @@ github, level 19 with dict dms, advanced
 github, level 19 with dict dds, advanced one pass, 37576
 github, level 19 with dict copy, advanced one pass, 37567
 github, level 19 with dict load, advanced one pass, 39613
-github, no source size, advanced one pass, 136335
+github, no source size, advanced one pass, 136332
 github, no source size with dict, advanced one pass, 41148
-github, long distance mode, advanced one pass, 136335
-github, multithreaded, advanced one pass, 136335
-github, multithreaded long distance mode, advanced one pass, 136335
-github, small window log, advanced one pass, 136335
+github, long distance mode, advanced one pass, 136332
+github, multithreaded, advanced one pass, 136332
+github, multithreaded long distance mode, advanced one pass, 136332
+github, small window log, advanced one pass, 136332
 github, small hash log, advanced one pass, 135590
 github, small chain log, advanced one pass, 136341
 github, explicit params, advanced one pass, 137727
-github, uncompressed literals, advanced one pass, 165915
+github, uncompressed literals, advanced one pass, 165911
 github, uncompressed literals optimal, advanced one pass, 157227
-github, huffman literals, advanced one pass, 142465
-github, multithreaded with advanced params, advanced one pass, 165915
-github.tar, level -5, advanced one pass, 46856
-github.tar, level -5 with dict, advanced one pass, 44571
-github.tar, level -3, advanced one pass, 43754
-github.tar, level -3 with dict, advanced one pass, 41447
-github.tar, level -1, advanced one pass, 42490
-github.tar, level -1 with dict, advanced one pass, 41131
-github.tar, level 0, advanced one pass, 38441
+github, huffman literals, advanced one pass, 142365
+github, multithreaded with advanced params, advanced one pass, 165911
+github.tar, level -5, advanced one pass, 66914
+github.tar, level -5 with dict, advanced one pass, 51525
+github.tar, level -3, advanced one pass, 52127
+github.tar, level -3 with dict, advanced one pass, 44242
+github.tar, level -1, advanced one pass, 42560
+github.tar, level -1 with dict, advanced one pass, 41136
+github.tar, level 0, advanced one pass, 38831
 github.tar, level 0 with dict, advanced one pass, 37995
 github.tar, level 0 with dict dms, advanced one pass, 38003
 github.tar, level 0 with dict dds, advanced one pass, 38003
 github.tar, level 0 with dict copy, advanced one pass, 37995
 github.tar, level 0 with dict load, advanced one pass, 37956
-github.tar, level 1, advanced one pass, 39265
-github.tar, level 1 with dict, advanced one pass, 38280
-github.tar, level 1 with dict dms, advanced one pass, 38290
-github.tar, level 1 with dict dds, advanced one pass, 38290
-github.tar, level 1 with dict copy, advanced one pass, 38280
-github.tar, level 1 with dict load, advanced one pass, 38729
-github.tar, level 3, advanced one pass, 38441
+github.tar, level 1, advanced one pass, 39200
+github.tar, level 1 with dict, advanced one pass, 38284
+github.tar, level 1 with dict dms, advanced one pass, 38294
+github.tar, level 1 with dict dds, advanced one pass, 38294
+github.tar, level 1 with dict copy, advanced one pass, 38284
+github.tar, level 1 with dict load, advanced one pass, 38724
+github.tar, level 3, advanced one pass, 38831
 github.tar, level 3 with dict, advanced one pass, 37995
 github.tar, level 3 with dict dms, advanced one pass, 38003
 github.tar, level 3 with dict dds, advanced one pass, 38003
 github.tar, level 3 with dict copy, advanced one pass, 37995
 github.tar, level 3 with dict load, advanced one pass, 37956
-github.tar, level 4, advanced one pass, 38467
+github.tar, level 4, advanced one pass, 38893
 github.tar, level 4 with dict, advanced one pass, 37948
 github.tar, level 4 with dict dms, advanced one pass, 37954
 github.tar, level 4 with dict dds, advanced one pass, 37954
 github.tar, level 4 with dict copy, advanced one pass, 37948
 github.tar, level 4 with dict load, advanced one pass, 37927
-github.tar, level 5 row 1, advanced one pass, 39788
-github.tar, level 5 row 1 with dict dms, advanced one pass, 39365
-github.tar, level 5 row 1 with dict dds, advanced one pass, 39233
-github.tar, level 5 row 1 with dict copy, advanced one pass, 39715
-github.tar, level 5 row 1 with dict load, advanced one pass, 39209
-github.tar, level 5 row 2, advanced one pass, 39693
-github.tar, level 5 row 2 with dict dms, advanced one pass, 39024
-github.tar, level 5 row 2 with dict dds, advanced one pass, 39028
-github.tar, level 5 row 2 with dict copy, advanced one pass, 39040
-github.tar, level 5 row 2 with dict load, advanced one pass, 39037
-github.tar, level 5, advanced one pass, 39693
-github.tar, level 5 with dict, advanced one pass, 39040
-github.tar, level 5 with dict dms, advanced one pass, 39024
-github.tar, level 5 with dict dds, advanced one pass, 39028
-github.tar, level 5 with dict copy, advanced one pass, 39040
-github.tar, level 5 with dict load, advanced one pass, 39037
-github.tar, level 6, advanced one pass, 39621
-github.tar, level 6 with dict, advanced one pass, 38622
-github.tar, level 6 with dict dms, advanced one pass, 38608
-github.tar, level 6 with dict dds, advanced one pass, 38610
-github.tar, level 6 with dict copy, advanced one pass, 38622
-github.tar, level 6 with dict load, advanced one pass, 38962
-github.tar, level 7 row 1, advanced one pass, 39206
-github.tar, level 7 row 1 with dict dms, advanced one pass, 37954
-github.tar, level 7 row 1 with dict dds, advanced one pass, 37954
-github.tar, level 7 row 1 with dict copy, advanced one pass, 38071
-github.tar, level 7 row 1 with dict load, advanced one pass, 38584
-github.tar, level 7 row 2, advanced one pass, 39213
-github.tar, level 7 row 2 with dict dms, advanced one pass, 37848
-github.tar, level 7 row 2 with dict dds, advanced one pass, 37867
-github.tar, level 7 row 2 with dict copy, advanced one pass, 37848
-github.tar, level 7 row 2 with dict load, advanced one pass, 38582
-github.tar, level 7, advanced one pass, 39213
-github.tar, level 7 with dict, advanced one pass, 37848
-github.tar, level 7 with dict dms, advanced one pass, 37848
-github.tar, level 7 with dict dds, advanced one pass, 37867
-github.tar, level 7 with dict copy, advanced one pass, 37848
-github.tar, level 7 with dict load, advanced one pass, 38582
-github.tar, level 9, advanced one pass, 36758
-github.tar, level 9 with dict, advanced one pass, 36457
-github.tar, level 9 with dict dms, advanced one pass, 36549
-github.tar, level 9 with dict dds, advanced one pass, 36637
-github.tar, level 9 with dict copy, advanced one pass, 36457
-github.tar, level 9 with dict load, advanced one pass, 36350
-github.tar, level 12 row 1, advanced one pass, 36435
+github.tar, level 5 row 1, advanced one pass, 38366
+github.tar, level 5 row 1 with dict dms, advanced one pass, 39059
+github.tar, level 5 row 1 with dict dds, advanced one pass, 39067
+github.tar, level 5 row 1 with dict copy, advanced one pass, 39082
+github.tar, level 5 row 1 with dict load, advanced one pass, 37656
+github.tar, level 5 row 2, advanced one pass, 38534
+github.tar, level 5 row 2 with dict dms, advanced one pass, 39365
+github.tar, level 5 row 2 with dict dds, advanced one pass, 39233
+github.tar, level 5 row 2 with dict copy, advanced one pass, 39715
+github.tar, level 5 row 2 with dict load, advanced one pass, 38019
+github.tar, level 5, advanced one pass, 38366
+github.tar, level 5 with dict, advanced one pass, 39082
+github.tar, level 5 with dict dms, advanced one pass, 39059
+github.tar, level 5 with dict dds, advanced one pass, 39067
+github.tar, level 5 with dict copy, advanced one pass, 39082
+github.tar, level 5 with dict load, advanced one pass, 37656
+github.tar, level 6, advanced one pass, 38648
+github.tar, level 6 with dict, advanced one pass, 38656
+github.tar, level 6 with dict dms, advanced one pass, 38636
+github.tar, level 6 with dict dds, advanced one pass, 38634
+github.tar, level 6 with dict copy, advanced one pass, 38656
+github.tar, level 6 with dict load, advanced one pass, 37865
+github.tar, level 7 row 1, advanced one pass, 38110
+github.tar, level 7 row 1 with dict dms, advanced one pass, 37858
+github.tar, level 7 row 1 with dict dds, advanced one pass, 37882
+github.tar, level 7 row 1 with dict copy, advanced one pass, 37865
+github.tar, level 7 row 1 with dict load, advanced one pass, 37436
+github.tar, level 7 row 2, advanced one pass, 38077
+github.tar, level 7 row 2 with dict dms, advanced one pass, 38012
+github.tar, level 7 row 2 with dict dds, advanced one pass, 38014
+github.tar, level 7 row 2 with dict copy, advanced one pass, 38101
+github.tar, level 7 row 2 with dict load, advanced one pass, 37402
+github.tar, level 7, advanced one pass, 38110
+github.tar, level 7 with dict, advanced one pass, 37865
+github.tar, level 7 with dict dms, advanced one pass, 37858
+github.tar, level 7 with dict dds, advanced one pass, 37882
+github.tar, level 7 with dict copy, advanced one pass, 37865
+github.tar, level 7 with dict load, advanced one pass, 37436
+github.tar, level 9, advanced one pass, 36760
+github.tar, level 9 with dict, advanced one pass, 36484
+github.tar, level 9 with dict dms, advanced one pass, 36567
+github.tar, level 9 with dict dds, advanced one pass, 36628
+github.tar, level 9 with dict copy, advanced one pass, 36484
+github.tar, level 9 with dict load, advanced one pass, 36401
+github.tar, level 11 row 1, advanced one pass, 36452
+github.tar, level 11 row 1 with dict dms, advanced one pass, 36963
+github.tar, level 11 row 1 with dict dds, advanced one pass, 36963
+github.tar, level 11 row 1 with dict copy, advanced one pass, 36557
+github.tar, level 11 row 1 with dict load, advanced one pass, 36455
+github.tar, level 11 row 2, advanced one pass, 36435
+github.tar, level 11 row 2 with dict dms, advanced one pass, 36963
+github.tar, level 11 row 2 with dict dds, advanced one pass, 36963
+github.tar, level 11 row 2 with dict copy, advanced one pass, 36557
+github.tar, level 11 row 2 with dict load, advanced one pass, 36419
+github.tar, level 12 row 1, advanced one pass, 36081
 github.tar, level 12 row 1 with dict dms, advanced one pass, 36986
 github.tar, level 12 row 1 with dict dds, advanced one pass, 36986
 github.tar, level 12 row 1 with dict copy, advanced one pass, 36609
-github.tar, level 12 row 1 with dict load, advanced one pass, 36419
-github.tar, level 12 row 2, advanced one pass, 36435
+github.tar, level 12 row 1 with dict load, advanced one pass, 36434
+github.tar, level 12 row 2, advanced one pass, 36110
 github.tar, level 12 row 2 with dict dms, advanced one pass, 36986
 github.tar, level 12 row 2 with dict dds, advanced one pass, 36986
 github.tar, level 12 row 2 with dict copy, advanced one pass, 36609
-github.tar, level 12 row 2 with dict load, advanced one pass, 36424
-github.tar, level 13, advanced one pass, 35621
-github.tar, level 13 with dict, advanced one pass, 38726
-github.tar, level 13 with dict dms, advanced one pass, 38903
-github.tar, level 13 with dict dds, advanced one pass, 38903
-github.tar, level 13 with dict copy, advanced one pass, 38726
-github.tar, level 13 with dict load, advanced one pass, 36372
-github.tar, level 16, advanced one pass, 40255
-github.tar, level 16 with dict, advanced one pass, 33639
-github.tar, level 16 with dict dms, advanced one pass, 33544
-github.tar, level 16 with dict dds, advanced one pass, 33544
-github.tar, level 16 with dict copy, advanced one pass, 33639
-github.tar, level 16 with dict load, advanced one pass, 39353
-github.tar, level 19, advanced one pass, 32837
-github.tar, level 19 with dict, advanced one pass, 32895
-github.tar, level 19 with dict dms, advanced one pass, 32672
-github.tar, level 19 with dict dds, advanced one pass, 32672
-github.tar, level 19 with dict copy, advanced one pass, 32895
-github.tar, level 19 with dict load, advanced one pass, 32676
-github.tar, no source size, advanced one pass, 38441
+github.tar, level 12 row 2 with dict load, advanced one pass, 36459
+github.tar, level 13, advanced one pass, 35501
+github.tar, level 13 with dict, advanced one pass, 37130
+github.tar, level 13 with dict dms, advanced one pass, 37220
+github.tar, level 13 with dict dds, advanced one pass, 37220
+github.tar, level 13 with dict copy, advanced one pass, 37130
+github.tar, level 13 with dict load, advanced one pass, 36010
+github.tar, level 16, advanced one pass, 40471
+github.tar, level 16 with dict, advanced one pass, 33378
+github.tar, level 16 with dict dms, advanced one pass, 33213
+github.tar, level 16 with dict dds, advanced one pass, 33213
+github.tar, level 16 with dict copy, advanced one pass, 33378
+github.tar, level 16 with dict load, advanced one pass, 39081
+github.tar, level 19, advanced one pass, 32134
+github.tar, level 19 with dict, advanced one pass, 32709
+github.tar, level 19 with dict dms, advanced one pass, 32553
+github.tar, level 19 with dict dds, advanced one pass, 32553
+github.tar, level 19 with dict copy, advanced one pass, 32709
+github.tar, level 19 with dict load, advanced one pass, 32474
+github.tar, no source size, advanced one pass, 38831
 github.tar, no source size with dict, advanced one pass, 37995
-github.tar, long distance mode, advanced one pass, 39757
-github.tar, multithreaded, advanced one pass, 38441
-github.tar, multithreaded long distance mode, advanced one pass, 39726
+github.tar, long distance mode, advanced one pass, 40252
+github.tar, multithreaded, advanced one pass, 38831
+github.tar, multithreaded long distance mode, advanced one pass, 40232
 github.tar, small window log, advanced one pass, 198540
 github.tar, small hash log, advanced one pass, 129870
 github.tar, small chain log, advanced one pass, 41669
-github.tar, explicit params, advanced one pass, 41227
-github.tar, uncompressed literals, advanced one pass, 41122
-github.tar, uncompressed literals optimal, advanced one pass, 35388
-github.tar, huffman literals, advanced one pass, 38777
-github.tar, multithreaded with advanced params, advanced one pass, 41122
-silesia, level -5, advanced one pass small out, 6737607
-silesia, level -3, advanced one pass small out, 6444677
-silesia, level -1, advanced one pass small out, 6178460
-silesia, level 0, advanced one pass small out, 4849552
-silesia, level 1, advanced one pass small out, 5313204
-silesia, level 3, advanced one pass small out, 4849552
-silesia, level 4, advanced one pass small out, 4786970
-silesia, level 5 row 1, advanced one pass small out, 4710236
-silesia, level 5 row 2, advanced one pass small out, 4707794
-silesia, level 5, advanced one pass small out, 4707794
-silesia, level 6, advanced one pass small out, 4666383
-silesia, level 7 row 1, advanced one pass small out, 4596296
-silesia, level 7 row 2, advanced one pass small out, 4603381
-silesia, level 7, advanced one pass small out, 4603381
-silesia, level 9, advanced one pass small out, 4546001
-silesia, level 12 row 1, advanced one pass small out, 4519288
-silesia, level 12 row 2, advanced one pass small out, 4521397
-silesia, level 13, advanced one pass small out, 4482135
-silesia, level 16, advanced one pass small out, 4360251
-silesia, level 19, advanced one pass small out, 4283237
-silesia, no source size, advanced one pass small out, 4849552
-silesia, long distance mode, advanced one pass small out, 4840738
-silesia, multithreaded, advanced one pass small out, 4849552
-silesia, multithreaded long distance mode, advanced one pass small out, 4840758
-silesia, small window log, advanced one pass small out, 7095919
+github.tar, explicit params, advanced one pass, 41385
+github.tar, uncompressed literals, advanced one pass, 41525
+github.tar, uncompressed literals optimal, advanced one pass, 35397
+github.tar, huffman literals, advanced one pass, 38853
+github.tar, multithreaded with advanced params, advanced one pass, 41525
+silesia, level -5, advanced one pass small out, 7354675
+silesia, level -3, advanced one pass small out, 6902374
+silesia, level -1, advanced one pass small out, 6177565
+silesia, level 0, advanced one pass small out, 4842075
+silesia, level 1, advanced one pass small out, 5309098
+silesia, level 3, advanced one pass small out, 4842075
+silesia, level 4, advanced one pass small out, 4779186
+silesia, level 5 row 1, advanced one pass small out, 4638691
+silesia, level 5 row 2, advanced one pass small out, 4640752
+silesia, level 5, advanced one pass small out, 4638691
+silesia, level 6, advanced one pass small out, 4605296
+silesia, level 7 row 1, advanced one pass small out, 4566984
+silesia, level 7 row 2, advanced one pass small out, 4564868
+silesia, level 7, advanced one pass small out, 4566984
+silesia, level 9, advanced one pass small out, 4543018
+silesia, level 11 row 1, advanced one pass small out, 4521323
+silesia, level 11 row 2, advanced one pass small out, 4519288
+silesia, level 12 row 1, advanced one pass small out, 4505046
+silesia, level 12 row 2, advanced one pass small out, 4503116
+silesia, level 13, advanced one pass small out, 4493990
+silesia, level 16, advanced one pass small out, 4359864
+silesia, level 19, advanced one pass small out, 4296880
+silesia, no source size, advanced one pass small out, 4842075
+silesia, long distance mode, advanced one pass small out, 4833710
+silesia, multithreaded, advanced one pass small out, 4842075
+silesia, multithreaded long distance mode, advanced one pass small out, 4833737
+silesia, small window log, advanced one pass small out, 7095000
 silesia, small hash log, advanced one pass small out, 6526141
 silesia, small chain log, advanced one pass small out, 4912197
-silesia, explicit params, advanced one pass small out, 4795856
-silesia, uncompressed literals, advanced one pass small out, 5127982
-silesia, uncompressed literals optimal, advanced one pass small out, 4317896
-silesia, huffman literals, advanced one pass small out, 5326268
-silesia, multithreaded with advanced params, advanced one pass small out, 5127982
-silesia.tar, level -5, advanced one pass small out, 6738593
-silesia.tar, level -3, advanced one pass small out, 6446372
-silesia.tar, level -1, advanced one pass small out, 6186042
-silesia.tar, level 0, advanced one pass small out, 4861425
-silesia.tar, level 1, advanced one pass small out, 5334885
-silesia.tar, level 3, advanced one pass small out, 4861425
-silesia.tar, level 4, advanced one pass small out, 4799630
-silesia.tar, level 5 row 1, advanced one pass small out, 4722324
-silesia.tar, level 5 row 2, advanced one pass small out, 4719256
-silesia.tar, level 5, advanced one pass small out, 4719256
-silesia.tar, level 6, advanced one pass small out, 4677721
-silesia.tar, level 7 row 1, advanced one pass small out, 4606715
-silesia.tar, level 7 row 2, advanced one pass small out, 4613541
-silesia.tar, level 7, advanced one pass small out, 4613541
-silesia.tar, level 9, advanced one pass small out, 4555426
-silesia.tar, level 12 row 1, advanced one pass small out, 4529459
-silesia.tar, level 12 row 2, advanced one pass small out, 4530256
-silesia.tar, level 13, advanced one pass small out, 4491764
-silesia.tar, level 16, advanced one pass small out, 4356827
-silesia.tar, level 19, advanced one pass small out, 4264487
-silesia.tar, no source size, advanced one pass small out, 4861425
-silesia.tar, long distance mode, advanced one pass small out, 4847754
-silesia.tar, multithreaded, advanced one pass small out, 4861508
-silesia.tar, multithreaded long distance mode, advanced one pass small out, 4853222
-silesia.tar, small window log, advanced one pass small out, 7101530
-silesia.tar, small hash log, advanced one pass small out, 6529232
+silesia, explicit params, advanced one pass small out, 4795432
+silesia, uncompressed literals, advanced one pass small out, 5120566
+silesia, uncompressed literals optimal, advanced one pass small out, 4319518
+silesia, huffman literals, advanced one pass small out, 5326346
+silesia, multithreaded with advanced params, advanced one pass small out, 5120566
+silesia.tar, level -5, advanced one pass small out, 7359401
+silesia.tar, level -3, advanced one pass small out, 6901672
+silesia.tar, level -1, advanced one pass small out, 6182241
+silesia.tar, level 0, advanced one pass small out, 4854086
+silesia.tar, level 1, advanced one pass small out, 5331946
+silesia.tar, level 3, advanced one pass small out, 4854086
+silesia.tar, level 4, advanced one pass small out, 4791503
+silesia.tar, level 5 row 1, advanced one pass small out, 4649987
+silesia.tar, level 5 row 2, advanced one pass small out, 4652862
+silesia.tar, level 5, advanced one pass small out, 4649987
+silesia.tar, level 6, advanced one pass small out, 4616797
+silesia.tar, level 7 row 1, advanced one pass small out, 4576661
+silesia.tar, level 7 row 2, advanced one pass small out, 4575393
+silesia.tar, level 7, advanced one pass small out, 4576661
+silesia.tar, level 9, advanced one pass small out, 4552381
+silesia.tar, level 11 row 1, advanced one pass small out, 4530241
+silesia.tar, level 11 row 2, advanced one pass small out, 4529461
+silesia.tar, level 12 row 1, advanced one pass small out, 4514432
+silesia.tar, level 12 row 2, advanced one pass small out, 4513604
+silesia.tar, level 13, advanced one pass small out, 4502956
+silesia.tar, level 16, advanced one pass small out, 4360527
+silesia.tar, level 19, advanced one pass small out, 4267266
+silesia.tar, no source size, advanced one pass small out, 4854086
+silesia.tar, long distance mode, advanced one pass small out, 4840452
+silesia.tar, multithreaded, advanced one pass small out, 4854160
+silesia.tar, multithreaded long distance mode, advanced one pass small out, 4845741
+silesia.tar, small window log, advanced one pass small out, 7100655
+silesia.tar, small hash log, advanced one pass small out, 6529231
 silesia.tar, small chain log, advanced one pass small out, 4917041
-silesia.tar, explicit params, advanced one pass small out, 4807380
-silesia.tar, uncompressed literals, advanced one pass small out, 5129458
-silesia.tar, uncompressed literals optimal, advanced one pass small out, 4307453
-silesia.tar, huffman literals, advanced one pass small out, 5347335
-silesia.tar, multithreaded with advanced params, advanced one pass small out, 5129555
-github, level -5, advanced one pass small out, 205285
+silesia.tar, explicit params, advanced one pass small out, 4806855
+silesia.tar, uncompressed literals, advanced one pass small out, 5122473
+silesia.tar, uncompressed literals optimal, advanced one pass small out, 4310141
+silesia.tar, huffman literals, advanced one pass small out, 5344545
+silesia.tar, multithreaded with advanced params, advanced one pass small out, 5122567
+github, level -5, advanced one pass small out, 232315
 github, level -5 with dict, advanced one pass small out, 46718
-github, level -3, advanced one pass small out, 190643
+github, level -3, advanced one pass small out, 220760
 github, level -3 with dict, advanced one pass small out, 45395
-github, level -1, advanced one pass small out, 175568
+github, level -1, advanced one pass small out, 175468
 github, level -1 with dict, advanced one pass small out, 43170
-github, level 0, advanced one pass small out, 136335
+github, level 0, advanced one pass small out, 136332
 github, level 0 with dict, advanced one pass small out, 41148
 github, level 0 with dict dms, advanced one pass small out, 41148
 github, level 0 with dict dds, advanced one pass small out, 41148
 github, level 0 with dict copy, advanced one pass small out, 41124
 github, level 0 with dict load, advanced one pass small out, 42252
-github, level 1, advanced one pass small out, 142465
+github, level 1, advanced one pass small out, 142365
 github, level 1 with dict, advanced one pass small out, 41682
 github, level 1 with dict dms, advanced one pass small out, 41682
 github, level 1 with dict dds, advanced one pass small out, 41682
 github, level 1 with dict copy, advanced one pass small out, 41674
 github, level 1 with dict load, advanced one pass small out, 43755
-github, level 3, advanced one pass small out, 136335
+github, level 3, advanced one pass small out, 136332
 github, level 3 with dict, advanced one pass small out, 41148
 github, level 3 with dict dms, advanced one pass small out, 41148
 github, level 3 with dict dds, advanced one pass small out, 41148
@@ -616,16 +644,16 @@ github, level 4 with dict dms, advanced
 github, level 4 with dict dds, advanced one pass small out, 41251
 github, level 4 with dict copy, advanced one pass small out, 41216
 github, level 4 with dict load, advanced one pass small out, 41159
-github, level 5 row 1, advanced one pass small out, 135121
-github, level 5 row 1 with dict dms, advanced one pass small out, 38938
-github, level 5 row 1 with dict dds, advanced one pass small out, 38732
-github, level 5 row 1 with dict copy, advanced one pass small out, 38934
-github, level 5 row 1 with dict load, advanced one pass small out, 40725
-github, level 5 row 2, advanced one pass small out, 134584
-github, level 5 row 2 with dict dms, advanced one pass small out, 38758
-github, level 5 row 2 with dict dds, advanced one pass small out, 38728
-github, level 5 row 2 with dict copy, advanced one pass small out, 38759
-github, level 5 row 2 with dict load, advanced one pass small out, 41518
+github, level 5 row 1, advanced one pass small out, 134584
+github, level 5 row 1 with dict dms, advanced one pass small out, 38758
+github, level 5 row 1 with dict dds, advanced one pass small out, 38728
+github, level 5 row 1 with dict copy, advanced one pass small out, 38759
+github, level 5 row 1 with dict load, advanced one pass small out, 41518
+github, level 5 row 2, advanced one pass small out, 135121
+github, level 5 row 2 with dict dms, advanced one pass small out, 38938
+github, level 5 row 2 with dict dds, advanced one pass small out, 38732
+github, level 5 row 2 with dict copy, advanced one pass small out, 38934
+github, level 5 row 2 with dict load, advanced one pass small out, 40725
 github, level 5, advanced one pass small out, 135121
 github, level 5 with dict, advanced one pass small out, 38758
 github, level 5 with dict dms, advanced one pass small out, 38758
@@ -635,31 +663,41 @@ github, level 5 with dict load, advanced
 github, level 6, advanced one pass small out, 135122
 github, level 6 with dict, advanced one pass small out, 38671
 github, level 6 with dict dms, advanced one pass small out, 38671
-github, level 6 with dict dds, advanced one pass small out, 38630
+github, level 6 with dict dds, advanced one pass small out, 38636
 github, level 6 with dict copy, advanced one pass small out, 38669
 github, level 6 with dict load, advanced one pass small out, 40695
-github, level 7 row 1, advanced one pass small out, 135122
-github, level 7 row 1 with dict dms, advanced one pass small out, 38771
-github, level 7 row 1 with dict dds, advanced one pass small out, 38771
-github, level 7 row 1 with dict copy, advanced one pass small out, 38745
-github, level 7 row 1 with dict load, advanced one pass small out, 40695
-github, level 7 row 2, advanced one pass small out, 134584
-github, level 7 row 2 with dict dms, advanced one pass small out, 38758
-github, level 7 row 2 with dict dds, advanced one pass small out, 38747
-github, level 7 row 2 with dict copy, advanced one pass small out, 38755
-github, level 7 row 2 with dict load, advanced one pass small out, 41030
+github, level 7 row 1, advanced one pass small out, 134584
+github, level 7 row 1 with dict dms, advanced one pass small out, 38758
+github, level 7 row 1 with dict dds, advanced one pass small out, 38745
+github, level 7 row 1 with dict copy, advanced one pass small out, 38755
+github, level 7 row 1 with dict load, advanced one pass small out, 43154
+github, level 7 row 2, advanced one pass small out, 135122
+github, level 7 row 2 with dict dms, advanced one pass small out, 38860
+github, level 7 row 2 with dict dds, advanced one pass small out, 38766
+github, level 7 row 2 with dict copy, advanced one pass small out, 38834
+github, level 7 row 2 with dict load, advanced one pass small out, 40695
 github, level 7, advanced one pass small out, 135122
 github, level 7 with dict, advanced one pass small out, 38758
 github, level 7 with dict dms, advanced one pass small out, 38758
-github, level 7 with dict dds, advanced one pass small out, 38747
+github, level 7 with dict dds, advanced one pass small out, 38745
 github, level 7 with dict copy, advanced one pass small out, 38755
 github, level 7 with dict load, advanced one pass small out, 40695
 github, level 9, advanced one pass small out, 135122
 github, level 9 with dict, advanced one pass small out, 39437
 github, level 9 with dict dms, advanced one pass small out, 39437
-github, level 9 with dict dds, advanced one pass small out, 39338
+github, level 9 with dict dds, advanced one pass small out, 39393
 github, level 9 with dict copy, advanced one pass small out, 39398
 github, level 9 with dict load, advanced one pass small out, 41710
+github, level 11 row 1, advanced one pass small out, 135119
+github, level 11 row 1 with dict dms, advanced one pass small out, 39671
+github, level 11 row 1 with dict dds, advanced one pass small out, 39671
+github, level 11 row 1 with dict copy, advanced one pass small out, 39651
+github, level 11 row 1 with dict load, advanced one pass small out, 41360
+github, level 11 row 2, advanced one pass small out, 135119
+github, level 11 row 2 with dict dms, advanced one pass small out, 39671
+github, level 11 row 2 with dict dds, advanced one pass small out, 39671
+github, level 11 row 2 with dict copy, advanced one pass small out, 39651
+github, level 11 row 2 with dict load, advanced one pass small out, 41360
 github, level 12 row 1, advanced one pass small out, 134180
 github, level 12 row 1 with dict dms, advanced one pass small out, 39677
 github, level 12 row 1 with dict dds, advanced one pass small out, 39677
@@ -671,9 +709,9 @@ github, level 12 row 2 with dict dds, advanced
 github, level 12 row 2 with dict copy, advanced one pass small out, 39677
 github, level 12 row 2 with dict load, advanced one pass small out, 41166
 github, level 13, advanced one pass small out, 134064
-github, level 13 with dict, advanced one pass small out, 39743
-github, level 13 with dict dms, advanced one pass small out, 39743
-github, level 13 with dict dds, advanced one pass small out, 39743
+github, level 13 with dict, advanced one pass small out, 39900
+github, level 13 with dict dms, advanced one pass small out, 39900
+github, level 13 with dict dds, advanced one pass small out, 39900
 github, level 13 with dict copy, advanced one pass small out, 39948
 github, level 13 with dict load, advanced one pass small out, 42626
 github, level 16, advanced one pass small out, 134064
@@ -688,217 +726,231 @@ github, level 19 with dict dms, advanced
 github, level 19 with dict dds, advanced one pass small out, 37576
 github, level 19 with dict copy, advanced one pass small out, 37567
 github, level 19 with dict load, advanced one pass small out, 39613
-github, no source size, advanced one pass small out, 136335
+github, no source size, advanced one pass small out, 136332
 github, no source size with dict, advanced one pass small out, 41148
-github, long distance mode, advanced one pass small out, 136335
-github, multithreaded, advanced one pass small out, 136335
-github, multithreaded long distance mode, advanced one pass small out, 136335
-github, small window log, advanced one pass small out, 136335
+github, long distance mode, advanced one pass small out, 136332
+github, multithreaded, advanced one pass small out, 136332
+github, multithreaded long distance mode, advanced one pass small out, 136332
+github, small window log, advanced one pass small out, 136332
 github, small hash log, advanced one pass small out, 135590
 github, small chain log, advanced one pass small out, 136341
 github, explicit params, advanced one pass small out, 137727
-github, uncompressed literals, advanced one pass small out, 165915
+github, uncompressed literals, advanced one pass small out, 165911
 github, uncompressed literals optimal, advanced one pass small out, 157227
-github, huffman literals, advanced one pass small out, 142465
-github, multithreaded with advanced params, advanced one pass small out, 165915
-github.tar, level -5, advanced one pass small out, 46856
-github.tar, level -5 with dict, advanced one pass small out, 44571
-github.tar, level -3, advanced one pass small out, 43754
-github.tar, level -3 with dict, advanced one pass small out, 41447
-github.tar, level -1, advanced one pass small out, 42490
-github.tar, level -1 with dict, advanced one pass small out, 41131
-github.tar, level 0, advanced one pass small out, 38441
+github, huffman literals, advanced one pass small out, 142365
+github, multithreaded with advanced params, advanced one pass small out, 165911
+github.tar, level -5, advanced one pass small out, 66914
+github.tar, level -5 with dict, advanced one pass small out, 51525
+github.tar, level -3, advanced one pass small out, 52127
+github.tar, level -3 with dict, advanced one pass small out, 44242
+github.tar, level -1, advanced one pass small out, 42560
+github.tar, level -1 with dict, advanced one pass small out, 41136
+github.tar, level 0, advanced one pass small out, 38831
 github.tar, level 0 with dict, advanced one pass small out, 37995
 github.tar, level 0 with dict dms, advanced one pass small out, 38003
 github.tar, level 0 with dict dds, advanced one pass small out, 38003
 github.tar, level 0 with dict copy, advanced one pass small out, 37995
 github.tar, level 0 with dict load, advanced one pass small out, 37956
-github.tar, level 1, advanced one pass small out, 39265
-github.tar, level 1 with dict, advanced one pass small out, 38280
-github.tar, level 1 with dict dms, advanced one pass small out, 38290
-github.tar, level 1 with dict dds, advanced one pass small out, 38290
-github.tar, level 1 with dict copy, advanced one pass small out, 38280
-github.tar, level 1 with dict load, advanced one pass small out, 38729
-github.tar, level 3, advanced one pass small out, 38441
+github.tar, level 1, advanced one pass small out, 39200
+github.tar, level 1 with dict, advanced one pass small out, 38284
+github.tar, level 1 with dict dms, advanced one pass small out, 38294
+github.tar, level 1 with dict dds, advanced one pass small out, 38294
+github.tar, level 1 with dict copy, advanced one pass small out, 38284
+github.tar, level 1 with dict load, advanced one pass small out, 38724
+github.tar, level 3, advanced one pass small out, 38831
 github.tar, level 3 with dict, advanced one pass small out, 37995
 github.tar, level 3 with dict dms, advanced one pass small out, 38003
 github.tar, level 3 with dict dds, advanced one pass small out, 38003
 github.tar, level 3 with dict copy, advanced one pass small out, 37995
 github.tar, level 3 with dict load, advanced one pass small out, 37956
-github.tar, level 4, advanced one pass small out, 38467
+github.tar, level 4, advanced one pass small out, 38893
 github.tar, level 4 with dict, advanced one pass small out, 37948
 github.tar, level 4 with dict dms, advanced one pass small out, 37954
 github.tar, level 4 with dict dds, advanced one pass small out, 37954
 github.tar, level 4 with dict copy, advanced one pass small out, 37948
 github.tar, level 4 with dict load, advanced one pass small out, 37927
-github.tar, level 5 row 1, advanced one pass small out, 39788
-github.tar, level 5 row 1 with dict dms, advanced one pass small out, 39365
-github.tar, level 5 row 1 with dict dds, advanced one pass small out, 39233
-github.tar, level 5 row 1 with dict copy, advanced one pass small out, 39715
-github.tar, level 5 row 1 with dict load, advanced one pass small out, 39209
-github.tar, level 5 row 2, advanced one pass small out, 39693
-github.tar, level 5 row 2 with dict dms, advanced one pass small out, 39024
-github.tar, level 5 row 2 with dict dds, advanced one pass small out, 39028
-github.tar, level 5 row 2 with dict copy, advanced one pass small out, 39040
-github.tar, level 5 row 2 with dict load, advanced one pass small out, 39037
-github.tar, level 5, advanced one pass small out, 39693
-github.tar, level 5 with dict, advanced one pass small out, 39040
-github.tar, level 5 with dict dms, advanced one pass small out, 39024
-github.tar, level 5 with dict dds, advanced one pass small out, 39028
-github.tar, level 5 with dict copy, advanced one pass small out, 39040
-github.tar, level 5 with dict load, advanced one pass small out, 39037
-github.tar, level 6, advanced one pass small out, 39621
-github.tar, level 6 with dict, advanced one pass small out, 38622
-github.tar, level 6 with dict dms, advanced one pass small out, 38608
-github.tar, level 6 with dict dds, advanced one pass small out, 38610
-github.tar, level 6 with dict copy, advanced one pass small out, 38622
-github.tar, level 6 with dict load, advanced one pass small out, 38962
-github.tar, level 7 row 1, advanced one pass small out, 39206
-github.tar, level 7 row 1 with dict dms, advanced one pass small out, 37954
-github.tar, level 7 row 1 with dict dds, advanced one pass small out, 37954
-github.tar, level 7 row 1 with dict copy, advanced one pass small out, 38071
-github.tar, level 7 row 1 with dict load, advanced one pass small out, 38584
-github.tar, level 7 row 2, advanced one pass small out, 39213
-github.tar, level 7 row 2 with dict dms, advanced one pass small out, 37848
-github.tar, level 7 row 2 with dict dds, advanced one pass small out, 37867
-github.tar, level 7 row 2 with dict copy, advanced one pass small out, 37848
-github.tar, level 7 row 2 with dict load, advanced one pass small out, 38582
-github.tar, level 7, advanced one pass small out, 39213
-github.tar, level 7 with dict, advanced one pass small out, 37848
-github.tar, level 7 with dict dms, advanced one pass small out, 37848
-github.tar, level 7 with dict dds, advanced one pass small out, 37867
-github.tar, level 7 with dict copy, advanced one pass small out, 37848
-github.tar, level 7 with dict load, advanced one pass small out, 38582
-github.tar, level 9, advanced one pass small out, 36758
-github.tar, level 9 with dict, advanced one pass small out, 36457
-github.tar, level 9 with dict dms, advanced one pass small out, 36549
-github.tar, level 9 with dict dds, advanced one pass small out, 36637
-github.tar, level 9 with dict copy, advanced one pass small out, 36457
-github.tar, level 9 with dict load, advanced one pass small out, 36350
-github.tar, level 12 row 1, advanced one pass small out, 36435
+github.tar, level 5 row 1, advanced one pass small out, 38366
+github.tar, level 5 row 1 with dict dms, advanced one pass small out, 39059
+github.tar, level 5 row 1 with dict dds, advanced one pass small out, 39067
+github.tar, level 5 row 1 with dict copy, advanced one pass small out, 39082
+github.tar, level 5 row 1 with dict load, advanced one pass small out, 37656
+github.tar, level 5 row 2, advanced one pass small out, 38534
+github.tar, level 5 row 2 with dict dms, advanced one pass small out, 39365
+github.tar, level 5 row 2 with dict dds, advanced one pass small out, 39233
+github.tar, level 5 row 2 with dict copy, advanced one pass small out, 39715
+github.tar, level 5 row 2 with dict load, advanced one pass small out, 38019
+github.tar, level 5, advanced one pass small out, 38366
+github.tar, level 5 with dict, advanced one pass small out, 39082
+github.tar, level 5 with dict dms, advanced one pass small out, 39059
+github.tar, level 5 with dict dds, advanced one pass small out, 39067
+github.tar, level 5 with dict copy, advanced one pass small out, 39082
+github.tar, level 5 with dict load, advanced one pass small out, 37656
+github.tar, level 6, advanced one pass small out, 38648
+github.tar, level 6 with dict, advanced one pass small out, 38656
+github.tar, level 6 with dict dms, advanced one pass small out, 38636
+github.tar, level 6 with dict dds, advanced one pass small out, 38634
+github.tar, level 6 with dict copy, advanced one pass small out, 38656
+github.tar, level 6 with dict load, advanced one pass small out, 37865
+github.tar, level 7 row 1, advanced one pass small out, 38110
+github.tar, level 7 row 1 with dict dms, advanced one pass small out, 37858
+github.tar, level 7 row 1 with dict dds, advanced one pass small out, 37882
+github.tar, level 7 row 1 with dict copy, advanced one pass small out, 37865
+github.tar, level 7 row 1 with dict load, advanced one pass small out, 37436
+github.tar, level 7 row 2, advanced one pass small out, 38077
+github.tar, level 7 row 2 with dict dms, advanced one pass small out, 38012
+github.tar, level 7 row 2 with dict dds, advanced one pass small out, 38014
+github.tar, level 7 row 2 with dict copy, advanced one pass small out, 38101
+github.tar, level 7 row 2 with dict load, advanced one pass small out, 37402
+github.tar, level 7, advanced one pass small out, 38110
+github.tar, level 7 with dict, advanced one pass small out, 37865
+github.tar, level 7 with dict dms, advanced one pass small out, 37858
+github.tar, level 7 with dict dds, advanced one pass small out, 37882
+github.tar, level 7 with dict copy, advanced one pass small out, 37865
+github.tar, level 7 with dict load, advanced one pass small out, 37436
+github.tar, level 9, advanced one pass small out, 36760
+github.tar, level 9 with dict, advanced one pass small out, 36484
+github.tar, level 9 with dict dms, advanced one pass small out, 36567
+github.tar, level 9 with dict dds, advanced one pass small out, 36628
+github.tar, level 9 with dict copy, advanced one pass small out, 36484
+github.tar, level 9 with dict load, advanced one pass small out, 36401
+github.tar, level 11 row 1, advanced one pass small out, 36452
+github.tar, level 11 row 1 with dict dms, advanced one pass small out, 36963
+github.tar, level 11 row 1 with dict dds, advanced one pass small out, 36963
+github.tar, level 11 row 1 with dict copy, advanced one pass small out, 36557
+github.tar, level 11 row 1 with dict load, advanced one pass small out, 36455
+github.tar, level 11 row 2, advanced one pass small out, 36435
+github.tar, level 11 row 2 with dict dms, advanced one pass small out, 36963
+github.tar, level 11 row 2 with dict dds, advanced one pass small out, 36963
+github.tar, level 11 row 2 with dict copy, advanced one pass small out, 36557
+github.tar, level 11 row 2 with dict load, advanced one pass small out, 36419
+github.tar, level 12 row 1, advanced one pass small out, 36081
 github.tar, level 12 row 1 with dict dms, advanced one pass small out, 36986
 github.tar, level 12 row 1 with dict dds, advanced one pass small out, 36986
 github.tar, level 12 row 1 with dict copy, advanced one pass small out, 36609
-github.tar, level 12 row 1 with dict load, advanced one pass small out, 36419
-github.tar, level 12 row 2, advanced one pass small out, 36435
+github.tar, level 12 row 1 with dict load, advanced one pass small out, 36434
+github.tar, level 12 row 2, advanced one pass small out, 36110
 github.tar, level 12 row 2 with dict dms, advanced one pass small out, 36986
 github.tar, level 12 row 2 with dict dds, advanced one pass small out, 36986
 github.tar, level 12 row 2 with dict copy, advanced one pass small out, 36609
-github.tar, level 12 row 2 with dict load, advanced one pass small out, 36424
-github.tar, level 13, advanced one pass small out, 35621
-github.tar, level 13 with dict, advanced one pass small out, 38726
-github.tar, level 13 with dict dms, advanced one pass small out, 38903
-github.tar, level 13 with dict dds, advanced one pass small out, 38903
-github.tar, level 13 with dict copy, advanced one pass small out, 38726
-github.tar, level 13 with dict load, advanced one pass small out, 36372
-github.tar, level 16, advanced one pass small out, 40255
-github.tar, level 16 with dict, advanced one pass small out, 33639
-github.tar, level 16 with dict dms, advanced one pass small out, 33544
-github.tar, level 16 with dict dds, advanced one pass small out, 33544
-github.tar, level 16 with dict copy, advanced one pass small out, 33639
-github.tar, level 16 with dict load, advanced one pass small out, 39353
-github.tar, level 19, advanced one pass small out, 32837
-github.tar, level 19 with dict, advanced one pass small out, 32895
-github.tar, level 19 with dict dms, advanced one pass small out, 32672
-github.tar, level 19 with dict dds, advanced one pass small out, 32672
-github.tar, level 19 with dict copy, advanced one pass small out, 32895
-github.tar, level 19 with dict load, advanced one pass small out, 32676
-github.tar, no source size, advanced one pass small out, 38441
+github.tar, level 12 row 2 with dict load, advanced one pass small out, 36459
+github.tar, level 13, advanced one pass small out, 35501
+github.tar, level 13 with dict, advanced one pass small out, 37130
+github.tar, level 13 with dict dms, advanced one pass small out, 37220
+github.tar, level 13 with dict dds, advanced one pass small out, 37220
+github.tar, level 13 with dict copy, advanced one pass small out, 37130
+github.tar, level 13 with dict load, advanced one pass small out, 36010
+github.tar, level 16, advanced one pass small out, 40471
+github.tar, level 16 with dict, advanced one pass small out, 33378
+github.tar, level 16 with dict dms, advanced one pass small out, 33213
+github.tar, level 16 with dict dds, advanced one pass small out, 33213
+github.tar, level 16 with dict copy, advanced one pass small out, 33378
+github.tar, level 16 with dict load, advanced one pass small out, 39081
+github.tar, level 19, advanced one pass small out, 32134
+github.tar, level 19 with dict, advanced one pass small out, 32709
+github.tar, level 19 with dict dms, advanced one pass small out, 32553
+github.tar, level 19 with dict dds, advanced one pass small out, 32553
+github.tar, level 19 with dict copy, advanced one pass small out, 32709
+github.tar, level 19 with dict load, advanced one pass small out, 32474
+github.tar, no source size, advanced one pass small out, 38831
 github.tar, no source size with dict, advanced one pass small out, 37995
-github.tar, long distance mode, advanced one pass small out, 39757
-github.tar, multithreaded, advanced one pass small out, 38441
-github.tar, multithreaded long distance mode, advanced one pass small out, 39726
+github.tar, long distance mode, advanced one pass small out, 40252
+github.tar, multithreaded, advanced one pass small out, 38831
+github.tar, multithreaded long distance mode, advanced one pass small out, 40232
 github.tar, small window log, advanced one pass small out, 198540
 github.tar, small hash log, advanced one pass small out, 129870
 github.tar, small chain log, advanced one pass small out, 41669
-github.tar, explicit params, advanced one pass small out, 41227
-github.tar, uncompressed literals, advanced one pass small out, 41122
-github.tar, uncompressed literals optimal, advanced one pass small out, 35388
-github.tar, huffman literals, advanced one pass small out, 38777
-github.tar, multithreaded with advanced params, advanced one pass small out, 41122
-silesia, level -5, advanced streaming, 6882505
-silesia, level -3, advanced streaming, 6568376
-silesia, level -1, advanced streaming, 6183403
-silesia, level 0, advanced streaming, 4849552
-silesia, level 1, advanced streaming, 5314162
-silesia, level 3, advanced streaming, 4849552
-silesia, level 4, advanced streaming, 4786970
-silesia, level 5 row 1, advanced streaming, 4710236
-silesia, level 5 row 2, advanced streaming, 4707794
-silesia, level 5, advanced streaming, 4707794
-silesia, level 6, advanced streaming, 4666383
-silesia, level 7 row 1, advanced streaming, 4596296
-silesia, level 7 row 2, advanced streaming, 4603381
-silesia, level 7, advanced streaming, 4603381
-silesia, level 9, advanced streaming, 4546001
-silesia, level 12 row 1, advanced streaming, 4519288
-silesia, level 12 row 2, advanced streaming, 4521397
-silesia, level 13, advanced streaming, 4482135
-silesia, level 16, advanced streaming, 4360251
-silesia, level 19, advanced streaming, 4283237
-silesia, no source size, advanced streaming, 4849516
-silesia, long distance mode, advanced streaming, 4840738
-silesia, multithreaded, advanced streaming, 4849552
-silesia, multithreaded long distance mode, advanced streaming, 4840758
-silesia, small window log, advanced streaming, 7112062
+github.tar, explicit params, advanced one pass small out, 41385
+github.tar, uncompressed literals, advanced one pass small out, 41525
+github.tar, uncompressed literals optimal, advanced one pass small out, 35397
+github.tar, huffman literals, advanced one pass small out, 38853
+github.tar, multithreaded with advanced params, advanced one pass small out, 41525
+silesia, level -5, advanced streaming, 7292053
+silesia, level -3, advanced streaming, 6867875
+silesia, level -1, advanced streaming, 6183923
+silesia, level 0, advanced streaming, 4842075
+silesia, level 1, advanced streaming, 5312694
+silesia, level 3, advanced streaming, 4842075
+silesia, level 4, advanced streaming, 4779186
+silesia, level 5 row 1, advanced streaming, 4638691
+silesia, level 5 row 2, advanced streaming, 4640752
+silesia, level 5, advanced streaming, 4638691
+silesia, level 6, advanced streaming, 4605296
+silesia, level 7 row 1, advanced streaming, 4566984
+silesia, level 7 row 2, advanced streaming, 4564868
+silesia, level 7, advanced streaming, 4566984
+silesia, level 9, advanced streaming, 4543018
+silesia, level 11 row 1, advanced streaming, 4521323
+silesia, level 11 row 2, advanced streaming, 4519288
+silesia, level 12 row 1, advanced streaming, 4505046
+silesia, level 12 row 2, advanced streaming, 4503116
+silesia, level 13, advanced streaming, 4493990
+silesia, level 16, advanced streaming, 4359864
+silesia, level 19, advanced streaming, 4296880
+silesia, no source size, advanced streaming, 4842039
+silesia, long distance mode, advanced streaming, 4833710
+silesia, multithreaded, advanced streaming, 4842075
+silesia, multithreaded long distance mode, advanced streaming, 4833737
+silesia, small window log, advanced streaming, 7111103
 silesia, small hash log, advanced streaming, 6526141
 silesia, small chain log, advanced streaming, 4912197
-silesia, explicit params, advanced streaming, 4795887
-silesia, uncompressed literals, advanced streaming, 5127982
-silesia, uncompressed literals optimal, advanced streaming, 4317896
-silesia, huffman literals, advanced streaming, 5331168
-silesia, multithreaded with advanced params, advanced streaming, 5127982
-silesia.tar, level -5, advanced streaming, 6982759
-silesia.tar, level -3, advanced streaming, 6641283
-silesia.tar, level -1, advanced streaming, 6190795
-silesia.tar, level 0, advanced streaming, 4861427
-silesia.tar, level 1, advanced streaming, 5336939
-silesia.tar, level 3, advanced streaming, 4861427
-silesia.tar, level 4, advanced streaming, 4799630
-silesia.tar, level 5 row 1, advanced streaming, 4722329
-silesia.tar, level 5 row 2, advanced streaming, 4719261
-silesia.tar, level 5, advanced streaming, 4719261
-silesia.tar, level 6, advanced streaming, 4677729
-silesia.tar, level 7 row 1, advanced streaming, 4606715
-silesia.tar, level 7 row 2, advanced streaming, 4613544
-silesia.tar, level 7, advanced streaming, 4613544
-silesia.tar, level 9, advanced streaming, 4555432
-silesia.tar, level 12 row 1, advanced streaming, 4529459
-silesia.tar, level 12 row 2, advanced streaming, 4530258
-silesia.tar, level 13, advanced streaming, 4491765
-silesia.tar, level 16, advanced streaming, 4356834
-silesia.tar, level 19, advanced streaming, 4264392
-silesia.tar, no source size, advanced streaming, 4861423
-silesia.tar, long distance mode, advanced streaming, 4847754
-silesia.tar, multithreaded, advanced streaming, 4861508
-silesia.tar, multithreaded long distance mode, advanced streaming, 4853222
-silesia.tar, small window log, advanced streaming, 7118769
-silesia.tar, small hash log, advanced streaming, 6529235
+silesia, explicit params, advanced streaming, 4795452
+silesia, uncompressed literals, advanced streaming, 5120566
+silesia, uncompressed literals optimal, advanced streaming, 4319518
+silesia, huffman literals, advanced streaming, 5332234
+silesia, multithreaded with advanced params, advanced streaming, 5120566
+silesia.tar, level -5, advanced streaming, 7260007
+silesia.tar, level -3, advanced streaming, 6845151
+silesia.tar, level -1, advanced streaming, 6187938
+silesia.tar, level 0, advanced streaming, 4859271
+silesia.tar, level 1, advanced streaming, 5334890
+silesia.tar, level 3, advanced streaming, 4859271
+silesia.tar, level 4, advanced streaming, 4797470
+silesia.tar, level 5 row 1, advanced streaming, 4649992
+silesia.tar, level 5 row 2, advanced streaming, 4652866
+silesia.tar, level 5, advanced streaming, 4649992
+silesia.tar, level 6, advanced streaming, 4616803
+silesia.tar, level 7 row 1, advanced streaming, 4576664
+silesia.tar, level 7 row 2, advanced streaming, 4575394
+silesia.tar, level 7, advanced streaming, 4576664
+silesia.tar, level 9, advanced streaming, 4552386
+silesia.tar, level 11 row 1, advanced streaming, 4530243
+silesia.tar, level 11 row 2, advanced streaming, 4529461
+silesia.tar, level 12 row 1, advanced streaming, 4514433
+silesia.tar, level 12 row 2, advanced streaming, 4513604
+silesia.tar, level 13, advanced streaming, 4502956
+silesia.tar, level 16, advanced streaming, 4360527
+silesia.tar, level 19, advanced streaming, 4267266
+silesia.tar, no source size, advanced streaming, 4859267
+silesia.tar, long distance mode, advanced streaming, 4840452
+silesia.tar, multithreaded, advanced streaming, 4854160
+silesia.tar, multithreaded long distance mode, advanced streaming, 4845741
+silesia.tar, small window log, advanced streaming, 7117559
+silesia.tar, small hash log, advanced streaming, 6529234
 silesia.tar, small chain log, advanced streaming, 4917021
-silesia.tar, explicit params, advanced streaming, 4807401
-silesia.tar, uncompressed literals, advanced streaming, 5129461
-silesia.tar, uncompressed literals optimal, advanced streaming, 4307400
-silesia.tar, huffman literals, advanced streaming, 5352360
-silesia.tar, multithreaded with advanced params, advanced streaming, 5129555
-github, level -5, advanced streaming, 205285
+silesia.tar, explicit params, advanced streaming, 4806873
+silesia.tar, uncompressed literals, advanced streaming, 5127423
+silesia.tar, uncompressed literals optimal, advanced streaming, 4310141
+silesia.tar, huffman literals, advanced streaming, 5350519
+silesia.tar, multithreaded with advanced params, advanced streaming, 5122567
+github, level -5, advanced streaming, 232315
 github, level -5 with dict, advanced streaming, 46718
-github, level -3, advanced streaming, 190643
+github, level -3, advanced streaming, 220760
 github, level -3 with dict, advanced streaming, 45395
-github, level -1, advanced streaming, 175568
+github, level -1, advanced streaming, 175468
 github, level -1 with dict, advanced streaming, 43170
-github, level 0, advanced streaming, 136335
+github, level 0, advanced streaming, 136332
 github, level 0 with dict, advanced streaming, 41148
 github, level 0 with dict dms, advanced streaming, 41148
 github, level 0 with dict dds, advanced streaming, 41148
 github, level 0 with dict copy, advanced streaming, 41124
 github, level 0 with dict load, advanced streaming, 42252
-github, level 1, advanced streaming, 142465
+github, level 1, advanced streaming, 142365
 github, level 1 with dict, advanced streaming, 41682
 github, level 1 with dict dms, advanced streaming, 41682
 github, level 1 with dict dds, advanced streaming, 41682
 github, level 1 with dict copy, advanced streaming, 41674
 github, level 1 with dict load, advanced streaming, 43755
-github, level 3, advanced streaming, 136335
+github, level 3, advanced streaming, 136332
 github, level 3 with dict, advanced streaming, 41148
 github, level 3 with dict dms, advanced streaming, 41148
 github, level 3 with dict dds, advanced streaming, 41148
@@ -910,16 +962,16 @@ github, level 4 with dict dms, advanced
 github, level 4 with dict dds, advanced streaming, 41251
 github, level 4 with dict copy, advanced streaming, 41216
 github, level 4 with dict load, advanced streaming, 41159
-github, level 5 row 1, advanced streaming, 135121
-github, level 5 row 1 with dict dms, advanced streaming, 38938
-github, level 5 row 1 with dict dds,
advanced streaming, 38732 -github, level 5 row 1 with dict copy, advanced streaming, 38934 -github, level 5 row 1 with dict load, advanced streaming, 40725 -github, level 5 row 2, advanced streaming, 134584 -github, level 5 row 2 with dict dms, advanced streaming, 38758 -github, level 5 row 2 with dict dds, advanced streaming, 38728 -github, level 5 row 2 with dict copy, advanced streaming, 38759 -github, level 5 row 2 with dict load, advanced streaming, 41518 +github, level 5 row 1, advanced streaming, 134584 +github, level 5 row 1 with dict dms, advanced streaming, 38758 +github, level 5 row 1 with dict dds, advanced streaming, 38728 +github, level 5 row 1 with dict copy, advanced streaming, 38759 +github, level 5 row 1 with dict load, advanced streaming, 41518 +github, level 5 row 2, advanced streaming, 135121 +github, level 5 row 2 with dict dms, advanced streaming, 38938 +github, level 5 row 2 with dict dds, advanced streaming, 38732 +github, level 5 row 2 with dict copy, advanced streaming, 38934 +github, level 5 row 2 with dict load, advanced streaming, 40725 github, level 5, advanced streaming, 135121 github, level 5 with dict, advanced streaming, 38758 github, level 5 with dict dms, advanced streaming, 38758 @@ -929,31 +981,41 @@ github, level 5 with dict load, advanced github, level 6, advanced streaming, 135122 github, level 6 with dict, advanced streaming, 38671 github, level 6 with dict dms, advanced streaming, 38671 -github, level 6 with dict dds, advanced streaming, 38630 +github, level 6 with dict dds, advanced streaming, 38636 github, level 6 with dict copy, advanced streaming, 38669 github, level 6 with dict load, advanced streaming, 40695 -github, level 7 row 1, advanced streaming, 135122 -github, level 7 row 1 with dict dms, advanced streaming, 38771 -github, level 7 row 1 with dict dds, advanced streaming, 38771 -github, level 7 row 1 with dict copy, advanced streaming, 38745 -github, level 7 row 1 with dict load, advanced streaming, 40695 -github, level 7 row 2, advanced streaming, 134584 -github, level 7 row 2 with dict dms, advanced streaming, 38758 -github, level 7 row 2 with dict dds, advanced streaming, 38747 -github, level 7 row 2 with dict copy, advanced streaming, 38755 -github, level 7 row 2 with dict load, advanced streaming, 41030 +github, level 7 row 1, advanced streaming, 134584 +github, level 7 row 1 with dict dms, advanced streaming, 38758 +github, level 7 row 1 with dict dds, advanced streaming, 38745 +github, level 7 row 1 with dict copy, advanced streaming, 38755 +github, level 7 row 1 with dict load, advanced streaming, 43154 +github, level 7 row 2, advanced streaming, 135122 +github, level 7 row 2 with dict dms, advanced streaming, 38860 +github, level 7 row 2 with dict dds, advanced streaming, 38766 +github, level 7 row 2 with dict copy, advanced streaming, 38834 +github, level 7 row 2 with dict load, advanced streaming, 40695 github, level 7, advanced streaming, 135122 github, level 7 with dict, advanced streaming, 38758 github, level 7 with dict dms, advanced streaming, 38758 -github, level 7 with dict dds, advanced streaming, 38747 +github, level 7 with dict dds, advanced streaming, 38745 github, level 7 with dict copy, advanced streaming, 38755 github, level 7 with dict load, advanced streaming, 40695 github, level 9, advanced streaming, 135122 github, level 9 with dict, advanced streaming, 39437 github, level 9 with dict dms, advanced streaming, 39437 -github, level 9 with dict dds, advanced streaming, 39338 +github, level 9 with dict dds, 
advanced streaming, 39393 github, level 9 with dict copy, advanced streaming, 39398 github, level 9 with dict load, advanced streaming, 41710 +github, level 11 row 1, advanced streaming, 135119 +github, level 11 row 1 with dict dms, advanced streaming, 39671 +github, level 11 row 1 with dict dds, advanced streaming, 39671 +github, level 11 row 1 with dict copy, advanced streaming, 39651 +github, level 11 row 1 with dict load, advanced streaming, 41360 +github, level 11 row 2, advanced streaming, 135119 +github, level 11 row 2 with dict dms, advanced streaming, 39671 +github, level 11 row 2 with dict dds, advanced streaming, 39671 +github, level 11 row 2 with dict copy, advanced streaming, 39651 +github, level 11 row 2 with dict load, advanced streaming, 41360 github, level 12 row 1, advanced streaming, 134180 github, level 12 row 1 with dict dms, advanced streaming, 39677 github, level 12 row 1 with dict dds, advanced streaming, 39677 @@ -965,9 +1027,9 @@ github, level 12 row 2 with dict dds, advanced github, level 12 row 2 with dict copy, advanced streaming, 39677 github, level 12 row 2 with dict load, advanced streaming, 41166 github, level 13, advanced streaming, 134064 -github, level 13 with dict, advanced streaming, 39743 -github, level 13 with dict dms, advanced streaming, 39743 -github, level 13 with dict dds, advanced streaming, 39743 +github, level 13 with dict, advanced streaming, 39900 +github, level 13 with dict dms, advanced streaming, 39900 +github, level 13 with dict dds, advanced streaming, 39900 github, level 13 with dict copy, advanced streaming, 39948 github, level 13 with dict load, advanced streaming, 42626 github, level 16, advanced streaming, 134064 @@ -982,181 +1044,191 @@ github, level 19 with dict dms, advanced github, level 19 with dict dds, advanced streaming, 37576 github, level 19 with dict copy, advanced streaming, 37567 github, level 19 with dict load, advanced streaming, 39613 -github, no source size, advanced streaming, 136335 +github, no source size, advanced streaming, 136332 github, no source size with dict, advanced streaming, 41148 -github, long distance mode, advanced streaming, 136335 -github, multithreaded, advanced streaming, 136335 -github, multithreaded long distance mode, advanced streaming, 136335 -github, small window log, advanced streaming, 136335 +github, long distance mode, advanced streaming, 136332 +github, multithreaded, advanced streaming, 136332 +github, multithreaded long distance mode, advanced streaming, 136332 +github, small window log, advanced streaming, 136332 github, small hash log, advanced streaming, 135590 github, small chain log, advanced streaming, 136341 github, explicit params, advanced streaming, 137727 -github, uncompressed literals, advanced streaming, 165915 +github, uncompressed literals, advanced streaming, 165911 github, uncompressed literals optimal, advanced streaming, 157227 -github, huffman literals, advanced streaming, 142465 -github, multithreaded with advanced params, advanced streaming, 165915 -github.tar, level -5, advanced streaming, 46747 -github.tar, level -5 with dict, advanced streaming, 44440 -github.tar, level -3, advanced streaming, 43537 -github.tar, level -3 with dict, advanced streaming, 41112 -github.tar, level -1, advanced streaming, 42465 -github.tar, level -1 with dict, advanced streaming, 41196 -github.tar, level 0, advanced streaming, 38441 +github, huffman literals, advanced streaming, 142365 +github, multithreaded with advanced params, advanced streaming, 165911 +github.tar, level -5, 
advanced streaming, 64132 +github.tar, level -5 with dict, advanced streaming, 48642 +github.tar, level -3, advanced streaming, 50964 +github.tar, level -3 with dict, advanced streaming, 42750 +github.tar, level -1, advanced streaming, 42536 +github.tar, level -1 with dict, advanced streaming, 41198 +github.tar, level 0, advanced streaming, 38831 github.tar, level 0 with dict, advanced streaming, 37995 github.tar, level 0 with dict dms, advanced streaming, 38003 github.tar, level 0 with dict dds, advanced streaming, 38003 github.tar, level 0 with dict copy, advanced streaming, 37995 github.tar, level 0 with dict load, advanced streaming, 37956 -github.tar, level 1, advanced streaming, 39342 -github.tar, level 1 with dict, advanced streaming, 38293 -github.tar, level 1 with dict dms, advanced streaming, 38303 -github.tar, level 1 with dict dds, advanced streaming, 38303 -github.tar, level 1 with dict copy, advanced streaming, 38293 -github.tar, level 1 with dict load, advanced streaming, 38766 -github.tar, level 3, advanced streaming, 38441 +github.tar, level 1, advanced streaming, 39270 +github.tar, level 1 with dict, advanced streaming, 38316 +github.tar, level 1 with dict dms, advanced streaming, 38326 +github.tar, level 1 with dict dds, advanced streaming, 38326 +github.tar, level 1 with dict copy, advanced streaming, 38316 +github.tar, level 1 with dict load, advanced streaming, 38761 +github.tar, level 3, advanced streaming, 38831 github.tar, level 3 with dict, advanced streaming, 37995 github.tar, level 3 with dict dms, advanced streaming, 38003 github.tar, level 3 with dict dds, advanced streaming, 38003 github.tar, level 3 with dict copy, advanced streaming, 37995 github.tar, level 3 with dict load, advanced streaming, 37956 -github.tar, level 4, advanced streaming, 38467 +github.tar, level 4, advanced streaming, 38893 github.tar, level 4 with dict, advanced streaming, 37948 github.tar, level 4 with dict dms, advanced streaming, 37954 github.tar, level 4 with dict dds, advanced streaming, 37954 github.tar, level 4 with dict copy, advanced streaming, 37948 github.tar, level 4 with dict load, advanced streaming, 37927 -github.tar, level 5 row 1, advanced streaming, 39788 -github.tar, level 5 row 1 with dict dms, advanced streaming, 39365 -github.tar, level 5 row 1 with dict dds, advanced streaming, 39233 -github.tar, level 5 row 1 with dict copy, advanced streaming, 39715 -github.tar, level 5 row 1 with dict load, advanced streaming, 39209 -github.tar, level 5 row 2, advanced streaming, 39693 -github.tar, level 5 row 2 with dict dms, advanced streaming, 39024 -github.tar, level 5 row 2 with dict dds, advanced streaming, 39028 -github.tar, level 5 row 2 with dict copy, advanced streaming, 39040 -github.tar, level 5 row 2 with dict load, advanced streaming, 39037 -github.tar, level 5, advanced streaming, 39693 -github.tar, level 5 with dict, advanced streaming, 39040 -github.tar, level 5 with dict dms, advanced streaming, 39024 -github.tar, level 5 with dict dds, advanced streaming, 39028 -github.tar, level 5 with dict copy, advanced streaming, 39040 -github.tar, level 5 with dict load, advanced streaming, 39037 -github.tar, level 6, advanced streaming, 39621 -github.tar, level 6 with dict, advanced streaming, 38622 -github.tar, level 6 with dict dms, advanced streaming, 38608 -github.tar, level 6 with dict dds, advanced streaming, 38610 -github.tar, level 6 with dict copy, advanced streaming, 38622 -github.tar, level 6 with dict load, advanced streaming, 38962 -github.tar, level 7 row 
1, advanced streaming, 39206 -github.tar, level 7 row 1 with dict dms, advanced streaming, 37954 -github.tar, level 7 row 1 with dict dds, advanced streaming, 37954 -github.tar, level 7 row 1 with dict copy, advanced streaming, 38071 -github.tar, level 7 row 1 with dict load, advanced streaming, 38584 -github.tar, level 7 row 2, advanced streaming, 39213 -github.tar, level 7 row 2 with dict dms, advanced streaming, 37848 -github.tar, level 7 row 2 with dict dds, advanced streaming, 37867 -github.tar, level 7 row 2 with dict copy, advanced streaming, 37848 -github.tar, level 7 row 2 with dict load, advanced streaming, 38582 -github.tar, level 7, advanced streaming, 39213 -github.tar, level 7 with dict, advanced streaming, 37848 -github.tar, level 7 with dict dms, advanced streaming, 37848 -github.tar, level 7 with dict dds, advanced streaming, 37867 -github.tar, level 7 with dict copy, advanced streaming, 37848 -github.tar, level 7 with dict load, advanced streaming, 38582 -github.tar, level 9, advanced streaming, 36758 -github.tar, level 9 with dict, advanced streaming, 36457 -github.tar, level 9 with dict dms, advanced streaming, 36549 -github.tar, level 9 with dict dds, advanced streaming, 36637 -github.tar, level 9 with dict copy, advanced streaming, 36457 -github.tar, level 9 with dict load, advanced streaming, 36350 -github.tar, level 12 row 1, advanced streaming, 36435 +github.tar, level 5 row 1, advanced streaming, 38366 +github.tar, level 5 row 1 with dict dms, advanced streaming, 39059 +github.tar, level 5 row 1 with dict dds, advanced streaming, 39067 +github.tar, level 5 row 1 with dict copy, advanced streaming, 39082 +github.tar, level 5 row 1 with dict load, advanced streaming, 37656 +github.tar, level 5 row 2, advanced streaming, 38534 +github.tar, level 5 row 2 with dict dms, advanced streaming, 39365 +github.tar, level 5 row 2 with dict dds, advanced streaming, 39233 +github.tar, level 5 row 2 with dict copy, advanced streaming, 39715 +github.tar, level 5 row 2 with dict load, advanced streaming, 38019 +github.tar, level 5, advanced streaming, 38366 +github.tar, level 5 with dict, advanced streaming, 39082 +github.tar, level 5 with dict dms, advanced streaming, 39059 +github.tar, level 5 with dict dds, advanced streaming, 39067 +github.tar, level 5 with dict copy, advanced streaming, 39082 +github.tar, level 5 with dict load, advanced streaming, 37656 +github.tar, level 6, advanced streaming, 38648 +github.tar, level 6 with dict, advanced streaming, 38656 +github.tar, level 6 with dict dms, advanced streaming, 38636 +github.tar, level 6 with dict dds, advanced streaming, 38634 +github.tar, level 6 with dict copy, advanced streaming, 38656 +github.tar, level 6 with dict load, advanced streaming, 37865 +github.tar, level 7 row 1, advanced streaming, 38110 +github.tar, level 7 row 1 with dict dms, advanced streaming, 37858 +github.tar, level 7 row 1 with dict dds, advanced streaming, 37882 +github.tar, level 7 row 1 with dict copy, advanced streaming, 37865 +github.tar, level 7 row 1 with dict load, advanced streaming, 37436 +github.tar, level 7 row 2, advanced streaming, 38077 +github.tar, level 7 row 2 with dict dms, advanced streaming, 38012 +github.tar, level 7 row 2 with dict dds, advanced streaming, 38014 +github.tar, level 7 row 2 with dict copy, advanced streaming, 38101 +github.tar, level 7 row 2 with dict load, advanced streaming, 37402 +github.tar, level 7, advanced streaming, 38110 +github.tar, level 7 with dict, advanced streaming, 37865 +github.tar, level 7 with 
dict dms, advanced streaming, 37858 +github.tar, level 7 with dict dds, advanced streaming, 37882 +github.tar, level 7 with dict copy, advanced streaming, 37865 +github.tar, level 7 with dict load, advanced streaming, 37436 +github.tar, level 9, advanced streaming, 36760 +github.tar, level 9 with dict, advanced streaming, 36484 +github.tar, level 9 with dict dms, advanced streaming, 36567 +github.tar, level 9 with dict dds, advanced streaming, 36628 +github.tar, level 9 with dict copy, advanced streaming, 36484 +github.tar, level 9 with dict load, advanced streaming, 36401 +github.tar, level 11 row 1, advanced streaming, 36452 +github.tar, level 11 row 1 with dict dms, advanced streaming, 36963 +github.tar, level 11 row 1 with dict dds, advanced streaming, 36963 +github.tar, level 11 row 1 with dict copy, advanced streaming, 36557 +github.tar, level 11 row 1 with dict load, advanced streaming, 36455 +github.tar, level 11 row 2, advanced streaming, 36435 +github.tar, level 11 row 2 with dict dms, advanced streaming, 36963 +github.tar, level 11 row 2 with dict dds, advanced streaming, 36963 +github.tar, level 11 row 2 with dict copy, advanced streaming, 36557 +github.tar, level 11 row 2 with dict load, advanced streaming, 36419 +github.tar, level 12 row 1, advanced streaming, 36081 github.tar, level 12 row 1 with dict dms, advanced streaming, 36986 github.tar, level 12 row 1 with dict dds, advanced streaming, 36986 github.tar, level 12 row 1 with dict copy, advanced streaming, 36609 -github.tar, level 12 row 1 with dict load, advanced streaming, 36419 -github.tar, level 12 row 2, advanced streaming, 36435 +github.tar, level 12 row 1 with dict load, advanced streaming, 36434 +github.tar, level 12 row 2, advanced streaming, 36110 github.tar, level 12 row 2 with dict dms, advanced streaming, 36986 github.tar, level 12 row 2 with dict dds, advanced streaming, 36986 github.tar, level 12 row 2 with dict copy, advanced streaming, 36609 -github.tar, level 12 row 2 with dict load, advanced streaming, 36424 -github.tar, level 13, advanced streaming, 35621 -github.tar, level 13 with dict, advanced streaming, 38726 -github.tar, level 13 with dict dms, advanced streaming, 38903 -github.tar, level 13 with dict dds, advanced streaming, 38903 -github.tar, level 13 with dict copy, advanced streaming, 38726 -github.tar, level 13 with dict load, advanced streaming, 36372 -github.tar, level 16, advanced streaming, 40255 -github.tar, level 16 with dict, advanced streaming, 33639 -github.tar, level 16 with dict dms, advanced streaming, 33544 -github.tar, level 16 with dict dds, advanced streaming, 33544 -github.tar, level 16 with dict copy, advanced streaming, 33639 -github.tar, level 16 with dict load, advanced streaming, 39353 -github.tar, level 19, advanced streaming, 32837 -github.tar, level 19 with dict, advanced streaming, 32895 -github.tar, level 19 with dict dms, advanced streaming, 32672 -github.tar, level 19 with dict dds, advanced streaming, 32672 -github.tar, level 19 with dict copy, advanced streaming, 32895 -github.tar, level 19 with dict load, advanced streaming, 32676 -github.tar, no source size, advanced streaming, 38438 +github.tar, level 12 row 2 with dict load, advanced streaming, 36459 +github.tar, level 13, advanced streaming, 35501 +github.tar, level 13 with dict, advanced streaming, 37130 +github.tar, level 13 with dict dms, advanced streaming, 37220 +github.tar, level 13 with dict dds, advanced streaming, 37220 +github.tar, level 13 with dict copy, advanced streaming, 37130 +github.tar, 
level 13 with dict load, advanced streaming, 36010 +github.tar, level 16, advanced streaming, 40471 +github.tar, level 16 with dict, advanced streaming, 33378 +github.tar, level 16 with dict dms, advanced streaming, 33213 +github.tar, level 16 with dict dds, advanced streaming, 33213 +github.tar, level 16 with dict copy, advanced streaming, 33378 +github.tar, level 16 with dict load, advanced streaming, 39081 +github.tar, level 19, advanced streaming, 32134 +github.tar, level 19 with dict, advanced streaming, 32709 +github.tar, level 19 with dict dms, advanced streaming, 32553 +github.tar, level 19 with dict dds, advanced streaming, 32553 +github.tar, level 19 with dict copy, advanced streaming, 32709 +github.tar, level 19 with dict load, advanced streaming, 32474 +github.tar, no source size, advanced streaming, 38828 github.tar, no source size with dict, advanced streaming, 38000 -github.tar, long distance mode, advanced streaming, 39757 -github.tar, multithreaded, advanced streaming, 38441 -github.tar, multithreaded long distance mode, advanced streaming, 39726 +github.tar, long distance mode, advanced streaming, 40252 +github.tar, multithreaded, advanced streaming, 38831 +github.tar, multithreaded long distance mode, advanced streaming, 40232 github.tar, small window log, advanced streaming, 199558 github.tar, small hash log, advanced streaming, 129870 github.tar, small chain log, advanced streaming, 41669 -github.tar, explicit params, advanced streaming, 41227 -github.tar, uncompressed literals, advanced streaming, 41122 -github.tar, uncompressed literals optimal, advanced streaming, 35388 -github.tar, huffman literals, advanced streaming, 38800 -github.tar, multithreaded with advanced params, advanced streaming, 41122 -silesia, level -5, old streaming, 6882505 -silesia, level -3, old streaming, 6568376 -silesia, level -1, old streaming, 6183403 -silesia, level 0, old streaming, 4849552 -silesia, level 1, old streaming, 5314162 -silesia, level 3, old streaming, 4849552 -silesia, level 4, old streaming, 4786970 -silesia, level 5, old streaming, 4707794 -silesia, level 6, old streaming, 4666383 -silesia, level 7, old streaming, 4603381 -silesia, level 9, old streaming, 4546001 -silesia, level 13, old streaming, 4482135 -silesia, level 16, old streaming, 4360251 -silesia, level 19, old streaming, 4283237 -silesia, no source size, old streaming, 4849516 -silesia, uncompressed literals, old streaming, 4849552 -silesia, uncompressed literals optimal, old streaming, 4283237 -silesia, huffman literals, old streaming, 6183403 -silesia.tar, level -5, old streaming, 6982759 -silesia.tar, level -3, old streaming, 6641283 -silesia.tar, level -1, old streaming, 6190795 -silesia.tar, level 0, old streaming, 4861427 -silesia.tar, level 1, old streaming, 5336939 -silesia.tar, level 3, old streaming, 4861427 -silesia.tar, level 4, old streaming, 4799630 -silesia.tar, level 5, old streaming, 4719261 -silesia.tar, level 6, old streaming, 4677729 -silesia.tar, level 7, old streaming, 4613544 -silesia.tar, level 9, old streaming, 4555432 -silesia.tar, level 13, old streaming, 4491765 -silesia.tar, level 16, old streaming, 4356834 -silesia.tar, level 19, old streaming, 4264392 -silesia.tar, no source size, old streaming, 4861423 -silesia.tar, uncompressed literals, old streaming, 4861427 -silesia.tar, uncompressed literals optimal, old streaming, 4264392 -silesia.tar, huffman literals, old streaming, 6190795 -github, level -5, old streaming, 205285 +github.tar, explicit params, advanced streaming, 41385 
+github.tar, uncompressed literals, advanced streaming, 41525 +github.tar, uncompressed literals optimal, advanced streaming, 35397 +github.tar, huffman literals, advanced streaming, 38874 +github.tar, multithreaded with advanced params, advanced streaming, 41525 +silesia, level -5, old streaming, 7292053 +silesia, level -3, old streaming, 6867875 +silesia, level -1, old streaming, 6183923 +silesia, level 0, old streaming, 4842075 +silesia, level 1, old streaming, 5312694 +silesia, level 3, old streaming, 4842075 +silesia, level 4, old streaming, 4779186 +silesia, level 5, old streaming, 4638691 +silesia, level 6, old streaming, 4605296 +silesia, level 7, old streaming, 4566984 +silesia, level 9, old streaming, 4543018 +silesia, level 13, old streaming, 4493990 +silesia, level 16, old streaming, 4359864 +silesia, level 19, old streaming, 4296880 +silesia, no source size, old streaming, 4842039 +silesia, uncompressed literals, old streaming, 4842075 +silesia, uncompressed literals optimal, old streaming, 4296880 +silesia, huffman literals, old streaming, 6183923 +silesia.tar, level -5, old streaming, 7260007 +silesia.tar, level -3, old streaming, 6845151 +silesia.tar, level -1, old streaming, 6187938 +silesia.tar, level 0, old streaming, 4859271 +silesia.tar, level 1, old streaming, 5334890 +silesia.tar, level 3, old streaming, 4859271 +silesia.tar, level 4, old streaming, 4797470 +silesia.tar, level 5, old streaming, 4649992 +silesia.tar, level 6, old streaming, 4616803 +silesia.tar, level 7, old streaming, 4576664 +silesia.tar, level 9, old streaming, 4552386 +silesia.tar, level 13, old streaming, 4502956 +silesia.tar, level 16, old streaming, 4360527 +silesia.tar, level 19, old streaming, 4267266 +silesia.tar, no source size, old streaming, 4859267 +silesia.tar, uncompressed literals, old streaming, 4859271 +silesia.tar, uncompressed literals optimal, old streaming, 4267266 +silesia.tar, huffman literals, old streaming, 6187938 +github, level -5, old streaming, 232315 github, level -5 with dict, old streaming, 46718 -github, level -3, old streaming, 190643 +github, level -3, old streaming, 220760 github, level -3 with dict, old streaming, 45395 -github, level -1, old streaming, 175568 +github, level -1, old streaming, 175468 github, level -1 with dict, old streaming, 43170 -github, level 0, old streaming, 136335 +github, level 0, old streaming, 136332 github, level 0 with dict, old streaming, 41148 -github, level 1, old streaming, 142465 +github, level 1, old streaming, 142365 github, level 1 with dict, old streaming, 41682 -github, level 3, old streaming, 136335 +github, level 3, old streaming, 136332 github, level 3 with dict, old streaming, 41148 github, level 4, old streaming, 136199 github, level 4 with dict, old streaming, 41251 @@ -1169,130 +1241,130 @@ github, level 7 with dict, old stre github, level 9, old streaming, 135122 github, level 9 with dict, old streaming, 39437 github, level 13, old streaming, 134064 -github, level 13 with dict, old streaming, 39743 +github, level 13 with dict, old streaming, 39900 github, level 16, old streaming, 134064 github, level 16 with dict, old streaming, 37577 github, level 19, old streaming, 134064 github, level 19 with dict, old streaming, 37576 -github, no source size, old streaming, 140632 +github, no source size, old streaming, 140599 github, no source size with dict, old streaming, 40654 -github, uncompressed literals, old streaming, 136335 +github, uncompressed literals, old streaming, 136332 github, uncompressed literals optimal, old 
streaming, 134064 -github, huffman literals, old streaming, 175568 -github.tar, level -5, old streaming, 46747 -github.tar, level -5 with dict, old streaming, 44440 -github.tar, level -3, old streaming, 43537 -github.tar, level -3 with dict, old streaming, 41112 -github.tar, level -1, old streaming, 42465 -github.tar, level -1 with dict, old streaming, 41196 -github.tar, level 0, old streaming, 38441 +github, huffman literals, old streaming, 175468 +github.tar, level -5, old streaming, 64132 +github.tar, level -5 with dict, old streaming, 48642 +github.tar, level -3, old streaming, 50964 +github.tar, level -3 with dict, old streaming, 42750 +github.tar, level -1, old streaming, 42536 +github.tar, level -1 with dict, old streaming, 41198 +github.tar, level 0, old streaming, 38831 github.tar, level 0 with dict, old streaming, 37995 -github.tar, level 1, old streaming, 39342 -github.tar, level 1 with dict, old streaming, 38293 -github.tar, level 3, old streaming, 38441 +github.tar, level 1, old streaming, 39270 +github.tar, level 1 with dict, old streaming, 38316 +github.tar, level 3, old streaming, 38831 github.tar, level 3 with dict, old streaming, 37995 -github.tar, level 4, old streaming, 38467 +github.tar, level 4, old streaming, 38893 github.tar, level 4 with dict, old streaming, 37948 -github.tar, level 5, old streaming, 39693 -github.tar, level 5 with dict, old streaming, 39040 -github.tar, level 6, old streaming, 39621 -github.tar, level 6 with dict, old streaming, 38622 -github.tar, level 7, old streaming, 39213 -github.tar, level 7 with dict, old streaming, 37848 -github.tar, level 9, old streaming, 36758 -github.tar, level 9 with dict, old streaming, 36457 -github.tar, level 13, old streaming, 35621 -github.tar, level 13 with dict, old streaming, 38726 -github.tar, level 16, old streaming, 40255 -github.tar, level 16 with dict, old streaming, 33639 -github.tar, level 19, old streaming, 32837 -github.tar, level 19 with dict, old streaming, 32895 -github.tar, no source size, old streaming, 38438 +github.tar, level 5, old streaming, 38366 +github.tar, level 5 with dict, old streaming, 39082 +github.tar, level 6, old streaming, 38648 +github.tar, level 6 with dict, old streaming, 38656 +github.tar, level 7, old streaming, 38110 +github.tar, level 7 with dict, old streaming, 37865 +github.tar, level 9, old streaming, 36760 +github.tar, level 9 with dict, old streaming, 36484 +github.tar, level 13, old streaming, 35501 +github.tar, level 13 with dict, old streaming, 37130 +github.tar, level 16, old streaming, 40471 +github.tar, level 16 with dict, old streaming, 33378 +github.tar, level 19, old streaming, 32134 +github.tar, level 19 with dict, old streaming, 32709 +github.tar, no source size, old streaming, 38828 github.tar, no source size with dict, old streaming, 38000 -github.tar, uncompressed literals, old streaming, 38441 -github.tar, uncompressed literals optimal, old streaming, 32837 -github.tar, huffman literals, old streaming, 42465 -silesia, level -5, old streaming advanced, 6882505 -silesia, level -3, old streaming advanced, 6568376 -silesia, level -1, old streaming advanced, 6183403 -silesia, level 0, old streaming advanced, 4849552 -silesia, level 1, old streaming advanced, 5314162 -silesia, level 3, old streaming advanced, 4849552 -silesia, level 4, old streaming advanced, 4786970 -silesia, level 5, old streaming advanced, 4707794 -silesia, level 6, old streaming advanced, 4666383 -silesia, level 7, old streaming advanced, 4603381 -silesia, level 9, old streaming advanced, 
4546001 -silesia, level 13, old streaming advanced, 4482135 -silesia, level 16, old streaming advanced, 4360251 -silesia, level 19, old streaming advanced, 4283237 -silesia, no source size, old streaming advanced, 4849516 -silesia, long distance mode, old streaming advanced, 4849552 -silesia, multithreaded, old streaming advanced, 4849552 -silesia, multithreaded long distance mode, old streaming advanced, 4849552 -silesia, small window log, old streaming advanced, 7112062 +github.tar, uncompressed literals, old streaming, 38831 +github.tar, uncompressed literals optimal, old streaming, 32134 +github.tar, huffman literals, old streaming, 42536 +silesia, level -5, old streaming advanced, 7292053 +silesia, level -3, old streaming advanced, 6867875 +silesia, level -1, old streaming advanced, 6183923 +silesia, level 0, old streaming advanced, 4842075 +silesia, level 1, old streaming advanced, 5312694 +silesia, level 3, old streaming advanced, 4842075 +silesia, level 4, old streaming advanced, 4779186 +silesia, level 5, old streaming advanced, 4638691 +silesia, level 6, old streaming advanced, 4605296 +silesia, level 7, old streaming advanced, 4566984 +silesia, level 9, old streaming advanced, 4543018 +silesia, level 13, old streaming advanced, 4493990 +silesia, level 16, old streaming advanced, 4359864 +silesia, level 19, old streaming advanced, 4296880 +silesia, no source size, old streaming advanced, 4842039 +silesia, long distance mode, old streaming advanced, 4842075 +silesia, multithreaded, old streaming advanced, 4842075 +silesia, multithreaded long distance mode, old streaming advanced, 4842075 +silesia, small window log, old streaming advanced, 7111103 silesia, small hash log, old streaming advanced, 6526141 silesia, small chain log, old streaming advanced, 4912197 -silesia, explicit params, old streaming advanced, 4795887 -silesia, uncompressed literals, old streaming advanced, 4849552 -silesia, uncompressed literals optimal, old streaming advanced, 4283237 -silesia, huffman literals, old streaming advanced, 6183403 -silesia, multithreaded with advanced params, old streaming advanced, 4849552 -silesia.tar, level -5, old streaming advanced, 6982759 -silesia.tar, level -3, old streaming advanced, 6641283 -silesia.tar, level -1, old streaming advanced, 6190795 -silesia.tar, level 0, old streaming advanced, 4861427 -silesia.tar, level 1, old streaming advanced, 5336939 -silesia.tar, level 3, old streaming advanced, 4861427 -silesia.tar, level 4, old streaming advanced, 4799630 -silesia.tar, level 5, old streaming advanced, 4719261 -silesia.tar, level 6, old streaming advanced, 4677729 -silesia.tar, level 7, old streaming advanced, 4613544 -silesia.tar, level 9, old streaming advanced, 4555432 -silesia.tar, level 13, old streaming advanced, 4491765 -silesia.tar, level 16, old streaming advanced, 4356834 -silesia.tar, level 19, old streaming advanced, 4264392 -silesia.tar, no source size, old streaming advanced, 4861423 -silesia.tar, long distance mode, old streaming advanced, 4861427 -silesia.tar, multithreaded, old streaming advanced, 4861427 -silesia.tar, multithreaded long distance mode, old streaming advanced, 4861427 -silesia.tar, small window log, old streaming advanced, 7118772 -silesia.tar, small hash log, old streaming advanced, 6529235 +silesia, explicit params, old streaming advanced, 4795452 +silesia, uncompressed literals, old streaming advanced, 4842075 +silesia, uncompressed literals optimal, old streaming advanced, 4296880 +silesia, huffman literals, old streaming advanced, 
6183923 +silesia, multithreaded with advanced params, old streaming advanced, 4842075 +silesia.tar, level -5, old streaming advanced, 7260007 +silesia.tar, level -3, old streaming advanced, 6845151 +silesia.tar, level -1, old streaming advanced, 6187938 +silesia.tar, level 0, old streaming advanced, 4859271 +silesia.tar, level 1, old streaming advanced, 5334890 +silesia.tar, level 3, old streaming advanced, 4859271 +silesia.tar, level 4, old streaming advanced, 4797470 +silesia.tar, level 5, old streaming advanced, 4649992 +silesia.tar, level 6, old streaming advanced, 4616803 +silesia.tar, level 7, old streaming advanced, 4576664 +silesia.tar, level 9, old streaming advanced, 4552386 +silesia.tar, level 13, old streaming advanced, 4502956 +silesia.tar, level 16, old streaming advanced, 4360527 +silesia.tar, level 19, old streaming advanced, 4267266 +silesia.tar, no source size, old streaming advanced, 4859267 +silesia.tar, long distance mode, old streaming advanced, 4859271 +silesia.tar, multithreaded, old streaming advanced, 4859271 +silesia.tar, multithreaded long distance mode, old streaming advanced, 4859271 +silesia.tar, small window log, old streaming advanced, 7117562 +silesia.tar, small hash log, old streaming advanced, 6529234 silesia.tar, small chain log, old streaming advanced, 4917021 -silesia.tar, explicit params, old streaming advanced, 4807401 -silesia.tar, uncompressed literals, old streaming advanced, 4861427 -silesia.tar, uncompressed literals optimal, old streaming advanced, 4264392 -silesia.tar, huffman literals, old streaming advanced, 6190795 -silesia.tar, multithreaded with advanced params, old streaming advanced, 4861427 -github, level -5, old streaming advanced, 216734 +silesia.tar, explicit params, old streaming advanced, 4806873 +silesia.tar, uncompressed literals, old streaming advanced, 4859271 +silesia.tar, uncompressed literals optimal, old streaming advanced, 4267266 +silesia.tar, huffman literals, old streaming advanced, 6187938 +silesia.tar, multithreaded with advanced params, old streaming advanced, 4859271 +github, level -5, old streaming advanced, 241214 github, level -5 with dict, old streaming advanced, 49562 -github, level -3, old streaming advanced, 192160 +github, level -3, old streaming advanced, 222937 github, level -3 with dict, old streaming advanced, 44956 -github, level -1, old streaming advanced, 181108 +github, level -1, old streaming advanced, 181107 github, level -1 with dict, old streaming advanced, 42383 github, level 0, old streaming advanced, 141104 github, level 0 with dict, old streaming advanced, 41113 -github, level 1, old streaming advanced, 143692 +github, level 1, old streaming advanced, 143693 github, level 1 with dict, old streaming advanced, 42430 github, level 3, old streaming advanced, 141104 github, level 3 with dict, old streaming advanced, 41113 github, level 4, old streaming advanced, 141104 github, level 4 with dict, old streaming advanced, 41084 -github, level 5, old streaming advanced, 139399 -github, level 5 with dict, old streaming advanced, 38633 +github, level 5, old streaming advanced, 139402 +github, level 5 with dict, old streaming advanced, 38805 github, level 6, old streaming advanced, 139402 -github, level 6 with dict, old streaming advanced, 38723 +github, level 6 with dict, old streaming advanced, 39363 github, level 7, old streaming advanced, 138676 -github, level 7 with dict, old streaming advanced, 38744 +github, level 7 with dict, old streaming advanced, 38924 github, level 9, old streaming 
advanced, 138676 github, level 9 with dict, old streaming advanced, 38981 github, level 13, old streaming advanced, 138676 -github, level 13 with dict, old streaming advanced, 39731 +github, level 13 with dict, old streaming advanced, 39725 github, level 16, old streaming advanced, 138676 github, level 16 with dict, old streaming advanced, 40789 github, level 19, old streaming advanced, 134064 github, level 19 with dict, old streaming advanced, 37576 -github, no source size, old streaming advanced, 140632 +github, no source size, old streaming advanced, 140599 github, no source size with dict, old streaming advanced, 40608 github, long distance mode, old streaming advanced, 141104 github, multithreaded, old streaming advanced, 141104 @@ -1303,49 +1375,49 @@ github, small chain log, old stre github, explicit params, old streaming advanced, 140937 github, uncompressed literals, old streaming advanced, 141104 github, uncompressed literals optimal, old streaming advanced, 134064 -github, huffman literals, old streaming advanced, 181108 +github, huffman literals, old streaming advanced, 181107 github, multithreaded with advanced params, old streaming advanced, 141104 -github.tar, level -5, old streaming advanced, 46747 -github.tar, level -5 with dict, old streaming advanced, 44824 -github.tar, level -3, old streaming advanced, 43537 -github.tar, level -3 with dict, old streaming advanced, 41800 -github.tar, level -1, old streaming advanced, 42465 -github.tar, level -1 with dict, old streaming advanced, 41471 -github.tar, level 0, old streaming advanced, 38441 +github.tar, level -5, old streaming advanced, 64132 +github.tar, level -5 with dict, old streaming advanced, 48982 +github.tar, level -3, old streaming advanced, 50964 +github.tar, level -3 with dict, old streaming advanced, 43357 +github.tar, level -1, old streaming advanced, 42536 +github.tar, level -1 with dict, old streaming advanced, 41494 +github.tar, level 0, old streaming advanced, 38831 github.tar, level 0 with dict, old streaming advanced, 38013 -github.tar, level 1, old streaming advanced, 39342 -github.tar, level 1 with dict, old streaming advanced, 38940 -github.tar, level 3, old streaming advanced, 38441 +github.tar, level 1, old streaming advanced, 39270 +github.tar, level 1 with dict, old streaming advanced, 38934 +github.tar, level 3, old streaming advanced, 38831 github.tar, level 3 with dict, old streaming advanced, 38013 -github.tar, level 4, old streaming advanced, 38467 +github.tar, level 4, old streaming advanced, 38893 github.tar, level 4 with dict, old streaming advanced, 38063 -github.tar, level 5, old streaming advanced, 39693 -github.tar, level 5 with dict, old streaming advanced, 39049 -github.tar, level 6, old streaming advanced, 39621 -github.tar, level 6 with dict, old streaming advanced, 38959 -github.tar, level 7, old streaming advanced, 39213 -github.tar, level 7 with dict, old streaming advanced, 38573 -github.tar, level 9, old streaming advanced, 36758 -github.tar, level 9 with dict, old streaming advanced, 36233 -github.tar, level 13, old streaming advanced, 35621 -github.tar, level 13 with dict, old streaming advanced, 36035 -github.tar, level 16, old streaming advanced, 40255 -github.tar, level 16 with dict, old streaming advanced, 38736 -github.tar, level 19, old streaming advanced, 32837 -github.tar, level 19 with dict, old streaming advanced, 32876 -github.tar, no source size, old streaming advanced, 38438 +github.tar, level 5, old streaming advanced, 38366 +github.tar, level 5 with dict, old 
streaming advanced, 37728 +github.tar, level 6, old streaming advanced, 38648 +github.tar, level 6 with dict, old streaming advanced, 37820 +github.tar, level 7, old streaming advanced, 38110 +github.tar, level 7 with dict, old streaming advanced, 37387 +github.tar, level 9, old streaming advanced, 36760 +github.tar, level 9 with dict, old streaming advanced, 36312 +github.tar, level 13, old streaming advanced, 35501 +github.tar, level 13 with dict, old streaming advanced, 35807 +github.tar, level 16, old streaming advanced, 40471 +github.tar, level 16 with dict, old streaming advanced, 38578 +github.tar, level 19, old streaming advanced, 32134 +github.tar, level 19 with dict, old streaming advanced, 32702 +github.tar, no source size, old streaming advanced, 38828 github.tar, no source size with dict, old streaming advanced, 38015 -github.tar, long distance mode, old streaming advanced, 38441 -github.tar, multithreaded, old streaming advanced, 38441 -github.tar, multithreaded long distance mode, old streaming advanced, 38441 +github.tar, long distance mode, old streaming advanced, 38831 +github.tar, multithreaded, old streaming advanced, 38831 +github.tar, multithreaded long distance mode, old streaming advanced, 38831 github.tar, small window log, old streaming advanced, 199561 github.tar, small hash log, old streaming advanced, 129870 github.tar, small chain log, old streaming advanced, 41669 -github.tar, explicit params, old streaming advanced, 41227 -github.tar, uncompressed literals, old streaming advanced, 38441 -github.tar, uncompressed literals optimal, old streaming advanced, 32837 -github.tar, huffman literals, old streaming advanced, 42465 -github.tar, multithreaded with advanced params, old streaming advanced, 38441 +github.tar, explicit params, old streaming advanced, 41385 +github.tar, uncompressed literals, old streaming advanced, 38831 +github.tar, uncompressed literals optimal, old streaming advanced, 32134 +github.tar, huffman literals, old streaming advanced, 42536 +github.tar, multithreaded with advanced params, old streaming advanced, 38831 github, level -5 with dict, old streaming cdict, 46718 github, level -3 with dict, old streaming cdict, 45395 github, level -1 with dict, old streaming cdict, 43170 @@ -1357,24 +1429,24 @@ github, level 5 with dict, old stre github, level 6 with dict, old streaming cdict, 38671 github, level 7 with dict, old streaming cdict, 38758 github, level 9 with dict, old streaming cdict, 39437 -github, level 13 with dict, old streaming cdict, 39743 +github, level 13 with dict, old streaming cdict, 39900 github, level 16 with dict, old streaming cdict, 37577 github, level 19 with dict, old streaming cdict, 37576 github, no source size with dict, old streaming cdict, 40654 -github.tar, level -5 with dict, old streaming cdict, 45018 -github.tar, level -3 with dict, old streaming cdict, 41886 -github.tar, level -1 with dict, old streaming cdict, 41636 +github.tar, level -5 with dict, old streaming cdict, 49146 +github.tar, level -3 with dict, old streaming cdict, 43468 +github.tar, level -1 with dict, old streaming cdict, 41662 github.tar, level 0 with dict, old streaming cdict, 37956 -github.tar, level 1 with dict, old streaming cdict, 38766 +github.tar, level 1 with dict, old streaming cdict, 38761 github.tar, level 3 with dict, old streaming cdict, 37956 github.tar, level 4 with dict, old streaming cdict, 37927 -github.tar, level 5 with dict, old streaming cdict, 39037 -github.tar, level 6 with dict, old streaming cdict, 38962 -github.tar, 
level 7 with dict, old streaming cdict, 38582 -github.tar, level 9 with dict, old streaming cdict, 36350 -github.tar, level 13 with dict, old streaming cdict, 36372 -github.tar, level 16 with dict, old streaming cdict, 39353 -github.tar, level 19 with dict, old streaming cdict, 32676 +github.tar, level 5 with dict, old streaming cdict, 37656 +github.tar, level 6 with dict, old streaming cdict, 37865 +github.tar, level 7 with dict, old streaming cdict, 37436 +github.tar, level 9 with dict, old streaming cdict, 36401 +github.tar, level 13 with dict, old streaming cdict, 36010 +github.tar, level 16 with dict, old streaming cdict, 39081 +github.tar, level 19 with dict, old streaming cdict, 32474 github.tar, no source size with dict, old streaming cdict, 38000 github, level -5 with dict, old streaming advanced cdict, 49562 github, level -3 with dict, old streaming advanced cdict, 44956 @@ -1383,11 +1455,11 @@ github, level 0 with dict, old stre github, level 1 with dict, old streaming advanced cdict, 42430 github, level 3 with dict, old streaming advanced cdict, 41113 github, level 4 with dict, old streaming advanced cdict, 41084 -github, level 5 with dict, old streaming advanced cdict, 38633 -github, level 6 with dict, old streaming advanced cdict, 38723 -github, level 7 with dict, old streaming advanced cdict, 38744 +github, level 5 with dict, old streaming advanced cdict, 38805 +github, level 6 with dict, old streaming advanced cdict, 39363 +github, level 7 with dict, old streaming advanced cdict, 38924 github, level 9 with dict, old streaming advanced cdict, 38981 -github, level 13 with dict, old streaming advanced cdict, 39731 +github, level 13 with dict, old streaming advanced cdict, 39725 github, level 16 with dict, old streaming advanced cdict, 40789 github, level 19 with dict, old streaming advanced cdict, 37576 github, no source size with dict, old streaming advanced cdict, 40608 @@ -1398,11 +1470,11 @@ github.tar, level 0 with dict, old stre github.tar, level 1 with dict, old streaming advanced cdict, 39002 github.tar, level 3 with dict, old streaming advanced cdict, 38013 github.tar, level 4 with dict, old streaming advanced cdict, 38063 -github.tar, level 5 with dict, old streaming advanced cdict, 39049 -github.tar, level 6 with dict, old streaming advanced cdict, 38959 -github.tar, level 7 with dict, old streaming advanced cdict, 38573 -github.tar, level 9 with dict, old streaming advanced cdict, 36233 -github.tar, level 13 with dict, old streaming advanced cdict, 36035 -github.tar, level 16 with dict, old streaming advanced cdict, 38736 -github.tar, level 19 with dict, old streaming advanced cdict, 32876 +github.tar, level 5 with dict, old streaming advanced cdict, 37728 +github.tar, level 6 with dict, old streaming advanced cdict, 37820 +github.tar, level 7 with dict, old streaming advanced cdict, 37387 +github.tar, level 9 with dict, old streaming advanced cdict, 36312 +github.tar, level 13 with dict, old streaming advanced cdict, 35807 +github.tar, level 16 with dict, old streaming advanced cdict, 38578 +github.tar, level 19 with dict, old streaming advanced cdict, 32702 github.tar, no source size with dict, old streaming advanced cdict, 38015 diff --git a/tests/test-variants.sh b/tests/test-variants.sh new file mode 100755 index 000000000..f3a9e065b --- /dev/null +++ b/tests/test-variants.sh @@ -0,0 +1,115 @@ +#!/bin/sh +set -e +set -u +set -x + + +SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd) +PROG_DIR="$SCRIPT_DIR/../programs" + +ZSTD="$PROG_DIR/zstd" 
+ZSTD_COMPRESS="$PROG_DIR/zstd-compress" +ZSTD_DECOMPRESS="$PROG_DIR/zstd-decompress" +ZSTD_NOLEGACY="$PROG_DIR/zstd-nolegacy" +ZSTD_DICTBUILDER="$PROG_DIR/zstd-dictBuilder" +ZSTD_FRUGAL="$PROG_DIR/zstd-frugal" +ZSTD_NOMT="$PROG_DIR/zstd-nomt" + +println() { + printf '%b\n' "${*}" +} + +die() { + println "$@" 1>&2 + exit 1 +} + +symbol_present() { + (nm $1 || echo "symbol_present $@ failed") | grep $2 +} + +symbol_not_present() { + symbol_present $@ && die "Binary '$1' mistakenly contains symbol '$2'" ||: +} + +compress_not_present() { + symbol_not_present "$1" ZSTD_compress +} + +decompress_not_present() { + symbol_not_present "$1" ZSTD_decompress +} + +dict_not_present() { + symbol_not_present "$1" ZDICT_ + symbol_not_present "$1" COVER_ +} + +cliextra_not_present() { + symbol_not_present "$1" TRACE_ + symbol_not_present "$1" BMK_ +} + +legacy_not_present() { + symbol_not_present "$1" ZSTDv0 +} + +test_help() { + "$1" --help | grep -- "$2" +} + +test_no_help() { + test_help $@ && die "'$1' supports '$2' when it shouldn't" ||: +} + +extras_not_present() { + dict_not_present $@ + legacy_not_present $@ + cliextra_not_present $@ + test_no_help $@ "--train" + test_no_help $@ "-b#" +} + +test_compress() { + echo "hello" | "$1" | "$ZSTD" -t +} + +test_decompress() { + echo "hello" | "$ZSTD" | "$1" -t +} + +test_zstd() { + test_compress $@ + test_decompress $@ +} + +extras_not_present "$ZSTD_FRUGAL" +extras_not_present "$ZSTD_COMPRESS" +extras_not_present "$ZSTD_DECOMPRESS" + +compress_not_present "$ZSTD_DECOMPRESS" + +decompress_not_present "$ZSTD_COMPRESS" +decompress_not_present "$ZSTD_DICTBUILDER" + +cliextra_not_present "$ZSTD_DICTBUILDER" + +legacy_not_present "$ZSTD_DICTBUILDER" +legacy_not_present "$ZSTD_NOLEGACY" + +symbol_not_present "$ZSTD" ZSTDv01 +symbol_not_present "$ZSTD" ZSTDv02 +symbol_not_present "$ZSTD" ZSTDv03 +symbol_not_present "$ZSTD" ZSTDv04 + +test_compress "$ZSTD_COMPRESS" +test_decompress "$ZSTD_DECOMPRESS" + +test_zstd "$ZSTD_FRUGAL" +test_zstd "$ZSTD_NOLEGACY" + +test_help "$ZSTD" '-b#' +test_help "$ZSTD" --train +test_help "$ZSTD_DICTBUILDER" --train + +println "Success!" diff --git a/tests/test-zstd-versions.py b/tests/test-zstd-versions.py index c86af7ddf..baca251f5 100755 --- a/tests/test-zstd-versions.py +++ b/tests/test-zstd-versions.py @@ -23,6 +23,7 @@ from subprocess import Popen, PIPE repo_url = 'https://github.com/facebook/zstd.git' tmp_dir_name = 'tests/versionsTest' make_cmd = 'make' +make_args = ['-j','CFLAGS=-O1'] git_cmd = 'git' test_dat_src = 'README.md' test_dat = 'test_dat' @@ -56,8 +57,11 @@ def proc(cmd_args, pipe=True, dummy=False): return subproc.communicate() -def make(args, pipe=True): - return proc([make_cmd] + args, pipe) +def make(targets, pipe=True): + cmd = [make_cmd] + make_args + targets + cmd_str = str(cmd) + print('compilation command : ' + cmd_str) + return proc(cmd, pipe) def git(args, pipe=True): @@ -223,20 +227,28 @@ if __name__ == '__main__': dst_zstd = '{}/zstd.{}'.format(tmp_dir, tag) # /path/to/zstd/tests/versionsTest/zstd. 
if not os.path.isfile(dst_zstd) or tag == head: if tag != head: + print('-----------------------------------------------') + print('compiling ' + tag) + print('-----------------------------------------------') r_dir = '{}/{}'.format(tmp_dir, tag) # /path/to/zstd/tests/versionsTest/ os.makedirs(r_dir, exist_ok=True) os.chdir(clone_dir) git(['--work-tree=' + r_dir, 'checkout', tag, '--', '.'], False) if tag == 'v0.5.0': os.chdir(r_dir + '/dictBuilder') # /path/to/zstd/tests/versionsTest/v0.5.0/dictBuilder - make(['clean', 'dictBuilder'], False) + make(['clean'], False) # separate 'clean' target to allow parallel build + make(['dictBuilder'], False) shutil.copy2('dictBuilder', '{}/dictBuilder.{}'.format(tmp_dir, tag)) os.chdir(r_dir + '/programs') # /path/to/zstd/tests/versionsTest//programs - make(['clean', 'zstd'], False) + make(['clean'], False) # separate 'clean' target to allow parallel build + make(['zstd'], False) else: os.chdir(programs_dir) + print('-----------------------------------------------') + print('compiling head') + print('-----------------------------------------------') make(['zstd'], False) - shutil.copy2('zstd', dst_zstd) + shutil.copy2('zstd', dst_zstd) # remove any remaining *.zst and *.dec from previous test os.chdir(tmp_dir) @@ -251,7 +263,9 @@ if __name__ == '__main__': print('cp ' + dict_files + ' ' + dict_source_path) execute('cp ' + dict_files + ' ' + dict_source_path, param_shell=True) + print('-----------------------------------------------') print('Compress test.dat by all released zstd') + print('-----------------------------------------------') error_code = 0 for tag in tags: diff --git a/tests/zstreamtest.c b/tests/zstreamtest.c index bbef903f8..6eb107f9d 100644 --- a/tests/zstreamtest.c +++ b/tests/zstreamtest.c @@ -906,7 +906,7 @@ static int basicUnitTests(U32 seed, double compressibility) in.pos = 0; in.size = CNBufferSize - in.size; CHECK(!(ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end) == 0), "Not finished"); - CHECK_Z(ZSTD_decompress(decodedBuffer, CNBufferSize, compressedBuffer, cSize)); + CHECK_Z(ZSTD_decompress(decodedBuffer, CNBufferSize, compressedBuffer, out.pos)); DISPLAYLEVEL(3, "OK \n"); DISPLAYLEVEL(3, "test%3i : ZSTD_compressStream2() ZSTD_c_stableOutBuffer modify buffer : ", testNb++); @@ -1063,7 +1063,7 @@ static int basicUnitTests(U32 seed, double compressibility) if (ZSTD_decompressStream(dctx, &out, &in) != 0) goto _output_error; if (in.pos != in.size) goto _output_error; } - /* The dictionary should presist across calls. */ + /* The dictionary should persist across calls. */ { ZSTD_outBuffer out = {decodedBuffer, decodedBufferSize, 0}; ZSTD_inBuffer in = {compressedBuffer, cSize, 0}; if (ZSTD_decompressStream(dctx, &out, &in) != 0) goto _output_error; @@ -1128,7 +1128,7 @@ static int basicUnitTests(U32 seed, double compressibility) if (ZSTD_decompressStream(dctx, &out, &in) != 0) goto _output_error; if (in.pos != in.size) goto _output_error; } - /* The ddict should presist across calls. */ + /* The ddict should persist across calls. */ { ZSTD_outBuffer out = {decodedBuffer, decodedBufferSize, 0}; ZSTD_inBuffer in = {compressedBuffer, cSize, 0}; if (ZSTD_decompressStream(dctx, &out, &in) != 0) goto _output_error; @@ -1175,12 +1175,12 @@ static int basicUnitTests(U32 seed, double compressibility) /* We should succeed to decompress with the dictionary. 
*/ CHECK_Z( ZSTD_initDStream_usingDict(dctx, dictionary.start, dictionary.filled) ); CHECK_Z( ZSTD_decompressDCtx(dctx, decodedBuffer, decodedBufferSize, compressedBuffer, cSize) ); - /* The dictionary should presist across calls. */ + /* The dictionary should persist across calls. */ CHECK_Z( ZSTD_decompressDCtx(dctx, decodedBuffer, decodedBufferSize, compressedBuffer, cSize) ); /* We should succeed to decompress with the ddict. */ CHECK_Z( ZSTD_initDStream_usingDDict(dctx, ddict) ); CHECK_Z( ZSTD_decompressDCtx(dctx, decodedBuffer, decodedBufferSize, compressedBuffer, cSize) ); - /* The ddict should presist across calls. */ + /* The ddict should persist across calls. */ CHECK_Z( ZSTD_decompressDCtx(dctx, decodedBuffer, decodedBufferSize, compressedBuffer, cSize) ); /* When we reset the context the ddict is cleared. */ CHECK_Z( ZSTD_initDStream(dctx) ); @@ -2277,7 +2277,7 @@ static int fuzzerTests_newAPI(U32 seed, int nbTests, int startTest, CHECK_Z( ZSTD_CCtx_refPrefix(zc, dict, dictSize) ); } - /* Adjust number of workers occassionally - result must be deterministic independent of nbWorkers */ + /* Adjust number of workers occasionally - result must be deterministic independent of nbWorkers */ CHECK_Z(ZSTD_CCtx_getParameter(zc, ZSTD_c_nbWorkers, &nbWorkers)); if (nbWorkers > 0 && (FUZ_rand(&lseed) & 7) == 0) { DISPLAYLEVEL(6, "t%u: Modify nbWorkers: %d -> %d \n", testNb, nbWorkers, nbWorkers + iter); diff --git a/zlibWrapper/examples/fitblk.c b/zlibWrapper/examples/fitblk.c index 669b176eb..8dc7071ee 100644 --- a/zlibWrapper/examples/fitblk.c +++ b/zlibWrapper/examples/fitblk.c @@ -119,7 +119,7 @@ local int recompress(z_streamp inf, z_streamp def) if (ret == Z_MEM_ERROR) return ret; - /* compress what was decompresed until done or no room */ + /* compress what was decompressed until done or no room */ def->avail_in = RAWLEN - inf->avail_out; def->next_in = raw; if (inf->avail_out != 0) diff --git a/zlibWrapper/examples/fitblk_original.c b/zlibWrapper/examples/fitblk_original.c index 20f351bfa..723dc0028 100644 --- a/zlibWrapper/examples/fitblk_original.c +++ b/zlibWrapper/examples/fitblk_original.c @@ -109,7 +109,7 @@ local int recompress(z_streamp inf, z_streamp def) if (ret == Z_MEM_ERROR) return ret; - /* compress what was decompresed until done or no room */ + /* compress what was decompressed until done or no room */ def->avail_in = RAWLEN - inf->avail_out; def->next_in = raw; if (inf->avail_out != 0) diff --git a/zlibWrapper/zstd_zlibwrapper.c b/zlibWrapper/zstd_zlibwrapper.c index ceb239379..adb231f06 100644 --- a/zlibWrapper/zstd_zlibwrapper.c +++ b/zlibWrapper/zstd_zlibwrapper.c @@ -1189,3 +1189,10 @@ ZEXTERN const z_crc_t FAR * ZEXPORT z_get_crc_table OF((void)) return get_crc_table(); } #endif + + /* Error function */ +ZEXTERN const char * ZEXPORT z_zError OF((int err)) +{ + /* Just use zlib Error function */ + return zError(err); +}
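Editorial note on the last hunk: the new z_zError() entry point added to zlibWrapper/zstd_zlibwrapper.c simply forwards to zlib's zError(), so code built against the wrapper can retrieve the standard zlib error strings through the prefixed name. Below is a minimal sketch of how it could be exercised; it is illustration only, not part of the patch, and the extern declaration plus the link step against the wrapper and zlib are assumptions.

/* Minimal sketch (illustration only, not part of the patch): call the new
 * z_zError() forwarder and print zlib's message for a few error codes.
 * Assumes the program is linked against zlibWrapper and zlib. */
#include <stdio.h>
#include <zlib.h>

/* Matches the wrapper's new export; if zlib's Z_PREFIX naming is in effect,
 * an equivalent declaration may already come from the zlib headers. */
extern const char * z_zError(int err);

int main(void)
{
    printf("Z_OK        -> \"%s\"\n", z_zError(Z_OK));
    printf("Z_MEM_ERROR -> \"%s\"\n", z_zError(Z_MEM_ERROR));
    printf("Z_BUF_ERROR -> \"%s\"\n", z_zError(Z_BUF_ERROR));
    return 0;
}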