/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H

/*-*************************************
* Dependencies
***************************************/
#include "../common/allocations.h"  /* ZSTD_customMalloc, ZSTD_customFree */
#include "../common/zstd_internal.h"
#include "../common/portability_macros.h"

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
* Constants
***************************************/

/* Since the workspace is effectively its own little malloc implementation /
 * arena, when we run under ASAN, we should similarly insert redzones between
 * each internal element of the workspace, so ASAN will catch overruns that
 * reach outside an object but that stay inside the workspace.
 *
 * This defines the size of that redzone.
 */
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif


/* Tables and "aligned" allocations are aligned on 64-byte boundaries. */
#define ZSTD_CWKSP_ALIGNMENT_BYTES 64

/*-*************************************
* Structures
***************************************/
typedef enum {
    ZSTD_cwksp_alloc_objects,
    ZSTD_cwksp_alloc_aligned_init_once,
    ZSTD_cwksp_alloc_aligned,
    ZSTD_cwksp_alloc_buffers
} ZSTD_cwksp_alloc_phase_e;

/**
 * Used to describe whether the workspace is statically allocated (and will not
 * necessarily ever be freed), or if it's dynamically allocated and we can
 * expect a well-formed caller to free this.
 */
typedef enum {
    ZSTD_cwksp_dynamic_alloc,
    ZSTD_cwksp_static_alloc
} ZSTD_cwksp_static_alloc_e;

/**
 * Zstd fits all its internal datastructures into a single contiguous buffer,
 * so that it only needs to perform a single OS allocation (or so that a buffer
 * can be provided to it and it can perform no allocations at all). This buffer
 * is called the workspace.
 *
 * Several optimizations complicate that process of allocating memory ranges
 * from this workspace for each internal datastructure:
 *
 * - These different internal datastructures have different setup requirements:
 *
 *   - The static objects need to be cleared once and can then be trivially
 *     reused for each compression.
 *
 *   - Various buffers don't need to be initialized at all--they are always
 *     written into before they're read.
 *
 *   - The matchstate tables have a unique requirement that they don't need
 *     their memory to be totally cleared, but they do need the memory to have
 *     some bound, i.e., a guarantee that all values in the memory they've been
 *     allocated are less than some maximum value (which is the starting value
 *     for the indices that they will then use for compression). When this
 *     guarantee is provided to them, they can use the memory without any setup
 *     work. When it can't, they have to clear the area.
 *
 * - These buffers also have different alignment requirements.
 *
 * - We would like to reuse the objects in the workspace for multiple
 *   compressions without having to perform any expensive reallocation or
 *   reinitialization work.
 *
 * - We would like to be able to efficiently reuse the workspace across
 *   multiple compressions **even when the compression parameters change** and
 *   we need to resize some of the objects (where possible).
 *
 * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
 * abstraction was created. It works as follows:
 *
 * Workspace Layout:
 *
 * [                        ... workspace ...                           ]
 * [objects][tables ->] free space [<- buffers][<- aligned][<- init once]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
 *
 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
 *   so that literally everything fits in a single buffer. Note: if present,
 *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
 *   CDict}() rely on a pointer comparison to see whether one or two frees are
 *   required.
 *
 * - Fixed size objects: these are fixed-size, fixed-count objects that are
 *   nonetheless "dynamically" allocated in the workspace so that we can
 *   control how they're initialized separately from the broader ZSTD_CCtx.
 *   Examples:
 *   - Entropy Workspace
 *   - 2 x ZSTD_compressedBlockState_t
 *   - CDict dictionary contents
 *
 * - Tables: these are any of several different datastructures (hash tables,
 *   chain tables, binary trees) that all respect a common format: they are
 *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 *   Their sizes depend on the cparams. These tables are 64-byte aligned.
 *
 * - Init once: these buffers require to be initialized at least once before
 *   use. They should be used when we want to skip memory initialization
 *   while not triggering memory checkers (like Valgrind) when reading from
 *   this memory without writing to it first.
 *   These buffers should be used carefully as they might contain data
 *   from previous compressions.
 *   Buffers are aligned to 64 bytes.
 *
 * - Aligned: these buffers don't require any initialization before they're
 *   used. The user of the buffer should make sure they write into a buffer
 *   location before reading from it.
 *   Buffers are aligned to 64 bytes.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
 *   be moved around at no cost for a new compression.
 *
 * Allocating Memory:
 *
 * The various types of objects must be allocated in order, so they can be
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
 * 2. Init once / Tables
 * 3. Aligned / Tables
 * 4. Buffers / Tables
 *
 * Attempts to reserve objects of different types out of order will fail.
 */
typedef struct {
    void* workspace;
    void* workspaceEnd;

    void* objectEnd;
    void* tableEnd;
    void* tableValidEnd;
    void* allocStart;
    void* initOnceStart;

    BYTE allocFailed;
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
    ZSTD_cwksp_static_alloc_e isStatic;
} ZSTD_cwksp;
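
/* Illustrative sketch, not an excerpt from the library: a plausible
 * reservation sequence honoring the phase order described above. The sizes
 * and the MyState type are hypothetical.
 *
 *     ZSTD_cwksp ws;
 *     ZSTD_cwksp_init(&ws, mem, memSize, ZSTD_cwksp_static_alloc);
 *     MyState* st = (MyState*)ZSTD_cwksp_reserve_object(&ws, sizeof(MyState)); // 1. objects
 *     U32* table  = (U32*)ZSTD_cwksp_reserve_table(&ws, (size_t)1 << 12);      // 2. tables
 *     void* hot   = ZSTD_cwksp_reserve_aligned(&ws, 4096);                     // 3. aligned
 *     BYTE* buf   = ZSTD_cwksp_reserve_buffer(&ws, 1024);                      // 4. buffers
 *     assert(!ZSTD_cwksp_reserve_failed(&ws));
 */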

/*-*************************************
* Functions
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws);

MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
    assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws));
    assert(ws->workspace <= ws->initOnceStart);
#if ZSTD_MEMORY_SANITIZER
    {
        intptr_t const offset = __msan_test_shadow(ws->initOnceStart,
            (U8*)ZSTD_cwksp_initialAllocStart(ws) - (U8*)ws->initOnceStart);
#if defined(ZSTD_MSAN_PRINT)
        if (offset != -1) {
            __msan_print_shadow((U8*)ws->initOnceStart + offset - 8, 32);
        }
#endif
        assert(offset == -1);
    }
#endif
}

/**
 * Align must be a power of 2.
 */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}
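
/* Worked example (a sketch of the arithmetic above): with align == 64,
 * ZSTD_cwksp_align(1, 64) == 64, ZSTD_cwksp_align(64, 64) == 64, and
 * ZSTD_cwksp_align(65, 64) == 128; sizes round up to the next multiple. */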

/**
 * Use this to determine how much space in the workspace we will consume to
 * allocate this object. (Normally it should be exactly the size of the object,
 * but under special conditions, like ASAN, where we pad each object, it might
 * be larger.)
 *
 * Since tables aren't currently redzoned, you don't need to call through this
 * to figure out how much space you need for the matchState tables. Everything
 * else is, though.
 *
 * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
 */
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
    if (size == 0)
        return 0;
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#else
    return size;
#endif
}
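
/* Sketch: with the default ZSTD_CWKSP_ASAN_REDZONE_SIZE of 128, an ASAN build
 * reports ZSTD_cwksp_alloc_size(100) == 356 (100 + 2*128), while a regular
 * build reports 100. A size of 0 always maps to 0, so empty allocations
 * consume no redzone space. */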

/**
 * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
 * Used to determine the number of bytes required for a given "aligned".
 */
MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
}
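
/* Sketch: in a non-ASAN build, ZSTD_cwksp_aligned_alloc_size(100) == 128,
 * since 100 first rounds up to the next multiple of
 * ZSTD_CWKSP_ALIGNMENT_BYTES (64). */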

/**
 * Returns the amount of additional space the cwksp must allocate
 * for internal purposes (currently only alignment).
 */
MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
    /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES
     * bytes: to align the beginning of the tables section, and the end of the buffers section.
     */
    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES * 2;
    return slackSpace;
}


/**
 * Return the number of additional bytes required to align a pointer to the given number of bytes.
 * alignBytes must be a power of two.
 */
MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
    size_t const alignBytesMask = alignBytes - 1;
    size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
    assert((alignBytes & alignBytesMask) == 0);
    assert(bytes < alignBytes);
    return bytes;
}
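
/* Worked example: if ptr == (void*)0x1010 and alignBytes == 64, then
 * (size_t)ptr & 63 == 0x10, so 64 - 0x10 == 48 bytes are needed to reach the
 * next 64-byte boundary at 0x1040. An already-aligned pointer needs 0 bytes,
 * which is what the final "& alignBytesMask" guarantees. */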

/**
 * Returns the initial value for allocStart which is used to determine the position from
 * which we can allocate from the end of the workspace.
 */
MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) {
    return (void*)((size_t)ws->workspaceEnd & ~(ZSTD_CWKSP_ALIGNMENT_BYTES-1));
}
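
/* Sketch: the mask rounds the end pointer down, e.g. a workspaceEnd of 0x10F0
 * yields an initial allocStart of 0x10C0, so allocations taken from the end
 * of the workspace start on a 64-byte boundary. */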

/**
 * Internal function. Do not use directly.
 * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
 * which counts from the end of the wksp (as opposed to the object/table segment).
 *
 * Returns a pointer to the beginning of that space.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
{
    void* const alloc = (BYTE*)ws->allocStart - bytes;
    void* const bottom = ws->tableEnd;
    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    /* the area is reserved from the end of wksp.
     * If it overlaps with tableValidEnd, it voids guarantees on values' range */
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;
    return alloc;
}
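
/* Sketch of the bump-downward behavior (addresses hypothetical): successive
 * reservations stack from the end of the workspace toward the tables.
 *
 *     // allocStart == 0x2000
 *     a = ZSTD_cwksp_reserve_internal_buffer_space(ws, 0x100);  // a == 0x1F00
 *     b = ZSTD_cwksp_reserve_internal_buffer_space(ws, 0x200);  // b == 0x1D00
 *
 * In release builds an exhausted workspace sets ws->allocFailed and returns
 * NULL instead of asserting. */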

/**
 * Moves the cwksp to the next phase, and does any necessary allocations.
 * cwksp initialization must necessarily go through each phase in order.
 * Returns 0 on success, or a zstd error code.
 */
MEM_STATIC size_t
ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
{
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        /* Going from allocating objects to allocating initOnce / tables */
        if (ws->phase < ZSTD_cwksp_alloc_aligned_init_once &&
            phase >= ZSTD_cwksp_alloc_aligned_init_once) {
            ws->tableValidEnd = ws->objectEnd;
            ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);

            {   /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
                void *const alloc = ws->objectEnd;
                size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
                void *const objectEnd = (BYTE *)alloc + bytesToAlign;
                DEBUGLOG(5, "reserving table alignment additional space: %zu", bytesToAlign);
                RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
                                "table phase - alignment initial allocation failed!");
                ws->objectEnd = objectEnd;
                ws->tableEnd = objectEnd;   /* table area starts being empty */
                if (ws->tableValidEnd < ws->tableEnd) {
                    ws->tableValidEnd = ws->tableEnd;
                }
            }
        }
        ws->phase = phase;
        ZSTD_cwksp_assert_internal_consistency(ws);
    }
    return 0;
}
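
/* Sketch: phases only move forward. Once a buffer reservation has moved the
 * wksp to ZSTD_cwksp_alloc_buffers, a later table reservation (an earlier
 * phase) trips assert(phase >= ws->phase) in debug builds; callers must
 * reserve objects, then init-once/tables, then aligned, then buffers. */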

/**
 * Returns whether this object/buffer/etc was allocated in this workspace.
 */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
{
    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr < ws->workspaceEnd);
}

/**
 * Internal function. Do not use directly.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
{
    void* alloc;
    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
        return NULL;
    }

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    if (alloc) {
        alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
        if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
            /* We need to keep the redzone poisoned while unpoisoning the bytes that
             * are actually allocated. */
            __asan_unpoison_memory_region(alloc, bytes - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE);
        }
    }
#endif

    return alloc;
}

/**
 * Reserves and returns unaligned memory.
 */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
{
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}
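
/* Sketch (sizes hypothetical): reserving byte-oriented scratch space.
 *
 *     BYTE* const litBuffer = ZSTD_cwksp_reserve_buffer(ws, blockSize + 32);
 *     if (litBuffer == NULL) return ERROR(memory_allocation);
 *
 * No alignment is guaranteed, so only byte-addressed data should live here. */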

/**
 * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
 * This memory has been initialized at least once in the past.
 * This doesn't mean it has been initialized this time, and it might contain data from previous
 * operations.
 * The main usage is for algorithms that might need read access into uninitialized memory.
 * The algorithm must maintain safety under these conditions and must make sure it doesn't
 * leak any of the past data (directly or in side channels).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t bytes)
{
    size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES);
    void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once);
    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    if (ptr && ptr < ws->initOnceStart) {
        /* We assume the memory following the current allocation is either:
         * 1. Not usable as initOnce memory (end of workspace)
         * 2. Another initOnce buffer that has been allocated before (and so was previously memset)
         * 3. An ASAN redzone, in which case we don't want to write on it
         * For these reasons it should be fine to not explicitly zero every byte up to ws->initOnceStart.
         * Note that we assume here that MSAN and ASAN cannot run at the same time. */
        ZSTD_memset(ptr, 0, MIN((size_t)((U8*)ws->initOnceStart - (U8*)ptr), alignedBytes));
        ws->initOnceStart = ptr;
    }
#if ZSTD_MEMORY_SANITIZER
    assert(__msan_test_shadow(ptr, bytes) == -1);
#endif
    return ptr;
}
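
/* Sketch (names hypothetical): a typical consumer is a table that tolerates
 * arbitrary, but initialized-at-least-once, contents.
 *
 *     U16* const tagTable = (U16*)ZSTD_cwksp_reserve_aligned_init_once(ws, tagTableSize);
 *
 * The caller may observe stale entries from a previous compression, so it must
 * treat every value as untrusted and must not let stale bytes reach the
 * compressed output. */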

/**
 * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
{
    void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
                                            ZSTD_cwksp_alloc_aligned);
    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    return ptr;
}

/**
 * Aligned on 64 bytes. These buffers have the special property that
 * their values remain constrained, allowing us to re-use them without
 * memset()-ing them.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
{
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned_init_once;
    void* alloc;
    void* end;
    void* top;

    /* We can only start allocating tables after we are done reserving space for objects at the
     * start of the workspace */
    if (ws->phase < phase) {
        if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
            return NULL;
        }
    }
    alloc = ws->tableEnd;
    end = (BYTE *)alloc + bytes;
    top = ws->allocStart;

    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    return alloc;
}
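
/* Sketch (hypothetical cparams): sizing a hash table for a hashLog of 17.
 *
 *     size_t const hSize = (size_t)1 << 17;  // number of U32 entries
 *     U32* const hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
 *
 * The byte count is a multiple of 64, which satisfies the alignment asserts
 * above. */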

/**
 * Aligned on sizeof(void*).
 * Note : should happen only once, at workspace first initialization
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
{
    size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* alloc = ws->objectEnd;
    void* end = (BYTE*)alloc + roundedBytes;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(4,
        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    /* we must be in the first phase, no advance is possible */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(3, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    return alloc;
}
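
/* Sketch (modeled on how the compressor carves out its block state; the field
 * names are illustrative):
 *
 *     ZSTD_compressedBlockState_t* const prev = (ZSTD_compressedBlockState_t*)
 *         ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
 *
 * Sizes are rounded up to a multiple of sizeof(void*), and any object
 * reservation after the wksp has left the object phase fails. */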

MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
{
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the table re-use logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
     * space every time we mark it dirty.
     * Since tableValidEnd space and initOnce space may overlap, we don't poison
     * the initOnce portion, as doing so would break its promise. This means that
     * this poisoning check isn't always applied fully. */
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        assert(__msan_test_shadow(ws->objectEnd, size) == -1);
        if ((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
            __msan_poison(ws->objectEnd, size);
        } else {
            assert(ws->initOnceStart >= ws->objectEnd);
            __msan_poison(ws->objectEnd, (BYTE*)ws->initOnceStart - (BYTE*)ws->objectEnd);
        }
    }
#endif

    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Zero the part of the allocated tables not already marked clean.
 */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ZSTD_memset(ws->tableValidEnd, 0, (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd));
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}

/**
 * Invalidates table allocations.
 * All other allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing tables!");

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the context re-use logic is sound, and that we don't
     * access stuff that this compression hasn't initialized, we re-"poison"
     * the workspace except for the areas in which we expect memory re-use
     * without initialization (objects, valid tables area and init once
     * memory). */
    {
        if ((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
            size_t size = (BYTE*)ws->initOnceStart - (BYTE*)ws->tableValidEnd;
            __msan_poison(ws->tableValidEnd, size);
        }
    }
#endif

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ZSTD_cwksp_initialAllocStart(ws);
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_aligned_init_once) {
        ws->phase = ZSTD_cwksp_alloc_aligned_init_once;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * The provided workspace takes ownership of the buffer [start, start+size).
 * Any existing values in the workspace are ignored (the previously managed
 * buffer, if present, must be separately freed).
 */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
    ws->phase = ZSTD_cwksp_alloc_objects;
    ws->isStatic = isStatic;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}
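
/* Sketch: initializing a cwksp over caller-provided memory (size hypothetical).
 *
 *     static size_t buffer[1 << 10];  // size_t alignment satisfies the assert above
 *     ZSTD_cwksp ws;
 *     ZSTD_cwksp_init(&ws, buffer, sizeof(buffer), ZSTD_cwksp_static_alloc);
 *
 * With ZSTD_cwksp_static_alloc, the cwksp never frees this memory itself. */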

MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_customMalloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
    return 0;
}

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    void *ptr = ws->workspace;
    DEBUGLOG(4, "cwksp: freeing workspace");
    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
    ZSTD_customFree(ptr, customMem);
}
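
/* Sketch: a dynamically allocated workspace pairs ZSTD_cwksp_create() with
 * ZSTD_cwksp_free():
 *
 *     ZSTD_cwksp ws;
 *     FORWARD_IF_ERROR(ZSTD_cwksp_create(&ws, totalSize, customMem), "");
 *     // ... reserve and use ...
 *     ZSTD_cwksp_free(&ws, customMem);
 *
 * ZSTD_cwksp_free() zeroes the struct before freeing the pointer it saved, so
 * an accidental second free degenerates to freeing NULL, a no-op. */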

/**
 * Moves the management of a workspace from one cwksp to another. The src cwksp
 * is left in an invalid state (src must be re-init()'ed before it's used again).
 */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}

MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

/*-*************************************
* Functions Checking Free Space
***************************************/

/* ZSTD_cwksp_estimated_space_within_bounds() :
 * Returns whether the estimated space needed for a wksp is within an acceptable
 * limit of the actual amount of space used.
 */
MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp *const ws, size_t const estimatedSpace) {
    /* We have alignment space between objects and tables, and between tables and
     * buffers, so we can have up to twice the alignment bytes of difference
     * between the estimation and the actual usage */
    return (estimatedSpace - ZSTD_cwksp_slack_space_required()) <= ZSTD_cwksp_used(ws) &&
           ZSTD_cwksp_used(ws) <= estimatedSpace;
}

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}

MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}

MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_available(
        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}

MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}

MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
        ws->workspaceOversizedDuration++;
    } else {
        ws->workspaceOversizedDuration = 0;
    }
}
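
/* Sketch of the resize policy these helpers support (control flow hypothetical):
 *
 *     ZSTD_cwksp_bump_oversized_duration(ws, neededSpace);
 *     if (!ZSTD_cwksp_check_available(ws, neededSpace)
 *         || ZSTD_cwksp_check_wasteful(ws, neededSpace)) {
 *         // Reallocate: the wksp is either too small, or has stayed more than
 *         // ZSTD_WORKSPACETOOLARGE_FACTOR times too large for more than
 *         // ZSTD_WORKSPACETOOLARGE_MAXDURATION consecutive uses.
 *     }
 */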

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_CWKSP_H */