/* virtualx-engine/thirdparty/zstd/common/pool.c — zstd thread pool */
/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
*/
/* ====== Dependencies ======= */
#include "zstd_deps.h"      /* size_t */
#include "debug.h"          /* assert */
#include "zstd_internal.h"  /* ZSTD_customMalloc, ZSTD_customFree */
#include "pool.h"
/* ====== Compiler specifics ====== */
#if defined(_MSC_VER)
#  pragma warning(disable : 4204)  /* disable: C4204: non-constant aggregate initializer */
#endif


#ifdef ZSTD_MULTITHREAD

#include "threading.h"  /* pthread adaptation */
/* A job is a function and an opaque argument */
typedef struct POOL_job_s {
    POOL_function function;  /* routine executed on a worker thread */
    void *opaque;            /* caller-supplied argument, passed verbatim to function */
} POOL_job;
struct POOL_ctx_s {
    ZSTD_customMem customMem;  /* allocator used for every pool allocation */
    /* Keep track of the threads */
    ZSTD_pthread_t* threads;   /* worker handles; threadCapacity entries are valid */
    size_t threadCapacity;     /* number of threads actually created */
    size_t threadLimit;        /* max threads allowed to work at once (<= threadCapacity after a shrink-resize) */
    /* The queue is a circular buffer */
    POOL_job *queue;
    size_t queueHead;          /* index of the next job to pop */
    size_t queueTail;          /* index of the next free slot to push into */
    size_t queueSize;          /* ring capacity; always intended queue size + 1 (one slot wasted to tell empty from full) */
    /* The number of threads working on jobs */
    size_t numThreadsBusy;
    /* Indicates if the queue is empty */
    int queueEmpty;
    /* The mutex protects the queue */
    ZSTD_pthread_mutex_t queueMutex;
    /* Condition variable for pushers to wait on when the queue is full */
    ZSTD_pthread_cond_t queuePushCond;
    /* Condition variables for poppers to wait on when the queue is empty */
    ZSTD_pthread_cond_t queuePopCond;
    /* Indicates if the queue is shutting down */
    int shutdown;
};
/* POOL_thread() :
 * Work thread for the thread pool.
 * Waits for jobs and executes them.
 * @returns : NULL on failure else non-null.
 */
static void* POOL_thread(void* opaque) {
    POOL_ctx* const ctx = (POOL_ctx*)opaque;
    if (!ctx) { return NULL; }
    for (;;) {
        /* Lock the mutex and wait for a non-empty queue or until shutdown */
        ZSTD_pthread_mutex_lock(&ctx->queueMutex);

        /* Predicate re-checked after every wakeup (guards against spurious wakeups).
         * A thread also parks here when the pool was shrunk via resize :
         * numThreadsBusy >= threadLimit keeps excess threads idle. */
        while ( ctx->queueEmpty
            || (ctx->numThreadsBusy >= ctx->threadLimit) ) {
            if (ctx->shutdown) {
                /* even if !queueEmpty, (possible if numThreadsBusy >= threadLimit),
                 * a few threads will be shutdown while !queueEmpty,
                 * but enough threads will remain active to finish the queue */
                ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
                return opaque;  /* non-NULL : signals normal termination */
            }
            ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
        }
        /* Pop a job off the queue */
        {   POOL_job const job = ctx->queue[ctx->queueHead];
            ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
            ctx->numThreadsBusy++;
            ctx->queueEmpty = ctx->queueHead == ctx->queueTail;
            /* Unlock the mutex, signal a pusher, and run the job */
            ZSTD_pthread_cond_signal(&ctx->queuePushCond);
            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);

            /* job runs without the lock held */
            job.function(job.opaque);

            /* If the intended queue size was 0, signal after finishing job */
            ZSTD_pthread_mutex_lock(&ctx->queueMutex);
            ctx->numThreadsBusy--;
            /* queueSize == 1 means the pool was created with queueSize 0 :
             * pushers block until a worker is free, so wake one now */
            if (ctx->queueSize == 1) {
                ZSTD_pthread_cond_signal(&ctx->queuePushCond);
            }
            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
        }
    }  /* for (;;) */
    assert(0);  /* Unreachable */
}
2021-01-08 11:21:43 +01:00
POOL_ctx* ZSTD_createThreadPool(size_t numThreads) {
return POOL_create (numThreads, 0);
}
2017-10-26 22:41:47 +02:00
POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
}
/* POOL_create_advanced() :
 * Create a thread pool of `numThreads` workers with a job backlog of
 * `queueSize`, using `customMem` for all allocations.
 * @return : the new pool, or NULL on failure (invalid parameter,
 *           allocation failure, or thread-creation failure).
 * NOTE : the mutex/cond objects are initialized before any failure path
 * calls POOL_free(), which unconditionally destroys them — keep that
 * ordering intact. */
POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
                               ZSTD_customMem customMem) {
    POOL_ctx* ctx;
    /* Check parameters */
    if (!numThreads) { return NULL; }
    /* Allocate the context and zero initialize */
    ctx = (POOL_ctx*)ZSTD_customCalloc(sizeof(POOL_ctx), customMem);
    if (!ctx) { return NULL; }
    /* Initialize the job queue.
     * It needs one extra space since one space is wasted to differentiate
     * empty and full queues.
     */
    ctx->queueSize = queueSize + 1;
    /* NULL check is deferred to the combined error check below */
    ctx->queue = (POOL_job*)ZSTD_customMalloc(ctx->queueSize * sizeof(POOL_job), customMem);
    ctx->queueHead = 0;
    ctx->queueTail = 0;
    ctx->numThreadsBusy = 0;
    ctx->queueEmpty = 1;
    {
        int error = 0;
        error |= ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
        error |= ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
        error |= ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
        if (error) { POOL_free(ctx); return NULL; }
    }
    ctx->shutdown = 0;
    /* Allocate space for the thread handles */
    ctx->threads = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
    /* capacity stays 0 until threads actually exist, so POOL_join()
     * on an error path joins nothing */
    ctx->threadCapacity = 0;
    ctx->customMem = customMem;
    /* Check for errors */
    if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
    /* Initialize the threads */
    {   size_t i;
        for (i = 0; i < numThreads; ++i) {
            if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
                /* record how many threads were actually started,
                 * so POOL_free() joins exactly those */
                ctx->threadCapacity = i;
                POOL_free(ctx);
                return NULL;
        }   }
        ctx->threadCapacity = numThreads;
        ctx->threadLimit = numThreads;
    }
    return ctx;
}
/*! POOL_join() :
Shutdown the queue, wake any sleeping threads, and join all of the threads.
*/
2017-10-26 22:41:47 +02:00
static void POOL_join(POOL_ctx* ctx) {
/* Shut down the queue */
2017-10-26 22:41:47 +02:00
ZSTD_pthread_mutex_lock(&ctx->queueMutex);
ctx->shutdown = 1;
2017-10-26 22:41:47 +02:00
ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
/* Wake up sleeping threads */
2017-10-26 22:41:47 +02:00
ZSTD_pthread_cond_broadcast(&ctx->queuePushCond);
ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
/* Join all of the threads */
{ size_t i;
2019-01-04 01:30:03 +01:00
for (i = 0; i < ctx->threadCapacity; ++i) {
ZSTD_pthread_join(ctx->threads[i], NULL); /* note : could fail */
} }
}
/* Release a pool created by POOL_create*() : drains and joins all workers,
 * then destroys synchronization objects and frees all memory.
 * Accepts NULL as a no-op. */
void POOL_free(POOL_ctx *ctx) {
    if (ctx == NULL) { return; }
    POOL_join(ctx);
    ZSTD_pthread_mutex_destroy(&ctx->queueMutex);
    ZSTD_pthread_cond_destroy(&ctx->queuePushCond);
    ZSTD_pthread_cond_destroy(&ctx->queuePopCond);
    {   /* snapshot the allocator : ctx itself is freed last */
        ZSTD_customMem const cMem = ctx->customMem;
        ZSTD_customFree(ctx->queue, cMem);
        ZSTD_customFree(ctx->threads, cMem);
        ZSTD_customFree(ctx, cMem);
    }
}
2021-01-08 11:21:43 +01:00
void ZSTD_freeThreadPool (ZSTD_threadPool* pool) {
POOL_free (pool);
}
2019-01-04 01:30:03 +01:00
2017-07-22 23:46:05 +02:00
size_t POOL_sizeof(POOL_ctx *ctx) {
if (ctx==NULL) return 0; /* supports sizeof NULL */
return sizeof(*ctx)
+ ctx->queueSize * sizeof(POOL_job)
2019-01-04 01:30:03 +01:00
+ ctx->threadCapacity * sizeof(ZSTD_pthread_t);
}
/* POOL_resize_internal() :
 * Adjust the number of active threads. Caller must hold queueMutex.
 * Shrinking keeps existing threads alive and merely lowers threadLimit;
 * growing reallocates the handle array and spawns the missing threads.
 * @return : 0 on success, 1 on error */
static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
{
    if (numThreads <= ctx->threadCapacity) {
        if (!numThreads) return 1;  /* 0 threads is invalid */
        /* shrink : surplus threads simply stay parked in POOL_thread() */
        ctx->threadLimit = numThreads;
        return 0;
    }
    /* numThreads > threadCapacity */
    {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
        if (!threadPool) return 1;
        /* replace existing thread pool */
        ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
        ZSTD_customFree(ctx->threads, ctx->customMem);
        ctx->threads = threadPool;
        /* Initialize additional threads */
        {   size_t threadId;
            for (threadId = ctx->threadCapacity; threadId < numThreads; ++threadId) {
                if (ZSTD_pthread_create(&threadPool[threadId], NULL, &POOL_thread, ctx)) {
                    /* partial failure : keep the threads that did start,
                     * so a later POOL_free() joins exactly those */
                    ctx->threadCapacity = threadId;
                    return 1;
            }   }
        }
    }
    /* successfully expanded */
    ctx->threadCapacity = numThreads;
    ctx->threadLimit = numThreads;
    return 0;
}
/* Thread-safe wrapper around POOL_resize_internal().
 * @return : 0 on success, 1 on error */
int POOL_resize(POOL_ctx* ctx, size_t numThreads)
{
    int err;
    if (ctx == NULL) return 1;
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    err = POOL_resize_internal(ctx, numThreads);
    /* a raised threadLimit may unpark previously-idle workers */
    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    return err;
}
/**
 * Returns 1 if the queue is full and 0 otherwise.
 *
 * When queueSize is 1 (pool was created with an intended queueSize of 0),
 * then a queue is empty if there is a thread free _and_ no job is waiting.
 */
static int isQueueFull(POOL_ctx const* ctx) {
    if (ctx->queueSize > 1) {
        /* ring buffer : full when advancing the tail would meet the head */
        return ((ctx->queueTail + 1) % ctx->queueSize) == ctx->queueHead;
    }
    /* queueSize == 1 : "full" when no worker is free... */
    if (ctx->numThreadsBusy == ctx->threadLimit) return 1;
    /* ...or when a job is already parked in the single slot */
    return !ctx->queueEmpty;
}
2018-05-15 19:45:22 +02:00
static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
{
POOL_job const job = {function, opaque};
assert(ctx != NULL);
if (ctx->shutdown) return;
ctx->queueEmpty = 0;
ctx->queue[ctx->queueTail] = job;
ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
ZSTD_pthread_cond_signal(&ctx->queuePopCond);
}
/* Blocking enqueue : waits until the queue has room (or the pool is
 * shutting down), then hands the job to POOL_add_internal(). */
void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque)
{
    assert(ctx != NULL);
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    /* Wait until there is space in the queue for the new job */
    for (;;) {
        if (ctx->shutdown || !isQueueFull(ctx)) break;
        ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
    }
    /* during shutdown, POOL_add_internal drops the job silently */
    POOL_add_internal(ctx, function, opaque);
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
}
2018-05-15 19:45:22 +02:00
int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)
{
assert(ctx != NULL);
ZSTD_pthread_mutex_lock(&ctx->queueMutex);
if (isQueueFull(ctx)) {
ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
return 0;
}
2018-05-15 19:45:22 +02:00
POOL_add_internal(ctx, function, opaque);
2017-10-26 22:41:47 +02:00
ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
2018-05-15 19:45:22 +02:00
return 1;
}
#else  /* ZSTD_MULTITHREAD not defined */

/* ========================== */
/* No multi-threading support */
/* ========================== */


/* We don't need any data, but if it is empty, malloc() might return NULL. */
struct POOL_ctx_s {
    int dummy;  /* placeholder : guarantees sizeof(POOL_ctx) > 0 */
};

/* Single shared, stateless context handed to every caller in this build. */
static POOL_ctx g_poolCtx;

POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
}
/* Without threads there is nothing to build : every parameter is ignored
 * and all callers share the same static context. */
POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
    (void)numThreads; (void)queueSize; (void)customMem;
    return &g_poolCtx;  /* never NULL, never heap-allocated */
}
/* Nothing to release : only NULL or the shared static context is ever
 * handed out by POOL_create*(). */
void POOL_free(POOL_ctx* ctx) {
    assert(ctx == NULL || ctx == &g_poolCtx);
    (void)ctx;
}
2019-01-04 01:30:03 +01:00
int POOL_resize(POOL_ctx* ctx, size_t numThreads) {
(void)ctx; (void)numThreads;
return 0;
}
2018-05-15 19:45:22 +02:00
void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) {
(void)ctx;
function(opaque);
}
/* Synchronous execution can never be "full" : run the job inline and
 * report success. */
int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {
    (void)ctx;
    function(opaque);
    return 1;
}
/* Footprint of the (static) context : just the struct itself. */
size_t POOL_sizeof(POOL_ctx* ctx) {
    if (ctx == NULL) return 0;  /* supports sizeof NULL */
    assert(ctx == &g_poolCtx);
    return sizeof(*ctx);
}
#endif /* ZSTD_MULTITHREAD */