/* ******************************************************************
 * hist : Histogram functions
 * part of Finite State Entropy project
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
 * - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

/* --- dependencies --- */
#include "../common/mem.h"             /* U32, BYTE, etc. */
#include "../common/debug.h"           /* assert, DEBUGLOG */
#include "../common/error_private.h"   /* ERROR */
#include "hist.h"


/* --- Error management --- */
unsigned HIST_isError(size_t code) { return ERR_isError(code); }

/*-**************************************************************
 *  Histogram functions
 ****************************************************************/
unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
                           const void* src, size_t srcSize)
{
    const BYTE* ip = (const BYTE*)src;
    const BYTE* const end = ip + srcSize;
    unsigned maxSymbolValue = *maxSymbolValuePtr;
    unsigned largestCount=0;

    ZSTD_memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }

    while (ip<end) {
        assert(*ip <= maxSymbolValue);
        count[*ip++]++;
    }

    while (!count[maxSymbolValue]) maxSymbolValue--;
    *maxSymbolValuePtr = maxSymbolValue;

    {   U32 s;
        for (s=0; s<=maxSymbolValue; s++)
            if (count[s] > largestCount) largestCount = count[s];
    }

    return largestCount;
}
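
/* Illustrative usage of HIST_count_simple() (not part of the library build) :
 * the caller provides a count table of at least (*maxSymbolValuePtr + 1) cells
 * and guarantees that no byte of `src` exceeds *maxSymbolValuePtr.
 * Buffer names below are hypothetical.
 *
 *     unsigned count[256];
 *     unsigned maxSymbolValue = 255;   // largest symbol the caller accepts
 *     unsigned const largest = HIST_count_simple(count, &maxSymbolValue, src, srcSize);
 *     // on return : maxSymbolValue is the largest symbol actually present,
 *     //             largest is the highest frequency found in count[]
 */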

typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;

/* HIST_count_parallel_wksp() :
 * store histogram into 4 intermediate tables, recombined at the end.
 * this design makes better use of OoO cpus,
 * and is noticeably faster when some values are heavily repeated.
 * But it needs some additional workspace for intermediate tables.
 * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
 * @return : largest histogram frequency,
 *           or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
static size_t HIST_count_parallel_wksp(
                                unsigned* count, unsigned* maxSymbolValuePtr,
                                const void* source, size_t sourceSize,
                                HIST_checkInput_e check,
                                U32* const workSpace)
{
    const BYTE* ip = (const BYTE*)source;
    const BYTE* const iend = ip+sourceSize;
    size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count);
    unsigned max=0;
    U32* const Counting1 = workSpace;
    U32* const Counting2 = Counting1 + 256;
    U32* const Counting3 = Counting2 + 256;
    U32* const Counting4 = Counting3 + 256;

    /* safety checks */
    assert(*maxSymbolValuePtr <= 255);
    if (!sourceSize) {
        ZSTD_memset(count, 0, countSize);
        *maxSymbolValuePtr = 0;
        return 0;
    }
    ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned));

    /* by stripes of 16 bytes */
    {   U32 cached = MEM_read32(ip); ip += 4;
        while (ip < iend-15) {
            U32 c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
        }
        ip-=4;
    }

    /* finish last symbols */
    while (ip<iend) Counting1[*ip++]++;

    {   U32 s;
        for (s=0; s<256; s++) {
            Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
            if (Counting1[s] > max) max = Counting1[s];
    }   }

    {   unsigned maxSymbolValue = 255;
        while (!Counting1[maxSymbolValue]) maxSymbolValue--;
        if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall);
        *maxSymbolValuePtr = maxSymbolValue;
        ZSTD_memmove(count, Counting1, countSize);   /* in case count & Counting1 are overlapping */
    }
    return (size_t)max;
}
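
/* Design note (illustrative, not part of the library build) :
 * a single-table loop such as
 *
 *     while (ip < iend) Counting1[*ip++]++;
 *
 * chains every increment of a repeated byte value behind the previous one,
 * because each `++` must first read the counter just written.
 * Spreading the 4 bytes of each word over Counting1..Counting4 lets repeated
 * values land in different tables most of the time, so an out-of-order CPU
 * can keep several increments in flight; the four partial histograms are then
 * summed back into Counting1[] above before the result is copied out. */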

/* HIST_countFast_wksp() :
 * Same as HIST_countFast(), but using an externally provided scratch buffer.
 * `workSpace` is a writable buffer which must be 4-bytes aligned,
 * `workSpaceSize` must be >= HIST_WKSP_SIZE
 */
size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                           const void* source, size_t sourceSize,
                           void* workSpace, size_t workSpaceSize)
{
    if (sourceSize < 1500) /* heuristic threshold */
        return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
    return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
}
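
/* Illustrative usage of HIST_countFast_wksp() (hypothetical names) :
 * a stack-allocated U32 array satisfies both the size and the 4-byte
 * alignment requirements checked above.
 *
 *     unsigned count[256];
 *     unsigned maxSymbolValue = 255;
 *     U32 wksp[HIST_WKSP_SIZE_U32];
 *     size_t const largest = HIST_countFast_wksp(count, &maxSymbolValue,
 *                                                src, srcSize,
 *                                                wksp, sizeof(wksp));
 *     if (HIST_isError(largest)) return largest;   // propagate error
 */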

/* HIST_count_wksp() :
 * Same as HIST_count(), but using an externally provided scratch buffer.
 * `workSpace` must be a table of >= HIST_WKSP_SIZE_U32 unsigned */
size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                       const void* source, size_t sourceSize,
                       void* workSpace, size_t workSpaceSize)
{
    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
    if (*maxSymbolValuePtr < 255)
        return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
    *maxSymbolValuePtr = 255;
    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
}
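
/* Illustrative behaviour of HIST_count_wksp() (hypothetical names) :
 * when the caller announces a restricted alphabet (< 255), out-of-range
 * symbols are reported via maxSymbolValue_tooSmall (see
 * HIST_count_parallel_wksp above) instead of being silently miscounted.
 *
 *     unsigned count[64];
 *     unsigned maxSymbolValue = 63;    // caller only accepts symbols 0..63
 *     U32 wksp[HIST_WKSP_SIZE_U32];
 *     size_t const r = HIST_count_wksp(count, &maxSymbolValue,
 *                                      src, srcSize, wksp, sizeof(wksp));
 *     // if src contains any byte > 63, HIST_isError(r) is non-zero
 */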

#ifndef ZSTD_NO_UNUSED_FUNCTIONS
/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
                      const void* source, size_t sourceSize)
{
    unsigned tmpCounters[HIST_WKSP_SIZE_U32];
    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters));
}

size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
                  const void* src, size_t srcSize)
{
    unsigned tmpCounters[HIST_WKSP_SIZE_U32];
    return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(tmpCounters));
}
#endif