Coverage Report

Created: 2025-07-11 06:33

/src/zstd/lib/dictBuilder/cover.c
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* *****************************************************************************
 * Constructs a dictionary using a heuristic based on the following paper:
 *
 * Liao, Petri, Moffat, Wirth
 * Effective Construction of Relative Lempel-Ziv Dictionaries
 * Published in WWW 2016.
 *
 * Adapted from code originally written by @ot (Giuseppe Ottaviano).
 ******************************************************************************/

/*-*************************************
*  Dependencies
***************************************/
/* qsort_r is an extension. */
#if defined(__linux) || defined(__linux__) || defined(linux) || defined(__gnu_linux__) || \
    defined(__CYGWIN__) || defined(__MSYS__)
# if !defined(_GNU_SOURCE) && !defined(__ANDROID__) /* NDK doesn't ship qsort_r(). */
#   define _GNU_SOURCE
# endif
#endif
#define __STDC_WANT_LIB_EXT1__ 1 /* request C11 Annex K, which includes qsort_s() */

#include <stdio.h>  /* fprintf */
#include <stdlib.h> /* malloc, free, qsort_r */

#include <string.h> /* memset */
#include <time.h>   /* clock */

#ifndef ZDICT_STATIC_LINKING_ONLY
#  define ZDICT_STATIC_LINKING_ONLY
#endif

#include "../common/debug.h" /* DEBUG_STATIC_ASSERT */
#include "../common/mem.h" /* read */
#include "../common/pool.h" /* POOL_ctx */
#include "../common/threading.h" /* ZSTD_pthread_mutex_t */
#include "../common/zstd_internal.h" /* includes zstd.h */
#include "../common/bits.h" /* ZSTD_highbit32 */
#include "../zdict.h"
#include "cover.h"

/*-*************************************
*  Constants
***************************************/
/**
 * Samples are referenced with 32-bit indexes, so limit the total samples size
 * to 4 GB on 64-bit builds.
 * For 32-bit builds we choose 1 GB.
 * Most 32-bit platforms have 2 GB of user-mode addressable space, and we
 * allocate one large contiguous buffer, so 1 GB is already a high limit.
 */
#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
#define COVER_DEFAULT_SPLITPOINT 1.0

/**
 * Select the qsort() variant used by cover
 */
#define ZDICT_QSORT_MIN 0
#define ZDICT_QSORT_C90 ZDICT_QSORT_MIN
#define ZDICT_QSORT_GNU 1
#define ZDICT_QSORT_APPLE 2
#define ZDICT_QSORT_MSVC 3
#define ZDICT_QSORT_MAX 4
#define ZDICT_QSORT_C11 ZDICT_QSORT_MAX

#ifndef ZDICT_QSORT
# if defined(__APPLE__)
#   define ZDICT_QSORT ZDICT_QSORT_APPLE /* uses qsort_r() with a different parameter order */
# elif defined(_GNU_SOURCE)
#   define ZDICT_QSORT ZDICT_QSORT_GNU /* uses qsort_r() */
# elif defined(_WIN32) && defined(_MSC_VER)
#   define ZDICT_QSORT ZDICT_QSORT_MSVC /* uses qsort_s() with a different parameter order */
# elif defined(__STDC_LIB_EXT1__) && (__STDC_LIB_EXT1__ > 0) /* C11 Annex K */
#   define ZDICT_QSORT ZDICT_QSORT_C11 /* uses qsort_s() */
# else
#   define ZDICT_QSORT ZDICT_QSORT_C90 /* uses standard qsort(), which is not re-entrant (requires a global variable) */
# endif
#endif
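
/* Usage note (illustrative, not part of the original file): the block above is
 * guarded by #ifndef, so a build may force a specific variant on the command
 * line, e.g.
 *     cc -DZDICT_QSORT=ZDICT_QSORT_C90 ...
 * to select the portable, non-reentrant qsort() fallback. */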

/*-*************************************
*  Console display
*
* Captures the `displayLevel` variable in the local scope.
***************************************/
#undef  DISPLAY
#define DISPLAY(...)                                                           \
  {                                                                            \
    fprintf(stderr, __VA_ARGS__);                                              \
    fflush(stderr);                                                            \
  }
#undef  DISPLAYLEVEL
#define DISPLAYLEVEL(l, ...)                                                   \
  if (displayLevel >= l) {                                                     \
    DISPLAY(__VA_ARGS__);                                                      \
  } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */

#undef  DISPLAYUPDATE
#define DISPLAYUPDATE(lastUpdateTime, l, ...)                                  \
  if (displayLevel >= l) {                                                     \
    const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;                     \
    if ((clock() - lastUpdateTime > refreshRate) || (displayLevel >= 4)) {     \
      lastUpdateTime = clock();                                                \
      DISPLAY(__VA_ARGS__);                                                    \
    }                                                                          \
  }

/*-*************************************
* Hash table
***************************************
* A small specialized hash map for storing activeDmers.
* The map does not resize, so if it becomes full it will loop forever.
* Thus, the map must be large enough to store every value.
* The map implements linear probing and keeps its load less than 0.5.
*/

#define MAP_EMPTY_VALUE ((U32)-1)
typedef struct COVER_map_pair_t_s {
  U32 key;
  U32 value;
} COVER_map_pair_t;

typedef struct COVER_map_s {
  COVER_map_pair_t *data;
  U32 sizeLog;
  U32 size;
  U32 sizeMask;
} COVER_map_t;

/**
 * Clear the map.
 */
static void COVER_map_clear(COVER_map_t *map) {
  memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t));
}

/**
 * Initializes a map of the given size.
 * Returns 1 on success and 0 on failure.
 * The map must be destroyed with COVER_map_destroy().
 * The map is only guaranteed to be large enough to hold size elements.
 */
static int COVER_map_init(COVER_map_t *map, U32 size) {
  map->sizeLog = ZSTD_highbit32(size) + 2;
  map->size = (U32)1 << map->sizeLog;
  map->sizeMask = map->size - 1;
  map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t));
  if (!map->data) {
    map->sizeLog = 0;
    map->size = 0;
    return 0;
  }
  COVER_map_clear(map);
  return 1;
}

/**
 * Internal hash function
 */
static const U32 COVER_prime4bytes = 2654435761U;
static U32 COVER_map_hash(COVER_map_t *map, U32 key) {
  return (key * COVER_prime4bytes) >> (32 - map->sizeLog);
}

/**
 * Helper function that returns the index that a key should be placed into.
 */
static U32 COVER_map_index(COVER_map_t *map, U32 key) {
  const U32 hash = COVER_map_hash(map, key);
  U32 i;
  for (i = hash;; i = (i + 1) & map->sizeMask) {
    COVER_map_pair_t *pos = &map->data[i];
    if (pos->value == MAP_EMPTY_VALUE) {
      return i;
    }
    if (pos->key == key) {
      return i;
    }
  }
}

/**
 * Returns the pointer to the value for key.
 * If key is not in the map, it is inserted and the value is set to 0.
 * The map must not be full.
 */
static U32 *COVER_map_at(COVER_map_t *map, U32 key) {
  COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)];
  if (pos->value == MAP_EMPTY_VALUE) {
    pos->key = key;
    pos->value = 0;
  }
  return &pos->value;
}

/**
 * Deletes key from the map if present.
 */
static void COVER_map_remove(COVER_map_t *map, U32 key) {
  U32 i = COVER_map_index(map, key);
  COVER_map_pair_t* del = &map->data[i];
  U32 shift = 1;
  if (del->value == MAP_EMPTY_VALUE) {
    return;
  }
  for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) {
    COVER_map_pair_t *const pos = &map->data[i];
    /* If the position is empty we are done */
    if (pos->value == MAP_EMPTY_VALUE) {
      del->value = MAP_EMPTY_VALUE;
      return;
    }
    /* If pos can be moved to del do so */
    if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) {
      del->key = pos->key;
      del->value = pos->value;
      del = pos;
      shift = 1;
    } else {
      ++shift;
    }
  }
}

/**
 * Destroys a map that was initialized with COVER_map_init().
 */
static void COVER_map_destroy(COVER_map_t *map) {
  if (map->data) {
    free(map->data);
  }
  map->data = NULL;
  map->size = 0;
}
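
/* Illustrative sketch (not part of the original file): how the map above is
 * used to count occurrences. COVER_map_at() inserts a zeroed entry on first
 * access, so it doubles as insert-or-lookup; the example_ name is hypothetical. */
static int example_map_usage(void)
{
  COVER_map_t map;
  U32 const dmers[5] = {7, 3, 7, 9, 7};
  U32 i;
  if (!COVER_map_init(&map, 5)) return 0; /* room for 5 keys, load < 0.5 */
  for (i = 0; i < 5; ++i) {
    *COVER_map_at(&map, dmers[i]) += 1; /* first access inserts value 0 */
  }
  /* Here *COVER_map_at(&map, 7) == 3. */
  COVER_map_remove(&map, 3); /* backward-shift deletion keeps probe chains intact */
  COVER_map_destroy(&map);
  return 1;
}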

/*-*************************************
* Context
***************************************/

typedef struct {
  const BYTE *samples;
  size_t *offsets;
  const size_t *samplesSizes;
  size_t nbSamples;
  size_t nbTrainSamples;
  size_t nbTestSamples;
  U32 *suffix;
  size_t suffixSize;
  U32 *freqs;
  U32 *dmerAt;
  unsigned d;
  int displayLevel;
} COVER_ctx_t;

#if ZDICT_QSORT == ZDICT_QSORT_C90
/* Use a global context for non-reentrant sort functions */
static COVER_ctx_t *g_coverCtx = NULL;
#endif

/*-*************************************
*  Helper functions
***************************************/

/**
 * Returns the sum of the sample sizes.
 */
size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
  size_t sum = 0;
  unsigned i;
  for (i = 0; i < nbSamples; ++i) {
    sum += samplesSizes[i];
  }
  return sum;
}

/**
 * Returns -1 if the dmer at lp is less than the dmer at rp.
 * Returns 0 if the dmers at lp and rp are equal.
 * Returns 1 if the dmer at lp is greater than the dmer at rp.
 */
static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) {
  U32 const lhs = *(U32 const *)lp;
  U32 const rhs = *(U32 const *)rp;
  return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d);
}
/**
 * Faster version for d <= 8.
 */
static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) {
  U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1);
  U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask;
  U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask;
  if (lhs < rhs) {
    return -1;
  }
  return (lhs > rhs);
}

/**
 * Same as COVER_cmp() except ties are broken by pointer value
 */
#if (ZDICT_QSORT == ZDICT_QSORT_MSVC) || (ZDICT_QSORT == ZDICT_QSORT_APPLE)
static int WIN_CDECL COVER_strict_cmp(void* g_coverCtx, const void* lp, const void* rp) {
#elif (ZDICT_QSORT == ZDICT_QSORT_GNU) || (ZDICT_QSORT == ZDICT_QSORT_C11)
static int COVER_strict_cmp(const void *lp, const void *rp, void *g_coverCtx) {
#else /* C90 fallback.*/
static int COVER_strict_cmp(const void *lp, const void *rp) {
#endif
  int result = COVER_cmp((COVER_ctx_t*)g_coverCtx, lp, rp);
  if (result == 0) {
    result = lp < rp ? -1 : 1;
  }
  return result;
}
/**
 * Faster version for d <= 8.
 */
#if (ZDICT_QSORT == ZDICT_QSORT_MSVC) || (ZDICT_QSORT == ZDICT_QSORT_APPLE)
static int WIN_CDECL COVER_strict_cmp8(void* g_coverCtx, const void* lp, const void* rp) {
#elif (ZDICT_QSORT == ZDICT_QSORT_GNU) || (ZDICT_QSORT == ZDICT_QSORT_C11)
static int COVER_strict_cmp8(const void *lp, const void *rp, void *g_coverCtx) {
#else /* C90 fallback.*/
static int COVER_strict_cmp8(const void *lp, const void *rp) {
#endif
  int result = COVER_cmp8((COVER_ctx_t*)g_coverCtx, lp, rp);
  if (result == 0) {
    result = lp < rp ? -1 : 1;
  }
  return result;
}

/**
 * Abstracts away the divergence in qsort_r() parameters.
 * Hopefully when C11 becomes the norm, we will be able
 * to clean this up.
 */
static void stableSort(COVER_ctx_t *ctx)
{
    DEBUG_STATIC_ASSERT(ZDICT_QSORT_MIN <= ZDICT_QSORT && ZDICT_QSORT <= ZDICT_QSORT_MAX);
#if (ZDICT_QSORT == ZDICT_QSORT_APPLE)
    qsort_r(ctx->suffix, ctx->suffixSize, sizeof(U32),
            ctx,
            (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
#elif (ZDICT_QSORT == ZDICT_QSORT_GNU)
    qsort_r(ctx->suffix, ctx->suffixSize, sizeof(U32),
            (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp),
            ctx);
#elif (ZDICT_QSORT == ZDICT_QSORT_MSVC)
    qsort_s(ctx->suffix, ctx->suffixSize, sizeof(U32),
            (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp),
            ctx);
#elif (ZDICT_QSORT == ZDICT_QSORT_C11)
    qsort_s(ctx->suffix, ctx->suffixSize, sizeof(U32),
            (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp),
            ctx);
#else /* C90 fallback.*/
    g_coverCtx = ctx;
    /* TODO(cavalcanti): implement a reentrant qsort() when _r is not available. */
    qsort(ctx->suffix, ctx->suffixSize, sizeof(U32),
          (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
#endif
}
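
/* Illustrative sketch (not part of the original file): sorting the partial
 * suffix array of a toy sample with d = 4, via whichever qsort variant was
 * selected above. The tie-breaking comparators make the result deterministic,
 * as if the sort were stable. The example_ name is hypothetical. */
static void example_stableSort_usage(void)
{
  static const BYTE sample[16] = "abracadabra"; /* padded so 8-byte reads stay in bounds */
  U32 suffix[4];
  COVER_ctx_t ctx;
  U32 i;
  memset(&ctx, 0, sizeof(ctx));
  ctx.samples = sample;
  ctx.suffix = suffix;
  ctx.suffixSize = 11 - 8 + 1; /* mirrors COVER_ctx_init's MAX(d, sizeof(U64)) bound */
  ctx.d = 4;
  for (i = 0; i < ctx.suffixSize; ++i) {
    suffix[i] = i;
  }
  stableSort(&ctx); /* suffix now orders positions by their first 4 bytes */
}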

/**
 * Returns the first pointer in [first, last) whose element does not compare
 * less than value.  If no such element exists it returns last.
 */
static const size_t *COVER_lower_bound(const size_t* first, const size_t* last,
                                       size_t value) {
  size_t count = (size_t)(last - first);
  assert(last >= first);
  while (count != 0) {
    size_t step = count / 2;
    const size_t *ptr = first;
    ptr += step;
    if (*ptr < value) {
      first = ++ptr;
      count -= step + 1;
    } else {
      count = step;
    }
  }
  return first;
}
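
/* Illustrative example (not part of the original file): with offsets
 * {10, 25, 40} and value 12, COVER_lower_bound() returns &offsets[1],
 * because 25 is the first element that does not compare less than 12. */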

/**
 * Generic groupBy function.
 * Groups an array sorted by cmp into groups with equivalent values.
 * Calls grp for each group.
 */
static void
COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx,
              int (*cmp)(COVER_ctx_t *, const void *, const void *),
              void (*grp)(COVER_ctx_t *, const void *, const void *)) {
  const BYTE *ptr = (const BYTE *)data;
  size_t num = 0;
  while (num < count) {
    const BYTE *grpEnd = ptr + size;
    ++num;
    while (num < count && cmp(ctx, ptr, grpEnd) == 0) {
      grpEnd += size;
      ++num;
    }
    grp(ctx, ptr, grpEnd);
    ptr = grpEnd;
  }
}
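
/* Illustrative example (not part of the original file): applied to the sorted
 * array {1, 1, 2, 2, 2, 5}, grp is invoked three times, with half-open ranges
 * covering {1, 1}, then {2, 2, 2}, then {5}. */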

/*-*************************************
*  Cover functions
***************************************/

/**
 * Called on each group of positions with the same dmer.
 * Counts the frequency of each dmer and saves it in the suffix array.
 * Fills `ctx->dmerAt`.
 */
static void COVER_group(COVER_ctx_t *ctx, const void *group,
                        const void *groupEnd) {
  /* The group consists of all the positions with the same first d bytes. */
  const U32 *grpPtr = (const U32 *)group;
  const U32 *grpEnd = (const U32 *)groupEnd;
  /* The dmerId is how we will reference this dmer.
   * This allows us to map the whole dmer space to a much smaller space, the
   * size of the suffix array.
   */
  const U32 dmerId = (U32)(grpPtr - ctx->suffix);
  /* Count the number of samples this dmer shows up in */
  U32 freq = 0;
  /* Details */
  const size_t *curOffsetPtr = ctx->offsets;
  const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples;
  /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a
   * different sample than the last.
   */
  size_t curSampleEnd = ctx->offsets[0];
  for (; grpPtr != grpEnd; ++grpPtr) {
    /* Save the dmerId for this position so we can get back to it. */
    ctx->dmerAt[*grpPtr] = dmerId;
    /* Dictionaries only help for the first reference to the dmer.
     * After that zstd can reference the match from the previous reference.
     * So only count each dmer once for each sample it is in.
     */
    if (*grpPtr < curSampleEnd) {
      continue;
    }
    freq += 1;
    /* Binary search to find the end of the sample *grpPtr is in.
     * In the common case that grpPtr + 1 == grpEnd we can skip the binary
     * search because the loop is over.
     */
    if (grpPtr + 1 != grpEnd) {
      const size_t *sampleEndPtr =
          COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr);
      curSampleEnd = *sampleEndPtr;
      curOffsetPtr = sampleEndPtr + 1;
    }
  }
  /* At this point we are never going to look at this segment of the suffix
   * array again.  We take advantage of this fact to save memory.
   * We store the frequency of the dmer in the first position of the group,
   * which is dmerId.
   */
  ctx->suffix[dmerId] = freq;
}


/**
 * Selects the best segment in an epoch.
 * Segments are scored according to the function:
 *
 * Let F(d) be the frequency of dmer d.
 * Let S_i be the dmer at position i of segment S which has length k.
 *
 *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
 *
 * Once the dmer d is in the dictionary we set F(d) = 0.
 */
static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
                                           COVER_map_t *activeDmers, U32 begin,
                                           U32 end,
                                           ZDICT_cover_params_t parameters) {
  /* Constants */
  const U32 k = parameters.k;
  const U32 d = parameters.d;
  const U32 dmersInK = k - d + 1;
  /* Try each segment (activeSegment) and save the best (bestSegment) */
  COVER_segment_t bestSegment = {0, 0, 0};
  COVER_segment_t activeSegment;
  /* Reset the activeDmers in the segment */
  COVER_map_clear(activeDmers);
  /* The activeSegment starts at the beginning of the epoch. */
  activeSegment.begin = begin;
  activeSegment.end = begin;
  activeSegment.score = 0;
  /* Slide the activeSegment through the whole epoch.
   * Save the best segment in bestSegment.
   */
  while (activeSegment.end < end) {
    /* The dmerId for the dmer at the next position */
    U32 newDmer = ctx->dmerAt[activeSegment.end];
    /* The entry in activeDmers for this dmerId */
    U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer);
    /* If the dmer isn't already present in the segment add its score. */
    if (*newDmerOcc == 0) {
      /* The paper suggests using the L-0.5 norm, but experiments show that it
       * doesn't help.
       */
      activeSegment.score += freqs[newDmer];
    }
    /* Add the dmer to the segment */
    activeSegment.end += 1;
    *newDmerOcc += 1;

    /* If the window is now too large, drop the first position */
    if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
      U32 delDmer = ctx->dmerAt[activeSegment.begin];
      U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer);
      activeSegment.begin += 1;
      *delDmerOcc -= 1;
      /* If this is the last occurrence of the dmer, subtract its score */
      if (*delDmerOcc == 0) {
        COVER_map_remove(activeDmers, delDmer);
        activeSegment.score -= freqs[delDmer];
      }
    }

    /* If this segment is the best so far save it */
    if (activeSegment.score > bestSegment.score) {
      bestSegment = activeSegment;
    }
  }
  {
    /* Trim off the zero frequency head and tail from the segment. */
    U32 newBegin = bestSegment.end;
    U32 newEnd = bestSegment.begin;
    U32 pos;
    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
      U32 freq = freqs[ctx->dmerAt[pos]];
      if (freq != 0) {
        newBegin = MIN(newBegin, pos);
        newEnd = pos + 1;
      }
    }
    bestSegment.begin = newBegin;
    bestSegment.end = newEnd;
  }
  {
    /* Zero out the frequency of each dmer covered by the chosen segment. */
    U32 pos;
    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
      freqs[ctx->dmerAt[pos]] = 0;
    }
  }
  return bestSegment;
}
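
/* Illustrative example (not part of the original file): with k = 16 and
 * d = 8, a segment spans dmersInK = 16 - 8 + 1 = 9 dmer positions. The
 * sliding window above adds freqs[] only on a dmer's first occurrence inside
 * the window, so a segment's score is the sum over its distinct dmers. */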

/**
 * Check the validity of the parameters.
 * Returns non-zero if the parameters are valid and 0 otherwise.
 */
static int COVER_checkParameters(ZDICT_cover_params_t parameters,
                                 size_t maxDictSize) {
  /* k and d are required parameters */
  if (parameters.d == 0 || parameters.k == 0) {
    return 0;
  }
  /* k <= maxDictSize */
  if (parameters.k > maxDictSize) {
    return 0;
  }
  /* d <= k */
  if (parameters.d > parameters.k) {
    return 0;
  }
  /* 0 < splitPoint <= 1 */
  if (parameters.splitPoint <= 0 || parameters.splitPoint > 1) {
    return 0;
  }
  return 1;
}

/**
 * Clean up a context initialized with `COVER_ctx_init()`.
 */
static void COVER_ctx_destroy(COVER_ctx_t *ctx) {
  if (!ctx) {
    return;
  }
  if (ctx->suffix) {
    free(ctx->suffix);
    ctx->suffix = NULL;
  }
  if (ctx->freqs) {
    free(ctx->freqs);
    ctx->freqs = NULL;
  }
  if (ctx->dmerAt) {
    free(ctx->dmerAt);
    ctx->dmerAt = NULL;
  }
  if (ctx->offsets) {
    free(ctx->offsets);
    ctx->offsets = NULL;
  }
}

/**
 * Prepare a context for dictionary building.
 * The context is only dependent on the parameter `d` and can be used multiple
 * times.
 * Returns 0 on success or an error code on failure.
 * The context must be destroyed with `COVER_ctx_destroy()`.
 */
static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
                          const size_t *samplesSizes, unsigned nbSamples,
                          unsigned d, double splitPoint, int displayLevel)
{
  const BYTE *const samples = (const BYTE *)samplesBuffer;
  const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
  /* Split samples into testing and training sets */
  const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
  const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
  const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
  const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;
  ctx->displayLevel = displayLevel;
  /* Checks */
  if (totalSamplesSize < MAX(d, sizeof(U64)) ||
      totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
    DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
                 (unsigned)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20));
    return ERROR(srcSize_wrong);
  }
  /* Check that there are at least 5 training samples */
  if (nbTrainSamples < 5) {
    DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid.", nbTrainSamples);
    return ERROR(srcSize_wrong);
  }
  /* Check that there is at least one testing sample */
  if (nbTestSamples < 1) {
    DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.", nbTestSamples);
    return ERROR(srcSize_wrong);
  }
  /* Zero the context */
  memset(ctx, 0, sizeof(*ctx));
  DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
               (unsigned)trainingSamplesSize);
  DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
               (unsigned)testSamplesSize);
  ctx->samples = samples;
  ctx->samplesSizes = samplesSizes;
  ctx->nbSamples = nbSamples;
  ctx->nbTrainSamples = nbTrainSamples;
  ctx->nbTestSamples = nbTestSamples;
  /* Partial suffix array */
  ctx->suffixSize = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;
  ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
  /* Maps index to the dmerID */
  ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
  /* The offsets of each file */
  ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t));
  if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) {
    DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n");
    COVER_ctx_destroy(ctx);
    return ERROR(memory_allocation);
  }
  ctx->freqs = NULL;
  ctx->d = d;

  /* Fill offsets from the samplesSizes */
  {
    U32 i;
    ctx->offsets[0] = 0;
    for (i = 1; i <= nbSamples; ++i) {
      ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
    }
  }
  DISPLAYLEVEL(2, "Constructing partial suffix array\n");
  {
    /* suffix is a partial suffix array.
     * It only sorts suffixes by their first parameters.d bytes.
     * The sort is stable, so each dmer group is sorted by position in input.
     */
    U32 i;
    for (i = 0; i < ctx->suffixSize; ++i) {
      ctx->suffix[i] = i;
    }
    stableSort(ctx);
  }
  DISPLAYLEVEL(2, "Computing frequencies\n");
  /* For each dmer group (group of positions with the same first d bytes):
   * 1. For each position we set dmerAt[position] = dmerID.  The dmerID is
   *    (groupBeginPtr - suffix).  This allows us to go from position to
   *    dmerID so we can look up values in freq.
   * 2. We calculate how many samples the dmer occurs in and save it in
   *    freqs[dmerId].
   */
  COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx,
                (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group);
  ctx->freqs = ctx->suffix;
  ctx->suffix = NULL;
  return 0;
}
716
void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel)
717
0
{
718
0
  const double ratio = (double)nbDmers / (double)maxDictSize;
719
0
  if (ratio >= 10) {
720
0
      return;
721
0
  }
722
0
  DISPLAYLEVEL(1,
723
0
               "WARNING: The maximum dictionary size %u is too large "
724
0
               "compared to the source size %u! "
725
0
               "size(source)/size(dictionary) = %f, but it should be >= "
726
0
               "10! This may lead to a subpar dictionary! We recommend "
727
0
               "training on sources at least 10x, and preferably 100x "
728
0
               "the size of the dictionary! \n", (U32)maxDictSize,
729
0
               (U32)nbDmers, ratio);
730
0
}
731
732
COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize,
733
                                       U32 nbDmers, U32 k, U32 passes)
734
0
{
735
0
  const U32 minEpochSize = k * 10;
736
0
  COVER_epoch_info_t epochs;
737
0
  epochs.num = MAX(1, maxDictSize / k / passes);
738
0
  epochs.size = nbDmers / epochs.num;
739
0
  if (epochs.size >= minEpochSize) {
740
0
      assert(epochs.size * epochs.num <= nbDmers);
741
0
      return epochs;
742
0
  }
743
0
  epochs.size = MIN(minEpochSize, nbDmers);
744
0
  epochs.num = nbDmers / epochs.size;
745
0
  assert(epochs.size * epochs.num <= nbDmers);
746
0
  return epochs;
747
0
}
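
/* Illustrative example (not part of the original file): with
 * maxDictSize = 102400, k = 1024 and passes = 4, epochs.num =
 * MAX(1, 102400 / 1024 / 4) = 25; with nbDmers = 1000000 that gives
 * epochs.size = 40000, which clears minEpochSize = 10240 and is kept. */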

/**
 * Given the prepared context build the dictionary.
 */
static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
                                    COVER_map_t *activeDmers, void *dictBuffer,
                                    size_t dictBufferCapacity,
                                    ZDICT_cover_params_t parameters) {
  BYTE *const dict = (BYTE *)dictBuffer;
  size_t tail = dictBufferCapacity;
  /* Divide the data into epochs. We will select one segment from each epoch. */
  const COVER_epoch_info_t epochs = COVER_computeEpochs(
      (U32)dictBufferCapacity, (U32)ctx->suffixSize, parameters.k, 4);
  const size_t maxZeroScoreRun = MAX(10, MIN(100, epochs.num >> 3));
  size_t zeroScoreRun = 0;
  size_t epoch;
  clock_t lastUpdateTime = 0;
  const int displayLevel = ctx->displayLevel;
  DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
                (U32)epochs.num, (U32)epochs.size);
  /* Loop through the epochs until there are no more segments or the dictionary
   * is full.
   */
  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) {
    const U32 epochBegin = (U32)(epoch * epochs.size);
    const U32 epochEnd = epochBegin + epochs.size;
    size_t segmentSize;
    /* Select a segment */
    COVER_segment_t segment = COVER_selectSegment(
        ctx, freqs, activeDmers, epochBegin, epochEnd, parameters);
    /* If the segment covers no dmers, then we are out of content.
     * There may be new content in other epochs, so continue for some time.
     */
    if (segment.score == 0) {
      if (++zeroScoreRun >= maxZeroScoreRun) {
          break;
      }
      continue;
    }
    zeroScoreRun = 0;
    /* Trim the segment if necessary and if it is too small then we are done */
    segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
    if (segmentSize < parameters.d) {
      break;
    }
    /* We fill the dictionary from the back to allow the best segments to be
     * referenced with the smallest offsets.
     */
    tail -= segmentSize;
    memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
    DISPLAYUPDATE(
        lastUpdateTime,
        2, "\r%u%%       ",
        (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
  }
  DISPLAYLEVEL(2, "\r%79s\r", "");
  return tail;
}

ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_cover(
    void *dictBuffer, size_t dictBufferCapacity,
    const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
    ZDICT_cover_params_t parameters)
{
  BYTE* const dict = (BYTE*)dictBuffer;
  COVER_ctx_t ctx;
  COVER_map_t activeDmers;
  const int displayLevel = (int)parameters.zParams.notificationLevel;
  parameters.splitPoint = 1.0;
  /* Checks */
  if (!COVER_checkParameters(parameters, dictBufferCapacity)) {
    DISPLAYLEVEL(1, "Cover parameters incorrect\n");
    return ERROR(parameter_outOfBound);
  }
  if (nbSamples == 0) {
    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
    return ERROR(srcSize_wrong);
  }
  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
                 ZDICT_DICTSIZE_MIN);
    return ERROR(dstSize_tooSmall);
  }
  /* Initialize context and activeDmers */
  {
    size_t const initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
                      parameters.d, parameters.splitPoint, displayLevel);
    if (ZSTD_isError(initVal)) {
      return initVal;
    }
  }
  COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, displayLevel);
  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
    COVER_ctx_destroy(&ctx);
    return ERROR(memory_allocation);
  }

  DISPLAYLEVEL(2, "Building dictionary\n");
  {
    const size_t tail =
        COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer,
                              dictBufferCapacity, parameters);
    const size_t dictionarySize = ZDICT_finalizeDictionary(
        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
        samplesBuffer, samplesSizes, nbSamples, parameters.zParams);
    if (!ZSTD_isError(dictionarySize)) {
      DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
                   (unsigned)dictionarySize);
    }
    COVER_ctx_destroy(&ctx);
    COVER_map_destroy(&activeDmers);
    return dictionarySize;
  }
}
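
/* Illustrative sketch (not part of the original file): driving the trainer
 * above with fixed parameters. Requires ZDICT_STATIC_LINKING_ONLY (defined at
 * the top of this file); the buffer names and the example_ function are
 * hypothetical. */
static size_t example_train_cover(void* dictBuffer, size_t dictCapacity,
                                  const void* samples, const size_t* sampleSizes,
                                  unsigned nbSamples)
{
  ZDICT_cover_params_t params;
  memset(&params, 0, sizeof(params));
  params.k = 1024; /* segment size; k and d are both required here */
  params.d = 8;    /* dmer size, with d <= k <= dictCapacity */
  params.zParams.notificationLevel = 2;
  return ZDICT_trainFromBuffer_cover(dictBuffer, dictCapacity,
                                     samples, sampleSizes, nbSamples, params);
}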

size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters,
                                    const size_t *samplesSizes, const BYTE *samples,
                                    size_t *offsets,
                                    size_t nbTrainSamples, size_t nbSamples,
                                    BYTE *const dict, size_t dictBufferCapacity) {
  size_t totalCompressedSize = ERROR(GENERIC);
  /* Pointers */
  ZSTD_CCtx *cctx;
  ZSTD_CDict *cdict;
  void *dst;
  /* Local variables */
  size_t dstCapacity;
  size_t i;
  /* Allocate dst with enough space to compress the maximum sized sample */
  {
    size_t maxSampleSize = 0;
    i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
    for (; i < nbSamples; ++i) {
      maxSampleSize = MAX(samplesSizes[i], maxSampleSize);
    }
    dstCapacity = ZSTD_compressBound(maxSampleSize);
    dst = malloc(dstCapacity);
  }
  /* Create the cctx and cdict */
  cctx = ZSTD_createCCtx();
  cdict = ZSTD_createCDict(dict, dictBufferCapacity,
                           parameters.zParams.compressionLevel);
  if (!dst || !cctx || !cdict) {
    goto _compressCleanup;
  }
  /* Compress each sample and sum their sizes (or error) */
  totalCompressedSize = dictBufferCapacity;
  i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
  for (; i < nbSamples; ++i) {
    const size_t size = ZSTD_compress_usingCDict(
        cctx, dst, dstCapacity, samples + offsets[i],
        samplesSizes[i], cdict);
    if (ZSTD_isError(size)) {
      totalCompressedSize = size;
      goto _compressCleanup;
    }
    totalCompressedSize += size;
  }
_compressCleanup:
  ZSTD_freeCCtx(cctx);
  ZSTD_freeCDict(cdict);
  if (dst) {
    free(dst);
  }
  return totalCompressedSize;
}

/**
 * Initialize the `COVER_best_t`.
 */
void COVER_best_init(COVER_best_t *best) {
  if (best==NULL) return; /* compatible with init on NULL */
  (void)ZSTD_pthread_mutex_init(&best->mutex, NULL);
  (void)ZSTD_pthread_cond_init(&best->cond, NULL);
  best->liveJobs = 0;
  best->dict = NULL;
  best->dictSize = 0;
  best->compressedSize = (size_t)-1;
  memset(&best->parameters, 0, sizeof(best->parameters));
}

/**
 * Wait until liveJobs == 0.
 */
void COVER_best_wait(COVER_best_t *best) {
  if (!best) {
    return;
  }
  ZSTD_pthread_mutex_lock(&best->mutex);
  while (best->liveJobs != 0) {
    ZSTD_pthread_cond_wait(&best->cond, &best->mutex);
  }
  ZSTD_pthread_mutex_unlock(&best->mutex);
}

/**
 * Call COVER_best_wait() and then destroy the COVER_best_t.
 */
void COVER_best_destroy(COVER_best_t *best) {
  if (!best) {
    return;
  }
  COVER_best_wait(best);
  if (best->dict) {
    free(best->dict);
  }
  ZSTD_pthread_mutex_destroy(&best->mutex);
  ZSTD_pthread_cond_destroy(&best->cond);
}

/**
 * Called when a thread is about to be launched.
 * Increments liveJobs.
 */
void COVER_best_start(COVER_best_t *best) {
  if (!best) {
    return;
  }
  ZSTD_pthread_mutex_lock(&best->mutex);
  ++best->liveJobs;
  ZSTD_pthread_mutex_unlock(&best->mutex);
}

/**
 * Called when a thread finishes executing, whether on error or success.
 * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
 * If this dictionary is the best so far save it and its parameters.
 */
void COVER_best_finish(COVER_best_t* best,
                      ZDICT_cover_params_t parameters,
                      COVER_dictSelection_t selection)
{
  void* dict = selection.dictContent;
  size_t compressedSize = selection.totalCompressedSize;
  size_t dictSize = selection.dictSize;
  if (!best) {
    return;
  }
  {
    size_t liveJobs;
    ZSTD_pthread_mutex_lock(&best->mutex);
    --best->liveJobs;
    liveJobs = best->liveJobs;
    /* If the new dictionary is better */
    if (compressedSize < best->compressedSize) {
      /* Allocate space if necessary */
      if (!best->dict || best->dictSize < dictSize) {
        if (best->dict) {
          free(best->dict);
        }
        best->dict = malloc(dictSize);
        if (!best->dict) {
          best->compressedSize = ERROR(GENERIC);
          best->dictSize = 0;
          ZSTD_pthread_cond_signal(&best->cond);
          ZSTD_pthread_mutex_unlock(&best->mutex);
          return;
        }
      }
      /* Save the dictionary, parameters, and size */
      if (dict) {
        memcpy(best->dict, dict, dictSize);
        best->dictSize = dictSize;
        best->parameters = parameters;
        best->compressedSize = compressedSize;
      }
    }
    if (liveJobs == 0) {
      ZSTD_pthread_cond_broadcast(&best->cond);
    }
    ZSTD_pthread_mutex_unlock(&best->mutex);
  }
}

static COVER_dictSelection_t setDictSelection(BYTE* buf, size_t s, size_t csz)
{
    COVER_dictSelection_t ds;
    ds.dictContent = buf;
    ds.dictSize = s;
    ds.totalCompressedSize = csz;
    return ds;
}

COVER_dictSelection_t COVER_dictSelectionError(size_t error) {
    return setDictSelection(NULL, 0, error);
}

unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection) {
  return (ZSTD_isError(selection.totalCompressedSize) || !selection.dictContent);
}

void COVER_dictSelectionFree(COVER_dictSelection_t selection){
  free(selection.dictContent);
}

COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBufferCapacity,
        size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples,
        size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize) {

  size_t largestDict = 0;
  size_t largestCompressed = 0;
  BYTE* customDictContentEnd = customDictContent + dictContentSize;

  BYTE* largestDictbuffer = (BYTE*)malloc(dictBufferCapacity);
  BYTE* candidateDictBuffer = (BYTE*)malloc(dictBufferCapacity);
  double regressionTolerance = ((double)params.shrinkDictMaxRegression / 100.0) + 1.00;

  if (!largestDictbuffer || !candidateDictBuffer) {
    free(largestDictbuffer);
    free(candidateDictBuffer);
    return COVER_dictSelectionError(dictContentSize);
  }

  /* Initial dictionary size and compressed size */
  memcpy(largestDictbuffer, customDictContent, dictContentSize);
  dictContentSize = ZDICT_finalizeDictionary(
    largestDictbuffer, dictBufferCapacity, customDictContent, dictContentSize,
    samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);

  if (ZDICT_isError(dictContentSize)) {
    free(largestDictbuffer);
    free(candidateDictBuffer);
    return COVER_dictSelectionError(dictContentSize);
  }

  totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes,
                                                       samplesBuffer, offsets,
                                                       nbCheckSamples, nbSamples,
                                                       largestDictbuffer, dictContentSize);

  if (ZSTD_isError(totalCompressedSize)) {
    free(largestDictbuffer);
    free(candidateDictBuffer);
    return COVER_dictSelectionError(totalCompressedSize);
  }

  if (params.shrinkDict == 0) {
    free(candidateDictBuffer);
    return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize);
  }

  largestDict = dictContentSize;
  largestCompressed = totalCompressedSize;
  dictContentSize = ZDICT_DICTSIZE_MIN;

  /* Largest dict is initially at least ZDICT_DICTSIZE_MIN */
  while (dictContentSize < largestDict) {
    memcpy(candidateDictBuffer, largestDictbuffer, largestDict);
    dictContentSize = ZDICT_finalizeDictionary(
      candidateDictBuffer, dictBufferCapacity, customDictContentEnd - dictContentSize, dictContentSize,
      samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);

    if (ZDICT_isError(dictContentSize)) {
      free(largestDictbuffer);
      free(candidateDictBuffer);
      return COVER_dictSelectionError(dictContentSize);
    }

    totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes,
                                                         samplesBuffer, offsets,
                                                         nbCheckSamples, nbSamples,
                                                         candidateDictBuffer, dictContentSize);

    if (ZSTD_isError(totalCompressedSize)) {
      free(largestDictbuffer);
      free(candidateDictBuffer);
      return COVER_dictSelectionError(totalCompressedSize);
    }

    if ((double)totalCompressedSize <= (double)largestCompressed * regressionTolerance) {
      free(largestDictbuffer);
      return setDictSelection(candidateDictBuffer, dictContentSize, totalCompressedSize);
    }
    dictContentSize *= 2;
  }
  dictContentSize = largestDict;
  totalCompressedSize = largestCompressed;
  free(candidateDictBuffer);
  return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize);
}
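
/* Illustrative example (not part of the original file): with
 * params.shrinkDictMaxRegression = 2, regressionTolerance = 1.02, so the loop
 * above returns the first candidate (sizes doubling up from
 * ZDICT_DICTSIZE_MIN) whose total compressed size is within 2% of the
 * full-size dictionary's; otherwise the full-size dictionary wins. */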

/**
 * Parameters for COVER_tryParameters().
 */
typedef struct COVER_tryParameters_data_s {
  const COVER_ctx_t *ctx;
  COVER_best_t *best;
  size_t dictBufferCapacity;
  ZDICT_cover_params_t parameters;
} COVER_tryParameters_data_t;

/**
 * Tries a set of parameters and updates the COVER_best_t with the results.
 * This function is thread safe if zstd is compiled with multithreaded support.
 * It takes its parameters as an *OWNING* opaque pointer to support threading.
 */
static void COVER_tryParameters(void *opaque)
{
  /* Save parameters as local variables */
  COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t*)opaque;
  const COVER_ctx_t *const ctx = data->ctx;
  const ZDICT_cover_params_t parameters = data->parameters;
  size_t dictBufferCapacity = data->dictBufferCapacity;
  size_t totalCompressedSize = ERROR(GENERIC);
  /* Allocate space for hash table, dict, and freqs */
  COVER_map_t activeDmers;
  BYTE* const dict = (BYTE*)malloc(dictBufferCapacity);
  COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC));
  U32* const freqs = (U32*)malloc(ctx->suffixSize * sizeof(U32));
  const int displayLevel = ctx->displayLevel;
  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
    goto _cleanup;
  }
  if (!dict || !freqs) {
    DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
    goto _cleanup;
  }
  /* Copy the frequencies because we need to modify them */
  memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32));
  /* Build the dictionary */
  {
    const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
                                              dictBufferCapacity, parameters);
    selection = COVER_selectDict(dict + tail, dictBufferCapacity, dictBufferCapacity - tail,
        ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets,
        totalCompressedSize);

    if (COVER_dictSelectionIsError(selection)) {
      DISPLAYLEVEL(1, "Failed to select dictionary\n");
      goto _cleanup;
    }
  }
_cleanup:
  free(dict);
  COVER_best_finish(data->best, parameters, selection);
  free(data);
  COVER_map_destroy(&activeDmers);
  COVER_dictSelectionFree(selection);
  free(freqs);
}

ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_cover(
    void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer,
    const size_t* samplesSizes, unsigned nbSamples,
    ZDICT_cover_params_t* parameters)
{
  /* constants */
  const unsigned nbThreads = parameters->nbThreads;
  const double splitPoint =
      parameters->splitPoint <= 0.0 ? COVER_DEFAULT_SPLITPOINT : parameters->splitPoint;
  const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
  const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
  const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
  const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
  const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
  const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
  const unsigned kIterations =
      (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
  const unsigned shrinkDict = 0;
  /* Local variables */
  int displayLevel = (int)parameters->zParams.notificationLevel;
  unsigned iteration = 1;
  unsigned d;
  unsigned k;
  COVER_best_t best;
  POOL_ctx *pool = NULL;
  int warned = 0;
  clock_t lastUpdateTime = 0;

  /* Checks */
  if (splitPoint <= 0 || splitPoint > 1) {
    DISPLAYLEVEL(1, "Incorrect parameters\n");
    return ERROR(parameter_outOfBound);
  }
  if (kMinK < kMaxD || kMaxK < kMinK) {
    DISPLAYLEVEL(1, "Incorrect parameters\n");
    return ERROR(parameter_outOfBound);
  }
  if (nbSamples == 0) {
    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
    return ERROR(srcSize_wrong);
  }
  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
                 ZDICT_DICTSIZE_MIN);
    return ERROR(dstSize_tooSmall);
  }
  if (nbThreads > 1) {
    pool = POOL_create(nbThreads, 1);
    if (!pool) {
      return ERROR(memory_allocation);
    }
  }
  /* Initialization */
  COVER_best_init(&best);
  /* Loop through d first because each new value needs a new context */
  DISPLAYLEVEL(2, "Trying %u different sets of parameters\n", kIterations);
  for (d = kMinD; d <= kMaxD; d += 2) {
    /* Initialize the context for this value of d */
    COVER_ctx_t ctx;
    DISPLAYLEVEL(3, "d=%u\n", d);
    {
      /* Turn down the child display level to clean up the display at level 2 and below */
      const int childDisplayLevel = (displayLevel == 0) ? 0 : displayLevel - 1;
      const size_t initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, childDisplayLevel);
      if (ZSTD_isError(initVal)) {
        DISPLAYLEVEL(1, "Failed to initialize context\n");
        COVER_best_destroy(&best);
        POOL_free(pool);
        return initVal;
      }
    }
    if (!warned) {
      COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, displayLevel);
      warned = 1;
    }
    /* Loop through k, reusing the same context */
    for (k = kMinK; k <= kMaxK; k += kStepSize) {
      /* Prepare the arguments */
      COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc(
          sizeof(COVER_tryParameters_data_t));
      DISPLAYLEVEL(3, "k=%u\n", k);
      if (!data) {
        DISPLAYLEVEL(1, "Failed to allocate parameters\n");
        COVER_best_destroy(&best);
        COVER_ctx_destroy(&ctx);
        POOL_free(pool);
        return ERROR(memory_allocation);
      }
      data->ctx = &ctx;
      data->best = &best;
      data->dictBufferCapacity = dictBufferCapacity;
      data->parameters = *parameters;
      data->parameters.k = k;
      data->parameters.d = d;
      data->parameters.splitPoint = splitPoint;
      data->parameters.steps = kSteps;
      data->parameters.shrinkDict = shrinkDict;
      data->parameters.zParams.notificationLevel = (unsigned)ctx.displayLevel;
      /* Check the parameters */
      if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) {
        DISPLAYLEVEL(1, "Cover parameters incorrect\n");
        free(data);
        continue;
      }
      /* Call the function and pass ownership of data to it */
      COVER_best_start(&best);
      if (pool) {
        POOL_add(pool, &COVER_tryParameters, data);
      } else {
        COVER_tryParameters(data);
      }
      /* Print status */
      DISPLAYUPDATE(lastUpdateTime, 2, "\r%u%%       ",
                    (unsigned)((iteration * 100) / kIterations));
      ++iteration;
    }
    COVER_best_wait(&best);
    COVER_ctx_destroy(&ctx);
  }
  DISPLAYLEVEL(2, "\r%79s\r", "");
  /* Fill the output buffer and parameters with the output of the best parameters */
  {
    const size_t dictSize = best.dictSize;
    if (ZSTD_isError(best.compressedSize)) {
      const size_t compressedSize = best.compressedSize;
      COVER_best_destroy(&best);
      POOL_free(pool);
      return compressedSize;
    }
    *parameters = best.parameters;
    memcpy(dictBuffer, best.dict, dictSize);
    COVER_best_destroy(&best);
    POOL_free(pool);
    return dictSize;
  }
}
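
/* Illustrative sketch (not part of the original file): letting the optimizer
 * above search the parameter grid. Zeroed k/d/steps select the defaults
 * (d in {6, 8}, k in [50, 2000], 40 steps); on success *parameters receives
 * the winning combination. The example_ name is hypothetical. */
static size_t example_optimize_cover(void* dictBuffer, size_t dictCapacity,
                                     const void* samples, const size_t* sampleSizes,
                                     unsigned nbSamples)
{
  ZDICT_cover_params_t params;
  memset(&params, 0, sizeof(params));
  params.nbThreads = 4; /* a POOL_ctx is created when nbThreads > 1 */
  params.zParams.notificationLevel = 2;
  return ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, dictCapacity,
                                             samples, sampleSizes, nbSamples,
                                             &params);
}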