Coverage Report

Created: 2026-03-31 06:21

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/freeradius-server/src/lib/util/lst.c
Line
Count
Source
1
/*
2
 *   This program is free software; you can redistribute it and/or modify
3
 *   it under the terms of the GNU General Public License as published by
4
 *   the Free Software Foundation; either version 2 of the License, or
5
 *   (at your option) any later version.
6
 *
7
 *   This program is distributed in the hope that it will be useful,
8
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
9
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10
 *   GNU General Public License for more details.
11
 *
12
 *   You should have received a copy of the GNU General Public License
13
 *   along with this program; if not, write to the Free Software
14
 *   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
15
 */
16
17
/** Functions for a Leftmost Skeleton Tree
18
 *
19
 * @file src/lib/util/lst.c
20
 *
21
 * @copyright 2021 Network RADIUS SAS (legal@networkradius.com)
22
 */
23
RCSID("$Id: 1585f07f55589f3c217054332444b8118b880f9a $")
24
25
#include <freeradius-devel/util/lst.h>
26
#include <freeradius-devel/util/rand.h>
27
#include <freeradius-devel/util/strerror.h>
28
29
/*
30
 * Leftmost Skeleton Trees are defined in "Stronger Quickheaps" (Gonzalo Navarro,
31
 * Rodrigo Paredes, Patricio V. Poblete, and Peter Sanders) International Journal
32
 * of Foundations of Computer Science, November 2011. As the title suggests, it
33
 * is inspired by quickheaps, and indeed the underlying representation looks
34
 * like a quickheap.
35
 *
36
 * heap/priority queue operations are defined in the paper in terms of LST
37
 * operations.
38
 */
39
40
/*
41
 * The LST as defined in the paper has a fixed size set at creation.
42
 * Here, as with quickheaps, but we want to allow for expansion...
43
 * though given that, as the paper shows, the expected stack depth
44
 * is proportion to the log of the number of items in the LST, expanding
45
 * the pivot stack may be a rare event.
46
 */
47
0
#define INITIAL_CAPACITY  2048
48
49
0
#define is_power_of_2(_n) ((_n) && (((_n) & ((_n) - 1)) == 0))
50
51
typedef unsigned int stack_index_t;
52
53
/** Stack of pivot indices; the bottom entry (index 0) is the "fictitious" pivot
 *  one past the last element, which is never dereferenced as an item.
 */
typedef struct {
	stack_index_t	depth;		//!< The current stack depth.
	unsigned int	size;		//!< The current stack size (number of frames)
	fr_lst_index_t	*data;		//!< Array of indices of the pivots (sometimes called roots)
} pivot_stack_t;
58
59
/** An LST: a circular array of element pointers plus the stack of pivots
 *  that partitions it into buckets.
 */
struct fr_lst_s {
	unsigned int	capacity;	//!< Number of elements that will fit (always a power of two).
	fr_lst_index_t	idx;		//!< Starting index, initially zero
	unsigned int	num_elements;	//!< Number of elements in the LST
	size_t		offset;		//!< Offset of heap index in element structure.
	void		**p;		//!< Array of elements.
	pivot_stack_t	s;		//!< Stack of pivots, always with depth >= 1.
	fr_fast_rand_t	rand_ctx;	//!< Seed for random choices.
	char const	*type;		//!< Type of elements.
	fr_cmp_t	cmp;		//!< Comparator function.
};
70
71
static inline fr_lst_index_t stack_item(pivot_stack_t const *s, stack_index_t idx) CC_HINT(always_inline, nonnull);
72
static inline stack_index_t lst_length(fr_lst_t const *lst, stack_index_t stack_index) CC_HINT(always_inline, nonnull);
73
74
/** Return the address within an element where its LST index field lives
 *
 * The field is lst->offset bytes into the element structure.
 */
static inline CC_HINT(always_inline, nonnull) void *index_addr(fr_lst_t const *lst, void *data)
{
	uint8_t *base = data;

	return base + lst->offset;
}
78
79
/*
80
 * Concerning item_index() and item_index_set():
81
 * To let zero be the value *as stored in an item* that indicates not being in an LST,
82
 * we add one to the real index when storing it and subtract one when retrieving it.
83
 *
84
 * This lets the LST functions use item indices in [0, lst->capacity), important for
85
 * 1. the circular array, which allows an important optimization for fr_lst_pop()
86
 * 2. quick reduction of indices
87
 *
88
 * fr_item_insert() needs to see the value actually stored, hence raw_item_index().
89
 */
90
/** Fetch the index value as actually stored in an element (biased by one; 0 means "not in an LST") */
static inline CC_HINT(always_inline, nonnull) fr_lst_index_t raw_item_index(fr_lst_t const *lst, void *data)
{
	fr_lst_index_t *idx_field = index_addr(lst, data);

	return *idx_field;
}
94
95
/** Fetch the true index of an element, undoing the +1 storage bias */
static inline CC_HINT(always_inline, nonnull) fr_lst_index_t item_index(fr_lst_t const *lst, void *data)
{
	fr_lst_index_t stored = raw_item_index(lst, data);

	return stored - 1;
}
99
100
/** Record an element's index, storing it biased by one so that zero means "not in an LST" */
static inline CC_HINT(always_inline, nonnull) void item_index_set(fr_lst_t *lst, void *data, fr_lst_index_t idx)
{
	fr_lst_index_t *idx_field = index_addr(lst, data);

	*idx_field = idx + 1;
}
104
105
/** Map an unbounded index into [0, capacity); capacity is always a power of two, so masking works */
static inline CC_HINT(always_inline, nonnull) fr_lst_index_t index_reduce(fr_lst_t const *lst, fr_lst_index_t idx)
{
	fr_lst_index_t mask = lst->capacity - 1;

	return idx & mask;
}
109
110
static inline CC_HINT(always_inline, nonnull)
111
bool is_equivalent(fr_lst_t const *lst, fr_lst_index_t idx1, fr_lst_index_t idx2)
112
0
{
113
0
  return (index_reduce(lst, idx1 - idx2) == 0);
114
0
}
115
116
/** Store an element pointer at the (reduced) array position given by idx */
static inline CC_HINT(always_inline, nonnull) void item_set(fr_lst_t *lst, fr_lst_index_t idx, void *data)
{
	fr_lst_index_t slot = index_reduce(lst, idx);

	lst->p[slot] = data;
}
120
121
/** Fetch the element pointer at the (reduced) array position given by idx */
static inline CC_HINT(always_inline, nonnull) void *item(fr_lst_t const *lst, fr_lst_index_t idx)
{
	fr_lst_index_t slot = index_reduce(lst, idx);

	return lst->p[slot];
}
125
126
/** Fetch the element that is the pivot recorded at the given pivot stack position */
static inline CC_HINT(always_inline, nonnull) void *pivot_item(fr_lst_t const *lst, stack_index_t idx)
{
	fr_lst_index_t pivot = stack_item(&lst->s, idx);

	return item(lst, pivot);
}
130
131
/*
132
 * The paper defines randomized priority queue operations appropriately for the
133
 * sum type definition the authors use for LSTs, which are used to implement the
134
 * RPQ operations. This code, however, deals with the internal representation,
135
 * including the root/pivot stack, which must change as the LST changes. Also, an
136
 * insertion or deletion may shift the position of any number of buckets or change
137
 * the number of buckets.
138
 *
139
 * So... for those operations, we will pass in the pointer to the LST, but
140
 * internally, we'll represent it and its subtrees with an (LST pointer, stack index)
141
 * pair. The index is that of the least pivot greater than or equal to all items in
142
 * the subtree (considering the "fictitious" pivot greater than anything, so (lst, 0)
143
 * represents the entire tree.
144
 *
145
 * The fictitious pivot at the bottom of the stack isn't actually in the array,
146
 * so don't try to refer to what's there.
147
 *
148
 * The index is visible for the size and length functions, since they need
149
 * to know the subtree they're working on.
150
 */
151
/** A (sub)tree is a single bucket iff its "length" (number of buckets) is one */
static inline CC_HINT(always_inline, nonnull) bool is_bucket(fr_lst_t const *lst, stack_index_t idx)
{
	stack_index_t nbuckets = lst_length(lst, idx);

	return nbuckets == 1;
}
155
156
/** Double the size of an LST's pivot stack
 *
 * Unlike the element array, the stack size need not be a power of two,
 * so clamping to UINT_MAX on overflow is safe here.
 *
 * @param[in] lst	LST that owns the stack; used as the talloc context
 *			for reallocation.
 * @param[in] s		the pivot stack to expand.
 * @return true on success, false if the stack is already at UINT_MAX
 *	   frames or reallocation fails (error recorded via fr_strerror).
 */
static bool stack_expand(fr_lst_t *lst, pivot_stack_t *s)
{
	fr_lst_index_t	*n;
	unsigned int	n_size;

#ifndef NDEBUG
	/*
	 *	This likely can't happen, we just include
	 *	the guard to keep static analysis happy.
	 */
	if (unlikely(s->size > (UINT_MAX - s->size))) {	/* i.e. s->size * 2 would overflow */
		if (s->size == UINT_MAX) {
			fr_strerror_const("lst stack is full");
			return false;
		} else {
			n_size = UINT_MAX;	/* Can't double; clamp to the maximum */
		}
	} else {
#endif
		n_size = s->size * 2;
#ifndef NDEBUG
	}
#endif

	n = talloc_realloc(lst, s->data, fr_lst_index_t, n_size);
	if (unlikely(!n)) {
		fr_strerror_printf("Failed expanding lst stack to %u elements (%zu bytes)",
				   n_size, n_size * sizeof(fr_lst_index_t));
		return false;
	}

	s->size = n_size;
	s->data = n;
	return true;
}
191
192
/** Push a pivot index onto the stack, growing the stack first if it's full
 *
 * @return 0 on success, -1 if the stack needed expanding and that failed.
 */
static inline CC_HINT(always_inline, nonnull) int stack_push(fr_lst_t *lst, pivot_stack_t *s, fr_lst_index_t pivot)
{
	if (s->depth == s->size) {
		if (unlikely(!stack_expand(lst, s))) return -1;
	}

	s->data[s->depth] = pivot;
	s->depth++;

	return 0;
}
199
200
/** Discard the top n frames of a pivot stack */
static inline CC_HINT(always_inline, nonnull) void stack_pop(pivot_stack_t *s, unsigned int n)
{
	s->depth = s->depth - n;
}
204
205
/** Current number of frames on a pivot stack (always >= 1 for a valid LST) */
static inline CC_HINT(always_inline, nonnull) stack_index_t stack_depth(pivot_stack_t const *s)
{
	stack_index_t depth = s->depth;

	return depth;
}
209
210
/** Read the pivot index stored at stack position idx (0 is the bottom, the fictitious pivot) */
static inline fr_lst_index_t stack_item(pivot_stack_t const *s, stack_index_t idx)
{
	fr_lst_index_t value = s->data[idx];

	return value;
}
214
215
static inline CC_HINT(always_inline, nonnull)
216
void stack_set(pivot_stack_t *s, stack_index_t idx, fr_lst_index_t new_value)
217
0
{
218
0
  s->data[idx] = new_value;
219
0
}
220
221
/** Allocate and initialise an LST
 *
 * @param[in] ctx	talloc context the LST is allocated in.
 * @param[in] cmp	comparator used to order elements.
 * @param[in] type	if non-NULL, talloc type elements are checked against
 *			on insert (debug builds only).
 * @param[in] offset	offset of the fr_lst_index_t index field within an
 *			element structure.
 * @param[in] init	initial capacity; 0 means INITIAL_CAPACITY.  Non-powers
 *			of two are normalized to 1 << fr_high_bit_pos(init)
 *			(rounding direction depends on fr_high_bit_pos()
 *			semantics — see its documentation) so index_reduce()
 *			can mask rather than divide.
 * @return the new LST, or NULL on allocation failure.
 */
fr_lst_t *_fr_lst_alloc(TALLOC_CTX *ctx, fr_cmp_t cmp, char const *type, size_t offset, fr_lst_index_t init)
{
	fr_lst_t	*lst;
	pivot_stack_t	*s;
	unsigned int	initial_stack_capacity;

	if (!init) {
		init = INITIAL_CAPACITY;
	} else if (!is_power_of_2(init)) {
		init = 1 << fr_high_bit_pos(init);
	}

	/*
	 *	initial_stack_capacity becomes the smallest c with (1 << c) >= init,
	 *	i.e. log2 of the capacity; expected stack depth is proportional
	 *	to the log of the number of items.
	 */
	for (initial_stack_capacity = 1; (1U << initial_stack_capacity) < init; initial_stack_capacity++) ;

	/*
	 *	Pre-allocate stack memory as it is
	 *	unlikely to need to grow in practice.
	 *
	 *	We don't pre-allocate the array of elements
	 *	If we pre-allocated the array of elements
	 *	we'd end up wasting that memory as soon as
	 *	we needed to expand the array.
	 *
	 *	Pre-allocating three chunks appears to be
	 *	the optimum.
	 */
	lst = talloc_zero_pooled_object(ctx, fr_lst_t, 3, (initial_stack_capacity * sizeof(fr_lst_index_t)));
	if (unlikely(!lst)) return NULL;

	lst->capacity = init;
	lst->p = talloc_array(lst, void *, lst->capacity);
	if (unlikely(!lst->p)) {
	cleanup:
		talloc_free(lst);	/* Frees lst->p and s->data too; they hang off lst */
		return NULL;
	}

	/*
	 *	Allocate the initial stack
	 */
	s = &lst->s;
	s->data = talloc_array(lst, fr_lst_index_t, initial_stack_capacity);
	if (unlikely(!s->data)) goto cleanup;
	s->depth = 0;
	s->size = initial_stack_capacity;

	/* Initially the LST is empty and we start at the beginning of the array */
	stack_push(lst, &lst->s, 0);	/* Can't fail here: depth (0) < size (>= 1) */

	lst->idx = 0;

	/* Prepare for random choices */
	lst->rand_ctx.a = fr_rand();
	lst->rand_ctx.b = fr_rand();

	lst->type = type;
	lst->cmp = cmp;
	lst->offset = offset;

	return lst;
}
282
283
/** The length function for LSTs (how many buckets it contains)
284
 *
285
 */
286
static inline stack_index_t lst_length(fr_lst_t const *lst, stack_index_t stack_index)
287
0
{
288
0
  return stack_depth(&lst->s) - stack_index;
289
0
}
290
291
/** The size function for LSTs (number of items a (sub)tree contains)
 *
 * @param[in] lst		the LST.
 * @param[in] stack_index	stack position identifying the (sub)tree;
 *				0 means the entire tree.
 */
static CC_HINT(nonnull) fr_lst_index_t lst_size(fr_lst_t *lst, stack_index_t stack_index)
{
	fr_lst_index_t	reduced_right, reduced_idx;

	/* The whole tree's element count is tracked directly. */
	if (stack_index == 0) return lst->num_elements;

	/*
	 *	A subtree's items run from lst->idx up to (not including) its
	 *	pivot; measure that span in the circular array.
	 */
	reduced_right = index_reduce(lst, stack_item(&lst->s, stack_index));
	reduced_idx = index_reduce(lst, lst->idx);

	if (reduced_idx <= reduced_right) return reduced_right - reduced_idx;	/* No wraparound--easy. */

	return (lst->capacity - reduced_idx) + reduced_right;	/* Span wraps past the end of the array */
}
307
308
/** Flatten an LST, i.e. turn it into the base-case one bucket [sub]tree
309
 *
310
 * NOTE: so doing leaves the passed stack_index valid--we just add
311
 * everything once in the left subtree to it.
312
 */
313
static inline CC_HINT(always_inline, nonnull) void lst_flatten(fr_lst_t *lst, stack_index_t stack_index)
314
0
{
315
0
  stack_pop(&lst->s, stack_depth(&lst->s) - stack_index);
316
0
}
317
318
/** Move data to a specific location in an LST's array.
319
 *
320
 * The caller must have made sure the location is available and exists
321
 * in said array.
322
 */
323
static inline CC_HINT(always_inline, nonnull) void lst_move(fr_lst_t *lst, fr_lst_index_t location, void *data)
324
0
{
325
0
  item_set(lst, location, data);
326
0
  item_index_set(lst, data, index_reduce(lst, location));
327
0
}
328
329
/** Add data to the bucket of a specified (sub)tree.
 *
 * @param[in] lst		the LST.
 * @param[in] stack_index	stack position identifying the (sub)tree whose
 *				bucket receives the new item.
 * @param[in] data		the item to add.
 */
static void bucket_add(fr_lst_t *lst, stack_index_t stack_index, void *data)
{
	fr_lst_index_t	new_space;
	stack_index_t	ridx;

	/*
	 *	For each bucket to the right, starting from the top,
	 *	make a space available at the top and move the bottom item
	 *	into it. Since ordering within a bucket doesn't matter, we
	 *	can do that, minimizing moving and index adjustment.
	 *
	 *	The fictitious pivot doesn't correspond to an actual value,
	 *	so we save pivot moving for the end of the loop.
	 */
	for (ridx = 0; ridx < stack_index; ridx++) {
		fr_lst_index_t	prev_pivot_index = stack_item(&lst->s, ridx + 1);
		bool		empty_bucket;

		new_space = stack_item(&lst->s, ridx);
		empty_bucket = (new_space - prev_pivot_index) == 1;
		stack_set(&lst->s, ridx, new_space + 1);	/* This bucket's boundary shifts right by one */

		/* An empty bucket has no bottom item to move into the new space */
		if (!empty_bucket) lst_move(lst, new_space, item(lst, prev_pivot_index + 1));

		/* move the pivot up, leaving space for the next bucket */
		lst_move(lst, prev_pivot_index + 1, item(lst, prev_pivot_index));
	}

	/*
	 *	If the bucket isn't the leftmost, the above loop has made space
	 *	available where the pivot used to be.
	 *	If it is the leftmost, the loop wasn't executed, but the fictitious
	 *	pivot isn't there, which is just as good.
	 */
	new_space = stack_item(&lst->s, stack_index);
	stack_set(&lst->s, stack_index, new_space + 1);
	lst_move(lst, new_space, data);

	lst->num_elements++;
}
372
373
/** Reduce pivot stack indices based on their difference from lst->idx, and then reduce lst->idx
374
 *
375
 */
376
static void lst_indices_reduce(fr_lst_t *lst)
377
0
{
378
0
  fr_lst_index_t  reduced_idx = index_reduce(lst, lst->idx);
379
0
  stack_index_t depth = stack_depth(&lst->s), i;
380
381
0
  for (i = 0; i < depth; i++) stack_set(&lst->s, i, reduced_idx + stack_item(&lst->s, i) - lst->idx);
382
383
0
  lst->idx = reduced_idx;
384
0
}
385
386
/** Make more space available in an LST
387
 *
388
 * The LST paper only mentions this option in passing, pointing out that it's O(n); the only
389
 * constructor in the paper lets you hand it an array of items to initially insert
390
 * in the LST, so elements will have to be removed to make room for more (though it's
391
 * easy to see how one could specify extra space).
392
 *
393
 * Were it not for the circular array optimization, it would be talloc_realloc() and done;
394
 * it works or it doesn't. (That's still O(n), since it may require copying the data.)
395
 *
396
 * With the circular array optimization, if lst->idx refers to something other than the
397
 * beginning of the array, you have to move the elements preceding it to beginning of the
398
 * newly-available space so it's still contiguous, and keep pivot stack entries consistent
399
 * with the positions of the elements.
400
 */
401
static bool lst_expand(fr_lst_t *lst)
402
0
{
403
0
  void    **n;
404
0
  unsigned int  old_capacity = lst->capacity, n_capacity;
405
0
  fr_lst_index_t  i;
406
407
0
  if (unlikely(old_capacity > (UINT_MAX - old_capacity))) {
408
0
    if (old_capacity == UINT_MAX) {
409
0
      fr_strerror_const("lst is full");
410
0
      return false;
411
0
    } else {
412
0
      n_capacity = UINT_MAX;
413
0
    }
414
0
  } else {
415
0
    n_capacity = old_capacity * 2;
416
0
  }
417
418
0
  n = talloc_realloc(lst, lst->p, void *, n_capacity);
419
0
  if (unlikely(!n)) {
420
0
    fr_strerror_printf("Failed expanding lst to %u elements (%zu bytes)",
421
0
           n_capacity, n_capacity * sizeof(void *));
422
0
    return false;
423
0
  }
424
425
0
  lst->p = n;
426
0
  lst->capacity = n_capacity;
427
428
0
  lst_indices_reduce(lst);
429
430
0
  for (i = 0; i < lst->idx; i++) {
431
0
    void    *to_be_moved = item(lst, i);
432
0
    fr_lst_index_t  new_index = item_index(lst, to_be_moved) + old_capacity;
433
434
0
    lst_move(lst, new_index, to_be_moved);
435
0
  }
436
437
0
  return true;
438
0
}
439
440
/*
 *	Lower bound (leftmost index) of the bucket belonging to a (sub)tree.
 *	The leftmost bucket starts at lst->idx; any other starts just past
 *	the pivot below it on the stack.
 */
static inline CC_HINT(always_inline, nonnull) fr_lst_index_t bucket_lwb(fr_lst_t const *lst, stack_index_t stack_index)
{
	if (!is_bucket(lst, stack_index)) return stack_item(&lst->s, stack_index + 1) + 1;

	return lst->idx;
}
446
447
/*
448
 * Note: buckets can be empty,
449
 */
450
static inline CC_HINT(always_inline, nonnull) fr_lst_index_t bucket_upb(fr_lst_t const *lst, stack_index_t stack_index)
451
0
{
452
0
  return stack_item(&lst->s, stack_index) - 1;
453
0
}
454
455
/*
 *	Partition an LST
 *	It's only called for trees that are a single nonempty bucket;
 *	if it's a subtree, it is thus necessarily the leftmost.
 */
static void partition(fr_lst_t *lst, stack_index_t stack_index)
{
	fr_lst_index_t	low = bucket_lwb(lst, stack_index);
	fr_lst_index_t	high = bucket_upb(lst, stack_index);
	fr_lst_index_t	l, h;
	fr_lst_index_t	pivot_index;
	void		*pivot;
	void		*temp;

	/*
	 *	Hoare partition doesn't do the trivial case, so catch it here.
	 */
	if (is_equivalent(lst, low, high)) {
		stack_push(lst, &lst->s, low);
		return;
	}

	/* Choose a pivot uniformly at random from [low, high]... */
	pivot_index = low + (fr_fast_rand(&lst->rand_ctx) % (high + 1 - low));
	pivot = item(lst, pivot_index);

	/* ...and swap it to the low end so the partition loop starts past it. */
	if (pivot_index != low) {
		lst_move(lst, pivot_index, item(lst, low));
		lst_move(lst, low, pivot);
	}

	/*
	 *	Hoare partition; on the average, it does a third the swaps of
	 *	Lomuto.
	 */
	l = low - 1;
	h = high + 1;
	for (;;) {
		while (lst->cmp(item(lst, --h), pivot) > 0) ;
		while (lst->cmp(item(lst, ++l), pivot) < 0) ;
		if (l >= h) break;
		temp = item(lst, l);
		lst_move(lst, l, item(lst, h));
		lst_move(lst, h, temp);
	}

	/*
	 *	Hoare partition doesn't guarantee the pivot sits at location h
	 *	the way Lomuto does and LST needs, so first get its location...
	 */
	pivot_index = item_index(lst, pivot);
	/* item_index() is reduced; map it back to a logical index in [low, high]. */
	if (pivot_index >= index_reduce(lst, low)) {
		pivot_index = low + pivot_index - index_reduce(lst, low);
	} else {
		pivot_index = high - (index_reduce(lst, high) - pivot_index);
	}

	/*
	 *	...and then move it if need be.
	 */
	if (pivot_index < h) {
		lst_move(lst, pivot_index, item(lst, h));
		lst_move(lst, h, pivot);
	}
	if (pivot_index > h) {
		h++;
		lst_move(lst, pivot_index, item(lst, h));
		lst_move(lst, h, pivot);
	}

	stack_push(lst, &lst->s, h);	/* h is now the pivot's logical index */
}
526
527
/*
 *	Delete an item from a bucket in an LST
 */
static void bucket_delete(fr_lst_t *lst, stack_index_t stack_index, void *data)
{
	fr_lst_index_t	location = item_index(lst, data);
	fr_lst_index_t	top;

	if (is_equivalent(lst, location, lst->idx)) {
		/* Deleting the leftmost item: just advance the start of the circular array. */
		lst->idx++;
		/* If idx has wrapped all the way around, renormalize all indices. */
		if (is_equivalent(lst, lst->idx, 0)) lst_indices_reduce(lst);
	} else {
		/*
		 *	Close the hole with the top item of this bucket, then
		 *	propagate the one-slot gap through each bucket toward
		 *	the stack bottom, moving pivots down one slot as we go.
		 */
		for (;;) {
			top = bucket_upb(lst, stack_index);
			if (!is_equivalent(lst, location, top)) lst_move(lst, location, item(lst, top));
			stack_set(&lst->s, stack_index, top);
			if (stack_index == 0) break;
			lst_move(lst, top, item(lst, top + 1));	/* Pull the next pivot down into the gap */
			stack_index--;
			location = top + 1;	/* The gap is now where that pivot was */
		}
	}

	lst->num_elements--;
	item_index_set(lst, data, -1);	/* Stores raw 0, marking the item as in no LST */
}
553
554
/*
555
 * We precede each function that does the real work with a Pythonish
556
 * (but colon-free) version of the pseudocode from the paper.
557
 *
558
 * clang, in version 13, will have a way to force tail call optimization
559
 * with a "musttail" attribute. gcc has -f-foptimize-sibling-calls, but
560
 * it works only with -O[23s]. For now, -O2 will assure TCO. In its absence,
561
 * the recursion depth is bounded by the number of pivot stack entries, aka
562
 * the "length" of the LST, which has an expected value proportional to
563
 * log(number of nodes).
564
 *
565
 * NOTE: inlining a recursive function is not advisable, so no
566
 * always_inline here.
567
 */
568
569
/*
 *	ExtractMin(LST T ) // assumes s(T ) > 0
 *	If T = bucket(B) Then
 *		Partition(T ) // O(|B|)
 *	Let T = tree(r, L, B )
 *	If s(L) = 0 Then
 *		Flatten T into bucket(B ) // O(1)
 *		Remove r from bucket B // O(1)
 *		Return r
 *	Else
 *		Return ExtractMin(L)
 */
static inline CC_HINT(nonnull) void *_fr_lst_pop(fr_lst_t *lst, stack_index_t stack_index)
{
	/* A bare bucket has no pivot yet; partition to create one. */
	if (is_bucket(lst, stack_index)) partition(lst, stack_index);
	++stack_index;	/* Descend into the left subtree */
	if (lst_size(lst, stack_index) == 0) {
		/* Empty left subtree: the pivot itself is the minimum. */
		void	*min = pivot_item(lst, stack_index);

		lst_flatten(lst, stack_index);
		bucket_delete(lst, stack_index, min);
		return min;
	}
	return _fr_lst_pop(lst, stack_index);	/* Tail call; see TCO note above */
}
594
595
/*
 *	FindMin(LST T ) // assumes s(T ) > 0
 *	If T = bucket(B) Then
 *		Partition(T ) // O(|B|)
 *	Let T = tree(r, L, B )
 *	If s(L) = 0 Then
 *		Return r
 *	Else
 *		Return FindMin(L)
 */
static inline CC_HINT(nonnull) void *_fr_lst_peek(fr_lst_t *lst, stack_index_t stack_index)
{
	/* A bare bucket has no pivot yet; partition to create one. */
	if (is_bucket(lst, stack_index)) partition(lst, stack_index);
	++stack_index;	/* Descend into the left subtree */
	/* Empty left subtree: the pivot itself is the minimum. */
	if (lst_size(lst, stack_index) == 0) return pivot_item(lst, stack_index);
	return _fr_lst_peek(lst, stack_index);	/* Tail call; see TCO note above */
}
612
613
/*
 *	Delete(LST T, x ∈ Z)
 *	If T = bucket(B) Then
 *		Remove x from bucket B // O(depth)
 *	Else
 *		Let T = tree(r, L, B′)
 *		If x < r Then
 *			Delete(L, x)
 *		Else If x > r Then
 *			Remove x from bucket B ′ // O(depth)
 *		Else
 *			Flatten T into bucket(B′′) // O(1)
 *			Remove x from bucket B′′ // O(depth)
 */
static inline CC_HINT(nonnull) void _fr_lst_extract(fr_lst_t *lst,  stack_index_t stack_index, void *data)
{
	int8_t	cmp;

	if (is_bucket(lst, stack_index)) {
		bucket_delete(lst, stack_index, data);
		return;
	}
	stack_index++;	/* Descend to compare against this subtree's pivot */
	cmp = lst->cmp(data, pivot_item(lst, stack_index));
	if (cmp < 0) {
		/* Less than the pivot: it's in the left subtree. */
		_fr_lst_extract(lst, stack_index, data);
	} else if (cmp > 0) {
		/* Greater than the pivot: it's in this subtree's own bucket. */
		bucket_delete(lst, stack_index - 1, data);
	} else {
		/* It compares equal to the pivot: flatten, then delete from the merged bucket. */
		lst_flatten(lst, stack_index);
		bucket_delete(lst, stack_index, data);
	}
}
646
647
/*
 *	Insert(LST T, x ∈ Z)
 *	If T = bucket(B) Then
 *		Add x to bucket B // O(depth)
 *	Else
 *		Let T = tree(r, L, B)
 *		If random(s(T) + 1) != 1 Then
 *			If x < r Then
 *				Insert(L, x)
 *			Else
 *				Add x to bucket B // O(depth)
 *		Else
 *			Flatten T into bucket(B′) // O(1)
 *			Add x to bucket B′ // O(depth)
 */
static inline CC_HINT(nonnull) void _fr_lst_insert(fr_lst_t *lst, stack_index_t stack_index, void *data)
{
#ifndef TALLOC_GET_TYPE_ABORT_NOOP
	/* If a talloc type was specified at creation, enforce it on every insert. */
	if (lst->type) (void)_talloc_get_type_abort(data, lst->type, __location__);
#endif

	if (is_bucket(lst, stack_index)) {
		bucket_add(lst, stack_index, data);
		return;
	}
	stack_index++;	/* Descend into the left subtree */
	/* With probability 1/(size+1), flatten; this keeps the structure randomized. */
	if (fr_fast_rand(&lst->rand_ctx) % (lst_size(lst, stack_index) + 1) != 0) {
		if (lst->cmp(data, pivot_item(lst, stack_index)) < 0) {
			_fr_lst_insert(lst, stack_index, data);
		} else {
			bucket_add(lst, stack_index - 1, data);
		}
	} else {
		lst_flatten(lst, stack_index);
		bucket_add(lst, stack_index, data);
	}
}
684
685
/*
686
 * We represent a (sub)tree with an (lst, stack index) pair, so
687
 * fr_lst_pop(), fr_lst_peek(), and fr_lst_extract() are minimal
688
 * wrappers that
689
 *
690
 * (1) hide our representation from the user and preserve the interface
691
 * (2) check preconditions
692
 */
693
694
/** Remove and return the minimum element of an LST, or NULL if it's empty */
void *fr_lst_pop(fr_lst_t *lst)
{
	if (likely(lst->num_elements > 0)) return _fr_lst_pop(lst, 0);

	return NULL;
}
699
700
/** Return (without removing) the minimum element of an LST, or NULL if it's empty */
void *fr_lst_peek(fr_lst_t *lst)
{
	if (likely(lst->num_elements > 0)) return _fr_lst_peek(lst, 0);

	return NULL;
}
705
706
/** Remove an element from an LST
707
 *
708
 * @param[in] lst   the LST to remove an element from
709
 * @param[in] data    the element to remove
710
 * @return
711
 *  - 0 if removal succeeds
712
 *  - -1 if removal fails
713
 */
714
int fr_lst_extract(fr_lst_t *lst, void *data)
715
0
{
716
0
  if (unlikely(lst->num_elements == 0)) {
717
0
    fr_strerror_const("Tried to extract element from empty LST");
718
0
    return -1;
719
0
  }
720
721
0
  if (unlikely(raw_item_index(lst, data) == 0)) {
722
0
    fr_strerror_const("Tried to extract element not in LST");
723
0
    return -1;
724
0
  }
725
726
0
  _fr_lst_extract(lst, 0, data);
727
0
  return 0;
728
0
}
729
730
/** Insert an element into an LST
 *
 * @param[in] lst		the LST to insert into.
 * @param[in] data		the element to insert.
 * @return
 *	- 0 on success
 *	- -1 on failure (expansion failed, or the element's index field
 *	     claims it's already in an LST)
 */
int fr_lst_insert(fr_lst_t *lst, void *data)
{
	/*
	 *	Expand if need be. Not in the paper, but we want the capability.
	 */
	if (unlikely(lst->num_elements == lst->capacity)) {
		if (!lst_expand(lst)) return -1;
	}

	/*
	 *	Don't insert something that looks like it's already in an LST.
	 */
	if (unlikely(raw_item_index(lst, data) > 0)) {
		fr_strerror_const("Node is already in the LST");
		return -1;
	}

	_fr_lst_insert(lst, 0, data);

	return 0;
}
748
749
/** Number of elements currently in the LST */
unsigned int fr_lst_num_elements(fr_lst_t *lst)
{
	return (lst->num_elements);
}
753
754
/** Iterate over entries in LST
755
 *
756
 * @note If the LST is modified, the iterator should be considered invalidated.
757
 *
758
 * @param[in] lst to iterate over.
759
 * @param[in] iter  Pointer to an iterator struct, used to maintain
760
 *      state between calls.
761
 * @return
762
 *  - User data.
763
 *  - NULL if at the end of the list.
764
 */
765
void *fr_lst_iter_init(fr_lst_t *lst, fr_lst_iter_t *iter)
766
0
{
767
0
  if (unlikely(lst->num_elements == 0)) return NULL;
768
769
0
  *iter = lst->idx;
770
0
  return item(lst, *iter);
771
0
}
772
773
/** Get the next entry in an LST
774
 *
775
 * @note If the LST is modified, the iterator should be considered invalidated.
776
 *
777
 * @param[in] lst to iterate over.
778
 * @param[in] iter  Pointer to an iterator struct, used to maintain
779
 *      state between calls.
780
 * @return
781
 *  - User data.
782
 *  - NULL if at the end of the list.
783
 */
784
void *fr_lst_iter_next(fr_lst_t *lst, fr_lst_iter_t *iter)
785
0
{
786
0
  if ((*iter + 1) >= stack_item(&lst->s, 0)) return NULL;
787
0
  *iter += 1;
788
789
0
  return item(lst, *iter);
790
0
}
791
792
#ifndef TALLOC_GET_TYPE_ABORT_NOOP
793
/** Sanity-check an LST, aborting with a message on the first inconsistency found
 *
 * @param[in] file	source file of the caller (for diagnostics).
 * @param[in] line	source line of the caller (for diagnostics).
 * @param[in] lst	the LST to check.
 */
void fr_lst_verify(char const *file, int line, fr_lst_t const *lst)
{
	fr_lst_index_t	fake_pivot_index, reduced_fake_pivot_index, reduced_end;
	stack_index_t	depth = stack_depth(&(lst->s));
	int		bucket_size_sum;
	bool		pivots_in_order = true;
	bool		pivot_indices_in_order = true;

	fr_fatal_assert_msg(lst, "CONSISTENCY CHECK FAILED %s[%i]: LST pointer NULL", file, line);
	talloc_get_type_abort(lst, fr_lst_t);

	/*
	 *	There must be at least the fictitious pivot.
	 */
	fr_fatal_assert_msg(depth >= 1, "CONSISTENCY CHECK FAILED %s[%i]: LST pivot stack empty", file, line);

	/*
	 *	Modulo circularity, idx + the number of elements should be the index
	 *	of the fictitious pivot.
	 */
	fake_pivot_index = stack_item(&(lst->s), 0);
	reduced_fake_pivot_index = index_reduce(lst, fake_pivot_index);
	reduced_end = index_reduce(lst, lst->idx + lst->num_elements);
	fr_fatal_assert_msg(reduced_fake_pivot_index == reduced_end,
			    "CONSISTENCY CHECK FAILED %s[%i]: fictitious pivot doesn't point past last element",
			    file, line);

	/*
	 *	Bucket sizes must make sense.
	 */
	if (lst->num_elements) {
		bucket_size_sum = 0;

		for (stack_index_t stack_index = 0; stack_index < depth; stack_index++)  {
			fr_lst_index_t bucket_size = bucket_upb(lst, stack_index) - bucket_lwb(lst, stack_index) + 1;
			fr_fatal_assert_msg(bucket_size <= lst->num_elements,
					    "CONSISTENCY CHECK FAILED %s[%i]: bucket %u size %u is invalid",
					    file, line, stack_index, bucket_size);
			bucket_size_sum += bucket_size;
		}

		/* Buckets plus the (depth - 1) real pivots must account for every element. */
		fr_fatal_assert_msg(bucket_size_sum + depth - 1 == lst->num_elements,
				    "CONSISTENCY CHECK FAILED %s[%i]: buckets inconsistent with number of elements",
				    file, line);
	}

	/*
	 *	No elements should be NULL;
	 *	they should have the correct index stored,
	 *	and if a type is specified, they should point at something of that type,
	 */
	for (fr_lst_index_t i = 0; i < lst->num_elements; i++) {
		void	*element = item(lst, lst->idx + i);

		fr_fatal_assert_msg(element, "CONSISTENCY CHECK FAILED %s[%i]: null element pointer at %u",
				    file, line, lst->idx + i);
		fr_fatal_assert_msg(is_equivalent(lst, lst->idx + i, item_index(lst, element)),
				    "CONSISTENCY CHECK FAILED %s[%i]: element %u index mismatch", file, line, i);
		if (lst->type)	(void) _talloc_get_type_abort(element, lst->type, __location__);
	}

	/*
	 *	There's nothing more to check for a one-bucket tree.
	 */
	if (is_bucket(lst, 0)) return;

	/*
	 *	Otherwise, first, pivots from left to right (aside from the fictitious
	 *	one) should be in ascending order.
	 */
	for (stack_index_t stack_index = 1; stack_index + 1 < depth; stack_index++) {
		void	*current_pivot = pivot_item(lst, stack_index);
		void	*next_pivot = pivot_item(lst, stack_index + 1);

		/* Higher stack positions are further left, hence should compare smaller. */
		if (current_pivot && next_pivot && lst->cmp(current_pivot, next_pivot) < 0) pivots_in_order = false;
	}
	fr_fatal_assert_msg(pivots_in_order, "CONSISTENCY CHECK FAILED %s[%i]: pivots not in ascending order",
			    file, line);

	/*
	 *	Next, the stacked pivot indices should decrease as you ascend from
	 *	the bottom of the pivot stack. Here we *do* include the fictitious
	 *	pivot; we're just comparing indices.
	 */
	for (stack_index_t stack_index = 0; stack_index + 1 < depth; stack_index++) {
		fr_lst_index_t current_pivot_index = stack_item(&(lst->s), stack_index);
		fr_lst_index_t previous_pivot_index = stack_item(&(lst->s), stack_index + 1);

		if (previous_pivot_index >= current_pivot_index) pivot_indices_in_order = false;
	}
	fr_fatal_assert_msg(pivot_indices_in_order, "CONSISTENCY CHECK FAILED %s[%i]: pivots indices not in order",
			    file, line);

	/*
	 *	Finally...
	 *	values in buckets shouldn't "follow" the pivot to the immediate right (if it exists)
	 *	and shouldn't "precede" the pivot to the immediate left (if it exists)
	 */
	for (stack_index_t stack_index = 0; stack_index < depth; stack_index++) {
		fr_lst_index_t	lwb, upb, pivot_index;
		void		*pivot_item, *element;

		/* Check against the pivot to this bucket's right (the one at stack_index itself). */
		if (stack_index > 0) {
			lwb = (stack_index + 1 == depth) ? lst->idx : stack_item(&(lst->s), stack_index + 1);
			pivot_index = upb = stack_item(&(lst->s), stack_index);
			pivot_item = item(lst, pivot_index);
			for (fr_lst_index_t index = lwb; index < upb; index++) {
				element = item(lst, index);
				fr_fatal_assert_msg(!element || !pivot_item || lst->cmp(element, pivot_item) <= 0,
						    "CONSISTENCY CHECK FAILED %s[%i]: element at %u > pivot at %u",
						    file, line, index, pivot_index);
			}
		}
		/* Check against the pivot to this bucket's left (the one at stack_index + 1). */
		if (stack_index + 1 < depth) {
			upb = stack_item(&(lst->s), stack_index);
			lwb = pivot_index = stack_item(&(lst->s), stack_index + 1);
			pivot_item = item(lst, pivot_index);
			for (fr_lst_index_t index = lwb; index < upb; index++) {
				element = item(lst, index);
				fr_fatal_assert_msg(!element || !pivot_item || lst->cmp(pivot_item, element) <= 0,
						    "CONSISTENCY CHECK FAILED %s[%i]: element at %u < pivot at %u",
						    file, line, index, pivot_index);
			}
		}
	}
}
919
#endif