Coverage Report

Created: 2025-11-24 06:14

/src/unbound/util/storage/lruhash.c
Line|Count| Source
1| | /*
2| |  * util/storage/lruhash.c - hashtable, hash function, LRU keeping.
3| |  *
4| |  * Copyright (c) 2007, NLnet Labs. All rights reserved.
5| |  *
6| |  * This software is open source.
7| |  *
8| |  * Redistribution and use in source and binary forms, with or without
9| |  * modification, are permitted provided that the following conditions
10| |  * are met:
11| |  *
12| |  * Redistributions of source code must retain the above copyright notice,
13| |  * this list of conditions and the following disclaimer.
14| |  *
15| |  * Redistributions in binary form must reproduce the above copyright notice,
16| |  * this list of conditions and the following disclaimer in the documentation
17| |  * and/or other materials provided with the distribution.
18| |  *
19| |  * Neither the name of the NLNET LABS nor the names of its contributors may
20| |  * be used to endorse or promote products derived from this software without
21| |  * specific prior written permission.
22| |  *
23| |  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24| |  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25| |  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26| |  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27| |  * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28| |  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29| |  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30| |  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31| |  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32| |  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33| |  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34| |  */
35| |
36| | /**
37| |  * \file
38| |  *
39| |  * This file contains a hashtable with LRU keeping of entries.
40| |  *
41| |  */
42| |
43| | #include "config.h"
44| | #include "util/storage/lruhash.h"
45| | #include "util/fptr_wlist.h"
46| |
47| | void
48| | bin_init(struct lruhash_bin* array, size_t size)
49|15.7k| {
50|15.7k|   size_t i;
51| | #ifdef THREADS_DISABLED
52| |   (void)array;
53| | #endif
54|16.1M|   for(i=0; i<size; i++) {
55|16.1M|     lock_quick_init(&array[i].lock);
56|16.1M|     lock_protect(&array[i].lock, &array[i],
57|16.1M|       sizeof(struct lruhash_bin));
58|16.1M|   }
59|15.7k| }
60| |
61| | struct lruhash*
62| | lruhash_create(size_t start_size, size_t maxmem,
63| |   lruhash_sizefunc_type sizefunc, lruhash_compfunc_type compfunc,
64| |   lruhash_delkeyfunc_type delkeyfunc,
65| |   lruhash_deldatafunc_type deldatafunc, void* arg)
66|15.7k| {
67|15.7k|   struct lruhash* table = (struct lruhash*)calloc(1,
68|15.7k|     sizeof(struct lruhash));
69|15.7k|   if(!table)
70|0|     return NULL;
71|15.7k|   lock_quick_init(&table->lock);
72|15.7k|   table->sizefunc = sizefunc;
73|15.7k|   table->compfunc = compfunc;
74|15.7k|   table->delkeyfunc = delkeyfunc;
75|15.7k|   table->deldatafunc = deldatafunc;
76|15.7k|   table->cb_arg = arg;
77|15.7k|   table->size = start_size;
78|15.7k|   table->size_mask = (int)(start_size-1);
79|15.7k|   table->lru_start = NULL;
80|15.7k|   table->lru_end = NULL;
81|15.7k|   table->num = 0;
82|15.7k|   table->space_used = 0;
83|15.7k|   table->space_max = maxmem;
84|15.7k|   table->max_collisions = 0;
85|15.7k|   table->array = calloc(table->size, sizeof(struct lruhash_bin));
86|15.7k|   if(!table->array) {
87|0|     lock_quick_destroy(&table->lock);
88|0|     free(table);
89|0|     return NULL;
90|0|   }
91|15.7k|   bin_init(table->array, table->size);
92|15.7k|   lock_protect(&table->lock, table, sizeof(*table));
93|15.7k|   lock_protect(&table->lock, table->array,
94|15.7k|     table->size*sizeof(struct lruhash_bin));
95|15.7k|   return table;
96|15.7k| }
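
Note (not part of the source above): a minimal caller sketch for lruhash_create. It assumes the callback typedefs declared in util/storage/lruhash.h take (key, data), (key1, key2), (key, userarg) and (data, userarg), as their use in this file suggests; mykey, mydata and the my_* functions are hypothetical placeholders. start_size should be a power of two, since line 78 sets size_mask to start_size-1.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include "util/storage/lruhash.h"

    /* hypothetical key/data types; the key embeds the lruhash_entry */
    struct mykey { struct lruhash_entry entry; int id; };
    struct mydata { char text[32]; };

    static size_t my_size(void* key, void* data)
    { (void)key; (void)data; return sizeof(struct mykey) + sizeof(struct mydata); }
    static int my_comp(void* k1, void* k2)
    { return ((struct mykey*)k1)->id - ((struct mykey*)k2)->id; }
    static void my_delkey(void* key, void* arg)
    { (void)arg; lock_rw_destroy(&((struct mykey*)key)->entry.lock); free(key); }
    static void my_deldata(void* data, void* arg)
    { (void)arg; free(data); }

    static struct lruhash* make_table(void)
    {
        /* 1024 bins (a power of two) and a 4 MB space_max limit */
        return lruhash_create(1024, 4*1024*1024,
            my_size, my_comp, my_delkey, my_deldata, NULL);
    }
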
97| |
98| | void
99| | bin_delete(struct lruhash* table, struct lruhash_bin* bin)
100|16.1M| {
101|16.1M|   struct lruhash_entry* p, *np;
102|16.1M|   void *d;
103|16.1M|   if(!bin)
104|0|     return;
105|16.1M|   lock_quick_destroy(&bin->lock);
106|16.1M|   p = bin->overflow_list;
107|16.1M|   bin->overflow_list = NULL;
108|16.1M|   while(p) {
109|9.46k|     np = p->overflow_next;
110|9.46k|     d = p->data;
111|9.46k|     (*table->delkeyfunc)(p->key, table->cb_arg);
112|9.46k|     (*table->deldatafunc)(d, table->cb_arg);
113|9.46k|     p = np;
114|9.46k|   }
115|16.1M| }
116| |
117| | void
118| | bin_split(struct lruhash* table, struct lruhash_bin* newa,
119| |   int newmask)
120|0| {
121|0|   size_t i;
122|0|   struct lruhash_entry *p, *np;
123|0|   struct lruhash_bin* newbin;
124| |   /* move entries to new table. Notice that since hash x is mapped to
125| |    * bin x & mask, and new mask uses one more bit, so all entries in
126| |    * one bin will go into the old bin or bin | newbit */
127|0| #ifndef THREADS_DISABLED
128|0|   int newbit = newmask - table->size_mask;
129|0| #endif
130| |   /* so, really, this task could also be threaded, per bin. */
131| |   /* LRU list is not changed */
132|0|   for(i=0; i<table->size; i++)
133|0|   {
134|0|     lock_quick_lock(&table->array[i].lock);
135|0|     p = table->array[i].overflow_list;
136| |     /* lock both destination bins */
137|0|     lock_quick_lock(&newa[i].lock);
138|0|     lock_quick_lock(&newa[newbit|i].lock);
139|0|     while(p) {
140|0|       np = p->overflow_next;
141| |       /* link into correct new bin */
142|0|       newbin = &newa[p->hash & newmask];
143|0|       p->overflow_next = newbin->overflow_list;
144|0|       newbin->overflow_list = p;
145|0|       p=np;
146|0|     }
147|0|     lock_quick_unlock(&newa[i].lock);
148|0|     lock_quick_unlock(&newa[newbit|i].lock);
149|0|     lock_quick_unlock(&table->array[i].lock);
150|0|   }
151|0| }
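
Note (illustrative, not part of the source): a worked instance of the mask arithmetic used by bin_split and table_grow. With table->size 8 the old mask is 7; after doubling, newmask = (7<<1)|1 = 15 and newbit = 15-7 = 8, so an entry with hash 0x2B moves from bin 0x2B & 7 = 3 to bin 0x2B & 15 = 11, which is exactly 3|8. Every entry therefore lands in its old bin index or in old index | newbit.

    #include <assert.h>

    static void split_example(void)
    {
        int oldmask = 7;                   /* table size 8 */
        int newmask = (oldmask << 1) | 1;  /* table size 16 -> mask 15 */
        int newbit = newmask - oldmask;    /* 8 */
        assert((0x2B & oldmask) == 3);
        assert((0x2B & newmask) == (3 | newbit));
    }
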
152| |
153| | void
154| | lruhash_delete(struct lruhash* table)
155|15.7k| {
156|15.7k|   size_t i;
157|15.7k|   if(!table)
158|0|     return;
159| |   /* delete lock on hashtable to force check its OK */
160|15.7k|   lock_quick_destroy(&table->lock);
161|16.1M|   for(i=0; i<table->size; i++)
162|16.1M|     bin_delete(table, &table->array[i]);
163|15.7k|   free(table->array);
164|15.7k|   free(table);
165|15.7k| }
166| |
167| | void
168| | bin_overflow_remove(struct lruhash_bin* bin, struct lruhash_entry* entry)
169|0| {
170|0|   struct lruhash_entry* p = bin->overflow_list;
171|0|   struct lruhash_entry** prevp = &bin->overflow_list;
172|0|   while(p) {
173|0|     if(p == entry) {
174|0|       *prevp = p->overflow_next;
175|0|       return;
176|0|     }
177|0|     prevp = &p->overflow_next;
178|0|     p = p->overflow_next;
179|0|   }
180|0| }
181| |
182| | void
183| | reclaim_space(struct lruhash* table, struct lruhash_entry** list)
184|0| {
185|0|   struct lruhash_entry* d;
186|0|   struct lruhash_bin* bin;
187|0|   log_assert(table);
188| |   /* does not delete MRU entry, so table will not be empty. */
189|0|   while(table->num > 1 && table->space_used > table->space_max) {
190| |     /* notice that since we hold the hashtable lock, nobody
191| |        can change the lru chain. So it cannot be deleted underneath
192| |        us. We still need the hashbin and entry write lock to make
193| |        sure we flush all users away from the entry.
194| |        which is unlikely, since it is LRU, if someone got a rdlock
195| |        it would be moved to front, but to be sure. */
196|0|     d = table->lru_end;
197| |     /* specialised, delete from end of double linked list,
198| |        and we know num>1, so there is a previous lru entry. */
199|0|     log_assert(d && d->lru_prev);
200|0|     table->lru_end = d->lru_prev;
201|0|     d->lru_prev->lru_next = NULL;
202| |     /* schedule entry for deletion */
203|0|     bin = &table->array[d->hash & table->size_mask];
204|0|     table->num --;
205|0|     lock_quick_lock(&bin->lock);
206|0|     bin_overflow_remove(bin, d);
207|0|     d->overflow_next = *list;
208|0|     *list = d;
209|0|     lock_rw_wrlock(&d->lock);
210|0|     table->space_used -= table->sizefunc(d->key, d->data);
211|0|     if(table->markdelfunc)
212|0|       (*table->markdelfunc)(d->key);
213|0|     lock_rw_unlock(&d->lock);
214|0|     lock_quick_unlock(&bin->lock);
215|0|   }
216|0| }
217| |
218| | struct lruhash_entry*
219| | bin_find_entry(struct lruhash* table,
220| |   struct lruhash_bin* bin, hashvalue_type hash, void* key, size_t* collisions)
221|18.9k| {
222|18.9k|   size_t c = 0;
223|18.9k|   struct lruhash_entry* p = bin->overflow_list;
224|20.8k|   while(p) {
225|1.94k|     if(p->hash == hash && table->compfunc(p->key, key) == 0)
226|0|       break;
227|1.94k|     c++;
228|1.94k|     p = p->overflow_next;
229|1.94k|   }
230|18.9k|   if (collisions != NULL)
231|9.46k|     *collisions = c;
232|18.9k|   return p;
233|18.9k| }
234| |
235| | void
236| | table_grow(struct lruhash* table)
237|0| {
238|0|   struct lruhash_bin* newa;
239|0|   int newmask;
240|0|   size_t i;
241|0|   if(table->size_mask == (int)(((size_t)-1)>>1)) {
242|0|     log_err("hash array malloc: size_t too small");
243|0|     return;
244|0|   }
245| |   /* try to allocate new array, if not fail */
246|0|   newa = calloc(table->size*2, sizeof(struct lruhash_bin));
247|0|   if(!newa) {
248|0|     log_err("hash grow: malloc failed");
249| |     /* continue with smaller array. Though its slower. */
250|0|     return;
251|0|   }
252|0|   bin_init(newa, table->size*2);
253|0|   newmask = (table->size_mask << 1) | 1;
254|0|   bin_split(table, newa, newmask);
255| |   /* delete the old bins */
256|0|   lock_unprotect(&table->lock, table->array);
257|0|   for(i=0; i<table->size; i++) {
258|0|     lock_quick_destroy(&table->array[i].lock);
259|0|   }
260|0|   free(table->array);
261| |
262|0|   table->size *= 2;
263|0|   table->size_mask = newmask;
264|0|   table->array = newa;
265|0|   lock_protect(&table->lock, table->array,
266|0|     table->size*sizeof(struct lruhash_bin));
267|0|   return;
268|0| }
269| |
270| | void
271| | lru_front(struct lruhash* table, struct lruhash_entry* entry)
272|9.46k| {
273|9.46k|   entry->lru_prev = NULL;
274|9.46k|   entry->lru_next = table->lru_start;
275|9.46k|   if(!table->lru_start)
276|789|     table->lru_end = entry;
277|8.67k|   else  table->lru_start->lru_prev = entry;
278|9.46k|   table->lru_start = entry;
279|9.46k| }
280| |
281| | void
282| | lru_remove(struct lruhash* table, struct lruhash_entry* entry)
283|0| {
284|0|   if(entry->lru_prev)
285|0|     entry->lru_prev->lru_next = entry->lru_next;
286|0|   else  table->lru_start = entry->lru_next;
287|0|   if(entry->lru_next)
288|0|     entry->lru_next->lru_prev = entry->lru_prev;
289|0|   else  table->lru_end = entry->lru_prev;
290|0| }
291| |
292| | void
293| | lru_touch(struct lruhash* table, struct lruhash_entry* entry)
294|0| {
295|0|   log_assert(table && entry);
296|0|   if(entry == table->lru_start)
297|0|     return; /* nothing to do */
298| |   /* remove from current lru position */
299|0|   lru_remove(table, entry);
300| |   /* add at front */
301|0|   lru_front(table, entry);
302|0| }
303| |
304| | void
305| | lruhash_insert(struct lruhash* table, hashvalue_type hash,
306| |         struct lruhash_entry* entry, void* data, void* cb_arg)
307|9.46k| {
308|9.46k|   struct lruhash_bin* bin;
309|9.46k|   struct lruhash_entry* found, *reclaimlist=NULL;
310|9.46k|   size_t need_size;
311|9.46k|   size_t collisions;
312|9.46k|   fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
313|9.46k|   fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
314|9.46k|   fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
315|9.46k|   fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
316|9.46k|   fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
317|9.46k|   need_size = table->sizefunc(entry->key, data);
318|9.46k|   if(cb_arg == NULL) cb_arg = table->cb_arg;
319| |
320| |   /* find bin */
321|9.46k|   lock_quick_lock(&table->lock);
322|9.46k|   bin = &table->array[hash & table->size_mask];
323|9.46k|   lock_quick_lock(&bin->lock);
324| |
325| |   /* see if entry exists already */
326|9.46k|   if(!(found=bin_find_entry(table, bin, hash, entry->key, &collisions))) {
327| |     /* if not: add to bin */
328|9.46k|     entry->overflow_next = bin->overflow_list;
329|9.46k|     bin->overflow_list = entry;
330|9.46k|     lru_front(table, entry);
331|9.46k|     table->num++;
332|9.46k|     if (table->max_collisions < collisions)
333|377|       table->max_collisions = collisions;
334|9.46k|     table->space_used += need_size;
335|9.46k|   } else {
336| |     /* if so: update data - needs a writelock */
337|0|     table->space_used += need_size -
338|0|       (*table->sizefunc)(found->key, found->data);
339|0|     (*table->delkeyfunc)(entry->key, cb_arg);
340|0|     lru_touch(table, found);
341|0|     lock_rw_wrlock(&found->lock);
342|0|     (*table->deldatafunc)(found->data, cb_arg);
343|0|     found->data = data;
344|0|     lock_rw_unlock(&found->lock);
345|0|   }
346|9.46k|   lock_quick_unlock(&bin->lock);
347|9.46k|   if(table->space_used > table->space_max)
348|0|     reclaim_space(table, &reclaimlist);
349|9.46k|   if(table->num >= table->size)
350|0|     table_grow(table);
351|9.46k|   lock_quick_unlock(&table->lock);
352| |
353| |   /* finish reclaim if any (outside of critical region) */
354|9.46k|   while(reclaimlist) {
355|0|     struct lruhash_entry* n = reclaimlist->overflow_next;
356|0|     void* d = reclaimlist->data;
357|0|     (*table->delkeyfunc)(reclaimlist->key, cb_arg);
358|0|     (*table->deldatafunc)(d, cb_arg);
359|0|     reclaimlist = n;
360|0|   }
361|9.46k| }
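
Note (hypothetical, continuing the sketch types above): inserting an item. The caller allocates key and data, fills the embedded lruhash_entry (hash, key, data and an initialized rwlock) and then hands ownership to the table, which frees both through delkeyfunc/deldatafunc on eviction or replacement. A real caller would hash the key properly; the id cast below is a placeholder.

    static void put(struct lruhash* table, int id, const char* s)
    {
        struct mykey* k = calloc(1, sizeof(*k));
        struct mydata* d = calloc(1, sizeof(*d));
        if(!k || !d) { free(k); free(d); return; }
        k->id = id;
        snprintf(d->text, sizeof(d->text), "%s", s);
        lock_rw_init(&k->entry.lock);
        k->entry.hash = (hashvalue_type)id;  /* placeholder hash */
        k->entry.key = k;
        k->entry.data = d;
        lruhash_insert(table, k->entry.hash, &k->entry, d, NULL);
    }
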
362| |
363| | struct lruhash_entry*
364| | lruhash_lookup(struct lruhash* table, hashvalue_type hash, void* key, int wr)
365|9.46k| {
366|9.46k|   struct lruhash_entry* entry;
367|9.46k|   struct lruhash_bin* bin;
368|9.46k|   fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
369| |
370|9.46k|   lock_quick_lock(&table->lock);
371|9.46k|   bin = &table->array[hash & table->size_mask];
372|9.46k|   lock_quick_lock(&bin->lock);
373|9.46k|   if((entry=bin_find_entry(table, bin, hash, key, NULL)))
374|0|     lru_touch(table, entry);
375|9.46k|   lock_quick_unlock(&table->lock);
376| |
377|9.46k|   if(entry) {
378|0|     if(wr) { lock_rw_wrlock(&entry->lock); }
379|0|     else  { lock_rw_rdlock(&entry->lock); }
380|0|   }
381|9.46k|   lock_quick_unlock(&bin->lock);
382|9.46k|   return entry;
383|9.46k| }
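
Note (hypothetical, same sketch types): lruhash_lookup returns the entry with its rwlock already held (a write lock if wr is nonzero, a read lock otherwise), so the caller must lock_rw_unlock it when finished. The probe key below only needs the fields that compfunc reads.

    static void print_item(struct lruhash* table, int id)
    {
        struct mykey probe;
        struct lruhash_entry* e;
        memset(&probe, 0, sizeof(probe));
        probe.id = id;
        e = lruhash_lookup(table, (hashvalue_type)id, &probe, 0);
        if(e) {
            printf("%d: %s\n", id, ((struct mydata*)e->data)->text);
            lock_rw_unlock(&e->lock);
        }
    }
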
384| |
385| | void
386| | lruhash_remove(struct lruhash* table, hashvalue_type hash, void* key)
387|0| {
388|0|   struct lruhash_entry* entry;
389|0|   struct lruhash_bin* bin;
390|0|   void *d;
391|0|   fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
392|0|   fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
393|0|   fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
394|0|   fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
395|0|   fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
396| |
397|0|   lock_quick_lock(&table->lock);
398|0|   bin = &table->array[hash & table->size_mask];
399|0|   lock_quick_lock(&bin->lock);
400|0|   if((entry=bin_find_entry(table, bin, hash, key, NULL))) {
401|0|     bin_overflow_remove(bin, entry);
402|0|     lru_remove(table, entry);
403|0|   } else {
404|0|     lock_quick_unlock(&table->lock);
405|0|     lock_quick_unlock(&bin->lock);
406|0|     return;
407|0|   }
408|0|   table->num--;
409|0|   table->space_used -= (*table->sizefunc)(entry->key, entry->data);
410|0|   lock_rw_wrlock(&entry->lock);
411|0|   if(table->markdelfunc)
412|0|     (*table->markdelfunc)(entry->key);
413|0|   lock_rw_unlock(&entry->lock);
414|0|   lock_quick_unlock(&bin->lock);
415|0|   lock_quick_unlock(&table->lock);
416| |   /* finish removal */
417|0|   d = entry->data;
418|0|   (*table->delkeyfunc)(entry->key, table->cb_arg);
419|0|   (*table->deldatafunc)(d, table->cb_arg);
420|0| }
421| |
422| | /** clear bin, respecting locks, does not do space, LRU */
423| | static void
424| | bin_clear(struct lruhash* table, struct lruhash_bin* bin)
425|0| {
426|0|   struct lruhash_entry* p, *np;
427|0|   void *d;
428|0|   lock_quick_lock(&bin->lock);
429|0|   p = bin->overflow_list;
430|0|   while(p) {
431|0|     lock_rw_wrlock(&p->lock);
432|0|     np = p->overflow_next;
433|0|     d = p->data;
434|0|     if(table->markdelfunc)
435|0|       (*table->markdelfunc)(p->key);
436|0|     lock_rw_unlock(&p->lock);
437|0|     (*table->delkeyfunc)(p->key, table->cb_arg);
438|0|     (*table->deldatafunc)(d, table->cb_arg);
439|0|     p = np;
440|0|   }
441|0|   bin->overflow_list = NULL;
442|0|   lock_quick_unlock(&bin->lock);
443|0| }
444| |
445| | void
446| | lruhash_clear(struct lruhash* table)
447|0| {
448|0|   size_t i;
449|0|   if(!table)
450|0|     return;
451|0|   fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
452|0|   fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
453|0|   fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
454| |
455|0|   lock_quick_lock(&table->lock);
456|0|   for(i=0; i<table->size; i++) {
457|0|     bin_clear(table, &table->array[i]);
458|0|   }
459|0|   table->lru_start = NULL;
460|0|   table->lru_end = NULL;
461|0|   table->num = 0;
462|0|   table->space_used = 0;
463|0|   lock_quick_unlock(&table->lock);
464|0| }
465| |
466| | void
467| | lruhash_status(struct lruhash* table, const char* id, int extended)
468|0| {
469|0|   lock_quick_lock(&table->lock);
470|0|   log_info("%s: %u entries, memory %u / %u",
471|0|     id, (unsigned)table->num, (unsigned)table->space_used,
472|0|     (unsigned)table->space_max);
473|0|   log_info("  itemsize %u, array %u, mask %d",
474|0|     (unsigned)(table->num? table->space_used/table->num : 0),
475|0|     (unsigned)table->size, table->size_mask);
476|0|   if(extended) {
477|0|     size_t i;
478|0|     int min=(int)table->size*2, max=-2;
479|0|     for(i=0; i<table->size; i++) {
480|0|       int here = 0;
481|0|       struct lruhash_entry *en;
482|0|       lock_quick_lock(&table->array[i].lock);
483|0|       en = table->array[i].overflow_list;
484|0|       while(en) {
485|0|         here ++;
486|0|         en = en->overflow_next;
487|0|       }
488|0|       lock_quick_unlock(&table->array[i].lock);
489|0|       if(extended >= 2)
490|0|         log_info("bin[%d] %d", (int)i, here);
491|0|       if(here > max) max = here;
492|0|       if(here < min) min = here;
493|0|     }
494|0|     log_info("  bin min %d, avg %.2lf, max %d", min,
495|0|       (double)table->num/(double)table->size, max);
496|0|   }
497|0|   lock_quick_unlock(&table->lock);
498|0| }
499| |
500| | size_t
501| | lruhash_get_mem(struct lruhash* table)
502|0| {
503|0|   size_t s;
504|0|   lock_quick_lock(&table->lock);
505|0|   s = sizeof(struct lruhash) + table->space_used;
506| | #ifdef USE_THREAD_DEBUG
507| |   if(table->size != 0) {
508| |     size_t i;
509| |     for(i=0; i<table->size; i++)
510| |       s += sizeof(struct lruhash_bin) +
511| |         lock_get_mem(&table->array[i].lock);
512| |   }
513| | #else /* no THREAD_DEBUG */
514|0|   if(table->size != 0)
515|0|     s += (table->size)*(sizeof(struct lruhash_bin) +
516|0|       lock_get_mem(&table->array[0].lock));
517|0| #endif
518|0|   lock_quick_unlock(&table->lock);
519|0|   s += lock_get_mem(&table->lock);
520|0|   return s;
521|0| }
522| |
523| | void
524| | lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_type md)
525|15.7k| {
526|15.7k|   lock_quick_lock(&table->lock);
527|15.7k|   table->markdelfunc = md;
528|15.7k|   lock_quick_unlock(&table->lock);
529|15.7k| }
530| |
531| | void
532| | lruhash_update_space_used(struct lruhash* table, void* cb_arg, int diff_size)
533|0| {
534|0|   struct lruhash_entry *reclaimlist = NULL;
535| |
536|0|   fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
537|0|   fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
538|0|   fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
539|0|   fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
540| |
541|0|   if(cb_arg == NULL) cb_arg = table->cb_arg;
542| |
543| |   /* update space used */
544|0|   lock_quick_lock(&table->lock);
545| |
546|0|   if((int)table->space_used + diff_size < 0)
547|0|     table->space_used = 0;
548|0|   else table->space_used = (size_t)((int)table->space_used + diff_size);
549| |
550|0|   if(table->space_used > table->space_max)
551|0|     reclaim_space(table, &reclaimlist);
552| |
553|0|   lock_quick_unlock(&table->lock);
554| |
555| |   /* finish reclaim if any (outside of critical region) */
556|0|   while(reclaimlist) {
557|0|     struct lruhash_entry* n = reclaimlist->overflow_next;
558|0|     void* d = reclaimlist->data;
559|0|     (*table->delkeyfunc)(reclaimlist->key, cb_arg);
560|0|     (*table->deldatafunc)(d, cb_arg);
561|0|     reclaimlist = n;
562|0|   }
563|0| }
564| |
565| | void lruhash_update_space_max(struct lruhash* table, void* cb_arg, size_t max)
566|0| {
567|0|   struct lruhash_entry *reclaimlist = NULL;
568| |
569|0|   fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
570|0|   fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
571|0|   fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
572|0|   fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
573| |
574|0|   if(cb_arg == NULL) cb_arg = table->cb_arg;
575| |
576| |   /* update space max */
577|0|   lock_quick_lock(&table->lock);
578|0|   table->space_max = max;
579| |
580|0|   if(table->space_used > table->space_max)
581|0|     reclaim_space(table, &reclaimlist);
582| |
583|0|   lock_quick_unlock(&table->lock);
584| |
585| |   /* finish reclaim if any (outside of critical region) */
586|0|   while(reclaimlist) {
587|0|     struct lruhash_entry* n = reclaimlist->overflow_next;
588|0|     void* d = reclaimlist->data;
589|0|     (*table->delkeyfunc)(reclaimlist->key, cb_arg);
590|0|     (*table->deldatafunc)(d, cb_arg);
591|0|     reclaimlist = n;
592|0|   }
593|0| }
594| |
595| | void
596| | lruhash_traverse(struct lruhash* h, int wr,
597| |   void (*func)(struct lruhash_entry*, void*), void* arg)
598|0| {
599|0|   size_t i;
600|0|   struct lruhash_entry* e;
601| |
602|0|   lock_quick_lock(&h->lock);
603|0|   for(i=0; i<h->size; i++) {
604|0|     lock_quick_lock(&h->array[i].lock);
605|0|     for(e = h->array[i].overflow_list; e; e = e->overflow_next) {
606|0|       if(wr) {
607|0|         lock_rw_wrlock(&e->lock);
608|0|       } else {
609|0|         lock_rw_rdlock(&e->lock);
610|0|       }
611|0|       (*func)(e, arg);
612|0|       lock_rw_unlock(&e->lock);
613|0|     }
614|0|     lock_quick_unlock(&h->array[i].lock);
615|0|   }
616|0|   lock_quick_unlock(&h->lock);
617|0| }
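
Note (hypothetical): lruhash_traverse invokes the callback with the entry's rwlock already held (write or read according to wr), so the callback must not lock or unlock the entry itself. A minimal counting callback:

    static void count_cb(struct lruhash_entry* e, void* arg)
    {
        (void)e;
        (*(size_t*)arg)++;
    }

    static size_t count_entries(struct lruhash* table)
    {
        size_t n = 0;
        lruhash_traverse(table, 0 /* read locks */, count_cb, &n);
        return n;
    }
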
618| |
619| | /*
620| |  * Demote: the opposite of touch, move an entry to the bottom
621| |  * of the LRU pile.
622| |  */
623| |
624| | void
625| | lru_demote(struct lruhash* table, struct lruhash_entry* entry)
626|0| {
627|0|   log_assert(table && entry);
628|0|   if (entry == table->lru_end)
629|0|     return; /* nothing to do */
630| |   /* remove from current lru position */
631|0|   lru_remove(table, entry);
632| |   /* add at end */
633|0|   entry->lru_next = NULL;
634|0|   entry->lru_prev = table->lru_end;
635| |
636|0|   if (table->lru_end == NULL)
637|0|   {
638|0|     table->lru_start = entry;
639|0|   }
640|0|   else
641|0|   {
642|0|     table->lru_end->lru_next = entry;
643|0|   }
644|0|   table->lru_end = entry;
645|0| }
646| |
647| | struct lruhash_entry*
648| | lruhash_insert_or_retrieve(struct lruhash* table, hashvalue_type hash,
649| |   struct lruhash_entry* entry, void* data, void* cb_arg)
650|0| {
651|0|   struct lruhash_bin* bin;
652|0|   struct lruhash_entry* found, *reclaimlist = NULL;
653|0|   size_t need_size;
654|0|   size_t collisions;
655|0|   fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
656|0|   fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
657|0|   fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
658|0|   fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
659|0|   fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
660|0|   need_size = table->sizefunc(entry->key, data);
661|0|   if (cb_arg == NULL) cb_arg = table->cb_arg;
662| |
663| |   /* find bin */
664|0|   lock_quick_lock(&table->lock);
665|0|   bin = &table->array[hash & table->size_mask];
666|0|   lock_quick_lock(&bin->lock);
667| |
668| |   /* see if entry exists already */
669|0|   if ((found = bin_find_entry(table, bin, hash, entry->key, &collisions)) != NULL) {
670| |     /* if so: keep the existing data - acquire a writelock */
671|0|     lock_rw_wrlock(&found->lock);
672|0|   }
673|0|   else
674|0|   {
675| |     /* if not: add to bin */
676|0|     entry->overflow_next = bin->overflow_list;
677|0|     bin->overflow_list = entry;
678|0|     lru_front(table, entry);
679|0|     table->num++;
680|0|     if (table->max_collisions < collisions)
681|0|       table->max_collisions = collisions;
682|0|     table->space_used += need_size;
683| |     /* return the entry that was presented, and lock it */
684|0|     found = entry;
685|0|     lock_rw_wrlock(&found->lock);
686|0|   }
687|0|   lock_quick_unlock(&bin->lock);
688|0|   if (table->space_used > table->space_max)
689|0|     reclaim_space(table, &reclaimlist);
690|0|   if (table->num >= table->size)
691|0|     table_grow(table);
692|0|   lock_quick_unlock(&table->lock);
693| |
694| |   /* finish reclaim if any (outside of critical region) */
695|0|   while (reclaimlist) {
696|0|     struct lruhash_entry* n = reclaimlist->overflow_next;
697|0|     void* d = reclaimlist->data;
698|0|     (*table->delkeyfunc)(reclaimlist->key, cb_arg);
699|0|     (*table->deldatafunc)(d, cb_arg);
700|0|     reclaimlist = n;
701|0|   }
702| |
703| |   /* return the entry that was selected */
704|0|   return found;
705|0| }
706| |
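
Note (hypothetical, same sketch types): lruhash_insert_or_retrieve returns the entry that ended up in the table, write-locked in either branch. If an existing entry is returned, the offered key and data were not consumed, so the caller still owns them.

    static struct lruhash_entry* put_or_get(struct lruhash* table,
        struct mykey* k, struct mydata* d)
    {
        /* k and d prepared as in the put() sketch above */
        struct lruhash_entry* got = lruhash_insert_or_retrieve(table,
            k->entry.hash, &k->entry, d, NULL);
        if(got != &k->entry) {
            /* the table kept its existing entry; discard our copy */
            my_delkey(k, NULL);
            my_deldata(d, NULL);
        }
        return got; /* caller must lock_rw_unlock(&got->lock) when done */
    }
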