Coverage Report

Created: 2026-03-15 06:35

/src/unbound/services/cache/rrset.c
Every executable line in this file has an execution count of 0; the file is entirely uncovered.
/*
 * services/cache/rrset.c - Resource record set cache.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains the rrset cache.
 */
#include "config.h"
#include "services/cache/rrset.h"
#include "sldns/rrdef.h"
#include "util/storage/slabhash.h"
#include "util/config_file.h"
#include "util/data/packed_rrset.h"
#include "util/data/msgreply.h"
#include "util/data/msgparse.h"
#include "util/data/dname.h"
#include "util/regional.h"
#include "util/alloc.h"
#include "util/net_help.h"

void
rrset_markdel(void* key)
{
  struct ub_packed_rrset_key* r = (struct ub_packed_rrset_key*)key;
  r->id = 0;
}

struct rrset_cache* rrset_cache_create(struct config_file* cfg,
  struct alloc_cache* alloc)
{
  size_t slabs = (cfg?cfg->rrset_cache_slabs:HASH_DEFAULT_SLABS);
  size_t startarray = HASH_DEFAULT_STARTARRAY;
  size_t maxmem = (cfg?cfg->rrset_cache_size:HASH_DEFAULT_MAXMEM);

  struct rrset_cache *r = (struct rrset_cache*)slabhash_create(slabs,
    startarray, maxmem, ub_rrset_sizefunc, ub_rrset_compare,
    ub_rrset_key_delete, rrset_data_delete, alloc);
  if(!r)
    return NULL;
  slabhash_setmarkdel(&r->table, &rrset_markdel);
  return r;
}

void rrset_cache_delete(struct rrset_cache* r)
{
  if(!r)
    return;
  slabhash_delete(&r->table);
  /* slabhash delete also does free(r), since table is first in struct */
}

struct rrset_cache* rrset_cache_adjust(struct rrset_cache *r,
  struct config_file* cfg, struct alloc_cache* alloc)
{
  if(!r || !cfg || !slabhash_is_size(&r->table, cfg->rrset_cache_size,
    cfg->rrset_cache_slabs))
  {
    rrset_cache_delete(r);
    r = rrset_cache_create(cfg, alloc);
  }
  return r;
}

void
rrset_cache_touch(struct rrset_cache* r, struct ub_packed_rrset_key* key,
  hashvalue_type hash, rrset_id_type id)
{
  struct lruhash* table = slabhash_gettable(&r->table, hash);
  /*
   * This leads to locking problems, deadlocks, if the caller is
   * holding any other rrset lock.
   * Because a lookup through the hashtable does:
   *  tablelock -> entrylock  (for that entry caller holds)
   * And this would do
   *  entrylock(already held) -> tablelock
   * And if two threads do this, it results in deadlock.
   * So, the caller must not hold entrylock.
   */
  lock_quick_lock(&table->lock);
  /* we have locked the hash table, the item can still be deleted.
   * because it could already have been reclaimed, but not yet set id=0.
   * This is because some lruhash routines have lazy deletion.
   * so, we must acquire a lock on the item to verify the id != 0.
   * also, with hash not changed, we are using the right slab.
   */
  lock_rw_rdlock(&key->entry.lock);
  if(key->id == id && key->entry.hash == hash) {
    lru_touch(table, &key->entry);
  }
  lock_rw_unlock(&key->entry.lock);
  lock_quick_unlock(&table->lock);
}
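The comment above pins down the lock order the rest of the file relies on: the table lock may only be taken while no rrset entry lock is held. A minimal caller-side sketch of the safe pattern follows, assuming only the declarations this file already includes; the function and variable names (touch_after_unlock, cache, k, saved_hash, saved_id) are invented for illustration and are not part of rrset.c. It mirrors the discipline rrset_array_unlock_touch follows further down.

/* Illustrative sketch, not part of rrset.c: remember the hash and id while
 * the entry lock is held, drop the lock, and only then touch the LRU. */
static void touch_after_unlock(struct rrset_cache* cache,
  struct ub_packed_rrset_key* k)
{
  hashvalue_type saved_hash;
  rrset_id_type saved_id;

  lock_rw_rdlock(&k->entry.lock);    /* entry lock held: safe to read */
  saved_hash = k->entry.hash;
  saved_id = k->id;
  lock_rw_unlock(&k->entry.lock);    /* release before taking the table lock */

  /* now the tablelock -> entrylock order inside the touch is respected */
  rrset_cache_touch(cache, k, saved_hash, saved_id);
}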

/** see if rrset needs to be updated in the cache */
static int
need_to_update_rrset(void* nd, void* cd, time_t timenow, int equal, int ns)
{
  struct packed_rrset_data* newd = (struct packed_rrset_data*)nd;
  struct packed_rrset_data* cached = (struct packed_rrset_data*)cd;
  /*  o if new data is expired, cached data is better */
  if( TTL_IS_EXPIRED(newd->ttl, timenow) && !TTL_IS_EXPIRED(cached->ttl, timenow))
    return 0;
  /*  o store if rrset has been validated
   *     everything better than bogus data
   *     secure is preferred */
  if( newd->security == sec_status_secure &&
    cached->security != sec_status_secure)
    return 1;
  if( cached->security == sec_status_bogus &&
    newd->security != sec_status_bogus && !equal)
    return 1;
  /*  o if new RRset is more trustworthy - insert it */
  if( newd->trust > cached->trust ) {
    /* if the cached rrset is bogus, and new is equal,
     * do not update the TTL - let it expire. */
    if(equal && !TTL_IS_EXPIRED(cached->ttl, timenow) &&
      cached->security == sec_status_bogus)
      return 0;
    return 1;
  }
  /*  o item in cache has expired */
  if( TTL_IS_EXPIRED(cached->ttl, timenow) )
    return 1;
  /*  o same trust, but different in data - insert it */
  if( newd->trust == cached->trust && !equal ) {
    /* if this is type NS, do not 'stick' to the owner that changes
     * the NS RRset, but use the cached TTL for the new data, and
     * update to fetch the latest data. ttl is not expired, because
     * that check was before this one. */
    if(ns) {
      size_t i;
      newd->ttl = cached->ttl;
      for(i=0; i<(newd->count+newd->rrsig_count); i++)
        if(newd->rr_ttl[i] > newd->ttl)
          newd->rr_ttl[i] = newd->ttl;
    }
    return 1;
  }
  return 0;
}

/** Update RRSet special key ID */
static void
rrset_update_id(struct rrset_ref* ref, struct alloc_cache* alloc)
{
  /* this may clear the cache and invalidate lock below */
  uint64_t newid = alloc_get_id(alloc);
  /* obtain writelock */
  lock_rw_wrlock(&ref->key->entry.lock);
  /* check if it was deleted in the meantime, if so, skip update */
  if(ref->key->id == ref->id) {
    ref->key->id = newid;
    ref->id = newid;
  }
  lock_rw_unlock(&ref->key->entry.lock);
}

int
rrset_cache_update(struct rrset_cache* r, struct rrset_ref* ref,
  struct alloc_cache* alloc, time_t timenow)
{
  struct lruhash_entry* e;
  struct ub_packed_rrset_key* k = ref->key;
  hashvalue_type h = k->entry.hash;
  uint16_t rrset_type = ntohs(k->rk.type);
  int equal = 0;
  log_assert(ref->id != 0 && k->id != 0);
  log_assert(k->rk.dname != NULL);
  /* looks up item with a readlock - no editing! */
  if((e=slabhash_lookup(&r->table, h, k, 0)) != 0) {
    /* return id and key as they will be used in the cache,
     * since lruhash_insert, if the item already exists, deallocs
     * the passed key in favor of the already stored key.
     * because of the small gap (see below) this key ptr and id
     * may prove later to be already deleted, which is no problem
     * as it only makes a cache miss.
     */
    ref->key = (struct ub_packed_rrset_key*)e->key;
    ref->id = ref->key->id;
    equal = rrsetdata_equal((struct packed_rrset_data*)k->entry.
      data, (struct packed_rrset_data*)e->data);
    if(!need_to_update_rrset(k->entry.data, e->data, timenow,
      equal, (rrset_type==LDNS_RR_TYPE_NS))) {
      /* cache is superior, return that value */
      lock_rw_unlock(&e->lock);
      ub_packed_rrset_parsedelete(k, alloc);
      if(equal) return 2;
      return 1;
    }
    lock_rw_unlock(&e->lock);
    /* Go on and insert the passed item.
     * small gap here, where the entry is not locked.
     * possibly the entry is updated with something else.
     * we then overwrite that with our data.
     * this is just too bad, it's a cache anyway. */
    /* use insert to update the entry, to manage lruhash
     * cache size values nicely. */
  }
  log_assert(ref->key->id != 0);
  slabhash_insert(&r->table, h, &k->entry, k->entry.data, alloc);
  if(e) {
    /* For NSEC, NSEC3, DNAME, when rdata is updated, update
     * the ID number so that proofs in the message cache are
     * invalidated */
    if((rrset_type == LDNS_RR_TYPE_NSEC
      || rrset_type == LDNS_RR_TYPE_NSEC3
      || rrset_type == LDNS_RR_TYPE_DNAME) && !equal) {
      rrset_update_id(ref, alloc);
    }
    return 1;
  }
  return 0;
}
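A short caller-side sketch of how rrset_cache_update is driven, assuming the declarations this file already includes; the names store_rrset, cache, alloc_c, k and now are invented for the example and are not part of rrset.c. The key point is that after the call the rrset_ref, not the original key pointer, describes what the cache holds.

/* Illustrative sketch, not part of rrset.c: hand a freshly built rrset to
 * the cache and keep only the reference afterwards. */
static void store_rrset(struct rrset_cache* cache, struct alloc_cache* alloc_c,
  struct ub_packed_rrset_key* k, time_t now)
{
  struct rrset_ref ref;
  int ret;

  k->entry.hash = rrset_key_hash(&k->rk); /* hash must be computed first */
  ref.key = k;
  ref.id = k->id;
  ret = rrset_cache_update(cache, &ref, alloc_c, now);
  /* ref.key/ref.id now point at the rrset stored in the cache; k itself
   * may have been deallocated in favor of an existing entry, so only ref
   * is used from here on. */
  (void)ret;
}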

void rrset_cache_update_wildcard(struct rrset_cache* rrset_cache,
  struct ub_packed_rrset_key* rrset, uint8_t* ce, size_t ce_len,
  struct alloc_cache* alloc, time_t timenow)
{
  struct rrset_ref ref;
  uint8_t wc_dname[LDNS_MAX_DOMAINLEN+3];
  rrset = packed_rrset_copy_alloc(rrset, alloc, timenow);
  if(!rrset) {
    log_err("malloc failure in rrset_cache_update_wildcard");
    return;
  }
  /* ce has at least one label less than qname, we can therefore safely
   * add the wildcard label. */
  wc_dname[0] = 1;
  wc_dname[1] = (uint8_t)'*';
  memmove(wc_dname+2, ce, ce_len);

  free(rrset->rk.dname);
  rrset->rk.dname_len = ce_len + 2;
  rrset->rk.dname = (uint8_t*)memdup(wc_dname, rrset->rk.dname_len);
  if(!rrset->rk.dname) {
    alloc_special_release(alloc, rrset);
    log_err("memdup failure in rrset_cache_update_wildcard");
    return;
  }

  rrset->entry.hash = rrset_key_hash(&rrset->rk);
  ref.key = rrset;
  ref.id = rrset->id;
  /* ignore ret: if it was in the cache, ref updated */
  (void)rrset_cache_update(rrset_cache, &ref, alloc, timenow);
}

/** Grace period in seconds for TTL=0 DNAME rrsets (RFC 2308: do not cache).
 * Allows synthesis from cache within this window to reduce recursion load. */
#define DNAME_TTL0_GRACE_SECONDS 1

struct ub_packed_rrset_key*
rrset_cache_lookup(struct rrset_cache* r, uint8_t* qname, size_t qnamelen,
  uint16_t qtype, uint16_t qclass, uint32_t flags, time_t timenow,
  int wr)
{
  struct lruhash_entry* e;
  struct ub_packed_rrset_key key;

  key.entry.key = &key;
  key.entry.data = NULL;
  key.rk.dname = qname;
  key.rk.dname_len = qnamelen;
  key.rk.type = htons(qtype);
  key.rk.rrset_class = htons(qclass);
  key.rk.flags = flags;

  key.entry.hash = rrset_key_hash(&key.rk);

  if((e = slabhash_lookup(&r->table, key.entry.hash, &key, wr))) {
    /* check TTL */
    struct packed_rrset_data* data =
      (struct packed_rrset_data*)e->data;
    struct ub_packed_rrset_key* k = (struct ub_packed_rrset_key*)e->key;
    if(TTL_IS_EXPIRED(data->ttl, timenow)) {
      /* Allow TTL=0 DNAME within grace period for synthesis */
      if(qtype == LDNS_RR_TYPE_DNAME &&
         (k->rk.flags & PACKED_RRSET_UPSTREAM_0TTL) &&
         (timenow - data->ttl_add) <= DNAME_TTL0_GRACE_SECONDS) {
        /* within grace: allow for synthesis */
      } else {
        lock_rw_unlock(&e->lock);
        return NULL;
      }
    }
    /* we're done */
    return k;
  }
  return NULL;
}
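A minimal caller-side sketch of rrset_cache_lookup, assuming only declarations this file already includes; have_a_rrset, cache, dname, dname_len and now are invented names for the example and are not part of rrset.c. The point is that a successful lookup hands back an entry whose lock (read or write, depending on wr) the caller must release.

/* Illustrative sketch, not part of rrset.c: read-only lookup of an A rrset
 * followed by the unlock that the caller owes. */
static int have_a_rrset(struct rrset_cache* cache, uint8_t* dname,
  size_t dname_len, time_t now)
{
  struct ub_packed_rrset_key* rr = rrset_cache_lookup(cache, dname,
    dname_len, LDNS_RR_TYPE_A, LDNS_RR_CLASS_IN, 0, now, 0);
  if(!rr)
    return 0;                        /* absent or expired */
  /* ... inspect rr->entry.data while the read lock is held ... */
  lock_rw_unlock(&rr->entry.lock);   /* the lookup returned a locked entry */
  return 1;
}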

int
rrset_array_lock(struct rrset_ref* ref, size_t count, time_t timenow)
{
  size_t i;
  struct packed_rrset_data* d;
  for(i=0; i<count; i++) {
    if(i>0 && ref[i].key == ref[i-1].key)
      continue; /* only lock items once */
    lock_rw_rdlock(&ref[i].key->entry.lock);
    d = ref[i].key->entry.data;
    if(ref[i].id != ref[i].key->id ||
      TTL_IS_EXPIRED(d->ttl, timenow)) {
      /* failure! rollback our readlocks */
      rrset_array_unlock(ref, i+1);
      return 0;
    }
  }
  return 1;
}

void
rrset_array_unlock(struct rrset_ref* ref, size_t count)
{
  size_t i;
  for(i=0; i<count; i++) {
    if(i>0 && ref[i].key == ref[i-1].key)
      continue; /* only unlock items once */
    lock_rw_unlock(&ref[i].key->entry.lock);
  }
}
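A caller-side sketch of the lock/unlock pair above, assuming refs is an array of rrset_ref built with duplicate keys adjacent (as the "only lock items once" checks expect); refs_still_valid, refs, ref_count and now are invented names and not part of rrset.c.

/* Illustrative sketch, not part of rrset.c: check that stored references
 * are still current before reading through them. */
static int refs_still_valid(struct rrset_ref* refs, size_t ref_count,
  time_t now)
{
  if(!rrset_array_lock(refs, ref_count, now))
    return 0; /* an entry was replaced or expired; rrset_array_lock
               * already rolled back the read locks it had taken */
  /* ... read the rrsets through refs[i].key->entry.data ... */
  rrset_array_unlock(refs, ref_count);
  return 1;
}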

void
rrset_array_unlock_touch(struct rrset_cache* r, struct regional* scratch,
  struct rrset_ref* ref, size_t count)
{
  hashvalue_type* h;
  size_t i;
  if(count > RR_COUNT_MAX || !(h = (hashvalue_type*)regional_alloc(
    scratch, sizeof(hashvalue_type)*count))) {
    log_warn("rrset LRU: memory allocation failed");
    h = NULL;
  } else  /* store hash values */
    for(i=0; i<count; i++)
      h[i] = ref[i].key->entry.hash;
  /* unlock */
  for(i=0; i<count; i++) {
    if(i>0 && ref[i].key == ref[i-1].key)
      continue; /* only unlock items once */
    lock_rw_unlock(&ref[i].key->entry.lock);
  }
  if(h) {
    /* LRU touch, with no rrset locks held */
    for(i=0; i<count; i++) {
      if(i>0 && ref[i].key == ref[i-1].key)
        continue; /* only touch items once */
      rrset_cache_touch(r, ref[i].key, h[i], ref[i].id);
    }
  }
}

void
rrset_update_sec_status(struct rrset_cache* r,
  struct ub_packed_rrset_key* rrset, time_t now)
{
  struct packed_rrset_data* updata =
    (struct packed_rrset_data*)rrset->entry.data;
  struct lruhash_entry* e;
  struct packed_rrset_data* cachedata;

  /* hash it again to make sure it has a hash */
  rrset->entry.hash = rrset_key_hash(&rrset->rk);

  e = slabhash_lookup(&r->table, rrset->entry.hash, rrset, 1);
  if(!e)
    return; /* not in the cache anymore */
  cachedata = (struct packed_rrset_data*)e->data;
  if(!rrsetdata_equal(updata, cachedata)) {
    lock_rw_unlock(&e->lock);
    return; /* rrset has changed in the meantime */
  }
  /* update the cached rrset */
  if(updata->security > cachedata->security) {
    size_t i;
    if(updata->trust > cachedata->trust)
      cachedata->trust = updata->trust;
    cachedata->security = updata->security;
    /* for NS records only shorter TTLs, other types: update it */
    if(ntohs(rrset->rk.type) != LDNS_RR_TYPE_NS ||
      updata->ttl+now < cachedata->ttl ||
      cachedata->ttl < now ||
      updata->security == sec_status_bogus) {
      cachedata->ttl = updata->ttl + now;
      for(i=0; i<cachedata->count+cachedata->rrsig_count; i++)
        cachedata->rr_ttl[i] = updata->rr_ttl[i]+now;
      cachedata->ttl_add = now;
    }
  }
  lock_rw_unlock(&e->lock);
}

void
rrset_check_sec_status(struct rrset_cache* r,
  struct ub_packed_rrset_key* rrset, time_t now)
{
  struct packed_rrset_data* updata =
    (struct packed_rrset_data*)rrset->entry.data;
  struct lruhash_entry* e;
  struct packed_rrset_data* cachedata;

  /* hash it again to make sure it has a hash */
  rrset->entry.hash = rrset_key_hash(&rrset->rk);

  e = slabhash_lookup(&r->table, rrset->entry.hash, rrset, 0);
  if(!e)
    return; /* not in the cache anymore */
  cachedata = (struct packed_rrset_data*)e->data;
  if(now > cachedata->ttl || !rrsetdata_equal(updata, cachedata)) {
    lock_rw_unlock(&e->lock);
    return; /* expired, or rrset has changed in the meantime */
  }
  if(cachedata->security > updata->security) {
    updata->security = cachedata->security;
    if(cachedata->security == sec_status_bogus) {
      size_t i;
      updata->ttl = cachedata->ttl - now;
      for(i=0; i<cachedata->count+cachedata->rrsig_count; i++)
        if(cachedata->rr_ttl[i] < now)
          updata->rr_ttl[i] = 0;
        else updata->rr_ttl[i] =
          cachedata->rr_ttl[i]-now;
    }
    if(cachedata->trust > updata->trust)
      updata->trust = cachedata->trust;
  }
  lock_rw_unlock(&e->lock);
}

void
rrset_cache_remove_above(struct rrset_cache* r, uint8_t** qname, size_t*
  qnamelen, uint16_t searchtype, uint16_t qclass, time_t now, uint8_t*
  qnametop, size_t qnametoplen)
{
  struct ub_packed_rrset_key *rrset;
  uint8_t lablen;

  while(*qnamelen > 0) {
    /* look one label higher */
    lablen = **qname;
    *qname += lablen + 1;
    *qnamelen -= lablen + 1;
    if(*qnamelen <= 0)
      return;

    /* stop at qnametop */
    if(qnametop && *qnamelen == qnametoplen &&
      query_dname_compare(*qname, qnametop)==0)
      return;

    if(verbosity >= VERB_ALGO) {
      /* looks up with a time of 0, to see expired entries */
      if((rrset = rrset_cache_lookup(r, *qname,
        *qnamelen, searchtype, qclass, 0, 0, 0))) {
        struct packed_rrset_data* data =
          (struct packed_rrset_data*)rrset->entry.data;
        int expired = (now > data->ttl);
        lock_rw_unlock(&rrset->entry.lock);
        if(expired)
          log_nametypeclass(verbosity, "this "
            "(grand)parent rrset will be "
            "removed (expired)",
            *qname, searchtype, qclass);
        else  log_nametypeclass(verbosity, "this "
            "(grand)parent rrset will be "
            "removed",
            *qname, searchtype, qclass);
      }
    }
    rrset_cache_remove(r, *qname, *qnamelen, searchtype, qclass, 0);
  }
}

int
rrset_cache_expired_above(struct rrset_cache* r, uint8_t** qname, size_t*
  qnamelen, uint16_t searchtype, uint16_t qclass, time_t now, uint8_t*
  qnametop, size_t qnametoplen)
{
  struct ub_packed_rrset_key *rrset;
  uint8_t lablen;

  while(*qnamelen > 0) {
    /* look one label higher */
    lablen = **qname;
    *qname += lablen + 1;
    *qnamelen -= lablen + 1;
    if(*qnamelen <= 0)
      break;

    /* looks up with a time of 0, to see expired entries */
    if((rrset = rrset_cache_lookup(r, *qname,
      *qnamelen, searchtype, qclass, 0, 0, 0))) {
      struct packed_rrset_data* data =
        (struct packed_rrset_data*)rrset->entry.data;
      if(TTL_IS_EXPIRED(data->ttl, now)) {
        /* it is expired, this is not wanted */
        lock_rw_unlock(&rrset->entry.lock);
        log_nametypeclass(VERB_ALGO, "this rrset is expired", *qname, searchtype, qclass);
        return 1;
      }
      /* it is not expired, continue looking */
      lock_rw_unlock(&rrset->entry.lock);
    }

    /* do not look above the qnametop. */
    if(qnametop && *qnamelen == qnametoplen &&
      query_dname_compare(*qname, qnametop)==0)
      break;
  }
  return 0;
}

void rrset_cache_remove(struct rrset_cache* r, uint8_t* nm, size_t nmlen,
  uint16_t type, uint16_t dclass, uint32_t flags)
{
  struct ub_packed_rrset_key key;
  key.entry.key = &key;
  key.rk.dname = nm;
  key.rk.dname_len = nmlen;
  key.rk.rrset_class = htons(dclass);
  key.rk.type = htons(type);
  key.rk.flags = flags;
  key.entry.hash = rrset_key_hash(&key.rk);
  slabhash_remove(&r->table, key.entry.hash, &key);
}