/src/unbound/services/cache/rrset.c
Line | Count | Source
1 | | /* |
2 | | * services/cache/rrset.c - Resource record set cache. |
3 | | * |
4 | | * Copyright (c) 2007, NLnet Labs. All rights reserved. |
5 | | * |
6 | | * This software is open source. |
7 | | * |
8 | | * Redistribution and use in source and binary forms, with or without |
9 | | * modification, are permitted provided that the following conditions |
10 | | * are met: |
11 | | * |
12 | | * Redistributions of source code must retain the above copyright notice, |
13 | | * this list of conditions and the following disclaimer. |
14 | | * |
15 | | * Redistributions in binary form must reproduce the above copyright notice, |
16 | | * this list of conditions and the following disclaimer in the documentation |
17 | | * and/or other materials provided with the distribution. |
18 | | * |
19 | | * Neither the name of the NLNET LABS nor the names of its contributors may |
20 | | * be used to endorse or promote products derived from this software without |
21 | | * specific prior written permission. |
22 | | * |
23 | | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
24 | | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
25 | | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
26 | | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
27 | | * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
28 | | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED |
29 | | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
30 | | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
31 | | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
32 | | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
33 | | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
34 | | */ |
35 | | |
36 | | /** |
37 | | * \file |
38 | | * |
39 | | * This file contains the rrset cache. |
40 | | */ |
41 | | #include "config.h" |
42 | | #include "services/cache/rrset.h" |
43 | | #include "sldns/rrdef.h" |
44 | | #include "util/storage/slabhash.h" |
45 | | #include "util/config_file.h" |
46 | | #include "util/data/packed_rrset.h" |
47 | | #include "util/data/msgreply.h" |
48 | | #include "util/data/msgparse.h" |
49 | | #include "util/data/dname.h" |
50 | | #include "util/regional.h" |
51 | | #include "util/alloc.h" |
52 | | #include "util/net_help.h" |
53 | | |
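| | /*
| |  * Hashtable mark-delete callback: setting the id to 0 marks the entry as
| |  * deleted for code that still holds a reference to the key (see the id
| |  * checks in rrset_cache_touch and rrset_array_lock below).
| |  */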
54 | | void |
55 | | rrset_markdel(void* key) |
56 | 0 | { |
57 | 0 | struct ub_packed_rrset_key* r = (struct ub_packed_rrset_key*)key; |
58 | 0 | r->id = 0; |
59 | 0 | } |
60 | | |
61 | | struct rrset_cache* rrset_cache_create(struct config_file* cfg, |
62 | | struct alloc_cache* alloc) |
63 | 0 | { |
64 | 0 | size_t slabs = (cfg?cfg->rrset_cache_slabs:HASH_DEFAULT_SLABS); |
65 | 0 | size_t startarray = HASH_DEFAULT_STARTARRAY; |
66 | 0 | size_t maxmem = (cfg?cfg->rrset_cache_size:HASH_DEFAULT_MAXMEM); |
67 | |
68 | 0 | struct rrset_cache *r = (struct rrset_cache*)slabhash_create(slabs, |
69 | 0 | startarray, maxmem, ub_rrset_sizefunc, ub_rrset_compare, |
70 | 0 | ub_rrset_key_delete, rrset_data_delete, alloc); |
71 | 0 | if(!r) |
72 | 0 | return NULL; |
73 | 0 | slabhash_setmarkdel(&r->table, &rrset_markdel); |
74 | 0 | return r; |
75 | 0 | } |
76 | | |
77 | | void rrset_cache_delete(struct rrset_cache* r) |
78 | 0 | { |
79 | 0 | if(!r) |
80 | 0 | return; |
81 | 0 | slabhash_delete(&r->table); |
82 | | /* slabhash delete also does free(r), since table is first in struct */
83 | 0 | } |
84 | | |
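| | /*
| |  * If there is no table yet, or the existing table does not match the
| |  * configured memory size and slab count, a new empty cache is created
| |  * (the old one, if any, is deleted); otherwise it is returned unchanged.
| |  */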
85 | | struct rrset_cache* rrset_cache_adjust(struct rrset_cache *r, |
86 | | struct config_file* cfg, struct alloc_cache* alloc) |
87 | 0 | { |
88 | 0 | if(!r || !cfg || !slabhash_is_size(&r->table, cfg->rrset_cache_size, |
89 | 0 | cfg->rrset_cache_slabs)) |
90 | 0 | { |
91 | 0 | rrset_cache_delete(r); |
92 | 0 | r = rrset_cache_create(cfg, alloc); |
93 | 0 | } |
94 | 0 | return r; |
95 | 0 | } |
96 | | |
97 | | void |
98 | | rrset_cache_touch(struct rrset_cache* r, struct ub_packed_rrset_key* key, |
99 | | hashvalue_type hash, rrset_id_type id) |
100 | 0 | { |
101 | 0 | struct lruhash* table = slabhash_gettable(&r->table, hash); |
102 | | /*
103 | | * This leads to locking problems (deadlocks) if the caller is
104 | | * holding any other rrset lock.
105 | | * A lookup through the hashtable takes
106 | | * tablelock -> entrylock (for the entry the caller holds),
107 | | * while this function would take
108 | | * entrylock (already held) -> tablelock.
109 | | * If two threads do this, the result is a deadlock.
110 | | * So, the caller must not hold the entrylock.
111 | | */
112 | 0 | lock_quick_lock(&table->lock); |
113 | | /* Even though we have locked the hash table, the item can still be
114 | | * deleted: it could already have been reclaimed, but not yet have had
115 | | * its id set to 0, because some lruhash routines delete lazily.
116 | | * So we must acquire a lock on the item to verify that the id is
117 | | * still the same. Also, with the hash unchanged, this is the right slab.
118 | | */
119 | 0 | lock_rw_rdlock(&key->entry.lock); |
120 | 0 | if(key->id == id && key->entry.hash == hash) { |
121 | 0 | lru_touch(table, &key->entry); |
122 | 0 | } |
123 | 0 | lock_rw_unlock(&key->entry.lock); |
124 | 0 | lock_quick_unlock(&table->lock); |
125 | 0 | } |
126 | | |
127 | | /** see if the rrset needs to be updated in the cache; 1 means store the new data, 0 means keep the cached data */
128 | | static int |
129 | | need_to_update_rrset(void* nd, void* cd, time_t timenow, int equal, int ns) |
130 | 0 | { |
131 | 0 | struct packed_rrset_data* newd = (struct packed_rrset_data*)nd; |
132 | 0 | struct packed_rrset_data* cached = (struct packed_rrset_data*)cd; |
133 | | /* o if new data is expired, cached data is better */ |
134 | 0 | if( newd->ttl < timenow && timenow <= cached->ttl) |
135 | 0 | return 0; |
136 | | /* o store if the rrset has been validated:
137 | | * anything is better than bogus data,
138 | | * and secure data is preferred */
139 | 0 | if( newd->security == sec_status_secure && |
140 | 0 | cached->security != sec_status_secure) |
141 | 0 | return 1; |
142 | 0 | if( cached->security == sec_status_bogus && |
143 | 0 | newd->security != sec_status_bogus && !equal) |
144 | 0 | return 1; |
145 | | /* o if new RRset is more trustworthy - insert it */ |
146 | 0 | if( newd->trust > cached->trust ) { |
147 | | /* if the cached rrset is bogus, and new is equal, |
148 | | * do not update the TTL - let it expire. */ |
149 | 0 | if(equal && cached->ttl >= timenow && |
150 | 0 | cached->security == sec_status_bogus) |
151 | 0 | return 0; |
152 | 0 | return 1; |
153 | 0 | } |
154 | | /* o item in cache has expired */ |
155 | 0 | if( cached->ttl < timenow ) |
156 | 0 | return 1; |
157 | | /* o same trust, but different in data - insert it */ |
158 | 0 | if( newd->trust == cached->trust && !equal ) { |
159 | | /* if this is type NS, do not 'stick' to an owner that changes
160 | | * the NS RRset; use the cached TTL for the new data, and
161 | | * update it so the latest data is fetched. The TTL is not
162 | | * expired, because that check came before this one. */
163 | 0 | if(ns) { |
164 | 0 | size_t i; |
165 | 0 | newd->ttl = cached->ttl; |
166 | 0 | for(i=0; i<(newd->count+newd->rrsig_count); i++) |
167 | 0 | if(newd->rr_ttl[i] > newd->ttl) |
168 | 0 | newd->rr_ttl[i] = newd->ttl; |
169 | 0 | } |
170 | 0 | return 1; |
171 | 0 | } |
172 | 0 | return 0; |
173 | 0 | } |
174 | | |
175 | | /** update the rrset's ID to a new value, so that old references to it become invalid */
176 | | static void |
177 | | rrset_update_id(struct rrset_ref* ref, struct alloc_cache* alloc) |
178 | 0 | { |
179 | | /* this may clear the cache and invalidate lock below */ |
180 | 0 | uint64_t newid = alloc_get_id(alloc); |
181 | | /* obtain writelock */ |
182 | 0 | lock_rw_wrlock(&ref->key->entry.lock); |
183 | | /* check if it was deleted in the meantime, if so, skip update */ |
184 | 0 | if(ref->key->id == ref->id) { |
185 | 0 | ref->key->id = newid; |
186 | 0 | ref->id = newid; |
187 | 0 | } |
188 | 0 | lock_rw_unlock(&ref->key->entry.lock); |
189 | 0 | } |
190 | | |
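| | /*
| |  * Return values as implemented below: 0 means the rrset was not yet in
| |  * the cache and the passed entry has been inserted (ref is unchanged);
| |  * 1 means an entry was already present and ref has been updated to the
| |  * cached key and id; 2 is like 1, but the cached rdata is identical to
| |  * the passed rdata.
| |  */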
191 | | int |
192 | | rrset_cache_update(struct rrset_cache* r, struct rrset_ref* ref, |
193 | | struct alloc_cache* alloc, time_t timenow) |
194 | 0 | { |
195 | 0 | struct lruhash_entry* e; |
196 | 0 | struct ub_packed_rrset_key* k = ref->key; |
197 | 0 | hashvalue_type h = k->entry.hash; |
198 | 0 | uint16_t rrset_type = ntohs(k->rk.type); |
199 | 0 | int equal = 0; |
200 | 0 | log_assert(ref->id != 0 && k->id != 0); |
201 | 0 | log_assert(k->rk.dname != NULL); |
202 | | /* looks up item with a readlock - no editing! */ |
203 | 0 | if((e=slabhash_lookup(&r->table, h, k, 0)) != 0) { |
204 | | /* Return the id and key as they will be used in the cache,
205 | | * since lruhash_insert, if the item already exists, deallocates
206 | | * the passed key in favor of the already stored key.
207 | | * Because of the small gap (see below) this key pointer and id
208 | | * may later prove to be already deleted, which is no problem,
209 | | * as it only causes a cache miss.
210 | | */
211 | 0 | ref->key = (struct ub_packed_rrset_key*)e->key; |
212 | 0 | ref->id = ref->key->id; |
213 | 0 | equal = rrsetdata_equal((struct packed_rrset_data*)k->entry. |
214 | 0 | data, (struct packed_rrset_data*)e->data); |
215 | 0 | if(!need_to_update_rrset(k->entry.data, e->data, timenow, |
216 | 0 | equal, (rrset_type==LDNS_RR_TYPE_NS))) { |
217 | | /* cache is superior, return that value */ |
218 | 0 | lock_rw_unlock(&e->lock); |
219 | 0 | ub_packed_rrset_parsedelete(k, alloc); |
220 | 0 | if(equal) return 2; |
221 | 0 | return 1; |
222 | 0 | } |
223 | 0 | lock_rw_unlock(&e->lock); |
224 | | /* Go on and insert the passed item.
225 | | * There is a small gap here where the entry is not locked;
226 | | * possibly the entry is updated with something else and we
227 | | * then overwrite that with our data. That is acceptable,
228 | | * it's a cache anyway. */
229 | | /* use insert to update the entry, so that lruhash manages
230 | | * the cache size values nicely. */
231 | 0 | } |
232 | 0 | log_assert(ref->key->id != 0); |
233 | 0 | slabhash_insert(&r->table, h, &k->entry, k->entry.data, alloc); |
234 | 0 | if(e) { |
235 | | /* For NSEC, NSEC3, DNAME, when rdata is updated, update |
236 | | * the ID number so that proofs in message cache are |
237 | | * invalidated */ |
238 | 0 | if((rrset_type == LDNS_RR_TYPE_NSEC |
239 | 0 | || rrset_type == LDNS_RR_TYPE_NSEC3 |
240 | 0 | || rrset_type == LDNS_RR_TYPE_DNAME) && !equal) { |
241 | 0 | rrset_update_id(ref, alloc); |
242 | 0 | } |
243 | 0 | return 1; |
244 | 0 | } |
245 | 0 | return 0; |
246 | 0 | } |
247 | | |
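| | /*
| |  * Store a copy of the rrset in the cache under the wildcard owner name
| |  * "*." followed by the closest encloser ce; the passed rrset itself is
| |  * left untouched.
| |  */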
248 | | void rrset_cache_update_wildcard(struct rrset_cache* rrset_cache, |
249 | | struct ub_packed_rrset_key* rrset, uint8_t* ce, size_t ce_len, |
250 | | struct alloc_cache* alloc, time_t timenow) |
251 | 0 | { |
252 | 0 | struct rrset_ref ref; |
253 | 0 | uint8_t wc_dname[LDNS_MAX_DOMAINLEN+3]; |
254 | 0 | rrset = packed_rrset_copy_alloc(rrset, alloc, timenow); |
255 | 0 | if(!rrset) { |
256 | 0 | log_err("malloc failure in rrset_cache_update_wildcard"); |
257 | 0 | return; |
258 | 0 | } |
259 | | /* ce has at least one label less than qname, so we can safely
260 | | * add the wildcard label. */
261 | 0 | wc_dname[0] = 1; |
262 | 0 | wc_dname[1] = (uint8_t)'*'; |
263 | 0 | memmove(wc_dname+2, ce, ce_len); |
264 | |
265 | 0 | free(rrset->rk.dname); |
266 | 0 | rrset->rk.dname_len = ce_len + 2; |
267 | 0 | rrset->rk.dname = (uint8_t*)memdup(wc_dname, rrset->rk.dname_len); |
268 | 0 | if(!rrset->rk.dname) { |
269 | 0 | alloc_special_release(alloc, rrset); |
270 | 0 | log_err("memdup failure in rrset_cache_update_wildcard"); |
271 | 0 | return; |
272 | 0 | } |
273 | | |
274 | 0 | rrset->entry.hash = rrset_key_hash(&rrset->rk); |
275 | 0 | ref.key = rrset; |
276 | 0 | ref.id = rrset->id; |
277 | | /* ignore ret: if it was in the cache, ref updated */ |
278 | 0 | (void)rrset_cache_update(rrset_cache, &ref, alloc, timenow); |
279 | 0 | } |
280 | | |
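| | /*
| |  * On a cache hit the returned key is locked (a read lock, or a write
| |  * lock if wr is nonzero); the caller must unlock rrset->entry.lock when
| |  * done with it. NULL is returned on a miss or when the entry has
| |  * expired at timenow.
| |  */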
281 | | struct ub_packed_rrset_key* |
282 | | rrset_cache_lookup(struct rrset_cache* r, uint8_t* qname, size_t qnamelen, |
283 | | uint16_t qtype, uint16_t qclass, uint32_t flags, time_t timenow, |
284 | | int wr) |
285 | 0 | { |
286 | 0 | struct lruhash_entry* e; |
287 | 0 | struct ub_packed_rrset_key key; |
288 | | |
289 | 0 | key.entry.key = &key; |
290 | 0 | key.entry.data = NULL; |
291 | 0 | key.rk.dname = qname; |
292 | 0 | key.rk.dname_len = qnamelen; |
293 | 0 | key.rk.type = htons(qtype); |
294 | 0 | key.rk.rrset_class = htons(qclass); |
295 | 0 | key.rk.flags = flags; |
296 | |
297 | 0 | key.entry.hash = rrset_key_hash(&key.rk); |
298 | |
299 | 0 | if((e = slabhash_lookup(&r->table, key.entry.hash, &key, wr))) { |
300 | | /* check TTL */ |
301 | 0 | struct packed_rrset_data* data = |
302 | 0 | (struct packed_rrset_data*)e->data; |
303 | 0 | if(timenow > data->ttl) { |
304 | 0 | lock_rw_unlock(&e->lock); |
305 | 0 | return NULL; |
306 | 0 | } |
307 | | /* we're done */ |
308 | 0 | return (struct ub_packed_rrset_key*)e->key; |
309 | 0 | } |
310 | 0 | return NULL; |
311 | 0 | } |
312 | | |
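| | /*
| |  * Either every distinct referenced entry is read-locked and 1 is
| |  * returned, or, when an id is stale or a TTL has expired, all locks
| |  * taken so far are released again and 0 is returned.
| |  */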
313 | | int |
314 | | rrset_array_lock(struct rrset_ref* ref, size_t count, time_t timenow) |
315 | 0 | { |
316 | 0 | size_t i; |
317 | 0 | for(i=0; i<count; i++) { |
318 | 0 | if(i>0 && ref[i].key == ref[i-1].key) |
319 | 0 | continue; /* only lock items once */ |
320 | 0 | lock_rw_rdlock(&ref[i].key->entry.lock); |
321 | 0 | if(ref[i].id != ref[i].key->id || timenow > |
322 | 0 | ((struct packed_rrset_data*)(ref[i].key->entry.data)) |
323 | 0 | ->ttl) { |
324 | | /* failure! roll back our readlocks */
325 | 0 | rrset_array_unlock(ref, i+1); |
326 | 0 | return 0; |
327 | 0 | } |
328 | 0 | } |
329 | 0 | return 1; |
330 | 0 | } |
331 | | |
332 | | void |
333 | | rrset_array_unlock(struct rrset_ref* ref, size_t count) |
334 | 0 | { |
335 | 0 | size_t i; |
336 | 0 | for(i=0; i<count; i++) { |
337 | 0 | if(i>0 && ref[i].key == ref[i-1].key) |
338 | 0 | continue; /* only unlock items once */ |
339 | 0 | lock_rw_unlock(&ref[i].key->entry.lock); |
340 | 0 | } |
341 | 0 | } |
342 | | |
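| | /*
| |  * The hash values are copied into scratch first and the entry locks are
| |  * released before the LRU touch, because rrset_cache_touch takes the
| |  * table lock and (as described above) must not be called while an entry
| |  * lock is held.
| |  */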
343 | | void |
344 | | rrset_array_unlock_touch(struct rrset_cache* r, struct regional* scratch, |
345 | | struct rrset_ref* ref, size_t count) |
346 | 0 | { |
347 | 0 | hashvalue_type* h; |
348 | 0 | size_t i; |
349 | 0 | if(count > RR_COUNT_MAX || !(h = (hashvalue_type*)regional_alloc( |
350 | 0 | scratch, sizeof(hashvalue_type)*count))) { |
351 | 0 | log_warn("rrset LRU: memory allocation failed"); |
352 | 0 | h = NULL; |
353 | 0 | } else /* store hash values */ |
354 | 0 | for(i=0; i<count; i++) |
355 | 0 | h[i] = ref[i].key->entry.hash; |
356 | | /* unlock */ |
357 | 0 | for(i=0; i<count; i++) { |
358 | 0 | if(i>0 && ref[i].key == ref[i-1].key) |
359 | 0 | continue; /* only unlock items once */ |
360 | 0 | lock_rw_unlock(&ref[i].key->entry.lock); |
361 | 0 | } |
362 | 0 | if(h) { |
363 | | /* LRU touch, with no rrset locks held */ |
364 | 0 | for(i=0; i<count; i++) { |
365 | 0 | if(i>0 && ref[i].key == ref[i-1].key) |
366 | 0 | continue; /* only touch items once */ |
367 | 0 | rrset_cache_touch(r, ref[i].key, h[i], ref[i].id); |
368 | 0 | } |
369 | 0 | } |
370 | 0 | } |
371 | | |
372 | | void |
373 | | rrset_update_sec_status(struct rrset_cache* r, |
374 | | struct ub_packed_rrset_key* rrset, time_t now) |
375 | 0 | { |
376 | 0 | struct packed_rrset_data* updata = |
377 | 0 | (struct packed_rrset_data*)rrset->entry.data; |
378 | 0 | struct lruhash_entry* e; |
379 | 0 | struct packed_rrset_data* cachedata; |
380 | | |
381 | | /* hash it again to make sure it has a hash */ |
382 | 0 | rrset->entry.hash = rrset_key_hash(&rrset->rk); |
383 | |
384 | 0 | e = slabhash_lookup(&r->table, rrset->entry.hash, rrset, 1); |
385 | 0 | if(!e) |
386 | 0 | return; /* not in the cache anymore */ |
387 | 0 | cachedata = (struct packed_rrset_data*)e->data; |
388 | 0 | if(!rrsetdata_equal(updata, cachedata)) { |
389 | 0 | lock_rw_unlock(&e->lock); |
390 | 0 | return; /* rrset has changed in the meantime */ |
391 | 0 | } |
392 | | /* update the cached rrset */ |
393 | 0 | if(updata->security > cachedata->security) { |
394 | 0 | size_t i; |
395 | 0 | if(updata->trust > cachedata->trust) |
396 | 0 | cachedata->trust = updata->trust; |
397 | 0 | cachedata->security = updata->security; |
398 | | /* for NS records, only allow shorter TTLs; for other types, update it */
399 | 0 | if(ntohs(rrset->rk.type) != LDNS_RR_TYPE_NS || |
400 | 0 | updata->ttl+now < cachedata->ttl || |
401 | 0 | cachedata->ttl < now || |
402 | 0 | updata->security == sec_status_bogus) { |
403 | 0 | cachedata->ttl = updata->ttl + now; |
404 | 0 | for(i=0; i<cachedata->count+cachedata->rrsig_count; i++) |
405 | 0 | cachedata->rr_ttl[i] = updata->rr_ttl[i]+now; |
406 | 0 | cachedata->ttl_add = now; |
407 | 0 | } |
408 | 0 | } |
409 | 0 | lock_rw_unlock(&e->lock); |
410 | 0 | } |
411 | | |
412 | | void |
413 | | rrset_check_sec_status(struct rrset_cache* r, |
414 | | struct ub_packed_rrset_key* rrset, time_t now) |
415 | 0 | { |
416 | 0 | struct packed_rrset_data* updata = |
417 | 0 | (struct packed_rrset_data*)rrset->entry.data; |
418 | 0 | struct lruhash_entry* e; |
419 | 0 | struct packed_rrset_data* cachedata; |
420 | | |
421 | | /* hash it again to make sure it has a hash */ |
422 | 0 | rrset->entry.hash = rrset_key_hash(&rrset->rk); |
423 | |
424 | 0 | e = slabhash_lookup(&r->table, rrset->entry.hash, rrset, 0); |
425 | 0 | if(!e) |
426 | 0 | return; /* not in the cache anymore */ |
427 | 0 | cachedata = (struct packed_rrset_data*)e->data; |
428 | 0 | if(now > cachedata->ttl || !rrsetdata_equal(updata, cachedata)) { |
429 | 0 | lock_rw_unlock(&e->lock); |
430 | 0 | return; /* expired, or rrset has changed in the meantime */ |
431 | 0 | } |
432 | 0 | if(cachedata->security > updata->security) { |
433 | 0 | updata->security = cachedata->security; |
434 | 0 | if(cachedata->security == sec_status_bogus) { |
435 | 0 | size_t i; |
436 | 0 | updata->ttl = cachedata->ttl - now; |
437 | 0 | for(i=0; i<cachedata->count+cachedata->rrsig_count; i++) |
438 | 0 | if(cachedata->rr_ttl[i] < now) |
439 | 0 | updata->rr_ttl[i] = 0; |
440 | 0 | else updata->rr_ttl[i] = |
441 | 0 | cachedata->rr_ttl[i]-now; |
442 | 0 | } |
443 | 0 | if(cachedata->trust > updata->trust) |
444 | 0 | updata->trust = cachedata->trust; |
445 | 0 | } |
446 | 0 | lock_rw_unlock(&e->lock); |
447 | 0 | } |
448 | | |
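| | /*
| |  * The qname and qnamelen arguments are updated in place as the loop
| |  * walks up the name, one label at a time, towards the root or qnametop.
| |  */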
449 | | void |
450 | | rrset_cache_remove_above(struct rrset_cache* r, uint8_t** qname, size_t* |
451 | | qnamelen, uint16_t searchtype, uint16_t qclass, time_t now, uint8_t* |
452 | | qnametop, size_t qnametoplen) |
453 | 0 | { |
454 | 0 | struct ub_packed_rrset_key *rrset; |
455 | 0 | uint8_t lablen; |
456 | |
457 | 0 | while(*qnamelen > 0) { |
458 | | /* look one label higher */ |
459 | 0 | lablen = **qname; |
460 | 0 | *qname += lablen + 1; |
461 | 0 | *qnamelen -= lablen + 1; |
462 | 0 | if(*qnamelen <= 0) |
463 | 0 | return; |
464 | | |
465 | | /* stop at qnametop */ |
466 | 0 | if(qnametop && *qnamelen == qnametoplen && |
467 | 0 | query_dname_compare(*qname, qnametop)==0) |
468 | 0 | return; |
469 | | |
470 | 0 | if(verbosity >= VERB_ALGO) { |
471 | | /* looks up with a time of 0, to see expired entries */ |
472 | 0 | if((rrset = rrset_cache_lookup(r, *qname, |
473 | 0 | *qnamelen, searchtype, qclass, 0, 0, 0))) { |
474 | 0 | struct packed_rrset_data* data = |
475 | 0 | (struct packed_rrset_data*)rrset->entry.data; |
476 | 0 | int expired = (now > data->ttl); |
477 | 0 | lock_rw_unlock(&rrset->entry.lock); |
478 | 0 | if(expired) |
479 | 0 | log_nametypeclass(verbosity, "this " |
480 | 0 | "(grand)parent rrset will be " |
481 | 0 | "removed (expired)", |
482 | 0 | *qname, searchtype, qclass); |
483 | 0 | else log_nametypeclass(verbosity, "this " |
484 | 0 | "(grand)parent rrset will be " |
485 | 0 | "removed", |
486 | 0 | *qname, searchtype, qclass); |
487 | 0 | } |
488 | 0 | } |
489 | 0 | rrset_cache_remove(r, *qname, *qnamelen, searchtype, qclass, 0); |
490 | 0 | } |
491 | 0 | } |
492 | | |
493 | | int |
494 | | rrset_cache_expired_above(struct rrset_cache* r, uint8_t** qname, size_t* |
495 | | qnamelen, uint16_t searchtype, uint16_t qclass, time_t now, uint8_t* |
496 | | qnametop, size_t qnametoplen) |
497 | 0 | { |
498 | 0 | struct ub_packed_rrset_key *rrset; |
499 | 0 | uint8_t lablen; |
500 | |
501 | 0 | while(*qnamelen > 0) { |
502 | | /* look one label higher */ |
503 | 0 | lablen = **qname; |
504 | 0 | *qname += lablen + 1; |
505 | 0 | *qnamelen -= lablen + 1; |
506 | 0 | if(*qnamelen <= 0) |
507 | 0 | break; |
508 | | |
509 | | /* looks up with a time of 0, to see expired entries */ |
510 | 0 | if((rrset = rrset_cache_lookup(r, *qname, |
511 | 0 | *qnamelen, searchtype, qclass, 0, 0, 0))) { |
512 | 0 | struct packed_rrset_data* data = |
513 | 0 | (struct packed_rrset_data*)rrset->entry.data; |
514 | 0 | if(now > data->ttl) { |
515 | | /* it is expired, this is not wanted */ |
516 | 0 | lock_rw_unlock(&rrset->entry.lock); |
517 | 0 | log_nametypeclass(VERB_ALGO, "this rrset is expired", *qname, searchtype, qclass); |
518 | 0 | return 1; |
519 | 0 | } |
520 | | /* it is not expired, continue looking */ |
521 | 0 | lock_rw_unlock(&rrset->entry.lock); |
522 | 0 | } |
523 | | |
524 | | /* do not look above the qnametop. */ |
525 | 0 | if(qnametop && *qnamelen == qnametoplen && |
526 | 0 | query_dname_compare(*qname, qnametop)==0) |
527 | 0 | break; |
528 | 0 | } |
529 | 0 | return 0; |
530 | 0 | } |
531 | | |
532 | | void rrset_cache_remove(struct rrset_cache* r, uint8_t* nm, size_t nmlen, |
533 | | uint16_t type, uint16_t dclass, uint32_t flags) |
534 | 0 | { |
535 | 0 | struct ub_packed_rrset_key key; |
536 | 0 | key.entry.key = &key; |
537 | 0 | key.rk.dname = nm; |
538 | 0 | key.rk.dname_len = nmlen; |
539 | 0 | key.rk.rrset_class = htons(dclass); |
540 | 0 | key.rk.type = htons(type); |
541 | 0 | key.rk.flags = flags; |
542 | 0 | key.entry.hash = rrset_key_hash(&key.rk); |
543 | 0 | slabhash_remove(&r->table, key.entry.hash, &key); |
544 | 0 | } |
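| |
| | /*
| |  * Usage sketch (illustrative only): a read-only lookup followed by the
| |  * required unlock. It assumes a module environment pointer env, such as
| |  * Unbound's struct module_env (env->rrset_cache, env->now), and a filled
| |  * struct query_info qinfo; those names live outside this file and are
| |  * assumptions for the example.
| |  *
| |  *	struct ub_packed_rrset_key* rrset = rrset_cache_lookup(
| |  *		env->rrset_cache, qinfo.qname, qinfo.qname_len,
| |  *		qinfo.qtype, qinfo.qclass, 0, *env->now, 0);
| |  *	if(rrset) {
| |  *		struct packed_rrset_data* d =
| |  *			(struct packed_rrset_data*)rrset->entry.data;
| |  *		log_info("cached rrset with %u RRs", (unsigned)d->count);
| |  *		lock_rw_unlock(&rrset->entry.lock);
| |  *	}
| |  */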