/src/unbound/services/cache/rrset.c
Line | Count | Source
1 | | /* |
2 | | * services/cache/rrset.c - Resource record set cache. |
3 | | * |
4 | | * Copyright (c) 2007, NLnet Labs. All rights reserved. |
5 | | * |
6 | | * This software is open source. |
7 | | * |
8 | | * Redistribution and use in source and binary forms, with or without |
9 | | * modification, are permitted provided that the following conditions |
10 | | * are met: |
11 | | * |
12 | | * Redistributions of source code must retain the above copyright notice, |
13 | | * this list of conditions and the following disclaimer. |
14 | | * |
15 | | * Redistributions in binary form must reproduce the above copyright notice, |
16 | | * this list of conditions and the following disclaimer in the documentation |
17 | | * and/or other materials provided with the distribution. |
18 | | * |
19 | | * Neither the name of the NLNET LABS nor the names of its contributors may |
20 | | * be used to endorse or promote products derived from this software without |
21 | | * specific prior written permission. |
22 | | * |
23 | | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
24 | | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
25 | | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
26 | | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
27 | | * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
28 | | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED |
29 | | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
30 | | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
31 | | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
32 | | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
33 | | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
34 | | */ |
35 | | |
36 | | /** |
37 | | * \file |
38 | | * |
39 | | * This file contains the rrset cache. |
40 | | */ |
41 | | #include "config.h" |
42 | | #include "services/cache/rrset.h" |
43 | | #include "sldns/rrdef.h" |
44 | | #include "util/storage/slabhash.h" |
45 | | #include "util/config_file.h" |
46 | | #include "util/data/packed_rrset.h" |
47 | | #include "util/data/msgreply.h" |
48 | | #include "util/data/msgparse.h" |
49 | | #include "util/regional.h" |
50 | | #include "util/alloc.h" |
51 | | #include "util/net_help.h" |
52 | | |
53 | | void |
54 | | rrset_markdel(void* key) |
55 | 304 | { |
56 | 304 | struct ub_packed_rrset_key* r = (struct ub_packed_rrset_key*)key; |
57 | 304 | r->id = 0; |
58 | 304 | } |
59 | | |
60 | | struct rrset_cache* rrset_cache_create(struct config_file* cfg, |
61 | | struct alloc_cache* alloc) |
62 | 4.59k | { |
63 | 4.59k | size_t slabs = (cfg?cfg->rrset_cache_slabs:HASH_DEFAULT_SLABS); |
64 | 4.59k | size_t startarray = HASH_DEFAULT_STARTARRAY; |
65 | 4.59k | size_t maxmem = (cfg?cfg->rrset_cache_size:HASH_DEFAULT_MAXMEM); |
66 | | |
67 | 4.59k | struct rrset_cache *r = (struct rrset_cache*)slabhash_create(slabs, |
68 | 4.59k | startarray, maxmem, ub_rrset_sizefunc, ub_rrset_compare, |
69 | 4.59k | ub_rrset_key_delete, rrset_data_delete, alloc); |
70 | 4.59k | slabhash_setmarkdel(&r->table, &rrset_markdel); |
71 | 4.59k | return r; |
72 | 4.59k | } |
73 | | |
74 | | void rrset_cache_delete(struct rrset_cache* r) |
75 | 4.59k | { |
76 | 4.59k | if(!r) |
77 | 0 | return; |
78 | 4.59k | slabhash_delete(&r->table); |
79 | | /* slabhash delete also does free(r), since table is first in struct*/ |
80 | 4.59k | } |
81 | | |
82 | | struct rrset_cache* rrset_cache_adjust(struct rrset_cache *r, |
83 | | struct config_file* cfg, struct alloc_cache* alloc) |
84 | 0 | { |
85 | 0 | if(!r || !cfg || !slabhash_is_size(&r->table, cfg->rrset_cache_size, |
86 | 0 | cfg->rrset_cache_slabs)) |
87 | 0 | { |
88 | 0 | rrset_cache_delete(r); |
89 | 0 | r = rrset_cache_create(cfg, alloc); |
90 | 0 | } |
91 | 0 | return r; |
92 | 0 | } |
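/*
 * Editor's note: minimal lifecycle sketch, not part of this file or the
 * coverage run. It assumes cfg and alloc are set up elsewhere (the daemon's
 * config_file and a per-thread alloc_cache); the function name is
 * hypothetical.
 */
static struct rrset_cache*
rrset_cache_lifecycle_sketch(struct config_file* cfg,
	struct alloc_cache* alloc)
{
	/* size the cache from cfg; a NULL cfg uses the built-in defaults */
	struct rrset_cache* rc = rrset_cache_create(cfg, alloc);
	/* after a config reload: keep the cache if size and slabs are
	 * unchanged, otherwise it is deleted and recreated */
	rc = rrset_cache_adjust(rc, cfg, alloc);
	return rc; /* the owner eventually calls rrset_cache_delete(rc) */
}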
93 | | |
94 | | void |
95 | | rrset_cache_touch(struct rrset_cache* r, struct ub_packed_rrset_key* key, |
96 | | hashvalue_type hash, rrset_id_type id) |
97 | 0 | { |
98 | 0 | struct lruhash* table = slabhash_gettable(&r->table, hash); |
99 | | /* |
100 | | * This leads to locking problems, deadlocks, if the caller is |
101 | | * holding any other rrset lock. |
102 | | * Because a lookup through the hashtable does: |
103 | | * tablelock -> entrylock (for that entry caller holds) |
104 | | * And this would do |
105 | | * entrylock(already held) -> tablelock |
106 | | * And if two threads do this, it results in deadlock. |
107 | | * So, the caller must not hold entrylock. |
108 | | */ |
109 | 0 | lock_quick_lock(&table->lock); |
110 | | /* we have locked the hash table, the item can still be deleted. |
111 | | * because it could already have been reclaimed, but not yet set id=0. |
112 | | * This is because some lruhash routines have lazy deletion. |
113 | | * so, we must acquire a lock on the item to verify the id != 0. |
114 | | * also, with hash not changed, we are using the right slab. |
115 | | */ |
116 | 0 | lock_rw_rdlock(&key->entry.lock); |
117 | 0 | if(key->id == id && key->entry.hash == hash) { |
118 | 0 | lru_touch(table, &key->entry); |
119 | 0 | } |
120 | 0 | lock_rw_unlock(&key->entry.lock); |
121 | 0 | lock_quick_unlock(&table->lock); |
122 | 0 | } |
123 | | |
124 | | /** see if rrset needs to be updated in the cache */ |
125 | | static int |
126 | | need_to_update_rrset(void* nd, void* cd, time_t timenow, int equal, int ns) |
127 | 0 | { |
128 | 0 | struct packed_rrset_data* newd = (struct packed_rrset_data*)nd; |
129 | 0 | struct packed_rrset_data* cached = (struct packed_rrset_data*)cd; |
130 | | /* o store if rrset has been validated |
131 | | * everything better than bogus data |
132 | | * secure is preferred */ |
133 | 0 | if( newd->security == sec_status_secure && |
134 | 0 | cached->security != sec_status_secure) |
135 | 0 | return 1; |
136 | 0 | if( cached->security == sec_status_bogus && |
137 | 0 | newd->security != sec_status_bogus && !equal) |
138 | 0 | return 1; |
139 | | /* o if current RRset is more trustworthy - insert it */ |
140 | 0 | if( newd->trust > cached->trust ) { |
141 | | /* if the cached rrset is bogus, and this one equal, |
142 | | * do not update the TTL - let it expire. */ |
143 | 0 | if(equal && cached->ttl >= timenow && |
144 | 0 | cached->security == sec_status_bogus) |
145 | 0 | return 0; |
146 | 0 | return 1; |
147 | 0 | } |
148 | | /* o item in cache has expired */ |
149 | 0 | if( cached->ttl < timenow ) |
150 | 0 | return 1; |
151 | | /* o same trust, but different in data - insert it */ |
152 | 0 | if( newd->trust == cached->trust && !equal ) { |
153 | | /* if this is type NS, do not 'stick' to owner that changes |
154 | | * the NS RRset, but use the old TTL for the new data, and |
155 | | * update to fetch the latest data. ttl is not expired, because |
156 | | * that check was before this one. */ |
157 | 0 | if(ns) { |
158 | 0 | size_t i; |
159 | 0 | newd->ttl = cached->ttl; |
160 | 0 | for(i=0; i<(newd->count+newd->rrsig_count); i++) |
161 | 0 | if(newd->rr_ttl[i] > newd->ttl) |
162 | 0 | newd->rr_ttl[i] = newd->ttl; |
163 | 0 | } |
164 | 0 | return 1; |
165 | 0 | } |
166 | 0 | return 0; |
167 | 0 | } |
168 | | |
169 | | /** Update RRSet special key ID */ |
170 | | static void |
171 | | rrset_update_id(struct rrset_ref* ref, struct alloc_cache* alloc) |
172 | 0 | { |
173 | | /* this may clear the cache and invalidate lock below */ |
174 | 0 | uint64_t newid = alloc_get_id(alloc); |
175 | | /* obtain writelock */ |
176 | 0 | lock_rw_wrlock(&ref->key->entry.lock); |
177 | | /* check if it was deleted in the meantime, if so, skip update */ |
178 | 0 | if(ref->key->id == ref->id) { |
179 | 0 | ref->key->id = newid; |
180 | 0 | ref->id = newid; |
181 | 0 | } |
182 | 0 | lock_rw_unlock(&ref->key->entry.lock); |
183 | 0 | } |
184 | | |
185 | | int |
186 | | rrset_cache_update(struct rrset_cache* r, struct rrset_ref* ref, |
187 | | struct alloc_cache* alloc, time_t timenow) |
188 | 12.9k | { |
189 | 12.9k | struct lruhash_entry* e; |
190 | 12.9k | struct ub_packed_rrset_key* k = ref->key; |
191 | 12.9k | hashvalue_type h = k->entry.hash; |
192 | 12.9k | uint16_t rrset_type = ntohs(k->rk.type); |
193 | 12.9k | int equal = 0; |
194 | 12.9k | log_assert(ref->id != 0 && k->id != 0); |
195 | 12.9k | log_assert(k->rk.dname != NULL); |
196 | | /* looks up item with a readlock - no editing! */ |
197 | 12.9k | if((e=slabhash_lookup(&r->table, h, k, 0)) != 0) { |
198 | | /* return id and key as they will be used in the cache |
199 | | * since the lruhash_insert, if item already exists, deallocs |
200 | | * the passed key in favor of the already stored key. |
201 | | * because of the small gap (see below) this key ptr and id |
202 | | * may prove later to be already deleted, which is no problem |
203 | | * as it only makes a cache miss. |
204 | | */ |
205 | 0 | ref->key = (struct ub_packed_rrset_key*)e->key; |
206 | 0 | ref->id = ref->key->id; |
207 | 0 | equal = rrsetdata_equal((struct packed_rrset_data*)k->entry. |
208 | 0 | data, (struct packed_rrset_data*)e->data); |
209 | 0 | if(!need_to_update_rrset(k->entry.data, e->data, timenow, |
210 | 0 | equal, (rrset_type==LDNS_RR_TYPE_NS))) { |
211 | | /* cache is superior, return that value */ |
212 | 0 | lock_rw_unlock(&e->lock); |
213 | 0 | ub_packed_rrset_parsedelete(k, alloc); |
214 | 0 | if(equal) return 2; |
215 | 0 | return 1; |
216 | 0 | } |
217 | 0 | lock_rw_unlock(&e->lock); |
218 | | /* Go on and insert the passed item. |
219 | | * small gap here, where entry is not locked. |
220 | | * possibly entry is updated with something else. |
221 | | * we then overwrite that with our data. |
222 | | * this is just too bad, its cache anyway. */ |
223 | | /* use insert to update entry to manage lruhash |
224 | | * cache size values nicely. */ |
225 | 0 | } |
226 | 12.9k | log_assert(ref->key->id != 0); |
227 | 12.9k | slabhash_insert(&r->table, h, &k->entry, k->entry.data, alloc); |
228 | 12.9k | if(e) { |
229 | | /* For NSEC, NSEC3, DNAME, when rdata is updated, update |
230 | | * the ID number so that proofs in message cache are |
231 | | * invalidated */ |
232 | 0 | if((rrset_type == LDNS_RR_TYPE_NSEC |
233 | 0 | || rrset_type == LDNS_RR_TYPE_NSEC3 |
234 | 0 | || rrset_type == LDNS_RR_TYPE_DNAME) && !equal) { |
235 | 0 | rrset_update_id(ref, alloc); |
236 | 0 | } |
237 | 0 | return 1; |
238 | 0 | } |
239 | 12.9k | return 0; |
240 | 12.9k | } |
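/*
 * Editor's note: caller sketch, not part of this file. It assumes refs[]
 * was filled from a parsed reply, with each ref.key a specially allocated
 * ub_packed_rrset_key whose entry.hash and id are already set; names ending
 * in _sketch are hypothetical.
 */
static void
store_rrsets_sketch(struct rrset_cache* r, struct rrset_ref* refs,
	size_t num, struct alloc_cache* alloc, time_t now)
{
	size_t i;
	for(i=0; i<num; i++) {
		/* on return refs[i].key/id may have been switched to the key
		 * already in the cache; 0 means our key was inserted as-is */
		(void)rrset_cache_update(r, &refs[i], alloc, now);
	}
}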
241 | | |
242 | | void rrset_cache_update_wildcard(struct rrset_cache* rrset_cache, |
243 | | struct ub_packed_rrset_key* rrset, uint8_t* ce, size_t ce_len, |
244 | | struct alloc_cache* alloc, time_t timenow) |
245 | 0 | { |
246 | 0 | struct rrset_ref ref; |
247 | 0 | uint8_t wc_dname[LDNS_MAX_DOMAINLEN+3]; |
248 | 0 | rrset = packed_rrset_copy_alloc(rrset, alloc, timenow); |
249 | 0 | if(!rrset) { |
250 | 0 | log_err("malloc failure in rrset_cache_update_wildcard"); |
251 | 0 | return; |
252 | 0 | } |
253 | | /* ce has at least one label less than qname, we can therefore safely
254 | | * add the wildcard label. */ |
255 | 0 | wc_dname[0] = 1; |
256 | 0 | wc_dname[1] = (uint8_t)'*'; |
257 | 0 | memmove(wc_dname+2, ce, ce_len); |
258 | |
259 | 0 | free(rrset->rk.dname); |
260 | 0 | rrset->rk.dname_len = ce_len + 2; |
261 | 0 | rrset->rk.dname = (uint8_t*)memdup(wc_dname, rrset->rk.dname_len); |
262 | 0 | if(!rrset->rk.dname) { |
263 | 0 | alloc_special_release(alloc, rrset); |
264 | 0 | log_err("memdup failure in rrset_cache_update_wildcard"); |
265 | 0 | return; |
266 | 0 | } |
267 | | |
268 | 0 | rrset->entry.hash = rrset_key_hash(&rrset->rk); |
269 | 0 | ref.key = rrset; |
270 | 0 | ref.id = rrset->id; |
271 | | /* ignore ret: if it was in the cache, ref updated */ |
272 | 0 | (void)rrset_cache_update(rrset_cache, &ref, alloc, timenow); |
273 | 0 | } |
274 | | |
275 | | struct ub_packed_rrset_key* |
276 | | rrset_cache_lookup(struct rrset_cache* r, uint8_t* qname, size_t qnamelen, |
277 | | uint16_t qtype, uint16_t qclass, uint32_t flags, time_t timenow, |
278 | | int wr) |
279 | 0 | { |
280 | 0 | struct lruhash_entry* e; |
281 | 0 | struct ub_packed_rrset_key key; |
282 | | |
283 | 0 | key.entry.key = &key; |
284 | 0 | key.entry.data = NULL; |
285 | 0 | key.rk.dname = qname; |
286 | 0 | key.rk.dname_len = qnamelen; |
287 | 0 | key.rk.type = htons(qtype); |
288 | 0 | key.rk.rrset_class = htons(qclass); |
289 | 0 | key.rk.flags = flags; |
290 | |
291 | 0 | key.entry.hash = rrset_key_hash(&key.rk); |
292 | |
293 | 0 | if((e = slabhash_lookup(&r->table, key.entry.hash, &key, wr))) { |
294 | | /* check TTL */ |
295 | 0 | struct packed_rrset_data* data = |
296 | 0 | (struct packed_rrset_data*)e->data; |
297 | 0 | if(timenow > data->ttl) { |
298 | 0 | lock_rw_unlock(&e->lock); |
299 | 0 | return NULL; |
300 | 0 | } |
301 | | /* we're done */ |
302 | 0 | return (struct ub_packed_rrset_key*)e->key; |
303 | 0 | } |
304 | 0 | return NULL; |
305 | 0 | } |
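/*
 * Editor's note: usage sketch, not part of this file. On a cache hit the
 * entry comes back locked (read- or write-locked depending on wr), so the
 * caller must unlock it; the wireformat name and function name are example
 * values.
 */
static int
have_a_rrset_sketch(struct rrset_cache* r, time_t now)
{
	/* "example.com." in wire format: \7 e x a m p l e \3 c o m \0 */
	uint8_t qname[] = "\007example\003com";
	struct ub_packed_rrset_key* k = rrset_cache_lookup(r, qname,
		sizeof(qname), LDNS_RR_TYPE_A, LDNS_RR_CLASS_IN, 0, now, 0);
	if(!k)
		return 0; /* miss, or present but expired */
	/* ... read (struct packed_rrset_data*)k->entry.data while locked ... */
	lock_rw_unlock(&k->entry.lock); /* release the lock taken by lookup */
	return 1;
}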
306 | | |
307 | | int |
308 | | rrset_array_lock(struct rrset_ref* ref, size_t count, time_t timenow) |
309 | 0 | { |
310 | 0 | size_t i; |
311 | 0 | for(i=0; i<count; i++) { |
312 | 0 | if(i>0 && ref[i].key == ref[i-1].key) |
313 | 0 | continue; /* only lock items once */ |
314 | 0 | lock_rw_rdlock(&ref[i].key->entry.lock); |
315 | 0 | if(ref[i].id != ref[i].key->id || timenow > |
316 | 0 | ((struct packed_rrset_data*)(ref[i].key->entry.data)) |
317 | 0 | ->ttl) { |
318 | | /* failure! rollback our readlocks */ |
319 | 0 | rrset_array_unlock(ref, i+1); |
320 | 0 | return 0; |
321 | 0 | } |
322 | 0 | } |
323 | 0 | return 1; |
324 | 0 | } |
325 | | |
326 | | void |
327 | | rrset_array_unlock(struct rrset_ref* ref, size_t count) |
328 | 0 | { |
329 | 0 | size_t i; |
330 | 0 | for(i=0; i<count; i++) { |
331 | 0 | if(i>0 && ref[i].key == ref[i-1].key) |
332 | 0 | continue; /* only unlock items once */ |
333 | 0 | lock_rw_unlock(&ref[i].key->entry.lock); |
334 | 0 | } |
335 | 0 | } |
336 | | |
337 | | void |
338 | | rrset_array_unlock_touch(struct rrset_cache* r, struct regional* scratch, |
339 | | struct rrset_ref* ref, size_t count) |
340 | 0 | { |
341 | 0 | hashvalue_type* h; |
342 | 0 | size_t i; |
343 | 0 | if(count > RR_COUNT_MAX || !(h = (hashvalue_type*)regional_alloc( |
344 | 0 | scratch, sizeof(hashvalue_type)*count))) { |
345 | 0 | log_warn("rrset LRU: memory allocation failed"); |
346 | 0 | h = NULL; |
347 | 0 | } else /* store hash values */ |
348 | 0 | for(i=0; i<count; i++) |
349 | 0 | h[i] = ref[i].key->entry.hash; |
350 | | /* unlock */ |
351 | 0 | for(i=0; i<count; i++) { |
352 | 0 | if(i>0 && ref[i].key == ref[i-1].key) |
353 | 0 | continue; /* only unlock items once */ |
354 | 0 | lock_rw_unlock(&ref[i].key->entry.lock); |
355 | 0 | } |
356 | 0 | if(h) { |
357 | | /* LRU touch, with no rrset locks held */ |
358 | 0 | for(i=0; i<count; i++) { |
359 | 0 | if(i>0 && ref[i].key == ref[i-1].key) |
360 | 0 | continue; /* only touch items once */ |
361 | 0 | rrset_cache_touch(r, ref[i].key, h[i], ref[i].id); |
362 | 0 | } |
363 | 0 | } |
364 | 0 | } |
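/*
 * Editor's note: sketch of the intended lock/use/unlock-touch pattern, not
 * part of this file. It assumes refs[] references rrsets from a cached
 * reply and scratch is a per-query regional; the function name is
 * hypothetical.
 */
static int
use_reply_rrsets_sketch(struct rrset_cache* r, struct regional* scratch,
	struct rrset_ref* refs, size_t num, time_t now)
{
	/* take read locks on all referenced rrsets and check id and TTL;
	 * on failure everything is already unlocked again */
	if(!rrset_array_lock(refs, num, now))
		return 0;
	/* ... safely read the data behind refs[i].key here ... */
	/* release the locks, then LRU-touch the entries (no locks held) */
	rrset_array_unlock_touch(r, scratch, refs, num);
	return 1;
}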
365 | | |
366 | | void |
367 | | rrset_update_sec_status(struct rrset_cache* r, |
368 | | struct ub_packed_rrset_key* rrset, time_t now) |
369 | 0 | { |
370 | 0 | struct packed_rrset_data* updata = |
371 | 0 | (struct packed_rrset_data*)rrset->entry.data; |
372 | 0 | struct lruhash_entry* e; |
373 | 0 | struct packed_rrset_data* cachedata; |
374 | | |
375 | | /* hash it again to make sure it has a hash */ |
376 | 0 | rrset->entry.hash = rrset_key_hash(&rrset->rk); |
377 | |
378 | 0 | e = slabhash_lookup(&r->table, rrset->entry.hash, rrset, 1); |
379 | 0 | if(!e) |
380 | 0 | return; /* not in the cache anymore */ |
381 | 0 | cachedata = (struct packed_rrset_data*)e->data; |
382 | 0 | if(!rrsetdata_equal(updata, cachedata)) { |
383 | 0 | lock_rw_unlock(&e->lock); |
384 | 0 | return; /* rrset has changed in the meantime */ |
385 | 0 | } |
386 | | /* update the cached rrset */ |
387 | 0 | if(updata->security > cachedata->security) { |
388 | 0 | size_t i; |
389 | 0 | if(updata->trust > cachedata->trust) |
390 | 0 | cachedata->trust = updata->trust; |
391 | 0 | cachedata->security = updata->security; |
392 | | /* for NS records only shorter TTLs, other types: update it */ |
393 | 0 | if(ntohs(rrset->rk.type) != LDNS_RR_TYPE_NS || |
394 | 0 | updata->ttl+now < cachedata->ttl || |
395 | 0 | cachedata->ttl < now || |
396 | 0 | updata->security == sec_status_bogus) { |
397 | 0 | cachedata->ttl = updata->ttl + now; |
398 | 0 | for(i=0; i<cachedata->count+cachedata->rrsig_count; i++) |
399 | 0 | cachedata->rr_ttl[i] = updata->rr_ttl[i]+now; |
400 | 0 | cachedata->ttl_add = now; |
401 | 0 | } |
402 | 0 | } |
403 | 0 | lock_rw_unlock(&e->lock); |
404 | 0 | } |
405 | | |
406 | | void |
407 | | rrset_check_sec_status(struct rrset_cache* r, |
408 | | struct ub_packed_rrset_key* rrset, time_t now) |
409 | 0 | { |
410 | 0 | struct packed_rrset_data* updata = |
411 | 0 | (struct packed_rrset_data*)rrset->entry.data; |
412 | 0 | struct lruhash_entry* e; |
413 | 0 | struct packed_rrset_data* cachedata; |
414 | | |
415 | | /* hash it again to make sure it has a hash */ |
416 | 0 | rrset->entry.hash = rrset_key_hash(&rrset->rk); |
417 | |
418 | 0 | e = slabhash_lookup(&r->table, rrset->entry.hash, rrset, 0); |
419 | 0 | if(!e) |
420 | 0 | return; /* not in the cache anymore */ |
421 | 0 | cachedata = (struct packed_rrset_data*)e->data; |
422 | 0 | if(now > cachedata->ttl || !rrsetdata_equal(updata, cachedata)) { |
423 | 0 | lock_rw_unlock(&e->lock); |
424 | 0 | return; /* expired, or rrset has changed in the meantime */ |
425 | 0 | } |
426 | 0 | if(cachedata->security > updata->security) { |
427 | 0 | updata->security = cachedata->security; |
428 | 0 | if(cachedata->security == sec_status_bogus) { |
429 | 0 | size_t i; |
430 | 0 | updata->ttl = cachedata->ttl - now; |
431 | 0 | for(i=0; i<cachedata->count+cachedata->rrsig_count; i++) |
432 | 0 | if(cachedata->rr_ttl[i] < now) |
433 | 0 | updata->rr_ttl[i] = 0; |
434 | 0 | else updata->rr_ttl[i] = |
435 | 0 | cachedata->rr_ttl[i]-now; |
436 | 0 | } |
437 | 0 | if(cachedata->trust > updata->trust) |
438 | 0 | updata->trust = cachedata->trust; |
439 | 0 | } |
440 | 0 | lock_rw_unlock(&e->lock); |
441 | 0 | } |
442 | | |
443 | | void rrset_cache_remove(struct rrset_cache* r, uint8_t* nm, size_t nmlen, |
444 | | uint16_t type, uint16_t dclass, uint32_t flags) |
445 | 0 | { |
446 | 0 | struct ub_packed_rrset_key key; |
447 | 0 | key.entry.key = &key; |
448 | 0 | key.rk.dname = nm; |
449 | 0 | key.rk.dname_len = nmlen; |
450 | 0 | key.rk.rrset_class = htons(dclass); |
451 | 0 | key.rk.type = htons(type); |
452 | 0 | key.rk.flags = flags; |
453 | 0 | key.entry.hash = rrset_key_hash(&key.rk); |
454 | 0 | slabhash_remove(&r->table, key.entry.hash, &key); |
455 | 0 | } |
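/*
 * Editor's note: removal sketch, not part of this file; the wireformat name
 * and function name are example values.
 */
static void
flush_a_rrset_sketch(struct rrset_cache* r)
{
	uint8_t nm[] = "\007example\003com";
	/* drop the cached A rrset for example.com, class IN, no flags */
	rrset_cache_remove(r, nm, sizeof(nm), LDNS_RR_TYPE_A,
		LDNS_RR_CLASS_IN, 0);
}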