/src/unbound/services/authzone.c
Line | Count | Source |
1 | | /* |
2 | | * services/authzone.c - authoritative zone that is locally hosted. |
3 | | * |
4 | | * Copyright (c) 2017, NLnet Labs. All rights reserved. |
5 | | * |
6 | | * This software is open source. |
7 | | * |
8 | | * Redistribution and use in source and binary forms, with or without |
9 | | * modification, are permitted provided that the following conditions |
10 | | * are met: |
11 | | * |
12 | | * Redistributions of source code must retain the above copyright notice, |
13 | | * this list of conditions and the following disclaimer. |
14 | | * |
15 | | * Redistributions in binary form must reproduce the above copyright notice, |
16 | | * this list of conditions and the following disclaimer in the documentation |
17 | | * and/or other materials provided with the distribution. |
18 | | * |
19 | | * Neither the name of the NLNET LABS nor the names of its contributors may |
20 | | * be used to endorse or promote products derived from this software without |
21 | | * specific prior written permission. |
22 | | * |
23 | | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
24 | | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
25 | | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
26 | | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
27 | | * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
28 | | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED |
29 | | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
30 | | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
31 | | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
32 | | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
33 | | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
34 | | */ |
35 | | |
36 | | /** |
37 | | * \file |
38 | | * |
39 | | * This file contains the functions for an authority zone. This zone |
40 | | * is queried by the iterator, just like a stub or forward zone, but then |
41 | | * the data is locally held. |
42 | | */ |
43 | | |
44 | | #include "config.h" |
45 | | #include "services/authzone.h" |
46 | | #include "util/data/dname.h" |
47 | | #include "util/data/msgparse.h" |
48 | | #include "util/data/msgreply.h" |
49 | | #include "util/data/msgencode.h" |
50 | | #include "util/data/packed_rrset.h" |
51 | | #include "util/regional.h" |
52 | | #include "util/net_help.h" |
53 | | #include "util/netevent.h" |
54 | | #include "util/config_file.h" |
55 | | #include "util/log.h" |
56 | | #include "util/module.h" |
57 | | #include "util/random.h" |
58 | | #include "services/cache/dns.h" |
59 | | #include "services/outside_network.h" |
60 | | #include "services/listen_dnsport.h" |
61 | | #include "services/mesh.h" |
62 | | #include "sldns/rrdef.h" |
63 | | #include "sldns/pkthdr.h" |
64 | | #include "sldns/sbuffer.h" |
65 | | #include "sldns/str2wire.h" |
66 | | #include "sldns/wire2str.h" |
67 | | #include "sldns/parseutil.h" |
68 | | #include "sldns/keyraw.h" |
69 | | #include "validator/val_nsec3.h" |
70 | | #include "validator/val_nsec.h" |
71 | | #include "validator/val_secalgo.h" |
72 | | #include "validator/val_sigcrypt.h" |
73 | | #include "validator/val_anchor.h" |
74 | | #include "validator/val_utils.h" |
75 | | #include <ctype.h> |
76 | | |
77 | | /** bytes to use for NSEC3 hash buffer. 20 for sha1 */ |
78 | | #define N3HASHBUFLEN 32 |
79 | | /** max number of CNAMEs we are willing to follow (in one answer) */ |
80 | 0 | #define MAX_CNAME_CHAIN 8 |
81 | | /** timeout for probe packets for SOA */ |
82 | 0 | #define AUTH_PROBE_TIMEOUT 100 /* msec */ |
83 | | /** when to stop with SOA probes (when exponential timeouts exceed this) */ |
84 | 0 | #define AUTH_PROBE_TIMEOUT_STOP 1000 /* msec */ |
85 | | /* auth transfer timeout for TCP connections, in msec */ |
86 | 0 | #define AUTH_TRANSFER_TIMEOUT 10000 /* msec */ |
87 | | /* auth transfer max backoff for failed transfers and probes */ |
88 | 0 | #define AUTH_TRANSFER_MAX_BACKOFF 86400 /* sec */ |
89 | | /* auth http port number */ |
90 | 0 | #define AUTH_HTTP_PORT 80 |
91 | | /* auth https port number */ |
92 | 0 | #define AUTH_HTTPS_PORT 443 |
93 | | /* max depth for nested $INCLUDEs */ |
94 | 0 | #define MAX_INCLUDE_DEPTH 10 |
95 | | /** number of timeouts before we fall back from IXFR to AXFR,
96 | | * because some versions of servers (e.g. dnsmasq) drop IXFR packets. */
97 | 0 | #define NUM_TIMEOUTS_FALLBACK_IXFR 3 |
98 | | |
99 | | /** pick up nextprobe task to start waiting to perform transfer actions */ |
100 | | static void xfr_set_timeout(struct auth_xfer* xfr, struct module_env* env, |
101 | | int failure, int lookup_only); |
102 | | /** move to sending the probe packets, next if fails. task_probe */ |
103 | | static void xfr_probe_send_or_end(struct auth_xfer* xfr, |
104 | | struct module_env* env); |
105 | | /** pick up probe task with specified (or NULL) destination first,
106 | | * or transfer task if nothing to probe, or false if already in progress */ |
107 | | static int xfr_start_probe(struct auth_xfer* xfr, struct module_env* env, |
108 | | struct auth_master* spec); |
109 | | /** delete xfer structure (not its tree entry) */ |
110 | | void auth_xfer_delete(struct auth_xfer* xfr); |
111 | | |
112 | | /** create new dns_msg */ |
113 | | static struct dns_msg* |
114 | | msg_create(struct regional* region, struct query_info* qinfo) |
115 | 0 | { |
116 | 0 | struct dns_msg* msg = (struct dns_msg*)regional_alloc(region, |
117 | 0 | sizeof(struct dns_msg)); |
118 | 0 | if(!msg) |
119 | 0 | return NULL; |
120 | 0 | msg->qinfo.qname = regional_alloc_init(region, qinfo->qname, |
121 | 0 | qinfo->qname_len); |
122 | 0 | if(!msg->qinfo.qname) |
123 | 0 | return NULL; |
124 | 0 | msg->qinfo.qname_len = qinfo->qname_len; |
125 | 0 | msg->qinfo.qtype = qinfo->qtype; |
126 | 0 | msg->qinfo.qclass = qinfo->qclass; |
127 | 0 | msg->qinfo.local_alias = NULL; |
128 | | /* non-packed reply_info, because it needs to grow the array */ |
129 | 0 | msg->rep = (struct reply_info*)regional_alloc_zero(region, |
130 | 0 | sizeof(struct reply_info)-sizeof(struct rrset_ref)); |
131 | 0 | if(!msg->rep) |
132 | 0 | return NULL; |
133 | 0 | msg->rep->flags = (uint16_t)(BIT_QR | BIT_AA); |
134 | 0 | msg->rep->authoritative = 1; |
135 | 0 | msg->rep->reason_bogus = LDNS_EDE_NONE; |
136 | 0 | msg->rep->qdcount = 1; |
137 | | /* rrsets is NULL, no rrsets yet */ |
138 | 0 | return msg; |
139 | 0 | } |
140 | | |
141 | | /** grow rrset array by one in msg */ |
142 | | static int |
143 | | msg_grow_array(struct regional* region, struct dns_msg* msg) |
144 | 0 | { |
145 | 0 | if(msg->rep->rrsets == NULL) { |
146 | 0 | msg->rep->rrsets = regional_alloc_zero(region, |
147 | 0 | sizeof(struct ub_packed_rrset_key*)*(msg->rep->rrset_count+1)); |
148 | 0 | if(!msg->rep->rrsets) |
149 | 0 | return 0; |
150 | 0 | } else { |
151 | 0 | struct ub_packed_rrset_key** rrsets_old = msg->rep->rrsets; |
152 | 0 | msg->rep->rrsets = regional_alloc_zero(region, |
153 | 0 | sizeof(struct ub_packed_rrset_key*)*(msg->rep->rrset_count+1)); |
154 | 0 | if(!msg->rep->rrsets) |
155 | 0 | return 0; |
156 | 0 | memmove(msg->rep->rrsets, rrsets_old, |
157 | 0 | sizeof(struct ub_packed_rrset_key*)*msg->rep->rrset_count); |
158 | 0 | } |
159 | 0 | return 1; |
160 | 0 | } |
161 | | |
162 | | /** get ttl of rrset */ |
163 | | static time_t |
164 | | get_rrset_ttl(struct ub_packed_rrset_key* k) |
165 | 0 | { |
166 | 0 | struct packed_rrset_data* d = (struct packed_rrset_data*) |
167 | 0 | k->entry.data; |
168 | 0 | return d->ttl; |
169 | 0 | } |
170 | | |
171 | | /** Copy rrset into region from domain-datanode and packet rrset */ |
172 | | static struct ub_packed_rrset_key* |
173 | | auth_packed_rrset_copy_region(struct auth_zone* z, struct auth_data* node, |
174 | | struct auth_rrset* rrset, struct regional* region, time_t adjust) |
175 | 0 | { |
176 | 0 | struct ub_packed_rrset_key key; |
177 | 0 | memset(&key, 0, sizeof(key)); |
178 | 0 | key.entry.key = &key; |
179 | 0 | key.entry.data = rrset->data; |
180 | 0 | key.rk.dname = node->name; |
181 | 0 | key.rk.dname_len = node->namelen; |
182 | 0 | key.rk.type = htons(rrset->type); |
183 | 0 | key.rk.rrset_class = htons(z->dclass); |
184 | 0 | key.entry.hash = rrset_key_hash(&key.rk); |
185 | 0 | return packed_rrset_copy_region(&key, region, adjust); |
186 | 0 | } |
187 | | |
188 | | /** fix up msg->rep TTL and prefetch ttl */ |
189 | | static void |
190 | | msg_ttl(struct dns_msg* msg) |
191 | 0 | { |
192 | 0 | if(msg->rep->rrset_count == 0) return; |
193 | 0 | if(msg->rep->rrset_count == 1) { |
194 | 0 | msg->rep->ttl = get_rrset_ttl(msg->rep->rrsets[0]); |
195 | 0 | msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(msg->rep->ttl); |
196 | 0 | msg->rep->serve_expired_ttl = msg->rep->ttl + SERVE_EXPIRED_TTL; |
197 | 0 | } else if(get_rrset_ttl(msg->rep->rrsets[msg->rep->rrset_count-1]) < |
198 | 0 | msg->rep->ttl) { |
199 | 0 | msg->rep->ttl = get_rrset_ttl(msg->rep->rrsets[ |
200 | 0 | msg->rep->rrset_count-1]); |
201 | 0 | msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(msg->rep->ttl); |
202 | 0 | msg->rep->serve_expired_ttl = msg->rep->ttl + SERVE_EXPIRED_TTL; |
203 | 0 | } |
204 | 0 | } |
205 | | |
206 | | /** see if rrset is a duplicate in the answer message */ |
207 | | static int |
208 | | msg_rrset_duplicate(struct dns_msg* msg, uint8_t* nm, size_t nmlen, |
209 | | uint16_t type, uint16_t dclass) |
210 | 0 | { |
211 | 0 | size_t i; |
212 | 0 | for(i=0; i<msg->rep->rrset_count; i++) { |
213 | 0 | struct ub_packed_rrset_key* k = msg->rep->rrsets[i]; |
214 | 0 | if(ntohs(k->rk.type) == type && k->rk.dname_len == nmlen && |
215 | 0 | ntohs(k->rk.rrset_class) == dclass && |
216 | 0 | query_dname_compare(k->rk.dname, nm) == 0) |
217 | 0 | return 1; |
218 | 0 | } |
219 | 0 | return 0; |
220 | 0 | } |
221 | | |
222 | | /** add rrset to answer section (no authority or additional rrsets yet) */
223 | | static int |
224 | | msg_add_rrset_an(struct auth_zone* z, struct regional* region, |
225 | | struct dns_msg* msg, struct auth_data* node, struct auth_rrset* rrset) |
226 | 0 | { |
227 | 0 | log_assert(msg->rep->ns_numrrsets == 0); |
228 | 0 | log_assert(msg->rep->ar_numrrsets == 0); |
229 | 0 | if(!rrset || !node) |
230 | 0 | return 1; |
231 | 0 | if(msg_rrset_duplicate(msg, node->name, node->namelen, rrset->type, |
232 | 0 | z->dclass)) |
233 | 0 | return 1; |
234 | | /* grow array */ |
235 | 0 | if(!msg_grow_array(region, msg)) |
236 | 0 | return 0; |
237 | | /* copy it */ |
238 | 0 | if(!(msg->rep->rrsets[msg->rep->rrset_count] = |
239 | 0 | auth_packed_rrset_copy_region(z, node, rrset, region, 0))) |
240 | 0 | return 0; |
241 | 0 | msg->rep->rrset_count++; |
242 | 0 | msg->rep->an_numrrsets++; |
243 | 0 | msg_ttl(msg); |
244 | 0 | return 1; |
245 | 0 | } |
246 | | |
247 | | /** add rrset to authority section (no additional section rrsets yet) */ |
248 | | static int |
249 | | msg_add_rrset_ns(struct auth_zone* z, struct regional* region, |
250 | | struct dns_msg* msg, struct auth_data* node, struct auth_rrset* rrset) |
251 | 0 | { |
252 | 0 | log_assert(msg->rep->ar_numrrsets == 0); |
253 | 0 | if(!rrset || !node) |
254 | 0 | return 1; |
255 | 0 | if(msg_rrset_duplicate(msg, node->name, node->namelen, rrset->type, |
256 | 0 | z->dclass)) |
257 | 0 | return 1; |
258 | | /* grow array */ |
259 | 0 | if(!msg_grow_array(region, msg)) |
260 | 0 | return 0; |
261 | | /* copy it */ |
262 | 0 | if(!(msg->rep->rrsets[msg->rep->rrset_count] = |
263 | 0 | auth_packed_rrset_copy_region(z, node, rrset, region, 0))) |
264 | 0 | return 0; |
265 | 0 | msg->rep->rrset_count++; |
266 | 0 | msg->rep->ns_numrrsets++; |
267 | 0 | msg_ttl(msg); |
268 | 0 | return 1; |
269 | 0 | } |
270 | | |
271 | | /** add rrset to additional section */ |
272 | | static int |
273 | | msg_add_rrset_ar(struct auth_zone* z, struct regional* region, |
274 | | struct dns_msg* msg, struct auth_data* node, struct auth_rrset* rrset) |
275 | 0 | { |
276 | 0 | if(!rrset || !node) |
277 | 0 | return 1; |
278 | 0 | if(msg_rrset_duplicate(msg, node->name, node->namelen, rrset->type, |
279 | 0 | z->dclass)) |
280 | 0 | return 1; |
281 | | /* grow array */ |
282 | 0 | if(!msg_grow_array(region, msg)) |
283 | 0 | return 0; |
284 | | /* copy it */ |
285 | 0 | if(!(msg->rep->rrsets[msg->rep->rrset_count] = |
286 | 0 | auth_packed_rrset_copy_region(z, node, rrset, region, 0))) |
287 | 0 | return 0; |
288 | 0 | msg->rep->rrset_count++; |
289 | 0 | msg->rep->ar_numrrsets++; |
290 | 0 | msg_ttl(msg); |
291 | 0 | return 1; |
292 | 0 | } |
293 | | |
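The msg_create() and msg_add_rrset_*() helpers above are meant to be used in section order (answer, then authority, then additional), which the log_assert checks enforce. A minimal caller-side sketch, not part of the original file, of how a reply is assembled from them (node and rrset are assumed to come from the az_* lookup helpers defined further down):

/* sketch only: compose msg_create() with msg_add_rrset_an() */
static struct dns_msg*
sketch_build_answer(struct auth_zone* z, struct regional* region,
	struct query_info* qinfo, struct auth_data* node,
	struct auth_rrset* rrset)
{
	struct dns_msg* msg = msg_create(region, qinfo);
	if(!msg)
		return NULL; /* region allocation failed */
	/* duplicates are skipped and the reply TTLs are recomputed
	 * via msg_ttl() inside the helper */
	if(!msg_add_rrset_an(z, region, msg, node, rrset))
		return NULL; /* allocation failure while copying the rrset */
	return msg;
}
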
294 | | struct auth_zones* auth_zones_create(void) |
295 | 0 | { |
296 | 0 | struct auth_zones* az = (struct auth_zones*)calloc(1, sizeof(*az)); |
297 | 0 | if(!az) { |
298 | 0 | log_err("out of memory"); |
299 | 0 | return NULL; |
300 | 0 | } |
301 | 0 | rbtree_init(&az->ztree, &auth_zone_cmp); |
302 | 0 | rbtree_init(&az->xtree, &auth_xfer_cmp); |
303 | 0 | lock_rw_init(&az->lock); |
304 | 0 | lock_protect(&az->lock, &az->ztree, sizeof(az->ztree)); |
305 | 0 | lock_protect(&az->lock, &az->xtree, sizeof(az->xtree)); |
306 | | /* also lock protects the rbnode's in struct auth_zone, auth_xfer */ |
307 | 0 | lock_rw_init(&az->rpz_lock); |
308 | 0 | lock_protect(&az->rpz_lock, &az->rpz_first, sizeof(az->rpz_first)); |
309 | 0 | return az; |
310 | 0 | } |
311 | | |
312 | | int auth_zone_cmp(const void* z1, const void* z2) |
313 | 0 | { |
314 | | /* first sort on class, so that hierarchy can be maintained within |
315 | | * a class */ |
316 | 0 | struct auth_zone* a = (struct auth_zone*)z1; |
317 | 0 | struct auth_zone* b = (struct auth_zone*)z2; |
318 | 0 | int m; |
319 | 0 | if(a->dclass != b->dclass) { |
320 | 0 | if(a->dclass < b->dclass) |
321 | 0 | return -1; |
322 | 0 | return 1; |
323 | 0 | } |
324 | | /* sorted such that higher zones sort before lower zones (their |
325 | | * contents) */ |
326 | 0 | return dname_lab_cmp(a->name, a->namelabs, b->name, b->namelabs, &m); |
327 | 0 | } |
328 | | |
329 | | int auth_data_cmp(const void* z1, const void* z2) |
330 | 0 | { |
331 | 0 | struct auth_data* a = (struct auth_data*)z1; |
332 | 0 | struct auth_data* b = (struct auth_data*)z2; |
333 | 0 | int m; |
334 | | /* canonical sort, because DNSSEC needs that */ |
335 | 0 | return dname_canon_lab_cmp(a->name, a->namelabs, b->name, |
336 | 0 | b->namelabs, &m); |
337 | 0 | } |
338 | | |
339 | | int auth_xfer_cmp(const void* z1, const void* z2) |
340 | 0 | { |
341 | | /* first sort on class, so that hierarchy can be maintained within |
342 | | * a class */ |
343 | 0 | struct auth_xfer* a = (struct auth_xfer*)z1; |
344 | 0 | struct auth_xfer* b = (struct auth_xfer*)z2; |
345 | 0 | int m; |
346 | 0 | if(a->dclass != b->dclass) { |
347 | 0 | if(a->dclass < b->dclass) |
348 | 0 | return -1; |
349 | 0 | return 1; |
350 | 0 | } |
351 | | /* sorted such that higher zones sort before lower zones (their |
352 | | * contents) */ |
353 | 0 | return dname_lab_cmp(a->name, a->namelabs, b->name, b->namelabs, &m); |
354 | 0 | } |
355 | | |
356 | | /** delete auth rrset node */ |
357 | | static void |
358 | | auth_rrset_delete(struct auth_rrset* rrset) |
359 | 0 | { |
360 | 0 | if(!rrset) return; |
361 | 0 | free(rrset->data); |
362 | 0 | free(rrset); |
363 | 0 | } |
364 | | |
365 | | /** delete auth data domain node */ |
366 | | static void |
367 | | auth_data_delete(struct auth_data* n) |
368 | 0 | { |
369 | 0 | struct auth_rrset* p, *np; |
370 | 0 | if(!n) return; |
371 | 0 | p = n->rrsets; |
372 | 0 | while(p) { |
373 | 0 | np = p->next; |
374 | 0 | auth_rrset_delete(p); |
375 | 0 | p = np; |
376 | 0 | } |
377 | 0 | free(n->name); |
378 | 0 | free(n); |
379 | 0 | } |
380 | | |
381 | | /** helper traverse to delete zones */ |
382 | | static void |
383 | | auth_data_del(rbnode_type* n, void* ATTR_UNUSED(arg)) |
384 | 0 | { |
385 | 0 | struct auth_data* z = (struct auth_data*)n->key; |
386 | 0 | auth_data_delete(z); |
387 | 0 | } |
388 | | |
389 | | /** delete an auth zone structure (tree remove must be done elsewhere) */ |
390 | | static void |
391 | | auth_zone_delete(struct auth_zone* z, struct auth_zones* az) |
392 | 0 | { |
393 | 0 | if(!z) return; |
394 | 0 | lock_rw_destroy(&z->lock); |
395 | 0 | traverse_postorder(&z->data, auth_data_del, NULL); |
396 | |
397 | 0 | if(az && z->rpz) { |
398 | | /* keep RPZ linked list intact */ |
399 | 0 | lock_rw_wrlock(&az->rpz_lock); |
400 | 0 | if(z->rpz_az_prev) |
401 | 0 | z->rpz_az_prev->rpz_az_next = z->rpz_az_next; |
402 | 0 | else |
403 | 0 | az->rpz_first = z->rpz_az_next; |
404 | 0 | if(z->rpz_az_next) |
405 | 0 | z->rpz_az_next->rpz_az_prev = z->rpz_az_prev; |
406 | 0 | lock_rw_unlock(&az->rpz_lock); |
407 | 0 | } |
408 | 0 | if(z->rpz) |
409 | 0 | rpz_delete(z->rpz); |
410 | 0 | free(z->name); |
411 | 0 | free(z->zonefile); |
412 | 0 | free(z); |
413 | 0 | } |
414 | | |
415 | | struct auth_zone* |
416 | | auth_zone_create(struct auth_zones* az, uint8_t* nm, size_t nmlen, |
417 | | uint16_t dclass) |
418 | 0 | { |
419 | 0 | struct auth_zone* z = (struct auth_zone*)calloc(1, sizeof(*z)); |
420 | 0 | if(!z) { |
421 | 0 | return NULL; |
422 | 0 | } |
423 | 0 | z->node.key = z; |
424 | 0 | z->dclass = dclass; |
425 | 0 | z->namelen = nmlen; |
426 | 0 | z->namelabs = dname_count_labels(nm); |
427 | 0 | z->name = memdup(nm, nmlen); |
428 | 0 | if(!z->name) { |
429 | 0 | free(z); |
430 | 0 | return NULL; |
431 | 0 | } |
432 | 0 | rbtree_init(&z->data, &auth_data_cmp); |
433 | 0 | lock_rw_init(&z->lock); |
434 | 0 | lock_protect(&z->lock, &z->name, sizeof(*z)-sizeof(rbnode_type)- |
435 | 0 | sizeof(&z->rpz_az_next)-sizeof(&z->rpz_az_prev)); |
436 | 0 | lock_rw_wrlock(&z->lock); |
437 | | /* z lock protects all, except rbtree itself and the rpz linked list |
438 | | * pointers, which are protected using az->lock */ |
439 | 0 | if(!rbtree_insert(&az->ztree, &z->node)) { |
440 | 0 | lock_rw_unlock(&z->lock); |
441 | 0 | auth_zone_delete(z, NULL); |
442 | 0 | log_warn("duplicate auth zone"); |
443 | 0 | return NULL; |
444 | 0 | } |
445 | 0 | return z; |
446 | 0 | } |
447 | | |
448 | | struct auth_zone* |
449 | | auth_zone_find(struct auth_zones* az, uint8_t* nm, size_t nmlen, |
450 | | uint16_t dclass) |
451 | 0 | { |
452 | 0 | struct auth_zone key; |
453 | 0 | key.node.key = &key; |
454 | 0 | key.dclass = dclass; |
455 | 0 | key.name = nm; |
456 | 0 | key.namelen = nmlen; |
457 | 0 | key.namelabs = dname_count_labels(nm); |
458 | 0 | return (struct auth_zone*)rbtree_search(&az->ztree, &key); |
459 | 0 | } |
460 | | |
461 | | struct auth_xfer* |
462 | | auth_xfer_find(struct auth_zones* az, uint8_t* nm, size_t nmlen, |
463 | | uint16_t dclass) |
464 | 0 | { |
465 | 0 | struct auth_xfer key; |
466 | 0 | key.node.key = &key; |
467 | 0 | key.dclass = dclass; |
468 | 0 | key.name = nm; |
469 | 0 | key.namelen = nmlen; |
470 | 0 | key.namelabs = dname_count_labels(nm); |
471 | 0 | return (struct auth_xfer*)rbtree_search(&az->xtree, &key); |
472 | 0 | } |
473 | | |
474 | | /** find an auth zone or sorted less-or-equal, return true if exact */ |
475 | | static int |
476 | | auth_zone_find_less_equal(struct auth_zones* az, uint8_t* nm, size_t nmlen, |
477 | | uint16_t dclass, struct auth_zone** z) |
478 | 0 | { |
479 | 0 | struct auth_zone key; |
480 | 0 | key.node.key = &key; |
481 | 0 | key.dclass = dclass; |
482 | 0 | key.name = nm; |
483 | 0 | key.namelen = nmlen; |
484 | 0 | key.namelabs = dname_count_labels(nm); |
485 | 0 | return rbtree_find_less_equal(&az->ztree, &key, (rbnode_type**)z); |
486 | 0 | } |
487 | | |
488 | | |
489 | | /** find the auth zone that is above the given name */ |
490 | | struct auth_zone* |
491 | | auth_zones_find_zone(struct auth_zones* az, uint8_t* name, size_t name_len, |
492 | | uint16_t dclass) |
493 | 0 | { |
494 | 0 | uint8_t* nm = name; |
495 | 0 | size_t nmlen = name_len; |
496 | 0 | struct auth_zone* z; |
497 | 0 | if(auth_zone_find_less_equal(az, nm, nmlen, dclass, &z)) { |
498 | | /* exact match */ |
499 | 0 | return z; |
500 | 0 | } else { |
501 | | /* less-or-nothing */ |
502 | 0 | if(!z) return NULL; /* nothing smaller, nothing above it */ |
503 | | /* we found smaller name; smaller may be above the name, |
504 | | * but not below it. */ |
505 | 0 | nm = dname_get_shared_topdomain(z->name, name); |
506 | 0 | dname_count_size_labels(nm, &nmlen); |
507 | 0 | z = NULL; |
508 | 0 | } |
509 | | |
510 | | /* search up */ |
511 | 0 | while(!z) { |
512 | 0 | z = auth_zone_find(az, nm, nmlen, dclass); |
513 | 0 | if(z) return z; |
514 | 0 | if(dname_is_root(nm)) break; |
515 | 0 | dname_remove_label(&nm, &nmlen); |
516 | 0 | } |
517 | 0 | return NULL; |
518 | 0 | } |
519 | | |
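A minimal caller-side sketch, not part of the original file, of how auth_zones_find_zone() is typically used: the query name is converted to wire format first, and the lookup then walks up to the enclosing configured zone. Locking of az and of the returned zone is left to the calling context.

static struct auth_zone*
sketch_find_zone_for_name(struct auth_zones* az, const char* namestr)
{
	uint8_t nm[LDNS_MAX_DOMAINLEN+1];
	size_t nmlen = sizeof(nm);
	if(sldns_str2wire_dname_buf(namestr, nm, &nmlen) != 0)
		return NULL; /* name did not parse */
	/* e.g. "www.example.com." finds the "example.com." zone when that
	 * zone is configured and no closer enclosing zone exists */
	return auth_zones_find_zone(az, nm, nmlen, LDNS_RR_CLASS_IN);
}
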
520 | | /** find or create zone with name str. caller must have lock on az. |
521 | | * returns a wrlocked zone */ |
522 | | static struct auth_zone* |
523 | | auth_zones_find_or_add_zone(struct auth_zones* az, char* name) |
524 | 0 | { |
525 | 0 | uint8_t nm[LDNS_MAX_DOMAINLEN+1]; |
526 | 0 | size_t nmlen = sizeof(nm); |
527 | 0 | struct auth_zone* z; |
528 | |
529 | 0 | if(sldns_str2wire_dname_buf(name, nm, &nmlen) != 0) { |
530 | 0 | log_err("cannot parse auth zone name: %s", name); |
531 | 0 | return 0; |
532 | 0 | } |
533 | 0 | z = auth_zone_find(az, nm, nmlen, LDNS_RR_CLASS_IN); |
534 | 0 | if(!z) { |
535 | | /* not found, create the zone */ |
536 | 0 | z = auth_zone_create(az, nm, nmlen, LDNS_RR_CLASS_IN); |
537 | 0 | } else { |
538 | 0 | lock_rw_wrlock(&z->lock); |
539 | 0 | } |
540 | 0 | return z; |
541 | 0 | } |
542 | | |
543 | | /** find or create xfer zone with name str. caller must have lock on az. |
544 | | * returns a locked xfer */ |
545 | | static struct auth_xfer* |
546 | | auth_zones_find_or_add_xfer(struct auth_zones* az, struct auth_zone* z) |
547 | 0 | { |
548 | 0 | struct auth_xfer* x; |
549 | 0 | x = auth_xfer_find(az, z->name, z->namelen, z->dclass); |
550 | 0 | if(!x) { |
551 | | /* not found, create the zone */ |
552 | 0 | x = auth_xfer_create(az, z); |
553 | 0 | } else { |
554 | 0 | lock_basic_lock(&x->lock); |
555 | 0 | } |
556 | 0 | return x; |
557 | 0 | } |
558 | | |
559 | | int |
560 | | auth_zone_set_zonefile(struct auth_zone* z, char* zonefile) |
561 | 0 | { |
562 | 0 | if(z->zonefile) free(z->zonefile); |
563 | 0 | if(zonefile == NULL) { |
564 | 0 | z->zonefile = NULL; |
565 | 0 | } else { |
566 | 0 | z->zonefile = strdup(zonefile); |
567 | 0 | if(!z->zonefile) { |
568 | 0 | log_err("malloc failure"); |
569 | 0 | return 0; |
570 | 0 | } |
571 | 0 | } |
572 | 0 | return 1; |
573 | 0 | } |
574 | | |
575 | | /** set auth zone fallback. caller must have lock on zone */ |
576 | | int |
577 | | auth_zone_set_fallback(struct auth_zone* z, char* fallbackstr) |
578 | 0 | { |
579 | 0 | if(strcmp(fallbackstr, "yes") != 0 && strcmp(fallbackstr, "no") != 0){ |
580 | 0 | log_err("auth zone fallback, expected yes or no, got %s", |
581 | 0 | fallbackstr); |
582 | 0 | return 0; |
583 | 0 | } |
584 | 0 | z->fallback_enabled = (strcmp(fallbackstr, "yes")==0); |
585 | 0 | return 1; |
586 | 0 | } |
587 | | |
588 | | /** create domain with the given name */ |
589 | | static struct auth_data* |
590 | | az_domain_create(struct auth_zone* z, uint8_t* nm, size_t nmlen) |
591 | 0 | { |
592 | 0 | struct auth_data* n = (struct auth_data*)malloc(sizeof(*n)); |
593 | 0 | if(!n) return NULL; |
594 | 0 | memset(n, 0, sizeof(*n)); |
595 | 0 | n->node.key = n; |
596 | 0 | n->name = memdup(nm, nmlen); |
597 | 0 | if(!n->name) { |
598 | 0 | free(n); |
599 | 0 | return NULL; |
600 | 0 | } |
601 | 0 | n->namelen = nmlen; |
602 | 0 | n->namelabs = dname_count_labels(nm); |
603 | 0 | if(!rbtree_insert(&z->data, &n->node)) { |
604 | 0 | log_warn("duplicate auth domain name"); |
605 | 0 | free(n->name); |
606 | 0 | free(n); |
607 | 0 | return NULL; |
608 | 0 | } |
609 | 0 | return n; |
610 | 0 | } |
611 | | |
612 | | /** find domain with exactly the given name */ |
613 | | static struct auth_data* |
614 | | az_find_name(struct auth_zone* z, uint8_t* nm, size_t nmlen) |
615 | 0 | { |
616 | 0 | struct auth_zone key; |
617 | 0 | key.node.key = &key; |
618 | 0 | key.name = nm; |
619 | 0 | key.namelen = nmlen; |
620 | 0 | key.namelabs = dname_count_labels(nm); |
621 | 0 | return (struct auth_data*)rbtree_search(&z->data, &key); |
622 | 0 | } |
623 | | |
624 | | /** Find domain name (or closest match) */ |
625 | | static void |
626 | | az_find_domain(struct auth_zone* z, struct query_info* qinfo, int* node_exact, |
627 | | struct auth_data** node) |
628 | 0 | { |
629 | 0 | struct auth_zone key; |
630 | 0 | key.node.key = &key; |
631 | 0 | key.name = qinfo->qname; |
632 | 0 | key.namelen = qinfo->qname_len; |
633 | 0 | key.namelabs = dname_count_labels(key.name); |
634 | 0 | *node_exact = rbtree_find_less_equal(&z->data, &key, |
635 | 0 | (rbnode_type**)node); |
636 | 0 | } |
637 | | |
638 | | /** find or create domain with name in zone */ |
639 | | static struct auth_data* |
640 | | az_domain_find_or_create(struct auth_zone* z, uint8_t* dname, |
641 | | size_t dname_len) |
642 | 0 | { |
643 | 0 | struct auth_data* n = az_find_name(z, dname, dname_len); |
644 | 0 | if(!n) { |
645 | 0 | n = az_domain_create(z, dname, dname_len); |
646 | 0 | } |
647 | 0 | return n; |
648 | 0 | } |
649 | | |
650 | | /** find rrset of given type in the domain */ |
651 | | static struct auth_rrset* |
652 | | az_domain_rrset(struct auth_data* n, uint16_t t) |
653 | 0 | { |
654 | 0 | struct auth_rrset* rrset; |
655 | 0 | if(!n) return NULL; |
656 | 0 | rrset = n->rrsets; |
657 | 0 | while(rrset) { |
658 | 0 | if(rrset->type == t) |
659 | 0 | return rrset; |
660 | 0 | rrset = rrset->next; |
661 | 0 | } |
662 | 0 | return NULL; |
663 | 0 | } |
664 | | |
665 | | /** remove rrset of this type from domain */ |
666 | | static void |
667 | | domain_remove_rrset(struct auth_data* node, uint16_t rr_type) |
668 | 0 | { |
669 | 0 | struct auth_rrset* rrset, *prev; |
670 | 0 | if(!node) return; |
671 | 0 | prev = NULL; |
672 | 0 | rrset = node->rrsets; |
673 | 0 | while(rrset) { |
674 | 0 | if(rrset->type == rr_type) { |
675 | | /* found it, now delete it */ |
676 | 0 | if(prev) prev->next = rrset->next; |
677 | 0 | else node->rrsets = rrset->next; |
678 | 0 | auth_rrset_delete(rrset); |
679 | 0 | return; |
680 | 0 | } |
681 | 0 | prev = rrset; |
682 | 0 | rrset = rrset->next; |
683 | 0 | } |
684 | 0 | } |
685 | | |
686 | | /** find an rrsig index in the rrset. returns true if found */ |
687 | | static int |
688 | | az_rrset_find_rrsig(struct packed_rrset_data* d, uint8_t* rdata, size_t len, |
689 | | size_t* index) |
690 | 0 | { |
691 | 0 | size_t i; |
692 | 0 | for(i=d->count; i<d->count + d->rrsig_count; i++) { |
693 | 0 | if(d->rr_len[i] != len) |
694 | 0 | continue; |
695 | 0 | if(memcmp(d->rr_data[i], rdata, len) == 0) { |
696 | 0 | *index = i; |
697 | 0 | return 1; |
698 | 0 | } |
699 | 0 | } |
700 | 0 | return 0; |
701 | 0 | } |
702 | | |
703 | | /** see if rdata is duplicate */ |
704 | | static int |
705 | | rdata_duplicate(struct packed_rrset_data* d, uint8_t* rdata, size_t len) |
706 | 0 | { |
707 | 0 | size_t i; |
708 | 0 | for(i=0; i<d->count + d->rrsig_count; i++) { |
709 | 0 | if(d->rr_len[i] != len) |
710 | 0 | continue; |
711 | 0 | if(memcmp(d->rr_data[i], rdata, len) == 0) |
712 | 0 | return 1; |
713 | 0 | } |
714 | 0 | return 0; |
715 | 0 | } |
716 | | |
717 | | /** get rrsig type covered from rdata. |
718 | | * @param rdata: rdata in wireformat, starting with 16bit rdlength. |
719 | | * @param rdatalen: length of rdata buffer. |
720 | | * @return type covered (or 0). |
721 | | */ |
722 | | static uint16_t |
723 | | rrsig_rdata_get_type_covered(uint8_t* rdata, size_t rdatalen) |
724 | 0 | { |
725 | 0 | if(rdatalen < 4) |
726 | 0 | return 0; |
727 | 0 | return sldns_read_uint16(rdata+2); |
728 | 0 | } |
729 | | |
730 | | /** remove RR from existing RRset. Also sig, if it is a signature. |
731 | | * reallocates the packed rrset for a new one, false on alloc failure */ |
732 | | static int |
733 | | rrset_remove_rr(struct auth_rrset* rrset, size_t index) |
734 | 0 | { |
735 | 0 | struct packed_rrset_data* d, *old = rrset->data; |
736 | 0 | size_t i; |
737 | 0 | if(index >= old->count + old->rrsig_count) |
738 | 0 | return 0; /* index out of bounds */ |
739 | 0 | d = (struct packed_rrset_data*)calloc(1, packed_rrset_sizeof(old) - ( |
740 | 0 | sizeof(size_t) + sizeof(uint8_t*) + sizeof(time_t) + |
741 | 0 | old->rr_len[index])); |
742 | 0 | if(!d) { |
743 | 0 | log_err("malloc failure"); |
744 | 0 | return 0; |
745 | 0 | } |
746 | 0 | d->ttl = old->ttl; |
747 | 0 | d->count = old->count; |
748 | 0 | d->rrsig_count = old->rrsig_count; |
749 | 0 | if(index < d->count) d->count--; |
750 | 0 | else d->rrsig_count--; |
751 | 0 | d->trust = old->trust; |
752 | 0 | d->security = old->security; |
753 | | |
754 | | /* set rr_len, needed for ptr_fixup */ |
755 | 0 | d->rr_len = (size_t*)((uint8_t*)d + |
756 | 0 | sizeof(struct packed_rrset_data)); |
757 | 0 | if(index > 0) |
758 | 0 | memmove(d->rr_len, old->rr_len, (index)*sizeof(size_t)); |
759 | 0 | if(index+1 < old->count+old->rrsig_count) |
760 | 0 | memmove(&d->rr_len[index], &old->rr_len[index+1], |
761 | 0 | (old->count+old->rrsig_count - (index+1))*sizeof(size_t)); |
762 | 0 | packed_rrset_ptr_fixup(d); |
763 | | |
764 | | /* move over ttls */ |
765 | 0 | if(index > 0) |
766 | 0 | memmove(d->rr_ttl, old->rr_ttl, (index)*sizeof(time_t)); |
767 | 0 | if(index+1 < old->count+old->rrsig_count) |
768 | 0 | memmove(&d->rr_ttl[index], &old->rr_ttl[index+1], |
769 | 0 | (old->count+old->rrsig_count - (index+1))*sizeof(time_t)); |
770 | | |
771 | | /* move over rr_data */ |
772 | 0 | for(i=0; i<d->count+d->rrsig_count; i++) { |
773 | 0 | size_t oldi; |
774 | 0 | if(i < index) oldi = i; |
775 | 0 | else oldi = i+1; |
776 | 0 | memmove(d->rr_data[i], old->rr_data[oldi], d->rr_len[i]); |
777 | 0 | } |
778 | | |
779 | | /* recalc ttl (lowest of remaining RR ttls) */ |
780 | 0 | if(d->count + d->rrsig_count > 0) |
781 | 0 | d->ttl = d->rr_ttl[0]; |
782 | 0 | for(i=0; i<d->count+d->rrsig_count; i++) { |
783 | 0 | if(d->rr_ttl[i] < d->ttl) |
784 | 0 | d->ttl = d->rr_ttl[i]; |
785 | 0 | } |
786 | |
|
787 | 0 | free(rrset->data); |
788 | 0 | rrset->data = d; |
789 | 0 | return 1; |
790 | 0 | } |
791 | | |
792 | | /** add RR to existing RRset. If insert_sig is true, add to rrsigs. |
793 | | * This reallocates the packed rrset for a new one */ |
794 | | static int |
795 | | rrset_add_rr(struct auth_rrset* rrset, uint32_t rr_ttl, uint8_t* rdata, |
796 | | size_t rdatalen, int insert_sig) |
797 | 0 | { |
798 | 0 | struct packed_rrset_data* d, *old = rrset->data; |
799 | 0 | size_t total, old_total; |
800 | |
801 | 0 | d = (struct packed_rrset_data*)calloc(1, packed_rrset_sizeof(old) |
802 | 0 | + sizeof(size_t) + sizeof(uint8_t*) + sizeof(time_t) |
803 | 0 | + rdatalen); |
804 | 0 | if(!d) { |
805 | 0 | log_err("out of memory"); |
806 | 0 | return 0; |
807 | 0 | } |
808 | | /* copy base values */ |
809 | 0 | memcpy(d, old, sizeof(struct packed_rrset_data)); |
810 | 0 | if(!insert_sig) { |
811 | 0 | d->count++; |
812 | 0 | } else { |
813 | 0 | d->rrsig_count++; |
814 | 0 | } |
815 | 0 | old_total = old->count + old->rrsig_count; |
816 | 0 | total = d->count + d->rrsig_count; |
817 | | /* set rr_len, needed for ptr_fixup */ |
818 | 0 | d->rr_len = (size_t*)((uint8_t*)d + |
819 | 0 | sizeof(struct packed_rrset_data)); |
820 | 0 | if(old->count != 0) |
821 | 0 | memmove(d->rr_len, old->rr_len, old->count*sizeof(size_t)); |
822 | 0 | if(old->rrsig_count != 0) |
823 | 0 | memmove(d->rr_len+d->count, old->rr_len+old->count, |
824 | 0 | old->rrsig_count*sizeof(size_t)); |
825 | 0 | if(!insert_sig) |
826 | 0 | d->rr_len[d->count-1] = rdatalen; |
827 | 0 | else d->rr_len[total-1] = rdatalen; |
828 | 0 | packed_rrset_ptr_fixup(d); |
829 | 0 | if((time_t)rr_ttl < d->ttl) |
830 | 0 | d->ttl = rr_ttl; |
831 | | |
832 | | /* copy old values into new array */ |
833 | 0 | if(old->count != 0) { |
834 | 0 | memmove(d->rr_ttl, old->rr_ttl, old->count*sizeof(time_t)); |
835 | | /* all the old rr pieces are allocated sequential, so we |
836 | | * can copy them in one go */ |
837 | 0 | memmove(d->rr_data[0], old->rr_data[0], |
838 | 0 | (old->rr_data[old->count-1] - old->rr_data[0]) + |
839 | 0 | old->rr_len[old->count-1]); |
840 | 0 | } |
841 | 0 | if(old->rrsig_count != 0) { |
842 | 0 | memmove(d->rr_ttl+d->count, old->rr_ttl+old->count, |
843 | 0 | old->rrsig_count*sizeof(time_t)); |
844 | 0 | memmove(d->rr_data[d->count], old->rr_data[old->count], |
845 | 0 | (old->rr_data[old_total-1] - old->rr_data[old->count]) + |
846 | 0 | old->rr_len[old_total-1]); |
847 | 0 | } |
848 | | |
849 | | /* insert new value */ |
850 | 0 | if(!insert_sig) { |
851 | 0 | d->rr_ttl[d->count-1] = rr_ttl; |
852 | 0 | memmove(d->rr_data[d->count-1], rdata, rdatalen); |
853 | 0 | } else { |
854 | 0 | d->rr_ttl[total-1] = rr_ttl; |
855 | 0 | memmove(d->rr_data[total-1], rdata, rdatalen); |
856 | 0 | } |
857 | |
858 | 0 | rrset->data = d; |
859 | 0 | free(old); |
860 | 0 | return 1; |
861 | 0 | } |
862 | | |
863 | | /** Create new rrset for node with packed rrset with one RR element */ |
864 | | static struct auth_rrset* |
865 | | rrset_create(struct auth_data* node, uint16_t rr_type, uint32_t rr_ttl, |
866 | | uint8_t* rdata, size_t rdatalen) |
867 | 0 | { |
868 | 0 | struct auth_rrset* rrset = (struct auth_rrset*)calloc(1, |
869 | 0 | sizeof(*rrset)); |
870 | 0 | struct auth_rrset* p, *prev; |
871 | 0 | struct packed_rrset_data* d; |
872 | 0 | if(!rrset) { |
873 | 0 | log_err("out of memory"); |
874 | 0 | return NULL; |
875 | 0 | } |
876 | 0 | rrset->type = rr_type; |
877 | | |
878 | | /* the rrset data structure, with one RR */ |
879 | 0 | d = (struct packed_rrset_data*)calloc(1, |
880 | 0 | sizeof(struct packed_rrset_data) + sizeof(size_t) + |
881 | 0 | sizeof(uint8_t*) + sizeof(time_t) + rdatalen); |
882 | 0 | if(!d) { |
883 | 0 | free(rrset); |
884 | 0 | log_err("out of memory"); |
885 | 0 | return NULL; |
886 | 0 | } |
887 | 0 | rrset->data = d; |
888 | 0 | d->ttl = rr_ttl; |
889 | 0 | d->trust = rrset_trust_prim_noglue; |
890 | 0 | d->rr_len = (size_t*)((uint8_t*)d + sizeof(struct packed_rrset_data)); |
891 | 0 | d->rr_data = (uint8_t**)&(d->rr_len[1]); |
892 | 0 | d->rr_ttl = (time_t*)&(d->rr_data[1]); |
893 | 0 | d->rr_data[0] = (uint8_t*)&(d->rr_ttl[1]); |
894 | | |
895 | | /* insert the RR */ |
896 | 0 | d->rr_len[0] = rdatalen; |
897 | 0 | d->rr_ttl[0] = rr_ttl; |
898 | 0 | memmove(d->rr_data[0], rdata, rdatalen); |
899 | 0 | d->count++; |
900 | | |
901 | | /* insert rrset into linked list for domain */ |
902 | | /* find sorted place to link the rrset into the list */ |
903 | 0 | prev = NULL; |
904 | 0 | p = node->rrsets; |
905 | 0 | while(p && p->type<=rr_type) { |
906 | 0 | prev = p; |
907 | 0 | p = p->next; |
908 | 0 | } |
909 | | /* so, prev is smaller, and p is larger than rr_type */ |
910 | 0 | rrset->next = p; |
911 | 0 | if(prev) prev->next = rrset; |
912 | 0 | else node->rrsets = rrset; |
913 | 0 | return rrset; |
914 | 0 | } |
915 | | |
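The pointer arithmetic in rrset_create() above, and in rrset_add_rr(), rrset_remove_rr() and rrset_moveover_rrsigs(), all assume the same single-allocation layout: the packed_rrset_data header, then the rr_len, rr_data and rr_ttl arrays, then the rdata blobs. A small sketch, not part of the original file, of the size those functions allocate:

/* sketch only: size of one packed_rrset_data block holding total_rr
 * records (data RRs plus RRSIGs) whose rdata adds up to total_rdata_len
 * octets, laid out as
 * [header][rr_len[total]][rr_data[total]][rr_ttl[total]][rdata bytes] */
static size_t
sketch_packed_rrset_size(size_t total_rr, size_t total_rdata_len)
{
	return sizeof(struct packed_rrset_data)
		+ total_rr * (sizeof(size_t) + sizeof(uint8_t*) + sizeof(time_t))
		+ total_rdata_len;
}
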
916 | | /** count number (and size) of rrsigs that cover a type */ |
917 | | static size_t |
918 | | rrsig_num_that_cover(struct auth_rrset* rrsig, uint16_t rr_type, size_t* sigsz) |
919 | 0 | { |
920 | 0 | struct packed_rrset_data* d = rrsig->data; |
921 | 0 | size_t i, num = 0; |
922 | 0 | *sigsz = 0; |
923 | 0 | log_assert(d && rrsig->type == LDNS_RR_TYPE_RRSIG); |
924 | 0 | for(i=0; i<d->count+d->rrsig_count; i++) { |
925 | 0 | if(rrsig_rdata_get_type_covered(d->rr_data[i], |
926 | 0 | d->rr_len[i]) == rr_type) { |
927 | 0 | num++; |
928 | 0 | (*sigsz) += d->rr_len[i]; |
929 | 0 | } |
930 | 0 | } |
931 | 0 | return num; |
932 | 0 | } |
933 | | |
934 | | /** See if rrsig set has covered sigs for rrset and move them over */ |
935 | | static int |
936 | | rrset_moveover_rrsigs(struct auth_data* node, uint16_t rr_type, |
937 | | struct auth_rrset* rrset, struct auth_rrset* rrsig) |
938 | 0 | { |
939 | 0 | size_t sigs, sigsz, i, j, total; |
940 | 0 | struct packed_rrset_data* sigold = rrsig->data; |
941 | 0 | struct packed_rrset_data* old = rrset->data; |
942 | 0 | struct packed_rrset_data* d, *sigd; |
943 | |
944 | 0 | log_assert(rrset->type == rr_type); |
945 | 0 | log_assert(rrsig->type == LDNS_RR_TYPE_RRSIG); |
946 | 0 | sigs = rrsig_num_that_cover(rrsig, rr_type, &sigsz); |
947 | 0 | if(sigs == 0) { |
948 | | /* 0 rrsigs to move over, done */ |
949 | 0 | return 1; |
950 | 0 | } |
951 | | |
952 | | /* allocate rrset sigsz larger for extra sigs elements, and |
953 | | * allocate rrsig sigsz smaller for less sigs elements. */ |
954 | 0 | d = (struct packed_rrset_data*)calloc(1, packed_rrset_sizeof(old) |
955 | 0 | + sigs*(sizeof(size_t) + sizeof(uint8_t*) + sizeof(time_t)) |
956 | 0 | + sigsz); |
957 | 0 | if(!d) { |
958 | 0 | log_err("out of memory"); |
959 | 0 | return 0; |
960 | 0 | } |
961 | | /* copy base values */ |
962 | 0 | total = old->count + old->rrsig_count; |
963 | 0 | memcpy(d, old, sizeof(struct packed_rrset_data)); |
964 | 0 | d->rrsig_count += sigs; |
965 | | /* setup rr_len */ |
966 | 0 | d->rr_len = (size_t*)((uint8_t*)d + |
967 | 0 | sizeof(struct packed_rrset_data)); |
968 | 0 | if(total != 0) |
969 | 0 | memmove(d->rr_len, old->rr_len, total*sizeof(size_t)); |
970 | 0 | j = d->count+d->rrsig_count-sigs; |
971 | 0 | for(i=0; i<sigold->count+sigold->rrsig_count; i++) { |
972 | 0 | if(rrsig_rdata_get_type_covered(sigold->rr_data[i], |
973 | 0 | sigold->rr_len[i]) == rr_type) { |
974 | 0 | d->rr_len[j] = sigold->rr_len[i]; |
975 | 0 | j++; |
976 | 0 | } |
977 | 0 | } |
978 | 0 | packed_rrset_ptr_fixup(d); |
979 | | |
980 | | /* copy old values into new array */ |
981 | 0 | if(total != 0) { |
982 | 0 | memmove(d->rr_ttl, old->rr_ttl, total*sizeof(time_t)); |
983 | | /* all the old rr pieces are allocated sequential, so we |
984 | | * can copy them in one go */ |
985 | 0 | memmove(d->rr_data[0], old->rr_data[0], |
986 | 0 | (old->rr_data[total-1] - old->rr_data[0]) + |
987 | 0 | old->rr_len[total-1]); |
988 | 0 | } |
989 | | |
990 | | /* move over the rrsigs to the larger rrset*/ |
991 | 0 | j = d->count+d->rrsig_count-sigs; |
992 | 0 | for(i=0; i<sigold->count+sigold->rrsig_count; i++) { |
993 | 0 | if(rrsig_rdata_get_type_covered(sigold->rr_data[i], |
994 | 0 | sigold->rr_len[i]) == rr_type) { |
995 | | /* move this one over to location j */ |
996 | 0 | d->rr_ttl[j] = sigold->rr_ttl[i]; |
997 | 0 | memmove(d->rr_data[j], sigold->rr_data[i], |
998 | 0 | sigold->rr_len[i]); |
999 | 0 | if(d->rr_ttl[j] < d->ttl) |
1000 | 0 | d->ttl = d->rr_ttl[j]; |
1001 | 0 | j++; |
1002 | 0 | } |
1003 | 0 | } |
1004 | | |
1005 | | /* put it in and deallocate the old rrset */ |
1006 | 0 | rrset->data = d; |
1007 | 0 | free(old); |
1008 | | |
1009 | | /* now make rrsig set smaller */ |
1010 | 0 | if(sigold->count+sigold->rrsig_count == sigs) { |
1011 | | /* remove all sigs from rrsig, remove it entirely */ |
1012 | 0 | domain_remove_rrset(node, LDNS_RR_TYPE_RRSIG); |
1013 | 0 | return 1; |
1014 | 0 | } |
1015 | 0 | log_assert(packed_rrset_sizeof(sigold) > sigs*(sizeof(size_t) + |
1016 | 0 | sizeof(uint8_t*) + sizeof(time_t)) + sigsz); |
1017 | 0 | sigd = (struct packed_rrset_data*)calloc(1, packed_rrset_sizeof(sigold) |
1018 | 0 | - sigs*(sizeof(size_t) + sizeof(uint8_t*) + sizeof(time_t)) |
1019 | 0 | - sigsz); |
1020 | 0 | if(!sigd) { |
1021 | | /* no need to free up d, it has already been placed in the |
1022 | | * node->rrset structure */ |
1023 | 0 | log_err("out of memory"); |
1024 | 0 | return 0; |
1025 | 0 | } |
1026 | | /* copy base values */ |
1027 | 0 | memcpy(sigd, sigold, sizeof(struct packed_rrset_data)); |
1028 | | /* in sigd the RRSIGs are stored in the base of the RR, in count */ |
1029 | 0 | sigd->count -= sigs; |
1030 | | /* setup rr_len */ |
1031 | 0 | sigd->rr_len = (size_t*)((uint8_t*)sigd + |
1032 | 0 | sizeof(struct packed_rrset_data)); |
1033 | 0 | j = 0; |
1034 | 0 | for(i=0; i<sigold->count+sigold->rrsig_count; i++) { |
1035 | 0 | if(rrsig_rdata_get_type_covered(sigold->rr_data[i], |
1036 | 0 | sigold->rr_len[i]) != rr_type) { |
1037 | 0 | sigd->rr_len[j] = sigold->rr_len[i]; |
1038 | 0 | j++; |
1039 | 0 | } |
1040 | 0 | } |
1041 | 0 | packed_rrset_ptr_fixup(sigd); |
1042 | | |
1043 | | /* copy old values into new rrsig array */ |
1044 | 0 | j = 0; |
1045 | 0 | for(i=0; i<sigold->count+sigold->rrsig_count; i++) { |
1046 | 0 | if(rrsig_rdata_get_type_covered(sigold->rr_data[i], |
1047 | 0 | sigold->rr_len[i]) != rr_type) { |
1048 | | /* move this one over to location j */ |
1049 | 0 | sigd->rr_ttl[j] = sigold->rr_ttl[i]; |
1050 | 0 | memmove(sigd->rr_data[j], sigold->rr_data[i], |
1051 | 0 | sigold->rr_len[i]); |
1052 | 0 | if(j==0) sigd->ttl = sigd->rr_ttl[j]; |
1053 | 0 | else { |
1054 | 0 | if(sigd->rr_ttl[j] < sigd->ttl) |
1055 | 0 | sigd->ttl = sigd->rr_ttl[j]; |
1056 | 0 | } |
1057 | 0 | j++; |
1058 | 0 | } |
1059 | 0 | } |
1060 | | |
1061 | | /* put it in and deallocate the old rrset */ |
1062 | 0 | rrsig->data = sigd; |
1063 | 0 | free(sigold); |
1064 | |
|
1065 | 0 | return 1; |
1066 | 0 | } |
1067 | | |
1068 | | /** copy the rrsigs from the rrset to the rrsig rrset, because the rrset |
1069 | | * is going to be deleted. reallocates the RRSIG rrset data. */ |
1070 | | static int |
1071 | | rrsigs_copy_from_rrset_to_rrsigset(struct auth_rrset* rrset, |
1072 | | struct auth_rrset* rrsigset) |
1073 | 0 | { |
1074 | 0 | size_t i; |
1075 | 0 | if(rrset->data->rrsig_count == 0) |
1076 | 0 | return 1; |
1077 | | |
1078 | | /* move them over one by one, because there might be duplicates, |
1079 | | * duplicates are ignored */ |
1080 | 0 | for(i=rrset->data->count; |
1081 | 0 | i<rrset->data->count+rrset->data->rrsig_count; i++) { |
1082 | 0 | uint8_t* rdata = rrset->data->rr_data[i]; |
1083 | 0 | size_t rdatalen = rrset->data->rr_len[i]; |
1084 | 0 | time_t rr_ttl = rrset->data->rr_ttl[i]; |
1085 | |
1086 | 0 | if(rdata_duplicate(rrsigset->data, rdata, rdatalen)) { |
1087 | 0 | continue; |
1088 | 0 | } |
1089 | 0 | if(!rrset_add_rr(rrsigset, rr_ttl, rdata, rdatalen, 0)) |
1090 | 0 | return 0; |
1091 | 0 | } |
1092 | 0 | return 1; |
1093 | 0 | } |
1094 | | |
1095 | | /** Add rr to node, ignores duplicate RRs, |
1096 | | * rdata points to buffer with rdatalen octets, starts with a 2-byte length. */
1097 | | static int |
1098 | | az_domain_add_rr(struct auth_data* node, uint16_t rr_type, uint32_t rr_ttl, |
1099 | | uint8_t* rdata, size_t rdatalen, int* duplicate) |
1100 | 0 | { |
1101 | 0 | struct auth_rrset* rrset; |
1102 | | /* packed rrsets have their rrsigs along with them, sort them out */ |
1103 | 0 | if(rr_type == LDNS_RR_TYPE_RRSIG) { |
1104 | 0 | uint16_t ctype = rrsig_rdata_get_type_covered(rdata, rdatalen); |
1105 | 0 | if((rrset=az_domain_rrset(node, ctype))!= NULL) { |
1106 | | /* a node of the correct type exists, add the RRSIG |
1107 | | * to the rrset of the covered data type */ |
1108 | 0 | if(rdata_duplicate(rrset->data, rdata, rdatalen)) { |
1109 | 0 | if(duplicate) *duplicate = 1; |
1110 | 0 | return 1; |
1111 | 0 | } |
1112 | 0 | if(!rrset_add_rr(rrset, rr_ttl, rdata, rdatalen, 1)) |
1113 | 0 | return 0; |
1114 | 0 | } else if((rrset=az_domain_rrset(node, rr_type))!= NULL) { |
1115 | | /* add RRSIG to rrset of type RRSIG */ |
1116 | 0 | if(rdata_duplicate(rrset->data, rdata, rdatalen)) { |
1117 | 0 | if(duplicate) *duplicate = 1; |
1118 | 0 | return 1; |
1119 | 0 | } |
1120 | 0 | if(!rrset_add_rr(rrset, rr_ttl, rdata, rdatalen, 0)) |
1121 | 0 | return 0; |
1122 | 0 | } else { |
1123 | | /* create rrset of type RRSIG */ |
1124 | 0 | if(!rrset_create(node, rr_type, rr_ttl, rdata, |
1125 | 0 | rdatalen)) |
1126 | 0 | return 0; |
1127 | 0 | } |
1128 | 0 | } else { |
1129 | | /* normal RR type */ |
1130 | 0 | if((rrset=az_domain_rrset(node, rr_type))!= NULL) { |
1131 | | /* add data to existing node with data type */ |
1132 | 0 | if(rdata_duplicate(rrset->data, rdata, rdatalen)) { |
1133 | 0 | if(duplicate) *duplicate = 1; |
1134 | 0 | return 1; |
1135 | 0 | } |
1136 | 0 | if(!rrset_add_rr(rrset, rr_ttl, rdata, rdatalen, 0)) |
1137 | 0 | return 0; |
1138 | 0 | } else { |
1139 | 0 | struct auth_rrset* rrsig; |
1140 | | /* create new node with data type */ |
1141 | 0 | if(!(rrset=rrset_create(node, rr_type, rr_ttl, rdata, |
1142 | 0 | rdatalen))) |
1143 | 0 | return 0; |
1144 | | |
1145 | | /* see if node of type RRSIG has signatures that |
1146 | | * cover the data type, and move them over */ |
1147 | | /* and then make the RRSIG type smaller */ |
1148 | 0 | if((rrsig=az_domain_rrset(node, LDNS_RR_TYPE_RRSIG)) |
1149 | 0 | != NULL) { |
1150 | 0 | if(!rrset_moveover_rrsigs(node, rr_type, |
1151 | 0 | rrset, rrsig)) |
1152 | 0 | return 0; |
1153 | 0 | } |
1154 | 0 | } |
1155 | 0 | } |
1156 | 0 | return 1; |
1157 | 0 | } |
1158 | | |
1159 | | /** insert RR into zone, ignore duplicates */ |
1160 | | static int |
1161 | | az_insert_rr(struct auth_zone* z, uint8_t* rr, size_t rr_len, |
1162 | | size_t dname_len, int* duplicate) |
1163 | 0 | { |
1164 | 0 | struct auth_data* node; |
1165 | 0 | uint8_t* dname = rr; |
1166 | 0 | uint16_t rr_type = sldns_wirerr_get_type(rr, rr_len, dname_len); |
1167 | 0 | uint16_t rr_class = sldns_wirerr_get_class(rr, rr_len, dname_len); |
1168 | 0 | uint32_t rr_ttl = sldns_wirerr_get_ttl(rr, rr_len, dname_len); |
1169 | 0 | size_t rdatalen = ((size_t)sldns_wirerr_get_rdatalen(rr, rr_len, |
1170 | 0 | dname_len))+2; |
1171 | | /* rdata points to rdata prefixed with uint16 rdatalength */ |
1172 | 0 | uint8_t* rdata = sldns_wirerr_get_rdatawl(rr, rr_len, dname_len); |
1173 | |
1174 | 0 | if(rr_class != z->dclass) { |
1175 | 0 | log_err("wrong class for RR"); |
1176 | 0 | return 0; |
1177 | 0 | } |
1178 | 0 | if(!(node=az_domain_find_or_create(z, dname, dname_len))) { |
1179 | 0 | log_err("cannot create domain"); |
1180 | 0 | return 0; |
1181 | 0 | } |
1182 | 0 | if(!az_domain_add_rr(node, rr_type, rr_ttl, rdata, rdatalen, |
1183 | 0 | duplicate)) { |
1184 | 0 | log_err("cannot add RR to domain"); |
1185 | 0 | return 0; |
1186 | 0 | } |
1187 | 0 | if(z->rpz) { |
1188 | 0 | if(!(rpz_insert_rr(z->rpz, z->name, z->namelen, dname, |
1189 | 0 | dname_len, rr_type, rr_class, rr_ttl, rdata, rdatalen, |
1190 | 0 | rr, rr_len))) |
1191 | 0 | return 0; |
1192 | 0 | } |
1193 | 0 | return 1; |
1194 | 0 | } |
1195 | | |
1196 | | /** Remove rr from node, ignores nonexisting RRs, |
1197 | | * rdata points to buffer with rdatalen octets, starts with a 2-byte length. */
1198 | | static int |
1199 | | az_domain_remove_rr(struct auth_data* node, uint16_t rr_type, |
1200 | | uint8_t* rdata, size_t rdatalen, int* nonexist) |
1201 | 0 | { |
1202 | 0 | struct auth_rrset* rrset; |
1203 | 0 | size_t index = 0; |
1204 | | |
1205 | | /* find the plain RR of the given type */ |
1206 | 0 | if((rrset=az_domain_rrset(node, rr_type))!= NULL) { |
1207 | 0 | if(packed_rrset_find_rr(rrset->data, rdata, rdatalen, &index)) { |
1208 | 0 | if(rrset->data->count == 1 && |
1209 | 0 | rrset->data->rrsig_count == 0) { |
1210 | | /* last RR, delete the rrset */ |
1211 | 0 | domain_remove_rrset(node, rr_type); |
1212 | 0 | } else if(rrset->data->count == 1 && |
1213 | 0 | rrset->data->rrsig_count != 0) { |
1214 | | /* move RRSIGs to the RRSIG rrset, or |
1215 | | * this one becomes that RRset */ |
1216 | 0 | struct auth_rrset* rrsigset = az_domain_rrset( |
1217 | 0 | node, LDNS_RR_TYPE_RRSIG); |
1218 | 0 | if(rrsigset) { |
1219 | | /* move left over rrsigs to the |
1220 | | * existing rrset of type RRSIG */ |
1221 | 0 | rrsigs_copy_from_rrset_to_rrsigset( |
1222 | 0 | rrset, rrsigset); |
1223 | | /* and then delete the rrset */ |
1224 | 0 | domain_remove_rrset(node, rr_type); |
1225 | 0 | } else { |
1226 | | /* no rrset of type RRSIG, this |
1227 | | * set is now of that type, |
1228 | | * just remove the rr */ |
1229 | 0 | if(!rrset_remove_rr(rrset, index)) |
1230 | 0 | return 0; |
1231 | 0 | rrset->type = LDNS_RR_TYPE_RRSIG; |
1232 | 0 | rrset->data->count = rrset->data->rrsig_count; |
1233 | 0 | rrset->data->rrsig_count = 0; |
1234 | 0 | } |
1235 | 0 | } else { |
1236 | | /* remove the RR from the rrset */ |
1237 | 0 | if(!rrset_remove_rr(rrset, index)) |
1238 | 0 | return 0; |
1239 | 0 | } |
1240 | 0 | return 1; |
1241 | 0 | } |
1242 | | /* rr not found in rrset */ |
1243 | 0 | } |
1244 | | |
1245 | | /* is it a type RRSIG, look under the covered type */ |
1246 | 0 | if(rr_type == LDNS_RR_TYPE_RRSIG) { |
1247 | 0 | uint16_t ctype = rrsig_rdata_get_type_covered(rdata, rdatalen); |
1248 | 0 | if((rrset=az_domain_rrset(node, ctype))!= NULL) { |
1249 | 0 | if(az_rrset_find_rrsig(rrset->data, rdata, rdatalen, |
1250 | 0 | &index)) { |
1251 | | /* rrsig should have d->count > 0, be |
1252 | | * over some rr of that type */ |
1253 | | /* remove the rrsig from the rrsigs list of the |
1254 | | * rrset */ |
1255 | 0 | if(!rrset_remove_rr(rrset, index)) |
1256 | 0 | return 0; |
1257 | 0 | return 1; |
1258 | 0 | } |
1259 | 0 | } |
1260 | | /* also RRSIG not found */ |
1261 | 0 | } |
1262 | | |
1263 | | /* nothing found to delete */ |
1264 | 0 | if(nonexist) *nonexist = 1; |
1265 | 0 | return 1; |
1266 | 0 | } |
1267 | | |
1268 | | /** remove RR from zone, ignore if it does not exist, false on alloc failure*/ |
1269 | | static int |
1270 | | az_remove_rr(struct auth_zone* z, uint8_t* rr, size_t rr_len, |
1271 | | size_t dname_len, int* nonexist) |
1272 | 0 | { |
1273 | 0 | struct auth_data* node; |
1274 | 0 | uint8_t* dname = rr; |
1275 | 0 | uint16_t rr_type = sldns_wirerr_get_type(rr, rr_len, dname_len); |
1276 | 0 | uint16_t rr_class = sldns_wirerr_get_class(rr, rr_len, dname_len); |
1277 | 0 | size_t rdatalen = ((size_t)sldns_wirerr_get_rdatalen(rr, rr_len, |
1278 | 0 | dname_len))+2; |
1279 | | /* rdata points to rdata prefixed with uint16 rdatalength */ |
1280 | 0 | uint8_t* rdata = sldns_wirerr_get_rdatawl(rr, rr_len, dname_len); |
1281 | |
1282 | 0 | if(rr_class != z->dclass) { |
1283 | 0 | log_err("wrong class for RR"); |
1284 | | /* really also a nonexisting entry, because no records |
1285 | | * of that class in the zone, but return an error because |
1286 | | * getting records of the wrong class is a failure of the |
1287 | | * zone transfer */ |
1288 | 0 | return 0; |
1289 | 0 | } |
1290 | 0 | node = az_find_name(z, dname, dname_len); |
1291 | 0 | if(!node) { |
1292 | | /* node with that name does not exist */ |
1293 | | /* nonexisting entry, because no such name */ |
1294 | 0 | *nonexist = 1; |
1295 | 0 | return 1; |
1296 | 0 | } |
1297 | 0 | if(!az_domain_remove_rr(node, rr_type, rdata, rdatalen, nonexist)) { |
1298 | | /* alloc failure or so */ |
1299 | 0 | return 0; |
1300 | 0 | } |
1301 | | /* remove the node, if necessary */ |
1302 | | /* an rrsets==NULL entry is not kept around for empty nonterminals, |
1303 | | * and also parent nodes are not kept around, so we just delete it */ |
1304 | 0 | if(node->rrsets == NULL) { |
1305 | 0 | (void)rbtree_delete(&z->data, node); |
1306 | 0 | auth_data_delete(node); |
1307 | 0 | } |
1308 | 0 | if(z->rpz) { |
1309 | 0 | rpz_remove_rr(z->rpz, z->namelen, dname, dname_len, rr_type, |
1310 | 0 | rr_class, rdata, rdatalen); |
1311 | 0 | } |
1312 | 0 | return 1; |
1313 | 0 | } |
1314 | | |
1315 | | /** decompress an RR into the buffer where it'll be an uncompressed RR |
1316 | | * with uncompressed dname and uncompressed rdata (dnames) */ |
1317 | | static int |
1318 | | decompress_rr_into_buffer(struct sldns_buffer* buf, uint8_t* pkt, |
1319 | | size_t pktlen, uint8_t* dname, uint16_t rr_type, uint16_t rr_class, |
1320 | | uint32_t rr_ttl, uint8_t* rr_data, uint16_t rr_rdlen) |
1321 | 0 | { |
1322 | 0 | sldns_buffer pktbuf; |
1323 | 0 | size_t dname_len = 0; |
1324 | 0 | size_t rdlenpos; |
1325 | 0 | size_t rdlen; |
1326 | 0 | uint8_t* rd; |
1327 | 0 | const sldns_rr_descriptor* desc; |
1328 | 0 | sldns_buffer_init_frm_data(&pktbuf, pkt, pktlen); |
1329 | 0 | sldns_buffer_clear(buf); |
1330 | | |
1331 | | /* decompress dname */ |
1332 | 0 | sldns_buffer_set_position(&pktbuf, |
1333 | 0 | (size_t)(dname - sldns_buffer_current(&pktbuf))); |
1334 | 0 | dname_len = pkt_dname_len(&pktbuf); |
1335 | 0 | if(dname_len == 0) return 0; /* parse fail on dname */ |
1336 | 0 | if(!sldns_buffer_available(buf, dname_len)) return 0; |
1337 | 0 | dname_pkt_copy(&pktbuf, sldns_buffer_current(buf), dname); |
1338 | 0 | sldns_buffer_skip(buf, (ssize_t)dname_len); |
1339 | | |
1340 | | /* type, class, ttl and rdatalength fields */ |
1341 | 0 | if(!sldns_buffer_available(buf, 10)) return 0; |
1342 | 0 | sldns_buffer_write_u16(buf, rr_type); |
1343 | 0 | sldns_buffer_write_u16(buf, rr_class); |
1344 | 0 | sldns_buffer_write_u32(buf, rr_ttl); |
1345 | 0 | rdlenpos = sldns_buffer_position(buf); |
1346 | 0 | sldns_buffer_write_u16(buf, 0); /* rd length position */ |
1347 | | |
1348 | | /* decompress rdata */ |
1349 | 0 | desc = sldns_rr_descript(rr_type); |
1350 | 0 | rd = rr_data; |
1351 | 0 | rdlen = rr_rdlen; |
1352 | 0 | if(rdlen > 0 && desc && desc->_dname_count > 0) { |
1353 | 0 | int count = (int)desc->_dname_count; |
1354 | 0 | int rdf = 0; |
1355 | 0 | size_t len; /* how much rdata to plain copy */ |
1356 | 0 | size_t uncompressed_len, compressed_len; |
1357 | 0 | size_t oldpos; |
1358 | | /* decompress dnames. */ |
1359 | 0 | while(rdlen > 0 && count) { |
1360 | 0 | switch(desc->_wireformat[rdf]) { |
1361 | 0 | case LDNS_RDF_TYPE_DNAME: |
1362 | 0 | sldns_buffer_set_position(&pktbuf, |
1363 | 0 | (size_t)(rd - |
1364 | 0 | sldns_buffer_begin(&pktbuf))); |
1365 | 0 | oldpos = sldns_buffer_position(&pktbuf); |
1366 | | /* moves pktbuf to right after the |
1367 | | * compressed dname, and returns uncompressed |
1368 | | * dname length */ |
1369 | 0 | uncompressed_len = pkt_dname_len(&pktbuf); |
1370 | 0 | if(!uncompressed_len) |
1371 | 0 | return 0; /* parse error in dname */ |
1372 | 0 | if(!sldns_buffer_available(buf, |
1373 | 0 | uncompressed_len)) |
1374 | | /* dname too long for buffer */ |
1375 | 0 | return 0; |
1376 | 0 | dname_pkt_copy(&pktbuf, |
1377 | 0 | sldns_buffer_current(buf), rd); |
1378 | 0 | sldns_buffer_skip(buf, (ssize_t)uncompressed_len); |
1379 | 0 | compressed_len = sldns_buffer_position( |
1380 | 0 | &pktbuf) - oldpos; |
1381 | 0 | rd += compressed_len; |
1382 | 0 | rdlen -= compressed_len; |
1383 | 0 | count--; |
1384 | 0 | len = 0; |
1385 | 0 | break; |
1386 | 0 | case LDNS_RDF_TYPE_STR: |
1387 | 0 | len = rd[0] + 1; |
1388 | 0 | break; |
1389 | 0 | default: |
1390 | 0 | len = get_rdf_size(desc->_wireformat[rdf]); |
1391 | 0 | break; |
1392 | 0 | } |
1393 | 0 | if(len) { |
1394 | 0 | if(!sldns_buffer_available(buf, len)) |
1395 | 0 | return 0; /* too long for buffer */ |
1396 | 0 | sldns_buffer_write(buf, rd, len); |
1397 | 0 | rd += len; |
1398 | 0 | rdlen -= len; |
1399 | 0 | } |
1400 | 0 | rdf++; |
1401 | 0 | } |
1402 | 0 | } |
1403 | | /* copy remaining data */ |
1404 | 0 | if(rdlen > 0) { |
1405 | 0 | if(!sldns_buffer_available(buf, rdlen)) return 0; |
1406 | 0 | sldns_buffer_write(buf, rd, rdlen); |
1407 | 0 | } |
1408 | | /* fixup rdlength */ |
1409 | 0 | sldns_buffer_write_u16_at(buf, rdlenpos, |
1410 | 0 | sldns_buffer_position(buf)-rdlenpos-2); |
1411 | 0 | sldns_buffer_flip(buf); |
1412 | 0 | return 1; |
1413 | 0 | } |
1414 | | |
1415 | | /** insert RR into zone, from packet, decompress RR, |
1416 | | * if duplicate is nonNULL set the flag but otherwise ignore duplicates */ |
1417 | | static int |
1418 | | az_insert_rr_decompress(struct auth_zone* z, uint8_t* pkt, size_t pktlen, |
1419 | | struct sldns_buffer* scratch_buffer, uint8_t* dname, uint16_t rr_type, |
1420 | | uint16_t rr_class, uint32_t rr_ttl, uint8_t* rr_data, |
1421 | | uint16_t rr_rdlen, int* duplicate) |
1422 | 0 | { |
1423 | 0 | uint8_t* rr; |
1424 | 0 | size_t rr_len; |
1425 | 0 | size_t dname_len; |
1426 | 0 | if(!decompress_rr_into_buffer(scratch_buffer, pkt, pktlen, dname, |
1427 | 0 | rr_type, rr_class, rr_ttl, rr_data, rr_rdlen)) { |
1428 | 0 | log_err("could not decompress RR"); |
1429 | 0 | return 0; |
1430 | 0 | } |
1431 | 0 | rr = sldns_buffer_begin(scratch_buffer); |
1432 | 0 | rr_len = sldns_buffer_limit(scratch_buffer); |
1433 | 0 | dname_len = dname_valid(rr, rr_len); |
1434 | 0 | return az_insert_rr(z, rr, rr_len, dname_len, duplicate); |
1435 | 0 | } |
1436 | | |
1437 | | /** remove RR from zone, from packet, decompress RR, |
1438 | | * if nonexist is nonNULL set the flag but otherwise ignore nonexisting entries */
1439 | | static int |
1440 | | az_remove_rr_decompress(struct auth_zone* z, uint8_t* pkt, size_t pktlen, |
1441 | | struct sldns_buffer* scratch_buffer, uint8_t* dname, uint16_t rr_type, |
1442 | | uint16_t rr_class, uint32_t rr_ttl, uint8_t* rr_data, |
1443 | | uint16_t rr_rdlen, int* nonexist) |
1444 | 0 | { |
1445 | 0 | uint8_t* rr; |
1446 | 0 | size_t rr_len; |
1447 | 0 | size_t dname_len; |
1448 | 0 | if(!decompress_rr_into_buffer(scratch_buffer, pkt, pktlen, dname, |
1449 | 0 | rr_type, rr_class, rr_ttl, rr_data, rr_rdlen)) { |
1450 | 0 | log_err("could not decompress RR"); |
1451 | 0 | return 0; |
1452 | 0 | } |
1453 | 0 | rr = sldns_buffer_begin(scratch_buffer); |
1454 | 0 | rr_len = sldns_buffer_limit(scratch_buffer); |
1455 | 0 | dname_len = dname_valid(rr, rr_len); |
1456 | 0 | return az_remove_rr(z, rr, rr_len, dname_len, nonexist); |
1457 | 0 | } |
1458 | | |
1459 | | /** |
1460 | | * Parse zonefile |
1461 | | * @param z: zone to read in. |
1462 | | * @param in: file to read from (just opened). |
1463 | | * @param rr: buffer to use for RRs, 64k. |
1464 | | * passed so that recursive includes can use the same buffer and do |
1465 | | * not grow the stack too much. |
1466 | | * @param rrbuflen: sizeof rr buffer. |
1467 | | * @param state: parse state with $ORIGIN, $TTL and 'prev-dname' and so on, |
1468 | | * that is kept between includes. |
1469 | | * The lineno is set at 1 and then increased by the function. |
1470 | | * @param fname: file name. |
1471 | | * @param depth: recursion depth for includes |
1472 | | * @param cfg: config for chroot. |
1473 | | * returns false on failure, has printed an error message |
1474 | | */ |
1475 | | static int |
1476 | | az_parse_file(struct auth_zone* z, FILE* in, uint8_t* rr, size_t rrbuflen, |
1477 | | struct sldns_file_parse_state* state, char* fname, int depth, |
1478 | | struct config_file* cfg) |
1479 | 0 | { |
1480 | 0 | size_t rr_len, dname_len; |
1481 | 0 | int status; |
1482 | 0 | state->lineno = 1; |
1483 | |
1484 | 0 | while(!feof(in)) { |
1485 | 0 | rr_len = rrbuflen; |
1486 | 0 | dname_len = 0; |
1487 | 0 | status = sldns_fp2wire_rr_buf(in, rr, &rr_len, &dname_len, |
1488 | 0 | state); |
1489 | 0 | if(status == LDNS_WIREPARSE_ERR_INCLUDE && rr_len == 0) { |
1490 | | /* we have $INCLUDE or $something */ |
1491 | 0 | if(strncmp((char*)rr, "$INCLUDE ", 9) == 0 || |
1492 | 0 | strncmp((char*)rr, "$INCLUDE\t", 9) == 0) { |
1493 | 0 | FILE* inc; |
1494 | 0 | int lineno_orig = state->lineno; |
1495 | 0 | char* incfile = (char*)rr + 8; |
1496 | 0 | if(depth > MAX_INCLUDE_DEPTH) { |
1497 | 0 | log_err("%s:%d max include depth " |
1498 | 0 | "exceeded", fname, state->lineno); |
1499 | 0 | return 0; |
1500 | 0 | } |
1501 | | /* skip spaces */ |
1502 | 0 | while(*incfile == ' ' || *incfile == '\t') |
1503 | 0 | incfile++; |
1504 | | /* adjust for chroot on include file */ |
1505 | 0 | if(cfg->chrootdir && cfg->chrootdir[0] && |
1506 | 0 | strncmp(incfile, cfg->chrootdir, |
1507 | 0 | strlen(cfg->chrootdir)) == 0) |
1508 | 0 | incfile += strlen(cfg->chrootdir); |
1509 | 0 | incfile = strdup(incfile); |
1510 | 0 | if(!incfile) { |
1511 | 0 | log_err("malloc failure"); |
1512 | 0 | return 0; |
1513 | 0 | } |
1514 | 0 | verbose(VERB_ALGO, "opening $INCLUDE %s", |
1515 | 0 | incfile); |
1516 | 0 | inc = fopen(incfile, "r"); |
1517 | 0 | if(!inc) { |
1518 | 0 | log_err("%s:%d cannot open include " |
1519 | 0 | "file %s: %s", fname, |
1520 | 0 | lineno_orig, incfile, |
1521 | 0 | strerror(errno)); |
1522 | 0 | free(incfile); |
1523 | 0 | return 0; |
1524 | 0 | } |
1525 | | /* recurse read that file now */ |
1526 | 0 | if(!az_parse_file(z, inc, rr, rrbuflen, |
1527 | 0 | state, incfile, depth+1, cfg)) { |
1528 | 0 | log_err("%s:%d cannot parse include " |
1529 | 0 | "file %s", fname, |
1530 | 0 | lineno_orig, incfile); |
1531 | 0 | fclose(inc); |
1532 | 0 | free(incfile); |
1533 | 0 | return 0; |
1534 | 0 | } |
1535 | 0 | fclose(inc); |
1536 | 0 | verbose(VERB_ALGO, "done with $INCLUDE %s", |
1537 | 0 | incfile); |
1538 | 0 | free(incfile); |
1539 | 0 | state->lineno = lineno_orig; |
1540 | 0 | } |
1541 | 0 | continue; |
1542 | 0 | } |
1543 | 0 | if(status != 0) { |
1544 | 0 | log_err("parse error %s %d:%d: %s", fname, |
1545 | 0 | state->lineno, LDNS_WIREPARSE_OFFSET(status), |
1546 | 0 | sldns_get_errorstr_parse(status)); |
1547 | 0 | return 0; |
1548 | 0 | } |
1549 | 0 | if(rr_len == 0) { |
1550 | | /* EMPTY line, TTL or ORIGIN */ |
1551 | 0 | continue; |
1552 | 0 | } |
1553 | | /* insert wirerr in rrbuf */ |
1554 | 0 | if(!az_insert_rr(z, rr, rr_len, dname_len, NULL)) { |
1555 | 0 | char buf[17]; |
1556 | 0 | sldns_wire2str_type_buf(sldns_wirerr_get_type(rr, |
1557 | 0 | rr_len, dname_len), buf, sizeof(buf)); |
1558 | 0 | log_err("%s:%d cannot insert RR of type %s", |
1559 | 0 | fname, state->lineno, buf); |
1560 | 0 | return 0; |
1561 | 0 | } |
1562 | 0 | } |
1563 | 0 | return 1; |
1564 | 0 | } |
1565 | | |
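Both the $INCLUDE branch above and auth_zone_read_zonefile() below adjust file paths for a configured chroot by skipping the cfg->chrootdir prefix before fopen(). A minimal sketch of that adjustment in isolation; strip_chroot() is a hypothetical helper, not a function in this file.

#include <string.h>

/* Sketch: if 'path' begins with the configured (nonempty) chroot
 * directory, return a pointer just past that prefix, so the remainder is
 * the path as seen from inside the chroot; otherwise return 'path'
 * unchanged. */
static const char* strip_chroot(const char* path, const char* chroot_dir)
{
	size_t len;
	if(!chroot_dir || chroot_dir[0] == 0)
		return path;
	len = strlen(chroot_dir);
	if(strncmp(path, chroot_dir, len) == 0)
		return path + len;
	return path;
}

For example, with a chroot of "/var/unbound", the sketch maps "/var/unbound/zones/example.zone" to "/zones/example.zone", which is the path the chrooted process can actually open.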
1566 | | int |
1567 | | auth_zone_read_zonefile(struct auth_zone* z, struct config_file* cfg) |
1568 | 0 | { |
1569 | 0 | uint8_t rr[LDNS_RR_BUF_SIZE]; |
1570 | 0 | struct sldns_file_parse_state state; |
1571 | 0 | char* zfilename; |
1572 | 0 | FILE* in; |
1573 | 0 | if(!z || !z->zonefile || z->zonefile[0]==0) |
1574 | 0 | return 1; /* no file, or "", nothing to read */ |
1575 | | |
1576 | 0 | zfilename = z->zonefile; |
1577 | 0 | if(cfg->chrootdir && cfg->chrootdir[0] && strncmp(zfilename, |
1578 | 0 | cfg->chrootdir, strlen(cfg->chrootdir)) == 0) |
1579 | 0 | zfilename += strlen(cfg->chrootdir); |
1580 | 0 | if(verbosity >= VERB_ALGO) { |
1581 | 0 | char nm[255+1]; |
1582 | 0 | dname_str(z->name, nm); |
1583 | 0 | verbose(VERB_ALGO, "read zonefile %s for %s", zfilename, nm); |
1584 | 0 | } |
1585 | 0 | in = fopen(zfilename, "r"); |
1586 | 0 | if(!in) { |
1587 | 0 | char* n = sldns_wire2str_dname(z->name, z->namelen); |
1588 | 0 | if(z->zone_is_slave && errno == ENOENT) { |
1589 | | /* we fetch the zone contents later, no file yet */ |
1590 | 0 | verbose(VERB_ALGO, "no zonefile %s for %s", |
1591 | 0 | zfilename, n?n:"error"); |
1592 | 0 | free(n); |
1593 | 0 | return 1; |
1594 | 0 | } |
1595 | 0 | log_err("cannot open zonefile %s for %s: %s", |
1596 | 0 | zfilename, n?n:"error", strerror(errno)); |
1597 | 0 | free(n); |
1598 | 0 | return 0; |
1599 | 0 | } |
1600 | | |
1601 | | /* clear the data tree */ |
1602 | 0 | traverse_postorder(&z->data, auth_data_del, NULL); |
1603 | 0 | rbtree_init(&z->data, &auth_data_cmp); |
1604 | | /* clear the RPZ policies */ |
1605 | 0 | if(z->rpz) |
1606 | 0 | rpz_clear(z->rpz); |
1607 | |
1608 | 0 | memset(&state, 0, sizeof(state)); |
1609 | | /* default TTL to 3600 */ |
1610 | 0 | state.default_ttl = 3600; |
1611 | | /* set $ORIGIN to the zone name */ |
1612 | 0 | if(z->namelen <= sizeof(state.origin)) { |
1613 | 0 | memcpy(state.origin, z->name, z->namelen); |
1614 | 0 | state.origin_len = z->namelen; |
1615 | 0 | } |
1616 | | /* parse the (toplevel) file */ |
1617 | 0 | if(!az_parse_file(z, in, rr, sizeof(rr), &state, zfilename, 0, cfg)) { |
1618 | 0 | char* n = sldns_wire2str_dname(z->name, z->namelen); |
1619 | 0 | log_err("error parsing zonefile %s for %s", |
1620 | 0 | zfilename, n?n:"error"); |
1621 | 0 | free(n); |
1622 | 0 | fclose(in); |
1623 | 0 | return 0; |
1624 | 0 | } |
1625 | 0 | fclose(in); |
1626 | |
1627 | 0 | if(z->rpz) |
1628 | 0 | rpz_finish_config(z->rpz); |
1629 | 0 | return 1; |
1630 | 0 | } |
1631 | | |
1632 | | /** write buffer to file and check return codes */ |
1633 | | static int |
1634 | | write_out(FILE* out, const char* str, size_t len) |
1635 | 0 | { |
1636 | 0 | size_t r; |
1637 | 0 | if(len == 0) |
1638 | 0 | return 1; |
1639 | 0 | r = fwrite(str, 1, len, out); |
1640 | 0 | if(r == 0) { |
1641 | 0 | log_err("write failed: %s", strerror(errno)); |
1642 | 0 | return 0; |
1643 | 0 | } else if(r < len) { |
1644 | 0 | log_err("write failed: too short (disk full?)"); |
1645 | 0 | return 0; |
1646 | 0 | } |
1647 | 0 | return 1; |
1648 | 0 | } |
1649 | | |
1650 | | /** convert auth rr to string */ |
1651 | | static int |
1652 | | auth_rr_to_string(uint8_t* nm, size_t nmlen, uint16_t tp, uint16_t cl, |
1653 | | struct packed_rrset_data* data, size_t i, char* s, size_t buflen) |
1654 | 0 | { |
1655 | 0 | int w = 0; |
1656 | 0 | size_t slen = buflen, datlen; |
1657 | 0 | uint8_t* dat; |
1658 | 0 | if(i >= data->count) tp = LDNS_RR_TYPE_RRSIG; |
1659 | 0 | dat = nm; |
1660 | 0 | datlen = nmlen; |
1661 | 0 | w += sldns_wire2str_dname_scan(&dat, &datlen, &s, &slen, NULL, 0, NULL); |
1662 | 0 | w += sldns_str_print(&s, &slen, "\t"); |
1663 | 0 | w += sldns_str_print(&s, &slen, "%lu\t", (unsigned long)data->rr_ttl[i]); |
1664 | 0 | w += sldns_wire2str_class_print(&s, &slen, cl); |
1665 | 0 | w += sldns_str_print(&s, &slen, "\t"); |
1666 | 0 | w += sldns_wire2str_type_print(&s, &slen, tp); |
1667 | 0 | w += sldns_str_print(&s, &slen, "\t"); |
1668 | 0 | datlen = data->rr_len[i]-2; |
1669 | 0 | dat = data->rr_data[i]+2; |
1670 | 0 | w += sldns_wire2str_rdata_scan(&dat, &datlen, &s, &slen, tp, NULL, 0, NULL); |
1671 | |
1672 | 0 | if(tp == LDNS_RR_TYPE_DNSKEY) { |
1673 | 0 | w += sldns_str_print(&s, &slen, " ;{id = %u}", |
1674 | 0 | sldns_calc_keytag_raw(data->rr_data[i]+2, |
1675 | 0 | data->rr_len[i]-2)); |
1676 | 0 | } |
1677 | 0 | w += sldns_str_print(&s, &slen, "\n"); |
1678 | |
1679 | 0 | if(w >= (int)buflen) { |
1680 | 0 | log_nametypeclass(NO_VERBOSE, "RR too long to print", nm, tp, cl); |
1681 | 0 | return 0; |
1682 | 0 | } |
1683 | 0 | return 1; |
1684 | 0 | } |
1685 | | |
1686 | | /** write rrset to file */ |
1687 | | static int |
1688 | | auth_zone_write_rrset(struct auth_zone* z, struct auth_data* node, |
1689 | | struct auth_rrset* r, FILE* out) |
1690 | 0 | { |
1691 | 0 | size_t i, count = r->data->count + r->data->rrsig_count; |
1692 | 0 | char buf[LDNS_RR_BUF_SIZE]; |
1693 | 0 | for(i=0; i<count; i++) { |
1694 | 0 | if(!auth_rr_to_string(node->name, node->namelen, r->type, |
1695 | 0 | z->dclass, r->data, i, buf, sizeof(buf))) { |
1696 | 0 | verbose(VERB_ALGO, "failed to rr2str rr %d", (int)i); |
1697 | 0 | continue; |
1698 | 0 | } |
1699 | 0 | if(!write_out(out, buf, strlen(buf))) |
1700 | 0 | return 0; |
1701 | 0 | } |
1702 | 0 | return 1; |
1703 | 0 | } |
1704 | | |
1705 | | /** write domain to file */ |
1706 | | static int |
1707 | | auth_zone_write_domain(struct auth_zone* z, struct auth_data* n, FILE* out) |
1708 | 0 | { |
1709 | 0 | struct auth_rrset* r; |
1710 | | /* if this is zone apex, write SOA first */ |
1711 | 0 | if(z->namelen == n->namelen) { |
1712 | 0 | struct auth_rrset* soa = az_domain_rrset(n, LDNS_RR_TYPE_SOA); |
1713 | 0 | if(soa) { |
1714 | 0 | if(!auth_zone_write_rrset(z, n, soa, out)) |
1715 | 0 | return 0; |
1716 | 0 | } |
1717 | 0 | } |
1718 | | /* write all the RRsets for this domain */ |
1719 | 0 | for(r = n->rrsets; r; r = r->next) { |
1720 | 0 | if(z->namelen == n->namelen && |
1721 | 0 | r->type == LDNS_RR_TYPE_SOA) |
1722 | 0 | continue; /* skip SOA here */ |
1723 | 0 | if(!auth_zone_write_rrset(z, n, r, out)) |
1724 | 0 | return 0; |
1725 | 0 | } |
1726 | 0 | return 1; |
1727 | 0 | } |
1728 | | |
1729 | | int auth_zone_write_file(struct auth_zone* z, const char* fname) |
1730 | 0 | { |
1731 | 0 | FILE* out; |
1732 | 0 | struct auth_data* n; |
1733 | 0 | out = fopen(fname, "w"); |
1734 | 0 | if(!out) { |
1735 | 0 | log_err("could not open %s: %s", fname, strerror(errno)); |
1736 | 0 | return 0; |
1737 | 0 | } |
1738 | 0 | RBTREE_FOR(n, struct auth_data*, &z->data) { |
1739 | 0 | if(!auth_zone_write_domain(z, n, out)) { |
1740 | 0 | log_err("could not write domain to %s", fname); |
1741 | 0 | fclose(out); |
1742 | 0 | return 0; |
1743 | 0 | } |
1744 | 0 | } |
1745 | 0 | fclose(out); |
1746 | 0 | return 1; |
1747 | 0 | } |
1748 | | |
1749 | | /** offline verify for zonemd, while reading a zone file to immediately |
1750 | | * spot bad hashes in zonefile as they are read. |
1751 | | * Creates temp buffers, but uses anchors and validation environment |
1752 | | * from the module_env. */ |
1753 | | static void |
1754 | | zonemd_offline_verify(struct auth_zone* z, struct module_env* env_for_val, |
1755 | | struct module_stack* mods) |
1756 | 0 | { |
1757 | 0 | struct module_env env; |
1758 | 0 | time_t now = 0; |
1759 | 0 | if(!z->zonemd_check) |
1760 | 0 | return; |
1761 | 0 | env = *env_for_val; |
1762 | 0 | env.scratch_buffer = sldns_buffer_new(env.cfg->msg_buffer_size); |
1763 | 0 | if(!env.scratch_buffer) { |
1764 | 0 | log_err("out of memory"); |
1765 | 0 | goto clean_exit; |
1766 | 0 | } |
1767 | 0 | env.scratch = regional_create(); |
1768 | 0 | if(!env.now) { |
1769 | 0 | env.now = &now; |
1770 | 0 | now = time(NULL); |
1771 | 0 | } |
1772 | 0 | if(!env.scratch) { |
1773 | 0 | log_err("out of memory"); |
1774 | 0 | goto clean_exit; |
1775 | 0 | } |
1776 | 0 | auth_zone_verify_zonemd(z, &env, mods, NULL, 1, 0); |
1777 | |
1778 | 0 | clean_exit: |
1779 | | /* clean up and exit */ |
1780 | 0 | sldns_buffer_free(env.scratch_buffer); |
1781 | 0 | regional_destroy(env.scratch); |
1782 | 0 | } |
1783 | | |
1784 | | /** read all auth zones from file (if they have a zonefile) */ |
1785 | | static int |
1786 | | auth_zones_read_zones(struct auth_zones* az, struct config_file* cfg, |
1787 | | struct module_env* env, struct module_stack* mods) |
1788 | 0 | { |
1789 | 0 | struct auth_zone* z; |
1790 | 0 | lock_rw_wrlock(&az->lock); |
1791 | 0 | RBTREE_FOR(z, struct auth_zone*, &az->ztree) { |
1792 | 0 | lock_rw_wrlock(&z->lock); |
1793 | 0 | if(!auth_zone_read_zonefile(z, cfg)) { |
1794 | 0 | lock_rw_unlock(&z->lock); |
1795 | 0 | lock_rw_unlock(&az->lock); |
1796 | 0 | return 0; |
1797 | 0 | } |
1798 | 0 | if(z->zonefile && z->zonefile[0]!=0 && env) |
1799 | 0 | zonemd_offline_verify(z, env, mods); |
1800 | 0 | lock_rw_unlock(&z->lock); |
1801 | 0 | } |
1802 | 0 | lock_rw_unlock(&az->lock); |
1803 | 0 | return 1; |
1804 | 0 | } |
1805 | | |
1806 | | /** fetch the content of a ZONEMD RR from the rdata */ |
1807 | | static int zonemd_fetch_parameters(struct auth_rrset* zonemd_rrset, size_t i, |
1808 | | uint32_t* serial, int* scheme, int* hashalgo, uint8_t** hash, |
1809 | | size_t* hashlen) |
1810 | 0 | { |
1811 | 0 | size_t rr_len; |
1812 | 0 | uint8_t* rdata; |
1813 | 0 | if(i >= zonemd_rrset->data->count) |
1814 | 0 | return 0; |
1815 | 0 | rr_len = zonemd_rrset->data->rr_len[i]; |
1816 | 0 | if(rr_len < 2+4+1+1) |
1817 | 0 | return 0; /* too short, for rdlen+serial+scheme+algo */ |
1818 | 0 | rdata = zonemd_rrset->data->rr_data[i]; |
1819 | 0 | *serial = sldns_read_uint32(rdata+2); |
1820 | 0 | *scheme = rdata[6]; |
1821 | 0 | *hashalgo = rdata[7]; |
1822 | 0 | *hashlen = rr_len - 8; |
1823 | 0 | if(*hashlen == 0) |
1824 | 0 | *hash = NULL; |
1825 | 0 | else *hash = rdata+8; |
1826 | 0 | return 1; |
1827 | 0 | } |
1828 | | |
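The offsets used above come from the RFC 8976 ZONEMD rdata layout: a 4-octet serial, a 1-octet scheme, a 1-octet hash algorithm, then the digest; the extra 2 bytes in the bounds checks are the rdlength prefix that Unbound keeps in front of each stored RR's rdata. A standalone sketch of the same parse over bare rdata (no length prefix); parse_zonemd_rdata() and struct zonemd_fields are hypothetical names used only for illustration.

#include <stddef.h>
#include <stdint.h>

struct zonemd_fields {
	uint32_t serial;
	uint8_t scheme;
	uint8_t hashalgo;
	const uint8_t* digest;
	size_t digest_len;
};

/* Sketch: parse ZONEMD rdata (RFC 8976 section 2.2).  Returns 0 if the
 * rdata is too short to contain serial+scheme+hashalgo. */
static int parse_zonemd_rdata(const uint8_t* rdata, size_t rdlen,
	struct zonemd_fields* out)
{
	if(rdlen < 4+1+1)
		return 0;
	out->serial = ((uint32_t)rdata[0]<<24) | ((uint32_t)rdata[1]<<16) |
		((uint32_t)rdata[2]<<8) | (uint32_t)rdata[3];
	out->scheme = rdata[4];
	out->hashalgo = rdata[5];
	out->digest_len = rdlen - 6;
	out->digest = (out->digest_len > 0) ? rdata+6 : NULL;
	return 1;
}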
1829 | | /** |
1830 | | * See if the ZONEMD scheme, hash occurs more than once. |
1831 | | * @param zonemd_rrset: the zonemd rrset to check with the RRs in it. |
1832 | | * @param index: index of the original, this is allowed to have that |
1833 | | * scheme and hashalgo, but other RRs should not have it. |
1834 | | * @param scheme: the scheme to check for. |
1835 | | * @param hashalgo: the hash algorithm to check for. |
1836 | | * @return true if it occurs more than once. |
1837 | | */ |
1838 | | static int zonemd_is_duplicate_scheme_hash(struct auth_rrset* zonemd_rrset, |
1839 | | size_t index, int scheme, int hashalgo) |
1840 | 0 | { |
1841 | 0 | size_t j; |
1842 | 0 | for(j=0; j<zonemd_rrset->data->count; j++) { |
1843 | 0 | uint32_t serial2 = 0; |
1844 | 0 | int scheme2 = 0, hashalgo2 = 0; |
1845 | 0 | uint8_t* hash2 = NULL; |
1846 | 0 | size_t hashlen2 = 0; |
1847 | 0 | if(index == j) { |
1848 | | /* this is the original */ |
1849 | 0 | continue; |
1850 | 0 | } |
1851 | 0 | if(!zonemd_fetch_parameters(zonemd_rrset, j, &serial2, |
1852 | 0 | &scheme2, &hashalgo2, &hash2, &hashlen2)) { |
1853 | | /* malformed, skip it */ |
1854 | 0 | continue; |
1855 | 0 | } |
1856 | 0 | if(scheme == scheme2 && hashalgo == hashalgo2) { |
1857 | | /* duplicate scheme, hash */ |
1858 | 0 | verbose(VERB_ALGO, "zonemd duplicate for scheme %d " |
1859 | 0 | "and hash %d", scheme, hashalgo); |
1860 | 0 | return 1; |
1861 | 0 | } |
1862 | 0 | } |
1863 | 0 | return 0; |
1864 | 0 | } |
1865 | | |
1866 | | /** |
1867 | | * Check ZONEMDs if present for the auth zone. Depending on config |
1868 | | * it can warn or fail on that. Checks the hash of the ZONEMD. |
1869 | | * @param z: auth zone to check for. |
1870 | | * caller must hold lock on zone. |
1871 | | * @param env: module env for temp buffers. |
1872 | | * @param reason: returned on failure. |
1873 | | * @return false on failure, true if hash checks out. |
1874 | | */ |
1875 | | static int auth_zone_zonemd_check_hash(struct auth_zone* z, |
1876 | | struct module_env* env, char** reason) |
1877 | 0 | { |
1878 | | /* loop over ZONEMDs and see which one is valid. if not print |
1879 | | * failure (depending on config) */ |
1880 | 0 | struct auth_data* apex; |
1881 | 0 | struct auth_rrset* zonemd_rrset; |
1882 | 0 | size_t i; |
1883 | 0 | struct regional* region = NULL; |
1884 | 0 | struct sldns_buffer* buf = NULL; |
1885 | 0 | uint32_t soa_serial = 0; |
1886 | 0 | char* unsupported_reason = NULL; |
1887 | 0 | int only_unsupported = 1; |
1888 | 0 | region = env->scratch; |
1889 | 0 | regional_free_all(region); |
1890 | 0 | buf = env->scratch_buffer; |
1891 | 0 | if(!auth_zone_get_serial(z, &soa_serial)) { |
1892 | 0 | *reason = "zone has no SOA serial"; |
1893 | 0 | return 0; |
1894 | 0 | } |
1895 | | |
1896 | 0 | apex = az_find_name(z, z->name, z->namelen); |
1897 | 0 | if(!apex) { |
1898 | 0 | *reason = "zone has no apex"; |
1899 | 0 | return 0; |
1900 | 0 | } |
1901 | 0 | zonemd_rrset = az_domain_rrset(apex, LDNS_RR_TYPE_ZONEMD); |
1902 | 0 | if(!zonemd_rrset || zonemd_rrset->data->count==0) { |
1903 | 0 | *reason = "zone has no ZONEMD"; |
1904 | 0 | return 0; /* no RRset or no RRs in rrset */ |
1905 | 0 | } |
1906 | | |
1907 | | /* we have a ZONEMD, check if it is correct */ |
1908 | 0 | for(i=0; i<zonemd_rrset->data->count; i++) { |
1909 | 0 | uint32_t serial = 0; |
1910 | 0 | int scheme = 0, hashalgo = 0; |
1911 | 0 | uint8_t* hash = NULL; |
1912 | 0 | size_t hashlen = 0; |
1913 | 0 | if(!zonemd_fetch_parameters(zonemd_rrset, i, &serial, &scheme, |
1914 | 0 | &hashalgo, &hash, &hashlen)) { |
1915 | | /* malformed RR */ |
1916 | 0 | *reason = "ZONEMD rdata malformed"; |
1917 | 0 | only_unsupported = 0; |
1918 | 0 | continue; |
1919 | 0 | } |
1920 | | /* check for duplicates */ |
1921 | 0 | if(zonemd_is_duplicate_scheme_hash(zonemd_rrset, i, scheme, |
1922 | 0 | hashalgo)) { |
1923 | | /* duplicate hash of the same scheme,hash |
1924 | | * is not allowed. */ |
1925 | 0 | *reason = "ZONEMD RRSet contains more than one RR " |
1926 | 0 | "with the same scheme and hash algorithm"; |
1927 | 0 | only_unsupported = 0; |
1928 | 0 | continue; |
1929 | 0 | } |
1930 | 0 | regional_free_all(region); |
1931 | 0 | if(serial != soa_serial) { |
1932 | 0 | *reason = "ZONEMD serial is wrong"; |
1933 | 0 | only_unsupported = 0; |
1934 | 0 | continue; |
1935 | 0 | } |
1936 | 0 | *reason = NULL; |
1937 | 0 | if(auth_zone_generate_zonemd_check(z, scheme, hashalgo, |
1938 | 0 | hash, hashlen, region, buf, reason)) { |
1939 | | /* success */ |
1940 | 0 | if(*reason) { |
1941 | 0 | if(!unsupported_reason) |
1942 | 0 | unsupported_reason = *reason; |
1943 | | /* continue to check for valid ZONEMD */ |
1944 | 0 | if(verbosity >= VERB_ALGO) { |
1945 | 0 | char zstr[255+1]; |
1946 | 0 | dname_str(z->name, zstr); |
1947 | 0 | verbose(VERB_ALGO, "auth-zone %s ZONEMD %d %d is unsupported: %s", zstr, (int)scheme, (int)hashalgo, *reason); |
1948 | 0 | } |
1949 | 0 | *reason = NULL; |
1950 | 0 | continue; |
1951 | 0 | } |
1952 | 0 | if(verbosity >= VERB_ALGO) { |
1953 | 0 | char zstr[255+1]; |
1954 | 0 | dname_str(z->name, zstr); |
1955 | 0 | if(!*reason) |
1956 | 0 | verbose(VERB_ALGO, "auth-zone %s ZONEMD hash is correct", zstr); |
1957 | 0 | } |
1958 | 0 | return 1; |
1959 | 0 | } |
1960 | 0 | only_unsupported = 0; |
1961 | | /* try next one */ |
1962 | 0 | } |
1963 | | /* have we seen no failures, but only unsupported algorithms, |
1964 | | * one or more of them? */ |
1965 | 0 | if(only_unsupported && unsupported_reason) { |
1966 | | /* only unsupported algorithms, with valid serial, not |
1967 | | * malformed. Did not see supported algorithms, failed or |
1968 | | * successful ones. */ |
1969 | 0 | *reason = unsupported_reason; |
1970 | 0 | return 1; |
1971 | 0 | } |
1972 | | /* fail, we may have reason */ |
1973 | 0 | if(!*reason) |
1974 | 0 | *reason = "no ZONEMD records found"; |
1975 | 0 | if(verbosity >= VERB_ALGO) { |
1976 | 0 | char zstr[255+1]; |
1977 | 0 | dname_str(z->name, zstr); |
1978 | 0 | verbose(VERB_ALGO, "auth-zone %s ZONEMD failed: %s", zstr, *reason); |
1979 | 0 | } |
1980 | 0 | return 0; |
1981 | 0 | } |
1982 | | |
1983 | | /** find the apex SOA RRset, if it exists */ |
1984 | | struct auth_rrset* auth_zone_get_soa_rrset(struct auth_zone* z) |
1985 | 0 | { |
1986 | 0 | struct auth_data* apex; |
1987 | 0 | struct auth_rrset* soa; |
1988 | 0 | apex = az_find_name(z, z->name, z->namelen); |
1989 | 0 | if(!apex) return NULL; |
1990 | 0 | soa = az_domain_rrset(apex, LDNS_RR_TYPE_SOA); |
1991 | 0 | return soa; |
1992 | 0 | } |
1993 | | |
1994 | | /** find serial number of zone or false if none */ |
1995 | | int |
1996 | | auth_zone_get_serial(struct auth_zone* z, uint32_t* serial) |
1997 | 0 | { |
1998 | 0 | struct auth_data* apex; |
1999 | 0 | struct auth_rrset* soa; |
2000 | 0 | struct packed_rrset_data* d; |
2001 | 0 | apex = az_find_name(z, z->name, z->namelen); |
2002 | 0 | if(!apex) return 0; |
2003 | 0 | soa = az_domain_rrset(apex, LDNS_RR_TYPE_SOA); |
2004 | 0 | if(!soa || soa->data->count==0) |
2005 | 0 | return 0; /* no RRset or no RRs in rrset */ |
2006 | 0 | if(soa->data->rr_len[0] < 2+4*5) return 0; /* SOA too short */ |
2007 | 0 | d = soa->data; |
2008 | 0 | *serial = sldns_read_uint32(d->rr_data[0]+(d->rr_len[0]-20)); |
2009 | 0 | return 1; |
2010 | 0 | } |
2011 | | |
2012 | | /** Find the auth_zone SOA and populate the SOA values in xfr. */ |
2013 | | int |
2014 | | xfr_find_soa(struct auth_zone* z, struct auth_xfer* xfr) |
2015 | 0 | { |
2016 | 0 | struct auth_data* apex; |
2017 | 0 | struct auth_rrset* soa; |
2018 | 0 | struct packed_rrset_data* d; |
2019 | 0 | apex = az_find_name(z, z->name, z->namelen); |
2020 | 0 | if(!apex) return 0; |
2021 | 0 | soa = az_domain_rrset(apex, LDNS_RR_TYPE_SOA); |
2022 | 0 | if(!soa || soa->data->count==0) |
2023 | 0 | return 0; /* no RRset or no RRs in rrset */ |
2024 | 0 | if(soa->data->rr_len[0] < 2+4*5) return 0; /* SOA too short */ |
2025 | | /* SOA record ends with serial, refresh, retry, expiry, minimum, |
2026 | | * as 4 byte fields */ |
2027 | 0 | d = soa->data; |
2028 | 0 | xfr->have_zone = 1; |
2029 | 0 | xfr->serial = sldns_read_uint32(d->rr_data[0]+(d->rr_len[0]-20)); |
2030 | 0 | xfr->refresh = sldns_read_uint32(d->rr_data[0]+(d->rr_len[0]-16)); |
2031 | 0 | xfr->retry = sldns_read_uint32(d->rr_data[0]+(d->rr_len[0]-12)); |
2032 | 0 | xfr->expiry = sldns_read_uint32(d->rr_data[0]+(d->rr_len[0]-8)); |
2033 | | /* soa minimum at d->rr_len[0]-4 */ |
2034 | 0 | return 1; |
2035 | 0 | } |
2036 | | |
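auth_zone_get_serial() and xfr_find_soa() both rely on the SOA rdata ending in five fixed 4-octet counters (serial, refresh, retry, expire, minimum), so they index backwards from the end of the rdata instead of skipping the two variable-length names (MNAME, RNAME) at the front; the 2+4*5 length check again includes the stored rdlength prefix. A standalone sketch of that backwards read over bare SOA rdata; soa_tail_fields() and read_u32() are hypothetical helpers.

#include <stddef.h>
#include <stdint.h>

/* Sketch: read a big-endian 32-bit value. */
static uint32_t read_u32(const uint8_t* p)
{
	return ((uint32_t)p[0]<<24) | ((uint32_t)p[1]<<16) |
		((uint32_t)p[2]<<8) | (uint32_t)p[3];
}

/* Sketch: extract the five trailing 32-bit SOA fields (RFC 1035 3.3.13)
 * from SOA rdata; the MNAME and RNAME at the front are variable length,
 * so the fields are addressed from the end.  Returns 0 if too short. */
static int soa_tail_fields(const uint8_t* rdata, size_t rdlen,
	uint32_t* serial, uint32_t* refresh, uint32_t* retry,
	uint32_t* expire, uint32_t* minimum)
{
	if(rdlen < 4*5)
		return 0;
	*serial  = read_u32(rdata + rdlen - 20);
	*refresh = read_u32(rdata + rdlen - 16);
	*retry   = read_u32(rdata + rdlen - 12);
	*expire  = read_u32(rdata + rdlen - 8);
	*minimum = read_u32(rdata + rdlen - 4);
	return 1;
}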
2037 | | /** |
2038 | | * Setup auth_xfer zone |
2039 | | * This populates the have_zone flag, the SOA values, and related times. |
2040 | | * Doesn't do network traffic yet, can set option flags. |
2041 | | * @param z: locked by caller, and modified for setup |
2042 | | * @param x: locked by caller, and modified. |
2043 | | * @return false on failure. |
2044 | | */ |
2045 | | static int |
2046 | | auth_xfer_setup(struct auth_zone* z, struct auth_xfer* x) |
2047 | 0 | { |
2048 | | /* for a zone without zone transfers, x==NULL, so skip them, |
2049 | | * i.e. the zone config is fixed with no masters or urls */ |
2050 | 0 | if(!z || !x) return 1; |
2051 | 0 | if(!xfr_find_soa(z, x)) { |
2052 | 0 | return 1; |
2053 | 0 | } |
2054 | | /* nothing for probe, nextprobe and transfer tasks */ |
2055 | 0 | return 1; |
2056 | 0 | } |
2057 | | |
2058 | | /** |
2059 | | * Setup all zones |
2060 | | * @param az: auth zones structure |
2061 | | * @return false on failure. |
2062 | | */ |
2063 | | static int |
2064 | | auth_zones_setup_zones(struct auth_zones* az) |
2065 | 0 | { |
2066 | 0 | struct auth_zone* z; |
2067 | 0 | struct auth_xfer* x; |
2068 | 0 | lock_rw_wrlock(&az->lock); |
2069 | 0 | RBTREE_FOR(z, struct auth_zone*, &az->ztree) { |
2070 | 0 | lock_rw_wrlock(&z->lock); |
2071 | 0 | x = auth_xfer_find(az, z->name, z->namelen, z->dclass); |
2072 | 0 | if(x) { |
2073 | 0 | lock_basic_lock(&x->lock); |
2074 | 0 | } |
2075 | 0 | if(!auth_xfer_setup(z, x)) { |
2076 | 0 | if(x) { |
2077 | 0 | lock_basic_unlock(&x->lock); |
2078 | 0 | } |
2079 | 0 | lock_rw_unlock(&z->lock); |
2080 | 0 | lock_rw_unlock(&az->lock); |
2081 | 0 | return 0; |
2082 | 0 | } |
2083 | 0 | if(x) { |
2084 | 0 | lock_basic_unlock(&x->lock); |
2085 | 0 | } |
2086 | 0 | lock_rw_unlock(&z->lock); |
2087 | 0 | } |
2088 | 0 | lock_rw_unlock(&az->lock); |
2089 | 0 | return 1; |
2090 | 0 | } |
2091 | | |
2092 | | /** set config items and create zones */ |
2093 | | static int |
2094 | | auth_zones_cfg(struct auth_zones* az, struct config_auth* c) |
2095 | 0 | { |
2096 | 0 | struct auth_zone* z; |
2097 | 0 | struct auth_xfer* x = NULL; |
2098 | | |
2099 | | /* create zone */ |
2100 | 0 | if(c->isrpz) { |
2101 | | /* if the rpz lock is needed, grab it before the other |
2102 | | * locks to avoid a lock dependency cycle */ |
2103 | 0 | lock_rw_wrlock(&az->rpz_lock); |
2104 | 0 | } |
2105 | 0 | lock_rw_wrlock(&az->lock); |
2106 | 0 | if(!(z=auth_zones_find_or_add_zone(az, c->name))) { |
2107 | 0 | lock_rw_unlock(&az->lock); |
2108 | 0 | if(c->isrpz) { |
2109 | 0 | lock_rw_unlock(&az->rpz_lock); |
2110 | 0 | } |
2111 | 0 | return 0; |
2112 | 0 | } |
2113 | 0 | if(c->masters || c->urls) { |
2114 | 0 | if(!(x=auth_zones_find_or_add_xfer(az, z))) { |
2115 | 0 | lock_rw_unlock(&az->lock); |
2116 | 0 | lock_rw_unlock(&z->lock); |
2117 | 0 | if(c->isrpz) { |
2118 | 0 | lock_rw_unlock(&az->rpz_lock); |
2119 | 0 | } |
2120 | 0 | return 0; |
2121 | 0 | } |
2122 | 0 | } |
2123 | 0 | if(c->for_downstream) |
2124 | 0 | az->have_downstream = 1; |
2125 | 0 | lock_rw_unlock(&az->lock); |
2126 | | |
2127 | | /* set options */ |
2128 | 0 | z->zone_deleted = 0; |
2129 | 0 | if(!auth_zone_set_zonefile(z, c->zonefile)) { |
2130 | 0 | if(x) { |
2131 | 0 | lock_basic_unlock(&x->lock); |
2132 | 0 | } |
2133 | 0 | lock_rw_unlock(&z->lock); |
2134 | 0 | if(c->isrpz) { |
2135 | 0 | lock_rw_unlock(&az->rpz_lock); |
2136 | 0 | } |
2137 | 0 | return 0; |
2138 | 0 | } |
2139 | 0 | z->for_downstream = c->for_downstream; |
2140 | 0 | z->for_upstream = c->for_upstream; |
2141 | 0 | z->fallback_enabled = c->fallback_enabled; |
2142 | 0 | z->zonemd_check = c->zonemd_check; |
2143 | 0 | z->zonemd_reject_absence = c->zonemd_reject_absence; |
2144 | 0 | if(c->isrpz && !z->rpz){ |
2145 | 0 | if(!(z->rpz = rpz_create(c))){ |
2146 | 0 | fatal_exit("Could not setup RPZ zones"); |
2147 | 0 | return 0; |
2148 | 0 | } |
2149 | 0 | lock_protect(&z->lock, &z->rpz->local_zones, sizeof(*z->rpz)); |
2150 | | /* the az->rpz_lock is locked above */ |
2151 | 0 | z->rpz_az_next = az->rpz_first; |
2152 | 0 | if(az->rpz_first) |
2153 | 0 | az->rpz_first->rpz_az_prev = z; |
2154 | 0 | az->rpz_first = z; |
2155 | 0 | } |
2156 | 0 | if(c->isrpz) { |
2157 | 0 | lock_rw_unlock(&az->rpz_lock); |
2158 | 0 | } |
2159 | | |
2160 | | /* xfer zone */ |
2161 | 0 | if(x) { |
2162 | 0 | z->zone_is_slave = 1; |
2163 | | /* set options on xfer zone */ |
2164 | 0 | if(!xfer_set_masters(&x->task_probe->masters, c, 0)) { |
2165 | 0 | lock_basic_unlock(&x->lock); |
2166 | 0 | lock_rw_unlock(&z->lock); |
2167 | 0 | return 0; |
2168 | 0 | } |
2169 | 0 | if(!xfer_set_masters(&x->task_transfer->masters, c, 1)) { |
2170 | 0 | lock_basic_unlock(&x->lock); |
2171 | 0 | lock_rw_unlock(&z->lock); |
2172 | 0 | return 0; |
2173 | 0 | } |
2174 | 0 | lock_basic_unlock(&x->lock); |
2175 | 0 | } |
2176 | | |
2177 | 0 | lock_rw_unlock(&z->lock); |
2178 | 0 | return 1; |
2179 | 0 | } |
2180 | | |
2181 | | /** set all auth zones deleted, then in auth_zones_cfg, it marks them |
2182 | | * as nondeleted (if they are still in the config), and then later |
2183 | | * we can find deleted zones */ |
2184 | | static void |
2185 | | az_setall_deleted(struct auth_zones* az) |
2186 | 0 | { |
2187 | 0 | struct auth_zone* z; |
2188 | 0 | lock_rw_wrlock(&az->lock); |
2189 | 0 | RBTREE_FOR(z, struct auth_zone*, &az->ztree) { |
2190 | 0 | lock_rw_wrlock(&z->lock); |
2191 | 0 | z->zone_deleted = 1; |
2192 | 0 | lock_rw_unlock(&z->lock); |
2193 | 0 | } |
2194 | 0 | lock_rw_unlock(&az->lock); |
2195 | 0 | } |
2196 | | |
2197 | | /** find zones that are marked deleted and delete them. |
2198 | | * This is called from apply_cfg, and there are no threads and no |
2199 | | * workers, so the xfr can just be deleted. */ |
2200 | | static void |
2201 | | az_delete_deleted_zones(struct auth_zones* az) |
2202 | 0 | { |
2203 | 0 | struct auth_zone* z; |
2204 | 0 | struct auth_zone* delete_list = NULL, *next; |
2205 | 0 | struct auth_xfer* xfr; |
2206 | 0 | lock_rw_wrlock(&az->lock); |
2207 | 0 | RBTREE_FOR(z, struct auth_zone*, &az->ztree) { |
2208 | 0 | lock_rw_wrlock(&z->lock); |
2209 | 0 | if(z->zone_deleted) { |
2210 | | /* we cannot alter the rbtree right now, but |
2211 | | * we can put it on a linked list and then |
2212 | | * delete it */ |
2213 | 0 | z->delete_next = delete_list; |
2214 | 0 | delete_list = z; |
2215 | 0 | } |
2216 | 0 | lock_rw_unlock(&z->lock); |
2217 | 0 | } |
2218 | | /* now we are out of the tree loop and we can loop and delete |
2219 | | * the zones */ |
2220 | 0 | z = delete_list; |
2221 | 0 | while(z) { |
2222 | 0 | next = z->delete_next; |
2223 | 0 | xfr = auth_xfer_find(az, z->name, z->namelen, z->dclass); |
2224 | 0 | if(xfr) { |
2225 | 0 | (void)rbtree_delete(&az->xtree, &xfr->node); |
2226 | 0 | auth_xfer_delete(xfr); |
2227 | 0 | } |
2228 | 0 | (void)rbtree_delete(&az->ztree, &z->node); |
2229 | 0 | auth_zone_delete(z, az); |
2230 | 0 | z = next; |
2231 | 0 | } |
2232 | 0 | lock_rw_unlock(&az->lock); |
2233 | 0 | } |
2234 | | |
2235 | | int auth_zones_apply_cfg(struct auth_zones* az, struct config_file* cfg, |
2236 | | int setup, int* is_rpz, struct module_env* env, |
2237 | | struct module_stack* mods) |
2238 | 0 | { |
2239 | 0 | struct config_auth* p; |
2240 | 0 | az_setall_deleted(az); |
2241 | 0 | for(p = cfg->auths; p; p = p->next) { |
2242 | 0 | if(!p->name || p->name[0] == 0) { |
2243 | 0 | log_warn("auth-zone without a name, skipped"); |
2244 | 0 | continue; |
2245 | 0 | } |
2246 | 0 | *is_rpz = (*is_rpz || p->isrpz); |
2247 | 0 | if(!auth_zones_cfg(az, p)) { |
2248 | 0 | log_err("cannot config auth zone %s", p->name); |
2249 | 0 | return 0; |
2250 | 0 | } |
2251 | 0 | } |
2252 | 0 | az_delete_deleted_zones(az); |
2253 | 0 | if(!auth_zones_read_zones(az, cfg, env, mods)) |
2254 | 0 | return 0; |
2255 | 0 | if(setup) { |
2256 | 0 | if(!auth_zones_setup_zones(az)) |
2257 | 0 | return 0; |
2258 | 0 | } |
2259 | 0 | return 1; |
2260 | 0 | } |
2261 | | |
2262 | | /** delete chunks |
2263 | | * @param at: transfer structure with chunks list. The chunks and their |
2264 | | * data are freed. |
2265 | | */ |
2266 | | static void |
2267 | | auth_chunks_delete(struct auth_transfer* at) |
2268 | 0 | { |
2269 | 0 | if(at->chunks_first) { |
2270 | 0 | struct auth_chunk* c, *cn; |
2271 | 0 | c = at->chunks_first; |
2272 | 0 | while(c) { |
2273 | 0 | cn = c->next; |
2274 | 0 | free(c->data); |
2275 | 0 | free(c); |
2276 | 0 | c = cn; |
2277 | 0 | } |
2278 | 0 | } |
2279 | 0 | at->chunks_first = NULL; |
2280 | 0 | at->chunks_last = NULL; |
2281 | 0 | } |
2282 | | |
2283 | | /** free master addr list */ |
2284 | | static void |
2285 | | auth_free_master_addrs(struct auth_addr* list) |
2286 | 0 | { |
2287 | 0 | struct auth_addr *n; |
2288 | 0 | while(list) { |
2289 | 0 | n = list->next; |
2290 | 0 | free(list); |
2291 | 0 | list = n; |
2292 | 0 | } |
2293 | 0 | } |
2294 | | |
2295 | | /** free the masters list */ |
2296 | | static void |
2297 | | auth_free_masters(struct auth_master* list) |
2298 | 0 | { |
2299 | 0 | struct auth_master* n; |
2300 | 0 | while(list) { |
2301 | 0 | n = list->next; |
2302 | 0 | auth_free_master_addrs(list->list); |
2303 | 0 | free(list->host); |
2304 | 0 | free(list->file); |
2305 | 0 | free(list); |
2306 | 0 | list = n; |
2307 | 0 | } |
2308 | 0 | } |
2309 | | |
2310 | | /** delete auth xfer structure |
2311 | | * @param xfr: delete this xfer and its tasks. |
2312 | | */ |
2313 | | void |
2314 | | auth_xfer_delete(struct auth_xfer* xfr) |
2315 | 0 | { |
2316 | 0 | if(!xfr) return; |
2317 | 0 | lock_basic_destroy(&xfr->lock); |
2318 | 0 | free(xfr->name); |
2319 | 0 | if(xfr->task_nextprobe) { |
2320 | 0 | comm_timer_delete(xfr->task_nextprobe->timer); |
2321 | 0 | free(xfr->task_nextprobe); |
2322 | 0 | } |
2323 | 0 | if(xfr->task_probe) { |
2324 | 0 | auth_free_masters(xfr->task_probe->masters); |
2325 | 0 | comm_point_delete(xfr->task_probe->cp); |
2326 | 0 | comm_timer_delete(xfr->task_probe->timer); |
2327 | 0 | free(xfr->task_probe); |
2328 | 0 | } |
2329 | 0 | if(xfr->task_transfer) { |
2330 | 0 | auth_free_masters(xfr->task_transfer->masters); |
2331 | 0 | comm_point_delete(xfr->task_transfer->cp); |
2332 | 0 | comm_timer_delete(xfr->task_transfer->timer); |
2333 | 0 | if(xfr->task_transfer->chunks_first) { |
2334 | 0 | auth_chunks_delete(xfr->task_transfer); |
2335 | 0 | } |
2336 | 0 | free(xfr->task_transfer); |
2337 | 0 | } |
2338 | 0 | auth_free_masters(xfr->allow_notify_list); |
2339 | 0 | free(xfr); |
2340 | 0 | } |
2341 | | |
2342 | | /** helper traverse to delete zones */ |
2343 | | static void |
2344 | | auth_zone_del(rbnode_type* n, void* ATTR_UNUSED(arg)) |
2345 | 0 | { |
2346 | 0 | struct auth_zone* z = (struct auth_zone*)n->key; |
2347 | 0 | auth_zone_delete(z, NULL); |
2348 | 0 | } |
2349 | | |
2350 | | /** helper traverse to delete xfer zones */ |
2351 | | static void |
2352 | | auth_xfer_del(rbnode_type* n, void* ATTR_UNUSED(arg)) |
2353 | 0 | { |
2354 | 0 | struct auth_xfer* z = (struct auth_xfer*)n->key; |
2355 | 0 | auth_xfer_delete(z); |
2356 | 0 | } |
2357 | | |
2358 | | void auth_zones_delete(struct auth_zones* az) |
2359 | 0 | { |
2360 | 0 | if(!az) return; |
2361 | 0 | lock_rw_destroy(&az->lock); |
2362 | 0 | lock_rw_destroy(&az->rpz_lock); |
2363 | 0 | traverse_postorder(&az->ztree, auth_zone_del, NULL); |
2364 | 0 | traverse_postorder(&az->xtree, auth_xfer_del, NULL); |
2365 | 0 | free(az); |
2366 | 0 | } |
2367 | | |
2368 | | /** true if domain has only nsec3 */ |
2369 | | static int |
2370 | | domain_has_only_nsec3(struct auth_data* n) |
2371 | 0 | { |
2372 | 0 | struct auth_rrset* rrset = n->rrsets; |
2373 | 0 | int nsec3_seen = 0; |
2374 | 0 | while(rrset) { |
2375 | 0 | if(rrset->type == LDNS_RR_TYPE_NSEC3) { |
2376 | 0 | nsec3_seen = 1; |
2377 | 0 | } else if(rrset->type != LDNS_RR_TYPE_RRSIG) { |
2378 | 0 | return 0; |
2379 | 0 | } |
2380 | 0 | rrset = rrset->next; |
2381 | 0 | } |
2382 | 0 | return nsec3_seen; |
2383 | 0 | } |
2384 | | |
2385 | | /** see if the domain has a wildcard child '*.domain' */ |
2386 | | static struct auth_data* |
2387 | | az_find_wildcard_domain(struct auth_zone* z, uint8_t* nm, size_t nmlen) |
2388 | 0 | { |
2389 | 0 | uint8_t wc[LDNS_MAX_DOMAINLEN]; |
2390 | 0 | if(nmlen+2 > sizeof(wc)) |
2391 | 0 | return NULL; /* result would be too long */ |
2392 | 0 | wc[0] = 1; /* length of wildcard label */ |
2393 | 0 | wc[1] = (uint8_t)'*'; /* wildcard label */ |
2394 | 0 | memmove(wc+2, nm, nmlen); |
2395 | 0 | return az_find_name(z, wc, nmlen+2); |
2396 | 0 | } |
2397 | | |
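The wildcard probe above builds the wire-format name "*.domain" by prepending a one-octet label length and the '*' octet to the existing name. A standalone sketch of that construction; make_wildcard_name() is a hypothetical helper and the 255-octet cap mirrors the maximum domain name length checked against above.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Sketch: build the wire-format name "*.<nm>" into buf.  Returns the new
 * length, or 0 if the result would not fit or would exceed the 255-octet
 * domain name limit. */
static size_t make_wildcard_name(const uint8_t* nm, size_t nmlen,
	uint8_t* buf, size_t buflen)
{
	if(nmlen + 2 > buflen || nmlen + 2 > 255)
		return 0;
	buf[0] = 1;            /* length of the wildcard label */
	buf[1] = (uint8_t)'*'; /* the wildcard label itself */
	memmove(buf+2, nm, nmlen);
	return nmlen + 2;
}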
2398 | | /** find wildcard between qname and cename */ |
2399 | | static struct auth_data* |
2400 | | az_find_wildcard(struct auth_zone* z, struct query_info* qinfo, |
2401 | | struct auth_data* ce) |
2402 | 0 | { |
2403 | 0 | uint8_t* nm = qinfo->qname; |
2404 | 0 | size_t nmlen = qinfo->qname_len; |
2405 | 0 | struct auth_data* node; |
2406 | 0 | if(!dname_subdomain_c(nm, z->name)) |
2407 | 0 | return NULL; /* out of zone */ |
2408 | 0 | while((node=az_find_wildcard_domain(z, nm, nmlen))==NULL) { |
2409 | | /* see if we can go up to find the wildcard */ |
2410 | 0 | if(nmlen == z->namelen) |
2411 | 0 | return NULL; /* top of zone reached */ |
2412 | 0 | if(ce && nmlen == ce->namelen) |
2413 | 0 | return NULL; /* ce reached */ |
2414 | 0 | if(dname_is_root(nm)) |
2415 | 0 | return NULL; /* cannot go up */ |
2416 | 0 | dname_remove_label(&nm, &nmlen); |
2417 | 0 | } |
2418 | 0 | return node; |
2419 | 0 | } |
2420 | | |
2421 | | /** domain is not exact, find first candidate ce (name that matches |
2422 | | * a part of qname) in tree */ |
2423 | | static struct auth_data* |
2424 | | az_find_candidate_ce(struct auth_zone* z, struct query_info* qinfo, |
2425 | | struct auth_data* n) |
2426 | 0 | { |
2427 | 0 | uint8_t* nm; |
2428 | 0 | size_t nmlen; |
2429 | 0 | if(n) { |
2430 | 0 | nm = dname_get_shared_topdomain(qinfo->qname, n->name); |
2431 | 0 | } else { |
2432 | 0 | nm = qinfo->qname; |
2433 | 0 | } |
2434 | 0 | dname_count_size_labels(nm, &nmlen); |
2435 | 0 | n = az_find_name(z, nm, nmlen); |
2436 | | /* delete labels and go up on name */ |
2437 | 0 | while(!n) { |
2438 | 0 | if(dname_is_root(nm)) |
2439 | 0 | return NULL; /* cannot go up */ |
2440 | 0 | dname_remove_label(&nm, &nmlen); |
2441 | 0 | n = az_find_name(z, nm, nmlen); |
2442 | 0 | } |
2443 | 0 | return n; |
2444 | 0 | } |
2445 | | |
2446 | | /** go up the auth tree to next existing name. */ |
2447 | | static struct auth_data* |
2448 | | az_domain_go_up(struct auth_zone* z, struct auth_data* n) |
2449 | 0 | { |
2450 | 0 | uint8_t* nm = n->name; |
2451 | 0 | size_t nmlen = n->namelen; |
2452 | 0 | while(!dname_is_root(nm)) { |
2453 | 0 | dname_remove_label(&nm, &nmlen); |
2454 | 0 | if((n=az_find_name(z, nm, nmlen)) != NULL) |
2455 | 0 | return n; |
2456 | 0 | } |
2457 | 0 | return NULL; |
2458 | 0 | } |
2459 | | |
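az_domain_go_up() and the wildcard search above walk towards the zone apex with Unbound's dname_remove_label() helper, which strips the leftmost label of an uncompressed wire-format name. A standalone sketch of that single step, independent of Unbound's helpers; remove_first_label() is a hypothetical name used only for illustration.

#include <stddef.h>
#include <stdint.h>

/* Sketch: advance *nm past its first label (uncompressed wire format),
 * shrinking *nmlen accordingly.  Returns 0 on the root name or on a
 * malformed label, 1 on success. */
static int remove_first_label(uint8_t** nm, size_t* nmlen)
{
	uint8_t lab = (*nm)[0];
	if(lab == 0)
		return 0;              /* already at the root */
	if(lab > 63 || (size_t)lab + 1 >= *nmlen)
		return 0;              /* compression pointer or truncated */
	*nm += (size_t)lab + 1;
	*nmlen -= (size_t)lab + 1;
	return 1;
}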
2460 | | /** Find the closest encloser, a name that exists and is above the |
2461 | | * qname. |
2462 | | * return true if the node (param node) is existing, nonobscured and |
2463 | | * can be used to generate answers from. It is then also node_exact. |
2464 | | * returns false if the node is not good enough (or it wasn't node_exact) |
2465 | | * in this case the ce can be filled. |
2466 | | * if ce is NULL, no ce exists, and likely the zone is completely empty, |
2467 | | * not even with a zone apex. |
2468 | | * if ce is nonNULL it is the closest enclosing upper name (that exists |
2469 | | * itself for answer purposes). That name may have a DNAME, NS or wildcard. |
2470 | | * rrset is set to the closest DNAME or NS rrset that was found. |
2471 | | */ |
2472 | | static int |
2473 | | az_find_ce(struct auth_zone* z, struct query_info* qinfo, |
2474 | | struct auth_data* node, int node_exact, struct auth_data** ce, |
2475 | | struct auth_rrset** rrset) |
2476 | 0 | { |
2477 | 0 | struct auth_data* n = node; |
2478 | 0 | *ce = NULL; |
2479 | 0 | *rrset = NULL; |
2480 | 0 | if(!node_exact) { |
2481 | | /* if not exact, lookup closest exact match */ |
2482 | 0 | n = az_find_candidate_ce(z, qinfo, n); |
2483 | 0 | } else { |
2484 | | /* if exact, the node itself is the first candidate ce */ |
2485 | 0 | *ce = n; |
2486 | 0 | } |
2487 | | |
2488 | | /* no direct answer from nsec3-only domains */ |
2489 | 0 | if(n && domain_has_only_nsec3(n)) { |
2490 | 0 | node_exact = 0; |
2491 | 0 | *ce = NULL; |
2492 | 0 | } |
2493 | | |
2494 | | /* with exact matches, walk up the labels until we find the |
2495 | | * delegation, or DNAME or zone end */ |
2496 | 0 | while(n) { |
2497 | | /* see if the current candidate has issues */ |
2498 | | /* not zone apex and has type NS */ |
2499 | 0 | if(n->namelen != z->namelen && |
2500 | 0 | (*rrset=az_domain_rrset(n, LDNS_RR_TYPE_NS)) && |
2501 | | /* delegate here, but DS at exact the dp has notype */ |
2502 | 0 | (qinfo->qtype != LDNS_RR_TYPE_DS || |
2503 | 0 | n->namelen != qinfo->qname_len)) { |
2504 | | /* referral */ |
2505 | | /* this is ce and the lowernode is nonexisting */ |
2506 | 0 | *ce = n; |
2507 | 0 | return 0; |
2508 | 0 | } |
2509 | | /* not equal to qname and has type DNAME */ |
2510 | 0 | if(n->namelen != qinfo->qname_len && |
2511 | 0 | (*rrset=az_domain_rrset(n, LDNS_RR_TYPE_DNAME))) { |
2512 | | /* this is ce and the lowernode is nonexisting */ |
2513 | 0 | *ce = n; |
2514 | 0 | return 0; |
2515 | 0 | } |
2516 | | |
2517 | 0 | if(*ce == NULL && !domain_has_only_nsec3(n)) { |
2518 | | /* if not found yet, this exact name must be |
2519 | | * our lowest match (but not nsec3onlydomain) */ |
2520 | 0 | *ce = n; |
2521 | 0 | } |
2522 | | |
2523 | | /* walk up the tree by removing labels from name and lookup */ |
2524 | 0 | n = az_domain_go_up(z, n); |
2525 | 0 | } |
2526 | | /* found no problems, if it was an exact node, it is fine to use */ |
2527 | 0 | return node_exact; |
2528 | 0 | } |
2529 | | |
2530 | | /** add additional A/AAAA from domain names in rrset rdata (+offset) |
2531 | | * offset is number of bytes in rdata where the dname is located. */ |
2532 | | static int |
2533 | | az_add_additionals_from(struct auth_zone* z, struct regional* region, |
2534 | | struct dns_msg* msg, struct auth_rrset* rrset, size_t offset) |
2535 | 0 | { |
2536 | 0 | struct packed_rrset_data* d = rrset->data; |
2537 | 0 | size_t i; |
2538 | 0 | if(!d) return 0; |
2539 | 0 | for(i=0; i<d->count; i++) { |
2540 | 0 | size_t dlen; |
2541 | 0 | struct auth_data* domain; |
2542 | 0 | struct auth_rrset* ref; |
2543 | 0 | if(d->rr_len[i] < 2+offset) |
2544 | 0 | continue; /* too short */ |
2545 | 0 | if(!(dlen = dname_valid(d->rr_data[i]+2+offset, |
2546 | 0 | d->rr_len[i]-2-offset))) |
2547 | 0 | continue; /* malformed */ |
2548 | 0 | domain = az_find_name(z, d->rr_data[i]+2+offset, dlen); |
2549 | 0 | if(!domain) |
2550 | 0 | continue; |
2551 | 0 | if((ref=az_domain_rrset(domain, LDNS_RR_TYPE_A)) != NULL) { |
2552 | 0 | if(!msg_add_rrset_ar(z, region, msg, domain, ref)) |
2553 | 0 | return 0; |
2554 | 0 | } |
2555 | 0 | if((ref=az_domain_rrset(domain, LDNS_RR_TYPE_AAAA)) != NULL) { |
2556 | 0 | if(!msg_add_rrset_ar(z, region, msg, domain, ref)) |
2557 | 0 | return 0; |
2558 | 0 | } |
2559 | 0 | } |
2560 | 0 | return 1; |
2561 | 0 | } |
2562 | | |
2563 | | /** add negative SOA record (with negative TTL) */ |
2564 | | static int |
2565 | | az_add_negative_soa(struct auth_zone* z, struct regional* region, |
2566 | | struct dns_msg* msg) |
2567 | 0 | { |
2568 | 0 | time_t minimum; |
2569 | 0 | size_t i; |
2570 | 0 | struct packed_rrset_data* d; |
2571 | 0 | struct auth_rrset* soa; |
2572 | 0 | struct auth_data* apex = az_find_name(z, z->name, z->namelen); |
2573 | 0 | if(!apex) return 0; |
2574 | 0 | soa = az_domain_rrset(apex, LDNS_RR_TYPE_SOA); |
2575 | 0 | if(!soa) return 0; |
2576 | | /* must be first to put in message; we want to fix the TTL with |
2577 | | * one RRset here, otherwise we'd need to loop over the RRs to get |
2578 | | * the resulting lower TTL */ |
2579 | 0 | log_assert(msg->rep->rrset_count == 0); |
2580 | 0 | if(!msg_add_rrset_ns(z, region, msg, apex, soa)) return 0; |
2581 | | /* fixup TTL */ |
2582 | 0 | d = (struct packed_rrset_data*)msg->rep->rrsets[msg->rep->rrset_count-1]->entry.data; |
2583 | | /* last 4 bytes are minimum ttl in network format */ |
2584 | 0 | if(d->count == 0) return 0; |
2585 | 0 | if(d->rr_len[0] < 2+4) return 0; |
2586 | 0 | minimum = (time_t)sldns_read_uint32(d->rr_data[0]+(d->rr_len[0]-4)); |
2587 | 0 | minimum = d->ttl<minimum?d->ttl:minimum; |
2588 | 0 | d->ttl = minimum; |
2589 | 0 | for(i=0; i < d->count + d->rrsig_count; i++) |
2590 | 0 | d->rr_ttl[i] = minimum; |
2591 | 0 | msg->rep->ttl = get_rrset_ttl(msg->rep->rrsets[0]); |
2592 | 0 | msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(msg->rep->ttl); |
2593 | 0 | msg->rep->serve_expired_ttl = msg->rep->ttl + SERVE_EXPIRED_TTL; |
2594 | 0 | return 1; |
2595 | 0 | } |
2596 | | |
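The TTL fix-up above follows RFC 2308 negative caching: the SOA attached to a negative answer is capped to the minimum of the SOA RR's own TTL and the SOA "minimum" field, which occupies the last 4 octets of the rdata. A standalone sketch of just that computation over bare SOA rdata; negative_ttl() is a hypothetical helper.

#include <stddef.h>
#include <stdint.h>

/* Sketch: compute the RFC 2308 negative-caching TTL from an SOA RR's own
 * TTL and its rdata (whose last 4 octets are the "minimum" field).
 * Returns soa_ttl unchanged when the rdata is too short. */
static uint32_t negative_ttl(uint32_t soa_ttl, const uint8_t* rdata,
	size_t rdlen)
{
	uint32_t minimum;
	if(rdlen < 4)
		return soa_ttl;
	minimum = ((uint32_t)rdata[rdlen-4]<<24) |
		((uint32_t)rdata[rdlen-3]<<16) |
		((uint32_t)rdata[rdlen-2]<<8) |
		(uint32_t)rdata[rdlen-1];
	return soa_ttl < minimum ? soa_ttl : minimum;
}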
2597 | | /** See if the query goes to an empty nonterminal (a name that has no |
2598 | | * auth_data, but there are nodes underneath). We already checked that |
2599 | | * there is no NS or DNAME above, so we only need to check if some node |
2600 | | * exists below (with a nonempty rr list); returns true if it is an empty nonterminal */ |
2601 | | static int |
2602 | | az_empty_nonterminal(struct auth_zone* z, struct query_info* qinfo, |
2603 | | struct auth_data* node) |
2604 | 0 | { |
2605 | 0 | struct auth_data* next; |
2606 | 0 | if(!node) { |
2607 | | /* no smaller was found, use first (smallest) node as the |
2608 | | * next one */ |
2609 | 0 | next = (struct auth_data*)rbtree_first(&z->data); |
2610 | 0 | } else { |
2611 | 0 | next = (struct auth_data*)rbtree_next(&node->node); |
2612 | 0 | } |
2613 | 0 | while(next && (rbnode_type*)next != RBTREE_NULL && next->rrsets == NULL) { |
2614 | | /* the next name has empty rrsets, is an empty nonterminal |
2615 | | * itself, see if there exists something below it */ |
2616 | 0 | next = (struct auth_data*)rbtree_next(&next->node); |
2617 | 0 | } |
2618 | 0 | if((rbnode_type*)next == RBTREE_NULL || !next) { |
2619 | | /* there is no next node, so something below it cannot |
2620 | | * exist */ |
2621 | 0 | return 0; |
2622 | 0 | } |
2623 | | /* a next node exists, if there was something below the query, |
2624 | | * this node has to be it. See if it is below the query name */ |
2625 | 0 | if(dname_strict_subdomain_c(next->name, qinfo->qname)) |
2626 | 0 | return 1; |
2627 | 0 | return 0; |
2628 | 0 | } |
2629 | | |
2630 | | /** create synth cname target name in buffer, or fail if too long */ |
2631 | | static size_t |
2632 | | synth_cname_buf(uint8_t* qname, size_t qname_len, size_t dname_len, |
2633 | | uint8_t* dtarg, size_t dtarglen, uint8_t* buf, size_t buflen) |
2634 | 0 | { |
2635 | 0 | size_t newlen = qname_len + dtarglen - dname_len; |
2636 | 0 | if(newlen > buflen) { |
2637 | | /* YXDOMAIN error */ |
2638 | 0 | return 0; |
2639 | 0 | } |
2640 | | /* new name is concatenation of qname front (without DNAME owner) |
2641 | | * and DNAME target name */ |
2642 | 0 | memcpy(buf, qname, qname_len-dname_len); |
2643 | 0 | memmove(buf+(qname_len-dname_len), dtarg, dtarglen); |
2644 | 0 | return newlen; |
2645 | 0 | } |
2646 | | |
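The buffer arithmetic above is the RFC 6672 DNAME substitution: the part of the query name to the left of the DNAME owner is kept, and the DNAME target is appended to it, giving the synthesized CNAME target. A standalone sketch with a worked example in the comments; dname_substitute() is a hypothetical helper and the 255-octet cap mirrors the buffer bound used above.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Sketch: RFC 6672 section 2.2 DNAME substitution.  'owner_len' is the
 * wire length of the DNAME owner name, which must be a suffix of qname.
 * Writes the synthesized CNAME target into buf and returns its length,
 * or 0 when the result does not fit (the YXDOMAIN case). */
static size_t dname_substitute(const uint8_t* qname, size_t qname_len,
	size_t owner_len, const uint8_t* target, size_t target_len,
	uint8_t* buf, size_t buflen)
{
	size_t prefix, newlen;
	if(owner_len > qname_len)
		return 0;
	prefix = qname_len - owner_len;     /* labels left of the DNAME owner */
	newlen = prefix + target_len;
	if(newlen > buflen || newlen > 255)
		return 0;
	memcpy(buf, qname, prefix);
	memcpy(buf + prefix, target, target_len);
	return newlen;
}

/* Worked example, all wire format: qname a.b.example.org. (17 octets),
 * DNAME owner example.org. (13 octets), DNAME target example.net.
 * (13 octets) -> synthesized CNAME target a.b.example.net. (17 octets). */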
2647 | | /** create synthetic CNAME rrset for in a DNAME answer in region, |
2648 | | * false on alloc failure, cname==NULL when name too long. */ |
2649 | | static int |
2650 | | create_synth_cname(uint8_t* qname, size_t qname_len, struct regional* region, |
2651 | | struct auth_data* node, struct auth_rrset* dname, uint16_t dclass, |
2652 | | struct ub_packed_rrset_key** cname) |
2653 | 0 | { |
2654 | 0 | uint8_t buf[LDNS_MAX_DOMAINLEN]; |
2655 | 0 | uint8_t* dtarg; |
2656 | 0 | size_t dtarglen, newlen; |
2657 | 0 | struct packed_rrset_data* d; |
2658 | | |
2659 | | /* get DNAME target name */ |
2660 | 0 | if(dname->data->count < 1) return 0; |
2661 | 0 | if(dname->data->rr_len[0] < 3) return 0; /* at least rdatalen +1 */ |
2662 | 0 | dtarg = dname->data->rr_data[0]+2; |
2663 | 0 | dtarglen = dname->data->rr_len[0]-2; |
2664 | 0 | if(sldns_read_uint16(dname->data->rr_data[0]) != dtarglen) |
2665 | 0 | return 0; /* rdatalen in DNAME rdata is malformed */ |
2666 | 0 | if(dname_valid(dtarg, dtarglen) != dtarglen) |
2667 | 0 | return 0; /* DNAME RR has malformed rdata */ |
2668 | 0 | if(qname_len == 0) |
2669 | 0 | return 0; /* too short */ |
2670 | 0 | if(qname_len <= node->namelen) |
2671 | 0 | return 0; /* qname too short for dname removal */ |
2672 | | |
2673 | | /* synthesize a CNAME */ |
2674 | 0 | newlen = synth_cname_buf(qname, qname_len, node->namelen, |
2675 | 0 | dtarg, dtarglen, buf, sizeof(buf)); |
2676 | 0 | if(newlen == 0) { |
2677 | | /* YXDOMAIN error */ |
2678 | 0 | *cname = NULL; |
2679 | 0 | return 1; |
2680 | 0 | } |
2681 | 0 | *cname = (struct ub_packed_rrset_key*)regional_alloc(region, |
2682 | 0 | sizeof(struct ub_packed_rrset_key)); |
2683 | 0 | if(!*cname) |
2684 | 0 | return 0; /* out of memory */ |
2685 | 0 | memset(&(*cname)->entry, 0, sizeof((*cname)->entry)); |
2686 | 0 | (*cname)->entry.key = (*cname); |
2687 | 0 | (*cname)->rk.type = htons(LDNS_RR_TYPE_CNAME); |
2688 | 0 | (*cname)->rk.rrset_class = htons(dclass); |
2689 | 0 | (*cname)->rk.flags = 0; |
2690 | 0 | (*cname)->rk.dname = regional_alloc_init(region, qname, qname_len); |
2691 | 0 | if(!(*cname)->rk.dname) |
2692 | 0 | return 0; /* out of memory */ |
2693 | 0 | (*cname)->rk.dname_len = qname_len; |
2694 | 0 | (*cname)->entry.hash = rrset_key_hash(&(*cname)->rk); |
2695 | 0 | d = (struct packed_rrset_data*)regional_alloc_zero(region, |
2696 | 0 | sizeof(struct packed_rrset_data) + sizeof(size_t) + |
2697 | 0 | sizeof(uint8_t*) + sizeof(time_t) + sizeof(uint16_t) |
2698 | 0 | + newlen); |
2699 | 0 | if(!d) |
2700 | 0 | return 0; /* out of memory */ |
2701 | 0 | (*cname)->entry.data = d; |
2702 | 0 | d->ttl = 0; /* 0 for synthesized CNAME TTL */ |
2703 | 0 | d->count = 1; |
2704 | 0 | d->rrsig_count = 0; |
2705 | 0 | d->trust = rrset_trust_ans_noAA; |
2706 | 0 | d->rr_len = (size_t*)((uint8_t*)d + |
2707 | 0 | sizeof(struct packed_rrset_data)); |
2708 | 0 | d->rr_len[0] = newlen + sizeof(uint16_t); |
2709 | 0 | packed_rrset_ptr_fixup(d); |
2710 | 0 | d->rr_ttl[0] = d->ttl; |
2711 | 0 | sldns_write_uint16(d->rr_data[0], newlen); |
2712 | 0 | memmove(d->rr_data[0] + sizeof(uint16_t), buf, newlen); |
2713 | 0 | return 1; |
2714 | 0 | } |
2715 | | |
2716 | | /** add a synthesized CNAME to the answer section */ |
2717 | | static int |
2718 | | add_synth_cname(struct auth_zone* z, uint8_t* qname, size_t qname_len, |
2719 | | struct regional* region, struct dns_msg* msg, struct auth_data* dname, |
2720 | | struct auth_rrset* rrset) |
2721 | 0 | { |
2722 | 0 | struct ub_packed_rrset_key* cname; |
2723 | | /* synthesize a CNAME */ |
2724 | 0 | if(!create_synth_cname(qname, qname_len, region, dname, rrset, |
2725 | 0 | z->dclass, &cname)) { |
2726 | | /* out of memory */ |
2727 | 0 | return 0; |
2728 | 0 | } |
2729 | 0 | if(!cname) { |
2730 | | /* cname could not be created because of YXDOMAIN */ |
2731 | 0 | msg->rep->flags |= LDNS_RCODE_YXDOMAIN; |
2732 | 0 | return 1; |
2733 | 0 | } |
2734 | | /* add cname to message */ |
2735 | 0 | if(!msg_grow_array(region, msg)) |
2736 | 0 | return 0; |
2737 | 0 | msg->rep->rrsets[msg->rep->rrset_count] = cname; |
2738 | 0 | msg->rep->rrset_count++; |
2739 | 0 | msg->rep->an_numrrsets++; |
2740 | 0 | msg_ttl(msg); |
2741 | 0 | return 1; |
2742 | 0 | } |
2743 | | |
2744 | | /** Change a dname to a different one, for wildcard namechange */ |
2745 | | static void |
2746 | | az_change_dnames(struct dns_msg* msg, uint8_t* oldname, uint8_t* newname, |
2747 | | size_t newlen, int an_only) |
2748 | 0 | { |
2749 | 0 | size_t i; |
2750 | 0 | size_t start = 0, end = msg->rep->rrset_count; |
2751 | 0 | if(!an_only) start = msg->rep->an_numrrsets; |
2752 | 0 | if(an_only) end = msg->rep->an_numrrsets; |
2753 | 0 | for(i=start; i<end; i++) { |
2754 | | /* allocated in region so we can change the ptrs */ |
2755 | 0 | if(query_dname_compare(msg->rep->rrsets[i]->rk.dname, oldname) |
2756 | 0 | == 0) { |
2757 | 0 | msg->rep->rrsets[i]->rk.dname = newname; |
2758 | 0 | msg->rep->rrsets[i]->rk.dname_len = newlen; |
2759 | 0 | msg->rep->rrsets[i]->entry.hash = rrset_key_hash(&msg->rep->rrsets[i]->rk); |
2760 | 0 | } |
2761 | 0 | } |
2762 | 0 | } |
2763 | | |
2764 | | /** find NSEC record covering the query */ |
2765 | | static struct auth_rrset* |
2766 | | az_find_nsec_cover(struct auth_zone* z, struct auth_data** node) |
2767 | 0 | { |
2768 | 0 | uint8_t* nm = (*node)->name; |
2769 | 0 | size_t nmlen = (*node)->namelen; |
2770 | 0 | struct auth_rrset* rrset; |
2771 | | /* find the NSEC for the smallest-or-equal node */ |
2772 | | /* if node == NULL, we did not find a smaller name. But the zone |
2773 | | * name is the smallest name and should have an NSEC. So there is |
2774 | | * no NSEC to return (for a properly signed zone) */ |
2775 | | /* for empty nonterminals, the auth-data node should not exist, |
2776 | | * and thus we don't need to go rbtree_previous here to find |
2777 | | * a domain with an NSEC record */ |
2778 | | /* but there could be glue, and if this node is glue, then it has no NSEC. |
2779 | | * Go up to find nonglue (previous) NSEC-holding nodes */ |
2780 | 0 | while((rrset=az_domain_rrset(*node, LDNS_RR_TYPE_NSEC)) == NULL) { |
2781 | 0 | if(dname_is_root(nm)) return NULL; |
2782 | 0 | if(nmlen == z->namelen) return NULL; |
2783 | 0 | dname_remove_label(&nm, &nmlen); |
2784 | | /* adjust *node for the nsec rrset to find in */ |
2785 | 0 | *node = az_find_name(z, nm, nmlen); |
2786 | 0 | } |
2787 | 0 | return rrset; |
2788 | 0 | } |
2789 | | |
2790 | | /** Find NSEC and add for wildcard denial */ |
2791 | | static int |
2792 | | az_nsec_wildcard_denial(struct auth_zone* z, struct regional* region, |
2793 | | struct dns_msg* msg, uint8_t* cenm, size_t cenmlen) |
2794 | 0 | { |
2795 | 0 | struct query_info qinfo; |
2796 | 0 | int node_exact; |
2797 | 0 | struct auth_data* node; |
2798 | 0 | struct auth_rrset* nsec; |
2799 | 0 | uint8_t wc[LDNS_MAX_DOMAINLEN]; |
2800 | 0 | if(cenmlen+2 > sizeof(wc)) |
2801 | 0 | return 0; /* result would be too long */ |
2802 | 0 | wc[0] = 1; /* length of wildcard label */ |
2803 | 0 | wc[1] = (uint8_t)'*'; /* wildcard label */ |
2804 | 0 | memmove(wc+2, cenm, cenmlen); |
2805 | | |
2806 | | /* we have '*.ce' in wc wildcard name buffer */ |
2807 | | /* get nsec cover for that */ |
2808 | 0 | qinfo.qname = wc; |
2809 | 0 | qinfo.qname_len = cenmlen+2; |
2810 | 0 | qinfo.qtype = 0; |
2811 | 0 | qinfo.qclass = 0; |
2812 | 0 | az_find_domain(z, &qinfo, &node_exact, &node); |
2813 | 0 | if((nsec=az_find_nsec_cover(z, &node)) != NULL) { |
2814 | 0 | if(!msg_add_rrset_ns(z, region, msg, node, nsec)) return 0; |
2815 | 0 | } |
2816 | 0 | return 1; |
2817 | 0 | } |
2818 | | |
2819 | | /** Find the NSEC3PARAM rrset (if any) and if true you have the parameters */ |
2820 | | static int |
2821 | | az_nsec3_param(struct auth_zone* z, int* algo, size_t* iter, uint8_t** salt, |
2822 | | size_t* saltlen) |
2823 | 0 | { |
2824 | 0 | struct auth_data* apex; |
2825 | 0 | struct auth_rrset* param; |
2826 | 0 | size_t i; |
2827 | 0 | apex = az_find_name(z, z->name, z->namelen); |
2828 | 0 | if(!apex) return 0; |
2829 | 0 | param = az_domain_rrset(apex, LDNS_RR_TYPE_NSEC3PARAM); |
2830 | 0 | if(!param || param->data->count==0) |
2831 | 0 | return 0; /* no RRset or no RRs in rrset */ |
2832 | | /* find out which NSEC3PARAM RR has supported parameters */ |
2833 | | /* skip unknown flags (dynamic signer is recalculating nsec3 chain) */ |
2834 | 0 | for(i=0; i<param->data->count; i++) { |
2835 | 0 | uint8_t* rdata = param->data->rr_data[i]+2; |
2836 | 0 | size_t rdatalen = param->data->rr_len[i]; |
2837 | 0 | if(rdatalen < 2+5) |
2838 | 0 | continue; /* too short */ |
2839 | 0 | if(!nsec3_hash_algo_size_supported((int)(rdata[0]))) |
2840 | 0 | continue; /* unsupported algo */ |
2841 | 0 | if(rdatalen < (size_t)(2+5+(size_t)rdata[4])) |
2842 | 0 | continue; /* salt missing */ |
2843 | 0 | if((rdata[1]&NSEC3_UNKNOWN_FLAGS)!=0) |
2844 | 0 | continue; /* unknown flags */ |
2845 | 0 | *algo = (int)(rdata[0]); |
2846 | 0 | *iter = sldns_read_uint16(rdata+2); |
2847 | 0 | *saltlen = rdata[4]; |
2848 | 0 | if(*saltlen == 0) |
2849 | 0 | *salt = NULL; |
2850 | 0 | else *salt = rdata+5; |
2851 | 0 | return 1; |
2852 | 0 | } |
2853 | | /* no supported params */ |
2854 | 0 | return 0; |
2855 | 0 | } |
2856 | | |
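The rdata scan above matches the RFC 5155 section 4.2 NSEC3PARAM layout: hash algorithm (1 octet), flags (1), iterations (2), salt length (1), then the salt; the 2+5 checks again allow for the stored rdlength prefix. A standalone sketch over bare rdata; parse_nsec3param() and struct nsec3param are hypothetical illustration names.

#include <stddef.h>
#include <stdint.h>

struct nsec3param {
	uint8_t algo;
	uint8_t flags;
	uint16_t iterations;
	const uint8_t* salt;
	size_t saltlen;
};

/* Sketch: parse NSEC3PARAM rdata (RFC 5155 section 4.2).
 * Returns 0 when the rdata is too short or the salt is truncated. */
static int parse_nsec3param(const uint8_t* rdata, size_t rdlen,
	struct nsec3param* out)
{
	if(rdlen < 5)
		return 0;
	out->algo = rdata[0];
	out->flags = rdata[1];
	out->iterations = (uint16_t)(((uint16_t)rdata[2]<<8) | rdata[3]);
	out->saltlen = rdata[4];
	if(rdlen < 5 + out->saltlen)
		return 0;
	out->salt = (out->saltlen > 0) ? rdata+5 : NULL;
	return 1;
}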
2857 | | /** Hash a name with nsec3param into buffer, it has zone name appended. |
2858 | | * return length of hash */ |
2859 | | static size_t |
2860 | | az_nsec3_hash(uint8_t* buf, size_t buflen, uint8_t* nm, size_t nmlen, |
2861 | | int algo, size_t iter, uint8_t* salt, size_t saltlen) |
2862 | 0 | { |
2863 | 0 | size_t hlen = nsec3_hash_algo_size_supported(algo); |
2864 | | /* buffer has domain name, nsec3hash, and 256 is for max saltlen |
2865 | | * (salt has 0-255 length) */ |
2866 | 0 | unsigned char p[LDNS_MAX_DOMAINLEN+1+N3HASHBUFLEN+256]; |
2867 | 0 | size_t i; |
2868 | 0 | if(nmlen+saltlen > sizeof(p) || hlen+saltlen > sizeof(p)) |
2869 | 0 | return 0; |
2870 | 0 | if(hlen > buflen) |
2871 | 0 | return 0; /* somehow too large for destination buffer */ |
2872 | | /* hashfunc(name, salt) */ |
2873 | 0 | memmove(p, nm, nmlen); |
2874 | 0 | query_dname_tolower(p); |
2875 | 0 | if(salt && saltlen > 0) |
2876 | 0 | memmove(p+nmlen, salt, saltlen); |
2877 | 0 | (void)secalgo_nsec3_hash(algo, p, nmlen+saltlen, (unsigned char*)buf); |
2878 | 0 | for(i=0; i<iter; i++) { |
2879 | | /* hashfunc(hash, salt) */ |
2880 | 0 | memmove(p, buf, hlen); |
2881 | 0 | if(salt && saltlen > 0) |
2882 | 0 | memmove(p+hlen, salt, saltlen); |
2883 | 0 | (void)secalgo_nsec3_hash(algo, p, hlen+saltlen, |
2884 | 0 | (unsigned char*)buf); |
2885 | 0 | } |
2886 | 0 | return hlen; |
2887 | 0 | } |
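
/* Editor's sketch (not part of authzone.c): the function above is the
 * RFC 5155 iterated hash over the lowercased owner name,
 *   IH(0) = H(owner-name | salt),  IH(k) = H(IH(k-1) | salt)
 * for k = 1..iterations, i.e. "iterations" extra digest passes after
 * the first. A generic version over a hypothetical digest callback
 * could look like this; digest_fn, H and hashlen are assumptions, not
 * Unbound names. */
#if 0
typedef void (*digest_fn)(const unsigned char* in, size_t inlen,
	unsigned char* out);

static void
nsec3_iterated_hash(digest_fn H, size_t hashlen, const uint8_t* name,
	size_t namelen, const uint8_t* salt, size_t saltlen,
	size_t iterations, uint8_t* out)
{
	/* name (<=255 octets) or hash, followed by the salt (<=255) */
	unsigned char work[512];
	size_t i;
	memcpy(work, name, namelen);
	memcpy(work+namelen, salt, saltlen);
	H(work, namelen+saltlen, out);		/* IH(0) */
	for(i=0; i<iterations; i++) {		/* IH(1)..IH(iterations) */
		memcpy(work, out, hashlen);
		memcpy(work+hashlen, salt, saltlen);
		H(work, hashlen+saltlen, out);
	}
}
#endif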
2888 | | |
2889 | | /** Hash a name and return the base32-encoded hash name for lookup, with the zone name appended */
2890 | | static int |
2891 | | az_nsec3_hashname(struct auth_zone* z, uint8_t* hashname, size_t* hashnmlen, |
2892 | | uint8_t* nm, size_t nmlen, int algo, size_t iter, uint8_t* salt, |
2893 | | size_t saltlen) |
2894 | 0 | { |
2895 | 0 | uint8_t hash[N3HASHBUFLEN]; |
2896 | 0 | size_t hlen; |
2897 | 0 | int ret; |
2898 | 0 | hlen = az_nsec3_hash(hash, sizeof(hash), nm, nmlen, algo, iter, |
2899 | 0 | salt, saltlen); |
2900 | 0 | if(!hlen) return 0; |
2901 | | /* b32 encode */ |
2902 | 0 | if(*hashnmlen < hlen*2+1+z->namelen) /* approx b32 as hexb16 */ |
2903 | 0 | return 0; |
2904 | 0 | ret = sldns_b32_ntop_extended_hex(hash, hlen, (char*)(hashname+1), |
2905 | 0 | (*hashnmlen)-1); |
2906 | 0 | if(ret<1) |
2907 | 0 | return 0; |
2908 | 0 | hashname[0] = (uint8_t)ret; |
2909 | 0 | ret++; |
2910 | 0 | if((*hashnmlen) - ret < z->namelen) |
2911 | 0 | return 0; |
2912 | 0 | memmove(hashname+ret, z->name, z->namelen); |
2913 | 0 | *hashnmlen = z->namelen+(size_t)ret; |
2914 | 0 | return 1; |
2915 | 0 | } |
2916 | | |
2917 | | /** Find the datanode that covers the nsec3hash-name */ |
2918 | | static struct auth_data* |
2919 | | az_nsec3_findnode(struct auth_zone* z, uint8_t* hashnm, size_t hashnmlen) |
2920 | 0 | { |
2921 | 0 | struct query_info qinfo; |
2922 | 0 | struct auth_data* node; |
2923 | 0 | int node_exact; |
2924 | 0 | qinfo.qclass = 0; |
2925 | 0 | qinfo.qtype = 0; |
2926 | 0 | qinfo.qname = hashnm; |
2927 | 0 | qinfo.qname_len = hashnmlen; |
2928 | | /* because canonical ordering and base32 nsec3 hash ordering are the same,
2929 | | * this lookup finds the right nsec3 name. */
2930 | 0 | az_find_domain(z, &qinfo, &node_exact, &node); |
2931 | | /* but we may have to skip non-nsec3 nodes */ |
2932 | | /* this may skip many nodes; the way to speed it up is to have a
2933 | | * separate nsec3 tree that contains only nsec3 nodes */
2934 | 0 | while(node && (rbnode_type*)node != RBTREE_NULL && |
2935 | 0 | !az_domain_rrset(node, LDNS_RR_TYPE_NSEC3)) { |
2936 | 0 | node = (struct auth_data*)rbtree_previous(&node->node); |
2937 | 0 | } |
2938 | 0 | if((rbnode_type*)node == RBTREE_NULL) |
2939 | 0 | node = NULL; |
2940 | 0 | return node; |
2941 | 0 | } |
2942 | | |
2943 | | /** Find cover for hashed(nm, nmlen) (or NULL) */ |
2944 | | static struct auth_data* |
2945 | | az_nsec3_find_cover(struct auth_zone* z, uint8_t* nm, size_t nmlen, |
2946 | | int algo, size_t iter, uint8_t* salt, size_t saltlen) |
2947 | 0 | { |
2948 | 0 | struct auth_data* node; |
2949 | 0 | uint8_t hname[LDNS_MAX_DOMAINLEN]; |
2950 | 0 | size_t hlen = sizeof(hname); |
2951 | 0 | if(!az_nsec3_hashname(z, hname, &hlen, nm, nmlen, algo, iter, |
2952 | 0 | salt, saltlen)) |
2953 | 0 | return NULL; |
2954 | 0 | node = az_nsec3_findnode(z, hname, hlen); |
2955 | 0 | if(node) |
2956 | 0 | return node; |
2957 | | /* we did not find any; perhaps the NSEC3 hash sorts before the
2958 | | * first hash, so we have to use the 'last hash' in the zone */
2959 | 0 | node = (struct auth_data*)rbtree_last(&z->data); |
2960 | 0 | while(node && (rbnode_type*)node != RBTREE_NULL && |
2961 | 0 | !az_domain_rrset(node, LDNS_RR_TYPE_NSEC3)) { |
2962 | 0 | node = (struct auth_data*)rbtree_previous(&node->node); |
2963 | 0 | } |
2964 | 0 | if((rbnode_type*)node == RBTREE_NULL) |
2965 | 0 | node = NULL; |
2966 | 0 | return node; |
2967 | 0 | } |
2968 | | |
2969 | | /** Find exact match for hashed(nm, nmlen) NSEC3 record or NULL */ |
2970 | | static struct auth_data* |
2971 | | az_nsec3_find_exact(struct auth_zone* z, uint8_t* nm, size_t nmlen, |
2972 | | int algo, size_t iter, uint8_t* salt, size_t saltlen) |
2973 | 0 | { |
2974 | 0 | struct auth_data* node; |
2975 | 0 | uint8_t hname[LDNS_MAX_DOMAINLEN]; |
2976 | 0 | size_t hlen = sizeof(hname); |
2977 | 0 | if(!az_nsec3_hashname(z, hname, &hlen, nm, nmlen, algo, iter, |
2978 | 0 | salt, saltlen)) |
2979 | 0 | return NULL; |
2980 | 0 | node = az_find_name(z, hname, hlen); |
2981 | 0 | if(az_domain_rrset(node, LDNS_RR_TYPE_NSEC3)) |
2982 | 0 | return node; |
2983 | 0 | return NULL; |
2984 | 0 | } |
2985 | | |
2986 | | /** Return the next closer name (as a reference into the qname). This is one label
2987 | | * more than the cenm (cenm must be a suffix of qname) */
2988 | | static void |
2989 | | az_nsec3_get_nextcloser(uint8_t* cenm, uint8_t* qname, size_t qname_len, |
2990 | | uint8_t** nx, size_t* nxlen) |
2991 | 0 | { |
2992 | 0 | int celabs = dname_count_labels(cenm); |
2993 | 0 | int qlabs = dname_count_labels(qname); |
2994 | 0 | int strip = qlabs - celabs -1; |
2995 | 0 | log_assert(dname_strict_subdomain(qname, qlabs, cenm, celabs)); |
2996 | 0 | *nx = qname; |
2997 | 0 | *nxlen = qname_len; |
2998 | 0 | if(strip>0) |
2999 | 0 | dname_remove_labels(nx, nxlen, strip); |
3000 | 0 | } |
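
/* Worked example (editor's note): with qname a.b.example.org. and
 * closest encloser example.org., the qname has two more labels than the
 * ce, so strip = 2-1 = 1 label is removed from the front of the qname,
 * leaving b.example.org. -- the "next closer" name, exactly one label
 * below the closest encloser, whose hashed denial is needed for NSEC3
 * nonexistence proofs. */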
3001 | | |
3002 | | /** Find the closest encloser that has an exact NSEC3.
3003 | | * Updates cenm to the new name. If it went up, no_exact_ce is set to true. */
3004 | | static struct auth_data* |
3005 | | az_nsec3_find_ce(struct auth_zone* z, uint8_t** cenm, size_t* cenmlen, |
3006 | | int* no_exact_ce, int algo, size_t iter, uint8_t* salt, size_t saltlen) |
3007 | 0 | { |
3008 | 0 | struct auth_data* node; |
3009 | 0 | while((node = az_nsec3_find_exact(z, *cenm, *cenmlen, |
3010 | 0 | algo, iter, salt, saltlen)) == NULL) { |
3011 | 0 | if(*cenmlen == z->namelen) { |
3012 | | /* next step up would take us out of the zone. fail */ |
3013 | 0 | return NULL; |
3014 | 0 | } |
3015 | 0 | *no_exact_ce = 1; |
3016 | 0 | dname_remove_label(cenm, cenmlen); |
3017 | 0 | } |
3018 | 0 | return node; |
3019 | 0 | } |
3020 | | |
3021 | | /** Insert NSEC3 record in the authority section; if node is NULL this does nothing */
3022 | | static int |
3023 | | az_nsec3_insert(struct auth_zone* z, struct regional* region, |
3024 | | struct dns_msg* msg, struct auth_data* node) |
3025 | 0 | { |
3026 | 0 | struct auth_rrset* nsec3; |
3027 | 0 | if(!node) return 1; /* no node, skip this */ |
3028 | 0 | nsec3 = az_domain_rrset(node, LDNS_RR_TYPE_NSEC3); |
3029 | 0 | if(!nsec3) return 1; /* if no nsec3 RR, skip it */ |
3030 | 0 | if(!msg_add_rrset_ns(z, region, msg, node, nsec3)) return 0; |
3031 | 0 | return 1; |
3032 | 0 | } |
3033 | | |
3034 | | /** add NSEC3 records from the zone to the message for the nsec3 proof.
3035 | | * Specify with the flags which parts of the proof are required.
3036 | | * the ce is the exact matching name (for notype) but also delegation points. |
3037 | | * qname is the name that the nextcloser name is derived from.
3038 | | * If NSEC3 is not properly there (in the zone) nothing is added. |
3039 | | * always enabled: include the nsec3 that proves the Closest Encloser.
3040 | | * that is an exact match that should exist for it. |
3041 | | * If that does not exist, a higher exact match + nxproof is enabled |
3042 | | * (for some sort of opt-out empty nonterminal cases). |
3043 | | * nodataproof: search for exact match and include that instead. |
3044 | | * ceproof: include ce proof NSEC3 (omitted for wildcard replies). |
3045 | | * nxproof: include denial of the qname. |
3046 | | * wcproof: include denial of wildcard (wildcard.ce). |
3047 | | */ |
3048 | | static int |
3049 | | az_add_nsec3_proof(struct auth_zone* z, struct regional* region, |
3050 | | struct dns_msg* msg, uint8_t* cenm, size_t cenmlen, uint8_t* qname, |
3051 | | size_t qname_len, int nodataproof, int ceproof, int nxproof, |
3052 | | int wcproof) |
3053 | 0 | { |
3054 | 0 | int algo; |
3055 | 0 | size_t iter, saltlen; |
3056 | 0 | uint8_t* salt; |
3057 | 0 | int no_exact_ce = 0; |
3058 | 0 | struct auth_data* node; |
3059 | | |
3060 | | /* find parameters of nsec3 proof */ |
3061 | 0 | if(!az_nsec3_param(z, &algo, &iter, &salt, &saltlen)) |
3062 | 0 | return 1; /* no nsec3 */ |
3063 | 0 | if(nodataproof) { |
3064 | | /* see if the node has a hash of itself for the nodata |
3065 | | * proof nsec3; this has to be an exact-match nsec3. */
3066 | 0 | struct auth_data* match; |
3067 | 0 | match = az_nsec3_find_exact(z, qname, qname_len, algo, |
3068 | 0 | iter, salt, saltlen); |
3069 | 0 | if(match) { |
3070 | 0 | if(!az_nsec3_insert(z, region, msg, match)) |
3071 | 0 | return 0; |
3072 | | /* only nodata NSEC3 needed, no CE or others. */ |
3073 | 0 | return 1; |
3074 | 0 | } |
3075 | 0 | } |
3076 | | /* find ce that has an NSEC3 */ |
3077 | 0 | if(ceproof) { |
3078 | 0 | node = az_nsec3_find_ce(z, &cenm, &cenmlen, &no_exact_ce, |
3079 | 0 | algo, iter, salt, saltlen); |
3080 | 0 | if(no_exact_ce) nxproof = 1; |
3081 | 0 | if(!az_nsec3_insert(z, region, msg, node)) |
3082 | 0 | return 0; |
3083 | 0 | } |
3084 | | |
3085 | 0 | if(nxproof) { |
3086 | 0 | uint8_t* nx; |
3087 | 0 | size_t nxlen; |
3088 | | /* create nextcloser domain name */ |
3089 | 0 | az_nsec3_get_nextcloser(cenm, qname, qname_len, &nx, &nxlen); |
3090 | | /* find nsec3 that matches or covers it */ |
3091 | 0 | node = az_nsec3_find_cover(z, nx, nxlen, algo, iter, salt, |
3092 | 0 | saltlen); |
3093 | 0 | if(!az_nsec3_insert(z, region, msg, node)) |
3094 | 0 | return 0; |
3095 | 0 | } |
3096 | 0 | if(wcproof) { |
3097 | | /* create wildcard name *.ce */ |
3098 | 0 | uint8_t wc[LDNS_MAX_DOMAINLEN]; |
3099 | 0 | size_t wclen; |
3100 | 0 | if(cenmlen+2 > sizeof(wc)) |
3101 | 0 | return 0; /* result would be too long */ |
3102 | 0 | wc[0] = 1; /* length of wildcard label */ |
3103 | 0 | wc[1] = (uint8_t)'*'; /* wildcard label */ |
3104 | 0 | memmove(wc+2, cenm, cenmlen); |
3105 | 0 | wclen = cenmlen+2; |
3106 | | /* find nsec3 that matches or covers it */ |
3107 | 0 | node = az_nsec3_find_cover(z, wc, wclen, algo, iter, salt, |
3108 | 0 | saltlen); |
3109 | 0 | if(!az_nsec3_insert(z, region, msg, node)) |
3110 | 0 | return 0; |
3111 | 0 | } |
3112 | 0 | return 1; |
3113 | 0 | } |
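
/* Editor's note: the callers in this file combine the proof flags as
 * (nodataproof, ceproof, nxproof, wcproof):
 *   NOERROR/NODATA answer (az_generate_notype_answer) ...... 1, 1, 0, 0
 *   DS denial at a referral (az_generate_referral_answer) .. 1, 1, 0, 0
 *   wildcard expansion (az_generate_wildcard_answer) ....... 0, insert_ce, 1, 0
 *   NXDOMAIN (az_generate_nxdomain_answer) ................. 0, 1, 1, 1
 * so an NXDOMAIN carries the closest-encloser match, the next-closer
 * denial and the wildcard denial, while a NODATA answer normally only
 * needs the exact-match NSEC3 for the query name. */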
3114 | | |
3115 | | /** generate answer for positive answer */ |
3116 | | static int |
3117 | | az_generate_positive_answer(struct auth_zone* z, struct regional* region, |
3118 | | struct dns_msg* msg, struct auth_data* node, struct auth_rrset* rrset) |
3119 | 0 | { |
3120 | 0 | if(!msg_add_rrset_an(z, region, msg, node, rrset)) return 0; |
3121 | | /* see if we want additional rrs */ |
3122 | 0 | if(rrset->type == LDNS_RR_TYPE_MX) { |
3123 | 0 | if(!az_add_additionals_from(z, region, msg, rrset, 2)) |
3124 | 0 | return 0; |
3125 | 0 | } else if(rrset->type == LDNS_RR_TYPE_SRV) { |
3126 | 0 | if(!az_add_additionals_from(z, region, msg, rrset, 6)) |
3127 | 0 | return 0; |
3128 | 0 | } else if(rrset->type == LDNS_RR_TYPE_NS) { |
3129 | 0 | if(!az_add_additionals_from(z, region, msg, rrset, 0)) |
3130 | 0 | return 0; |
3131 | 0 | } |
3132 | 0 | return 1; |
3133 | 0 | } |
3134 | | |
3135 | | /** generate answer for type ANY answer */ |
3136 | | static int |
3137 | | az_generate_any_answer(struct auth_zone* z, struct regional* region, |
3138 | | struct dns_msg* msg, struct auth_data* node) |
3139 | 0 | { |
3140 | 0 | struct auth_rrset* rrset; |
3141 | 0 | int added = 0; |
3142 | | /* add a couple of RRsets (at least one) */
3143 | 0 | if((rrset=az_domain_rrset(node, LDNS_RR_TYPE_SOA)) != NULL) { |
3144 | 0 | if(!msg_add_rrset_an(z, region, msg, node, rrset)) return 0; |
3145 | 0 | added++; |
3146 | 0 | } |
3147 | 0 | if((rrset=az_domain_rrset(node, LDNS_RR_TYPE_MX)) != NULL) { |
3148 | 0 | if(!msg_add_rrset_an(z, region, msg, node, rrset)) return 0; |
3149 | 0 | added++; |
3150 | 0 | } |
3151 | 0 | if((rrset=az_domain_rrset(node, LDNS_RR_TYPE_A)) != NULL) { |
3152 | 0 | if(!msg_add_rrset_an(z, region, msg, node, rrset)) return 0; |
3153 | 0 | added++; |
3154 | 0 | } |
3155 | 0 | if((rrset=az_domain_rrset(node, LDNS_RR_TYPE_AAAA)) != NULL) { |
3156 | 0 | if(!msg_add_rrset_an(z, region, msg, node, rrset)) return 0; |
3157 | 0 | added++; |
3158 | 0 | } |
3159 | 0 | if(added == 0 && node && node->rrsets) { |
3160 | 0 | if(!msg_add_rrset_an(z, region, msg, node, |
3161 | 0 | node->rrsets)) return 0; |
3162 | 0 | } |
3163 | 0 | return 1; |
3164 | 0 | } |
3165 | | |
3166 | | /** follow cname chain and add more data to the answer section */ |
3167 | | static int |
3168 | | follow_cname_chain(struct auth_zone* z, uint16_t qtype, |
3169 | | struct regional* region, struct dns_msg* msg, |
3170 | | struct packed_rrset_data* d) |
3171 | 0 | { |
3172 | 0 | int maxchain = 0; |
3173 | | /* see if we can add the target of the CNAME into the answer */ |
3174 | 0 | while(maxchain++ < MAX_CNAME_CHAIN) { |
3175 | 0 | struct auth_data* node; |
3176 | 0 | struct auth_rrset* rrset; |
3177 | 0 | size_t clen; |
3178 | | /* d has cname rdata */ |
3179 | 0 | if(d->count == 0) break; /* no CNAME */ |
3180 | 0 | if(d->rr_len[0] < 2+1) break; /* too small */ |
3181 | 0 | if((clen=dname_valid(d->rr_data[0]+2, d->rr_len[0]-2))==0) |
3182 | 0 | break; /* malformed */ |
3183 | 0 | if(!dname_subdomain_c(d->rr_data[0]+2, z->name)) |
3184 | 0 | break; /* target out of zone */ |
3185 | 0 | if((node = az_find_name(z, d->rr_data[0]+2, clen))==NULL) |
3186 | 0 | break; /* no such target name */ |
3187 | 0 | if((rrset=az_domain_rrset(node, qtype))!=NULL) { |
3188 | | /* done we found the target */ |
3189 | 0 | if(!msg_add_rrset_an(z, region, msg, node, rrset)) |
3190 | 0 | return 0; |
3191 | 0 | break; |
3192 | 0 | } |
3193 | 0 | if((rrset=az_domain_rrset(node, LDNS_RR_TYPE_CNAME))==NULL) |
3194 | 0 | break; /* no further CNAME chain, notype */ |
3195 | 0 | if(!msg_add_rrset_an(z, region, msg, node, rrset)) return 0; |
3196 | 0 | d = rrset->data; |
3197 | 0 | } |
3198 | 0 | return 1; |
3199 | 0 | } |
3200 | | |
3201 | | /** generate answer for cname answer */ |
3202 | | static int |
3203 | | az_generate_cname_answer(struct auth_zone* z, struct query_info* qinfo, |
3204 | | struct regional* region, struct dns_msg* msg, |
3205 | | struct auth_data* node, struct auth_rrset* rrset) |
3206 | 0 | { |
3207 | 0 | if(!msg_add_rrset_an(z, region, msg, node, rrset)) return 0; |
3208 | 0 | if(!rrset) return 1; |
3209 | 0 | if(!follow_cname_chain(z, qinfo->qtype, region, msg, rrset->data)) |
3210 | 0 | return 0; |
3211 | 0 | return 1; |
3212 | 0 | } |
3213 | | |
3214 | | /** generate answer for notype answer */ |
3215 | | static int |
3216 | | az_generate_notype_answer(struct auth_zone* z, struct regional* region, |
3217 | | struct dns_msg* msg, struct auth_data* node) |
3218 | 0 | { |
3219 | 0 | struct auth_rrset* rrset; |
3220 | 0 | if(!az_add_negative_soa(z, region, msg)) return 0; |
3221 | | /* DNSSEC denial NSEC */ |
3222 | 0 | if((rrset=az_domain_rrset(node, LDNS_RR_TYPE_NSEC))!=NULL) { |
3223 | 0 | if(!msg_add_rrset_ns(z, region, msg, node, rrset)) return 0; |
3224 | 0 | } else if(node) { |
3225 | | /* DNSSEC denial NSEC3 */ |
3226 | 0 | if(!az_add_nsec3_proof(z, region, msg, node->name, |
3227 | 0 | node->namelen, msg->qinfo.qname, |
3228 | 0 | msg->qinfo.qname_len, 1, 1, 0, 0)) |
3229 | 0 | return 0; |
3230 | 0 | } |
3231 | 0 | return 1; |
3232 | 0 | } |
3233 | | |
3234 | | /** generate answer for referral answer */ |
3235 | | static int |
3236 | | az_generate_referral_answer(struct auth_zone* z, struct regional* region, |
3237 | | struct dns_msg* msg, struct auth_data* ce, struct auth_rrset* rrset) |
3238 | 0 | { |
3239 | 0 | struct auth_rrset* ds, *nsec; |
3240 | | /* turn off AA flag, referral is nonAA because it leaves the zone */ |
3241 | 0 | log_assert(ce); |
3242 | 0 | msg->rep->flags &= ~BIT_AA; |
3243 | 0 | if(!msg_add_rrset_ns(z, region, msg, ce, rrset)) return 0; |
3244 | | /* add DS or deny it */ |
3245 | 0 | if((ds=az_domain_rrset(ce, LDNS_RR_TYPE_DS))!=NULL) { |
3246 | 0 | if(!msg_add_rrset_ns(z, region, msg, ce, ds)) return 0; |
3247 | 0 | } else { |
3248 | | /* deny the DS */ |
3249 | 0 | if((nsec=az_domain_rrset(ce, LDNS_RR_TYPE_NSEC))!=NULL) { |
3250 | 0 | if(!msg_add_rrset_ns(z, region, msg, ce, nsec)) |
3251 | 0 | return 0; |
3252 | 0 | } else { |
3253 | 0 | if(!az_add_nsec3_proof(z, region, msg, ce->name, |
3254 | 0 | ce->namelen, msg->qinfo.qname, |
3255 | 0 | msg->qinfo.qname_len, 1, 1, 0, 0)) |
3256 | 0 | return 0; |
3257 | 0 | } |
3258 | 0 | } |
3259 | | /* add additional rrs for type NS */ |
3260 | 0 | if(!az_add_additionals_from(z, region, msg, rrset, 0)) return 0; |
3261 | 0 | return 1; |
3262 | 0 | } |
3263 | | |
3264 | | /** generate answer for DNAME answer */ |
3265 | | static int |
3266 | | az_generate_dname_answer(struct auth_zone* z, struct query_info* qinfo, |
3267 | | struct regional* region, struct dns_msg* msg, struct auth_data* ce, |
3268 | | struct auth_rrset* rrset) |
3269 | 0 | { |
3270 | 0 | log_assert(ce); |
3271 | | /* add the DNAME and then a CNAME */ |
3272 | 0 | if(!msg_add_rrset_an(z, region, msg, ce, rrset)) return 0; |
3273 | 0 | if(!add_synth_cname(z, qinfo->qname, qinfo->qname_len, region, |
3274 | 0 | msg, ce, rrset)) return 0; |
3275 | 0 | if(FLAGS_GET_RCODE(msg->rep->flags) == LDNS_RCODE_YXDOMAIN) |
3276 | 0 | return 1; |
3277 | 0 | if(msg->rep->rrset_count == 0 || |
3278 | 0 | !msg->rep->rrsets[msg->rep->rrset_count-1]) |
3279 | 0 | return 0; |
3280 | 0 | if(!follow_cname_chain(z, qinfo->qtype, region, msg, |
3281 | 0 | (struct packed_rrset_data*)msg->rep->rrsets[ |
3282 | 0 | msg->rep->rrset_count-1]->entry.data)) |
3283 | 0 | return 0; |
3284 | 0 | return 1; |
3285 | 0 | } |
3286 | | |
3287 | | /** generate answer for wildcard answer */ |
3288 | | static int |
3289 | | az_generate_wildcard_answer(struct auth_zone* z, struct query_info* qinfo, |
3290 | | struct regional* region, struct dns_msg* msg, struct auth_data* ce, |
3291 | | struct auth_data* wildcard, struct auth_data* node) |
3292 | 0 | { |
3293 | 0 | struct auth_rrset* rrset, *nsec; |
3294 | 0 | int insert_ce = 0; |
3295 | 0 | if((rrset=az_domain_rrset(wildcard, qinfo->qtype)) != NULL) { |
3296 | | /* wildcard has type, add it */ |
3297 | 0 | if(!msg_add_rrset_an(z, region, msg, wildcard, rrset)) |
3298 | 0 | return 0; |
3299 | 0 | az_change_dnames(msg, wildcard->name, msg->qinfo.qname, |
3300 | 0 | msg->qinfo.qname_len, 1); |
3301 | 0 | } else if((rrset=az_domain_rrset(wildcard, LDNS_RR_TYPE_CNAME))!=NULL) { |
3302 | | /* wildcard has cname instead, do that */ |
3303 | 0 | if(!msg_add_rrset_an(z, region, msg, wildcard, rrset)) |
3304 | 0 | return 0; |
3305 | 0 | az_change_dnames(msg, wildcard->name, msg->qinfo.qname, |
3306 | 0 | msg->qinfo.qname_len, 1); |
3307 | 0 | if(!follow_cname_chain(z, qinfo->qtype, region, msg, |
3308 | 0 | rrset->data)) |
3309 | 0 | return 0; |
3310 | 0 | } else if(qinfo->qtype == LDNS_RR_TYPE_ANY && wildcard->rrsets) { |
3311 | | /* add ANY rrsets from wildcard node */ |
3312 | 0 | if(!az_generate_any_answer(z, region, msg, wildcard)) |
3313 | 0 | return 0; |
3314 | 0 | az_change_dnames(msg, wildcard->name, msg->qinfo.qname, |
3315 | 0 | msg->qinfo.qname_len, 1); |
3316 | 0 | } else { |
3317 | | /* wildcard has nodata, notype answer */ |
3318 | | /* call other notype routine for dnssec notype denials */ |
3319 | 0 | if(!az_generate_notype_answer(z, region, msg, wildcard)) |
3320 | 0 | return 0; |
3321 | | /* because of the notype, there is no positive data with an
3322 | | * RRSIG that indicates the wildcard position. Thus the |
3323 | | * wildcard qname denial needs to have a CE nsec3. */ |
3324 | 0 | insert_ce = 1; |
3325 | 0 | } |
3326 | | |
3327 | | /* use ce and node for the dnssec denial of the original qname that matched the wildcard */
3328 | 0 | if((nsec=az_find_nsec_cover(z, &node)) != NULL) { |
3329 | 0 | if(!msg_add_rrset_ns(z, region, msg, node, nsec)) return 0; |
3330 | 0 | } else if(ce) { |
3331 | 0 | uint8_t* wildup = wildcard->name; |
3332 | 0 | size_t wilduplen= wildcard->namelen; |
3333 | 0 | dname_remove_label(&wildup, &wilduplen); |
3334 | 0 | if(!az_add_nsec3_proof(z, region, msg, wildup, |
3335 | 0 | wilduplen, msg->qinfo.qname, |
3336 | 0 | msg->qinfo.qname_len, 0, insert_ce, 1, 0)) |
3337 | 0 | return 0; |
3338 | 0 | } |
3339 | | |
3340 | | /* fixup name of wildcard from *.zone to qname, use already allocated |
3341 | | * pointer to msg qname */ |
3342 | 0 | az_change_dnames(msg, wildcard->name, msg->qinfo.qname, |
3343 | 0 | msg->qinfo.qname_len, 0); |
3344 | 0 | return 1; |
3345 | 0 | } |
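
/* Worked example (editor's note): with a zone entry *.example.org. A
 * and a query foo.bar.example.org. A, the wildcard node's A RRset is
 * placed in the answer and az_change_dnames rewrites its owner from
 * *.example.org. to foo.bar.example.org.; the NSEC covering the query
 * name (or the NSEC3 next-closer proof added above) then shows that
 * foo.bar.example.org. does not exist as an explicit name, which is
 * what lets a validator accept the wildcard expansion. */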
3346 | | |
3347 | | /** generate answer for nxdomain answer */ |
3348 | | static int |
3349 | | az_generate_nxdomain_answer(struct auth_zone* z, struct regional* region, |
3350 | | struct dns_msg* msg, struct auth_data* ce, struct auth_data* node) |
3351 | 0 | { |
3352 | 0 | struct auth_rrset* nsec; |
3353 | 0 | msg->rep->flags |= LDNS_RCODE_NXDOMAIN; |
3354 | 0 | if(!az_add_negative_soa(z, region, msg)) return 0; |
3355 | 0 | if((nsec=az_find_nsec_cover(z, &node)) != NULL) { |
3356 | 0 | if(!msg_add_rrset_ns(z, region, msg, node, nsec)) return 0; |
3357 | 0 | if(ce && !az_nsec_wildcard_denial(z, region, msg, ce->name, |
3358 | 0 | ce->namelen)) return 0; |
3359 | 0 | } else if(ce) { |
3360 | 0 | if(!az_add_nsec3_proof(z, region, msg, ce->name, |
3361 | 0 | ce->namelen, msg->qinfo.qname, |
3362 | 0 | msg->qinfo.qname_len, 0, 1, 1, 1)) |
3363 | 0 | return 0; |
3364 | 0 | } |
3365 | 0 | return 1; |
3366 | 0 | } |
3367 | | |
3368 | | /** Create answers when an exact match exists for the domain name */ |
3369 | | static int |
3370 | | az_generate_answer_with_node(struct auth_zone* z, struct query_info* qinfo, |
3371 | | struct regional* region, struct dns_msg* msg, struct auth_data* node) |
3372 | 0 | { |
3373 | 0 | struct auth_rrset* rrset; |
3374 | | /* positive answer, rrset we are looking for exists */ |
3375 | 0 | if((rrset=az_domain_rrset(node, qinfo->qtype)) != NULL) { |
3376 | 0 | return az_generate_positive_answer(z, region, msg, node, rrset); |
3377 | 0 | } |
3378 | | /* CNAME? */ |
3379 | 0 | if((rrset=az_domain_rrset(node, LDNS_RR_TYPE_CNAME)) != NULL) { |
3380 | 0 | return az_generate_cname_answer(z, qinfo, region, msg, |
3381 | 0 | node, rrset); |
3382 | 0 | } |
3383 | | /* type ANY ? */ |
3384 | 0 | if(qinfo->qtype == LDNS_RR_TYPE_ANY) { |
3385 | 0 | return az_generate_any_answer(z, region, msg, node); |
3386 | 0 | } |
3387 | | /* NOERROR/NODATA (no such type at domain name) */ |
3388 | 0 | return az_generate_notype_answer(z, region, msg, node); |
3389 | 0 | } |
3390 | | |
3391 | | /** Generate an answer when there is no existing node that we can use.
3392 | | * So it will be a referral, DNAME or nxdomain */
3393 | | static int |
3394 | | az_generate_answer_nonexistnode(struct auth_zone* z, struct query_info* qinfo, |
3395 | | struct regional* region, struct dns_msg* msg, struct auth_data* ce, |
3396 | | struct auth_rrset* rrset, struct auth_data* node) |
3397 | 0 | { |
3398 | 0 | struct auth_data* wildcard; |
3399 | | |
3400 | | /* we do not have an exact matching name (that exists) */ |
3401 | | /* see if we have a NS or DNAME in the ce */ |
3402 | 0 | if(ce && rrset && rrset->type == LDNS_RR_TYPE_NS) { |
3403 | 0 | return az_generate_referral_answer(z, region, msg, ce, rrset); |
3404 | 0 | } |
3405 | 0 | if(ce && rrset && rrset->type == LDNS_RR_TYPE_DNAME) { |
3406 | 0 | return az_generate_dname_answer(z, qinfo, region, msg, ce, |
3407 | 0 | rrset); |
3408 | 0 | } |
3409 | | /* if there is an empty nonterminal, wildcard and nxdomain don't |
3410 | | * happen, it is a notype answer */ |
3411 | 0 | if(az_empty_nonterminal(z, qinfo, node)) { |
3412 | 0 | return az_generate_notype_answer(z, region, msg, node); |
3413 | 0 | } |
3414 | | /* see if we have a wildcard under the ce */ |
3415 | 0 | if((wildcard=az_find_wildcard(z, qinfo, ce)) != NULL) { |
3416 | 0 | return az_generate_wildcard_answer(z, qinfo, region, msg, |
3417 | 0 | ce, wildcard, node); |
3418 | 0 | } |
3419 | | /* generate nxdomain answer */ |
3420 | 0 | return az_generate_nxdomain_answer(z, region, msg, ce, node); |
3421 | 0 | } |
3422 | | |
3423 | | /** Lookup answer in a zone. */ |
3424 | | static int |
3425 | | auth_zone_generate_answer(struct auth_zone* z, struct query_info* qinfo, |
3426 | | struct regional* region, struct dns_msg** msg, int* fallback) |
3427 | 0 | { |
3428 | 0 | struct auth_data* node, *ce; |
3429 | 0 | struct auth_rrset* rrset; |
3430 | 0 | int node_exact, node_exists; |
3431 | | /* does the zone want fallback in case of failure? */ |
3432 | 0 | *fallback = z->fallback_enabled; |
3433 | 0 | if(!(*msg=msg_create(region, qinfo))) return 0; |
3434 | | |
3435 | | /* lookup if there is a matching domain name for the query */ |
3436 | 0 | az_find_domain(z, qinfo, &node_exact, &node); |
3437 | | |
3438 | | /* see if a node exists to generate answers from (i.e. not glue and
3439 | | * not obscured by NS or DNAME or NSEC3-only), and also return the
3440 | | * closest-encloser from that: the closest node above the query
3441 | | * that should be used to generate answers */
3442 | 0 | node_exists = az_find_ce(z, qinfo, node, node_exact, &ce, &rrset); |
3443 | |
3444 | 0 | if(verbosity >= VERB_ALGO) { |
3445 | 0 | char zname[256], qname[256], nname[256], cename[256], |
3446 | 0 | tpstr[32], rrstr[32]; |
3447 | 0 | sldns_wire2str_dname_buf(qinfo->qname, qinfo->qname_len, qname, |
3448 | 0 | sizeof(qname)); |
3449 | 0 | sldns_wire2str_type_buf(qinfo->qtype, tpstr, sizeof(tpstr)); |
3450 | 0 | sldns_wire2str_dname_buf(z->name, z->namelen, zname, |
3451 | 0 | sizeof(zname)); |
3452 | 0 | if(node) |
3453 | 0 | sldns_wire2str_dname_buf(node->name, node->namelen, |
3454 | 0 | nname, sizeof(nname)); |
3455 | 0 | else snprintf(nname, sizeof(nname), "NULL"); |
3456 | 0 | if(ce) |
3457 | 0 | sldns_wire2str_dname_buf(ce->name, ce->namelen, |
3458 | 0 | cename, sizeof(cename)); |
3459 | 0 | else snprintf(cename, sizeof(cename), "NULL"); |
3460 | 0 | if(rrset) sldns_wire2str_type_buf(rrset->type, rrstr, |
3461 | 0 | sizeof(rrstr)); |
3462 | 0 | else snprintf(rrstr, sizeof(rrstr), "NULL"); |
3463 | 0 | log_info("auth_zone %s query %s %s, domain %s %s %s, " |
3464 | 0 | "ce %s, rrset %s", zname, qname, tpstr, nname, |
3465 | 0 | (node_exact?"exact":"notexact"), |
3466 | 0 | (node_exists?"exist":"notexist"), cename, rrstr); |
3467 | 0 | } |
3468 | |
3469 | 0 | if(node_exists) { |
3470 | | /* the node is fine, generate answer from node */ |
3471 | 0 | return az_generate_answer_with_node(z, qinfo, region, *msg, |
3472 | 0 | node); |
3473 | 0 | } |
3474 | 0 | return az_generate_answer_nonexistnode(z, qinfo, region, *msg, |
3475 | 0 | ce, rrset, node); |
3476 | 0 | } |
3477 | | |
3478 | | int auth_zones_lookup(struct auth_zones* az, struct query_info* qinfo, |
3479 | | struct regional* region, struct dns_msg** msg, int* fallback, |
3480 | | uint8_t* dp_nm, size_t dp_nmlen) |
3481 | 0 | { |
3482 | 0 | int r; |
3483 | 0 | struct auth_zone* z; |
3484 | | /* find the zone that should contain the answer. */ |
3485 | 0 | lock_rw_rdlock(&az->lock); |
3486 | 0 | z = auth_zone_find(az, dp_nm, dp_nmlen, qinfo->qclass); |
3487 | 0 | if(!z) { |
3488 | 0 | lock_rw_unlock(&az->lock); |
3489 | | /* no auth zone, fallback to internet */ |
3490 | 0 | *fallback = 1; |
3491 | 0 | return 0; |
3492 | 0 | } |
3493 | 0 | lock_rw_rdlock(&z->lock); |
3494 | 0 | lock_rw_unlock(&az->lock); |
3495 | | |
3496 | | /* if not for upstream queries, fallback */ |
3497 | 0 | if(!z->for_upstream) { |
3498 | 0 | lock_rw_unlock(&z->lock); |
3499 | 0 | *fallback = 1; |
3500 | 0 | return 0; |
3501 | 0 | } |
3502 | 0 | if(z->zone_expired) { |
3503 | 0 | *fallback = z->fallback_enabled; |
3504 | 0 | lock_rw_unlock(&z->lock); |
3505 | 0 | return 0; |
3506 | 0 | } |
3507 | | /* see what answer that zone would generate */ |
3508 | 0 | r = auth_zone_generate_answer(z, qinfo, region, msg, fallback); |
3509 | 0 | lock_rw_unlock(&z->lock); |
3510 | 0 | return r; |
3511 | 0 | } |
3512 | | |
3513 | | /** encode auth answer */ |
3514 | | static void |
3515 | | auth_answer_encode(struct query_info* qinfo, struct module_env* env, |
3516 | | struct edns_data* edns, struct comm_reply* repinfo, sldns_buffer* buf, |
3517 | | struct regional* temp, struct dns_msg* msg) |
3518 | 0 | { |
3519 | 0 | uint16_t udpsize; |
3520 | 0 | udpsize = edns->udp_size; |
3521 | 0 | edns->edns_version = EDNS_ADVERTISED_VERSION; |
3522 | 0 | edns->udp_size = EDNS_ADVERTISED_SIZE; |
3523 | 0 | edns->ext_rcode = 0; |
3524 | 0 | edns->bits &= EDNS_DO; |
3525 | |
3526 | 0 | if(!inplace_cb_reply_local_call(env, qinfo, NULL, msg->rep, |
3527 | 0 | (int)FLAGS_GET_RCODE(msg->rep->flags), edns, repinfo, temp, env->now_tv) |
3528 | 0 | || !reply_info_answer_encode(qinfo, msg->rep, |
3529 | 0 | *(uint16_t*)sldns_buffer_begin(buf), |
3530 | 0 | sldns_buffer_read_u16_at(buf, 2), |
3531 | 0 | buf, 0, 0, temp, udpsize, edns, |
3532 | 0 | (int)(edns->bits&EDNS_DO), 0)) { |
3533 | 0 | error_encode(buf, (LDNS_RCODE_SERVFAIL|BIT_AA), qinfo, |
3534 | 0 | *(uint16_t*)sldns_buffer_begin(buf), |
3535 | 0 | sldns_buffer_read_u16_at(buf, 2), edns); |
3536 | 0 | } |
3537 | 0 | } |
3538 | | |
3539 | | /** encode auth error answer */ |
3540 | | static void |
3541 | | auth_error_encode(struct query_info* qinfo, struct module_env* env, |
3542 | | struct edns_data* edns, struct comm_reply* repinfo, sldns_buffer* buf, |
3543 | | struct regional* temp, int rcode) |
3544 | 0 | { |
3545 | 0 | edns->edns_version = EDNS_ADVERTISED_VERSION; |
3546 | 0 | edns->udp_size = EDNS_ADVERTISED_SIZE; |
3547 | 0 | edns->ext_rcode = 0; |
3548 | 0 | edns->bits &= EDNS_DO; |
3549 | |
3550 | 0 | if(!inplace_cb_reply_local_call(env, qinfo, NULL, NULL, |
3551 | 0 | rcode, edns, repinfo, temp, env->now_tv)) |
3552 | 0 | edns->opt_list_inplace_cb_out = NULL; |
3553 | 0 | error_encode(buf, rcode|BIT_AA, qinfo, |
3554 | 0 | *(uint16_t*)sldns_buffer_begin(buf), |
3555 | 0 | sldns_buffer_read_u16_at(buf, 2), edns); |
3556 | 0 | } |
3557 | | |
3558 | | int auth_zones_answer(struct auth_zones* az, struct module_env* env, |
3559 | | struct query_info* qinfo, struct edns_data* edns, |
3560 | | struct comm_reply* repinfo, struct sldns_buffer* buf, struct regional* temp) |
3561 | 0 | { |
3562 | 0 | struct dns_msg* msg = NULL; |
3563 | 0 | struct auth_zone* z; |
3564 | 0 | int r; |
3565 | 0 | int fallback = 0; |
3566 | |
3567 | 0 | lock_rw_rdlock(&az->lock); |
3568 | 0 | if(!az->have_downstream) { |
3569 | | /* no downstream auth zones */ |
3570 | 0 | lock_rw_unlock(&az->lock); |
3571 | 0 | return 0; |
3572 | 0 | } |
3573 | 0 | if(qinfo->qtype == LDNS_RR_TYPE_DS) { |
3574 | 0 | uint8_t* delname = qinfo->qname; |
3575 | 0 | size_t delnamelen = qinfo->qname_len; |
3576 | 0 | dname_remove_label(&delname, &delnamelen); |
3577 | 0 | z = auth_zones_find_zone(az, delname, delnamelen, |
3578 | 0 | qinfo->qclass); |
3579 | 0 | } else { |
3580 | 0 | z = auth_zones_find_zone(az, qinfo->qname, qinfo->qname_len, |
3581 | 0 | qinfo->qclass); |
3582 | 0 | } |
3583 | 0 | if(!z) { |
3584 | | /* no zone above it */ |
3585 | 0 | lock_rw_unlock(&az->lock); |
3586 | 0 | return 0; |
3587 | 0 | } |
3588 | 0 | lock_rw_rdlock(&z->lock); |
3589 | 0 | lock_rw_unlock(&az->lock); |
3590 | 0 | if(!z->for_downstream) { |
3591 | 0 | lock_rw_unlock(&z->lock); |
3592 | 0 | return 0; |
3593 | 0 | } |
3594 | 0 | if(z->zone_expired) { |
3595 | 0 | if(z->fallback_enabled) { |
3596 | 0 | lock_rw_unlock(&z->lock); |
3597 | 0 | return 0; |
3598 | 0 | } |
3599 | 0 | lock_rw_unlock(&z->lock); |
3600 | 0 | lock_rw_wrlock(&az->lock); |
3601 | 0 | az->num_query_down++; |
3602 | 0 | lock_rw_unlock(&az->lock); |
3603 | 0 | auth_error_encode(qinfo, env, edns, repinfo, buf, temp, |
3604 | 0 | LDNS_RCODE_SERVFAIL); |
3605 | 0 | return 1; |
3606 | 0 | } |
3607 | | |
3608 | | /* answer it from zone z */ |
3609 | 0 | r = auth_zone_generate_answer(z, qinfo, temp, &msg, &fallback); |
3610 | 0 | lock_rw_unlock(&z->lock); |
3611 | 0 | if(!r && fallback) { |
3612 | | /* fallback to regular answering (recursive) */ |
3613 | 0 | return 0; |
3614 | 0 | } |
3615 | 0 | lock_rw_wrlock(&az->lock); |
3616 | 0 | az->num_query_down++; |
3617 | 0 | lock_rw_unlock(&az->lock); |
3618 | | |
3619 | | /* encode answer */ |
3620 | 0 | if(!r) |
3621 | 0 | auth_error_encode(qinfo, env, edns, repinfo, buf, temp, |
3622 | 0 | LDNS_RCODE_SERVFAIL); |
3623 | 0 | else auth_answer_encode(qinfo, env, edns, repinfo, buf, temp, msg); |
3624 | |
3625 | 0 | return 1; |
3626 | 0 | } |
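
/* Editor's note: for DS queries the parent side of a delegation is the
 * authoritative one, which is why one label is stripped above before
 * the zone lookup; e.g. a DS query for child.example.org. is matched
 * against the auth zone containing example.org. rather than against a
 * configured child.example.org. zone. */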
3627 | | |
3628 | | int auth_zones_can_fallback(struct auth_zones* az, uint8_t* nm, size_t nmlen, |
3629 | | uint16_t dclass) |
3630 | 0 | { |
3631 | 0 | int r; |
3632 | 0 | struct auth_zone* z; |
3633 | 0 | lock_rw_rdlock(&az->lock); |
3634 | 0 | z = auth_zone_find(az, nm, nmlen, dclass); |
3635 | 0 | if(!z) { |
3636 | 0 | lock_rw_unlock(&az->lock); |
3637 | | /* no such auth zone, fallback */ |
3638 | 0 | return 1; |
3639 | 0 | } |
3640 | 0 | lock_rw_rdlock(&z->lock); |
3641 | 0 | lock_rw_unlock(&az->lock); |
3642 | 0 | r = z->fallback_enabled || (!z->for_upstream); |
3643 | 0 | lock_rw_unlock(&z->lock); |
3644 | 0 | return r; |
3645 | 0 | } |
3646 | | |
3647 | | int |
3648 | | auth_zone_parse_notify_serial(sldns_buffer* pkt, uint32_t *serial) |
3649 | 0 | { |
3650 | 0 | struct query_info q; |
3651 | 0 | uint16_t rdlen; |
3652 | 0 | memset(&q, 0, sizeof(q)); |
3653 | 0 | sldns_buffer_set_position(pkt, 0); |
3654 | 0 | if(!query_info_parse(&q, pkt)) return 0; |
3655 | 0 | if(LDNS_ANCOUNT(sldns_buffer_begin(pkt)) == 0) return 0; |
3656 | | /* skip name of RR in answer section */ |
3657 | 0 | if(sldns_buffer_remaining(pkt) < 1) return 0; |
3658 | 0 | if(pkt_dname_len(pkt) == 0) return 0; |
3659 | | /* check type */ |
3660 | 0 | if(sldns_buffer_remaining(pkt) < 10 /* type,class,ttl,rdatalen*/) |
3661 | 0 | return 0; |
3662 | 0 | if(sldns_buffer_read_u16(pkt) != LDNS_RR_TYPE_SOA) return 0; |
3663 | 0 | sldns_buffer_skip(pkt, 2); /* class */ |
3664 | 0 | sldns_buffer_skip(pkt, 4); /* ttl */ |
3665 | 0 | rdlen = sldns_buffer_read_u16(pkt); /* rdatalen */ |
3666 | 0 | if(sldns_buffer_remaining(pkt) < rdlen) return 0; |
3667 | 0 | if(rdlen < 22) return 0; /* bad soa length */ |
3668 | 0 | sldns_buffer_skip(pkt, (ssize_t)(rdlen-20)); |
3669 | 0 | *serial = sldns_buffer_read_u32(pkt); |
3670 | | /* return true when there is a serial in the answer section */
3671 | 0 | return 1; |
3672 | 0 | } |
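
/* Editor's note on the rdlen-20 skip above: SOA RDATA is laid out as
 *   MNAME, RNAME, serial(4), refresh(4), retry(4), expire(4), minimum(4)
 * so the last 20 octets are the five 32-bit fields and the serial
 * starts 20 octets before the end of the RDATA; skipping rdlen-20 from
 * the start of the RDATA therefore lands exactly on the serial. The
 * rdlen < 22 check reflects that MNAME and RNAME are each at least one
 * octet (the root name), 2+20 = 22 octets minimum. */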
3673 | | |
3674 | | /** see if addr appears in the list */ |
3675 | | static int |
3676 | | addr_in_list(struct auth_addr* list, struct sockaddr_storage* addr, |
3677 | | socklen_t addrlen) |
3678 | 0 | { |
3679 | 0 | struct auth_addr* p; |
3680 | 0 | for(p=list; p; p=p->next) { |
3681 | 0 | if(sockaddr_cmp_addr(addr, addrlen, &p->addr, p->addrlen)==0) |
3682 | 0 | return 1; |
3683 | 0 | } |
3684 | 0 | return 0; |
3685 | 0 | } |
3686 | | |
3687 | | /** check if an address matches a master specification (or one of its |
3688 | | * addresses in the addr list) */ |
3689 | | static int |
3690 | | addr_matches_master(struct auth_master* master, struct sockaddr_storage* addr, |
3691 | | socklen_t addrlen, struct auth_master** fromhost) |
3692 | 0 | { |
3693 | 0 | struct sockaddr_storage a; |
3694 | 0 | socklen_t alen = 0; |
3695 | 0 | int net = 0; |
3696 | 0 | if(addr_in_list(master->list, addr, addrlen)) { |
3697 | 0 | *fromhost = master; |
3698 | 0 | return 1; |
3699 | 0 | } |
3700 | | /* compare address (but not port number, that is the destination |
3701 | | * port of the master, the port number of the received notify is |
3702 | | * allowed to be any port on that master) */
3703 | 0 | if(extstrtoaddr(master->host, &a, &alen, UNBOUND_DNS_PORT) && |
3704 | 0 | sockaddr_cmp_addr(addr, addrlen, &a, alen)==0) { |
3705 | 0 | *fromhost = master; |
3706 | 0 | return 1; |
3707 | 0 | } |
3708 | | /* prefixes, addr/len, like 10.0.0.0/8 */ |
3709 | | /* not http, and the host string contains exactly one '/' */
3710 | 0 | if(master->allow_notify && !master->http && |
3711 | 0 | strchr(master->host, '/') != NULL && |
3712 | 0 | strchr(master->host, '/') == strrchr(master->host, '/') && |
3713 | 0 | netblockstrtoaddr(master->host, UNBOUND_DNS_PORT, &a, &alen, |
3714 | 0 | &net) && alen == addrlen) { |
3715 | 0 | if(addr_in_common(addr, (addr_is_ip6(addr, addrlen)?128:32), |
3716 | 0 | &a, net, alen) >= net) { |
3717 | 0 | *fromhost = NULL; /* prefix does not have destination |
3718 | | to send the probe or transfer with */ |
3719 | 0 | return 1; /* matches the netblock */ |
3720 | 0 | } |
3721 | 0 | } |
3722 | 0 | return 0; |
3723 | 0 | } |
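
/* Editor's note: the prefix branch above lets allow-notify masters be
 * written as netblocks, e.g. "192.0.2.0/24" or "2001:db8::/32"; a
 * notify source matches when its first <prefix-length> bits equal the
 * netblock. Plain host entries must match the source address exactly,
 * and in both cases the source port is deliberately ignored (only the
 * master's destination port is configured). */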
3724 | | |
3725 | | /** check access list for notifies */ |
3726 | | static int |
3727 | | az_xfr_allowed_notify(struct auth_xfer* xfr, struct sockaddr_storage* addr, |
3728 | | socklen_t addrlen, struct auth_master** fromhost) |
3729 | 0 | { |
3730 | 0 | struct auth_master* p; |
3731 | 0 | for(p=xfr->allow_notify_list; p; p=p->next) { |
3732 | 0 | if(addr_matches_master(p, addr, addrlen, fromhost)) { |
3733 | 0 | return 1; |
3734 | 0 | } |
3735 | 0 | } |
3736 | 0 | return 0; |
3737 | 0 | } |
3738 | | |
3739 | | /** see if the serial means the zone has to be updated, i.e. the serial |
3740 | | * is newer than the zone serial, or we have no zone */ |
3741 | | static int |
3742 | | xfr_serial_means_update(struct auth_xfer* xfr, uint32_t serial) |
3743 | 0 | { |
3744 | 0 | if(!xfr->have_zone) |
3745 | 0 | return 1; /* no zone, anything is better */ |
3746 | 0 | if(xfr->zone_expired) |
3747 | 0 | return 1; /* expired, the sent serial is better than expired |
3748 | | data */ |
3749 | 0 | if(compare_serial(xfr->serial, serial) < 0) |
3750 | 0 | return 1; /* our serial is smaller than the sent serial, |
3751 | | the data is newer, fetch it */ |
3752 | 0 | return 0; |
3753 | 0 | } |
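
/* Editor's sketch: compare_serial() is defined elsewhere; the check
 * above only relies on it implementing DNS serial number arithmetic
 * (RFC 1982), where a serial counts as newer when it is ahead of ours
 * by less than half of the 32-bit space. A minimal comparison with
 * those semantics could look like this (illustrative only, not
 * Unbound's implementation): */
#if 0
static int
serial_is_newer(uint32_t mine, uint32_t theirs)
{
	/* true if 'theirs' is ahead of 'mine' in sequence space;
	 * a difference of exactly 2^31 is treated as not newer here */
	return theirs != mine &&
		(uint32_t)(theirs - mine) < ((uint32_t)1 << 31);
}
#endif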
3754 | | |
3755 | | /** note notify serial, updates the notify information in the xfr struct */ |
3756 | | static void |
3757 | | xfr_note_notify_serial(struct auth_xfer* xfr, int has_serial, uint32_t serial) |
3758 | 0 | { |
3759 | 0 | if(xfr->notify_received && xfr->notify_has_serial && has_serial) { |
3760 | | /* see if this serial is newer */ |
3761 | 0 | if(compare_serial(xfr->notify_serial, serial) < 0) |
3762 | 0 | xfr->notify_serial = serial; |
3763 | 0 | } else if(xfr->notify_received && xfr->notify_has_serial && |
3764 | 0 | !has_serial) { |
3765 | | /* remove serial, we have notify without serial */ |
3766 | 0 | xfr->notify_has_serial = 0; |
3767 | 0 | xfr->notify_serial = 0; |
3768 | 0 | } else if(xfr->notify_received && !xfr->notify_has_serial) { |
3769 | | /* we already have notify without serial, keep it |
3770 | | * that way; no serial check when current operation |
3771 | | * is done */ |
3772 | 0 | } else { |
3773 | 0 | xfr->notify_received = 1; |
3774 | 0 | xfr->notify_has_serial = has_serial; |
3775 | 0 | xfr->notify_serial = serial; |
3776 | 0 | } |
3777 | 0 | } |
3778 | | |
3779 | | /** process a notify serial, start new probe or note serial. xfr is locked */ |
3780 | | static void |
3781 | | xfr_process_notify(struct auth_xfer* xfr, struct module_env* env, |
3782 | | int has_serial, uint32_t serial, struct auth_master* fromhost) |
3783 | 0 | { |
3784 | | /* if the serial of notify is older than we have, don't fetch |
3785 | | * a zone, we already have it */ |
3786 | 0 | if(has_serial && !xfr_serial_means_update(xfr, serial)) { |
3787 | 0 | lock_basic_unlock(&xfr->lock); |
3788 | 0 | return; |
3789 | 0 | } |
3790 | | /* start new probe with this addr src, or note serial */ |
3791 | 0 | if(!xfr_start_probe(xfr, env, fromhost)) { |
3792 | | /* not started because already in progress, note the serial */ |
3793 | 0 | xfr_note_notify_serial(xfr, has_serial, serial); |
3794 | 0 | lock_basic_unlock(&xfr->lock); |
3795 | 0 | } |
3796 | | /* on success, start_probe has already unlocked xfr->lock */
3797 | 0 | } |
3798 | | |
3799 | | int auth_zones_notify(struct auth_zones* az, struct module_env* env, |
3800 | | uint8_t* nm, size_t nmlen, uint16_t dclass, |
3801 | | struct sockaddr_storage* addr, socklen_t addrlen, int has_serial, |
3802 | | uint32_t serial, int* refused) |
3803 | 0 | { |
3804 | 0 | struct auth_xfer* xfr; |
3805 | 0 | struct auth_master* fromhost = NULL; |
3806 | | /* see which zone this is */ |
3807 | 0 | lock_rw_rdlock(&az->lock); |
3808 | 0 | xfr = auth_xfer_find(az, nm, nmlen, dclass); |
3809 | 0 | if(!xfr) { |
3810 | 0 | lock_rw_unlock(&az->lock); |
3811 | | /* no such zone, refuse the notify */ |
3812 | 0 | *refused = 1; |
3813 | 0 | return 0; |
3814 | 0 | } |
3815 | 0 | lock_basic_lock(&xfr->lock); |
3816 | 0 | lock_rw_unlock(&az->lock); |
3817 | | |
3818 | | /* check access list for notifies */ |
3819 | 0 | if(!az_xfr_allowed_notify(xfr, addr, addrlen, &fromhost)) { |
3820 | 0 | lock_basic_unlock(&xfr->lock); |
3821 | | /* notify not allowed, refuse the notify */ |
3822 | 0 | *refused = 1; |
3823 | 0 | return 0; |
3824 | 0 | } |
3825 | | |
3826 | | /* process the notify */ |
3827 | 0 | xfr_process_notify(xfr, env, has_serial, serial, fromhost); |
3828 | 0 | return 1; |
3829 | 0 | } |
3830 | | |
3831 | | int auth_zones_startprobesequence(struct auth_zones* az, |
3832 | | struct module_env* env, uint8_t* nm, size_t nmlen, uint16_t dclass) |
3833 | 0 | { |
3834 | 0 | struct auth_xfer* xfr; |
3835 | 0 | lock_rw_rdlock(&az->lock); |
3836 | 0 | xfr = auth_xfer_find(az, nm, nmlen, dclass); |
3837 | 0 | if(!xfr) { |
3838 | 0 | lock_rw_unlock(&az->lock); |
3839 | 0 | return 0; |
3840 | 0 | } |
3841 | 0 | lock_basic_lock(&xfr->lock); |
3842 | 0 | lock_rw_unlock(&az->lock); |
3843 | |
3844 | 0 | xfr_process_notify(xfr, env, 0, 0, NULL); |
3845 | 0 | return 1; |
3846 | 0 | } |
3847 | | |
3848 | | /** set a zone expired */ |
3849 | | static void |
3850 | | auth_xfer_set_expired(struct auth_xfer* xfr, struct module_env* env, |
3851 | | int expired) |
3852 | 0 | { |
3853 | 0 | struct auth_zone* z; |
3854 | | |
3855 | | /* expire xfr */ |
3856 | 0 | lock_basic_lock(&xfr->lock); |
3857 | 0 | xfr->zone_expired = expired; |
3858 | 0 | lock_basic_unlock(&xfr->lock); |
3859 | | |
3860 | | /* find auth_zone */ |
3861 | 0 | lock_rw_rdlock(&env->auth_zones->lock); |
3862 | 0 | z = auth_zone_find(env->auth_zones, xfr->name, xfr->namelen, |
3863 | 0 | xfr->dclass); |
3864 | 0 | if(!z) { |
3865 | 0 | lock_rw_unlock(&env->auth_zones->lock); |
3866 | 0 | return; |
3867 | 0 | } |
3868 | 0 | lock_rw_wrlock(&z->lock); |
3869 | 0 | lock_rw_unlock(&env->auth_zones->lock); |
3870 | | |
3871 | | /* expire auth_zone */ |
3872 | 0 | z->zone_expired = expired; |
3873 | 0 | lock_rw_unlock(&z->lock); |
3874 | 0 | } |
3875 | | |
3876 | | /** find master (from notify or probe) in list of masters */ |
3877 | | static struct auth_master* |
3878 | | find_master_by_host(struct auth_master* list, char* host) |
3879 | 0 | { |
3880 | 0 | struct auth_master* p; |
3881 | 0 | for(p=list; p; p=p->next) { |
3882 | 0 | if(strcmp(p->host, host) == 0) |
3883 | 0 | return p; |
3884 | 0 | } |
3885 | 0 | return NULL; |
3886 | 0 | } |
3887 | | |
3888 | | /** delete the looked up auth_addrs for all the masters in the list */ |
3889 | | static void |
3890 | | xfr_masterlist_free_addrs(struct auth_master* list) |
3891 | 0 | { |
3892 | 0 | struct auth_master* m; |
3893 | 0 | for(m=list; m; m=m->next) { |
3894 | 0 | if(m->list) { |
3895 | 0 | auth_free_master_addrs(m->list); |
3896 | 0 | m->list = NULL; |
3897 | 0 | } |
3898 | 0 | } |
3899 | 0 | } |
3900 | | |
3901 | | /** copy a list of auth_addrs */ |
3902 | | static struct auth_addr* |
3903 | | auth_addr_list_copy(struct auth_addr* source) |
3904 | 0 | { |
3905 | 0 | struct auth_addr* list = NULL, *last = NULL; |
3906 | 0 | struct auth_addr* p; |
3907 | 0 | for(p=source; p; p=p->next) { |
3908 | 0 | struct auth_addr* a = (struct auth_addr*)memdup(p, sizeof(*p)); |
3909 | 0 | if(!a) { |
3910 | 0 | log_err("malloc failure"); |
3911 | 0 | auth_free_master_addrs(list); |
3912 | 0 | return NULL; |
3913 | 0 | } |
3914 | 0 | a->next = NULL; |
3915 | 0 | if(last) last->next = a; |
3916 | 0 | if(!list) list = a; |
3917 | 0 | last = a; |
3918 | 0 | } |
3919 | 0 | return list; |
3920 | 0 | } |
3921 | | |
3922 | | /** copy a master to a new structure, NULL on alloc failure */ |
3923 | | static struct auth_master* |
3924 | | auth_master_copy(struct auth_master* o) |
3925 | 0 | { |
3926 | 0 | struct auth_master* m; |
3927 | 0 | if(!o) return NULL; |
3928 | 0 | m = (struct auth_master*)memdup(o, sizeof(*o)); |
3929 | 0 | if(!m) { |
3930 | 0 | log_err("malloc failure"); |
3931 | 0 | return NULL; |
3932 | 0 | } |
3933 | 0 | m->next = NULL; |
3934 | 0 | if(m->host) { |
3935 | 0 | m->host = strdup(m->host); |
3936 | 0 | if(!m->host) { |
3937 | 0 | free(m); |
3938 | 0 | log_err("malloc failure"); |
3939 | 0 | return NULL; |
3940 | 0 | } |
3941 | 0 | } |
3942 | 0 | if(m->file) { |
3943 | 0 | m->file = strdup(m->file); |
3944 | 0 | if(!m->file) { |
3945 | 0 | free(m->host); |
3946 | 0 | free(m); |
3947 | 0 | log_err("malloc failure"); |
3948 | 0 | return NULL; |
3949 | 0 | } |
3950 | 0 | } |
3951 | 0 | if(m->list) { |
3952 | 0 | m->list = auth_addr_list_copy(m->list); |
3953 | 0 | if(!m->list) { |
3954 | 0 | free(m->file); |
3955 | 0 | free(m->host); |
3956 | 0 | free(m); |
3957 | 0 | return NULL; |
3958 | 0 | } |
3959 | 0 | } |
3960 | 0 | return m; |
3961 | 0 | } |
3962 | | |
3963 | | /** copy the master addresses from the task_probe lookups to the allow_notify |
3964 | | * list of masters */ |
3965 | | static void |
3966 | | probe_copy_masters_for_allow_notify(struct auth_xfer* xfr) |
3967 | 0 | { |
3968 | 0 | struct auth_master* list = NULL, *last = NULL; |
3969 | 0 | struct auth_master* p; |
3970 | | /* build up new list with copies */ |
3971 | 0 | for(p = xfr->task_transfer->masters; p; p=p->next) { |
3972 | 0 | struct auth_master* m = auth_master_copy(p); |
3973 | 0 | if(!m) { |
3974 | 0 | auth_free_masters(list); |
3975 | | /* failed because of malloc failure, use old list */ |
3976 | 0 | return; |
3977 | 0 | } |
3978 | 0 | m->next = NULL; |
3979 | 0 | if(last) last->next = m; |
3980 | 0 | if(!list) list = m; |
3981 | 0 | last = m; |
3982 | 0 | } |
3983 | | /* success, replace list */ |
3984 | 0 | auth_free_masters(xfr->allow_notify_list); |
3985 | 0 | xfr->allow_notify_list = list; |
3986 | 0 | } |
3987 | | |
3988 | | /** start the lookups for task_transfer */ |
3989 | | static void |
3990 | | xfr_transfer_start_lookups(struct auth_xfer* xfr) |
3991 | 0 | { |
3992 | | /* delete all the looked up addresses in the list */ |
3993 | 0 | xfr->task_transfer->scan_addr = NULL; |
3994 | 0 | xfr_masterlist_free_addrs(xfr->task_transfer->masters); |
3995 | | |
3996 | | /* start lookup at the first master */ |
3997 | 0 | xfr->task_transfer->lookup_target = xfr->task_transfer->masters; |
3998 | 0 | xfr->task_transfer->lookup_aaaa = 0; |
3999 | 0 | } |
4000 | | |
4001 | | /** move to the next lookup of hostname for task_transfer */ |
4002 | | static void |
4003 | | xfr_transfer_move_to_next_lookup(struct auth_xfer* xfr, struct module_env* env) |
4004 | 0 | { |
4005 | 0 | if(!xfr->task_transfer->lookup_target) |
4006 | 0 | return; /* already at end of list */ |
4007 | 0 | if(!xfr->task_transfer->lookup_aaaa && env->cfg->do_ip6) { |
4008 | | /* move to lookup AAAA */ |
4009 | 0 | xfr->task_transfer->lookup_aaaa = 1; |
4010 | 0 | return; |
4011 | 0 | } |
4012 | 0 | xfr->task_transfer->lookup_target = |
4013 | 0 | xfr->task_transfer->lookup_target->next; |
4014 | 0 | xfr->task_transfer->lookup_aaaa = 0; |
4015 | 0 | if(!env->cfg->do_ip4 && xfr->task_transfer->lookup_target!=NULL) |
4016 | 0 | xfr->task_transfer->lookup_aaaa = 1; |
4017 | 0 | } |
4018 | | |
4019 | | /** start the lookups for task_probe */ |
4020 | | static void |
4021 | | xfr_probe_start_lookups(struct auth_xfer* xfr) |
4022 | 0 | { |
4023 | | /* delete all the looked up addresses in the list */ |
4024 | 0 | xfr->task_probe->scan_addr = NULL; |
4025 | 0 | xfr_masterlist_free_addrs(xfr->task_probe->masters); |
4026 | | |
4027 | | /* start lookup at the first master */ |
4028 | 0 | xfr->task_probe->lookup_target = xfr->task_probe->masters; |
4029 | 0 | xfr->task_probe->lookup_aaaa = 0; |
4030 | 0 | } |
4031 | | |
4032 | | /** move to the next lookup of hostname for task_probe */ |
4033 | | static void |
4034 | | xfr_probe_move_to_next_lookup(struct auth_xfer* xfr, struct module_env* env) |
4035 | 0 | { |
4036 | 0 | if(!xfr->task_probe->lookup_target) |
4037 | 0 | return; /* already at end of list */ |
4038 | 0 | if(!xfr->task_probe->lookup_aaaa && env->cfg->do_ip6) { |
4039 | | /* move to lookup AAAA */ |
4040 | 0 | xfr->task_probe->lookup_aaaa = 1; |
4041 | 0 | return; |
4042 | 0 | } |
4043 | 0 | xfr->task_probe->lookup_target = xfr->task_probe->lookup_target->next; |
4044 | 0 | xfr->task_probe->lookup_aaaa = 0; |
4045 | 0 | if(!env->cfg->do_ip4 && xfr->task_probe->lookup_target!=NULL) |
4046 | 0 | xfr->task_probe->lookup_aaaa = 1; |
4047 | 0 | } |
4048 | | |
4049 | | /** start the iteration of the task_transfer list of masters */ |
4050 | | static void |
4051 | | xfr_transfer_start_list(struct auth_xfer* xfr, struct auth_master* spec) |
4052 | 0 | { |
4053 | 0 | if(spec) { |
4054 | 0 | xfr->task_transfer->scan_specific = find_master_by_host( |
4055 | 0 | xfr->task_transfer->masters, spec->host); |
4056 | 0 | if(xfr->task_transfer->scan_specific) { |
4057 | 0 | xfr->task_transfer->scan_target = NULL; |
4058 | 0 | xfr->task_transfer->scan_addr = NULL; |
4059 | 0 | if(xfr->task_transfer->scan_specific->list) |
4060 | 0 | xfr->task_transfer->scan_addr = |
4061 | 0 | xfr->task_transfer->scan_specific->list; |
4062 | 0 | return; |
4063 | 0 | } |
4064 | 0 | } |
4065 | | /* no specific (notified) host to scan */ |
4066 | 0 | xfr->task_transfer->scan_specific = NULL; |
4067 | 0 | xfr->task_transfer->scan_addr = NULL; |
4068 | | /* pick up first scan target */ |
4069 | 0 | xfr->task_transfer->scan_target = xfr->task_transfer->masters; |
4070 | 0 | if(xfr->task_transfer->scan_target && xfr->task_transfer-> |
4071 | 0 | scan_target->list) |
4072 | 0 | xfr->task_transfer->scan_addr = |
4073 | 0 | xfr->task_transfer->scan_target->list; |
4074 | 0 | } |
4075 | | |
4076 | | /** start the iteration of the task_probe list of masters */ |
4077 | | static void |
4078 | | xfr_probe_start_list(struct auth_xfer* xfr, struct auth_master* spec) |
4079 | 0 | { |
4080 | 0 | if(spec) { |
4081 | 0 | xfr->task_probe->scan_specific = find_master_by_host( |
4082 | 0 | xfr->task_probe->masters, spec->host); |
4083 | 0 | if(xfr->task_probe->scan_specific) { |
4084 | 0 | xfr->task_probe->scan_target = NULL; |
4085 | 0 | xfr->task_probe->scan_addr = NULL; |
4086 | 0 | if(xfr->task_probe->scan_specific->list) |
4087 | 0 | xfr->task_probe->scan_addr = |
4088 | 0 | xfr->task_probe->scan_specific->list; |
4089 | 0 | return; |
4090 | 0 | } |
4091 | 0 | } |
4092 | | /* no specific (notified) host to scan */ |
4093 | 0 | xfr->task_probe->scan_specific = NULL; |
4094 | 0 | xfr->task_probe->scan_addr = NULL; |
4095 | | /* pick up first scan target */ |
4096 | 0 | xfr->task_probe->scan_target = xfr->task_probe->masters; |
4097 | 0 | if(xfr->task_probe->scan_target && xfr->task_probe->scan_target->list) |
4098 | 0 | xfr->task_probe->scan_addr = |
4099 | 0 | xfr->task_probe->scan_target->list; |
4100 | 0 | } |
4101 | | |
4102 | | /** pick up the master that is being scanned right now, task_transfer */ |
4103 | | static struct auth_master* |
4104 | | xfr_transfer_current_master(struct auth_xfer* xfr) |
4105 | 0 | { |
4106 | 0 | if(xfr->task_transfer->scan_specific) |
4107 | 0 | return xfr->task_transfer->scan_specific; |
4108 | 0 | return xfr->task_transfer->scan_target; |
4109 | 0 | } |
4110 | | |
4111 | | /** pick up the master that is being scanned right now, task_probe */ |
4112 | | static struct auth_master* |
4113 | | xfr_probe_current_master(struct auth_xfer* xfr) |
4114 | 0 | { |
4115 | 0 | if(xfr->task_probe->scan_specific) |
4116 | 0 | return xfr->task_probe->scan_specific; |
4117 | 0 | return xfr->task_probe->scan_target; |
4118 | 0 | } |
4119 | | |
4120 | | /** true if at end of list, task_transfer */ |
4121 | | static int |
4122 | | xfr_transfer_end_of_list(struct auth_xfer* xfr) |
4123 | 0 | { |
4124 | 0 | return !xfr->task_transfer->scan_specific && |
4125 | 0 | !xfr->task_transfer->scan_target; |
4126 | 0 | } |
4127 | | |
4128 | | /** true if at end of list, task_probe */ |
4129 | | static int |
4130 | | xfr_probe_end_of_list(struct auth_xfer* xfr) |
4131 | 0 | { |
4132 | 0 | return !xfr->task_probe->scan_specific && !xfr->task_probe->scan_target; |
4133 | 0 | } |
4134 | | |
4135 | | /** move to next master in list, task_transfer */ |
4136 | | static void |
4137 | | xfr_transfer_nextmaster(struct auth_xfer* xfr) |
4138 | 0 | { |
4139 | 0 | if(!xfr->task_transfer->scan_specific && |
4140 | 0 | !xfr->task_transfer->scan_target) |
4141 | 0 | return; |
4142 | 0 | if(xfr->task_transfer->scan_addr) { |
4143 | 0 | xfr->task_transfer->scan_addr = |
4144 | 0 | xfr->task_transfer->scan_addr->next; |
4145 | 0 | if(xfr->task_transfer->scan_addr) |
4146 | 0 | return; |
4147 | 0 | } |
4148 | 0 | if(xfr->task_transfer->scan_specific) { |
4149 | 0 | xfr->task_transfer->scan_specific = NULL; |
4150 | 0 | xfr->task_transfer->scan_target = xfr->task_transfer->masters; |
4151 | 0 | if(xfr->task_transfer->scan_target && xfr->task_transfer-> |
4152 | 0 | scan_target->list) |
4153 | 0 | xfr->task_transfer->scan_addr = |
4154 | 0 | xfr->task_transfer->scan_target->list; |
4155 | 0 | return; |
4156 | 0 | } |
4157 | 0 | if(!xfr->task_transfer->scan_target) |
4158 | 0 | return; |
4159 | 0 | xfr->task_transfer->scan_target = xfr->task_transfer->scan_target->next; |
4160 | 0 | if(xfr->task_transfer->scan_target && xfr->task_transfer-> |
4161 | 0 | scan_target->list) |
4162 | 0 | xfr->task_transfer->scan_addr = |
4163 | 0 | xfr->task_transfer->scan_target->list; |
4164 | 0 | return; |
4165 | 0 | } |
4166 | | |
4167 | | /** move to next master in list, task_probe */ |
4168 | | static void |
4169 | | xfr_probe_nextmaster(struct auth_xfer* xfr) |
4170 | 0 | { |
4171 | 0 | if(!xfr->task_probe->scan_specific && !xfr->task_probe->scan_target) |
4172 | 0 | return; |
4173 | 0 | if(xfr->task_probe->scan_addr) { |
4174 | 0 | xfr->task_probe->scan_addr = xfr->task_probe->scan_addr->next; |
4175 | 0 | if(xfr->task_probe->scan_addr) |
4176 | 0 | return; |
4177 | 0 | } |
4178 | 0 | if(xfr->task_probe->scan_specific) { |
4179 | 0 | xfr->task_probe->scan_specific = NULL; |
4180 | 0 | xfr->task_probe->scan_target = xfr->task_probe->masters; |
4181 | 0 | if(xfr->task_probe->scan_target && xfr->task_probe-> |
4182 | 0 | scan_target->list) |
4183 | 0 | xfr->task_probe->scan_addr = |
4184 | 0 | xfr->task_probe->scan_target->list; |
4185 | 0 | return; |
4186 | 0 | } |
4187 | 0 | if(!xfr->task_probe->scan_target) |
4188 | 0 | return; |
4189 | 0 | xfr->task_probe->scan_target = xfr->task_probe->scan_target->next; |
4190 | 0 | if(xfr->task_probe->scan_target && xfr->task_probe-> |
4191 | 0 | scan_target->list) |
4192 | 0 | xfr->task_probe->scan_addr = |
4193 | 0 | xfr->task_probe->scan_target->list; |
4194 | 0 | return; |
4195 | 0 | } |
4196 | | |
4197 | | /** create SOA probe packet for xfr */ |
4198 | | static void |
4199 | | xfr_create_soa_probe_packet(struct auth_xfer* xfr, sldns_buffer* buf, |
4200 | | uint16_t id) |
4201 | 0 | { |
4202 | 0 | struct query_info qinfo; |
4203 | |
4204 | 0 | memset(&qinfo, 0, sizeof(qinfo)); |
4205 | 0 | qinfo.qname = xfr->name; |
4206 | 0 | qinfo.qname_len = xfr->namelen; |
4207 | 0 | qinfo.qtype = LDNS_RR_TYPE_SOA; |
4208 | 0 | qinfo.qclass = xfr->dclass; |
4209 | 0 | qinfo_query_encode(buf, &qinfo); |
4210 | 0 | sldns_buffer_write_u16_at(buf, 0, id); |
4211 | 0 | } |
4212 | | |
4213 | | /** create IXFR/AXFR packet for xfr */ |
4214 | | static void |
4215 | | xfr_create_ixfr_packet(struct auth_xfer* xfr, sldns_buffer* buf, uint16_t id, |
4216 | | struct auth_master* master) |
4217 | 0 | { |
4218 | 0 | struct query_info qinfo; |
4219 | 0 | uint32_t serial; |
4220 | 0 | int have_zone; |
4221 | 0 | have_zone = xfr->have_zone; |
4222 | 0 | serial = xfr->serial; |
4223 | |
4224 | 0 | memset(&qinfo, 0, sizeof(qinfo)); |
4225 | 0 | qinfo.qname = xfr->name; |
4226 | 0 | qinfo.qname_len = xfr->namelen; |
4227 | 0 | xfr->task_transfer->got_xfr_serial = 0; |
4228 | 0 | xfr->task_transfer->rr_scan_num = 0; |
4229 | 0 | xfr->task_transfer->incoming_xfr_serial = 0; |
4230 | 0 | xfr->task_transfer->on_ixfr_is_axfr = 0; |
4231 | 0 | xfr->task_transfer->on_ixfr = 1; |
4232 | 0 | qinfo.qtype = LDNS_RR_TYPE_IXFR; |
4233 | 0 | if(!have_zone || xfr->task_transfer->ixfr_fail || !master->ixfr) { |
4234 | 0 | qinfo.qtype = LDNS_RR_TYPE_AXFR; |
4235 | 0 | xfr->task_transfer->ixfr_fail = 0; |
4236 | 0 | xfr->task_transfer->on_ixfr = 0; |
4237 | 0 | } |
4238 | |
4239 | 0 | qinfo.qclass = xfr->dclass; |
4240 | 0 | qinfo_query_encode(buf, &qinfo); |
4241 | 0 | sldns_buffer_write_u16_at(buf, 0, id); |
4242 | | |
4243 | | /* append serial for IXFR */ |
4244 | 0 | if(qinfo.qtype == LDNS_RR_TYPE_IXFR) { |
4245 | 0 | size_t end = sldns_buffer_limit(buf); |
4246 | 0 | sldns_buffer_clear(buf); |
4247 | 0 | sldns_buffer_set_position(buf, end); |
4248 | | /* auth section count 1 */ |
4249 | 0 | sldns_buffer_write_u16_at(buf, LDNS_NSCOUNT_OFF, 1); |
4250 | | /* write SOA */ |
4251 | 0 | sldns_buffer_write_u8(buf, 0xC0); /* compressed ptr to qname */ |
4252 | 0 | sldns_buffer_write_u8(buf, 0x0C); |
4253 | 0 | sldns_buffer_write_u16(buf, LDNS_RR_TYPE_SOA); |
4254 | 0 | sldns_buffer_write_u16(buf, qinfo.qclass); |
4255 | 0 | sldns_buffer_write_u32(buf, 0); /* ttl */ |
4256 | 0 | sldns_buffer_write_u16(buf, 22); /* rdata length */ |
4257 | 0 | sldns_buffer_write_u8(buf, 0); /* . */ |
4258 | 0 | sldns_buffer_write_u8(buf, 0); /* . */ |
4259 | 0 | sldns_buffer_write_u32(buf, serial); /* serial */ |
4260 | 0 | sldns_buffer_write_u32(buf, 0); /* refresh */ |
4261 | 0 | sldns_buffer_write_u32(buf, 0); /* retry */ |
4262 | 0 | sldns_buffer_write_u32(buf, 0); /* expire */ |
4263 | 0 | sldns_buffer_write_u32(buf, 0); /* minimum */ |
4264 | 0 | sldns_buffer_flip(buf); |
4265 | 0 | } |
4266 | 0 | } |
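/* Editor's addition (not part of authzone.c): a sketch of the authority
 * record that xfr_create_ixfr_packet() appends to an IXFR query, shown as a
 * hypothetical byte layout. Assumptions: class IN and serial 1 are example
 * values only; the real code writes qinfo.qclass (xfr->dclass) and
 * xfr->serial. */
static const unsigned char example_ixfr_authority_soa[] = {
	0xC0, 0x0C,             /* compression pointer to the qname at offset 12 */
	0x00, 0x06,             /* type SOA */
	0x00, 0x01,             /* class IN (example; the code uses qinfo.qclass) */
	0x00, 0x00, 0x00, 0x00, /* TTL 0 */
	0x00, 0x16,             /* rdlength 22 */
	0x00,                   /* MNAME: root */
	0x00,                   /* RNAME: root */
	0x00, 0x00, 0x00, 0x01, /* serial (example value 1) */
	0x00, 0x00, 0x00, 0x00, /* refresh */
	0x00, 0x00, 0x00, 0x00, /* retry */
	0x00, 0x00, 0x00, 0x00, /* expire */
	0x00, 0x00, 0x00, 0x00  /* minimum */
};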
4267 | | |
4268 | | /** check if returned packet is OK */ |
4269 | | static int |
4270 | | check_packet_ok(sldns_buffer* pkt, uint16_t qtype, struct auth_xfer* xfr, |
4271 | | uint32_t* serial) |
4272 | 0 | { |
4273 | | /* parse to see if packet worked, valid reply */ |
4274 | | |
4275 | | /* check serial number of SOA */ |
4276 | 0 | if(sldns_buffer_limit(pkt) < LDNS_HEADER_SIZE) |
4277 | 0 | return 0; |
4278 | | |
4279 | | /* check ID */ |
4280 | 0 | if(LDNS_ID_WIRE(sldns_buffer_begin(pkt)) != xfr->task_probe->id) |
4281 | 0 | return 0; |
4282 | | |
4283 | | /* check flag bits and rcode */ |
4284 | 0 | if(!LDNS_QR_WIRE(sldns_buffer_begin(pkt))) |
4285 | 0 | return 0; |
4286 | 0 | if(LDNS_OPCODE_WIRE(sldns_buffer_begin(pkt)) != LDNS_PACKET_QUERY) |
4287 | 0 | return 0; |
4288 | 0 | if(LDNS_RCODE_WIRE(sldns_buffer_begin(pkt)) != LDNS_RCODE_NOERROR) |
4289 | 0 | return 0; |
4290 | | |
4291 | | /* check qname */ |
4292 | 0 | if(LDNS_QDCOUNT(sldns_buffer_begin(pkt)) != 1) |
4293 | 0 | return 0; |
4294 | 0 | sldns_buffer_skip(pkt, LDNS_HEADER_SIZE); |
4295 | 0 | if(sldns_buffer_remaining(pkt) < xfr->namelen) |
4296 | 0 | return 0; |
4297 | 0 | if(query_dname_compare(sldns_buffer_current(pkt), xfr->name) != 0) |
4298 | 0 | return 0; |
4299 | 0 | sldns_buffer_skip(pkt, (ssize_t)xfr->namelen); |
4300 | | |
4301 | | /* check qtype, qclass */ |
4302 | 0 | if(sldns_buffer_remaining(pkt) < 4) |
4303 | 0 | return 0; |
4304 | 0 | if(sldns_buffer_read_u16(pkt) != qtype) |
4305 | 0 | return 0; |
4306 | 0 | if(sldns_buffer_read_u16(pkt) != xfr->dclass) |
4307 | 0 | return 0; |
4308 | | |
4309 | 0 | if(serial) { |
4310 | 0 | uint16_t rdlen; |
4311 | | /* read serial number, from answer section SOA */ |
4312 | 0 | if(LDNS_ANCOUNT(sldns_buffer_begin(pkt)) == 0) |
4313 | 0 | return 0; |
4314 | | /* read from the first record, the SOA record */ |
4315 | 0 | if(sldns_buffer_remaining(pkt) < 1) |
4316 | 0 | return 0; |
4317 | 0 | if(dname_pkt_compare(pkt, sldns_buffer_current(pkt), |
4318 | 0 | xfr->name) != 0) |
4319 | 0 | return 0; |
4320 | 0 | if(!pkt_dname_len(pkt)) |
4321 | 0 | return 0; |
4322 | | /* type, class, ttl, rdatalen */ |
4323 | 0 | if(sldns_buffer_remaining(pkt) < 4+4+2) |
4324 | 0 | return 0; |
4325 | 0 | if(sldns_buffer_read_u16(pkt) != qtype) |
4326 | 0 | return 0; |
4327 | 0 | if(sldns_buffer_read_u16(pkt) != xfr->dclass) |
4328 | 0 | return 0; |
4329 | 0 | sldns_buffer_skip(pkt, 4); /* ttl */ |
4330 | 0 | rdlen = sldns_buffer_read_u16(pkt); |
4331 | 0 | if(sldns_buffer_remaining(pkt) < rdlen) |
4332 | 0 | return 0; |
4333 | 0 | if(sldns_buffer_remaining(pkt) < 1) |
4334 | 0 | return 0; |
4335 | 0 | if(!pkt_dname_len(pkt)) /* soa name */ |
4336 | 0 | return 0; |
4337 | 0 | if(sldns_buffer_remaining(pkt) < 1) |
4338 | 0 | return 0; |
4339 | 0 | if(!pkt_dname_len(pkt)) /* soa name */ |
4340 | 0 | return 0; |
4341 | 0 | if(sldns_buffer_remaining(pkt) < 20) |
4342 | 0 | return 0; |
4343 | 0 | *serial = sldns_buffer_read_u32(pkt); |
4344 | 0 | } |
4345 | 0 | return 1; |
4346 | 0 | } |
4347 | | |
4348 | | /** read one line from chunks into buffer at current position */ |
4349 | | static int |
4350 | | chunkline_get_line(struct auth_chunk** chunk, size_t* chunk_pos, |
4351 | | sldns_buffer* buf) |
4352 | 0 | { |
4353 | 0 | int readsome = 0; |
4354 | 0 | while(*chunk) { |
4355 | | /* more text in this chunk? */ |
4356 | 0 | if(*chunk_pos < (*chunk)->len) { |
4357 | 0 | readsome = 1; |
4358 | 0 | while(*chunk_pos < (*chunk)->len) { |
4359 | 0 | char c = (char)((*chunk)->data[*chunk_pos]); |
4360 | 0 | (*chunk_pos)++; |
4361 | 0 | if(sldns_buffer_remaining(buf) < 2) { |
4362 | | /* buffer too short */ |
4363 | 0 | verbose(VERB_ALGO, "http chunkline, " |
4364 | 0 | "line too long"); |
4365 | 0 | return 0; |
4366 | 0 | } |
4367 | 0 | sldns_buffer_write_u8(buf, (uint8_t)c); |
4368 | 0 | if(c == '\n') { |
4369 | | /* we are done */ |
4370 | 0 | return 1; |
4371 | 0 | } |
4372 | 0 | } |
4373 | 0 | } |
4374 | | /* move to next chunk */ |
4375 | 0 | *chunk = (*chunk)->next; |
4376 | 0 | *chunk_pos = 0; |
4377 | 0 | } |
4378 | | /* no more text */ |
4379 | 0 | if(readsome) return 1; |
4380 | 0 | return 0; |
4381 | 0 | } |
4382 | | |
4383 | | /** count the number of open minus closed parentheses in a chunkline */ |
4384 | | static int |
4385 | | chunkline_count_parens(sldns_buffer* buf, size_t start) |
4386 | 0 | { |
4387 | 0 | size_t end = sldns_buffer_position(buf); |
4388 | 0 | size_t i; |
4389 | 0 | int count = 0; |
4390 | 0 | int squote = 0, dquote = 0; |
4391 | 0 | for(i=start; i<end; i++) { |
4392 | 0 | char c = (char)sldns_buffer_read_u8_at(buf, i); |
4393 | 0 | if(squote && c != '\'') continue; |
4394 | 0 | if(dquote && c != '"') continue; |
4395 | 0 | if(c == '"') |
4396 | 0 | dquote = !dquote; /* skip quoted part */ |
4397 | 0 | else if(c == '\'') |
4398 | 0 | squote = !squote; /* skip quoted part */ |
4399 | 0 | else if(c == '(') |
4400 | 0 | count ++; |
4401 | 0 | else if(c == ')') |
4402 | 0 | count --; |
4403 | 0 | else if(c == ';') { |
4404 | | /* rest is a comment */ |
4405 | 0 | return count; |
4406 | 0 | } |
4407 | 0 | } |
4408 | 0 | return count; |
4409 | 0 | } |
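/* Editor's note (illustration, not part of authzone.c): for a record split
 * over multiple lines, such as
 *     example.com. IN SOA ns1.example.com. host.example.com. ( ; serial next
 *             1 3600 900 604800 86400 )
 * the first line yields a running count of +1 (the text after ';' is ignored
 * as a comment), and the line with the closing ')' brings the count back to
 * 0, which is when chunkline_get_line_collated() below stops collating. */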
4410 | | |
4411 | | /** remove trailing ;... comment from a line in the chunkline buffer */ |
4412 | | static void |
4413 | | chunkline_remove_trailcomment(sldns_buffer* buf, size_t start) |
4414 | 0 | { |
4415 | 0 | size_t end = sldns_buffer_position(buf); |
4416 | 0 | size_t i; |
4417 | 0 | int squote = 0, dquote = 0; |
4418 | 0 | for(i=start; i<end; i++) { |
4419 | 0 | char c = (char)sldns_buffer_read_u8_at(buf, i); |
4420 | 0 | if(squote && c != '\'') continue; |
4421 | 0 | if(dquote && c != '"') continue; |
4422 | 0 | if(c == '"') |
4423 | 0 | dquote = !dquote; /* skip quoted part */ |
4424 | 0 | else if(c == '\'') |
4425 | 0 | squote = !squote; /* skip quoted part */ |
4426 | 0 | else if(c == ';') { |
4427 | | /* rest is a comment */ |
4428 | 0 | sldns_buffer_set_position(buf, i); |
4429 | 0 | return; |
4430 | 0 | } |
4431 | 0 | } |
4432 | | /* nothing to remove */ |
4433 | 0 | } |
4434 | | |
4435 | | /** see if a chunkline is a comment line (or empty line) */ |
4436 | | static int |
4437 | | chunkline_is_comment_line_or_empty(sldns_buffer* buf) |
4438 | 0 | { |
4439 | 0 | size_t i, end = sldns_buffer_limit(buf); |
4440 | 0 | for(i=0; i<end; i++) { |
4441 | 0 | char c = (char)sldns_buffer_read_u8_at(buf, i); |
4442 | 0 | if(c == ';') |
4443 | 0 | return 1; /* comment */ |
4444 | 0 | else if(c != ' ' && c != '\t' && c != '\r' && c != '\n') |
4445 | 0 | return 0; /* not a comment */ |
4446 | 0 | } |
4447 | 0 | return 1; /* empty */ |
4448 | 0 | } |
4449 | | |
4450 | | /** get a line from the chunks, with ( ) continuation lines collated */ |
4451 | | static int |
4452 | | chunkline_get_line_collated(struct auth_chunk** chunk, size_t* chunk_pos, |
4453 | | sldns_buffer* buf) |
4454 | 0 | { |
4455 | 0 | size_t pos; |
4456 | 0 | int parens = 0; |
4457 | 0 | sldns_buffer_clear(buf); |
4458 | 0 | pos = sldns_buffer_position(buf); |
4459 | 0 | if(!chunkline_get_line(chunk, chunk_pos, buf)) { |
4460 | 0 | if(sldns_buffer_position(buf) < sldns_buffer_limit(buf)) |
4461 | 0 | sldns_buffer_write_u8_at(buf, sldns_buffer_position(buf), 0); |
4462 | 0 | else sldns_buffer_write_u8_at(buf, sldns_buffer_position(buf)-1, 0); |
4463 | 0 | sldns_buffer_flip(buf); |
4464 | 0 | return 0; |
4465 | 0 | } |
4466 | 0 | parens += chunkline_count_parens(buf, pos); |
4467 | 0 | while(parens > 0) { |
4468 | 0 | chunkline_remove_trailcomment(buf, pos); |
4469 | 0 | pos = sldns_buffer_position(buf); |
4470 | 0 | if(!chunkline_get_line(chunk, chunk_pos, buf)) { |
4471 | 0 | if(sldns_buffer_position(buf) < sldns_buffer_limit(buf)) |
4472 | 0 | sldns_buffer_write_u8_at(buf, sldns_buffer_position(buf), 0); |
4473 | 0 | else sldns_buffer_write_u8_at(buf, sldns_buffer_position(buf)-1, 0); |
4474 | 0 | sldns_buffer_flip(buf); |
4475 | 0 | return 0; |
4476 | 0 | } |
4477 | 0 | parens += chunkline_count_parens(buf, pos); |
4478 | 0 | } |
4479 | | |
4480 | 0 | if(sldns_buffer_remaining(buf) < 1) { |
4481 | 0 | verbose(VERB_ALGO, "http chunkline: " |
4482 | 0 | "line too long"); |
4483 | 0 | return 0; |
4484 | 0 | } |
4485 | 0 | sldns_buffer_write_u8_at(buf, sldns_buffer_position(buf), 0); |
4486 | 0 | sldns_buffer_flip(buf); |
4487 | 0 | return 1; |
4488 | 0 | } |
4489 | | |
4490 | | /** process $ORIGIN for http, 0 nothing, 1 done, 2 error */ |
4491 | | static int |
4492 | | http_parse_origin(sldns_buffer* buf, struct sldns_file_parse_state* pstate) |
4493 | 0 | { |
4494 | 0 | char* line = (char*)sldns_buffer_begin(buf); |
4495 | 0 | if(strncmp(line, "$ORIGIN", 7) == 0 && |
4496 | 0 | isspace((unsigned char)line[7])) { |
4497 | 0 | int s; |
4498 | 0 | pstate->origin_len = sizeof(pstate->origin); |
4499 | 0 | s = sldns_str2wire_dname_buf(sldns_strip_ws(line+8), |
4500 | 0 | pstate->origin, &pstate->origin_len); |
4501 | 0 | if(s) { |
4502 | 0 | pstate->origin_len = 0; |
4503 | 0 | return 2; |
4504 | 0 | } |
4505 | 0 | return 1; |
4506 | 0 | } |
4507 | 0 | return 0; |
4508 | 0 | } |
4509 | | |
4510 | | /** process $TTL for http, 0 nothing, 1 done, 2 error */ |
4511 | | static int |
4512 | | http_parse_ttl(sldns_buffer* buf, struct sldns_file_parse_state* pstate) |
4513 | 0 | { |
4514 | 0 | char* line = (char*)sldns_buffer_begin(buf); |
4515 | 0 | if(strncmp(line, "$TTL", 4) == 0 && |
4516 | 0 | isspace((unsigned char)line[4])) { |
4517 | 0 | const char* end = NULL; |
4518 | 0 | int overflow = 0; |
4519 | 0 | pstate->default_ttl = sldns_str2period( |
4520 | 0 | sldns_strip_ws(line+5), &end, &overflow); |
4521 | 0 | if(overflow) { |
4522 | 0 | return 2; |
4523 | 0 | } |
4524 | 0 | return 1; |
4525 | 0 | } |
4526 | 0 | return 0; |
4527 | 0 | } |
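/* Editor's note (illustration, not part of authzone.c): the two helpers above
 * recognize zonefile directives such as
 *     $ORIGIN example.com.
 *     $TTL 3600
 * returning 0 for other lines, 1 when the directive was stored (the origin in
 * wire format, the default TTL in seconds), and 2 when it does not parse. */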
4528 | | |
4529 | | /** find noncomment RR line in chunks, collates lines if ( ) format */ |
4530 | | static int |
4531 | | chunkline_non_comment_RR(struct auth_chunk** chunk, size_t* chunk_pos, |
4532 | | sldns_buffer* buf, struct sldns_file_parse_state* pstate) |
4533 | 0 | { |
4534 | 0 | int ret; |
4535 | 0 | while(chunkline_get_line_collated(chunk, chunk_pos, buf)) { |
4536 | 0 | if(chunkline_is_comment_line_or_empty(buf)) { |
4537 | | /* a comment, go to next line */ |
4538 | 0 | continue; |
4539 | 0 | } |
4540 | 0 | if((ret=http_parse_origin(buf, pstate))!=0) { |
4541 | 0 | if(ret == 2) |
4542 | 0 | return 0; |
4543 | 0 | continue; /* $ORIGIN has been handled */ |
4544 | 0 | } |
4545 | 0 | if((ret=http_parse_ttl(buf, pstate))!=0) { |
4546 | 0 | if(ret == 2) |
4547 | 0 | return 0; |
4548 | 0 | continue; /* $TTL has been handled */ |
4549 | 0 | } |
4550 | 0 | return 1; |
4551 | 0 | } |
4552 | | /* no noncomments, fail */ |
4553 | 0 | return 0; |
4554 | 0 | } |
4555 | | |
4556 | | /** check syntax of chunklist zonefile, parse first RR; returns false on |
4557 | | * failure and leaves the first RR string in the scratch buffer for the |
4558 | | * error report. */ |
4559 | | static int |
4560 | | http_zonefile_syntax_check(struct auth_xfer* xfr, sldns_buffer* buf) |
4561 | 0 | { |
4562 | 0 | uint8_t rr[LDNS_RR_BUF_SIZE]; |
4563 | 0 | size_t rr_len, dname_len = 0; |
4564 | 0 | struct sldns_file_parse_state pstate; |
4565 | 0 | struct auth_chunk* chunk; |
4566 | 0 | size_t chunk_pos; |
4567 | 0 | int e; |
4568 | 0 | memset(&pstate, 0, sizeof(pstate)); |
4569 | 0 | pstate.default_ttl = 3600; |
4570 | 0 | if(xfr->namelen < sizeof(pstate.origin)) { |
4571 | 0 | pstate.origin_len = xfr->namelen; |
4572 | 0 | memmove(pstate.origin, xfr->name, xfr->namelen); |
4573 | 0 | } |
4574 | 0 | chunk = xfr->task_transfer->chunks_first; |
4575 | 0 | chunk_pos = 0; |
4576 | 0 | if(!chunkline_non_comment_RR(&chunk, &chunk_pos, buf, &pstate)) { |
4577 | 0 | return 0; |
4578 | 0 | } |
4579 | 0 | rr_len = sizeof(rr); |
4580 | 0 | e=sldns_str2wire_rr_buf((char*)sldns_buffer_begin(buf), rr, &rr_len, |
4581 | 0 | &dname_len, pstate.default_ttl, |
4582 | 0 | pstate.origin_len?pstate.origin:NULL, pstate.origin_len, |
4583 | 0 | pstate.prev_rr_len?pstate.prev_rr:NULL, pstate.prev_rr_len); |
4584 | 0 | if(e != 0) { |
4585 | 0 | log_err("parse failure on first RR[%d]: %s", |
4586 | 0 | LDNS_WIREPARSE_OFFSET(e), |
4587 | 0 | sldns_get_errorstr_parse(LDNS_WIREPARSE_ERROR(e))); |
4588 | 0 | return 0; |
4589 | 0 | } |
4590 | | /* check that class is correct */ |
4591 | 0 | if(sldns_wirerr_get_class(rr, rr_len, dname_len) != xfr->dclass) { |
4592 | 0 | log_err("parse failure: first record in downloaded zonefile " |
4593 | 0 | "from wrong RR class"); |
4594 | 0 | return 0; |
4595 | 0 | } |
4596 | 0 | return 1; |
4597 | 0 | } |
4598 | | |
4599 | | /** sum sizes of chunklist */ |
4600 | | static size_t |
4601 | | chunklist_sum(struct auth_chunk* list) |
4602 | 0 | { |
4603 | 0 | struct auth_chunk* p; |
4604 | 0 | size_t s = 0; |
4605 | 0 | for(p=list; p; p=p->next) { |
4606 | 0 | s += p->len; |
4607 | 0 | } |
4608 | 0 | return s; |
4609 | 0 | } |
4610 | | |
4611 | | /** remove newlines from collated line */ |
4612 | | static void |
4613 | | chunkline_newline_removal(sldns_buffer* buf) |
4614 | 0 | { |
4615 | 0 | size_t i, end=sldns_buffer_limit(buf); |
4616 | 0 | for(i=0; i<end; i++) { |
4617 | 0 | char c = (char)sldns_buffer_read_u8_at(buf, i); |
4618 | 0 | if(c == '\n' && i==end-1) { |
4619 | 0 | sldns_buffer_write_u8_at(buf, i, 0); |
4620 | 0 | sldns_buffer_set_limit(buf, end-1); |
4621 | 0 | return; |
4622 | 0 | } |
4623 | 0 | if(c == '\n') |
4624 | 0 | sldns_buffer_write_u8_at(buf, i, (uint8_t)' '); |
4625 | 0 | } |
4626 | 0 | } |
4627 | | |
4628 | | /** for http download, parse and add RR to zone */ |
4629 | | static int |
4630 | | http_parse_add_rr(struct auth_xfer* xfr, struct auth_zone* z, |
4631 | | sldns_buffer* buf, struct sldns_file_parse_state* pstate) |
4632 | 0 | { |
4633 | 0 | uint8_t rr[LDNS_RR_BUF_SIZE]; |
4634 | 0 | size_t rr_len, dname_len = 0; |
4635 | 0 | int e; |
4636 | 0 | char* line = (char*)sldns_buffer_begin(buf); |
4637 | 0 | rr_len = sizeof(rr); |
4638 | 0 | e = sldns_str2wire_rr_buf(line, rr, &rr_len, &dname_len, |
4639 | 0 | pstate->default_ttl, |
4640 | 0 | pstate->origin_len?pstate->origin:NULL, pstate->origin_len, |
4641 | 0 | pstate->prev_rr_len?pstate->prev_rr:NULL, pstate->prev_rr_len); |
4642 | 0 | if(e != 0) { |
4643 | 0 | log_err("%s/%s parse failure RR[%d]: %s in '%s'", |
4644 | 0 | xfr->task_transfer->master->host, |
4645 | 0 | xfr->task_transfer->master->file, |
4646 | 0 | LDNS_WIREPARSE_OFFSET(e), |
4647 | 0 | sldns_get_errorstr_parse(LDNS_WIREPARSE_ERROR(e)), |
4648 | 0 | line); |
4649 | 0 | return 0; |
4650 | 0 | } |
4651 | 0 | if(rr_len == 0) |
4652 | 0 | return 1; /* empty line or so */ |
4653 | | |
4654 | | /* set prev */ |
4655 | 0 | if(dname_len < sizeof(pstate->prev_rr)) { |
4656 | 0 | memmove(pstate->prev_rr, rr, dname_len); |
4657 | 0 | pstate->prev_rr_len = dname_len; |
4658 | 0 | } |
4659 | |
4660 | 0 | return az_insert_rr(z, rr, rr_len, dname_len, NULL); |
4661 | 0 | } |
4662 | | |
4663 | | /** RR list iterator, returns RRs from answer section one by one from the |
4664 | | * dns packets in the chunklist */ |
4665 | | static void |
4666 | | chunk_rrlist_start(struct auth_xfer* xfr, struct auth_chunk** rr_chunk, |
4667 | | int* rr_num, size_t* rr_pos) |
4668 | 0 | { |
4669 | 0 | *rr_chunk = xfr->task_transfer->chunks_first; |
4670 | 0 | *rr_num = 0; |
4671 | 0 | *rr_pos = 0; |
4672 | 0 | } |
4673 | | |
4674 | | /** RR list iterator, see if we are at the end of the list */ |
4675 | | static int |
4676 | | chunk_rrlist_end(struct auth_chunk* rr_chunk, int rr_num) |
4677 | 0 | { |
4678 | 0 | while(rr_chunk) { |
4679 | 0 | if(rr_chunk->len < LDNS_HEADER_SIZE) |
4680 | 0 | return 1; |
4681 | 0 | if(rr_num < (int)LDNS_ANCOUNT(rr_chunk->data)) |
4682 | 0 | return 0; |
4683 | | /* no more RRs in this chunk */ |
4684 | | /* continue with next chunk, see if it has RRs */ |
4685 | 0 | rr_chunk = rr_chunk->next; |
4686 | 0 | rr_num = 0; |
4687 | 0 | } |
4688 | 0 | return 1; |
4689 | 0 | } |
4690 | | |
4691 | | /** RR list iterator, move to next RR */ |
4692 | | static void |
4693 | | chunk_rrlist_gonext(struct auth_chunk** rr_chunk, int* rr_num, |
4694 | | size_t* rr_pos, size_t rr_nextpos) |
4695 | 0 | { |
4696 | | /* already at end of chunks? */ |
4697 | 0 | if(!*rr_chunk) |
4698 | 0 | return; |
4699 | | /* move within this chunk */ |
4700 | 0 | if((*rr_chunk)->len >= LDNS_HEADER_SIZE && |
4701 | 0 | (*rr_num)+1 < (int)LDNS_ANCOUNT((*rr_chunk)->data)) { |
4702 | 0 | (*rr_num) += 1; |
4703 | 0 | *rr_pos = rr_nextpos; |
4704 | 0 | return; |
4705 | 0 | } |
4706 | | /* no more RRs in this chunk */ |
4707 | | /* continue with next chunk, see if it has RRs */ |
4708 | 0 | if(*rr_chunk) |
4709 | 0 | *rr_chunk = (*rr_chunk)->next; |
4710 | 0 | while(*rr_chunk) { |
4711 | 0 | *rr_num = 0; |
4712 | 0 | *rr_pos = 0; |
4713 | 0 | if((*rr_chunk)->len >= LDNS_HEADER_SIZE && |
4714 | 0 | LDNS_ANCOUNT((*rr_chunk)->data) > 0) { |
4715 | 0 | return; |
4716 | 0 | } |
4717 | 0 | *rr_chunk = (*rr_chunk)->next; |
4718 | 0 | } |
4719 | 0 | } |
4720 | | |
4721 | | /** RR iterator, get current RR information, false on parse error */ |
4722 | | static int |
4723 | | chunk_rrlist_get_current(struct auth_chunk* rr_chunk, int rr_num, |
4724 | | size_t rr_pos, uint8_t** rr_dname, uint16_t* rr_type, |
4725 | | uint16_t* rr_class, uint32_t* rr_ttl, uint16_t* rr_rdlen, |
4726 | | uint8_t** rr_rdata, size_t* rr_nextpos) |
4727 | 0 | { |
4728 | 0 | sldns_buffer pkt; |
4729 | | /* integrity checks on position */ |
4730 | 0 | if(!rr_chunk) return 0; |
4731 | 0 | if(rr_chunk->len < LDNS_HEADER_SIZE) return 0; |
4732 | 0 | if(rr_num >= (int)LDNS_ANCOUNT(rr_chunk->data)) return 0; |
4733 | 0 | if(rr_pos >= rr_chunk->len) return 0; |
4734 | | |
4735 | | /* fetch rr information */ |
4736 | 0 | sldns_buffer_init_frm_data(&pkt, rr_chunk->data, rr_chunk->len); |
4737 | 0 | if(rr_pos == 0) { |
4738 | 0 | size_t i; |
4739 | | /* skip question section */ |
4740 | 0 | sldns_buffer_set_position(&pkt, LDNS_HEADER_SIZE); |
4741 | 0 | for(i=0; i<LDNS_QDCOUNT(rr_chunk->data); i++) { |
4742 | 0 | if(pkt_dname_len(&pkt) == 0) return 0; |
4743 | 0 | if(sldns_buffer_remaining(&pkt) < 4) return 0; |
4744 | 0 | sldns_buffer_skip(&pkt, 4); /* type and class */ |
4745 | 0 | } |
4746 | 0 | } else { |
4747 | 0 | sldns_buffer_set_position(&pkt, rr_pos); |
4748 | 0 | } |
4749 | 0 | *rr_dname = sldns_buffer_current(&pkt); |
4750 | 0 | if(pkt_dname_len(&pkt) == 0) return 0; |
4751 | 0 | if(sldns_buffer_remaining(&pkt) < 10) return 0; |
4752 | 0 | *rr_type = sldns_buffer_read_u16(&pkt); |
4753 | 0 | *rr_class = sldns_buffer_read_u16(&pkt); |
4754 | 0 | *rr_ttl = sldns_buffer_read_u32(&pkt); |
4755 | 0 | *rr_rdlen = sldns_buffer_read_u16(&pkt); |
4756 | 0 | if(sldns_buffer_remaining(&pkt) < (*rr_rdlen)) return 0; |
4757 | 0 | *rr_rdata = sldns_buffer_current(&pkt); |
4758 | 0 | sldns_buffer_skip(&pkt, (ssize_t)(*rr_rdlen)); |
4759 | 0 | *rr_nextpos = sldns_buffer_position(&pkt); |
4760 | 0 | return 1; |
4761 | 0 | } |
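/* Editor's note (not part of authzone.c): the iterator above is used in the
 * usual start/end/next idiom, as apply_ixfr() and apply_axfr() below do:
 *
 *     chunk_rrlist_start(xfr, &chunk, &num, &pos);
 *     while(!chunk_rrlist_end(chunk, num)) {
 *             if(!chunk_rrlist_get_current(chunk, num, pos, &dname, &type,
 *                     &klass, &ttl, &rdlen, &rdata, &nextpos))
 *                     return 0; (parse error)
 *             ... process one answer-section RR ...
 *             chunk_rrlist_gonext(&chunk, &num, &pos, nextpos);
 *     }
 * The local variable names here are illustrative. */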
4762 | | |
4763 | | /** print log message where we are in parsing the zone transfer */ |
4764 | | static void |
4765 | | log_rrlist_position(const char* label, struct auth_chunk* rr_chunk, |
4766 | | uint8_t* rr_dname, uint16_t rr_type, size_t rr_counter) |
4767 | 0 | { |
4768 | 0 | sldns_buffer pkt; |
4769 | 0 | size_t dlen; |
4770 | 0 | uint8_t buf[256]; |
4771 | 0 | char str[256]; |
4772 | 0 | char typestr[32]; |
4773 | 0 | sldns_buffer_init_frm_data(&pkt, rr_chunk->data, rr_chunk->len); |
4774 | 0 | sldns_buffer_set_position(&pkt, (size_t)(rr_dname - |
4775 | 0 | sldns_buffer_begin(&pkt))); |
4776 | 0 | if((dlen=pkt_dname_len(&pkt)) == 0) return; |
4777 | 0 | if(dlen >= sizeof(buf)) return; |
4778 | 0 | dname_pkt_copy(&pkt, buf, rr_dname); |
4779 | 0 | dname_str(buf, str); |
4780 | 0 | (void)sldns_wire2str_type_buf(rr_type, typestr, sizeof(typestr)); |
4781 | 0 | verbose(VERB_ALGO, "%s at[%d] %s %s", label, (int)rr_counter, |
4782 | 0 | str, typestr); |
4783 | 0 | } |
4784 | | |
4785 | | /** check that the start serial is OK for IXFR. we are at rr_counter == 0, |
4786 | | * and we are going to check the serial of rr_counter == 1 (which has to be type SOA) */ |
4787 | | static int |
4788 | | ixfr_start_serial(struct auth_chunk* rr_chunk, int rr_num, size_t rr_pos, |
4789 | | uint8_t* rr_dname, uint16_t rr_type, uint16_t rr_class, |
4790 | | uint32_t rr_ttl, uint16_t rr_rdlen, uint8_t* rr_rdata, |
4791 | | size_t rr_nextpos, uint32_t transfer_serial, uint32_t xfr_serial) |
4792 | 0 | { |
4793 | 0 | uint32_t startserial; |
4794 | | /* move forward on RR */ |
4795 | 0 | chunk_rrlist_gonext(&rr_chunk, &rr_num, &rr_pos, rr_nextpos); |
4796 | 0 | if(chunk_rrlist_end(rr_chunk, rr_num)) { |
4797 | | /* no second SOA */ |
4798 | 0 | verbose(VERB_OPS, "IXFR has no second SOA record"); |
4799 | 0 | return 0; |
4800 | 0 | } |
4801 | 0 | if(!chunk_rrlist_get_current(rr_chunk, rr_num, rr_pos, |
4802 | 0 | &rr_dname, &rr_type, &rr_class, &rr_ttl, &rr_rdlen, |
4803 | 0 | &rr_rdata, &rr_nextpos)) { |
4804 | 0 | verbose(VERB_OPS, "IXFR cannot parse second SOA record"); |
4805 | | /* failed to parse RR */ |
4806 | 0 | return 0; |
4807 | 0 | } |
4808 | 0 | if(rr_type != LDNS_RR_TYPE_SOA) { |
4809 | 0 | verbose(VERB_OPS, "IXFR second record is not type SOA"); |
4810 | 0 | return 0; |
4811 | 0 | } |
4812 | 0 | if(rr_rdlen < 22) { |
4813 | 0 | verbose(VERB_OPS, "IXFR, second SOA has short rdlength"); |
4814 | 0 | return 0; /* bad SOA rdlen */ |
4815 | 0 | } |
4816 | 0 | startserial = sldns_read_uint32(rr_rdata+rr_rdlen-20); |
4817 | 0 | if(startserial == transfer_serial) { |
4818 | | /* empty AXFR, not an IXFR */ |
4819 | 0 | verbose(VERB_OPS, "IXFR second serial same as first"); |
4820 | 0 | return 0; |
4821 | 0 | } |
4822 | 0 | if(startserial != xfr_serial) { |
4823 | | /* wrong start serial, it does not match the serial in |
4824 | | * memory */ |
4825 | 0 | verbose(VERB_OPS, "IXFR is from serial %u to %u but %u " |
4826 | 0 | "in memory, rejecting the zone transfer", |
4827 | 0 | (unsigned)startserial, (unsigned)transfer_serial, |
4828 | 0 | (unsigned)xfr_serial); |
4829 | 0 | return 0; |
4830 | 0 | } |
4831 | | /* everything OK in second SOA serial */ |
4832 | 0 | return 1; |
4833 | 0 | } |
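/* Editor's addition (standalone sketch, not part of authzone.c): the serial
 * is read above as sldns_read_uint32(rr_rdata+rr_rdlen-20) because SOA rdata
 * is MNAME + RNAME followed by five 32-bit fields (serial, refresh, retry,
 * expire, minimum), so the serial always starts 20 bytes before the end.
 * example_soa_serial() is a hypothetical helper showing the same idea. */
#include <stdint.h>
#include <stddef.h>

static uint32_t
example_soa_serial(const uint8_t* rdata, size_t rdlen)
{
	const uint8_t* p;
	if(rdlen < 22)
		return 0; /* too short: the two names need at least 1 byte each */
	p = rdata + rdlen - 20; /* skip to the five 32-bit counters */
	return ((uint32_t)p[0]<<24) | ((uint32_t)p[1]<<16) |
		((uint32_t)p[2]<<8) | (uint32_t)p[3];
}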
4834 | | |
4835 | | /** apply IXFR to zone in memory. z is locked. false on failure(mallocfail) */ |
4836 | | static int |
4837 | | apply_ixfr(struct auth_xfer* xfr, struct auth_zone* z, |
4838 | | struct sldns_buffer* scratch_buffer) |
4839 | 0 | { |
4840 | 0 | struct auth_chunk* rr_chunk; |
4841 | 0 | int rr_num; |
4842 | 0 | size_t rr_pos; |
4843 | 0 | uint8_t* rr_dname, *rr_rdata; |
4844 | 0 | uint16_t rr_type, rr_class, rr_rdlen; |
4845 | 0 | uint32_t rr_ttl; |
4846 | 0 | size_t rr_nextpos; |
4847 | 0 | int have_transfer_serial = 0; |
4848 | 0 | uint32_t transfer_serial = 0; |
4849 | 0 | size_t rr_counter = 0; |
4850 | 0 | int delmode = 0; |
4851 | 0 | int softfail = 0; |
4852 | | |
4853 | | /* start RR iterator over chunklist of packets */ |
4854 | 0 | chunk_rrlist_start(xfr, &rr_chunk, &rr_num, &rr_pos); |
4855 | 0 | while(!chunk_rrlist_end(rr_chunk, rr_num)) { |
4856 | 0 | if(!chunk_rrlist_get_current(rr_chunk, rr_num, rr_pos, |
4857 | 0 | &rr_dname, &rr_type, &rr_class, &rr_ttl, &rr_rdlen, |
4858 | 0 | &rr_rdata, &rr_nextpos)) { |
4859 | | /* failed to parse RR */ |
4860 | 0 | return 0; |
4861 | 0 | } |
4862 | 0 | if(verbosity>=7) log_rrlist_position("apply ixfr", |
4863 | 0 | rr_chunk, rr_dname, rr_type, rr_counter); |
4864 | | /* twiddle add/del mode and check for start and end */ |
4865 | 0 | if(rr_counter == 0 && rr_type != LDNS_RR_TYPE_SOA) |
4866 | 0 | return 0; |
4867 | 0 | if(rr_counter == 1 && rr_type != LDNS_RR_TYPE_SOA) { |
4868 | | /* this is an AXFR returned from the IXFR master */ |
4869 | | /* but that should already have been detected, by |
4870 | | * on_ixfr_is_axfr */ |
4871 | 0 | return 0; |
4872 | 0 | } |
4873 | 0 | if(rr_type == LDNS_RR_TYPE_SOA) { |
4874 | 0 | uint32_t serial; |
4875 | 0 | if(rr_rdlen < 22) return 0; /* bad SOA rdlen */ |
4876 | 0 | serial = sldns_read_uint32(rr_rdata+rr_rdlen-20); |
4877 | 0 | if(have_transfer_serial == 0) { |
4878 | 0 | have_transfer_serial = 1; |
4879 | 0 | transfer_serial = serial; |
4880 | 0 | delmode = 1; /* gets negated below */ |
4881 | | /* check second RR before going any further */ |
4882 | 0 | if(!ixfr_start_serial(rr_chunk, rr_num, rr_pos, |
4883 | 0 | rr_dname, rr_type, rr_class, rr_ttl, |
4884 | 0 | rr_rdlen, rr_rdata, rr_nextpos, |
4885 | 0 | transfer_serial, xfr->serial)) { |
4886 | 0 | return 0; |
4887 | 0 | } |
4888 | 0 | } else if(transfer_serial == serial) { |
4889 | 0 | have_transfer_serial++; |
4890 | 0 | if(rr_counter == 1) { |
4891 | | /* empty AXFR, with SOA; SOA; */ |
4892 | | /* should have been detected by |
4893 | | * on_ixfr_is_axfr */ |
4894 | 0 | return 0; |
4895 | 0 | } |
4896 | 0 | if(have_transfer_serial == 3) { |
4897 | | /* see serial three times for end */ |
4898 | | /* eg. IXFR: |
4899 | | * SOA 3 start |
4900 | | * SOA 1 second RR, followed by del |
4901 | | * SOA 2 followed by add |
4902 | | * SOA 2 followed by del |
4903 | | * SOA 3 followed by add |
4904 | | * SOA 3 end */ |
4905 | | /* ended by SOA record */ |
4906 | 0 | xfr->serial = transfer_serial; |
4907 | 0 | break; |
4908 | 0 | } |
4909 | 0 | } |
4910 | | /* twiddle add/del mode */ |
4911 | | /* switch from delete part to add part and back again |
4912 | | * just before the soa, it gets deleted and added too |
4913 | | * this means we switch to delete mode for the final |
4914 | | * SOA(so skip that one) */ |
4915 | 0 | delmode = !delmode; |
4916 | 0 | } |
4917 | | /* process this RR */ |
4918 | | /* if the RR is deleted twice or added twice, then we |
4919 | | * softfail, and continue with the rest of the IXFR, so |
4920 | | * that we serve something fairly nice during the refetch */ |
4921 | 0 | if(verbosity>=7) log_rrlist_position((delmode?"del":"add"), |
4922 | 0 | rr_chunk, rr_dname, rr_type, rr_counter); |
4923 | 0 | if(delmode) { |
4924 | | /* delete this RR */ |
4925 | 0 | int nonexist = 0; |
4926 | 0 | if(!az_remove_rr_decompress(z, rr_chunk->data, |
4927 | 0 | rr_chunk->len, scratch_buffer, rr_dname, |
4928 | 0 | rr_type, rr_class, rr_ttl, rr_rdata, rr_rdlen, |
4929 | 0 | &nonexist)) { |
4930 | | /* failed, malloc error or so */ |
4931 | 0 | return 0; |
4932 | 0 | } |
4933 | 0 | if(nonexist) { |
4934 | | /* it was removal of a nonexisting RR */ |
4935 | 0 | if(verbosity>=4) log_rrlist_position( |
4936 | 0 | "IXFR error nonexistent RR", |
4937 | 0 | rr_chunk, rr_dname, rr_type, rr_counter); |
4938 | 0 | softfail = 1; |
4939 | 0 | } |
4940 | 0 | } else if(rr_counter != 0) { |
4941 | | /* skip first SOA RR for addition, it is added in |
4942 | | * the addition part near the end of the ixfr, when |
4943 | | * that serial is seen the second time. */ |
4944 | 0 | int duplicate = 0; |
4945 | | /* add this RR */ |
4946 | 0 | if(!az_insert_rr_decompress(z, rr_chunk->data, |
4947 | 0 | rr_chunk->len, scratch_buffer, rr_dname, |
4948 | 0 | rr_type, rr_class, rr_ttl, rr_rdata, rr_rdlen, |
4949 | 0 | &duplicate)) { |
4950 | | /* failed, malloc error or so */ |
4951 | 0 | return 0; |
4952 | 0 | } |
4953 | 0 | if(duplicate) { |
4954 | | /* it was a duplicate */ |
4955 | 0 | if(verbosity>=4) log_rrlist_position( |
4956 | 0 | "IXFR error duplicate RR", |
4957 | 0 | rr_chunk, rr_dname, rr_type, rr_counter); |
4958 | 0 | softfail = 1; |
4959 | 0 | } |
4960 | 0 | } |
4961 | | |
4962 | 0 | rr_counter++; |
4963 | 0 | chunk_rrlist_gonext(&rr_chunk, &rr_num, &rr_pos, rr_nextpos); |
4964 | 0 | } |
4965 | 0 | if(softfail) { |
4966 | 0 | verbose(VERB_ALGO, "IXFR did not apply cleanly, fetching full zone"); |
4967 | 0 | return 0; |
4968 | 0 | } |
4969 | 0 | return 1; |
4970 | 0 | } |
4971 | | |
4972 | | /** apply AXFR to zone in memory. z is locked. false on failure(mallocfail) */ |
4973 | | static int |
4974 | | apply_axfr(struct auth_xfer* xfr, struct auth_zone* z, |
4975 | | struct sldns_buffer* scratch_buffer) |
4976 | 0 | { |
4977 | 0 | struct auth_chunk* rr_chunk; |
4978 | 0 | int rr_num; |
4979 | 0 | size_t rr_pos; |
4980 | 0 | uint8_t* rr_dname, *rr_rdata; |
4981 | 0 | uint16_t rr_type, rr_class, rr_rdlen; |
4982 | 0 | uint32_t rr_ttl; |
4983 | 0 | uint32_t serial = 0; |
4984 | 0 | size_t rr_nextpos; |
4985 | 0 | size_t rr_counter = 0; |
4986 | 0 | int have_end_soa = 0; |
4987 | | |
4988 | | /* clear the data tree */ |
4989 | 0 | traverse_postorder(&z->data, auth_data_del, NULL); |
4990 | 0 | rbtree_init(&z->data, &auth_data_cmp); |
4991 | | /* clear the RPZ policies */ |
4992 | 0 | if(z->rpz) |
4993 | 0 | rpz_clear(z->rpz); |
4994 | |
4995 | 0 | xfr->have_zone = 0; |
4996 | 0 | xfr->serial = 0; |
4997 | | |
4998 | | /* insert all RRs in to the zone */ |
4999 | | /* insert the SOA only once, skip the last one */ |
5000 | | /* start RR iterator over chunklist of packets */ |
5001 | 0 | chunk_rrlist_start(xfr, &rr_chunk, &rr_num, &rr_pos); |
5002 | 0 | while(!chunk_rrlist_end(rr_chunk, rr_num)) { |
5003 | 0 | if(!chunk_rrlist_get_current(rr_chunk, rr_num, rr_pos, |
5004 | 0 | &rr_dname, &rr_type, &rr_class, &rr_ttl, &rr_rdlen, |
5005 | 0 | &rr_rdata, &rr_nextpos)) { |
5006 | | /* failed to parse RR */ |
5007 | 0 | return 0; |
5008 | 0 | } |
5009 | 0 | if(verbosity>=7) log_rrlist_position("apply_axfr", |
5010 | 0 | rr_chunk, rr_dname, rr_type, rr_counter); |
5011 | 0 | if(rr_type == LDNS_RR_TYPE_SOA) { |
5012 | 0 | if(rr_counter != 0) { |
5013 | | /* end of the axfr */ |
5014 | 0 | have_end_soa = 1; |
5015 | 0 | break; |
5016 | 0 | } |
5017 | 0 | if(rr_rdlen < 22) return 0; /* bad SOA rdlen */ |
5018 | 0 | serial = sldns_read_uint32(rr_rdata+rr_rdlen-20); |
5019 | 0 | } |
5020 | | |
5021 | | /* add this RR */ |
5022 | 0 | if(!az_insert_rr_decompress(z, rr_chunk->data, rr_chunk->len, |
5023 | 0 | scratch_buffer, rr_dname, rr_type, rr_class, rr_ttl, |
5024 | 0 | rr_rdata, rr_rdlen, NULL)) { |
5025 | | /* failed, malloc error or so */ |
5026 | 0 | return 0; |
5027 | 0 | } |
5028 | | |
5029 | 0 | rr_counter++; |
5030 | 0 | chunk_rrlist_gonext(&rr_chunk, &rr_num, &rr_pos, rr_nextpos); |
5031 | 0 | } |
5032 | 0 | if(!have_end_soa) { |
5033 | 0 | log_err("no end SOA record for AXFR"); |
5034 | 0 | return 0; |
5035 | 0 | } |
5036 | | |
5037 | 0 | xfr->serial = serial; |
5038 | 0 | xfr->have_zone = 1; |
5039 | 0 | return 1; |
5040 | 0 | } |
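/* Editor's note (not part of authzone.c): an AXFR stream is the zone's SOA,
 * then all other RRs, then the same SOA again; apply_axfr() above inserts
 * every RR once and treats the second SOA (rr_counter != 0) purely as the
 * end-of-transfer marker, setting have_end_soa without inserting it again. */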
5041 | | |
5042 | | /** apply HTTP to zone in memory. z is locked. false on failure(mallocfail) */ |
5043 | | static int |
5044 | | apply_http(struct auth_xfer* xfr, struct auth_zone* z, |
5045 | | struct sldns_buffer* scratch_buffer) |
5046 | 0 | { |
5047 | | /* parse data in chunks */ |
5048 | | /* parse RRs and read them into memory. ignore $INCLUDE from the |
5049 | | * downloaded file */ |
5050 | 0 | struct sldns_file_parse_state pstate; |
5051 | 0 | struct auth_chunk* chunk; |
5052 | 0 | size_t chunk_pos; |
5053 | 0 | int ret; |
5054 | 0 | memset(&pstate, 0, sizeof(pstate)); |
5055 | 0 | pstate.default_ttl = 3600; |
5056 | 0 | if(xfr->namelen < sizeof(pstate.origin)) { |
5057 | 0 | pstate.origin_len = xfr->namelen; |
5058 | 0 | memmove(pstate.origin, xfr->name, xfr->namelen); |
5059 | 0 | } |
5060 | |
5061 | 0 | if(verbosity >= VERB_ALGO) |
5062 | 0 | verbose(VERB_ALGO, "http download %s of size %d", |
5063 | 0 | xfr->task_transfer->master->file, |
5064 | 0 | (int)chunklist_sum(xfr->task_transfer->chunks_first)); |
5065 | 0 | if(xfr->task_transfer->chunks_first && verbosity >= VERB_ALGO) { |
5066 | 0 | char preview[1024]; |
5067 | 0 | if(xfr->task_transfer->chunks_first->len+1 > sizeof(preview)) { |
5068 | 0 | memmove(preview, xfr->task_transfer->chunks_first->data, |
5069 | 0 | sizeof(preview)-1); |
5070 | 0 | preview[sizeof(preview)-1]=0; |
5071 | 0 | } else { |
5072 | 0 | memmove(preview, xfr->task_transfer->chunks_first->data, |
5073 | 0 | xfr->task_transfer->chunks_first->len); |
5074 | 0 | preview[xfr->task_transfer->chunks_first->len]=0; |
5075 | 0 | } |
5076 | 0 | log_info("auth zone http downloaded content preview: %s", |
5077 | 0 | preview); |
5078 | 0 | } |
5079 | | |
5080 | | /* perhaps a little syntax check before we try to apply the data? */ |
5081 | 0 | if(!http_zonefile_syntax_check(xfr, scratch_buffer)) { |
5082 | 0 | log_err("http download %s/%s does not contain a zonefile, " |
5083 | 0 | "but got '%s'", xfr->task_transfer->master->host, |
5084 | 0 | xfr->task_transfer->master->file, |
5085 | 0 | sldns_buffer_begin(scratch_buffer)); |
5086 | 0 | return 0; |
5087 | 0 | } |
5088 | | |
5089 | | /* clear the data tree */ |
5090 | 0 | traverse_postorder(&z->data, auth_data_del, NULL); |
5091 | 0 | rbtree_init(&z->data, &auth_data_cmp); |
5092 | | /* clear the RPZ policies */ |
5093 | 0 | if(z->rpz) |
5094 | 0 | rpz_clear(z->rpz); |
5095 | |
5096 | 0 | xfr->have_zone = 0; |
5097 | 0 | xfr->serial = 0; |
5098 | |
5099 | 0 | chunk = xfr->task_transfer->chunks_first; |
5100 | 0 | chunk_pos = 0; |
5101 | 0 | pstate.lineno = 0; |
5102 | 0 | while(chunkline_get_line_collated(&chunk, &chunk_pos, scratch_buffer)) { |
5103 | | /* process this line */ |
5104 | 0 | pstate.lineno++; |
5105 | 0 | chunkline_newline_removal(scratch_buffer); |
5106 | 0 | if(chunkline_is_comment_line_or_empty(scratch_buffer)) { |
5107 | 0 | continue; |
5108 | 0 | } |
5109 | | /* parse line and add RR */ |
5110 | 0 | if((ret=http_parse_origin(scratch_buffer, &pstate))!=0) { |
5111 | 0 | if(ret == 2) { |
5112 | 0 | verbose(VERB_ALGO, "error parsing ORIGIN on line [%s:%d] %s", |
5113 | 0 | xfr->task_transfer->master->file, |
5114 | 0 | pstate.lineno, |
5115 | 0 | sldns_buffer_begin(scratch_buffer)); |
5116 | 0 | return 0; |
5117 | 0 | } |
5118 | 0 | continue; /* $ORIGIN has been handled */ |
5119 | 0 | } |
5120 | 0 | if((ret=http_parse_ttl(scratch_buffer, &pstate))!=0) { |
5121 | 0 | if(ret == 2) { |
5122 | 0 | verbose(VERB_ALGO, "error parsing TTL on line [%s:%d] %s", |
5123 | 0 | xfr->task_transfer->master->file, |
5124 | 0 | pstate.lineno, |
5125 | 0 | sldns_buffer_begin(scratch_buffer)); |
5126 | 0 | return 0; |
5127 | 0 | } |
5128 | 0 | continue; /* $TTL has been handled */ |
5129 | 0 | } |
5130 | 0 | if(!http_parse_add_rr(xfr, z, scratch_buffer, &pstate)) { |
5131 | 0 | verbose(VERB_ALGO, "error parsing line [%s:%d] %s", |
5132 | 0 | xfr->task_transfer->master->file, |
5133 | 0 | pstate.lineno, |
5134 | 0 | sldns_buffer_begin(scratch_buffer)); |
5135 | 0 | return 0; |
5136 | 0 | } |
5137 | 0 | } |
5138 | 0 | return 1; |
5139 | 0 | } |
5140 | | |
5141 | | /** write http chunks to zonefile to create downloaded file */ |
5142 | | static int |
5143 | | auth_zone_write_chunks(struct auth_xfer* xfr, const char* fname) |
5144 | 0 | { |
5145 | 0 | FILE* out; |
5146 | 0 | struct auth_chunk* p; |
5147 | 0 | out = fopen(fname, "w"); |
5148 | 0 | if(!out) { |
5149 | 0 | log_err("could not open %s: %s", fname, strerror(errno)); |
5150 | 0 | return 0; |
5151 | 0 | } |
5152 | 0 | for(p = xfr->task_transfer->chunks_first; p ; p = p->next) { |
5153 | 0 | if(!write_out(out, (char*)p->data, p->len)) { |
5154 | 0 | log_err("could not write http download to %s", fname); |
5155 | 0 | fclose(out); |
5156 | 0 | return 0; |
5157 | 0 | } |
5158 | 0 | } |
5159 | 0 | fclose(out); |
5160 | 0 | return 1; |
5161 | 0 | } |
5162 | | |
5163 | | /** write to zonefile after zone has been updated */ |
5164 | | static void |
5165 | | xfr_write_after_update(struct auth_xfer* xfr, struct module_env* env) |
5166 | 0 | { |
5167 | 0 | struct config_file* cfg = env->cfg; |
5168 | 0 | struct auth_zone* z; |
5169 | 0 | char tmpfile[1024]; |
5170 | 0 | char* zfilename; |
5171 | 0 | lock_basic_unlock(&xfr->lock); |
5172 | | |
5173 | | /* get the lock again, as a readlock, so that queries can be |
5174 | | * answered concurrently */ |
5175 | 0 | lock_rw_rdlock(&env->auth_zones->lock); |
5176 | 0 | z = auth_zone_find(env->auth_zones, xfr->name, xfr->namelen, |
5177 | 0 | xfr->dclass); |
5178 | 0 | if(!z) { |
5179 | 0 | lock_rw_unlock(&env->auth_zones->lock); |
5180 | | /* the zone is gone, ignore xfr results */ |
5181 | 0 | lock_basic_lock(&xfr->lock); |
5182 | 0 | return; |
5183 | 0 | } |
5184 | 0 | lock_rw_rdlock(&z->lock); |
5185 | 0 | lock_basic_lock(&xfr->lock); |
5186 | 0 | lock_rw_unlock(&env->auth_zones->lock); |
5187 | |
5188 | 0 | if(z->zonefile == NULL || z->zonefile[0] == 0) { |
5189 | 0 | lock_rw_unlock(&z->lock); |
5190 | | /* no write needed, no zonefile set */ |
5191 | 0 | return; |
5192 | 0 | } |
5193 | 0 | zfilename = z->zonefile; |
5194 | 0 | if(cfg->chrootdir && cfg->chrootdir[0] && strncmp(zfilename, |
5195 | 0 | cfg->chrootdir, strlen(cfg->chrootdir)) == 0) |
5196 | 0 | zfilename += strlen(cfg->chrootdir); |
5197 | 0 | if(verbosity >= VERB_ALGO) { |
5198 | 0 | char nm[255+1]; |
5199 | 0 | dname_str(z->name, nm); |
5200 | 0 | verbose(VERB_ALGO, "write zonefile %s for %s", zfilename, nm); |
5201 | 0 | } |
5202 | | |
5203 | | /* write to tempfile first */ |
5204 | 0 | if((size_t)strlen(zfilename) + 16 > sizeof(tmpfile)) { |
5205 | 0 | verbose(VERB_ALGO, "tmpfilename too long, cannot update " |
5206 | 0 | " zonefile %s", zfilename); |
5207 | 0 | lock_rw_unlock(&z->lock); |
5208 | 0 | return; |
5209 | 0 | } |
5210 | 0 | snprintf(tmpfile, sizeof(tmpfile), "%s.tmp%u", zfilename, |
5211 | 0 | (unsigned)getpid()); |
5212 | 0 | if(xfr->task_transfer->master->http) { |
5213 | | /* use the stored chunk list to write them */ |
5214 | 0 | if(!auth_zone_write_chunks(xfr, tmpfile)) { |
5215 | 0 | unlink(tmpfile); |
5216 | 0 | lock_rw_unlock(&z->lock); |
5217 | 0 | return; |
5218 | 0 | } |
5219 | 0 | } else if(!auth_zone_write_file(z, tmpfile)) { |
5220 | 0 | unlink(tmpfile); |
5221 | 0 | lock_rw_unlock(&z->lock); |
5222 | 0 | return; |
5223 | 0 | } |
5224 | | #ifdef UB_ON_WINDOWS |
5225 | | (void)unlink(zfilename); /* windows does not replace file with rename() */ |
5226 | | #endif |
5227 | 0 | if(rename(tmpfile, zfilename) < 0) { |
5228 | 0 | log_err("could not rename(%s, %s): %s", tmpfile, zfilename, |
5229 | 0 | strerror(errno)); |
5230 | 0 | unlink(tmpfile); |
5231 | 0 | lock_rw_unlock(&z->lock); |
5232 | 0 | return; |
5233 | 0 | } |
5234 | 0 | lock_rw_unlock(&z->lock); |
5235 | 0 | } |
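/* Editor's note (not part of authzone.c): xfr_write_after_update() uses the
 * common write-to-temp-then-rename pattern so readers never see a partially
 * written zonefile: the content goes to "<zonefile>.tmp<pid>" first and is
 * then rename()d over the real name; on POSIX the rename replaces the file
 * atomically, while on Windows the old file must be unlink()ed first, as the
 * #ifdef above shows. Every failure path removes the temporary file. */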
5236 | | |
5237 | | /** reacquire locks and structures. Starts with no locks held; ends |
5238 | | * with the xfr and z locks held, or on failure with only the xfr lock */ |
5239 | | static int xfr_process_reacquire_locks(struct auth_xfer* xfr, |
5240 | | struct module_env* env, struct auth_zone** z) |
5241 | 0 | { |
5242 | | /* release xfr lock, then, while holding az->lock grab both |
5243 | | * z->lock and xfr->lock */ |
5244 | 0 | lock_rw_rdlock(&env->auth_zones->lock); |
5245 | 0 | *z = auth_zone_find(env->auth_zones, xfr->name, xfr->namelen, |
5246 | 0 | xfr->dclass); |
5247 | 0 | if(!*z) { |
5248 | 0 | lock_rw_unlock(&env->auth_zones->lock); |
5249 | 0 | lock_basic_lock(&xfr->lock); |
5250 | 0 | *z = NULL; |
5251 | 0 | return 0; |
5252 | 0 | } |
5253 | 0 | lock_rw_wrlock(&(*z)->lock); |
5254 | 0 | lock_basic_lock(&xfr->lock); |
5255 | 0 | lock_rw_unlock(&env->auth_zones->lock); |
5256 | 0 | return 1; |
5257 | 0 | } |
5258 | | |
5259 | | /** process chunk list and update zone in memory, |
5260 | | * return false if it did not work */ |
5261 | | static int |
5262 | | xfr_process_chunk_list(struct auth_xfer* xfr, struct module_env* env, |
5263 | | int* ixfr_fail) |
5264 | 0 | { |
5265 | 0 | struct auth_zone* z; |
5266 | | |
5267 | | /* obtain locks and structures */ |
5268 | 0 | lock_basic_unlock(&xfr->lock); |
5269 | 0 | if(!xfr_process_reacquire_locks(xfr, env, &z)) { |
5270 | | /* the zone is gone, ignore xfr results */ |
5271 | 0 | return 0; |
5272 | 0 | } |
5273 | | /* holding xfr and z locks */ |
5274 | | |
5275 | | /* apply data */ |
5276 | 0 | if(xfr->task_transfer->master->http) { |
5277 | 0 | if(!apply_http(xfr, z, env->scratch_buffer)) { |
5278 | 0 | lock_rw_unlock(&z->lock); |
5279 | 0 | verbose(VERB_ALGO, "http from %s: could not store data", |
5280 | 0 | xfr->task_transfer->master->host); |
5281 | 0 | return 0; |
5282 | 0 | } |
5283 | 0 | } else if(xfr->task_transfer->on_ixfr && |
5284 | 0 | !xfr->task_transfer->on_ixfr_is_axfr) { |
5285 | 0 | if(!apply_ixfr(xfr, z, env->scratch_buffer)) { |
5286 | 0 | lock_rw_unlock(&z->lock); |
5287 | 0 | verbose(VERB_ALGO, "xfr from %s: could not store IXFR" |
5288 | 0 | " data", xfr->task_transfer->master->host); |
5289 | 0 | *ixfr_fail = 1; |
5290 | 0 | return 0; |
5291 | 0 | } |
5292 | 0 | } else { |
5293 | 0 | if(!apply_axfr(xfr, z, env->scratch_buffer)) { |
5294 | 0 | lock_rw_unlock(&z->lock); |
5295 | 0 | verbose(VERB_ALGO, "xfr from %s: could not store AXFR" |
5296 | 0 | " data", xfr->task_transfer->master->host); |
5297 | 0 | return 0; |
5298 | 0 | } |
5299 | 0 | } |
5300 | 0 | xfr->zone_expired = 0; |
5301 | 0 | z->zone_expired = 0; |
5302 | 0 | if(!xfr_find_soa(z, xfr)) { |
5303 | 0 | lock_rw_unlock(&z->lock); |
5304 | 0 | verbose(VERB_ALGO, "xfr from %s: no SOA in zone after update" |
5305 | 0 | " (or malformed RR)", xfr->task_transfer->master->host); |
5306 | 0 | return 0; |
5307 | 0 | } |
5308 | | |
5309 | | /* release xfr lock while verifying zonemd because it may have |
5310 | | * to spawn lookups in the state machines */ |
5311 | 0 | lock_basic_unlock(&xfr->lock); |
5312 | | /* holding z lock */ |
5313 | 0 | auth_zone_verify_zonemd(z, env, &env->mesh->mods, NULL, 0, 0); |
5314 | 0 | if(z->zone_expired) { |
5315 | 0 | char zname[256]; |
5316 | | /* ZONEMD must have failed */ |
5317 | | /* reacquire locks, so we hold xfr lock on exit of routine, |
5318 | | * and both xfr and z again after releasing xfr for potential |
5319 | | * state machine mesh callbacks */ |
5320 | 0 | lock_rw_unlock(&z->lock); |
5321 | 0 | if(!xfr_process_reacquire_locks(xfr, env, &z)) |
5322 | 0 | return 0; |
5323 | 0 | dname_str(xfr->name, zname); |
5324 | 0 | verbose(VERB_ALGO, "xfr from %s: ZONEMD failed for %s, transfer is failed", xfr->task_transfer->master->host, zname); |
5325 | 0 | xfr->zone_expired = 1; |
5326 | 0 | lock_rw_unlock(&z->lock); |
5327 | 0 | return 0; |
5328 | 0 | } |
5329 | | /* reacquire locks, so we hold xfr lock on exit of routine, |
5330 | | * and both xfr and z again after releasing xfr for potential |
5331 | | * state machine mesh callbacks */ |
5332 | 0 | lock_rw_unlock(&z->lock); |
5333 | 0 | if(!xfr_process_reacquire_locks(xfr, env, &z)) |
5334 | 0 | return 0; |
5335 | | /* holding xfr and z locks */ |
5336 | | |
5337 | 0 | if(xfr->have_zone) |
5338 | 0 | xfr->lease_time = *env->now; |
5339 | |
5340 | 0 | if(z->rpz) |
5341 | 0 | rpz_finish_config(z->rpz); |
5342 | | |
5343 | | /* unlock */ |
5344 | 0 | lock_rw_unlock(&z->lock); |
5345 | |
5346 | 0 | if(verbosity >= VERB_QUERY && xfr->have_zone) { |
5347 | 0 | char zname[256]; |
5348 | 0 | dname_str(xfr->name, zname); |
5349 | 0 | verbose(VERB_QUERY, "auth zone %s updated to serial %u", zname, |
5350 | 0 | (unsigned)xfr->serial); |
5351 | 0 | } |
5352 | | /* see if we need to write to a zonefile */ |
5353 | 0 | xfr_write_after_update(xfr, env); |
5354 | 0 | return 1; |
5355 | 0 | } |
5356 | | |
5357 | | /** disown task_transfer. caller must hold xfr.lock */ |
5358 | | static void |
5359 | | xfr_transfer_disown(struct auth_xfer* xfr) |
5360 | 0 | { |
5361 | | /* remove timer (from this worker's event base) */ |
5362 | 0 | comm_timer_delete(xfr->task_transfer->timer); |
5363 | 0 | xfr->task_transfer->timer = NULL; |
5364 | | /* remove the commpoint */ |
5365 | 0 | comm_point_delete(xfr->task_transfer->cp); |
5366 | 0 | xfr->task_transfer->cp = NULL; |
5367 | | /* we don't own this item anymore */ |
5368 | 0 | xfr->task_transfer->worker = NULL; |
5369 | 0 | xfr->task_transfer->env = NULL; |
5370 | 0 | } |
5371 | | |
5372 | | /** lookup a host name for its addresses, if needed */ |
5373 | | static int |
5374 | | xfr_transfer_lookup_host(struct auth_xfer* xfr, struct module_env* env) |
5375 | 0 | { |
5376 | 0 | struct sockaddr_storage addr; |
5377 | 0 | socklen_t addrlen = 0; |
5378 | 0 | struct auth_master* master = xfr->task_transfer->lookup_target; |
5379 | 0 | struct query_info qinfo; |
5380 | 0 | uint16_t qflags = BIT_RD; |
5381 | 0 | uint8_t dname[LDNS_MAX_DOMAINLEN+1]; |
5382 | 0 | struct edns_data edns; |
5383 | 0 | sldns_buffer* buf = env->scratch_buffer; |
5384 | 0 | if(!master) return 0; |
5385 | 0 | if(extstrtoaddr(master->host, &addr, &addrlen, UNBOUND_DNS_PORT)) { |
5386 | | /* not needed, host is in IP addr format */ |
5387 | 0 | return 0; |
5388 | 0 | } |
5389 | 0 | if(master->allow_notify) |
5390 | 0 | return 0; /* allow-notifies are not transferred from, no |
5391 | | lookup is needed */ |
5392 | | |
5393 | | /* use mesh_new_callback to probe for non-addr hosts, |
5394 | | * and then wait for them to be looked up (in cache, or query) */ |
5395 | 0 | qinfo.qname_len = sizeof(dname); |
5396 | 0 | if(sldns_str2wire_dname_buf(master->host, dname, &qinfo.qname_len) |
5397 | 0 | != 0) { |
5398 | 0 | log_err("cannot parse host name of master %s", master->host); |
5399 | 0 | return 0; |
5400 | 0 | } |
5401 | 0 | qinfo.qname = dname; |
5402 | 0 | qinfo.qclass = xfr->dclass; |
5403 | 0 | qinfo.qtype = LDNS_RR_TYPE_A; |
5404 | 0 | if(xfr->task_transfer->lookup_aaaa) |
5405 | 0 | qinfo.qtype = LDNS_RR_TYPE_AAAA; |
5406 | 0 | qinfo.local_alias = NULL; |
5407 | 0 | if(verbosity >= VERB_ALGO) { |
5408 | 0 | char buf1[512]; |
5409 | 0 | char buf2[LDNS_MAX_DOMAINLEN+1]; |
5410 | 0 | dname_str(xfr->name, buf2); |
5411 | 0 | snprintf(buf1, sizeof(buf1), "auth zone %s: master lookup" |
5412 | 0 | " for task_transfer", buf2); |
5413 | 0 | log_query_info(VERB_ALGO, buf1, &qinfo); |
5414 | 0 | } |
5415 | 0 | edns.edns_present = 1; |
5416 | 0 | edns.ext_rcode = 0; |
5417 | 0 | edns.edns_version = 0; |
5418 | 0 | edns.bits = EDNS_DO; |
5419 | 0 | edns.opt_list_in = NULL; |
5420 | 0 | edns.opt_list_out = NULL; |
5421 | 0 | edns.opt_list_inplace_cb_out = NULL; |
5422 | 0 | edns.padding_block_size = 0; |
5423 | 0 | if(sldns_buffer_capacity(buf) < 65535) |
5424 | 0 | edns.udp_size = (uint16_t)sldns_buffer_capacity(buf); |
5425 | 0 | else edns.udp_size = 65535; |
5426 | | |
5427 | | /* unlock xfr during mesh_new_callback() because the callback can be |
5428 | | * called straight away */ |
5429 | 0 | lock_basic_unlock(&xfr->lock); |
5430 | 0 | if(!mesh_new_callback(env->mesh, &qinfo, qflags, &edns, buf, 0, |
5431 | 0 | &auth_xfer_transfer_lookup_callback, xfr, 0)) { |
5432 | 0 | lock_basic_lock(&xfr->lock); |
5433 | 0 | log_err("out of memory looking up master %s", master->host); |
5434 | 0 | return 0; |
5435 | 0 | } |
5436 | 0 | lock_basic_lock(&xfr->lock); |
5437 | 0 | return 1; |
5438 | 0 | } |
5439 | | |
5440 | | /** initiate TCP to the target and fetch zone. |
5441 | | * returns true if that was successfully started, and timeout setup. */ |
5442 | | static int |
5443 | | xfr_transfer_init_fetch(struct auth_xfer* xfr, struct module_env* env) |
5444 | 0 | { |
5445 | 0 | struct sockaddr_storage addr; |
5446 | 0 | socklen_t addrlen = 0; |
5447 | 0 | struct auth_master* master = xfr->task_transfer->master; |
5448 | 0 | char *auth_name = NULL; |
5449 | 0 | struct timeval t; |
5450 | 0 | int timeout; |
5451 | 0 | if(!master) return 0; |
5452 | 0 | if(master->allow_notify) return 0; /* only for notify */ |
5453 | | |
5454 | | /* get master addr */ |
5455 | 0 | if(xfr->task_transfer->scan_addr) { |
5456 | 0 | addrlen = xfr->task_transfer->scan_addr->addrlen; |
5457 | 0 | memmove(&addr, &xfr->task_transfer->scan_addr->addr, addrlen); |
5458 | 0 | } else { |
5459 | 0 | if(!authextstrtoaddr(master->host, &addr, &addrlen, &auth_name)) { |
5460 | | /* the ones that are not in addr format are supposed |
5461 | | * to be looked up. The lookup has failed however, |
5462 | | * so skip them */ |
5463 | 0 | char zname[255+1]; |
5464 | 0 | dname_str(xfr->name, zname); |
5465 | 0 | log_err("%s: failed lookup, cannot transfer from master %s", |
5466 | 0 | zname, master->host); |
5467 | 0 | return 0; |
5468 | 0 | } |
5469 | 0 | } |
5470 | | |
5471 | | /* remove previous TCP connection (if any) */ |
5472 | 0 | if(xfr->task_transfer->cp) { |
5473 | 0 | comm_point_delete(xfr->task_transfer->cp); |
5474 | 0 | xfr->task_transfer->cp = NULL; |
5475 | 0 | } |
5476 | 0 | if(!xfr->task_transfer->timer) { |
5477 | 0 | xfr->task_transfer->timer = comm_timer_create(env->worker_base, |
5478 | 0 | auth_xfer_transfer_timer_callback, xfr); |
5479 | 0 | if(!xfr->task_transfer->timer) { |
5480 | 0 | log_err("malloc failure"); |
5481 | 0 | return 0; |
5482 | 0 | } |
5483 | 0 | } |
5484 | 0 | timeout = AUTH_TRANSFER_TIMEOUT; |
5485 | 0 | #ifndef S_SPLINT_S |
5486 | 0 | t.tv_sec = timeout/1000; |
5487 | 0 | t.tv_usec = (timeout%1000)*1000; |
5488 | 0 | #endif |
5489 | |
5490 | 0 | if(master->http) { |
5491 | | /* perform http fetch */ |
5492 | | /* store http port number into sockaddr, |
5493 | | * unless someone used unbound's host@port notation */ |
5494 | 0 | xfr->task_transfer->on_ixfr = 0; |
5495 | 0 | if(strchr(master->host, '@') == NULL) |
5496 | 0 | sockaddr_store_port(&addr, addrlen, master->port); |
5497 | 0 | xfr->task_transfer->cp = outnet_comm_point_for_http( |
5498 | 0 | env->outnet, auth_xfer_transfer_http_callback, xfr, |
5499 | 0 | &addr, addrlen, -1, master->ssl, master->host, |
5500 | 0 | master->file, env->cfg); |
5501 | 0 | if(!xfr->task_transfer->cp) { |
5502 | 0 | char zname[255+1], as[256]; |
5503 | 0 | dname_str(xfr->name, zname); |
5504 | 0 | addr_to_str(&addr, addrlen, as, sizeof(as)); |
5505 | 0 | verbose(VERB_ALGO, "cannot create http cp " |
5506 | 0 | "connection for %s to %s", zname, as); |
5507 | 0 | return 0; |
5508 | 0 | } |
5509 | 0 | comm_timer_set(xfr->task_transfer->timer, &t); |
5510 | 0 | if(verbosity >= VERB_ALGO) { |
5511 | 0 | char zname[255+1], as[256]; |
5512 | 0 | dname_str(xfr->name, zname); |
5513 | 0 | addr_to_str(&addr, addrlen, as, sizeof(as)); |
5514 | 0 | verbose(VERB_ALGO, "auth zone %s transfer next HTTP fetch from %s started", zname, as); |
5515 | 0 | } |
5516 | | /* Create or refresh the list of allow_notify addrs */ |
5517 | 0 | probe_copy_masters_for_allow_notify(xfr); |
5518 | 0 | return 1; |
5519 | 0 | } |
5520 | | |
5521 | | /* perform AXFR/IXFR */ |
5522 | | /* set the packet to be written */ |
5523 | | /* create new ID */ |
5524 | 0 | xfr->task_transfer->id = GET_RANDOM_ID(env->rnd); |
5525 | 0 | xfr_create_ixfr_packet(xfr, env->scratch_buffer, |
5526 | 0 | xfr->task_transfer->id, master); |
5527 | | |
5528 | | /* connect on fd */ |
5529 | 0 | xfr->task_transfer->cp = outnet_comm_point_for_tcp(env->outnet, |
5530 | 0 | auth_xfer_transfer_tcp_callback, xfr, &addr, addrlen, |
5531 | 0 | env->scratch_buffer, -1, |
5532 | 0 | auth_name != NULL, auth_name); |
5533 | 0 | if(!xfr->task_transfer->cp) { |
5534 | 0 | char zname[255+1], as[256]; |
5535 | 0 | dname_str(xfr->name, zname); |
5536 | 0 | addr_to_str(&addr, addrlen, as, sizeof(as)); |
5537 | 0 | verbose(VERB_ALGO, "cannot create tcp cp connection for " |
5538 | 0 | "xfr %s to %s", zname, as); |
5539 | 0 | return 0; |
5540 | 0 | } |
5541 | 0 | comm_timer_set(xfr->task_transfer->timer, &t); |
5542 | 0 | if(verbosity >= VERB_ALGO) { |
5543 | 0 | char zname[255+1], as[256]; |
5544 | 0 | dname_str(xfr->name, zname); |
5545 | 0 | addr_to_str(&addr, addrlen, as, sizeof(as)); |
5546 | 0 | verbose(VERB_ALGO, "auth zone %s transfer next %s fetch from %s started", zname, |
5547 | 0 | (xfr->task_transfer->on_ixfr?"IXFR":"AXFR"), as); |
5548 | 0 | } |
5549 | 0 | return 1; |
5550 | 0 | } |
5551 | | |
5552 | | /** perform next lookup, next transfer TCP, or end and resume wait time task */ |
5553 | | static void |
5554 | | xfr_transfer_nexttarget_or_end(struct auth_xfer* xfr, struct module_env* env) |
5555 | 0 | { |
5556 | 0 | log_assert(xfr->task_transfer->worker == env->worker); |
5557 | | |
5558 | | /* are we performing lookups? */ |
5559 | 0 | while(xfr->task_transfer->lookup_target) { |
5560 | 0 | if(xfr_transfer_lookup_host(xfr, env)) { |
5561 | | /* wait for lookup to finish, |
5562 | | * note that the hostname may be in unbound's cache |
5563 | | * and we may then get an instant cache response, |
5564 | | * and that calls the callback just like a full |
5565 | | * lookup and lookup failures also call callback */ |
5566 | 0 | if(verbosity >= VERB_ALGO) { |
5567 | 0 | char zname[255+1]; |
5568 | 0 | dname_str(xfr->name, zname); |
5569 | 0 | verbose(VERB_ALGO, "auth zone %s transfer next target lookup", zname); |
5570 | 0 | } |
5571 | 0 | lock_basic_unlock(&xfr->lock); |
5572 | 0 | return; |
5573 | 0 | } |
5574 | 0 | xfr_transfer_move_to_next_lookup(xfr, env); |
5575 | 0 | } |
5576 | | |
5577 | | /* initiate TCP and fetch the zone from the master */ |
5578 | | /* and set timeout on it */ |
5579 | 0 | while(!xfr_transfer_end_of_list(xfr)) { |
5580 | 0 | xfr->task_transfer->master = xfr_transfer_current_master(xfr); |
5581 | 0 | if(xfr_transfer_init_fetch(xfr, env)) { |
5582 | | /* successfully started, wait for callback */ |
5583 | 0 | lock_basic_unlock(&xfr->lock); |
5584 | 0 | return; |
5585 | 0 | } |
5586 | | /* failed to fetch, next master */ |
5587 | 0 | xfr_transfer_nextmaster(xfr); |
5588 | 0 | } |
5589 | 0 | if(verbosity >= VERB_ALGO) { |
5590 | 0 | char zname[255+1]; |
5591 | 0 | dname_str(xfr->name, zname); |
5592 | 0 | verbose(VERB_ALGO, "auth zone %s transfer failed, wait", zname); |
5593 | 0 | } |
5594 | | |
5595 | | /* we failed to fetch the zone, move to wait task |
5596 | | * use the shorter retry timeout */ |
5597 | 0 | xfr_transfer_disown(xfr); |
5598 | | |
5599 | | /* pick up the nextprobe task and wait */ |
5600 | 0 | if(xfr->task_nextprobe->worker == NULL) |
5601 | 0 | xfr_set_timeout(xfr, env, 1, 0); |
5602 | 0 | lock_basic_unlock(&xfr->lock); |
5603 | 0 | } |
5604 | | |
5605 | | /** add addrs from A or AAAA rrset to the master */ |
5606 | | static void |
5607 | | xfr_master_add_addrs(struct auth_master* m, struct ub_packed_rrset_key* rrset, |
5608 | | uint16_t rrtype) |
5609 | 0 | { |
5610 | 0 | size_t i; |
5611 | 0 | struct packed_rrset_data* data; |
5612 | 0 | if(!m || !rrset) return; |
5613 | 0 | if(rrtype != LDNS_RR_TYPE_A && rrtype != LDNS_RR_TYPE_AAAA) |
5614 | 0 | return; |
5615 | 0 | data = (struct packed_rrset_data*)rrset->entry.data; |
5616 | 0 | for(i=0; i<data->count; i++) { |
5617 | 0 | struct auth_addr* a; |
5618 | 0 | size_t len = data->rr_len[i] - 2; |
5619 | 0 | uint8_t* rdata = data->rr_data[i]+2; |
5620 | 0 | if(rrtype == LDNS_RR_TYPE_A && len != INET_SIZE) |
5621 | 0 | continue; /* wrong length for A */ |
5622 | 0 | if(rrtype == LDNS_RR_TYPE_AAAA && len != INET6_SIZE) |
5623 | 0 | continue; /* wrong length for AAAA */ |
5624 | | |
5625 | | /* add and alloc it */ |
5626 | 0 | a = (struct auth_addr*)calloc(1, sizeof(*a)); |
5627 | 0 | if(!a) { |
5628 | 0 | log_err("out of memory"); |
5629 | 0 | return; |
5630 | 0 | } |
5631 | 0 | if(rrtype == LDNS_RR_TYPE_A) { |
5632 | 0 | struct sockaddr_in* sa; |
5633 | 0 | a->addrlen = (socklen_t)sizeof(*sa); |
5634 | 0 | sa = (struct sockaddr_in*)&a->addr; |
5635 | 0 | sa->sin_family = AF_INET; |
5636 | 0 | sa->sin_port = (in_port_t)htons(UNBOUND_DNS_PORT); |
5637 | 0 | memmove(&sa->sin_addr, rdata, INET_SIZE); |
5638 | 0 | } else { |
5639 | 0 | struct sockaddr_in6* sa; |
5640 | 0 | a->addrlen = (socklen_t)sizeof(*sa); |
5641 | 0 | sa = (struct sockaddr_in6*)&a->addr; |
5642 | 0 | sa->sin6_family = AF_INET6; |
5643 | 0 | sa->sin6_port = (in_port_t)htons(UNBOUND_DNS_PORT); |
5644 | 0 | memmove(&sa->sin6_addr, rdata, INET6_SIZE); |
5645 | 0 | } |
5646 | 0 | if(verbosity >= VERB_ALGO) { |
5647 | 0 | char s[64]; |
5648 | 0 | addr_to_str(&a->addr, a->addrlen, s, sizeof(s)); |
5649 | 0 | verbose(VERB_ALGO, "auth host %s lookup %s", |
5650 | 0 | m->host, s); |
5651 | 0 | } |
5652 | | /* append to list */ |
5653 | 0 | a->next = m->list; |
5654 | 0 | m->list = a; |
5655 | 0 | } |
5656 | 0 | } |
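/* Editor's note (not part of authzone.c): in Unbound's packed_rrset_data the
 * stored rr_data[i] begins with a 2-byte rdlength prefix, which is why the
 * loop above uses rr_data[i]+2 and rr_len[i]-2 to reach the raw 4-byte A or
 * 16-byte AAAA address before copying it into a sockaddr. */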
5657 | | |
5658 | | /** callback for task_transfer lookup of host name, of A or AAAA */ |
5659 | | void auth_xfer_transfer_lookup_callback(void* arg, int rcode, sldns_buffer* buf, |
5660 | | enum sec_status ATTR_UNUSED(sec), char* ATTR_UNUSED(why_bogus), |
5661 | | int ATTR_UNUSED(was_ratelimited)) |
5662 | 0 | { |
5663 | 0 | struct auth_xfer* xfr = (struct auth_xfer*)arg; |
5664 | 0 | struct module_env* env; |
5665 | 0 | log_assert(xfr->task_transfer); |
5666 | 0 | lock_basic_lock(&xfr->lock); |
5667 | 0 | env = xfr->task_transfer->env; |
5668 | 0 | if(!env || env->outnet->want_to_quit) { |
5669 | 0 | lock_basic_unlock(&xfr->lock); |
5670 | 0 | return; /* stop on quit */ |
5671 | 0 | } |
5672 | | |
5673 | | /* process result */ |
5674 | 0 | if(rcode == LDNS_RCODE_NOERROR) { |
5675 | 0 | uint16_t wanted_qtype = LDNS_RR_TYPE_A; |
5676 | 0 | struct regional* temp = env->scratch; |
5677 | 0 | struct query_info rq; |
5678 | 0 | struct reply_info* rep; |
5679 | 0 | if(xfr->task_transfer->lookup_aaaa) |
5680 | 0 | wanted_qtype = LDNS_RR_TYPE_AAAA; |
5681 | 0 | memset(&rq, 0, sizeof(rq)); |
5682 | 0 | rep = parse_reply_in_temp_region(buf, temp, &rq); |
5683 | 0 | if(rep && rq.qtype == wanted_qtype && |
5684 | 0 | FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_NOERROR) { |
5685 | | /* parsed successfully */ |
5686 | 0 | struct ub_packed_rrset_key* answer = |
5687 | 0 | reply_find_answer_rrset(&rq, rep); |
5688 | 0 | if(answer) { |
5689 | 0 | xfr_master_add_addrs(xfr->task_transfer-> |
5690 | 0 | lookup_target, answer, wanted_qtype); |
5691 | 0 | } else { |
5692 | 0 | if(verbosity >= VERB_ALGO) { |
5693 | 0 | char zname[255+1]; |
5694 | 0 | dname_str(xfr->name, zname); |
5695 | 0 | verbose(VERB_ALGO, "auth zone %s host %s type %s transfer lookup has nodata", zname, xfr->task_transfer->lookup_target->host, (xfr->task_transfer->lookup_aaaa?"AAAA":"A")); |
5696 | 0 | } |
5697 | 0 | } |
5698 | 0 | } else { |
5699 | 0 | if(verbosity >= VERB_ALGO) { |
5700 | 0 | char zname[255+1]; |
5701 | 0 | dname_str(xfr->name, zname); |
5702 | 0 | verbose(VERB_ALGO, "auth zone %s host %s type %s transfer lookup has no answer", zname, xfr->task_transfer->lookup_target->host, (xfr->task_transfer->lookup_aaaa?"AAAA":"A")); |
5703 | 0 | } |
5704 | 0 | } |
5705 | 0 | regional_free_all(temp); |
5706 | 0 | } else { |
5707 | 0 | if(verbosity >= VERB_ALGO) { |
5708 | 0 | char zname[255+1]; |
5709 | 0 | dname_str(xfr->name, zname); |
5710 | 0 | verbose(VERB_ALGO, "auth zone %s host %s type %s transfer lookup failed", zname, xfr->task_transfer->lookup_target->host, (xfr->task_transfer->lookup_aaaa?"AAAA":"A")); |
5711 | 0 | } |
5712 | 0 | } |
5713 | 0 | if(xfr->task_transfer->lookup_target->list && |
5714 | 0 | xfr->task_transfer->lookup_target == xfr_transfer_current_master(xfr)) |
5715 | 0 | xfr->task_transfer->scan_addr = xfr->task_transfer->lookup_target->list; |
5716 | | |
5717 | | /* move to lookup AAAA after A lookup, move to next hostname lookup, |
5718 | | * or move to fetch the zone, or, if nothing to do, end task_transfer */ |
5719 | 0 | xfr_transfer_move_to_next_lookup(xfr, env); |
5720 | 0 | xfr_transfer_nexttarget_or_end(xfr, env); |
5721 | 0 | } |
5722 | | |
5723 | | /** check if xfer (AXFR or IXFR) packet is OK. |
5724 | | * return false if we lost connection (SERVFAIL, or unreadable). |
5725 | | * return false if we need to move from IXFR to AXFR, with gonextonfail |
5726 | | * set to false, so the same master is tried again, but with AXFR. |
5727 | | * return true if fine to link into data. |
5728 | | * return true with transferdone=true when the transfer has ended. |
5729 | | */ |
5730 | | static int |
5731 | | check_xfer_packet(sldns_buffer* pkt, struct auth_xfer* xfr, |
5732 | | int* gonextonfail, int* transferdone) |
5733 | 0 | { |
5734 | 0 | uint8_t* wire = sldns_buffer_begin(pkt); |
5735 | 0 | int i; |
5736 | 0 | if(sldns_buffer_limit(pkt) < LDNS_HEADER_SIZE) { |
5737 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet too small", |
5738 | 0 | xfr->task_transfer->master->host); |
5739 | 0 | return 0; |
5740 | 0 | } |
5741 | 0 | if(!LDNS_QR_WIRE(wire)) { |
5742 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet has no QR flag", |
5743 | 0 | xfr->task_transfer->master->host); |
5744 | 0 | return 0; |
5745 | 0 | } |
5746 | 0 | if(LDNS_TC_WIRE(wire)) { |
5747 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet has TC flag", |
5748 | 0 | xfr->task_transfer->master->host); |
5749 | 0 | return 0; |
5750 | 0 | } |
5751 | | /* check ID */ |
5752 | 0 | if(LDNS_ID_WIRE(wire) != xfr->task_transfer->id) { |
5753 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet wrong ID", |
5754 | 0 | xfr->task_transfer->master->host); |
5755 | 0 | return 0; |
5756 | 0 | } |
5757 | 0 | if(LDNS_RCODE_WIRE(wire) != LDNS_RCODE_NOERROR) { |
5758 | 0 | char rcode[32]; |
5759 | 0 | sldns_wire2str_rcode_buf((int)LDNS_RCODE_WIRE(wire), rcode, |
5760 | 0 | sizeof(rcode)); |
5761 | | /* if we are doing IXFR, check for fallback */ |
5762 | 0 | if(xfr->task_transfer->on_ixfr) { |
5763 | 0 | if(LDNS_RCODE_WIRE(wire) == LDNS_RCODE_NOTIMPL || |
5764 | 0 | LDNS_RCODE_WIRE(wire) == LDNS_RCODE_SERVFAIL || |
5765 | 0 | LDNS_RCODE_WIRE(wire) == LDNS_RCODE_REFUSED || |
5766 | 0 | LDNS_RCODE_WIRE(wire) == LDNS_RCODE_FORMERR) { |
5767 | 0 | verbose(VERB_ALGO, "xfr to %s, fallback " |
5768 | 0 | "from IXFR to AXFR (with rcode %s)", |
5769 | 0 | xfr->task_transfer->master->host, |
5770 | 0 | rcode); |
5771 | 0 | xfr->task_transfer->ixfr_fail = 1; |
5772 | 0 | *gonextonfail = 0; |
5773 | 0 | return 0; |
5774 | 0 | } |
5775 | 0 | } |
5776 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet with rcode %s", |
5777 | 0 | xfr->task_transfer->master->host, rcode); |
5778 | 0 | return 0; |
5779 | 0 | } |
5780 | 0 | if(LDNS_OPCODE_WIRE(wire) != LDNS_PACKET_QUERY) { |
5781 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet with bad opcode", |
5782 | 0 | xfr->task_transfer->master->host); |
5783 | 0 | return 0; |
5784 | 0 | } |
5785 | 0 | if(LDNS_QDCOUNT(wire) > 1) { |
5786 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet has qdcount %d", |
5787 | 0 | xfr->task_transfer->master->host, |
5788 | 0 | (int)LDNS_QDCOUNT(wire)); |
5789 | 0 | return 0; |
5790 | 0 | } |
5791 | | |
5792 | | /* check qname */ |
5793 | 0 | sldns_buffer_set_position(pkt, LDNS_HEADER_SIZE); |
5794 | 0 | for(i=0; i<(int)LDNS_QDCOUNT(wire); i++) { |
5795 | 0 | size_t pos = sldns_buffer_position(pkt); |
5796 | 0 | uint16_t qtype, qclass; |
5797 | 0 | if(pkt_dname_len(pkt) == 0) { |
5798 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet with " |
5799 | 0 | "malformed dname", |
5800 | 0 | xfr->task_transfer->master->host); |
5801 | 0 | return 0; |
5802 | 0 | } |
5803 | 0 | if(dname_pkt_compare(pkt, sldns_buffer_at(pkt, pos), |
5804 | 0 | xfr->name) != 0) { |
5805 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet with " |
5806 | 0 | "wrong qname", |
5807 | 0 | xfr->task_transfer->master->host); |
5808 | 0 | return 0; |
5809 | 0 | } |
5810 | 0 | if(sldns_buffer_remaining(pkt) < 4) { |
5811 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet with " |
5812 | 0 | "truncated query RR", |
5813 | 0 | xfr->task_transfer->master->host); |
5814 | 0 | return 0; |
5815 | 0 | } |
5816 | 0 | qtype = sldns_buffer_read_u16(pkt); |
5817 | 0 | qclass = sldns_buffer_read_u16(pkt); |
5818 | 0 | if(qclass != xfr->dclass) { |
5819 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet with " |
5820 | 0 | "wrong qclass", |
5821 | 0 | xfr->task_transfer->master->host); |
5822 | 0 | return 0; |
5823 | 0 | } |
5824 | 0 | if(xfr->task_transfer->on_ixfr) { |
5825 | 0 | if(qtype != LDNS_RR_TYPE_IXFR) { |
5826 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet " |
5827 | 0 | "with wrong qtype, expected IXFR", |
5828 | 0 | xfr->task_transfer->master->host); |
5829 | 0 | return 0; |
5830 | 0 | } |
5831 | 0 | } else { |
5832 | 0 | if(qtype != LDNS_RR_TYPE_AXFR) { |
5833 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet " |
5834 | 0 | "with wrong qtype, expected AXFR", |
5835 | 0 | xfr->task_transfer->master->host); |
5836 | 0 | return 0; |
5837 | 0 | } |
5838 | 0 | } |
5839 | 0 | } |
5840 | | |
5841 | | /* check parse of RRs in packet, store first SOA serial |
5842 | | * to be able to detect last SOA (with that serial) to see if done */ |
5843 | | /* also check for IXFR 'zone up to date' reply */ |
5844 | 0 | for(i=0; i<(int)LDNS_ANCOUNT(wire); i++) { |
5845 | 0 | size_t pos = sldns_buffer_position(pkt); |
5846 | 0 | uint16_t tp, rdlen; |
5847 | 0 | if(pkt_dname_len(pkt) == 0) { |
5848 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet with " |
5849 | 0 | "malformed dname in answer section", |
5850 | 0 | xfr->task_transfer->master->host); |
5851 | 0 | return 0; |
5852 | 0 | } |
5853 | 0 | if(sldns_buffer_remaining(pkt) < 10) { |
5854 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet with " |
5855 | 0 | "truncated RR", |
5856 | 0 | xfr->task_transfer->master->host); |
5857 | 0 | return 0; |
5858 | 0 | } |
5859 | 0 | tp = sldns_buffer_read_u16(pkt); |
5860 | 0 | (void)sldns_buffer_read_u16(pkt); /* class */ |
5861 | 0 | (void)sldns_buffer_read_u32(pkt); /* ttl */ |
5862 | 0 | rdlen = sldns_buffer_read_u16(pkt); |
5863 | 0 | if(sldns_buffer_remaining(pkt) < rdlen) { |
5864 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet with " |
5865 | 0 | "truncated RR rdata", |
5866 | 0 | xfr->task_transfer->master->host); |
5867 | 0 | return 0; |
5868 | 0 | } |
5869 | | |
5870 | | /* RR parses (haven't checked rdata itself), now look at |
5871 | | * SOA records to see serial number */ |
5872 | 0 | if(xfr->task_transfer->rr_scan_num == 0 && |
5873 | 0 | tp != LDNS_RR_TYPE_SOA) { |
5874 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet with " |
5875 | 0 | "malformed zone transfer, no start SOA", |
5876 | 0 | xfr->task_transfer->master->host); |
5877 | 0 | return 0; |
5878 | 0 | } |
5879 | 0 | if(xfr->task_transfer->rr_scan_num == 1 && |
5880 | 0 | tp != LDNS_RR_TYPE_SOA) { |
5881 | | /* second RR is not a SOA record; this is not an IXFR,
5882 | | * the master is replying with an AXFR */
5883 | 0 | xfr->task_transfer->on_ixfr_is_axfr = 1; |
5884 | 0 | } |
5885 | 0 | if(tp == LDNS_RR_TYPE_SOA) { |
5886 | 0 | uint32_t serial; |
5887 | 0 | if(rdlen < 22) { |
5888 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet " |
5889 | 0 | "with SOA with malformed rdata", |
5890 | 0 | xfr->task_transfer->master->host); |
5891 | 0 | return 0; |
5892 | 0 | } |
5893 | 0 | if(dname_pkt_compare(pkt, sldns_buffer_at(pkt, pos), |
5894 | 0 | xfr->name) != 0) { |
5895 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet " |
5896 | 0 | "with SOA with wrong dname", |
5897 | 0 | xfr->task_transfer->master->host); |
5898 | 0 | return 0; |
5899 | 0 | } |
5900 | | |
5901 | | /* read serial number of SOA */ |
5902 | 0 | serial = sldns_buffer_read_u32_at(pkt, |
5903 | 0 | sldns_buffer_position(pkt)+rdlen-20); |
5904 | | |
5905 | | /* check for IXFR 'zone has SOA x' reply */ |
5906 | 0 | if(xfr->task_transfer->on_ixfr && |
5907 | 0 | xfr->task_transfer->rr_scan_num == 0 && |
5908 | 0 | LDNS_ANCOUNT(wire)==1) { |
5909 | 0 | verbose(VERB_ALGO, "xfr to %s ended, " |
5910 | 0 | "IXFR reply that zone has serial %u," |
5911 | 0 | " fallback from IXFR to AXFR", |
5912 | 0 | xfr->task_transfer->master->host, |
5913 | 0 | (unsigned)serial); |
5914 | 0 | xfr->task_transfer->ixfr_fail = 1; |
5915 | 0 | *gonextonfail = 0; |
5916 | 0 | return 0; |
5917 | 0 | } |
5918 | | |
5919 | | /* if first SOA, store serial number */ |
5920 | 0 | if(xfr->task_transfer->got_xfr_serial == 0) { |
5921 | 0 | xfr->task_transfer->got_xfr_serial = 1; |
5922 | 0 | xfr->task_transfer->incoming_xfr_serial = |
5923 | 0 | serial; |
5924 | 0 | verbose(VERB_ALGO, "xfr %s: contains " |
5925 | 0 | "SOA serial %u", |
5926 | 0 | xfr->task_transfer->master->host, |
5927 | 0 | (unsigned)serial); |
5928 | | /* see if end of AXFR */ |
5929 | 0 | } else if(!xfr->task_transfer->on_ixfr || |
5930 | 0 | xfr->task_transfer->on_ixfr_is_axfr) { |
5931 | | /* second SOA with serial is the end |
5932 | | * for AXFR */ |
5933 | 0 | *transferdone = 1; |
5934 | 0 | verbose(VERB_ALGO, "xfr %s: last AXFR packet", |
5935 | 0 | xfr->task_transfer->master->host); |
5936 | | /* for IXFR, count SOA records with that serial */ |
5937 | 0 | } else if(xfr->task_transfer->incoming_xfr_serial == |
5938 | 0 | serial && xfr->task_transfer->got_xfr_serial |
5939 | 0 | == 1) { |
5940 | 0 | xfr->task_transfer->got_xfr_serial++; |
5941 | | /* if not the first soa, and serial==firstserial for the
5942 | | * third time, we are at the end, for IXFR */
5943 | 0 | } else if(xfr->task_transfer->incoming_xfr_serial == |
5944 | 0 | serial && xfr->task_transfer->got_xfr_serial |
5945 | 0 | == 2) { |
5946 | 0 | verbose(VERB_ALGO, "xfr %s: last IXFR packet", |
5947 | 0 | xfr->task_transfer->master->host); |
5948 | 0 | *transferdone = 1; |
5949 | | /* continue parse check, if that succeeds, |
5950 | | * transfer is done */ |
5951 | 0 | } |
5952 | 0 | } |
5953 | 0 | xfr->task_transfer->rr_scan_num++; |
5954 | | |
5955 | | /* skip over RR rdata to go to the next RR */ |
5956 | 0 | sldns_buffer_skip(pkt, (ssize_t)rdlen); |
5957 | 0 | } |
5958 | | |
5959 | | /* check authority section */ |
5960 | | /* we skip over the RRs checking packet format */ |
5961 | 0 | for(i=0; i<(int)LDNS_NSCOUNT(wire); i++) { |
5962 | 0 | uint16_t rdlen; |
5963 | 0 | if(pkt_dname_len(pkt) == 0) { |
5964 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet with " |
5965 | 0 | "malformed dname in authority section", |
5966 | 0 | xfr->task_transfer->master->host); |
5967 | 0 | return 0; |
5968 | 0 | } |
5969 | 0 | if(sldns_buffer_remaining(pkt) < 10) { |
5970 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet with " |
5971 | 0 | "truncated RR", |
5972 | 0 | xfr->task_transfer->master->host); |
5973 | 0 | return 0; |
5974 | 0 | } |
5975 | 0 | (void)sldns_buffer_read_u16(pkt); /* type */ |
5976 | 0 | (void)sldns_buffer_read_u16(pkt); /* class */ |
5977 | 0 | (void)sldns_buffer_read_u32(pkt); /* ttl */ |
5978 | 0 | rdlen = sldns_buffer_read_u16(pkt); |
5979 | 0 | if(sldns_buffer_remaining(pkt) < rdlen) { |
5980 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet with " |
5981 | 0 | "truncated RR rdata", |
5982 | 0 | xfr->task_transfer->master->host); |
5983 | 0 | return 0; |
5984 | 0 | } |
5985 | | /* skip over RR rdata to go to the next RR */ |
5986 | 0 | sldns_buffer_skip(pkt, (ssize_t)rdlen); |
5987 | 0 | } |
5988 | | |
5989 | | /* check additional section */ |
5990 | 0 | for(i=0; i<(int)LDNS_ARCOUNT(wire); i++) { |
5991 | 0 | uint16_t rdlen; |
5992 | 0 | if(pkt_dname_len(pkt) == 0) { |
5993 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet with " |
5994 | 0 | "malformed dname in additional section", |
5995 | 0 | xfr->task_transfer->master->host); |
5996 | 0 | return 0; |
5997 | 0 | } |
5998 | 0 | if(sldns_buffer_remaining(pkt) < 10) { |
5999 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet with " |
6000 | 0 | "truncated RR", |
6001 | 0 | xfr->task_transfer->master->host); |
6002 | 0 | return 0; |
6003 | 0 | } |
6004 | 0 | (void)sldns_buffer_read_u16(pkt); /* type */ |
6005 | 0 | (void)sldns_buffer_read_u16(pkt); /* class */ |
6006 | 0 | (void)sldns_buffer_read_u32(pkt); /* ttl */ |
6007 | 0 | rdlen = sldns_buffer_read_u16(pkt); |
6008 | 0 | if(sldns_buffer_remaining(pkt) < rdlen) { |
6009 | 0 | verbose(VERB_ALGO, "xfr to %s failed, packet with " |
6010 | 0 | "truncated RR rdata", |
6011 | 0 | xfr->task_transfer->master->host); |
6012 | 0 | return 0; |
6013 | 0 | } |
6014 | | /* skip over RR rdata to go to the next RR */ |
6015 | 0 | sldns_buffer_skip(pkt, (ssize_t)rdlen); |
6016 | 0 | } |
6017 | | |
6018 | 0 | return 1; |
6019 | 0 | } |
6020 | | |
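/*
 * Editor's note: hedged condensation, not part of the upstream file.
 * check_xfer_packet() above ends a transfer by counting SOA records that
 * carry the serial of the very first SOA: an AXFR stream is closed by the
 * second occurrence, while an IXFR stream (which starts and ends with the
 * new SOA, RFC 1995) is closed by the third.  The helper below restates
 * that rule in isolation; its name and calling convention are hypothetical.
 */
static int
example_xfr_is_done(uint32_t serial, uint32_t first_serial,
	int* times_seen, int is_axfr)
{
	if(serial != first_serial)
		return 0;
	(*times_seen)++;              /* counts the opening SOA as well */
	if(is_axfr)
		return *times_seen >= 2;  /* opening serial repeated once */
	return *times_seen >= 3;      /* opening serial repeated twice */
}
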
6021 | | /** Link the data from this packet into the worklist of transferred data */ |
6022 | | static int |
6023 | | xfer_link_data(sldns_buffer* pkt, struct auth_xfer* xfr) |
6024 | 0 | { |
6025 | | /* alloc it */ |
6026 | 0 | struct auth_chunk* e; |
6027 | 0 | e = (struct auth_chunk*)calloc(1, sizeof(*e)); |
6028 | 0 | if(!e) return 0; |
6029 | 0 | e->next = NULL; |
6030 | 0 | e->len = sldns_buffer_limit(pkt); |
6031 | 0 | e->data = memdup(sldns_buffer_begin(pkt), e->len); |
6032 | 0 | if(!e->data) { |
6033 | 0 | free(e); |
6034 | 0 | return 0; |
6035 | 0 | } |
6036 | | |
6037 | | /* alloc succeeded, link into list */ |
6038 | 0 | if(!xfr->task_transfer->chunks_first) |
6039 | 0 | xfr->task_transfer->chunks_first = e; |
6040 | 0 | if(xfr->task_transfer->chunks_last) |
6041 | 0 | xfr->task_transfer->chunks_last->next = e; |
6042 | 0 | xfr->task_transfer->chunks_last = e; |
6043 | 0 | return 1; |
6044 | 0 | } |
6045 | | |
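/*
 * Editor's note: hedged usage sketch, not part of the upstream file.
 * xfer_link_data() above appends each received packet to a singly linked
 * list kept by a chunks_first/chunks_last tail pointer, so later processing
 * can walk the packets in arrival order.  A consumer loop looks roughly
 * like this; the helper name is hypothetical.
 */
static size_t
example_chunk_list_bytes(struct auth_chunk* first)
{
	size_t total = 0;
	struct auth_chunk* e;
	for(e = first; e; e = e->next)
		total += e->len;   /* e->data holds the raw packet bytes */
	return total;
}
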
6046 | | /** task transfer. the list of data is complete. process it; if that failed,
6047 | | * move to the next master; if it succeeded, end the task transfer */
6048 | | static void |
6049 | | process_list_end_transfer(struct auth_xfer* xfr, struct module_env* env) |
6050 | 0 | { |
6051 | 0 | int ixfr_fail = 0; |
6052 | 0 | if(xfr_process_chunk_list(xfr, env, &ixfr_fail)) { |
6053 | | /* it worked! */ |
6054 | 0 | auth_chunks_delete(xfr->task_transfer); |
6055 | | |
6056 | | /* we fetched the zone, move to wait task */ |
6057 | 0 | xfr_transfer_disown(xfr); |
6058 | |
6059 | 0 | if(xfr->notify_received && (!xfr->notify_has_serial || |
6060 | 0 | (xfr->notify_has_serial && |
6061 | 0 | xfr_serial_means_update(xfr, xfr->notify_serial)))) { |
6062 | 0 | uint32_t sr = xfr->notify_serial; |
6063 | 0 | int has_sr = xfr->notify_has_serial; |
6064 | | /* we received a notify while probe/transfer was |
6065 | | * in progress. start a new probe and transfer */ |
6066 | 0 | xfr->notify_received = 0; |
6067 | 0 | xfr->notify_has_serial = 0; |
6068 | 0 | xfr->notify_serial = 0; |
6069 | 0 | if(!xfr_start_probe(xfr, env, NULL)) { |
6070 | | /* if we couldn't start it, already in |
6071 | | * progress; restore notify serial, |
6072 | | * while xfr still locked */ |
6073 | 0 | xfr->notify_received = 1; |
6074 | 0 | xfr->notify_has_serial = has_sr; |
6075 | 0 | xfr->notify_serial = sr; |
6076 | 0 | lock_basic_unlock(&xfr->lock); |
6077 | 0 | } |
6078 | 0 | return; |
6079 | 0 | } else { |
6080 | | /* pick up the nextprobe task and wait (normal wait time) */
6081 | 0 | if(xfr->task_nextprobe->worker == NULL) |
6082 | 0 | xfr_set_timeout(xfr, env, 0, 0); |
6083 | 0 | } |
6084 | 0 | lock_basic_unlock(&xfr->lock); |
6085 | 0 | return; |
6086 | 0 | } |
6087 | | /* processing failed */ |
6088 | | /* when done, delete data from list */ |
6089 | 0 | auth_chunks_delete(xfr->task_transfer); |
6090 | 0 | if(ixfr_fail) { |
6091 | 0 | xfr->task_transfer->ixfr_fail = 1; |
6092 | 0 | } else { |
6093 | 0 | xfr_transfer_nextmaster(xfr); |
6094 | 0 | } |
6095 | 0 | xfr_transfer_nexttarget_or_end(xfr, env); |
6096 | 0 | } |
6097 | | |
6098 | | /** callback for the task_transfer timer */ |
6099 | | void |
6100 | | auth_xfer_transfer_timer_callback(void* arg) |
6101 | 0 | { |
6102 | 0 | struct auth_xfer* xfr = (struct auth_xfer*)arg; |
6103 | 0 | struct module_env* env; |
6104 | 0 | int gonextonfail = 1; |
6105 | 0 | log_assert(xfr->task_transfer); |
6106 | 0 | lock_basic_lock(&xfr->lock); |
6107 | 0 | env = xfr->task_transfer->env; |
6108 | 0 | if(!env || env->outnet->want_to_quit) { |
6109 | 0 | lock_basic_unlock(&xfr->lock); |
6110 | 0 | return; /* stop on quit */ |
6111 | 0 | } |
6112 | | |
6113 | 0 | verbose(VERB_ALGO, "xfr stopped, connection timeout to %s", |
6114 | 0 | xfr->task_transfer->master->host); |
6115 | | |
6116 | | /* see if IXFR caused the failure, if so, try AXFR */ |
6117 | 0 | if(xfr->task_transfer->on_ixfr) { |
6118 | 0 | xfr->task_transfer->ixfr_possible_timeout_count++; |
6119 | 0 | if(xfr->task_transfer->ixfr_possible_timeout_count >= |
6120 | 0 | NUM_TIMEOUTS_FALLBACK_IXFR) { |
6121 | 0 | verbose(VERB_ALGO, "xfr to %s, fallback " |
6122 | 0 | "from IXFR to AXFR (because of timeouts)", |
6123 | 0 | xfr->task_transfer->master->host); |
6124 | 0 | xfr->task_transfer->ixfr_fail = 1; |
6125 | 0 | gonextonfail = 0; |
6126 | 0 | } |
6127 | 0 | } |
6128 | | |
6129 | | /* delete transferred data from list */ |
6130 | 0 | auth_chunks_delete(xfr->task_transfer); |
6131 | 0 | comm_point_delete(xfr->task_transfer->cp); |
6132 | 0 | xfr->task_transfer->cp = NULL; |
6133 | 0 | if(gonextonfail) |
6134 | 0 | xfr_transfer_nextmaster(xfr); |
6135 | 0 | xfr_transfer_nexttarget_or_end(xfr, env); |
6136 | 0 | } |
6137 | | |
6138 | | /** callback for task_transfer tcp connections */ |
6139 | | int |
6140 | | auth_xfer_transfer_tcp_callback(struct comm_point* c, void* arg, int err, |
6141 | | struct comm_reply* ATTR_UNUSED(repinfo)) |
6142 | 0 | { |
6143 | 0 | struct auth_xfer* xfr = (struct auth_xfer*)arg; |
6144 | 0 | struct module_env* env; |
6145 | 0 | int gonextonfail = 1; |
6146 | 0 | int transferdone = 0; |
6147 | 0 | log_assert(xfr->task_transfer); |
6148 | 0 | lock_basic_lock(&xfr->lock); |
6149 | 0 | env = xfr->task_transfer->env; |
6150 | 0 | if(!env || env->outnet->want_to_quit) { |
6151 | 0 | lock_basic_unlock(&xfr->lock); |
6152 | 0 | return 0; /* stop on quit */ |
6153 | 0 | } |
6154 | | /* stop the timer */ |
6155 | 0 | comm_timer_disable(xfr->task_transfer->timer); |
6156 | |
6157 | 0 | if(err != NETEVENT_NOERROR) { |
6158 | | /* connection failed, closed, or timeout */ |
6159 | | /* stop this transfer, cleanup
6160 | | * and continue task_transfer */
6161 | 0 | verbose(VERB_ALGO, "xfr stopped, connection lost to %s", |
6162 | 0 | xfr->task_transfer->master->host); |
6163 | | |
6164 | | /* see if IXFR caused the failure, if so, try AXFR */ |
6165 | 0 | if(xfr->task_transfer->on_ixfr) { |
6166 | 0 | xfr->task_transfer->ixfr_possible_timeout_count++; |
6167 | 0 | if(xfr->task_transfer->ixfr_possible_timeout_count >= |
6168 | 0 | NUM_TIMEOUTS_FALLBACK_IXFR) { |
6169 | 0 | verbose(VERB_ALGO, "xfr to %s, fallback " |
6170 | 0 | "from IXFR to AXFR (because of timeouts)", |
6171 | 0 | xfr->task_transfer->master->host); |
6172 | 0 | xfr->task_transfer->ixfr_fail = 1; |
6173 | 0 | gonextonfail = 0; |
6174 | 0 | } |
6175 | 0 | } |
6176 | |
6177 | 0 | failed: |
6178 | | /* delete transferred data from list */ |
6179 | 0 | auth_chunks_delete(xfr->task_transfer); |
6180 | 0 | comm_point_delete(xfr->task_transfer->cp); |
6181 | 0 | xfr->task_transfer->cp = NULL; |
6182 | 0 | if(gonextonfail) |
6183 | 0 | xfr_transfer_nextmaster(xfr); |
6184 | 0 | xfr_transfer_nexttarget_or_end(xfr, env); |
6185 | 0 | return 0; |
6186 | 0 | } |
6187 | | /* note that IXFR worked without timeout */ |
6188 | 0 | if(xfr->task_transfer->on_ixfr) |
6189 | 0 | xfr->task_transfer->ixfr_possible_timeout_count = 0; |
6190 | | |
6191 | | /* handle returned packet */ |
6192 | | /* if it fails, cleanup and end this transfer */ |
6193 | | /* if it needs to fallback from IXFR to AXFR, do that */ |
6194 | 0 | if(!check_xfer_packet(c->buffer, xfr, &gonextonfail, &transferdone)) { |
6195 | 0 | goto failed; |
6196 | 0 | } |
6197 | | /* if it is good, link it into the list of data */ |
6198 | | /* if the link into list of data fails (malloc fail) cleanup and end */ |
6199 | 0 | if(!xfer_link_data(c->buffer, xfr)) { |
6200 | 0 | verbose(VERB_ALGO, "xfr stopped to %s, malloc failed", |
6201 | 0 | xfr->task_transfer->master->host); |
6202 | 0 | goto failed; |
6203 | 0 | } |
6204 | | /* if the transfer is done now, disconnect and process the list */ |
6205 | 0 | if(transferdone) { |
6206 | 0 | comm_point_delete(xfr->task_transfer->cp); |
6207 | 0 | xfr->task_transfer->cp = NULL; |
6208 | 0 | process_list_end_transfer(xfr, env); |
6209 | 0 | return 0; |
6210 | 0 | } |
6211 | | |
6212 | | /* if we want to read more messages, setup the commpoint to read |
6213 | | * a DNS packet, and the timeout */ |
6214 | 0 | lock_basic_unlock(&xfr->lock); |
6215 | 0 | c->tcp_is_reading = 1; |
6216 | 0 | sldns_buffer_clear(c->buffer); |
6217 | 0 | comm_point_start_listening(c, -1, AUTH_TRANSFER_TIMEOUT); |
6218 | 0 | return 0; |
6219 | 0 | } |
6220 | | |
6221 | | /** callback for task_transfer http connections */ |
6222 | | int |
6223 | | auth_xfer_transfer_http_callback(struct comm_point* c, void* arg, int err, |
6224 | | struct comm_reply* repinfo) |
6225 | 0 | { |
6226 | 0 | struct auth_xfer* xfr = (struct auth_xfer*)arg; |
6227 | 0 | struct module_env* env; |
6228 | 0 | log_assert(xfr->task_transfer); |
6229 | 0 | lock_basic_lock(&xfr->lock); |
6230 | 0 | env = xfr->task_transfer->env; |
6231 | 0 | if(!env || env->outnet->want_to_quit) { |
6232 | 0 | lock_basic_unlock(&xfr->lock); |
6233 | 0 | return 0; /* stop on quit */ |
6234 | 0 | } |
6235 | 0 | verbose(VERB_ALGO, "auth zone transfer http callback"); |
6236 | | /* stop the timer */ |
6237 | 0 | comm_timer_disable(xfr->task_transfer->timer); |
6238 | |
6239 | 0 | if(err != NETEVENT_NOERROR && err != NETEVENT_DONE) { |
6240 | | /* connection failed, closed, or timeout */ |
6241 | | /* stop this transfer, cleanup
6242 | | * and continue task_transfer */
6243 | 0 | verbose(VERB_ALGO, "http stopped, connection lost to %s", |
6244 | 0 | xfr->task_transfer->master->host); |
6245 | 0 | failed: |
6246 | | /* delete transferred data from list */ |
6247 | 0 | auth_chunks_delete(xfr->task_transfer); |
6248 | 0 | if(repinfo) repinfo->c = NULL; /* signal cp deleted to |
6249 | | the routine calling this callback */ |
6250 | 0 | comm_point_delete(xfr->task_transfer->cp); |
6251 | 0 | xfr->task_transfer->cp = NULL; |
6252 | 0 | xfr_transfer_nextmaster(xfr); |
6253 | 0 | xfr_transfer_nexttarget_or_end(xfr, env); |
6254 | 0 | return 0; |
6255 | 0 | } |
6256 | | |
6257 | | /* if it is good, link it into the list of data */ |
6258 | | /* if the link into list of data fails (malloc fail) cleanup and end */ |
6259 | 0 | if(sldns_buffer_limit(c->buffer) > 0) { |
6260 | 0 | verbose(VERB_ALGO, "auth zone http queued up %d bytes", |
6261 | 0 | (int)sldns_buffer_limit(c->buffer)); |
6262 | 0 | if(!xfer_link_data(c->buffer, xfr)) { |
6263 | 0 | verbose(VERB_ALGO, "http stopped to %s, malloc failed", |
6264 | 0 | xfr->task_transfer->master->host); |
6265 | 0 | goto failed; |
6266 | 0 | } |
6267 | 0 | } |
6268 | | /* if the transfer is done now, disconnect and process the list */ |
6269 | 0 | if(err == NETEVENT_DONE) { |
6270 | 0 | if(repinfo) repinfo->c = NULL; /* signal cp deleted to |
6271 | | the routine calling this callback */ |
6272 | 0 | comm_point_delete(xfr->task_transfer->cp); |
6273 | 0 | xfr->task_transfer->cp = NULL; |
6274 | 0 | process_list_end_transfer(xfr, env); |
6275 | 0 | return 0; |
6276 | 0 | } |
6277 | | |
6278 | | /* if we want to read more messages, setup the commpoint to read |
6279 | | * a DNS packet, and the timeout */ |
6280 | 0 | lock_basic_unlock(&xfr->lock); |
6281 | 0 | c->tcp_is_reading = 1; |
6282 | 0 | sldns_buffer_clear(c->buffer); |
6283 | 0 | comm_point_start_listening(c, -1, AUTH_TRANSFER_TIMEOUT); |
6284 | 0 | return 0; |
6285 | 0 | } |
6286 | | |
6287 | | |
6288 | | /** start transfer task by this worker, xfr is locked. */
6289 | | static void |
6290 | | xfr_start_transfer(struct auth_xfer* xfr, struct module_env* env, |
6291 | | struct auth_master* master) |
6292 | 0 | { |
6293 | 0 | log_assert(xfr->task_transfer != NULL); |
6294 | 0 | log_assert(xfr->task_transfer->worker == NULL); |
6295 | 0 | log_assert(xfr->task_transfer->chunks_first == NULL); |
6296 | 0 | log_assert(xfr->task_transfer->chunks_last == NULL); |
6297 | 0 | xfr->task_transfer->worker = env->worker; |
6298 | 0 | xfr->task_transfer->env = env; |
6299 | | |
6300 | | /* init transfer process */ |
6301 | | /* find that master in the transfer's list of masters? */ |
6302 | 0 | xfr_transfer_start_list(xfr, master); |
6303 | | /* start lookup for hostnames in transfer master list */ |
6304 | 0 | xfr_transfer_start_lookups(xfr); |
6305 | | |
6306 | | /* initiate TCP, and set timeout on it */ |
6307 | 0 | xfr_transfer_nexttarget_or_end(xfr, env); |
6308 | 0 | } |
6309 | | |
6310 | | /** disown task_probe. caller must hold xfr.lock */ |
6311 | | static void |
6312 | | xfr_probe_disown(struct auth_xfer* xfr) |
6313 | 0 | { |
6314 | | /* remove timer (from this worker's event base) */ |
6315 | 0 | comm_timer_delete(xfr->task_probe->timer); |
6316 | 0 | xfr->task_probe->timer = NULL; |
6317 | | /* remove the commpoint */ |
6318 | 0 | comm_point_delete(xfr->task_probe->cp); |
6319 | 0 | xfr->task_probe->cp = NULL; |
6320 | | /* we don't own this item anymore */ |
6321 | 0 | xfr->task_probe->worker = NULL; |
6322 | 0 | xfr->task_probe->env = NULL; |
6323 | 0 | } |
6324 | | |
6325 | | /** send the UDP probe to the master, this is part of task_probe */ |
6326 | | static int |
6327 | | xfr_probe_send_probe(struct auth_xfer* xfr, struct module_env* env, |
6328 | | int timeout) |
6329 | 0 | { |
6330 | 0 | struct sockaddr_storage addr; |
6331 | 0 | socklen_t addrlen = 0; |
6332 | 0 | struct timeval t; |
6333 | | /* pick master */ |
6334 | 0 | struct auth_master* master = xfr_probe_current_master(xfr); |
6335 | 0 | char *auth_name = NULL; |
6336 | 0 | if(!master) return 0; |
6337 | 0 | if(master->allow_notify) return 0; /* only for notify */ |
6338 | 0 | if(master->http) return 0; /* only masters get SOA UDP probe, |
6339 | | not urls, if those are in this list */ |
6340 | | |
6341 | | /* get master addr */ |
6342 | 0 | if(xfr->task_probe->scan_addr) { |
6343 | 0 | addrlen = xfr->task_probe->scan_addr->addrlen; |
6344 | 0 | memmove(&addr, &xfr->task_probe->scan_addr->addr, addrlen); |
6345 | 0 | } else { |
6346 | 0 | if(!authextstrtoaddr(master->host, &addr, &addrlen, &auth_name)) { |
6347 | | /* the ones that are not in addr format are supposed |
6348 | | * to be looked up. The lookup has failed however, |
6349 | | * so skip them */ |
6350 | 0 | char zname[255+1]; |
6351 | 0 | dname_str(xfr->name, zname); |
6352 | 0 | log_err("%s: failed lookup, cannot probe to master %s", |
6353 | 0 | zname, master->host); |
6354 | 0 | return 0; |
6355 | 0 | } |
6356 | 0 | if (auth_name != NULL) { |
6357 | 0 | if (addr.ss_family == AF_INET |
6358 | 0 | && (int)ntohs(((struct sockaddr_in *)&addr)->sin_port) |
6359 | 0 | == env->cfg->ssl_port) |
6360 | 0 | ((struct sockaddr_in *)&addr)->sin_port |
6361 | 0 | = htons((uint16_t)env->cfg->port); |
6362 | 0 | else if (addr.ss_family == AF_INET6 |
6363 | 0 | && (int)ntohs(((struct sockaddr_in6 *)&addr)->sin6_port) |
6364 | 0 | == env->cfg->ssl_port) |
6365 | 0 | ((struct sockaddr_in6 *)&addr)->sin6_port |
6366 | 0 | = htons((uint16_t)env->cfg->port); |
6367 | 0 | } |
6368 | 0 | } |
6369 | | |
6370 | | /* create packet */ |
6371 | | /* create new ID for new probes, but not on timeout retries, |
6372 | | * this means we'll accept replies to previous retries to same ip */ |
6373 | 0 | if(timeout == AUTH_PROBE_TIMEOUT) |
6374 | 0 | xfr->task_probe->id = GET_RANDOM_ID(env->rnd); |
6375 | 0 | xfr_create_soa_probe_packet(xfr, env->scratch_buffer, |
6376 | 0 | xfr->task_probe->id); |
6377 | | /* we need to remove the cp if we have a different ip4/ip6 type now */ |
6378 | 0 | if(xfr->task_probe->cp && |
6379 | 0 | ((xfr->task_probe->cp_is_ip6 && !addr_is_ip6(&addr, addrlen)) || |
6380 | 0 | (!xfr->task_probe->cp_is_ip6 && addr_is_ip6(&addr, addrlen))) |
6381 | 0 | ) { |
6382 | 0 | comm_point_delete(xfr->task_probe->cp); |
6383 | 0 | xfr->task_probe->cp = NULL; |
6384 | 0 | } |
6385 | 0 | if(!xfr->task_probe->cp) { |
6386 | 0 | if(addr_is_ip6(&addr, addrlen)) |
6387 | 0 | xfr->task_probe->cp_is_ip6 = 1; |
6388 | 0 | else xfr->task_probe->cp_is_ip6 = 0; |
6389 | 0 | xfr->task_probe->cp = outnet_comm_point_for_udp(env->outnet, |
6390 | 0 | auth_xfer_probe_udp_callback, xfr, &addr, addrlen); |
6391 | 0 | if(!xfr->task_probe->cp) { |
6392 | 0 | char zname[255+1], as[256]; |
6393 | 0 | dname_str(xfr->name, zname); |
6394 | 0 | addr_to_str(&addr, addrlen, as, sizeof(as)); |
6395 | 0 | verbose(VERB_ALGO, "cannot create udp cp for " |
6396 | 0 | "probe %s to %s", zname, as); |
6397 | 0 | return 0; |
6398 | 0 | } |
6399 | 0 | } |
6400 | 0 | if(!xfr->task_probe->timer) { |
6401 | 0 | xfr->task_probe->timer = comm_timer_create(env->worker_base, |
6402 | 0 | auth_xfer_probe_timer_callback, xfr); |
6403 | 0 | if(!xfr->task_probe->timer) { |
6404 | 0 | log_err("malloc failure"); |
6405 | 0 | return 0; |
6406 | 0 | } |
6407 | 0 | } |
6408 | | |
6409 | | /* send udp packet */ |
6410 | 0 | if(!comm_point_send_udp_msg(xfr->task_probe->cp, env->scratch_buffer, |
6411 | 0 | (struct sockaddr*)&addr, addrlen, 0)) { |
6412 | 0 | char zname[255+1], as[256]; |
6413 | 0 | dname_str(xfr->name, zname); |
6414 | 0 | addr_to_str(&addr, addrlen, as, sizeof(as)); |
6415 | 0 | verbose(VERB_ALGO, "failed to send soa probe for %s to %s", |
6416 | 0 | zname, as); |
6417 | 0 | return 0; |
6418 | 0 | } |
6419 | 0 | if(verbosity >= VERB_ALGO) { |
6420 | 0 | char zname[255+1], as[256]; |
6421 | 0 | dname_str(xfr->name, zname); |
6422 | 0 | addr_to_str(&addr, addrlen, as, sizeof(as)); |
6423 | 0 | verbose(VERB_ALGO, "auth zone %s soa probe sent to %s", zname, |
6424 | 0 | as); |
6425 | 0 | } |
6426 | 0 | xfr->task_probe->timeout = timeout; |
6427 | 0 | #ifndef S_SPLINT_S |
6428 | 0 | t.tv_sec = timeout/1000; |
6429 | 0 | t.tv_usec = (timeout%1000)*1000; |
6430 | 0 | #endif |
6431 | 0 | comm_timer_set(xfr->task_probe->timer, &t); |
6432 | |
6433 | 0 | return 1; |
6434 | 0 | } |
6435 | | |
6436 | | /** callback for task_probe timer */ |
6437 | | void |
6438 | | auth_xfer_probe_timer_callback(void* arg) |
6439 | 0 | { |
6440 | 0 | struct auth_xfer* xfr = (struct auth_xfer*)arg; |
6441 | 0 | struct module_env* env; |
6442 | 0 | log_assert(xfr->task_probe); |
6443 | 0 | lock_basic_lock(&xfr->lock); |
6444 | 0 | env = xfr->task_probe->env; |
6445 | 0 | if(!env || env->outnet->want_to_quit) { |
6446 | 0 | lock_basic_unlock(&xfr->lock); |
6447 | 0 | return; /* stop on quit */ |
6448 | 0 | } |
6449 | | |
6450 | 0 | if(verbosity >= VERB_ALGO) { |
6451 | 0 | char zname[255+1]; |
6452 | 0 | dname_str(xfr->name, zname); |
6453 | 0 | verbose(VERB_ALGO, "auth zone %s soa probe timeout", zname); |
6454 | 0 | } |
6455 | 0 | if(xfr->task_probe->timeout <= AUTH_PROBE_TIMEOUT_STOP) { |
6456 | | /* try again with bigger timeout */ |
6457 | 0 | if(xfr_probe_send_probe(xfr, env, xfr->task_probe->timeout*2)) { |
6458 | 0 | lock_basic_unlock(&xfr->lock); |
6459 | 0 | return; |
6460 | 0 | } |
6461 | 0 | } |
6462 | | /* delete commpoint so a new one is created, with a fresh port nr */ |
6463 | 0 | comm_point_delete(xfr->task_probe->cp); |
6464 | 0 | xfr->task_probe->cp = NULL; |
6465 | | |
6466 | | /* too many timeouts (or fail to send), move to next or end */ |
6467 | 0 | xfr_probe_nextmaster(xfr); |
6468 | 0 | xfr_probe_send_or_end(xfr, env); |
6469 | 0 | } |
6470 | | |
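/*
 * Editor's note: hedged condensation, not part of the upstream file.
 * The timer callback above resends the SOA probe to the same master with a
 * doubled timeout until the current timeout exceeds AUTH_PROBE_TIMEOUT_STOP,
 * and only then moves on to the next master.  In isolation the schedule is:
 */
static int
example_next_probe_timeout(int cur_timeout_msec, int stop_msec)
{
	if(cur_timeout_msec <= stop_msec)
		return cur_timeout_msec * 2;  /* retry same master, wait longer */
	return -1;                            /* give up, try the next master */
}
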
6471 | | /** callback for task_probe udp packets */ |
6472 | | int |
6473 | | auth_xfer_probe_udp_callback(struct comm_point* c, void* arg, int err, |
6474 | | struct comm_reply* repinfo) |
6475 | 0 | { |
6476 | 0 | struct auth_xfer* xfr = (struct auth_xfer*)arg; |
6477 | 0 | struct module_env* env; |
6478 | 0 | log_assert(xfr->task_probe); |
6479 | 0 | lock_basic_lock(&xfr->lock); |
6480 | 0 | env = xfr->task_probe->env; |
6481 | 0 | if(!env || env->outnet->want_to_quit) { |
6482 | 0 | lock_basic_unlock(&xfr->lock); |
6483 | 0 | return 0; /* stop on quit */ |
6484 | 0 | } |
6485 | | |
6486 | | /* the comm_point_udp_callback is in a for loop for NUM_UDP_PER_SELECT |
6487 | | * and we set rep.c=NULL to stop it from looking inside the commpoint */
6488 | 0 | repinfo->c = NULL; |
6489 | | /* stop the timer */ |
6490 | 0 | comm_timer_disable(xfr->task_probe->timer); |
6491 | | |
6492 | | /* see if we got a packet and what that means */ |
6493 | 0 | if(err == NETEVENT_NOERROR) { |
6494 | 0 | uint32_t serial = 0; |
6495 | 0 | if(check_packet_ok(c->buffer, LDNS_RR_TYPE_SOA, xfr, |
6496 | 0 | &serial)) { |
6497 | | /* successful lookup */ |
6498 | 0 | if(verbosity >= VERB_ALGO) { |
6499 | 0 | char buf[256]; |
6500 | 0 | dname_str(xfr->name, buf); |
6501 | 0 | verbose(VERB_ALGO, "auth zone %s: soa probe " |
6502 | 0 | "serial is %u", buf, (unsigned)serial); |
6503 | 0 | } |
6504 | | /* see if this serial indicates that the zone has |
6505 | | * to be updated */ |
6506 | 0 | if(xfr_serial_means_update(xfr, serial)) { |
6507 | | /* if updated, start the transfer task, if needed */ |
6508 | 0 | verbose(VERB_ALGO, "auth_zone updated, start transfer"); |
6509 | 0 | if(xfr->task_transfer->worker == NULL) { |
6510 | 0 | struct auth_master* master = |
6511 | 0 | xfr_probe_current_master(xfr); |
6512 | | /* if we have download URLs use them |
6513 | | * in preference to this master we |
6514 | | * just probed the SOA from */ |
6515 | 0 | if(xfr->task_transfer->masters && |
6516 | 0 | xfr->task_transfer->masters->http) |
6517 | 0 | master = NULL; |
6518 | 0 | xfr_probe_disown(xfr); |
6519 | 0 | xfr_start_transfer(xfr, env, master); |
6520 | 0 | return 0; |
6521 | |
6522 | 0 | } |
6523 | | /* other tasks are running, we don't do this anymore */ |
6524 | 0 | xfr_probe_disown(xfr); |
6525 | 0 | lock_basic_unlock(&xfr->lock); |
6526 | | /* return, we don't send a reply to this udp packet,
6527 | | * and we set up the tasks to do next */
6528 | 0 | return 0; |
6529 | 0 | } else { |
6530 | 0 | verbose(VERB_ALGO, "auth_zone master reports unchanged soa serial"); |
6531 | | /* if we cannot find updates amongst the
6532 | | * masters, this means we have a new lease
6533 | | * on the zone */
6534 | 0 | xfr->task_probe->have_new_lease = 1; |
6535 | 0 | } |
6536 | 0 | } else { |
6537 | 0 | if(verbosity >= VERB_ALGO) { |
6538 | 0 | char buf[256]; |
6539 | 0 | dname_str(xfr->name, buf); |
6540 | 0 | verbose(VERB_ALGO, "auth zone %s: bad reply to soa probe", buf); |
6541 | 0 | } |
6542 | 0 | } |
6543 | 0 | } else { |
6544 | 0 | if(verbosity >= VERB_ALGO) { |
6545 | 0 | char buf[256]; |
6546 | 0 | dname_str(xfr->name, buf); |
6547 | 0 | verbose(VERB_ALGO, "auth zone %s: soa probe failed", buf); |
6548 | 0 | } |
6549 | 0 | } |
6550 | | |
6551 | | /* failed lookup or not an update */ |
6552 | | /* delete commpoint so a new one is created, with a fresh port nr */ |
6553 | 0 | comm_point_delete(xfr->task_probe->cp); |
6554 | 0 | xfr->task_probe->cp = NULL; |
6555 | | |
6556 | | /* if the result was not a successful probe, we need |
6557 | | * to send the next one */ |
6558 | 0 | xfr_probe_nextmaster(xfr); |
6559 | 0 | xfr_probe_send_or_end(xfr, env); |
6560 | 0 | return 0; |
6561 | 0 | } |
6562 | | |
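/*
 * Editor's note: hedged sketch, not part of the upstream file.  The probe
 * callback above defers to xfr_serial_means_update() (defined elsewhere in
 * this file) to decide whether the probed SOA serial is newer than the one
 * we hold.  SOA serials wrap around, so such checks are normally expressed
 * in RFC 1982 serial arithmetic; one common formulation is:
 */
static int
example_serial_is_newer(uint32_t new_serial, uint32_t old_serial)
{
	/* newer when the forward distance old->new, taken modulo 2^32,
	 * is nonzero and smaller than 2^31 */
	return new_serial != old_serial &&
		(uint32_t)(new_serial - old_serial) < 0x80000000u;
}
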
6563 | | /** lookup a host name for its addresses, if needed */ |
6564 | | static int |
6565 | | xfr_probe_lookup_host(struct auth_xfer* xfr, struct module_env* env) |
6566 | 0 | { |
6567 | 0 | struct sockaddr_storage addr; |
6568 | 0 | socklen_t addrlen = 0; |
6569 | 0 | struct auth_master* master = xfr->task_probe->lookup_target; |
6570 | 0 | struct query_info qinfo; |
6571 | 0 | uint16_t qflags = BIT_RD; |
6572 | 0 | uint8_t dname[LDNS_MAX_DOMAINLEN+1]; |
6573 | 0 | struct edns_data edns; |
6574 | 0 | sldns_buffer* buf = env->scratch_buffer; |
6575 | 0 | if(!master) return 0; |
6576 | 0 | if(extstrtoaddr(master->host, &addr, &addrlen, UNBOUND_DNS_PORT)) { |
6577 | | /* not needed, host is in IP addr format */ |
6578 | 0 | return 0; |
6579 | 0 | } |
6580 | 0 | if(master->allow_notify && !master->http && |
6581 | 0 | strchr(master->host, '/') != NULL && |
6582 | 0 | strchr(master->host, '/') == strrchr(master->host, '/')) { |
6583 | 0 | return 0; /* is IP/prefix format, not something to look up */ |
6584 | 0 | } |
6585 | | |
6586 | | /* use mesh_new_callback to probe for non-addr hosts, |
6587 | | * and then wait for them to be looked up (in cache, or query) */ |
6588 | 0 | qinfo.qname_len = sizeof(dname); |
6589 | 0 | if(sldns_str2wire_dname_buf(master->host, dname, &qinfo.qname_len) |
6590 | 0 | != 0) { |
6591 | 0 | log_err("cannot parse host name of master %s", master->host); |
6592 | 0 | return 0; |
6593 | 0 | } |
6594 | 0 | qinfo.qname = dname; |
6595 | 0 | qinfo.qclass = xfr->dclass; |
6596 | 0 | qinfo.qtype = LDNS_RR_TYPE_A; |
6597 | 0 | if(xfr->task_probe->lookup_aaaa) |
6598 | 0 | qinfo.qtype = LDNS_RR_TYPE_AAAA; |
6599 | 0 | qinfo.local_alias = NULL; |
6600 | 0 | if(verbosity >= VERB_ALGO) { |
6601 | 0 | char buf1[512]; |
6602 | 0 | char buf2[LDNS_MAX_DOMAINLEN+1]; |
6603 | 0 | dname_str(xfr->name, buf2); |
6604 | 0 | snprintf(buf1, sizeof(buf1), "auth zone %s: master lookup" |
6605 | 0 | " for task_probe", buf2); |
6606 | 0 | log_query_info(VERB_ALGO, buf1, &qinfo); |
6607 | 0 | } |
6608 | 0 | edns.edns_present = 1; |
6609 | 0 | edns.ext_rcode = 0; |
6610 | 0 | edns.edns_version = 0; |
6611 | 0 | edns.bits = EDNS_DO; |
6612 | 0 | edns.opt_list_in = NULL; |
6613 | 0 | edns.opt_list_out = NULL; |
6614 | 0 | edns.opt_list_inplace_cb_out = NULL; |
6615 | 0 | edns.padding_block_size = 0; |
6616 | 0 | if(sldns_buffer_capacity(buf) < 65535) |
6617 | 0 | edns.udp_size = (uint16_t)sldns_buffer_capacity(buf); |
6618 | 0 | else edns.udp_size = 65535; |
6619 | | |
6620 | | /* unlock xfr during mesh_new_callback() because the callback can be |
6621 | | * called straight away */ |
6622 | 0 | lock_basic_unlock(&xfr->lock); |
6623 | 0 | if(!mesh_new_callback(env->mesh, &qinfo, qflags, &edns, buf, 0, |
6624 | 0 | &auth_xfer_probe_lookup_callback, xfr, 0)) { |
6625 | 0 | lock_basic_lock(&xfr->lock); |
6626 | 0 | log_err("out of memory looking up master %s", master->host);
6627 | 0 | return 0; |
6628 | 0 | } |
6629 | 0 | lock_basic_lock(&xfr->lock); |
6630 | 0 | return 1; |
6631 | 0 | } |
6632 | | |
6633 | | /** move to sending the probe packets, next if fails. task_probe */ |
6634 | | static void |
6635 | | xfr_probe_send_or_end(struct auth_xfer* xfr, struct module_env* env) |
6636 | 0 | { |
6637 | | /* are we doing hostname lookups? */ |
6638 | 0 | while(xfr->task_probe->lookup_target) { |
6639 | 0 | if(xfr_probe_lookup_host(xfr, env)) { |
6640 | | /* wait for lookup to finish, |
6641 | | * note that the hostname may be in unbound's cache |
6642 | | * and we may then get an instant cache response, |
6643 | | * and that calls the callback just like a full |
6644 | | * lookup and lookup failures also call callback */ |
6645 | 0 | if(verbosity >= VERB_ALGO) { |
6646 | 0 | char zname[255+1]; |
6647 | 0 | dname_str(xfr->name, zname); |
6648 | 0 | verbose(VERB_ALGO, "auth zone %s probe next target lookup", zname); |
6649 | 0 | } |
6650 | 0 | lock_basic_unlock(&xfr->lock); |
6651 | 0 | return; |
6652 | 0 | } |
6653 | 0 | xfr_probe_move_to_next_lookup(xfr, env); |
6654 | 0 | } |
6655 | | /* probe of the list has ended. Create or refresh the list of
6656 | | * allow_notify addrs */
6657 | 0 | probe_copy_masters_for_allow_notify(xfr); |
6658 | 0 | if(verbosity >= VERB_ALGO) { |
6659 | 0 | char zname[255+1]; |
6660 | 0 | dname_str(xfr->name, zname); |
6661 | 0 | verbose(VERB_ALGO, "auth zone %s probe: notify addrs updated", zname); |
6662 | 0 | } |
6663 | 0 | if(xfr->task_probe->only_lookup) { |
6664 | | /* only wanted lookups for copy, stop probe and start wait */ |
6665 | 0 | xfr->task_probe->only_lookup = 0; |
6666 | 0 | if(verbosity >= VERB_ALGO) { |
6667 | 0 | char zname[255+1]; |
6668 | 0 | dname_str(xfr->name, zname); |
6669 | 0 | verbose(VERB_ALGO, "auth zone %s probe: finished only_lookup", zname); |
6670 | 0 | } |
6671 | 0 | xfr_probe_disown(xfr); |
6672 | 0 | if(xfr->task_nextprobe->worker == NULL) |
6673 | 0 | xfr_set_timeout(xfr, env, 0, 0); |
6674 | 0 | lock_basic_unlock(&xfr->lock); |
6675 | 0 | return; |
6676 | 0 | } |
6677 | | |
6678 | | /* send probe packets */ |
6679 | 0 | while(!xfr_probe_end_of_list(xfr)) { |
6680 | 0 | if(xfr_probe_send_probe(xfr, env, AUTH_PROBE_TIMEOUT)) { |
6681 | | /* successfully sent probe, wait for callback */ |
6682 | 0 | lock_basic_unlock(&xfr->lock); |
6683 | 0 | return; |
6684 | 0 | } |
6685 | | /* failed to send probe, next master */ |
6686 | 0 | xfr_probe_nextmaster(xfr); |
6687 | 0 | } |
6688 | | |
6689 | | /* done with probe sequence, wait */ |
6690 | 0 | if(xfr->task_probe->have_new_lease) { |
6691 | | /* if zone not updated, start the wait timer again */ |
6692 | 0 | if(verbosity >= VERB_ALGO) { |
6693 | 0 | char zname[255+1]; |
6694 | 0 | dname_str(xfr->name, zname); |
6695 | 0 | verbose(VERB_ALGO, "auth_zone %s unchanged, new lease, wait", zname); |
6696 | 0 | } |
6697 | 0 | xfr_probe_disown(xfr); |
6698 | 0 | if(xfr->have_zone) |
6699 | 0 | xfr->lease_time = *env->now; |
6700 | 0 | if(xfr->task_nextprobe->worker == NULL) |
6701 | 0 | xfr_set_timeout(xfr, env, 0, 0); |
6702 | 0 | } else { |
6703 | 0 | if(verbosity >= VERB_ALGO) { |
6704 | 0 | char zname[255+1]; |
6705 | 0 | dname_str(xfr->name, zname); |
6706 | 0 | verbose(VERB_ALGO, "auth zone %s soa probe failed, wait to retry", zname); |
6707 | 0 | } |
6708 | | /* we failed to send this as well, move to the wait task, |
6709 | | * use the shorter retry timeout */ |
6710 | 0 | xfr_probe_disown(xfr); |
6711 | | /* pick up the nextprobe task and wait */ |
6712 | 0 | if(xfr->task_nextprobe->worker == NULL) |
6713 | 0 | xfr_set_timeout(xfr, env, 1, 0); |
6714 | 0 | } |
6715 | |
6716 | 0 | lock_basic_unlock(&xfr->lock); |
6717 | 0 | } |
6718 | | |
6719 | | /** callback for task_probe lookup of host name, of A or AAAA */ |
6720 | | void auth_xfer_probe_lookup_callback(void* arg, int rcode, sldns_buffer* buf, |
6721 | | enum sec_status ATTR_UNUSED(sec), char* ATTR_UNUSED(why_bogus), |
6722 | | int ATTR_UNUSED(was_ratelimited)) |
6723 | 0 | { |
6724 | 0 | struct auth_xfer* xfr = (struct auth_xfer*)arg; |
6725 | 0 | struct module_env* env; |
6726 | 0 | log_assert(xfr->task_probe); |
6727 | 0 | lock_basic_lock(&xfr->lock); |
6728 | 0 | env = xfr->task_probe->env; |
6729 | 0 | if(!env || env->outnet->want_to_quit) { |
6730 | 0 | lock_basic_unlock(&xfr->lock); |
6731 | 0 | return; /* stop on quit */ |
6732 | 0 | } |
6733 | | |
6734 | | /* process result */ |
6735 | 0 | if(rcode == LDNS_RCODE_NOERROR) { |
6736 | 0 | uint16_t wanted_qtype = LDNS_RR_TYPE_A; |
6737 | 0 | struct regional* temp = env->scratch; |
6738 | 0 | struct query_info rq; |
6739 | 0 | struct reply_info* rep; |
6740 | 0 | if(xfr->task_probe->lookup_aaaa) |
6741 | 0 | wanted_qtype = LDNS_RR_TYPE_AAAA; |
6742 | 0 | memset(&rq, 0, sizeof(rq)); |
6743 | 0 | rep = parse_reply_in_temp_region(buf, temp, &rq); |
6744 | 0 | if(rep && rq.qtype == wanted_qtype && |
6745 | 0 | FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_NOERROR) { |
6746 | | /* parsed successfully */ |
6747 | 0 | struct ub_packed_rrset_key* answer = |
6748 | 0 | reply_find_answer_rrset(&rq, rep); |
6749 | 0 | if(answer) { |
6750 | 0 | xfr_master_add_addrs(xfr->task_probe-> |
6751 | 0 | lookup_target, answer, wanted_qtype); |
6752 | 0 | } else { |
6753 | 0 | if(verbosity >= VERB_ALGO) { |
6754 | 0 | char zname[255+1]; |
6755 | 0 | dname_str(xfr->name, zname); |
6756 | 0 | verbose(VERB_ALGO, "auth zone %s host %s type %s probe lookup has nodata", zname, xfr->task_probe->lookup_target->host, (xfr->task_probe->lookup_aaaa?"AAAA":"A")); |
6757 | 0 | } |
6758 | 0 | } |
6759 | 0 | } else { |
6760 | 0 | if(verbosity >= VERB_ALGO) { |
6761 | 0 | char zname[255+1]; |
6762 | 0 | dname_str(xfr->name, zname); |
6763 | 0 | verbose(VERB_ALGO, "auth zone %s host %s type %s probe lookup has no address", zname, xfr->task_probe->lookup_target->host, (xfr->task_probe->lookup_aaaa?"AAAA":"A")); |
6764 | 0 | } |
6765 | 0 | } |
6766 | 0 | regional_free_all(temp); |
6767 | 0 | } else { |
6768 | 0 | if(verbosity >= VERB_ALGO) { |
6769 | 0 | char zname[255+1]; |
6770 | 0 | dname_str(xfr->name, zname); |
6771 | 0 | verbose(VERB_ALGO, "auth zone %s host %s type %s probe lookup failed", zname, xfr->task_probe->lookup_target->host, (xfr->task_probe->lookup_aaaa?"AAAA":"A")); |
6772 | 0 | } |
6773 | 0 | } |
6774 | 0 | if(xfr->task_probe->lookup_target->list && |
6775 | 0 | xfr->task_probe->lookup_target == xfr_probe_current_master(xfr)) |
6776 | 0 | xfr->task_probe->scan_addr = xfr->task_probe->lookup_target->list; |
6777 | | |
6778 | | /* move to lookup AAAA after A lookup, move to next hostname lookup, |
6779 | | * or move to send the probes, or, if nothing to do, end task_probe */ |
6780 | 0 | xfr_probe_move_to_next_lookup(xfr, env); |
6781 | 0 | xfr_probe_send_or_end(xfr, env); |
6782 | 0 | } |
6783 | | |
6784 | | /** disown task_nextprobe. caller must hold xfr.lock */ |
6785 | | static void |
6786 | | xfr_nextprobe_disown(struct auth_xfer* xfr) |
6787 | 0 | { |
6788 | | /* delete the timer, because the next worker to pick this up may |
6789 | | * not have the same event base */ |
6790 | 0 | comm_timer_delete(xfr->task_nextprobe->timer); |
6791 | 0 | xfr->task_nextprobe->timer = NULL; |
6792 | 0 | xfr->task_nextprobe->next_probe = 0; |
6793 | | /* we don't own this item anymore */ |
6794 | 0 | xfr->task_nextprobe->worker = NULL; |
6795 | 0 | xfr->task_nextprobe->env = NULL; |
6796 | 0 | } |
6797 | | |
6798 | | /** xfer nextprobe timeout callback, this is part of task_nextprobe */ |
6799 | | void |
6800 | | auth_xfer_timer(void* arg) |
6801 | 0 | { |
6802 | 0 | struct auth_xfer* xfr = (struct auth_xfer*)arg; |
6803 | 0 | struct module_env* env; |
6804 | 0 | log_assert(xfr->task_nextprobe); |
6805 | 0 | lock_basic_lock(&xfr->lock); |
6806 | 0 | env = xfr->task_nextprobe->env; |
6807 | 0 | if(!env || env->outnet->want_to_quit) { |
6808 | 0 | lock_basic_unlock(&xfr->lock); |
6809 | 0 | return; /* stop on quit */ |
6810 | 0 | } |
6811 | | |
6812 | | /* see if zone has expired, and if so, also set auth_zone expired */ |
6813 | 0 | if(xfr->have_zone && !xfr->zone_expired && |
6814 | 0 | *env->now >= xfr->lease_time + xfr->expiry) { |
6815 | 0 | lock_basic_unlock(&xfr->lock); |
6816 | 0 | auth_xfer_set_expired(xfr, env, 1); |
6817 | 0 | lock_basic_lock(&xfr->lock); |
6818 | 0 | } |
6819 | |
6820 | 0 | xfr_nextprobe_disown(xfr); |
6821 | |
6822 | 0 | if(!xfr_start_probe(xfr, env, NULL)) { |
6823 | | /* not started because already in progress */ |
6824 | 0 | lock_basic_unlock(&xfr->lock); |
6825 | 0 | } |
6826 | 0 | } |
6827 | | |
6828 | | /** return true if there are probe (SOA UDP query) targets in the master list*/ |
6829 | | static int |
6830 | | have_probe_targets(struct auth_master* list) |
6831 | 0 | { |
6832 | 0 | struct auth_master* p; |
6833 | 0 | for(p=list; p; p = p->next) { |
6834 | 0 | if(!p->allow_notify && p->host) |
6835 | 0 | return 1; |
6836 | 0 | } |
6837 | 0 | return 0; |
6838 | 0 | } |
6839 | | |
6840 | | /** start task_probe if possible, if no masters for probe start task_transfer |
6841 | | * returns true if task has been started, and false if the task is already |
6842 | | * in progress. */ |
6843 | | static int |
6844 | | xfr_start_probe(struct auth_xfer* xfr, struct module_env* env, |
6845 | | struct auth_master* spec) |
6846 | 0 | { |
6847 | | /* see if we need to start a probe (or maybe it is already in |
6848 | | * progress (due to notify)) */ |
6849 | 0 | if(xfr->task_probe->worker == NULL) { |
6850 | 0 | if(!have_probe_targets(xfr->task_probe->masters) && |
6851 | 0 | !(xfr->task_probe->only_lookup && |
6852 | 0 | xfr->task_probe->masters != NULL)) { |
6853 | | /* useless to pick up task_probe, no masters to |
6854 | | * probe. Instead attempt to pick up task transfer */ |
6855 | 0 | if(xfr->task_transfer->worker == NULL) { |
6856 | 0 | xfr_start_transfer(xfr, env, spec); |
6857 | 0 | return 1; |
6858 | 0 | } |
6859 | | /* task transfer already in progress */ |
6860 | 0 | return 0; |
6861 | 0 | } |
6862 | | |
6863 | | /* pick up the probe task ourselves */ |
6864 | 0 | xfr->task_probe->worker = env->worker; |
6865 | 0 | xfr->task_probe->env = env; |
6866 | 0 | xfr->task_probe->cp = NULL; |
6867 | | |
6868 | | /* start the task */ |
6869 | | /* have not seen a new lease yet, this scan */ |
6870 | 0 | xfr->task_probe->have_new_lease = 0; |
6871 | | /* if this was a timeout, no specific first master to scan */ |
6872 | | /* otherwise, spec is non-NULL: the notified master; scan
6873 | | * it first and also transfer from it first */
6874 | 0 | xfr_probe_start_list(xfr, spec); |
6875 | | /* setup to start the lookup of hostnames of masters afresh */ |
6876 | 0 | xfr_probe_start_lookups(xfr); |
6877 | | /* send the probe packet or next send, or end task */ |
6878 | 0 | xfr_probe_send_or_end(xfr, env); |
6879 | 0 | return 1; |
6880 | 0 | } |
6881 | 0 | return 0; |
6882 | 0 | } |
6883 | | |
6884 | | /** for task_nextprobe. |
6885 | | * determine next timeout for auth_xfer. Also (re)sets timer. |
6886 | | * @param xfr: task structure |
6887 | | * @param env: module environment, with worker and time. |
6888 | | * @param failure: set true if timer should be set for failure retry. |
6889 | | * @param lookup_only: only perform lookups when timer done, 0 sec timeout |
6890 | | */ |
6891 | | static void |
6892 | | xfr_set_timeout(struct auth_xfer* xfr, struct module_env* env, |
6893 | | int failure, int lookup_only) |
6894 | 0 | { |
6895 | 0 | struct timeval tv; |
6896 | 0 | log_assert(xfr->task_nextprobe != NULL); |
6897 | 0 | log_assert(xfr->task_nextprobe->worker == NULL || |
6898 | 0 | xfr->task_nextprobe->worker == env->worker); |
6899 | | /* normally, nextprobe = startoflease + refresh, |
6900 | | * but if expiry is sooner, use that one. |
6901 | | * after a failure, use the retry timer instead. */ |
6902 | 0 | xfr->task_nextprobe->next_probe = *env->now; |
6903 | 0 | if(xfr->lease_time && !failure) |
6904 | 0 | xfr->task_nextprobe->next_probe = xfr->lease_time; |
6905 | | |
6906 | 0 | if(!failure) { |
6907 | 0 | xfr->task_nextprobe->backoff = 0; |
6908 | 0 | } else { |
6909 | 0 | if(xfr->task_nextprobe->backoff == 0) |
6910 | 0 | xfr->task_nextprobe->backoff = 3; |
6911 | 0 | else xfr->task_nextprobe->backoff *= 2; |
6912 | 0 | if(xfr->task_nextprobe->backoff > AUTH_TRANSFER_MAX_BACKOFF) |
6913 | 0 | xfr->task_nextprobe->backoff = |
6914 | 0 | AUTH_TRANSFER_MAX_BACKOFF; |
6915 | 0 | } |
6916 | |
6917 | 0 | if(xfr->have_zone) { |
6918 | 0 | time_t wait = xfr->refresh; |
6919 | 0 | if(failure) wait = xfr->retry; |
6920 | 0 | if(xfr->expiry < wait) |
6921 | 0 | xfr->task_nextprobe->next_probe += xfr->expiry; |
6922 | 0 | else xfr->task_nextprobe->next_probe += wait; |
6923 | 0 | if(failure) |
6924 | 0 | xfr->task_nextprobe->next_probe += |
6925 | 0 | xfr->task_nextprobe->backoff; |
6926 | | /* put the timer exactly on expiry, if possible */ |
6927 | 0 | if(xfr->lease_time && xfr->lease_time+xfr->expiry < |
6928 | 0 | xfr->task_nextprobe->next_probe && |
6929 | 0 | xfr->lease_time+xfr->expiry > *env->now) |
6930 | 0 | xfr->task_nextprobe->next_probe = |
6931 | 0 | xfr->lease_time+xfr->expiry; |
6932 | 0 | } else { |
6933 | 0 | xfr->task_nextprobe->next_probe += |
6934 | 0 | xfr->task_nextprobe->backoff; |
6935 | 0 | } |
6936 | |
6937 | 0 | if(!xfr->task_nextprobe->timer) { |
6938 | 0 | xfr->task_nextprobe->timer = comm_timer_create( |
6939 | 0 | env->worker_base, auth_xfer_timer, xfr); |
6940 | 0 | if(!xfr->task_nextprobe->timer) { |
6941 | | /* failed to malloc memory. likely zone transfer |
6942 | | * also fails for that. skip the timeout */ |
6943 | 0 | char zname[255+1]; |
6944 | 0 | dname_str(xfr->name, zname); |
6945 | 0 | log_err("cannot allocate timer, no refresh for %s", |
6946 | 0 | zname); |
6947 | 0 | return; |
6948 | 0 | } |
6949 | 0 | } |
6950 | 0 | xfr->task_nextprobe->worker = env->worker; |
6951 | 0 | xfr->task_nextprobe->env = env; |
6952 | 0 | if(*(xfr->task_nextprobe->env->now) <= xfr->task_nextprobe->next_probe) |
6953 | 0 | tv.tv_sec = xfr->task_nextprobe->next_probe - |
6954 | 0 | *(xfr->task_nextprobe->env->now); |
6955 | 0 | else tv.tv_sec = 0; |
6956 | 0 | if(tv.tv_sec != 0 && lookup_only && xfr->task_probe->masters) { |
6957 | | /* don't do lookup_only if the lookup timeout is 0 anyway,
6958 | | * or if we don't have masters to look up */
6959 | 0 | tv.tv_sec = 0; |
6960 | 0 | if(xfr->task_probe->worker == NULL) |
6961 | 0 | xfr->task_probe->only_lookup = 1; |
6962 | 0 | } |
6963 | 0 | if(verbosity >= VERB_ALGO) { |
6964 | 0 | char zname[255+1]; |
6965 | 0 | dname_str(xfr->name, zname); |
6966 | 0 | verbose(VERB_ALGO, "auth zone %s timeout in %d seconds", |
6967 | 0 | zname, (int)tv.tv_sec); |
6968 | 0 | } |
6969 | 0 | tv.tv_usec = 0; |
6970 | 0 | comm_timer_set(xfr->task_nextprobe->timer, &tv); |
6971 | 0 | } |
6972 | | |
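/*
 * Editor's note: hedged condensation of the have_zone branch above, not
 * part of the upstream file; the helper name and parameters are
 * hypothetical.  On success the next probe lands at lease_time plus
 * min(refresh, expiry); after a failure it lands at now plus min(retry,
 * expiry) plus the doubling backoff, and it is pulled onto lease_time +
 * expiry when that moment is earlier but still in the future.
 */
static time_t
example_next_probe_time(time_t now, time_t lease_time, time_t refresh,
	time_t retry, time_t expiry, int failure, time_t backoff)
{
	time_t base = (lease_time && !failure) ? lease_time : now;
	time_t wait = failure ? retry : refresh;
	time_t next = base + (expiry < wait ? expiry : wait);
	if(failure)
		next += backoff;
	if(lease_time && lease_time + expiry < next &&
		lease_time + expiry > now)
		next = lease_time + expiry;
	return next;
}
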
6973 | | /** initial pick up of worker timeouts, ties events to worker event loop */ |
6974 | | void |
6975 | | auth_xfer_pickup_initial(struct auth_zones* az, struct module_env* env) |
6976 | 0 | { |
6977 | 0 | struct auth_xfer* x; |
6978 | 0 | lock_rw_wrlock(&az->lock); |
6979 | 0 | RBTREE_FOR(x, struct auth_xfer*, &az->xtree) { |
6980 | 0 | lock_basic_lock(&x->lock); |
6981 | | /* set lease_time, because we now have timestamp in env, |
6982 | | * (not earlier during startup and apply_cfg), and this |
6983 | | * notes the start time when the data was acquired */ |
6984 | 0 | if(x->have_zone) |
6985 | 0 | x->lease_time = *env->now; |
6986 | 0 | if(x->task_nextprobe && x->task_nextprobe->worker == NULL) { |
6987 | 0 | xfr_set_timeout(x, env, 0, 1); |
6988 | 0 | } |
6989 | 0 | lock_basic_unlock(&x->lock); |
6990 | 0 | } |
6991 | 0 | lock_rw_unlock(&az->lock); |
6992 | 0 | } |
6993 | | |
6994 | | void auth_zones_cleanup(struct auth_zones* az) |
6995 | 0 | { |
6996 | 0 | struct auth_xfer* x; |
6997 | 0 | lock_rw_wrlock(&az->lock); |
6998 | 0 | RBTREE_FOR(x, struct auth_xfer*, &az->xtree) { |
6999 | 0 | lock_basic_lock(&x->lock); |
7000 | 0 | if(x->task_nextprobe && x->task_nextprobe->worker != NULL) { |
7001 | 0 | xfr_nextprobe_disown(x); |
7002 | 0 | } |
7003 | 0 | if(x->task_probe && x->task_probe->worker != NULL) { |
7004 | 0 | xfr_probe_disown(x); |
7005 | 0 | } |
7006 | 0 | if(x->task_transfer && x->task_transfer->worker != NULL) { |
7007 | 0 | auth_chunks_delete(x->task_transfer); |
7008 | 0 | xfr_transfer_disown(x); |
7009 | 0 | } |
7010 | 0 | lock_basic_unlock(&x->lock); |
7011 | 0 | } |
7012 | 0 | lock_rw_unlock(&az->lock); |
7013 | 0 | } |
7014 | | |
7015 | | /** |
7016 | | * malloc the xfer and tasks |
7017 | | * @param z: auth_zone with name of zone. |
7018 | | */ |
7019 | | static struct auth_xfer* |
7020 | | auth_xfer_new(struct auth_zone* z) |
7021 | 0 | { |
7022 | 0 | struct auth_xfer* xfr; |
7023 | 0 | xfr = (struct auth_xfer*)calloc(1, sizeof(*xfr)); |
7024 | 0 | if(!xfr) return NULL; |
7025 | 0 | xfr->name = memdup(z->name, z->namelen); |
7026 | 0 | if(!xfr->name) { |
7027 | 0 | free(xfr); |
7028 | 0 | return NULL; |
7029 | 0 | } |
7030 | 0 | xfr->node.key = xfr; |
7031 | 0 | xfr->namelen = z->namelen; |
7032 | 0 | xfr->namelabs = z->namelabs; |
7033 | 0 | xfr->dclass = z->dclass; |
7034 | |
7035 | 0 | xfr->task_nextprobe = (struct auth_nextprobe*)calloc(1, |
7036 | 0 | sizeof(struct auth_nextprobe)); |
7037 | 0 | if(!xfr->task_nextprobe) { |
7038 | 0 | free(xfr->name); |
7039 | 0 | free(xfr); |
7040 | 0 | return NULL; |
7041 | 0 | } |
7042 | 0 | xfr->task_probe = (struct auth_probe*)calloc(1, |
7043 | 0 | sizeof(struct auth_probe)); |
7044 | 0 | if(!xfr->task_probe) { |
7045 | 0 | free(xfr->task_nextprobe); |
7046 | 0 | free(xfr->name); |
7047 | 0 | free(xfr); |
7048 | 0 | return NULL; |
7049 | 0 | } |
7050 | 0 | xfr->task_transfer = (struct auth_transfer*)calloc(1, |
7051 | 0 | sizeof(struct auth_transfer)); |
7052 | 0 | if(!xfr->task_transfer) { |
7053 | 0 | free(xfr->task_probe); |
7054 | 0 | free(xfr->task_nextprobe); |
7055 | 0 | free(xfr->name); |
7056 | 0 | free(xfr); |
7057 | 0 | return NULL; |
7058 | 0 | } |
7059 | | |
7060 | 0 | lock_basic_init(&xfr->lock); |
7061 | 0 | lock_protect(&xfr->lock, &xfr->name, sizeof(xfr->name)); |
7062 | 0 | lock_protect(&xfr->lock, &xfr->namelen, sizeof(xfr->namelen)); |
7063 | 0 | lock_protect(&xfr->lock, xfr->name, xfr->namelen); |
7064 | 0 | lock_protect(&xfr->lock, &xfr->namelabs, sizeof(xfr->namelabs)); |
7065 | 0 | lock_protect(&xfr->lock, &xfr->dclass, sizeof(xfr->dclass)); |
7066 | 0 | lock_protect(&xfr->lock, &xfr->notify_received, sizeof(xfr->notify_received)); |
7067 | 0 | lock_protect(&xfr->lock, &xfr->notify_serial, sizeof(xfr->notify_serial)); |
7068 | 0 | lock_protect(&xfr->lock, &xfr->zone_expired, sizeof(xfr->zone_expired)); |
7069 | 0 | lock_protect(&xfr->lock, &xfr->have_zone, sizeof(xfr->have_zone)); |
7070 | 0 | lock_protect(&xfr->lock, &xfr->serial, sizeof(xfr->serial)); |
7071 | 0 | lock_protect(&xfr->lock, &xfr->retry, sizeof(xfr->retry)); |
7072 | 0 | lock_protect(&xfr->lock, &xfr->refresh, sizeof(xfr->refresh)); |
7073 | 0 | lock_protect(&xfr->lock, &xfr->expiry, sizeof(xfr->expiry)); |
7074 | 0 | lock_protect(&xfr->lock, &xfr->lease_time, sizeof(xfr->lease_time)); |
7075 | 0 | lock_protect(&xfr->lock, &xfr->task_nextprobe->worker, |
7076 | 0 | sizeof(xfr->task_nextprobe->worker)); |
7077 | 0 | lock_protect(&xfr->lock, &xfr->task_probe->worker, |
7078 | 0 | sizeof(xfr->task_probe->worker)); |
7079 | 0 | lock_protect(&xfr->lock, &xfr->task_transfer->worker, |
7080 | 0 | sizeof(xfr->task_transfer->worker)); |
7081 | 0 | lock_basic_lock(&xfr->lock); |
7082 | 0 | return xfr; |
7083 | 0 | } |
7084 | | |
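/* Note on auth_xfer_new() above: the lock_protect() calls are no-ops in a
 * normal build; with Unbound's lock-checking build they register the listed
 * memory regions as guarded by xfr->lock so unlocked accesses can be flagged
 * (this description is based on the util/locks.h wrappers, not on code in
 * this file). The function returns with xfr->lock held, so the caller is
 * expected to unlock it. */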
7085 | | /** Create auth_xfer structure. |
7086 | | * This populates the have_zone flag, the SOA values and timers,
7087 | | * and sets the timeout; if a zone transfer is needed a short timeout is set.
7088 | | * For that the auth_zone itself must exist (and have its zonefile read in).
7089 | | * Returns NULL on alloc failure. */
7090 | | struct auth_xfer* |
7091 | | auth_xfer_create(struct auth_zones* az, struct auth_zone* z) |
7092 | 0 | { |
7093 | 0 | struct auth_xfer* xfr; |
7094 | | |
7095 | | /* malloc it */ |
7096 | 0 | xfr = auth_xfer_new(z); |
7097 | 0 | if(!xfr) { |
7098 | 0 | log_err("malloc failure"); |
7099 | 0 | return NULL; |
7100 | 0 | } |
7101 | | /* insert in tree */ |
7102 | 0 | (void)rbtree_insert(&az->xtree, &xfr->node); |
7103 | 0 | return xfr; |
7104 | 0 | } |
7105 | | |
7106 | | /** create new auth_master structure */ |
7107 | | static struct auth_master* |
7108 | | auth_master_new(struct auth_master*** list) |
7109 | 0 | { |
7110 | 0 | struct auth_master *m; |
7111 | 0 | m = (struct auth_master*)calloc(1, sizeof(*m)); |
7112 | 0 | if(!m) { |
7113 | 0 | log_err("malloc failure"); |
7114 | 0 | return NULL; |
7115 | 0 | } |
7116 | | /* set first pointer to m, or next pointer of previous element to m */ |
7117 | 0 | (**list) = m; |
7118 | | /* store m's next pointer as future point to store at */ |
7119 | 0 | (*list) = &(m->next); |
7120 | 0 | return m; |
7121 | 0 | } |
7122 | | |
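/* The (**list) = m; (*list) = &(m->next); construction in auth_master_new()
 * keeps a "tail" cursor, so the empty list and append-at-end are handled by
 * the same two statements and no list walk is needed per append. A minimal
 * standalone sketch of the idiom; toy_node and toy_append are illustrative
 * names, not part of Unbound. */
#include <stdlib.h>

struct toy_node { int value; struct toy_node* next; };

/* store the new node where the tail cursor points, then advance the cursor
 * to the new node's next pointer, ready for the following append */
struct toy_node* toy_append(struct toy_node*** tail, int value)
{
	struct toy_node* n = (struct toy_node*)calloc(1, sizeof(*n));
	if(!n) return NULL;
	n->value = value;
	**tail = n;        /* first pointer, or previous element's next */
	*tail = &n->next;  /* future appends store here */
	return n;
}
/* usage: struct toy_node* head = NULL; struct toy_node** tail = &head;
 * toy_append(&tail, 1); toy_append(&tail, 2);  gives head: 1 -> 2 -> NULL */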
7123 | | /** dup_prefix : create string from initial part of other string, malloced */ |
7124 | | static char* |
7125 | | dup_prefix(char* str, size_t num) |
7126 | 0 | { |
7127 | 0 | char* result; |
7128 | 0 | size_t len = strlen(str); |
7129 | 0 | if(len < num) num = len; /* not more than strlen */ |
7130 | 0 | result = (char*)malloc(num+1); |
7131 | 0 | if(!result) { |
7132 | 0 | log_err("malloc failure"); |
7133 | 0 | return result; |
7134 | 0 | } |
7135 | 0 | memmove(result, str, num); |
7136 | 0 | result[num] = 0; |
7137 | 0 | return result; |
7138 | 0 | } |
7139 | | |
7140 | | /** dup string and print error on error */ |
7141 | | static char* |
7142 | | dup_all(char* str) |
7143 | 0 | { |
7144 | 0 | char* result = strdup(str); |
7145 | 0 | if(!result) { |
7146 | 0 | log_err("malloc failure"); |
7147 | 0 | return NULL; |
7148 | 0 | } |
7149 | 0 | return result; |
7150 | 0 | } |
7151 | | |
7152 | | /** find first of two characters */ |
7153 | | static char* |
7154 | | str_find_first_of_chars(char* s, char a, char b) |
7155 | 0 | { |
7156 | 0 | char* ra = strchr(s, a); |
7157 | 0 | char* rb = strchr(s, b); |
7158 | 0 | if(!ra) return rb; |
7159 | 0 | if(!rb) return ra; |
7160 | 0 | if(ra < rb) return ra; |
7161 | 0 | return rb; |
7162 | 0 | } |
7163 | | |
7164 | | /** parse URL into host and file parts, false on malloc or parse error */ |
7165 | | static int |
7166 | | parse_url(char* url, char** host, char** file, int* port, int* ssl) |
7167 | 0 | { |
7168 | 0 | char* p = url; |
7169 | | /* parse http://www.example.com/file.htm |
7170 | | * or http://127.0.0.1 (index.html) |
7171 | | * or https://[::1@1234]/a/b/c/d */ |
7172 | 0 | *ssl = 1; |
7173 | 0 | *port = AUTH_HTTPS_PORT; |
7174 | | |
7175 | | /* parse http:// or https:// */ |
7176 | 0 | if(strncmp(p, "http://", 7) == 0) { |
7177 | 0 | p += 7; |
7178 | 0 | *ssl = 0; |
7179 | 0 | *port = AUTH_HTTP_PORT; |
7180 | 0 | } else if(strncmp(p, "https://", 8) == 0) { |
7181 | 0 | p += 8; |
7182 | 0 | } else if(strstr(p, "://") && strchr(p, '/') > strstr(p, "://") && |
7183 | 0 | strchr(p, ':') >= strstr(p, "://")) { |
7184 | 0 | char* uri = dup_prefix(p, (size_t)(strstr(p, "://")-p)); |
7185 | 0 | log_err("protocol %s:// not supported (for url %s)", |
7186 | 0 | uri?uri:"", p); |
7187 | 0 | free(uri); |
7188 | 0 | return 0; |
7189 | 0 | } |
7190 | | |
7191 | | /* parse hostname part */ |
7192 | 0 | if(p[0] == '[') { |
7193 | 0 | char* end = strchr(p, ']'); |
7194 | 0 | p++; /* skip over [ */ |
7195 | 0 | if(end) { |
7196 | 0 | *host = dup_prefix(p, (size_t)(end-p)); |
7197 | 0 | if(!*host) return 0; |
7198 | 0 | p = end+1; /* skip over ] */ |
7199 | 0 | } else { |
7200 | 0 | *host = dup_all(p); |
7201 | 0 | if(!*host) return 0; |
7202 | 0 | p = end; |
7203 | 0 | } |
7204 | 0 | } else { |
7205 | 0 | char* end = str_find_first_of_chars(p, ':', '/'); |
7206 | 0 | if(end) { |
7207 | 0 | *host = dup_prefix(p, (size_t)(end-p)); |
7208 | 0 | if(!*host) return 0; |
7209 | 0 | } else { |
7210 | 0 | *host = dup_all(p); |
7211 | 0 | if(!*host) return 0; |
7212 | 0 | } |
7213 | 0 | p = end; /* at next : or / or NULL */ |
7214 | 0 | } |
7215 | | |
7216 | | /* parse port number */ |
7217 | 0 | if(p && p[0] == ':') { |
7218 | 0 | char* end = NULL; |
7219 | 0 | *port = strtol(p+1, &end, 10); |
7220 | 0 | p = end; |
7221 | 0 | } |
7222 | | |
7223 | | /* parse filename part */ |
7224 | 0 | while(p && *p == '/') |
7225 | 0 | p++; |
7226 | 0 | if(!p || p[0] == 0) |
7227 | 0 | *file = strdup("/"); |
7228 | 0 | else *file = strdup(p); |
7229 | 0 | if(!*file) { |
7230 | 0 | log_err("malloc failure"); |
7231 | 0 | return 0; |
7232 | 0 | } |
7233 | 0 | return 1; |
7234 | 0 | } |
7235 | | |
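/* Worked examples for parse_url() above, traced from the code; AUTH_HTTP_PORT
 * and AUTH_HTTPS_PORT are the usual 80 and 443 defaults from authzone.h:
 *   "http://www.example.com/file.htm" -> ssl=0, port=AUTH_HTTP_PORT,
 *                                        host="www.example.com", file="file.htm"
 *   "https://127.0.0.1"               -> ssl=1, port=AUTH_HTTPS_PORT,
 *                                        host="127.0.0.1", file="/"
 *   "https://[::1]:8080/a/b"          -> ssl=1, port=8080,
 *                                        host="::1", file="a/b"
 * The file part is stored without its leading slashes; "/" is used when no
 * path is given. */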
7236 | | int |
7237 | | xfer_set_masters(struct auth_master** list, struct config_auth* c, |
7238 | | int with_http) |
7239 | 0 | { |
7240 | 0 | struct auth_master* m; |
7241 | 0 | struct config_strlist* p; |
7242 | | /* list points to the first, or next pointer for the new element */ |
7243 | 0 | while(*list) { |
7244 | 0 | list = &( (*list)->next ); |
7245 | 0 | } |
7246 | 0 | if(with_http) |
7247 | 0 | for(p = c->urls; p; p = p->next) { |
7248 | 0 | m = auth_master_new(&list); |
7249 | 0 | if(!m) return 0; |
7250 | 0 | m->http = 1; |
7251 | 0 | if(!parse_url(p->str, &m->host, &m->file, &m->port, &m->ssl)) |
7252 | 0 | return 0; |
7253 | 0 | } |
7254 | 0 | for(p = c->masters; p; p = p->next) { |
7255 | 0 | m = auth_master_new(&list); |
7256 | 0 | if(!m) return 0; |
7257 | 0 | m->ixfr = 1; /* this flag is not configurable */ |
7258 | 0 | m->host = strdup(p->str); |
7259 | 0 | if(!m->host) { |
7260 | 0 | log_err("malloc failure"); |
7261 | 0 | return 0; |
7262 | 0 | } |
7263 | 0 | } |
7264 | 0 | for(p = c->allow_notify; p; p = p->next) { |
7265 | 0 | m = auth_master_new(&list); |
7266 | 0 | if(!m) return 0; |
7267 | 0 | m->allow_notify = 1; |
7268 | 0 | m->host = strdup(p->str); |
7269 | 0 | if(!m->host) { |
7270 | 0 | log_err("malloc failure"); |
7271 | 0 | return 0; |
7272 | 0 | } |
7273 | 0 | } |
7274 | 0 | return 1; |
7275 | 0 | } |
7276 | | |
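/* For reference, the three loops in xfer_set_masters() map onto auth-zone
 * configuration entries. An illustrative unbound.conf fragment (the zone name
 * and addresses here are made up):
 *   auth-zone:
 *       name: "example.org"
 *       url: "https://203.0.113.1/example.org.zone"   # c->urls, m->http=1
 *       master: 192.0.2.1                             # c->masters, m->ixfr=1
 *       allow-notify: 198.51.100.7                    # c->allow_notify
 * The url entries are only added when with_http is set by the caller. */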
7277 | 0 | #define SERIAL_BITS 32 |
7278 | | int |
7279 | | compare_serial(uint32_t a, uint32_t b) |
7280 | 0 | { |
7281 | 0 | const uint32_t cutoff = ((uint32_t) 1 << (SERIAL_BITS - 1)); |
7282 | |
7283 | 0 | if (a == b) { |
7284 | 0 | return 0; |
7285 | 0 | } else if ((a < b && b - a < cutoff) || (a > b && a - b > cutoff)) { |
7286 | 0 | return -1; |
7287 | 0 | } else { |
7288 | 0 | return 1; |
7289 | 0 | } |
7290 | 0 | } |
7291 | | |
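/* compare_serial() implements RFC 1982 serial number arithmetic, so a zone
 * serial that wraps around 2^32 still compares as newer. Worked examples,
 * following the formula above:
 *   compare_serial(1, 2)          = -1   (2 is the newer serial)
 *   compare_serial(2, 1)          =  1
 *   compare_serial(0xFFFFFFFF, 0) = -1   (0 is newer; the serial wrapped)
 *   compare_serial(7, 7)          =  0
 * The transfer code uses this to decide whether a master offers a newer zone
 * than the serial currently held. */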
7292 | | int zonemd_hashalgo_supported(int hashalgo) |
7293 | 0 | { |
7294 | 0 | if(hashalgo == ZONEMD_ALGO_SHA384) return 1; |
7295 | 0 | if(hashalgo == ZONEMD_ALGO_SHA512) return 1; |
7296 | 0 | return 0; |
7297 | 0 | } |
7298 | | |
7299 | | int zonemd_scheme_supported(int scheme) |
7300 | 0 | { |
7301 | 0 | if(scheme == ZONEMD_SCHEME_SIMPLE) return 1; |
7302 | 0 | return 0; |
7303 | 0 | } |
7304 | | |
7305 | | /** initialize hash for hashing with zonemd hash algo */ |
7306 | | static struct secalgo_hash* zonemd_digest_init(int hashalgo, char** reason) |
7307 | 0 | { |
7308 | 0 | struct secalgo_hash *h; |
7309 | 0 | if(hashalgo == ZONEMD_ALGO_SHA384) { |
7310 | | /* sha384 */ |
7311 | 0 | h = secalgo_hash_create_sha384(); |
7312 | 0 | if(!h) |
7313 | 0 | *reason = "digest sha384 could not be created"; |
7314 | 0 | return h; |
7315 | 0 | } else if(hashalgo == ZONEMD_ALGO_SHA512) { |
7316 | | /* sha512 */ |
7317 | 0 | h = secalgo_hash_create_sha512(); |
7318 | 0 | if(!h) |
7319 | 0 | *reason = "digest sha512 could not be created"; |
7320 | 0 | return h; |
7321 | 0 | } |
7322 | | /* unknown hash algo */ |
7323 | 0 | *reason = "unsupported algorithm"; |
7324 | 0 | return NULL; |
7325 | 0 | } |
7326 | | |
7327 | | /** update the hash for zonemd */ |
7328 | | static int zonemd_digest_update(int hashalgo, struct secalgo_hash* h, |
7329 | | uint8_t* data, size_t len, char** reason) |
7330 | 0 | { |
7331 | 0 | if(hashalgo == ZONEMD_ALGO_SHA384) { |
7332 | 0 | if(!secalgo_hash_update(h, data, len)) { |
7333 | 0 | *reason = "digest sha384 failed"; |
7334 | 0 | return 0; |
7335 | 0 | } |
7336 | 0 | return 1; |
7337 | 0 | } else if(hashalgo == ZONEMD_ALGO_SHA512) { |
7338 | 0 | if(!secalgo_hash_update(h, data, len)) { |
7339 | 0 | *reason = "digest sha512 failed"; |
7340 | 0 | return 0; |
7341 | 0 | } |
7342 | 0 | return 1; |
7343 | 0 | } |
7344 | | /* unknown hash algo */ |
7345 | 0 | *reason = "unsupported algorithm"; |
7346 | 0 | return 0; |
7347 | 0 | } |
7348 | | |
7349 | | /** finish the hash for zonemd */ |
7350 | | static int zonemd_digest_finish(int hashalgo, struct secalgo_hash* h, |
7351 | | uint8_t* result, size_t hashlen, size_t* resultlen, char** reason) |
7352 | 0 | { |
7353 | 0 | if(hashalgo == ZONEMD_ALGO_SHA384) { |
7354 | 0 | if(hashlen < 384/8) { |
7355 | 0 | *reason = "digest buffer too small for sha384"; |
7356 | 0 | return 0; |
7357 | 0 | } |
7358 | 0 | if(!secalgo_hash_final(h, result, hashlen, resultlen)) { |
7359 | 0 | *reason = "digest sha384 finish failed"; |
7360 | 0 | return 0; |
7361 | 0 | } |
7362 | 0 | return 1; |
7363 | 0 | } else if(hashalgo == ZONEMD_ALGO_SHA512) { |
7364 | 0 | if(hashlen < 512/8) { |
7365 | 0 | *reason = "digest buffer too small for sha512"; |
7366 | 0 | return 0; |
7367 | 0 | } |
7368 | 0 | if(!secalgo_hash_final(h, result, hashlen, resultlen)) { |
7369 | 0 | *reason = "digest sha512 finish failed"; |
7370 | 0 | return 0; |
7371 | 0 | } |
7372 | 0 | return 1; |
7373 | 0 | } |
7374 | | /* unknown algo */ |
7375 | 0 | *reason = "unsupported algorithm"; |
7376 | 0 | return 0; |
7377 | 0 | } |
7378 | | |
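/* The three helpers above follow the usual init/update/finish hashing shape;
 * auth_zone_generate_zonemd_hash() further below drives them roughly like
 * this (sketch, error handling omitted):
 *   struct secalgo_hash* h = zonemd_digest_init(hashalgo, &reason);
 *   ... zonemd_digest_update(hashalgo, h, wire, wirelen, &reason)
 *       once per canonicalized RRset during collation ...
 *   zonemd_digest_finish(hashalgo, h, hash, hashlen, &resultlen, &reason);
 *   secalgo_hash_delete(h);
 * The finished digest is 48 octets for SHA-384 and 64 octets for SHA-512. */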
7379 | | /** add rrsets from node to the list */ |
7380 | | static size_t authdata_rrsets_to_list(struct auth_rrset** array, |
7381 | | size_t arraysize, struct auth_rrset* first) |
7382 | 0 | { |
7383 | 0 | struct auth_rrset* rrset = first; |
7384 | 0 | size_t num = 0; |
7385 | 0 | while(rrset) { |
7386 | 0 | if(num >= arraysize) |
7387 | 0 | return num; |
7388 | 0 | array[num] = rrset; |
7389 | 0 | num++; |
7390 | 0 | rrset = rrset->next; |
7391 | 0 | } |
7392 | 0 | return num; |
7393 | 0 | } |
7394 | | |
7395 | | /** compare rr list entries */ |
7396 | | static int rrlist_compare(const void* arg1, const void* arg2) |
7397 | 0 | { |
7398 | 0 | struct auth_rrset* r1 = *(struct auth_rrset**)arg1; |
7399 | 0 | struct auth_rrset* r2 = *(struct auth_rrset**)arg2; |
7400 | 0 | uint16_t t1, t2; |
7401 | 0 | if(r1 == NULL) t1 = LDNS_RR_TYPE_RRSIG; |
7402 | 0 | else t1 = r1->type; |
7403 | 0 | if(r2 == NULL) t2 = LDNS_RR_TYPE_RRSIG; |
7404 | 0 | else t2 = r2->type; |
7405 | 0 | if(t1 < t2) |
7406 | 0 | return -1; |
7407 | 0 | if(t1 > t2) |
7408 | 0 | return 1; |
7409 | 0 | return 0; |
7410 | 0 | } |
7411 | | |
7412 | | /** add type RRSIG to the rr list if there is not one there already,
7413 | | * this is to perform RRSIG collate processing at that point. */ |
7414 | | static void addrrsigtype_if_needed(struct auth_rrset** array, |
7415 | | size_t arraysize, size_t* rrnum, struct auth_data* node) |
7416 | 0 | { |
7417 | 0 | if(az_domain_rrset(node, LDNS_RR_TYPE_RRSIG)) |
7418 | 0 | return; /* already one there */ |
7419 | 0 | if((*rrnum) >= arraysize) |
7420 | 0 | return; /* array too small? */ |
7421 | 0 | array[*rrnum] = NULL; /* nothing there, but need entry in list */ |
7422 | 0 | (*rrnum)++; |
7423 | 0 | } |
7424 | | |
7425 | | /** collate the RRs in an RRset using the simple scheme */ |
7426 | | static int zonemd_simple_rrset(struct auth_zone* z, int hashalgo, |
7427 | | struct secalgo_hash* h, struct auth_data* node, |
7428 | | struct auth_rrset* rrset, struct regional* region, |
7429 | | struct sldns_buffer* buf, char** reason) |
7430 | 0 | { |
7431 | | /* canonicalize */ |
7432 | 0 | struct ub_packed_rrset_key key; |
7433 | 0 | memset(&key, 0, sizeof(key)); |
7434 | 0 | key.entry.key = &key; |
7435 | 0 | key.entry.data = rrset->data; |
7436 | 0 | key.rk.dname = node->name; |
7437 | 0 | key.rk.dname_len = node->namelen; |
7438 | 0 | key.rk.type = htons(rrset->type); |
7439 | 0 | key.rk.rrset_class = htons(z->dclass); |
7440 | 0 | if(!rrset_canonicalize_to_buffer(region, buf, &key)) { |
7441 | 0 | *reason = "out of memory"; |
7442 | 0 | return 0; |
7443 | 0 | } |
7444 | 0 | regional_free_all(region); |
7445 | | |
7446 | | /* hash */ |
7447 | 0 | if(!zonemd_digest_update(hashalgo, h, sldns_buffer_begin(buf), |
7448 | 0 | sldns_buffer_limit(buf), reason)) { |
7449 | 0 | return 0; |
7450 | 0 | } |
7451 | 0 | return 1; |
7452 | 0 | } |
7453 | | |
7454 | | /** count number of RRSIGs in a domain name rrset list */ |
7455 | | static size_t zonemd_simple_count_rrsig(struct auth_rrset* rrset, |
7456 | | struct auth_rrset** rrlist, size_t rrnum, |
7457 | | struct auth_zone* z, struct auth_data* node) |
7458 | 0 | { |
7459 | 0 | size_t i, count = 0; |
7460 | 0 | if(rrset) { |
7461 | 0 | size_t j; |
7462 | 0 | for(j = 0; j<rrset->data->count; j++) { |
7463 | 0 | if(rrsig_rdata_get_type_covered(rrset->data-> |
7464 | 0 | rr_data[j], rrset->data->rr_len[j]) == |
7465 | 0 | LDNS_RR_TYPE_ZONEMD && |
7466 | 0 | query_dname_compare(z->name, node->name)==0) { |
7467 | | /* omit RRSIGs over type ZONEMD at apex */ |
7468 | 0 | continue; |
7469 | 0 | } |
7470 | 0 | count++; |
7471 | 0 | } |
7472 | 0 | } |
7473 | 0 | for(i=0; i<rrnum; i++) { |
7474 | 0 | if(rrlist[i] && rrlist[i]->type == LDNS_RR_TYPE_ZONEMD && |
7475 | 0 | query_dname_compare(z->name, node->name)==0) { |
7476 | | /* omit RRSIGs over type ZONEMD at apex */ |
7477 | 0 | continue; |
7478 | 0 | } |
7479 | 0 | count += (rrlist[i]?rrlist[i]->data->rrsig_count:0); |
7480 | 0 | } |
7481 | 0 | return count; |
7482 | 0 | } |
7483 | | |
7484 | | /** allocate sparse rrset data for the number of entries in temp region */
7485 | | static int zonemd_simple_rrsig_allocs(struct regional* region, |
7486 | | struct packed_rrset_data* data, size_t count) |
7487 | 0 | { |
7488 | 0 | data->rr_len = regional_alloc(region, sizeof(*data->rr_len) * count); |
7489 | 0 | if(!data->rr_len) { |
7490 | 0 | return 0; |
7491 | 0 | } |
7492 | 0 | data->rr_ttl = regional_alloc(region, sizeof(*data->rr_ttl) * count); |
7493 | 0 | if(!data->rr_ttl) { |
7494 | 0 | return 0; |
7495 | 0 | } |
7496 | 0 | data->rr_data = regional_alloc(region, sizeof(*data->rr_data) * count); |
7497 | 0 | if(!data->rr_data) { |
7498 | 0 | return 0; |
7499 | 0 | } |
7500 | 0 | return 1; |
7501 | 0 | } |
7502 | | |
7503 | | /** add the RRSIGs from the rrs in the domain into the data */ |
7504 | | static void add_rrlist_rrsigs_into_data(struct packed_rrset_data* data, |
7505 | | size_t* done, struct auth_rrset** rrlist, size_t rrnum, |
7506 | | struct auth_zone* z, struct auth_data* node) |
7507 | 0 | { |
7508 | 0 | size_t i; |
7509 | 0 | for(i=0; i<rrnum; i++) { |
7510 | 0 | size_t j; |
7511 | 0 | if(!rrlist[i]) |
7512 | 0 | continue; |
7513 | 0 | if(rrlist[i] && rrlist[i]->type == LDNS_RR_TYPE_ZONEMD && |
7514 | 0 | query_dname_compare(z->name, node->name)==0) { |
7515 | | /* omit RRSIGs over type ZONEMD at apex */ |
7516 | 0 | continue; |
7517 | 0 | } |
7518 | 0 | for(j = 0; j<rrlist[i]->data->rrsig_count; j++) { |
7519 | 0 | data->rr_len[*done] = rrlist[i]->data->rr_len[rrlist[i]->data->count + j]; |
7520 | 0 | data->rr_ttl[*done] = rrlist[i]->data->rr_ttl[rrlist[i]->data->count + j]; |
7521 | | /* reference the rdata in the rrset, no need to |
7522 | | * copy it, it is no longer needed at the end of |
7523 | | * the routine */ |
7524 | 0 | data->rr_data[*done] = rrlist[i]->data->rr_data[rrlist[i]->data->count + j]; |
7525 | 0 | (*done)++; |
7526 | 0 | } |
7527 | 0 | } |
7528 | 0 | } |
7529 | | |
7530 | | static void add_rrset_into_data(struct packed_rrset_data* data, |
7531 | | size_t* done, struct auth_rrset* rrset, |
7532 | | struct auth_zone* z, struct auth_data* node) |
7533 | 0 | { |
7534 | 0 | if(rrset) { |
7535 | 0 | size_t j; |
7536 | 0 | for(j = 0; j<rrset->data->count; j++) { |
7537 | 0 | if(rrsig_rdata_get_type_covered(rrset->data-> |
7538 | 0 | rr_data[j], rrset->data->rr_len[j]) == |
7539 | 0 | LDNS_RR_TYPE_ZONEMD && |
7540 | 0 | query_dname_compare(z->name, node->name)==0) { |
7541 | | /* omit RRSIGs over type ZONEMD at apex */ |
7542 | 0 | continue; |
7543 | 0 | } |
7544 | 0 | data->rr_len[*done] = rrset->data->rr_len[j]; |
7545 | 0 | data->rr_ttl[*done] = rrset->data->rr_ttl[j]; |
7546 | | /* reference the rdata in the rrset, no need to |
7547 | | * copy it, it is no longer needed at the end of
7548 | | * the routine */ |
7549 | 0 | data->rr_data[*done] = rrset->data->rr_data[j]; |
7550 | 0 | (*done)++; |
7551 | 0 | } |
7552 | 0 | } |
7553 | 0 | } |
7554 | | |
7555 | | /** collate the RRSIGs using the simple scheme */ |
7556 | | static int zonemd_simple_rrsig(struct auth_zone* z, int hashalgo, |
7557 | | struct secalgo_hash* h, struct auth_data* node, |
7558 | | struct auth_rrset* rrset, struct auth_rrset** rrlist, size_t rrnum, |
7559 | | struct regional* region, struct sldns_buffer* buf, char** reason) |
7560 | 0 | { |
7561 | | /* the rrset pointer can be NULL, this means it is type RRSIG and |
7562 | | * there is no ordinary type RRSIG there. The RRSIGs are stored |
7563 | | * with the RRsets in their data. |
7564 | | * |
7565 | | * The RRset pointer can be nonNULL. This happens if there is |
7566 | | * no RR that is covered by the RRSIG for the domain. Then this |
7567 | | * RRSIG RR is stored in an rrset of type RRSIG. The other RRSIGs |
7568 | | * are stored in the rrset entries for the RRs in the rr list for |
7569 | | * the domain node. We need to collate the rrset's data, if any, and |
7570 | | * the rrlist's rrsigs */ |
7571 | | /* if this is the apex, omit RRSIGs that cover type ZONEMD */ |
7572 | | /* build rrsig rrset */ |
7573 | 0 | size_t done = 0; |
7574 | 0 | struct ub_packed_rrset_key key; |
7575 | 0 | struct packed_rrset_data data; |
7576 | 0 | memset(&key, 0, sizeof(key)); |
7577 | 0 | memset(&data, 0, sizeof(data)); |
7578 | 0 | key.entry.key = &key; |
7579 | 0 | key.entry.data = &data; |
7580 | 0 | key.rk.dname = node->name; |
7581 | 0 | key.rk.dname_len = node->namelen; |
7582 | 0 | key.rk.type = htons(LDNS_RR_TYPE_RRSIG); |
7583 | 0 | key.rk.rrset_class = htons(z->dclass); |
7584 | 0 | data.count = zonemd_simple_count_rrsig(rrset, rrlist, rrnum, z, node); |
7585 | 0 | if(!zonemd_simple_rrsig_allocs(region, &data, data.count)) { |
7586 | 0 | *reason = "out of memory"; |
7587 | 0 | regional_free_all(region); |
7588 | 0 | return 0; |
7589 | 0 | } |
7590 | | /* all the RRSIGs stored in the other rrsets for this domain node */ |
7591 | 0 | add_rrlist_rrsigs_into_data(&data, &done, rrlist, rrnum, z, node); |
7592 | | /* plus the RRSIGs stored in an rrset of type RRSIG for this node */ |
7593 | 0 | add_rrset_into_data(&data, &done, rrset, z, node); |
7594 | | |
7595 | | /* canonicalize */ |
7596 | 0 | if(!rrset_canonicalize_to_buffer(region, buf, &key)) { |
7597 | 0 | *reason = "out of memory"; |
7598 | 0 | regional_free_all(region); |
7599 | 0 | return 0; |
7600 | 0 | } |
7601 | 0 | regional_free_all(region); |
7602 | | |
7603 | | /* hash */ |
7604 | 0 | if(!zonemd_digest_update(hashalgo, h, sldns_buffer_begin(buf), |
7605 | 0 | sldns_buffer_limit(buf), reason)) { |
7606 | 0 | return 0; |
7607 | 0 | } |
7608 | 0 | return 1; |
7609 | 0 | } |
7610 | | |
7611 | | /** collate a domain's rrsets using the simple scheme */ |
7612 | | static int zonemd_simple_domain(struct auth_zone* z, int hashalgo, |
7613 | | struct secalgo_hash* h, struct auth_data* node, |
7614 | | struct regional* region, struct sldns_buffer* buf, char** reason) |
7615 | 0 | { |
7616 | 0 | const size_t rrlistsize = 65536; |
7617 | 0 | struct auth_rrset* rrlist[rrlistsize]; |
7618 | 0 | size_t i, rrnum = 0; |
7619 | | /* see if the domain is out of scope, the zone origin, |
7620 | | * that would be omitted */ |
7621 | 0 | if(!dname_subdomain_c(node->name, z->name)) |
7622 | 0 | return 1; /* continue */ |
7623 | | /* loop over the rrsets in ascending order. */ |
7624 | 0 | rrnum = authdata_rrsets_to_list(rrlist, rrlistsize, node->rrsets); |
7625 | 0 | addrrsigtype_if_needed(rrlist, rrlistsize, &rrnum, node); |
7626 | 0 | qsort(rrlist, rrnum, sizeof(*rrlist), rrlist_compare); |
7627 | 0 | for(i=0; i<rrnum; i++) { |
7628 | 0 | if(rrlist[i] && rrlist[i]->type == LDNS_RR_TYPE_ZONEMD && |
7629 | 0 | query_dname_compare(z->name, node->name) == 0) { |
7630 | | /* omit type ZONEMD at apex */ |
7631 | 0 | continue; |
7632 | 0 | } |
7633 | 0 | if(rrlist[i] == NULL || rrlist[i]->type == |
7634 | 0 | LDNS_RR_TYPE_RRSIG) { |
7635 | 0 | if(!zonemd_simple_rrsig(z, hashalgo, h, node, |
7636 | 0 | rrlist[i], rrlist, rrnum, region, buf, reason)) |
7637 | 0 | return 0; |
7638 | 0 | } else if(!zonemd_simple_rrset(z, hashalgo, h, node, |
7639 | 0 | rrlist[i], region, buf, reason)) { |
7640 | 0 | return 0; |
7641 | 0 | } |
7642 | 0 | } |
7643 | 0 | return 1; |
7644 | 0 | } |
7645 | | |
7646 | | /** collate the zone using the simple scheme */ |
7647 | | static int zonemd_simple_collate(struct auth_zone* z, int hashalgo, |
7648 | | struct secalgo_hash* h, struct regional* region, |
7649 | | struct sldns_buffer* buf, char** reason) |
7650 | 0 | { |
7651 | | /* our tree is sorted in canonical order, so we can just loop over |
7652 | | * the tree */ |
7653 | 0 | struct auth_data* n; |
7654 | 0 | RBTREE_FOR(n, struct auth_data*, &z->data) { |
7655 | 0 | if(!zonemd_simple_domain(z, hashalgo, h, n, region, buf, |
7656 | 0 | reason)) |
7657 | 0 | return 0; |
7658 | 0 | } |
7659 | 0 | return 1; |
7660 | 0 | } |
7661 | | |
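/* Taken together, zonemd_simple_collate() walks the zone tree in canonical
 * order and hashes the canonical wire form of every RRset, while skipping
 * names that do not fall under the zone apex, the apex ZONEMD RRset itself,
 * and the RRSIGs that cover it; this matches the SIMPLE collation scheme of
 * RFC 8976. */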
7662 | | int auth_zone_generate_zonemd_hash(struct auth_zone* z, int scheme, |
7663 | | int hashalgo, uint8_t* hash, size_t hashlen, size_t* resultlen, |
7664 | | struct regional* region, struct sldns_buffer* buf, char** reason) |
7665 | 0 | { |
7666 | 0 | struct secalgo_hash* h = zonemd_digest_init(hashalgo, reason); |
7667 | 0 | if(!h) { |
7668 | 0 | if(!*reason) |
7669 | 0 | *reason = "digest init fail"; |
7670 | 0 | return 0; |
7671 | 0 | } |
7672 | 0 | if(scheme == ZONEMD_SCHEME_SIMPLE) { |
7673 | 0 | if(!zonemd_simple_collate(z, hashalgo, h, region, buf, reason)) { |
7674 | 0 | if(!*reason) *reason = "scheme simple collate fail"; |
7675 | 0 | secalgo_hash_delete(h); |
7676 | 0 | return 0; |
7677 | 0 | } |
7678 | 0 | } |
7679 | 0 | if(!zonemd_digest_finish(hashalgo, h, hash, hashlen, resultlen, |
7680 | 0 | reason)) { |
7681 | 0 | secalgo_hash_delete(h); |
7682 | 0 | *reason = "digest finish fail"; |
7683 | 0 | return 0; |
7684 | 0 | } |
7685 | 0 | secalgo_hash_delete(h); |
7686 | 0 | return 1; |
7687 | 0 | } |
7688 | | |
7689 | | int auth_zone_generate_zonemd_check(struct auth_zone* z, int scheme, |
7690 | | int hashalgo, uint8_t* hash, size_t hashlen, struct regional* region, |
7691 | | struct sldns_buffer* buf, char** reason) |
7692 | 0 | { |
7693 | 0 | uint8_t gen[512]; |
7694 | 0 | size_t genlen = 0; |
7695 | 0 | *reason = NULL; |
7696 | 0 | if(!zonemd_hashalgo_supported(hashalgo)) { |
7697 | | /* allow it */ |
7698 | 0 | *reason = "unsupported algorithm"; |
7699 | 0 | return 1; |
7700 | 0 | } |
7701 | 0 | if(!zonemd_scheme_supported(scheme)) { |
7702 | | /* allow it */ |
7703 | 0 | *reason = "unsupported scheme"; |
7704 | 0 | return 1; |
7705 | 0 | } |
7706 | 0 | if(hashlen < 12) { |
7707 | | /* ZONEMD (RFC 8976) requires digests shorter than 12 octets to fail */
7708 | 0 | *reason = "digest length too small, less than 12"; |
7709 | 0 | return 0; |
7710 | 0 | } |
7711 | | /* generate digest */ |
7712 | 0 | if(!auth_zone_generate_zonemd_hash(z, scheme, hashalgo, gen, |
7713 | 0 | sizeof(gen), &genlen, region, buf, reason)) { |
7714 | | /* reason filled in by zonemd hash routine */ |
7715 | 0 | return 0; |
7716 | 0 | } |
7717 | | /* check digest length */ |
7718 | 0 | if(hashlen != genlen) { |
7719 | 0 | *reason = "incorrect digest length"; |
7720 | 0 | if(verbosity >= VERB_ALGO) { |
7721 | 0 | verbose(VERB_ALGO, "zonemd scheme=%d hashalgo=%d", |
7722 | 0 | scheme, hashalgo); |
7723 | 0 | log_hex("ZONEMD should be ", gen, genlen); |
7724 | 0 | log_hex("ZONEMD to check is", hash, hashlen); |
7725 | 0 | } |
7726 | 0 | return 0; |
7727 | 0 | } |
7728 | | /* check digest */ |
7729 | 0 | if(memcmp(hash, gen, genlen) != 0) { |
7730 | 0 | *reason = "incorrect digest"; |
7731 | 0 | if(verbosity >= VERB_ALGO) { |
7732 | 0 | verbose(VERB_ALGO, "zonemd scheme=%d hashalgo=%d", |
7733 | 0 | scheme, hashalgo); |
7734 | 0 | log_hex("ZONEMD should be ", gen, genlen); |
7735 | 0 | log_hex("ZONEMD to check is", hash, hashlen); |
7736 | 0 | } |
7737 | 0 | return 0; |
7738 | 0 | } |
7739 | 0 | return 1; |
7740 | 0 | } |
7741 | | |
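/* Note on the checks above: unsupported hash algorithms and schemes are
 * allowed through (the function returns success with a reason string), so a
 * zone can publish ZONEMD records with newer algorithms without breaking
 * verification, while RFC 8976 requires a digest shorter than 12 octets to be
 * treated as invalid, hence the hard failure for hashlen < 12. */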
7742 | | /** log auth zone message with zone name in front. */ |
7743 | | static void auth_zone_log(uint8_t* name, enum verbosity_value level, |
7744 | | const char* format, ...) ATTR_FORMAT(printf, 3, 4); |
7745 | | static void auth_zone_log(uint8_t* name, enum verbosity_value level, |
7746 | | const char* format, ...) |
7747 | 0 | { |
7748 | 0 | va_list args; |
7749 | 0 | va_start(args, format); |
7750 | 0 | if(verbosity >= level) { |
7751 | 0 | char str[255+1]; |
7752 | 0 | char msg[MAXSYSLOGMSGLEN]; |
7753 | 0 | dname_str(name, str); |
7754 | 0 | vsnprintf(msg, sizeof(msg), format, args); |
7755 | 0 | verbose(level, "auth zone %s %s", str, msg); |
7756 | 0 | } |
7757 | 0 | va_end(args); |
7758 | 0 | } |
7759 | | |
7760 | | /** ZONEMD, dnssec verify the rrset with the dnskey */ |
7761 | | static int zonemd_dnssec_verify_rrset(struct auth_zone* z, |
7762 | | struct module_env* env, struct module_stack* mods, |
7763 | | struct ub_packed_rrset_key* dnskey, struct auth_data* node, |
7764 | | struct auth_rrset* rrset, char** why_bogus, uint8_t* sigalg) |
7765 | 0 | { |
7766 | 0 | struct ub_packed_rrset_key pk; |
7767 | 0 | enum sec_status sec; |
7768 | 0 | struct val_env* ve; |
7769 | 0 | int m; |
7770 | 0 | m = modstack_find(mods, "validator"); |
7771 | 0 | if(m == -1) { |
7772 | 0 | auth_zone_log(z->name, VERB_ALGO, "zonemd dnssec verify: have " |
7773 | 0 | "DNSKEY chain of trust, but no validator module"); |
7774 | 0 | return 0; |
7775 | 0 | } |
7776 | 0 | ve = (struct val_env*)env->modinfo[m]; |
7777 | |
7778 | 0 | memset(&pk, 0, sizeof(pk)); |
7779 | 0 | pk.entry.key = &pk; |
7780 | 0 | pk.entry.data = rrset->data; |
7781 | 0 | pk.rk.dname = node->name; |
7782 | 0 | pk.rk.dname_len = node->namelen; |
7783 | 0 | pk.rk.type = htons(rrset->type); |
7784 | 0 | pk.rk.rrset_class = htons(z->dclass); |
7785 | 0 | if(verbosity >= VERB_ALGO) { |
7786 | 0 | char typestr[32]; |
7787 | 0 | typestr[0]=0; |
7788 | 0 | sldns_wire2str_type_buf(rrset->type, typestr, sizeof(typestr)); |
7789 | 0 | auth_zone_log(z->name, VERB_ALGO, |
7790 | 0 | "zonemd: verify %s RRset with DNSKEY", typestr); |
7791 | 0 | } |
7792 | 0 | sec = dnskeyset_verify_rrset(env, ve, &pk, dnskey, sigalg, why_bogus, NULL, |
7793 | 0 | LDNS_SECTION_ANSWER, NULL); |
7794 | 0 | if(sec == sec_status_secure) { |
7795 | 0 | return 1; |
7796 | 0 | } |
7797 | 0 | if(why_bogus) |
7798 | 0 | auth_zone_log(z->name, VERB_ALGO, "DNSSEC verify was bogus: %s", *why_bogus); |
7799 | 0 | return 0; |
7800 | 0 | } |
7801 | | |
7802 | | /** check for nsec3, the RR with params equal, if bitmap has the type */ |
7803 | | static int nsec3_of_param_has_type(struct auth_rrset* nsec3, int algo, |
7804 | | size_t iter, uint8_t* salt, size_t saltlen, uint16_t rrtype) |
7805 | 0 | { |
7806 | 0 | int i, count = (int)nsec3->data->count; |
7807 | 0 | struct ub_packed_rrset_key pk; |
7808 | 0 | memset(&pk, 0, sizeof(pk)); |
7809 | 0 | pk.entry.data = nsec3->data; |
7810 | 0 | for(i=0; i<count; i++) { |
7811 | 0 | int rralgo; |
7812 | 0 | size_t rriter, rrsaltlen; |
7813 | 0 | uint8_t* rrsalt; |
7814 | 0 | if(!nsec3_get_params(&pk, i, &rralgo, &rriter, &rrsalt, |
7815 | 0 | &rrsaltlen)) |
7816 | 0 | continue; /* no parameters, malformed */ |
7817 | 0 | if(rralgo != algo || rriter != iter || rrsaltlen != saltlen) |
7818 | 0 | continue; /* different parameters */ |
7819 | 0 | if(saltlen != 0) { |
7820 | 0 | if(rrsalt == NULL || salt == NULL) |
7821 | 0 | continue; |
7822 | 0 | if(memcmp(rrsalt, salt, saltlen) != 0) |
7823 | 0 | continue; /* different salt parameters */ |
7824 | 0 | } |
7825 | 0 | if(nsec3_has_type(&pk, i, rrtype)) |
7826 | 0 | return 1; |
7827 | 0 | } |
7828 | 0 | return 0; |
7829 | 0 | } |
7830 | | |
7831 | | /** Verify the absence of ZONEMD with DNSSEC by checking NSEC, NSEC3 type flag. |
7832 | | * return false on failure, reason contains description of failure. */ |
7833 | | static int zonemd_check_dnssec_absence(struct auth_zone* z, |
7834 | | struct module_env* env, struct module_stack* mods, |
7835 | | struct ub_packed_rrset_key* dnskey, struct auth_data* apex, |
7836 | | char** reason, char** why_bogus, uint8_t* sigalg) |
7837 | 0 | { |
7838 | 0 | struct auth_rrset* nsec = NULL; |
7839 | 0 | if(!apex) { |
7840 | 0 | *reason = "zone has no apex domain but ZONEMD missing"; |
7841 | 0 | return 0; |
7842 | 0 | } |
7843 | 0 | nsec = az_domain_rrset(apex, LDNS_RR_TYPE_NSEC); |
7844 | 0 | if(nsec) { |
7845 | 0 | struct ub_packed_rrset_key pk; |
7846 | | /* dnssec verify the NSEC */ |
7847 | 0 | if(!zonemd_dnssec_verify_rrset(z, env, mods, dnskey, apex, |
7848 | 0 | nsec, why_bogus, sigalg)) { |
7849 | 0 | *reason = "DNSSEC verify failed for NSEC RRset"; |
7850 | 0 | return 0; |
7851 | 0 | } |
7852 | | /* check type bitmap */ |
7853 | 0 | memset(&pk, 0, sizeof(pk)); |
7854 | 0 | pk.entry.data = nsec->data; |
7855 | 0 | if(nsec_has_type(&pk, LDNS_RR_TYPE_ZONEMD)) { |
7856 | 0 | *reason = "DNSSEC NSEC bitmap says type ZONEMD exists"; |
7857 | 0 | return 0; |
7858 | 0 | } |
7859 | 0 | auth_zone_log(z->name, VERB_ALGO, "zonemd DNSSEC NSEC verification of absence of ZONEMD secure"); |
7860 | 0 | } else { |
7861 | | /* NSEC3 perhaps ? */ |
7862 | 0 | int algo; |
7863 | 0 | size_t iter, saltlen; |
7864 | 0 | uint8_t* salt; |
7865 | 0 | struct auth_rrset* nsec3param = az_domain_rrset(apex, |
7866 | 0 | LDNS_RR_TYPE_NSEC3PARAM); |
7867 | 0 | struct auth_data* match; |
7868 | 0 | struct auth_rrset* nsec3; |
7869 | 0 | if(!nsec3param) { |
7870 | 0 | *reason = "zone has no NSEC information but ZONEMD missing"; |
7871 | 0 | return 0; |
7872 | 0 | } |
7873 | 0 | if(!az_nsec3_param(z, &algo, &iter, &salt, &saltlen)) { |
7874 | 0 | *reason = "zone has no NSEC information but ZONEMD missing"; |
7875 | 0 | return 0; |
7876 | 0 | } |
7877 | | /* find the NSEC3 record */ |
7878 | 0 | match = az_nsec3_find_exact(z, z->name, z->namelen, algo, |
7879 | 0 | iter, salt, saltlen); |
7880 | 0 | if(!match) { |
7881 | 0 | *reason = "zone has no NSEC3 domain for the apex but ZONEMD missing"; |
7882 | 0 | return 0; |
7883 | 0 | } |
7884 | 0 | nsec3 = az_domain_rrset(match, LDNS_RR_TYPE_NSEC3); |
7885 | 0 | if(!nsec3) { |
7886 | 0 | *reason = "zone has no NSEC3 RRset for the apex but ZONEMD missing"; |
7887 | 0 | return 0; |
7888 | 0 | } |
7889 | | /* dnssec verify the NSEC3 */ |
7890 | 0 | if(!zonemd_dnssec_verify_rrset(z, env, mods, dnskey, match, |
7891 | 0 | nsec3, why_bogus, sigalg)) { |
7892 | 0 | *reason = "DNSSEC verify failed for NSEC3 RRset"; |
7893 | 0 | return 0; |
7894 | 0 | } |
7895 | | /* check type bitmap */ |
7896 | 0 | if(nsec3_of_param_has_type(nsec3, algo, iter, salt, saltlen, |
7897 | 0 | LDNS_RR_TYPE_ZONEMD)) { |
7898 | 0 | *reason = "DNSSEC NSEC3 bitmap says type ZONEMD exists"; |
7899 | 0 | return 0; |
7900 | 0 | } |
7901 | 0 | auth_zone_log(z->name, VERB_ALGO, "zonemd DNSSEC NSEC3 verification of absence of ZONEMD secure"); |
7902 | 0 | } |
7903 | | |
7904 | 0 | return 1; |
7905 | 0 | } |
7906 | | |
7907 | | /** Verify the SOA and ZONEMD DNSSEC signatures. |
7908 | | * return false on failure, reason contains description of failure. */ |
7909 | | static int zonemd_check_dnssec_soazonemd(struct auth_zone* z, |
7910 | | struct module_env* env, struct module_stack* mods, |
7911 | | struct ub_packed_rrset_key* dnskey, struct auth_data* apex, |
7912 | | struct auth_rrset* zonemd_rrset, char** reason, char** why_bogus, |
7913 | | uint8_t* sigalg) |
7914 | 0 | { |
7915 | 0 | struct auth_rrset* soa; |
7916 | 0 | if(!apex) { |
7917 | 0 | *reason = "zone has no apex domain"; |
7918 | 0 | return 0; |
7919 | 0 | } |
7920 | 0 | soa = az_domain_rrset(apex, LDNS_RR_TYPE_SOA); |
7921 | 0 | if(!soa) { |
7922 | 0 | *reason = "zone has no SOA RRset"; |
7923 | 0 | return 0; |
7924 | 0 | } |
7925 | 0 | if(!zonemd_dnssec_verify_rrset(z, env, mods, dnskey, apex, soa, |
7926 | 0 | why_bogus, sigalg)) { |
7927 | 0 | *reason = "DNSSEC verify failed for SOA RRset"; |
7928 | 0 | return 0; |
7929 | 0 | } |
7930 | 0 | if(!zonemd_dnssec_verify_rrset(z, env, mods, dnskey, apex, |
7931 | 0 | zonemd_rrset, why_bogus, sigalg)) { |
7932 | 0 | *reason = "DNSSEC verify failed for ZONEMD RRset"; |
7933 | 0 | return 0; |
7934 | 0 | } |
7935 | 0 | auth_zone_log(z->name, VERB_ALGO, "zonemd DNSSEC verification of SOA and ZONEMD RRsets secure"); |
7936 | 0 | return 1; |
7937 | 0 | } |
7938 | | |
7939 | | /** |
7940 | | * Fail the ZONEMD verification. |
7941 | | * @param z: auth zone that fails. |
7942 | | * @param env: environment with config, to ignore failure or not. |
7943 | | * @param reason: failure string description. |
7944 | | * @param why_bogus: failure string for DNSSEC verification failure. |
7945 | | * @param result: strdup result in here if not NULL. |
7946 | | */ |
7947 | | static void auth_zone_zonemd_fail(struct auth_zone* z, struct module_env* env, |
7948 | | char* reason, char* why_bogus, char** result) |
7949 | 0 | { |
7950 | 0 | char zstr[255+1]; |
7951 | | /* if fail: log reason, and depending on config also take action |
7952 | | * and drop the zone, eg. it is gone from memory, set zone_expired */ |
7953 | 0 | dname_str(z->name, zstr); |
7954 | 0 | if(!reason) reason = "verification failed"; |
7955 | 0 | if(result) { |
7956 | 0 | if(why_bogus) { |
7957 | 0 | char res[1024]; |
7958 | 0 | snprintf(res, sizeof(res), "%s: %s", reason, |
7959 | 0 | why_bogus); |
7960 | 0 | *result = strdup(res); |
7961 | 0 | } else { |
7962 | 0 | *result = strdup(reason); |
7963 | 0 | } |
7964 | 0 | if(!*result) log_err("out of memory"); |
7965 | 0 | } else { |
7966 | 0 | log_warn("auth zone %s: ZONEMD verification failed: %s", zstr, reason); |
7967 | 0 | } |
7968 | |
7969 | 0 | if(env->cfg->zonemd_permissive_mode) { |
7970 | 0 | verbose(VERB_ALGO, "zonemd-permissive-mode enabled, " |
7971 | 0 | "not blocking zone %s", zstr); |
7972 | 0 | return; |
7973 | 0 | } |
7974 | | |
7975 | | /* expired means the zone gives servfail and is not used by |
7976 | | * lookup if fallback_enabled*/ |
7977 | 0 | z->zone_expired = 1; |
7978 | 0 | } |
7979 | | |
7980 | | /** |
7981 | | * Verify the zonemd with DNSSEC and hash check, with given key. |
7982 | | * @param z: auth zone. |
7983 | | * @param env: environment with config and temp buffers. |
7984 | | * @param mods: module stack with validator env for verification. |
7985 | | * @param dnskey: dnskey that we can use, or NULL. If nonnull, the key |
7986 | | * has been verified and is the start of the chain of trust. |
7987 | | * @param is_insecure: if true, the dnskey is not used, the zone is insecure. |
7988 | | * And dnssec is not used. It is DNSSEC secure insecure or not under |
7989 | | * a trust anchor. |
7990 | | * @param sigalg: if nonNULL provide algorithm downgrade protection. |
7991 | | * Otherwise one algorithm is enough. Must have space of ALGO_NEEDS_MAX+1. |
7992 | | * @param result: if not NULL result reason copied here. |
7993 | | */ |
7994 | | static void |
7995 | | auth_zone_verify_zonemd_with_key(struct auth_zone* z, struct module_env* env, |
7996 | | struct module_stack* mods, struct ub_packed_rrset_key* dnskey, |
7997 | | int is_insecure, char** result, uint8_t* sigalg) |
7998 | 0 | { |
7999 | 0 | char* reason = NULL, *why_bogus = NULL; |
8000 | 0 | struct auth_data* apex = NULL; |
8001 | 0 | struct auth_rrset* zonemd_rrset = NULL; |
8002 | 0 | int zonemd_absent = 0, zonemd_absence_dnssecok = 0; |
8003 | | |
8004 | | /* see if ZONEMD is present or absent. */ |
8005 | 0 | apex = az_find_name(z, z->name, z->namelen); |
8006 | 0 | if(!apex) { |
8007 | 0 | zonemd_absent = 1; |
8008 | 0 | } else { |
8009 | 0 | zonemd_rrset = az_domain_rrset(apex, LDNS_RR_TYPE_ZONEMD); |
8010 | 0 | if(!zonemd_rrset || zonemd_rrset->data->count==0) { |
8011 | 0 | zonemd_absent = 1; |
8012 | 0 | zonemd_rrset = NULL; |
8013 | 0 | } |
8014 | 0 | } |
8015 | | |
8016 | | /* if no DNSSEC, done. */ |
8017 | | /* if no ZONEMD, and DNSSEC, use DNSKEY to verify NSEC or NSEC3 for |
8018 | | * zone apex. Check ZONEMD bit is turned off or else fail */ |
8019 | | /* if ZONEMD, and DNSSEC, check DNSSEC signature on SOA and ZONEMD, |
8020 | | * or else fail */ |
8021 | 0 | if(!dnskey && !is_insecure) { |
8022 | 0 | auth_zone_zonemd_fail(z, env, "DNSKEY missing", NULL, result); |
8023 | 0 | return; |
8024 | 0 | } else if(!zonemd_rrset && dnskey && !is_insecure) { |
8025 | | /* fetch, DNSSEC verify, and check NSEC/NSEC3 */ |
8026 | 0 | if(!zonemd_check_dnssec_absence(z, env, mods, dnskey, apex, |
8027 | 0 | &reason, &why_bogus, sigalg)) { |
8028 | 0 | auth_zone_zonemd_fail(z, env, reason, why_bogus, result); |
8029 | 0 | return; |
8030 | 0 | } |
8031 | 0 | zonemd_absence_dnssecok = 1; |
8032 | 0 | } else if(zonemd_rrset && dnskey && !is_insecure) { |
8033 | | /* check DNSSEC verify of SOA and ZONEMD */ |
8034 | 0 | if(!zonemd_check_dnssec_soazonemd(z, env, mods, dnskey, apex, |
8035 | 0 | zonemd_rrset, &reason, &why_bogus, sigalg)) { |
8036 | 0 | auth_zone_zonemd_fail(z, env, reason, why_bogus, result); |
8037 | 0 | return; |
8038 | 0 | } |
8039 | 0 | } |
8040 | | |
8041 | 0 | if(zonemd_absent && z->zonemd_reject_absence) { |
8042 | 0 | auth_zone_zonemd_fail(z, env, "ZONEMD absent and that is not allowed by config", NULL, result); |
8043 | 0 | return; |
8044 | 0 | } |
8045 | 0 | if(zonemd_absent && zonemd_absence_dnssecok) { |
8046 | 0 | auth_zone_log(z->name, VERB_ALGO, "DNSSEC verified nonexistence of ZONEMD"); |
8047 | 0 | if(result) { |
8048 | 0 | *result = strdup("DNSSEC verified nonexistence of ZONEMD"); |
8049 | 0 | if(!*result) log_err("out of memory"); |
8050 | 0 | } |
8051 | 0 | return; |
8052 | 0 | } |
8053 | 0 | if(zonemd_absent) { |
8054 | 0 | auth_zone_log(z->name, VERB_ALGO, "no ZONEMD present"); |
8055 | 0 | if(result) { |
8056 | 0 | *result = strdup("no ZONEMD present"); |
8057 | 0 | if(!*result) log_err("out of memory"); |
8058 | 0 | } |
8059 | 0 | return; |
8060 | 0 | } |
8061 | | |
8062 | | /* check ZONEMD checksum and report or else fail. */ |
8063 | 0 | if(!auth_zone_zonemd_check_hash(z, env, &reason)) { |
8064 | 0 | auth_zone_zonemd_fail(z, env, reason, NULL, result); |
8065 | 0 | return; |
8066 | 0 | } |
8067 | | |
8068 | | /* success! log the success */ |
8069 | 0 | if(reason) |
8070 | 0 | auth_zone_log(z->name, VERB_ALGO, "ZONEMD %s", reason); |
8071 | 0 | else auth_zone_log(z->name, VERB_ALGO, "ZONEMD verification successful"); |
8072 | 0 | if(result) { |
8073 | 0 | if(reason) |
8074 | 0 | *result = strdup(reason); |
8075 | 0 | else *result = strdup("ZONEMD verification successful"); |
8076 | 0 | if(!*result) log_err("out of memory"); |
8077 | 0 | } |
8078 | 0 | } |
8079 | | |
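/* Summary of the branches above: with no chain of trust (is_insecure) the
 * DNSSEC checks are skipped and the function falls through to the absence
 * handling and the hash check; with a verified DNSKEY and no ZONEMD RRset the
 * absence is checked against the NSEC or NSEC3 type bitmap and accepted
 * unless zonemd_reject_absence is configured; with a verified DNSKEY and a
 * ZONEMD RRset the SOA and ZONEMD RRSIGs are verified first, and only then is
 * the digest recomputed and compared. */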
8080 | | /** |
8081 | | * verify the zone DNSKEY rrset from the trust anchor |
8082 | | * This is possible because the anchor is for the zone itself, and can |
8083 | | * thus apply straight to the zone DNSKEY set. |
8084 | | * @param z: the auth zone. |
8085 | | * @param env: environment with time and temp buffers. |
8086 | | * @param mods: module stack for validator environment for dnssec validation. |
8087 | | * @param anchor: trust anchor to use |
8088 | | * @param is_insecure: returned, true if the zone is securely insecure. |
8089 | | * @param why_bogus: if the routine fails, returns the failure reason. |
8090 | | * @param keystorage: where to store the ub_packed_rrset_key that is created |
8091 | | * on success. A pointer to it is returned on success. |
8092 | | * @return the dnskey RRset, reference to zone data and keystorage, or |
8093 | | * NULL on failure. |
8094 | | */ |
8095 | | static struct ub_packed_rrset_key* |
8096 | | zonemd_get_dnskey_from_anchor(struct auth_zone* z, struct module_env* env, |
8097 | | struct module_stack* mods, struct trust_anchor* anchor, |
8098 | | int* is_insecure, char** why_bogus, |
8099 | | struct ub_packed_rrset_key* keystorage) |
8100 | 0 | { |
8101 | 0 | struct auth_data* apex; |
8102 | 0 | struct auth_rrset* dnskey_rrset; |
8103 | 0 | enum sec_status sec; |
8104 | 0 | struct val_env* ve; |
8105 | 0 | int m; |
8106 | |
8107 | 0 | apex = az_find_name(z, z->name, z->namelen); |
8108 | 0 | if(!apex) { |
8109 | 0 | *why_bogus = "have trust anchor, but zone has no apex domain for DNSKEY"; |
8110 | 0 | return 0; |
8111 | 0 | } |
8112 | 0 | dnskey_rrset = az_domain_rrset(apex, LDNS_RR_TYPE_DNSKEY); |
8113 | 0 | if(!dnskey_rrset || dnskey_rrset->data->count==0) { |
8114 | 0 | *why_bogus = "have trust anchor, but zone has no DNSKEY"; |
8115 | 0 | return 0; |
8116 | 0 | } |
8117 | | |
8118 | 0 | m = modstack_find(mods, "validator"); |
8119 | 0 | if(m == -1) { |
8120 | 0 | *why_bogus = "have trust anchor, but no validator module"; |
8121 | 0 | return 0; |
8122 | 0 | } |
8123 | 0 | ve = (struct val_env*)env->modinfo[m]; |
8124 | |
8125 | 0 | memset(keystorage, 0, sizeof(*keystorage)); |
8126 | 0 | keystorage->entry.key = keystorage; |
8127 | 0 | keystorage->entry.data = dnskey_rrset->data; |
8128 | 0 | keystorage->rk.dname = apex->name; |
8129 | 0 | keystorage->rk.dname_len = apex->namelen; |
8130 | 0 | keystorage->rk.type = htons(LDNS_RR_TYPE_DNSKEY); |
8131 | 0 | keystorage->rk.rrset_class = htons(z->dclass); |
8132 | 0 | auth_zone_log(z->name, VERB_QUERY, |
8133 | 0 | "zonemd: verify DNSKEY RRset with trust anchor"); |
8134 | 0 | sec = val_verify_DNSKEY_with_TA(env, ve, keystorage, anchor->ds_rrset, |
8135 | 0 | anchor->dnskey_rrset, NULL, why_bogus, NULL, NULL); |
8136 | 0 | regional_free_all(env->scratch); |
8137 | 0 | if(sec == sec_status_secure) { |
8138 | | /* success */ |
8139 | 0 | *is_insecure = 0; |
8140 | 0 | return keystorage; |
8141 | 0 | } else if(sec == sec_status_insecure) { |
8142 | | /* insecure */ |
8143 | 0 | *is_insecure = 1; |
8144 | 0 | } else { |
8145 | | /* bogus */ |
8146 | 0 | *is_insecure = 0; |
8147 | 0 | auth_zone_log(z->name, VERB_ALGO, |
8148 | 0 | "zonemd: verify DNSKEY RRset with trust anchor failed: %s", *why_bogus); |
8149 | 0 | } |
8150 | 0 | return NULL; |
8151 | 0 | } |
8152 | | |
8153 | | /** verify the DNSKEY from the zone with looked up DS record */ |
8154 | | static struct ub_packed_rrset_key* |
8155 | | auth_zone_verify_zonemd_key_with_ds(struct auth_zone* z, |
8156 | | struct module_env* env, struct module_stack* mods, |
8157 | | struct ub_packed_rrset_key* ds, int* is_insecure, char** why_bogus, |
8158 | | struct ub_packed_rrset_key* keystorage, uint8_t* sigalg) |
8159 | 0 | { |
8160 | 0 | struct auth_data* apex; |
8161 | 0 | struct auth_rrset* dnskey_rrset; |
8162 | 0 | enum sec_status sec; |
8163 | 0 | struct val_env* ve; |
8164 | 0 | int m; |
8165 | | |
8166 | | /* fetch DNSKEY from zone data */ |
8167 | 0 | apex = az_find_name(z, z->name, z->namelen); |
8168 | 0 | if(!apex) { |
8169 | 0 | *why_bogus = "in verifywithDS, zone has no apex"; |
8170 | 0 | return NULL; |
8171 | 0 | } |
8172 | 0 | dnskey_rrset = az_domain_rrset(apex, LDNS_RR_TYPE_DNSKEY); |
8173 | 0 | if(!dnskey_rrset || dnskey_rrset->data->count==0) { |
8174 | 0 | *why_bogus = "in verifywithDS, zone has no DNSKEY"; |
8175 | 0 | return NULL; |
8176 | 0 | } |
8177 | | |
8178 | 0 | m = modstack_find(mods, "validator"); |
8179 | 0 | if(m == -1) { |
8180 | 0 | *why_bogus = "in verifywithDS, have no validator module"; |
8181 | 0 | return NULL; |
8182 | 0 | } |
8183 | 0 | ve = (struct val_env*)env->modinfo[m]; |
8184 | |
8185 | 0 | memset(keystorage, 0, sizeof(*keystorage)); |
8186 | 0 | keystorage->entry.key = keystorage; |
8187 | 0 | keystorage->entry.data = dnskey_rrset->data; |
8188 | 0 | keystorage->rk.dname = apex->name; |
8189 | 0 | keystorage->rk.dname_len = apex->namelen; |
8190 | 0 | keystorage->rk.type = htons(LDNS_RR_TYPE_DNSKEY); |
8191 | 0 | keystorage->rk.rrset_class = htons(z->dclass); |
8192 | 0 | auth_zone_log(z->name, VERB_QUERY, "zonemd: verify zone DNSKEY with DS"); |
8193 | 0 | sec = val_verify_DNSKEY_with_DS(env, ve, keystorage, ds, sigalg, |
8194 | 0 | why_bogus, NULL, NULL); |
8195 | 0 | regional_free_all(env->scratch); |
8196 | 0 | if(sec == sec_status_secure) { |
8197 | | /* success */ |
8198 | 0 | return keystorage; |
8199 | 0 | } else if(sec == sec_status_insecure) { |
8200 | | /* insecure */ |
8201 | 0 | *is_insecure = 1; |
8202 | 0 | } else { |
8203 | | /* bogus */ |
8204 | 0 | *is_insecure = 0; |
8205 | 0 | if(*why_bogus == NULL) |
8206 | 0 | *why_bogus = "verify failed"; |
8207 | 0 | auth_zone_log(z->name, VERB_ALGO, |
8208 | 0 | "zonemd: verify DNSKEY RRset with DS failed: %s", |
8209 | 0 | *why_bogus); |
8210 | 0 | } |
8211 | 0 | return NULL; |
8212 | 0 | } |
8213 | | |
8214 | | /** callback for ZONEMD lookup of DNSKEY */ |
8215 | | void auth_zonemd_dnskey_lookup_callback(void* arg, int rcode, sldns_buffer* buf, |
8216 | | enum sec_status sec, char* why_bogus, int ATTR_UNUSED(was_ratelimited)) |
8217 | 0 | { |
8218 | 0 | struct auth_zone* z = (struct auth_zone*)arg; |
8219 | 0 | struct module_env* env; |
8220 | 0 | char* reason = NULL, *ds_bogus = NULL, *typestr="DNSKEY"; |
8221 | 0 | struct ub_packed_rrset_key* dnskey = NULL, *ds = NULL; |
8222 | 0 | int is_insecure = 0, downprot; |
8223 | 0 | struct ub_packed_rrset_key keystorage; |
8224 | 0 | uint8_t sigalg[ALGO_NEEDS_MAX+1]; |
8225 | |
8226 | 0 | lock_rw_wrlock(&z->lock); |
8227 | 0 | env = z->zonemd_callback_env; |
8228 | | /* release the env variable so another worker can pick up the |
8229 | | * ZONEMD verification task if it wants to */ |
8230 | 0 | z->zonemd_callback_env = NULL; |
8231 | 0 | if(!env || env->outnet->want_to_quit || z->zone_deleted) { |
8232 | 0 | lock_rw_unlock(&z->lock); |
8233 | 0 | return; /* stop on quit */ |
8234 | 0 | } |
8235 | 0 | if(z->zonemd_callback_qtype == LDNS_RR_TYPE_DS) |
8236 | 0 | typestr = "DS"; |
8237 | 0 | downprot = env->cfg->harden_algo_downgrade; |
8238 | | |
8239 | | /* process result */ |
8240 | 0 | if(sec == sec_status_bogus) { |
8241 | 0 | reason = why_bogus; |
8242 | 0 | if(!reason) { |
8243 | 0 | if(z->zonemd_callback_qtype == LDNS_RR_TYPE_DNSKEY) |
8244 | 0 | reason = "lookup of DNSKEY was bogus"; |
8245 | 0 | else reason = "lookup of DS was bogus"; |
8246 | 0 | } |
8247 | 0 | auth_zone_log(z->name, VERB_ALGO, |
8248 | 0 | "zonemd lookup of %s was bogus: %s", typestr, reason); |
8249 | 0 | } else if(rcode == LDNS_RCODE_NOERROR) { |
8250 | 0 | uint16_t wanted_qtype = z->zonemd_callback_qtype; |
8251 | 0 | struct regional* temp = env->scratch; |
8252 | 0 | struct query_info rq; |
8253 | 0 | struct reply_info* rep; |
8254 | 0 | memset(&rq, 0, sizeof(rq)); |
8255 | 0 | rep = parse_reply_in_temp_region(buf, temp, &rq); |
8256 | 0 | if(rep && rq.qtype == wanted_qtype && |
8257 | 0 | query_dname_compare(z->name, rq.qname) == 0 && |
8258 | 0 | FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_NOERROR) { |
8259 | | /* parsed successfully */ |
8260 | 0 | struct ub_packed_rrset_key* answer = |
8261 | 0 | reply_find_answer_rrset(&rq, rep); |
8262 | 0 | if(answer && sec == sec_status_secure) { |
8263 | 0 | if(z->zonemd_callback_qtype == LDNS_RR_TYPE_DNSKEY) |
8264 | 0 | dnskey = answer; |
8265 | 0 | else ds = answer; |
8266 | 0 | auth_zone_log(z->name, VERB_ALGO, |
8267 | 0 | "zonemd lookup of %s was secure", typestr); |
8268 | 0 | } else if(sec == sec_status_secure && !answer) { |
8269 | 0 | is_insecure = 1; |
8270 | 0 | auth_zone_log(z->name, VERB_ALGO, |
8271 | 0 | "zonemd lookup of %s has no content, but is secure, treat as insecure", typestr); |
8272 | 0 | } else if(sec == sec_status_insecure) { |
8273 | 0 | is_insecure = 1; |
8274 | 0 | auth_zone_log(z->name, VERB_ALGO, |
8275 | 0 | "zonemd lookup of %s was insecure", typestr); |
8276 | 0 | } else if(sec == sec_status_indeterminate) { |
8277 | 0 | is_insecure = 1; |
8278 | 0 | auth_zone_log(z->name, VERB_ALGO, |
8279 | 0 | "zonemd lookup of %s was indeterminate, treat as insecure", typestr); |
8280 | 0 | } else { |
8281 | 0 | auth_zone_log(z->name, VERB_ALGO, |
8282 | 0 | "zonemd lookup of %s has nodata", typestr); |
8283 | 0 | if(z->zonemd_callback_qtype == LDNS_RR_TYPE_DNSKEY) |
8284 | 0 | reason = "lookup of DNSKEY has nodata"; |
8285 | 0 | else reason = "lookup of DS has nodata"; |
8286 | 0 | } |
8287 | 0 | } else if(rep && rq.qtype == wanted_qtype && |
8288 | 0 | query_dname_compare(z->name, rq.qname) == 0 && |
8289 | 0 | FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_NXDOMAIN && |
8290 | 0 | sec == sec_status_secure) { |
8291 | | /* secure nxdomain, so the zone is like some RPZ zone |
8292 | | * that does not exist in the wider internet, with |
8293 | | * a secure nxdomain answer outside of it. So we |
8294 | | * treat the zonemd zone without a dnssec chain of |
8295 | | * trust, as insecure. */ |
8296 | 0 | is_insecure = 1; |
8297 | 0 | auth_zone_log(z->name, VERB_ALGO, |
8298 | 0 | "zonemd lookup of %s was secure NXDOMAIN, treat as insecure", typestr); |
8299 | 0 | } else if(rep && rq.qtype == wanted_qtype && |
8300 | 0 | query_dname_compare(z->name, rq.qname) == 0 && |
8301 | 0 | FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_NXDOMAIN && |
8302 | 0 | sec == sec_status_insecure) { |
8303 | 0 | is_insecure = 1; |
8304 | 0 | auth_zone_log(z->name, VERB_ALGO, |
8305 | 0 | "zonemd lookup of %s was insecure NXDOMAIN, treat as insecure", typestr); |
8306 | 0 | } else if(rep && rq.qtype == wanted_qtype && |
8307 | 0 | query_dname_compare(z->name, rq.qname) == 0 && |
8308 | 0 | FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_NXDOMAIN && |
8309 | 0 | sec == sec_status_indeterminate) { |
8310 | 0 | is_insecure = 1; |
8311 | 0 | auth_zone_log(z->name, VERB_ALGO, |
8312 | 0 | "zonemd lookup of %s was indeterminate NXDOMAIN, treat as insecure", typestr); |
8313 | 0 | } else { |
8314 | 0 | auth_zone_log(z->name, VERB_ALGO, |
8315 | 0 | "zonemd lookup of %s has no answer", typestr); |
8316 | 0 | if(z->zonemd_callback_qtype == LDNS_RR_TYPE_DNSKEY) |
8317 | 0 | reason = "lookup of DNSKEY has no answer"; |
8318 | 0 | else reason = "lookup of DS has no answer"; |
8319 | 0 | } |
8320 | 0 | } else { |
8321 | 0 | auth_zone_log(z->name, VERB_ALGO, |
8322 | 0 | "zonemd lookup of %s failed", typestr); |
8323 | 0 | if(z->zonemd_callback_qtype == LDNS_RR_TYPE_DNSKEY) |
8324 | 0 | reason = "lookup of DNSKEY failed"; |
8325 | 0 | else reason = "lookup of DS failed"; |
8326 | 0 | } |
8327 | |
8328 | 0 | if(!reason && !is_insecure && !dnskey && ds) { |
8329 | 0 | dnskey = auth_zone_verify_zonemd_key_with_ds(z, env, |
8330 | 0 | &env->mesh->mods, ds, &is_insecure, &ds_bogus, |
8331 | 0 | &keystorage, downprot?sigalg:NULL); |
8332 | 0 | if(!dnskey && !is_insecure && !reason) |
8333 | 0 | reason = "DNSKEY verify with DS failed"; |
8334 | 0 | } |
8335 | |
8336 | 0 | if(reason) { |
8337 | 0 | auth_zone_zonemd_fail(z, env, reason, ds_bogus, NULL); |
8338 | 0 | lock_rw_unlock(&z->lock); |
8339 | 0 | return; |
8340 | 0 | } |
8341 | | |
8342 | 0 | auth_zone_verify_zonemd_with_key(z, env, &env->mesh->mods, dnskey, |
8343 | 0 | is_insecure, NULL, downprot?sigalg:NULL); |
8344 | 0 | regional_free_all(env->scratch); |
8345 | 0 | lock_rw_unlock(&z->lock); |
8346 | 0 | } |
8347 | | |
8348 | | /** lookup DNSKEY for ZONEMD verification */ |
8349 | | static int |
8350 | | zonemd_lookup_dnskey(struct auth_zone* z, struct module_env* env) |
8351 | 0 | { |
8352 | 0 | struct query_info qinfo; |
8353 | 0 | uint16_t qflags = BIT_RD; |
8354 | 0 | struct edns_data edns; |
8355 | 0 | sldns_buffer* buf = env->scratch_buffer; |
8356 | 0 | int fetch_ds = 0; |
8357 | |
8358 | 0 | if(!z->fallback_enabled) { |
8359 | | /* we cannot actually get the DNSKEY, because it is in the |
8360 | | * zone we have ourselves, and it is not served yet |
8361 | | * (possibly), so fetch type DS */ |
8362 | 0 | fetch_ds = 1; |
8363 | 0 | } |
8364 | 0 | if(z->zonemd_callback_env) { |
8365 | | /* another worker is already working on the callback |
8366 | | * for the DNSKEY lookup for ZONEMD verification. |
8367 | | * We do not also have to do ZONEMD verification, let that |
8368 | | * worker do it */ |
8369 | 0 | auth_zone_log(z->name, VERB_ALGO, |
8370 | 0 | "zonemd needs lookup of %s and that already is worked on by another worker", (fetch_ds?"DS":"DNSKEY")); |
8371 | 0 | return 1; |
8372 | 0 | } |
8373 | | |
8374 | | /* use mesh_new_callback to lookup the DNSKEY, |
8375 | | * and then wait for them to be looked up (in cache, or query) */ |
8376 | 0 | qinfo.qname_len = z->namelen; |
8377 | 0 | qinfo.qname = z->name; |
8378 | 0 | qinfo.qclass = z->dclass; |
8379 | 0 | if(fetch_ds) |
8380 | 0 | qinfo.qtype = LDNS_RR_TYPE_DS; |
8381 | 0 | else qinfo.qtype = LDNS_RR_TYPE_DNSKEY; |
8382 | 0 | qinfo.local_alias = NULL; |
8383 | 0 | if(verbosity >= VERB_ALGO) { |
8384 | 0 | char buf1[512]; |
8385 | 0 | char buf2[LDNS_MAX_DOMAINLEN+1]; |
8386 | 0 | dname_str(z->name, buf2); |
8387 | 0 | snprintf(buf1, sizeof(buf1), "auth zone %s: lookup %s " |
8388 | 0 | "for zonemd verification", buf2, |
8389 | 0 | (fetch_ds?"DS":"DNSKEY")); |
8390 | 0 | log_query_info(VERB_ALGO, buf1, &qinfo); |
8391 | 0 | } |
8392 | 0 | edns.edns_present = 1; |
8393 | 0 | edns.ext_rcode = 0; |
8394 | 0 | edns.edns_version = 0; |
8395 | 0 | edns.bits = EDNS_DO; |
8396 | 0 | edns.opt_list_in = NULL; |
8397 | 0 | edns.opt_list_out = NULL; |
8398 | 0 | edns.opt_list_inplace_cb_out = NULL; |
8399 | 0 | if(sldns_buffer_capacity(buf) < 65535) |
8400 | 0 | edns.udp_size = (uint16_t)sldns_buffer_capacity(buf); |
8401 | 0 | else edns.udp_size = 65535; |
8402 | | |
8403 | | /* store the worker-specific module env for the callback. |
8404 | | * We can then reference this when the callback executes */ |
8405 | 0 | z->zonemd_callback_env = env; |
8406 | 0 | z->zonemd_callback_qtype = qinfo.qtype; |
8407 | | /* the callback can be called straight away */ |
8408 | 0 | lock_rw_unlock(&z->lock); |
8409 | 0 | if(!mesh_new_callback(env->mesh, &qinfo, qflags, &edns, buf, 0, |
8410 | 0 | &auth_zonemd_dnskey_lookup_callback, z, 0)) { |
8411 | 0 | lock_rw_wrlock(&z->lock); |
8412 | 0 | log_err("out of memory lookup of %s for zonemd", |
8413 | 0 | (fetch_ds?"DS":"DNSKEY")); |
8414 | 0 | return 0; |
8415 | 0 | } |
8416 | 0 | lock_rw_wrlock(&z->lock); |
8417 | 0 | return 1; |
8418 | 0 | } |
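
The lookup function above releases the zone write lock around mesh_new_callback because, as its comment notes, the callback can run straight away, and the callback itself operates under the zone lock. What follows is a minimal, self-contained sketch of that locking pattern only; fake_zone, register_callback and lookup_done are hypothetical stand-ins, not Unbound's API.

/* A minimal sketch (not Unbound code) of the locking pattern used
 * above: the zone write lock is dropped before the callback is
 * registered, because the callback may be invoked straight away and
 * itself works under the same lock. Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

struct fake_zone {
	pthread_rwlock_t lock;
	int verified;
};

/* hypothetical callback: takes the zone lock itself, like the real
 * ZONEMD lookup callback does */
static void lookup_done(struct fake_zone* z)
{
	pthread_rwlock_wrlock(&z->lock);
	z->verified = 1;
	pthread_rwlock_unlock(&z->lock);
}

/* hypothetical registration that may call the callback immediately,
 * e.g. when the answer is already in cache */
static int register_callback(struct fake_zone* z,
	void (*cb)(struct fake_zone*))
{
	cb(z); /* the "straight away" case */
	return 1;
}

/* caller holds the write lock on entry and expects it held on return */
static int start_lookup(struct fake_zone* z)
{
	pthread_rwlock_unlock(&z->lock); /* avoid deadlock with the callback */
	if(!register_callback(z, &lookup_done)) {
		pthread_rwlock_wrlock(&z->lock);
		return 0;
	}
	pthread_rwlock_wrlock(&z->lock);
	return 1;
}

int main(void)
{
	struct fake_zone z;
	z.verified = 0;
	pthread_rwlock_init(&z.lock, NULL);
	pthread_rwlock_wrlock(&z.lock);
	if(start_lookup(&z))
		printf("verified=%d\n", z.verified);
	pthread_rwlock_unlock(&z.lock);
	pthread_rwlock_destroy(&z.lock);
	return 0;
}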
8419 | | |
8420 | | void auth_zone_verify_zonemd(struct auth_zone* z, struct module_env* env, |
8421 | | struct module_stack* mods, char** result, int offline, int only_online) |
8422 | 0 | { |
8423 | 0 | char* reason = NULL, *why_bogus = NULL; |
8424 | 0 | struct trust_anchor* anchor = NULL; |
8425 | 0 | struct ub_packed_rrset_key* dnskey = NULL; |
8426 | 0 | struct ub_packed_rrset_key keystorage; |
8427 | 0 | int is_insecure = 0; |
8428 | | /* verify the ZONEMD if present.
8429 | | * If not present, check if its absence is allowed by DNSSEC */
8430 | 0 | if(!z->zonemd_check) |
8431 | 0 | return; |
8432 | 0 | if(z->data.count == 0) |
8433 | 0 | return; /* no data */ |
8434 | | |
8435 | | /* if the zone is under a trust anchor */
8436 | | /* if it is equal to the trust anchor - get its DNSKEYs verified */
8437 | | /* else, find the chain of trust by looking up the DNSKEY for the zone */
8438 | | /* the result of that, if insecure, means no DNSSEC for the ZONEMD,
8439 | | * otherwise we have the zone DNSKEY for the DNSSEC verification. */
8440 | 0 | if(env->anchors) |
8441 | 0 | anchor = anchors_lookup(env->anchors, z->name, z->namelen, |
8442 | 0 | z->dclass); |
8443 | 0 | if(anchor && anchor->numDS == 0 && anchor->numDNSKEY == 0) { |
8444 | | /* domain-insecure trust anchor for unsigned zones */ |
8445 | 0 | lock_basic_unlock(&anchor->lock); |
8446 | 0 | if(only_online) |
8447 | 0 | return; |
8448 | 0 | dnskey = NULL; |
8449 | 0 | is_insecure = 1; |
8450 | 0 | } else if(anchor && query_dname_compare(z->name, anchor->name) == 0) { |
8451 | 0 | if(only_online) { |
8452 | 0 | lock_basic_unlock(&anchor->lock); |
8453 | 0 | return; |
8454 | 0 | } |
8455 | | /* equal to trustanchor, no need for online lookups */ |
8456 | 0 | dnskey = zonemd_get_dnskey_from_anchor(z, env, mods, anchor, |
8457 | 0 | &is_insecure, &why_bogus, &keystorage); |
8458 | 0 | lock_basic_unlock(&anchor->lock); |
8459 | 0 | if(!dnskey && !reason && !is_insecure) { |
8460 | 0 | reason = "verify DNSKEY RRset with trust anchor failed"; |
8461 | 0 | } |
8462 | 0 | } else if(anchor) { |
8463 | 0 | lock_basic_unlock(&anchor->lock); |
8464 | | /* perform online lookups */ |
8465 | 0 | if(offline) |
8466 | 0 | return; |
8467 | | /* setup online lookups, and wait for them */ |
8468 | 0 | if(zonemd_lookup_dnskey(z, env)) { |
8469 | | /* wait for the lookup */ |
8470 | 0 | return; |
8471 | 0 | } |
8472 | 0 | reason = "could not lookup DNSKEY for chain of trust"; |
8473 | 0 | } else { |
8474 | | /* the zone is not under a trust anchor */ |
8475 | 0 | if(only_online) |
8476 | 0 | return; |
8477 | 0 | dnskey = NULL; |
8478 | 0 | is_insecure = 1; |
8479 | 0 | } |
8480 | | |
8481 | 0 | if(reason) { |
8482 | 0 | auth_zone_zonemd_fail(z, env, reason, why_bogus, result); |
8483 | 0 | return; |
8484 | 0 | } |
8485 | | |
8486 | 0 | auth_zone_verify_zonemd_with_key(z, env, mods, dnskey, is_insecure, |
8487 | 0 | result, NULL); |
8488 | 0 | regional_free_all(env->scratch); |
8489 | 0 | } |
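
The trust-anchor handling above reduces to a four-way classification: no anchor or an empty (domain-insecure) anchor means the ZONEMD is checked without DNSSEC, an anchor equal to the zone name supplies the DNSKEY directly, and a zone below an anchor triggers an online lookup. The sketch below restates that decision with a hypothetical enum and classify() helper rather than Unbound's structures.

/* Sketch of the trust-anchor decision above, reduced to a pure
 * classification. The enum and classify() are hypothetical; the real
 * code works on struct trust_anchor and struct auth_zone. */
#include <stdio.h>

enum zonemd_key_source {
	KEYSRC_INSECURE,      /* no DNSSEC applies to the ZONEMD check */
	KEYSRC_ANCHOR,        /* verify the DNSKEY with the anchor itself */
	KEYSRC_ONLINE_LOOKUP  /* fetch DNSKEY/DS through the resolver mesh */
};

/* have_anchor: a trust anchor covers the zone name;
 * anchor_is_empty: the anchor has no DS and no DNSKEY (domain-insecure);
 * anchor_equals_zone: the anchor name equals the zone name */
static enum zonemd_key_source
classify(int have_anchor, int anchor_is_empty, int anchor_equals_zone)
{
	if(!have_anchor)
		return KEYSRC_INSECURE;      /* zone not under a trust anchor */
	if(anchor_is_empty)
		return KEYSRC_INSECURE;      /* domain-insecure for unsigned zones */
	if(anchor_equals_zone)
		return KEYSRC_ANCHOR;        /* no online lookups needed */
	return KEYSRC_ONLINE_LOOKUP;         /* build the chain of trust online */
}

int main(void)
{
	/* prints 0 0 1 2 for the four cases handled above */
	printf("%d %d %d %d\n",
		classify(0, 0, 0), classify(1, 1, 0),
		classify(1, 0, 1), classify(1, 0, 0));
	return 0;
}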
8490 | | |
8491 | | void auth_zones_pickup_zonemd_verify(struct auth_zones* az, |
8492 | | struct module_env* env) |
8493 | 0 | { |
8494 | 0 | struct auth_zone key; |
8495 | 0 | uint8_t savezname[255+1]; |
8496 | 0 | size_t savezname_len; |
8497 | 0 | struct auth_zone* z; |
8498 | 0 | key.node.key = &key; |
8499 | 0 | lock_rw_rdlock(&az->lock); |
8500 | 0 | RBTREE_FOR(z, struct auth_zone*, &az->ztree) { |
8501 | 0 | lock_rw_wrlock(&z->lock); |
8502 | 0 | if(!z->zonemd_check) { |
8503 | 0 | lock_rw_unlock(&z->lock); |
8504 | 0 | continue; |
8505 | 0 | } |
8506 | 0 | key.dclass = z->dclass; |
8507 | 0 | key.namelabs = z->namelabs; |
8508 | 0 | if(z->namelen > sizeof(savezname)) { |
8509 | 0 | lock_rw_unlock(&z->lock); |
8510 | 0 | log_err("auth_zones_pickup_zonemd_verify: zone name too long"); |
8511 | 0 | continue; |
8512 | 0 | } |
8513 | 0 | savezname_len = z->namelen; |
8514 | 0 | memmove(savezname, z->name, z->namelen); |
8515 | 0 | lock_rw_unlock(&az->lock); |
8516 | 0 | auth_zone_verify_zonemd(z, env, &env->mesh->mods, NULL, 0, 1); |
8517 | 0 | lock_rw_unlock(&z->lock); |
8518 | 0 | lock_rw_rdlock(&az->lock); |
8519 | | /* find the zone we had before; it is not deleted,
8520 | | * because zone deletion is flagged and only processed
8521 | | * at apply_cfg time */
8522 | 0 | key.namelen = savezname_len; |
8523 | 0 | key.name = savezname; |
8524 | 0 | z = (struct auth_zone*)rbtree_search(&az->ztree, &key); |
8525 | 0 | if(!z) |
8526 | 0 | break; |
8527 | 0 | } |
8528 | 0 | lock_rw_unlock(&az->lock); |
8529 | 0 | } |
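
The walk above drops the collection lock while each zone is verified, so it copies the zone name aside and re-searches the tree by that saved name before continuing. The sketch below illustrates the same save-key/re-search pattern on a plain array; zones[], find_exact() and verify_zone() are stand-ins for Unbound's rbtree and zone code, and the lock handling is only indicated in comments.

/* Sketch of the save-key/re-search iteration pattern above, on a plain
 * array instead of Unbound's rbtree. All names are hypothetical. */
#include <stdio.h>
#include <string.h>

static const char* zones[] = { "example.com.", "example.net.", "example.org." };
static int nzones = 3;

/* stand-in for rbtree_search: exact match by name, -1 if gone */
static int find_exact(const char* name)
{
	int i;
	for(i = 0; i < nzones; i++)
		if(strcmp(zones[i], name) == 0)
			return i;
	return -1;
}

/* stand-in for the per-zone work done without the collection lock */
static void verify_zone(const char* name)
{
	printf("verify %s\n", name);
}

int main(void)
{
	char savename[256];
	int i = 0;
	while(i >= 0 && i < nzones) {
		/* copy the key aside before the collection lock is dropped */
		snprintf(savename, sizeof(savename), "%s", zones[i]);
		/* ...collection lock released, per-zone work done... */
		verify_zone(savename);
		/* ...collection lock re-acquired; the set may have changed,
		 * so re-search by the saved name before walking on */
		i = find_exact(savename);
		if(i < 0)
			break; /* the entry is gone; stop, like the code above */
		i++; /* continue with the next entry */
	}
	return 0;
}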