/src/unbound/libunbound/libworker.c
Line | Count | Source |
1 | | /* |
2 | | * libunbound/libworker.c - worker thread or process that resolves |
3 | | * |
4 | | * Copyright (c) 2007, NLnet Labs. All rights reserved. |
5 | | * |
6 | | * This software is open source. |
7 | | * |
8 | | * Redistribution and use in source and binary forms, with or without |
9 | | * modification, are permitted provided that the following conditions |
10 | | * are met: |
11 | | * |
12 | | * Redistributions of source code must retain the above copyright notice, |
13 | | * this list of conditions and the following disclaimer. |
14 | | * |
15 | | * Redistributions in binary form must reproduce the above copyright notice, |
16 | | * this list of conditions and the following disclaimer in the documentation |
17 | | * and/or other materials provided with the distribution. |
18 | | * |
19 | | * Neither the name of the NLNET LABS nor the names of its contributors may |
20 | | * be used to endorse or promote products derived from this software without |
21 | | * specific prior written permission. |
22 | | * |
23 | | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
24 | | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
25 | | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
26 | | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
27 | | * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
28 | | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED |
29 | | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
30 | | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
31 | | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
32 | | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
33 | | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
34 | | */ |
35 | | |
36 | | /** |
37 | | * \file |
38 | | * |
39 | | * This file contains the worker process or thread that performs |
40 | | * the DNS resolving and validation. The worker is started by a |
41 | | * procedure call; in the background it keeps running until told to |
42 | | * exit, in the foreground it returns from the call when done. |
43 | | */ |
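For orientation, the foreground path implemented below (libworker_fg and its callbacks) is what services a blocking ub_resolve() call from the application, while the background path services ub_resolve_async(). A minimal sketch of the foreground case, assuming only the public libunbound API from <unbound.h>, with error handling abbreviated; it is not part of this file:

#include <stdio.h>
#include <unbound.h>

int main(void)
{
        struct ub_ctx* ctx = ub_ctx_create();
        struct ub_result* result = NULL;
        if(!ctx) return 1;
        /* blocking resolve: runs a foreground libworker inside the call */
        if(ub_resolve(ctx, "www.nlnetlabs.nl", 1 /* TYPE A */,
                1 /* CLASS IN */, &result) == 0 && result->havedata)
                printf("rcode %d canonname %s\n", result->rcode,
                        result->canonname ? result->canonname : "(none)");
        ub_resolve_free(result);
        ub_ctx_delete(ctx);
        return 0;
}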
44 | | #include "config.h" |
45 | | #ifdef HAVE_SSL |
46 | | #include <openssl/ssl.h> |
47 | | #endif |
48 | | #include "libunbound/libworker.h" |
49 | | #include "libunbound/context.h" |
50 | | #include "libunbound/unbound.h" |
51 | | #include "libunbound/worker.h" |
52 | | #include "libunbound/unbound-event.h" |
53 | | #include "services/outside_network.h" |
54 | | #include "services/mesh.h" |
55 | | #include "services/localzone.h" |
56 | | #include "services/cache/rrset.h" |
57 | | #include "services/outbound_list.h" |
58 | | #include "services/authzone.h" |
59 | | #include "util/fptr_wlist.h" |
60 | | #include "util/module.h" |
61 | | #include "util/regional.h" |
62 | | #include "util/random.h" |
63 | | #include "util/config_file.h" |
64 | | #include "util/netevent.h" |
65 | | #include "util/storage/lookup3.h" |
66 | | #include "util/storage/slabhash.h" |
67 | | #include "util/net_help.h" |
68 | | #include "util/data/dname.h" |
69 | | #include "util/data/msgreply.h" |
70 | | #include "util/data/msgencode.h" |
71 | | #include "util/tube.h" |
72 | | #include "iterator/iter_fwd.h" |
73 | | #include "iterator/iter_hints.h" |
74 | | #include "sldns/sbuffer.h" |
75 | | #include "sldns/str2wire.h" |
76 | | #ifdef USE_DNSTAP |
77 | | #include "dnstap/dtstream.h" |
78 | | #endif |
79 | | |
80 | | #ifdef HAVE_TARGETCONDITIONALS_H |
81 | | #include <TargetConditionals.h> |
82 | | #endif |
83 | | |
84 | | #if (defined(TARGET_OS_TV) && TARGET_OS_TV) || (defined(TARGET_OS_WATCH) && TARGET_OS_WATCH) |
85 | | #undef HAVE_FORK |
86 | | #endif |
87 | | |
88 | | /** handle new query command for bg worker */ |
89 | | static void handle_newq(struct libworker* w, uint8_t* buf, uint32_t len); |
90 | | |
91 | | /** delete libworker env */ |
92 | | static void |
93 | | libworker_delete_env(struct libworker* w) |
94 | 0 | { |
95 | 0 | if(w->env) { |
96 | 0 | outside_network_quit_prepare(w->back); |
97 | 0 | mesh_delete(w->env->mesh); |
98 | 0 | context_release_alloc(w->ctx, w->env->alloc, |
99 | 0 | !w->is_bg || w->is_bg_thread); |
100 | 0 | sldns_buffer_free(w->env->scratch_buffer); |
101 | 0 | regional_destroy(w->env->scratch); |
102 | 0 | forwards_delete(w->env->fwds); |
103 | 0 | hints_delete(w->env->hints); |
104 | 0 | ub_randfree(w->env->rnd); |
105 | 0 | free(w->env); |
106 | 0 | } |
107 | 0 | #ifdef HAVE_SSL |
108 | 0 | SSL_CTX_free(w->sslctx); |
109 | 0 | #endif |
110 | 0 | outside_network_delete(w->back); |
111 | 0 | } |
112 | | |
113 | | /** delete libworker struct */ |
114 | | static void |
115 | | libworker_delete(struct libworker* w) |
116 | 0 | { |
117 | 0 | if(!w) return; |
118 | 0 | libworker_delete_env(w); |
119 | 0 | comm_base_delete(w->base); |
120 | 0 | free(w); |
121 | 0 | } |
122 | | |
123 | | void |
124 | | libworker_delete_event(struct libworker* w) |
125 | 0 | { |
126 | 0 | if(!w) return; |
127 | 0 | libworker_delete_env(w); |
128 | 0 | comm_base_delete_no_base(w->base); |
129 | 0 | free(w); |
130 | 0 | } |
131 | | |
132 | | /** setup fresh libworker struct */ |
133 | | static struct libworker* |
134 | | libworker_setup(struct ub_ctx* ctx, int is_bg, struct ub_event_base* eb) |
135 | 0 | { |
136 | 0 | struct libworker* w = (struct libworker*)calloc(1, sizeof(*w)); |
137 | 0 | struct config_file* cfg = ctx->env->cfg; |
138 | 0 | int* ports; |
139 | 0 | int numports; |
140 | 0 | if(!w) return NULL; |
141 | 0 | w->is_bg = is_bg; |
142 | 0 | w->ctx = ctx; |
143 | 0 | w->env = (struct module_env*)malloc(sizeof(*w->env)); |
144 | 0 | if(!w->env) { |
145 | 0 | free(w); |
146 | 0 | return NULL; |
147 | 0 | } |
148 | 0 | *w->env = *ctx->env; |
149 | 0 | w->env->alloc = context_obtain_alloc(ctx, !w->is_bg || w->is_bg_thread); |
150 | 0 | if(!w->env->alloc) { |
151 | 0 | libworker_delete(w); |
152 | 0 | return NULL; |
153 | 0 | } |
154 | 0 | w->thread_num = w->env->alloc->thread_num; |
155 | 0 | alloc_set_id_cleanup(w->env->alloc, &libworker_alloc_cleanup, w); |
156 | 0 | if(!w->is_bg || w->is_bg_thread) { |
157 | 0 | lock_basic_lock(&ctx->cfglock); |
158 | 0 | } |
159 | 0 | w->env->scratch = regional_create_custom(cfg->msg_buffer_size); |
160 | 0 | w->env->scratch_buffer = sldns_buffer_new(cfg->msg_buffer_size); |
161 | 0 | w->env->fwds = forwards_create(); |
162 | 0 | if(w->env->fwds && !forwards_apply_cfg(w->env->fwds, cfg)) { |
163 | 0 | forwards_delete(w->env->fwds); |
164 | 0 | w->env->fwds = NULL; |
165 | 0 | } |
166 | 0 | w->env->hints = hints_create(); |
167 | 0 | if(w->env->hints && !hints_apply_cfg(w->env->hints, cfg)) { |
168 | 0 | hints_delete(w->env->hints); |
169 | 0 | w->env->hints = NULL; |
170 | 0 | } |
171 | 0 | w->sslctx = connect_sslctx_create(NULL, NULL, |
172 | 0 | cfg->tls_cert_bundle, cfg->tls_win_cert); |
173 | 0 | if(!w->sslctx) { |
174 | | /* to make the setup fail after unlock */ |
175 | 0 | hints_delete(w->env->hints); |
176 | 0 | w->env->hints = NULL; |
177 | 0 | } |
178 | 0 | if(!w->is_bg || w->is_bg_thread) { |
179 | 0 | lock_basic_unlock(&ctx->cfglock); |
180 | 0 | } |
181 | 0 | if(!w->env->scratch || !w->env->scratch_buffer || !w->env->fwds || |
182 | 0 | !w->env->hints) { |
183 | 0 | libworker_delete(w); |
184 | 0 | return NULL; |
185 | 0 | } |
186 | 0 | w->env->worker = (struct worker*)w; |
187 | 0 | w->env->probe_timer = NULL; |
188 | 0 | if(!w->is_bg || w->is_bg_thread) { |
189 | 0 | lock_basic_lock(&ctx->cfglock); |
190 | 0 | } |
191 | 0 | if(!(w->env->rnd = ub_initstate(ctx->seed_rnd))) { |
192 | 0 | if(!w->is_bg || w->is_bg_thread) { |
193 | 0 | lock_basic_unlock(&ctx->cfglock); |
194 | 0 | } |
195 | 0 | libworker_delete(w); |
196 | 0 | return NULL; |
197 | 0 | } |
198 | 0 | if(!w->is_bg || w->is_bg_thread) { |
199 | 0 | lock_basic_unlock(&ctx->cfglock); |
200 | 0 | } |
201 | 0 | if(1) { |
202 | | /* primitive lockout for threading: if it overwrites another |
203 | | * thread it is like wiping the cache (which is likely empty |
204 | | * at the start) */ |
205 | | /* note we are holding the ctx lock in normal threaded |
206 | | * cases so that is solved properly, it is only for many ctx |
207 | | * in different threads that this may clash */ |
208 | 0 | static int done_raninit = 0; |
209 | 0 | if(!done_raninit) { |
210 | 0 | done_raninit = 1; |
211 | 0 | hash_set_raninit((uint32_t)ub_random(w->env->rnd)); |
212 | 0 | } |
213 | 0 | } |
214 | |
215 | 0 | if(eb) |
216 | 0 | w->base = comm_base_create_event(eb); |
217 | 0 | else w->base = comm_base_create(0); |
218 | 0 | if(!w->base) { |
219 | 0 | libworker_delete(w); |
220 | 0 | return NULL; |
221 | 0 | } |
222 | 0 | w->env->worker_base = w->base; |
223 | 0 | if(!w->is_bg || w->is_bg_thread) { |
224 | 0 | lock_basic_lock(&ctx->cfglock); |
225 | 0 | } |
226 | 0 | numports = cfg_condense_ports(cfg, &ports); |
227 | 0 | if(numports == 0) { |
228 | 0 | if(!w->is_bg || w->is_bg_thread) { |
229 | 0 | lock_basic_unlock(&ctx->cfglock); |
230 | 0 | } |
231 | 0 | libworker_delete(w); |
232 | 0 | return NULL; |
233 | 0 | } |
234 | 0 | w->back = outside_network_create(w->base, cfg->msg_buffer_size, |
235 | 0 | (size_t)cfg->outgoing_num_ports, cfg->out_ifs, |
236 | 0 | cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6, |
237 | 0 | cfg->do_tcp?cfg->outgoing_num_tcp:0, cfg->ip_dscp, |
238 | 0 | w->env->infra_cache, w->env->rnd, cfg->use_caps_bits_for_id, |
239 | 0 | ports, numports, cfg->unwanted_threshold, |
240 | 0 | cfg->outgoing_tcp_mss, &libworker_alloc_cleanup, w, |
241 | 0 | cfg->do_udp || cfg->udp_upstream_without_downstream, w->sslctx, |
242 | 0 | cfg->delay_close, cfg->tls_use_sni, NULL, cfg->udp_connect, |
243 | 0 | cfg->max_reuse_tcp_queries, cfg->tcp_reuse_timeout, |
244 | 0 | cfg->tcp_auth_query_timeout); |
245 | 0 | w->env->outnet = w->back; |
246 | 0 | if(!w->is_bg || w->is_bg_thread) { |
247 | 0 | lock_basic_unlock(&ctx->cfglock); |
248 | 0 | } |
249 | 0 | free(ports); |
250 | 0 | if(!w->back) { |
251 | 0 | libworker_delete(w); |
252 | 0 | return NULL; |
253 | 0 | } |
254 | 0 | w->env->mesh = mesh_create(&ctx->mods, w->env); |
255 | 0 | if(!w->env->mesh) { |
256 | 0 | libworker_delete(w); |
257 | 0 | return NULL; |
258 | 0 | } |
259 | 0 | w->env->send_query = &libworker_send_query; |
260 | 0 | w->env->detach_subs = &mesh_detach_subs; |
261 | 0 | w->env->attach_sub = &mesh_attach_sub; |
262 | 0 | w->env->add_sub = &mesh_add_sub; |
263 | 0 | w->env->kill_sub = &mesh_state_delete; |
264 | 0 | w->env->detect_cycle = &mesh_detect_cycle; |
265 | 0 | comm_base_timept(w->base, &w->env->now, &w->env->now_tv); |
266 | 0 | return w; |
267 | 0 | } |
268 | | |
269 | | struct libworker* libworker_create_event(struct ub_ctx* ctx, |
270 | | struct ub_event_base* eb) |
271 | 0 | { |
272 | 0 | return libworker_setup(ctx, 0, eb); |
273 | 0 | } |
274 | | |
275 | | /** handle cancel command for bg worker */ |
276 | | static void |
277 | | handle_cancel(struct libworker* w, uint8_t* buf, uint32_t len) |
278 | 0 | { |
279 | 0 | struct ctx_query* q; |
280 | 0 | if(w->is_bg_thread) { |
281 | 0 | lock_basic_lock(&w->ctx->cfglock); |
282 | 0 | q = context_deserialize_cancel(w->ctx, buf, len); |
283 | 0 | lock_basic_unlock(&w->ctx->cfglock); |
284 | 0 | } else { |
285 | 0 | q = context_deserialize_cancel(w->ctx, buf, len); |
286 | 0 | } |
287 | 0 | if(!q) { |
288 | | /* probably simply lookup failed, i.e. the message had been |
289 | | * processed and answered before the cancel arrived */ |
290 | 0 | return; |
291 | 0 | } |
292 | 0 | q->cancelled = 1; |
293 | 0 | free(buf); |
294 | 0 | } |
295 | | |
296 | | /** do control command coming into bg server */ |
297 | | static void |
298 | | libworker_do_cmd(struct libworker* w, uint8_t* msg, uint32_t len) |
299 | 0 | { |
300 | 0 | switch(context_serial_getcmd(msg, len)) { |
301 | 0 | default: |
302 | 0 | case UB_LIBCMD_ANSWER: |
303 | 0 | log_err("unknown command for bg worker %d", |
304 | 0 | (int)context_serial_getcmd(msg, len)); |
305 | | /* and fall through to quit */ |
306 | | /* fallthrough */ |
307 | 0 | case UB_LIBCMD_QUIT: |
308 | 0 | free(msg); |
309 | 0 | comm_base_exit(w->base); |
310 | 0 | break; |
311 | 0 | case UB_LIBCMD_NEWQUERY: |
312 | 0 | handle_newq(w, msg, len); |
313 | 0 | break; |
314 | 0 | case UB_LIBCMD_CANCEL: |
315 | 0 | handle_cancel(w, msg, len); |
316 | 0 | break; |
317 | 0 | } |
318 | 0 | } |
319 | | |
320 | | /** handle control command coming into server */ |
321 | | void |
322 | | libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube), |
323 | | uint8_t* msg, size_t len, int err, void* arg) |
324 | 0 | { |
325 | 0 | struct libworker* w = (struct libworker*)arg; |
326 | |
327 | 0 | if(err != 0) { |
328 | 0 | free(msg); |
329 | | /* it is of no use to go on, exit */ |
330 | 0 | comm_base_exit(w->base); |
331 | 0 | return; |
332 | 0 | } |
333 | 0 | libworker_do_cmd(w, msg, len); /* also frees the buf */ |
334 | 0 | } |
335 | | |
336 | | /** the background thread func */ |
337 | | static void* |
338 | | libworker_dobg(void* arg) |
339 | 0 | { |
340 | | /* setup */ |
341 | 0 | uint32_t m; |
342 | 0 | struct libworker* w = (struct libworker*)arg; |
343 | 0 | struct ub_ctx* ctx; |
344 | 0 | if(!w) { |
345 | 0 | log_err("libunbound bg worker init failed, nomem"); |
346 | 0 | return NULL; |
347 | 0 | } |
348 | 0 | ctx = w->ctx; |
349 | 0 | log_thread_set(&w->thread_num); |
350 | | #ifdef THREADS_DISABLED |
351 | | /* we are forked */ |
352 | | w->is_bg_thread = 0; |
353 | | /* close non-used parts of the pipes */ |
354 | | tube_close_write(ctx->qq_pipe); |
355 | | tube_close_read(ctx->rr_pipe); |
356 | | #endif |
357 | 0 | if(!tube_setup_bg_listen(ctx->qq_pipe, w->base, |
358 | 0 | libworker_handle_control_cmd, w)) { |
359 | 0 | log_err("libunbound bg worker init failed, no bglisten"); |
360 | 0 | return NULL; |
361 | 0 | } |
362 | 0 | if(!tube_setup_bg_write(ctx->rr_pipe, w->base)) { |
363 | 0 | log_err("libunbound bg worker init failed, no bgwrite"); |
364 | 0 | return NULL; |
365 | 0 | } |
366 | | |
367 | | /* do the work */ |
368 | 0 | comm_base_dispatch(w->base); |
369 | | |
370 | | /* cleanup */ |
371 | 0 | m = UB_LIBCMD_QUIT; |
372 | 0 | w->want_quit = 1; |
373 | 0 | tube_remove_bg_listen(w->ctx->qq_pipe); |
374 | 0 | tube_remove_bg_write(w->ctx->rr_pipe); |
375 | 0 | libworker_delete(w); |
376 | 0 | (void)tube_write_msg(ctx->rr_pipe, (uint8_t*)&m, |
377 | 0 | (uint32_t)sizeof(m), 0); |
378 | | #ifdef THREADS_DISABLED |
379 | | /* close pipes from forked process before exit */ |
380 | | tube_close_read(ctx->qq_pipe); |
381 | | tube_close_write(ctx->rr_pipe); |
382 | | #endif |
383 | 0 | return NULL; |
384 | 0 | } |
385 | | |
386 | | int libworker_bg(struct ub_ctx* ctx) |
387 | 0 | { |
388 | 0 | struct libworker* w; |
389 | | /* fork or threadcreate */ |
390 | 0 | lock_basic_lock(&ctx->cfglock); |
391 | 0 | if(ctx->dothread) { |
392 | 0 | lock_basic_unlock(&ctx->cfglock); |
393 | 0 | w = libworker_setup(ctx, 1, NULL); |
394 | 0 | if(!w) return UB_NOMEM; |
395 | 0 | w->is_bg_thread = 1; |
396 | 0 | ctx->thread_worker = w; |
397 | | #ifdef ENABLE_LOCK_CHECKS |
398 | | w->thread_num = 1; /* for nicer DEBUG checklocks */ |
399 | | #endif |
400 | 0 | ub_thread_create(&ctx->bg_tid, libworker_dobg, w); |
401 | 0 | } else { |
402 | 0 | lock_basic_unlock(&ctx->cfglock); |
403 | | #ifndef HAVE_FORK |
404 | | /* no fork on windows */ |
405 | | return UB_FORKFAIL; |
406 | | #else /* HAVE_FORK */ |
407 | 0 | switch((ctx->bg_pid=fork())) { |
408 | 0 | case 0: |
409 | 0 | w = libworker_setup(ctx, 1, NULL); |
410 | 0 | if(!w) fatal_exit("out of memory"); |
411 | | /* close non-used parts of the pipes */ |
412 | 0 | tube_close_write(ctx->qq_pipe); |
413 | 0 | tube_close_read(ctx->rr_pipe); |
414 | 0 | (void)libworker_dobg(w); |
415 | 0 | exit(0); |
416 | 0 | break; |
417 | 0 | case -1: |
418 | 0 | return UB_FORKFAIL; |
419 | 0 | default: |
420 | | /* close non-used parts, so that the worker |
421 | | * bgprocess gets 'pipe closed' when the |
422 | | * main process exits */ |
423 | 0 | tube_close_read(ctx->qq_pipe); |
424 | 0 | tube_close_write(ctx->rr_pipe); |
425 | 0 | break; |
426 | 0 | } |
427 | 0 | #endif /* HAVE_FORK */ |
428 | 0 | } |
429 | 0 | return UB_NOERROR; |
430 | 0 | } |
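The background worker created by libworker_bg() above is the other half of that picture: it is what answers ub_resolve_async() on the application side. A hedged sketch of that flow, again assuming only the public <unbound.h> API; the first async call is what triggers the thread or fork set up here:

#include <stdio.h>
#include <unbound.h>

/* called from ub_wait()/ub_process() when the background worker answers */
static void done_cb(void* mydata, int err, struct ub_result* result)
{
        (void)mydata;
        if(err != 0) return;
        printf("async rcode %d havedata %d\n", result->rcode,
                result->havedata);
        ub_resolve_free(result);
}

int main(void)
{
        struct ub_ctx* ctx = ub_ctx_create();
        int async_id = 0;
        if(!ctx) return 1;
        ub_resolve_async(ctx, "www.nlnetlabs.nl", 1 /* TYPE A */,
                1 /* CLASS IN */, NULL, done_cb, &async_id);
        ub_wait(ctx);   /* blocks until outstanding async queries finish */
        ub_ctx_delete(ctx);
        return 0;
}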
431 | | |
432 | | /** insert canonname */ |
433 | | static int |
434 | | fill_canon(struct ub_result* res, uint8_t* s) |
435 | 0 | { |
436 | 0 | char buf[255+2]; |
437 | 0 | dname_str(s, buf); |
438 | 0 | res->canonname = strdup(buf); |
439 | 0 | return res->canonname != 0; |
440 | 0 | } |
441 | | |
442 | | /** fill data into result */ |
443 | | static int |
444 | | fill_res(struct ub_result* res, struct ub_packed_rrset_key* answer, |
445 | | uint8_t* finalcname, struct query_info* rq, struct reply_info* rep) |
446 | 0 | { |
447 | 0 | size_t i; |
448 | 0 | struct packed_rrset_data* data; |
449 | 0 | res->ttl = 0; |
450 | 0 | if(!answer) { |
451 | 0 | if(finalcname) { |
452 | 0 | if(!fill_canon(res, finalcname)) |
453 | 0 | return 0; /* out of memory */ |
454 | 0 | } |
455 | 0 | if(rep->rrset_count != 0) |
456 | 0 | res->ttl = (int)rep->ttl; |
457 | 0 | res->data = (char**)calloc(1, sizeof(char*)); |
458 | 0 | if(!res->data) |
459 | 0 | return 0; /* out of memory */ |
460 | 0 | res->len = (int*)calloc(1, sizeof(int)); |
461 | 0 | if(!res->len) { |
462 | 0 | free(res->data); |
463 | 0 | res->data = NULL; |
464 | 0 | return 0; /* out of memory */ |
465 | 0 | } |
466 | 0 | return 1; |
467 | 0 | } |
468 | 0 | data = (struct packed_rrset_data*)answer->entry.data; |
469 | 0 | if(query_dname_compare(rq->qname, answer->rk.dname) != 0) { |
470 | 0 | if(!fill_canon(res, answer->rk.dname)) |
471 | 0 | return 0; /* out of memory */ |
472 | 0 | } else res->canonname = NULL; |
473 | 0 | res->data = (char**)calloc(data->count+1, sizeof(char*)); |
474 | 0 | if(!res->data) |
475 | 0 | return 0; /* out of memory */ |
476 | 0 | res->len = (int*)calloc(data->count+1, sizeof(int)); |
477 | 0 | if(!res->len) { |
478 | 0 | free(res->data); |
479 | 0 | res->data = NULL; |
480 | 0 | return 0; /* out of memory */ |
481 | 0 | } |
482 | 0 | for(i=0; i<data->count; i++) { |
483 | | /* remove rdlength from rdata */ |
484 | 0 | res->len[i] = (int)(data->rr_len[i] - 2); |
485 | 0 | res->data[i] = memdup(data->rr_data[i]+2, (size_t)res->len[i]); |
486 | 0 | if(!res->data[i]) { |
487 | 0 | size_t j; |
488 | 0 | for(j=0; j<i; j++) { |
489 | 0 | free(res->data[j]); |
490 | 0 | res->data[j] = NULL; |
491 | 0 | } |
492 | 0 | free(res->data); |
493 | 0 | res->data = NULL; |
494 | 0 | free(res->len); |
495 | 0 | res->len = NULL; |
496 | 0 | return 0; /* out of memory */ |
497 | 0 | } |
498 | 0 | } |
499 | | /* ttl for positive answers, from CNAME and answer RRs */ |
500 | 0 | if(data->count != 0) { |
501 | 0 | size_t j; |
502 | 0 | res->ttl = (int)data->ttl; |
503 | 0 | for(j=0; j<rep->an_numrrsets; j++) { |
504 | 0 | struct packed_rrset_data* d = |
505 | 0 | (struct packed_rrset_data*)rep->rrsets[j]-> |
506 | 0 | entry.data; |
507 | 0 | if((int)d->ttl < res->ttl) |
508 | 0 | res->ttl = (int)d->ttl; |
509 | 0 | } |
510 | 0 | } |
511 | | /* ttl for negative answers */ |
512 | 0 | if(data->count == 0 && rep->rrset_count != 0) |
513 | 0 | res->ttl = (int)rep->ttl; |
514 | 0 | res->data[data->count] = NULL; |
515 | 0 | res->len[data->count] = 0; |
516 | 0 | return 1; |
517 | 0 | } |
518 | | |
519 | | /** fill result from parsed message, on error fills servfail */ |
520 | | void |
521 | | libworker_enter_result(struct ub_result* res, sldns_buffer* buf, |
522 | | struct regional* temp, enum sec_status msg_security) |
523 | 0 | { |
524 | 0 | struct query_info rq; |
525 | 0 | struct reply_info* rep; |
526 | 0 | res->rcode = LDNS_RCODE_SERVFAIL; |
527 | 0 | rep = parse_reply_in_temp_region(buf, temp, &rq); |
528 | 0 | if(!rep) { |
529 | 0 | log_err("cannot parse buf"); |
530 | 0 | return; /* error parsing buf, or out of memory */ |
531 | 0 | } |
532 | 0 | if(!fill_res(res, reply_find_answer_rrset(&rq, rep), |
533 | 0 | reply_find_final_cname_target(&rq, rep), &rq, rep)) |
534 | 0 | return; /* out of memory */ |
535 | | /* rcode, havedata, nxdomain, secure, bogus */ |
536 | 0 | res->rcode = (int)FLAGS_GET_RCODE(rep->flags); |
537 | 0 | if(res->data && res->data[0]) |
538 | 0 | res->havedata = 1; |
539 | 0 | if(res->rcode == LDNS_RCODE_NXDOMAIN) |
540 | 0 | res->nxdomain = 1; |
541 | 0 | if(msg_security == sec_status_secure) |
542 | 0 | res->secure = 1; |
543 | 0 | if(msg_security == sec_status_bogus || |
544 | 0 | msg_security == sec_status_secure_sentinel_fail) |
545 | 0 | res->bogus = 1; |
546 | 0 | } |
547 | | |
548 | | /** fillup fg results */ |
549 | | static void |
550 | | libworker_fillup_fg(struct ctx_query* q, int rcode, sldns_buffer* buf, |
551 | | enum sec_status s, char* why_bogus, int was_ratelimited) |
552 | 0 | { |
553 | 0 | q->res->was_ratelimited = was_ratelimited; |
554 | 0 | if(why_bogus) |
555 | 0 | q->res->why_bogus = strdup(why_bogus); |
556 | 0 | if(rcode != 0) { |
557 | 0 | q->res->rcode = rcode; |
558 | 0 | q->msg_security = s; |
559 | 0 | return; |
560 | 0 | } |
561 | | |
562 | 0 | q->res->rcode = LDNS_RCODE_SERVFAIL; |
563 | 0 | q->msg_security = sec_status_unchecked; |
564 | 0 | q->msg = memdup(sldns_buffer_begin(buf), sldns_buffer_limit(buf)); |
565 | 0 | q->msg_len = sldns_buffer_limit(buf); |
566 | 0 | if(!q->msg) { |
567 | 0 | return; /* the error is in the rcode */ |
568 | 0 | } |
569 | | |
570 | | /* canonname and results */ |
571 | 0 | q->msg_security = s; |
572 | 0 | libworker_enter_result(q->res, buf, q->w->env->scratch, s); |
573 | 0 | } |
574 | | |
575 | | void |
576 | | libworker_fg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s, |
577 | | char* why_bogus, int was_ratelimited) |
578 | 0 | { |
579 | 0 | struct ctx_query* q = (struct ctx_query*)arg; |
580 | | /* fg query is done; exit comm base */ |
581 | 0 | comm_base_exit(q->w->base); |
582 | 0 |
583 | 0 | libworker_fillup_fg(q, rcode, buf, s, why_bogus, was_ratelimited); |
584 | 0 | } |
585 | | |
586 | | /** setup qinfo and edns */ |
587 | | static int |
588 | | setup_qinfo_edns(struct libworker* w, struct ctx_query* q, |
589 | | struct query_info* qinfo, struct edns_data* edns) |
590 | 0 | { |
591 | 0 | qinfo->qtype = (uint16_t)q->res->qtype; |
592 | 0 | qinfo->qclass = (uint16_t)q->res->qclass; |
593 | 0 | qinfo->local_alias = NULL; |
594 | 0 | qinfo->qname = sldns_str2wire_dname(q->res->qname, &qinfo->qname_len); |
595 | 0 | if(!qinfo->qname) { |
596 | 0 | return 0; |
597 | 0 | } |
598 | 0 | edns->edns_present = 1; |
599 | 0 | edns->ext_rcode = 0; |
600 | 0 | edns->edns_version = 0; |
601 | 0 | edns->bits = EDNS_DO; |
602 | 0 | edns->opt_list_in = NULL; |
603 | 0 | edns->opt_list_out = NULL; |
604 | 0 | edns->opt_list_inplace_cb_out = NULL; |
605 | 0 | edns->padding_block_size = 0; |
606 | 0 | if(sldns_buffer_capacity(w->back->udp_buff) < 65535) |
607 | 0 | edns->udp_size = (uint16_t)sldns_buffer_capacity( |
608 | 0 | w->back->udp_buff); |
609 | 0 | else edns->udp_size = 65535; |
610 | 0 | return 1; |
611 | 0 | } |
612 | | |
613 | | int libworker_fg(struct ub_ctx* ctx, struct ctx_query* q) |
614 | 0 | { |
615 | 0 | struct libworker* w = libworker_setup(ctx, 0, NULL); |
616 | 0 | uint16_t qflags, qid; |
617 | 0 | struct query_info qinfo; |
618 | 0 | struct edns_data edns; |
619 | 0 | if(!w) |
620 | 0 | return UB_INITFAIL; |
621 | 0 | if(!setup_qinfo_edns(w, q, &qinfo, &edns)) { |
622 | 0 | libworker_delete(w); |
623 | 0 | return UB_SYNTAX; |
624 | 0 | } |
625 | 0 | qid = 0; |
626 | 0 | qflags = BIT_RD; |
627 | 0 | q->w = w; |
628 | | /* see if there is a fixed answer */ |
629 | 0 | sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid); |
630 | 0 | sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags); |
631 | 0 | if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns, |
632 | 0 | w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0, |
633 | 0 | NULL, 0, NULL, 0, NULL)) { |
634 | 0 | regional_free_all(w->env->scratch); |
635 | 0 | libworker_fillup_fg(q, LDNS_RCODE_NOERROR, |
636 | 0 | w->back->udp_buff, sec_status_insecure, NULL, 0); |
637 | 0 | libworker_delete(w); |
638 | 0 | free(qinfo.qname); |
639 | 0 | return UB_NOERROR; |
640 | 0 | } |
641 | 0 | if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones, |
642 | 0 | w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) { |
643 | 0 | regional_free_all(w->env->scratch); |
644 | 0 | libworker_fillup_fg(q, LDNS_RCODE_NOERROR, |
645 | 0 | w->back->udp_buff, sec_status_insecure, NULL, 0); |
646 | 0 | libworker_delete(w); |
647 | 0 | free(qinfo.qname); |
648 | 0 | return UB_NOERROR; |
649 | 0 | } |
650 | | /* process new query */ |
651 | 0 | if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns, |
652 | 0 | w->back->udp_buff, qid, libworker_fg_done_cb, q, 0)) { |
653 | 0 | free(qinfo.qname); |
654 | 0 | return UB_NOMEM; |
655 | 0 | } |
656 | 0 | free(qinfo.qname); |
657 | | |
658 | | /* wait for reply */ |
659 | 0 | comm_base_dispatch(w->base); |
660 | |
661 | 0 | libworker_delete(w); |
662 | 0 | return UB_NOERROR; |
663 | 0 | } |
664 | | |
665 | | void |
666 | | libworker_event_done_cb(void* arg, int rcode, sldns_buffer* buf, |
667 | | enum sec_status s, char* why_bogus, int was_ratelimited) |
668 | 0 | { |
669 | 0 | struct ctx_query* q = (struct ctx_query*)arg; |
670 | 0 | ub_event_callback_type cb = q->cb_event; |
671 | 0 | void* cb_arg = q->cb_arg; |
672 | 0 | int cancelled = q->cancelled; |
673 | | |
674 | | /* delete it now */ |
675 | 0 | struct ub_ctx* ctx = q->w->ctx; |
676 | 0 | lock_basic_lock(&ctx->cfglock); |
677 | 0 | (void)rbtree_delete(&ctx->queries, q->node.key); |
678 | 0 | ctx->num_async--; |
679 | 0 | context_query_delete(q); |
680 | 0 | lock_basic_unlock(&ctx->cfglock); |
681 | |
682 | 0 | if(!cancelled) { |
683 | | /* call callback */ |
684 | 0 | int sec = 0; |
685 | 0 | if(s == sec_status_bogus) |
686 | 0 | sec = 1; |
687 | 0 | else if(s == sec_status_secure) |
688 | 0 | sec = 2; |
689 | 0 | (*cb)(cb_arg, rcode, (buf?(void*)sldns_buffer_begin(buf):NULL), |
690 | 0 | (buf?(int)sldns_buffer_limit(buf):0), sec, why_bogus, was_ratelimited); |
691 | 0 | } |
692 | 0 | } |
693 | | |
694 | | int libworker_attach_mesh(struct ub_ctx* ctx, struct ctx_query* q, |
695 | | int* async_id) |
696 | 0 | { |
697 | 0 | struct libworker* w = ctx->event_worker; |
698 | 0 | uint16_t qflags, qid; |
699 | 0 | struct query_info qinfo; |
700 | 0 | struct edns_data edns; |
701 | 0 | if(!w) |
702 | 0 | return UB_INITFAIL; |
703 | 0 | if(!setup_qinfo_edns(w, q, &qinfo, &edns)) |
704 | 0 | return UB_SYNTAX; |
705 | 0 | qid = 0; |
706 | 0 | qflags = BIT_RD; |
707 | 0 | q->w = w; |
708 | | /* see if there is a fixed answer */ |
709 | 0 | sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid); |
710 | 0 | sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags); |
711 | 0 | if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns, |
712 | 0 | w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0, |
713 | 0 | NULL, 0, NULL, 0, NULL)) { |
714 | 0 | regional_free_all(w->env->scratch); |
715 | 0 | free(qinfo.qname); |
716 | 0 | libworker_event_done_cb(q, LDNS_RCODE_NOERROR, |
717 | 0 | w->back->udp_buff, sec_status_insecure, NULL, 0); |
718 | 0 | return UB_NOERROR; |
719 | 0 | } |
720 | 0 | if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones, |
721 | 0 | w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) { |
722 | 0 | regional_free_all(w->env->scratch); |
723 | 0 | free(qinfo.qname); |
724 | 0 | libworker_event_done_cb(q, LDNS_RCODE_NOERROR, |
725 | 0 | w->back->udp_buff, sec_status_insecure, NULL, 0); |
726 | 0 | return UB_NOERROR; |
727 | 0 | } |
728 | | /* process new query */ |
729 | 0 | if(async_id) |
730 | 0 | *async_id = q->querynum; |
731 | 0 | if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns, |
732 | 0 | w->back->udp_buff, qid, libworker_event_done_cb, q, 0)) { |
733 | 0 | free(qinfo.qname); |
734 | 0 | return UB_NOMEM; |
735 | 0 | } |
736 | 0 | free(qinfo.qname); |
737 | 0 | return UB_NOERROR; |
738 | 0 | } |
739 | | |
740 | | /** add result to the bg worker result queue */ |
741 | | static void |
742 | | add_bg_result(struct libworker* w, struct ctx_query* q, sldns_buffer* pkt, |
743 | | int err, char* reason, int was_ratelimited) |
744 | 0 | { |
745 | 0 | uint8_t* msg = NULL; |
746 | 0 | uint32_t len = 0; |
747 | |
748 | 0 | if(w->want_quit) { |
749 | 0 | context_query_delete(q); |
750 | 0 | return; |
751 | 0 | } |
752 | | /* serialize and delete unneeded q */ |
753 | 0 | if(w->is_bg_thread) { |
754 | 0 | lock_basic_lock(&w->ctx->cfglock); |
755 | 0 | if(reason) |
756 | 0 | q->res->why_bogus = strdup(reason); |
757 | 0 | q->res->was_ratelimited = was_ratelimited; |
758 | 0 | if(pkt) { |
759 | 0 | q->msg_len = sldns_buffer_remaining(pkt); |
760 | 0 | q->msg = memdup(sldns_buffer_begin(pkt), q->msg_len); |
761 | 0 | if(!q->msg) { |
762 | 0 | msg = context_serialize_answer(q, UB_NOMEM, NULL, &len); |
763 | 0 | } else { |
764 | 0 | msg = context_serialize_answer(q, err, NULL, &len); |
765 | 0 | } |
766 | 0 | } else { |
767 | 0 | msg = context_serialize_answer(q, err, NULL, &len); |
768 | 0 | } |
769 | 0 | lock_basic_unlock(&w->ctx->cfglock); |
770 | 0 | } else { |
771 | 0 | if(reason) |
772 | 0 | q->res->why_bogus = strdup(reason); |
773 | 0 | q->res->was_ratelimited = was_ratelimited; |
774 | 0 | msg = context_serialize_answer(q, err, pkt, &len); |
775 | 0 | (void)rbtree_delete(&w->ctx->queries, q->node.key); |
776 | 0 | w->ctx->num_async--; |
777 | 0 | context_query_delete(q); |
778 | 0 | } |
779 | |
780 | 0 | if(!msg) { |
781 | 0 | log_err("out of memory for async answer"); |
782 | 0 | return; |
783 | 0 | } |
784 | 0 | if(!tube_queue_item(w->ctx->rr_pipe, msg, len)) { |
785 | 0 | log_err("out of memory for async answer"); |
786 | 0 | return; |
787 | 0 | } |
788 | 0 | } |
789 | | |
790 | | void |
791 | | libworker_bg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s, |
792 | | char* why_bogus, int was_ratelimited) |
793 | 0 | { |
794 | 0 | struct ctx_query* q = (struct ctx_query*)arg; |
795 | |
796 | 0 | if(q->cancelled || q->w->back->want_to_quit) { |
797 | 0 | if(q->w->is_bg_thread) { |
798 | | /* delete it now */ |
799 | 0 | struct ub_ctx* ctx = q->w->ctx; |
800 | 0 | lock_basic_lock(&ctx->cfglock); |
801 | 0 | (void)rbtree_delete(&ctx->queries, q->node.key); |
802 | 0 | ctx->num_async--; |
803 | 0 | context_query_delete(q); |
804 | 0 | lock_basic_unlock(&ctx->cfglock); |
805 | 0 | } |
806 | | /* cancelled, do not give answer */ |
807 | 0 | return; |
808 | 0 | } |
809 | 0 | q->msg_security = s; |
810 | 0 | if(!buf) { |
811 | 0 | buf = q->w->env->scratch_buffer; |
812 | 0 | } |
813 | 0 | if(rcode != 0) { |
814 | 0 | error_encode(buf, rcode, NULL, 0, BIT_RD, NULL); |
815 | 0 | } |
816 | 0 | add_bg_result(q->w, q, buf, UB_NOERROR, why_bogus, was_ratelimited); |
817 | 0 | } |
818 | | |
819 | | |
820 | | /** handle new query command for bg worker */ |
821 | | static void |
822 | | handle_newq(struct libworker* w, uint8_t* buf, uint32_t len) |
823 | 0 | { |
824 | 0 | uint16_t qflags, qid; |
825 | 0 | struct query_info qinfo; |
826 | 0 | struct edns_data edns; |
827 | 0 | struct ctx_query* q; |
828 | 0 | if(w->is_bg_thread) { |
829 | 0 | lock_basic_lock(&w->ctx->cfglock); |
830 | 0 | q = context_lookup_new_query(w->ctx, buf, len); |
831 | 0 | lock_basic_unlock(&w->ctx->cfglock); |
832 | 0 | } else { |
833 | 0 | q = context_deserialize_new_query(w->ctx, buf, len); |
834 | 0 | } |
835 | 0 | free(buf); |
836 | 0 | if(!q) { |
837 | 0 | log_err("failed to deserialize newq"); |
838 | 0 | return; |
839 | 0 | } |
840 | 0 | if(!setup_qinfo_edns(w, q, &qinfo, &edns)) { |
841 | 0 | add_bg_result(w, q, NULL, UB_SYNTAX, NULL, 0); |
842 | 0 | return; |
843 | 0 | } |
844 | 0 | qid = 0; |
845 | 0 | qflags = BIT_RD; |
846 | | /* see if there is a fixed answer */ |
847 | 0 | sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid); |
848 | 0 | sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags); |
849 | 0 | if(local_zones_answer(w->ctx->local_zones, w->env, &qinfo, &edns, |
850 | 0 | w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0, |
851 | 0 | NULL, 0, NULL, 0, NULL)) { |
852 | 0 | regional_free_all(w->env->scratch); |
853 | 0 | q->msg_security = sec_status_insecure; |
854 | 0 | add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0); |
855 | 0 | free(qinfo.qname); |
856 | 0 | return; |
857 | 0 | } |
858 | 0 | if(w->ctx->env->auth_zones && auth_zones_answer(w->ctx->env->auth_zones, |
859 | 0 | w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) { |
860 | 0 | regional_free_all(w->env->scratch); |
861 | 0 | q->msg_security = sec_status_insecure; |
862 | 0 | add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0); |
863 | 0 | free(qinfo.qname); |
864 | 0 | return; |
865 | 0 | } |
866 | 0 | q->w = w; |
867 | | /* process new query */ |
868 | 0 | if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns, |
869 | 0 | w->back->udp_buff, qid, libworker_bg_done_cb, q, 0)) { |
870 | 0 | add_bg_result(w, q, NULL, UB_NOMEM, NULL, 0); |
871 | 0 | } |
872 | 0 | free(qinfo.qname); |
873 | 0 | } |
874 | | |
875 | | void libworker_alloc_cleanup(void* arg) |
876 | 0 | { |
877 | 0 | struct libworker* w = (struct libworker*)arg; |
878 | 0 | slabhash_clear(&w->env->rrset_cache->table); |
879 | 0 | slabhash_clear(w->env->msg_cache); |
880 | 0 | } |
881 | | |
882 | | struct outbound_entry* libworker_send_query(struct query_info* qinfo, |
883 | | uint16_t flags, int dnssec, int want_dnssec, int nocaps, |
884 | | int check_ratelimit, |
885 | | struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone, |
886 | | size_t zonelen, int tcp_upstream, int ssl_upstream, char* tls_auth_name, |
887 | | struct module_qstate* q, int* was_ratelimited) |
888 | 0 | { |
889 | 0 | struct libworker* w = (struct libworker*)q->env->worker; |
890 | 0 | struct outbound_entry* e = (struct outbound_entry*)regional_alloc( |
891 | 0 | q->region, sizeof(*e)); |
892 | 0 | if(!e) |
893 | 0 | return NULL; |
894 | 0 | e->qstate = q; |
895 | 0 | e->qsent = outnet_serviced_query(w->back, qinfo, flags, dnssec, |
896 | 0 | want_dnssec, nocaps, check_ratelimit, tcp_upstream, ssl_upstream, |
897 | 0 | tls_auth_name, addr, addrlen, zone, zonelen, q, |
898 | 0 | libworker_handle_service_reply, e, w->back->udp_buff, q->env, |
899 | 0 | was_ratelimited); |
900 | 0 | if(!e->qsent) { |
901 | 0 | return NULL; |
902 | 0 | } |
903 | 0 | return e; |
904 | 0 | } |
905 | | |
906 | | int |
907 | | libworker_handle_service_reply(struct comm_point* c, void* arg, int error, |
908 | | struct comm_reply* reply_info) |
909 | 0 | { |
910 | 0 | struct outbound_entry* e = (struct outbound_entry*)arg; |
911 | 0 | struct libworker* lw = (struct libworker*)e->qstate->env->worker; |
912 | |
913 | 0 | if(error != 0) { |
914 | 0 | mesh_report_reply(lw->env->mesh, e, reply_info, error); |
915 | 0 | return 0; |
916 | 0 | } |
917 | | /* sanity check. */ |
918 | 0 | if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer)) |
919 | 0 | || LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) != |
920 | 0 | LDNS_PACKET_QUERY |
921 | 0 | || LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) { |
922 | | /* error becomes timeout for the module as if this reply |
923 | | * never arrived. */ |
924 | 0 | mesh_report_reply(lw->env->mesh, e, reply_info, |
925 | 0 | NETEVENT_TIMEOUT); |
926 | 0 | return 0; |
927 | 0 | } |
928 | 0 | mesh_report_reply(lw->env->mesh, e, reply_info, NETEVENT_NOERROR); |
929 | 0 | return 0; |
930 | 0 | } |
931 | | |
932 | | /* --- fake callbacks for fptr_wlist to work --- */ |
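/* These symbols belong to the daemon worker, but they appear in the
 * function pointer whitelist (util/fptr_wlist.c) that libunbound also
 * links, so stub definitions are needed to satisfy the linker; none of
 * them should ever be reached from libunbound, hence the asserts. */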
933 | | void worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube), |
934 | | uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len), |
935 | | int ATTR_UNUSED(error), void* ATTR_UNUSED(arg)) |
936 | 0 | { |
937 | 0 | log_assert(0); |
938 | 0 | } |
939 | | |
940 | | int worker_handle_request(struct comm_point* ATTR_UNUSED(c), |
941 | | void* ATTR_UNUSED(arg), int ATTR_UNUSED(error), |
942 | | struct comm_reply* ATTR_UNUSED(repinfo)) |
943 | 0 | { |
944 | 0 | log_assert(0); |
945 | 0 | return 0; |
946 | 0 | } |
947 | | |
948 | | int worker_handle_service_reply(struct comm_point* ATTR_UNUSED(c), |
949 | | void* ATTR_UNUSED(arg), int ATTR_UNUSED(error), |
950 | | struct comm_reply* ATTR_UNUSED(reply_info)) |
951 | 0 | { |
952 | 0 | log_assert(0); |
953 | 0 | return 0; |
954 | 0 | } |
955 | | |
956 | | int remote_accept_callback(struct comm_point* ATTR_UNUSED(c), |
957 | | void* ATTR_UNUSED(arg), int ATTR_UNUSED(error), |
958 | | struct comm_reply* ATTR_UNUSED(repinfo)) |
959 | 0 | { |
960 | 0 | log_assert(0); |
961 | 0 | return 0; |
962 | 0 | } |
963 | | |
964 | | int remote_control_callback(struct comm_point* ATTR_UNUSED(c), |
965 | | void* ATTR_UNUSED(arg), int ATTR_UNUSED(error), |
966 | | struct comm_reply* ATTR_UNUSED(repinfo)) |
967 | 0 | { |
968 | 0 | log_assert(0); |
969 | 0 | return 0; |
970 | 0 | } |
971 | | |
972 | | void worker_sighandler(int ATTR_UNUSED(sig), void* ATTR_UNUSED(arg)) |
973 | 0 | { |
974 | 0 | log_assert(0); |
975 | 0 | } |
976 | | |
977 | | struct outbound_entry* worker_send_query(struct query_info* ATTR_UNUSED(qinfo), |
978 | | uint16_t ATTR_UNUSED(flags), int ATTR_UNUSED(dnssec), |
979 | | int ATTR_UNUSED(want_dnssec), int ATTR_UNUSED(nocaps), |
980 | | int ATTR_UNUSED(check_ratelimit), |
981 | | struct sockaddr_storage* ATTR_UNUSED(addr), socklen_t ATTR_UNUSED(addrlen), |
982 | | uint8_t* ATTR_UNUSED(zone), size_t ATTR_UNUSED(zonelen), int ATTR_UNUSED(tcp_upstream), |
983 | | int ATTR_UNUSED(ssl_upstream), char* ATTR_UNUSED(tls_auth_name), |
984 | | struct module_qstate* ATTR_UNUSED(q), int* ATTR_UNUSED(was_ratelimited)) |
985 | 0 | { |
986 | 0 | log_assert(0); |
987 | 0 | return 0; |
988 | 0 | } |
989 | | |
990 | | void |
991 | | worker_alloc_cleanup(void* ATTR_UNUSED(arg)) |
992 | 0 | { |
993 | 0 | log_assert(0); |
994 | 0 | } |
995 | | |
996 | | void worker_stat_timer_cb(void* ATTR_UNUSED(arg)) |
997 | 0 | { |
998 | 0 | log_assert(0); |
999 | 0 | } |
1000 | | |
1001 | | void worker_probe_timer_cb(void* ATTR_UNUSED(arg)) |
1002 | 0 | { |
1003 | 0 | log_assert(0); |
1004 | 0 | } |
1005 | | |
1006 | | void worker_start_accept(void* ATTR_UNUSED(arg)) |
1007 | 0 | { |
1008 | 0 | log_assert(0); |
1009 | 0 | } |
1010 | | |
1011 | | void worker_stop_accept(void* ATTR_UNUSED(arg)) |
1012 | 0 | { |
1013 | 0 | log_assert(0); |
1014 | 0 | } |
1015 | | |
1016 | | int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2)) |
1017 | 0 | { |
1018 | 0 | log_assert(0); |
1019 | 0 | return 0; |
1020 | 0 | } |
1021 | | |
1022 | | int |
1023 | | codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b)) |
1024 | 0 | { |
1025 | 0 | log_assert(0); |
1026 | 0 | return 0; |
1027 | 0 | } |
1028 | | |
1029 | | int replay_var_compare(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b)) |
1030 | 0 | { |
1031 | 0 | log_assert(0); |
1032 | 0 | return 0; |
1033 | 0 | } |
1034 | | |
1035 | | void remote_get_opt_ssl(char* ATTR_UNUSED(str), void* ATTR_UNUSED(arg)) |
1036 | 0 | { |
1037 | 0 | log_assert(0); |
1038 | 0 | } |
1039 | | |
1040 | | #ifdef UB_ON_WINDOWS |
1041 | | void |
1042 | | worker_win_stop_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), void* |
1043 | | ATTR_UNUSED(arg)) { |
1044 | | log_assert(0); |
1045 | | } |
1046 | | |
1047 | | void |
1048 | | wsvc_cron_cb(void* ATTR_UNUSED(arg)) |
1049 | | { |
1050 | | log_assert(0); |
1051 | | } |
1052 | | #endif /* UB_ON_WINDOWS */ |
1053 | | |
1054 | | #ifdef USE_DNSTAP |
1055 | | void dtio_tap_callback(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), |
1056 | | void* ATTR_UNUSED(arg)) |
1057 | | { |
1058 | | log_assert(0); |
1059 | | } |
1060 | | #endif |
1061 | | |
1062 | | #ifdef USE_DNSTAP |
1063 | | void dtio_mainfdcallback(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), |
1064 | | void* ATTR_UNUSED(arg)) |
1065 | | { |
1066 | | log_assert(0); |
1067 | | } |
1068 | | #endif |