/src/unbound/libunbound/libworker.c
Line | Count | Source |
1 | | /* |
2 | | * libunbound/libworker.c - worker thread or process that resolves |
3 | | * |
4 | | * Copyright (c) 2007, NLnet Labs. All rights reserved. |
5 | | * |
6 | | * This software is open source. |
7 | | * |
8 | | * Redistribution and use in source and binary forms, with or without |
9 | | * modification, are permitted provided that the following conditions |
10 | | * are met: |
11 | | * |
12 | | * Redistributions of source code must retain the above copyright notice, |
13 | | * this list of conditions and the following disclaimer. |
14 | | * |
15 | | * Redistributions in binary form must reproduce the above copyright notice, |
16 | | * this list of conditions and the following disclaimer in the documentation |
17 | | * and/or other materials provided with the distribution. |
18 | | * |
19 | | * Neither the name of the NLNET LABS nor the names of its contributors may |
20 | | * be used to endorse or promote products derived from this software without |
21 | | * specific prior written permission. |
22 | | * |
23 | | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
24 | | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
25 | | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
26 | | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
27 | | * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
28 | | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED |
29 | | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
30 | | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
31 | | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
32 | | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
33 | | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
34 | | */ |
35 | | |
36 | | /** |
37 | | * \file |
38 | | * |
39 | | * This file contains the worker process or thread that performs |
40 | | * the DNS resolving and validation. The worker is called by a procedure; |
41 | | * if it runs in the background it continues until exit, and if in the |
42 | | * foreground it returns from the procedure when done. |
43 | | */ |
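
For orientation, the sketch below (not part of the instrumented source) shows how an application reaches the foreground path implemented in this file through the public libunbound API; the query name and the numeric type/class values are illustrative assumptions.

#include <stdio.h>
#include <unbound.h>

int main(void)
{
	struct ub_ctx* ctx = ub_ctx_create();
	struct ub_result* result = NULL;
	int err;
	if(!ctx)
		return 1;
	/* synchronous lookup; the library sets up a foreground worker,
	 * dispatches its event base and returns once the answer is in */
	err = ub_resolve(ctx, "www.example.com", 1 /* TYPE A */,
		1 /* CLASS IN */, &result);
	if(err != 0)
		fprintf(stderr, "resolve error: %s\n", ub_strerror(err));
	else if(result->havedata)
		printf("rcode %d, first answer has %d bytes of rdata\n",
			result->rcode, result->len[0]);
	ub_resolve_free(result);
	ub_ctx_delete(ctx);
	return 0;
}
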
44 | | #include "config.h" |
45 | | #ifdef HAVE_SSL |
46 | | #include <openssl/ssl.h> |
47 | | #endif |
48 | | #include "libunbound/libworker.h" |
49 | | #include "libunbound/context.h" |
50 | | #include "libunbound/unbound.h" |
51 | | #include "libunbound/worker.h" |
52 | | #include "libunbound/unbound-event.h" |
53 | | #include "services/outside_network.h" |
54 | | #include "services/mesh.h" |
55 | | #include "services/localzone.h" |
56 | | #include "services/cache/rrset.h" |
57 | | #include "services/outbound_list.h" |
58 | | #include "services/authzone.h" |
59 | | #include "util/fptr_wlist.h" |
60 | | #include "util/module.h" |
61 | | #include "util/regional.h" |
62 | | #include "util/random.h" |
63 | | #include "util/config_file.h" |
64 | | #include "util/netevent.h" |
65 | | #include "util/proxy_protocol.h" |
66 | | #include "util/storage/lookup3.h" |
67 | | #include "util/storage/slabhash.h" |
68 | | #include "util/net_help.h" |
69 | | #include "util/data/dname.h" |
70 | | #include "util/data/msgreply.h" |
71 | | #include "util/data/msgencode.h" |
72 | | #include "util/tube.h" |
73 | | #include "sldns/sbuffer.h" |
74 | | #include "sldns/str2wire.h" |
75 | | #ifdef USE_DNSTAP |
76 | | #include "dnstap/dtstream.h" |
77 | | #endif |
78 | | |
79 | | #ifdef HAVE_TARGETCONDITIONALS_H |
80 | | #include <TargetConditionals.h> |
81 | | #endif |
82 | | |
83 | | #if (defined(TARGET_OS_TV) && TARGET_OS_TV) || (defined(TARGET_OS_WATCH) && TARGET_OS_WATCH) |
84 | | #undef HAVE_FORK |
85 | | #endif |
86 | | |
87 | | /** handle new query command for bg worker */ |
88 | | static void handle_newq(struct libworker* w, uint8_t* buf, uint32_t len); |
89 | | |
90 | | /** delete libworker env */ |
91 | | static void |
92 | | libworker_delete_env(struct libworker* w) |
93 | 0 | { |
94 | 0 | if(w->env) { |
95 | 0 | outside_network_quit_prepare(w->back); |
96 | 0 | mesh_delete(w->env->mesh); |
97 | 0 | context_release_alloc(w->ctx, w->env->alloc, |
98 | 0 | !w->is_bg || w->is_bg_thread); |
99 | 0 | sldns_buffer_free(w->env->scratch_buffer); |
100 | 0 | regional_destroy(w->env->scratch); |
101 | 0 | ub_randfree(w->env->rnd); |
102 | 0 | free(w->env); |
103 | 0 | } |
104 | 0 | #ifdef HAVE_SSL |
105 | 0 | SSL_CTX_free(w->sslctx); |
106 | 0 | #endif |
107 | 0 | outside_network_delete(w->back); |
108 | 0 | } |
109 | | |
110 | | /** delete libworker struct */ |
111 | | static void |
112 | | libworker_delete(struct libworker* w) |
113 | 0 | { |
114 | 0 | if(!w) return; |
115 | 0 | libworker_delete_env(w); |
116 | 0 | comm_base_delete(w->base); |
117 | 0 | free(w); |
118 | 0 | } |
119 | | |
120 | | void |
121 | | libworker_delete_event(struct libworker* w) |
122 | 0 | { |
123 | 0 | if(!w) return; |
124 | 0 | libworker_delete_env(w); |
125 | 0 | comm_base_delete_no_base(w->base); |
126 | 0 | free(w); |
127 | 0 | } |
128 | | |
129 | | /** setup fresh libworker struct */ |
130 | | static struct libworker* |
131 | | libworker_setup(struct ub_ctx* ctx, int is_bg, struct ub_event_base* eb) |
132 | 0 | { |
133 | 0 | struct libworker* w = (struct libworker*)calloc(1, sizeof(*w)); |
134 | 0 | struct config_file* cfg = ctx->env->cfg; |
135 | 0 | int* ports; |
136 | 0 | int numports; |
137 | 0 | if(!w) return NULL; |
138 | 0 | w->is_bg = is_bg; |
139 | 0 | w->ctx = ctx; |
140 | 0 | w->env = (struct module_env*)malloc(sizeof(*w->env)); |
141 | 0 | if(!w->env) { |
142 | 0 | free(w); |
143 | 0 | return NULL; |
144 | 0 | } |
145 | 0 | *w->env = *ctx->env; |
146 | 0 | w->env->alloc = context_obtain_alloc(ctx, !w->is_bg || w->is_bg_thread); |
147 | 0 | if(!w->env->alloc) { |
148 | 0 | libworker_delete(w); |
149 | 0 | return NULL; |
150 | 0 | } |
151 | 0 | w->thread_num = w->env->alloc->thread_num; |
152 | 0 | alloc_set_id_cleanup(w->env->alloc, &libworker_alloc_cleanup, w); |
153 | 0 | if(!w->is_bg || w->is_bg_thread) { |
154 | 0 | lock_basic_lock(&ctx->cfglock); |
155 | 0 | } |
156 | 0 | w->env->scratch = regional_create_custom(cfg->msg_buffer_size); |
157 | 0 | w->env->scratch_buffer = sldns_buffer_new(cfg->msg_buffer_size); |
158 | 0 | #ifdef HAVE_SSL |
159 | 0 | w->sslctx = connect_sslctx_create(NULL, NULL, |
160 | 0 | cfg->tls_cert_bundle, cfg->tls_win_cert); |
161 | 0 | if(!w->sslctx) { |
162 | | /* to make the setup fail after unlock */ |
163 | 0 | sldns_buffer_free(w->env->scratch_buffer); |
164 | 0 | w->env->scratch_buffer = NULL; |
165 | 0 | } |
166 | 0 | #endif |
167 | 0 | if(!w->is_bg || w->is_bg_thread) { |
168 | 0 | lock_basic_unlock(&ctx->cfglock); |
169 | 0 | } |
170 | 0 | if(!w->env->scratch || !w->env->scratch_buffer) { |
171 | 0 | libworker_delete(w); |
172 | 0 | return NULL; |
173 | 0 | } |
174 | 0 | w->env->worker = (struct worker*)w; |
175 | 0 | w->env->probe_timer = NULL; |
176 | 0 | if(!w->is_bg || w->is_bg_thread) { |
177 | 0 | lock_basic_lock(&ctx->cfglock); |
178 | 0 | } |
179 | 0 | if(!(w->env->rnd = ub_initstate(ctx->seed_rnd))) { |
180 | 0 | if(!w->is_bg || w->is_bg_thread) { |
181 | 0 | lock_basic_unlock(&ctx->cfglock); |
182 | 0 | } |
183 | 0 | libworker_delete(w); |
184 | 0 | return NULL; |
185 | 0 | } |
186 | 0 | if(!w->is_bg || w->is_bg_thread) { |
187 | 0 | lock_basic_unlock(&ctx->cfglock); |
188 | 0 | } |
189 | 0 | if(1) { |
190 | | /* primitive lockout for threading: if it overwrites another |
191 | | * thread it is like wiping the cache (which is likely empty |
192 | | * at the start) */ |
193 | | /* note we are holding the ctx lock in normal threaded |
194 | | * cases so that is solved properly, it is only for many ctx |
195 | | * in different threads that this may clash */ |
196 | 0 | static int done_raninit = 0; |
197 | 0 | if(!done_raninit) { |
198 | 0 | done_raninit = 1; |
199 | 0 | hash_set_raninit((uint32_t)ub_random(w->env->rnd)); |
200 | 0 | } |
201 | 0 | } |
202 | |
203 | 0 | if(eb) |
204 | 0 | w->base = comm_base_create_event(eb); |
205 | 0 | else w->base = comm_base_create(0); |
206 | 0 | if(!w->base) { |
207 | 0 | libworker_delete(w); |
208 | 0 | return NULL; |
209 | 0 | } |
210 | 0 | w->env->worker_base = w->base; |
211 | 0 | if(!w->is_bg || w->is_bg_thread) { |
212 | 0 | lock_basic_lock(&ctx->cfglock); |
213 | 0 | } |
214 | 0 | numports = cfg_condense_ports(cfg, &ports); |
215 | 0 | if(numports == 0) { |
216 | 0 | if(!w->is_bg || w->is_bg_thread) { |
217 | 0 | lock_basic_unlock(&ctx->cfglock); |
218 | 0 | } |
219 | 0 | libworker_delete(w); |
220 | 0 | return NULL; |
221 | 0 | } |
222 | 0 | w->back = outside_network_create(w->base, cfg->msg_buffer_size, |
223 | 0 | (size_t)cfg->outgoing_num_ports, cfg->out_ifs, |
224 | 0 | cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6, |
225 | 0 | cfg->do_tcp?cfg->outgoing_num_tcp:0, cfg->ip_dscp, |
226 | 0 | w->env->infra_cache, w->env->rnd, cfg->use_caps_bits_for_id, |
227 | 0 | ports, numports, cfg->unwanted_threshold, |
228 | 0 | cfg->outgoing_tcp_mss, &libworker_alloc_cleanup, w, |
229 | 0 | cfg->do_udp || cfg->udp_upstream_without_downstream, w->sslctx, |
230 | 0 | cfg->delay_close, cfg->tls_use_sni, NULL, cfg->udp_connect, |
231 | 0 | cfg->max_reuse_tcp_queries, cfg->tcp_reuse_timeout, |
232 | 0 | cfg->tcp_auth_query_timeout); |
233 | 0 | w->env->outnet = w->back; |
234 | 0 | if(!w->is_bg || w->is_bg_thread) { |
235 | 0 | lock_basic_unlock(&ctx->cfglock); |
236 | 0 | } |
237 | 0 | free(ports); |
238 | 0 | if(!w->back) { |
239 | 0 | libworker_delete(w); |
240 | 0 | return NULL; |
241 | 0 | } |
242 | 0 | w->env->mesh = mesh_create(&ctx->mods, w->env); |
243 | 0 | if(!w->env->mesh) { |
244 | 0 | libworker_delete(w); |
245 | 0 | return NULL; |
246 | 0 | } |
247 | 0 | w->env->send_query = &libworker_send_query; |
248 | 0 | w->env->detach_subs = &mesh_detach_subs; |
249 | 0 | w->env->attach_sub = &mesh_attach_sub; |
250 | 0 | w->env->add_sub = &mesh_add_sub; |
251 | 0 | w->env->kill_sub = &mesh_state_delete; |
252 | 0 | w->env->detect_cycle = &mesh_detect_cycle; |
253 | 0 | comm_base_timept(w->base, &w->env->now, &w->env->now_tv); |
254 | 0 | pp_init(&sldns_write_uint16, &sldns_write_uint32); |
255 | 0 | return w; |
256 | 0 | } |
257 | | |
258 | | struct libworker* libworker_create_event(struct ub_ctx* ctx, |
259 | | struct ub_event_base* eb) |
260 | 0 | { |
261 | 0 | return libworker_setup(ctx, 0, eb); |
262 | 0 | } |
263 | | |
264 | | /** handle cancel command for bg worker */ |
265 | | static void |
266 | | handle_cancel(struct libworker* w, uint8_t* buf, uint32_t len) |
267 | 0 | { |
268 | 0 | struct ctx_query* q; |
269 | 0 | if(w->is_bg_thread) { |
270 | 0 | lock_basic_lock(&w->ctx->cfglock); |
271 | 0 | q = context_deserialize_cancel(w->ctx, buf, len); |
272 | 0 | lock_basic_unlock(&w->ctx->cfglock); |
273 | 0 | } else { |
274 | 0 | q = context_deserialize_cancel(w->ctx, buf, len); |
275 | 0 | } |
276 | 0 | if(!q) { |
277 | | /* probably simply lookup failed, i.e. the message had been |
278 | | * processed and answered before the cancel arrived */ |
279 | 0 | return; |
280 | 0 | } |
281 | 0 | q->cancelled = 1; |
282 | 0 | free(buf); |
283 | 0 | } |
284 | | |
285 | | /** do control command coming into bg server */ |
286 | | static void |
287 | | libworker_do_cmd(struct libworker* w, uint8_t* msg, uint32_t len) |
288 | 0 | { |
289 | 0 | switch(context_serial_getcmd(msg, len)) { |
290 | 0 | default: |
291 | 0 | case UB_LIBCMD_ANSWER: |
292 | 0 | log_err("unknown command for bg worker %d", |
293 | 0 | (int)context_serial_getcmd(msg, len)); |
294 | | /* and fall through to quit */ |
295 | 0 | ATTR_FALLTHROUGH |
296 | | /* fallthrough */ |
297 | 0 | case UB_LIBCMD_QUIT: |
298 | 0 | free(msg); |
299 | 0 | comm_base_exit(w->base); |
300 | 0 | break; |
301 | 0 | case UB_LIBCMD_NEWQUERY: |
302 | 0 | handle_newq(w, msg, len); |
303 | 0 | break; |
304 | 0 | case UB_LIBCMD_CANCEL: |
305 | 0 | handle_cancel(w, msg, len); |
306 | 0 | break; |
307 | 0 | } |
308 | 0 | } |
309 | | |
310 | | /** handle control command coming into server */ |
311 | | void |
312 | | libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube), |
313 | | uint8_t* msg, size_t len, int err, void* arg) |
314 | 0 | { |
315 | 0 | struct libworker* w = (struct libworker*)arg; |
316 | |
317 | 0 | if(err != 0) { |
318 | 0 | free(msg); |
319 | | /* it is of no use to go on, exit */ |
320 | 0 | comm_base_exit(w->base); |
321 | 0 | return; |
322 | 0 | } |
323 | 0 | libworker_do_cmd(w, msg, len); /* also frees the buf */ |
324 | 0 | } |
325 | | |
326 | | /** the background thread func */ |
327 | | static void* |
328 | | libworker_dobg(void* arg) |
329 | 0 | { |
330 | | /* setup */ |
331 | 0 | uint32_t m; |
332 | 0 | struct libworker* w = (struct libworker*)arg; |
333 | 0 | struct ub_ctx* ctx; |
334 | 0 | if(!w) { |
335 | 0 | log_err("libunbound bg worker init failed, nomem"); |
336 | 0 | return NULL; |
337 | 0 | } |
338 | 0 | ctx = w->ctx; |
339 | 0 | log_thread_set(&w->thread_num); |
340 | | #ifdef THREADS_DISABLED |
341 | | /* we are forked */ |
342 | | w->is_bg_thread = 0; |
343 | | /* close non-used parts of the pipes */ |
344 | | tube_close_write(ctx->qq_pipe); |
345 | | tube_close_read(ctx->rr_pipe); |
346 | | #endif |
347 | 0 | if(!tube_setup_bg_listen(ctx->qq_pipe, w->base, |
348 | 0 | libworker_handle_control_cmd, w)) { |
349 | 0 | log_err("libunbound bg worker init failed, no bglisten"); |
350 | 0 | return NULL; |
351 | 0 | } |
352 | 0 | if(!tube_setup_bg_write(ctx->rr_pipe, w->base)) { |
353 | 0 | log_err("libunbound bg worker init failed, no bgwrite"); |
354 | 0 | return NULL; |
355 | 0 | } |
356 | | |
357 | | /* do the work */ |
358 | 0 | comm_base_dispatch(w->base); |
359 | | |
360 | | /* cleanup */ |
361 | 0 | m = UB_LIBCMD_QUIT; |
362 | 0 | w->want_quit = 1; |
363 | 0 | tube_remove_bg_listen(w->ctx->qq_pipe); |
364 | 0 | tube_remove_bg_write(w->ctx->rr_pipe); |
365 | 0 | libworker_delete(w); |
366 | 0 | (void)tube_write_msg(ctx->rr_pipe, (uint8_t*)&m, |
367 | 0 | (uint32_t)sizeof(m), 0); |
368 | | #ifdef THREADS_DISABLED |
369 | | /* close pipes from forked process before exit */ |
370 | | tube_close_read(ctx->qq_pipe); |
371 | | tube_close_write(ctx->rr_pipe); |
372 | | #endif |
373 | 0 | return NULL; |
374 | 0 | } |
375 | | |
376 | | int libworker_bg(struct ub_ctx* ctx) |
377 | 0 | { |
378 | 0 | struct libworker* w; |
379 | | /* fork or threadcreate */ |
380 | 0 | lock_basic_lock(&ctx->cfglock); |
381 | 0 | if(ctx->dothread) { |
382 | 0 | lock_basic_unlock(&ctx->cfglock); |
383 | 0 | w = libworker_setup(ctx, 1, NULL); |
384 | 0 | if(!w) return UB_NOMEM; |
385 | 0 | w->is_bg_thread = 1; |
386 | 0 | ctx->thread_worker = w; |
387 | | #ifdef ENABLE_LOCK_CHECKS |
388 | | w->thread_num = 1; /* for nicer DEBUG checklocks */ |
389 | | #endif |
390 | 0 | ub_thread_create(&ctx->bg_tid, libworker_dobg, w); |
391 | 0 | } else { |
392 | 0 | lock_basic_unlock(&ctx->cfglock); |
393 | | #ifndef HAVE_FORK |
394 | | /* no fork on windows */ |
395 | | return UB_FORKFAIL; |
396 | | #else /* HAVE_FORK */ |
397 | 0 | switch((ctx->bg_pid=fork())) { |
398 | 0 | case 0: |
399 | 0 | w = libworker_setup(ctx, 1, NULL); |
400 | 0 | if(!w) fatal_exit("out of memory"); |
401 | | /* close non-used parts of the pipes */ |
402 | 0 | tube_close_write(ctx->qq_pipe); |
403 | 0 | tube_close_read(ctx->rr_pipe); |
404 | 0 | (void)libworker_dobg(w); |
405 | 0 | exit(0); |
406 | 0 | break; |
407 | 0 | case -1: |
408 | 0 | return UB_FORKFAIL; |
409 | 0 | default: |
410 | | /* close non-used parts, so that the worker |
411 | | * bgprocess gets 'pipe closed' when the |
412 | | * main process exits */ |
413 | 0 | tube_close_read(ctx->qq_pipe); |
414 | 0 | tube_close_write(ctx->rr_pipe); |
415 | 0 | break; |
416 | 0 | } |
417 | 0 | #endif /* HAVE_FORK */ |
418 | 0 | } |
419 | 0 | return UB_NOERROR; |
420 | 0 | } |
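
In contrast with the foreground call, the sketch below is a hedged illustration of how a caller ends up in libworker_bg(): ub_ctx_async() selects thread versus forked process (the ctx->dothread branch above), and the first asynchronous resolve starts the background worker. The callback name and query values are illustrative assumptions.

#include <stdio.h>
#include <unbound.h>

/* illustrative callback, run when the background worker delivers a result */
static void my_done(void* mydata, int err, struct ub_result* result)
{
	(void)mydata;
	if(err == 0 && result && result->havedata)
		printf("async answer, rcode %d\n", result->rcode);
	ub_resolve_free(result);
}

int main(void)
{
	struct ub_ctx* ctx = ub_ctx_create();
	int async_id = 0;
	if(!ctx)
		return 1;
	ub_ctx_async(ctx, 1);	/* 1: background thread, 0: forked process */
	if(ub_resolve_async(ctx, "www.example.com", 1 /* A */, 1 /* IN */,
		NULL, my_done, &async_id) == 0)
		ub_wait(ctx);	/* block until outstanding queries are answered */
	ub_ctx_delete(ctx);
	return 0;
}
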
421 | | |
422 | | /** insert canonname */ |
423 | | static int |
424 | | fill_canon(struct ub_result* res, uint8_t* s) |
425 | 0 | { |
426 | 0 | char buf[LDNS_MAX_DOMAINLEN]; |
427 | 0 | dname_str(s, buf); |
428 | 0 | res->canonname = strdup(buf); |
429 | 0 | return res->canonname != 0; |
430 | 0 | } |
431 | | |
432 | | /** fill data into result */ |
433 | | static int |
434 | | fill_res(struct ub_result* res, struct ub_packed_rrset_key* answer, |
435 | | uint8_t* finalcname, struct query_info* rq, struct reply_info* rep) |
436 | 0 | { |
437 | 0 | size_t i; |
438 | 0 | struct packed_rrset_data* data; |
439 | 0 | res->ttl = 0; |
440 | 0 | if(!answer) { |
441 | 0 | if(finalcname) { |
442 | 0 | if(!fill_canon(res, finalcname)) |
443 | 0 | return 0; /* out of memory */ |
444 | 0 | } |
445 | 0 | if(rep->rrset_count != 0) |
446 | 0 | res->ttl = (int)rep->ttl; |
447 | 0 | res->data = (char**)calloc(1, sizeof(char*)); |
448 | 0 | if(!res->data) |
449 | 0 | return 0; /* out of memory */ |
450 | 0 | res->len = (int*)calloc(1, sizeof(int)); |
451 | 0 | if(!res->len) { |
452 | 0 | free(res->data); |
453 | 0 | res->data = NULL; |
454 | 0 | return 0; /* out of memory */ |
455 | 0 | } |
456 | 0 | return 1; |
457 | 0 | } |
458 | 0 | data = (struct packed_rrset_data*)answer->entry.data; |
459 | 0 | if(query_dname_compare(rq->qname, answer->rk.dname) != 0) { |
460 | 0 | if(!fill_canon(res, answer->rk.dname)) |
461 | 0 | return 0; /* out of memory */ |
462 | 0 | } else res->canonname = NULL; |
463 | 0 | res->data = (char**)calloc(data->count+1, sizeof(char*)); |
464 | 0 | if(!res->data) |
465 | 0 | return 0; /* out of memory */ |
466 | 0 | res->len = (int*)calloc(data->count+1, sizeof(int)); |
467 | 0 | if(!res->len) { |
468 | 0 | free(res->data); |
469 | 0 | res->data = NULL; |
470 | 0 | return 0; /* out of memory */ |
471 | 0 | } |
472 | 0 | for(i=0; i<data->count; i++) { |
473 | | /* remove rdlength from rdata */ |
474 | 0 | res->len[i] = (int)(data->rr_len[i] - 2); |
475 | 0 | res->data[i] = memdup(data->rr_data[i]+2, (size_t)res->len[i]); |
476 | 0 | if(!res->data[i]) { |
477 | 0 | size_t j; |
478 | 0 | for(j=0; j<i; j++) { |
479 | 0 | free(res->data[j]); |
480 | 0 | res->data[j] = NULL; |
481 | 0 | } |
482 | 0 | free(res->data); |
483 | 0 | res->data = NULL; |
484 | 0 | free(res->len); |
485 | 0 | res->len = NULL; |
486 | 0 | return 0; /* out of memory */ |
487 | 0 | } |
488 | 0 | } |
489 | | /* ttl for positive answers, from CNAME and answer RRs */ |
490 | 0 | if(data->count != 0) { |
491 | 0 | size_t j; |
492 | 0 | res->ttl = (int)data->ttl; |
493 | 0 | for(j=0; j<rep->an_numrrsets; j++) { |
494 | 0 | struct packed_rrset_data* d = |
495 | 0 | (struct packed_rrset_data*)rep->rrsets[j]-> |
496 | 0 | entry.data; |
497 | 0 | if((int)d->ttl < res->ttl) |
498 | 0 | res->ttl = (int)d->ttl; |
499 | 0 | } |
500 | 0 | } |
501 | | /* ttl for negative answers */ |
502 | 0 | if(data->count == 0 && rep->rrset_count != 0) |
503 | 0 | res->ttl = (int)rep->ttl; |
504 | 0 | res->data[data->count] = NULL; |
505 | 0 | res->len[data->count] = 0; |
506 | 0 | return 1; |
507 | 0 | } |
508 | | |
509 | | /** fill result from parsed message, on error fills servfail */ |
510 | | void |
511 | | libworker_enter_result(struct ub_result* res, sldns_buffer* buf, |
512 | | struct regional* temp, enum sec_status msg_security) |
513 | 0 | { |
514 | 0 | struct query_info rq; |
515 | 0 | struct reply_info* rep; |
516 | 0 | res->rcode = LDNS_RCODE_SERVFAIL; |
517 | 0 | rep = parse_reply_in_temp_region(buf, temp, &rq); |
518 | 0 | if(!rep) { |
519 | 0 | log_err("cannot parse buf"); |
520 | 0 | return; /* error parsing buf, or out of memory */ |
521 | 0 | } |
522 | 0 | if(!fill_res(res, reply_find_answer_rrset(&rq, rep), |
523 | 0 | reply_find_final_cname_target(&rq, rep), &rq, rep)) |
524 | 0 | return; /* out of memory */ |
525 | | /* rcode, havedata, nxdomain, secure, bogus */ |
526 | 0 | res->rcode = (int)FLAGS_GET_RCODE(rep->flags); |
527 | 0 | if(res->data && res->data[0]) |
528 | 0 | res->havedata = 1; |
529 | 0 | if(res->rcode == LDNS_RCODE_NXDOMAIN) |
530 | 0 | res->nxdomain = 1; |
531 | 0 | if(msg_security == sec_status_secure) |
532 | 0 | res->secure = 1; |
533 | 0 | if(msg_security == sec_status_bogus || |
534 | 0 | msg_security == sec_status_secure_sentinel_fail) |
535 | 0 | res->bogus = 1; |
536 | 0 | } |
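
As a hedged illustration of what fill_res() and libworker_enter_result() leave behind for a caller: ub_result.data is a NULL-terminated array of raw rdata blobs with matching lengths in ub_result.len, next to canonname, ttl and the secure/bogus indicators. The helper below is an assumption about how an application might walk those fields, not code from this file.

#include <stdio.h>
#include <unbound.h>

/* walk the answer fields populated for one completed query */
static void print_result(struct ub_result* result)
{
	int i;
	printf("qname %s rcode %d ttl %d secure %d bogus %d\n",
		result->qname, result->rcode, result->ttl,
		result->secure, result->bogus);
	if(result->canonname)
		printf("canonical name: %s\n", result->canonname);
	if(result->bogus && result->why_bogus)
		printf("validation failure: %s\n", result->why_bogus);
	/* data[] ends with a NULL entry; len[] holds the rdata size per entry */
	for(i = 0; result->data && result->data[i]; i++)
		printf("answer %d: %d bytes of rdata\n", i, result->len[i]);
}
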
537 | | |
538 | | /** fillup fg results */ |
539 | | static void |
540 | | libworker_fillup_fg(struct ctx_query* q, int rcode, sldns_buffer* buf, |
541 | | enum sec_status s, char* why_bogus, int was_ratelimited) |
542 | 0 | { |
543 | 0 | q->res->was_ratelimited = was_ratelimited; |
544 | 0 | if(why_bogus) |
545 | 0 | q->res->why_bogus = strdup(why_bogus); |
546 | 0 | if(rcode != 0) { |
547 | 0 | q->res->rcode = rcode; |
548 | 0 | q->msg_security = s; |
549 | 0 | return; |
550 | 0 | } |
551 | | |
552 | 0 | q->res->rcode = LDNS_RCODE_SERVFAIL; |
553 | 0 | q->msg_security = sec_status_unchecked; |
554 | 0 | q->msg = memdup(sldns_buffer_begin(buf), sldns_buffer_limit(buf)); |
555 | 0 | q->msg_len = sldns_buffer_limit(buf); |
556 | 0 | if(!q->msg) { |
557 | 0 | return; /* the error is in the rcode */ |
558 | 0 | } |
559 | | |
560 | | /* canonname and results */ |
561 | 0 | q->msg_security = s; |
562 | 0 | libworker_enter_result(q->res, buf, q->w->env->scratch, s); |
563 | 0 | } |
564 | | |
565 | | void |
566 | | libworker_fg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s, |
567 | | char* why_bogus, int was_ratelimited) |
568 | 0 | { |
569 | 0 | struct ctx_query* q = (struct ctx_query*)arg; |
570 | | /* fg query is done; exit comm base */ |
571 | 0 | comm_base_exit(q->w->base); |
572 | |
573 | 0 | libworker_fillup_fg(q, rcode, buf, s, why_bogus, was_ratelimited); |
574 | 0 | } |
575 | | |
576 | | /** setup qinfo and edns */ |
577 | | static int |
578 | | setup_qinfo_edns(struct libworker* w, struct ctx_query* q, |
579 | | struct query_info* qinfo, struct edns_data* edns) |
580 | 0 | { |
581 | 0 | qinfo->qtype = (uint16_t)q->res->qtype; |
582 | 0 | qinfo->qclass = (uint16_t)q->res->qclass; |
583 | 0 | qinfo->local_alias = NULL; |
584 | 0 | qinfo->qname = sldns_str2wire_dname(q->res->qname, &qinfo->qname_len); |
585 | 0 | if(!qinfo->qname) { |
586 | 0 | return 0; |
587 | 0 | } |
588 | 0 | edns->edns_present = 1; |
589 | 0 | edns->ext_rcode = 0; |
590 | 0 | edns->edns_version = 0; |
591 | 0 | edns->bits = EDNS_DO; |
592 | 0 | edns->opt_list_in = NULL; |
593 | 0 | edns->opt_list_out = NULL; |
594 | 0 | edns->opt_list_inplace_cb_out = NULL; |
595 | 0 | edns->padding_block_size = 0; |
596 | 0 | edns->cookie_present = 0; |
597 | 0 | edns->cookie_valid = 0; |
598 | 0 | if(sldns_buffer_capacity(w->back->udp_buff) < 65535) |
599 | 0 | edns->udp_size = (uint16_t)sldns_buffer_capacity( |
600 | 0 | w->back->udp_buff); |
601 | 0 | else edns->udp_size = 65535; |
602 | 0 | return 1; |
603 | 0 | } |
604 | | |
605 | | int libworker_fg(struct ub_ctx* ctx, struct ctx_query* q) |
606 | 0 | { |
607 | 0 | struct libworker* w = libworker_setup(ctx, 0, NULL); |
608 | 0 | uint16_t qflags, qid; |
609 | 0 | struct query_info qinfo; |
610 | 0 | struct edns_data edns; |
611 | 0 | if(!w) |
612 | 0 | return UB_INITFAIL; |
613 | 0 | if(!setup_qinfo_edns(w, q, &qinfo, &edns)) { |
614 | 0 | libworker_delete(w); |
615 | 0 | return UB_SYNTAX; |
616 | 0 | } |
617 | 0 | qid = 0; |
618 | 0 | qflags = BIT_RD; |
619 | 0 | q->w = w; |
620 | | /* see if there is a fixed answer */ |
621 | 0 | sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid); |
622 | 0 | sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags); |
623 | 0 | if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns, |
624 | 0 | w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0, |
625 | 0 | NULL, 0, NULL, 0, NULL)) { |
626 | 0 | regional_free_all(w->env->scratch); |
627 | 0 | libworker_fillup_fg(q, LDNS_RCODE_NOERROR, |
628 | 0 | w->back->udp_buff, sec_status_insecure, NULL, 0); |
629 | 0 | libworker_delete(w); |
630 | 0 | free(qinfo.qname); |
631 | 0 | return UB_NOERROR; |
632 | 0 | } |
633 | 0 | if(ctx->env->auth_zones && auth_zones_downstream_answer( |
634 | 0 | ctx->env->auth_zones, w->env, &qinfo, &edns, NULL, |
635 | 0 | w->back->udp_buff, w->env->scratch)) { |
636 | 0 | regional_free_all(w->env->scratch); |
637 | 0 | libworker_fillup_fg(q, LDNS_RCODE_NOERROR, |
638 | 0 | w->back->udp_buff, sec_status_insecure, NULL, 0); |
639 | 0 | libworker_delete(w); |
640 | 0 | free(qinfo.qname); |
641 | 0 | return UB_NOERROR; |
642 | 0 | } |
643 | | /* process new query */ |
644 | 0 | if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns, |
645 | 0 | w->back->udp_buff, qid, libworker_fg_done_cb, q, 0)) { |
646 | 0 | free(qinfo.qname); |
647 | 0 | return UB_NOMEM; |
648 | 0 | } |
649 | 0 | free(qinfo.qname); |
650 | | |
651 | | /* wait for reply */ |
652 | 0 | comm_base_dispatch(w->base); |
653 | |
654 | 0 | libworker_delete(w); |
655 | 0 | return UB_NOERROR; |
656 | 0 | } |
657 | | |
658 | | void |
659 | | libworker_event_done_cb(void* arg, int rcode, sldns_buffer* buf, |
660 | | enum sec_status s, char* why_bogus, int was_ratelimited) |
661 | 0 | { |
662 | 0 | struct ctx_query* q = (struct ctx_query*)arg; |
663 | 0 | ub_event_callback_type cb = q->cb_event; |
664 | 0 | void* cb_arg = q->cb_arg; |
665 | 0 | int cancelled = q->cancelled; |
666 | | |
667 | | /* delete it now */ |
668 | 0 | struct ub_ctx* ctx = q->w->ctx; |
669 | 0 | lock_basic_lock(&ctx->cfglock); |
670 | 0 | (void)rbtree_delete(&ctx->queries, q->node.key); |
671 | 0 | ctx->num_async--; |
672 | 0 | context_query_delete(q); |
673 | 0 | lock_basic_unlock(&ctx->cfglock); |
674 | |
675 | 0 | if(!cancelled) { |
676 | | /* call callback */ |
677 | 0 | int sec = 0; |
678 | 0 | if(s == sec_status_bogus) |
679 | 0 | sec = 1; |
680 | 0 | else if(s == sec_status_secure) |
681 | 0 | sec = 2; |
682 | 0 | (*cb)(cb_arg, rcode, (buf?(void*)sldns_buffer_begin(buf):NULL), |
683 | 0 | (buf?(int)sldns_buffer_limit(buf):0), sec, why_bogus, was_ratelimited); |
684 | 0 | } |
685 | 0 | } |
686 | | |
687 | | int libworker_attach_mesh(struct ub_ctx* ctx, struct ctx_query* q, |
688 | | int* async_id) |
689 | 0 | { |
690 | 0 | struct libworker* w = ctx->event_worker; |
691 | 0 | uint16_t qflags, qid; |
692 | 0 | struct query_info qinfo; |
693 | 0 | struct edns_data edns; |
694 | 0 | if(!w) |
695 | 0 | return UB_INITFAIL; |
696 | 0 | if(!setup_qinfo_edns(w, q, &qinfo, &edns)) |
697 | 0 | return UB_SYNTAX; |
698 | 0 | qid = 0; |
699 | 0 | qflags = BIT_RD; |
700 | 0 | q->w = w; |
701 | | /* see if there is a fixed answer */ |
702 | 0 | sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid); |
703 | 0 | sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags); |
704 | 0 | if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns, |
705 | 0 | w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0, |
706 | 0 | NULL, 0, NULL, 0, NULL)) { |
707 | 0 | regional_free_all(w->env->scratch); |
708 | 0 | free(qinfo.qname); |
709 | 0 | libworker_event_done_cb(q, LDNS_RCODE_NOERROR, |
710 | 0 | w->back->udp_buff, sec_status_insecure, NULL, 0); |
711 | 0 | return UB_NOERROR; |
712 | 0 | } |
713 | 0 | if(ctx->env->auth_zones && auth_zones_downstream_answer( |
714 | 0 | ctx->env->auth_zones, w->env, &qinfo, &edns, NULL, |
715 | 0 | w->back->udp_buff, w->env->scratch)) { |
716 | 0 | regional_free_all(w->env->scratch); |
717 | 0 | free(qinfo.qname); |
718 | 0 | libworker_event_done_cb(q, LDNS_RCODE_NOERROR, |
719 | 0 | w->back->udp_buff, sec_status_insecure, NULL, 0); |
720 | 0 | return UB_NOERROR; |
721 | 0 | } |
722 | | /* process new query */ |
723 | 0 | if(async_id) |
724 | 0 | *async_id = q->querynum; |
725 | 0 | if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns, |
726 | 0 | w->back->udp_buff, qid, libworker_event_done_cb, q, 0)) { |
727 | 0 | free(qinfo.qname); |
728 | 0 | return UB_NOMEM; |
729 | 0 | } |
730 | 0 | free(qinfo.qname); |
731 | 0 | return UB_NOERROR; |
732 | 0 | } |
733 | | |
734 | | /** add result to the bg worker result queue */ |
735 | | static void |
736 | | add_bg_result(struct libworker* w, struct ctx_query* q, sldns_buffer* pkt, |
737 | | int err, char* reason, int was_ratelimited) |
738 | 0 | { |
739 | 0 | uint8_t* msg = NULL; |
740 | 0 | uint32_t len = 0; |
741 | |
742 | 0 | if(w->want_quit) { |
743 | 0 | context_query_delete(q); |
744 | 0 | return; |
745 | 0 | } |
746 | | /* serialize and delete unneeded q */ |
747 | 0 | if(w->is_bg_thread) { |
748 | 0 | lock_basic_lock(&w->ctx->cfglock); |
749 | 0 | if(reason) |
750 | 0 | q->res->why_bogus = strdup(reason); |
751 | 0 | q->res->was_ratelimited = was_ratelimited; |
752 | 0 | if(pkt) { |
753 | 0 | q->msg_len = sldns_buffer_remaining(pkt); |
754 | 0 | q->msg = memdup(sldns_buffer_begin(pkt), q->msg_len); |
755 | 0 | if(!q->msg) { |
756 | 0 | msg = context_serialize_answer(q, UB_NOMEM, NULL, &len); |
757 | 0 | } else { |
758 | 0 | msg = context_serialize_answer(q, err, NULL, &len); |
759 | 0 | } |
760 | 0 | } else { |
761 | 0 | msg = context_serialize_answer(q, err, NULL, &len); |
762 | 0 | } |
763 | 0 | lock_basic_unlock(&w->ctx->cfglock); |
764 | 0 | } else { |
765 | 0 | if(reason) |
766 | 0 | q->res->why_bogus = strdup(reason); |
767 | 0 | q->res->was_ratelimited = was_ratelimited; |
768 | 0 | msg = context_serialize_answer(q, err, pkt, &len); |
769 | 0 | (void)rbtree_delete(&w->ctx->queries, q->node.key); |
770 | 0 | w->ctx->num_async--; |
771 | 0 | context_query_delete(q); |
772 | 0 | } |
773 | |
774 | 0 | if(!msg) { |
775 | 0 | log_err("out of memory for async answer"); |
776 | 0 | return; |
777 | 0 | } |
778 | 0 | if(!tube_queue_item(w->ctx->rr_pipe, msg, len)) { |
779 | 0 | log_err("out of memory for async answer"); |
780 | 0 | return; |
781 | 0 | } |
782 | 0 | } |
783 | | |
784 | | void |
785 | | libworker_bg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s, |
786 | | char* why_bogus, int was_ratelimited) |
787 | 0 | { |
788 | 0 | struct ctx_query* q = (struct ctx_query*)arg; |
789 | |
790 | 0 | if(q->cancelled || q->w->back->want_to_quit) { |
791 | 0 | if(q->w->is_bg_thread) { |
792 | | /* delete it now */ |
793 | 0 | struct ub_ctx* ctx = q->w->ctx; |
794 | 0 | lock_basic_lock(&ctx->cfglock); |
795 | 0 | (void)rbtree_delete(&ctx->queries, q->node.key); |
796 | 0 | ctx->num_async--; |
797 | 0 | context_query_delete(q); |
798 | 0 | lock_basic_unlock(&ctx->cfglock); |
799 | 0 | } |
800 | | /* cancelled, do not give answer */ |
801 | 0 | return; |
802 | 0 | } |
803 | 0 | q->msg_security = s; |
804 | 0 | if(!buf) { |
805 | 0 | buf = q->w->env->scratch_buffer; |
806 | 0 | } |
807 | 0 | if(rcode != 0) { |
808 | 0 | error_encode(buf, rcode, NULL, 0, BIT_RD, NULL); |
809 | 0 | } |
810 | 0 | add_bg_result(q->w, q, buf, UB_NOERROR, why_bogus, was_ratelimited); |
811 | 0 | } |
812 | | |
813 | | |
814 | | /** handle new query command for bg worker */ |
815 | | static void |
816 | | handle_newq(struct libworker* w, uint8_t* buf, uint32_t len) |
817 | 0 | { |
818 | 0 | uint16_t qflags, qid; |
819 | 0 | struct query_info qinfo; |
820 | 0 | struct edns_data edns; |
821 | 0 | struct ctx_query* q; |
822 | 0 | if(w->is_bg_thread) { |
823 | 0 | lock_basic_lock(&w->ctx->cfglock); |
824 | 0 | q = context_lookup_new_query(w->ctx, buf, len); |
825 | 0 | lock_basic_unlock(&w->ctx->cfglock); |
826 | 0 | } else { |
827 | 0 | q = context_deserialize_new_query(w->ctx, buf, len); |
828 | 0 | } |
829 | 0 | free(buf); |
830 | 0 | if(!q) { |
831 | 0 | log_err("failed to deserialize newq"); |
832 | 0 | return; |
833 | 0 | } |
834 | 0 | if(!setup_qinfo_edns(w, q, &qinfo, &edns)) { |
835 | 0 | add_bg_result(w, q, NULL, UB_SYNTAX, NULL, 0); |
836 | 0 | return; |
837 | 0 | } |
838 | 0 | qid = 0; |
839 | 0 | qflags = BIT_RD; |
840 | | /* see if there is a fixed answer */ |
841 | 0 | sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid); |
842 | 0 | sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags); |
843 | 0 | if(local_zones_answer(w->ctx->local_zones, w->env, &qinfo, &edns, |
844 | 0 | w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0, |
845 | 0 | NULL, 0, NULL, 0, NULL)) { |
846 | 0 | regional_free_all(w->env->scratch); |
847 | 0 | q->msg_security = sec_status_insecure; |
848 | 0 | add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0); |
849 | 0 | free(qinfo.qname); |
850 | 0 | return; |
851 | 0 | } |
852 | 0 | if(w->ctx->env->auth_zones && auth_zones_downstream_answer( |
853 | 0 | w->ctx->env->auth_zones, w->env, &qinfo, &edns, NULL, |
854 | 0 | w->back->udp_buff, w->env->scratch)) { |
855 | 0 | regional_free_all(w->env->scratch); |
856 | 0 | q->msg_security = sec_status_insecure; |
857 | 0 | add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0); |
858 | 0 | free(qinfo.qname); |
859 | 0 | return; |
860 | 0 | } |
861 | 0 | q->w = w; |
862 | | /* process new query */ |
863 | 0 | if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns, |
864 | 0 | w->back->udp_buff, qid, libworker_bg_done_cb, q, 0)) { |
865 | 0 | add_bg_result(w, q, NULL, UB_NOMEM, NULL, 0); |
866 | 0 | } |
867 | 0 | free(qinfo.qname); |
868 | 0 | } |
869 | | |
870 | | void libworker_alloc_cleanup(void* arg) |
871 | 0 | { |
872 | 0 | struct libworker* w = (struct libworker*)arg; |
873 | 0 | slabhash_clear(&w->env->rrset_cache->table); |
874 | 0 | slabhash_clear(w->env->msg_cache); |
875 | 0 | } |
876 | | |
877 | | struct outbound_entry* libworker_send_query(struct query_info* qinfo, |
878 | | uint16_t flags, int dnssec, int want_dnssec, int nocaps, |
879 | | int check_ratelimit, |
880 | | struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone, |
881 | | size_t zonelen, int tcp_upstream, int ssl_upstream, char* tls_auth_name, |
882 | | struct module_qstate* q, int* was_ratelimited) |
883 | 0 | { |
884 | 0 | struct libworker* w = (struct libworker*)q->env->worker; |
885 | 0 | struct outbound_entry* e = (struct outbound_entry*)regional_alloc( |
886 | 0 | q->region, sizeof(*e)); |
887 | 0 | if(!e) |
888 | 0 | return NULL; |
889 | 0 | e->qstate = q; |
890 | 0 | e->qsent = outnet_serviced_query(w->back, qinfo, flags, dnssec, |
891 | 0 | want_dnssec, nocaps, check_ratelimit, tcp_upstream, ssl_upstream, |
892 | 0 | tls_auth_name, addr, addrlen, zone, zonelen, q, |
893 | 0 | libworker_handle_service_reply, e, w->back->udp_buff, q->env, |
894 | 0 | was_ratelimited); |
895 | 0 | if(!e->qsent) { |
896 | 0 | return NULL; |
897 | 0 | } |
898 | 0 | return e; |
899 | 0 | } |
900 | | |
901 | | int |
902 | | libworker_handle_service_reply(struct comm_point* c, void* arg, int error, |
903 | | struct comm_reply* reply_info) |
904 | 0 | { |
905 | 0 | struct outbound_entry* e = (struct outbound_entry*)arg; |
906 | 0 | struct libworker* lw = (struct libworker*)e->qstate->env->worker; |
907 | |
908 | 0 | if(error != 0) { |
909 | 0 | mesh_report_reply(lw->env->mesh, e, reply_info, error); |
910 | 0 | return 0; |
911 | 0 | } |
912 | | /* sanity check. */ |
913 | 0 | if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer)) |
914 | 0 | || LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) != |
915 | 0 | LDNS_PACKET_QUERY |
916 | 0 | || LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) { |
917 | | /* error becomes timeout for the module as if this reply |
918 | | * never arrived. */ |
919 | 0 | mesh_report_reply(lw->env->mesh, e, reply_info, |
920 | 0 | NETEVENT_TIMEOUT); |
921 | 0 | return 0; |
922 | 0 | } |
923 | 0 | mesh_report_reply(lw->env->mesh, e, reply_info, NETEVENT_NOERROR); |
924 | 0 | return 0; |
925 | 0 | } |
926 | | |
927 | | /* --- fake callbacks for fptr_wlist to work --- */ |
928 | | void worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube), |
929 | | uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len), |
930 | | int ATTR_UNUSED(error), void* ATTR_UNUSED(arg)) |
931 | 0 | { |
932 | 0 | log_assert(0); |
933 | 0 | } |
934 | | |
935 | | int worker_handle_request(struct comm_point* ATTR_UNUSED(c), |
936 | | void* ATTR_UNUSED(arg), int ATTR_UNUSED(error), |
937 | | struct comm_reply* ATTR_UNUSED(repinfo)) |
938 | 0 | { |
939 | 0 | log_assert(0); |
940 | 0 | return 0; |
941 | 0 | } |
942 | | |
943 | | int worker_handle_service_reply(struct comm_point* ATTR_UNUSED(c), |
944 | | void* ATTR_UNUSED(arg), int ATTR_UNUSED(error), |
945 | | struct comm_reply* ATTR_UNUSED(reply_info)) |
946 | 0 | { |
947 | 0 | log_assert(0); |
948 | 0 | return 0; |
949 | 0 | } |
950 | | |
951 | | int remote_accept_callback(struct comm_point* ATTR_UNUSED(c), |
952 | | void* ATTR_UNUSED(arg), int ATTR_UNUSED(error), |
953 | | struct comm_reply* ATTR_UNUSED(repinfo)) |
954 | 0 | { |
955 | 0 | log_assert(0); |
956 | 0 | return 0; |
957 | 0 | } |
958 | | |
959 | | int remote_control_callback(struct comm_point* ATTR_UNUSED(c), |
960 | | void* ATTR_UNUSED(arg), int ATTR_UNUSED(error), |
961 | | struct comm_reply* ATTR_UNUSED(repinfo)) |
962 | 0 | { |
963 | 0 | log_assert(0); |
964 | 0 | return 0; |
965 | 0 | } |
966 | | |
967 | | void worker_sighandler(int ATTR_UNUSED(sig), void* ATTR_UNUSED(arg)) |
968 | 0 | { |
969 | 0 | log_assert(0); |
970 | 0 | } |
971 | | |
972 | | struct outbound_entry* worker_send_query(struct query_info* ATTR_UNUSED(qinfo), |
973 | | uint16_t ATTR_UNUSED(flags), int ATTR_UNUSED(dnssec), |
974 | | int ATTR_UNUSED(want_dnssec), int ATTR_UNUSED(nocaps), |
975 | | int ATTR_UNUSED(check_ratelimit), |
976 | | struct sockaddr_storage* ATTR_UNUSED(addr), socklen_t ATTR_UNUSED(addrlen), |
977 | | uint8_t* ATTR_UNUSED(zone), size_t ATTR_UNUSED(zonelen), int ATTR_UNUSED(tcp_upstream), |
978 | | int ATTR_UNUSED(ssl_upstream), char* ATTR_UNUSED(tls_auth_name), |
979 | | struct module_qstate* ATTR_UNUSED(q), int* ATTR_UNUSED(was_ratelimited)) |
980 | 0 | { |
981 | 0 | log_assert(0); |
982 | 0 | return 0; |
983 | 0 | } |
984 | | |
985 | | void |
986 | | worker_alloc_cleanup(void* ATTR_UNUSED(arg)) |
987 | 0 | { |
988 | 0 | log_assert(0); |
989 | 0 | } |
990 | | |
991 | | void worker_stat_timer_cb(void* ATTR_UNUSED(arg)) |
992 | 0 | { |
993 | 0 | log_assert(0); |
994 | 0 | } |
995 | | |
996 | | void worker_probe_timer_cb(void* ATTR_UNUSED(arg)) |
997 | 0 | { |
998 | 0 | log_assert(0); |
999 | 0 | } |
1000 | | |
1001 | | void worker_start_accept(void* ATTR_UNUSED(arg)) |
1002 | 0 | { |
1003 | 0 | log_assert(0); |
1004 | 0 | } |
1005 | | |
1006 | | void worker_stop_accept(void* ATTR_UNUSED(arg)) |
1007 | 0 | { |
1008 | 0 | log_assert(0); |
1009 | 0 | } |
1010 | | |
1011 | | int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2)) |
1012 | 0 | { |
1013 | 0 | log_assert(0); |
1014 | 0 | return 0; |
1015 | 0 | } |
1016 | | |
1017 | | int |
1018 | | codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b)) |
1019 | 0 | { |
1020 | 0 | log_assert(0); |
1021 | 0 | return 0; |
1022 | 0 | } |
1023 | | |
1024 | | int replay_var_compare(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b)) |
1025 | 0 | { |
1026 | 0 | log_assert(0); |
1027 | 0 | return 0; |
1028 | 0 | } |
1029 | | |
1030 | | void remote_get_opt_ssl(char* ATTR_UNUSED(str), void* ATTR_UNUSED(arg)) |
1031 | 0 | { |
1032 | 0 | log_assert(0); |
1033 | 0 | } |
1034 | | |
1035 | | #ifdef UB_ON_WINDOWS |
1036 | | void |
1037 | | worker_win_stop_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), void* |
1038 | | ATTR_UNUSED(arg)) { |
1039 | | log_assert(0); |
1040 | | } |
1041 | | |
1042 | | void |
1043 | | wsvc_cron_cb(void* ATTR_UNUSED(arg)) |
1044 | | { |
1045 | | log_assert(0); |
1046 | | } |
1047 | | #endif /* UB_ON_WINDOWS */ |
1048 | | |
1049 | | #ifdef USE_DNSTAP |
1050 | | void dtio_tap_callback(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), |
1051 | | void* ATTR_UNUSED(arg)) |
1052 | | { |
1053 | | log_assert(0); |
1054 | | } |
1055 | | #endif |
1056 | | |
1057 | | #ifdef USE_DNSTAP |
1058 | | void dtio_mainfdcallback(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), |
1059 | | void* ATTR_UNUSED(arg)) |
1060 | | { |
1061 | | log_assert(0); |
1062 | | } |
1063 | | #endif |
1064 | | |
1065 | | void fast_reload_service_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), |
1066 | | void* ATTR_UNUSED(arg)) |
1067 | 0 | { |
1068 | 0 | log_assert(0); |
1069 | 0 | } |
1070 | | |
1071 | | int fast_reload_client_callback(struct comm_point* ATTR_UNUSED(c), |
1072 | | void* ATTR_UNUSED(arg), int ATTR_UNUSED(error), |
1073 | | struct comm_reply* ATTR_UNUSED(repinfo)) |
1074 | 0 | { |
1075 | 0 | log_assert(0); |
1076 | 0 | return 0; |
1077 | 0 | } |
1078 | | |
1079 | | #ifdef HAVE_NGTCP2 |
1080 | | void doq_client_event_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), |
1081 | | void* ATTR_UNUSED(arg)) |
1082 | | { |
1083 | | log_assert(0); |
1084 | | } |
1085 | | #endif |
1086 | | |
1087 | | #ifdef HAVE_NGTCP2 |
1088 | | void doq_client_timer_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), |
1089 | | void* ATTR_UNUSED(arg)) |
1090 | | { |
1091 | | log_assert(0); |
1092 | | } |
1093 | | #endif |