Coverage Report

Created: 2023-03-26 06:07

/src/unbound/services/mesh.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * services/mesh.c - deal with mesh of query states and handle events for that.
3
 *
4
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
5
 *
6
 * This software is open source.
7
 * 
8
 * Redistribution and use in source and binary forms, with or without
9
 * modification, are permitted provided that the following conditions
10
 * are met:
11
 * 
12
 * Redistributions of source code must retain the above copyright notice,
13
 * this list of conditions and the following disclaimer.
14
 * 
15
 * Redistributions in binary form must reproduce the above copyright notice,
16
 * this list of conditions and the following disclaimer in the documentation
17
 * and/or other materials provided with the distribution.
18
 * 
19
 * Neither the name of the NLNET LABS nor the names of its contributors may
20
 * be used to endorse or promote products derived from this software without
21
 * specific prior written permission.
22
 * 
23
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34
 */
35
36
/**
37
 * \file
38
 *
39
 * This file contains functions to assist in dealing with a mesh of
40
 * query states. This mesh is supposed to be thread-specific.
41
 * It consists of query states (per qname, qtype, qclass) and connections
42
 * between query states and the super and subquery states, and replies to
43
 * send back to clients.
44
 */
45
#include "config.h"
46
#include "services/mesh.h"
47
#include "services/outbound_list.h"
48
#include "services/cache/dns.h"
49
#include "services/cache/rrset.h"
50
#include "util/log.h"
51
#include "util/net_help.h"
52
#include "util/module.h"
53
#include "util/regional.h"
54
#include "util/data/msgencode.h"
55
#include "util/timehist.h"
56
#include "util/fptr_wlist.h"
57
#include "util/alloc.h"
58
#include "util/config_file.h"
59
#include "util/edns.h"
60
#include "sldns/sbuffer.h"
61
#include "sldns/wire2str.h"
62
#include "services/localzone.h"
63
#include "util/data/dname.h"
64
#include "respip/respip.h"
65
#include "services/listen_dnsport.h"
66
67
#ifdef CLIENT_SUBNET
68
#include "edns-subnet/subnetmod.h"
69
#include "edns-subnet/edns-subnet.h"
70
#endif
71
72
/** subtract timers and the values do not overflow or become negative */
73
static void
74
timeval_subtract(struct timeval* d, const struct timeval* end, const struct timeval* start)
75
0
{
76
0
#ifndef S_SPLINT_S
77
0
  time_t end_usec = end->tv_usec;
78
0
  d->tv_sec = end->tv_sec - start->tv_sec;
79
0
  if(end_usec < start->tv_usec) {
80
0
    end_usec += 1000000;
81
0
    d->tv_sec--;
82
0
  }
83
0
  d->tv_usec = end_usec - start->tv_usec;
84
0
#endif
85
0
}
86
87
/** add timers and the values do not overflow or become negative */
88
static void
89
timeval_add(struct timeval* d, const struct timeval* add)
90
0
{
91
0
#ifndef S_SPLINT_S
92
0
  d->tv_sec += add->tv_sec;
93
0
  d->tv_usec += add->tv_usec;
94
0
  if(d->tv_usec >= 1000000 ) {
95
0
    d->tv_usec -= 1000000;
96
0
    d->tv_sec++;
97
0
  }
98
0
#endif
99
0
}
100
101
/** divide sum of timers to get average */
102
static void
103
timeval_divide(struct timeval* avg, const struct timeval* sum, size_t d)
104
0
{
105
0
#ifndef S_SPLINT_S
106
0
  size_t leftover;
107
0
  if(d <= 0) {
108
0
    avg->tv_sec = 0;
109
0
    avg->tv_usec = 0;
110
0
    return;
111
0
  }
112
0
  avg->tv_sec = sum->tv_sec / d;
113
0
  avg->tv_usec = sum->tv_usec / d;
114
  /* handle fraction from seconds divide */
115
0
  leftover = sum->tv_sec - avg->tv_sec*d;
116
0
  if(leftover <= 0)
117
0
    leftover = 0;
118
0
  avg->tv_usec += (((long long)leftover)*((long long)1000000))/d;
119
0
  if(avg->tv_sec < 0)
120
0
    avg->tv_sec = 0;
121
0
  if(avg->tv_usec < 0)
122
0
    avg->tv_usec = 0;
123
0
#endif
124
0
}
125
126
/** histogram compare of time values */
127
static int
128
timeval_smaller(const struct timeval* x, const struct timeval* y)
129
0
{
130
0
#ifndef S_SPLINT_S
131
0
  if(x->tv_sec < y->tv_sec)
132
0
    return 1;
133
0
  else if(x->tv_sec == y->tv_sec) {
134
0
    if(x->tv_usec <= y->tv_usec)
135
0
      return 1;
136
0
    else  return 0;
137
0
  }
138
0
  else  return 0;
139
0
#endif
140
0
}
141
142
/**
143
 * Compare two response-ip client info entries for the purpose of mesh state
144
 * compare.  It returns 0 if ci_a and ci_b are considered equal; otherwise
145
 * 1 or -1 (they mean 'ci_a is larger/smaller than ci_b', respectively, but
146
 * in practice it should be only used to mean they are different).
147
 * We cannot share the mesh state for two queries if different response-ip
148
 * actions can apply in the end, even if those queries are otherwise identical.
149
 * For this purpose we compare tag lists and tag action lists; they should be
150
 * identical to share the same state.
151
 * For tag data, we don't look into the data content, as it can be
152
 * expensive; unless tag data are not defined for both or they point to the
153
 * exact same data in memory (i.e., they come from the same ACL entry), we
154
 * consider these data different.
155
 * Likewise, if the client info is associated with views, we don't look into
156
 * the views.  They are considered different unless they are exactly the same
157
 * even if the views only differ in the names.
158
 */
159
static int
160
client_info_compare(const struct respip_client_info* ci_a,
161
  const struct respip_client_info* ci_b)
162
0
{
163
0
  int cmp;
164
165
0
  if(!ci_a && !ci_b)
166
0
    return 0;
167
0
  if(ci_a && !ci_b)
168
0
    return -1;
169
0
  if(!ci_a && ci_b)
170
0
    return 1;
171
0
  if(ci_a->taglen != ci_b->taglen)
172
0
    return (ci_a->taglen < ci_b->taglen) ? -1 : 1;
173
0
  if(ci_a->taglist && !ci_b->taglist)
174
0
    return -1;
175
0
  if(!ci_a->taglist && ci_b->taglist)
176
0
    return 1;
177
0
  if(ci_a->taglist && ci_b->taglist) {
178
0
    cmp = memcmp(ci_a->taglist, ci_b->taglist, ci_a->taglen);
179
0
    if(cmp != 0)
180
0
      return cmp;
181
0
  }
182
0
  if(ci_a->tag_actions_size != ci_b->tag_actions_size)
183
0
    return (ci_a->tag_actions_size < ci_b->tag_actions_size) ?
184
0
      -1 : 1;
185
0
  if(ci_a->tag_actions && !ci_b->tag_actions)
186
0
    return -1;
187
0
  if(!ci_a->tag_actions && ci_b->tag_actions)
188
0
    return 1;
189
0
  if(ci_a->tag_actions && ci_b->tag_actions) {
190
0
    cmp = memcmp(ci_a->tag_actions, ci_b->tag_actions,
191
0
      ci_a->tag_actions_size);
192
0
    if(cmp != 0)
193
0
      return cmp;
194
0
  }
195
0
  if(ci_a->tag_datas != ci_b->tag_datas)
196
0
    return ci_a->tag_datas < ci_b->tag_datas ? -1 : 1;
197
0
  if(ci_a->view != ci_b->view)
198
0
    return ci_a->view < ci_b->view ? -1 : 1;
199
  /* For the unbound daemon these should be non-NULL and identical,
200
   * but we check that just in case. */
201
0
  if(ci_a->respip_set != ci_b->respip_set)
202
0
    return ci_a->respip_set < ci_b->respip_set ? -1 : 1;
203
0
  return 0;
204
0
}
205
206
int
207
mesh_state_compare(const void* ap, const void* bp)
208
0
{
209
0
  struct mesh_state* a = (struct mesh_state*)ap;
210
0
  struct mesh_state* b = (struct mesh_state*)bp;
211
0
  int cmp;
212
213
0
  if(a->unique < b->unique)
214
0
    return -1;
215
0
  if(a->unique > b->unique)
216
0
    return 1;
217
218
0
  if(a->s.is_priming && !b->s.is_priming)
219
0
    return -1;
220
0
  if(!a->s.is_priming && b->s.is_priming)
221
0
    return 1;
222
223
0
  if(a->s.is_valrec && !b->s.is_valrec)
224
0
    return -1;
225
0
  if(!a->s.is_valrec && b->s.is_valrec)
226
0
    return 1;
227
228
0
  if((a->s.query_flags&BIT_RD) && !(b->s.query_flags&BIT_RD))
229
0
    return -1;
230
0
  if(!(a->s.query_flags&BIT_RD) && (b->s.query_flags&BIT_RD))
231
0
    return 1;
232
233
0
  if((a->s.query_flags&BIT_CD) && !(b->s.query_flags&BIT_CD))
234
0
    return -1;
235
0
  if(!(a->s.query_flags&BIT_CD) && (b->s.query_flags&BIT_CD))
236
0
    return 1;
237
238
0
  cmp = query_info_compare(&a->s.qinfo, &b->s.qinfo);
239
0
  if(cmp != 0)
240
0
    return cmp;
241
0
  return client_info_compare(a->s.client_info, b->s.client_info);
242
0
}
243
244
int
245
mesh_state_ref_compare(const void* ap, const void* bp)
246
0
{
247
0
  struct mesh_state_ref* a = (struct mesh_state_ref*)ap;
248
0
  struct mesh_state_ref* b = (struct mesh_state_ref*)bp;
249
0
  return mesh_state_compare(a->s, b->s);
250
0
}
251
252
struct mesh_area* 
253
mesh_create(struct module_stack* stack, struct module_env* env)
254
0
{
255
0
  struct mesh_area* mesh = calloc(1, sizeof(struct mesh_area));
256
0
  if(!mesh) {
257
0
    log_err("mesh area alloc: out of memory");
258
0
    return NULL;
259
0
  }
260
0
  mesh->histogram = timehist_setup();
261
0
  mesh->qbuf_bak = sldns_buffer_new(env->cfg->msg_buffer_size);
262
0
  if(!mesh->histogram || !mesh->qbuf_bak) {
263
0
    free(mesh);
264
0
    log_err("mesh area alloc: out of memory");
265
0
    return NULL;
266
0
  }
267
0
  mesh->mods = *stack;
268
0
  mesh->env = env;
269
0
  rbtree_init(&mesh->run, &mesh_state_compare);
270
0
  rbtree_init(&mesh->all, &mesh_state_compare);
271
0
  mesh->num_reply_addrs = 0;
272
0
  mesh->num_reply_states = 0;
273
0
  mesh->num_detached_states = 0;
274
0
  mesh->num_forever_states = 0;
275
0
  mesh->stats_jostled = 0;
276
0
  mesh->stats_dropped = 0;
277
0
  mesh->ans_expired = 0;
278
0
  mesh->max_reply_states = env->cfg->num_queries_per_thread;
279
0
  mesh->max_forever_states = (mesh->max_reply_states+1)/2;
280
0
#ifndef S_SPLINT_S
281
0
  mesh->jostle_max.tv_sec = (time_t)(env->cfg->jostle_time / 1000);
282
0
  mesh->jostle_max.tv_usec = (time_t)((env->cfg->jostle_time % 1000)
283
0
    *1000);
284
0
#endif
285
0
  return mesh;
286
0
}
287
288
/** helper for mesh_delete: deletes the mesh states */
289
static void
290
mesh_delete_helper(rbnode_type* n)
291
0
{
292
0
  struct mesh_state* mstate = (struct mesh_state*)n->key;
293
  /* perform a full delete, not only 'cleanup' routine,
294
   * because other callbacks expect a clean state in the mesh.
295
   * For 're-entrant' calls */
296
0
  mesh_state_delete(&mstate->s);
297
  /* but because these delete the items from the tree, postorder
298
   * traversal and rbtree rebalancing do not work together */
299
0
}
300
301
void 
302
mesh_delete(struct mesh_area* mesh)
303
0
{
304
0
  if(!mesh)
305
0
    return;
306
  /* free all query states */
307
0
  while(mesh->all.count)
308
0
    mesh_delete_helper(mesh->all.root);
309
0
  timehist_delete(mesh->histogram);
310
0
  sldns_buffer_free(mesh->qbuf_bak);
311
0
  free(mesh);
312
0
}
313
314
void
315
mesh_delete_all(struct mesh_area* mesh)
316
0
{
317
  /* free all query states */
318
0
  while(mesh->all.count)
319
0
    mesh_delete_helper(mesh->all.root);
320
0
  mesh->stats_dropped += mesh->num_reply_addrs;
321
  /* clear mesh area references */
322
0
  rbtree_init(&mesh->run, &mesh_state_compare);
323
0
  rbtree_init(&mesh->all, &mesh_state_compare);
324
0
  mesh->num_reply_addrs = 0;
325
0
  mesh->num_reply_states = 0;
326
0
  mesh->num_detached_states = 0;
327
0
  mesh->num_forever_states = 0;
328
0
  mesh->forever_first = NULL;
329
0
  mesh->forever_last = NULL;
330
0
  mesh->jostle_first = NULL;
331
0
  mesh->jostle_last = NULL;
332
0
}
333
334
int mesh_make_new_space(struct mesh_area* mesh, sldns_buffer* qbuf)
335
0
{
336
0
  struct mesh_state* m = mesh->jostle_first;
337
  /* free space is available */
338
0
  if(mesh->num_reply_states < mesh->max_reply_states)
339
0
    return 1;
340
  /* try to kick out a jostle-list item */
341
0
  if(m && m->reply_list && m->list_select == mesh_jostle_list) {
342
    /* how old is it? */
343
0
    struct timeval age;
344
0
    timeval_subtract(&age, mesh->env->now_tv, 
345
0
      &m->reply_list->start_time);
346
0
    if(timeval_smaller(&mesh->jostle_max, &age)) {
347
      /* its a goner */
348
0
      log_nametypeclass(VERB_ALGO, "query jostled out to "
349
0
        "make space for a new one",
350
0
        m->s.qinfo.qname, m->s.qinfo.qtype,
351
0
        m->s.qinfo.qclass);
352
      /* backup the query */
353
0
      if(qbuf) sldns_buffer_copy(mesh->qbuf_bak, qbuf);
354
      /* notify supers */
355
0
      if(m->super_set.count > 0) {
356
0
        verbose(VERB_ALGO, "notify supers of failure");
357
0
        m->s.return_msg = NULL;
358
0
        m->s.return_rcode = LDNS_RCODE_SERVFAIL;
359
0
        mesh_walk_supers(mesh, m);
360
0
      }
361
0
      mesh->stats_jostled ++;
362
0
      mesh_state_delete(&m->s);
363
      /* restore the query - note that the qinfo ptr to
364
       * the querybuffer is then correct again. */
365
0
      if(qbuf) sldns_buffer_copy(qbuf, mesh->qbuf_bak);
366
0
      return 1;
367
0
    }
368
0
  }
369
  /* no space for new item */
370
0
  return 0;
371
0
}
372
373
struct dns_msg*
374
mesh_serve_expired_lookup(struct module_qstate* qstate,
375
  struct query_info* lookup_qinfo)
376
0
{
377
0
  hashvalue_type h;
378
0
  struct lruhash_entry* e;
379
0
  struct dns_msg* msg;
380
0
  struct reply_info* data;
381
0
  struct msgreply_entry* key;
382
0
  time_t timenow = *qstate->env->now;
383
0
  int must_validate = (!(qstate->query_flags&BIT_CD)
384
0
    || qstate->env->cfg->ignore_cd) && qstate->env->need_to_validate;
385
  /* Lookup cache */
386
0
  h = query_info_hash(lookup_qinfo, qstate->query_flags);
387
0
  e = slabhash_lookup(qstate->env->msg_cache, h, lookup_qinfo, 0);
388
0
  if(!e) return NULL;
389
390
0
  key = (struct msgreply_entry*)e->key;
391
0
  data = (struct reply_info*)e->data;
392
0
  msg = tomsg(qstate->env, &key->key, data, qstate->region, timenow,
393
0
    qstate->env->cfg->serve_expired, qstate->env->scratch);
394
0
  if(!msg)
395
0
    goto bail_out;
396
397
  /* Check CNAME chain (if any)
398
   * This is part of tomsg above; no need to check now. */
399
400
  /* Check security status of the cached answer.
401
   * tomsg above has a subset of these checks, so we are leaving
402
   * these as is.
403
   * In case of bogus or revalidation we don't care to reply here. */
404
0
  if(must_validate && (msg->rep->security == sec_status_bogus ||
405
0
    msg->rep->security == sec_status_secure_sentinel_fail)) {
406
0
    verbose(VERB_ALGO, "Serve expired: bogus answer found in cache");
407
0
    goto bail_out;
408
0
  } else if(msg->rep->security == sec_status_unchecked && must_validate) {
409
0
    verbose(VERB_ALGO, "Serve expired: unchecked entry needs "
410
0
      "validation");
411
0
    goto bail_out; /* need to validate cache entry first */
412
0
  } else if(msg->rep->security == sec_status_secure &&
413
0
    !reply_all_rrsets_secure(msg->rep) && must_validate) {
414
0
      verbose(VERB_ALGO, "Serve expired: secure entry"
415
0
        " changed status");
416
0
      goto bail_out; /* rrset changed, re-verify */
417
0
  }
418
419
0
  lock_rw_unlock(&e->lock);
420
0
  return msg;
421
422
0
bail_out:
423
0
  lock_rw_unlock(&e->lock);
424
0
  return NULL;
425
0
}
426
427
428
/** Init the serve expired data structure */
429
static int
430
mesh_serve_expired_init(struct mesh_state* mstate, int timeout)
431
0
{
432
0
  struct timeval t;
433
434
  /* Create serve_expired_data if not there yet */
435
0
  if(!mstate->s.serve_expired_data) {
436
0
    mstate->s.serve_expired_data = (struct serve_expired_data*)
437
0
      regional_alloc_zero(
438
0
        mstate->s.region, sizeof(struct serve_expired_data));
439
0
    if(!mstate->s.serve_expired_data)
440
0
      return 0;
441
0
  }
442
443
  /* Don't overwrite the function if already set */
444
0
  mstate->s.serve_expired_data->get_cached_answer =
445
0
    mstate->s.serve_expired_data->get_cached_answer?
446
0
    mstate->s.serve_expired_data->get_cached_answer:
447
0
    &mesh_serve_expired_lookup;
448
449
  /* In case this timer already popped, start it again */
450
0
  if(!mstate->s.serve_expired_data->timer) {
451
0
    mstate->s.serve_expired_data->timer = comm_timer_create(
452
0
      mstate->s.env->worker_base, mesh_serve_expired_callback, mstate);
453
0
    if(!mstate->s.serve_expired_data->timer)
454
0
      return 0;
455
0
#ifndef S_SPLINT_S
456
0
    t.tv_sec = timeout/1000;
457
0
    t.tv_usec = (timeout%1000)*1000;
458
0
#endif
459
0
    comm_timer_set(mstate->s.serve_expired_data->timer, &t);
460
0
  }
461
0
  return 1;
462
0
}
463
464
void mesh_new_client(struct mesh_area* mesh, struct query_info* qinfo,
465
  struct respip_client_info* cinfo, uint16_t qflags,
466
  struct edns_data* edns, struct comm_reply* rep, uint16_t qid,
467
  int rpz_passthru)
468
0
{
469
0
  struct mesh_state* s = NULL;
470
0
  int unique = unique_mesh_state(edns->opt_list_in, mesh->env);
471
0
  int was_detached = 0;
472
0
  int was_noreply = 0;
473
0
  int added = 0;
474
0
  int timeout = mesh->env->cfg->serve_expired?
475
0
    mesh->env->cfg->serve_expired_client_timeout:0;
476
0
  struct sldns_buffer* r_buffer = rep->c->buffer;
477
0
  if(rep->c->tcp_req_info) {
478
0
    r_buffer = rep->c->tcp_req_info->spool_buffer;
479
0
  }
480
0
  if(!unique)
481
0
    s = mesh_area_find(mesh, cinfo, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0);
482
  /* does this create a new reply state? */
483
0
  if(!s || s->list_select == mesh_no_list) {
484
0
    if(!mesh_make_new_space(mesh, rep->c->buffer)) {
485
0
      verbose(VERB_ALGO, "Too many queries. dropping "
486
0
        "incoming query.");
487
0
      comm_point_drop_reply(rep);
488
0
      mesh->stats_dropped++;
489
0
      return;
490
0
    }
491
    /* for this new reply state, the reply address is free,
492
     * so the limit of reply addresses does not stop reply states*/
493
0
  } else {
494
    /* protect our memory usage from storing reply addresses */
495
0
    if(mesh->num_reply_addrs > mesh->max_reply_states*16) {
496
0
      verbose(VERB_ALGO, "Too many requests queued. "
497
0
        "dropping incoming query.");
498
0
      comm_point_drop_reply(rep);
499
0
      mesh->stats_dropped++;
500
0
      return;
501
0
    }
502
0
  }
503
  /* see if it already exists, if not, create one */
504
0
  if(!s) {
505
#ifdef UNBOUND_DEBUG
506
    struct rbnode_type* n;
507
#endif
508
0
    s = mesh_state_create(mesh->env, qinfo, cinfo,
509
0
      qflags&(BIT_RD|BIT_CD), 0, 0);
510
0
    if(!s) {
511
0
      log_err("mesh_state_create: out of memory; SERVFAIL");
512
0
      if(!inplace_cb_reply_servfail_call(mesh->env, qinfo, NULL, NULL,
513
0
        LDNS_RCODE_SERVFAIL, edns, rep, mesh->env->scratch, mesh->env->now_tv))
514
0
          edns->opt_list_inplace_cb_out = NULL;
515
0
      error_encode(r_buffer, LDNS_RCODE_SERVFAIL,
516
0
        qinfo, qid, qflags, edns);
517
0
      comm_point_send_reply(rep);
518
0
      return;
519
0
    }
520
0
    if(unique)
521
0
      mesh_state_make_unique(s);
522
0
    s->s.rpz_passthru = rpz_passthru;
523
    /* copy the edns options we got from the front */
524
0
    if(edns->opt_list_in) {
525
0
      s->s.edns_opts_front_in = edns_opt_copy_region(edns->opt_list_in,
526
0
        s->s.region);
527
0
      if(!s->s.edns_opts_front_in) {
528
0
        log_err("mesh_state_create: out of memory; SERVFAIL");
529
0
        if(!inplace_cb_reply_servfail_call(mesh->env, qinfo, NULL,
530
0
          NULL, LDNS_RCODE_SERVFAIL, edns, rep, mesh->env->scratch, mesh->env->now_tv))
531
0
            edns->opt_list_inplace_cb_out = NULL;
532
0
        error_encode(r_buffer, LDNS_RCODE_SERVFAIL,
533
0
          qinfo, qid, qflags, edns);
534
0
        comm_point_send_reply(rep);
535
0
        return;
536
0
      }
537
0
    }
538
539
#ifdef UNBOUND_DEBUG
540
    n =
541
#else
542
0
    (void)
543
0
#endif
544
0
    rbtree_insert(&mesh->all, &s->node);
545
0
    log_assert(n != NULL);
546
    /* set detached (it is now) */
547
0
    mesh->num_detached_states++;
548
0
    added = 1;
549
0
  }
550
0
  if(!s->reply_list && !s->cb_list) {
551
0
    was_noreply = 1;
552
0
    if(s->super_set.count == 0) {
553
0
      was_detached = 1;
554
0
    }
555
0
  }
556
  /* add reply to s */
557
0
  if(!mesh_state_add_reply(s, edns, rep, qid, qflags, qinfo)) {
558
0
    log_err("mesh_new_client: out of memory; SERVFAIL");
559
0
    goto servfail_mem;
560
0
  }
561
0
  if(rep->c->tcp_req_info) {
562
0
    if(!tcp_req_info_add_meshstate(rep->c->tcp_req_info, mesh, s)) {
563
0
      log_err("mesh_new_client: out of memory add tcpreqinfo");
564
0
      goto servfail_mem;
565
0
    }
566
0
  }
567
0
  if(rep->c->use_h2) {
568
0
    http2_stream_add_meshstate(rep->c->h2_stream, mesh, s);
569
0
  }
570
  /* add serve expired timer if required and not already there */
571
0
  if(timeout && !mesh_serve_expired_init(s, timeout)) {
572
0
    log_err("mesh_new_client: out of memory initializing serve expired");
573
0
    goto servfail_mem;
574
0
  }
575
  /* update statistics */
576
0
  if(was_detached) {
577
0
    log_assert(mesh->num_detached_states > 0);
578
0
    mesh->num_detached_states--;
579
0
  }
580
0
  if(was_noreply) {
581
0
    mesh->num_reply_states ++;
582
0
  }
583
0
  mesh->num_reply_addrs++;
584
0
  if(s->list_select == mesh_no_list) {
585
    /* move to either the forever or the jostle_list */
586
0
    if(mesh->num_forever_states < mesh->max_forever_states) {
587
0
      mesh->num_forever_states ++;
588
0
      mesh_list_insert(s, &mesh->forever_first, 
589
0
        &mesh->forever_last);
590
0
      s->list_select = mesh_forever_list;
591
0
    } else {
592
0
      mesh_list_insert(s, &mesh->jostle_first, 
593
0
        &mesh->jostle_last);
594
0
      s->list_select = mesh_jostle_list;
595
0
    }
596
0
  }
597
0
  if(added)
598
0
    mesh_run(mesh, s, module_event_new, NULL);
599
0
  return;
600
601
0
servfail_mem:
602
0
  if(!inplace_cb_reply_servfail_call(mesh->env, qinfo, &s->s,
603
0
    NULL, LDNS_RCODE_SERVFAIL, edns, rep, mesh->env->scratch, mesh->env->now_tv))
604
0
      edns->opt_list_inplace_cb_out = NULL;
605
0
  error_encode(r_buffer, LDNS_RCODE_SERVFAIL,
606
0
    qinfo, qid, qflags, edns);
607
0
  comm_point_send_reply(rep);
608
0
  if(added)
609
0
    mesh_state_delete(&s->s);
610
0
  return;
611
0
}
612
613
int 
614
mesh_new_callback(struct mesh_area* mesh, struct query_info* qinfo,
615
  uint16_t qflags, struct edns_data* edns, sldns_buffer* buf, 
616
  uint16_t qid, mesh_cb_func_type cb, void* cb_arg, int rpz_passthru)
617
0
{
618
0
  struct mesh_state* s = NULL;
619
0
  int unique = unique_mesh_state(edns->opt_list_in, mesh->env);
620
0
  int timeout = mesh->env->cfg->serve_expired?
621
0
    mesh->env->cfg->serve_expired_client_timeout:0;
622
0
  int was_detached = 0;
623
0
  int was_noreply = 0;
624
0
  int added = 0;
625
0
  if(!unique)
626
0
    s = mesh_area_find(mesh, NULL, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0);
627
628
  /* there are no limits on the number of callbacks */
629
630
  /* see if it already exists, if not, create one */
631
0
  if(!s) {
632
#ifdef UNBOUND_DEBUG
633
    struct rbnode_type* n;
634
#endif
635
0
    s = mesh_state_create(mesh->env, qinfo, NULL,
636
0
      qflags&(BIT_RD|BIT_CD), 0, 0);
637
0
    if(!s) {
638
0
      return 0;
639
0
    }
640
0
    if(unique)
641
0
      mesh_state_make_unique(s);
642
0
    s->s.rpz_passthru = rpz_passthru;
643
0
    if(edns->opt_list_in) {
644
0
      s->s.edns_opts_front_in = edns_opt_copy_region(edns->opt_list_in,
645
0
        s->s.region);
646
0
      if(!s->s.edns_opts_front_in) {
647
0
        return 0;
648
0
      }
649
0
    }
650
#ifdef UNBOUND_DEBUG
651
    n =
652
#else
653
0
    (void)
654
0
#endif
655
0
    rbtree_insert(&mesh->all, &s->node);
656
0
    log_assert(n != NULL);
657
    /* set detached (it is now) */
658
0
    mesh->num_detached_states++;
659
0
    added = 1;
660
0
  }
661
0
  if(!s->reply_list && !s->cb_list) {
662
0
    was_noreply = 1;
663
0
    if(s->super_set.count == 0) {
664
0
      was_detached = 1;
665
0
    }
666
0
  }
667
  /* add reply to s */
668
0
  if(!mesh_state_add_cb(s, edns, buf, cb, cb_arg, qid, qflags)) {
669
0
    if(added)
670
0
      mesh_state_delete(&s->s);
671
0
    return 0;
672
0
  }
673
  /* add serve expired timer if not already there */
674
0
  if(timeout && !mesh_serve_expired_init(s, timeout)) {
675
0
    return 0;
676
0
  }
677
  /* update statistics */
678
0
  if(was_detached) {
679
0
    log_assert(mesh->num_detached_states > 0);
680
0
    mesh->num_detached_states--;
681
0
  }
682
0
  if(was_noreply) {
683
0
    mesh->num_reply_states ++;
684
0
  }
685
0
  mesh->num_reply_addrs++;
686
0
  if(added)
687
0
    mesh_run(mesh, s, module_event_new, NULL);
688
0
  return 1;
689
0
}
690
691
/* Internal backend routine of mesh_new_prefetch().  It takes one additional
692
 * parameter, 'run', which controls whether to run the prefetch state
693
 * immediately.  When this function is called internally 'run' could be
694
 * 0 (false), in which case the new state is only made runnable so it
695
 * will not be run recursively on top of the current state. */
696
static void mesh_schedule_prefetch(struct mesh_area* mesh,
697
  struct query_info* qinfo, uint16_t qflags, time_t leeway, int run,
698
  int rpz_passthru)
699
0
{
700
0
  struct mesh_state* s = mesh_area_find(mesh, NULL, qinfo,
701
0
    qflags&(BIT_RD|BIT_CD), 0, 0);
702
#ifdef UNBOUND_DEBUG
703
  struct rbnode_type* n;
704
#endif
705
  /* already exists, and for a different purpose perhaps.
706
   * if mesh_no_list, keep it that way. */
707
0
  if(s) {
708
    /* make it ignore the cache from now on */
709
0
    if(!s->s.blacklist)
710
0
      sock_list_insert(&s->s.blacklist, NULL, 0, s->s.region);
711
0
    if(s->s.prefetch_leeway < leeway)
712
0
      s->s.prefetch_leeway = leeway;
713
0
    return;
714
0
  }
715
0
  if(!mesh_make_new_space(mesh, NULL)) {
716
0
    verbose(VERB_ALGO, "Too many queries. dropped prefetch.");
717
0
    mesh->stats_dropped ++;
718
0
    return;
719
0
  }
720
721
0
  s = mesh_state_create(mesh->env, qinfo, NULL,
722
0
    qflags&(BIT_RD|BIT_CD), 0, 0);
723
0
  if(!s) {
724
0
    log_err("prefetch mesh_state_create: out of memory");
725
0
    return;
726
0
  }
727
#ifdef UNBOUND_DEBUG
728
  n =
729
#else
730
0
  (void)
731
0
#endif
732
0
  rbtree_insert(&mesh->all, &s->node);
733
0
  log_assert(n != NULL);
734
  /* set detached (it is now) */
735
0
  mesh->num_detached_states++;
736
  /* make it ignore the cache */
737
0
  sock_list_insert(&s->s.blacklist, NULL, 0, s->s.region);
738
0
  s->s.prefetch_leeway = leeway;
739
740
0
  if(s->list_select == mesh_no_list) {
741
    /* move to either the forever or the jostle_list */
742
0
    if(mesh->num_forever_states < mesh->max_forever_states) {
743
0
      mesh->num_forever_states ++;
744
0
      mesh_list_insert(s, &mesh->forever_first,
745
0
        &mesh->forever_last);
746
0
      s->list_select = mesh_forever_list;
747
0
    } else {
748
0
      mesh_list_insert(s, &mesh->jostle_first,
749
0
        &mesh->jostle_last);
750
0
      s->list_select = mesh_jostle_list;
751
0
    }
752
0
  }
753
0
  s->s.rpz_passthru = rpz_passthru;
754
755
0
  if(!run) {
756
#ifdef UNBOUND_DEBUG
757
    n =
758
#else
759
0
    (void)
760
0
#endif
761
0
    rbtree_insert(&mesh->run, &s->run_node);
762
0
    log_assert(n != NULL);
763
0
    return;
764
0
  }
765
766
0
  mesh_run(mesh, s, module_event_new, NULL);
767
0
}
768
769
#ifdef CLIENT_SUBNET
770
/* Same logic as mesh_schedule_prefetch but tailored to the subnet module logic
771
 * like passing along the comm_reply info. This will be faked into an EDNS
772
 * option for processing by the subnet module if the client has not already
773
 * attached its own ECS data. */
774
static void mesh_schedule_prefetch_subnet(struct mesh_area* mesh,
775
  struct query_info* qinfo, uint16_t qflags, time_t leeway, int run,
776
  int rpz_passthru, struct comm_reply* rep, struct edns_option* edns_list)
777
{
778
  struct mesh_state* s = NULL;
779
  struct edns_option* opt = NULL;
780
#ifdef UNBOUND_DEBUG
781
  struct rbnode_type* n;
782
#endif
783
  if(!mesh_make_new_space(mesh, NULL)) {
784
    verbose(VERB_ALGO, "Too many queries. dropped prefetch.");
785
    mesh->stats_dropped ++;
786
    return;
787
  }
788
789
  s = mesh_state_create(mesh->env, qinfo, NULL,
790
    qflags&(BIT_RD|BIT_CD), 0, 0);
791
  if(!s) {
792
    log_err("prefetch_subnet mesh_state_create: out of memory");
793
    return;
794
  }
795
  mesh_state_make_unique(s);
796
797
  opt = edns_opt_list_find(edns_list, mesh->env->cfg->client_subnet_opcode);
798
  if(opt) {
799
    /* Use the client's ECS data */
800
    if(!edns_opt_list_append(&s->s.edns_opts_front_in, opt->opt_code,
801
      opt->opt_len, opt->opt_data, s->s.region)) {
802
      log_err("prefetch_subnet edns_opt_list_append: out of memory");
803
      return;
804
    }
805
  } else {
806
    /* Store the client's address. Later in the subnet module,
807
     * it is decided whether to include an ECS option or not.
808
     */
809
    s->s.client_addr =  rep->client_addr;
810
  }
811
#ifdef UNBOUND_DEBUG
812
  n =
813
#else
814
  (void)
815
#endif
816
  rbtree_insert(&mesh->all, &s->node);
817
  log_assert(n != NULL);
818
  /* set detached (it is now) */
819
  mesh->num_detached_states++;
820
  /* make it ignore the cache */
821
  sock_list_insert(&s->s.blacklist, NULL, 0, s->s.region);
822
  s->s.prefetch_leeway = leeway;
823
824
  if(s->list_select == mesh_no_list) {
825
    /* move to either the forever or the jostle_list */
826
    if(mesh->num_forever_states < mesh->max_forever_states) {
827
      mesh->num_forever_states ++;
828
      mesh_list_insert(s, &mesh->forever_first,
829
        &mesh->forever_last);
830
      s->list_select = mesh_forever_list;
831
    } else {
832
      mesh_list_insert(s, &mesh->jostle_first,
833
        &mesh->jostle_last);
834
      s->list_select = mesh_jostle_list;
835
    }
836
  }
837
  s->s.rpz_passthru = rpz_passthru;
838
839
  if(!run) {
840
#ifdef UNBOUND_DEBUG
841
    n =
842
#else
843
    (void)
844
#endif
845
    rbtree_insert(&mesh->run, &s->run_node);
846
    log_assert(n != NULL);
847
    return;
848
  }
849
850
  mesh_run(mesh, s, module_event_new, NULL);
851
}
852
#endif /* CLIENT_SUBNET */
853
854
void mesh_new_prefetch(struct mesh_area* mesh, struct query_info* qinfo,
855
  uint16_t qflags, time_t leeway, int rpz_passthru,
856
  struct comm_reply* rep, struct edns_option* opt_list)
857
0
{
858
0
  (void)opt_list;
859
0
  (void)rep;
860
#ifdef CLIENT_SUBNET
861
  if(rep)
862
    mesh_schedule_prefetch_subnet(mesh, qinfo, qflags, leeway, 1,
863
      rpz_passthru, rep, opt_list);
864
  else
865
#endif
866
0
    mesh_schedule_prefetch(mesh, qinfo, qflags, leeway, 1,
867
0
      rpz_passthru);
868
0
}
869
870
void mesh_report_reply(struct mesh_area* mesh, struct outbound_entry* e,
871
        struct comm_reply* reply, int what)
872
0
{
873
0
  enum module_ev event = module_event_reply;
874
0
  e->qstate->reply = reply;
875
0
  if(what != NETEVENT_NOERROR) {
876
0
    event = module_event_noreply;
877
0
    if(what == NETEVENT_CAPSFAIL)
878
0
      event = module_event_capsfail;
879
0
  }
880
0
  mesh_run(mesh, e->qstate->mesh_info, event, e);
881
0
}
882
883
struct mesh_state*
884
mesh_state_create(struct module_env* env, struct query_info* qinfo,
885
  struct respip_client_info* cinfo, uint16_t qflags, int prime,
886
  int valrec)
887
0
{
888
0
  struct regional* region = alloc_reg_obtain(env->alloc);
889
0
  struct mesh_state* mstate;
890
0
  int i;
891
0
  if(!region)
892
0
    return NULL;
893
0
  mstate = (struct mesh_state*)regional_alloc(region, 
894
0
    sizeof(struct mesh_state));
895
0
  if(!mstate) {
896
0
    alloc_reg_release(env->alloc, region);
897
0
    return NULL;
898
0
  }
899
0
  memset(mstate, 0, sizeof(*mstate));
900
0
  mstate->node = *RBTREE_NULL;
901
0
  mstate->run_node = *RBTREE_NULL;
902
0
  mstate->node.key = mstate;
903
0
  mstate->run_node.key = mstate;
904
0
  mstate->reply_list = NULL;
905
0
  mstate->list_select = mesh_no_list;
906
0
  mstate->replies_sent = 0;
907
0
  rbtree_init(&mstate->super_set, &mesh_state_ref_compare);
908
0
  rbtree_init(&mstate->sub_set, &mesh_state_ref_compare);
909
0
  mstate->num_activated = 0;
910
0
  mstate->unique = NULL;
911
  /* init module qstate */
912
0
  mstate->s.qinfo.qtype = qinfo->qtype;
913
0
  mstate->s.qinfo.qclass = qinfo->qclass;
914
0
  mstate->s.qinfo.local_alias = NULL;
915
0
  mstate->s.qinfo.qname_len = qinfo->qname_len;
916
0
  mstate->s.qinfo.qname = regional_alloc_init(region, qinfo->qname,
917
0
    qinfo->qname_len);
918
0
  if(!mstate->s.qinfo.qname) {
919
0
    alloc_reg_release(env->alloc, region);
920
0
    return NULL;
921
0
  }
922
0
  if(cinfo) {
923
0
    mstate->s.client_info = regional_alloc_init(region, cinfo,
924
0
      sizeof(*cinfo));
925
0
    if(!mstate->s.client_info) {
926
0
      alloc_reg_release(env->alloc, region);
927
0
      return NULL;
928
0
    }
929
0
  }
930
  /* remove all weird bits from qflags */
931
0
  mstate->s.query_flags = (qflags & (BIT_RD|BIT_CD));
932
0
  mstate->s.is_priming = prime;
933
0
  mstate->s.is_valrec = valrec;
934
0
  mstate->s.reply = NULL;
935
0
  mstate->s.region = region;
936
0
  mstate->s.curmod = 0;
937
0
  mstate->s.return_msg = 0;
938
0
  mstate->s.return_rcode = LDNS_RCODE_NOERROR;
939
0
  mstate->s.env = env;
940
0
  mstate->s.mesh_info = mstate;
941
0
  mstate->s.prefetch_leeway = 0;
942
0
  mstate->s.serve_expired_data = NULL;
943
0
  mstate->s.no_cache_lookup = 0;
944
0
  mstate->s.no_cache_store = 0;
945
0
  mstate->s.need_refetch = 0;
946
0
  mstate->s.was_ratelimited = 0;
947
0
  mstate->s.qstarttime = *env->now;
948
949
  /* init modules */
950
0
  for(i=0; i<env->mesh->mods.num; i++) {
951
0
    mstate->s.minfo[i] = NULL;
952
0
    mstate->s.ext_state[i] = module_state_initial;
953
0
  }
954
  /* init edns option lists */
955
0
  mstate->s.edns_opts_front_in = NULL;
956
0
  mstate->s.edns_opts_back_out = NULL;
957
0
  mstate->s.edns_opts_back_in = NULL;
958
0
  mstate->s.edns_opts_front_out = NULL;
959
960
0
  return mstate;
961
0
}
962
963
int
964
mesh_state_is_unique(struct mesh_state* mstate)
965
0
{
966
0
  return mstate->unique != NULL;
967
0
}
968
969
void
970
mesh_state_make_unique(struct mesh_state* mstate)
971
0
{
972
0
  mstate->unique = mstate;
973
0
}
974
975
void 
976
mesh_state_cleanup(struct mesh_state* mstate)
977
0
{
978
0
  struct mesh_area* mesh;
979
0
  int i;
980
0
  if(!mstate)
981
0
    return;
982
0
  mesh = mstate->s.env->mesh;
983
  /* Stop and delete the serve expired timer */
984
0
  if(mstate->s.serve_expired_data && mstate->s.serve_expired_data->timer) {
985
0
    comm_timer_delete(mstate->s.serve_expired_data->timer);
986
0
    mstate->s.serve_expired_data->timer = NULL;
987
0
  }
988
  /* drop unsent replies */
989
0
  if(!mstate->replies_sent) {
990
0
    struct mesh_reply* rep = mstate->reply_list;
991
0
    struct mesh_cb* cb;
992
    /* in tcp_req_info, the mstates linked are removed, but
993
     * the reply_list is now NULL, so the remove-from-empty-list
994
     * takes no time and also it does not do the mesh accounting */
995
0
    mstate->reply_list = NULL;
996
0
    for(; rep; rep=rep->next) {
997
0
      comm_point_drop_reply(&rep->query_reply);
998
0
      log_assert(mesh->num_reply_addrs > 0);
999
0
      mesh->num_reply_addrs--;
1000
0
    }
1001
0
    while((cb = mstate->cb_list)!=NULL) {
1002
0
      mstate->cb_list = cb->next;
1003
0
      fptr_ok(fptr_whitelist_mesh_cb(cb->cb));
1004
0
      (*cb->cb)(cb->cb_arg, LDNS_RCODE_SERVFAIL, NULL,
1005
0
        sec_status_unchecked, NULL, 0);
1006
0
      log_assert(mesh->num_reply_addrs > 0);
1007
0
      mesh->num_reply_addrs--;
1008
0
    }
1009
0
  }
1010
1011
  /* de-init modules */
1012
0
  for(i=0; i<mesh->mods.num; i++) {
1013
0
    fptr_ok(fptr_whitelist_mod_clear(mesh->mods.mod[i]->clear));
1014
0
    (*mesh->mods.mod[i]->clear)(&mstate->s, i);
1015
0
    mstate->s.minfo[i] = NULL;
1016
0
    mstate->s.ext_state[i] = module_finished;
1017
0
  }
1018
0
  alloc_reg_release(mstate->s.env->alloc, mstate->s.region);
1019
0
}
1020
1021
void 
1022
mesh_state_delete(struct module_qstate* qstate)
1023
0
{
1024
0
  struct mesh_area* mesh;
1025
0
  struct mesh_state_ref* super, ref;
1026
0
  struct mesh_state* mstate;
1027
0
  if(!qstate)
1028
0
    return;
1029
0
  mstate = qstate->mesh_info;
1030
0
  mesh = mstate->s.env->mesh;
1031
0
  mesh_detach_subs(&mstate->s);
1032
0
  if(mstate->list_select == mesh_forever_list) {
1033
0
    mesh->num_forever_states --;
1034
0
    mesh_list_remove(mstate, &mesh->forever_first, 
1035
0
      &mesh->forever_last);
1036
0
  } else if(mstate->list_select == mesh_jostle_list) {
1037
0
    mesh_list_remove(mstate, &mesh->jostle_first, 
1038
0
      &mesh->jostle_last);
1039
0
  }
1040
0
  if(!mstate->reply_list && !mstate->cb_list
1041
0
    && mstate->super_set.count == 0) {
1042
0
    log_assert(mesh->num_detached_states > 0);
1043
0
    mesh->num_detached_states--;
1044
0
  }
1045
0
  if(mstate->reply_list || mstate->cb_list) {
1046
0
    log_assert(mesh->num_reply_states > 0);
1047
0
    mesh->num_reply_states--;
1048
0
  }
1049
0
  ref.node.key = &ref;
1050
0
  ref.s = mstate;
1051
0
  RBTREE_FOR(super, struct mesh_state_ref*, &mstate->super_set) {
1052
0
    (void)rbtree_delete(&super->s->sub_set, &ref);
1053
0
  }
1054
0
  (void)rbtree_delete(&mesh->run, mstate);
1055
0
  (void)rbtree_delete(&mesh->all, mstate);
1056
0
  mesh_state_cleanup(mstate);
1057
0
}
1058
1059
/** helper recursive rbtree find routine */
1060
static int
1061
find_in_subsub(struct mesh_state* m, struct mesh_state* tofind, size_t *c)
1062
0
{
1063
0
  struct mesh_state_ref* r;
1064
0
  if((*c)++ > MESH_MAX_SUBSUB)
1065
0
    return 1;
1066
0
  RBTREE_FOR(r, struct mesh_state_ref*, &m->sub_set) {
1067
0
    if(r->s == tofind || find_in_subsub(r->s, tofind, c))
1068
0
      return 1;
1069
0
  }
1070
0
  return 0;
1071
0
}
1072
1073
/** find cycle for already looked up mesh_state */
1074
static int
1075
mesh_detect_cycle_found(struct module_qstate* qstate, struct mesh_state* dep_m)
1076
0
{
1077
0
  struct mesh_state* cyc_m = qstate->mesh_info;
1078
0
  size_t counter = 0;
1079
0
  if(!dep_m)
1080
0
    return 0;
1081
0
  if(dep_m == cyc_m || find_in_subsub(dep_m, cyc_m, &counter)) {
1082
0
    if(counter > MESH_MAX_SUBSUB)
1083
0
      return 2;
1084
0
    return 1;
1085
0
  }
1086
0
  return 0;
1087
0
}
1088
1089
void mesh_detach_subs(struct module_qstate* qstate)
1090
0
{
1091
0
  struct mesh_area* mesh = qstate->env->mesh;
1092
0
  struct mesh_state_ref* ref, lookup;
1093
#ifdef UNBOUND_DEBUG
1094
  struct rbnode_type* n;
1095
#endif
1096
0
  lookup.node.key = &lookup;
1097
0
  lookup.s = qstate->mesh_info;
1098
0
  RBTREE_FOR(ref, struct mesh_state_ref*, &qstate->mesh_info->sub_set) {
1099
#ifdef UNBOUND_DEBUG
1100
    n =
1101
#else
1102
0
    (void)
1103
0
#endif
1104
0
    rbtree_delete(&ref->s->super_set, &lookup);
1105
0
    log_assert(n != NULL); /* must have been present */
1106
0
    if(!ref->s->reply_list && !ref->s->cb_list
1107
0
      && ref->s->super_set.count == 0) {
1108
0
      mesh->num_detached_states++;
1109
0
      log_assert(mesh->num_detached_states + 
1110
0
        mesh->num_reply_states <= mesh->all.count);
1111
0
    }
1112
0
  }
1113
0
  rbtree_init(&qstate->mesh_info->sub_set, &mesh_state_ref_compare);
1114
0
}
1115
1116
int mesh_add_sub(struct module_qstate* qstate, struct query_info* qinfo,
1117
        uint16_t qflags, int prime, int valrec, struct module_qstate** newq,
1118
  struct mesh_state** sub)
1119
0
{
1120
  /* find it, if not, create it */
1121
0
  struct mesh_area* mesh = qstate->env->mesh;
1122
0
  *sub = mesh_area_find(mesh, NULL, qinfo, qflags,
1123
0
    prime, valrec);
1124
0
  if(mesh_detect_cycle_found(qstate, *sub)) {
1125
0
    verbose(VERB_ALGO, "attach failed, cycle detected");
1126
0
    return 0;
1127
0
  }
1128
0
  if(!*sub) {
1129
#ifdef UNBOUND_DEBUG
1130
    struct rbnode_type* n;
1131
#endif
1132
    /* create a new one */
1133
0
    *sub = mesh_state_create(qstate->env, qinfo, NULL, qflags, prime,
1134
0
      valrec);
1135
0
    if(!*sub) {
1136
0
      log_err("mesh_attach_sub: out of memory");
1137
0
      return 0;
1138
0
    }
1139
#ifdef UNBOUND_DEBUG
1140
    n =
1141
#else
1142
0
    (void)
1143
0
#endif
1144
0
    rbtree_insert(&mesh->all, &(*sub)->node);
1145
0
    log_assert(n != NULL);
1146
    /* set detached (it is now) */
1147
0
    mesh->num_detached_states++;
1148
    /* set new query state to run */
1149
#ifdef UNBOUND_DEBUG
1150
    n =
1151
#else
1152
0
    (void)
1153
0
#endif
1154
0
    rbtree_insert(&mesh->run, &(*sub)->run_node);
1155
0
    log_assert(n != NULL);
1156
0
    *newq = &(*sub)->s;
1157
0
  } else
1158
0
    *newq = NULL;
1159
0
  return 1;
1160
0
}
1161
1162
int mesh_attach_sub(struct module_qstate* qstate, struct query_info* qinfo,
1163
        uint16_t qflags, int prime, int valrec, struct module_qstate** newq)
1164
0
{
1165
0
  struct mesh_area* mesh = qstate->env->mesh;
1166
0
  struct mesh_state* sub = NULL;
1167
0
  int was_detached;
1168
0
  if(!mesh_add_sub(qstate, qinfo, qflags, prime, valrec, newq, &sub))
1169
0
    return 0;
1170
0
  was_detached = (sub->super_set.count == 0);
1171
0
  if(!mesh_state_attachment(qstate->mesh_info, sub))
1172
0
    return 0;
1173
  /* if it was a duplicate  attachment, the count was not zero before */
1174
0
  if(!sub->reply_list && !sub->cb_list && was_detached && 
1175
0
    sub->super_set.count == 1) {
1176
    /* it used to be detached, before this one got added */
1177
0
    log_assert(mesh->num_detached_states > 0);
1178
0
    mesh->num_detached_states--;
1179
0
  }
1180
  /* *newq will be run when inited after the current module stops */
1181
0
  return 1;
1182
0
}
1183
1184
int mesh_state_attachment(struct mesh_state* super, struct mesh_state* sub)
1185
0
{
1186
#ifdef UNBOUND_DEBUG
1187
  struct rbnode_type* n;
1188
#endif
1189
0
  struct mesh_state_ref* subref; /* points to sub, inserted in super */
1190
0
  struct mesh_state_ref* superref; /* points to super, inserted in sub */
1191
0
  if( !(subref = regional_alloc(super->s.region,
1192
0
    sizeof(struct mesh_state_ref))) ||
1193
0
    !(superref = regional_alloc(sub->s.region,
1194
0
    sizeof(struct mesh_state_ref))) ) {
1195
0
    log_err("mesh_state_attachment: out of memory");
1196
0
    return 0;
1197
0
  }
1198
0
  superref->node.key = superref;
1199
0
  superref->s = super;
1200
0
  subref->node.key = subref;
1201
0
  subref->s = sub;
1202
0
  if(!rbtree_insert(&sub->super_set, &superref->node)) {
1203
    /* this should not happen, iterator and validator do not
1204
     * attach subqueries that are identical. */
1205
    /* already attached, we are done, nothing todo.
1206
     * since superref and subref already allocated in region,
1207
     * we cannot free them */
1208
0
    return 1;
1209
0
  }
1210
#ifdef UNBOUND_DEBUG
1211
  n =
1212
#else
1213
0
  (void)
1214
0
#endif
1215
0
  rbtree_insert(&super->sub_set, &subref->node);
1216
0
  log_assert(n != NULL); /* we checked above if statement, the reverse
1217
    administration should not fail now, unless they are out of sync */
1218
0
  return 1;
1219
0
}
1220
1221
/**
1222
 * callback results to mesh cb entry
1223
 * @param m: mesh state to send it for.
1224
 * @param rcode: if not 0, error code.
1225
 * @param rep: reply to send (or NULL if rcode is set).
1226
 * @param r: callback entry
1227
 * @param start_time: the time to pass to callback functions, it is 0 or
1228
 *  a value from one of the packets if the mesh state had packets.
1229
 */
1230
static void
1231
mesh_do_callback(struct mesh_state* m, int rcode, struct reply_info* rep,
1232
  struct mesh_cb* r, struct timeval* start_time)
1233
0
{
1234
0
  int secure;
1235
0
  char* reason = NULL;
1236
0
  int was_ratelimited = m->s.was_ratelimited;
1237
  /* bogus messages are not made into servfail, sec_status passed
1238
   * to the callback function */
1239
0
  if(rep && rep->security == sec_status_secure)
1240
0
    secure = 1;
1241
0
  else  secure = 0;
1242
0
  if(!rep && rcode == LDNS_RCODE_NOERROR)
1243
0
    rcode = LDNS_RCODE_SERVFAIL;
1244
0
  if(!rcode && (rep->security == sec_status_bogus ||
1245
0
    rep->security == sec_status_secure_sentinel_fail)) {
1246
0
    if(!(reason = errinf_to_str_bogus(&m->s)))
1247
0
      rcode = LDNS_RCODE_SERVFAIL;
1248
0
  }
1249
  /* send the reply */
1250
0
  if(rcode) {
1251
0
    if(rcode == LDNS_RCODE_SERVFAIL) {
1252
0
      if(!inplace_cb_reply_servfail_call(m->s.env, &m->s.qinfo, &m->s,
1253
0
        rep, rcode, &r->edns, NULL, m->s.region, start_time))
1254
0
          r->edns.opt_list_inplace_cb_out = NULL;
1255
0
    } else {
1256
0
      if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep, rcode,
1257
0
        &r->edns, NULL, m->s.region, start_time))
1258
0
          r->edns.opt_list_inplace_cb_out = NULL;
1259
0
    }
1260
0
    fptr_ok(fptr_whitelist_mesh_cb(r->cb));
1261
0
    (*r->cb)(r->cb_arg, rcode, r->buf, sec_status_unchecked, NULL,
1262
0
      was_ratelimited);
1263
0
  } else {
1264
0
    size_t udp_size = r->edns.udp_size;
1265
0
    sldns_buffer_clear(r->buf);
1266
0
    r->edns.edns_version = EDNS_ADVERTISED_VERSION;
1267
0
    r->edns.udp_size = EDNS_ADVERTISED_SIZE;
1268
0
    r->edns.ext_rcode = 0;
1269
0
    r->edns.bits &= EDNS_DO;
1270
1271
0
    if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep,
1272
0
      LDNS_RCODE_NOERROR, &r->edns, NULL, m->s.region, start_time) ||
1273
0
      !reply_info_answer_encode(&m->s.qinfo, rep, r->qid, 
1274
0
      r->qflags, r->buf, 0, 1, 
1275
0
      m->s.env->scratch, udp_size, &r->edns, 
1276
0
      (int)(r->edns.bits & EDNS_DO), secure)) 
1277
0
    {
1278
0
      fptr_ok(fptr_whitelist_mesh_cb(r->cb));
1279
0
      (*r->cb)(r->cb_arg, LDNS_RCODE_SERVFAIL, r->buf,
1280
0
        sec_status_unchecked, NULL, 0);
1281
0
    } else {
1282
0
      fptr_ok(fptr_whitelist_mesh_cb(r->cb));
1283
0
      (*r->cb)(r->cb_arg, LDNS_RCODE_NOERROR, r->buf,
1284
0
        rep->security, reason, was_ratelimited);
1285
0
    }
1286
0
  }
1287
0
  free(reason);
1288
0
  log_assert(m->s.env->mesh->num_reply_addrs > 0);
1289
0
  m->s.env->mesh->num_reply_addrs--;
1290
0
}
1291
1292
static inline int
1293
mesh_is_rpz_respip_tcponly_action(struct mesh_state const* m)
1294
0
{
1295
0
  struct respip_action_info const* respip_info = m->s.respip_action_info;
1296
0
  return respip_info == NULL
1297
0
      ? 0
1298
0
      : (respip_info->rpz_used
1299
0
      && !respip_info->rpz_disabled
1300
0
      && respip_info->action == respip_truncate);
1301
0
}
1302
1303
static inline int
1304
0
mesh_is_udp(struct mesh_reply const* r) {
1305
0
  return r->query_reply.c->type == comm_udp;
1306
0
}
1307
1308
/**
1309
 * Send reply to mesh reply entry
1310
 * @param m: mesh state to send it for.
1311
 * @param rcode: if not 0, error code.
1312
 * @param rep: reply to send (or NULL if rcode is set).
1313
 * @param r: reply entry
1314
 * @param r_buffer: buffer to use for reply entry.
1315
 * @param prev: previous reply, already has its answer encoded in buffer.
1316
 * @param prev_buffer: buffer for previous reply.
1317
 */
1318
static void
1319
mesh_send_reply(struct mesh_state* m, int rcode, struct reply_info* rep,
1320
  struct mesh_reply* r, struct sldns_buffer* r_buffer,
1321
  struct mesh_reply* prev, struct sldns_buffer* prev_buffer)
1322
0
{
1323
0
  struct timeval end_time;
1324
0
  struct timeval duration;
1325
0
  int secure;
1326
  /* briefly set the replylist to null in case the
1327
   * meshsendreply calls tcpreqinfo sendreply that
1328
   * comm_point_drops because of size, and then the
1329
   * null stops the mesh state remove and thus
1330
   * reply_list modification and accounting */
1331
0
  struct mesh_reply* rlist = m->reply_list;
1332
1333
  /* rpz: apply actions */
1334
0
  rcode = mesh_is_udp(r) && mesh_is_rpz_respip_tcponly_action(m)
1335
0
      ? (rcode|BIT_TC) : rcode;
1336
1337
  /* examine security status */
1338
0
  if(m->s.env->need_to_validate && (!(r->qflags&BIT_CD) ||
1339
0
    m->s.env->cfg->ignore_cd) && rep && 
1340
0
    (rep->security <= sec_status_bogus ||
1341
0
    rep->security == sec_status_secure_sentinel_fail)) {
1342
0
    rcode = LDNS_RCODE_SERVFAIL;
1343
0
    if(m->s.env->cfg->stat_extended)
1344
0
      m->s.env->mesh->ans_bogus++;
1345
0
  }
1346
0
  if(rep && rep->security == sec_status_secure)
1347
0
    secure = 1;
1348
0
  else  secure = 0;
1349
0
  if(!rep && rcode == LDNS_RCODE_NOERROR)
1350
0
    rcode = LDNS_RCODE_SERVFAIL;
1351
0
  if(r->query_reply.c->use_h2) {
1352
0
    r->query_reply.c->h2_stream = r->h2_stream;
1353
    /* Mesh reply won't exist for long anymore. Make it impossible
1354
     * for HTTP/2 stream to refer to mesh state, in case
1355
     * connection gets cleanup before HTTP/2 stream close. */
1356
0
    r->h2_stream->mesh_state = NULL;
1357
0
  }
1358
  /* send the reply */
1359
  /* We don't reuse the encoded answer if:
1360
   * - either the previous or current response has a local alias.  We could
1361
   *   compare the alias records and still reuse the previous answer if they
1362
   *   are the same, but that would be complicated and error prone for the
1363
   *   relatively minor case. So we err on the side of safety.
1364
   * - there are registered callback functions for the given rcode, as these
1365
   *   need to be called for each reply. */
1366
0
  if(((rcode != LDNS_RCODE_SERVFAIL &&
1367
0
      !m->s.env->inplace_cb_lists[inplace_cb_reply]) ||
1368
0
    (rcode == LDNS_RCODE_SERVFAIL &&
1369
0
      !m->s.env->inplace_cb_lists[inplace_cb_reply_servfail])) &&
1370
0
    prev && prev_buffer && prev->qflags == r->qflags &&
1371
0
    !prev->local_alias && !r->local_alias &&
1372
0
    prev->edns.edns_present == r->edns.edns_present &&
1373
0
    prev->edns.bits == r->edns.bits &&
1374
0
    prev->edns.udp_size == r->edns.udp_size &&
1375
0
    edns_opt_list_compare(prev->edns.opt_list_out, r->edns.opt_list_out) == 0 &&
1376
0
    edns_opt_list_compare(prev->edns.opt_list_inplace_cb_out, r->edns.opt_list_inplace_cb_out) == 0
1377
0
    ) {
1378
    /* if the previous reply is identical to this one, fix ID */
1379
0
    if(prev_buffer != r_buffer)
1380
0
      sldns_buffer_copy(r_buffer, prev_buffer);
1381
0
    sldns_buffer_write_at(r_buffer, 0, &r->qid, sizeof(uint16_t));
1382
0
    sldns_buffer_write_at(r_buffer, 12, r->qname,
1383
0
      m->s.qinfo.qname_len);
1384
0
    m->reply_list = NULL;
1385
0
    comm_point_send_reply(&r->query_reply);
1386
0
    m->reply_list = rlist;
1387
0
  } else if(rcode) {
1388
0
    m->s.qinfo.qname = r->qname;
1389
0
    m->s.qinfo.local_alias = r->local_alias;
1390
0
    if(rcode == LDNS_RCODE_SERVFAIL) {
1391
0
      if(!inplace_cb_reply_servfail_call(m->s.env, &m->s.qinfo, &m->s,
1392
0
        rep, rcode, &r->edns, &r->query_reply, m->s.region, &r->start_time))
1393
0
          r->edns.opt_list_inplace_cb_out = NULL;
1394
0
    } else { 
1395
0
      if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep, rcode,
1396
0
        &r->edns, &r->query_reply, m->s.region, &r->start_time))
1397
0
          r->edns.opt_list_inplace_cb_out = NULL;
1398
0
    }
1399
    /* Send along EDE BOGUS EDNS0 option when answer is bogus */
1400
0
    if(m->s.env->cfg->ede && rcode == LDNS_RCODE_SERVFAIL &&
1401
0
      m->s.env->need_to_validate && (!(r->qflags&BIT_CD) ||
1402
0
      m->s.env->cfg->ignore_cd) && rep &&
1403
0
      (rep->security <= sec_status_bogus ||
1404
0
      rep->security == sec_status_secure_sentinel_fail)) {
1405
0
      char *reason = m->s.env->cfg->val_log_level >= 2
1406
0
        ? errinf_to_str_bogus(&m->s) : NULL;
1407
1408
      /* During validation the EDE code can be received via two
1409
       * code paths. One code path fills the reply_info EDE, and
1410
       * the other fills it in the errinf_strlist. These paths
1411
       * intersect at some points, but where is opaque due to
1412
       * the complexity of the validator. At the time of writing
1413
       * we make the choice to prefer the EDE from errinf_strlist
1414
       * but a compelling reason to do otherwise is just as valid
1415
       */
1416
0
      sldns_ede_code reason_bogus = errinf_to_reason_bogus(&m->s);
1417
0
      if ((reason_bogus == LDNS_EDE_DNSSEC_BOGUS &&
1418
0
        rep->reason_bogus != LDNS_EDE_NONE) ||
1419
0
        reason_bogus == LDNS_EDE_NONE) {
1420
0
          reason_bogus = rep->reason_bogus;
1421
0
      }
1422
1423
0
      if(reason_bogus != LDNS_EDE_NONE) {
1424
0
        edns_opt_list_append_ede(&r->edns.opt_list_out,
1425
0
          m->s.region, reason_bogus, reason);
1426
0
      }
1427
0
      free(reason);
1428
0
    }
1429
0
    error_encode(r_buffer, rcode, &m->s.qinfo, r->qid,
1430
0
      r->qflags, &r->edns);
1431
0
    m->reply_list = NULL;
1432
0
    comm_point_send_reply(&r->query_reply);
1433
0
    m->reply_list = rlist;
1434
0
  } else {
1435
0
    size_t udp_size = r->edns.udp_size;
1436
0
    r->edns.edns_version = EDNS_ADVERTISED_VERSION;
1437
0
    r->edns.udp_size = EDNS_ADVERTISED_SIZE;
1438
0
    r->edns.ext_rcode = 0;
1439
0
    r->edns.bits &= EDNS_DO;
1440
0
    m->s.qinfo.qname = r->qname;
1441
0
    m->s.qinfo.local_alias = r->local_alias;
1442
0
    if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep,
1443
0
      LDNS_RCODE_NOERROR, &r->edns, &r->query_reply, m->s.region, &r->start_time) ||
1444
0
      !reply_info_answer_encode(&m->s.qinfo, rep, r->qid, 
1445
0
      r->qflags, r_buffer, 0, 1, m->s.env->scratch,
1446
0
      udp_size, &r->edns, (int)(r->edns.bits & EDNS_DO),
1447
0
      secure)) 
1448
0
    {
1449
0
      if(!inplace_cb_reply_servfail_call(m->s.env, &m->s.qinfo, &m->s,
1450
0
      rep, LDNS_RCODE_SERVFAIL, &r->edns, &r->query_reply, m->s.region, &r->start_time))
1451
0
        r->edns.opt_list_inplace_cb_out = NULL;
1452
      /* internal server error (probably malloc failure) so no
1453
       * EDE (RFC8914) needed */
1454
0
      error_encode(r_buffer, LDNS_RCODE_SERVFAIL,
1455
0
        &m->s.qinfo, r->qid, r->qflags, &r->edns);
1456
0
    }
1457
0
    m->reply_list = NULL;
1458
0
    comm_point_send_reply(&r->query_reply);
1459
0
    m->reply_list = rlist;
1460
0
  }
1461
  /* account */
1462
0
  log_assert(m->s.env->mesh->num_reply_addrs > 0);
1463
0
  m->s.env->mesh->num_reply_addrs--;
1464
0
  end_time = *m->s.env->now_tv;
1465
0
  timeval_subtract(&duration, &end_time, &r->start_time);
1466
0
  verbose(VERB_ALGO, "query took " ARG_LL "d.%6.6d sec",
1467
0
    (long long)duration.tv_sec, (int)duration.tv_usec);
1468
0
  m->s.env->mesh->replies_sent++;
1469
0
  timeval_add(&m->s.env->mesh->replies_sum_wait, &duration);
1470
0
  timehist_insert(m->s.env->mesh->histogram, &duration);
1471
0
  if(m->s.env->cfg->stat_extended) {
1472
0
    uint16_t rc = FLAGS_GET_RCODE(sldns_buffer_read_u16_at(
1473
0
      r_buffer, 2));
1474
0
    if(secure) m->s.env->mesh->ans_secure++;
1475
0
    m->s.env->mesh->ans_rcode[ rc ] ++;
1476
0
    if(rc == 0 && LDNS_ANCOUNT(sldns_buffer_begin(r_buffer)) == 0)
1477
0
      m->s.env->mesh->ans_nodata++;
1478
0
  }
1479
  /* Log reply sent */
1480
0
  if(m->s.env->cfg->log_replies) {
1481
0
    log_reply_info(NO_VERBOSE, &m->s.qinfo,
1482
0
      &r->query_reply.client_addr,
1483
0
      r->query_reply.client_addrlen, duration, 0, r_buffer);
1484
0
  }
1485
0
}
1486
1487
void mesh_query_done(struct mesh_state* mstate)
1488
0
{
1489
0
  struct mesh_reply* r;
1490
0
  struct mesh_reply* prev = NULL;
1491
0
  struct sldns_buffer* prev_buffer = NULL;
1492
0
  struct mesh_cb* c;
1493
0
  struct reply_info* rep = (mstate->s.return_msg?
1494
0
    mstate->s.return_msg->rep:NULL);
1495
0
  struct timeval tv = {0, 0};
1496
  /* No need for the serve expired timer anymore; we are going to reply. */
1497
0
  if(mstate->s.serve_expired_data) {
1498
0
    comm_timer_delete(mstate->s.serve_expired_data->timer);
1499
0
    mstate->s.serve_expired_data->timer = NULL;
1500
0
  }
1501
0
  if(mstate->s.return_rcode == LDNS_RCODE_SERVFAIL ||
1502
0
    (rep && FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_SERVFAIL)) {
1503
    /* we are SERVFAILing; check for expired answer here */
1504
0
    mesh_serve_expired_callback(mstate);
1505
0
    if((mstate->reply_list || mstate->cb_list)
1506
0
    && mstate->s.env->cfg->log_servfail
1507
0
    && !mstate->s.env->cfg->val_log_squelch) {
1508
0
      char* err = errinf_to_str_servfail(&mstate->s);
1509
0
      if(err)
1510
0
        log_err("%s", err);
1511
0
      free(err);
1512
0
    }
1513
0
  }
1514
0
  for(r = mstate->reply_list; r; r = r->next) {
1515
0
    tv = r->start_time;
1516
1517
    /* if a response-ip address block has been stored the
1518
     *  information should be logged for each client. */
1519
0
    if(mstate->s.respip_action_info &&
1520
0
      mstate->s.respip_action_info->addrinfo) {
1521
0
      respip_inform_print(mstate->s.respip_action_info,
1522
0
        r->qname, mstate->s.qinfo.qtype,
1523
0
        mstate->s.qinfo.qclass, r->local_alias,
1524
0
        &r->query_reply.client_addr,
1525
0
        r->query_reply.client_addrlen);
1526
0
      if(mstate->s.env->cfg->stat_extended &&
1527
0
        mstate->s.respip_action_info->rpz_used) {
1528
0
        if(mstate->s.respip_action_info->rpz_disabled)
1529
0
          mstate->s.env->mesh->rpz_action[RPZ_DISABLED_ACTION]++;
1530
0
        if(mstate->s.respip_action_info->rpz_cname_override)
1531
0
          mstate->s.env->mesh->rpz_action[RPZ_CNAME_OVERRIDE_ACTION]++;
1532
0
        else
1533
0
          mstate->s.env->mesh->rpz_action[respip_action_to_rpz_action(
1534
0
            mstate->s.respip_action_info->action)]++;
1535
0
      }
1536
0
    }
1537
1538
    /* if this query is determined to be dropped during the
1539
     * mesh processing, this is the point to take that action. */
1540
0
    if(mstate->s.is_drop) {
1541
      /* briefly set the reply_list to NULL, so that the
1542
       * tcp req info cleanup routine that calls the mesh
1543
       * to deregister the meshstate for it is not done
1544
       * because the list is NULL and also accounting is not
1545
       * done there, but instead we do that here. */
1546
0
      struct mesh_reply* reply_list = mstate->reply_list;
1547
0
      mstate->reply_list = NULL;
1548
0
      comm_point_drop_reply(&r->query_reply);
1549
0
      mstate->reply_list = reply_list;
1550
0
    } else {
1551
0
      struct sldns_buffer* r_buffer = r->query_reply.c->buffer;
1552
0
      if(r->query_reply.c->tcp_req_info) {
1553
0
        r_buffer = r->query_reply.c->tcp_req_info->spool_buffer;
1554
0
        prev_buffer = NULL;
1555
0
      }
1556
0
      mesh_send_reply(mstate, mstate->s.return_rcode, rep,
1557
0
        r, r_buffer, prev, prev_buffer);
1558
0
      if(r->query_reply.c->tcp_req_info) {
1559
0
        tcp_req_info_remove_mesh_state(r->query_reply.c->tcp_req_info, mstate);
1560
0
        r_buffer = NULL;
1561
0
      }
1562
0
      prev = r;
1563
0
      prev_buffer = r_buffer;
1564
0
    }
1565
0
  }
1566
0
  if(mstate->reply_list) {
1567
0
    mstate->reply_list = NULL;
1568
0
    if(!mstate->reply_list && !mstate->cb_list) {
1569
      /* was a reply state, not anymore */
1570
0
      log_assert(mstate->s.env->mesh->num_reply_states > 0);
1571
0
      mstate->s.env->mesh->num_reply_states--;
1572
0
    }
1573
0
    if(!mstate->reply_list && !mstate->cb_list &&
1574
0
      mstate->super_set.count == 0)
1575
0
      mstate->s.env->mesh->num_detached_states++;
1576
0
  }
1577
0
  mstate->replies_sent = 1;
1578
0
  while((c = mstate->cb_list) != NULL) {
1579
    /* take this cb off the list; so that the list can be
1580
     * changed, eg. by adds from the callback routine */
1581
0
    if(!mstate->reply_list && mstate->cb_list && !c->next) {
1582
      /* was a reply state, not anymore */
1583
0
      log_assert(mstate->s.env->mesh->num_reply_states > 0);
1584
0
      mstate->s.env->mesh->num_reply_states--;
1585
0
    }
1586
0
    mstate->cb_list = c->next;
1587
0
    if(!mstate->reply_list && !mstate->cb_list &&
1588
0
      mstate->super_set.count == 0)
1589
0
      mstate->s.env->mesh->num_detached_states++;
1590
0
    mesh_do_callback(mstate, mstate->s.return_rcode, rep, c, &tv);
1591
0
  }
1592
0
}
1593
1594
void mesh_walk_supers(struct mesh_area* mesh, struct mesh_state* mstate)
1595
0
{
1596
0
  struct mesh_state_ref* ref;
1597
0
  RBTREE_FOR(ref, struct mesh_state_ref*, &mstate->super_set)
1598
0
  {
1599
    /* make super runnable */
1600
0
    (void)rbtree_insert(&mesh->run, &ref->s->run_node);
1601
    /* callback the function to inform super of result */
1602
0
    fptr_ok(fptr_whitelist_mod_inform_super(
1603
0
      mesh->mods.mod[ref->s->s.curmod]->inform_super));
1604
0
    (*mesh->mods.mod[ref->s->s.curmod]->inform_super)(&mstate->s, 
1605
0
      ref->s->s.curmod, &ref->s->s);
1606
    /* copy state that is always relevant to super */
1607
0
    copy_state_to_super(&mstate->s, ref->s->s.curmod, &ref->s->s);
1608
0
  }
1609
0
}
1610
1611
struct mesh_state* mesh_area_find(struct mesh_area* mesh,
1612
  struct respip_client_info* cinfo, struct query_info* qinfo,
1613
  uint16_t qflags, int prime, int valrec)
1614
0
{
1615
0
  struct mesh_state key;
1616
0
  struct mesh_state* result;
1617
1618
0
  key.node.key = &key;
1619
0
  key.s.is_priming = prime;
1620
0
  key.s.is_valrec = valrec;
1621
0
  key.s.qinfo = *qinfo;
1622
0
  key.s.query_flags = qflags;
1623
  /* We are searching for a similar mesh state when we DO want to
1624
   * aggregate the state. Thus unique is set to NULL. (default when we
1625
   * desire aggregation).*/
1626
0
  key.unique = NULL;
1627
0
  key.s.client_info = cinfo;
1628
  
1629
0
  result = (struct mesh_state*)rbtree_search(&mesh->all, &key);
1630
0
  return result;
1631
0
}
1632
1633
int mesh_state_add_cb(struct mesh_state* s, struct edns_data* edns,
1634
        sldns_buffer* buf, mesh_cb_func_type cb, void* cb_arg,
1635
  uint16_t qid, uint16_t qflags)
1636
0
{
1637
0
  struct mesh_cb* r = regional_alloc(s->s.region, 
1638
0
    sizeof(struct mesh_cb));
1639
0
  if(!r)
1640
0
    return 0;
1641
0
  r->buf = buf;
1642
0
  log_assert(fptr_whitelist_mesh_cb(cb)); /* early failure ifmissing*/
1643
0
  r->cb = cb;
1644
0
  r->cb_arg = cb_arg;
1645
0
  r->edns = *edns;
1646
0
  if(edns->opt_list_in && !(r->edns.opt_list_in =
1647
0
      edns_opt_copy_region(edns->opt_list_in, s->s.region)))
1648
0
    return 0;
1649
0
  if(edns->opt_list_out && !(r->edns.opt_list_out =
1650
0
      edns_opt_copy_region(edns->opt_list_out, s->s.region)))
1651
0
    return 0;
1652
0
  if(edns->opt_list_inplace_cb_out && !(r->edns.opt_list_inplace_cb_out =
1653
0
      edns_opt_copy_region(edns->opt_list_inplace_cb_out, s->s.region)))
1654
0
    return 0;
1655
0
  r->qid = qid;
1656
0
  r->qflags = qflags;
1657
0
  r->next = s->cb_list;
1658
0
  s->cb_list = r;
1659
0
  return 1;
1660
1661
0
}
1662
1663
int mesh_state_add_reply(struct mesh_state* s, struct edns_data* edns,
1664
        struct comm_reply* rep, uint16_t qid, uint16_t qflags,
1665
        const struct query_info* qinfo)
1666
0
{
1667
0
  struct mesh_reply* r = regional_alloc(s->s.region,
1668
0
    sizeof(struct mesh_reply));
1669
0
  if(!r)
1670
0
    return 0;
1671
0
  r->query_reply = *rep;
1672
0
  r->edns = *edns;
1673
0
  if(edns->opt_list_in && !(r->edns.opt_list_in =
1674
0
      edns_opt_copy_region(edns->opt_list_in, s->s.region)))
1675
0
    return 0;
1676
0
  if(edns->opt_list_out && !(r->edns.opt_list_out =
1677
0
      edns_opt_copy_region(edns->opt_list_out, s->s.region)))
1678
0
    return 0;
1679
0
  if(edns->opt_list_inplace_cb_out && !(r->edns.opt_list_inplace_cb_out =
1680
0
      edns_opt_copy_region(edns->opt_list_inplace_cb_out, s->s.region)))
1681
0
    return 0;
1682
0
  r->qid = qid;
1683
0
  r->qflags = qflags;
1684
0
  r->start_time = *s->s.env->now_tv;
1685
0
  r->next = s->reply_list;
1686
0
  r->qname = regional_alloc_init(s->s.region, qinfo->qname,
1687
0
    s->s.qinfo.qname_len);
1688
0
  if(!r->qname)
1689
0
    return 0;
1690
0
  if(rep->c->use_h2)
1691
0
    r->h2_stream = rep->c->h2_stream;
1692
1693
  /* Data related to local alias stored in 'qinfo' (if any) is ephemeral
1694
   * and can be different for different original queries (even if the
1695
   * replaced query name is the same).  So we need to make a deep copy
1696
   * and store the copy for each reply info. */
1697
0
  if(qinfo->local_alias) {
1698
0
    struct packed_rrset_data* d;
1699
0
    struct packed_rrset_data* dsrc;
1700
0
    r->local_alias = regional_alloc_zero(s->s.region,
1701
0
      sizeof(*qinfo->local_alias));
1702
0
    if(!r->local_alias)
1703
0
      return 0;
1704
0
    r->local_alias->rrset = regional_alloc_init(s->s.region,
1705
0
      qinfo->local_alias->rrset,
1706
0
      sizeof(*qinfo->local_alias->rrset));
1707
0
    if(!r->local_alias->rrset)
1708
0
      return 0;
1709
0
    dsrc = qinfo->local_alias->rrset->entry.data;
1710
1711
    /* In the current implementation, a local alias must be
1712
     * a single CNAME RR (see worker_handle_request()). */
1713
0
    log_assert(!qinfo->local_alias->next && dsrc->count == 1 &&
1714
0
      qinfo->local_alias->rrset->rk.type ==
1715
0
      htons(LDNS_RR_TYPE_CNAME));
1716
    /* we should make a local copy for the owner name of
1717
     * the RRset */
1718
0
    r->local_alias->rrset->rk.dname_len =
1719
0
      qinfo->local_alias->rrset->rk.dname_len;
1720
0
    r->local_alias->rrset->rk.dname = regional_alloc_init(
1721
0
      s->s.region, qinfo->local_alias->rrset->rk.dname,
1722
0
      qinfo->local_alias->rrset->rk.dname_len);
1723
0
    if(!r->local_alias->rrset->rk.dname)
1724
0
      return 0;
1725
1726
    /* the rrset is not packed, like in the cache, but it is
1727
     * individually allocated with an allocator from localzone. */
1728
0
    d = regional_alloc_zero(s->s.region, sizeof(*d));
1729
0
    if(!d)
1730
0
      return 0;
1731
0
    r->local_alias->rrset->entry.data = d;
1732
0
    if(!rrset_insert_rr(s->s.region, d, dsrc->rr_data[0],
1733
0
      dsrc->rr_len[0], dsrc->rr_ttl[0], "CNAME local alias"))
1734
0
      return 0;
1735
0
  } else
1736
0
    r->local_alias = NULL;
1737
1738
0
  s->reply_list = r;
1739
0
  return 1;
1740
0
}
1741
1742
/* Extract the query info and flags from 'mstate' into '*qinfop' and '*qflags'.
1743
 * Since this is only used for internal refetch of otherwise-expired answer,
1744
 * we simply ignore the rare failure mode when memory allocation fails. */
1745
static void
1746
mesh_copy_qinfo(struct mesh_state* mstate, struct query_info** qinfop,
1747
  uint16_t* qflags)
1748
0
{
1749
0
  struct regional* region = mstate->s.env->scratch;
1750
0
  struct query_info* qinfo;
1751
1752
0
  qinfo = regional_alloc_init(region, &mstate->s.qinfo, sizeof(*qinfo));
1753
0
  if(!qinfo)
1754
0
    return;
1755
0
  qinfo->qname = regional_alloc_init(region, qinfo->qname,
1756
0
    qinfo->qname_len);
1757
0
  if(!qinfo->qname)
1758
0
    return;
1759
0
  *qinfop = qinfo;
1760
0
  *qflags = mstate->s.query_flags;
1761
0
}
1762
1763
/**
 * Continue processing the mesh state at another module.
 * Handles module to modules transfer of control.
 * Handles module finished.
 * @param mesh: the mesh area.
 * @param mstate: currently active mesh state.
 *  Deleted if finished, calls _done and _supers to 
 *  send replies to clients and inform other mesh states.
 *  This in turn may create additional runnable mesh states.
 * @param s: state at which the current module exited.
 * @param ev: the event sent to the module.
 *  returned is the event to send to the next module.
 * @return true if continue processing at the new module.
 *  false if not continued processing is needed.
 */
static int
mesh_continue(struct mesh_area* mesh, struct mesh_state* mstate,
	enum module_ext_state s, enum module_ev* ev)
{
	/* cap the number of times one state may be (re)activated; this
	 * catches modules that bounce control back and forth forever */
	mstate->num_activated++;
	if(mstate->num_activated > MESH_MAX_ACTIVATION) {
		/* module is looping. Stop it. */
		log_err("internal error: looping module (%s) stopped",
			mesh->mods.mod[mstate->s.curmod]->name);
		log_query_info(NO_VERBOSE, "pass error for qstate",
			&mstate->s.qinfo);
		/* fall through to the module_error handling below */
		s = module_error;
	}
	if(s == module_wait_module || s == module_restart_next) {
		/* start next module */
		mstate->s.curmod++;
		if(mesh->mods.num == mstate->s.curmod) {
			log_err("Cannot pass to next module; at last module");
			log_query_info(VERB_QUERY, "pass error for qstate",
				&mstate->s.qinfo);
			mstate->s.curmod--;
			/* recurse once to run the module_error path */
			return mesh_continue(mesh, mstate, module_error, ev);
		}
		if(s == module_restart_next) {
			/* clear the per-module state of this and all later
			 * modules so they start afresh; curmod is restored
			 * afterwards so processing resumes at this module */
			int curmod = mstate->s.curmod;
			for(; mstate->s.curmod < mesh->mods.num; 
				mstate->s.curmod++) {
				fptr_ok(fptr_whitelist_mod_clear(
					mesh->mods.mod[mstate->s.curmod]->clear));
				(*mesh->mods.mod[mstate->s.curmod]->clear)
					(&mstate->s, mstate->s.curmod);
				mstate->s.minfo[mstate->s.curmod] = NULL;
			}
			mstate->s.curmod = curmod;
		}
		*ev = module_event_pass;
		return 1;
	}
	/* a module may only wait for a subquery it actually created */
	if(s == module_wait_subquery && mstate->sub_set.count == 0) {
		log_err("module cannot wait for subquery, subquery list empty");
		log_query_info(VERB_QUERY, "pass error for qstate",
			&mstate->s.qinfo);
		s = module_error;
	}
	if(s == module_error && mstate->s.return_rcode == LDNS_RCODE_NOERROR) {
		/* error is bad, handle pass back up below */
		mstate->s.return_rcode = LDNS_RCODE_SERVFAIL;
	}
	if(s == module_error) {
		/* answer clients (with the error rcode), notify super
		 * states that depend on this one, then delete the state */
		mesh_query_done(mstate);
		mesh_walk_supers(mesh, mstate);
		mesh_state_delete(&mstate->s);
		return 0;
	}
	if(s == module_finished) {
		if(mstate->s.curmod == 0) {
			/* first module finished: the query is fully done */
			struct query_info* qinfo = NULL;
			uint16_t qflags;
			int rpz_p = 0;

			mesh_query_done(mstate);
			mesh_walk_supers(mesh, mstate);

			/* If the answer to the query needs to be refetched
			 * from an external DNS server, we'll need to schedule
			 * a prefetch after removing the current state, so
			 * we need to make a copy of the query info here. */
			if(mstate->s.need_refetch) {
				mesh_copy_qinfo(mstate, &qinfo, &qflags);
				rpz_p = mstate->s.rpz_passthru;
			}

			mesh_state_delete(&mstate->s);
			/* qinfo is non-NULL only if the copy above worked */
			if(qinfo) {
				mesh_schedule_prefetch(mesh, qinfo, qflags,
					0, 1, rpz_p);
			}
			return 0;
		}
		/* pass along the locus of control */
		mstate->s.curmod --;
		*ev = module_event_moddone;
		return 1;
	}
	/* all other exit states (e.g. wait_reply): stop processing here */
	return 0;
}
1864
1865
void mesh_run(struct mesh_area* mesh, struct mesh_state* mstate,
	enum module_ev ev, struct outbound_entry* e)
{
	enum module_ext_state s;
	verbose(VERB_ALGO, "mesh_run: start");
	/* keep running states until the runnable tree is drained */
	while(mstate) {
		/* run the module */
		fptr_ok(fptr_whitelist_mod_operate(
			mesh->mods.mod[mstate->s.curmod]->operate));
		(*mesh->mods.mod[mstate->s.curmod]->operate)
			(&mstate->s, ev, mstate->s.curmod, e);

		/* examine results */
		mstate->s.reply = NULL;
		/* scratch region is per-operate; free it after every call */
		regional_free_all(mstate->s.env->scratch);
		s = mstate->s.ext_state[mstate->s.curmod];
		verbose(VERB_ALGO, "mesh_run: %s module exit state is %s", 
			mesh->mods.mod[mstate->s.curmod]->name, strextstate(s));
		/* the outbound entry is only for the first operate call;
		 * subsequent activations must not see it again */
		e = NULL;
		/* mesh_continue decides the next module/event for this
		 * state; nonzero means keep driving the same state */
		if(mesh_continue(mesh, mstate, s, &ev))
			continue;

		/* run more modules */
		ev = module_event_pass;
		if(mesh->run.count > 0) {
			/* pop random element off the runnable tree */
			mstate = (struct mesh_state*)mesh->run.root->key;
			(void)rbtree_delete(&mesh->run, mstate);
		} else mstate = NULL;
	}
	if(verbosity >= VERB_ALGO) {
		mesh_stats(mesh, "mesh_run: end");
		mesh_log_list(mesh);
	}
}
1900
1901
void 
1902
mesh_log_list(struct mesh_area* mesh)
1903
0
{
1904
0
  char buf[30];
1905
0
  struct mesh_state* m;
1906
0
  int num = 0;
1907
0
  RBTREE_FOR(m, struct mesh_state*, &mesh->all) {
1908
0
    snprintf(buf, sizeof(buf), "%d%s%s%s%s%s%s mod%d %s%s", 
1909
0
      num++, (m->s.is_priming)?"p":"",  /* prime */
1910
0
      (m->s.is_valrec)?"v":"",  /* prime */
1911
0
      (m->s.query_flags&BIT_RD)?"RD":"",
1912
0
      (m->s.query_flags&BIT_CD)?"CD":"",
1913
0
      (m->super_set.count==0)?"d":"", /* detached */
1914
0
      (m->sub_set.count!=0)?"c":"",  /* children */
1915
0
      m->s.curmod, (m->reply_list)?"rep":"", /*hasreply*/
1916
0
      (m->cb_list)?"cb":"" /* callbacks */
1917
0
      ); 
1918
0
    log_query_info(VERB_ALGO, buf, &m->s.qinfo);
1919
0
  }
1920
0
}
1921
1922
void 
1923
mesh_stats(struct mesh_area* mesh, const char* str)
1924
0
{
1925
0
  verbose(VERB_DETAIL, "%s %u recursion states (%u with reply, "
1926
0
    "%u detached), %u waiting replies, %u recursion replies "
1927
0
    "sent, %d replies dropped, %d states jostled out", 
1928
0
    str, (unsigned)mesh->all.count, 
1929
0
    (unsigned)mesh->num_reply_states,
1930
0
    (unsigned)mesh->num_detached_states,
1931
0
    (unsigned)mesh->num_reply_addrs,
1932
0
    (unsigned)mesh->replies_sent,
1933
0
    (unsigned)mesh->stats_dropped,
1934
0
    (unsigned)mesh->stats_jostled);
1935
0
  if(mesh->replies_sent > 0) {
1936
0
    struct timeval avg;
1937
0
    timeval_divide(&avg, &mesh->replies_sum_wait, 
1938
0
      mesh->replies_sent);
1939
0
    log_info("average recursion processing time "
1940
0
      ARG_LL "d.%6.6d sec",
1941
0
      (long long)avg.tv_sec, (int)avg.tv_usec);
1942
0
    log_info("histogram of recursion processing times");
1943
0
    timehist_log(mesh->histogram, "recursions");
1944
0
  }
1945
0
}
1946
1947
void 
1948
mesh_stats_clear(struct mesh_area* mesh)
1949
0
{
1950
0
  if(!mesh)
1951
0
    return;
1952
0
  mesh->replies_sent = 0;
1953
0
  mesh->replies_sum_wait.tv_sec = 0;
1954
0
  mesh->replies_sum_wait.tv_usec = 0;
1955
0
  mesh->stats_jostled = 0;
1956
0
  mesh->stats_dropped = 0;
1957
0
  timehist_clear(mesh->histogram);
1958
0
  mesh->ans_secure = 0;
1959
0
  mesh->ans_bogus = 0;
1960
0
  mesh->ans_expired = 0;
1961
0
  memset(&mesh->ans_rcode[0], 0, sizeof(size_t)*UB_STATS_RCODE_NUM);
1962
0
  memset(&mesh->rpz_action[0], 0, sizeof(size_t)*UB_STATS_RPZ_ACTION_NUM);
1963
0
  mesh->ans_nodata = 0;
1964
0
}
1965
1966
size_t 
1967
mesh_get_mem(struct mesh_area* mesh)
1968
0
{
1969
0
  struct mesh_state* m;
1970
0
  size_t s = sizeof(*mesh) + sizeof(struct timehist) +
1971
0
    sizeof(struct th_buck)*mesh->histogram->num +
1972
0
    sizeof(sldns_buffer) + sldns_buffer_capacity(mesh->qbuf_bak);
1973
0
  RBTREE_FOR(m, struct mesh_state*, &mesh->all) {
1974
    /* all, including m itself allocated in qstate region */
1975
0
    s += regional_get_mem(m->s.region);
1976
0
  }
1977
0
  return s;
1978
0
}
1979
1980
int 
1981
mesh_detect_cycle(struct module_qstate* qstate, struct query_info* qinfo,
1982
  uint16_t flags, int prime, int valrec)
1983
0
{
1984
0
  struct mesh_area* mesh = qstate->env->mesh;
1985
0
  struct mesh_state* dep_m = NULL;
1986
0
  dep_m = mesh_area_find(mesh, NULL, qinfo, flags, prime, valrec);
1987
0
  return mesh_detect_cycle_found(qstate, dep_m);
1988
0
}
1989
1990
void mesh_list_insert(struct mesh_state* m, struct mesh_state** fp,
1991
        struct mesh_state** lp)
1992
0
{
1993
  /* insert as last element */
1994
0
  m->prev = *lp;
1995
0
  m->next = NULL;
1996
0
  if(*lp)
1997
0
    (*lp)->next = m;
1998
0
  else  *fp = m;
1999
0
  *lp = m;
2000
0
}
2001
2002
void mesh_list_remove(struct mesh_state* m, struct mesh_state** fp,
2003
        struct mesh_state** lp)
2004
0
{
2005
0
  if(m->next)
2006
0
    m->next->prev = m->prev;
2007
0
  else  *lp = m->prev;
2008
0
  if(m->prev)
2009
0
    m->prev->next = m->next;
2010
0
  else  *fp = m->next;
2011
0
}
2012
2013
void mesh_state_remove_reply(struct mesh_area* mesh, struct mesh_state* m,
	struct comm_point* cp)
{
	/* Remove from m->reply_list every reply that was to be sent over
	 * comm point cp, and update the mesh accounting counters. */
	struct mesh_reply* n, *prev = NULL;
	n = m->reply_list;
	/* when in mesh_cleanup, it sets the reply_list to NULL, so that
	 * there is no accounting twice */
	if(!n) return; /* nothing to remove, also no accounting needed */
	while(n) {
		if(n->query_reply.c == cp) {
			/* unlink it */
			if(prev) prev->next = n->next;
			else m->reply_list = n->next;
			/* delete it, but allocated in m region */
			log_assert(mesh->num_reply_addrs > 0);
			mesh->num_reply_addrs--;

			/* prev stays the same: the removed node is no
			 * longer the predecessor of n->next */
			/* prev = prev; */
			n = n->next;
			continue;
		}
		prev = n;
		n = n->next;
	}
	/* it was not detached (because it had a reply list), could be now */
	if(!m->reply_list && !m->cb_list
		&& m->super_set.count == 0) {
		mesh->num_detached_states++;
	}
	/* if not replies any more in mstate, it is no longer a reply_state */
	if(!m->reply_list && !m->cb_list) {
		log_assert(mesh->num_reply_states > 0);
		mesh->num_reply_states--;
	}
}
2048
2049
2050
static int
2051
apply_respip_action(struct module_qstate* qstate,
2052
  const struct query_info* qinfo, struct respip_client_info* cinfo,
2053
  struct respip_action_info* actinfo, struct reply_info* rep,
2054
  struct ub_packed_rrset_key** alias_rrset,
2055
  struct reply_info** encode_repp, struct auth_zones* az)
2056
0
{
2057
0
  if(qinfo->qtype != LDNS_RR_TYPE_A &&
2058
0
    qinfo->qtype != LDNS_RR_TYPE_AAAA &&
2059
0
    qinfo->qtype != LDNS_RR_TYPE_ANY)
2060
0
    return 1;
2061
2062
0
  if(!respip_rewrite_reply(qinfo, cinfo, rep, encode_repp, actinfo,
2063
0
    alias_rrset, 0, qstate->region, az, NULL))
2064
0
    return 0;
2065
2066
  /* xxx_deny actions mean dropping the reply, unless the original reply
2067
   * was redirected to response-ip data. */
2068
0
  if((actinfo->action == respip_deny ||
2069
0
    actinfo->action == respip_inform_deny) &&
2070
0
    *encode_repp == rep)
2071
0
    *encode_repp = NULL;
2072
2073
0
  return 1;
2074
0
}
2075
2076
void
mesh_serve_expired_callback(void* arg)
{
	/* Timer callback: try to answer the clients of this mesh state
	 * with expired (stale) data from the cache while the recursion
	 * continues in the background.  arg is the mesh state. */
	struct mesh_state* mstate = (struct mesh_state*) arg;
	struct module_qstate* qstate = &mstate->s;
	struct mesh_reply* r;
	struct mesh_area* mesh = qstate->env->mesh;
	struct dns_msg* msg;
	struct mesh_cb* c;
	struct mesh_reply* prev = NULL;
	struct sldns_buffer* prev_buffer = NULL;
	struct sldns_buffer* r_buffer = NULL;
	struct reply_info* partial_rep = NULL;
	struct ub_packed_rrset_key* alias_rrset = NULL;
	struct reply_info* encode_rep = NULL;
	struct respip_action_info actinfo;
	struct query_info* lookup_qinfo = &qstate->qinfo;
	struct query_info qinfo_tmp;
	struct timeval tv = {0, 0};
	int must_validate = (!(qstate->query_flags&BIT_CD)
		|| qstate->env->cfg->ignore_cd) && qstate->env->need_to_validate;
	if(!qstate->serve_expired_data) return;
	verbose(VERB_ALGO, "Serve expired: Trying to reply with expired data");
	/* the timer has fired; delete it so it cannot fire again */
	comm_timer_delete(qstate->serve_expired_data->timer);
	qstate->serve_expired_data->timer = NULL;
	/* If is_drop or no_cache_lookup (modules that handle their own cache e.g.,
	 * subnetmod) ignore stale data from the main cache. */
	if(qstate->no_cache_lookup || qstate->is_drop) {
		verbose(VERB_ALGO,
			"Serve expired: Not allowed to look into cache for stale");
		return;
	}
	/* The following while is used instead of the `goto lookup_cache`
	 * like in the worker. */
	while(1) {
		fptr_ok(fptr_whitelist_serve_expired_lookup(
			qstate->serve_expired_data->get_cached_answer));
		msg = (*qstate->serve_expired_data->get_cached_answer)(qstate,
			lookup_qinfo);
		/* no stale answer at all: nothing to serve */
		if(!msg)
			return;
		/* Reset these in case we pass a second time from here. */
		encode_rep = msg->rep;
		memset(&actinfo, 0, sizeof(actinfo));
		actinfo.action = respip_none;
		alias_rrset = NULL;
		/* response-ip/rpz rewriting applies only on the first
		 * pass; later passes merge the CNAME chain instead */
		if((mesh->use_response_ip || mesh->use_rpz) &&
			!partial_rep && !apply_respip_action(qstate, &qstate->qinfo,
			qstate->client_info, &actinfo, msg->rep, &alias_rrset, &encode_rep,
			qstate->env->auth_zones)) {
			return;
		} else if(partial_rep &&
			!respip_merge_cname(partial_rep, &qstate->qinfo, msg->rep,
			qstate->client_info, must_validate, &encode_rep, qstate->region,
			qstate->env->auth_zones)) {
			return;
		}
		if(!encode_rep || alias_rrset) {
			if(!encode_rep) {
				/* Needs drop */
				return;
			} else {
				/* A partial CNAME chain is found. */
				partial_rep = encode_rep;
			}
		}
		/* We've found a partial reply ending with an
		* alias.  Replace the lookup qinfo for the
		* alias target and lookup the cache again to
		* (possibly) complete the reply.  As we're
		* passing the "base" reply, there will be no
		* more alias chasing. */
		if(partial_rep) {
			memset(&qinfo_tmp, 0, sizeof(qinfo_tmp));
			get_cname_target(alias_rrset, &qinfo_tmp.qname,
				&qinfo_tmp.qname_len);
			if(!qinfo_tmp.qname) {
				log_err("Serve expired: unexpected: invalid answer alias");
				return;
			}
			qinfo_tmp.qtype = qstate->qinfo.qtype;
			qinfo_tmp.qclass = qstate->qinfo.qclass;
			lookup_qinfo = &qinfo_tmp;
			continue;
		}
		break;
	}

	if(verbosity >= VERB_ALGO)
		log_dns_msg("Serve expired lookup", &qstate->qinfo, msg->rep);

	/* send the stale answer to every waiting client reply */
	for(r = mstate->reply_list; r; r = r->next) {
		/* remember the last start time for the callbacks below */
		tv = r->start_time;

		/* If address info is returned, it means the action should be an
		* 'inform' variant and the information should be logged. */
		if(actinfo.addrinfo) {
			respip_inform_print(&actinfo, r->qname,
				qstate->qinfo.qtype, qstate->qinfo.qclass,
				r->local_alias, &r->query_reply.client_addr,
				r->query_reply.client_addrlen);

			if(qstate->env->cfg->stat_extended && actinfo.rpz_used) {
				if(actinfo.rpz_disabled)
					qstate->env->mesh->rpz_action[RPZ_DISABLED_ACTION]++;
				if(actinfo.rpz_cname_override)
					qstate->env->mesh->rpz_action[RPZ_CNAME_OVERRIDE_ACTION]++;
				else
					qstate->env->mesh->rpz_action[
						respip_action_to_rpz_action(actinfo.action)]++;
			}
		}

		/* Add EDE Stale Answer (RFC 8914). Ignore global ede as this is
		 * warning instead of an error */
		if (r->edns.edns_present && qstate->env->cfg->ede_serve_expired &&
			qstate->env->cfg->ede) {
			edns_opt_list_append_ede(&r->edns.opt_list_out,
				mstate->s.region, LDNS_EDE_STALE_ANSWER, NULL);
		}

		/* for TCP streams the reply is spooled via the request info */
		r_buffer = r->query_reply.c->buffer;
		if(r->query_reply.c->tcp_req_info)
			r_buffer = r->query_reply.c->tcp_req_info->spool_buffer;
		mesh_send_reply(mstate, LDNS_RCODE_NOERROR, msg->rep,
			r, r_buffer, prev, prev_buffer);
		if(r->query_reply.c->tcp_req_info)
			tcp_req_info_remove_mesh_state(r->query_reply.c->tcp_req_info, mstate);
		prev = r;
		prev_buffer = r_buffer;

		/* Account for each reply sent. */
		mesh->ans_expired++;

	}
	/* all replies have been sent; clear the list and fix the
	 * reply-state/detached-state accounting accordingly */
	if(mstate->reply_list) {
		mstate->reply_list = NULL;
		if(!mstate->reply_list && !mstate->cb_list) {
			log_assert(mesh->num_reply_states > 0);
			mesh->num_reply_states--;
			if(mstate->super_set.count == 0) {
				mesh->num_detached_states++;
			}
		}
	}
	/* also answer the registered callbacks with the stale answer */
	while((c = mstate->cb_list) != NULL) {
		/* take this cb off the list; so that the list can be
		 * changed, eg. by adds from the callback routine */
		if(!mstate->reply_list && mstate->cb_list && !c->next) {
			/* was a reply state, not anymore */
			log_assert(qstate->env->mesh->num_reply_states > 0);
			qstate->env->mesh->num_reply_states--;
		}
		mstate->cb_list = c->next;
		if(!mstate->reply_list && !mstate->cb_list &&
			mstate->super_set.count == 0)
			qstate->env->mesh->num_detached_states++;
		mesh_do_callback(mstate, LDNS_RCODE_NOERROR, msg->rep, c, &tv);
	}
}
2236
2237
int mesh_jostle_exceeded(struct mesh_area* mesh)
2238
0
{
2239
0
  if(mesh->all.count < mesh->max_reply_states)
2240
0
    return 0;
2241
0
  return 1;
2242
0
}