Coverage Report

Created: 2025-11-11 06:14

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/unbound/validator/val_neg.c
Line
Count
Source
1
/*
2
 * validator/val_neg.c - validator aggressive negative caching functions.
3
 *
4
 * Copyright (c) 2008, NLnet Labs. All rights reserved.
5
 *
6
 * This software is open source.
7
 * 
8
 * Redistribution and use in source and binary forms, with or without
9
 * modification, are permitted provided that the following conditions
10
 * are met:
11
 * 
12
 * Redistributions of source code must retain the above copyright notice,
13
 * this list of conditions and the following disclaimer.
14
 * 
15
 * Redistributions in binary form must reproduce the above copyright notice,
16
 * this list of conditions and the following disclaimer in the documentation
17
 * and/or other materials provided with the distribution.
18
 * 
19
 * Neither the name of the NLNET LABS nor the names of its contributors may
20
 * be used to endorse or promote products derived from this software without
21
 * specific prior written permission.
22
 * 
23
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34
 */
35
36
/**
37
 * \file
38
 *
39
 * This file contains helper functions for the validator module.
40
 * The functions help with aggressive negative caching.
41
 * This creates new denials of existence, and proofs for absence of types
42
 * from cached NSEC records.
43
 */
44
#include "config.h"
45
#ifdef HAVE_OPENSSL_SSL_H
46
#include <openssl/ssl.h>
47
#define NSEC3_SHA_LEN SHA_DIGEST_LENGTH
48
#else
49
#define NSEC3_SHA_LEN 20
50
#endif
51
#include "validator/val_neg.h"
52
#include "validator/val_nsec.h"
53
#include "validator/val_nsec3.h"
54
#include "validator/val_utils.h"
55
#include "util/data/dname.h"
56
#include "util/data/msgreply.h"
57
#include "util/log.h"
58
#include "util/net_help.h"
59
#include "util/config_file.h"
60
#include "services/cache/rrset.h"
61
#include "services/cache/dns.h"
62
#include "sldns/rrdef.h"
63
#include "sldns/sbuffer.h"
64
65
int val_neg_data_compare(const void* a, const void* b)
66
0
{
67
0
  struct val_neg_data* x = (struct val_neg_data*)a;
68
0
  struct val_neg_data* y = (struct val_neg_data*)b;
69
0
  int m;
70
0
  return dname_canon_lab_cmp(x->name, x->labs, y->name, y->labs, &m);
71
0
}
72
73
int val_neg_zone_compare(const void* a, const void* b)
74
0
{
75
0
  struct val_neg_zone* x = (struct val_neg_zone*)a;
76
0
  struct val_neg_zone* y = (struct val_neg_zone*)b;
77
0
  int m;
78
0
  if(x->dclass != y->dclass) {
79
0
    if(x->dclass < y->dclass)
80
0
      return -1;
81
0
    return 1;
82
0
  }
83
0
  return dname_canon_lab_cmp(x->name, x->labs, y->name, y->labs, &m);
84
0
}
85
86
struct val_neg_cache* val_neg_create(struct config_file* cfg, size_t maxiter)
87
0
{
88
0
  struct val_neg_cache* neg = (struct val_neg_cache*)calloc(1, 
89
0
    sizeof(*neg));
90
0
  if(!neg) {
91
0
    log_err("Could not create neg cache: out of memory");
92
0
    return NULL;
93
0
  }
94
0
  neg->nsec3_max_iter = maxiter;
95
0
  neg->max = 1024*1024; /* 1 M is thousands of entries */
96
0
  if(cfg) neg->max = cfg->neg_cache_size;
97
0
  rbtree_init(&neg->tree, &val_neg_zone_compare);
98
0
  lock_basic_init(&neg->lock);
99
0
  lock_protect(&neg->lock, neg, sizeof(*neg));
100
0
  return neg;
101
0
}
102
103
size_t val_neg_get_mem(struct val_neg_cache* neg)
104
0
{
105
0
  size_t result;
106
0
  lock_basic_lock(&neg->lock);
107
0
  result = sizeof(*neg) + neg->use;
108
0
  lock_basic_unlock(&neg->lock);
109
0
  return result;
110
0
}
111
112
/** clear datas on cache deletion */
113
static void
114
neg_clear_datas(rbnode_type* n, void* ATTR_UNUSED(arg))
115
0
{
116
0
  struct val_neg_data* d = (struct val_neg_data*)n;
117
0
  free(d->name);
118
0
  free(d);
119
0
}
120
121
/** clear zones on cache deletion */
122
static void
123
neg_clear_zones(rbnode_type* n, void* ATTR_UNUSED(arg))
124
0
{
125
0
  struct val_neg_zone* z = (struct val_neg_zone*)n;
126
  /* delete all the rrset entries in the tree */
127
0
  traverse_postorder(&z->tree, &neg_clear_datas, NULL);
128
0
  free(z->nsec3_salt);
129
0
  free(z->name);
130
0
  free(z);
131
0
}
132
133
void neg_cache_delete(struct val_neg_cache* neg)
134
0
{
135
0
  if(!neg) return;
136
0
  lock_basic_destroy(&neg->lock);
137
  /* delete all the zones in the tree */
138
0
  traverse_postorder(&neg->tree, &neg_clear_zones, NULL);
139
0
  free(neg);
140
0
}
141
142
/**
143
 * Put data element at the front of the LRU list.
144
 * @param neg: negative cache with LRU start and end.
145
 * @param data: this data is fronted.
146
 */
147
static void neg_lru_front(struct val_neg_cache* neg, 
148
  struct val_neg_data* data)
149
0
{
150
0
  data->prev = NULL;
151
0
  data->next = neg->first;
152
0
  if(!neg->first)
153
0
    neg->last = data;
154
0
  else  neg->first->prev = data;
155
0
  neg->first = data;
156
0
}
157
158
/**
159
 * Remove data element from LRU list.
160
 * @param neg: negative cache with LRU start and end.
161
 * @param data: this data is removed from the list.
162
 */
163
static void neg_lru_remove(struct val_neg_cache* neg, 
164
  struct val_neg_data* data)
165
0
{
166
0
  if(data->prev)
167
0
    data->prev->next = data->next;
168
0
  else  neg->first = data->next;
169
0
  if(data->next)
170
0
    data->next->prev = data->prev;
171
0
  else  neg->last = data->prev;
172
0
}
173
174
/**
175
 * Touch LRU for data element, put it at the start of the LRU list.
176
 * @param neg: negative cache with LRU start and end.
177
 * @param data: this data is used.
178
 */
179
static void neg_lru_touch(struct val_neg_cache* neg, 
180
  struct val_neg_data* data)
181
0
{
182
0
  if(data == neg->first)
183
0
    return; /* nothing to do */
184
  /* remove from current lru position */
185
0
  neg_lru_remove(neg, data);
186
  /* add at front */
187
0
  neg_lru_front(neg, data);
188
0
}
189
190
/**
 * Delete a zone element from the negative cache.
 * May delete other zone elements to keep tree coherent, or
 * only mark the element as 'not in use'.
 * @param neg: negative cache.
 * @param z: zone element to delete; must currently be in use.
 */
static void neg_delete_zone(struct val_neg_cache* neg, struct val_neg_zone* z)
{
	struct val_neg_zone* p, *np;
	if(!z) return;
	log_assert(z->in_use);
	log_assert(z->count > 0);
	/* the node stays in the tree while ancestors still reference it;
	 * only the in_use flag is cleared here */
	z->in_use = 0;

	/* go up the tree and reduce counts */
	p = z;
	while(p) {
		log_assert(p->count > 0);
		p->count --;
		p = p->parent;
	}

	/* remove zones with zero count; count==0 means no in-use
	 * descendants remain, so the node can be freed.  The walk stops
	 * at the first ancestor that is still referenced. */
	p = z;
	while(p && p->count == 0) {
		np = p->parent; /* save before freeing p */
		(void)rbtree_delete(&neg->tree, &p->node);
		neg->use -= p->len + sizeof(*p);
		free(p->nsec3_salt);
		free(p->name);
		free(p);
		p = np;
	}
}
225
  
226
/**
 * Delete a data element from the negative cache.
 * Removes it from the LRU list, decrements the usage counts on the
 * element and its ancestors, frees nodes whose count drops to zero,
 * and deletes the enclosing zone if its data tree becomes empty.
 * @param neg: negative cache (caller holds the lock).
 * @param el: element to delete; must currently be in use.
 */
void neg_delete_data(struct val_neg_cache* neg, struct val_neg_data* el)
{
	struct val_neg_zone* z;
	struct val_neg_data* p, *np;
	if(!el) return;
	z = el->zone;
	log_assert(el->in_use);
	log_assert(el->count > 0);
	el->in_use = 0;

	/* remove it from the lru list */
	neg_lru_remove(neg, el);
	log_assert(neg->first != el && neg->last != el);
	
	/* go up the tree and reduce counts */
	p = el;
	while(p) {
		log_assert(p->count > 0);
		p->count --;
		p = p->parent;
	}

	/* delete 0 count items from tree; stop at the first ancestor
	 * that is still referenced by in-use descendants */
	p = el;
	while(p && p->count == 0) {
		np = p->parent; /* save before freeing p */
		(void)rbtree_delete(&z->tree, &p->node);
		neg->use -= p->len + sizeof(*p);
		free(p->name);
		free(p);
		p = np;
	}

	/* check if the zone is now unused */
	if(z->tree.count == 0) {
		neg_delete_zone(neg, z);
	}
}
264
265
/**
266
 * Create more space in negative cache
267
 * The oldest elements are deleted until enough space is present.
268
 * Empty zones are deleted.
269
 * @param neg: negative cache.
270
 * @param need: how many bytes are needed.
271
 */
272
static void neg_make_space(struct val_neg_cache* neg, size_t need)
273
0
{
274
  /* delete elements until enough space or its empty */
275
0
  while(neg->last && neg->max < neg->use + need) {
276
0
    neg_delete_data(neg, neg->last);
277
0
  }
278
0
}
279
280
struct val_neg_zone* neg_find_zone(struct val_neg_cache* neg, 
281
  uint8_t* nm, size_t len, uint16_t dclass)
282
0
{
283
0
  struct val_neg_zone lookfor;
284
0
  struct val_neg_zone* result;
285
0
  lookfor.node.key = &lookfor;
286
0
  lookfor.name = nm;
287
0
  lookfor.len = len;
288
0
  lookfor.labs = dname_count_labels(lookfor.name);
289
0
  lookfor.dclass = dclass;
290
291
0
  result = (struct val_neg_zone*)
292
0
    rbtree_search(&neg->tree, lookfor.node.key);
293
0
  return result;
294
0
}
295
296
/**
297
 * Find the given data
298
 * @param zone: negative zone
299
 * @param nm: what to look for.
300
 * @param len: length of nm
301
 * @param labs: labels in nm
302
 * @return data or NULL if not found.
303
 */
304
static struct val_neg_data* neg_find_data(struct val_neg_zone* zone, 
305
  uint8_t* nm, size_t len, int labs)
306
0
{
307
0
  struct val_neg_data lookfor;
308
0
  struct val_neg_data* result;
309
0
  lookfor.node.key = &lookfor;
310
0
  lookfor.name = nm;
311
0
  lookfor.len = len;
312
0
  lookfor.labs = labs;
313
314
0
  result = (struct val_neg_data*)
315
0
    rbtree_search(&zone->tree, lookfor.node.key);
316
0
  return result;
317
0
}
318
319
/**
320
 * Calculate space needed for the data and all its parents
321
 * @param rep: NSEC entries.
322
 * @return size.
323
 */
324
static size_t calc_data_need(struct reply_info* rep)
325
0
{
326
0
  uint8_t* d;
327
0
  size_t i, len, res = 0;
328
329
0
  for(i=rep->an_numrrsets; i<rep->an_numrrsets+rep->ns_numrrsets; i++) {
330
0
    if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC) {
331
0
      d = rep->rrsets[i]->rk.dname;
332
0
      len = rep->rrsets[i]->rk.dname_len;
333
0
      res = sizeof(struct val_neg_data) + len;
334
0
      while(!dname_is_root(d)) {
335
0
        log_assert(len > 1); /* not root label */
336
0
        dname_remove_label(&d, &len);
337
0
        res += sizeof(struct val_neg_data) + len;
338
0
      }
339
0
    }
340
0
  }
341
0
  return res;
342
0
}
343
344
/**
345
 * Calculate space needed for zone and all its parents
346
 * @param d: name of zone
347
 * @param len: length of name
348
 * @return size.
349
 */
350
static size_t calc_zone_need(uint8_t* d, size_t len)
351
0
{
352
0
  size_t res = sizeof(struct val_neg_zone) + len;
353
0
  while(!dname_is_root(d)) {
354
0
    log_assert(len > 1); /* not root label */
355
0
    dname_remove_label(&d, &len);
356
0
    res += sizeof(struct val_neg_zone) + len;
357
0
  }
358
0
  return res;
359
0
}
360
361
/**
 * Find closest existing parent zone of the given name.
 * @param neg: negative cache.
 * @param nm: name to look for
 * @param nm_len: length of nm
 * @param labs: labelcount of nm.
 * @param qclass: class.
 * @return the zone or NULL if none found.
 */
static struct val_neg_zone* neg_closest_zone_parent(struct val_neg_cache* neg,
	uint8_t* nm, size_t nm_len, int labs, uint16_t qclass)
{
	struct val_neg_zone key;
	struct val_neg_zone* result;
	rbnode_type* res = NULL;
	key.node.key = &key;
	key.name = nm;
	key.len = nm_len;
	key.labs = labs;
	key.dclass = qclass;
	if(rbtree_find_less_equal(&neg->tree, &key, &res)) {
		/* exact match */
		result = (struct val_neg_zone*)res;
	} else {
		/* smaller element (or no element) */
		int m;
		result = (struct val_neg_zone*)res;
		if(!result || result->dclass != qclass)
			return NULL;
		/* count number of labels matched between the smaller
		 * element and the lookup name */
		(void)dname_lab_cmp(result->name, result->labs, key.name,
			key.labs, &m);
		while(result) { /* go up until qname is subdomain of stub */
			/* once the candidate has no more labels than were
			 * matched, it is an ancestor of nm */
			if(result->labs <= m)
				break;
			result = result->parent;
		}
	}
	return result;
}
401
402
/**
 * Find closest existing parent data for the given name.
 * @param zone: to look in.
 * @param nm: name to look for
 * @param nm_len: length of nm
 * @param labs: labelcount of nm.
 * @return the data or NULL if none found.
 */
static struct val_neg_data* neg_closest_data_parent(
	struct val_neg_zone* zone, uint8_t* nm, size_t nm_len, int labs)
{
	struct val_neg_data key;
	struct val_neg_data* result;
	rbnode_type* res = NULL;
	key.node.key = &key;
	key.name = nm;
	key.len = nm_len;
	key.labs = labs;
	if(rbtree_find_less_equal(&zone->tree, &key, &res)) {
		/* exact match */
		result = (struct val_neg_data*)res;
	} else {
		/* smaller element (or no element) */
		int m;
		result = (struct val_neg_data*)res;
		if(!result)
			return NULL;
		/* count number of labels matched between the smaller
		 * element and the lookup name */
		(void)dname_lab_cmp(result->name, result->labs, key.name,
			key.labs, &m);
		while(result) { /* go up until qname is subdomain of stub */
			/* ancestor reached when its labelcount is within
			 * the matched prefix */
			if(result->labs <= m)
				break;
			result = result->parent;
		}
	}
	return result;
}
440
441
/**
442
 * Create a single zone node
443
 * @param nm: name for zone (copied)
444
 * @param nm_len: length of name
445
 * @param labs: labels in name.
446
 * @param dclass: class of zone, host order.
447
 * @return new zone or NULL on failure
448
 */
449
static struct val_neg_zone* neg_setup_zone_node(
450
  uint8_t* nm, size_t nm_len, int labs, uint16_t dclass)
451
0
{
452
0
  struct val_neg_zone* zone = 
453
0
    (struct val_neg_zone*)calloc(1, sizeof(*zone));
454
0
  if(!zone) {
455
0
    return NULL;
456
0
  }
457
0
  zone->node.key = zone;
458
0
  zone->name = memdup(nm, nm_len);
459
0
  if(!zone->name) {
460
0
    free(zone);
461
0
    return NULL;
462
0
  }
463
0
  zone->len = nm_len;
464
0
  zone->labs = labs;
465
0
  zone->dclass = dclass;
466
467
0
  rbtree_init(&zone->tree, &val_neg_data_compare);
468
0
  return zone;
469
0
}
470
471
/**
472
 * Create a linked list of parent zones, starting at longname ending on
473
 * the parent (can be NULL, creates to the root).
474
 * @param nm: name for lowest in chain
475
 * @param nm_len: length of name
476
 * @param labs: labels in name.
477
 * @param dclass: class of zone.
478
 * @param parent: NULL for to root, else so it fits under here.
479
 * @return zone; a chain of zones and their parents up to the parent.
480
 *    or NULL on malloc failure
481
 */
482
static struct val_neg_zone* neg_zone_chain(
483
  uint8_t* nm, size_t nm_len, int labs, uint16_t dclass,
484
  struct val_neg_zone* parent)
485
0
{
486
0
  int i;
487
0
  int tolabs = parent?parent->labs:0;
488
0
  struct val_neg_zone* zone, *prev = NULL, *first = NULL;
489
490
  /* create the new subtree, i is labelcount of current creation */
491
  /* this creates a 'first' to z->parent=NULL list of zones */
492
0
  for(i=labs; i!=tolabs; i--) {
493
    /* create new item */
494
0
    zone = neg_setup_zone_node(nm, nm_len, i, dclass);
495
0
    if(!zone) {
496
      /* need to delete other allocations in this routine!*/
497
0
      struct val_neg_zone* p=first, *np;
498
0
      while(p) {
499
0
        np = p->parent;
500
0
        free(p->name);
501
0
        free(p);
502
0
        p = np;
503
0
      }
504
0
      return NULL;
505
0
    }
506
0
    if(i == labs) {
507
0
      first = zone;
508
0
    } else {
509
0
      prev->parent = zone;
510
0
    }
511
    /* prepare for next name */
512
0
    prev = zone;
513
0
    dname_remove_label(&nm, &nm_len);
514
0
  }
515
0
  return first;
516
0
}  
517
518
void val_neg_zone_take_inuse(struct val_neg_zone* zone)
519
0
{
520
0
  if(!zone->in_use) {
521
0
    struct val_neg_zone* p;
522
0
    zone->in_use = 1;
523
    /* increase usage count of all parents */
524
0
    for(p=zone; p; p = p->parent) {
525
0
      p->count++;
526
0
    }
527
0
  }
528
0
}
529
530
/**
 * Create a new zone entry (and any missing intermediate zones up to its
 * closest existing parent) and insert it into the negative cache tree.
 * @param neg: negative cache (caller holds the lock).
 * @param nm: zone name.
 * @param nm_len: length of nm.
 * @param dclass: class of the zone.
 * @return the zone (existing or newly made), or NULL on malloc failure.
 */
struct val_neg_zone* neg_create_zone(struct val_neg_cache* neg,
	uint8_t* nm, size_t nm_len, uint16_t dclass)
{
	struct val_neg_zone* zone;
	struct val_neg_zone* parent;
	struct val_neg_zone* p, *np;
	int labs = dname_count_labels(nm);

	/* find closest enclosing parent zone that (still) exists */
	parent = neg_closest_zone_parent(neg, nm, nm_len, labs, dclass);
	if(parent && query_dname_compare(parent->name, nm) == 0)
		return parent; /* already exists, weird */
	/* if parent exists, it is in use */
	log_assert(!parent || parent->count > 0);
	zone = neg_zone_chain(nm, nm_len, labs, dclass, parent);
	if(!zone) {
		return NULL;
	}

	/* insert the list of zones into the tree */
	p = zone;
	while(p) {
		np = p->parent;
		/* mem use */
		neg->use += sizeof(struct val_neg_zone) + p->len;
		/* insert in tree */
		(void)rbtree_insert(&neg->tree, &p->node);
		/* last one needs proper parent pointer; the chain ended
		 * with parent==NULL, hook it onto the existing parent */
		if(np == NULL)
			p->parent = parent;
		p = np;
	}
	return zone;
}
564
565
/** find zone name of message, returns the SOA record */
566
static struct ub_packed_rrset_key* reply_find_soa(struct reply_info* rep)
567
0
{
568
0
  size_t i;
569
0
  for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
570
0
    if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_SOA)
571
0
      return rep->rrsets[i];
572
0
  }
573
0
  return NULL;
574
0
}
575
576
/** see if the reply has NSEC records worthy of caching */
577
static int reply_has_nsec(struct reply_info* rep)
578
0
{
579
0
  size_t i;
580
0
  struct packed_rrset_data* d;
581
0
  if(rep->security != sec_status_secure)
582
0
    return 0;
583
0
  for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
584
0
    if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC) {
585
0
      d = (struct packed_rrset_data*)rep->rrsets[i]->
586
0
        entry.data;
587
0
      if(d->security == sec_status_secure)
588
0
        return 1;
589
0
    }
590
0
  }
591
0
  return 0;
592
0
}
593
594
595
/**
596
 * Create single node of data element.
597
 * @param nm: name (copied)
598
 * @param nm_len: length of name
599
 * @param labs: labels in name.
600
 * @return element with name nm, or NULL malloc failure.
601
 */
602
static struct val_neg_data* neg_setup_data_node(
603
  uint8_t* nm, size_t nm_len, int labs)
604
0
{
605
0
  struct val_neg_data* el;
606
0
  el = (struct val_neg_data*)calloc(1, sizeof(*el));
607
0
  if(!el) {
608
0
    return NULL;
609
0
  }
610
0
  el->node.key = el;
611
0
  el->name = memdup(nm, nm_len);
612
0
  if(!el->name) {
613
0
    free(el);
614
0
    return NULL;
615
0
  }
616
0
  el->len = nm_len;
617
0
  el->labs = labs;
618
0
  return el;
619
0
}
620
621
/**
622
 * Create chain of data element and parents
623
 * @param nm: name
624
 * @param nm_len: length of name
625
 * @param labs: labels in name.
626
 * @param parent: up to where to make, if NULL up to root label.
627
 * @return lowest element with name nm, or NULL malloc failure.
628
 */
629
static struct val_neg_data* neg_data_chain(
630
  uint8_t* nm, size_t nm_len, int labs, struct val_neg_data* parent)
631
0
{
632
0
  int i;
633
0
  int tolabs = parent?parent->labs:0;
634
0
  struct val_neg_data* el, *first = NULL, *prev = NULL;
635
636
  /* create the new subtree, i is labelcount of current creation */
637
  /* this creates a 'first' to z->parent=NULL list of zones */
638
0
  for(i=labs; i!=tolabs; i--) {
639
    /* create new item */
640
0
    el = neg_setup_data_node(nm, nm_len, i);
641
0
    if(!el) {
642
      /* need to delete other allocations in this routine!*/
643
0
      struct val_neg_data* p = first, *np;
644
0
      while(p) {
645
0
        np = p->parent;
646
0
        free(p->name);
647
0
        free(p);
648
0
        p = np;
649
0
      }
650
0
      return NULL;
651
0
    }
652
0
    if(i == labs) {
653
0
      first = el;
654
0
    } else {
655
0
      prev->parent = el;
656
0
    }
657
658
    /* prepare for next name */
659
0
    prev = el;
660
0
    dname_remove_label(&nm, &nm_len);
661
0
  }
662
0
  return first;
663
0
}
664
665
/**
 * Remove NSEC records between start and end points.
 * By walking the tree, the tree is sorted canonically.
 * @param neg: negative cache.
 * @param zone: the zone
 * @param el: element to start walking at.
 * @param nsec: the nsec record with the end point
 */
static void wipeout(struct val_neg_cache* neg, struct val_neg_zone* zone, 
	struct val_neg_data* el, struct ub_packed_rrset_key* nsec)
{
	struct packed_rrset_data* d = (struct packed_rrset_data*)nsec->
		entry.data;
	uint8_t* end;
	size_t end_len;
	int end_labs, m;
	rbnode_type* walk, *next;
	struct val_neg_data* cur;
	uint8_t buf[257];
	/* get endpoint; the first RR's rdata must at least hold a
	 * compressed-pointer-free next-owner name (2 bytes + root) */
	if(!d || d->count == 0 || d->rr_len[0] < 2+1)
		return;
	if(ntohs(nsec->rk.type) == LDNS_RR_TYPE_NSEC) {
		/* next owner name starts after the 2-byte rdlength */
		end = d->rr_data[0]+2;
		end_len = dname_valid(end, d->rr_len[0]-2);
		end_labs = dname_count_labels(end);
	} else {
		/* NSEC3: reconstruct the base32 next-owner name in buf */
		if(!nsec3_get_nextowner_b32(nsec, 0, buf, sizeof(buf)))
			return;
		end = buf;
		end_labs = dname_count_size_labels(end, &end_len);
	}

	/* sanity check, both owner and end must be below the zone apex */
	if(!dname_subdomain_c(el->name, zone->name) || 
		!dname_subdomain_c(end, zone->name))
		return;

	/* detect end of zone NSEC ; wipe until the end of zone */
	if(query_dname_compare(end, zone->name) == 0) {
		end = NULL;
	}

	walk = rbtree_next(&el->node);
	while(walk && walk != RBTREE_NULL) {
		cur = (struct val_neg_data*)walk;
		/* sanity check: must be larger than start */
		if(dname_canon_lab_cmp(cur->name, cur->labs, 
			el->name, el->labs, &m) <= 0) {
			/* r == 0 skip original record. */
			/* r < 0  too small! */
			walk = rbtree_next(walk);
			continue;
		}
		/* stop at endpoint, also data at empty nonterminals must be
		 * removed (no NSECs there) so everything between 
		 * start and end */
		if(end && dname_canon_lab_cmp(cur->name, cur->labs,
			end, end_labs, &m) >= 0) {
			break;
		}
		/* this element has to be deleted, but we cannot do it
		 * now, because we are walking the tree still ... */
		/* get the next element: */
		next = rbtree_next(walk);
		/* now delete the original element, this may trigger
		 * rbtree rebalances, but really, the next element is
		 * the one we need.
		 * But it may trigger delete of other data and the
		 * entire zone. However, if that happens, this is done
		 * by deleting the *parents* of the element for deletion,
		 * and maybe also the entire zone if it is empty. 
		 * But parents are smaller in canonical compare, thus,
		 * if a larger element exists, then it is not a parent,
		 * it cannot get deleted, the zone cannot get empty.
		 * If the next==NULL, then zone can be empty. */
		if(cur->in_use)
			neg_delete_data(neg, cur);
		walk = next;
	}
}
747
748
/**
 * Insert an NSEC or NSEC3 record into the negative cache zone.
 * Creates (or reuses) the data element for the owner name, puts it on
 * the LRU, remembers the NSEC3 parameters last seen for the zone, and
 * wipes out cached entries between this record's start and end points.
 * @param neg: negative cache (caller holds the lock).
 * @param zone: zone the record belongs under.
 * @param nsec: the NSEC/NSEC3 rrset to insert.
 */
void neg_insert_data(struct val_neg_cache* neg, 
	struct val_neg_zone* zone, struct ub_packed_rrset_key* nsec)
{
	struct packed_rrset_data* d;
	struct val_neg_data* parent;
	struct val_neg_data* el;
	uint8_t* nm = nsec->rk.dname;
	size_t nm_len = nsec->rk.dname_len;
	int labs = dname_count_labels(nsec->rk.dname);

	/* only accept secure data, or unchecked data that carries
	 * signatures (it can still validate later) */
	d = (struct packed_rrset_data*)nsec->entry.data;
	if( !(d->security == sec_status_secure ||
		(d->security == sec_status_unchecked && d->rrsig_count > 0)))
		return;
	log_nametypeclass(VERB_ALGO, "negcache rr", 
		nsec->rk.dname, ntohs(nsec->rk.type), 
		ntohs(nsec->rk.rrset_class));

	/* find closest enclosing parent data that (still) exists */
	parent = neg_closest_data_parent(zone, nm, nm_len, labs);
	if(parent && query_dname_compare(parent->name, nm) == 0) {
		/* perfect match already exists */
		log_assert(parent->count > 0);
		el = parent;
	} else { 
		struct val_neg_data* p, *np;

		/* create subtree for perfect match */
		/* if parent exists, it is in use */
		log_assert(!parent || parent->count > 0);

		el = neg_data_chain(nm, nm_len, labs, parent);
		if(!el) {
			log_err("out of memory inserting NSEC negative cache");
			return;
		}
		el->in_use = 0; /* set on below */

		/* insert the list of zones into the tree */
		p = el;
		while(p) {
			np = p->parent;
			/* mem use */
			neg->use += sizeof(struct val_neg_data) + p->len;
			/* insert in tree */
			p->zone = zone;
			(void)rbtree_insert(&zone->tree, &p->node);
			/* last one needs proper parent pointer; hook the
			 * chain onto the pre-existing parent element */
			if(np == NULL)
				p->parent = parent;
			p = np;
		}
	}

	if(!el->in_use) {
		struct val_neg_data* p;

		el->in_use = 1;
		/* increase usage count of all parents */
		for(p=el; p; p = p->parent) {
			p->count++;
		}

		neg_lru_front(neg, el);
	} else {
		/* in use, bring to front, lru */
		neg_lru_touch(neg, el);
	}

	/* if nsec3 store last used parameters; only when they differ
	 * from what the zone already has, and the iteration count is
	 * within the configured maximum */
	if(ntohs(nsec->rk.type) == LDNS_RR_TYPE_NSEC3) {
		int h;
		uint8_t* s;
		size_t slen, it;
		if(nsec3_get_params(nsec, 0, &h, &it, &s, &slen) &&
			it <= neg->nsec3_max_iter &&
			(h != zone->nsec3_hash || it != zone->nsec3_iter ||
			slen != zone->nsec3_saltlen || 
			(slen != 0 && zone->nsec3_salt && s
				&& memcmp(zone->nsec3_salt, s, slen) != 0))) {

			if(slen > 0) {
				uint8_t* sa = memdup(s, slen);
				if(sa) {
					free(zone->nsec3_salt);
					zone->nsec3_salt = sa;
					zone->nsec3_saltlen = slen;
					zone->nsec3_iter = it;
					zone->nsec3_hash = h;
				}
			} else {
				/* empty salt */
				free(zone->nsec3_salt);
				zone->nsec3_salt = NULL;
				zone->nsec3_saltlen = 0;
				zone->nsec3_iter = it;
				zone->nsec3_hash = h;
			}
		}
	}

	/* wipe out the cache items between NSEC start and end */
	wipeout(neg, zone, el, nsec);
}
851
852
/** see if the reply has signed NSEC records and return the signer */
853
static uint8_t* reply_nsec_signer(struct reply_info* rep, size_t* signer_len,
854
  uint16_t* dclass)
855
0
{
856
0
  size_t i;
857
0
  struct packed_rrset_data* d;
858
0
  uint8_t* s;
859
0
  for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
860
0
    if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC ||
861
0
      ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC3) {
862
0
      d = (struct packed_rrset_data*)rep->rrsets[i]->
863
0
        entry.data;
864
      /* return first signer name of first NSEC */
865
0
      if(d->rrsig_count != 0) {
866
0
        val_find_rrset_signer(rep->rrsets[i],
867
0
          &s, signer_len);
868
0
        if(s && *signer_len) {
869
0
          *dclass = ntohs(rep->rrsets[i]->
870
0
            rk.rrset_class);
871
0
          return s;
872
0
        }
873
0
      }
874
0
    }
875
0
  }
876
0
  return 0;
877
0
}
878
879
/**
 * Insert the secure NSEC records of a (negative or wildcard) reply into
 * the negative cache, creating the zone entry as needed.
 * Takes and releases the cache lock itself.
 * @param neg: negative cache.
 * @param rep: reply with NSECs in its authority section.
 */
void val_neg_addreply(struct val_neg_cache* neg, struct reply_info* rep)
{
	size_t i, need;
	struct ub_packed_rrset_key* soa;
	uint8_t* dname = NULL;
	size_t dname_len;
	uint16_t rrset_class;
	struct val_neg_zone* zone;
	/* see if secure nsecs inside */
	if(!reply_has_nsec(rep))
		return;
	/* find the zone name in message */
	if((soa = reply_find_soa(rep))) {
		dname = soa->rk.dname;
		dname_len = soa->rk.dname_len;
		rrset_class = ntohs(soa->rk.rrset_class);
	}
	else {
		/* No SOA in positive (wildcard) answer. Use signer from the 
		 * validated answer RRsets' signature. */
		if(!(dname = reply_nsec_signer(rep, &dname_len, &rrset_class)))
			return;
	}

	log_nametypeclass(VERB_ALGO, "negcache insert for zone",
		dname, LDNS_RR_TYPE_SOA, rrset_class);
	
	/* ask for enough space to store all of it */
	need = calc_data_need(rep) + 
		calc_zone_need(dname, dname_len);
	lock_basic_lock(&neg->lock);
	neg_make_space(neg, need);

	/* find or create the zone entry */
	zone = neg_find_zone(neg, dname, dname_len, rrset_class);
	if(!zone) {
		if(!(zone = neg_create_zone(neg, dname, dname_len,
			rrset_class))) {
			lock_basic_unlock(&neg->lock);
			log_err("out of memory adding negative zone");
			return;
		}
	}
	val_neg_zone_take_inuse(zone);

	/* insert the NSECs; only those inside the zone are taken */
	for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
		if(ntohs(rep->rrsets[i]->rk.type) != LDNS_RR_TYPE_NSEC)
			continue;
		if(!dname_subdomain_c(rep->rrsets[i]->rk.dname, 
			zone->name)) continue;
		/* insert NSEC into this zone's tree */
		neg_insert_data(neg, zone, rep->rrsets[i]);
	}
	if(zone->tree.count == 0) {
		/* remove empty zone if inserts failed */
		neg_delete_zone(neg, zone);
	}
	lock_basic_unlock(&neg->lock);
}
939
940
/**
941
 * Lookup closest data record. For NSEC denial.
942
 * @param zone: zone to look in
943
 * @param qname: name to look for.
944
 * @param len: length of name
945
 * @param labs: labels in name
946
 * @param data: data element, exact or smaller or NULL
947
 * @return true if exact match.
948
 */
949
static int neg_closest_data(struct val_neg_zone* zone,
950
  uint8_t* qname, size_t len, int labs, struct val_neg_data** data)
951
0
{
952
0
  struct val_neg_data key;
953
0
  rbnode_type* r;
954
0
  key.node.key = &key;
955
0
  key.name = qname;
956
0
  key.len = len;
957
0
  key.labs = labs;
958
0
  if(rbtree_find_less_equal(&zone->tree, &key, &r)) {
959
    /* exact match */
960
0
    *data = (struct val_neg_data*)r;
961
0
    return 1;
962
0
  } else {
963
    /* smaller match */
964
0
    *data = (struct val_neg_data*)r;
965
0
    return 0;
966
0
  }
967
0
}
968
969
/**
 * Add NSEC and NSEC3 records from a referral reply (one without a SOA)
 * to the negative cache.  The zone is identified by the RRSIG signer
 * name over the NSECs, since the records may not be validated yet.
 * Takes and releases the negative cache lock internally.
 */
void val_neg_addreferral(struct val_neg_cache* neg, struct reply_info* rep,
	uint8_t* zone_name)
{
	size_t i, need;
	uint8_t* signer;
	size_t signer_len;
	uint16_t dclass;
	struct val_neg_zone* zone;
	/* no SOA in this message, find RRSIG over NSEC's signer name.
	 * note the NSEC records are maybe not validated yet */
	signer = reply_nsec_signer(rep, &signer_len, &dclass);
	if(!signer) 
		return;
	if(!dname_subdomain_c(signer, zone_name)) {
		/* the signer is not in the bailiwick, throw it out */
		return;
	}

	log_nametypeclass(VERB_ALGO, "negcache insert referral ",
		signer, LDNS_RR_TYPE_NS, dclass);
	
	/* ask for enough space to store all of it */
	need = calc_data_need(rep) + calc_zone_need(signer, signer_len);
	lock_basic_lock(&neg->lock);
	/* evict LRU entries until the new data fits within neg->max */
	neg_make_space(neg, need);

	/* find or create the zone entry */
	zone = neg_find_zone(neg, signer, signer_len, dclass);
	if(!zone) {
		if(!(zone = neg_create_zone(neg, signer, signer_len, 
			dclass))) {
			lock_basic_unlock(&neg->lock);
			log_err("out of memory adding negative zone");
			return;
		}
	}
	val_neg_zone_take_inuse(zone);

	/* insert the NSECs */
	for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
		/* referral proofs may be NSEC or NSEC3; skip other types */
		if(ntohs(rep->rrsets[i]->rk.type) != LDNS_RR_TYPE_NSEC &&
			ntohs(rep->rrsets[i]->rk.type) != LDNS_RR_TYPE_NSEC3)
			continue;
		/* only records inside the signer's zone belong here */
		if(!dname_subdomain_c(rep->rrsets[i]->rk.dname, 
			zone->name)) continue;
		/* insert NSEC into this zone's tree */
		neg_insert_data(neg, zone, rep->rrsets[i]);
	}
	if(zone->tree.count == 0) {
		/* remove empty zone if inserts failed */
		neg_delete_zone(neg, zone);
	}
	lock_basic_unlock(&neg->lock);
}
1023
1024
/**
1025
 * Check that an NSEC3 rrset does not have a type set.
1026
 * None of the nsec3s in a hash-collision are allowed to have the type.
1027
 * (since we do not know which one is the nsec3 looked at, flags, ..., we
1028
 * ignore the cached item and let it bypass negative caching).
1029
 * @param k: the nsec3 rrset to check.
1030
 * @param t: type to check
1031
 * @return true if no RRs have the type.
1032
 */
1033
static int nsec3_no_type(struct ub_packed_rrset_key* k, uint16_t t)
1034
0
{
1035
0
  int count = (int)((struct packed_rrset_data*)k->entry.data)->count;
1036
0
  int i;
1037
0
  for(i=0; i<count; i++)
1038
0
    if(nsec3_has_type(k, i, t))
1039
0
      return 0;
1040
0
  return 1;
1041
0
}
1042
1043
/**
 * See if rrset exists in rrset cache.
 * If it does, the bit is checked, and if not expired, it is returned
 * allocated in region.
 * @param rrset_cache: rrset cache
 * @param qname: to lookup rrset name
 * @param qname_len: length of qname.
 * @param qtype: type of rrset to lookup, host order
 * @param qclass: class of rrset to lookup, host order
 * @param flags: flags for rrset to lookup
 * @param region: where to alloc result
 * @param checkbit: if true, a bit in the nsec typemap is checked for absence.
 * @param checktype: which bit to check
 * @param now: to check ttl against
 * @return rrset or NULL
 */
static struct ub_packed_rrset_key*
grab_nsec(struct rrset_cache* rrset_cache, uint8_t* qname, size_t qname_len,
	uint16_t qtype, uint16_t qclass, uint32_t flags, 
	struct regional* region, int checkbit, uint16_t checktype, 
	time_t now)
{
	/* cache lookup returns the entry read-locked; every exit path
	 * below must unlock it */
	struct ub_packed_rrset_key* r, *k = rrset_cache_lookup(rrset_cache,
		qname, qname_len, qtype, qclass, flags, now, 0);
	struct packed_rrset_data* d;
	if(!k) return NULL;
	d = k->entry.data;
	/* only secure or unchecked records that have signatures. */
	if( ! ( d->security == sec_status_secure ||
		(d->security == sec_status_unchecked &&
		d->rrsig_count > 0) ) ) {
		lock_rw_unlock(&k->entry.lock);
		return NULL;
	}
	/* check if checktype is absent */
	if(checkbit && (
		(qtype == LDNS_RR_TYPE_NSEC && nsec_has_type(k, checktype)) ||
		(qtype == LDNS_RR_TYPE_NSEC3 && !nsec3_no_type(k, checktype))
		)) {
		lock_rw_unlock(&k->entry.lock);
		return NULL;
	}
	/* looks OK! copy to region and return it */
	/* the copy reduces the TTL by the elapsed time (now) */
	r = packed_rrset_copy_region(k, region, now);
	/* if it failed, we return the NULL */
	lock_rw_unlock(&k->entry.lock);
	return r;
}
1091
1092
/**
 * Get best NSEC record for qname. Might be matching, covering or totally
 * useless.
 * @param neg_cache: neg cache
 * @param qname: to lookup rrset name
 * @param qname_len: length of qname.
 * @param qclass: class of rrset to lookup, host order
 * @param rrset_cache: rrset cache
 * @param now: to check ttl against
 * @param region: where to alloc result
 * @return rrset or NULL
 */
static struct ub_packed_rrset_key*
neg_find_nsec(struct val_neg_cache* neg_cache, uint8_t* qname, size_t qname_len,
	uint16_t qclass, struct rrset_cache* rrset_cache, time_t now,
	struct regional* region)
{
	int labs;
	uint32_t flags;
	struct val_neg_zone* zone;
	struct val_neg_data* data;
	struct ub_packed_rrset_key* nsec;

	labs = dname_count_labels(qname);
	lock_basic_lock(&neg_cache->lock);
	/* find the deepest enclosing zone that is actually in use */
	zone = neg_closest_zone_parent(neg_cache, qname, qname_len, labs,
		qclass);
	while(zone && !zone->in_use)
		zone = zone->parent;
	if(!zone) {
		lock_basic_unlock(&neg_cache->lock);
		return NULL;
	}

	/* NSEC only for now */
	if(zone->nsec3_hash) {
		lock_basic_unlock(&neg_cache->lock);
		return NULL;
	}

	/* ignore return value, don't care if it is an exact or smaller match */
	(void)neg_closest_data(zone, qname, qname_len, labs, &data);
	if(!data) {
		lock_basic_unlock(&neg_cache->lock);
		return NULL;
	}

	/* ENT nodes are not in use, try the previous node. If the previous node
	 * is not in use, we don't have an useful NSEC and give up. */
	if(!data->in_use) {
		data = (struct val_neg_data*)rbtree_previous((rbnode_type*)data);
		if((rbnode_type*)data == RBTREE_NULL || !data->in_use) {
			lock_basic_unlock(&neg_cache->lock);
			return NULL;
		}
	}

	/* the apex NSEC is stored with a special flag in the rrset cache */
	flags = 0;
	if(query_dname_compare(data->name, zone->name) == 0)
		flags = PACKED_RRSET_NSEC_AT_APEX;

	nsec = grab_nsec(rrset_cache, data->name, data->len, LDNS_RR_TYPE_NSEC,
		zone->dclass, flags, region, 0, 0, now);
	lock_basic_unlock(&neg_cache->lock);
	return nsec;
}
1158
1159
/**
 * Find the NSEC3 closest encloser for qname in the negative cache.
 * Strips labels off qname one at a time, hashing each candidate, until
 * an in-use exact hash match is found in the zone tree.
 * @param zone: nsec3 zone to search (hash parameters taken from it).
 * @param qname: name to find the closest encloser for.
 * @param qname_len: length of qname.
 * @param qlabs: number of labels in qname.
 * @param buf: scratch buffer for hashing.
 * @param hashnc: out; hash of the next-closer name (the name one label
 *	below the returned closest encloser).
 * @param nclen: out; length of hashnc, 0 when qname itself matched.
 * @return the closest encloser data element, or NULL if none found
 *	(or on hash failure).
 */
static struct val_neg_data*
neg_find_nsec3_ce(struct val_neg_zone* zone, uint8_t* qname, size_t qname_len,
    int qlabs, sldns_buffer* buf, uint8_t* hashnc, size_t* nclen)
{
	struct val_neg_data* data;
	uint8_t hashce[NSEC3_SHA_LEN];
	uint8_t b32[257];
	size_t celen, b32len;

	*nclen = 0;
	while(qlabs > 0) {
		/* hash */
		if(!(celen=nsec3_get_hashed(buf, qname, qname_len, 
			zone->nsec3_hash, zone->nsec3_iter, zone->nsec3_salt, 
			zone->nsec3_saltlen, hashce, sizeof(hashce))))
			return NULL;
		if(!(b32len=nsec3_hash_to_b32(hashce, celen, zone->name,
			zone->len, b32, sizeof(b32))))
			return NULL;

		/* lookup (exact match only) */
		data = neg_find_data(zone, b32, b32len, zone->labs+1);
		if(data && data->in_use) {
			/* found ce match! */
			return data;
		}

		/* no match; remember this hash as the next-closer and
		 * retry one label higher */
		*nclen = celen;
		memmove(hashnc, hashce, celen);
		dname_remove_label(&qname, &qname_len);
		qlabs --;
	}
	return NULL;
}
1194
1195
/** check nsec3 parameters on nsec3 rrset with current zone values */
1196
static int
1197
neg_params_ok(struct val_neg_zone* zone, struct ub_packed_rrset_key* rrset)
1198
0
{
1199
0
  int h;
1200
0
  uint8_t* s;
1201
0
  size_t slen, it;
1202
0
  if(!nsec3_get_params(rrset, 0, &h, &it, &s, &slen))
1203
0
    return 0;
1204
0
  return (h == zone->nsec3_hash && it == zone->nsec3_iter &&
1205
0
    slen == zone->nsec3_saltlen &&
1206
0
    (slen != 0 && zone->nsec3_salt && s
1207
0
      && memcmp(zone->nsec3_salt, s, slen) == 0));
1208
0
}
1209
1210
/**
 * Get the NSEC3 that covers the next closer name for an NSEC3 proof.
 * @param zone: nsec3 zone to search.
 * @param hashnc: hash of the next closer name.
 * @param nclen: length of hashnc.
 * @param rrset_cache: rrset cache to grab the NSEC3 rrset from.
 * @param region: where to allocate the result.
 * @param now: current time, for TTL checks.
 * @param b32: scratch buffer, receives base32 of hashnc.
 * @param maxb32: size of b32 buffer.
 * @return covering NSEC3 rrset with matching zone parameters, or NULL.
 */
static struct ub_packed_rrset_key*
neg_nsec3_getnc(struct val_neg_zone* zone, uint8_t* hashnc, size_t nclen,
	struct rrset_cache* rrset_cache, struct regional* region, 
	time_t now, uint8_t* b32, size_t maxb32)
{
	struct ub_packed_rrset_key* nc_rrset;
	struct val_neg_data* data;
	size_t b32len;

	if(!(b32len=nsec3_hash_to_b32(hashnc, nclen, zone->name,
		zone->len, b32, maxb32)))
		return NULL;
	/* closest smaller-or-equal element; its NSEC3 may cover hashnc */
	(void)neg_closest_data(zone, b32, b32len, zone->labs+1, &data);
	if(!data && zone->tree.count != 0) {
		/* could be before the first entry ; return the last
		 * entry (possibly the rollover nsec3 at end) */
		data = (struct val_neg_data*)rbtree_last(&zone->tree);
	}
	/* skip ENT placeholders; walk up to an in-use element */
	while(data && !data->in_use)
		data = data->parent;
	if(!data)
		return NULL;
	/* got a data element in tree, grab it */
	nc_rrset = grab_nsec(rrset_cache, data->name, data->len, 
		LDNS_RR_TYPE_NSEC3, zone->dclass, 0, region, 0, 0, now);
	if(!nc_rrset)
		return NULL;
	/* reject it when its NSEC3 parameters differ from the zone's */
	if(!neg_params_ok(zone, nc_rrset))
		return NULL;
	return nc_rrset;
}
1242
1243
/**
 * Negative cache NSEC3 proof procedure for type DS.
 * Builds a synthesized NODATA answer (exact closest-encloser match) or
 * an optout insecure-delegation answer for a DS query, from cached
 * NSEC3 records.
 * @param zone: nsec3 zone the proof is constructed from.
 * @param qname: DS query name.
 * @param qname_len: length of qname.
 * @param qlabs: labels in qname.
 * @param buf: scratch buffer for hashing.
 * @param rrset_cache: to grab NSEC3 rrsets from.
 * @param region: allocation region for the message.
 * @param now: current time.
 * @param topname: last-seen trust anchor point; NULL means optout
 *	proofs are not allowed.
 * @return synthesized message or NULL if no proof possible.
 */
static struct dns_msg*
neg_nsec3_proof_ds(struct val_neg_zone* zone, uint8_t* qname, size_t qname_len,
    int qlabs, sldns_buffer* buf, struct rrset_cache* rrset_cache,
    struct regional* region, time_t now, uint8_t* topname)
{
	struct dns_msg* msg;
	struct val_neg_data* data;
	uint8_t hashnc[NSEC3_SHA_LEN];
	size_t nclen;
	struct ub_packed_rrset_key* ce_rrset, *nc_rrset;
	struct nsec3_cached_hash c;
	uint8_t nc_b32[257];

	/* for NSEC3 ; determine the closest encloser for which we
	 * can find an exact match. Remember the hashed lower name,
	 * since that is the one we need a closest match for. 
	 * If we find a match straight away, then it becomes NODATA.
	 * Otherwise, NXDOMAIN or if OPTOUT, an insecure delegation.
	 * Also check that parameters are the same on closest encloser
	 * and on closest match.
	 */
	if(!zone->nsec3_hash) 
		return NULL; /* not nsec3 zone */

	if(!(data=neg_find_nsec3_ce(zone, qname, qname_len, qlabs, buf,
		hashnc, &nclen))) {
		return NULL;
	}

	/* grab the ce rrset */
	/* checkbit=1 with DS: the NSEC3 must prove DS absent */
	ce_rrset = grab_nsec(rrset_cache, data->name, data->len, 
		LDNS_RR_TYPE_NSEC3, zone->dclass, 0, region, 1, 
		LDNS_RR_TYPE_DS, now);
	if(!ce_rrset)
		return NULL;
	if(!neg_params_ok(zone, ce_rrset))
		return NULL;

	if(nclen == 0) {
		/* exact match, just check the type bits */
		/* need: -SOA, -DS, +NS */
		if(nsec3_has_type(ce_rrset, 0, LDNS_RR_TYPE_SOA) ||
			nsec3_has_type(ce_rrset, 0, LDNS_RR_TYPE_DS) ||
			!nsec3_has_type(ce_rrset, 0, LDNS_RR_TYPE_NS))
			return NULL;
		if(!(msg = dns_msg_create(qname, qname_len, 
			LDNS_RR_TYPE_DS, zone->dclass, region, 1))) 
			return NULL;
		/* TTL reduced in grab_nsec */
		if(!dns_msg_authadd(msg, region, ce_rrset, 0)) 
			return NULL;
		return msg;
	}

	/* optout is not allowed without knowing the trust-anchor in use,
	 * otherwise the optout could spoof away that anchor */
	if(!topname)
		return NULL;

	/* if there is no exact match, it must be in an optout span
	 * (an existing DS implies an NSEC3 must exist) */
	nc_rrset = neg_nsec3_getnc(zone, hashnc, nclen, rrset_cache, 
		region, now, nc_b32, sizeof(nc_b32));
	if(!nc_rrset) 
		return NULL;
	if(!neg_params_ok(zone, nc_rrset))
		return NULL;
	if(!nsec3_has_optout(nc_rrset, 0))
		return NULL;
	/* fill the cached-hash struct; b32 skips the length byte of the
	 * first label (nc_b32[0] holds that length) */
	c.hash = hashnc;
	c.hash_len = nclen;
	c.b32 = nc_b32+1;
	c.b32_len = (size_t)nc_b32[0];
	if(nsec3_covers(zone->name, &c, nc_rrset, 0, buf)) {
		/* nc_rrset covers the next closer name.
		 * ce_rrset equals a closer encloser.
		 * nc_rrset is optout.
		 * No need to check wildcard for type DS */
		/* capacity=3: ce + nc + soa(if needed) */
		if(!(msg = dns_msg_create(qname, qname_len, 
			LDNS_RR_TYPE_DS, zone->dclass, region, 3))) 
			return NULL;
		/* now=0 because TTL was reduced in grab_nsec */
		if(!dns_msg_authadd(msg, region, ce_rrset, 0)) 
			return NULL;
		if(!dns_msg_authadd(msg, region, nc_rrset, 0)) 
			return NULL;
		return msg;
	}
	return NULL;
}
1335
1336
/**
1337
 * Add SOA record for external responses.
1338
 * @param rrset_cache: to look into.
1339
 * @param now: current time.
1340
 * @param region: where to perform the allocation
1341
 * @param msg: current msg with NSEC.
1342
 * @param zone: val_neg_zone if we have one.
1343
 * @return false on lookup or alloc failure.
1344
 */
1345
static int add_soa(struct rrset_cache* rrset_cache, time_t now,
1346
  struct regional* region, struct dns_msg* msg, struct val_neg_zone* zone)
1347
0
{
1348
0
  struct ub_packed_rrset_key* soa;
1349
0
  uint8_t* nm;
1350
0
  size_t nmlen;
1351
0
  uint16_t dclass;
1352
0
  if(zone) {
1353
0
    nm = zone->name;
1354
0
    nmlen = zone->len;
1355
0
    dclass = zone->dclass;
1356
0
  } else {
1357
    /* Assumes the signer is the zone SOA to add */
1358
0
    nm = reply_nsec_signer(msg->rep, &nmlen, &dclass);
1359
0
    if(!nm) 
1360
0
      return 0;
1361
0
  }
1362
0
  soa = rrset_cache_lookup(rrset_cache, nm, nmlen, LDNS_RR_TYPE_SOA, 
1363
0
    dclass, PACKED_RRSET_SOA_NEG, now, 0);
1364
0
  if(!soa)
1365
0
    return 0;
1366
0
  if(!dns_msg_authadd(msg, region, soa, now)) {
1367
0
    lock_rw_unlock(&soa->entry.lock);
1368
0
    return 0;
1369
0
  }
1370
0
  lock_rw_unlock(&soa->entry.lock);
1371
0
  return 1;
1372
0
}
1373
1374
/**
 * Generate a negative answer from the aggressive negative cache, if
 * possible: a NODATA or NXDOMAIN (with possible wildcard synthesis)
 * from cached NSEC records, or a DS proof from cached NSEC3 records.
 * Returns NULL when no proof can be constructed.
 */
struct dns_msg* 
val_neg_getmsg(struct val_neg_cache* neg, struct query_info* qinfo, 
	struct regional* region, struct rrset_cache* rrset_cache, 
	sldns_buffer* buf, time_t now, int addsoa, uint8_t* topname,
	struct config_file* cfg)
{
	struct dns_msg* msg;
	struct ub_packed_rrset_key* nsec; /* qname matching/covering nsec */
	struct ub_packed_rrset_key* wcrr; /* wildcard record or nsec */
	uint8_t* nodata_wc = NULL;
	uint8_t* ce = NULL;
	size_t ce_len;
	/* wildcard name: 2 bytes for the "*" label plus the ce name */
	uint8_t wc_ce[LDNS_MAX_DOMAINLEN+3];
	struct query_info wc_qinfo;
	struct ub_packed_rrset_key* cache_wc;
	struct packed_rrset_data* wcrr_data;
	int rcode = LDNS_RCODE_NOERROR;
	uint8_t* zname;
	size_t zname_len;
	int zname_labs;
	struct val_neg_zone* zone;

	/* only for DS queries when aggressive use of NSEC is disabled */
	if(qinfo->qtype != LDNS_RR_TYPE_DS && !cfg->aggressive_nsec)
		return NULL;
	log_assert(!topname || dname_subdomain_c(qinfo->qname, topname));

	/* Get best available NSEC for qname */
	nsec = neg_find_nsec(neg, qinfo->qname, qinfo->qname_len, qinfo->qclass,
		rrset_cache, now, region);

	/* Matching NSEC, use to generate No Data answer. Not creating answers
	 * yet for No Data proven using wildcard. */
	if(nsec && nsec_proves_nodata(nsec, qinfo, &nodata_wc) && !nodata_wc) {
		/* do not create nodata answers for qtype ANY, it is a query
		 * type, not an rrtype to disprove. Nameerrors are useful for
		 * qtype ANY, in the else branch. */
		if(qinfo->qtype == LDNS_RR_TYPE_ANY)
			return NULL;
		if(!(msg = dns_msg_create(qinfo->qname, qinfo->qname_len, 
			qinfo->qtype, qinfo->qclass, region, 2))) 
			return NULL;
		/* now=0: the TTL was already reduced in grab_nsec */
		if(!dns_msg_authadd(msg, region, nsec, 0)) 
			return NULL;
		if(addsoa && !add_soa(rrset_cache, now, region, msg, NULL))
			return NULL;

		lock_basic_lock(&neg->lock);
		neg->num_neg_cache_noerror++;
		lock_basic_unlock(&neg->lock);
		return msg;
	} else if(nsec && val_nsec_proves_name_error(nsec, qinfo->qname)) {
		/* NXDOMAIN proof; may still need a wildcard denial or
		 * wildcard-synthesized data */
		if(!(msg = dns_msg_create(qinfo->qname, qinfo->qname_len, 
			qinfo->qtype, qinfo->qclass, region, 3))) 
			return NULL;
		if(!(ce = nsec_closest_encloser(qinfo->qname, nsec)))
			return NULL;
		dname_count_size_labels(ce, &ce_len);

		/* No extra extra NSEC required if both nameerror qname and
		 * nodata *.ce. are proven already. */
		if(!nodata_wc || query_dname_compare(nodata_wc, ce) != 0) {
			/* Qname proven non existing, get wildcard record for
			 * QTYPE or NSEC covering or matching wildcard. */

			/* Num labels in ce is always smaller than in qname,
			 * therefore adding the wildcard label cannot overflow
			 * buffer. */
			wc_ce[0] = 1;
			wc_ce[1] = (uint8_t)'*';
			memmove(wc_ce+2, ce, ce_len);
			wc_qinfo.qname = wc_ce;
			wc_qinfo.qname_len = ce_len + 2;
			wc_qinfo.qtype = qinfo->qtype;


			if((cache_wc = rrset_cache_lookup(rrset_cache, wc_qinfo.qname,
				wc_qinfo.qname_len, wc_qinfo.qtype,
				qinfo->qclass, 0/*flags*/, now, 0/*read only*/))) {
				/* Synthesize wildcard answer */
				/* entry is returned read-locked; unlock on
				 * all paths below */
				wcrr_data = (struct packed_rrset_data*)cache_wc->entry.data;
				/* only secure or signed-unchecked data */
				if(!(wcrr_data->security == sec_status_secure ||
					(wcrr_data->security == sec_status_unchecked &&
					wcrr_data->rrsig_count > 0))) {
					lock_rw_unlock(&cache_wc->entry.lock);
					return NULL;
				}
				if(!(wcrr = packed_rrset_copy_region(cache_wc,
					region, now))) {
					lock_rw_unlock(&cache_wc->entry.lock);
					return NULL;
				};
				lock_rw_unlock(&cache_wc->entry.lock);
				/* rename the copied wildcard rrset to qname */
				wcrr->rk.dname = qinfo->qname;
				wcrr->rk.dname_len = qinfo->qname_len;
				if(!dns_msg_ansadd(msg, region, wcrr, 0))
					return NULL;
				/* No SOA needed for wildcard synthesised
				 * answer. */
				addsoa = 0;
			} else {
				/* Get wildcard NSEC for possible non existence
				 * proof */
				if(!(wcrr = neg_find_nsec(neg, wc_qinfo.qname,
					wc_qinfo.qname_len, qinfo->qclass,
					rrset_cache, now, region)))
					return NULL;

				nodata_wc = NULL;
				if(val_nsec_proves_name_error(wcrr, wc_ce))
					rcode = LDNS_RCODE_NXDOMAIN;
				else if(!nsec_proves_nodata(wcrr, &wc_qinfo,
					&nodata_wc) || nodata_wc)
					/* &nodata_wc shouldn't be set, wc_qinfo
					 * already contains wildcard domain. */
					/* NSEC doesn't prove anything for
					 * wildcard. */
					return NULL;
				/* avoid adding the same NSEC twice */
				if(query_dname_compare(wcrr->rk.dname,
					nsec->rk.dname) != 0)
					if(!dns_msg_authadd(msg, region, wcrr, 0))
						return NULL;
			}
		}

		if(!dns_msg_authadd(msg, region, nsec, 0))
			return NULL;
		if(addsoa && !add_soa(rrset_cache, now, region, msg, NULL))
			return NULL;

		/* Increment statistic counters */
		lock_basic_lock(&neg->lock);
		if(rcode == LDNS_RCODE_NOERROR)
			neg->num_neg_cache_noerror++;
		else if(rcode == LDNS_RCODE_NXDOMAIN)
			neg->num_neg_cache_nxdomain++;
		lock_basic_unlock(&neg->lock);

		FLAGS_SET_RCODE(msg->rep->flags, rcode);
		return msg;
	}

	/* No aggressive use of NSEC3 for now, only proceed for DS types. */
	if(qinfo->qtype != LDNS_RR_TYPE_DS){
		return NULL;
	}
	/* check NSEC3 neg cache for type DS */
	/* need to look one zone higher for DS type */
	zname = qinfo->qname;
	zname_len = qinfo->qname_len;
	dname_remove_label(&zname, &zname_len);
	zname_labs = dname_count_labels(zname);

	/* lookup closest zone */
	lock_basic_lock(&neg->lock);
	zone = neg_closest_zone_parent(neg, zname, zname_len, zname_labs, 
		qinfo->qclass);
	while(zone && !zone->in_use)
		zone = zone->parent;
	/* check that the zone is not too high up so that we do not pick data
	 * out of a zone that is above the last-seen key (or trust-anchor). */
	if(zone && topname) {
		if(!dname_subdomain_c(zone->name, topname))
			zone = NULL;
	}
	if(!zone) {
		lock_basic_unlock(&neg->lock);
		return NULL;
	}

	msg = neg_nsec3_proof_ds(zone, qinfo->qname, qinfo->qname_len, 
		zname_labs+1, buf, rrset_cache, region, now, topname);
	if(msg && addsoa && !add_soa(rrset_cache, now, region, msg, zone)) {
		lock_basic_unlock(&neg->lock);
		return NULL;
	}
	lock_basic_unlock(&neg->lock);
	return msg;
}
1553
1554
/**
 * Adjust the maximum memory size of the negative cache and evict
 * entries until the cache fits within the new limit.
 * @param neg: negative cache (locked internally).
 * @param max: new maximum size in bytes.
 */
void
val_neg_adjust_size(struct val_neg_cache* neg, size_t max)
{
	lock_basic_lock(&neg->lock);
	neg->max = max;
	/* need=0: just shrink to the (possibly smaller) new max */
	neg_make_space(neg, 0);
	lock_basic_unlock(&neg->lock);
}