Coverage Report

Created: 2026-04-03 06:23

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/haproxy/include/import/ebmbtree.h
Line
Count
Source
1
/*
2
 * Elastic Binary Trees - macros and structures for Multi-Byte data nodes.
3
 * Version 6.0.6
4
 * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation, version 2.1
9
 * exclusively.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
 */
20
21
#ifndef _EBMBTREE_H
22
#define _EBMBTREE_H
23
24
#include <string.h>
25
#include "ebtree.h"
26
27
/* Return the structure of type <type> whose member <member> points to <ptr> */
28
0
#define ebmb_entry(ptr, type, member) container_of(ptr, type, member)
29
30
/*
31
 * Exported functions and macros.
32
 * Many of them are always inlined because they are extremely small, and
33
 * are generally called at most once or twice in a program.
34
 */
35
36
/* Return leftmost node in the tree, or NULL if none */
37
static forceinline struct ebmb_node *ebmb_first(struct eb_root *root)
38
0
{
39
0
  return ebmb_entry(eb_first(root), struct ebmb_node, node);
40
0
}
Unexecuted instantiation: peers.c:ebmb_first
Unexecuted instantiation: sample.c:ebmb_first
Unexecuted instantiation: stats.c:ebmb_first
Unexecuted instantiation: stick_table.c:ebmb_first
Unexecuted instantiation: tools.c:ebmb_first
Unexecuted instantiation: acl.c:ebmb_first
Unexecuted instantiation: ebmbtree.c:ebmb_first
Unexecuted instantiation: ebsttree.c:ebmb_first
Unexecuted instantiation: pattern.c:ebmb_first
Unexecuted instantiation: stats-file.c:ebmb_first
Unexecuted instantiation: shctx.c:ebmb_first
41
42
/* Return rightmost node in the tree, or NULL if none */
43
static forceinline struct ebmb_node *ebmb_last(struct eb_root *root)
44
0
{
45
0
  return ebmb_entry(eb_last(root), struct ebmb_node, node);
46
0
}
Unexecuted instantiation: peers.c:ebmb_last
Unexecuted instantiation: sample.c:ebmb_last
Unexecuted instantiation: stats.c:ebmb_last
Unexecuted instantiation: stick_table.c:ebmb_last
Unexecuted instantiation: tools.c:ebmb_last
Unexecuted instantiation: acl.c:ebmb_last
Unexecuted instantiation: ebmbtree.c:ebmb_last
Unexecuted instantiation: ebsttree.c:ebmb_last
Unexecuted instantiation: pattern.c:ebmb_last
Unexecuted instantiation: stats-file.c:ebmb_last
Unexecuted instantiation: shctx.c:ebmb_last
47
48
/* Return next node in the tree, or NULL if none */
49
static forceinline struct ebmb_node *ebmb_next(struct ebmb_node *ebmb)
50
0
{
51
0
  return ebmb_entry(eb_next(&ebmb->node), struct ebmb_node, node);
52
0
}
Unexecuted instantiation: peers.c:ebmb_next
Unexecuted instantiation: sample.c:ebmb_next
Unexecuted instantiation: stats.c:ebmb_next
Unexecuted instantiation: stick_table.c:ebmb_next
Unexecuted instantiation: tools.c:ebmb_next
Unexecuted instantiation: acl.c:ebmb_next
Unexecuted instantiation: ebmbtree.c:ebmb_next
Unexecuted instantiation: ebsttree.c:ebmb_next
Unexecuted instantiation: pattern.c:ebmb_next
Unexecuted instantiation: stats-file.c:ebmb_next
Unexecuted instantiation: shctx.c:ebmb_next
53
54
/* Return previous node in the tree, or NULL if none */
55
static forceinline struct ebmb_node *ebmb_prev(struct ebmb_node *ebmb)
56
0
{
57
0
  return ebmb_entry(eb_prev(&ebmb->node), struct ebmb_node, node);
58
0
}
Unexecuted instantiation: peers.c:ebmb_prev
Unexecuted instantiation: sample.c:ebmb_prev
Unexecuted instantiation: stats.c:ebmb_prev
Unexecuted instantiation: stick_table.c:ebmb_prev
Unexecuted instantiation: tools.c:ebmb_prev
Unexecuted instantiation: acl.c:ebmb_prev
Unexecuted instantiation: ebmbtree.c:ebmb_prev
Unexecuted instantiation: ebsttree.c:ebmb_prev
Unexecuted instantiation: pattern.c:ebmb_prev
Unexecuted instantiation: stats-file.c:ebmb_prev
Unexecuted instantiation: shctx.c:ebmb_prev
59
60
/* Return next leaf node within a duplicate sub-tree, or NULL if none. */
61
static inline struct ebmb_node *ebmb_next_dup(struct ebmb_node *ebmb)
62
0
{
63
0
  return ebmb_entry(eb_next_dup(&ebmb->node), struct ebmb_node, node);
64
0
}
Unexecuted instantiation: peers.c:ebmb_next_dup
Unexecuted instantiation: sample.c:ebmb_next_dup
Unexecuted instantiation: stats.c:ebmb_next_dup
Unexecuted instantiation: stick_table.c:ebmb_next_dup
Unexecuted instantiation: tools.c:ebmb_next_dup
Unexecuted instantiation: acl.c:ebmb_next_dup
Unexecuted instantiation: ebmbtree.c:ebmb_next_dup
Unexecuted instantiation: ebsttree.c:ebmb_next_dup
Unexecuted instantiation: pattern.c:ebmb_next_dup
Unexecuted instantiation: stats-file.c:ebmb_next_dup
Unexecuted instantiation: shctx.c:ebmb_next_dup
65
66
/* Return previous leaf node within a duplicate sub-tree, or NULL if none. */
67
static inline struct ebmb_node *ebmb_prev_dup(struct ebmb_node *ebmb)
68
0
{
69
0
  return ebmb_entry(eb_prev_dup(&ebmb->node), struct ebmb_node, node);
70
0
}
Unexecuted instantiation: peers.c:ebmb_prev_dup
Unexecuted instantiation: sample.c:ebmb_prev_dup
Unexecuted instantiation: stats.c:ebmb_prev_dup
Unexecuted instantiation: stick_table.c:ebmb_prev_dup
Unexecuted instantiation: tools.c:ebmb_prev_dup
Unexecuted instantiation: acl.c:ebmb_prev_dup
Unexecuted instantiation: ebmbtree.c:ebmb_prev_dup
Unexecuted instantiation: ebsttree.c:ebmb_prev_dup
Unexecuted instantiation: pattern.c:ebmb_prev_dup
Unexecuted instantiation: stats-file.c:ebmb_prev_dup
Unexecuted instantiation: shctx.c:ebmb_prev_dup
71
72
/* Return next node in the tree, skipping duplicates, or NULL if none */
73
static forceinline struct ebmb_node *ebmb_next_unique(struct ebmb_node *ebmb)
74
0
{
75
0
  return ebmb_entry(eb_next_unique(&ebmb->node), struct ebmb_node, node);
76
0
}
Unexecuted instantiation: peers.c:ebmb_next_unique
Unexecuted instantiation: sample.c:ebmb_next_unique
Unexecuted instantiation: stats.c:ebmb_next_unique
Unexecuted instantiation: stick_table.c:ebmb_next_unique
Unexecuted instantiation: tools.c:ebmb_next_unique
Unexecuted instantiation: acl.c:ebmb_next_unique
Unexecuted instantiation: ebmbtree.c:ebmb_next_unique
Unexecuted instantiation: ebsttree.c:ebmb_next_unique
Unexecuted instantiation: pattern.c:ebmb_next_unique
Unexecuted instantiation: stats-file.c:ebmb_next_unique
Unexecuted instantiation: shctx.c:ebmb_next_unique
77
78
/* Return previous node in the tree, skipping duplicates, or NULL if none */
79
static forceinline struct ebmb_node *ebmb_prev_unique(struct ebmb_node *ebmb)
80
0
{
81
0
  return ebmb_entry(eb_prev_unique(&ebmb->node), struct ebmb_node, node);
82
0
}
Unexecuted instantiation: peers.c:ebmb_prev_unique
Unexecuted instantiation: sample.c:ebmb_prev_unique
Unexecuted instantiation: stats.c:ebmb_prev_unique
Unexecuted instantiation: stick_table.c:ebmb_prev_unique
Unexecuted instantiation: tools.c:ebmb_prev_unique
Unexecuted instantiation: acl.c:ebmb_prev_unique
Unexecuted instantiation: ebmbtree.c:ebmb_prev_unique
Unexecuted instantiation: ebsttree.c:ebmb_prev_unique
Unexecuted instantiation: pattern.c:ebmb_prev_unique
Unexecuted instantiation: stats-file.c:ebmb_prev_unique
Unexecuted instantiation: shctx.c:ebmb_prev_unique
83
84
/* Delete node from the tree if it was linked in. Mark the node unused. Note
85
 * that this function relies on a non-inlined generic function: eb_delete.
86
 */
87
static forceinline void ebmb_delete(struct ebmb_node *ebmb)
88
0
{
89
0
  eb_delete(&ebmb->node);
90
0
}
Unexecuted instantiation: peers.c:ebmb_delete
Unexecuted instantiation: sample.c:ebmb_delete
Unexecuted instantiation: stats.c:ebmb_delete
Unexecuted instantiation: stick_table.c:ebmb_delete
Unexecuted instantiation: tools.c:ebmb_delete
Unexecuted instantiation: acl.c:ebmb_delete
Unexecuted instantiation: ebmbtree.c:ebmb_delete
Unexecuted instantiation: ebsttree.c:ebmb_delete
Unexecuted instantiation: pattern.c:ebmb_delete
Unexecuted instantiation: stats-file.c:ebmb_delete
Unexecuted instantiation: shctx.c:ebmb_delete
91
92
/* The following functions are not inlined by default. They are declared
93
 * in ebmbtree.c, which simply relies on their inline version.
94
 */
95
struct ebmb_node *ebmb_lookup(struct eb_root *root, const void *x, unsigned int len);
96
struct ebmb_node *ebmb_insert(struct eb_root *root, struct ebmb_node *new, unsigned int len);
97
struct ebmb_node *ebmb_lookup_longest(struct eb_root *root, const void *x);
98
struct ebmb_node *ebmb_lookup_prefix(struct eb_root *root, const void *x, unsigned int pfx);
99
struct ebmb_node *ebmb_insert_prefix(struct eb_root *root, struct ebmb_node *new, unsigned int len);
100
101
/* start from a valid leaf and find the next matching prefix that's either a
102
 * duplicate, or immediately shorter than the node's current one and still
103
 * matches it. The purpose is to permit a caller that is not satisfied with a
104
 * result provided by ebmb_lookup_longest() to evaluate the next matching
105
 * entry. Given that shorter keys are necessarily attached to nodes located
106
 * above the current one, it's sufficient to restart from the current leaf and
107
 * go up until we find a shorter prefix, or a non-matching one.
108
 */
109
static inline struct ebmb_node *ebmb_lookup_shorter(struct ebmb_node *start)
110
0
{
111
0
  eb_troot_t *t = start->node.leaf_p;
112
0
  struct ebmb_node *node;
113
114
  /* first, check for duplicates */
115
0
  node = ebmb_next_dup(start);
116
0
  if (node)
117
0
    return node;
118
119
0
  while (1) {
120
0
    if (eb_gettag(t) == EB_LEFT) {
121
      /* Walking up from left branch. We must ensure that we never
122
       * walk beyond root.
123
       */
124
0
      if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL))
125
0
        return NULL;
126
0
      node = container_of(eb_root_to_node(eb_untag(t, EB_LEFT)), struct ebmb_node, node);
127
0
    } else {
128
      /* Walking up from right branch, so we cannot be below
129
       * root. However, if we end up on a node with an even
130
       * and positive bit, this is a cover node, which mandates
131
       * that the left branch only contains cover values, so we
132
       * must descend it.
133
       */
134
0
      node = container_of(eb_root_to_node(eb_untag(t, EB_RGHT)), struct ebmb_node, node);
135
0
      if (node->node.bit > 0 && !(node->node.bit & 1))
136
0
        return ebmb_entry(eb_walk_down(t, EB_LEFT), struct ebmb_node, node);
137
0
    }
138
139
    /* Note that <t> cannot be NULL at this stage */
140
0
    t = node->node.node_p;
141
142
    /* this is a node attached to a deeper (and possibly different)
143
     * leaf, not interesting for us.
144
     */
145
0
    if (node->node.pfx >= start->node.pfx)
146
0
      continue;
147
148
0
    if (check_bits(start->key, node->key, 0, node->node.pfx) == 0)
149
0
      break;
150
0
  }
151
0
  return node;
152
0
}
Unexecuted instantiation: peers.c:ebmb_lookup_shorter
Unexecuted instantiation: sample.c:ebmb_lookup_shorter
Unexecuted instantiation: stats.c:ebmb_lookup_shorter
Unexecuted instantiation: stick_table.c:ebmb_lookup_shorter
Unexecuted instantiation: tools.c:ebmb_lookup_shorter
Unexecuted instantiation: acl.c:ebmb_lookup_shorter
Unexecuted instantiation: ebmbtree.c:ebmb_lookup_shorter
Unexecuted instantiation: ebsttree.c:ebmb_lookup_shorter
Unexecuted instantiation: pattern.c:ebmb_lookup_shorter
Unexecuted instantiation: stats-file.c:ebmb_lookup_shorter
Unexecuted instantiation: shctx.c:ebmb_lookup_shorter
153
154
/* The following functions are less likely to be used directly, because their
155
 * code is larger. The non-inlined version is preferred.
156
 */
157
158
/* Delete node from the tree if it was linked in. Mark the node unused. */
159
static forceinline void __ebmb_delete(struct ebmb_node *ebmb)
160
0
{
161
0
  __eb_delete(&ebmb->node);
162
0
}
Unexecuted instantiation: peers.c:__ebmb_delete
Unexecuted instantiation: sample.c:__ebmb_delete
Unexecuted instantiation: stats.c:__ebmb_delete
Unexecuted instantiation: stick_table.c:__ebmb_delete
Unexecuted instantiation: tools.c:__ebmb_delete
Unexecuted instantiation: acl.c:__ebmb_delete
Unexecuted instantiation: ebmbtree.c:__ebmb_delete
Unexecuted instantiation: ebsttree.c:__ebmb_delete
Unexecuted instantiation: pattern.c:__ebmb_delete
Unexecuted instantiation: stats-file.c:__ebmb_delete
Unexecuted instantiation: shctx.c:__ebmb_delete
163
164
/* Find the first occurrence of a key of a least <len> bytes matching <x> in the
165
 * tree <root>. The caller is responsible for ensuring that <len> will not exceed
166
 * the common parts between the tree's keys and <x>. In case of multiple matches,
167
 * the leftmost node is returned. This means that this function can be used to
168
 * lookup string keys by prefix if all keys in the tree are zero-terminated. If
169
 * no match is found, NULL is returned. Returns first node if <len> is zero.
170
 */
171
static forceinline struct ebmb_node *__ebmb_lookup(struct eb_root *root, const void *x, unsigned int len)
172
0
{
173
0
  struct ebmb_node *node;
174
0
  eb_troot_t *troot;
175
0
  int pos, side;
176
0
  int node_bit;
177
178
0
  troot = root->b[EB_LEFT];
179
0
  if (unlikely(troot == NULL))
180
0
    goto ret_null;
181
182
0
  if (unlikely(len == 0))
183
0
    goto walk_down;
184
185
0
  pos = 0;
186
0
  while (1) {
187
0
    void *b0, *b1;
188
0
    unsigned char k, b;
189
190
0
    if (eb_gettag(troot) == EB_LEAF) {
191
0
      node = container_of(eb_untag(troot, EB_LEAF),
192
0
              struct ebmb_node, node.branches);
193
0
      if (eb_memcmp(node->key + pos, x, len) != 0)
194
0
        goto ret_null;
195
0
      else
196
0
        goto ret_node;
197
0
    }
198
0
    node = container_of(eb_untag(troot, EB_NODE),
199
0
            struct ebmb_node, node.branches);
200
201
0
    node_bit = node->node.bit;
202
0
    if (node_bit < 0) {
203
      /* We have a dup tree now. Either it's for the same
204
       * value, and we walk down left, or it's a different
205
       * one and we don't have our key.
206
       */
207
0
      if (eb_memcmp(node->key + pos, x, len) != 0)
208
0
        goto ret_null;
209
0
      else
210
0
        goto walk_left;
211
0
    }
212
213
    /* OK, normal data node, let's walk down. We check if all full
214
     * bytes are equal, and we start from the last one we did not
215
     * completely check. We stop as soon as we reach the last byte,
216
     * because we must decide to go left/right or abort.
217
     */
218
0
    node_bit = ~node_bit + (pos << 3) + 8; // = (pos<<3) + (7 - node_bit)
219
0
    if (node_bit < 0) {
220
      /* This surprising construction gives better performance
221
       * because gcc does not try to reorder the loop. Tested to
222
       * be fine with 2.95 to 4.2.
223
       */
224
0
      while (1) {
225
0
        if (node->key[pos++] ^ *(unsigned char*)(x++))
226
0
          goto ret_null;  /* more than one full byte is different */
227
0
        if (--len == 0)
228
0
          goto walk_left; /* return first node if all bytes matched */
229
0
        node_bit += 8;
230
0
        if (node_bit >= 0)
231
0
          break;
232
0
      }
233
0
    }
234
235
    /* here we know that only the last byte differs, so node_bit < 8.
236
     * We have 2 possibilities :
237
     *   - more than the last bit differs => return NULL
238
     *   - walk down on side = (x[pos] >> node_bit) & 1
239
     */
240
0
    b = *(unsigned char *)x;
241
0
    side = 1 << node_bit;
242
243
0
    eb_prefetch(node->node.branches.b[0], 0);
244
0
    eb_prefetch(node->node.branches.b[1], 0);
245
246
0
    k = node->key[pos];
247
0
    b0 = node->node.branches.b[0];
248
0
    b1 = node->node.branches.b[1];
249
0
    troot = (b & side) ? b1 : b0;
250
251
0
    if ((k ^ b) & -(side << 1))
252
0
      goto ret_null;
253
0
  }
254
0
 walk_left:
255
0
  troot = node->node.branches.b[EB_LEFT];
256
0
 walk_down:
257
0
  while (eb_gettag(troot) != EB_LEAF)
258
0
    troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
259
0
  node = container_of(eb_untag(troot, EB_LEAF),
260
0
          struct ebmb_node, node.branches);
261
0
 ret_node:
262
0
  return node;
263
0
 ret_null:
264
0
  return NULL;
265
0
}
Unexecuted instantiation: peers.c:__ebmb_lookup
Unexecuted instantiation: sample.c:__ebmb_lookup
Unexecuted instantiation: stats.c:__ebmb_lookup
Unexecuted instantiation: stick_table.c:__ebmb_lookup
Unexecuted instantiation: tools.c:__ebmb_lookup
Unexecuted instantiation: acl.c:__ebmb_lookup
Unexecuted instantiation: ebmbtree.c:__ebmb_lookup
Unexecuted instantiation: ebsttree.c:__ebmb_lookup
Unexecuted instantiation: pattern.c:__ebmb_lookup
Unexecuted instantiation: stats-file.c:__ebmb_lookup
Unexecuted instantiation: shctx.c:__ebmb_lookup
266
267
/* Insert ebmb_node <new> into subtree starting at node root <root>.
268
 * Only new->key needs be set with the key. The ebmb_node is returned.
269
 * If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
270
 * len is specified in bytes. It is absolutely mandatory that this length
271
 * is the same for all keys in the tree. This function cannot be used to
272
 * insert strings.
273
 */
274
static forceinline struct ebmb_node *
275
__ebmb_insert(struct eb_root *root, struct ebmb_node *new, unsigned int len)
276
0
{
277
0
  struct ebmb_node *old;
278
0
  unsigned int side;
279
0
  eb_troot_t *troot, **up_ptr;
280
0
  eb_troot_t *root_right;
281
0
  int diff;
282
0
  int bit;
283
0
  eb_troot_t *new_left, *new_rght;
284
0
  eb_troot_t *new_leaf;
285
0
  int old_node_bit;
286
287
0
  side = EB_LEFT;
288
0
  troot = root->b[EB_LEFT];
289
0
  root_right = root->b[EB_RGHT];
290
0
  if (unlikely(troot == NULL)) {
291
    /* Tree is empty, insert the leaf part below the left branch */
292
0
    root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
293
0
    new->node.leaf_p = eb_dotag(root, EB_LEFT);
294
0
    new->node.node_p = NULL; /* node part unused */
295
0
    return new;
296
0
  }
297
298
  /* The tree descent is fairly easy :
299
   *  - first, check if we have reached a leaf node
300
   *  - second, check if we have gone too far
301
   *  - third, reiterate
302
   * Everywhere, we use <new> for the node we are inserting, <root>
303
   * for the node we attach it to, and <old> for the node we are
304
   * displacing below <new>. <troot> will always point to the future node
305
   * (tagged with its type). <side> carries the side the node <new> is
306
   * attached to below its parent, which is also where previous node
307
   * was attached.
308
   */
309
310
0
  bit = 0;
311
0
  while (1) {
312
0
    if (unlikely(eb_gettag(troot) == EB_LEAF)) {
313
      /* insert above a leaf */
314
0
      old = container_of(eb_untag(troot, EB_LEAF),
315
0
              struct ebmb_node, node.branches);
316
0
      new->node.node_p = old->node.leaf_p;
317
0
      up_ptr = &old->node.leaf_p;
318
0
      goto check_bit_and_break;
319
0
    }
320
321
    /* OK we're walking down this link */
322
0
    old = container_of(eb_untag(troot, EB_NODE),
323
0
           struct ebmb_node, node.branches);
324
0
    old_node_bit = old->node.bit;
325
326
0
    if (unlikely(old->node.bit < 0)) {
327
      /* We're above a duplicate tree, so we must compare the whole value */
328
0
      new->node.node_p = old->node.node_p;
329
0
      up_ptr = &old->node.node_p;
330
0
    check_bit_and_break:
331
0
      bit = equal_bits(new->key, old->key, bit, len << 3);
332
0
      break;
333
0
    }
334
335
    /* Stop going down when we don't have common bits anymore. We
336
     * also stop in front of a duplicates tree because it means we
337
     * have to insert above. Note: we can compare more bits than
338
     * the current node's because as long as they are identical, we
339
     * know we descend along the correct side.
340
     */
341
342
0
    bit = equal_bits(new->key, old->key, bit, old_node_bit);
343
0
    if (unlikely(bit < old_node_bit)) {
344
      /* The tree did not contain the key, so we insert <new> before the
345
       * node <old>, and set ->bit to designate the lowest bit position in
346
       * <new> which applies to ->branches.b[].
347
       */
348
0
      new->node.node_p = old->node.node_p;
349
0
      up_ptr = &old->node.node_p;
350
0
      break;
351
0
    }
352
    /* we don't want to skip bits for further comparisons, so we must limit <bit>.
353
     * However, since we're going down around <old_node_bit>, we know it will be
354
     * properly matched, so we can skip this bit.
355
     */
356
0
    bit = old_node_bit + 1;
357
358
    /* walk down */
359
0
    root = &old->node.branches;
360
0
    side = old_node_bit & 7;
361
0
    side ^= 7;
362
0
    side = (new->key[old_node_bit >> 3] >> side) & 1;
363
0
    troot = root->b[side];
364
0
  }
365
366
0
  new_left = eb_dotag(&new->node.branches, EB_LEFT);
367
0
  new_rght = eb_dotag(&new->node.branches, EB_RGHT);
368
0
  new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
369
370
0
  new->node.bit = bit;
371
372
  /* Note: we can compare more bits than the current node's because as
373
   * long as they are identical, we know we descend along the correct
374
   * side. However we don't want to start to compare past the end.
375
   */
376
0
  diff = 0;
377
0
  if (((unsigned)bit >> 3) < len)
378
0
    diff = cmp_bits(new->key, old->key, bit);
379
380
0
  if (diff == 0) {
381
0
    new->node.bit = -1; /* mark as new dup tree, just in case */
382
383
0
    if (likely(eb_gettag(root_right))) {
384
      /* we refuse to duplicate this key if the tree is
385
       * tagged as containing only unique keys.
386
       */
387
0
      return old;
388
0
    }
389
390
0
    if (eb_gettag(troot) != EB_LEAF) {
391
      /* there was already a dup tree below */
392
0
      struct eb_node *ret;
393
0
      ret = eb_insert_dup(&old->node, &new->node);
394
0
      return container_of(ret, struct ebmb_node, node);
395
0
    }
396
    /* otherwise fall through */
397
0
  }
398
399
0
  if (diff >= 0) {
400
0
    new->node.branches.b[EB_LEFT] = troot;
401
0
    new->node.branches.b[EB_RGHT] = new_leaf;
402
0
    new->node.leaf_p = new_rght;
403
0
    *up_ptr = new_left;
404
0
  }
405
0
  else {
406
0
    new->node.branches.b[EB_LEFT] = new_leaf;
407
0
    new->node.branches.b[EB_RGHT] = troot;
408
0
    new->node.leaf_p = new_left;
409
0
    *up_ptr = new_rght;
410
0
  }
411
412
  /* Ok, now we are inserting <new> between <root> and <old>. <old>'s
413
   * parent is already set to <new>, and the <root>'s branch is still in
414
   * <side>. Update the root's leaf till we have it. Note that we can also
415
   * find the side by checking the side of new->node.node_p.
416
   */
417
418
0
  root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
419
0
  return new;
420
0
}
Unexecuted instantiation: peers.c:__ebmb_insert
Unexecuted instantiation: sample.c:__ebmb_insert
Unexecuted instantiation: stats.c:__ebmb_insert
Unexecuted instantiation: stick_table.c:__ebmb_insert
Unexecuted instantiation: tools.c:__ebmb_insert
Unexecuted instantiation: acl.c:__ebmb_insert
Unexecuted instantiation: ebmbtree.c:__ebmb_insert
Unexecuted instantiation: ebsttree.c:__ebmb_insert
Unexecuted instantiation: pattern.c:__ebmb_insert
Unexecuted instantiation: stats-file.c:__ebmb_insert
Unexecuted instantiation: shctx.c:__ebmb_insert
421
422
423
/* Find the first occurrence of the longest prefix matching a key <x> in the
424
 * tree <root>. It's the caller's responsibility to ensure that key <x> is at
425
 * least as long as the keys in the tree. Note that this can be ensured by
426
 * having a byte at the end of <x> which cannot be part of any prefix, typically
427
 * the trailing zero for a string. If none can be found, return NULL.
428
 */
429
static forceinline struct ebmb_node *__ebmb_lookup_longest(struct eb_root *root, const void *x)
430
0
{
431
0
  struct ebmb_node *node;
432
0
  eb_troot_t *troot, *cover;
433
0
  int pos, side;
434
0
  int node_bit;
435
436
0
  troot = root->b[EB_LEFT];
437
0
  if (unlikely(troot == NULL))
438
0
    return NULL;
439
440
0
  cover = NULL;
441
0
  pos = 0;
442
0
  while (1) {
443
0
    if ((eb_gettag(troot) == EB_LEAF)) {
444
0
      node = container_of(eb_untag(troot, EB_LEAF),
445
0
              struct ebmb_node, node.branches);
446
0
      if (check_bits(x - pos, node->key, pos, node->node.pfx))
447
0
        goto not_found;
448
449
0
      return node;
450
0
    }
451
0
    node = container_of(eb_untag(troot, EB_NODE),
452
0
            struct ebmb_node, node.branches);
453
454
0
    node_bit = node->node.bit;
455
0
    if (node_bit < 0) {
456
      /* We have a dup tree now. Either it's for the same
457
       * value, and we walk down left, or it's a different
458
       * one and we don't have our key.
459
       */
460
0
      if (check_bits(x - pos, node->key, pos, node->node.pfx))
461
0
        goto not_found;
462
463
0
      troot = node->node.branches.b[EB_LEFT];
464
0
      while (eb_gettag(troot) != EB_LEAF)
465
0
        troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
466
0
      node = container_of(eb_untag(troot, EB_LEAF),
467
0
              struct ebmb_node, node.branches);
468
0
      return node;
469
0
    }
470
471
0
    node_bit >>= 1; /* strip cover bit */
472
0
    node_bit = ~node_bit + (pos << 3) + 8; // = (pos<<3) + (7 - node_bit)
473
0
    if (node_bit < 0) {
474
      /* This uncommon construction gives better performance
475
       * because gcc does not try to reorder the loop. Tested to
476
       * be fine with 2.95 to 4.2.
477
       */
478
0
      while (1) {
479
0
        x++; pos++;
480
0
        if (node->key[pos-1] ^ *(unsigned char*)(x-1))
481
0
          goto not_found; /* more than one full byte is different */
482
0
        node_bit += 8;
483
0
        if (node_bit >= 0)
484
0
          break;
485
0
      }
486
0
    }
487
488
    /* here we know that only the last byte differs, so 0 <= node_bit <= 7.
489
     * We have 2 possibilities :
490
     *   - more than the last bit differs => data does not match
491
     *   - walk down on side = (x[pos] >> node_bit) & 1
492
     */
493
0
    side = *(unsigned char *)x >> node_bit;
494
0
    if (((node->key[pos] >> node_bit) ^ side) > 1)
495
0
      goto not_found;
496
497
0
    if (!(node->node.bit & 1)) {
498
      /* This is a cover node, let's keep a reference to it
499
       * for later. The covering subtree is on the left, and
500
       * the covered subtree is on the right, so we have to
501
       * walk down right.
502
       */
503
0
      cover = node->node.branches.b[EB_LEFT];
504
0
      troot = node->node.branches.b[EB_RGHT];
505
0
      continue;
506
0
    }
507
0
    side &= 1;
508
0
    troot = node->node.branches.b[side];
509
0
  }
510
511
0
 not_found:
512
  /* Walk down last cover tree if it exists. It does not matter if cover is NULL */
513
0
  return ebmb_entry(eb_walk_down(cover, EB_LEFT), struct ebmb_node, node);
514
0
}
Unexecuted instantiation: peers.c:__ebmb_lookup_longest
Unexecuted instantiation: sample.c:__ebmb_lookup_longest
Unexecuted instantiation: stats.c:__ebmb_lookup_longest
Unexecuted instantiation: stick_table.c:__ebmb_lookup_longest
Unexecuted instantiation: tools.c:__ebmb_lookup_longest
Unexecuted instantiation: acl.c:__ebmb_lookup_longest
Unexecuted instantiation: ebmbtree.c:__ebmb_lookup_longest
Unexecuted instantiation: ebsttree.c:__ebmb_lookup_longest
Unexecuted instantiation: pattern.c:__ebmb_lookup_longest
Unexecuted instantiation: stats-file.c:__ebmb_lookup_longest
Unexecuted instantiation: shctx.c:__ebmb_lookup_longest
515
516
517
/* Find the first occurrence of a prefix matching a key <x> of <pfx> BITS in the
518
 * tree <root>. It's the caller's responsibility to ensure that key <x> is at
519
 * least as long as the keys in the tree. Note that this can be ensured by
520
 * having a byte at the end of <x> which cannot be part of any prefix, typically
521
 * the trailing zero for a string. If none can be found, return NULL.
522
 */
523
static forceinline struct ebmb_node *__ebmb_lookup_prefix(struct eb_root *root, const void *x, unsigned int pfx)
524
0
{
525
0
  struct ebmb_node *node;
526
0
  eb_troot_t *troot;
527
0
  int pos, side;
528
0
  int node_bit;
529
530
0
  troot = root->b[EB_LEFT];
531
0
  if (unlikely(troot == NULL))
532
0
    return NULL;
533
534
0
  pos = 0;
535
0
  while (1) {
536
0
    if ((eb_gettag(troot) == EB_LEAF)) {
537
0
      node = container_of(eb_untag(troot, EB_LEAF),
538
0
              struct ebmb_node, node.branches);
539
0
      if (node->node.pfx != pfx)
540
0
        return NULL;
541
0
      if (check_bits(x - pos, node->key, pos, node->node.pfx))
542
0
        return NULL;
543
0
      return node;
544
0
    }
545
0
    node = container_of(eb_untag(troot, EB_NODE),
546
0
            struct ebmb_node, node.branches);
547
548
0
    node_bit = node->node.bit;
549
0
    if (node_bit < 0) {
550
      /* We have a dup tree now. Either it's for the same
551
       * value, and we walk down left, or it's a different
552
       * one and we don't have our key.
553
       */
554
0
      if (node->node.pfx != pfx)
555
0
        return NULL;
556
0
      if (check_bits(x - pos, node->key, pos, node->node.pfx))
557
0
        return NULL;
558
559
0
      troot = node->node.branches.b[EB_LEFT];
560
0
      while (eb_gettag(troot) != EB_LEAF)
561
0
        troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
562
0
      node = container_of(eb_untag(troot, EB_LEAF),
563
0
              struct ebmb_node, node.branches);
564
0
      return node;
565
0
    }
566
567
0
    node_bit >>= 1; /* strip cover bit */
568
0
    node_bit = ~node_bit + (pos << 3) + 8; // = (pos<<3) + (7 - node_bit)
569
0
    if (node_bit < 0) {
570
      /* This uncommon construction gives better performance
571
       * because gcc does not try to reorder the loop. Tested to
572
       * be fine with 2.95 to 4.2.
573
       */
574
0
      while (1) {
575
0
        x++; pos++;
576
0
        if (node->key[pos-1] ^ *(unsigned char*)(x-1))
577
0
          return NULL; /* more than one full byte is different */
578
0
        node_bit += 8;
579
0
        if (node_bit >= 0)
580
0
          break;
581
0
      }
582
0
    }
583
584
    /* here we know that only the last byte differs, so 0 <= node_bit <= 7.
585
     * We have 2 possibilities :
586
     *   - more than the last bit differs => data does not match
587
     *   - walk down on side = (x[pos] >> node_bit) & 1
588
     */
589
0
    side = *(unsigned char *)x >> node_bit;
590
0
    if (((node->key[pos] >> node_bit) ^ side) > 1)
591
0
      return NULL;
592
593
0
    if (!(node->node.bit & 1)) {
594
      /* This is a cover node, it may be the entry we're
595
       * looking for. We already know that it matches all the
596
       * bits, let's compare prefixes and descend the cover
597
       * subtree if they match.
598
       */
599
0
      if ((unsigned short)node->node.bit >> 1 == pfx)
600
0
        troot = node->node.branches.b[EB_LEFT];
601
0
      else
602
0
        troot = node->node.branches.b[EB_RGHT];
603
0
      continue;
604
0
    }
605
0
    side &= 1;
606
0
    troot = node->node.branches.b[side];
607
0
  }
608
0
}
Unexecuted instantiation: peers.c:__ebmb_lookup_prefix
Unexecuted instantiation: sample.c:__ebmb_lookup_prefix
Unexecuted instantiation: stats.c:__ebmb_lookup_prefix
Unexecuted instantiation: stick_table.c:__ebmb_lookup_prefix
Unexecuted instantiation: tools.c:__ebmb_lookup_prefix
Unexecuted instantiation: acl.c:__ebmb_lookup_prefix
Unexecuted instantiation: ebmbtree.c:__ebmb_lookup_prefix
Unexecuted instantiation: ebsttree.c:__ebmb_lookup_prefix
Unexecuted instantiation: pattern.c:__ebmb_lookup_prefix
Unexecuted instantiation: stats-file.c:__ebmb_lookup_prefix
Unexecuted instantiation: shctx.c:__ebmb_lookup_prefix
609
610
611
/* Insert ebmb_node <new> into a prefix subtree starting at node root <root>.
 * Only new->key and new->node.pfx need be set with the key and its prefix
 * length in bits. Note that bits between <pfx> and <len> are theoretically
 * ignored and should be zero, as it is not certain yet that they will always
 * be ignored everywhere (eg in bit compare functions).
 * The inserted ebmb_node is returned on success; if the tree is tagged as
 * containing only unique keys (root->b[EB_RGHT]==1) and an identical
 * key/prefix already exists, the pre-existing node is returned instead and
 * <new> is not inserted. The <len> limit is specified in bytes.
 *
 * Internally the tree mixes three node flavours, distinguished by node.bit:
 *   < 0    : duplicate subtree
 *   = 2N   : "cover" node for an N-bit prefix (even value)
 *   = 2N+1 : normal radix node splitting at bit N (odd value)
 * hence several comparisons below are performed in "half bits" (bit
 * positions shifted left by one, with the low bit encoding the node type).
 */
static forceinline struct ebmb_node *
__ebmb_insert_prefix(struct eb_root *root, struct ebmb_node *new, unsigned int len)
{
	struct ebmb_node *old;
	unsigned int side;
	eb_troot_t *troot, **up_ptr;
	eb_troot_t *root_right;
	int diff;
	int bit;
	eb_troot_t *new_left, *new_rght;
	eb_troot_t *new_leaf;
	int old_node_bit;
	unsigned int npfx = new->node.pfx;       /* new key's prefix length, in bits */
	unsigned int npfx1 = npfx << 1;          /* same, in half-bits (cover-node encoding) */
	const unsigned char *nkey = new->key;

	side = EB_LEFT;
	troot = root->b[EB_LEFT];
	root_right = root->b[EB_RGHT];           /* non-zero tag => unique-keys-only tree */
	if (unlikely(troot == NULL)) {
		/* Tree is empty, insert the leaf part below the left branch */
		root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
		new->node.leaf_p = eb_dotag(root, EB_LEFT);
		new->node.node_p = NULL; /* node part unused */
		return new;
	}

	/* convert <len> from bytes to bits and clamp it to the new prefix
	 * length: bits beyond the prefix never take part in comparisons.
	 */
	len <<= 3;
	if (len > npfx)
		len = npfx;

	/* The tree descent is fairly easy :
	 *  - first, check if we have reached a leaf node
	 *  - second, check if we have gone too far
	 *  - third, reiterate
	 * Everywhere, we use <new> for the node node we are inserting, <root>
	 * for the node we attach it to, and <old> for the node we are
	 * displacing below <new>. <troot> will always point to the future node
	 * (tagged with its type). <side> carries the side the node <new> is
	 * attached to below its parent, which is also where previous node
	 * was attached.
	 */

	bit = 0;
	while (1) {
		if (unlikely(eb_gettag(troot) == EB_LEAF)) {
			/* Insert above a leaf. Note that this leaf could very
			 * well be part of a cover node.
			 */
			old = container_of(eb_untag(troot, EB_LEAF),
					    struct ebmb_node, node.branches);
			new->node.node_p = old->node.leaf_p;
			up_ptr = &old->node.leaf_p;
			goto check_bit_and_break;
		}

		/* OK we're walking down this link */
		old = container_of(eb_untag(troot, EB_NODE),
				   struct ebmb_node, node.branches);
		old_node_bit = old->node.bit;
		/* Note that old_node_bit can be :
		 *   < 0    : dup tree
		 *   = 2N   : cover node for N bits
		 *   = 2N+1 : normal node at N bits
		 */

		if (unlikely(old_node_bit < 0)) {
			/* We're above a duplicate tree, so we must compare the whole value */
			new->node.node_p = old->node.node_p;
			up_ptr = &old->node.node_p;
		check_bit_and_break:
			/* No need to compare everything if the leaves are shorter than the new one. */
			if (len > old->node.pfx)
				len = old->node.pfx;
			bit = equal_bits(nkey, old->key, bit, len);
			break;
		}

		/* WARNING: for the two blocks below, <bit> is counted in half-bits */

		bit = equal_bits(nkey, old->key, bit, old_node_bit >> 1);
		bit = (bit << 1) + 1; // assume comparisons with normal nodes

		/* we must always check that our prefix is larger than the nodes
		 * we visit, otherwise we have to stop going down. The following
		 * test is able to stop before both normal and cover nodes.
		 */
		if (bit >= npfx1 && npfx1 < old_node_bit) {
			/* insert cover node here on the left */
			new->node.node_p = old->node.node_p;
			up_ptr = &old->node.node_p;
			new->node.bit = npfx1;   /* even value => cover node */
			diff = -1;               /* force left-side attachment below */
			goto insert_above;
		}

		if (unlikely(bit < old_node_bit)) {
			/* The tree did not contain the key, so we insert <new> before the
			 * node <old>, and set ->bit to designate the lowest bit position in
			 * <new> which applies to ->branches.b[]. We know that the bit is not
			 * greater than the prefix length thanks to the test above.
			 */
			new->node.node_p = old->node.node_p;
			up_ptr = &old->node.node_p;
			new->node.bit = bit;
			diff = cmp_bits(nkey, old->key, bit >> 1);
			goto insert_above;
		}

		if (!(old_node_bit & 1)) {
			/* if we encounter a cover node with our exact prefix length, it's
			 * necessarily the same value, so we insert there as a duplicate on
			 * the left. For that, we go down on the left and the leaf detection
			 * code will finish the job.
			 */
			if (npfx1 == old_node_bit) {
				root = &old->node.branches;
				side = EB_LEFT;
				troot = root->b[side];
				continue;
			}

			/* cover nodes are always walked through on the right */
			side = EB_RGHT;
			bit = old_node_bit >> 1; /* recheck that bit */
			root = &old->node.branches;
			troot = root->b[side];
			continue;
		}

		/* we don't want to skip bits for further comparisons, so we must limit <bit>.
		 * However, since we're going down around <old_node_bit>, we know it will be
		 * properly matched, so we can skip this bit.
		 */
		old_node_bit >>= 1;      /* back from half-bits to a plain bit position */
		bit = old_node_bit + 1;

		/* walk down: pick the branch matching the key's bit at position
		 * <old_node_bit> (bit 0 being the MSB of the first key byte).
		 */
		root = &old->node.branches;
		side = old_node_bit & 7;
		side ^= 7;
		side = (nkey[old_node_bit >> 3] >> side) & 1;
		troot = root->b[side];
	}

	/* Right here, we have 4 possibilities :
	 * - the tree does not contain any leaf matching the
	 *   key, and we have new->key < old->key. We insert
	 *   new above old, on the left ;
	 *
	 * - the tree does not contain any leaf matching the
	 *   key, and we have new->key > old->key. We insert
	 *   new above old, on the right ;
	 *
	 * - the tree does contain the key with the same prefix
	 *   length. We add the new key next to it as a first
	 *   duplicate (since it was alone).
	 *
	 * The last two cases can easily be partially merged.
	 *
	 * - the tree contains a leaf matching the key, we have
	 *   to insert above it as a cover node. The leaf with
	 *   the shortest prefix becomes the left subtree and
	 *   the leaf with the longest prefix becomes the right
	 *   one. The cover node gets the min of both prefixes
	 *   as its new bit.
	 */

	/* first we want to ensure that we compare the correct bit, which means
	 * the largest common to both nodes.
	 */
	if (bit > npfx)
		bit = npfx;
	if (bit > old->node.pfx)
		bit = old->node.pfx;

	new->node.bit = (bit << 1) + 1; /* assume normal node by default */

	/* if one prefix is included in the second one, we don't compare bits
	 * because they won't necessarily match, we just proceed with a cover
	 * node insertion.
	 */
	diff = 0;
	if (bit < old->node.pfx && bit < npfx)
		diff = cmp_bits(nkey, old->key, bit);

	if (diff == 0) {
		/* Both keys match. Either it's a duplicate entry or we have to
		 * put the shortest prefix left and the largest one right below
		 * a new cover node. By default, diff==0 means we'll be inserted
		 * on the right.
		 */
		new->node.bit--; /* anticipate cover node insertion */
		if (npfx == old->node.pfx) {
			new->node.bit = -1; /* mark as new dup tree, just in case */

			if (unlikely(eb_gettag(root_right))) {
				/* we refuse to duplicate this key if the tree is
				 * tagged as containing only unique keys.
				 */
				return old;
			}

			if (eb_gettag(troot) != EB_LEAF) {
				/* there was already a dup tree below */
				struct eb_node *ret;
				ret = eb_insert_dup(&old->node, &new->node);
				return container_of(ret, struct ebmb_node, node);
			}
			/* otherwise fall through to insert first duplicate */
		}
		/* otherwise we just rely on the tests below to select the right side */
		else if (npfx < old->node.pfx)
			diff = -1; /* force insertion to left side */
	}

 insert_above:
	/* attach <new> in place of <old> under *up_ptr; <diff> selects on
	 * which side of <new> its own leaf goes, the displaced subtree
	 * (<troot>) takes the other side.
	 */
	new_left = eb_dotag(&new->node.branches, EB_LEFT);
	new_rght = eb_dotag(&new->node.branches, EB_RGHT);
	new_leaf = eb_dotag(&new->node.branches, EB_LEAF);

	if (diff >= 0) {
		new->node.branches.b[EB_LEFT] = troot;
		new->node.branches.b[EB_RGHT] = new_leaf;
		new->node.leaf_p = new_rght;
		*up_ptr = new_left;
	}
	else {
		new->node.branches.b[EB_LEFT] = new_leaf;
		new->node.branches.b[EB_RGHT] = troot;
		new->node.leaf_p = new_left;
		*up_ptr = new_rght;
	}

	/* reattach the parent's original branch to the new node part */
	root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
	return new;
}
Unexecuted instantiation: peers.c:__ebmb_insert_prefix
Unexecuted instantiation: sample.c:__ebmb_insert_prefix
Unexecuted instantiation: stats.c:__ebmb_insert_prefix
Unexecuted instantiation: stick_table.c:__ebmb_insert_prefix
Unexecuted instantiation: tools.c:__ebmb_insert_prefix
Unexecuted instantiation: acl.c:__ebmb_insert_prefix
Unexecuted instantiation: ebmbtree.c:__ebmb_insert_prefix
Unexecuted instantiation: ebsttree.c:__ebmb_insert_prefix
Unexecuted instantiation: pattern.c:__ebmb_insert_prefix
Unexecuted instantiation: stats-file.c:__ebmb_insert_prefix
Unexecuted instantiation: shctx.c:__ebmb_insert_prefix
857
858
859
860
#endif /* _EBMBTREE_H */
861