Coverage Report

Created: 2025-09-04 06:06

/src/haproxy/include/import/ebmbtree.h
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * Elastic Binary Trees - macros and structures for Multi-Byte data nodes.
3
 * Version 6.0.6
4
 * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation, version 2.1
9
 * exclusively.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
 */
20
21
#ifndef _EBMBTREE_H
22
#define _EBMBTREE_H
23
24
#include <string.h>
25
#include "ebtree.h"
26
27
/* Return the structure of type <type> whose member <member> points to <ptr>.
 * Thin wrapper around container_of() so that callers can convert a generic
 * eb_node pointer back to its enclosing multi-byte node.
 */
#define ebmb_entry(ptr, type, member) container_of(ptr, type, member)
29
30
/*
31
 * Exported functions and macros.
32
 * Many of them are always inlined because they are extremely small, and
33
 * are generally called at most once or twice in a program.
34
 */
35
36
/* Return leftmost node in the tree, or NULL if none */
37
static forceinline struct ebmb_node *ebmb_first(struct eb_root *root)
38
0
{
39
0
  return ebmb_entry(eb_first(root), struct ebmb_node, node);
40
0
}
Unexecuted instantiation: connection.c:ebmb_first
Unexecuted instantiation: peers.c:ebmb_first
Unexecuted instantiation: sample.c:ebmb_first
Unexecuted instantiation: server.c:ebmb_first
Unexecuted instantiation: stats.c:ebmb_first
Unexecuted instantiation: stick_table.c:ebmb_first
Unexecuted instantiation: tools.c:ebmb_first
Unexecuted instantiation: acl.c:ebmb_first
Unexecuted instantiation: backend.c:ebmb_first
Unexecuted instantiation: ebmbtree.c:ebmb_first
Unexecuted instantiation: ebsttree.c:ebmb_first
Unexecuted instantiation: pattern.c:ebmb_first
Unexecuted instantiation: shctx.c:ebmb_first
Unexecuted instantiation: stats-file.c:ebmb_first
41
42
/* Return rightmost node in the tree, or NULL if none */
43
static forceinline struct ebmb_node *ebmb_last(struct eb_root *root)
44
0
{
45
0
  return ebmb_entry(eb_last(root), struct ebmb_node, node);
46
0
}
Unexecuted instantiation: connection.c:ebmb_last
Unexecuted instantiation: peers.c:ebmb_last
Unexecuted instantiation: sample.c:ebmb_last
Unexecuted instantiation: server.c:ebmb_last
Unexecuted instantiation: stats.c:ebmb_last
Unexecuted instantiation: stick_table.c:ebmb_last
Unexecuted instantiation: tools.c:ebmb_last
Unexecuted instantiation: acl.c:ebmb_last
Unexecuted instantiation: backend.c:ebmb_last
Unexecuted instantiation: ebmbtree.c:ebmb_last
Unexecuted instantiation: ebsttree.c:ebmb_last
Unexecuted instantiation: pattern.c:ebmb_last
Unexecuted instantiation: shctx.c:ebmb_last
Unexecuted instantiation: stats-file.c:ebmb_last
47
48
/* Return next node in the tree, or NULL if none */
49
static forceinline struct ebmb_node *ebmb_next(struct ebmb_node *ebmb)
50
0
{
51
0
  return ebmb_entry(eb_next(&ebmb->node), struct ebmb_node, node);
52
0
}
Unexecuted instantiation: connection.c:ebmb_next
Unexecuted instantiation: peers.c:ebmb_next
Unexecuted instantiation: sample.c:ebmb_next
Unexecuted instantiation: server.c:ebmb_next
Unexecuted instantiation: stats.c:ebmb_next
Unexecuted instantiation: stick_table.c:ebmb_next
Unexecuted instantiation: tools.c:ebmb_next
Unexecuted instantiation: acl.c:ebmb_next
Unexecuted instantiation: backend.c:ebmb_next
Unexecuted instantiation: ebmbtree.c:ebmb_next
Unexecuted instantiation: ebsttree.c:ebmb_next
Unexecuted instantiation: pattern.c:ebmb_next
Unexecuted instantiation: shctx.c:ebmb_next
Unexecuted instantiation: stats-file.c:ebmb_next
53
54
/* Return previous node in the tree, or NULL if none */
55
static forceinline struct ebmb_node *ebmb_prev(struct ebmb_node *ebmb)
56
0
{
57
0
  return ebmb_entry(eb_prev(&ebmb->node), struct ebmb_node, node);
58
0
}
Unexecuted instantiation: connection.c:ebmb_prev
Unexecuted instantiation: peers.c:ebmb_prev
Unexecuted instantiation: sample.c:ebmb_prev
Unexecuted instantiation: server.c:ebmb_prev
Unexecuted instantiation: stats.c:ebmb_prev
Unexecuted instantiation: stick_table.c:ebmb_prev
Unexecuted instantiation: tools.c:ebmb_prev
Unexecuted instantiation: acl.c:ebmb_prev
Unexecuted instantiation: backend.c:ebmb_prev
Unexecuted instantiation: ebmbtree.c:ebmb_prev
Unexecuted instantiation: ebsttree.c:ebmb_prev
Unexecuted instantiation: pattern.c:ebmb_prev
Unexecuted instantiation: shctx.c:ebmb_prev
Unexecuted instantiation: stats-file.c:ebmb_prev
59
60
/* Return next leaf node within a duplicate sub-tree, or NULL if none. */
61
static inline struct ebmb_node *ebmb_next_dup(struct ebmb_node *ebmb)
62
0
{
63
0
  return ebmb_entry(eb_next_dup(&ebmb->node), struct ebmb_node, node);
64
0
}
Unexecuted instantiation: connection.c:ebmb_next_dup
Unexecuted instantiation: peers.c:ebmb_next_dup
Unexecuted instantiation: sample.c:ebmb_next_dup
Unexecuted instantiation: server.c:ebmb_next_dup
Unexecuted instantiation: stats.c:ebmb_next_dup
Unexecuted instantiation: stick_table.c:ebmb_next_dup
Unexecuted instantiation: tools.c:ebmb_next_dup
Unexecuted instantiation: acl.c:ebmb_next_dup
Unexecuted instantiation: backend.c:ebmb_next_dup
Unexecuted instantiation: ebmbtree.c:ebmb_next_dup
Unexecuted instantiation: ebsttree.c:ebmb_next_dup
Unexecuted instantiation: pattern.c:ebmb_next_dup
Unexecuted instantiation: shctx.c:ebmb_next_dup
Unexecuted instantiation: stats-file.c:ebmb_next_dup
65
66
/* Return previous leaf node within a duplicate sub-tree, or NULL if none. */
67
static inline struct ebmb_node *ebmb_prev_dup(struct ebmb_node *ebmb)
68
0
{
69
0
  return ebmb_entry(eb_prev_dup(&ebmb->node), struct ebmb_node, node);
70
0
}
Unexecuted instantiation: connection.c:ebmb_prev_dup
Unexecuted instantiation: peers.c:ebmb_prev_dup
Unexecuted instantiation: sample.c:ebmb_prev_dup
Unexecuted instantiation: server.c:ebmb_prev_dup
Unexecuted instantiation: stats.c:ebmb_prev_dup
Unexecuted instantiation: stick_table.c:ebmb_prev_dup
Unexecuted instantiation: tools.c:ebmb_prev_dup
Unexecuted instantiation: acl.c:ebmb_prev_dup
Unexecuted instantiation: backend.c:ebmb_prev_dup
Unexecuted instantiation: ebmbtree.c:ebmb_prev_dup
Unexecuted instantiation: ebsttree.c:ebmb_prev_dup
Unexecuted instantiation: pattern.c:ebmb_prev_dup
Unexecuted instantiation: shctx.c:ebmb_prev_dup
Unexecuted instantiation: stats-file.c:ebmb_prev_dup
71
72
/* Return next node in the tree, skipping duplicates, or NULL if none */
73
static forceinline struct ebmb_node *ebmb_next_unique(struct ebmb_node *ebmb)
74
0
{
75
0
  return ebmb_entry(eb_next_unique(&ebmb->node), struct ebmb_node, node);
76
0
}
Unexecuted instantiation: connection.c:ebmb_next_unique
Unexecuted instantiation: peers.c:ebmb_next_unique
Unexecuted instantiation: sample.c:ebmb_next_unique
Unexecuted instantiation: server.c:ebmb_next_unique
Unexecuted instantiation: stats.c:ebmb_next_unique
Unexecuted instantiation: stick_table.c:ebmb_next_unique
Unexecuted instantiation: tools.c:ebmb_next_unique
Unexecuted instantiation: acl.c:ebmb_next_unique
Unexecuted instantiation: backend.c:ebmb_next_unique
Unexecuted instantiation: ebmbtree.c:ebmb_next_unique
Unexecuted instantiation: ebsttree.c:ebmb_next_unique
Unexecuted instantiation: pattern.c:ebmb_next_unique
Unexecuted instantiation: shctx.c:ebmb_next_unique
Unexecuted instantiation: stats-file.c:ebmb_next_unique
77
78
/* Return previous node in the tree, skipping duplicates, or NULL if none */
79
static forceinline struct ebmb_node *ebmb_prev_unique(struct ebmb_node *ebmb)
80
0
{
81
0
  return ebmb_entry(eb_prev_unique(&ebmb->node), struct ebmb_node, node);
82
0
}
Unexecuted instantiation: connection.c:ebmb_prev_unique
Unexecuted instantiation: peers.c:ebmb_prev_unique
Unexecuted instantiation: sample.c:ebmb_prev_unique
Unexecuted instantiation: server.c:ebmb_prev_unique
Unexecuted instantiation: stats.c:ebmb_prev_unique
Unexecuted instantiation: stick_table.c:ebmb_prev_unique
Unexecuted instantiation: tools.c:ebmb_prev_unique
Unexecuted instantiation: acl.c:ebmb_prev_unique
Unexecuted instantiation: backend.c:ebmb_prev_unique
Unexecuted instantiation: ebmbtree.c:ebmb_prev_unique
Unexecuted instantiation: ebsttree.c:ebmb_prev_unique
Unexecuted instantiation: pattern.c:ebmb_prev_unique
Unexecuted instantiation: shctx.c:ebmb_prev_unique
Unexecuted instantiation: stats-file.c:ebmb_prev_unique
83
84
/* Delete node from the tree if it was linked in. Mark the node unused. Note
85
 * that this function relies on a non-inlined generic function: eb_delete.
86
 */
87
static forceinline void ebmb_delete(struct ebmb_node *ebmb)
88
0
{
89
0
  eb_delete(&ebmb->node);
90
0
}
Unexecuted instantiation: connection.c:ebmb_delete
Unexecuted instantiation: peers.c:ebmb_delete
Unexecuted instantiation: sample.c:ebmb_delete
Unexecuted instantiation: server.c:ebmb_delete
Unexecuted instantiation: stats.c:ebmb_delete
Unexecuted instantiation: stick_table.c:ebmb_delete
Unexecuted instantiation: tools.c:ebmb_delete
Unexecuted instantiation: acl.c:ebmb_delete
Unexecuted instantiation: backend.c:ebmb_delete
Unexecuted instantiation: ebmbtree.c:ebmb_delete
Unexecuted instantiation: ebsttree.c:ebmb_delete
Unexecuted instantiation: pattern.c:ebmb_delete
Unexecuted instantiation: shctx.c:ebmb_delete
Unexecuted instantiation: stats-file.c:ebmb_delete
91
92
/* The following functions are not inlined by default. They are declared
93
 * in ebmbtree.c, which simply relies on their inline version.
94
 */
95
struct ebmb_node *ebmb_lookup(struct eb_root *root, const void *x, unsigned int len);
96
struct ebmb_node *ebmb_insert(struct eb_root *root, struct ebmb_node *new, unsigned int len);
97
struct ebmb_node *ebmb_lookup_longest(struct eb_root *root, const void *x);
98
struct ebmb_node *ebmb_lookup_prefix(struct eb_root *root, const void *x, unsigned int pfx);
99
struct ebmb_node *ebmb_insert_prefix(struct eb_root *root, struct ebmb_node *new, unsigned int len);
100
101
/* start from a valid leaf and find the next matching prefix that's either a
 * duplicate, or immediately shorter than the node's current one and still
 * matches it. The purpose is to permit a caller that is not satisfied with a
 * result provided by ebmb_lookup_longest() to evaluate the next matching
 * entry. Given that shorter keys are necessarily attached to nodes located
 * above the current one, it's sufficient to restart from the current leaf and
 * go up until we find a shorter prefix, or a non-matching one.
 * Returns the next matching node, or NULL if none exists.
 */
static inline struct ebmb_node *ebmb_lookup_shorter(struct ebmb_node *start)
{
	eb_troot_t *t = start->node.leaf_p;	/* tagged parent link of the leaf */
	struct ebmb_node *node;

	/* first, check for duplicates: an exact-length duplicate is the very
	 * next candidate before any shorter prefix.
	 */
	node = ebmb_next_dup(start);
	if (node)
		return node;

	while (1) {
		if (eb_gettag(t) == EB_LEFT) {
			/* Walking up from left branch. We must ensure that we never
			 * walk beyond root.
			 */
			if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL))
				return NULL;
			node = container_of(eb_root_to_node(eb_untag(t, EB_LEFT)), struct ebmb_node, node);
		} else {
			/* Walking up from right branch, so we cannot be below
			 * root. However, if we end up on a node with an even
			 * and positive bit, this is a cover node, which mandates
			 * that the left branch only contains cover values, so we
			 * must descend it.
			 */
			node = container_of(eb_root_to_node(eb_untag(t, EB_RGHT)), struct ebmb_node, node);
			if (node->node.bit > 0 && !(node->node.bit & 1))
				return ebmb_entry(eb_walk_down(t, EB_LEFT), struct ebmb_node, node);
		}

		/* Note that <t> cannot be NULL at this stage */
		t = node->node.node_p;

		/* this is a node attached to a deeper (and possibly different)
		 * leaf, not interesting for us.
		 */
		if (node->node.pfx >= start->node.pfx)
			continue;

		/* shorter prefix found; stop as soon as it still matches the
		 * start key over its whole (shorter) prefix length.
		 */
		if (check_bits(start->key, node->key, 0, node->node.pfx) == 0)
			break;
	}
	return node;
}
153
154
/* The following functions are less likely to be used directly, because their
155
 * code is larger. The non-inlined version is preferred.
156
 */
157
158
/* Delete node from the tree if it was linked in. Mark the node unused. */
159
static forceinline void __ebmb_delete(struct ebmb_node *ebmb)
160
0
{
161
0
  __eb_delete(&ebmb->node);
162
0
}
Unexecuted instantiation: connection.c:__ebmb_delete
Unexecuted instantiation: peers.c:__ebmb_delete
Unexecuted instantiation: sample.c:__ebmb_delete
Unexecuted instantiation: server.c:__ebmb_delete
Unexecuted instantiation: stats.c:__ebmb_delete
Unexecuted instantiation: stick_table.c:__ebmb_delete
Unexecuted instantiation: tools.c:__ebmb_delete
Unexecuted instantiation: acl.c:__ebmb_delete
Unexecuted instantiation: backend.c:__ebmb_delete
Unexecuted instantiation: ebmbtree.c:__ebmb_delete
Unexecuted instantiation: ebsttree.c:__ebmb_delete
Unexecuted instantiation: pattern.c:__ebmb_delete
Unexecuted instantiation: shctx.c:__ebmb_delete
Unexecuted instantiation: stats-file.c:__ebmb_delete
163
164
/* Find the first occurrence of a key of a least <len> bytes matching <x> in the
 * tree <root>. The caller is responsible for ensuring that <len> will not exceed
 * the common parts between the tree's keys and <x>. In case of multiple matches,
 * the leftmost node is returned. This means that this function can be used to
 * lookup string keys by prefix if all keys in the tree are zero-terminated. If
 * no match is found, NULL is returned. Returns first node if <len> is zero.
 */
static forceinline struct ebmb_node *__ebmb_lookup(struct eb_root *root, const void *x, unsigned int len)
{
	struct ebmb_node *node;
	eb_troot_t *troot;	/* tagged pointer to the branch being visited */
	int pos, side;		/* pos = byte offset already validated; side = next branch */
	int node_bit;

	troot = root->b[EB_LEFT];
	if (unlikely(troot == NULL))
		goto ret_null;	/* empty tree */

	if (unlikely(len == 0))
		goto walk_down;	/* zero-length key matches the first node */

	pos = 0;
	while (1) {
		if (eb_gettag(troot) == EB_LEAF) {
			/* reached a leaf: compare the remaining bytes in one go */
			node = container_of(eb_untag(troot, EB_LEAF),
					    struct ebmb_node, node.branches);
			if (eb_memcmp(node->key + pos, x, len) != 0)
				goto ret_null;
			else
				goto ret_node;
		}
		node = container_of(eb_untag(troot, EB_NODE),
				    struct ebmb_node, node.branches);

		node_bit = node->node.bit;
		if (node_bit < 0) {
			/* We have a dup tree now. Either it's for the same
			 * value, and we walk down left, or it's a different
			 * one and we don't have our key.
			 */
			if (eb_memcmp(node->key + pos, x, len) != 0)
				goto ret_null;
			else
				goto walk_left;
		}

		/* OK, normal data node, let's walk down. We check if all full
		 * bytes are equal, and we start from the last one we did not
		 * completely check. We stop as soon as we reach the last byte,
		 * because we must decide to go left/right or abort.
		 */
		node_bit = ~node_bit + (pos << 3) + 8; // = (pos<<3) + (7 - node_bit)
		if (node_bit < 0) {
			/* This surprising construction gives better performance
			 * because gcc does not try to reorder the loop. Tested to
			 * be fine with 2.95 to 4.2.
			 */
			while (1) {
				if (node->key[pos++] ^ *(unsigned char*)(x++))
					goto ret_null;  /* more than one full byte is different */
				if (--len == 0)
					goto walk_left; /* return first node if all bytes matched */
				node_bit += 8;
				if (node_bit >= 0)
					break;
			}
		}

		/* here we know that only the last byte differs, so node_bit < 8.
		 * We have 2 possibilities :
		 *   - more than the last bit differs => return NULL
		 *   - walk down on side = (x[pos] >> node_bit) & 1
		 */
		side = *(unsigned char *)x >> node_bit;
		if (((node->key[pos] >> node_bit) ^ side) > 1)
			goto ret_null;
		side &= 1;
		troot = node->node.branches.b[side];
	}
 walk_left:
	troot = node->node.branches.b[EB_LEFT];
 walk_down:
	/* descend leftmost to reach the first (smallest) matching leaf */
	while (eb_gettag(troot) != EB_LEAF)
		troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
	node = container_of(eb_untag(troot, EB_LEAF),
			    struct ebmb_node, node.branches);
 ret_node:
	return node;
 ret_null:
	return NULL;
}
255
256
/* Insert ebmb_node <new> into subtree starting at node root <root>.
 * Only new->key needs be set with the key. The ebmb_node is returned.
 * If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
 * len is specified in bytes. It is absolutely mandatory that this length
 * is the same for all keys in the tree. This function cannot be used to
 * insert strings.
 * Returns <new> on success, or the pre-existing node when the tree is
 * unique-keyed and the key is already present.
 */
static forceinline struct ebmb_node *
__ebmb_insert(struct eb_root *root, struct ebmb_node *new, unsigned int len)
{
	struct ebmb_node *old;
	unsigned int side;
	eb_troot_t *troot, **up_ptr;	/* up_ptr = parent link to update with <new> */
	eb_troot_t *root_right;		/* saved to test the unique-keys tag */
	int diff;
	int bit;			/* number of bits common to <new> and <old> */
	eb_troot_t *new_left, *new_rght;
	eb_troot_t *new_leaf;
	int old_node_bit;

	side = EB_LEFT;
	troot = root->b[EB_LEFT];
	root_right = root->b[EB_RGHT];
	if (unlikely(troot == NULL)) {
		/* Tree is empty, insert the leaf part below the left branch */
		root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
		new->node.leaf_p = eb_dotag(root, EB_LEFT);
		new->node.node_p = NULL; /* node part unused */
		return new;
	}

	/* The tree descent is fairly easy :
	 *  - first, check if we have reached a leaf node
	 *  - second, check if we have gone too far
	 *  - third, reiterate
	 * Everywhere, we use <new> for the node node we are inserting, <root>
	 * for the node we attach it to, and <old> for the node we are
	 * displacing below <new>. <troot> will always point to the future node
	 * (tagged with its type). <side> carries the side the node <new> is
	 * attached to below its parent, which is also where previous node
	 * was attached.
	 */

	bit = 0;
	while (1) {
		if (unlikely(eb_gettag(troot) == EB_LEAF)) {
			/* insert above a leaf */
			old = container_of(eb_untag(troot, EB_LEAF),
					    struct ebmb_node, node.branches);
			new->node.node_p = old->node.leaf_p;
			up_ptr = &old->node.leaf_p;
			goto check_bit_and_break;
		}

		/* OK we're walking down this link */
		old = container_of(eb_untag(troot, EB_NODE),
				   struct ebmb_node, node.branches);
		old_node_bit = old->node.bit;

		if (unlikely(old->node.bit < 0)) {
			/* We're above a duplicate tree, so we must compare the whole value */
			new->node.node_p = old->node.node_p;
			up_ptr = &old->node.node_p;
		check_bit_and_break:
			/* common entry point from the leaf case above: count bits
			 * shared between both keys, capped at the full key length.
			 */
			bit = equal_bits(new->key, old->key, bit, len << 3);
			break;
		}

		/* Stop going down when we don't have common bits anymore. We
		 * also stop in front of a duplicates tree because it means we
		 * have to insert above. Note: we can compare more bits than
		 * the current node's because as long as they are identical, we
		 * know we descend along the correct side.
		 */

		bit = equal_bits(new->key, old->key, bit, old_node_bit);
		if (unlikely(bit < old_node_bit)) {
			/* The tree did not contain the key, so we insert <new> before the
			 * node <old>, and set ->bit to designate the lowest bit position in
			 * <new> which applies to ->branches.b[].
			 */
			new->node.node_p = old->node.node_p;
			up_ptr = &old->node.node_p;
			break;
		}
		/* we don't want to skip bits for further comparisons, so we must limit <bit>.
		 * However, since we're going down around <old_node_bit>, we know it will be
		 * properly matched, so we can skip this bit.
		 */
		bit = old_node_bit + 1;

		/* walk down */
		root = &old->node.branches;
		side = old_node_bit & 7;
		side ^= 7;	/* bit position within the byte (MSB first) */
		side = (new->key[old_node_bit >> 3] >> side) & 1;
		troot = root->b[side];
	}

	new_left = eb_dotag(&new->node.branches, EB_LEFT);
	new_rght = eb_dotag(&new->node.branches, EB_RGHT);
	new_leaf = eb_dotag(&new->node.branches, EB_LEAF);

	new->node.bit = bit;

	/* Note: we can compare more bits than the current node's because as
	 * long as they are identical, we know we descend along the correct
	 * side. However we don't want to start to compare past the end.
	 */
	diff = 0;
	if (((unsigned)bit >> 3) < len)
		diff = cmp_bits(new->key, old->key, bit);

	if (diff == 0) {
		new->node.bit = -1; /* mark as new dup tree, just in case */

		if (likely(eb_gettag(root_right))) {
			/* we refuse to duplicate this key if the tree is
			 * tagged as containing only unique keys.
			 */
			return old;
		}

		if (eb_gettag(troot) != EB_LEAF) {
			/* there was already a dup tree below */
			struct eb_node *ret;
			ret = eb_insert_dup(&old->node, &new->node);
			return container_of(ret, struct ebmb_node, node);
		}
		/* otherwise fall through */
	}

	if (diff >= 0) {
		/* new key is >= old key: old subtree goes left, new leaf right */
		new->node.branches.b[EB_LEFT] = troot;
		new->node.branches.b[EB_RGHT] = new_leaf;
		new->node.leaf_p = new_rght;
		*up_ptr = new_left;
	}
	else {
		/* new key is < old key: new leaf left, old subtree right */
		new->node.branches.b[EB_LEFT] = new_leaf;
		new->node.branches.b[EB_RGHT] = troot;
		new->node.leaf_p = new_left;
		*up_ptr = new_rght;
	}

	/* Ok, now we are inserting <new> between <root> and <old>. <old>'s
	 * parent is already set to <new>, and the <root>'s branch is still in
	 * <side>. Update the root's leaf till we have it. Note that we can also
	 * find the side by checking the side of new->node.node_p.
	 */

	root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
	return new;
}
410
411
412
/* Find the first occurrence of the longest prefix matching a key <x> in the
 * tree <root>. It's the caller's responsibility to ensure that key <x> is at
 * least as long as the keys in the tree. Note that this can be ensured by
 * having a byte at the end of <x> which cannot be part of any prefix, typically
 * the trailing zero for a string. If none can be found, return NULL.
 */
static forceinline struct ebmb_node *__ebmb_lookup_longest(struct eb_root *root, const void *x)
{
	struct ebmb_node *node;
	eb_troot_t *troot, *cover;	/* cover = last cover subtree seen on the path */
	int pos, side;			/* pos = byte offset already validated */
	int node_bit;

	troot = root->b[EB_LEFT];
	if (unlikely(troot == NULL))
		return NULL;	/* empty tree */

	cover = NULL;
	pos = 0;
	while (1) {
		if ((eb_gettag(troot) == EB_LEAF)) {
			/* reached a leaf: verify the whole prefix bit-wise.
			 * <x> was advanced along with <pos>, so <x - pos> is
			 * still the start of the key.
			 */
			node = container_of(eb_untag(troot, EB_LEAF),
					    struct ebmb_node, node.branches);
			if (check_bits(x - pos, node->key, pos, node->node.pfx))
				goto not_found;

			return node;
		}
		node = container_of(eb_untag(troot, EB_NODE),
				    struct ebmb_node, node.branches);

		node_bit = node->node.bit;
		if (node_bit < 0) {
			/* We have a dup tree now. Either it's for the same
			 * value, and we walk down left, or it's a different
			 * one and we don't have our key.
			 */
			if (check_bits(x - pos, node->key, pos, node->node.pfx))
				goto not_found;

			/* descend leftmost to the first duplicate leaf */
			troot = node->node.branches.b[EB_LEFT];
			while (eb_gettag(troot) != EB_LEAF)
				troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
			node = container_of(eb_untag(troot, EB_LEAF),
					    struct ebmb_node, node.branches);
			return node;
		}

		node_bit >>= 1; /* strip cover bit */
		node_bit = ~node_bit + (pos << 3) + 8; // = (pos<<3) + (7 - node_bit)
		if (node_bit < 0) {
			/* This uncommon construction gives better performance
			 * because gcc does not try to reorder the loop. Tested to
			 * be fine with 2.95 to 4.2.
			 */
			while (1) {
				x++; pos++;
				if (node->key[pos-1] ^ *(unsigned char*)(x-1))
					goto not_found; /* more than one full byte is different */
				node_bit += 8;
				if (node_bit >= 0)
					break;
			}
		}

		/* here we know that only the last byte differs, so 0 <= node_bit <= 7.
		 * We have 2 possibilities :
		 *   - more than the last bit differs => data does not match
		 *   - walk down on side = (x[pos] >> node_bit) & 1
		 */
		side = *(unsigned char *)x >> node_bit;
		if (((node->key[pos] >> node_bit) ^ side) > 1)
			goto not_found;

		if (!(node->node.bit & 1)) {
			/* This is a cover node, let's keep a reference to it
			 * for later. The covering subtree is on the left, and
			 * the covered subtree is on the right, so we have to
			 * walk down right.
			 */
			cover = node->node.branches.b[EB_LEFT];
			troot = node->node.branches.b[EB_RGHT];
			continue;
		}
		side &= 1;
		troot = node->node.branches.b[side];
	}

 not_found:
	/* Walk down last cover tree if it exists. It does not matter if cover is NULL */
	return ebmb_entry(eb_walk_down(cover, EB_LEFT), struct ebmb_node, node);
}
504
505
506
/* Find the first occurrence of a prefix matching a key <x> of <pfx> BITS in the
 * tree <root>. It's the caller's responsibility to ensure that key <x> is at
 * least as long as the keys in the tree. Note that this can be ensured by
 * having a byte at the end of <x> which cannot be part of any prefix, typically
 * the trailing zero for a string. If none can be found, return NULL.
 *
 * Implementation notes (see the node.bit encoding used by the prefix trees) :
 *   - node.bit < 0   : duplicate tree below this node
 *   - node.bit = 2N  : cover node for N bits (left = covering, right = covered)
 *   - node.bit = 2N+1: normal node discriminating on bit N
 * <pos> counts the number of key bytes already consumed during the descent,
 * and <x> is advanced in lock-step so that *(unsigned char *)x is always the
 * byte currently being compared.
 */
static forceinline struct ebmb_node *__ebmb_lookup_prefix(struct eb_root *root, const void *x, unsigned int pfx)
{
	struct ebmb_node *node;
	eb_troot_t *troot;
	int pos, side;
	int node_bit;

	troot = root->b[EB_LEFT];
	if (unlikely(troot == NULL))
		return NULL;

	pos = 0;
	while (1) {
		if ((eb_gettag(troot) == EB_LEAF)) {
			/* Reached a leaf: it matches only if it carries exactly
			 * the requested prefix length and all <pfx> bits agree.
			 */
			node = container_of(eb_untag(troot, EB_LEAF),
					    struct ebmb_node, node.branches);
			if (node->node.pfx != pfx)
				return NULL;
			/* check_bits() re-checks from the beginning of the key,
			 * hence the (x - pos) rewind to undo the descent's
			 * pointer advance.
			 */
			if (check_bits(x - pos, node->key, pos, node->node.pfx))
				return NULL;
			return node;
		}
		node = container_of(eb_untag(troot, EB_NODE),
				    struct ebmb_node, node.branches);

		node_bit = node->node.bit;
		if (node_bit < 0) {
			/* We have a dup tree now. Either it's for the same
			 * value, and we walk down left, or it's a different
			 * one and we don't have our key.
			 */
			if (node->node.pfx != pfx)
				return NULL;
			if (check_bits(x - pos, node->key, pos, node->node.pfx))
				return NULL;

			/* matching dup tree: return its first (leftmost) leaf */
			troot = node->node.branches.b[EB_LEFT];
			while (eb_gettag(troot) != EB_LEAF)
				troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
			node = container_of(eb_untag(troot, EB_LEAF),
					    struct ebmb_node, node.branches);
			return node;
		}

		node_bit >>= 1; /* strip cover bit */
		node_bit = ~node_bit + (pos << 3) + 8; // = (pos<<3) + (7 - node_bit)
		if (node_bit < 0) {
			/* This uncommon construction gives better performance
			 * because gcc does not try to reorder the loop. Tested to
			 * be fine with 2.95 to 4.2.
			 */
			while (1) {
				x++; pos++;
				if (node->key[pos-1] ^ *(unsigned char*)(x-1))
					return NULL; /* more than one full byte is different */
				node_bit += 8;
				if (node_bit >= 0)
					break;
			}
		}

		/* here we know that only the last byte differs, so 0 <= node_bit <= 7.
		 * We have 2 possibilities :
		 *   - more than the last bit differs => data does not match
		 *   - walk down on side = (x[pos] >> node_bit) & 1
		 */
		side = *(unsigned char *)x >> node_bit;
		if (((node->key[pos] >> node_bit) ^ side) > 1)
			return NULL;

		if (!(node->node.bit & 1)) {
			/* This is a cover node, it may be the entry we're
			 * looking for. We already know that it matches all the
			 * bits, let's compare prefixes and descend the cover
			 * subtree if they match.
			 */
			if ((unsigned short)node->node.bit >> 1 == pfx)
				troot = node->node.branches.b[EB_LEFT];
			else
				troot = node->node.branches.b[EB_RGHT];
			continue;
		}
		/* normal node: walk down the side designated by our key bit */
		side &= 1;
		troot = node->node.branches.b[side];
	}
}
Unexecuted instantiation: connection.c:__ebmb_lookup_prefix
Unexecuted instantiation: peers.c:__ebmb_lookup_prefix
Unexecuted instantiation: sample.c:__ebmb_lookup_prefix
Unexecuted instantiation: server.c:__ebmb_lookup_prefix
Unexecuted instantiation: stats.c:__ebmb_lookup_prefix
Unexecuted instantiation: stick_table.c:__ebmb_lookup_prefix
Unexecuted instantiation: tools.c:__ebmb_lookup_prefix
Unexecuted instantiation: acl.c:__ebmb_lookup_prefix
Unexecuted instantiation: backend.c:__ebmb_lookup_prefix
Unexecuted instantiation: ebmbtree.c:__ebmb_lookup_prefix
Unexecuted instantiation: ebsttree.c:__ebmb_lookup_prefix
Unexecuted instantiation: pattern.c:__ebmb_lookup_prefix
Unexecuted instantiation: shctx.c:__ebmb_lookup_prefix
Unexecuted instantiation: stats-file.c:__ebmb_lookup_prefix
598
599
600
/* Insert ebmb_node <new> into a prefix subtree starting at node root <root>.
 * Only new->key and new->pfx need be set with the key and its prefix length.
 * Note that bits between <pfx> and <len> are theoretically ignored and should be
 * zero, as it is not certain yet that they will always be ignored everywhere
 * (eg in bit compare functions).
 * The ebmb_node is returned.
 * If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
 * len is specified in bytes.
 *
 * Returns <new> on success, or the conflicting node <old> when the tree is
 * tagged unique and an identical key+prefix already exists.
 */
static forceinline struct ebmb_node *
__ebmb_insert_prefix(struct eb_root *root, struct ebmb_node *new, unsigned int len)
{
	struct ebmb_node *old;
	unsigned int side;
	eb_troot_t *troot, **up_ptr;
	eb_troot_t *root_right;
	int diff;
	int bit;                     /* counted in half-bits during the descent */
	eb_troot_t *new_left, *new_rght;
	eb_troot_t *new_leaf;
	int old_node_bit;
	unsigned int npfx = new->node.pfx;
	unsigned int npfx1 = npfx << 1;    /* prefix in half-bit units */
	const unsigned char *nkey = new->key;

	side = EB_LEFT;
	troot = root->b[EB_LEFT];
	root_right = root->b[EB_RGHT];
	if (unlikely(troot == NULL)) {
		/* Tree is empty, insert the leaf part below the left branch */
		root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
		new->node.leaf_p = eb_dotag(root, EB_LEFT);
		new->node.node_p = NULL; /* node part unused */
		return new;
	}

	/* clamp the comparison length (bits) to the new node's prefix */
	len <<= 3;
	if (len > npfx)
		len = npfx;

	/* The tree descent is fairly easy :
	 *  - first, check if we have reached a leaf node
	 *  - second, check if we have gone too far
	 *  - third, reiterate
	 * Everywhere, we use <new> for the node node we are inserting, <root>
	 * for the node we attach it to, and <old> for the node we are
	 * displacing below <new>. <troot> will always point to the future node
	 * (tagged with its type). <side> carries the side the node <new> is
	 * attached to below its parent, which is also where previous node
	 * was attached.
	 */

	bit = 0;
	while (1) {
		if (unlikely(eb_gettag(troot) == EB_LEAF)) {
			/* Insert above a leaf. Note that this leaf could very
			 * well be part of a cover node.
			 */
			old = container_of(eb_untag(troot, EB_LEAF),
					    struct ebmb_node, node.branches);
			new->node.node_p = old->node.leaf_p;
			up_ptr = &old->node.leaf_p;
			goto check_bit_and_break;
		}

		/* OK we're walking down this link */
		old = container_of(eb_untag(troot, EB_NODE),
				   struct ebmb_node, node.branches);
		old_node_bit = old->node.bit;
		/* Note that old_node_bit can be :
		 *   < 0    : dup tree
		 *   = 2N   : cover node for N bits
		 *   = 2N+1 : normal node at N bits
		 */

		if (unlikely(old_node_bit < 0)) {
			/* We're above a duplicate tree, so we must compare the whole value */
			new->node.node_p = old->node.node_p;
			up_ptr = &old->node.node_p;
		check_bit_and_break:
			/* No need to compare everything if the leaves are shorter than the new one. */
			if (len > old->node.pfx)
				len = old->node.pfx;
			bit = equal_bits(nkey, old->key, bit, len);
			break;
		}

		/* WARNING: for the two blocks below, <bit> is counted in half-bits */

		bit = equal_bits(nkey, old->key, bit, old_node_bit >> 1);
		bit = (bit << 1) + 1; // assume comparisons with normal nodes

		/* we must always check that our prefix is larger than the nodes
		 * we visit, otherwise we have to stop going down. The following
		 * test is able to stop before both normal and cover nodes.
		 */
		if (bit >= npfx1 && npfx1 < old_node_bit) {
			/* insert cover node here on the left */
			new->node.node_p = old->node.node_p;
			up_ptr = &old->node.node_p;
			new->node.bit = npfx1;    /* even value = cover node */
			diff = -1;                /* force attachment on the left */
			goto insert_above;
		}

		if (unlikely(bit < old_node_bit)) {
			/* The tree did not contain the key, so we insert <new> before the
			 * node <old>, and set ->bit to designate the lowest bit position in
			 * <new> which applies to ->branches.b[]. We know that the bit is not
			 * greater than the prefix length thanks to the test above.
			 */
			new->node.node_p = old->node.node_p;
			up_ptr = &old->node.node_p;
			new->node.bit = bit;
			diff = cmp_bits(nkey, old->key, bit >> 1);
			goto insert_above;
		}

		if (!(old_node_bit & 1)) {
			/* if we encounter a cover node with our exact prefix length, it's
			 * necessarily the same value, so we insert there as a duplicate on
			 * the left. For that, we go down on the left and the leaf detection
			 * code will finish the job.
			 */
			if (npfx1 == old_node_bit) {
				root = &old->node.branches;
				side = EB_LEFT;
				troot = root->b[side];
				continue;
			}

			/* cover nodes are always walked through on the right */
			side = EB_RGHT;
			bit = old_node_bit >> 1; /* recheck that bit */
			root = &old->node.branches;
			troot = root->b[side];
			continue;
		}

		/* we don't want to skip bits for further comparisons, so we must limit <bit>.
		 * However, since we're going down around <old_node_bit>, we know it will be
		 * properly matched, so we can skip this bit.
		 */
		old_node_bit >>= 1;
		bit = old_node_bit + 1;

		/* walk down on the side given by the new key's bit <old_node_bit> */
		root = &old->node.branches;
		side = old_node_bit & 7;
		side ^= 7;
		side = (nkey[old_node_bit >> 3] >> side) & 1;
		troot = root->b[side];
	}

	/* Right here, we have 4 possibilities :
	 * - the tree does not contain any leaf matching the
	 *   key, and we have new->key < old->key. We insert
	 *   new above old, on the left ;
	 *
	 * - the tree does not contain any leaf matching the
	 *   key, and we have new->key > old->key. We insert
	 *   new above old, on the right ;
	 *
	 * - the tree does contain the key with the same prefix
	 *   length. We add the new key next to it as a first
	 *   duplicate (since it was alone).
	 *
	 * The last two cases can easily be partially merged.
	 *
	 * - the tree contains a leaf matching the key, we have
	 *   to insert above it as a cover node. The leaf with
	 *   the shortest prefix becomes the left subtree and
	 *   the leaf with the longest prefix becomes the right
	 *   one. The cover node gets the min of both prefixes
	 *   as its new bit.
	 */

	/* first we want to ensure that we compare the correct bit, which means
	 * the largest common to both nodes.
	 */
	if (bit > npfx)
		bit = npfx;
	if (bit > old->node.pfx)
		bit = old->node.pfx;

	new->node.bit = (bit << 1) + 1; /* assume normal node by default */

	/* if one prefix is included in the second one, we don't compare bits
	 * because they won't necessarily match, we just proceed with a cover
	 * node insertion.
	 */
	diff = 0;
	if (bit < old->node.pfx && bit < npfx)
		diff = cmp_bits(nkey, old->key, bit);

	if (diff == 0) {
		/* Both keys match. Either it's a duplicate entry or we have to
		 * put the shortest prefix left and the largest one right below
		 * a new cover node. By default, diff==0 means we'll be inserted
		 * on the right.
		 */
		new->node.bit--; /* anticipate cover node insertion */
		if (npfx == old->node.pfx) {
			new->node.bit = -1; /* mark as new dup tree, just in case */

			if (unlikely(eb_gettag(root_right))) {
				/* we refuse to duplicate this key if the tree is
				 * tagged as containing only unique keys.
				 */
				return old;
			}

			if (eb_gettag(troot) != EB_LEAF) {
				/* there was already a dup tree below */
				struct eb_node *ret;
				ret = eb_insert_dup(&old->node, &new->node);
				return container_of(ret, struct ebmb_node, node);
			}
			/* otherwise fall through to insert first duplicate */
		}
		/* otherwise we just rely on the tests below to select the right side */
		else if (npfx < old->node.pfx)
			diff = -1; /* force insertion to left side */
	}

 insert_above:
	new_left = eb_dotag(&new->node.branches, EB_LEFT);
	new_rght = eb_dotag(&new->node.branches, EB_RGHT);
	new_leaf = eb_dotag(&new->node.branches, EB_LEAF);

	if (diff >= 0) {
		/* new >= old (or cover-right): old subtree on the left, new leaf right */
		new->node.branches.b[EB_LEFT] = troot;
		new->node.branches.b[EB_RGHT] = new_leaf;
		new->node.leaf_p = new_rght;
		*up_ptr = new_left;
	}
	else {
		/* new < old (or cover-left): new leaf on the left, old subtree right */
		new->node.branches.b[EB_LEFT] = new_leaf;
		new->node.branches.b[EB_RGHT] = troot;
		new->node.leaf_p = new_left;
		*up_ptr = new_rght;
	}

	/* attach the new node part in place of the old link */
	root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
	return new;
}
Unexecuted instantiation: connection.c:__ebmb_insert_prefix
Unexecuted instantiation: peers.c:__ebmb_insert_prefix
Unexecuted instantiation: sample.c:__ebmb_insert_prefix
Unexecuted instantiation: server.c:__ebmb_insert_prefix
Unexecuted instantiation: stats.c:__ebmb_insert_prefix
Unexecuted instantiation: stick_table.c:__ebmb_insert_prefix
Unexecuted instantiation: tools.c:__ebmb_insert_prefix
Unexecuted instantiation: acl.c:__ebmb_insert_prefix
Unexecuted instantiation: backend.c:__ebmb_insert_prefix
Unexecuted instantiation: ebmbtree.c:__ebmb_insert_prefix
Unexecuted instantiation: ebsttree.c:__ebmb_insert_prefix
Unexecuted instantiation: pattern.c:__ebmb_insert_prefix
Unexecuted instantiation: shctx.c:__ebmb_insert_prefix
Unexecuted instantiation: stats-file.c:__ebmb_insert_prefix
846
847
848
849
#endif /* _EBMBTREE_H */
850