/src/haproxy/include/import/cebtree-prv.h
Line | Count | Source |
1 | | /* |
2 | | * Compact Elastic Binary Trees - internal functions and types |
3 | | * |
4 | | * Copyright (C) 2014-2025 Willy Tarreau - w@1wt.eu |
5 | | * |
6 | | * Permission is hereby granted, free of charge, to any person obtaining |
7 | | * a copy of this software and associated documentation files (the |
8 | | * "Software"), to deal in the Software without restriction, including |
9 | | * without limitation the rights to use, copy, modify, merge, publish, |
10 | | * distribute, sublicense, and/or sell copies of the Software, and to |
11 | | * permit persons to whom the Software is furnished to do so, subject to |
12 | | * the following conditions: |
13 | | * |
14 | | * The above copyright notice and this permission notice shall be |
15 | | * included in all copies or substantial portions of the Software. |
16 | | * |
17 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
18 | | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES |
19 | | * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
20 | | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT |
21 | | * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, |
22 | | * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
23 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
24 | | * OTHER DEALINGS IN THE SOFTWARE. |
25 | | */ |
26 | | |
27 | | /* This file MUST NOT be included by public code, it contains macros, enums |
28 | | * with short names and function definitions that may clash with user code. |
29 | | * It may only be included by the respective types' C files. |
30 | | */ |
31 | | |
32 | | /* |
 * These trees are optimized for adding the smallest possible overhead to the stored
34 | | * data. This version uses the node's pointer as the key, for the purpose of |
35 | | * quickly finding its neighbours. |
36 | | * |
37 | | * A few properties : |
38 | | * - the xor between two branches of a node cannot be zero unless the two |
39 | | * branches are duplicate keys |
40 | | * - the xor between two nodes has *at least* the split bit set, possibly more |
41 | | * - the split bit is always strictly smaller for a node than for its parent, |
42 | | * which implies that the xor between the keys of the lowest level node is |
43 | | * always smaller than the xor between a higher level node. Hence the xor |
44 | | * between the branches of a regular leaf is always strictly larger than the |
45 | | * xor of its parent node's branches if this node is different, since the |
46 | | * leaf is associated with a higher level node which has at least one higher |
47 | | * level branch. The first leaf doesn't validate this but is handled by the |
48 | | * rules below. |
49 | | * - during the descent, the node corresponding to a leaf is always visited |
50 | | * before the leaf, unless it's the first inserted, nodeless leaf. |
51 | | * - the first key is the only one without any node, and it has both its |
52 | | * branches pointing to itself during insertion to detect it (i.e. xor==0). |
53 | | * - a leaf is always present as a node on the path from the root, except for |
54 | | * the inserted first key which has no node, and is recognizable by its two |
55 | | * branches pointing to itself. |
56 | | * - a consequence of the rules above is that a non-first leaf appearing below |
57 | | * a node will necessarily have an associated node with a split bit equal to |
58 | | * or greater than the node's split bit. |
 * - another consequence is that below a node, the split bits are different for
 *   each branch since both of them are already present above the node, thus
 *   at different levels, so their respective XOR values will be different.
 * - since all nodes in a given path have a different split bit, if a leaf has
 *   the same split bit as its parent node, then it is necessarily its
 *   associated leaf.
 *
65 | | * When descending along the tree, it is possible to know that a search key is |
66 | | * not present, because its XOR with both of the branches is strictly higher |
67 | | * than the inter-branch XOR. The reason is simple : the inter-branch XOR will |
68 | | * have its highest bit set indicating the split bit. Since it's the bit that |
69 | | * differs between the two branches, the key cannot have it both set and |
70 | | * cleared when comparing to the branch values. So xoring the key with both |
71 | | * branches will emit a higher bit only when the key's bit differs from both |
72 | | * branches' similar bit. Thus, the following equation : |
73 | | * (XOR(key, L) > XOR(L, R)) && (XOR(key, R) > XOR(L, R)) |
74 | | * is only true when the key should be placed above that node. Since the key |
75 | | * has a higher bit which differs from the node, either it has it set and the |
76 | | * node has it clear (same for both branches), or it has it clear and the node |
77 | | * has it set for both branches. For this reason it's enough to compare the key |
78 | | * with any node when the equation above is true, to know if it ought to be |
79 | | * present on the left or on the right side. This is useful for insertion and |
80 | | * for range lookups. |
81 | | */ |
82 | | |
83 | | #ifndef _CEBTREE_PRV_H |
84 | | #define _CEBTREE_PRV_H |
85 | | |
86 | | #include <sys/types.h> |
87 | | #include <inttypes.h> |
88 | | #include <stddef.h> |
89 | | #include <string.h> |
90 | | #include "cebtree.h" |
91 | | |
92 | | /* A few utility functions and macros that we need below */ |
93 | | |
94 | | /* This is used to test if a macro is defined and equals 1. The principle is |
95 | | * that the macro is passed as a value and its value concatenated to the word |
96 | | * "comma_for_one" to form a new macro name. The macro "comma_for_one1" equals |
97 | | * one comma, which, once used in an argument, will shift all of them by one, |
98 | | * so that we can use this to concatenate both a 1 and a 0 and always pick the |
99 | | * second one. |
100 | | */ |
/* Expands to a single comma. When the tested macro expands to 1, the pasted
 * name "comma_for_one1" expands to this comma, which splits what was a single
 * macro argument into two, shifting the trailing "1" into the second position.
 */
#define comma_for_one1 ,
/* Returns its second argument: 1 when the comma shift above occurred (macro
 * defined to 1), the appended 0 otherwise.
 */
#define _____equals_1(x, y, ...) (y)
#define ____equals_1(x, ...) _____equals_1(x, 0)
#define ___equals_1(x) ____equals_1(comma_for_one ## x 1)
/* extra level of indirection so that <x> is macro-expanded before pasting */
#define __equals_1(x) ___equals_1(x)
106 | | |
/* gcc 5 and clang 3 brought __has_attribute(), which is not well documented in
 * the case of gcc, but is convenient since handled at the preprocessor level.
 * In both cases it's possible to test for __has_attribute() using ifdef. When
 * not defined we remap this to the __has_attribute_<name> macro so that we'll
 * later be able to implement on a per-compiler basis those which are missing,
 * by defining __has_attribute_<name> to 1.
 */
#ifndef __has_attribute
#define __has_attribute(x) __equals_1(__has_attribute_ ## x)
#endif

/* gcc 10 and clang 3 brought __has_builtin() to test if a builtin exists.
 * Just like above, if it doesn't exist, we remap it to a macro allowing us
 * to define these ourselves by defining __has_builtin_<name> to 1.
 */
#ifndef __has_builtin
#define __has_builtin(x) __equals_1(__has_builtin_ ## x)
#endif

#if !defined(__GNUC__)
/* Some versions of glibc irresponsibly redefine __attribute__() to empty for
 * non-gcc compilers, and as such, silently break all constructors with other
 * compilers. Let's make sure such incompatibilities are detected if any, or
 * that the attribute is properly enforced. The self-referencing definition
 * below is intentional: a function-like macro does not expand recursively,
 * so __attribute__(x) is restored to itself.
 */
#undef __attribute__
#define __attribute__(x) __attribute__(x)
#endif

/* Define the missing __builtin_prefetch() for tcc. Prefetching is only a
 * performance hint, so expanding it to nothing is safe.
 */
#if defined(__TINYC__) && !defined(__builtin_prefetch)
#define __builtin_prefetch(addr, ...) do { } while (0)
#endif

/* __builtin_unreachable() was added in gcc 4.5 */
#if defined(__GNUC__) && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
#define __has_builtin___builtin_unreachable 1 /* make __has_builtin(__builtin_unreachable) return 1 */
#elif !__has_builtin(__builtin_unreachable)
#define __builtin_unreachable() do { } while (1)
#endif
147 | | |
148 | | /* FLSNZ: find last set bit for non-zero value. "Last" here means the highest |
149 | | * one. It returns a value from 1 to 32 for 1<<0 to 1<<31. |
150 | | */ |
151 | | |
152 | | #if defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 2))) |
153 | | /* gcc >= 4.2 brings __builtin_clz() and __builtin_clzl(), also usable for |
154 | | * non-x86. However on x86 gcc does bad stuff if not properly handled. It xors |
155 | | * the bsr return with 31 and since it doesn't know how to deal with a xor |
156 | | * followed by a negation, it adds two instructions when using 32-clz(). Thus |
157 | | * instead we first cancel the xor using another one then add one. Even on ARM |
158 | | * that provides a clz instruction, it saves one register to proceed like this. |
159 | | */ |
160 | | |
/* flsnz8(): highest set bit (1..8) of the low 8 bits of <x>. The argument is
 * parenthesized so that expression arguments (e.g. flsnz8(a + b)) are fully
 * evaluated before being truncated by the cast; without the parentheses the
 * cast would only apply to the expression's first operand.
 */
#define flsnz8(x) flsnz32((unsigned char)(x))

/* Returns the position (1..32) of the highest set bit of non-zero <x>.
 * (clz ^ 31) + 1 is used rather than 32 - clz: on x86 gcc implements clz as
 * bsr xored with 31, and this form lets it cancel that xor with ours instead
 * of emitting extra instructions (see the comment above this section).
 */
static inline __attribute__((always_inline)) unsigned int flsnz32(unsigned int x)
{
	return (__builtin_clz(x) ^ 31) + 1;
}
167 | | |
/* Returns the position (1..64) of the highest set bit of non-zero <x>.
 * Xoring the clz result with 63 instead of subtracting it from 64 keeps the
 * generated code short, for the same reason as in flsnz32() above.
 */
static inline __attribute__((always_inline)) unsigned int flsnz64(unsigned long long x)
{
	unsigned int zeroes = __builtin_clzll(x);

	return (zeroes ^ 63) + 1;
}
172 | | |
173 | | #elif (defined(__i386__) || defined(__x86_64__)) && !defined(__atom__) /* Not gcc >= 4.2 */ |
174 | | /* DO NOT USE ON ATOM! The instruction is emulated and is several times slower |
175 | | * than doing the math by hand. |
176 | | */ |
/* flsnz8(): highest set bit (1..8) of the low 8 bits of <x>. The argument is
 * parenthesized so that expression arguments (e.g. flsnz8(a + b)) are fully
 * evaluated before being truncated by the cast; without the parentheses the
 * cast would only apply to the expression's first operand.
 */
#define flsnz8(x) flsnz32((unsigned char)(x))
178 | | |
/* Returns the position (1..32) of the highest set bit of <x>, using the x86
 * bsr instruction directly. The result is undefined if <x> is zero (bsr
 * leaves the destination unmodified in that case).
 */
static inline __attribute__((always_inline)) unsigned int flsnz32(unsigned int x)
{
	unsigned int r;
	__asm__("bsrl %1,%0\n"
		: "=r" (r) : "rm" (x));
	return r + 1; /* bsr reports 0..31, we report 1..32 */
}
186 | | |
#if defined(__x86_64__)
/* 64-bit variant: bsrq directly yields the highest set bit position (0..63).
 * Undefined result if <x> is zero.
 */
static inline __attribute__((always_inline)) unsigned int flsnz64(unsigned long long x)
{
	unsigned long long r;
	__asm__("bsrq %1,%0\n"
		: "=r" (r) : "rm" (x));
	return r + 1;
}
#else
/* 32-bit x86: reduce the 64-bit value to the 32-bit half containing its
 * highest set bit, and add 32 when that bit lies in the upper half.
 */
static inline __attribute__((always_inline)) unsigned int flsnz64(unsigned long long x)
{
	unsigned int h;
	unsigned int bits = 32;

	h = x >> 32;
	if (!h) {
		h = x; /* truncates to the low 32 bits */
		bits = 0;
	}
	return flsnz32(h) + bits;
}
#endif
209 | | |
210 | | #else /* Neither gcc >= 4.2 nor x86, use generic code */ |
211 | | |
/* Generic version: returns the position (1..8) of the highest set bit of the
 * 8-bit value <x>. The value is first reduced to a single nibble, then a
 * 2-bit-per-entry lookup table packed into a 32-bit constant provides the
 * bit position within that nibble.
 */
static inline __attribute__((always_inline)) unsigned int flsnz8(unsigned int x)
{
	unsigned int bits = (x >> 4) ? 4 : 0;

	x >>= bits;
	return bits + ((0xFFFFAA50U >> (x << 1)) & 3) + 1;
}
218 | | |
/* Generic flsnz32(): returns the position (1..32) of the highest set bit of
 * non-zero <___a>. Each step keeps only the half/quarter/... of the bits
 * which contains the highest set bit and accumulates the corresponding
 * offset, in a branch-light binary search. Implemented as a GNU statement
 * expression so it can be used like a function.
 */
#define flsnz32(___a) ({ \
	register unsigned int ___x, ___bits = 0; \
	___x = (___a); \
	if (___x & 0xffff0000) { ___x &= 0xffff0000; ___bits += 16;} \
	if (___x & 0xff00ff00) { ___x &= 0xff00ff00; ___bits += 8;} \
	if (___x & 0xf0f0f0f0) { ___x &= 0xf0f0f0f0; ___bits += 4;} \
	if (___x & 0xcccccccc) { ___x &= 0xcccccccc; ___bits += 2;} \
	if (___x & 0xaaaaaaaa) { ___x &= 0xaaaaaaaa; ___bits += 1;} \
	___bits + 1; \
	})
229 | | |
/* Generic flsnz64(): returns the position (1..64) of the highest set bit of
 * non-zero <x>, by reducing the value to the 32-bit half that contains the
 * highest bit and letting flsnz32() do the rest.
 */
static inline __attribute__((always_inline)) unsigned int flsnz64(unsigned long long x)
{
	unsigned int half = x >> 32;

	if (half)
		return flsnz32(half) + 32;
	return flsnz32((unsigned int)x);
}
242 | | |
243 | | #endif |
244 | | |
/* flsnz() for a "long" operand: picks the 32- or 64-bit implementation from
 * the platform's long size; the sizeof() test folds at compile time.
 */
#define flsnz_long(x) ((sizeof(long) > 4) ? flsnz64(x) : flsnz32(x))
/* type-generic flsnz(): dispatches to flsnz8/flsnz32/flsnz64 based on the
 * size of the argument's type, resolved at compile time.
 */
#define flsnz(x) ((sizeof(x) > 4) ? flsnz64(x) : (sizeof(x) > 1) ? flsnz32(x) : flsnz8(x))
247 | | |
248 | | /* Compare blocks <a> and <b> byte-to-byte, from bit <ignore> to bit <len-1>. |
249 | | * Return the number of equal bits between strings, assuming that the first |
250 | | * <ignore> bits are already identical. It is possible to return slightly more |
251 | | * than <len> bits if <len> does not stop on a byte boundary and we find exact |
252 | | * bytes. Note that parts or all of <ignore> bits may be rechecked. It is only |
253 | | * passed here as a hint to speed up the check. |
254 | | */ |
static
#if defined(__OPTIMIZE_SIZE__)
__attribute__((noinline))
#else
inline __attribute__((always_inline))
#endif
size_t equal_bits(const unsigned char *a,
		  const unsigned char *b,
		  size_t ignore, size_t len)
{
	/* round the <ignore> hint down to a byte boundary and start the
	 * byte-wise comparison there.
	 */
	size_t bit = ignore & ~(size_t)7;
	const unsigned char *pa = a + (bit >> 3);
	const unsigned char *pb = b + (bit >> 3);

	while (bit < len) {
		unsigned char diff = *pa++ ^ *pb++;

		bit += 8;
		if (diff) {
			/* <diff> holds the differing bits of the current byte.
			 * Subtract the position of its highest set bit, since
			 * low bit numbers map to high positions in the byte
			 * when comparing as strings.
			 */
			bit -= flsnz_long(diff);
			break;
		}
	}
	return bit;
}
285 | | |
286 | | /* Compare strings <a> and <b> byte-to-byte, from bit <ignore> to the last 0. |
287 | | * Return the number of equal bits between strings, assuming that the first |
288 | | * <ignore> bits are already identical. Note that parts or all of <ignore> bits |
289 | | * may be rechecked. It is only passed here as a hint to speed up the check. |
290 | | * The caller is responsible for not passing an <ignore> value larger than any |
291 | | * of the two strings. However, referencing any bit from the trailing zero is |
292 | | * permitted. Equal strings are reported as a negative number of bits, which |
293 | | * indicates the end was reached. |
294 | | */ |
static
#if defined(__OPTIMIZE_SIZE__)
__attribute__((noinline))
#else
inline __attribute__((always_inline))
#endif
size_t string_equal_bits(const unsigned char *a,
			 const unsigned char *b,
			 size_t ignore)
{
	unsigned char c, d;
	size_t beg;

	/* first byte to compare: the <ignore> hint rounded down to a byte */
	beg = ignore >> 3;

	/* skip known and identical bits. We stop at the first different byte
	 * or at the first zero we encounter on either side. The loop is
	 * manually unrolled twice, hence the two exit labels.
	 */
	for (;; beg += 2) {
		c = a[beg + 0];
		d = b[beg + 0];
		c ^= d;
		if (__builtin_expect(c != 0, 0))
			goto brk1;
		if (!d) /* both bytes equal and NUL: strings ended together */
			goto same;
		c = a[beg + 1];
		d = b[beg + 1];
		c ^= d;
		if (__builtin_expect(c != 0, 0))
			goto brk2;
		if (!d)
			goto same;
	}
 brk2:
	beg++; /* the difference was found on the second byte of the pair */
 brk1:

	/* OK now we know that a and b differ at byte <beg>, and <c> holds the
	 * bit differences of that byte.
	 * We have to find what bit is differing and report it as the number of
	 * identical bits. Note that low bit numbers are assigned to high positions
	 * in the byte, as we compare them as strings.
	 */
	return (beg << 3) + ((flsnz(c) - 1) ^ 7);
 same:
	/* equal strings: reported as a negative number of bits (see contract
	 * in the comment above this function).
	 */
	return (size_t)-1;
}
342 | | |
343 | | /* pointer tagging / untagging, to turn ceb_root to ceb_node and conversely */ |
344 | | |
345 | | /* tag an untagged pointer (node -> root) */ |
/* tag an untagged pointer (node -> root) by adding <tag> to its address */
static inline struct ceb_root *_ceb_dotag(const struct ceb_node *node, const uintptr_t tag)
{
	uintptr_t addr = (uintptr_t)node;

	addr += tag;
	return (struct ceb_root *)addr;
}
350 | | |
351 | | /* untag a tagged pointer (root -> node) */ |
/* untag a tagged pointer (root -> node) by subtracting <tag> from its address */
static inline struct ceb_node *_ceb_untag(const struct ceb_root *node, const uintptr_t tag)
{
	uintptr_t addr = (uintptr_t)node;

	addr -= tag;
	return (struct ceb_node *)addr;
}
356 | | |
357 | | /* clear a pointer's tag, regardless of its previous value */ |
/* clear a pointer's tag (bit 0), regardless of its previous value */
static inline struct ceb_node *_ceb_clrtag(const struct ceb_root *node)
{
	uintptr_t addr = (uintptr_t)node;

	addr &= ~(uintptr_t)1;
	return (struct ceb_node *)addr;
}
362 | | |
363 | | /* report the pointer's tag */ |
/* report the pointer's tag (bit 0): returns 0 or 1 */
static inline uintptr_t _ceb_gettag(const struct ceb_root *node)
{
	uintptr_t addr = (uintptr_t)node;

	return addr & (uintptr_t)1;
}
368 | | |
369 | | /* These macros are used by upper level files to create two variants of their |
370 | | * exported functions: |
371 | | * - one which uses sizeof(struct ceb_node) as the key offset, for nodes with |
372 | | * adjacent keys ; these ones are named <pfx><sfx>(root, ...). This is |
373 | | * defined when CEB_USE_BASE is defined. |
374 | | * - one with an explicit key offset passed by the caller right after the |
375 | | * root. This is defined when CEB_USE_OFST is defined. |
376 | | * Both rely on a forced inline version with a body that immediately follows |
377 | | * the declaration, so that the declaration looks like a single decorated |
378 | | * function while 2 are built in practice. There are variants for the basic one |
379 | | * with 0, 1 and 2 extra arguments after the root. The root and the key offset |
380 | | * are always the first two arguments, and the key offset never appears in the |
381 | | * first variant, it's always replaced by sizeof(struct ceb_node) in the calls |
382 | | * to the inline version. |
383 | | */ |
/* When CEB_USE_BASE is defined, _CEB_DEF_BASE(x) emits its argument, which
 * instantiates the <pfx>_imm<sfx>() variants using sizeof(struct ceb_node) as
 * the fixed key offset. Otherwise it emits nothing.
 */
#if defined(CEB_USE_BASE)
# define _CEB_DEF_BASE(x) x
#else
# define _CEB_DEF_BASE(x)
#endif

/* When CEB_USE_OFST is defined, _CEB_DEF_OFST(x) emits its argument, which
 * instantiates the <pfx>_ofs<sfx>() variants taking an explicit key offset
 * right after the root. Otherwise it emits nothing.
 */
#if defined(CEB_USE_OFST)
# define _CEB_DEF_OFST(x) x
#else
# define _CEB_DEF_OFST(x)
#endif
395 | | |
396 | | #define CEB_FDECL2(type, pfx, sfx, type1, arg1, type2, arg2) \ |
397 | | _CEB_FDECL2(type, pfx, sfx, type1, arg1, type2, arg2) |
398 | | |
399 | | #define _CEB_FDECL2(type, pfx, sfx, type1, arg1, type2, arg2) \ |
400 | | static inline __attribute__((always_inline)) \ |
401 | | type _##pfx##sfx(type1 arg1, type2 arg2); \ |
402 | | _CEB_DEF_BASE(type pfx##_imm##sfx(type1 arg1) { \ |
403 | | return _##pfx##sfx(arg1, sizeof(struct ceb_node)); \ |
404 | | }) \ |
405 | | _CEB_DEF_OFST(type pfx##_ofs##sfx(type1 arg1, type2 arg2) { \ |
406 | | return _##pfx##sfx(arg1, arg2); \ |
407 | | }) \ |
408 | | static inline __attribute__((always_inline)) \ |
409 | | type _##pfx##sfx(type1 arg1, type2 arg2) |
410 | | /* function body follows */ |
411 | | |
412 | | #define CEB_FDECL3(type, pfx, sfx, type1, arg1, type2, arg2, type3, arg3) \ |
413 | | _CEB_FDECL3(type, pfx, sfx, type1, arg1, type2, arg2, type3, arg3) |
414 | | |
415 | | #define _CEB_FDECL3(type, pfx, sfx, type1, arg1, type2, arg2, type3, arg3) \ |
416 | | static inline __attribute__((always_inline)) \ |
417 | | type _##pfx##sfx(type1 arg1, type2 arg2, type3 arg3); \ |
418 | | _CEB_DEF_BASE(type pfx##_imm##sfx(type1 arg1, type3 arg3) { \ |
419 | | return _##pfx##sfx(arg1, sizeof(struct ceb_node), arg3); \ |
420 | | }) \ |
421 | | _CEB_DEF_OFST(type pfx##_ofs##sfx(type1 arg1, type2 arg2, type3 arg3) { \ |
422 | | return _##pfx##sfx(arg1, arg2, arg3); \ |
423 | | }) \ |
424 | | static inline __attribute__((always_inline)) \ |
425 | | type _##pfx##sfx(type1 arg1, type2 arg2, type3 arg3) |
426 | | /* function body follows */ |
427 | | |
/* Four-argument variant of the function declarator; see _CEB_FDECL2 above.
 * The outer CEB_FDECL4() wrapper adds one expansion level so that its
 * arguments are macro-expanded before token pasting takes place.
 */
#define CEB_FDECL4(type, pfx, sfx, type1, arg1, type2, arg2, type3, arg3, type4, arg4) \
	_CEB_FDECL4(type, pfx, sfx, type1, arg1, type2, arg2, type3, arg3, type4, arg4)

#define _CEB_FDECL4(type, pfx, sfx, type1, arg1, type2, arg2, type3, arg3, type4, arg4) \
	static inline __attribute__((always_inline)) \
	type _##pfx##sfx(type1 arg1, type2 arg2, type3 arg3, type4 arg4); \
	_CEB_DEF_BASE(type pfx##_imm##sfx(type1 arg1, type3 arg3, type4 arg4) { \
		return _##pfx##sfx(arg1, sizeof(struct ceb_node), arg3, arg4); \
	}) \
	_CEB_DEF_OFST(type pfx##_ofs##sfx(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
		return _##pfx##sfx(arg1, arg2, arg3, arg4); \
	}) \
	static inline __attribute__((always_inline)) \
	type _##pfx##sfx(type1 arg1, type2 arg2, type3 arg3, type4 arg4)
	/* function body follows */
443 | | |
/* Five-argument variant of the function declarator; see _CEB_FDECL2 above.
 * The outer CEB_FDECL5() wrapper adds one expansion level so that its
 * arguments are macro-expanded before token pasting takes place.
 */
#define CEB_FDECL5(type, pfx, sfx, type1, arg1, type2, arg2, type3, arg3, type4, arg4, type5, arg5) \
	_CEB_FDECL5(type, pfx, sfx, type1, arg1, type2, arg2, type3, arg3, type4, arg4, type5, arg5)

#define _CEB_FDECL5(type, pfx, sfx, type1, arg1, type2, arg2, type3, arg3, type4, arg4, type5, arg5) \
	static inline __attribute__((always_inline)) \
	type _##pfx##sfx(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5); \
	_CEB_DEF_BASE(type pfx##_imm##sfx(type1 arg1, type3 arg3, type4 arg4, type5 arg5) { \
		return _##pfx##sfx(arg1, sizeof(struct ceb_node), arg3, arg4, arg5); \
	}) \
	_CEB_DEF_OFST(type pfx##_ofs##sfx(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) { \
		return _##pfx##sfx(arg1, arg2, arg3, arg4, arg5); \
	}) \
	static inline __attribute__((always_inline)) \
	type _##pfx##sfx(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5)
	/* function body follows */
459 | | |
/* tree walk method: key, left, right.
 * NOTE: the ordering of this enum is load-bearing: the descent code tests
 * "meth >= CEB_WM_KEQ" to distinguish keyed walks from key-less ones, so any
 * new value must be inserted on the proper side of CEB_WM_KEQ.
 */
enum ceb_walk_meth {
	CEB_WM_FST,     /* look up "first" (walk left only) */
	CEB_WM_NXT,     /* look up "next" (walk right once then left) */
	CEB_WM_PRV,     /* look up "prev" (walk left once then right) */
	CEB_WM_LST,     /* look up "last" (walk right only) */
	/* all methods from CEB_WM_KEQ and above do have a key */
	CEB_WM_KEQ,     /* look up the node equal to the key  */
	CEB_WM_KGE,     /* look up the node greater than or equal to the key */
	CEB_WM_KGT,     /* look up the node greater than the key */
	CEB_WM_KLE,     /* look up the node lower than or equal to the key */
	CEB_WM_KLT,     /* look up the node lower than the key */
	CEB_WM_KNX,     /* look up the node's key first, then find the next */
	CEB_WM_KPR,     /* look up the node's key first, then find the prev */
};
475 | | |
/* Supported key types. "direct" storage means the key material lives at the
 * key offset inside the node itself; "indirect" means that location holds a
 * pointer to the key (see union ceb_key_storage below).
 */
enum ceb_key_type {
	CEB_KT_ADDR,    /* the key is the node's address */
	CEB_KT_U32,     /* 32-bit unsigned word in key_u32 */
	CEB_KT_U64,     /* 64-bit unsigned word in key_u64 */
	CEB_KT_MB,      /* fixed size memory block in (key_u64,key_ptr), direct storage */
	CEB_KT_IM,      /* fixed size memory block in (key_u64,key_ptr), indirect storage */
	CEB_KT_ST,      /* NUL-terminated string in key_ptr, direct storage */
	CEB_KT_IS,      /* NUL-terminated string in key_ptr, indirect storage */
};
485 | | |
/* Overlay describing how a node's key is stored at the key offset. The
 * zero-length arrays give typed in-place access for direct storage, while
 * <ptr> covers the indirect variants where only a pointer to the key is
 * stored at that location.
 */
union ceb_key_storage {
	uint32_t u32;           /* CEB_KT_U32 */
	uint64_t u64;           /* CEB_KT_U64 */
	unsigned long ul;       /* native-word scalar access; presumably for address keys — not used in this chunk */
	unsigned char mb[0];    /* CEB_KT_MB: memory block stored in place */
	unsigned char str[0];   /* CEB_KT_ST: NUL-terminated string stored in place */
	unsigned char *ptr;     /* for CEB_KT_IS and CEB_KT_IM (indirect storage) */
};
494 | | |
/* returns the ceb_key_storage pointer for node <n> and offset <o>, i.e. the
 * location of the node's key, <o> bytes past the node. Used by all key-type
 * specific accesses in the descent code.
 */
#define NODEK(n, o) ((union ceb_key_storage*)(((char *)(n)) + (o)))
497 | | |
/* Generic tree descent function. It must absolutely be inlined so that the
 * compiler can eliminate the tests related to the various return pointers,
 * which must either point to a local variable in the caller, or be NULL.
 * It must not be called with an empty tree, it's the caller business to
 * deal with this special case. It returns in ret_root the location of the
 * pointer to the leaf (i.e. where we have to insert ourselves). The integer
 * pointed to by ret_nside will contain the side the leaf should occupy at
 * its own node, with the sibling being *ret_root. Note that keys for fixed-
 * size arrays are passed in key_ptr with their length in key_u64. For keyless
 * nodes whose address serves as the key, the pointer needs to be passed in
 * key_ptr, and pxor64 will be used internally.
 * The support for duplicates is advertised by ret_is_dup not being null; it
 * will be filled on return with an indication whether the node belongs to a
 * duplicate list or not.
 * All ret_* arguments are optional outputs: each one is only written to when
 * non-NULL, which, thanks to forced inlining, lets the compiler drop the
 * corresponding code paths for callers that pass NULL.
 */
static inline __attribute__((always_inline))
struct ceb_node *_ceb_descend(struct ceb_root **root,
                              enum ceb_walk_meth meth,
                              ptrdiff_t kofs,
                              enum ceb_key_type key_type,
                              uint32_t key_u32,
                              uint64_t key_u64,
                              const void *key_ptr,
                              int *ret_nside,
                              struct ceb_root ***ret_root,
                              struct ceb_node **ret_lparent,
                              int *ret_lpside,
                              struct ceb_node **ret_nparent,
                              int *ret_npside,
                              struct ceb_node **ret_gparent,
                              int *ret_gpside,
                              struct ceb_root **ret_back,
                              int *ret_is_dup)
{
#if defined(__GNUC__) && (__GNUC__ >= 12) && !defined(__OPTIMIZE__)
	/* Avoid a bogus warning with gcc 12 and above: it warns about negative
	 * memcmp() length in non-existing code paths at -O0, as reported here:
	 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=114622
	 */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-overread"
#endif
	struct ceb_node *node;
	union ceb_key_storage *k;
	struct ceb_node *gparent = NULL;
	struct ceb_node *bnode = NULL;
	struct ceb_node *lparent;
	uint32_t pxor32 __attribute__((unused)) = ~0U;   // previous xor between branches
	uint64_t pxor64 __attribute__((unused)) = ~0ULL; // previous xor between branches
	int gpside = 0;   // side on the grand parent
	long lpside = 0;  // side on the leaf's parent
	long brside = 0;  // branch side when descending
	size_t llen = 0;  // left vs key matching length
	size_t rlen = 0;  // right vs key matching length
	size_t plen = 0;  // previous common len between branches
	int is_leaf = 0;  // set if the current node is a leaf

	/* the parent will be the (possibly virtual) node so that
	 * &lparent->l == root, i.e. container_of(root, struct ceb_node, b[0]).
	 */
	lparent = (struct ceb_node *)((char *)root - offsetof(struct ceb_node, b));
	gparent = lparent;
	if (ret_nparent)
		*ret_nparent = NULL;
	if (ret_npside)
		*ret_npside = 0;

	/* for key-less descents we need to set the initial branch to take */
	switch (meth) {
	case CEB_WM_NXT:
	case CEB_WM_LST:
		brside = 1; // start right for next/last
		break;
	case CEB_WM_FST:
	case CEB_WM_PRV:
	default:
		brside = 0; // start left for first/prev
		break;
	}

	/* In case of deletion, we need the node's parent and side. It's
	 * normally discovered during the descent while comparing branches,
	 * but there's a case where it's not possible, it's when the root
	 * is the node's parent because the first node is the one we're
	 * looking for. So we have to perform this check here.
	 */
	if (meth >= CEB_WM_KEQ && ret_nparent && ret_npside) {
		union ceb_key_storage *k = NODEK(_ceb_clrtag(*root), kofs);

		if (((key_type == CEB_KT_MB || key_type == CEB_KT_IM) &&
		     (memcmp(key_ptr, ((key_type == CEB_KT_MB) ? k->mb : k->ptr), key_u64) == 0)) ||
		    ((key_type == CEB_KT_ST || key_type == CEB_KT_IS) &&
		     (strcmp(key_ptr, (const void *)((key_type == CEB_KT_ST) ? k->str : k->ptr)) == 0))) {
			*ret_nparent = lparent;
			*ret_npside = lpside;
		}
	}

	/* the previous xor is initialized to the largest possible inter-branch
	 * value so that it can never match on the first test as we want to use
	 * it to detect a leaf vs node. That's achieved with plen==0 for arrays
	 * and pxorXX==~0 for scalars.
	 */
	node = _ceb_clrtag(*root);
	is_leaf = _ceb_gettag(*root);

	if (ret_lpside) {
		/* this is a deletion, benefits from prefetching */
		__builtin_prefetch(node->b[0], 0);
		__builtin_prefetch(node->b[1], 0);
	}

	/* main descent loop: one visited node per iteration, stopping on a
	 * leaf or as soon as the key provably cannot be below.
	 */
	while (1) {
		union ceb_key_storage *lks, *rks;
		struct ceb_node *ln, *rn, *next;
		struct ceb_root *lr, *rr;
		int next_leaf, lnl, rnl;

		lr = node->b[0]; // tagged versions
		rr = node->b[1];

		/* get a copy of the corresponding nodes */
		lnl = _ceb_gettag(lr);
		ln  = _ceb_clrtag(lr);
		rnl = _ceb_gettag(rr);
		rn  = _ceb_clrtag(rr);

		/* neither pointer is tagged */
		k = NODEK(node, kofs);

		if (is_leaf)
			break;

		/* Tests show that this is the most optimal location to start
		 * a prefetch for adjacent nodes.
		 */
		__builtin_prefetch(ln, 0);
		__builtin_prefetch(rn, 0);

		lks = NODEK(ln, kofs);
		rks = NODEK(rn, kofs);

		/* In the following block, we're dealing with type-specific
		 * operations which follow the same construct for each type:
		 *   1) calculate the new side for key lookups (otherwise keep
		 *      the current side, e.g. for first/last). Doing it early
		 *      allows the CPU to more easily predict next branches and
		 *      is faster by ~10%. For complex bits we keep the length
		 *      of identical bits instead of xor. We can also xor lkey
		 *      and rkey with key and use it everywhere later but it
		 *      doesn't seem to bring anything.
		 *
		 *   2) calculate the xor between the two sides to figure the
		 *      split bit position. If the new split bit is before the
		 *      previous one, we've reached a leaf: each leaf we visit
		 *      had its node part already visited. The only way to
		 *      distinguish them is that the inter-branch xor of the
		 *      leaf will be the node's one, and will necessarily be
		 *      larger than the previous node's xor if the node is
		 *      above (we've already checked for direct descendent
		 *      below). Said differently, if an inter-branch xor is
		 *      strictly larger than the previous one, it necessarily
		 *      is the one of an upper node, so what we're seeing
		 *      cannot be the node, hence it's the leaf. The case where
		 *      they're equal was already dealt with by the test at the
		 *      end of the loop (node points to self). For scalar keys,
		 *      we directly store the last xor value in pxorXX. For
		 *      arrays and strings, instead we store the previous equal
		 *      length.
		 *
		 *   3) for lookups, check if the looked key still has a chance
		 *      to be below: if it has a xor with both branches that is
		 *      larger than the xor between them, it cannot be there,
		 *      since it means that it differs from these branches by
		 *      at least one bit that's higher than the split bit,
		 *      hence not common to these branches. In such cases:
		 *      - if we're just doing a lookup, the key is not found
		 *        and we fail.
		 *      - if we are inserting, we must stop here and we have
		 *        the guarantee to be above a node.
		 *      - if we're deleting, it could be the key we were
		 *        looking for so we have to check for it as long as
		 *        it's still possible to keep a copy of the node's
		 *        parent.
		 */

		if (key_type == CEB_KT_U32) {
			uint32_t xor32;   // left vs right branch xor
			uint32_t kl, kr;

			kl = lks->u32; kr = rks->u32;
			if (meth >= CEB_WM_KEQ) {
				kl ^= key_u32; kr ^= key_u32;
				brside = kl >= kr;
			}

			xor32 = kl ^ kr;
			if (meth >= CEB_WM_KEQ) {
				/* let's stop if our key is not there */
				if (kl > xor32 && kr > xor32)
					break;

				if (ret_nparent && !*ret_nparent && ret_npside) {
					if (key_u32 == k->u32) {
						*ret_nparent = lparent;
						*ret_npside = lpside;
					}
				}

				/* for pure lookups, no need to go down the leaf
				 * if we've found the key.
				 */
				if (!ret_root && !ret_lpside && !ret_lparent &&
				    !ret_gpside && !ret_gparent && !ret_back) {
					if (key_u32 == k->u32)
						break;
				}
			}
			pxor32 = xor32;
		}
		else if (key_type == CEB_KT_U64) {
			uint64_t xor64;   // left vs right branch xor
			uint64_t kl, kr;

			kl = lks->u64; kr = rks->u64;
			if (meth >= CEB_WM_KEQ) {
				kl ^= key_u64; kr ^= key_u64;
				brside = kl >= kr;
			}

			xor64 = kl ^ kr;
			if (meth >= CEB_WM_KEQ) {
				/* let's stop if our key is not there */
				if (kl > xor64 && kr > xor64)
					break;

				if (ret_nparent && !*ret_nparent && ret_npside) {
					if (key_u64 == k->u64) {
						*ret_nparent = lparent;
						*ret_npside = lpside;
					}
				}

				/* for pure lookups, no need to go down the leaf
				 * if we've found the key.
				 */
				if (!ret_root && !ret_lpside && !ret_lparent &&
				    !ret_gpside && !ret_gparent && !ret_back) {
					if (key_u64 == k->u64)
						break;
				}
			}
			pxor64 = xor64;
		}
		else if (key_type == CEB_KT_ADDR) {
			uintptr_t xoraddr;   // left vs right branch xor
			uintptr_t kl, kr;

			kl = (uintptr_t)lks; kr = (uintptr_t)rks;
			if (meth >= CEB_WM_KEQ) {
				kl ^= (uintptr_t)key_ptr; kr ^= (uintptr_t)key_ptr;
				brside = kl >= kr;
			}

			xoraddr = kl ^ kr;
			if (meth >= CEB_WM_KEQ) {
				/* let's stop if our key is not there */
				if (kl > xoraddr && kr > xoraddr)
					break;

				if (ret_nparent && !*ret_nparent && ret_npside) {
					if ((uintptr_t)key_ptr == (uintptr_t)node) {
						*ret_nparent = lparent;
						*ret_npside = lpside;
					}
				}

				/* for pure lookups, no need to go down the leaf
				 * if we've found the key.
				 */
				if (!ret_root && !ret_lpside && !ret_lparent &&
				    !ret_gpside && !ret_gparent && !ret_back) {
					if ((uintptr_t)key_ptr == (uintptr_t)node)
						break;
				}
			}
			pxor64 = xoraddr;
		}
		else if (key_type == CEB_KT_MB || key_type == CEB_KT_IM) {
			size_t xlen = 0; // left vs right matching length

			if (meth >= CEB_WM_KEQ) {
				/* measure identical lengths */
				llen = equal_bits(key_ptr, (key_type == CEB_KT_MB) ? lks->mb : lks->ptr, plen, key_u64 << 3);
				rlen = equal_bits(key_ptr, (key_type == CEB_KT_MB) ? rks->mb : rks->ptr, plen, key_u64 << 3);
				brside = llen <= rlen;
			}

			xlen = equal_bits((key_type == CEB_KT_MB) ? lks->mb : lks->ptr,
					  (key_type == CEB_KT_MB) ? rks->mb : rks->ptr, plen, key_u64 << 3);

			if (meth >= CEB_WM_KEQ) {
				/* let's stop if our key is not there */
				if (llen < xlen && rlen < xlen)
					break;

				/* key_u64 << 3 is the key length in bits: a
				 * matching length equal to it means a full match.
				 */
				if (ret_nparent && ret_npside && !*ret_nparent &&
				    ((llen == key_u64 << 3) || (rlen == key_u64 << 3))) {
					*ret_nparent = node;
					*ret_npside = brside;
				}

				/* for pure lookups, no need to go down the leaf
				 * if we've found the key.
				 */
				if (!ret_root && !ret_lpside && !ret_lparent &&
				    !ret_gpside && !ret_gparent && !ret_back) {
					if (llen == key_u64 << 3) {
						node = ln;
						plen = llen;
						break;
					}
					if (rlen == key_u64 << 3) {
						node = rn;
						plen = rlen;
						break;
					}
				}
			}
			plen = xlen;
		}
		else if (key_type == CEB_KT_ST || key_type == CEB_KT_IS) {
			size_t xlen = 0; // left vs right matching length

			if (meth >= CEB_WM_KEQ) {
				/* Note that a negative length indicates an
				 * equal value with the final zero reached, but
				 * it is still needed to descend to find the
				 * leaf. We take that negative length for an
				 * infinite one, hence the uint cast.
				 */
				llen = string_equal_bits(key_ptr, (key_type == CEB_KT_ST) ? lks->str : lks->ptr, plen);
				rlen = string_equal_bits(key_ptr, (key_type == CEB_KT_ST) ? rks->str : rks->ptr, plen);
				brside = (size_t)llen <= (size_t)rlen;
				if (ret_nparent && ret_npside && !*ret_nparent &&
				    ((ssize_t)llen < 0 || (ssize_t)rlen < 0)) {
					*ret_nparent = node;
					*ret_npside = brside;
				}

				/* for pure lookups, no need to go down the leaf
				 * if we've found the key.
				 */
				if (!ret_root && !ret_lpside && !ret_lparent &&
				    !ret_gpside && !ret_gparent && !ret_back) {
					if ((ssize_t)llen < 0) {
						node = ln;
						plen = llen;
						break;
					}
					if ((ssize_t)rlen < 0) {
						node = rn;
						plen = rlen;
						break;
					}
				}
			}

			/* the compiler cannot know this never happens and this helps it optimize the code */
			if ((ssize_t)plen < 0)
				__builtin_unreachable();

			xlen = string_equal_bits((key_type == CEB_KT_ST) ? lks->str : lks->ptr,
						 (key_type == CEB_KT_ST) ? rks->str : rks->ptr, plen);

			/* let's stop if our key is not there */
			if (meth >= CEB_WM_KEQ && llen < xlen && rlen < xlen)
				break;

			plen = xlen;
		}

		/* shift all copies by one */
		gparent = lparent;
		gpside = lpside;
		lparent = node;
		lpside = brside;
		if (brside) {
			if (meth == CEB_WM_KPR || meth == CEB_WM_KLE || meth == CEB_WM_KLT)
				bnode = node;
			next = rn;
			next_leaf = rnl;
			root = &node->b[1];

			/* change branch for key-less walks */
			if (meth == CEB_WM_NXT)
				brside = 0;
		}
		else {
			if (meth == CEB_WM_KNX || meth == CEB_WM_KGE || meth == CEB_WM_KGT)
				bnode = node;
			next = ln;
			next_leaf = lnl;
			root = &node->b[0];

			/* change branch for key-less walks */
			if (meth == CEB_WM_PRV)
				brside = 1;
		}

		if (next == node) {
			/* loops over itself, it's either a leaf or the single and last list element of a dup sub-tree */
			break;
		}

		/* let the compiler know there's no NULL in the tree */
		if (!next)
			__builtin_unreachable();

		node = next;
		is_leaf = next_leaf;
	}

	if (ret_is_dup) {
		if (is_leaf && _ceb_gettag(node->b[0]) && _ceb_gettag(node->b[1]) &&
		    (_ceb_clrtag(node->b[0]) != node || _ceb_clrtag(node->b[1]) != node)) {
			/* This leaf has two tagged pointers, with at least one not pointing
			 * to itself, it's not the nodeless leaf, it's a duplicate.
			 */
			*ret_is_dup = 1;
		} else {
			*ret_is_dup = 0;
		}
	}

	/* here we're on the closest node from the requested value. It may be
	 * slightly lower (has a zero where we expected a one) or slightly
	 * larger has a one where we expected a zero). Thus another check is
	 * still deserved, depending on the matching method.
	 */

	/* update the pointers needed for modifications (insert, delete) */
	if (ret_nside && meth >= CEB_WM_KEQ) {
		switch (key_type) {
		case CEB_KT_U32:
			*ret_nside = key_u32 >= k->u32;
			break;
		case CEB_KT_U64:
			*ret_nside = key_u64 >= k->u64;
			break;
		case CEB_KT_ADDR:
			*ret_nside = (uintptr_t)key_ptr >= (uintptr_t)node;
			break;
		case CEB_KT_MB:
		case CEB_KT_IM:
			*ret_nside = (uint64_t)plen / 8 == key_u64 ||
				memcmp(key_ptr + plen / 8, ((key_type == CEB_KT_MB) ? k->mb : k->ptr) + plen / 8, key_u64 - plen / 8) >= 0;
			break;

		case CEB_KT_ST:
		case CEB_KT_IS:
			*ret_nside = (ssize_t)plen < 0 ||
				strcmp(key_ptr + plen / 8, (const void *)((key_type == CEB_KT_ST) ? k->str : k->ptr) + plen / 8) >= 0;
			break;
		}
	}

	if (ret_root) {
		/* this node is going to be changed */
		*ret_root = root;
		__builtin_prefetch(root, 1);
	}

	/* info needed by delete */
	if (ret_lpside)
		*ret_lpside = lpside;

	if (ret_lparent) {
		/* this node is going to be changed */
		*ret_lparent = lparent;
		__builtin_prefetch(lparent, 1);
	}

	if (ret_gpside)
		*ret_gpside = gpside;

	if (ret_gparent)
		*ret_gparent = gparent;

	if (ret_back)
		*ret_back = _ceb_dotag(bnode, 0);

	if (meth >= CEB_WM_KEQ) {
		/* For lookups, an equal value means an instant return. For insertions,
		 * it is the same, we want to return the previously existing value so
		 * that the caller can decide what to do. For deletion, we also want to
		 * return the pointer that's about to be deleted.
		 */
		if (key_type == CEB_KT_U32) {
			if ((meth == CEB_WM_KEQ && k->u32 == key_u32) ||
			    (meth == CEB_WM_KNX && k->u32 == key_u32) ||
			    (meth == CEB_WM_KPR && k->u32 == key_u32) ||
			    (meth == CEB_WM_KGE && k->u32 >= key_u32) ||
			    (meth == CEB_WM_KGT && k->u32 >  key_u32) ||
			    (meth == CEB_WM_KLE && k->u32 <= key_u32) ||
			    (meth == CEB_WM_KLT && k->u32 <  key_u32))
				return node;
		}
		else if (key_type == CEB_KT_U64) {
			if ((meth == CEB_WM_KEQ && k->u64 == key_u64) ||
			    (meth == CEB_WM_KNX && k->u64 == key_u64) ||
			    (meth == CEB_WM_KPR && k->u64 == key_u64) ||
			    (meth == CEB_WM_KGE && k->u64 >= key_u64) ||
			    (meth == CEB_WM_KGT && k->u64 >  key_u64) ||
			    (meth == CEB_WM_KLE && k->u64 <= key_u64) ||
			    (meth == CEB_WM_KLT && k->u64 <  key_u64))
				return node;
		}
		else if (key_type == CEB_KT_ADDR) {
			if ((meth == CEB_WM_KEQ && (uintptr_t)node == (uintptr_t)key_ptr) ||
			    (meth == CEB_WM_KNX && (uintptr_t)node == (uintptr_t)key_ptr) ||
			    (meth == CEB_WM_KPR && (uintptr_t)node == (uintptr_t)key_ptr) ||
			    (meth == CEB_WM_KGE && (uintptr_t)node >= (uintptr_t)key_ptr) ||
			    (meth == CEB_WM_KGT && (uintptr_t)node >  (uintptr_t)key_ptr) ||
			    (meth == CEB_WM_KLE && (uintptr_t)node <= (uintptr_t)key_ptr) ||
			    (meth == CEB_WM_KLT && (uintptr_t)node <  (uintptr_t)key_ptr))
				return node;
		}
		else if (key_type == CEB_KT_MB || key_type == CEB_KT_IM) {
			int diff;

			if ((uint64_t)plen / 8 == key_u64)
				diff = 0;
			else
				diff = memcmp(((key_type == CEB_KT_MB) ? k->mb : k->ptr) + plen / 8, key_ptr + plen / 8, key_u64 - plen / 8);

			if ((meth == CEB_WM_KEQ && diff == 0) ||
			    (meth == CEB_WM_KNX && diff == 0) ||
			    (meth == CEB_WM_KPR && diff == 0) ||
			    (meth == CEB_WM_KGE && diff >= 0) ||
			    (meth == CEB_WM_KGT && diff >  0) ||
			    (meth == CEB_WM_KLE && diff <= 0) ||
			    (meth == CEB_WM_KLT && diff <  0))
				return node;
		}
		else if (key_type == CEB_KT_ST || key_type == CEB_KT_IS) {
			int diff;

			if ((ssize_t)plen < 0)
				diff = 0;
			else
				diff = strcmp((const void *)((key_type == CEB_KT_ST) ? k->str : k->ptr) + plen / 8, key_ptr + plen / 8);

			if ((meth == CEB_WM_KEQ && diff == 0) ||
			    (meth == CEB_WM_KNX && diff == 0) ||
			    (meth == CEB_WM_KPR && diff == 0) ||
			    (meth == CEB_WM_KGE && diff >= 0) ||
			    (meth == CEB_WM_KGT && diff >  0) ||
			    (meth == CEB_WM_KLE && diff <= 0) ||
			    (meth == CEB_WM_KLT && diff <  0))
				return node;
		}
	} else if (meth == CEB_WM_FST || meth == CEB_WM_LST) {
		return node;
	} else if (meth == CEB_WM_PRV || meth == CEB_WM_NXT) {
		return node;
	}

	/* lookups and deletes fail here */

	/* let's return NULL to indicate the key was not found. For a lookup or
	 * a delete, it's a failure. For an insert, it's an invitation to the
	 * caller to proceed since the element is not there.
	 */
	return NULL;
#if defined(__GNUC__) && (__GNUC__ >= 12) && !defined(__OPTIMIZE__)
#pragma GCC diagnostic pop
#endif
}
1077 | | |
1078 | | /* |
1079 | | * Below are the functions that support duplicate keys (_ceb_*) |
1080 | | */ |
1081 | | |
/* Generic tree insertion function for trees with duplicate keys. Inserts node
 * <node> into tree <tree>, with key type <key_type> and key <key_*>.
 * Returns the inserted node or the one that already contains the same key.
 * If <is_dup_ptr> is non-null, then duplicates are permitted and this variable
 * is used to temporarily carry an internal state.
 * Note on pointer tagging: a tag of 1 (_ceb_dotag(n, 1)) marks a pointer to a
 * leaf, 0 a pointer to a node, as read back via _ceb_gettag() in the descent.
 */
static inline __attribute__((always_inline))
struct ceb_node *_ceb_insert(struct ceb_root **root,
                             struct ceb_node *node,
                             ptrdiff_t kofs,
                             enum ceb_key_type key_type,
                             uint32_t key_u32,
                             uint64_t key_u64,
                             const void *key_ptr,
                             int *is_dup_ptr)
{
	struct ceb_root **parent;
	struct ceb_node *ret;
	int nside;

	if (!*root) {
		/* empty tree, insert a leaf only: the node points to itself on
		 * both sides, and the root points to it as a leaf.
		 */
		node->b[0] = node->b[1] = _ceb_dotag(node, 1);
		*root = _ceb_dotag(node, 1);
		return node;
	}

	ret = _ceb_descend(root, CEB_WM_KEQ, kofs, key_type, key_u32, key_u64, key_ptr, &nside, &parent, NULL, NULL, NULL, NULL, NULL, NULL, NULL, is_dup_ptr);

	if (!ret) {
		/* The key was not in the tree, we can insert it. Better use an
		 * "if" like this because the inline function above already has
		 * quite identifiable code paths. This reduces the code and
		 * optimizes it a bit.
		 */
		if (nside) {
			node->b[1] = _ceb_dotag(node, 1);
			node->b[0] = *parent;
		} else {
			node->b[0] = _ceb_dotag(node, 1);
			node->b[1] = *parent;
		}
		*parent = _ceb_dotag(node, 0);
		ret = node;
	} else if (is_dup_ptr) {
		/* The key was found. We must insert after it as the last
		 * element of the dups list, which means that our left branch
		 * will point to the key, the right one to the first dup
		 * (i.e. previous dup's right if it exists, otherwise ourself)
		 * and the parent must point to us.
		 */
		node->b[0] = *parent;

		if (*is_dup_ptr) {
			node->b[1] = _ceb_untag(*parent, 1)->b[1];
			_ceb_untag(*parent, 1)->b[1] = _ceb_dotag(node, 1);
		} else {
			node->b[1] = _ceb_dotag(node, 1);
		}
		*parent = _ceb_dotag(node, 1);
		ret = node;
	}
	return ret;
}
1146 | | |
1147 | | /* Returns the first node or NULL if not found, assuming a tree made of keys of |
1148 | | * type <key_type>, and optionally <key_len> for fixed-size arrays (otherwise 0). |
1149 | | * If the tree starts with duplicates, the first of them is returned. |
1150 | | */ |
1151 | | static inline __attribute__((always_inline)) |
1152 | | struct ceb_node *_ceb_first(struct ceb_root *const *root, |
1153 | | ptrdiff_t kofs, |
1154 | | enum ceb_key_type key_type, |
1155 | | uint64_t key_len, |
1156 | | int *is_dup_ptr) |
1157 | 0 | { |
1158 | 0 | struct ceb_node *node; |
1159 | |
|
1160 | 0 | if (!*root) |
1161 | 0 | return NULL; |
1162 | | |
1163 | 0 | node = _ceb_descend((struct ceb_root **)root, CEB_WM_FST, kofs, key_type, 0, key_len, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, is_dup_ptr); |
1164 | 0 | if (node && is_dup_ptr && *is_dup_ptr) { |
1165 | | /* on a duplicate, the first node is right->left and it's a leaf */ |
1166 | 0 | node = _ceb_untag(_ceb_untag(node->b[1], 1)->b[0], 1); |
1167 | 0 | } |
1168 | 0 | return node; |
1169 | 0 | } Unexecuted instantiation: ceb32_tree.c:_ceb_first Unexecuted instantiation: ceb64_tree.c:_ceb_first Unexecuted instantiation: cebis_tree.c:_ceb_first Unexecuted instantiation: cebs_tree.c:_ceb_first |
1170 | | |
1171 | | /* Returns the last node or NULL if not found, assuming a tree made of keys of |
1172 | | * type <key_type>, and optionally <key_len> for fixed-size arrays (otherwise 0). |
1173 | | * If the tree ends with duplicates, the last of them is returned. |
1174 | | */ |
1175 | | static inline __attribute__((always_inline)) |
1176 | | struct ceb_node *_ceb_last(struct ceb_root *const *root, |
1177 | | ptrdiff_t kofs, |
1178 | | enum ceb_key_type key_type, |
1179 | | uint64_t key_len, |
1180 | | int *is_dup_ptr) |
1181 | 0 | { |
1182 | 0 | if (!*root) |
1183 | 0 | return NULL; |
1184 | | |
1185 | | /* note for duplicates: the current scheme always returns the last one by default */ |
1186 | 0 | return _ceb_descend((struct ceb_root **)root, CEB_WM_LST, kofs, key_type, 0, key_len, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, is_dup_ptr); |
1187 | 0 | } Unexecuted instantiation: ceb32_tree.c:_ceb_last Unexecuted instantiation: ceb64_tree.c:_ceb_last Unexecuted instantiation: cebis_tree.c:_ceb_last Unexecuted instantiation: cebs_tree.c:_ceb_last |
1188 | | |
1189 | | /* Searches in the tree <root> made of keys of type <key_type>, for the next |
1190 | | * node after the one containing the key <key_*>. Returns NULL if not found. |
1191 | | * It's up to the caller to pass the current node's key in <key_*>. The |
1192 | | * approach consists in looking up that node first, recalling the last time a |
1193 | | * left turn was made, and returning the first node along the right branch at |
1194 | | * that fork. |
1195 | | */ |
1196 | | static inline __attribute__((always_inline)) |
1197 | | struct ceb_node *_ceb_next_unique(struct ceb_root *const *root, |
1198 | | ptrdiff_t kofs, |
1199 | | enum ceb_key_type key_type, |
1200 | | uint32_t key_u32, |
1201 | | uint64_t key_u64, |
1202 | | const void *key_ptr, |
1203 | | int *is_dup_ptr) |
1204 | 0 | { |
1205 | 0 | struct ceb_root *restart; |
1206 | |
|
1207 | 0 | if (!*root) |
1208 | 0 | return NULL; |
1209 | | |
1210 | 0 | if (!_ceb_descend((struct ceb_root **)root, CEB_WM_KNX, kofs, key_type, key_u32, key_u64, key_ptr, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &restart, is_dup_ptr)) |
1211 | 0 | return NULL; |
1212 | | |
1213 | 0 | if (!restart) |
1214 | 0 | return NULL; |
1215 | | |
1216 | 0 | return _ceb_descend(&restart, CEB_WM_NXT, kofs, key_type, 0, key_u64, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, is_dup_ptr); |
1217 | 0 | } Unexecuted instantiation: ceb32_tree.c:_ceb_next_unique Unexecuted instantiation: ceb64_tree.c:_ceb_next_unique Unexecuted instantiation: cebis_tree.c:_ceb_next_unique Unexecuted instantiation: cebs_tree.c:_ceb_next_unique |
1218 | | |
1219 | | /* Searches in the tree <root> made of keys of type <key_type>, for the prev |
1220 | | * node before the one containing the key <key_*>. Returns NULL if not found. |
1221 | | * It's up to the caller to pass the current node's key in <key_*>. The |
1222 | | * approach consists in looking up that node first, recalling the last time a |
1223 | | * right turn was made, and returning the last node along the left branch at |
1224 | | * that fork. |
1225 | | */ |
1226 | | static inline __attribute__((always_inline)) |
1227 | | struct ceb_node *_ceb_prev_unique(struct ceb_root *const *root, |
1228 | | ptrdiff_t kofs, |
1229 | | enum ceb_key_type key_type, |
1230 | | uint32_t key_u32, |
1231 | | uint64_t key_u64, |
1232 | | const void *key_ptr, |
1233 | | int *is_dup_ptr) |
1234 | 0 | { |
1235 | 0 | struct ceb_root *restart; |
1236 | |
|
1237 | 0 | if (!*root) |
1238 | 0 | return NULL; |
1239 | | |
1240 | 0 | if (!_ceb_descend((struct ceb_root **)root, CEB_WM_KPR, kofs, key_type, key_u32, key_u64, key_ptr, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &restart, is_dup_ptr)) |
1241 | 0 | return NULL; |
1242 | | |
1243 | 0 | if (!restart) |
1244 | 0 | return NULL; |
1245 | | |
1246 | 0 | return _ceb_descend(&restart, CEB_WM_PRV, kofs, key_type, 0, key_u64, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, is_dup_ptr); |
1247 | 0 | } Unexecuted instantiation: ceb32_tree.c:_ceb_prev_unique Unexecuted instantiation: ceb64_tree.c:_ceb_prev_unique Unexecuted instantiation: cebis_tree.c:_ceb_prev_unique Unexecuted instantiation: cebs_tree.c:_ceb_prev_unique |
1248 | | |
1249 | | /* Searches in the tree <root> made of keys of type <key_type>, for the next |
1250 | | * node after <from> also containing key <key_*>. Returns NULL if not found. |
1251 | | * It's up to the caller to pass the current node's key in <key_*>. |
1252 | | */ |
1253 | | static inline __attribute__((always_inline)) |
1254 | | struct ceb_node *_ceb_next_dup(struct ceb_root *const *root, |
1255 | | ptrdiff_t kofs, |
1256 | | enum ceb_key_type key_type, |
1257 | | uint32_t key_u32, |
1258 | | uint64_t key_u64, |
1259 | | const void *key_ptr, |
1260 | | const struct ceb_node *from) |
1261 | 0 | { |
1262 | 0 | struct ceb_node *node; |
1263 | 0 | int is_dup; |
1264 | |
|
1265 | 0 | if (!*root) |
1266 | 0 | return NULL; |
1267 | | |
1268 | 0 | node = _ceb_descend((struct ceb_root **)root, CEB_WM_KNX, kofs, key_type, key_u32, key_u64, key_ptr, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &is_dup); |
1269 | 0 | if (!node) |
1270 | 0 | return NULL; |
1271 | | |
1272 | | /* Normally at this point, if node != from, we've found a node that |
1273 | | * differs from the one we're starting from, which indicates that |
1274 | | * the starting point belongs to a dup list and is not the last one. |
1275 | | * We must then visit the other members. We cannot navigate from the |
1276 | | * regular leaf node (the first one) but we can easily verify if we're |
1277 | | * on that one by checking if it's node->b[1]->b[0], in which case we |
1278 | | * jump to node->b[1]. Otherwise we take from->b[1]. |
1279 | | */ |
1280 | 0 | if (node != from) { |
1281 | 0 | if (_ceb_untag(node->b[1], 1)->b[0] == _ceb_dotag(from, 1)) |
1282 | 0 | return _ceb_untag(node->b[1], 1); |
1283 | 0 | else |
1284 | 0 | return _ceb_untag(from->b[1], 1); |
1285 | 0 | } |
1286 | | |
1287 | | /* there's no other dup here */ |
1288 | 0 | return NULL; |
1289 | 0 | } Unexecuted instantiation: ceb32_tree.c:_ceb_next_dup Unexecuted instantiation: ceb64_tree.c:_ceb_next_dup Unexecuted instantiation: cebis_tree.c:_ceb_next_dup Unexecuted instantiation: cebs_tree.c:_ceb_next_dup |
1290 | | |
1291 | | /* Searches in the tree <root> made of keys of type <key_type>, for the prev |
1292 | | * node before <from> also containing key <key_*>. Returns NULL if not found. |
1293 | | * It's up to the caller to pass the current node's key in <key_*>. |
1294 | | */ |
1295 | | static inline __attribute__((always_inline)) |
1296 | | struct ceb_node *_ceb_prev_dup(struct ceb_root *const *root, |
1297 | | ptrdiff_t kofs, |
1298 | | enum ceb_key_type key_type, |
1299 | | uint32_t key_u32, |
1300 | | uint64_t key_u64, |
1301 | | const void *key_ptr, |
1302 | | const struct ceb_node *from) |
1303 | 0 | { |
1304 | 0 | struct ceb_node *node; |
1305 | 0 | int is_dup; |
1306 | |
|
1307 | 0 | if (!*root) |
1308 | 0 | return NULL; |
1309 | | |
1310 | 0 | node = _ceb_descend((struct ceb_root **)root, CEB_WM_KPR, kofs, key_type, key_u32, key_u64, key_ptr, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &is_dup); |
1311 | 0 | if (!node) |
1312 | 0 | return NULL; |
1313 | | |
1314 | | /* Here we have several possibilities: |
1315 | | * - from == node => we've found our node. It may be a unique node, |
1316 | | * or the last one of a dup series. We'll sort that out thanks to |
1317 | | * is_dup, and if it's a dup, we'll use node->b[0]. |
1318 | | * - from is not the first dup, so we haven't visited them all yet, |
1319 | | * hence we visit node->b[0] to switch to the previous dup. |
1320 | | * - from is the first dup so we've visited them all. |
1321 | | */ |
1322 | 0 | if (is_dup && (node == from || _ceb_untag(node->b[1], 1)->b[0] != _ceb_dotag(from, 1))) |
1323 | 0 | return _ceb_untag(from->b[0], 1); |
1324 | | |
1325 | | /* there's no other dup here */ |
1326 | 0 | return NULL; |
1327 | 0 | } Unexecuted instantiation: ceb32_tree.c:_ceb_prev_dup Unexecuted instantiation: ceb64_tree.c:_ceb_prev_dup Unexecuted instantiation: cebis_tree.c:_ceb_prev_dup Unexecuted instantiation: cebs_tree.c:_ceb_prev_dup |
1328 | | |
1329 | | /* Searches in the tree <root> made of keys of type <key_type>, for the next |
1330 | | * node after <from> which contains key <key_*>. Returns NULL if not found. |
1331 | | * It's up to the caller to pass the current node's key in <key_*>. The |
1332 | | * approach consists in looking up that node first, recalling the last time a |
1333 | | * left turn was made, and returning the first node along the right branch at |
1334 | | * that fork. In case the current node belongs to a duplicate list, all dups |
1335 | | * will be visited in insertion order prior to jumping to different keys. |
1336 | | */ |
1337 | | static inline __attribute__((always_inline)) |
1338 | | struct ceb_node *_ceb_next(struct ceb_root *const *root, |
1339 | | ptrdiff_t kofs, |
1340 | | enum ceb_key_type key_type, |
1341 | | uint32_t key_u32, |
1342 | | uint64_t key_u64, |
1343 | | const void *key_ptr, |
1344 | | const struct ceb_node *from, |
1345 | | int *is_dup_ptr) |
1346 | 0 | { |
1347 | 0 | struct ceb_root *restart; |
1348 | 0 | struct ceb_node *node; |
1349 | |
|
1350 | 0 | if (!*root) |
1351 | 0 | return NULL; |
1352 | | |
1353 | 0 | node = _ceb_descend((struct ceb_root **)root, CEB_WM_KNX, kofs, key_type, key_u32, key_u64, key_ptr, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &restart, is_dup_ptr); |
1354 | 0 | if (!node) |
1355 | 0 | return NULL; |
1356 | | |
1357 | | /* Normally at this point, if node != from, we've found a node that |
1358 | | * differs from the one we're starting from, which indicates that |
1359 | | * the starting point belongs to a dup list and is not the last one. |
1360 | | * We must then visit the other members. We cannot navigate from the |
1361 | | * regular leaf node (the first one) but we can easily verify if we're |
1362 | | * on that one by checking if it's _ceb_untag(node->b[1], 0)->b[0], in which case we |
1363 | | * jump to node->b[1]. Otherwise we take from->b[1]. |
1364 | | */ |
1365 | 0 | if (node != from) { |
1366 | 0 | if (_ceb_untag(node->b[1], 1)->b[0] == _ceb_dotag(from, 1)) |
1367 | 0 | return _ceb_untag(node->b[1], 1); |
1368 | 0 | else |
1369 | 0 | return _ceb_untag(from->b[1], 1); |
1370 | 0 | } |
1371 | | |
1372 | | /* Here the looked up node was found (node == from) and we can look up |
1373 | | * the next unique one if any. |
1374 | | */ |
1375 | 0 | if (!restart) |
1376 | 0 | return NULL; |
1377 | | |
1378 | | /* this look up will stop on the topmost dup in a sub-tree which is |
1379 | | * also the last one. Thanks to restart we know that this entry exists. |
1380 | | */ |
1381 | 0 | node = _ceb_descend(&restart, CEB_WM_NXT, kofs, key_type, 0, key_u64, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, is_dup_ptr); |
1382 | 0 | if (node && is_dup_ptr && *is_dup_ptr) { |
1383 | | /* on a duplicate, the first node is right->left and it's a leaf */ |
1384 | 0 | node = _ceb_untag(_ceb_untag(node->b[1], 1)->b[0], 1); |
1385 | 0 | } |
1386 | 0 | return node; |
1387 | 0 | } Unexecuted instantiation: ceb32_tree.c:_ceb_next Unexecuted instantiation: ceb64_tree.c:_ceb_next Unexecuted instantiation: cebis_tree.c:_ceb_next Unexecuted instantiation: cebs_tree.c:_ceb_next |
1388 | | |
1389 | | /* Searches in the tree <root> made of keys of type <key_type>, for the prev |
1390 | | * node before the one containing the key <key_*>. Returns NULL if not found. |
1391 | | * It's up to the caller to pass the current node's key in <key_*>. The |
1392 | | * approach consists in looking up that node first, recalling the last time a |
1393 | | * right turn was made, and returning the last node along the left branch at |
1394 | | * that fork. In case the current node belongs to a duplicate list, all dups |
1395 | | * will be visited in reverse insertion order prior to jumping to different |
1396 | | * keys. |
1397 | | */ |
1398 | | static inline __attribute__((always_inline)) |
1399 | | struct ceb_node *_ceb_prev(struct ceb_root *const *root, |
1400 | | ptrdiff_t kofs, |
1401 | | enum ceb_key_type key_type, |
1402 | | uint32_t key_u32, |
1403 | | uint64_t key_u64, |
1404 | | const void *key_ptr, |
1405 | | const struct ceb_node *from, |
1406 | | int *is_dup_ptr) |
1407 | 0 | { |
1408 | 0 | struct ceb_root *restart; |
1409 | 0 | struct ceb_node *node; |
1410 | |
|
1411 | 0 | if (!*root) |
1412 | 0 | return NULL; |
1413 | | |
1414 | 0 | node = _ceb_descend((struct ceb_root **)root, CEB_WM_KPR, kofs, key_type, key_u32, key_u64, key_ptr, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &restart, is_dup_ptr); |
1415 | 0 | if (!node) |
1416 | 0 | return NULL; |
1417 | | |
1418 | | /* Here we have several possibilities: |
1419 | | * - from == node => we've found our node. It may be a unique node, |
1420 | | * or the last one of a dup series. We'll sort that out thanks to |
1421 | | * is_dup, and if it's a dup, we'll use node->b[0]. |
1422 | | * - from is not the first dup, so we haven't visited them all yet, |
1423 | | * hence we visit node->b[0] to switch to the previous dup. |
1424 | | * - from is the first dup so we've visited them all, we now need |
1425 | | * to jump to the previous unique value. |
1426 | | */ |
1427 | 0 | if (is_dup_ptr && *is_dup_ptr && (node == from || _ceb_untag(node->b[1], 1)->b[0] != _ceb_dotag(from, 1))) |
1428 | 0 | return _ceb_untag(from->b[0], 1); |
1429 | | |
1430 | | /* look up the previous unique entry */ |
1431 | 0 | if (!restart) |
1432 | 0 | return NULL; |
1433 | | |
1434 | | /* Note that the descent stops on the last dup which is the one we want */ |
1435 | 0 | return _ceb_descend(&restart, CEB_WM_PRV, kofs, key_type, 0, key_u64, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, is_dup_ptr); |
1436 | 0 | } Unexecuted instantiation: ceb32_tree.c:_ceb_prev Unexecuted instantiation: ceb64_tree.c:_ceb_prev Unexecuted instantiation: cebis_tree.c:_ceb_prev Unexecuted instantiation: cebs_tree.c:_ceb_prev |
1437 | | |
1438 | | /* Searches in the tree <root> made of keys of type <key_type>, for the first |
1439 | | * node containing the key <key_*>. Returns NULL if not found. |
1440 | | */ |
1441 | | static inline __attribute__((always_inline)) |
1442 | | struct ceb_node *_ceb_lookup(struct ceb_root *const *root, |
1443 | | ptrdiff_t kofs, |
1444 | | enum ceb_key_type key_type, |
1445 | | uint32_t key_u32, |
1446 | | uint64_t key_u64, |
1447 | | const void *key_ptr, |
1448 | | int *is_dup_ptr) |
1449 | 0 | { |
1450 | 0 | struct ceb_node *ret; |
1451 | |
|
1452 | 0 | if (!*root) |
1453 | 0 | return NULL; |
1454 | | |
1455 | 0 | ret = _ceb_descend((struct ceb_root **)root, CEB_WM_KEQ, kofs, key_type, key_u32, key_u64, key_ptr, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, is_dup_ptr); |
1456 | 0 | if (ret && is_dup_ptr && *is_dup_ptr) { |
1457 | | /* on a duplicate, the first node is right->left and it's a leaf */ |
1458 | 0 | ret = _ceb_untag(_ceb_untag(ret->b[1], 1)->b[0], 1); |
1459 | 0 | } |
1460 | 0 | return ret; |
1461 | 0 | } Unexecuted instantiation: ceb32_tree.c:_ceb_lookup Unexecuted instantiation: ceb64_tree.c:_ceb_lookup Unexecuted instantiation: cebis_tree.c:_ceb_lookup Unexecuted instantiation: cebs_tree.c:_ceb_lookup |
1462 | | |
1463 | | /* Searches in the tree <root> made of keys of type <key_type>, for the last |
1464 | | * node containing the key <key_*> or the highest one that's lower than it. |
1465 | | * Returns NULL if not found. |
1466 | | */ |
1467 | | static inline __attribute__((always_inline)) |
1468 | | struct ceb_node *_ceb_lookup_le(struct ceb_root *const *root, |
1469 | | ptrdiff_t kofs, |
1470 | | enum ceb_key_type key_type, |
1471 | | uint32_t key_u32, |
1472 | | uint64_t key_u64, |
1473 | | const void *key_ptr, |
1474 | | int *is_dup_ptr) |
1475 | 0 | { |
1476 | 0 | struct ceb_node *ret = NULL; |
1477 | 0 | struct ceb_root *restart; |
1478 | |
|
1479 | 0 | if (!*root) |
1480 | 0 | return NULL; |
1481 | | |
1482 | | /* note that for duplicates, we already find the last one */ |
1483 | 0 | ret = _ceb_descend((struct ceb_root **)root, CEB_WM_KLE, kofs, key_type, key_u32, key_u64, key_ptr, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &restart, is_dup_ptr); |
1484 | 0 | if (ret) |
1485 | 0 | return ret; |
1486 | | |
1487 | 0 | if (!restart) |
1488 | 0 | return NULL; |
1489 | | |
1490 | 0 | return _ceb_descend(&restart, CEB_WM_PRV, kofs, key_type, 0, key_u64, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, is_dup_ptr); |
1491 | 0 | } Unexecuted instantiation: ceb32_tree.c:_ceb_lookup_le Unexecuted instantiation: ceb64_tree.c:_ceb_lookup_le Unexecuted instantiation: cebis_tree.c:_ceb_lookup_le Unexecuted instantiation: cebs_tree.c:_ceb_lookup_le |
1492 | | |
1493 | | /* Searches in the tree <root> made of keys of type <key_type>, for the last |
1494 | | * node containing the greatest key that is strictly lower than <key_*>. |
1495 | | * Returns NULL if not found. It's very similar to next() except that the |
1496 | | * looked up value doesn't need to exist. |
1497 | | */ |
1498 | | static inline __attribute__((always_inline)) |
1499 | | struct ceb_node *_ceb_lookup_lt(struct ceb_root *const *root, |
1500 | | ptrdiff_t kofs, |
1501 | | enum ceb_key_type key_type, |
1502 | | uint32_t key_u32, |
1503 | | uint64_t key_u64, |
1504 | | const void *key_ptr, |
1505 | | int *is_dup_ptr) |
1506 | 0 | { |
1507 | 0 | struct ceb_node *ret = NULL; |
1508 | 0 | struct ceb_root *restart; |
1509 | |
|
1510 | 0 | if (!*root) |
1511 | 0 | return NULL; |
1512 | | |
1513 | | /* note that for duplicates, we already find the last one */ |
1514 | 0 | ret = _ceb_descend((struct ceb_root **)root, CEB_WM_KLT, kofs, key_type, key_u32, key_u64, key_ptr, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &restart, is_dup_ptr); |
1515 | 0 | if (ret) |
1516 | 0 | return ret; |
1517 | | |
1518 | 0 | if (!restart) |
1519 | 0 | return NULL; |
1520 | | |
1521 | 0 | return _ceb_descend(&restart, CEB_WM_PRV, kofs, key_type, 0, key_u64, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, is_dup_ptr); |
1522 | 0 | } Unexecuted instantiation: ceb32_tree.c:_ceb_lookup_lt Unexecuted instantiation: ceb64_tree.c:_ceb_lookup_lt Unexecuted instantiation: cebis_tree.c:_ceb_lookup_lt Unexecuted instantiation: cebs_tree.c:_ceb_lookup_lt |
1523 | | |
1524 | | /* Searches in the tree <root> made of keys of type <key_type>, for the first |
1525 | | * node containing the key <key_*> or the smallest one that's greater than it. |
1526 | | * Returns NULL if not found. If <is_dup_ptr> is non-null, then duplicates are |
1527 | | * permitted and this variable is used to temporarily carry an internal state. |
1528 | | |
1529 | | */ |
1530 | | static inline __attribute__((always_inline)) |
1531 | | struct ceb_node *_ceb_lookup_ge(struct ceb_root *const *root, |
1532 | | ptrdiff_t kofs, |
1533 | | enum ceb_key_type key_type, |
1534 | | uint32_t key_u32, |
1535 | | uint64_t key_u64, |
1536 | | const void *key_ptr, |
1537 | | int *is_dup_ptr) |
1538 | 0 | { |
1539 | 0 | struct ceb_node *ret = NULL; |
1540 | 0 | struct ceb_root *restart; |
1541 | |
|
1542 | 0 | if (!*root) |
1543 | 0 | return NULL; |
1544 | | |
1545 | 0 | ret = _ceb_descend((struct ceb_root **)root, CEB_WM_KGE, kofs, key_type, key_u32, key_u64, key_ptr, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &restart, is_dup_ptr); |
1546 | 0 | if (!ret) { |
1547 | 0 | if (!restart) |
1548 | 0 | return NULL; |
1549 | | |
1550 | 0 | ret = _ceb_descend(&restart, CEB_WM_NXT, kofs, key_type, 0, key_u64, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, is_dup_ptr); |
1551 | 0 | } |
1552 | | |
1553 | 0 | if (ret && is_dup_ptr && *is_dup_ptr) { |
1554 | | /* on a duplicate, the first node is right->left and it's a leaf */ |
1555 | 0 | ret = _ceb_untag(_ceb_untag(ret->b[1], 1)->b[0], 1); |
1556 | 0 | } |
1557 | 0 | return ret; |
1558 | 0 | } Unexecuted instantiation: ceb32_tree.c:_ceb_lookup_ge Unexecuted instantiation: ceb64_tree.c:_ceb_lookup_ge Unexecuted instantiation: cebis_tree.c:_ceb_lookup_ge Unexecuted instantiation: cebs_tree.c:_ceb_lookup_ge |
1559 | | |
1560 | | /* Searches in the tree <root> made of keys of type <key_type>, for the first |
1561 | | * node containing the lowest key that is strictly greater than <key_*>. Returns |
1562 | | * NULL if not found. It's very similar to prev() except that the looked up |
1563 | | * value doesn't need to exist. If <is_dup_ptr> is non-null, then duplicates are |
1564 | | * permitted and this variable is used to temporarily carry an internal state. |
1565 | | */ |
1566 | | static inline __attribute__((always_inline)) |
1567 | | struct ceb_node *_ceb_lookup_gt(struct ceb_root *const *root, |
1568 | | ptrdiff_t kofs, |
1569 | | enum ceb_key_type key_type, |
1570 | | uint32_t key_u32, |
1571 | | uint64_t key_u64, |
1572 | | const void *key_ptr, |
1573 | | int *is_dup_ptr) |
1574 | 0 | { |
1575 | 0 | struct ceb_node *ret = NULL; |
1576 | 0 | struct ceb_root *restart; |
1577 | |
|
1578 | 0 | if (!*root) |
1579 | 0 | return NULL; |
1580 | | |
1581 | 0 | ret = _ceb_descend((struct ceb_root **)root, CEB_WM_KGT, kofs, key_type, key_u32, key_u64, key_ptr, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &restart, is_dup_ptr); |
1582 | 0 | if (!ret) { |
1583 | 0 | if (!restart) |
1584 | 0 | return NULL; |
1585 | | |
1586 | 0 | ret = _ceb_descend(&restart, CEB_WM_NXT, kofs, key_type, 0, key_u64, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, is_dup_ptr); |
1587 | 0 | } |
1588 | | |
1589 | 0 | if (ret && is_dup_ptr && *is_dup_ptr) { |
1590 | | /* on a duplicate, the first node is right->left and it's a leaf */ |
1591 | 0 | ret = _ceb_untag(_ceb_untag(ret->b[1], 1)->b[0], 1); |
1592 | 0 | } |
1593 | 0 | return ret; |
1594 | 0 | } Unexecuted instantiation: ceb32_tree.c:_ceb_lookup_gt Unexecuted instantiation: ceb64_tree.c:_ceb_lookup_gt Unexecuted instantiation: cebis_tree.c:_ceb_lookup_gt Unexecuted instantiation: cebs_tree.c:_ceb_lookup_gt |
1595 | | |
1596 | | /* Searches in the tree <root> made of keys of type <key_type>, for the node |
1597 | | * that contains the key <key_*>, and deletes it. If <node> is non-NULL, a |
1598 | | * check is performed and the node found is deleted only if it matches. The |
1599 | | * found node is returned in any case, otherwise NULL if not found. A deleted |
1600 | | * node is detected since it has b[0]==NULL, which this functions also clears |
1601 | | * after operation. The function is idempotent, so it's safe to attempt to |
1602 | | * delete an already deleted node (NULL is returned in this case since the node |
1603 | | * was not in the tree). If <is_dup_ptr> is non-null, then duplicates are |
1604 | | * permitted and this variable is used to temporarily carry an internal state. |
1605 | | */ |
1606 | | static inline __attribute__((always_inline)) |
1607 | | struct ceb_node *_ceb_delete(struct ceb_root **root, |
1608 | | struct ceb_node *node, |
1609 | | ptrdiff_t kofs, |
1610 | | enum ceb_key_type key_type, |
1611 | | uint32_t key_u32, |
1612 | | uint64_t key_u64, |
1613 | | const void *key_ptr, |
1614 | | int *is_dup_ptr) |
1615 | 0 | { |
1616 | 0 | struct ceb_node *lparent, *nparent, *gparent; |
1617 | 0 | int lpside, npside, gpside; |
1618 | 0 | struct ceb_node *ret = NULL; |
1619 | |
|
1620 | 0 | if (node && !node->b[0]) { |
1621 | | /* NULL on a branch means the node is not in the tree */ |
1622 | 0 | return NULL; |
1623 | 0 | } |
1624 | | |
1625 | 0 | if (!*root) { |
1626 | | /* empty tree, the node cannot be there */ |
1627 | 0 | goto done; |
1628 | 0 | } |
1629 | | |
1630 | 0 | ret = _ceb_descend(root, CEB_WM_KEQ, kofs, key_type, key_u32, key_u64, key_ptr, NULL, NULL, |
1631 | 0 | &lparent, &lpside, &nparent, &npside, &gparent, &gpside, NULL, is_dup_ptr); |
1632 | |
|
1633 | 0 | if (!ret) { |
1634 | | /* key not found */ |
1635 | 0 | goto done; |
1636 | 0 | } |
1637 | | |
1638 | 0 | if (is_dup_ptr && *is_dup_ptr) { |
1639 | | /* the node to be deleted belongs to a dup sub-tree whose ret |
1640 | | * is the last. The possibilities here are: |
1641 | | * 1) node==NULL => unspecified, we delete the first one, |
1642 | | * which is the tree leaf. The tree node (if it exists) |
1643 | | * is replaced by the first dup. There's nothing else to |
1644 | | * change. |
1645 | | * 2) node is the tree leaf. The tree node (if it exists) |
1646 | | * is replaced by the first dup. |
1647 | | * 3) node is a dup. We just delete the dup. |
1648 | | * In order to delete a dup, there are 4 cases: |
1649 | | * a) node==last and there's a single dup, it's this one |
1650 | | * -> *parent = node->b[0]; |
1651 | | * b) node==last and there's another dup: |
1652 | | * -> *parent = node->b[0]; |
1653 | | * node->b[0]->b[1] = node->b[1]; |
1654 | | * (or (*parent)->b[1] = node->b[1] covers a and b) |
1655 | | * c) node==first != last: |
1656 | | * -> node->b[1]->b[0] = node->b[0]; |
1657 | | * last->b[1] = node->b[1]; |
1658 | | * (or (*parent)->b[1] = node->b[1] covers a,b,c) |
1659 | | * d) node!=first && !=last: |
1660 | | * -> node->b[1]->b[0] = node->b[0]; |
1661 | | * node->b[0]->b[1] = node->b[1]; |
1662 | | * a,b,c,d can be simplified as: |
1663 | | * ((node == first) ? last : node->b[0])->b[1] = node->b[1]; |
1664 | | * *((node == last) ? parent : &node->b[1]->b[0]) = node->b[0]; |
1665 | | */ |
1666 | 0 | struct ceb_node *first, *last; |
1667 | |
|
1668 | 0 | last = ret; |
1669 | 0 | first = _ceb_untag(last->b[1], 1); |
1670 | | |
1671 | | /* cases 1 and 2 below */ |
1672 | 0 | if (!node || node == _ceb_untag(first->b[0], 1)) { |
1673 | | /* node unspecified or the first, remove the leaf and |
1674 | | * convert the first entry to it. |
1675 | | */ |
1676 | 0 | ret = _ceb_untag(first->b[0], 1); // update return node |
1677 | 0 | last->b[1] = first->b[1]; // new first (remains OK if last==first) |
1678 | |
|
1679 | 0 | if (ret->b[0] != _ceb_dotag(ret, 1) || ret->b[1] != _ceb_dotag(ret, 1)) { |
1680 | | /* not the nodeless leaf, a node exists, put it |
1681 | | * on the first and update its parent. |
1682 | | */ |
1683 | 0 | first->b[0] = ret->b[0]; |
1684 | 0 | first->b[1] = ret->b[1]; |
1685 | 0 | nparent->b[npside] = _ceb_dotag(first, 0); |
1686 | 0 | } |
1687 | 0 | else { |
1688 | | /* first becomes the nodeless leaf since we only keep its leaf */ |
1689 | 0 | first->b[0] = first->b[1] = _ceb_dotag(first, 1); |
1690 | 0 | } |
1691 | | /* first becomes a leaf, it must be tagged */ |
1692 | 0 | if (last != first) |
1693 | 0 | _ceb_untag(last->b[1], 1)->b[0] = _ceb_dotag(first, 1); |
1694 | | /* done */ |
1695 | 0 | } else { |
1696 | | /* case 3: the node to delete is a dup, we only have to |
1697 | | * manipulate the list. |
1698 | | */ |
1699 | 0 | ret = node; |
1700 | 0 | ((node == first) ? last : _ceb_untag(node->b[0], 1))->b[1] = node->b[1]; |
1701 | 0 | *((node == last) ? &lparent->b[lpside] : &_ceb_untag(node->b[1], 1)->b[0]) = node->b[0]; |
1702 | | /* done */ |
1703 | 0 | } |
1704 | 0 | goto mark_and_leave; |
1705 | 0 | } |
1706 | | |
1707 | | /* ok below the returned value is a real leaf, we have to adjust the tree */ |
1708 | | |
1709 | 0 | if (ret == node || !node) { |
1710 | 0 | if (&lparent->b[0] == root) { |
1711 | | /* there was a single entry, this one, so we're just |
1712 | | * deleting the nodeless leaf. |
1713 | | */ |
1714 | 0 | *root = NULL; |
1715 | 0 | goto mark_and_leave; |
1716 | 0 | } |
1717 | | |
1718 | | /* then we necessarily have a gparent */ |
1719 | 0 | gparent->b[gpside] = lparent->b[!lpside]; |
1720 | |
|
1721 | 0 | if (lparent == ret) { |
1722 | | /* we're removing the leaf and node together, nothing |
1723 | | * more to do. |
1724 | | */ |
1725 | 0 | goto mark_and_leave; |
1726 | 0 | } |
1727 | | |
1728 | 0 | if (ret->b[0] == ret->b[1]) { |
1729 | | /* we're removing the node-less item, the parent will |
1730 | | * take this role. |
1731 | | */ |
1732 | 0 | lparent->b[0] = lparent->b[1] = _ceb_dotag(lparent, 1); |
1733 | 0 | goto mark_and_leave; |
1734 | 0 | } |
1735 | | |
1736 | | /* more complicated, the node was split from the leaf, we have |
1737 | | * to find a spare one to switch it. The parent node is not |
1738 | | * needed anymore so we can reuse it. |
1739 | | */ |
1740 | 0 | lparent->b[0] = ret->b[0]; |
1741 | 0 | lparent->b[1] = ret->b[1]; |
1742 | 0 | nparent->b[npside] = _ceb_dotag(lparent, 0); |
1743 | |
|
1744 | 0 | mark_and_leave: |
1745 | | /* now mark the node as deleted */ |
1746 | 0 | ret->b[0] = NULL; |
1747 | 0 | } |
1748 | 0 | done: |
1749 | 0 | return ret; |
1750 | 0 | } Unexecuted instantiation: ceb32_tree.c:_ceb_delete Unexecuted instantiation: ceb64_tree.c:_ceb_delete Unexecuted instantiation: cebis_tree.c:_ceb_delete Unexecuted instantiation: cebs_tree.c:_ceb_delete |
1751 | | |
1752 | | //#if defined(CEB_ENABLE_DUMP) |
1753 | | /* The dump functions are in cebtree-dbg.c */ |
1754 | | |
1755 | | void ceb_imm_default_dump_root(ptrdiff_t kofs, enum ceb_key_type key_type, struct ceb_root *const *root, const void *ctx, int sub); |
1756 | | void ceb_imm_default_dump_node(ptrdiff_t kofs, enum ceb_key_type key_type, const struct ceb_node *node, int level, const void *ctx, int sub); |
1757 | | void ceb_imm_default_dump_dups(ptrdiff_t kofs, enum ceb_key_type key_type, const struct ceb_node *node, int level, const void *ctx, int sub); |
1758 | | void ceb_imm_default_dump_leaf(ptrdiff_t kofs, enum ceb_key_type key_type, const struct ceb_node *node, int level, const void *ctx, int sub); |
1759 | | const struct ceb_node *ceb_imm_default_dump_tree(ptrdiff_t kofs, enum ceb_key_type key_type, struct ceb_root *const *root, |
1760 | | uint64_t pxor, const void *last, int level, const void *ctx, int sub, |
1761 | | void (*root_dump)(ptrdiff_t kofs, enum ceb_key_type key_type, struct ceb_root *const *root, const void *ctx, int sub), |
1762 | | void (*node_dump)(ptrdiff_t kofs, enum ceb_key_type key_type, const struct ceb_node *node, int level, const void *ctx, int sub), |
1763 | | void (*dups_dump)(ptrdiff_t kofs, enum ceb_key_type key_type, const struct ceb_node *node, int level, const void *ctx, int sub), |
1764 | | void (*leaf_dump)(ptrdiff_t kofs, enum ceb_key_type key_type, const struct ceb_node *node, int level, const void *ctx, int sub)); |
1765 | | //#endif /* CEB_ENABLE_DUMP */ |
1766 | | |
1767 | | #endif /* _CEBTREE_PRV_H */ |