/src/haproxy/include/import/ebtree.h
Line | Count | Source |
1 | | /* |
2 | | * Elastic Binary Trees - generic macros and structures. |
3 | | * Version 6.0.6 |
4 | | * (C) 2002-2011 - Willy Tarreau <w@1wt.eu> |
5 | | * |
6 | | * This library is free software; you can redistribute it and/or |
7 | | * modify it under the terms of the GNU Lesser General Public |
8 | | * License as published by the Free Software Foundation, version 2.1 |
9 | | * exclusively. |
10 | | * |
11 | | * This library is distributed in the hope that it will be useful, |
12 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 | | * Lesser General Public License for more details. |
15 | | * |
16 | | * You should have received a copy of the GNU Lesser General Public |
17 | | * License along with this library; if not, write to the Free Software |
18 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
19 | | */ |
20 | | |
21 | | |
22 | | |
23 | | /* |
24 | | General idea: |
25 | | ------------- |
26 | | In a radix binary tree, we may have up to 2N-1 nodes for N keys if all of |
27 | | them are leaves. If we find a way to differentiate intermediate nodes (later |
28 | | called "nodes") and final nodes (later called "leaves"), and we associate |
29 | | them by two, it is possible to build sort of a self-contained radix tree with |
30 | | intermediate nodes always present. It will not be as cheap as the ultree for |
31 | | optimal cases as shown below, but the optimal case almost never happens : |
32 | | |
33 | | Eg, to store 8, 10, 12, 13, 14 : |
34 | | |
35 | | ultree this theoretical tree |
36 | | |
37 | | 8 8 |
38 | | / \ / \ |
39 | | 10 12 10 12 |
40 | | / \ / \ |
41 | | 13 14 12 14 |
42 | | / \ |
43 | | 12 13 |
44 | | |
45 | | Note that on real-world tests (with a scheduler), it was verified that the |
46 | | case with data on an intermediate node never happens. This is because the |
47 | | data spectrum is too large for such coincidences to happen. It would require |
48 | | for instance that a task has its expiration time at an exact second, with |
49 | | other tasks sharing that second. This is too rare to try to optimize for it. |
50 | | |
51 | | What is interesting is that the node will only be added above the leaf when |
52 | | necessary, which implies that it will always remain somewhere above it. So |
53 | | both the leaf and the node can share the exact value of the leaf, because |
54 | | when going down the node, the bit mask will be applied to comparisons. So we |
55 | | are tempted to have one single key shared between the node and the leaf. |
56 | | |
57 | | The bit only serves the nodes, and the dups only serve the leaves. So we can |
58 | | put a lot of information in common. This results in one single entity with |
59 | | two branch pointers and two parent pointers, one for the node part, and one |
60 | | for the leaf part : |
61 | | |
62 | | node's leaf's |
63 | | parent parent |
64 | | | | |
65 | | [node] [leaf] |
66 | | / \ |
67 | | left right |
68 | | branch branch |
69 | | |
70 | | The node may very well refer to its leaf counterpart in one of its branches, |
71 | | indicating that its own leaf is just below it : |
72 | | |
73 | | node's |
74 | | parent |
75 | | | |
76 | | [node] |
77 | | / \ |
78 | | left [leaf] |
79 | | branch |
80 | | |
81 | | Adding keys in such a tree simply consists in inserting nodes between |
82 | | other nodes and/or leaves : |
83 | | |
84 | | [root] |
85 | | | |
86 | | [node2] |
87 | | / \ |
88 | | [leaf1] [node3] |
89 | | / \ |
90 | | [leaf2] [leaf3] |
91 | | |
92 | | On this diagram, we notice that [node2] and [leaf2] have been pulled away |
93 | | from each other due to the insertion of [node3], just as if there were |
94 | | an elastic between both parts. This elastic-like behaviour gave its name to |
95 | | the tree : "Elastic Binary Tree", or "EBtree". The entity which associates a |
96 | | node part and a leaf part will be called an "EB node". |
97 | | |
98 | | We also notice on the diagram that there is a root entity required to attach |
99 | | the tree. It only contains two branches and there is nothing above it. This |
100 | | is an "EB root". Some will note that [leaf1] has no [node1]. One property of |
101 | | the EBtree is that all nodes have their branches filled, and that if a node |
102 | | has only one branch, it does not need to exist. Here, [leaf1] was added |
103 | | below [root] and did not need any node. |
104 | | |
105 | | An EB node contains : |
106 | | - a pointer to the node's parent (node_p) |
107 | | - a pointer to the leaf's parent (leaf_p) |
108 | | - two branches pointing to lower nodes or leaves (branches) |
109 | | - a bit position (bit) |
110 | | - an optional key. |
111 | | |
112 | | The key here is optional because it's used only during insertion, in order |
113 | | to classify the nodes. Nothing else in the tree structure requires knowledge |
114 | | of the key. This makes it possible to write type-agnostic primitives for |
115 | | everything, and type-specific insertion primitives. This has led us to consider |
116 | | two types of EB nodes. The type-agnostic ones will serve as a header for the |
117 | | other ones, and will simply be called "struct eb_node". The other ones will |
118 | | have their type indicated in the structure name. Eg: "struct eb32_node" for |
119 | | nodes carrying 32 bit keys. |
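
  As an illustration, the node layout described above roughly maps to the
  following sketch. This is a simplified view: the authoritative definitions
  live in import/ebtree-t.h (included below), which also carries a few extra
  fields used by other tree variants.

      struct eb_root {
              eb_troot_t *b[2];          // two branches: b[EB_LEFT], b[EB_RGHT]
      };

      struct eb_node {
              struct eb_root branches;   // the node part's two branches
              eb_troot_t *node_p;        // tagged pointer to the node's parent
              eb_troot_t *leaf_p;        // tagged pointer to the leaf's parent
              short int bit;             // split bit position, < 0 for duplicates
      };

      struct eb32_node {                 // typed node carrying a 32-bit key
              struct eb_node node;       // generic part, must come first
              u32 key;                   // u32 is HAProxy's 32-bit unsigned typedef
      };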
120 | | |
121 | | We will also note that the two branches in a node serve exactly the same |
122 | | purpose as an EB root. For this reason, a "struct eb_root" will be used as |
123 | | well inside the struct eb_node. In order to ease pointer manipulation and |
124 | | ROOT detection when walking upwards, all the pointers inside an eb_node will |
125 | | point to the eb_root part of the referenced EB nodes, relying on the same |
126 | | principle as the linked lists in Linux. |
127 | | |
128 | | Another important point to note is that when walking inside a tree, it is |
129 | | very convenient to know where a node is attached in its parent, and what |
130 | | type of branch it has below it (leaf or node). In order to simplify the |
131 | | operations and to speed up the processing, it was decided in this specific |
132 | | implementation to use the lowest bit from the pointer to designate the side |
133 | | of the upper pointers (left/right) and the type of a branch (leaf/node). |
134 | | This practice is not mandatory by design, but is an implementation-specific |
135 | | optimisation permitted on all platforms on which data must be aligned. All |
136 | | known 32 bit platforms align their integers and pointers to 32 bits, leaving |
137 | | the two lower bits unused. So, we say that the pointers are "tagged". And |
138 | | since they designate pointers to root parts, we simply call them |
139 | | "tagged root pointers", or "eb_troot" in the code. |
140 | | |
141 | | Duplicate keys are stored in a special manner. When inserting a key, if |
142 | | the same one is found, then an incremental binary tree is built at this |
143 | | place from these keys. This ensures that no special case has to be written |
144 | | to handle duplicates when walking through the tree or when deleting entries. |
145 | | It also guarantees that duplicates will be walked in the exact same order |
146 | | they were inserted. This is very important when trying to achieve fair |
147 | | processing distribution for instance. |
148 | | |
149 | | Algorithmic complexity can be derived from 3 variables : |
150 | | - the number of possible different keys in the tree : P |
151 | | - the number of entries in the tree : N |
152 | | - the number of duplicates for one key : D |
153 | | |
154 | | Note that this tree is deliberately NOT balanced. For this reason, the worst |
155 | | case may happen with a small tree (eg: 32 distinct keys of one bit). BUT, |
156 | | the operations required to manage such data are so cheap that they make it |
157 | | worth using even under such conditions. For instance, a balanced tree |
158 | | may require only 6 levels to store those 32 keys when this tree will |
159 | | require 32. But if per-level operations are 5 times cheaper, it wins. |
160 | | |
161 | | Minimal, Maximal and Average times are specified in number of operations. |
162 | | Minimal is given for best condition, Maximal for worst condition, and the |
163 | | average is reported for a tree containing random keys. An operation |
164 | | generally consists in jumping from one node to another. |
165 | | |
166 | | Complexity : |
167 | | - lookup : min=1, max=log(P), avg=log(N) |
168 | | - insertion from root : min=1, max=log(P), avg=log(N) |
169 | | - insertion of dups : min=1, max=log(D), avg=log(D)/2 after lookup |
170 | | - deletion : min=1, max=1, avg=1 |
171 | | - prev/next : min=1, max=log(P), avg=2 : |
172 | | N/2 nodes need 1 hop => 1*N/2 |
173 | | N/4 nodes need 2 hops => 2*N/4 |
174 | | N/8 nodes need 3 hops => 3*N/8 |
175 | | ... |
176 | | N/x nodes need log2(x) hops => log2(x)*N/x |
177 | | Total cost for all N nodes : sum[k=1..log2(N)](k*N/2^k) ~= 2*N |
178 | | Average cost across N nodes = total / N ~= sum[k=1..inf](k/2^k) = 2 |
179 | | |
180 | | This design is currently limited to only two branches per node. Most of the |
181 | | tree descent algorithm would be compatible with more branches (eg: 4, to cut |
182 | | the height in half), but this would probably require more complex operations |
183 | | and the deletion algorithm would be problematic. |
184 | | |
185 | | Useful properties : |
186 | | - a node is always added above the leaf it is tied to, and can never get |
187 | | below it nor into another branch. This implies that leaves directly attached |
188 | | to the root do not use their node part, which is indicated by a NULL |
189 | | value in node_p. This also enhances the cache efficiency when walking |
190 | | down the tree, because when the leaf is reached, its node part will |
191 | | already have been visited (unless it's the first leaf in the tree). |
192 | | |
193 | | - pointers to lower nodes or leaves are stored in "branch" pointers. Only |
194 | | the root node may have a NULL in either branch; this is not possible for |
195 | | other nodes' branches. Since the nodes are attached to the left branch of the |
196 | | root, it is not possible to see a NULL left branch when walking up a |
197 | | tree. Thus, an empty tree is immediately identified by a NULL left |
198 | | branch at the root. Conversely, the one and only way to identify the |
199 | | root node is to check that its right branch is NULL. Note that the |
200 | | NULL pointer may have a few low-order bits set. |
201 | | |
202 | | - a node connected to its own leaf will have branch[0|1] pointing to |
203 | | itself, and leaf_p pointing to itself. |
204 | | |
205 | | - a node can never have node_p pointing to itself. |
206 | | |
207 | | - a node is linked in a tree if and only if it has a non-null leaf_p. |
208 | | |
209 | | - a node can never have both branches equal, except for the root which can |
210 | | have them both NULL. |
211 | | |
212 | | - deletion only applies to leaves. When a leaf is deleted, its parent must |
213 | | be released too (unless it's the root), and its sibling must attach to |
214 | | the grand-parent, replacing the parent. Also, when a leaf is deleted, |
215 | | the node tied to this leaf will be removed and must be released too. If |
216 | | this node is different from the leaf's parent, the freshly released |
217 | | leaf's parent will be used to replace the node which must go. A released |
218 | | node will never be used anymore, so there's no point in tracking it. |
219 | | |
220 | | - the bit index in a node indicates the bit position in the key which is |
221 | | represented by the branches. That means that a node with (bit == 0) is |
222 | | just above two leaves. Negative bit values are used to build a duplicate |
223 | | tree. The first node above two identical leaves gets (bit == -1). This |
224 | | value logarithmically decreases as the duplicate tree grows. During |
225 | | duplicate insertion, a node is inserted above the highest bit value (the |
226 | | lowest absolute value) in the tree during the right-sided walk. If bit |
227 | | -1 is not encountered (highest < -1), we insert above the last leaf. |
228 | | Otherwise, we insert above the node with the highest value which was not |
229 | | equal to its parent's value + 1. |
230 | | |
231 | | - the "eb_next" primitive walks from left to right, which means from lower |
232 | | to higher keys. It returns duplicates in the order they were inserted. |
233 | | The "eb_first" primitive returns the left-most entry. |
234 | | |
235 | | - the "eb_prev" primitive walks from right to left, which means from |
236 | | higher to lower keys. It returns duplicates in the opposite order they |
237 | | were inserted. The "eb_last" primitive returns the right-most entry. |
238 | | |
239 | | - a tree which has 1 in the lower bit of its root's right branch is a |
240 | | tree with unique nodes. This means that a node inserted with a key |
241 | | which already exists will not be added, and the previous |
242 | | entry will be returned instead. |
243 | | |
244 | | */ |
245 | | |
246 | | #ifndef _EBTREE_H |
247 | | #define _EBTREE_H |
248 | | |
249 | | #include <stdlib.h> |
250 | | #include <import/ebtree-t.h> |
251 | | #include <haproxy/api.h> |
252 | | |
253 | | /* returns clz from 7 to 0 for 0x01 to 0xFF. Returns 7 for 0 as well. */ |
254 | | static inline unsigned int clz8(unsigned char c) |
255 | 0 | { |
256 | 0 | unsigned int r = 4; |
257 | 0 |
|
258 | 0 | if (c & 0xf0) { |
259 | 0 | r = 0; |
260 | 0 | c >>= 4; |
261 | 0 | } |
262 | 0 | return r + ((0x000055afU >> (c * 2)) & 0x3); |
263 | 0 | } Unexecuted instantiation: haproxy.c:clz8 Unexecuted instantiation: fuzz_hpack_decode.c:clz8 |
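
A few expected values from the table-based helper above, for illustration:

        clz8(0x01);   /* -> 7 */
        clz8(0x10);   /* -> 3 */
        clz8(0x80);   /* -> 0 */
        clz8(0x00);   /* -> 7, zero is reported like 0x01 */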
264 | | |
265 | | /* FLSNZ: find last set bit for non-zero value. "Last" here means the highest |
266 | | * one. It returns a value from 1 to 32 for 1<<0 to 1<<31. |
267 | | */ |
268 | | |
269 | | #if (defined(__i386__) || defined(__x86_64__)) && !defined(__atom__) |
270 | | /* DO NOT USE ON ATOM! The instruction is emulated and is several times slower |
271 | | * than doing the math by hand. |
272 | | */ |
273 | | static inline unsigned int flsnz32(unsigned int x) |
274 | 0 | { |
275 | 0 | unsigned int r; |
276 | 0 | __asm__("bsrl %1,%0\n" |
277 | 0 | : "=r" (r) : "rm" (x)); |
278 | 0 | return r + 1; |
279 | 0 | } Unexecuted instantiation: haproxy.c:flsnz32 Unexecuted instantiation: fuzz_hpack_decode.c:flsnz32 |
280 | | #define flsnz32(x) flsnz32(x) |
281 | | |
282 | | # if defined(__x86_64__) |
283 | | static inline unsigned int flsnz64(unsigned long long x) |
284 | 0 | { |
285 | 0 | unsigned long long r; |
286 | 0 | __asm__("bsrq %1,%0\n" |
287 | 0 | : "=r" (r) : "rm" (x)); |
288 | 0 | return r + 1; |
289 | 0 | } Unexecuted instantiation: haproxy.c:flsnz64 Unexecuted instantiation: fuzz_hpack_decode.c:flsnz64 |
290 | | # define flsnz64(x) flsnz64(x) |
291 | | # endif |
292 | | |
293 | | #elif !defined(__atom__) && defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 2))) |
294 | | /* gcc >= 4.2 brings __builtin_clz() and __builtin_clzl(), usable for non-x86 */ |
295 | | |
296 | | static inline unsigned int flsnz32(unsigned int x) |
297 | | { |
298 | | return 32 - __builtin_clz(x); |
299 | | } |
300 | | # define flsnz32(x) flsnz32(x) |
301 | | |
302 | | # if defined(__SIZEOF_LONG__) && (__SIZEOF_LONG__ > 4) |
303 | | static inline unsigned int flsnz64(unsigned long x) |
304 | | { |
305 | | return (__SIZEOF_LONG__ * 8) - __builtin_clzl(x); |
306 | | } |
307 | | # define flsnz64(x) flsnz64(x) |
308 | | # endif |
309 | | |
310 | | #endif /* end of arch-specific implementations */ |
311 | | |
312 | | /*** Fallback versions below ***/ |
313 | | |
314 | | #ifndef flsnz8 |
315 | | # if defined(flsnz32) |
316 | | # define flsnz8(x) flsnz32((unsigned char)x) |
317 | | # else |
318 | | static inline unsigned int flsnz8(unsigned int x) |
319 | | { |
320 | | unsigned int ret = 0; |
321 | | if (x >> 4) { x >>= 4; ret += 4; } |
322 | | return ret + ((0xFFFFAA50U >> (x << 1)) & 3) + 1; |
323 | | } |
324 | | # define flsnz8(x) flsnz8(x) |
325 | | # endif |
326 | | #endif |
327 | | |
328 | | #ifndef flsnz32 |
329 | | # define flsnz32(___a) ({ \ |
330 | | register unsigned int ___x, ___bits = 0; \ |
331 | | ___x = (___a); \ |
332 | | if (___x & 0xffff0000) { ___x &= 0xffff0000; ___bits += 16;} \ |
333 | | if (___x & 0xff00ff00) { ___x &= 0xff00ff00; ___bits += 8;} \ |
334 | | if (___x & 0xf0f0f0f0) { ___x &= 0xf0f0f0f0; ___bits += 4;} \ |
335 | | if (___x & 0xcccccccc) { ___x &= 0xcccccccc; ___bits += 2;} \ |
336 | | if (___x & 0xaaaaaaaa) { ___x &= 0xaaaaaaaa; ___bits += 1;} \ |
337 | | ___bits + 1; \ |
338 | | }) |
339 | | #endif |
340 | | |
341 | | #ifndef flsnz64 |
342 | | static inline unsigned int flsnz64(unsigned long long x) |
343 | | { |
344 | | unsigned int h; |
345 | | unsigned int bits = 32; |
346 | | |
347 | | h = x >> 32; |
348 | | if (!h) { |
349 | | h = x; |
350 | | bits = 0; |
351 | | } |
352 | | return flsnz32(h) + bits; |
353 | | } |
354 | | # define flsnz64(x) flsnz64(x) |
355 | | #endif |
356 | | |
357 | | #ifndef flsnz_long |
358 | | # define flsnz_long(x) ((sizeof(long) > 4) ? flsnz64(x) : flsnz32(x)) |
359 | | #endif |
360 | | |
361 | | #ifndef flsnz |
362 | | # define flsnz(x) ((sizeof(x) > 4) ? flsnz64(x) : (sizeof(x) > 1) ? flsnz32(x) : flsnz8(x)) |
363 | | #endif |
364 | | |
365 | | #define fls64(x) flsnz64(x) |
366 | | #define fls_auto(x) ((x) ? flsnz(x) : 0) |
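
A few expected values for the helpers above, for illustration (whichever of
the architecture-specific or fallback versions is selected):

        flsnz32(0x00000001);   /* -> 1  */
        flsnz32(0x80000000);   /* -> 32 */
        flsnz64(1ULL << 40);   /* -> 41 */
        flsnz(0x10);           /* -> 5, an int argument dispatches to flsnz32() */
        flsnz8(0x10);          /* -> 5  */
        fls_auto(0);           /* -> 0, the only variant accepting zero */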
367 | | |
368 | | /* Linux-like "container_of". It returns a pointer to the structure of type |
369 | | * <type> which has its member <name> stored at address <ptr>. |
370 | | */ |
371 | | #ifndef container_of |
372 | | #define container_of(ptr, type, name) ((type *)(((void *)(ptr)) - ((long)&((type *)0)->name))) |
373 | | #endif |
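
A short usage sketch: given a typed node embedding a generic eb_node (as in
the eb32_node illustration at the top of this file), container_of() recovers
the enclosing structure from a pointer to its member:

        struct eb32_node n;
        struct eb_node *member = &n.node;

        /* walks back from the member to its enclosing structure */
        struct eb32_node *owner = container_of(member, struct eb32_node, node);
        /* owner == &n */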
374 | | |
375 | | /* returns a pointer to the structure of type <type> which has its member <name> |
376 | | * stored at address <ptr>, unless <ptr> is 0, in which case 0 is returned. |
377 | | */ |
378 | | #ifndef container_of_safe |
379 | | #define container_of_safe(ptr, type, name) \ |
380 | | ({ void *__p = (ptr); \ |
381 | | __p ? (type *)(__p - ((long)&((type *)0)->name)) : (type *)0; \ |
382 | | }) |
383 | | #endif |
384 | | |
385 | | /* Return the structure of type <type> whose member <member> points to <ptr> */ |
386 | | #define eb_entry(ptr, type, member) container_of(ptr, type, member) |
387 | | |
388 | | /***************************************\ |
389 | | * Private functions. Not for end-user * |
390 | | \***************************************/ |
391 | | |
392 | | /* Converts a root pointer to its equivalent eb_troot_t pointer, |
393 | | * ready to be stored in ->branch[], leaf_p or node_p. NULL is not |
394 | | * conserved. To be used with EB_LEAF, EB_NODE, EB_LEFT or EB_RGHT in <tag>. |
395 | | */ |
396 | | static inline eb_troot_t *eb_dotag(const struct eb_root *root, const int tag) |
397 | 0 | { |
398 | 0 | return (eb_troot_t *)((void *)root + tag); |
399 | 0 | } Unexecuted instantiation: haproxy.c:eb_dotag Unexecuted instantiation: fuzz_hpack_decode.c:eb_dotag |
400 | | |
401 | | /* Converts an eb_troot_t pointer to its equivalent eb_root pointer, |
402 | | * for use with pointers from ->branch[], leaf_p or node_p. NULL is conserved |
403 | | * as long as the tree is not corrupted. To be used with EB_LEAF, EB_NODE, |
404 | | * EB_LEFT or EB_RGHT in <tag>. |
405 | | */ |
406 | | static inline struct eb_root *eb_untag(const eb_troot_t *troot, const int tag) |
407 | 0 | { |
408 | 0 | return (struct eb_root *)((void *)troot - tag); |
409 | 0 | } Unexecuted instantiation: haproxy.c:eb_untag Unexecuted instantiation: fuzz_hpack_decode.c:eb_untag |
410 | | |
411 | | /* returns the tag associated with an eb_troot_t pointer */ |
412 | | static inline int eb_gettag(eb_troot_t *troot) |
413 | 0 | { |
414 | 0 | return (unsigned long)troot & 1; |
415 | 0 | } Unexecuted instantiation: haproxy.c:eb_gettag Unexecuted instantiation: fuzz_hpack_decode.c:eb_gettag |
416 | | |
417 | | /* Converts an eb_troot_t pointer to its equivalent eb_root pointer and clears |
418 | | * the tag, no matter what its value was. |
419 | | */ |
420 | | static inline struct eb_root *eb_clrtag(const eb_troot_t *troot) |
421 | 0 | { |
422 | 0 | return (struct eb_root *)((unsigned long)troot & ~1UL); |
423 | 0 | } Unexecuted instantiation: haproxy.c:eb_clrtag Unexecuted instantiation: fuzz_hpack_decode.c:eb_clrtag |
424 | | |
425 | | /* Returns a pointer to the eb_node holding <root> */ |
426 | | static inline struct eb_node *eb_root_to_node(struct eb_root *root) |
427 | 0 | { |
428 | 0 | return container_of(root, struct eb_node, branches); |
429 | 0 | } Unexecuted instantiation: haproxy.c:eb_root_to_node Unexecuted instantiation: fuzz_hpack_decode.c:eb_root_to_node |
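
The four helpers above are typically combined as follows to decode a branch
pointer taken from a parent. This is a sketch of the pattern used by the
walking primitives below; <parent> is assumed to be an eb_node whose node
part is linked in a tree:

        eb_troot_t *t = parent->branches.b[EB_LEFT];

        if (eb_gettag(t) == EB_LEAF) {
                /* the branch holds a leaf part */
                struct eb_node *leaf = eb_root_to_node(eb_untag(t, EB_LEAF));
        } else {
                /* the branch holds a node part */
                struct eb_node *down = eb_root_to_node(eb_untag(t, EB_NODE));
        }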
430 | | |
431 | | /* Walks down starting at root pointer <start>, and always walking on side |
432 | | * <side>. It either returns the node hosting the first leaf on that side, |
433 | | * or NULL if no leaf is found. <start> may either be NULL or a branch pointer. |
434 | | * The pointer to the leaf (or NULL) is returned. |
435 | | */ |
436 | | static inline struct eb_node *eb_walk_down(eb_troot_t *start, unsigned int side) |
437 | 0 | { |
438 | 0 | /* A NULL pointer on an empty tree root will be returned as-is */ |
439 | 0 | while (eb_gettag(start) == EB_NODE) |
440 | 0 | start = (eb_untag(start, EB_NODE))->b[side]; |
441 | 0 | /* NULL is left untouched (root==eb_node, EB_LEAF==0) */ |
442 | 0 | return eb_root_to_node(eb_untag(start, EB_LEAF)); |
443 | 0 | } Unexecuted instantiation: haproxy.c:eb_walk_down Unexecuted instantiation: fuzz_hpack_decode.c:eb_walk_down |
444 | | |
445 | | /* This function is used to build a tree of duplicates by adding a new node to |
446 | | * a subtree of at least 2 entries. It will probably never be needed inlined, |
447 | | * and it is not for end-user. |
448 | | */ |
449 | | static forceinline struct eb_node * |
450 | | __eb_insert_dup(struct eb_node *sub, struct eb_node *new) |
451 | 0 | { |
452 | 0 | struct eb_node *head = sub; |
453 | 0 | |
454 | 0 | eb_troot_t *new_left = eb_dotag(&new->branches, EB_LEFT); |
455 | 0 | eb_troot_t *new_rght = eb_dotag(&new->branches, EB_RGHT); |
456 | 0 | eb_troot_t *new_leaf = eb_dotag(&new->branches, EB_LEAF); |
457 | 0 |
|
458 | 0 | /* first, identify the deepest hole on the right branch */ |
459 | 0 | while (eb_gettag(head->branches.b[EB_RGHT]) != EB_LEAF) { |
460 | 0 | struct eb_node *last = head; |
461 | 0 | head = container_of(eb_untag(head->branches.b[EB_RGHT], EB_NODE), |
462 | 0 | struct eb_node, branches); |
463 | 0 | if (head->bit > last->bit + 1) |
464 | 0 | sub = head; /* there's a hole here */ |
465 | 0 | } |
466 | 0 |
|
467 | 0 | /* Here we have a leaf attached to (head)->b[EB_RGHT] */ |
468 | 0 | if (head->bit < -1) { |
469 | 0 | /* A hole exists just before the leaf, we insert there */ |
470 | 0 | new->bit = -1; |
471 | 0 | sub = container_of(eb_untag(head->branches.b[EB_RGHT], EB_LEAF), |
472 | 0 | struct eb_node, branches); |
473 | 0 | head->branches.b[EB_RGHT] = eb_dotag(&new->branches, EB_NODE); |
474 | 0 |
|
475 | 0 | new->node_p = sub->leaf_p; |
476 | 0 | new->leaf_p = new_rght; |
477 | 0 | sub->leaf_p = new_left; |
478 | 0 | new->branches.b[EB_LEFT] = eb_dotag(&sub->branches, EB_LEAF); |
479 | 0 | new->branches.b[EB_RGHT] = new_leaf; |
480 | 0 | return new; |
481 | 0 | } else { |
482 | 0 | int side; |
483 | 0 | /* No hole was found before a leaf. We have to insert above |
484 | 0 | * <sub>. Note that we cannot be certain that <sub> is attached |
485 | 0 | * to the right of its parent, as this is only true if <sub> |
486 | 0 | * is inside the dup tree, not at the head. |
487 | 0 | */ |
488 | 0 | new->bit = sub->bit - 1; /* install at the lowest level */ |
489 | 0 | side = eb_gettag(sub->node_p); |
490 | 0 | head = container_of(eb_untag(sub->node_p, side), struct eb_node, branches); |
491 | 0 | head->branches.b[side] = eb_dotag(&new->branches, EB_NODE); |
492 | 0 | |
493 | 0 | new->node_p = sub->node_p; |
494 | 0 | new->leaf_p = new_rght; |
495 | 0 | sub->node_p = new_left; |
496 | 0 | new->branches.b[EB_LEFT] = eb_dotag(&sub->branches, EB_NODE); |
497 | 0 | new->branches.b[EB_RGHT] = new_leaf; |
498 | 0 | return new; |
499 | 0 | } |
500 | 0 | } Unexecuted instantiation: haproxy.c:__eb_insert_dup Unexecuted instantiation: fuzz_hpack_decode.c:__eb_insert_dup |
501 | | |
502 | | |
503 | | /**************************************\ |
504 | | * Public functions, for the end-user * |
505 | | \**************************************/ |
506 | | |
507 | | /* Return non-zero if the tree is empty, otherwise zero */ |
508 | | static inline int eb_is_empty(const struct eb_root *root) |
509 | 0 | { |
510 | 0 | return !root->b[EB_LEFT]; |
511 | 0 | } Unexecuted instantiation: haproxy.c:eb_is_empty Unexecuted instantiation: fuzz_hpack_decode.c:eb_is_empty |
512 | | |
513 | | /* Return non-zero if the node is a duplicate, otherwise zero */ |
514 | | static inline int eb_is_dup(const struct eb_node *node) |
515 | 0 | { |
516 | 0 | return node->bit < 0; |
517 | 0 | } Unexecuted instantiation: haproxy.c:eb_is_dup Unexecuted instantiation: fuzz_hpack_decode.c:eb_is_dup |
518 | | |
519 | | /* Return the first leaf in the tree starting at <root>, or NULL if none */ |
520 | | static inline struct eb_node *eb_first(struct eb_root *root) |
521 | 0 | { |
522 | 0 | return eb_walk_down(root->b[0], EB_LEFT); |
523 | 0 | } Unexecuted instantiation: haproxy.c:eb_first Unexecuted instantiation: fuzz_hpack_decode.c:eb_first |
524 | | |
525 | | /* Return the last leaf in the tree starting at <root>, or NULL if none */ |
526 | | static inline struct eb_node *eb_last(struct eb_root *root) |
527 | 0 | { |
528 | 0 | return eb_walk_down(root->b[0], EB_RGHT); |
529 | 0 | } Unexecuted instantiation: haproxy.c:eb_last Unexecuted instantiation: fuzz_hpack_decode.c:eb_last |
530 | | |
531 | | /* Return previous leaf node before an existing leaf node, or NULL if none. */ |
532 | | static inline struct eb_node *eb_prev(struct eb_node *node) |
533 | 0 | { |
534 | 0 | eb_troot_t *t = node->leaf_p; |
535 | 0 |
|
536 | 0 | while (eb_gettag(t) == EB_LEFT) { |
537 | 0 | /* Walking up from left branch. We must ensure that we never |
538 | 0 | * walk beyond root. |
539 | 0 | */ |
540 | 0 | if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL)) |
541 | 0 | return NULL; |
542 | 0 | t = (eb_root_to_node(eb_untag(t, EB_LEFT)))->node_p; |
543 | 0 | } |
544 | 0 | /* Note that <t> cannot be NULL at this stage */ |
545 | 0 | t = (eb_untag(t, EB_RGHT))->b[EB_LEFT]; |
546 | 0 | return eb_walk_down(t, EB_RGHT); |
547 | 0 | } Unexecuted instantiation: haproxy.c:eb_prev Unexecuted instantiation: fuzz_hpack_decode.c:eb_prev |
548 | | |
549 | | /* Return next leaf node after an existing leaf node, or NULL if none. */ |
550 | | static inline struct eb_node *eb_next(struct eb_node *node) |
551 | 0 | { |
552 | 0 | eb_troot_t *t = node->leaf_p; |
553 | 0 |
|
554 | 0 | while (eb_gettag(t) != EB_LEFT) |
555 | 0 | /* Walking up from right branch, so we cannot be below root */ |
556 | 0 | t = (eb_root_to_node(eb_untag(t, EB_RGHT)))->node_p; |
557 | 0 |
|
558 | 0 | /* Note that <t> cannot be NULL at this stage */ |
559 | 0 | t = (eb_untag(t, EB_LEFT))->b[EB_RGHT]; |
560 | 0 | if (eb_clrtag(t) == NULL) |
561 | 0 | return NULL; |
562 | 0 | return eb_walk_down(t, EB_LEFT); |
563 | 0 | } Unexecuted instantiation: haproxy.c:eb_next Unexecuted instantiation: fuzz_hpack_decode.c:eb_next |
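
A typical ascending walk over a whole tree, as a sketch. It assumes eb32_node
entries were inserted with eb32_insert() from eb32tree.h (not part of this
header), and EB_ROOT is the empty-tree initializer from ebtree-t.h:

        struct eb_root root = EB_ROOT;   /* empty tree */
        struct eb_node *node;

        /* ... entries inserted here ... */

        for (node = eb_first(&root); node; node = eb_next(node)) {
                struct eb32_node *item = eb_entry(node, struct eb32_node, node);
                /* item->key is visited in ascending order, duplicates in
                 * insertion order
                 */
        }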
564 | | |
565 | | /* Return previous leaf node within a duplicate sub-tree, or NULL if none. */ |
566 | | static inline struct eb_node *eb_prev_dup(struct eb_node *node) |
567 | 0 | { |
568 | 0 | eb_troot_t *t = node->leaf_p; |
569 | 0 |
|
570 | 0 | while (eb_gettag(t) == EB_LEFT) { |
571 | 0 | /* Walking up from left branch. We must ensure that we never |
572 | 0 | * walk beyond root. |
573 | 0 | */ |
574 | 0 | if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL)) |
575 | 0 | return NULL; |
576 | 0 | /* if the current node leaves a dup tree, quit */ |
577 | 0 | if ((eb_root_to_node(eb_untag(t, EB_LEFT)))->bit >= 0) |
578 | 0 | return NULL; |
579 | 0 | t = (eb_root_to_node(eb_untag(t, EB_LEFT)))->node_p; |
580 | 0 | } |
581 | 0 | /* Note that <t> cannot be NULL at this stage */ |
582 | 0 | if ((eb_root_to_node(eb_untag(t, EB_RGHT)))->bit >= 0) |
583 | 0 | return NULL; |
584 | 0 | t = (eb_untag(t, EB_RGHT))->b[EB_LEFT]; |
585 | 0 | return eb_walk_down(t, EB_RGHT); |
586 | 0 | } Unexecuted instantiation: haproxy.c:eb_prev_dup Unexecuted instantiation: fuzz_hpack_decode.c:eb_prev_dup |
587 | | |
588 | | /* Return next leaf node within a duplicate sub-tree, or NULL if none. */ |
589 | | static inline struct eb_node *eb_next_dup(struct eb_node *node) |
590 | 0 | { |
591 | 0 | eb_troot_t *t = node->leaf_p; |
592 | 0 |
|
593 | 0 | while (eb_gettag(t) != EB_LEFT) { |
594 | 0 | /* Walking up from right branch, so we cannot be below root */ |
595 | 0 | /* if the current node leaves a dup tree, quit */ |
596 | 0 | if ((eb_root_to_node(eb_untag(t, EB_RGHT)))->bit >= 0) |
597 | 0 | return NULL; |
598 | 0 | t = (eb_root_to_node(eb_untag(t, EB_RGHT)))->node_p; |
599 | 0 | } |
600 | 0 |
|
601 | 0 | /* Note that <t> cannot be NULL at this stage. If our leaf is directly |
602 | 0 | * under the root, we must not try to cast the leaf_p into a eb_node* |
603 | 0 | * since it is a pointer to an eb_root. |
604 | 0 | */ |
605 | 0 | if (eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL) |
606 | 0 | return NULL; |
607 | 0 |
|
608 | 0 | if ((eb_root_to_node(eb_untag(t, EB_LEFT)))->bit >= 0) |
609 | 0 | return NULL; |
610 | 0 | t = (eb_untag(t, EB_LEFT))->b[EB_RGHT]; |
611 | 0 | return eb_walk_down(t, EB_LEFT); |
612 | 0 | } Unexecuted instantiation: haproxy.c:eb_next_dup Unexecuted instantiation: fuzz_hpack_decode.c:eb_next_dup |
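
For illustration, walking all entries sharing the key of a leaf <node>. This
is a sketch; <node> is assumed to be a linked leaf and, if it is the first
entry inserted for that key, the loop visits every duplicate in insertion
order:

        struct eb_node *dup;

        for (dup = node; dup; dup = eb_next_dup(dup)) {
                /* <dup> carries the same key as <node> */
        }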
613 | | |
614 | | /* Return previous leaf node before an existing leaf node, skipping duplicates, |
615 | | * or NULL if none. */ |
616 | | static inline struct eb_node *eb_prev_unique(struct eb_node *node) |
617 | 0 | { |
618 | 0 | eb_troot_t *t = node->leaf_p; |
619 | 0 |
|
620 | 0 | while (1) { |
621 | 0 | if (eb_gettag(t) != EB_LEFT) { |
622 | 0 | node = eb_root_to_node(eb_untag(t, EB_RGHT)); |
623 | 0 | /* if we're right and not in duplicates, stop here */ |
624 | 0 | if (node->bit >= 0) |
625 | 0 | break; |
626 | 0 | t = node->node_p; |
627 | 0 | } |
628 | 0 | else { |
629 | 0 | /* Walking up from left branch. We must ensure that we never |
630 | 0 | * walk beyond root. |
631 | 0 | */ |
632 | 0 | if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL)) |
633 | 0 | return NULL; |
634 | 0 | t = (eb_root_to_node(eb_untag(t, EB_LEFT)))->node_p; |
635 | 0 | } |
636 | 0 | } |
637 | 0 | /* Note that <t> cannot be NULL at this stage */ |
638 | 0 | t = (eb_untag(t, EB_RGHT))->b[EB_LEFT]; |
639 | 0 | return eb_walk_down(t, EB_RGHT); |
640 | 0 | } Unexecuted instantiation: haproxy.c:eb_prev_unique Unexecuted instantiation: fuzz_hpack_decode.c:eb_prev_unique |
641 | | |
642 | | /* Return next leaf node after an existing leaf node, skipping duplicates, or |
643 | | * NULL if none. |
644 | | */ |
645 | | static inline struct eb_node *eb_next_unique(struct eb_node *node) |
646 | 0 | { |
647 | 0 | eb_troot_t *t = node->leaf_p; |
648 | 0 |
|
649 | 0 | while (1) { |
650 | 0 | if (eb_gettag(t) == EB_LEFT) { |
651 | 0 | if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL)) |
652 | 0 | return NULL; /* we reached root */ |
653 | 0 | node = eb_root_to_node(eb_untag(t, EB_LEFT)); |
654 | 0 | /* if we're left and not in duplicates, stop here */ |
655 | 0 | if (node->bit >= 0) |
656 | 0 | break; |
657 | 0 | t = node->node_p; |
658 | 0 | } |
659 | 0 | else { |
660 | 0 | /* Walking up from right branch, so we cannot be below root */ |
661 | 0 | t = (eb_root_to_node(eb_untag(t, EB_RGHT)))->node_p; |
662 | 0 | } |
663 | 0 | } |
664 | 0 |
|
665 | 0 | /* Note that <t> cannot be NULL at this stage */ |
666 | 0 | t = (eb_untag(t, EB_LEFT))->b[EB_RGHT]; |
667 | 0 | if (eb_clrtag(t) == NULL) |
668 | 0 | return NULL; |
669 | 0 | return eb_walk_down(t, EB_LEFT); |
670 | 0 | } Unexecuted instantiation: haproxy.c:eb_next_unique Unexecuted instantiation: fuzz_hpack_decode.c:eb_next_unique |
671 | | |
672 | | |
673 | | /* Removes a leaf node from the tree if it was still in it. Marks the node |
674 | | * as unlinked. |
675 | | */ |
676 | | static forceinline void __eb_delete(struct eb_node *node) |
677 | 0 | { |
678 | 0 | __label__ delete_unlink; |
679 | 0 | unsigned int pside, gpside, sibtype; |
680 | 0 | struct eb_node *parent; |
681 | 0 | struct eb_root *gparent; |
682 | 0 |
|
683 | 0 | if (!node->leaf_p) |
684 | 0 | return; |
685 | 0 |
|
686 | 0 | /* we need the parent, our side, and the grand parent */ |
687 | 0 | pside = eb_gettag(node->leaf_p); |
688 | 0 | parent = eb_root_to_node(eb_untag(node->leaf_p, pside)); |
689 | 0 |
|
690 | 0 | /* We likely have to release the parent link, unless it's the root, |
691 | 0 | * in which case we only set our branch to NULL. Note that we can |
692 | 0 | * only be attached to the root by its left branch. |
693 | 0 | */ |
694 | 0 |
|
695 | 0 | if (eb_clrtag(parent->branches.b[EB_RGHT]) == NULL) { |
696 | 0 | /* we're just below the root, it's trivial. */ |
697 | 0 | parent->branches.b[EB_LEFT] = NULL; |
698 | 0 | goto delete_unlink; |
699 | 0 | } |
700 | 0 |
|
701 | 0 | /* To release our parent, we have to identify our sibling, and reparent |
702 | 0 | * it directly to/from the grand parent. Note that the sibling can |
703 | 0 | * either be a link or a leaf. |
704 | 0 | */ |
705 | 0 |
|
706 | 0 | gpside = eb_gettag(parent->node_p); |
707 | 0 | gparent = eb_untag(parent->node_p, gpside); |
708 | 0 |
|
709 | 0 | gparent->b[gpside] = parent->branches.b[!pside]; |
710 | 0 | sibtype = eb_gettag(gparent->b[gpside]); |
711 | 0 |
|
712 | 0 | if (sibtype == EB_LEAF) { |
713 | 0 | eb_root_to_node(eb_untag(gparent->b[gpside], EB_LEAF))->leaf_p = |
714 | 0 | eb_dotag(gparent, gpside); |
715 | 0 | } else { |
716 | 0 | eb_root_to_node(eb_untag(gparent->b[gpside], EB_NODE))->node_p = |
717 | 0 | eb_dotag(gparent, gpside); |
718 | 0 | } |
719 | 0 | /* Mark the parent unused. Note that we do not check if the parent is |
720 | 0 | * our own node, but that's not a problem because if it is, it will be |
721 | 0 | * marked unused at the same time, which we'll use below to know we can |
722 | 0 | * safely remove it. |
723 | 0 | */ |
724 | 0 | parent->node_p = NULL; |
725 | 0 |
|
726 | 0 | /* The parent node has been detached, and is currently unused. It may |
727 | 0 | * belong to another node, so we cannot remove it that way. Also, our |
728 | 0 | * own node part might still be used, so we can use this spare node |
729 | 0 | * to replace ours if needed. |
730 | 0 | */ |
731 | 0 |
|
732 | 0 | /* If our link part is unused, we can safely exit now */ |
733 | 0 | if (!node->node_p) |
734 | 0 | goto delete_unlink; |
735 | 0 |
|
736 | 0 | /* From now on, <node> and <parent> are necessarily different, and the |
737 | 0 | * <node>'s node part is in use. By definition, <parent> is at least |
738 | 0 | * below <node>, so keeping its key for the bit string is OK. |
739 | 0 | */ |
740 | 0 |
|
741 | 0 | parent->node_p = node->node_p; |
742 | 0 | parent->branches = node->branches; |
743 | 0 | parent->bit = node->bit; |
744 | 0 |
|
745 | 0 | /* We must now update the new node's parent... */ |
746 | 0 | gpside = eb_gettag(parent->node_p); |
747 | 0 | gparent = eb_untag(parent->node_p, gpside); |
748 | 0 | gparent->b[gpside] = eb_dotag(&parent->branches, EB_NODE); |
749 | 0 |
|
750 | 0 | /* ... and its branches */ |
751 | 0 | for (pside = 0; pside <= 1; pside++) { |
752 | 0 | if (eb_gettag(parent->branches.b[pside]) == EB_NODE) { |
753 | 0 | eb_root_to_node(eb_untag(parent->branches.b[pside], EB_NODE))->node_p = |
754 | 0 | eb_dotag(&parent->branches, pside); |
755 | 0 | } else { |
756 | 0 | eb_root_to_node(eb_untag(parent->branches.b[pside], EB_LEAF))->leaf_p = |
757 | 0 | eb_dotag(&parent->branches, pside); |
758 | 0 | } |
759 | 0 | } |
760 | 0 | delete_unlink: |
761 | 0 | /* Now the node has been completely unlinked */ |
762 | 0 | node->leaf_p = NULL; |
763 | 0 | return; /* tree is not empty yet */ |
764 | 0 | } Unexecuted instantiation: haproxy.c:__eb_delete Unexecuted instantiation: fuzz_hpack_decode.c:__eb_delete |
765 | | |
766 | | /* Compare blocks <a> and <b> byte-to-byte, from bit <ignore> to bit <len-1>. |
767 | | * Return the number of equal bits between strings, assuming that the first |
768 | | * <ignore> bits are already identical. It is possible to return slightly more |
769 | | * than <len> bits if <len> does not stop on a byte boundary and we find exact |
770 | | * bytes. Note that parts or all of <ignore> bits may be rechecked. It is only |
771 | | * passed here as a hint to speed up the check. |
772 | | */ |
773 | | static forceinline size_t equal_bits(const unsigned char *a, |
774 | | const unsigned char *b, |
775 | | size_t ignore, size_t len) |
776 | 0 | { |
777 | 0 | for (ignore >>= 3, a += ignore, b += ignore, ignore <<= 3; |
778 | 0 | ignore < len; ) { |
779 | 0 | unsigned char c; |
780 | 0 |
|
781 | 0 | a++; b++; |
782 | 0 | ignore += 8; |
783 | 0 | c = b[-1] ^ a[-1]; |
784 | 0 |
|
785 | 0 | if (c) { |
786 | 0 | /* OK now we know that old and new differ at byte <ptr> and that <c> holds |
787 | 0 | * the bit differences. We have to find what bit is differing and report |
788 | 0 | * it as the number of identical bits. Note that low bit numbers are |
789 | 0 | * assigned to high positions in the byte, as we compare them as strings. |
790 | 0 | */ |
791 | 0 | ignore -= flsnz_long(c); |
792 | 0 | break; |
793 | 0 | } |
794 | 0 | } |
795 | 0 | return ignore; |
796 | 0 | } Unexecuted instantiation: haproxy.c:equal_bits Unexecuted instantiation: fuzz_hpack_decode.c:equal_bits |
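
A worked example: 0x12 and 0x13 differ only in their lowest bit, so the first
7 bits (counted from the high-order bit, as in string order) are equal, while
identical bytes yield at least <len>:

        equal_bits((const unsigned char *)"\x12", (const unsigned char *)"\x13", 0, 8); /* -> 7 */
        equal_bits((const unsigned char *)"\x12", (const unsigned char *)"\x12", 0, 8); /* -> 8 */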
797 | | |
798 | | /* check that the two blocks <a> and <b> are equal on <len> bits. If it is known |
799 | | * they already are on some bytes, this number of equal bytes to be skipped may |
800 | | * be passed in <skip>. It returns 0 if they match, otherwise non-zero. |
801 | | */ |
802 | | static forceinline int check_bits(const unsigned char *a, |
803 | | const unsigned char *b, |
804 | | int skip, |
805 | | int len) |
806 | 0 | { |
807 | 0 | int bit, ret; |
808 | 0 |
|
809 | 0 | /* This uncommon construction gives the best performance on x86 because |
810 | 0 | * it makes heavy use of multiple-index addressing and parallel instructions, |
811 | 0 | * and it prevents gcc from reordering the loop since it is already |
812 | 0 | * properly oriented. Tested to be fine with 2.95 to 4.2. |
813 | 0 | */ |
814 | 0 | bit = ~len + (skip << 3) + 9; // = (skip << 3) + (8 - len) |
815 | 0 | ret = a[skip] ^ b[skip]; |
816 | 0 | if (unlikely(bit >= 0)) |
817 | 0 | return ret >> bit; |
818 | 0 | while (1) { |
819 | 0 | skip++; |
820 | 0 | if (ret) |
821 | 0 | return ret; |
822 | 0 | ret = a[skip] ^ b[skip]; |
823 | 0 | bit += 8; |
824 | 0 | if (bit >= 0) |
825 | 0 | return ret >> bit; |
826 | 0 | } |
827 | 0 | } Unexecuted instantiation: haproxy.c:check_bits Unexecuted instantiation: fuzz_hpack_decode.c:check_bits |
828 | | |
829 | | |
830 | | /* Compare strings <a> and <b> byte-to-byte, from bit <ignore> to the last 0. |
831 | | * Return the number of equal bits between strings, assuming that the first |
832 | | * <ignore> bits are already identical. Note that parts or all of <ignore> bits |
833 | | * may be rechecked. It is only passed here as a hint to speed up the check. |
834 | | * The caller is responsible for not passing an <ignore> value larger than any |
835 | | * of the two strings. However, referencing any bit from the trailing zero is |
836 | | * permitted. Equal strings are reported as a negative number of bits, which |
837 | | * indicates the end was reached. |
838 | | */ |
839 | | static forceinline size_t string_equal_bits(const unsigned char *a, |
840 | | const unsigned char *b, |
841 | | size_t ignore) |
842 | 0 | { |
843 | 0 | unsigned char c, d; |
844 | 0 | size_t beg; |
845 | 0 |
|
846 | 0 | beg = ignore >> 3; |
847 | 0 |
|
848 | 0 | /* skip known and identical bits. We stop at the first different byte |
849 | 0 | * or at the first zero we encounter on either side. |
850 | 0 | */ |
851 | 0 | while (1) { |
852 | 0 | c = a[beg]; |
853 | 0 | d = b[beg]; |
854 | 0 | beg++; |
855 | 0 |
|
856 | 0 | c ^= d; |
857 | 0 | if (c) |
858 | 0 | break; |
859 | 0 | if (!d) |
860 | 0 | return (size_t)-1; |
861 | 0 | } |
862 | 0 | /* OK now we know that a and b differ at byte <beg>, or that both are zero. |
863 | 0 | * We have to find what bit is differing and report it as the number of |
864 | 0 | * identical bits. Note that low bit numbers are assigned to high positions |
865 | 0 | * in the byte, as we compare them as strings. |
866 | 0 | */ |
867 | 0 | return (beg << 3) - flsnz(c); |
868 | 0 | } Unexecuted instantiation: haproxy.c:string_equal_bits Unexecuted instantiation: fuzz_hpack_decode.c:string_equal_bits |
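
For illustration: "ab" and "ac" share their first byte plus the 7 leading bits
of the second one, while equal strings report that the end was reached:

        string_equal_bits((const unsigned char *)"ab", (const unsigned char *)"ac", 0); /* -> 15 */
        string_equal_bits((const unsigned char *)"ab", (const unsigned char *)"ab", 0); /* -> (size_t)-1 */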
869 | | |
870 | | static forceinline int cmp_bits(const unsigned char *a, const unsigned char *b, unsigned int pos) |
871 | 0 | { |
872 | 0 | unsigned int ofs; |
873 | 0 | unsigned char bit_a, bit_b; |
874 | 0 |
|
875 | 0 | ofs = pos >> 3; |
876 | 0 | pos = ~pos & 7; |
877 | 0 |
|
878 | 0 | bit_a = (a[ofs] >> pos) & 1; |
879 | 0 | bit_b = (b[ofs] >> pos) & 1; |
880 | 0 |
|
881 | 0 | return bit_a - bit_b; /* -1: a<b; 0: a=b; 1: a>b */ |
882 | 0 | } Unexecuted instantiation: haproxy.c:cmp_bits Unexecuted instantiation: fuzz_hpack_decode.c:cmp_bits |
883 | | |
884 | | static forceinline int get_bit(const unsigned char *a, unsigned int pos) |
885 | 0 | { |
886 | 0 | unsigned int ofs; |
887 | 0 |
|
888 | 0 | ofs = pos >> 3; |
889 | 0 | pos = ~pos & 7; |
890 | 0 | return (a[ofs] >> pos) & 1; |
891 | 0 | } Unexecuted instantiation: haproxy.c:get_bit Unexecuted instantiation: fuzz_hpack_decode.c:get_bit |
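
Bit positions in the two helpers above are counted from the high-order bit of
the first byte, matching string comparison order. With 'A' == 0x41 and
'a' == 0x61, for illustration:

        get_bit((const unsigned char *)"A", 0);   /* -> 0 */
        get_bit((const unsigned char *)"A", 1);   /* -> 1 */
        get_bit((const unsigned char *)"A", 7);   /* -> 1 */
        cmp_bits((const unsigned char *)"A", (const unsigned char *)"a", 2); /* -> -1, 'A' sorts below 'a' */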
892 | | |
893 | | /* These functions are declared in ebtree.c */ |
894 | | void eb_delete(struct eb_node *node); |
895 | | struct eb_node *eb_insert_dup(struct eb_node *sub, struct eb_node *new); |
896 | | int eb_memcmp(const void *m1, const void *m2, size_t len); |
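
A common way to flush a tree is to delete while walking, fetching the next
node before unlinking the current one. This is a sketch using the generic
primitives above, with <root> being the tree's eb_root:

        struct eb_node *node, *next;

        node = eb_first(&root);
        while (node) {
                next = eb_next(node);
                eb_delete(node);
                /* the structure embedding <node> may be freed here */
                node = next;
        }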
897 | | |
898 | | #endif /* _EBTREE_H */ |
899 | | |
900 | | /* |
901 | | * Local variables: |
902 | | * c-indent-level: 8 |
903 | | * c-basic-offset: 8 |
904 | | * End: |
905 | | */ |