/src/openvswitch/lib/hindex.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2013 Nicira, Inc. |
3 | | * |
4 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | | * you may not use this file except in compliance with the License. |
6 | | * You may obtain a copy of the License at: |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | #include <config.h> |
18 | | #include "hindex.h" |
19 | | #include "coverage.h" |
20 | | |
21 | | static bool hindex_node_is_body(const struct hindex_node *); |
22 | | static bool hindex_node_is_head(const struct hindex_node *); |
23 | | static void hindex_resize(struct hindex *, size_t new_mask); |
24 | | static size_t hindex_calc_mask(size_t capacity); |
25 | | |
26 | | COVERAGE_DEFINE(hindex_pathological); |
27 | | COVERAGE_DEFINE(hindex_expand); |
28 | | COVERAGE_DEFINE(hindex_shrink); |
29 | | COVERAGE_DEFINE(hindex_reserve); |
30 | | |
31 | | /* Initializes 'hindex' as an empty hash index. */ |
32 | | void |
33 | | hindex_init(struct hindex *hindex) |
34 | 0 | { |
35 | 0 | hindex->buckets = &hindex->one; |
36 | 0 | hindex->one = NULL; |
37 | 0 | hindex->mask = 0; |
38 | 0 | hindex->n_unique = 0; |
39 | 0 | } |
40 | | |
41 | | /* Frees memory reserved by 'hindex'. It is the client's responsibility to |
42 | | * free the nodes themselves, if necessary. */ |
43 | | void |
44 | | hindex_destroy(struct hindex *hindex) |
45 | 0 | { |
46 | 0 | if (hindex && hindex->buckets != &hindex->one) { |
47 | 0 | free(hindex->buckets); |
48 | 0 | } |
49 | 0 | } |
50 | | |
51 | | /* Removes all node from 'hindex', leaving it ready to accept more nodes. Does |
52 | | * not free memory allocated for 'hindex'. |
53 | | * |
54 | | * This function is appropriate when 'hindex' will soon have about as many |
55 | | * elements as it before. If 'hindex' will likely have fewer elements than |
56 | | * before, use hindex_destroy() followed by hindex_clear() to save memory and |
57 | | * iteration time. */ |
58 | | void |
59 | | hindex_clear(struct hindex *hindex) |
60 | 0 | { |
61 | 0 | if (hindex->n_unique > 0) { |
62 | 0 | hindex->n_unique = 0; |
63 | 0 | memset(hindex->buckets, 0, |
64 | 0 | (hindex->mask + 1) * sizeof *hindex->buckets); |
65 | 0 | } |
66 | 0 | } |
67 | | |
68 | | /* Exchanges hash indexes 'a' and 'b'. */ |
69 | | void |
70 | | hindex_swap(struct hindex *a, struct hindex *b) |
71 | 0 | { |
72 | 0 | struct hindex tmp = *a; |
73 | 0 | *a = *b; |
74 | 0 | *b = tmp; |
75 | 0 | hindex_moved(a); |
76 | 0 | hindex_moved(b); |
77 | 0 | } |
78 | | |
79 | | /* Adjusts 'hindex' to compensate for having moved position in memory (e.g. due |
80 | | * to realloc()). */ |
81 | | void |
82 | | hindex_moved(struct hindex *hindex) |
83 | 0 | { |
84 | 0 | if (!hindex->mask) { |
85 | 0 | hindex->buckets = &hindex->one; |
86 | 0 | } |
87 | 0 | } |
88 | | |
89 | | /* Expands 'hindex', if necessary, to optimize the performance of searches. */ |
90 | | void |
91 | | hindex_expand(struct hindex *hindex) |
92 | 0 | { |
93 | 0 | size_t new_mask = hindex_calc_mask(hindex->n_unique); |
94 | 0 | if (new_mask > hindex->mask) { |
95 | 0 | COVERAGE_INC(hindex_expand); |
96 | 0 | hindex_resize(hindex, new_mask); |
97 | 0 | } |
98 | 0 | } |
99 | | |
100 | | /* Shrinks 'hindex', if necessary, to optimize the performance of iteration. */ |
101 | | void |
102 | | hindex_shrink(struct hindex *hindex) |
103 | 0 | { |
104 | 0 | size_t new_mask = hindex_calc_mask(hindex->n_unique); |
105 | 0 | if (new_mask < hindex->mask) { |
106 | 0 | COVERAGE_INC(hindex_shrink); |
107 | 0 | hindex_resize(hindex, new_mask); |
108 | 0 | } |
109 | 0 | } |
110 | | |
111 | | /* Expands 'hindex', if necessary, to optimize the performance of searches when |
112 | | * it has up to 'n' unique hashes. (But iteration will be slow in a hash index |
113 | | * whose allocated capacity is much higher than its current number of |
114 | | * nodes.) */ |
115 | | void |
116 | | hindex_reserve(struct hindex *hindex, size_t n) |
117 | 0 | { |
118 | 0 | size_t new_mask = hindex_calc_mask(n); |
119 | 0 | if (new_mask > hindex->mask) { |
120 | 0 | COVERAGE_INC(hindex_reserve); |
121 | 0 | hindex_resize(hindex, new_mask); |
122 | 0 | } |
123 | 0 | } |
124 | | |
/* Inserts 'node', with the given 'hash', into 'hindex'.  Never automatically
 * expands 'hindex' (use hindex_insert() instead if you want that).
 *
 * Nodes that share a hash form a chain below a single "head" node: the
 * head's 's' points to the first body node, each body node's 's' points to
 * the next body node, and each body node's 'd' points back up toward the
 * head.  Heads are chained together through 'd' within a bucket. */
void
hindex_insert_fast(struct hindex *hindex,
                   struct hindex_node *node, size_t hash)
{
    /* Look for an existing head node that already carries 'hash'. */
    struct hindex_node *head = hindex_node_with_hash(hindex, hash);
    if (head) {
        /* 'head' is an existing head with hash == 'hash'.
         * Insert 'node' as a body node just below 'head'. */
        node->s = head->s;
        node->d = head;
        if (node->s) {
            /* Fix the back-link of the former first body node. */
            node->s->d = node;
        }
        head->s = node;
    } else {
        /* No existing node has hash 'hash'.  Insert 'node' as a new head in
         * its bucket, pushed onto the front of the bucket's head chain. */
        struct hindex_node **bucket = &hindex->buckets[hash & hindex->mask];
        node->s = NULL;
        node->d = *bucket;
        *bucket = node;
        /* One more distinct hash value is now present. */
        hindex->n_unique++;
    }
    node->hash = hash;
}
152 | | |
153 | | /* Inserts 'node', with the given 'hash', into 'hindex', and expands 'hindex' |
154 | | * if necessary to optimize search performance. */ |
155 | | void |
156 | | hindex_insert(struct hindex *hindex, struct hindex_node *node, size_t hash) |
157 | 0 | { |
158 | 0 | hindex_insert_fast(hindex, node, hash); |
159 | 0 | if (hindex->n_unique / 2 > hindex->mask) { |
160 | 0 | hindex_expand(hindex); |
161 | 0 | } |
162 | 0 | } |
163 | | |
/* Removes 'node' from 'hindex'.  Does not shrink the hash index; call
 * hindex_shrink() directly if desired.
 *
 * A "body" node (same hash as the node above it) is simply spliced out of
 * its 's'/'d' chain.  A "head" node must also be unlinked from the bucket's
 * head chain, promoting its first body node (if any) to be the new head. */
void
hindex_remove(struct hindex *hindex, struct hindex_node *node)
{
    if (!hindex_node_is_head(node)) {
        /* Body node: unlink it from the same-hash chain.  'node->d' is the
         * node above it (head or earlier body node). */
        node->d->s = node->s;
        if (node->s) {
            node->s->d = node->d;
        }
    } else {
        struct hindex_node **head;

        /* Find the link (bucket slot or a previous head's 'd' pointer) that
         * points at 'node''s position in the bucket's head chain. */
        for (head = &hindex->buckets[node->hash & hindex->mask];
             (*head)->hash != node->hash;
             head = &(*head)->d)
        {
            continue;
        }

        if (node->s) {
            /* Promote the first body node to head; it inherits 'node''s
             * place in the head chain. */
            *head = node->s;
            node->s->d = node->d;
        } else {
            /* 'node' was the only node with its hash: drop it from the head
             * chain and account for one fewer unique hash. */
            *head = node->d;
            hindex->n_unique--;
        }
    }
}
193 | | |
194 | | /* Helper functions. */ |
195 | | |
196 | | /* Returns true if 'node', which must be inserted into an hindex, is a "body" |
197 | | * node, that is, it is not reachable from a bucket by following zero or more |
198 | | * 'd' pointers. Returns false otherwise. */ |
199 | | static bool |
200 | | hindex_node_is_body(const struct hindex_node *node) |
201 | 0 | { |
202 | 0 | return node->d && node->d->hash == node->hash; |
203 | 0 | } |
204 | | |
205 | | /* Returns true if 'node', which must be inserted into an hindex, is a "head" |
206 | | * node, that is, if it is reachable from a bucket by following zero or more |
207 | | * 'd' pointers. Returns false if 'node' is a body node (and therefore one |
208 | | * must follow at least one 's' pointer to reach it). */ |
209 | | static bool |
210 | | hindex_node_is_head(const struct hindex_node *node) |
211 | 0 | { |
212 | 0 | return !hindex_node_is_body(node); |
213 | 0 | } |
214 | | |
/* Reallocates 'hindex''s array of buckets to use bitwise mask 'new_mask'.
 *
 * Builds a fresh index 'tmp' with the new bucket count, re-chains every head
 * node from 'hindex' into 'tmp' in place (body nodes stay attached to their
 * heads through 's'), then swaps the two and frees the old bucket array. */
static void
hindex_resize(struct hindex *hindex, size_t new_mask)
{
    struct hindex tmp;
    size_t i;

    /* The bucket count (mask + 1) must be a power of 2, and 'mask + 1' must
     * not overflow. */
    ovs_assert(is_pow2(new_mask + 1));
    ovs_assert(new_mask != SIZE_MAX);

    hindex_init(&tmp);
    if (new_mask) {
        tmp.buckets = xmalloc(sizeof *tmp.buckets * (new_mask + 1));
        tmp.mask = new_mask;
        for (i = 0; i <= tmp.mask; i++) {
            tmp.buckets[i] = NULL;
        }
    }
    /* Walk every old bucket's head chain and push each head onto the front
     * of its new bucket.  Only the 'd' links between heads change; each
     * head's body chain ('s') moves with it untouched. */
    for (i = 0; i <= hindex->mask; i++) {
        struct hindex_node *node, *next;
        int count;

        count = 0;
        for (node = hindex->buckets[i]; node; node = next) {
            struct hindex_node **head = &tmp.buckets[node->hash & tmp.mask];

            next = node->d;
            node->d = *head;
            *head = node;
            count++;
        }
        /* Many heads in one old bucket suggests a poor hash function. */
        if (count > 5) {
            COVERAGE_INC(hindex_pathological);
        }
    }
    tmp.n_unique = hindex->n_unique;
    hindex_swap(hindex, &tmp);
    /* 'tmp' now owns the old bucket array; free it. */
    hindex_destroy(&tmp);
}
254 | | |
/* Returns the bitwise mask to use in struct hindex to support 'capacity'
 * hindex_nodes with unique hashes. */
static size_t
hindex_calc_mask(size_t capacity)
{
    size_t mask;
    size_t shift;

    /* Target roughly two unique hashes per bucket, then round the bucket
     * count up to a power of 2 by smearing the top set bit of 'mask' into
     * every lower bit position. */
    mask = capacity / 2;
    for (shift = 1; shift < sizeof mask * 8; shift *= 2) {
        mask |= mask >> shift;
    }

    /* If we need to dynamically allocate buckets we might as well allocate at
     * least 4 of them. */
    mask |= (mask & 1) << 1;

    return mask;
}
276 | | |
277 | | /* Returns the head node in 'hindex' with the given 'hash'. 'hindex' must |
278 | | * contain a head node with the given hash. */ |
279 | | static struct hindex_node * |
280 | | hindex_head_node(const struct hindex *hindex, size_t hash) |
281 | 0 | { |
282 | 0 | struct hindex_node *node = hindex->buckets[hash & hindex->mask]; |
283 | |
|
284 | 0 | while (node->hash != hash) { |
285 | 0 | node = node->d; |
286 | 0 | } |
287 | 0 | return node; |
288 | 0 | } |
289 | | |
290 | | static struct hindex_node * |
291 | | hindex_next__(const struct hindex *hindex, size_t start) |
292 | 0 | { |
293 | 0 | size_t i; |
294 | 0 | for (i = start; i <= hindex->mask; i++) { |
295 | 0 | struct hindex_node *node = hindex->buckets[i]; |
296 | 0 | if (node) { |
297 | 0 | return node; |
298 | 0 | } |
299 | 0 | } |
300 | 0 | return NULL; |
301 | 0 | } |
302 | | |
/* Returns the first node in 'hindex', in arbitrary order, or a null pointer
 * if 'hindex' is empty.  Scans the buckets from the beginning. */
struct hindex_node *
hindex_first(const struct hindex *hindex)
{
    const size_t first_bucket = 0;

    return hindex_next__(hindex, first_bucket);
}
310 | | |
311 | | /* Returns the next node in 'hindex' following 'node', in arbitrary order, or a |
312 | | * null pointer if 'node' is the last node in 'hindex'. |
313 | | * |
314 | | * If the hash index has been reallocated since 'node' was visited, some nodes |
315 | | * may be skipped or visited twice. */ |
316 | | struct hindex_node * |
317 | | hindex_next(const struct hindex *hindex, const struct hindex_node *node) |
318 | 0 | { |
319 | 0 | struct hindex_node *head; |
320 | | |
321 | | /* If there's a node with the same hash, return it. */ |
322 | 0 | if (node->s) { |
323 | 0 | return node->s; |
324 | 0 | } |
325 | | |
326 | | /* If there's another node in the same bucket, return it. */ |
327 | 0 | head = hindex_head_node(hindex, node->hash); |
328 | 0 | if (head->d) { |
329 | 0 | return head->d; |
330 | 0 | } |
331 | | |
332 | | /* Return the first node in the next (or later) bucket. */ |
333 | 0 | return hindex_next__(hindex, (node->hash & hindex->mask) + 1); |
334 | 0 | } |