/src/elfutils/lib/dynamicsizehash_concurrent.c
Line | Count | Source |
1 | | /* Copyright (C) 2000-2019 Red Hat, Inc. |
2 | | This file is part of elfutils. |
3 | | Written by Srdan Milakovic <sm108@rice.edu>, 2019. |
4 | | Derived from Ulrich Drepper <drepper@redhat.com>, 2000. |
5 | | |
6 | | This file is free software; you can redistribute it and/or modify |
7 | | it under the terms of either |
8 | | |
9 | | * the GNU Lesser General Public License as published by the Free |
10 | | Software Foundation; either version 3 of the License, or (at |
11 | | your option) any later version |
12 | | |
13 | | or |
14 | | |
15 | | * the GNU General Public License as published by the Free |
16 | | Software Foundation; either version 2 of the License, or (at |
17 | | your option) any later version |
18 | | |
19 | | or both in parallel, as here. |
20 | | |
21 | | elfutils is distributed in the hope that it will be useful, but |
22 | | WITHOUT ANY WARRANTY; without even the implied warranty of |
23 | | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
24 | | General Public License for more details. |
25 | | |
26 | | You should have received copies of the GNU General Public License and |
27 | | the GNU Lesser General Public License along with this program. If |
28 | | not, see <http://www.gnu.org/licenses/>. */ |
29 | | |
30 | | #include <assert.h> |
31 | | #include <stdlib.h> |
32 | | #include <system.h> |
33 | | #include <pthread.h> |
34 | | |
35 | | /* Before including this file the following macros must be defined: |
36 | | |
37 | | NAME name of the hash table structure. |
38 | | TYPE data type of the hash table entries |
39 | | */ |
40 | | |
41 | | |
42 | | static size_t |
43 | | lookup (NAME *htab, HASHTYPE hval) |
44 | 0 | { |
45 | | /* First hash function: simply take the modulus but prevent zero. Small values |
46 | | can skip the division, which helps performance when this is common. */ |
47 | 0 | size_t idx = 1 + (hval < htab->size ? hval : hval % htab->size); |
48 | |
|
49 | 0 | HASHTYPE hash; |
50 | |
|
51 | 0 | hash = atomic_load_explicit(&htab->table[idx].hashval, |
52 | 0 | memory_order_acquire); |
53 | 0 | if (hash == hval) |
54 | 0 | return idx; |
55 | 0 | else if (hash == 0) |
56 | 0 | return 0; |
57 | | |
58 | | /* Second hash function as suggested in [Knuth]. */ |
59 | 0 | HASHTYPE second_hash = 1 + hval % (htab->size - 2); |
60 | |
|
61 | 0 | for(;;) |
62 | 0 | { |
63 | 0 | if (idx <= second_hash) |
64 | 0 | idx = htab->size + idx - second_hash; |
65 | 0 | else |
66 | 0 | idx -= second_hash; |
67 | |
|
68 | 0 | hash = atomic_load_explicit(&htab->table[idx].hashval, |
69 | 0 | memory_order_acquire); |
70 | 0 | if (hash == hval) |
71 | 0 | return idx; |
72 | 0 | else if (hash == 0) |
73 | 0 | return 0; |
74 | 0 | } |
75 | 0 | } Unexecuted instantiation: dwflst_tracker_elftab.c:lookup Unexecuted instantiation: dwflst_tracker_dwfltab.c:lookup Unexecuted instantiation: dwarf_abbrev_hash.c:lookup Unexecuted instantiation: dwarf_sig8_hash.c:lookup |
76 | | |
/* Insert VAL under hash HVAL into the table using double hashing.
   Returns 0 on success, -1 if an entry with HVAL already exists.
   The caller must hold htab->resize_rwl (read side) or otherwise
   guarantee the table is not being swapped out.

   Slot-claim protocol: a slot is claimed by CAS-ing val_ptr from NULL
   to the new value; the hashval is published afterwards with a release
   store, so a reader that observes a nonzero hashval (acquire) also
   sees the matching val_ptr.  A thread that loses the CAS spins until
   the winner publishes its hashval, then checks for a duplicate.  */
static int
insert_helper (NAME *htab, HASHTYPE hval, TYPE val)
{
  /* First hash function: simply take the modulus but prevent zero.  Small values
     can skip the division, which helps performance when this is common.  */
  size_t idx = 1 + (hval < htab->size ? hval : hval % htab->size);

  TYPE val_ptr;
  HASHTYPE hash;

  hash = atomic_load_explicit(&htab->table[idx].hashval,
                              memory_order_acquire);
  if (hash == hval)
    /* Duplicate key.  */
    return -1;
  else if (hash == 0)
    {
      /* Slot looks empty: try to claim it.  On failure val_ptr is
         overwritten with the current occupant's value.  */
      val_ptr = NULL;
      atomic_compare_exchange_strong_explicit(&htab->table[idx].val_ptr,
                                              (uintptr_t *) &val_ptr,
                                              (uintptr_t) val,
                                              memory_order_acquire,
                                              memory_order_acquire);

      if (val_ptr == NULL)
        {
          /* We won the slot; publish the hash so readers can find it.  */
          atomic_store_explicit(&htab->table[idx].hashval, hval,
                                memory_order_release);
          return 0;
        }
      else
        {
          /* Another thread claimed the slot first; wait until it
             publishes its hashval so we can test for a duplicate.  */
          do
            {
              hash = atomic_load_explicit(&htab->table[idx].hashval,
                                          memory_order_acquire);
            }
          while (hash == 0);
          if (hash == hval)
            return -1;
        }
    }

  /* Second hash function as suggested in [Knuth].  */
  HASHTYPE second_hash = 1 + hval % (htab->size - 2);

  for(;;)
    {
      /* Step backwards by the second hash, wrapping around.  */
      if (idx <= second_hash)
        idx = htab->size + idx - second_hash;
      else
        idx -= second_hash;

      hash = atomic_load_explicit(&htab->table[idx].hashval,
                                  memory_order_acquire);
      if (hash == hval)
        return -1;
      else if (hash == 0)
        {
          /* Same claim protocol as for the first probe above.  */
          val_ptr = NULL;
          atomic_compare_exchange_strong_explicit(&htab->table[idx].val_ptr,
                                                  (uintptr_t *) &val_ptr,
                                                  (uintptr_t) val,
                                                  memory_order_acquire,
                                                  memory_order_acquire);

          if (val_ptr == NULL)
            {
              atomic_store_explicit(&htab->table[idx].hashval, hval,
                                    memory_order_release);
              return 0;
            }
          else
            {
              do
                {
                  hash = atomic_load_explicit(&htab->table[idx].hashval,
                                              memory_order_acquire);
                }
              while (hash == 0);
              if (hash == hval)
                return -1;
            }
        }
    }
}
162 | | |
/* Resizing state machine, packed into htab->resizing_state: the low
   STATE_BITS hold the phase, the remaining high bits count registered
   worker threads.  Phase values are chosen so consecutive transitions
   can be done with a single atomic XOR.  */
#define NO_RESIZING 0u
#define ALLOCATING_MEMORY 1u
#define MOVING_DATA 3u
#define CLEANING 2u

#define STATE_BITS 2u
#define STATE_INCREMENT (1u << STATE_BITS)
#define STATE_MASK (STATE_INCREMENT - 1)
#define GET_STATE(A) ((A) & STATE_MASK)

/* NO_RESIZING (0) and CLEANING (2) are the two phases with bit 0
   clear, i.e. the phases where workers must not join in.  */
#define IS_NO_RESIZE_OR_CLEANING(A) (((A) & 0x1u) == 0)

#define GET_ACTIVE_WORKERS(A) ((A) >> STATE_BITS)

/* Number of records claimed per work unit while initializing the new
   table and while moving entries out of the old one.  */
#define INITIALIZATION_BLOCK_SIZE 256
#define MOVE_BLOCK_SIZE 256
#define CEIL(A, B) (((A) + (B) - 1) / (B))
180 | | |
/* Initializes records and copies the data from the old table.
   It can share work with other threads.  Only the coordinator
   will pass blocking as 1, other worker threads pass 0.

   Work is distributed by atomically claiming block indices from
   htab->next_init_block / htab->next_move_block; completed blocks are
   tallied into the matching num_*_blocks counters, which double as the
   rendezvous between phases.  */
static void resize_helper(NAME *htab, int blocking)
{
  size_t num_old_blocks = CEIL(htab->old_size, MOVE_BLOCK_SIZE);
  size_t num_new_blocks = CEIL(htab->size, INITIALIZATION_BLOCK_SIZE);

  size_t my_block;
  size_t num_finished_blocks = 0;

  /* Phase 1: zero-initialize the records of the new table, one claimed
     block at a time.  */
  while ((my_block = atomic_fetch_add_explicit(&htab->next_init_block, 1,
                                               memory_order_acquire))
         < num_new_blocks)
    {
      size_t record_it = my_block * INITIALIZATION_BLOCK_SIZE;
      size_t record_end = (my_block + 1) * INITIALIZATION_BLOCK_SIZE;
      if (record_end > htab->size)
        record_end = htab->size;

      /* NB: the increment in the condition means indices
         record_it+1 .. record_end are touched; slot 0 of the table is
         the reserved sentinel and is never probed, so across all
         blocks exactly slots 1 .. size get initialized.  */
      while (record_it++ != record_end)
        {
          atomic_init(&htab->table[record_it].hashval, (uintptr_t) NULL);
          atomic_init(&htab->table[record_it].val_ptr, (uintptr_t) NULL);
        }

      num_finished_blocks++;
    }

  /* Publish our finished count, then spin until every block is
     initialized — no entry may be moved into uninitialized memory.  */
  atomic_fetch_add_explicit(&htab->num_initialized_blocks,
                            num_finished_blocks, memory_order_release);
  while (atomic_load_explicit(&htab->num_initialized_blocks,
                              memory_order_acquire) != num_new_blocks);

  /* All block are initialized, start moving */
  num_finished_blocks = 0;
  while ((my_block = atomic_fetch_add_explicit(&htab->next_move_block, 1,
                                               memory_order_acquire))
         < num_old_blocks)
    {
      size_t record_it = my_block * MOVE_BLOCK_SIZE;
      size_t record_end = (my_block + 1) * MOVE_BLOCK_SIZE;
      if (record_end > htab->old_size)
        record_end = htab->old_size;

      while (record_it++ != record_end)
        {
          /* A NULL val_ptr marks an empty (never-claimed) slot.  */
          TYPE val_ptr = (TYPE) atomic_load_explicit(
              &htab->old_table[record_it].val_ptr,
              memory_order_acquire);
          if (val_ptr == NULL)
            continue;

          HASHTYPE hashval = atomic_load_explicit(
              &htab->old_table[record_it].hashval,
              memory_order_acquire);
          /* A claimed slot must have had its hash published before the
             resize started (insert_helper's release store).  */
          assert(hashval);

          insert_helper(htab, hashval, val_ptr);
        }

      num_finished_blocks++;
    }

  atomic_fetch_add_explicit(&htab->num_moved_blocks, num_finished_blocks,
                            memory_order_release);

  /* The coordinating thread will block here waiting for all blocks to
     be moved.  */
  if (blocking)
    while (atomic_load_explicit(&htab->num_moved_blocks,
                                memory_order_acquire) != num_old_blocks);
}
254 | | |
/* Called by the main thread holding the htab->resize_rwl lock to
   coordinate the moving of hash table data.  Allocates the new hash
   table and frees the old one when moving all data is done.  */
static void
resize_coordinator(NAME *htab)
{
  htab->old_size = htab->size;
  htab->old_table = htab->table;

  /* Grow to (at least) double, keeping the size prime for the double
     hashing in lookup/insert_helper.  One extra record for the
     reserved sentinel slot 0.  */
  htab->size = next_prime(htab->size * 2);
  htab->table = malloc((1 + htab->size) * sizeof(htab->table[0]));
  assert(htab->table);

  /* Change state from ALLOCATING_MEMORY to MOVING_DATA */
  atomic_fetch_xor_explicit(&htab->resizing_state,
                            ALLOCATING_MEMORY ^ MOVING_DATA,
                            memory_order_release);

  /* Participate in the move ourselves; blocking=1 makes resize_helper
     wait until every block has been moved.  */
  resize_helper(htab, 1);

  /* Change state from MOVING_DATA to CLEANING */
  size_t resize_state = atomic_fetch_xor_explicit(&htab->resizing_state,
                                                  MOVING_DATA ^ CLEANING,
                                                  memory_order_acq_rel);
  /* Wait for all registered workers to deregister; after that nobody
     touches the per-resize counters or the old table anymore.  */
  while (GET_ACTIVE_WORKERS(resize_state) != 0)
    resize_state = atomic_load_explicit(&htab->resizing_state,
                                        memory_order_acquire);

  /* There are no more active workers */
  atomic_store_explicit(&htab->next_init_block, 0, memory_order_relaxed);
  atomic_store_explicit(&htab->num_initialized_blocks, 0,
                        memory_order_relaxed);

  atomic_store_explicit(&htab->next_move_block, 0, memory_order_relaxed);
  atomic_store_explicit(&htab->num_moved_blocks, 0, memory_order_relaxed);

  free(htab->old_table);

  /* Change state to NO_RESIZING */
  atomic_fetch_xor_explicit(&htab->resizing_state, CLEANING ^ NO_RESIZING,
                            memory_order_relaxed);
}
298 | | |
/* Called by any thread that wants to do an insert or find operation
   but notices it cannot get the htab->resize_rwl lock because another
   thread is resizing the hash table.  Try to help out by moving table
   data if still necessary.

   Workers register by adding STATE_INCREMENT to resizing_state (upper
   bits count active workers) and must deregister on every exit path
   so the coordinator's drain loop can finish.  */
static void
resize_worker(NAME *htab)
{
  size_t resize_state = atomic_load_explicit(&htab->resizing_state,
                                             memory_order_acquire);

  /* If the resize has finished */
  if (IS_NO_RESIZE_OR_CLEANING(resize_state))
    return;

  /* Register as worker and check if the resize has finished in the meantime*/
  resize_state = atomic_fetch_add_explicit(&htab->resizing_state,
                                           STATE_INCREMENT,
                                           memory_order_acquire);
  if (IS_NO_RESIZE_OR_CLEANING(resize_state))
    {
      /* Too late to help: undo the registration and bail out.  */
      atomic_fetch_sub_explicit(&htab->resizing_state, STATE_INCREMENT,
                                memory_order_relaxed);
      return;
    }

  /* Wait while the new table is being allocated.  */
  while (GET_STATE(resize_state) == ALLOCATING_MEMORY)
    resize_state = atomic_load_explicit(&htab->resizing_state,
                                        memory_order_acquire);

  /* Check if the resize is done */
  assert(GET_STATE(resize_state) != NO_RESIZING);
  if (GET_STATE(resize_state) == CLEANING)
    {
      atomic_fetch_sub_explicit(&htab->resizing_state, STATE_INCREMENT,
                                memory_order_relaxed);
      return;
    }

  /* blocking=0: workers need not wait for the whole move to finish.  */
  resize_helper(htab, 0);

  /* Deregister worker */
  atomic_fetch_sub_explicit(&htab->resizing_state, STATE_INCREMENT,
                            memory_order_release);
}
344 | | |
345 | | |
346 | | int |
347 | | #define INIT(name) _INIT (name) |
348 | | #define _INIT(name) \ |
349 | | name##_init |
350 | | INIT(NAME) (NAME *htab, size_t init_size) |
351 | 5.72k | { |
352 | | /* We need the size to be a prime. */ |
353 | 5.72k | init_size = next_prime (init_size); |
354 | | |
355 | | /* Initialize the data structure. */ |
356 | 5.72k | htab->size = init_size; |
357 | 5.72k | atomic_init(&htab->filled, 0); |
358 | 5.72k | atomic_init(&htab->resizing_state, 0); |
359 | | |
360 | 5.72k | atomic_init(&htab->next_init_block, 0); |
361 | 5.72k | atomic_init(&htab->num_initialized_blocks, 0); |
362 | | |
363 | 5.72k | atomic_init(&htab->next_move_block, 0); |
364 | 5.72k | atomic_init(&htab->num_moved_blocks, 0); |
365 | | |
366 | 5.72k | pthread_rwlock_init(&htab->resize_rwl, NULL); |
367 | | |
368 | 5.72k | htab->table = malloc ((init_size + 1) * sizeof (htab->table[0])); |
369 | 5.72k | if (htab->table == NULL) |
370 | 0 | return -1; |
371 | | |
372 | 74.4k | for (size_t i = 0; i <= init_size; i++) |
373 | 68.7k | { |
374 | 68.7k | atomic_init(&htab->table[i].hashval, (uintptr_t) NULL); |
375 | 68.7k | atomic_init(&htab->table[i].val_ptr, (uintptr_t) NULL); |
376 | 68.7k | } |
377 | | |
378 | 5.72k | return 0; |
379 | 5.72k | } Unexecuted instantiation: dwflst_tracker_elftab_init Unexecuted instantiation: dwflst_tracker_dwfltab_init Unexecuted instantiation: Dwarf_Abbrev_Hash_init Line | Count | Source | 351 | 5.72k | { | 352 | | /* We need the size to be a prime. */ | 353 | 5.72k | init_size = next_prime (init_size); | 354 | | | 355 | | /* Initialize the data structure. */ | 356 | 5.72k | htab->size = init_size; | 357 | 5.72k | atomic_init(&htab->filled, 0); | 358 | 5.72k | atomic_init(&htab->resizing_state, 0); | 359 | | | 360 | 5.72k | atomic_init(&htab->next_init_block, 0); | 361 | 5.72k | atomic_init(&htab->num_initialized_blocks, 0); | 362 | | | 363 | 5.72k | atomic_init(&htab->next_move_block, 0); | 364 | 5.72k | atomic_init(&htab->num_moved_blocks, 0); | 365 | | | 366 | 5.72k | pthread_rwlock_init(&htab->resize_rwl, NULL); | 367 | | | 368 | 5.72k | htab->table = malloc ((init_size + 1) * sizeof (htab->table[0])); | 369 | 5.72k | if (htab->table == NULL) | 370 | 0 | return -1; | 371 | | | 372 | 74.4k | for (size_t i = 0; i <= init_size; i++) | 373 | 68.7k | { | 374 | 68.7k | atomic_init(&htab->table[i].hashval, (uintptr_t) NULL); | 375 | 68.7k | atomic_init(&htab->table[i].val_ptr, (uintptr_t) NULL); | 376 | 68.7k | } | 377 | | | 378 | 5.72k | return 0; | 379 | 5.72k | } |
|
380 | | |
381 | | |
382 | | int |
383 | | #define FREE(name) _FREE (name) |
384 | | #define _FREE(name) \ |
385 | | name##_free |
386 | | FREE(NAME) (NAME *htab) |
387 | 5.72k | { |
388 | 5.72k | pthread_rwlock_destroy(&htab->resize_rwl); |
389 | 5.72k | free (htab->table); |
390 | 5.72k | return 0; |
391 | 5.72k | } Unexecuted instantiation: dwflst_tracker_elftab_free Unexecuted instantiation: dwflst_tracker_dwfltab_free Unexecuted instantiation: Dwarf_Abbrev_Hash_free Line | Count | Source | 387 | 5.72k | { | 388 | 5.72k | pthread_rwlock_destroy(&htab->resize_rwl); | 389 | 5.72k | free (htab->table); | 390 | 5.72k | return 0; | 391 | 5.72k | } |
|
392 | | |
393 | | |
/* Insert DATA under hash HVAL.  Returns 0 on success, -1 if HVAL is
   already present.  May trigger (and coordinate or help with) a table
   resize when the table is more than 90% full.

   The filled counter is incremented optimistically before the insert
   and rolled back on duplicate; "incremented" ensures this happens at
   most once even if the resize loop iterates several times.  */
int
#define INSERT(name) _INSERT (name)
#define _INSERT(name) \
  name##_insert
INSERT(NAME) (NAME *htab, HASHTYPE hval, TYPE data)
{
  int incremented = 0;

  for(;;)
    {
      /* If we cannot get the resize_rwl lock someone is resizing
         hash table, try to help out by moving table data.  */
      while (pthread_rwlock_tryrdlock(&htab->resize_rwl) != 0)
        resize_worker(htab);

      size_t filled;
      if (!incremented)
        {
          filled = atomic_fetch_add_explicit(&htab->filled, 1,
                                             memory_order_acquire);
          incremented = 1;
        }
      else
        {
          filled = atomic_load_explicit(&htab->filled,
                                        memory_order_acquire);
        }

      if (100 * filled > 90 * htab->size)
        {
          /* Table is filled more than 90%.  Resize the table.  */

          /* Elect a single coordinator: the thread whose CAS moves
             resizing_state from NO_RESIZING to ALLOCATING_MEMORY.  */
          size_t resizing_state = atomic_load_explicit(&htab->resizing_state,
                                                       memory_order_acquire);
          if (resizing_state == 0 &&
              atomic_compare_exchange_strong_explicit(&htab->resizing_state,
                                                      &resizing_state,
                                                      ALLOCATING_MEMORY,
                                                      memory_order_acquire,
                                                      memory_order_acquire))
            {
              /* Main resizing thread, will coordinate moving data.  */
              pthread_rwlock_unlock(&htab->resize_rwl);

              /* Exclusive lock: readers/inserters are kept out (or
                 diverted into resize_worker) during the resize.  */
              pthread_rwlock_wrlock(&htab->resize_rwl);
              resize_coordinator(htab);
              pthread_rwlock_unlock(&htab->resize_rwl);

            }
          else
            {
              /* Worker thread, will help moving data.  */
              pthread_rwlock_unlock(&htab->resize_rwl);
              resize_worker(htab);
            }
          /* Loop around to retake the lock and re-check the load.  */
        }
      else
        {
          /* Lock acquired, no need for resize*/
          break;
        }
    }

  int ret_val = insert_helper(htab, hval, data);
  if (ret_val == -1)
    /* Duplicate: undo the optimistic filled increment.  */
    atomic_fetch_sub_explicit(&htab->filled, 1, memory_order_relaxed);
  pthread_rwlock_unlock(&htab->resize_rwl);
  return ret_val;
}
464 | | |
465 | | |
466 | | |
467 | | TYPE |
468 | | #define FIND(name) _FIND (name) |
469 | | #define _FIND(name) \ |
470 | | name##_find |
471 | | FIND(NAME) (NAME *htab, HASHTYPE hval) |
472 | 0 | { |
473 | | /* If we cannot get the resize_rwl lock someone is resizing |
474 | | the hash table, try to help out by moving table data. */ |
475 | 0 | while (pthread_rwlock_tryrdlock(&htab->resize_rwl) != 0) |
476 | 0 | resize_worker(htab); |
477 | |
|
478 | 0 | size_t idx; |
479 | | |
480 | | /* Make the hash data nonzero. */ |
481 | 0 | hval = hval ?: 1; |
482 | 0 | idx = lookup(htab, hval); |
483 | |
|
484 | 0 | if (idx == 0) |
485 | 0 | { |
486 | 0 | pthread_rwlock_unlock(&htab->resize_rwl); |
487 | 0 | return NULL; |
488 | 0 | } |
489 | | |
490 | | /* get a copy before unlocking the lock */ |
491 | 0 | TYPE ret_val = (TYPE) atomic_load_explicit(&htab->table[idx].val_ptr, |
492 | 0 | memory_order_relaxed); |
493 | |
|
494 | 0 | pthread_rwlock_unlock(&htab->resize_rwl); |
495 | 0 | return ret_val; |
496 | 0 | } Unexecuted instantiation: dwflst_tracker_elftab_find Unexecuted instantiation: dwflst_tracker_dwfltab_find Unexecuted instantiation: Dwarf_Abbrev_Hash_find Unexecuted instantiation: Dwarf_Sig8_Hash_find |