/src/tinysparql/subprojects/glib-2.80.3/glib/gbitlock.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright © 2008 Ryan Lortie |
3 | | * Copyright © 2010 Codethink Limited |
4 | | * |
5 | | * SPDX-License-Identifier: LGPL-2.1-or-later |
6 | | * |
7 | | * This library is free software; you can redistribute it and/or |
8 | | * modify it under the terms of the GNU Lesser General Public |
9 | | * License as published by the Free Software Foundation; either |
10 | | * version 2.1 of the License, or (at your option) any later version. |
11 | | * |
12 | | * This library is distributed in the hope that it will be useful, |
13 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 | | * Lesser General Public License for more details. |
16 | | * |
17 | | * You should have received a copy of the GNU Lesser General Public |
18 | | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
19 | | * |
20 | | * Author: Ryan Lortie <desrt@desrt.ca> |
21 | | */ |
22 | | |
23 | | #include "config.h" |
24 | | |
25 | | #include "gbitlock.h" |
26 | | |
27 | | #include <glib/gmacros.h> |
28 | | #include <glib/gmessages.h> |
29 | | #include <glib/gatomic.h> |
30 | | #include <glib/gslist.h> |
31 | | #include <glib/gthread.h> |
32 | | #include <glib/gslice.h> |
33 | | |
34 | | #include "gthreadprivate.h" |
35 | | |
36 | | #ifdef G_BIT_LOCK_FORCE_FUTEX_EMULATION |
37 | | #undef HAVE_FUTEX |
38 | | #undef HAVE_FUTEX_TIME64 |
39 | | #endif |
40 | | |
41 | | #ifndef HAVE_FUTEX |
42 | | static GMutex g_futex_mutex; |
43 | | static GSList *g_futex_address_list = NULL; |
44 | | #endif |
45 | | |
46 | | #if defined(HAVE_FUTEX) || defined(HAVE_FUTEX_TIME64) |
47 | | /* |
48 | | * We have headers for futex(2) on the build machine. This does not |
49 | | * imply that every system that ever runs the resulting glib will have |
50 | | * kernel support for futex, but you'd have to have a pretty old |
51 | | * kernel in order for that not to be the case. |
52 | | * |
53 | | * If anyone actually gets bit by this, please file a bug. :) |
54 | | */ |
55 | | |
56 | | /* < private > |
57 | | * g_futex_wait: |
58 | | * @address: a pointer to an integer |
59 | | * @value: the value that should be at @address |
60 | | * |
61 | | * Atomically checks that the value stored at @address is equal to |
62 | | * @value and then blocks. If the value stored at @address is not |
63 | | * equal to @value then this function returns immediately. |
64 | | * |
65 | | * To unblock, call g_futex_wake() on @address. |
66 | | * |
67 | | * This call may spuriously unblock (for example, in response to the |
68 | | * process receiving a signal) but this is not guaranteed. Unlike the |
69 | | * Linux system call of a similar name, there is no guarantee that a |
70 | | * waiting process will unblock due to a g_futex_wake() call in a |
71 | | * separate process. |
72 | | */ |
73 | | static void |
74 | | g_futex_wait (const gint *address, |
75 | | gint value) |
76 | 0 | { |
77 | 0 | g_futex_simple (address, (gsize) FUTEX_WAIT_PRIVATE, (gsize) value, NULL); |
78 | 0 | } |
79 | | |
80 | | /* < private > |
81 | | * g_futex_wake: |
82 | | * @address: a pointer to an integer |
83 | | * |
84 | | * Nominally, wakes one thread that is blocked in g_futex_wait() on |
85 | | * @address (if any thread is currently waiting). |
86 | | * |
87 | | * As mentioned in the documentation for g_futex_wait(), spurious |
88 | | * wakeups may occur. As such, this call may result in more than one |
89 | | * thread being woken up. |
90 | | */ |
91 | | static void |
92 | | g_futex_wake (const gint *address) |
93 | 0 | { |
94 | 0 | g_futex_simple (address, (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL); |
95 | 0 | } |
96 | | |
97 | | #else |
98 | | |
99 | | /* emulate futex(2) */ |
100 | | typedef struct |
101 | | { |
102 | | const gint *address; |
103 | | gint ref_count; |
104 | | GCond wait_queue; |
105 | | } WaitAddress; |
106 | | |
107 | | static WaitAddress * |
108 | | g_futex_find_address (const gint *address) |
109 | | { |
110 | | GSList *node; |
111 | | |
112 | | for (node = g_futex_address_list; node; node = node->next) |
113 | | { |
114 | | WaitAddress *waiter = node->data; |
115 | | |
116 | | if (waiter->address == address) |
117 | | return waiter; |
118 | | } |
119 | | |
120 | | return NULL; |
121 | | } |
122 | | |
123 | | static void |
124 | | g_futex_wait (const gint *address, |
125 | | gint value) |
126 | | { |
127 | | g_mutex_lock (&g_futex_mutex); |
128 | | if G_LIKELY (g_atomic_int_get (address) == value) |
129 | | { |
130 | | WaitAddress *waiter; |
131 | | |
132 | | if ((waiter = g_futex_find_address (address)) == NULL) |
133 | | { |
134 | | waiter = g_slice_new (WaitAddress); |
135 | | waiter->address = address; |
136 | | g_cond_init (&waiter->wait_queue); |
137 | | waiter->ref_count = 0; |
138 | | g_futex_address_list = |
139 | | g_slist_prepend (g_futex_address_list, waiter); |
140 | | } |
141 | | |
142 | | waiter->ref_count++; |
143 | | g_cond_wait (&waiter->wait_queue, &g_futex_mutex); |
144 | | |
145 | | if (!--waiter->ref_count) |
146 | | { |
147 | | g_futex_address_list = |
148 | | g_slist_remove (g_futex_address_list, waiter); |
149 | | g_cond_clear (&waiter->wait_queue); |
150 | | g_slice_free (WaitAddress, waiter); |
151 | | } |
152 | | } |
153 | | g_mutex_unlock (&g_futex_mutex); |
154 | | } |
155 | | |
156 | | static void |
157 | | g_futex_wake (const gint *address) |
158 | | { |
159 | | WaitAddress *waiter; |
160 | | |
161 | | /* need to lock here for two reasons: |
162 | | * 1) need to acquire/release lock to ensure waiter is not in |
163 | | * the process of registering a wait |
164 | | * 2) need to -stay- locked until the end to ensure a wake() |
165 | | * in another thread doesn't cause 'waiter' to stop existing |
166 | | */ |
167 | | g_mutex_lock (&g_futex_mutex); |
168 | | if ((waiter = g_futex_find_address (address))) |
169 | | g_cond_signal (&waiter->wait_queue); |
170 | | g_mutex_unlock (&g_futex_mutex); |
171 | | } |
172 | | #endif |
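
Both branches above implement the same contract: g_futex_wait() blocks only while the value at the address still equals the expected value, wakeups may be spurious, and g_futex_wake() nudges at most the waiters registered on that address. A minimal sketch (illustrative only, not part of this file) of the re-check loop that callers build on top of these helpers:

static void
wait_while_equal (gint *state, gint busy_value)
{
  gint v;

  /* Re-read and re-check on every iteration: g_futex_wait() may return
   * spuriously, and it returns immediately if *state no longer equals v. */
  while ((v = g_atomic_int_get (state)) == busy_value)
    g_futex_wait (state, v);
}
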
173 | | |
174 | | #define CONTENTION_CLASSES 11 |
175 | | static gint g_bit_lock_contended[CONTENTION_CLASSES]; /* (atomic) */ |
176 | | |
177 | | G_ALWAYS_INLINE static inline guint |
178 | | bit_lock_contended_class (gpointer address) |
179 | 90.1M | { |
180 | 90.1M | return ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); |
181 | 90.1M | } |
182 | | |
183 | | #if (defined (i386) || defined (__amd64__)) |
184 | | #if G_GNUC_CHECK_VERSION(4, 5) |
185 | | #define USE_ASM_GOTO 1 |
186 | | #endif |
187 | | #endif |
188 | | |
189 | | /** |
190 | | * g_bit_lock: |
191 | | * @address: a pointer to an integer |
192 | | * @lock_bit: a bit value between 0 and 31 |
193 | | * |
194 | | * Sets the indicated @lock_bit in @address. If the bit is already |
195 | | * set, this call will block until g_bit_unlock() unsets the |
196 | | * corresponding bit. |
197 | | * |
198 | | * Attempting to lock on two different bits within the same integer is |
199 | | * not supported and will very probably cause deadlocks. |
200 | | * |
201 | | * The value of the bit that is set is (1u << @lock_bit). If @lock_bit is |
202 | | * not between 0 and 31 then the result is undefined. |
203 | | * |
204 | | * This function accesses @address atomically. All other accesses to |
205 | | * @address must be atomic in order for this function to work |
206 | | * reliably. While @address has a `volatile` qualifier, this is a historical |
207 | | * artifact and the argument passed to it should not be `volatile`. |
208 | | * |
209 | | * Since: 2.24 |
210 | | **/ |
211 | | void |
212 | | g_bit_lock (volatile gint *address, |
213 | | gint lock_bit) |
214 | 57.0M | { |
215 | 57.0M | gint *address_nonvolatile = (gint *) address; |
216 | | |
217 | | #ifdef USE_ASM_GOTO |
218 | | retry: |
219 | | __asm__ volatile goto ("lock bts %1, (%0)\n" |
220 | | "jc %l[contended]" |
221 | | : /* no output */ |
222 | | : "r" (address), "r" (lock_bit) |
223 | | : "cc", "memory" |
224 | | : contended); |
225 | | return; |
226 | | |
227 | | contended: |
228 | | { |
229 | | guint mask = 1u << lock_bit; |
230 | | guint v; |
231 | | |
232 | | v = (guint) g_atomic_int_get (address_nonvolatile); |
233 | | if (v & mask) |
234 | | { |
235 | | guint class = bit_lock_contended_class (address_nonvolatile); |
236 | | |
237 | | g_atomic_int_add (&g_bit_lock_contended[class], +1); |
238 | | g_futex_wait (address_nonvolatile, v); |
239 | | g_atomic_int_add (&g_bit_lock_contended[class], -1); |
240 | | } |
241 | | } |
242 | | goto retry; |
243 | | #else |
244 | 57.0M | guint mask = 1u << lock_bit; |
245 | 57.0M | guint v; |
246 | | |
247 | 57.0M | retry: |
248 | 57.0M | v = g_atomic_int_or (address_nonvolatile, mask); |
249 | 57.0M | if (v & mask) |
250 | | /* already locked */ |
251 | 0 | { |
252 | 0 | guint class = bit_lock_contended_class (address_nonvolatile); |
253 | |
254 | 0 | g_atomic_int_add (&g_bit_lock_contended[class], +1); |
255 | 0 | g_futex_wait (address_nonvolatile, v); |
256 | 0 | g_atomic_int_add (&g_bit_lock_contended[class], -1); |
257 | |
258 | 0 | goto retry; |
259 | 0 | } |
260 | 57.0M | #endif |
261 | 57.0M | } |
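
A usage sketch for the lock/unlock pair (hypothetical caller code, not part of this file): bit 0 of an ordinary gint serves as the lock, and all other accesses to that integer go through g_atomic_int_*() as the documentation above requires:

static gint object_flags = 0;   /* bit 0 is reserved as the lock bit */

static void
update_object_flags (guint new_bits)
{
  /* new_bits must not include bit 0, which is owned by the bit lock. */
  g_bit_lock (&object_flags, 0);              /* blocks while bit 0 is set */
  g_atomic_int_or (&object_flags, new_bits);  /* other bits stay atomic */
  g_bit_unlock (&object_flags, 0);            /* clears bit 0, wakes a waiter */
}
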
262 | | |
263 | | /** |
264 | | * g_bit_trylock: |
265 | | * @address: a pointer to an integer |
266 | | * @lock_bit: a bit value between 0 and 31 |
267 | | * |
268 | | * Sets the indicated @lock_bit in @address, returning %TRUE if |
269 | | * successful. If the bit is already set, returns %FALSE immediately. |
270 | | * |
271 | | * Attempting to lock on two different bits within the same integer is |
272 | | * not supported. |
273 | | * |
274 | | * The value of the bit that is set is (1u << @lock_bit). If @lock_bit is |
275 | | * not between 0 and 31 then the result is undefined. |
276 | | * |
277 | | * This function accesses @address atomically. All other accesses to |
278 | | * @address must be atomic in order for this function to work |
279 | | * reliably. While @address has a `volatile` qualifier, this is a historical |
280 | | * artifact and the argument passed to it should not be `volatile`. |
281 | | * |
282 | | * Returns: %TRUE if the lock was acquired |
283 | | * |
284 | | * Since: 2.24 |
285 | | **/ |
286 | | gboolean |
287 | | g_bit_trylock (volatile gint *address, |
288 | | gint lock_bit) |
289 | 0 | { |
290 | | #ifdef USE_ASM_GOTO |
291 | | gboolean result; |
292 | | |
293 | | __asm__ volatile ("lock bts %2, (%1)\n" |
294 | | "setnc %%al\n" |
295 | | "movzx %%al, %0" |
296 | | : "=r" (result) |
297 | | : "r" (address), "r" (lock_bit) |
298 | | : "cc", "memory"); |
299 | | |
300 | | return result; |
301 | | #else |
302 | 0 | gint *address_nonvolatile = (gint *) address; |
303 | 0 | guint mask = 1u << lock_bit; |
304 | 0 | guint v; |
305 | |
306 | 0 | v = g_atomic_int_or (address_nonvolatile, mask); |
307 | |
308 | 0 | return ~v & mask; |
309 | 0 | #endif |
310 | 0 | } |
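
The non-blocking variant follows the usual trylock pattern; a sketch of a hypothetical caller that skips the work instead of waiting:

static gboolean
try_update_flags (gint *flags, guint new_bits)
{
  if (!g_bit_trylock (flags, 0))
    return FALSE;                 /* bit 0 already held elsewhere */

  g_atomic_int_or (flags, new_bits);
  g_bit_unlock (flags, 0);
  return TRUE;
}
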
311 | | |
312 | | /** |
313 | | * g_bit_unlock: |
314 | | * @address: a pointer to an integer |
315 | | * @lock_bit: a bit value between 0 and 31 |
316 | | * |
317 | | * Clears the indicated @lock_bit in @address. If another thread is |
318 | | * currently blocked in g_bit_lock() on this same bit then it will be |
319 | | * woken up. |
320 | | * |
321 | | * This function accesses @address atomically. All other accesses to |
322 | | * @address must be atomic in order for this function to work |
323 | | * reliably. While @address has a `volatile` qualifier, this is a historical |
324 | | * artifact and the argument passed to it should not be `volatile`. |
325 | | * |
326 | | * Since: 2.24 |
327 | | **/ |
328 | | void |
329 | | g_bit_unlock (volatile gint *address, |
330 | | gint lock_bit) |
331 | 57.0M | { |
332 | 57.0M | gint *address_nonvolatile = (gint *) address; |
333 | | |
334 | | #ifdef USE_ASM_GOTO |
335 | | __asm__ volatile ("lock btr %1, (%0)" |
336 | | : /* no output */ |
337 | | : "r" (address), "r" (lock_bit) |
338 | | : "cc", "memory"); |
339 | | #else |
340 | 57.0M | guint mask = 1u << lock_bit; |
341 | | |
342 | 57.0M | g_atomic_int_and (address_nonvolatile, ~mask); |
343 | 57.0M | #endif |
344 | | |
345 | | /* Warning: unlocking may allow another thread to proceed and destroy the |
346 | | * memory that @address points to. We thus must not dereference it anymore. |
347 | | */ |
348 | | |
349 | 57.0M | { |
350 | 57.0M | guint class = bit_lock_contended_class (address_nonvolatile); |
351 | | |
352 | 57.0M | if (g_atomic_int_get (&g_bit_lock_contended[class])) |
353 | 0 | g_futex_wake (address_nonvolatile); |
354 | 57.0M | } |
355 | 57.0M | } |
356 | | |
357 | | |
358 | | /* We emulate pointer-sized futex(2) because the kernel API only |
359 | | * supports integers. |
360 | | * |
361 | | * We assume that the 'interesting' part is always the lower order bits. |
362 | | * This assumption holds because pointer bitlocks are restricted to |
363 | | * using the low order bits of the pointer as the lock. |
364 | | * |
365 | | * On 32 bits, there is nothing to do since the pointer size is equal to |
366 | | * the integer size. On little endian the lower-order bits don't move, |
367 | | * so do nothing. Only on 64bit big endian do we need to do a bit of |
368 | | * pointer arithmetic: the low order bits are shifted by 4 bytes. We |
369 | | * have a helper function that always does the right thing here. |
370 | | * |
371 | | * Since we always consider the low-order bits of the integer value, a |
372 | | * simple cast from (gsize) to (guint) always takes care of that. |
373 | | * |
374 | | * After that, pointer-sized futex becomes as simple as: |
375 | | * |
376 | | * g_futex_wait (g_futex_int_address (address), (guint) value); |
377 | | * |
378 | | * and |
379 | | * |
380 | | * g_futex_wake (g_futex_int_address (int_address)); |
381 | | */ |
382 | | static const gint * |
383 | | g_futex_int_address (const void *address) |
384 | 0 | { |
385 | 0 | const gint *int_address = address; |
386 | | |
387 | | /* this implementation makes these (reasonable) assumptions: */ |
388 | 0 | G_STATIC_ASSERT (G_BYTE_ORDER == G_LITTLE_ENDIAN || |
389 | 0 | (G_BYTE_ORDER == G_BIG_ENDIAN && |
390 | 0 | sizeof (int) == 4 && |
391 | 0 | (sizeof (gpointer) == 4 || sizeof (gpointer) == 8))); |
392 | |
393 | | #if G_BYTE_ORDER == G_BIG_ENDIAN && GLIB_SIZEOF_VOID_P == 8 |
394 | | int_address++; |
395 | | #endif |
396 | |
397 | 0 | return int_address; |
398 | 0 | } |
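
A worked example of the adjustment above, for the one case where it matters. Assume a 64-bit big-endian machine and a pointer-sized word stored at address A:

/* byte offset:   A+0 A+1 A+2 A+3   A+4 A+5 A+6 A+7
 * significance:  most significant  least significant   (big endian)
 *
 * The lock bits live in the low-order 32 bits, i.e. in bytes A+4..A+7,
 * which is exactly ((const gint *) A) + 1 -- hence the int_address++
 * above.  On little endian, and on 32-bit platforms, the low-order bits
 * already start at byte A+0, so the address is returned unchanged. */
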
399 | | |
400 | | G_ALWAYS_INLINE static inline gpointer |
401 | | pointer_bit_lock_mask_ptr (gpointer ptr, guint lock_bit, gboolean set, guintptr preserve_mask, gpointer preserve_ptr) |
402 | 9.34M | { |
403 | 9.34M | guintptr x_ptr; |
404 | 9.34M | guintptr x_preserve_ptr; |
405 | 9.34M | guintptr lock_mask; |
406 | | |
407 | 9.34M | x_ptr = (guintptr) ptr; |
408 | | |
409 | 9.34M | if (preserve_mask != 0) |
410 | 4.62M | { |
411 | 4.62M | x_preserve_ptr = (guintptr) preserve_ptr; |
412 | 4.62M | x_ptr = (x_preserve_ptr & preserve_mask) | (x_ptr & ~preserve_mask); |
413 | 4.62M | } |
414 | | |
415 | 9.34M | if (lock_bit == G_MAXUINT) |
416 | 0 | return (gpointer) x_ptr; |
417 | | |
418 | 9.34M | lock_mask = (guintptr) (1u << lock_bit); |
419 | 9.34M | if (set) |
420 | 0 | return (gpointer) (x_ptr | lock_mask); |
421 | 9.34M | else |
422 | 9.34M | return (gpointer) (x_ptr & ~lock_mask); |
423 | 9.34M | } |
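
A worked example, with hypothetical values, of what the helper computes: keep the aligned part of @ptr, copy the low bits covered by @preserve_mask from @preserve_ptr, then force the lock bit according to @set:

/* ptr           = 0x1002   (aligned part 0x1000, low bits 0x2)
 * preserve_ptr  = 0x2005   (low bits 0x5)
 * preserve_mask = 0x7, lock_bit = 0, set = FALSE
 *
 * merge:  (0x2005 & 0x7) | (0x1002 & ~0x7)  =  0x5 | 0x1000  =  0x1005
 * unlock: 0x1005 & ~0x1                     =  0x1004
 *
 * Result 0x1004: upper bits from ptr, low tag bits from preserve_ptr,
 * and the lock bit cleared because set is FALSE. */
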
424 | | |
425 | | /** |
426 | | * g_pointer_bit_lock_and_get: |
427 | | * @address: (not nullable): a pointer to a #gpointer-sized value |
428 | | * @lock_bit: a bit value between 0 and 31 |
429 | | * @out_ptr: (out) (optional): returns the set pointer atomically. |
430 | | * This is the value after setting the lock; it thus always has the |
431 | | * lock bit set, while previously @address had the lock bit unset. |
432 | | * You may also use g_pointer_bit_lock_mask_ptr() to clear the lock bit. |
433 | | * |
434 | | * This is equivalent to g_bit_lock, but working on pointers (or other |
435 | | * pointer-sized values). |
436 | | * |
437 | | * For portability reasons, you may only lock on the bottom 32 bits of |
438 | | * the pointer. |
439 | | * |
440 | | * Since: 2.80 |
441 | | **/ |
442 | | void |
443 | | (g_pointer_bit_lock_and_get) (gpointer address, |
444 | | guint lock_bit, |
445 | | guintptr *out_ptr) |
446 | 16.5M | { |
447 | 16.5M | guint class = bit_lock_contended_class (address); |
448 | 16.5M | guintptr mask; |
449 | 16.5M | guintptr v; |
450 | | |
451 | 16.5M | g_return_if_fail (lock_bit < 32); |
452 | | |
453 | 16.5M | mask = 1u << lock_bit; |
454 | | |
455 | | #ifdef USE_ASM_GOTO |
456 | | if (G_LIKELY (!out_ptr)) |
457 | | { |
458 | | while (TRUE) |
459 | | { |
460 | | __asm__ volatile goto ("lock bts %1, (%0)\n" |
461 | | "jc %l[contended]" |
462 | | : /* no output */ |
463 | | : "r"(address), "r"((gsize) lock_bit) |
464 | | : "cc", "memory" |
465 | | : contended); |
466 | | return; |
467 | | |
468 | | contended: |
469 | | v = (guintptr) g_atomic_pointer_get ((gpointer *) address); |
470 | | if (v & mask) |
471 | | { |
472 | | g_atomic_int_add (&g_bit_lock_contended[class], +1); |
473 | | g_futex_wait (g_futex_int_address (address), v); |
474 | | g_atomic_int_add (&g_bit_lock_contended[class], -1); |
475 | | } |
476 | | } |
477 | | } |
478 | | #endif |
479 | | |
480 | 16.5M | retry: |
481 | 16.5M | v = g_atomic_pointer_or ((gpointer *) address, mask); |
482 | 16.5M | if (v & mask) |
483 | | /* already locked */ |
484 | 0 | { |
485 | 0 | g_atomic_int_add (&g_bit_lock_contended[class], +1); |
486 | 0 | g_futex_wait (g_futex_int_address (address), (guint) v); |
487 | 0 | g_atomic_int_add (&g_bit_lock_contended[class], -1); |
488 | 0 | goto retry; |
489 | 0 | } |
490 | | |
491 | 16.5M | if (out_ptr) |
492 | 16.5M | *out_ptr = (v | mask); |
493 | 16.5M | } |
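
A usage sketch (hypothetical caller, not part of this file): bit 0 of a pointer-sized field is the lock, @out_ptr returns the locked value from the same atomic operation, and g_pointer_bit_lock_mask_ptr() recovers the usable pointer:

static gpointer shared_ptr;       /* bit 0 doubles as a lock bit */

static void
with_shared_ptr_locked (void)
{
  guintptr locked;
  gpointer real;

  g_pointer_bit_lock_and_get (&shared_ptr, 0, &locked);

  /* `locked` always has bit 0 set; mask it away before dereferencing. */
  real = g_pointer_bit_lock_mask_ptr ((gpointer) locked, 0, FALSE, 0, NULL);

  /* ... use `real` while the bit lock is held ... */
  (void) real;

  g_pointer_bit_unlock (&shared_ptr, 0);
}
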
494 | | |
495 | | /** |
496 | | * g_pointer_bit_lock: |
497 | | * @address: (not nullable): a pointer to a #gpointer-sized value |
498 | | * @lock_bit: a bit value between 0 and 31 |
499 | | * |
500 | | * This is equivalent to g_bit_lock, but working on pointers (or other |
501 | | * pointer-sized values). |
502 | | * |
503 | | * For portability reasons, you may only lock on the bottom 32 bits of |
504 | | * the pointer. |
505 | | * |
506 | | * While @address has a `volatile` qualifier, this is a historical |
507 | | * artifact and the argument passed to it should not be `volatile`. |
508 | | * |
509 | | * Since: 2.30 |
510 | | **/ |
511 | | void |
512 | | (g_pointer_bit_lock) (volatile void *address, |
513 | | gint lock_bit) |
514 | 0 | { |
515 | 0 | g_pointer_bit_lock_and_get ((gpointer *) address, (guint) lock_bit, NULL); |
516 | 0 | } |
517 | | |
518 | | /** |
519 | | * g_pointer_bit_trylock: |
520 | | * @address: (not nullable): a pointer to a #gpointer-sized value |
521 | | * @lock_bit: a bit value between 0 and 31 |
522 | | * |
523 | | * This is equivalent to g_bit_trylock(), but working on pointers (or |
524 | | * other pointer-sized values). |
525 | | * |
526 | | * For portability reasons, you may only lock on the bottom 32 bits of |
527 | | * the pointer. |
528 | | * |
529 | | * While @address has a `volatile` qualifier, this is a historical |
530 | | * artifact and the argument passed to it should not be `volatile`. |
531 | | * |
532 | | * Returns: %TRUE if the lock was acquired |
533 | | * |
534 | | * Since: 2.30 |
535 | | **/ |
536 | | gboolean |
537 | | (g_pointer_bit_trylock) (volatile void *address, |
538 | | gint lock_bit) |
539 | 0 | { |
540 | 0 | g_return_val_if_fail (lock_bit < 32, FALSE); |
541 | | |
542 | 0 | { |
543 | | #ifdef USE_ASM_GOTO |
544 | | gboolean result; |
545 | | |
546 | | __asm__ volatile ("lock bts %2, (%1)\n" |
547 | | "setnc %%al\n" |
548 | | "movzx %%al, %0" |
549 | | : "=r" (result) |
550 | | : "r" (address), "r" ((gsize) lock_bit) |
551 | | : "cc", "memory"); |
552 | | |
553 | | return result; |
554 | | #else |
555 | 0 | void *address_nonvolatile = (void *) address; |
556 | 0 | gpointer *pointer_address = address_nonvolatile; |
557 | 0 | gsize mask = 1u << lock_bit; |
558 | 0 | guintptr v; |
559 | |
560 | 0 | g_return_val_if_fail (lock_bit < 32, FALSE); |
561 | | |
562 | 0 | v = g_atomic_pointer_or (pointer_address, mask); |
563 | |
564 | 0 | return (~(gsize) v & mask) != 0; |
565 | 0 | #endif |
566 | 0 | } |
567 | 0 | } |
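
The pointer trylock mirrors g_bit_trylock(); a short sketch of a hypothetical opportunistic caller:

static gboolean
try_visit_shared_ptr (gpointer *slot)
{
  if (!g_pointer_bit_trylock (slot, 0))
    return FALSE;                 /* lock bit already set, do not block */

  /* ... inspect the pointer stored in *slot (with bit 0 masked away) ... */

  g_pointer_bit_unlock (slot, 0);
  return TRUE;
}
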
568 | | |
569 | | /** |
570 | | * g_pointer_bit_unlock: |
571 | | * @address: (not nullable): a pointer to a #gpointer-sized value |
572 | | * @lock_bit: a bit value between 0 and 31 |
573 | | * |
574 | | * This is equivalent to g_bit_unlock, but working on pointers (or other |
575 | | * pointer-sized values). |
576 | | * |
577 | | * For portability reasons, you may only lock on the bottom 32 bits of |
578 | | * the pointer. |
579 | | * |
580 | | * While @address has a `volatile` qualifier, this is a historical |
581 | | * artifact and the argument passed to it should not be `volatile`. |
582 | | * |
583 | | * Since: 2.30 |
584 | | **/ |
585 | | void |
586 | | (g_pointer_bit_unlock) (volatile void *address, |
587 | | gint lock_bit) |
588 | 11.8M | { |
589 | 11.8M | void *address_nonvolatile = (void *) address; |
590 | | |
591 | 11.8M | g_return_if_fail (lock_bit < 32); |
592 | | |
593 | 11.8M | { |
594 | | #ifdef USE_ASM_GOTO |
595 | | __asm__ volatile ("lock btr %1, (%0)" |
596 | | : /* no output */ |
597 | | : "r" (address), "r" ((gsize) lock_bit) |
598 | | : "cc", "memory"); |
599 | | #else |
600 | 11.8M | gpointer *pointer_address = address_nonvolatile; |
601 | 11.8M | gsize mask = 1u << lock_bit; |
602 | | |
603 | 11.8M | g_atomic_pointer_and (pointer_address, ~mask); |
604 | 11.8M | #endif |
605 | | |
606 | | /* Warning: unlocking may allow another thread to proceed and destroy the |
607 | | * memory that @address points to. We thus must not dereference it anymore. |
608 | | */ |
609 | | |
610 | 11.8M | { |
611 | 11.8M | guint class = bit_lock_contended_class (address_nonvolatile); |
612 | | |
613 | 11.8M | if (g_atomic_int_get (&g_bit_lock_contended[class])) |
614 | 0 | g_futex_wake (g_futex_int_address (address_nonvolatile)); |
615 | 11.8M | } |
616 | 11.8M | } |
617 | 11.8M | } |
618 | | |
619 | | /** |
620 | | * g_pointer_bit_lock_mask_ptr: |
621 | | * @ptr: (nullable): the pointer to mask |
622 | | * @lock_bit: the bit to set/clear. If set to `G_MAXUINT`, the |
623 | | * lockbit is taken from @preserve_ptr or @ptr (depending on @preserve_mask). |
624 | | * @set: whether to set (lock) the bit or unset (unlock). This |
625 | | * has no effect, if @lock_bit is set to `G_MAXUINT`. |
626 | | * @preserve_mask: if non-zero, a bit-mask for @preserve_ptr. The |
627 | | * @preserve_mask bits from @preserve_ptr are set in the result. |
628 | | * Note that the @lock_bit bit will always be set according to @set, |
629 | | * regardless of @preserve_mask and @preserve_ptr (unless @lock_bit is |
630 | | * `G_MAXUINT`). |
631 | | * @preserve_ptr: (nullable): if @preserve_mask is non-zero, the bits |
632 | | * from this pointer are set in the result. |
633 | | * |
634 | | * This mangles @ptr as g_pointer_bit_lock() and g_pointer_bit_unlock() |
635 | | * do. |
636 | | * |
637 | | * Returns: the mangled pointer. |
638 | | * |
639 | | * Since: 2.80 |
640 | | **/ |
641 | | gpointer |
642 | | g_pointer_bit_lock_mask_ptr (gpointer ptr, guint lock_bit, gboolean set, guintptr preserve_mask, gpointer preserve_ptr) |
643 | 74.2k | { |
644 | 74.2k | g_return_val_if_fail (lock_bit < 32u || lock_bit == G_MAXUINT, ptr); |
645 | | |
646 | 74.2k | return pointer_bit_lock_mask_ptr (ptr, lock_bit, set, preserve_mask, preserve_ptr); |
647 | 74.2k | } |
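
A sketch of the @lock_bit == G_MAXUINT case (hypothetical helper, not part of this file): no bit is set or cleared, only the @preserve_mask bits are copied over, which is handy for building a replacement pointer that keeps existing tag bits:

static gpointer
merge_keeping_tag_bits (gpointer *slot, gpointer new_ptr)
{
  gpointer old_val = g_atomic_pointer_get (slot);

  /* Take the three low (tag) bits from the currently stored value and
   * everything else from new_ptr; no lock bit is touched. */
  return g_pointer_bit_lock_mask_ptr (new_ptr, G_MAXUINT, FALSE, 0x7, old_val);
}
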
648 | | |
649 | | /** |
650 | | * g_pointer_bit_unlock_and_set: |
651 | | * @address: (not nullable): a pointer to a #gpointer-sized value |
652 | | * @lock_bit: a bit value between 0 and 31 |
653 | | * @ptr: the new pointer value to set |
654 | | * @preserve_mask: if non-zero, those bits of the current pointer in @address |
655 | | * are preserved. |
656 | | * Note that the @lock_bit bit will always be cleared (this call unlocks), |
657 | | * regardless of @preserve_mask and the currently set value in @address. |
658 | | * |
659 | | * This is equivalent to g_pointer_bit_unlock() and atomically setting |
660 | | * the pointer value. |
661 | | * |
662 | | * Note that the lock bit will be cleared from the pointer. If the unlocked |
663 | | * pointer that was set is not identical to @ptr, an assertion fails. In other |
664 | | * words, @ptr must have @lock_bit unset. This also means you usually can |
665 | | * only use this on the lowest bits. |
666 | | * |
667 | | * Since: 2.80 |
668 | | **/ |
669 | | void (g_pointer_bit_unlock_and_set) (void *address, |
670 | | guint lock_bit, |
671 | | gpointer ptr, |
672 | | guintptr preserve_mask) |
673 | 4.63M | { |
674 | 4.63M | gpointer *pointer_address = address; |
675 | 4.63M | guint class = bit_lock_contended_class (address); |
676 | 4.63M | gpointer ptr2; |
677 | | |
678 | 4.63M | g_return_if_fail (lock_bit < 32u); |
679 | | |
680 | 4.63M | if (preserve_mask != 0) |
681 | 4.62M | { |
682 | 4.62M | gpointer old_ptr = g_atomic_pointer_get ((gpointer *) address); |
683 | | |
684 | 4.62M | again: |
685 | 4.62M | ptr2 = pointer_bit_lock_mask_ptr (ptr, lock_bit, FALSE, preserve_mask, old_ptr); |
686 | 4.62M | if (!g_atomic_pointer_compare_and_exchange_full (pointer_address, old_ptr, ptr2, &old_ptr)) |
687 | 0 | goto again; |
688 | 4.62M | } |
689 | 16.5k | else |
690 | 16.5k | { |
691 | 16.5k | ptr2 = pointer_bit_lock_mask_ptr (ptr, lock_bit, FALSE, 0, NULL); |
692 | 16.5k | g_atomic_pointer_set (pointer_address, ptr2); |
693 | 16.5k | } |
694 | | |
695 | 4.63M | if (g_atomic_int_get (&g_bit_lock_contended[class]) > 0) |
696 | 0 | g_futex_wake (g_futex_int_address (address)); |
697 | | |
698 | | /* It makes no sense, if unlocking mangles the pointer. Assert against |
699 | | * that. |
700 | | * |
701 | | * Note that based on @preserve_mask, the pointer also gets mangled, which |
702 | | * can make sense for the caller. We don't assert for that. */ |
703 | 4.63M | g_return_if_fail (ptr == pointer_bit_lock_mask_ptr (ptr, lock_bit, FALSE, 0, NULL)); |
704 | 4.63M | } |
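
Finally, a sketch (hypothetical caller) of the combined unlock-and-set: take the bit lock, prepare a replacement pointer, then publish it and release the lock in a single atomic store; a non-zero @preserve_mask would additionally carry tag bits over from the old value:

static void
replace_shared_ptr (gpointer *slot, gpointer new_ptr)
{
  g_pointer_bit_lock (slot, 0);

  /* ... new_ptr is prepared while bit 0 of *slot is held; it must not
   * have bit 0 set itself, or the assertion above fires ... */

  /* Stores new_ptr, clears lock bit 0 and wakes any waiter, atomically. */
  g_pointer_bit_unlock_and_set (slot, 0, new_ptr, 0);
}
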