/src/glib-2.80.0/glib/gbitlock.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright © 2008 Ryan Lortie |
3 | | * Copyright © 2010 Codethink Limited |
4 | | * |
5 | | * SPDX-License-Identifier: LGPL-2.1-or-later |
6 | | * |
7 | | * This library is free software; you can redistribute it and/or |
8 | | * modify it under the terms of the GNU Lesser General Public |
9 | | * License as published by the Free Software Foundation; either |
10 | | * version 2.1 of the License, or (at your option) any later version. |
11 | | * |
12 | | * This library is distributed in the hope that it will be useful, |
13 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 | | * Lesser General Public License for more details. |
16 | | * |
17 | | * You should have received a copy of the GNU Lesser General Public |
18 | | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
19 | | * |
20 | | * Author: Ryan Lortie <desrt@desrt.ca> |
21 | | */ |
22 | | |
23 | | #include "config.h" |
24 | | |
25 | | #include "gbitlock.h" |
26 | | |
27 | | #include <glib/gmacros.h> |
28 | | #include <glib/gmessages.h> |
29 | | #include <glib/gatomic.h> |
30 | | #include <glib/gslist.h> |
31 | | #include <glib/gthread.h> |
32 | | #include <glib/gslice.h> |
33 | | |
34 | | #include "gthreadprivate.h" |
35 | | |
36 | | #ifdef G_BIT_LOCK_FORCE_FUTEX_EMULATION |
37 | | #undef HAVE_FUTEX |
38 | | #undef HAVE_FUTEX_TIME64 |
39 | | #endif |
40 | | |
41 | | #ifndef HAVE_FUTEX |
42 | | static GMutex g_futex_mutex; |
43 | | static GSList *g_futex_address_list = NULL; |
44 | | #endif |
45 | | |
46 | | #if defined(HAVE_FUTEX) || defined(HAVE_FUTEX_TIME64) |
47 | | /* |
48 | | * We have headers for futex(2) on the build machine. This does not |
49 | | * imply that every system that ever runs the resulting glib will have |
50 | | * kernel support for futex, but you'd have to have a pretty old |
51 | | * kernel in order for that not to be the case. |
52 | | * |
53 | | * If anyone actually gets bit by this, please file a bug. :) |
54 | | */ |
55 | | |
56 | | /* < private > |
57 | | * g_futex_wait: |
58 | | * @address: a pointer to an integer |
59 | | * @value: the value that should be at @address |
60 | | * |
61 | | * Atomically checks that the value stored at @address is equal to |
62 | | * @value and then blocks. If the value stored at @address is not |
63 | | * equal to @value then this function returns immediately. |
64 | | * |
65 | | * To unblock, call g_futex_wake() on @address. |
66 | | * |
67 | | * This call may spuriously unblock (for example, in response to the |
68 | | * process receiving a signal) but this is not guaranteed. Unlike the |
69 | | * Linux system call of a similar name, there is no guarantee that a |
70 | | * waiting process will unblock due to a g_futex_wake() call in a |
71 | | * separate process. |
72 | | */ |
73 | | static void |
74 | | g_futex_wait (const gint *address, |
75 | | gint value) |
76 | 0 | { |
77 | 0 | g_futex_simple (address, (gsize) FUTEX_WAIT_PRIVATE, (gsize) value, NULL); |
78 | 0 | } |
79 | | |
80 | | /* < private > |
81 | | * g_futex_wake: |
82 | | * @address: a pointer to an integer |
83 | | * |
84 | | * Nominally, wakes one thread that is blocked in g_futex_wait() on |
85 | | * @address (if any thread is currently waiting). |
86 | | * |
87 | | * As mentioned in the documentation for g_futex_wait(), spurious |
88 | | * wakeups may occur. As such, this call may result in more than one |
89 | | * thread being woken up. |
90 | | */ |
91 | | static void |
92 | | g_futex_wake (const gint *address) |
93 | 0 | { |
94 | 0 | g_futex_simple (address, (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL); |
95 | 0 | } |
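
For reference, g_futex_simple() is a private macro from gthreadprivate.h that boils down to the raw Linux futex(2) syscall. The sketch below is a rough, Linux-only equivalent of the two wrappers above (an illustration, not GLib code); it ignores the futex_time64 selection that the real macro performs.

/* Rough equivalent of the wrappers above, assuming the raw futex(2) syscall. */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static void
example_futex_wait (const int *address, int value)
{
  /* Blocks only while *address still equals value; spurious returns are
   * fine because callers re-check their condition and retry. */
  syscall (SYS_futex, address, FUTEX_WAIT_PRIVATE, value, NULL, NULL, 0);
}

static void
example_futex_wake (const int *address)
{
  /* Wake at most one waiter queued on this address. */
  syscall (SYS_futex, address, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);
}
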
96 | | |
97 | | #else |
98 | | |
99 | | /* emulate futex(2) */ |
100 | | typedef struct |
101 | | { |
102 | | const gint *address; |
103 | | gint ref_count; |
104 | | GCond wait_queue; |
105 | | } WaitAddress; |
106 | | |
107 | | static WaitAddress * |
108 | | g_futex_find_address (const gint *address) |
109 | | { |
110 | | GSList *node; |
111 | | |
112 | | for (node = g_futex_address_list; node; node = node->next) |
113 | | { |
114 | | WaitAddress *waiter = node->data; |
115 | | |
116 | | if (waiter->address == address) |
117 | | return waiter; |
118 | | } |
119 | | |
120 | | return NULL; |
121 | | } |
122 | | |
123 | | static void |
124 | | g_futex_wait (const gint *address, |
125 | | gint value) |
126 | | { |
127 | | g_mutex_lock (&g_futex_mutex); |
128 | | if G_LIKELY (g_atomic_int_get (address) == value) |
129 | | { |
130 | | WaitAddress *waiter; |
131 | | |
132 | | if ((waiter = g_futex_find_address (address)) == NULL) |
133 | | { |
134 | | waiter = g_slice_new (WaitAddress); |
135 | | waiter->address = address; |
136 | | g_cond_init (&waiter->wait_queue); |
137 | | waiter->ref_count = 0; |
138 | | g_futex_address_list = |
139 | | g_slist_prepend (g_futex_address_list, waiter); |
140 | | } |
141 | | |
142 | | waiter->ref_count++; |
143 | | g_cond_wait (&waiter->wait_queue, &g_futex_mutex); |
144 | | |
145 | | if (!--waiter->ref_count) |
146 | | { |
147 | | g_futex_address_list = |
148 | | g_slist_remove (g_futex_address_list, waiter); |
149 | | g_cond_clear (&waiter->wait_queue); |
150 | | g_slice_free (WaitAddress, waiter); |
151 | | } |
152 | | } |
153 | | g_mutex_unlock (&g_futex_mutex); |
154 | | } |
155 | | |
156 | | static void |
157 | | g_futex_wake (const gint *address) |
158 | | { |
159 | | WaitAddress *waiter; |
160 | | |
161 | | /* need to lock here for two reasons: |
162 | | * 1) need to acquire/release lock to ensure waiter is not in |
163 | | * the process of registering a wait |
164 | | * 2) need to -stay- locked until the end to ensure a wake() |
165 | | * in another thread doesn't cause 'waiter' to stop existing |
166 | | */ |
167 | | g_mutex_lock (&g_futex_mutex); |
168 | | if ((waiter = g_futex_find_address (address))) |
169 | | g_cond_signal (&waiter->wait_queue); |
170 | | g_mutex_unlock (&g_futex_mutex); |
171 | | } |
172 | | #endif |
173 | | |
174 | | #define CONTENTION_CLASSES 11 |
175 | | static gint g_bit_lock_contended[CONTENTION_CLASSES]; /* (atomic) */ |
176 | | |
177 | | G_ALWAYS_INLINE static inline guint |
178 | | bit_lock_contended_class (gpointer address) |
179 | 584k | { |
180 | 584k | return ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); |
181 | 584k | } |
182 | | |
183 | | #if (defined (i386) || defined (__amd64__)) |
184 | | #if G_GNUC_CHECK_VERSION(4, 5) |
185 | | #define USE_ASM_GOTO 1 |
186 | | #endif |
187 | | #endif |
188 | | |
189 | | /** |
190 | | * g_bit_lock: |
191 | | * @address: a pointer to an integer |
192 | | * @lock_bit: a bit value between 0 and 31 |
193 | | * |
194 | | * Sets the indicated @lock_bit in @address. If the bit is already |
195 | | * set, this call will block until g_bit_unlock() unsets the |
196 | | * corresponding bit. |
197 | | * |
198 | | * Attempting to lock on two different bits within the same integer is |
199 | | * not supported and will very probably cause deadlocks. |
200 | | * |
201 | | * The value of the bit that is set is (1u << @lock_bit). If @lock_bit |
202 | | * is not between 0 and 31 then the result is undefined. |
203 | | * |
204 | | * This function accesses @address atomically. All other accesses to |
205 | | * @address must be atomic in order for this function to work |
206 | | * reliably. While @address has a `volatile` qualifier, this is a historical |
207 | | * artifact and the argument passed to it should not be `volatile`. |
208 | | * |
209 | | * Since: 2.24 |
210 | | **/ |
211 | | void |
212 | | g_bit_lock (volatile gint *address, |
213 | | gint lock_bit) |
214 | 73.7k | { |
215 | 73.7k | gint *address_nonvolatile = (gint *) address; |
216 | | |
217 | | #ifdef USE_ASM_GOTO |
218 | | retry: |
219 | | __asm__ volatile goto ("lock bts %1, (%0)\n" |
220 | | "jc %l[contended]" |
221 | | : /* no output */ |
222 | | : "r" (address), "r" (lock_bit) |
223 | | : "cc", "memory" |
224 | | : contended); |
225 | | return; |
226 | | |
227 | | contended: |
228 | | { |
229 | | guint mask = 1u << lock_bit; |
230 | | guint v; |
231 | | |
232 | | v = (guint) g_atomic_int_get (address_nonvolatile); |
233 | | if (v & mask) |
234 | | { |
235 | | guint class = bit_lock_contended_class (address_nonvolatile); |
236 | | |
237 | | g_atomic_int_add (&g_bit_lock_contended[class], +1); |
238 | | g_futex_wait (address_nonvolatile, v); |
239 | | g_atomic_int_add (&g_bit_lock_contended[class], -1); |
240 | | } |
241 | | } |
242 | | goto retry; |
243 | | #else |
244 | 73.7k | guint mask = 1u << lock_bit; |
245 | 73.7k | guint v; |
246 | | |
247 | 73.7k | retry: |
248 | 73.7k | v = g_atomic_int_or (address_nonvolatile, mask); |
249 | 73.7k | if (v & mask) |
250 | | /* already locked */ |
251 | 0 | { |
252 | 0 | guint class = bit_lock_contended_class (address_nonvolatile); |
253 | |
254 | 0 | g_atomic_int_add (&g_bit_lock_contended[class], +1); |
255 | 0 | g_futex_wait (address_nonvolatile, v); |
256 | 0 | g_atomic_int_add (&g_bit_lock_contended[class], -1); |
257 | |
258 | 0 | goto retry; |
259 | 0 | } |
260 | 73.7k | #endif |
261 | 73.7k | } |
262 | | |
263 | | /** |
264 | | * g_bit_trylock: |
265 | | * @address: a pointer to an integer |
266 | | * @lock_bit: a bit value between 0 and 31 |
267 | | * |
268 | | * Sets the indicated @lock_bit in @address, returning %TRUE if |
269 | | * successful. If the bit is already set, returns %FALSE immediately. |
270 | | * |
271 | | * Attempting to lock on two different bits within the same integer is |
272 | | * not supported. |
273 | | * |
274 | | * The value of the bit that is set is (1u << @lock_bit). If @lock_bit |
275 | | * is not between 0 and 31 then the result is undefined. |
276 | | * |
277 | | * This function accesses @address atomically. All other accesses to |
278 | | * @address must be atomic in order for this function to work |
279 | | * reliably. While @address has a `volatile` qualifier, this is a historical |
280 | | * artifact and the argument passed to it should not be `volatile`. |
281 | | * |
282 | | * Returns: %TRUE if the lock was acquired |
283 | | * |
284 | | * Since: 2.24 |
285 | | **/ |
286 | | gboolean |
287 | | g_bit_trylock (volatile gint *address, |
288 | | gint lock_bit) |
289 | 0 | { |
290 | | #ifdef USE_ASM_GOTO |
291 | | gboolean result; |
292 | | |
293 | | __asm__ volatile ("lock bts %2, (%1)\n" |
294 | | "setnc %%al\n" |
295 | | "movzx %%al, %0" |
296 | | : "=r" (result) |
297 | | : "r" (address), "r" (lock_bit) |
298 | | : "cc", "memory"); |
299 | | |
300 | | return result; |
301 | | #else |
302 | 0 | gint *address_nonvolatile = (gint *) address; |
303 | 0 | guint mask = 1u << lock_bit; |
304 | 0 | guint v; |
305 | |
306 | 0 | v = g_atomic_int_or (address_nonvolatile, mask); |
307 | |
308 | 0 | return ~v & mask; |
309 | 0 | #endif |
310 | 0 | } |
311 | | |
312 | | /** |
313 | | * g_bit_unlock: |
314 | | * @address: a pointer to an integer |
315 | | * @lock_bit: a bit value between 0 and 31 |
316 | | * |
317 | | * Clears the indicated @lock_bit in @address. If another thread is |
318 | | * currently blocked in g_bit_lock() on this same bit then it will be |
319 | | * woken up. |
320 | | * |
321 | | * This function accesses @address atomically. All other accesses to |
322 | | * @address must be atomic in order for this function to work |
323 | | * reliably. While @address has a `volatile` qualifier, this is a historical |
324 | | * artifact and the argument passed to it should not be `volatile`. |
325 | | * |
326 | | * Since: 2.24 |
327 | | **/ |
328 | | void |
329 | | g_bit_unlock (volatile gint *address, |
330 | | gint lock_bit) |
331 | 73.7k | { |
332 | 73.7k | gint *address_nonvolatile = (gint *) address; |
333 | | |
334 | | #ifdef USE_ASM_GOTO |
335 | | __asm__ volatile ("lock btr %1, (%0)" |
336 | | : /* no output */ |
337 | | : "r" (address), "r" (lock_bit) |
338 | | : "cc", "memory"); |
339 | | #else |
340 | 73.7k | guint mask = 1u << lock_bit; |
341 | | |
342 | 73.7k | g_atomic_int_and (address_nonvolatile, ~mask); |
343 | 73.7k | #endif |
344 | | |
345 | 73.7k | { |
346 | 73.7k | guint class = bit_lock_contended_class (address_nonvolatile); |
347 | | |
348 | 73.7k | if (g_atomic_int_get (&g_bit_lock_contended[class])) |
349 | 0 | g_futex_wake (address_nonvolatile); |
350 | 73.7k | } |
351 | 73.7k | } |
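
As a usage illustration (hypothetical names, not part of this file): a single bit of a dedicated gint can serve as a mutex that costs no storage beyond that integer, which is the usual reason to reach for g_bit_lock() instead of a GMutex. A minimal sketch:

#include <glib.h>

typedef struct
{
  gint lock_and_flags;   /* bit 0 is used as the lock; other bits stay free */
  gint protected_value;
} ExampleObject;

#define EXAMPLE_LOCK_BIT 0

static void
example_object_increment (ExampleObject *obj)
{
  g_bit_lock (&obj->lock_and_flags, EXAMPLE_LOCK_BIT);
  obj->protected_value++;
  g_bit_unlock (&obj->lock_and_flags, EXAMPLE_LOCK_BIT);
}

static gboolean
example_object_try_increment (ExampleObject *obj)
{
  if (!g_bit_trylock (&obj->lock_and_flags, EXAMPLE_LOCK_BIT))
    return FALSE;    /* somebody else holds the bit; don't block */
  obj->protected_value++;
  g_bit_unlock (&obj->lock_and_flags, EXAMPLE_LOCK_BIT);
  return TRUE;
}
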
352 | | |
353 | | |
354 | | /* We emulate pointer-sized futex(2) because the kernel API only |
355 | | * supports integers. |
356 | | * |
357 | | * We assume that the 'interesting' part is always the lower order bits. |
358 | | * This assumption holds because pointer bitlocks are restricted to |
359 | | * using the low order bits of the pointer as the lock. |
360 | | * |
361 | | * On 32 bits, there is nothing to do since the pointer size is equal to |
362 | | * the integer size. On little endian the lower-order bits don't move, |
363 | | * so do nothing. Only on 64bit big endian do we need to do a bit of |
364 | | * pointer arithmetic: the low order bits are shifted by 4 bytes. We |
365 | | * have a helper function that always does the right thing here. |
366 | | * |
367 | | * Since we always consider the low-order bits of the integer value, a |
368 | | * simple cast from (gsize) to (guint) always takes care of that. |
369 | | * |
370 | | * After that, pointer-sized futex becomes as simple as: |
371 | | * |
372 | | * g_futex_wait (g_futex_int_address (address), (guint) value); |
373 | | * |
374 | | * and |
375 | | * |
376 | | * g_futex_wake (g_futex_int_address (int_address)); |
377 | | */ |
378 | | static const gint * |
379 | | g_futex_int_address (const void *address) |
380 | 0 | { |
381 | 0 | const gint *int_address = address; |
382 | | |
383 | | /* this implementation makes these (reasonable) assumptions: */ |
384 | 0 | G_STATIC_ASSERT (G_BYTE_ORDER == G_LITTLE_ENDIAN || |
385 | 0 | (G_BYTE_ORDER == G_BIG_ENDIAN && |
386 | 0 | sizeof (int) == 4 && |
387 | 0 | (sizeof (gpointer) == 4 || sizeof (gpointer) == 8))); |
388 | |
389 | | #if G_BYTE_ORDER == G_BIG_ENDIAN && GLIB_SIZEOF_VOID_P == 8 |
390 | | int_address++; |
391 | | #endif |
392 | |
393 | 0 | return int_address; |
394 | 0 | } |
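
A small illustrative check of the layout assumption (editor's sketch, not part of GLib): regardless of endianness, the gint selected by g_futex_int_address() must be the half of the pointer-sized slot that carries the low-order lock bits.

static void
example_int_address_layout (void)
{
  gpointer slot = (gpointer) (guintptr) 0x5;            /* lock bits 0 and 2 set */
  const gint *low_half = g_futex_int_address (&slot);

  /* On little endian the low-order half sits at offset 0; on 64-bit big
   * endian it sits at offset 4 (hence the int_address++ above).  Either
   * way, the selected gint sees the lock bits. */
  g_assert ((*low_half & 0x5) == 0x5);
}
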
395 | | |
396 | | G_ALWAYS_INLINE static inline gpointer |
397 | | pointer_bit_lock_mask_ptr (gpointer ptr, guint lock_bit, gboolean set, guintptr preserve_mask, gpointer preserve_ptr) |
398 | 145k | { |
399 | 145k | guintptr x_ptr; |
400 | 145k | guintptr x_preserve_ptr; |
401 | 145k | guintptr lock_mask; |
402 | | |
403 | 145k | x_ptr = (guintptr) ptr; |
404 | | |
405 | 145k | if (preserve_mask != 0) |
406 | 72.9k | { |
407 | 72.9k | x_preserve_ptr = (guintptr) preserve_ptr; |
408 | 72.9k | x_ptr = (x_preserve_ptr & preserve_mask) | (x_ptr & ~preserve_mask); |
409 | 72.9k | } |
410 | | |
411 | 145k | if (lock_bit == G_MAXUINT) |
412 | 0 | return (gpointer) x_ptr; |
413 | | |
414 | 145k | lock_mask = (guintptr) (1u << lock_bit); |
415 | 145k | if (set) |
416 | 0 | return (gpointer) (x_ptr | lock_mask); |
417 | 145k | else |
418 | 145k | return (gpointer) (x_ptr & ~lock_mask); |
419 | 145k | } |
420 | | |
421 | | /** |
422 | | * g_pointer_bit_lock_and_get: |
423 | | * @address: (not nullable): a pointer to a #gpointer-sized value |
424 | | * @lock_bit: a bit value between 0 and 31 |
425 | | * @out_ptr: (out) (optional): returns the set pointer atomically. |
426 | | * This is the value after setting the lock; it thus always has the |
427 | | * lock bit set, while previously @address had the lock bit unset. |
428 | | * You may also use g_pointer_bit_lock_mask_ptr() to clear the lock bit. |
429 | | * |
430 | | * This is equivalent to g_bit_lock(), but working on pointers (or other |
431 | | * pointer-sized values). |
432 | | * |
433 | | * For portability reasons, you may only lock on the bottom 32 bits of |
434 | | * the pointer. |
435 | | * |
436 | | * Since: 2.80 |
437 | | **/ |
438 | | void |
439 | | (g_pointer_bit_lock_and_get) (gpointer address, |
440 | | guint lock_bit, |
441 | | guintptr *out_ptr) |
442 | 255k | { |
443 | 255k | guint class = bit_lock_contended_class (address); |
444 | 255k | guintptr mask; |
445 | 255k | guintptr v; |
446 | | |
447 | 255k | g_return_if_fail (lock_bit < 32); |
448 | | |
449 | 255k | mask = 1u << lock_bit; |
450 | | |
451 | | #ifdef USE_ASM_GOTO |
452 | | if (G_LIKELY (!out_ptr)) |
453 | | { |
454 | | while (TRUE) |
455 | | { |
456 | | __asm__ volatile goto ("lock bts %1, (%0)\n" |
457 | | "jc %l[contended]" |
458 | | : /* no output */ |
459 | | : "r"(address), "r"((gsize) lock_bit) |
460 | | : "cc", "memory" |
461 | | : contended); |
462 | | return; |
463 | | |
464 | | contended: |
465 | | v = (guintptr) g_atomic_pointer_get ((gpointer *) address); |
466 | | if (v & mask) |
467 | | { |
468 | | g_atomic_int_add (&g_bit_lock_contended[class], +1); |
469 | | g_futex_wait (g_futex_int_address (address), v); |
470 | | g_atomic_int_add (&g_bit_lock_contended[class], -1); |
471 | | } |
472 | | } |
473 | | } |
474 | | #endif |
475 | | |
476 | 255k | retry: |
477 | 255k | v = g_atomic_pointer_or ((gpointer *) address, mask); |
478 | 255k | if (v & mask) |
479 | | /* already locked */ |
480 | 0 | { |
481 | 0 | g_atomic_int_add (&g_bit_lock_contended[class], +1); |
482 | 0 | g_futex_wait (g_futex_int_address (address), (guint) v); |
483 | 0 | g_atomic_int_add (&g_bit_lock_contended[class], -1); |
484 | 0 | goto retry; |
485 | 0 | } |
486 | | |
487 | 255k | if (out_ptr) |
488 | 255k | *out_ptr = (v | mask); |
489 | 255k | } |
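
A usage sketch for the @out_ptr argument (hypothetical names): the value handed back already reflects the locked slot, so the caller can derive the real pointer from it instead of issuing a second atomic read.

static gpointer
example_acquire (gpointer *slot)
{
  guintptr locked_value;

  g_pointer_bit_lock_and_get (slot, 0, &locked_value);

  /* locked_value has bit 0 set; strip it to obtain the usable pointer. */
  return g_pointer_bit_lock_mask_ptr ((gpointer) locked_value, 0, FALSE, 0, NULL);
}
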
490 | | |
491 | | /** |
492 | | * g_pointer_bit_lock: |
493 | | * @address: (not nullable): a pointer to a #gpointer-sized value |
494 | | * @lock_bit: a bit value between 0 and 31 |
495 | | * |
496 | | * This is equivalent to g_bit_lock(), but working on pointers (or other |
497 | | * pointer-sized values). |
498 | | * |
499 | | * For portability reasons, you may only lock on the bottom 32 bits of |
500 | | * the pointer. |
501 | | * |
502 | | * While @address has a `volatile` qualifier, this is a historical |
503 | | * artifact and the argument passed to it should not be `volatile`. |
504 | | * |
505 | | * Since: 2.30 |
506 | | **/ |
507 | | void |
508 | | (g_pointer_bit_lock) (volatile void *address, |
509 | | gint lock_bit) |
510 | 0 | { |
511 | 0 | g_pointer_bit_lock_and_get ((gpointer *) address, (guint) lock_bit, NULL); |
512 | 0 | } |
513 | | |
514 | | /** |
515 | | * g_pointer_bit_trylock: |
516 | | * @address: (not nullable): a pointer to a #gpointer-sized value |
517 | | * @lock_bit: a bit value between 0 and 31 |
518 | | * |
519 | | * This is equivalent to g_bit_trylock(), but working on pointers (or |
520 | | * other pointer-sized values). |
521 | | * |
522 | | * For portability reasons, you may only lock on the bottom 32 bits of |
523 | | * the pointer. |
524 | | * |
525 | | * While @address has a `volatile` qualifier, this is a historical |
526 | | * artifact and the argument passed to it should not be `volatile`. |
527 | | * |
528 | | * Returns: %TRUE if the lock was acquired |
529 | | * |
530 | | * Since: 2.30 |
531 | | **/ |
532 | | gboolean |
533 | | (g_pointer_bit_trylock) (volatile void *address, |
534 | | gint lock_bit) |
535 | 0 | { |
536 | 0 | g_return_val_if_fail (lock_bit < 32, FALSE); |
537 | | |
538 | 0 | { |
539 | | #ifdef USE_ASM_GOTO |
540 | | gboolean result; |
541 | | |
542 | | __asm__ volatile ("lock bts %2, (%1)\n" |
543 | | "setnc %%al\n" |
544 | | "movzx %%al, %0" |
545 | | : "=r" (result) |
546 | | : "r" (address), "r" ((gsize) lock_bit) |
547 | | : "cc", "memory"); |
548 | | |
549 | | return result; |
550 | | #else |
551 | 0 | void *address_nonvolatile = (void *) address; |
552 | 0 | gpointer *pointer_address = address_nonvolatile; |
553 | 0 | gsize mask = 1u << lock_bit; |
554 | 0 | guintptr v; |
555 | |
556 | 0 | g_return_val_if_fail (lock_bit < 32, FALSE); |
557 | | |
558 | 0 | v = g_atomic_pointer_or (pointer_address, mask); |
559 | |
560 | 0 | return (~(gsize) v & mask) != 0; |
561 | 0 | #endif |
562 | 0 | } |
563 | 0 | } |
564 | | |
565 | | /** |
566 | | * g_pointer_bit_unlock: |
567 | | * @address: (not nullable): a pointer to a #gpointer-sized value |
568 | | * @lock_bit: a bit value between 0 and 31 |
569 | | * |
570 | | * This is equivalent to g_bit_unlock(), but working on pointers (or other |
571 | | * pointer-sized values). |
572 | | * |
573 | | * For portability reasons, you may only lock on the bottom 32 bits of |
574 | | * the pointer. |
575 | | * |
576 | | * While @address has a `volatile` qualifier, this is a historical |
577 | | * artifact and the argument passed to it should not be `volatile`. |
578 | | * |
579 | | * Since: 2.30 |
580 | | **/ |
581 | | void |
582 | | (g_pointer_bit_unlock) (volatile void *address, |
583 | | gint lock_bit) |
584 | 182k | { |
585 | 182k | void *address_nonvolatile = (void *) address; |
586 | | |
587 | 182k | g_return_if_fail (lock_bit < 32); |
588 | | |
589 | 182k | { |
590 | | #ifdef USE_ASM_GOTO |
591 | | __asm__ volatile ("lock btr %1, (%0)" |
592 | | : /* no output */ |
593 | | : "r" (address), "r" ((gsize) lock_bit) |
594 | | : "cc", "memory"); |
595 | | #else |
596 | 182k | gpointer *pointer_address = address_nonvolatile; |
597 | 182k | gsize mask = 1u << lock_bit; |
598 | | |
599 | 182k | g_atomic_pointer_and (pointer_address, ~mask); |
600 | 182k | #endif |
601 | | |
602 | 182k | { |
603 | 182k | guint class = bit_lock_contended_class (address_nonvolatile); |
604 | | |
605 | 182k | if (g_atomic_int_get (&g_bit_lock_contended[class])) |
606 | 0 | g_futex_wake (g_futex_int_address (address_nonvolatile)); |
607 | 182k | } |
608 | 182k | } |
609 | 182k | } |
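
A usage sketch for the pointer variants (hypothetical names): bit 0 of a pointer to suitably aligned data is normally zero, so the slot can carry its own lock.

static gpointer example_slot;   /* only ever accessed atomically */

static gpointer
example_peek_slot_locked (void)
{
  gpointer value;

  g_pointer_bit_lock (&example_slot, 0);

  /* Bit 0 of the slot is now set; other lockers block until we unlock.
   * Mask the lock bit off before dereferencing the pointer. */
  value = g_pointer_bit_lock_mask_ptr (g_atomic_pointer_get (&example_slot),
                                       0, FALSE, 0, NULL);

  /* ... use 'value' while the bit lock is held ... */

  g_pointer_bit_unlock (&example_slot, 0);
  return value;
}

g_pointer_bit_trylock() follows the same pattern but returns FALSE instead of blocking when the bit is already set.
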
610 | | |
611 | | /** |
612 | | * g_pointer_bit_lock_mask_ptr: |
613 | | * @ptr: (nullable): the pointer to mask |
614 | | * @lock_bit: the bit to set/clear. If set to `G_MAXUINT`, the |
615 | | * lock bit is taken from @preserve_ptr or @ptr (depending on @preserve_mask). |
616 | | * @set: whether to set (lock) the bit or unset (unlock). This |
617 | | * has no effect, if @lock_bit is set to `G_MAXUINT`. |
618 | | * @preserve_mask: if non-zero, a bit-mask for @preserve_ptr. The |
619 | | * @preserve_mask bits from @preserve_ptr are set in the result. |
620 | | * Note that the @lock_bit bit will always be set according to @set, |
621 | | * regardless of @preserve_mask and @preserve_ptr (unless @lock_bit is |
622 | | * `G_MAXUINT`). |
623 | | * @preserve_ptr: (nullable): if @preserve_mask is non-zero, the bits |
624 | | * from this pointer are set in the result. |
625 | | * |
626 | | * This mangles @ptr as g_pointer_bit_lock() and g_pointer_bit_unlock() |
627 | | * do. |
628 | | * |
629 | | * Returns: the mangled pointer. |
630 | | * |
631 | | * Since: 2.80 |
632 | | **/ |
633 | | gpointer |
634 | | g_pointer_bit_lock_mask_ptr (gpointer ptr, guint lock_bit, gboolean set, guintptr preserve_mask, gpointer preserve_ptr) |
635 | 0 | { |
636 | 0 | g_return_val_if_fail (lock_bit < 32u || lock_bit == G_MAXUINT, ptr); |
637 | | |
638 | 0 | return pointer_bit_lock_mask_ptr (ptr, lock_bit, set, preserve_mask, preserve_ptr); |
639 | 0 | } |
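
A worked example of the masking rules with illustrative pointer values; the assertions simply spell out what the combinations described above produce.

static void
example_mask_ptr (void)
{
  gpointer ptr = (gpointer) (guintptr) 0x1008;
  gpointer preserved = (gpointer) (guintptr) 0x1006;

  /* Set or clear the lock bit (bit 0) on an otherwise untouched pointer. */
  g_assert (g_pointer_bit_lock_mask_ptr (ptr, 0, TRUE, 0, NULL) ==
            (gpointer) (guintptr) 0x1009);
  g_assert (g_pointer_bit_lock_mask_ptr (ptr, 0, FALSE, 0, NULL) ==
            (gpointer) (guintptr) 0x1008);

  /* Additionally take bits 1-2 from 'preserved' via preserve_mask:
   * (0x1006 & 0x6) | (0x1008 & ~0x6) == 0x100e, then bit 0 is cleared. */
  g_assert (g_pointer_bit_lock_mask_ptr (ptr, 0, FALSE, 0x6, preserved) ==
            (gpointer) (guintptr) 0x100e);
}
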
640 | | |
641 | | /** |
642 | | * g_pointer_bit_unlock_and_set: |
643 | | * @address: (not nullable): a pointer to a #gpointer-sized value |
644 | | * @lock_bit: a bit value between 0 and 31 |
645 | | * @ptr: the new pointer value to set |
646 | | * @preserve_mask: if non-zero, those bits of the current pointer in @address |
647 | | * are preserved. |
648 | | * Note that the @lock_bit bit will always be cleared, |
649 | | * regardless of @preserve_mask and the currently set value in @address. |
650 | | * |
651 | | * This is equivalent to g_pointer_bit_unlock() and atomically setting |
652 | | * the pointer value. |
653 | | * |
654 | | * Note that the lock bit will be cleared from the pointer. If the unlocked |
655 | | * pointer that was set is not identical to @ptr, an assertion fails. In other |
656 | | * words, @ptr must have @lock_bit unset. This also means you usually can |
657 | | * only use this on the lowest bits. |
658 | | * |
659 | | * Since: 2.80 |
660 | | **/ |
661 | | void (g_pointer_bit_unlock_and_set) (void *address, |
662 | | guint lock_bit, |
663 | | gpointer ptr, |
664 | | guintptr preserve_mask) |
665 | 72.9k | { |
666 | 72.9k | gpointer *pointer_address = address; |
667 | 72.9k | guint class = bit_lock_contended_class (address); |
668 | 72.9k | gpointer ptr2; |
669 | | |
670 | 72.9k | g_return_if_fail (lock_bit < 32u); |
671 | | |
672 | 72.9k | if (preserve_mask != 0) |
673 | 72.9k | { |
674 | 72.9k | gpointer old_ptr = g_atomic_pointer_get ((gpointer *) address); |
675 | | |
676 | 72.9k | again: |
677 | 72.9k | ptr2 = pointer_bit_lock_mask_ptr (ptr, lock_bit, FALSE, preserve_mask, old_ptr); |
678 | 72.9k | if (!g_atomic_pointer_compare_and_exchange_full (pointer_address, old_ptr, ptr2, &old_ptr)) |
679 | 0 | goto again; |
680 | 72.9k | } |
681 | 0 | else |
682 | 0 | { |
683 | 0 | ptr2 = pointer_bit_lock_mask_ptr (ptr, lock_bit, FALSE, 0, NULL); |
684 | 0 | g_atomic_pointer_set (pointer_address, ptr2); |
685 | 0 | } |
686 | | |
687 | 72.9k | if (g_atomic_int_get (&g_bit_lock_contended[class]) > 0) |
688 | 0 | g_futex_wake (g_futex_int_address (address)); |
689 | | |
690 | | /* It makes no sense, if unlocking mangles the pointer. Assert against |
691 | | * that. |
692 | | * |
693 | | * Note that based on @preserve_mask, the pointer also gets mangled, which |
694 | | * can make sense for the caller. We don't assert for that. */ |
695 | 72.9k | g_return_if_fail (ptr == pointer_bit_lock_mask_ptr (ptr, lock_bit, FALSE, 0, NULL)); |
696 | 72.9k | } |