/src/irssi/subprojects/glib-2.74.3/glib/gbitlock.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright © 2008 Ryan Lortie |
3 | | * Copyright © 2010 Codethink Limited |
4 | | * |
5 | | * SPDX-License-Identifier: LGPL-2.1-or-later |
6 | | * |
7 | | * This library is free software; you can redistribute it and/or |
8 | | * modify it under the terms of the GNU Lesser General Public |
9 | | * License as published by the Free Software Foundation; either |
10 | | * version 2.1 of the License, or (at your option) any later version. |
11 | | * |
12 | | * This library is distributed in the hope that it will be useful, |
13 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 | | * Lesser General Public License for more details. |
16 | | * |
17 | | * You should have received a copy of the GNU Lesser General Public |
18 | | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
19 | | * |
20 | | * Author: Ryan Lortie <desrt@desrt.ca> |
21 | | */ |
22 | | |
23 | | #include "config.h" |
24 | | |
25 | | #include "gbitlock.h" |
26 | | |
27 | | #include <glib/gmacros.h> |
28 | | #include <glib/gmessages.h> |
29 | | #include <glib/gatomic.h> |
30 | | #include <glib/gslist.h> |
31 | | #include <glib/gthread.h> |
32 | | #include <glib/gslice.h> |
33 | | |
34 | | #include "gthreadprivate.h" |
35 | | |
36 | | #ifdef G_BIT_LOCK_FORCE_FUTEX_EMULATION |
37 | | #undef HAVE_FUTEX |
38 | | #endif |
39 | | |
40 | | #ifndef HAVE_FUTEX |
41 | | static GMutex g_futex_mutex; |
42 | | static GSList *g_futex_address_list = NULL; |
43 | | #endif |
44 | | |
45 | | #ifdef HAVE_FUTEX |
46 | | /* |
47 | | * We have headers for futex(2) on the build machine. This does not |
48 | | * imply that every system that ever runs the resulting glib will have |
49 | | * kernel support for futex, but you'd have to have a pretty old |
50 | | * kernel in order for that not to be the case. |
51 | | * |
52 | | * If anyone actually gets bit by this, please file a bug. :) |
53 | | */ |
54 | | #include <linux/futex.h> |
55 | | #include <sys/syscall.h> |
56 | | #include <unistd.h> |
57 | | |
58 | | #ifndef FUTEX_WAIT_PRIVATE |
59 | | #define FUTEX_WAIT_PRIVATE FUTEX_WAIT |
60 | | #define FUTEX_WAKE_PRIVATE FUTEX_WAKE |
61 | | #endif |
62 | | |
63 | | /* < private > |
64 | | * g_futex_wait: |
65 | | * @address: a pointer to an integer |
66 | | * @value: the value that should be at @address |
67 | | * |
68 | | * Atomically checks that the value stored at @address is equal to |
69 | | * @value and then blocks. If the value stored at @address is not |
70 | | * equal to @value then this function returns immediately. |
71 | | * |
72 | | * To unblock, call g_futex_wake() on @address. |
73 | | * |
74 | | * This call may spuriously unblock (for example, in response to the |
75 | | * process receiving a signal) but this is not guaranteed. Unlike the |
76 | | * Linux system call of a similar name, there is no guarantee that a |
77 | | * waiting process will unblock due to a g_futex_wake() call in a |
78 | | * separate process. |
79 | | */ |
80 | | static void |
81 | | g_futex_wait (const gint *address, |
82 | | gint value) |
83 | 0 | { |
84 | 0 | syscall (__NR_futex, address, (gsize) FUTEX_WAIT_PRIVATE, (gsize) value, NULL); |
85 | 0 | } |
86 | | |
87 | | /* < private > |
88 | | * g_futex_wake: |
89 | | * @address: a pointer to an integer |
90 | | * |
91 | | * Nominally, wakes one thread that is blocked in g_futex_wait() on |
92 | | * @address (if any thread is currently waiting). |
93 | | * |
94 | | * As mentioned in the documentation for g_futex_wait(), spurious |
95 | | * wakeups may occur. As such, this call may result in more than one |
96 | | * thread being woken up. |
97 | | */ |
98 | | static void |
99 | | g_futex_wake (const gint *address) |
100 | 0 | { |
101 | 0 | syscall (__NR_futex, address, (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL); |
102 | 0 | } |
103 | | |
104 | | #else |
105 | | |
106 | | /* emulate futex(2) */ |
107 | | typedef struct |
108 | | { |
109 | | const gint *address; |
110 | | gint ref_count; |
111 | | GCond wait_queue; |
112 | | } WaitAddress; |
113 | | |
114 | | static WaitAddress * |
115 | | g_futex_find_address (const gint *address) |
116 | | { |
117 | | GSList *node; |
118 | | |
119 | | for (node = g_futex_address_list; node; node = node->next) |
120 | | { |
121 | | WaitAddress *waiter = node->data; |
122 | | |
123 | | if (waiter->address == address) |
124 | | return waiter; |
125 | | } |
126 | | |
127 | | return NULL; |
128 | | } |
129 | | |
130 | | static void |
131 | | g_futex_wait (const gint *address, |
132 | | gint value) |
133 | | { |
134 | | g_mutex_lock (&g_futex_mutex); |
135 | | if G_LIKELY (g_atomic_int_get (address) == value) |
136 | | { |
137 | | WaitAddress *waiter; |
138 | | |
139 | | if ((waiter = g_futex_find_address (address)) == NULL) |
140 | | { |
141 | | waiter = g_slice_new (WaitAddress); |
142 | | waiter->address = address; |
143 | | g_cond_init (&waiter->wait_queue); |
144 | | waiter->ref_count = 0; |
145 | | g_futex_address_list = |
146 | | g_slist_prepend (g_futex_address_list, waiter); |
147 | | } |
148 | | |
149 | | waiter->ref_count++; |
150 | | g_cond_wait (&waiter->wait_queue, &g_futex_mutex); |
151 | | |
152 | | if (!--waiter->ref_count) |
153 | | { |
154 | | g_futex_address_list = |
155 | | g_slist_remove (g_futex_address_list, waiter); |
156 | | g_cond_clear (&waiter->wait_queue); |
157 | | g_slice_free (WaitAddress, waiter); |
158 | | } |
159 | | } |
160 | | g_mutex_unlock (&g_futex_mutex); |
161 | | } |
162 | | |
163 | | static void |
164 | | g_futex_wake (const gint *address) |
165 | | { |
166 | | WaitAddress *waiter; |
167 | | |
168 | | /* need to lock here for two reasons: |
169 | | * 1) need to acquire/release lock to ensure waiter is not in |
170 | | * the process of registering a wait |
171 | | * 2) need to -stay- locked until the end to ensure a wake() |
172 | | * in another thread doesn't cause 'waiter' to stop existing |
173 | | */ |
174 | | g_mutex_lock (&g_futex_mutex); |
175 | | if ((waiter = g_futex_find_address (address))) |
176 | | g_cond_signal (&waiter->wait_queue); |
177 | | g_mutex_unlock (&g_futex_mutex); |
178 | | } |
179 | | #endif |
180 | | |
181 | | #define CONTENTION_CLASSES 11 |
182 | | static gint g_bit_lock_contended[CONTENTION_CLASSES]; /* (atomic) */ |
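These counters are a coarse "is anyone waiting?" filter: the unlock paths below only issue a futex wake when the counter for the address's contention class is non-zero. A quick illustration of the class mapping (hypothetical addresses; a collision between two addresses can only cause a harmless extra wake, never a missed one):

    /* address % 11 selects one of the eleven counters */
    guint class_a = ((gsize) 0x1000) % CONTENTION_CLASSES;  /* 4096 % 11 == 4 */
    guint class_b = ((gsize) 0x2000) % CONTENTION_CLASSES;  /* 8192 % 11 == 8 */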
183 | | |
184 | | #if (defined (i386) || defined (__amd64__)) |
185 | | #if G_GNUC_CHECK_VERSION(4, 5) |
186 | | #define USE_ASM_GOTO 1 |
187 | | #endif |
188 | | #endif |
189 | | |
190 | | /** |
191 | | * g_bit_lock: |
192 | | * @address: a pointer to an integer |
193 | | * @lock_bit: a bit value between 0 and 31 |
194 | | * |
195 | | * Sets the indicated @lock_bit in @address. If the bit is already |
196 | | * set, this call will block until g_bit_unlock() unsets the |
197 | | * corresponding bit. |
198 | | * |
199 | | * Attempting to lock on two different bits within the same integer is |
200 | | * not supported and will very probably cause deadlocks. |
201 | | * |
202 | | * The value of the bit that is set is (1u << @lock_bit). If @lock_bit |
203 | | * is not between 0 and 31 then the result is undefined. |
204 | | * |
205 | | * This function accesses @address atomically. All other accesses to |
206 | | * @address must be atomic in order for this function to work |
207 | | * reliably. While @address has a `volatile` qualifier, this is a historical |
208 | | * artifact and the argument passed to it should not be `volatile`. |
209 | | * |
210 | | * Since: 2.24 |
211 | | **/ |
212 | | void |
213 | | g_bit_lock (volatile gint *address, |
214 | | gint lock_bit) |
215 | 0 | { |
216 | 0 | gint *address_nonvolatile = (gint *) address; |
217 | |
218 | | #ifdef USE_ASM_GOTO |
219 | | retry: |
220 | | __asm__ volatile goto ("lock bts %1, (%0)\n" |
221 | | "jc %l[contended]" |
222 | | : /* no output */ |
223 | | : "r" (address), "r" (lock_bit) |
224 | | : "cc", "memory" |
225 | | : contended); |
226 | | return; |
227 | | |
228 | | contended: |
229 | | { |
230 | | guint mask = 1u << lock_bit; |
231 | | guint v; |
232 | | |
233 | | v = (guint) g_atomic_int_get (address_nonvolatile); |
234 | | if (v & mask) |
235 | | { |
236 | | guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended); |
237 | | |
238 | | g_atomic_int_add (&g_bit_lock_contended[class], +1); |
239 | | g_futex_wait (address_nonvolatile, v); |
240 | | g_atomic_int_add (&g_bit_lock_contended[class], -1); |
241 | | } |
242 | | } |
243 | | goto retry; |
244 | | #else |
245 | 0 | guint mask = 1u << lock_bit; |
246 | 0 | guint v; |
247 | 0 |
248 | 0 | retry: |
249 | 0 | v = g_atomic_int_or (address_nonvolatile, mask); |
250 | 0 | if (v & mask) |
251 | | /* already locked */ |
252 | 0 | { |
253 | 0 | guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended); |
254 | |
255 | 0 | g_atomic_int_add (&g_bit_lock_contended[class], +1); |
256 | 0 | g_futex_wait (address_nonvolatile, v); |
257 | 0 | g_atomic_int_add (&g_bit_lock_contended[class], -1); |
258 | |
259 | 0 | goto retry; |
260 | 0 | } |
261 | 0 | #endif |
262 | 0 | } |
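A hypothetical usage sketch for the g_bit_lock()/g_bit_unlock() pair (the `Object` type and `frob()` are illustrative, not from GLib): one bit of an integer field serves as the lock, and every other access to that field must also be atomic.

    typedef struct { gint flags; } Object;

    static void
    frob (Object *obj)
    {
      g_bit_lock (&obj->flags, 0);    /* blocks while bit 0 is held elsewhere */

      /* ... critical section; touch 'flags' only with g_atomic_int_*() ... */

      g_bit_unlock (&obj->flags, 0);  /* clears bit 0, wakes a waiter if any */
    }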
263 | | |
264 | | /** |
265 | | * g_bit_trylock: |
266 | | * @address: a pointer to an integer |
267 | | * @lock_bit: a bit value between 0 and 31 |
268 | | * |
269 | | * Sets the indicated @lock_bit in @address, returning %TRUE if |
270 | | * successful. If the bit is already set, returns %FALSE immediately. |
271 | | * |
272 | | * Attempting to lock on two different bits within the same integer is |
273 | | * not supported. |
274 | | * |
275 | | * The value of the bit that is set is (1u << @lock_bit). If @lock_bit |
276 | | * is not between 0 and 31 then the result is undefined. |
277 | | * |
278 | | * This function accesses @address atomically. All other accesses to |
279 | | * @address must be atomic in order for this function to work |
280 | | * reliably. While @address has a `volatile` qualifier, this is a historical |
281 | | * artifact and the argument passed to it should not be `volatile`. |
282 | | * |
283 | | * Returns: %TRUE if the lock was acquired |
284 | | * |
285 | | * Since: 2.24 |
286 | | **/ |
287 | | gboolean |
288 | | g_bit_trylock (volatile gint *address, |
289 | | gint lock_bit) |
290 | 0 | { |
291 | | #ifdef USE_ASM_GOTO |
292 | | gboolean result; |
293 | | |
294 | | __asm__ volatile ("lock bts %2, (%1)\n" |
295 | | "setnc %%al\n" |
296 | | "movzx %%al, %0" |
297 | | : "=r" (result) |
298 | | : "r" (address), "r" (lock_bit) |
299 | | : "cc", "memory"); |
300 | | |
301 | | return result; |
302 | | #else |
303 | 0 | gint *address_nonvolatile = (gint *) address; |
304 | 0 | guint mask = 1u << lock_bit; |
305 | 0 | guint v; |
306 | |
307 | 0 | v = g_atomic_int_or (address_nonvolatile, mask); |
308 | |
309 | 0 | return ~v & mask; |
310 | 0 | #endif |
311 | 0 | } |
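The non-blocking variant composes with the same unlock; a sketch reusing the illustrative `Object` from above:

    static gboolean
    try_frob (Object *obj)
    {
      if (!g_bit_trylock (&obj->flags, 0))
        return FALSE;                 /* bit was already set; caller may retry */

      /* ... critical section ... */

      g_bit_unlock (&obj->flags, 0);
      return TRUE;
    }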
312 | | |
313 | | /** |
314 | | * g_bit_unlock: |
315 | | * @address: a pointer to an integer |
316 | | * @lock_bit: a bit value between 0 and 31 |
317 | | * |
318 | | * Clears the indicated @lock_bit in @address. If another thread is |
319 | | * currently blocked in g_bit_lock() on this same bit then it will be |
320 | | * woken up. |
321 | | * |
322 | | * This function accesses @address atomically. All other accesses to |
323 | | * @address must be atomic in order for this function to work |
324 | | * reliably. While @address has a `volatile` qualifier, this is a historical |
325 | | * artifact and the argument passed to it should not be `volatile`. |
326 | | * |
327 | | * Since: 2.24 |
328 | | **/ |
329 | | void |
330 | | g_bit_unlock (volatile gint *address, |
331 | | gint lock_bit) |
332 | 0 | { |
333 | 0 | gint *address_nonvolatile = (gint *) address; |
334 | |
335 | | #ifdef USE_ASM_GOTO |
336 | | __asm__ volatile ("lock btr %1, (%0)" |
337 | | : /* no output */ |
338 | | : "r" (address), "r" (lock_bit) |
339 | | : "cc", "memory"); |
340 | | #else |
341 | 0 | guint mask = 1u << lock_bit; |
342 | |
343 | 0 | g_atomic_int_and (address_nonvolatile, ~mask); |
344 | 0 | #endif |
345 | |
346 | 0 | { |
347 | 0 | guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended); |
348 | |
349 | 0 | if (g_atomic_int_get (&g_bit_lock_contended[class])) |
350 | 0 | g_futex_wake (address_nonvolatile); |
351 | 0 | } |
352 | 0 | } |
353 | | |
354 | | |
355 | | /* We emulate pointer-sized futex(2) because the kernel API only |
356 | | * supports integers. |
357 | | * |
358 | | * We assume that the 'interesting' part is always the lower order bits. |
359 | | * This assumption holds because pointer bitlocks are restricted to |
360 | | * using the low order bits of the pointer as the lock. |
361 | | * |
362 | | * On 32 bits, there is nothing to do since the pointer size is equal to |
363 | | * the integer size. On little endian the lower-order bits don't move, |
364 | | * so do nothing. Only on 64-bit big-endian do we need to do a bit of |
365 | | * pointer arithmetic: the low order bits are shifted by 4 bytes. We |
366 | | * have a helper function that always does the right thing here. |
367 | | * |
368 | | * Since we always consider the low-order bits of the integer value, a |
369 | | * simple cast from (gsize) to (guint) always takes care of that. |
370 | | * |
371 | | * After that, pointer-sized futex becomes as simple as: |
372 | | * |
373 | | * g_futex_wait (g_futex_int_address (address), (guint) value); |
374 | | * |
375 | | * and |
376 | | * |
377 | | * g_futex_wake (g_futex_int_address (int_address)); |
378 | | */ |
379 | | static const gint * |
380 | | g_futex_int_address (const void *address) |
381 | 0 | { |
382 | 0 | const gint *int_address = address; |
383 | | |
384 | | /* this implementation makes these (reasonable) assumptions: */ |
385 | 0 | G_STATIC_ASSERT (G_BYTE_ORDER == G_LITTLE_ENDIAN || |
386 | 0 | (G_BYTE_ORDER == G_BIG_ENDIAN && |
387 | 0 | sizeof (int) == 4 && |
388 | 0 | (sizeof (gpointer) == 4 || sizeof (gpointer) == 8))); |
389 | |
390 | | #if G_BYTE_ORDER == G_BIG_ENDIAN && GLIB_SIZEOF_VOID_P == 8 |
391 | | int_address++; |
392 | | #endif |
393 | |
394 | 0 | return int_address; |
395 | 0 | } |
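To make the endianness fix-up concrete, a small sketch (assuming a 64-bit platform; names are illustrative): the same gsize word is viewed through the gint that aliases its low-order 32 bits, which is the first int on little-endian and the second on big-endian.

    static void
    low_word_demo (void)
    {
      gsize word = (gsize) 0x100000003;               /* low 32 bits: 0x3 */
      const gint *lo = g_futex_int_address (&word);

      /* On either byte order, *lo == 0x3 == (guint) word, so the value
       * handed to g_futex_wait() matches the integer that is actually
       * inspected at that address. */
      g_assert (*lo == 0x3);
    }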
396 | | |
397 | | /** |
398 | | * g_pointer_bit_lock: |
399 | | * @address: (not nullable): a pointer to a #gpointer-sized value |
400 | | * @lock_bit: a bit value between 0 and 31 |
401 | | * |
402 | | * This is equivalent to g_bit_lock(), but working on pointers (or other |
403 | | * pointer-sized values). |
404 | | * |
405 | | * For portability reasons, you may only lock on the bottom 32 bits of |
406 | | * the pointer. |
407 | | * |
408 | | * While @address has a `volatile` qualifier, this is a historical |
409 | | * artifact and the argument passed to it should not be `volatile`. |
410 | | * |
411 | | * Since: 2.30 |
412 | | **/ |
413 | | void |
414 | | (g_pointer_bit_lock) (volatile void *address, |
415 | | gint lock_bit) |
416 | 4.40k | { |
417 | 4.40k | void *address_nonvolatile = (void *) address; |
418 | | |
419 | 4.40k | g_return_if_fail (lock_bit < 32); |
420 | | |
421 | 4.40k | { |
422 | | #ifdef USE_ASM_GOTO |
423 | | retry: |
424 | | __asm__ volatile goto ("lock bts %1, (%0)\n" |
425 | | "jc %l[contended]" |
426 | | : /* no output */ |
427 | | : "r" (address), "r" ((gsize) lock_bit) |
428 | | : "cc", "memory" |
429 | | : contended); |
430 | | return; |
431 | | |
432 | | contended: |
433 | | { |
434 | | gsize *pointer_address = address_nonvolatile; |
435 | | gsize mask = 1u << lock_bit; |
436 | | gsize v; |
437 | | |
438 | | v = (gsize) g_atomic_pointer_get (pointer_address); |
439 | | if (v & mask) |
440 | | { |
441 | | guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended); |
442 | | |
443 | | g_atomic_int_add (&g_bit_lock_contended[class], +1); |
444 | | g_futex_wait (g_futex_int_address (address_nonvolatile), v); |
445 | | g_atomic_int_add (&g_bit_lock_contended[class], -1); |
446 | | } |
447 | | } |
448 | | goto retry; |
449 | | #else |
450 | 4.40k | gsize *pointer_address = address_nonvolatile; |
451 | 4.40k | gsize mask = 1u << lock_bit; |
452 | 4.40k | gsize v; |
453 | | |
454 | 4.40k | retry: |
455 | 4.40k | v = g_atomic_pointer_or (pointer_address, mask); |
456 | 4.40k | if (v & mask) |
457 | | /* already locked */ |
458 | 0 | { |
459 | 0 | guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended); |
460 | |
461 | 0 | g_atomic_int_add (&g_bit_lock_contended[class], +1); |
462 | 0 | g_futex_wait (g_futex_int_address (address_nonvolatile), (guint) v); |
463 | 0 | g_atomic_int_add (&g_bit_lock_contended[class], -1); |
464 | |
465 | 0 | goto retry; |
466 | 0 | } |
467 | 4.40k | #endif |
468 | 4.40k | } |
469 | 4.40k | } |
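A hypothetical sketch of pointer bit locks in use (the `Node` type is illustrative): bit 0 of a pointer-sized field is the lock, so readers must mask it off before dereferencing.

    typedef struct Node Node;
    struct Node { gpointer next; };   /* bit 0 of 'next' doubles as a lock */

    static Node *
    lock_and_get_next (Node *node)
    {
      gpointer p;

      g_pointer_bit_lock (&node->next, 0);

      /* Strip the lock bit before using the pointer value. */
      p = (gpointer) ((gsize) g_atomic_pointer_get (&node->next) & ~(gsize) 1);

      return (Node *) p;              /* caller must g_pointer_bit_unlock() */
    }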
470 | | |
471 | | /** |
472 | | * g_pointer_bit_trylock: |
473 | | * @address: (not nullable): a pointer to a #gpointer-sized value |
474 | | * @lock_bit: a bit value between 0 and 31 |
475 | | * |
476 | | * This is equivalent to g_bit_trylock(), but working on pointers (or |
477 | | * other pointer-sized values). |
478 | | * |
479 | | * For portability reasons, you may only lock on the bottom 32 bits of |
480 | | * the pointer. |
481 | | * |
482 | | * While @address has a `volatile` qualifier, this is a historical |
483 | | * artifact and the argument passed to it should not be `volatile`. |
484 | | * |
485 | | * Returns: %TRUE if the lock was acquired |
486 | | * |
487 | | * Since: 2.30 |
488 | | **/ |
489 | | gboolean |
490 | | (g_pointer_bit_trylock) (volatile void *address, |
491 | | gint lock_bit) |
492 | 0 | { |
493 | 0 | g_return_val_if_fail (lock_bit < 32, FALSE); |
494 | | |
495 | 0 | { |
496 | | #ifdef USE_ASM_GOTO |
497 | | gboolean result; |
498 | | |
499 | | __asm__ volatile ("lock bts %2, (%1)\n" |
500 | | "setnc %%al\n" |
501 | | "movzx %%al, %0" |
502 | | : "=r" (result) |
503 | | : "r" (address), "r" ((gsize) lock_bit) |
504 | | : "cc", "memory"); |
505 | | |
506 | | return result; |
507 | | #else |
508 | 0 | void *address_nonvolatile = (void *) address; |
509 | 0 | gsize *pointer_address = address_nonvolatile; |
510 | 0 | gsize mask = 1u << lock_bit; |
511 | 0 | gsize v; |
512 | |
513 | 0 | g_return_val_if_fail (lock_bit < 32, FALSE); |
514 | | |
515 | 0 | v = g_atomic_pointer_or (pointer_address, mask); |
516 | |
517 | 0 | return ~v & mask; |
518 | 0 | #endif |
519 | 0 | } |
520 | 0 | } |
521 | | |
522 | | /** |
523 | | * g_pointer_bit_unlock: |
524 | | * @address: (not nullable): a pointer to a #gpointer-sized value |
525 | | * @lock_bit: a bit value between 0 and 31 |
526 | | * |
527 | | * This is equivalent to g_bit_unlock(), but working on pointers (or other |
528 | | * pointer-sized values). |
529 | | * |
530 | | * For portability reasons, you may only lock on the bottom 32 bits of |
531 | | * the pointer. |
532 | | * |
533 | | * While @address has a `volatile` qualifier, this is a historical |
534 | | * artifact and the argument passed to it should not be `volatile`. |
535 | | * |
536 | | * Since: 2.30 |
537 | | **/ |
538 | | void |
539 | | (g_pointer_bit_unlock) (volatile void *address, |
540 | | gint lock_bit) |
541 | 4.40k | { |
542 | 4.40k | void *address_nonvolatile = (void *) address; |
543 | | |
544 | 4.40k | g_return_if_fail (lock_bit < 32); |
545 | | |
546 | 4.40k | { |
547 | | #ifdef USE_ASM_GOTO |
548 | | __asm__ volatile ("lock btr %1, (%0)" |
549 | | : /* no output */ |
550 | | : "r" (address), "r" ((gsize) lock_bit) |
551 | | : "cc", "memory"); |
552 | | #else |
553 | 4.40k | gsize *pointer_address = address_nonvolatile; |
554 | 4.40k | gsize mask = 1u << lock_bit; |
555 | | |
556 | 4.40k | g_atomic_pointer_and (pointer_address, ~mask); |
557 | 4.40k | #endif |
558 | | |
559 | 4.40k | { |
560 | 4.40k | guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended); |
561 | 4.40k | if (g_atomic_int_get (&g_bit_lock_contended[class])) |
562 | 0 | g_futex_wake (g_futex_int_address (address_nonvolatile)); |
563 | 4.40k | } |
564 | 4.40k | } |
565 | 4.40k | } |
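Putting the three pointer calls together, a hypothetical end-to-end sketch (create_instance() is a stand-in constructor, not a GLib API): a lazily initialised singleton whose lock bit lives in the pointer itself. Note the replacement value is stored with bit 0 still set, so the lock stays held until g_pointer_bit_unlock() clears it.

    static gpointer create_instance (void);  /* hypothetical */
    static gpointer instance;                /* bit 0 doubles as the lock bit */

    static gpointer
    get_instance (void)
    {
      gpointer p;

      g_pointer_bit_lock (&instance, 0);

      p = (gpointer) ((gsize) g_atomic_pointer_get (&instance) & ~(gsize) 1);
      if (p == NULL)
        {
          p = create_instance ();
          /* keep bit 0 set: this store must not release the lock early */
          g_atomic_pointer_set (&instance, (gpointer) ((gsize) p | 1));
        }

      g_pointer_bit_unlock (&instance, 0);

      return p;
    }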