/src/rauc/subprojects/glib-2.76.5/glib/gbitlock.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright © 2008 Ryan Lortie |
3 | | * Copyright © 2010 Codethink Limited |
4 | | * |
5 | | * SPDX-License-Identifier: LGPL-2.1-or-later |
6 | | * |
7 | | * This library is free software; you can redistribute it and/or |
8 | | * modify it under the terms of the GNU Lesser General Public |
9 | | * License as published by the Free Software Foundation; either |
10 | | * version 2.1 of the License, or (at your option) any later version. |
11 | | * |
12 | | * This library is distributed in the hope that it will be useful, |
13 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 | | * Lesser General Public License for more details. |
16 | | * |
17 | | * You should have received a copy of the GNU Lesser General Public |
18 | | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
19 | | * |
20 | | * Author: Ryan Lortie <desrt@desrt.ca> |
21 | | */ |
22 | | |
23 | | #include "config.h" |
24 | | |
25 | | #include "gbitlock.h" |
26 | | |
27 | | #include <glib/gmacros.h> |
28 | | #include <glib/gmessages.h> |
29 | | #include <glib/gatomic.h> |
30 | | #include <glib/gslist.h> |
31 | | #include <glib/gthread.h> |
32 | | #include <glib/gslice.h> |
33 | | |
34 | | #include "gthreadprivate.h" |
35 | | |
36 | | #ifdef G_BIT_LOCK_FORCE_FUTEX_EMULATION |
37 | | #undef HAVE_FUTEX |
38 | | #undef HAVE_FUTEX_TIME64 |
39 | | #endif |
40 | | |
41 | | #ifndef HAVE_FUTEX |
42 | | static GMutex g_futex_mutex; |
43 | | static GSList *g_futex_address_list = NULL; |
44 | | #endif |
45 | | |
46 | | #if defined(HAVE_FUTEX) || defined(HAVE_FUTEX_TIME64) |
47 | | /* |
48 | | * We have headers for futex(2) on the build machine. This does not |
49 | | * imply that every system that ever runs the resulting glib will have |
50 | | * kernel support for futex, but you'd have to have a pretty old |
51 | | * kernel in order for that not to be the case. |
52 | | * |
53 | | * If anyone actually gets bit by this, please file a bug. :) |
54 | | */ |
55 | | |
56 | | /* < private > |
57 | | * g_futex_wait: |
58 | | * @address: a pointer to an integer |
59 | | * @value: the value that should be at @address |
60 | | * |
61 | | * Atomically checks that the value stored at @address is equal to |
62 | | * @value and then blocks. If the value stored at @address is not |
63 | | * equal to @value then this function returns immediately. |
64 | | * |
65 | | * To unblock, call g_futex_wake() on @address. |
66 | | * |
67 | | * This call may spuriously unblock (for example, in response to the |
68 | | * process receiving a signal) but this is not guaranteed. Unlike the |
69 | | * Linux system call of a similar name, there is no guarantee that a |
70 | | * waiting process will unblock due to a g_futex_wake() call in a |
71 | | * separate process. |
72 | | */ |
static void
g_futex_wait (const gint *address,
              gint value)
{
  /* FUTEX_WAIT_PRIVATE atomically re-checks *address == value in the
   * kernel before sleeping, so there is no lost-wakeup window between
   * the caller's check and the sleep.  NULL timeout = wait forever. */
  g_futex_simple (address, (gsize) FUTEX_WAIT_PRIVATE, (gsize) value, NULL);
}
79 | | |
80 | | /* < private > |
81 | | * g_futex_wake: |
82 | | * @address: a pointer to an integer |
83 | | * |
84 | | * Nominally, wakes one thread that is blocked in g_futex_wait() on |
85 | | * @address (if any thread is currently waiting). |
86 | | * |
87 | | * As mentioned in the documentation for g_futex_wait(), spurious |
88 | | * wakeups may occur. As such, this call may result in more than one |
89 | | * thread being woken up. |
90 | | */ |
static void
g_futex_wake (const gint *address)
{
  /* Wake at most one kernel-side waiter on @address (the literal 1);
   * NULL for the unused timeout/extra argument. */
  g_futex_simple (address, (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
}
96 | | |
97 | | #else |
98 | | |
99 | | /* emulate futex(2) */ |
/* One record per integer address that currently has emulated-futex
 * waiters.  The list of these (g_futex_address_list) and every field
 * here are protected by g_futex_mutex. */
typedef struct
{
  const gint *address;   /* the integer the waiters are blocked on */
  gint ref_count;        /* number of threads currently waiting; the
                          * last one out frees the record */
  GCond wait_queue;      /* signalled by the emulated g_futex_wake() */
} WaitAddress;
106 | | |
107 | | static WaitAddress * |
108 | | g_futex_find_address (const gint *address) |
109 | | { |
110 | | GSList *node; |
111 | | |
112 | | for (node = g_futex_address_list; node; node = node->next) |
113 | | { |
114 | | WaitAddress *waiter = node->data; |
115 | | |
116 | | if (waiter->address == address) |
117 | | return waiter; |
118 | | } |
119 | | |
120 | | return NULL; |
121 | | } |
122 | | |
static void
g_futex_wait (const gint *address,
              gint value)
{
  g_mutex_lock (&g_futex_mutex);
  /* Only sleep if the value is still what the caller expects.  The
   * check happens under g_futex_mutex, so a concurrent g_futex_wake()
   * cannot slip in between this check and the g_cond_wait() below. */
  if G_LIKELY (g_atomic_int_get (address) == value)
    {
      WaitAddress *waiter;

      /* Register a wait record for this address, or join an existing
       * one if other threads are already waiting here. */
      if ((waiter = g_futex_find_address (address)) == NULL)
        {
          waiter = g_slice_new (WaitAddress);
          waiter->address = address;
          g_cond_init (&waiter->wait_queue);
          waiter->ref_count = 0;
          g_futex_address_list =
            g_slist_prepend (g_futex_address_list, waiter);
        }

      waiter->ref_count++;
      /* May return spuriously; the g_futex_wait() contract explicitly
       * permits spurious wakeups, so no re-check loop is needed. */
      g_cond_wait (&waiter->wait_queue, &g_futex_mutex);

      /* The last waiter to leave tears the record down. */
      if (!--waiter->ref_count)
        {
          g_futex_address_list =
            g_slist_remove (g_futex_address_list, waiter);
          g_cond_clear (&waiter->wait_queue);
          g_slice_free (WaitAddress, waiter);
        }
    }
  g_mutex_unlock (&g_futex_mutex);
}
155 | | |
static void
g_futex_wake (const gint *address)
{
  WaitAddress *waiter;

  /* need to lock here for two reasons:
   * 1) need to acquire/release lock to ensure waiter is not in
   *    the process of registering a wait
   * 2) need to -stay- locked until the end to ensure a wake()
   *    in another thread doesn't cause 'waiter' to stop existing
   */
  g_mutex_lock (&g_futex_mutex);
  /* Signal exactly one waiter, if any thread is waiting on @address. */
  if ((waiter = g_futex_find_address (address)))
    g_cond_signal (&waiter->wait_queue);
  g_mutex_unlock (&g_futex_mutex);
}
172 | | #endif |
173 | | |
174 | | #define CONTENTION_CLASSES 11 |
175 | | static gint g_bit_lock_contended[CONTENTION_CLASSES]; /* (atomic) */ |
176 | | |
177 | | #if (defined (i386) || defined (__amd64__)) |
178 | | #if G_GNUC_CHECK_VERSION(4, 5) |
179 | | #define USE_ASM_GOTO 1 |
180 | | #endif |
181 | | #endif |
182 | | |
183 | | /** |
184 | | * g_bit_lock: |
185 | | * @address: a pointer to an integer |
186 | | * @lock_bit: a bit value between 0 and 31 |
187 | | * |
188 | | * Sets the indicated @lock_bit in @address. If the bit is already |
189 | | * set, this call will block until g_bit_unlock() unsets the |
190 | | * corresponding bit. |
191 | | * |
192 | | * Attempting to lock on two different bits within the same integer is |
193 | | * not supported and will very probably cause deadlocks. |
194 | | * |
195 | | * The value of the bit that is set is (1u << @bit). If @bit is not |
196 | | * between 0 and 31 then the result is undefined. |
197 | | * |
198 | | * This function accesses @address atomically. All other accesses to |
199 | | * @address must be atomic in order for this function to work |
200 | | * reliably. While @address has a `volatile` qualifier, this is a historical |
201 | | * artifact and the argument passed to it should not be `volatile`. |
202 | | * |
203 | | * Since: 2.24 |
204 | | **/ |
void
g_bit_lock (volatile gint *address,
            gint lock_bit)
{
  gint *address_nonvolatile = (gint *) address;

#ifdef USE_ASM_GOTO
  /* Fast path: 'lock bts' atomically tests-and-sets the bit and leaves
   * its previous value in the carry flag, so 'jc' branches out only
   * when the lock was already held by someone else. */
 retry:
  __asm__ volatile goto ("lock bts %1, (%0)\n"
                         "jc %l[contended]"
                         : /* no output */
                         : "r" (address), "r" (lock_bit)
                         : "cc", "memory"
                         : contended);
  return;

 contended:
  {
    guint mask = 1u << lock_bit;
    guint v;

    v = (guint) g_atomic_int_get (address_nonvolatile);
    if (v & mask)
      {
        /* Contention counters are bucketed by address so that
         * g_bit_unlock() can skip the futex wake when (probably)
         * nobody is waiting on this address. */
        guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended);

        g_atomic_int_add (&g_bit_lock_contended[class], +1);
        /* Sleeps only while *address still equals v (futex semantics),
         * then we loop back and retry the test-and-set. */
        g_futex_wait (address_nonvolatile, v);
        g_atomic_int_add (&g_bit_lock_contended[class], -1);
      }
  }
  goto retry;
#else
  guint mask = 1u << lock_bit;
  guint v;

 retry:
  /* Atomically set the bit; the return value is the word's previous
   * contents, so (v & mask) tells us whether it was already locked. */
  v = g_atomic_int_or (address_nonvolatile, mask);
  if (v & mask)
    /* already locked */
    {
      guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended);

      g_atomic_int_add (&g_bit_lock_contended[class], +1);
      g_futex_wait (address_nonvolatile, v);
      g_atomic_int_add (&g_bit_lock_contended[class], -1);

      goto retry;
    }
#endif
}
256 | | |
257 | | /** |
258 | | * g_bit_trylock: |
259 | | * @address: a pointer to an integer |
260 | | * @lock_bit: a bit value between 0 and 31 |
261 | | * |
262 | | * Sets the indicated @lock_bit in @address, returning %TRUE if |
263 | | * successful. If the bit is already set, returns %FALSE immediately. |
264 | | * |
265 | | * Attempting to lock on two different bits within the same integer is |
266 | | * not supported. |
267 | | * |
268 | | * The value of the bit that is set is (1u << @bit). If @bit is not |
269 | | * between 0 and 31 then the result is undefined. |
270 | | * |
271 | | * This function accesses @address atomically. All other accesses to |
272 | | * @address must be atomic in order for this function to work |
273 | | * reliably. While @address has a `volatile` qualifier, this is a historical |
274 | | * artifact and the argument passed to it should not be `volatile`. |
275 | | * |
276 | | * Returns: %TRUE if the lock was acquired |
277 | | * |
278 | | * Since: 2.24 |
279 | | **/ |
gboolean
g_bit_trylock (volatile gint *address,
               gint lock_bit)
{
#ifdef USE_ASM_GOTO
  gboolean result;

  /* 'lock bts' leaves the bit's previous value in the carry flag;
   * 'setnc' therefore produces 1 exactly when the bit was previously
   * clear (i.e. we acquired the lock), zero-extended into result. */
  __asm__ volatile ("lock bts %2, (%1)\n"
                    "setnc %%al\n"
                    "movzx %%al, %0"
                    : "=r" (result)
                    : "r" (address), "r" (lock_bit)
                    : "cc", "memory");

  return result;
#else
  gint *address_nonvolatile = (gint *) address;
  guint mask = 1u << lock_bit;
  guint v;

  v = g_atomic_int_or (address_nonvolatile, mask);

  /* Nonzero (truthy) exactly when the bit was clear before we set it. */
  return ~v & mask;
#endif
}
305 | | |
306 | | /** |
307 | | * g_bit_unlock: |
308 | | * @address: a pointer to an integer |
309 | | * @lock_bit: a bit value between 0 and 31 |
310 | | * |
311 | | * Clears the indicated @lock_bit in @address. If another thread is |
312 | | * currently blocked in g_bit_lock() on this same bit then it will be |
313 | | * woken up. |
314 | | * |
315 | | * This function accesses @address atomically. All other accesses to |
316 | | * @address must be atomic in order for this function to work |
317 | | * reliably. While @address has a `volatile` qualifier, this is a historical |
318 | | * artifact and the argument passed to it should not be `volatile`. |
319 | | * |
320 | | * Since: 2.24 |
321 | | **/ |
void
g_bit_unlock (volatile gint *address,
              gint lock_bit)
{
  gint *address_nonvolatile = (gint *) address;

#ifdef USE_ASM_GOTO
  /* 'lock btr' atomically clears the bit. */
  __asm__ volatile ("lock btr %1, (%0)"
                    : /* no output */
                    : "r" (address), "r" (lock_bit)
                    : "cc", "memory");
#else
  guint mask = 1u << lock_bit;

  g_atomic_int_and (address_nonvolatile, ~mask);
#endif

  {
    guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended);

    /* Only pay for the futex wake when some thread has advertised
     * contention on this address class in g_bit_lock(). */
    if (g_atomic_int_get (&g_bit_lock_contended[class]))
      g_futex_wake (address_nonvolatile);
  }
}
346 | | |
347 | | |
348 | | /* We emulate pointer-sized futex(2) because the kernel API only |
349 | | * supports integers. |
350 | | * |
351 | | * We assume that the 'interesting' part is always the lower order bits. |
352 | | * This assumption holds because pointer bitlocks are restricted to |
353 | | * using the low order bits of the pointer as the lock. |
354 | | * |
355 | | * On 32 bits, there is nothing to do since the pointer size is equal to |
356 | | * the integer size. On little endian the lower-order bits don't move, |
357 | | * so do nothing. Only on 64bit big endian do we need to do a bit of |
358 | | * pointer arithmetic: the low order bits are shifted by 4 bytes. We |
359 | | * have a helper function that always does the right thing here. |
360 | | * |
361 | | * Since we always consider the low-order bits of the integer value, a |
362 | | * simple cast from (gsize) to (guint) always takes care of that. |
363 | | * |
364 | | * After that, pointer-sized futex becomes as simple as: |
365 | | * |
366 | | * g_futex_wait (g_futex_int_address (address), (guint) value); |
367 | | * |
368 | | * and |
369 | | * |
370 | | * g_futex_wake (g_futex_int_address (int_address)); |
371 | | */ |
372 | | static const gint * |
373 | | g_futex_int_address (const void *address) |
374 | 0 | { |
375 | 0 | const gint *int_address = address; |
376 | | |
377 | | /* this implementation makes these (reasonable) assumptions: */ |
378 | 0 | G_STATIC_ASSERT (G_BYTE_ORDER == G_LITTLE_ENDIAN || |
379 | 0 | (G_BYTE_ORDER == G_BIG_ENDIAN && |
380 | 0 | sizeof (int) == 4 && |
381 | 0 | (sizeof (gpointer) == 4 || sizeof (gpointer) == 8))); |
382 | |
|
383 | | #if G_BYTE_ORDER == G_BIG_ENDIAN && GLIB_SIZEOF_VOID_P == 8 |
384 | | int_address++; |
385 | | #endif |
386 | |
|
387 | 0 | return int_address; |
388 | 0 | } |
389 | | |
390 | | /** |
391 | | * g_pointer_bit_lock: |
392 | | * @address: (not nullable): a pointer to a #gpointer-sized value |
393 | | * @lock_bit: a bit value between 0 and 31 |
394 | | * |
395 | | * This is equivalent to g_bit_lock, but working on pointers (or other |
396 | | * pointer-sized values). |
397 | | * |
398 | | * For portability reasons, you may only lock on the bottom 32 bits of |
399 | | * the pointer. |
400 | | * |
401 | | * While @address has a `volatile` qualifier, this is a historical |
402 | | * artifact and the argument passed to it should not be `volatile`. |
403 | | * |
404 | | * Since: 2.30 |
405 | | **/ |
void
(g_pointer_bit_lock) (volatile void *address,
                      gint lock_bit)
{
  void *address_nonvolatile = (void *) address;

  /* Only the bottom 32 bits of the pointer may be used for locking
   * (documented portability restriction, see the doc comment). */
  g_return_if_fail (lock_bit < 32);

  {
#ifdef USE_ASM_GOTO
    /* Fast path: atomic test-and-set of the bit; carry set means the
     * lock was already held and we fall through to the slow path. */
 retry:
    __asm__ volatile goto ("lock bts %1, (%0)\n"
                           "jc %l[contended]"
                           : /* no output */
                           : "r" (address), "r" ((gsize) lock_bit)
                           : "cc", "memory"
                           : contended);
    return;

 contended:
    {
      gsize *pointer_address = address_nonvolatile;
      gsize mask = 1u << lock_bit;
      gsize v;

      v = (gsize) g_atomic_pointer_get (pointer_address);
      if (v & mask)
        {
          /* Same per-address contention buckets as g_bit_lock(). */
          guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended);

          g_atomic_int_add (&g_bit_lock_contended[class], +1);
          /* Wait on the gint that aliases the pointer's low 32 bits;
           * g_futex_int_address() handles 64-bit big-endian layout. */
          g_futex_wait (g_futex_int_address (address_nonvolatile), v);
          g_atomic_int_add (&g_bit_lock_contended[class], -1);
        }
    }
    goto retry;
#else
    gsize *pointer_address = address_nonvolatile;
    gsize mask = 1u << lock_bit;
    gsize v;

 retry:
    /* Atomically set the bit; v is the previous pointer-sized word. */
    v = g_atomic_pointer_or (pointer_address, mask);
    if (v & mask)
      /* already locked */
      {
        guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended);

        g_atomic_int_add (&g_bit_lock_contended[class], +1);
        /* The (guint) cast keeps only the low 32 bits, matching the
         * 32-bit futex word selected by g_futex_int_address(). */
        g_futex_wait (g_futex_int_address (address_nonvolatile), (guint) v);
        g_atomic_int_add (&g_bit_lock_contended[class], -1);

        goto retry;
      }
#endif
  }
}
463 | | |
464 | | /** |
465 | | * g_pointer_bit_trylock: |
466 | | * @address: (not nullable): a pointer to a #gpointer-sized value |
467 | | * @lock_bit: a bit value between 0 and 31 |
468 | | * |
469 | | * This is equivalent to g_bit_trylock(), but working on pointers (or |
470 | | * other pointer-sized values). |
471 | | * |
472 | | * For portability reasons, you may only lock on the bottom 32 bits of |
473 | | * the pointer. |
474 | | * |
475 | | * While @address has a `volatile` qualifier, this is a historical |
476 | | * artifact and the argument passed to it should not be `volatile`. |
477 | | * |
478 | | * Returns: %TRUE if the lock was acquired |
479 | | * |
480 | | * Since: 2.30 |
481 | | **/ |
gboolean
(g_pointer_bit_trylock) (volatile void *address,
                         gint lock_bit)
{
  g_return_val_if_fail (lock_bit < 32, FALSE);

  {
#ifdef USE_ASM_GOTO
    gboolean result;

    /* 'lock bts' puts the bit's previous value in the carry flag;
     * 'setnc' yields 1 exactly when the bit was clear, i.e. when we
     * acquired the lock. */
    __asm__ volatile ("lock bts %2, (%1)\n"
                      "setnc %%al\n"
                      "movzx %%al, %0"
                      : "=r" (result)
                      : "r" (address), "r" ((gsize) lock_bit)
                      : "cc", "memory");

    return result;
#else
    void *address_nonvolatile = (void *) address;
    gsize *pointer_address = address_nonvolatile;
    gsize mask = 1u << lock_bit;
    gsize v;

    /* NOTE(review): this duplicates the precondition check at the top
     * of the function; harmless but redundant in this branch. */
    g_return_val_if_fail (lock_bit < 32, FALSE);

    v = g_atomic_pointer_or (pointer_address, mask);

    /* Nonzero (truthy) exactly when the bit was clear before the or. */
    return ~v & mask;
#endif
  }
}
514 | | |
515 | | /** |
516 | | * g_pointer_bit_unlock: |
517 | | * @address: (not nullable): a pointer to a #gpointer-sized value |
518 | | * @lock_bit: a bit value between 0 and 31 |
519 | | * |
520 | | * This is equivalent to g_bit_unlock, but working on pointers (or other |
521 | | * pointer-sized values). |
522 | | * |
523 | | * For portability reasons, you may only lock on the bottom 32 bits of |
524 | | * the pointer. |
525 | | * |
526 | | * While @address has a `volatile` qualifier, this is a historical |
527 | | * artifact and the argument passed to it should not be `volatile`. |
528 | | * |
529 | | * Since: 2.30 |
530 | | **/ |
void
(g_pointer_bit_unlock) (volatile void *address,
                        gint lock_bit)
{
  void *address_nonvolatile = (void *) address;

  g_return_if_fail (lock_bit < 32);

  {
#ifdef USE_ASM_GOTO
    /* 'lock btr' atomically clears the bit. */
    __asm__ volatile ("lock btr %1, (%0)"
                      : /* no output */
                      : "r" (address), "r" ((gsize) lock_bit)
                      : "cc", "memory");
#else
    gsize *pointer_address = address_nonvolatile;
    gsize mask = 1u << lock_bit;

    g_atomic_pointer_and (pointer_address, ~mask);
#endif

    {
      guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended);
      /* Only issue the futex wake when g_pointer_bit_lock() has
       * advertised contention for this address class; the wake targets
       * the gint aliasing the pointer's low-order 32 bits. */
      if (g_atomic_int_get (&g_bit_lock_contended[class]))
        g_futex_wake (g_futex_int_address (address_nonvolatile));
    }
  }
}