/src/glib/glib/gbitlock.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright © 2008 Ryan Lortie |
3 | | * Copyright © 2010 Codethink Limited |
4 | | * |
5 | | * This library is free software; you can redistribute it and/or |
6 | | * modify it under the terms of the GNU Lesser General Public |
7 | | * License as published by the Free Software Foundation; either |
8 | | * version 2.1 of the License, or (at your option) any later version. |
9 | | * |
10 | | * This library is distributed in the hope that it will be useful, |
11 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | | * Lesser General Public License for more details. |
14 | | * |
15 | | * You should have received a copy of the GNU Lesser General Public |
16 | | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
17 | | * |
18 | | * Author: Ryan Lortie <desrt@desrt.ca> |
19 | | */ |
20 | | |
21 | | #include "config.h" |
22 | | |
23 | | #include "gbitlock.h" |
24 | | |
25 | | #include <glib/gmessages.h> |
26 | | #include <glib/gatomic.h> |
27 | | #include <glib/gslist.h> |
28 | | #include <glib/gthread.h> |
29 | | #include <glib/gslice.h> |
30 | | |
31 | | #include "gthreadprivate.h" |
32 | | |
33 | | #ifdef G_BIT_LOCK_FORCE_FUTEX_EMULATION |
34 | | #undef HAVE_FUTEX |
35 | | #endif |
36 | | |
37 | | #ifndef HAVE_FUTEX |
38 | | static GMutex g_futex_mutex; |
39 | | static GSList *g_futex_address_list = NULL; |
40 | | #endif |
41 | | |
42 | | #ifdef HAVE_FUTEX |
43 | | /* |
44 | | * We have headers for futex(2) on the build machine. This does not |
45 | | * imply that every system that ever runs the resulting glib will have |
46 | | * kernel support for futex, but you'd have to have a pretty old |
47 | | * kernel in order for that not to be the case. |
48 | | * |
49 | | * If anyone actually gets bit by this, please file a bug. :) |
50 | | */ |
51 | | #include <linux/futex.h> |
52 | | #include <sys/syscall.h> |
53 | | #include <unistd.h> |
54 | | |
55 | | #ifndef FUTEX_WAIT_PRIVATE |
56 | | #define FUTEX_WAIT_PRIVATE FUTEX_WAIT |
57 | | #define FUTEX_WAKE_PRIVATE FUTEX_WAKE |
58 | | #endif |
59 | | |
60 | | /* < private > |
61 | | * g_futex_wait: |
62 | | * @address: a pointer to an integer |
63 | | * @value: the value that should be at @address |
64 | | * |
65 | | * Atomically checks that the value stored at @address is equal to |
66 | | * @value and then blocks. If the value stored at @address is not |
67 | | * equal to @value then this function returns immediately. |
68 | | * |
69 | | * To unblock, call g_futex_wake() on @address. |
70 | | * |
71 | | * This call may spuriously unblock (for example, in response to the |
72 | | * process receiving a signal) but this is not guaranteed. Unlike the |
73 | | * Linux system call of a similar name, there is no guarantee that a |
74 | | * waiting process will unblock due to a g_futex_wake() call in a |
75 | | * separate process. |
76 | | */ |
77 | | static void |
78 | | g_futex_wait (const volatile gint *address, |
79 | | gint value) |
80 | 0 | { |
81 | 0 | syscall (__NR_futex, address, (gsize) FUTEX_WAIT_PRIVATE, (gsize) value, NULL); |
82 | 0 | } |
83 | | |
84 | | /* < private > |
85 | | * g_futex_wake: |
86 | | * @address: a pointer to an integer |
87 | | * |
88 | | * Nominally, wakes one thread that is blocked in g_futex_wait() on |
89 | | * @address (if any thread is currently waiting). |
90 | | * |
91 | | * As mentioned in the documentation for g_futex_wait(), spurious |
92 | | * wakeups may occur. As such, this call may result in more than one |
93 | | * thread being woken up. |
94 | | */ |
95 | | static void |
96 | | g_futex_wake (const volatile gint *address) |
97 | 0 | { |
98 | 0 | syscall (__NR_futex, address, (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL); |
99 | 0 | } |
100 | | |
101 | | #else |
102 | | |
103 | | /* emulate futex(2) */ |
104 | | typedef struct |
105 | | { |
106 | | const volatile gint *address; |
107 | | gint ref_count; |
108 | | GCond wait_queue; |
109 | | } WaitAddress; |
110 | | |
111 | | static WaitAddress * |
112 | | g_futex_find_address (const volatile gint *address) |
113 | | { |
114 | | GSList *node; |
115 | | |
116 | | for (node = g_futex_address_list; node; node = node->next) |
117 | | { |
118 | | WaitAddress *waiter = node->data; |
119 | | |
120 | | if (waiter->address == address) |
121 | | return waiter; |
122 | | } |
123 | | |
124 | | return NULL; |
125 | | } |
126 | | |
127 | | static void |
128 | | g_futex_wait (const volatile gint *address, |
129 | | gint value) |
130 | | { |
131 | | g_mutex_lock (&g_futex_mutex); |
132 | | if G_LIKELY (g_atomic_int_get (address) == value) |
133 | | { |
134 | | WaitAddress *waiter; |
135 | | |
136 | | if ((waiter = g_futex_find_address (address)) == NULL) |
137 | | { |
138 | | waiter = g_slice_new (WaitAddress); |
139 | | waiter->address = address; |
140 | | g_cond_init (&waiter->wait_queue); |
141 | | waiter->ref_count = 0; |
142 | | g_futex_address_list = |
143 | | g_slist_prepend (g_futex_address_list, waiter); |
144 | | } |
145 | | |
146 | | waiter->ref_count++; |
147 | | g_cond_wait (&waiter->wait_queue, &g_futex_mutex); |
148 | | |
149 | | if (!--waiter->ref_count) |
150 | | { |
151 | | g_futex_address_list = |
152 | | g_slist_remove (g_futex_address_list, waiter); |
153 | | g_cond_clear (&waiter->wait_queue); |
154 | | g_slice_free (WaitAddress, waiter); |
155 | | } |
156 | | } |
157 | | g_mutex_unlock (&g_futex_mutex); |
158 | | } |
159 | | |
160 | | static void |
161 | | g_futex_wake (const volatile gint *address) |
162 | | { |
163 | | WaitAddress *waiter; |
164 | | |
165 | | /* need to lock here for two reasons: |
166 | | * 1) need to acquire/release lock to ensure waiter is not in |
167 | | * the process of registering a wait |
168 | | * 2) need to -stay- locked until the end to ensure a wake() |
169 | | * in another thread doesn't cause 'waiter' to stop existing |
170 | | */ |
171 | | g_mutex_lock (&g_futex_mutex); |
172 | | if ((waiter = g_futex_find_address (address))) |
173 | | g_cond_signal (&waiter->wait_queue); |
174 | | g_mutex_unlock (&g_futex_mutex); |
175 | | } |
176 | | #endif |
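
/* Example (added sketch, not part of the original source): how the
 * g_futex_wait()/g_futex_wake() pair above is intended to be used,
 * whichever implementation is compiled in.  The waiter re-checks the
 * watched value in a loop because wakeups may be spurious; the waker
 * publishes the new value first and wakes afterwards.  The names
 * 'example_watched', 'example_block_until_nonzero' and
 * 'example_release_one_waiter' are hypothetical; the sketch relies only
 * on the headers already included in this file.
 */
static volatile gint example_watched = 0;

static void
example_block_until_nonzero (void)
{
  gint v;

  /* re-check before every wait: g_futex_wait() returns immediately
   * if the value stored at the address no longer matches */
  while ((v = g_atomic_int_get (&example_watched)) == 0)
    g_futex_wait (&example_watched, v);
}

static void
example_release_one_waiter (void)
{
  g_atomic_int_set (&example_watched, 1);  /* publish the new value... */
  g_futex_wake (&example_watched);         /* ...then wake (at least) one waiter */
}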
177 | | |
178 | | #define CONTENTION_CLASSES 11 |
179 | | static volatile gint g_bit_lock_contended[CONTENTION_CLASSES]; |
180 | | |
181 | | #if (defined (i386) || defined (__amd64__)) |
182 | | #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) |
183 | | #define USE_ASM_GOTO 1 |
184 | | #endif |
185 | | #endif |
186 | | |
187 | | /** |
188 | | * g_bit_lock: |
189 | | * @address: a pointer to an integer |
190 | | * @lock_bit: a bit value between 0 and 31 |
191 | | * |
192 | | * Sets the indicated @lock_bit in @address. If the bit is already |
193 | | * set, this call will block until g_bit_unlock() unsets the |
194 | | * corresponding bit. |
195 | | * |
196 | | * Attempting to lock on two different bits within the same integer is |
197 | | * not supported and will very probably cause deadlocks. |
198 | | * |
199 | | * The value of the bit that is set is (1u << @lock_bit). If @lock_bit is not |
200 | | * between 0 and 31 then the result is undefined. |
201 | | * |
202 | | * This function accesses @address atomically. All other accesses to |
203 | | * @address must be atomic in order for this function to work |
204 | | * reliably. |
205 | | * |
206 | | * Since: 2.24 |
207 | | **/ |
208 | | void |
209 | | g_bit_lock (volatile gint *address, |
210 | | gint lock_bit) |
211 | 0 | { |
212 | | #ifdef USE_ASM_GOTO |
213 | | retry: |
214 | | __asm__ volatile goto ("lock bts %1, (%0)\n" |
215 | | "jc %l[contended]" |
216 | | : /* no output */ |
217 | | : "r" (address), "r" (lock_bit) |
218 | | : "cc", "memory" |
219 | | : contended); |
220 | | return; |
221 | | |
222 | | contended: |
223 | | { |
224 | | guint mask = 1u << lock_bit; |
225 | | guint v; |
226 | | |
227 | | v = (guint) g_atomic_int_get (address); |
228 | | if (v & mask) |
229 | | { |
230 | | guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); |
231 | | |
232 | | g_atomic_int_add (&g_bit_lock_contended[class], +1); |
233 | | g_futex_wait (address, v); |
234 | | g_atomic_int_add (&g_bit_lock_contended[class], -1); |
235 | | } |
236 | | } |
237 | | goto retry; |
238 | | #else |
239 | 0 | guint mask = 1u << lock_bit; |
240 | 0 | guint v; |
241 | |
242 | 0 | retry: |
243 | 0 | v = g_atomic_int_or (address, mask); |
244 | 0 | if (v & mask) |
245 | | /* already locked */ |
246 | 0 | { |
247 | 0 | guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); |
248 | |
249 | 0 | g_atomic_int_add (&g_bit_lock_contended[class], +1); |
250 | 0 | g_futex_wait (address, v); |
251 | 0 | g_atomic_int_add (&g_bit_lock_contended[class], -1); |
252 | |
253 | 0 | goto retry; |
254 | 0 | } |
255 | 0 | #endif |
256 | 0 | } |
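
/* Usage sketch (added, not part of the original source): a single bit
 * of an ordinary integer acting as a mutex.  Bit 0 of 'flags' guards
 * 'count'; the struct and function names are hypothetical.  As the
 * documentation above notes, every other access to 'flags' must also be
 * atomic for this to be reliable.
 */
typedef struct
{
  gint flags;   /* bit 0 is the lock */
  gint count;   /* protected by bit 0 of flags */
} ExampleCounter;

static void
example_counter_inc (ExampleCounter *c)
{
  g_bit_lock (&c->flags, 0);    /* blocks while bit 0 is already set */
  c->count++;
  g_bit_unlock (&c->flags, 0);  /* clears bit 0 and wakes any waiter */
}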
257 | | |
258 | | /** |
259 | | * g_bit_trylock: |
260 | | * @address: a pointer to an integer |
261 | | * @lock_bit: a bit value between 0 and 31 |
262 | | * |
263 | | * Sets the indicated @lock_bit in @address, returning %TRUE if |
264 | | * successful. If the bit is already set, returns %FALSE immediately. |
265 | | * |
266 | | * Attempting to lock on two different bits within the same integer is |
267 | | * not supported. |
268 | | * |
269 | | * The value of the bit that is set is (1u << @lock_bit). If @lock_bit is not |
270 | | * between 0 and 31 then the result is undefined. |
271 | | * |
272 | | * This function accesses @address atomically. All other accesses to |
273 | | * @address must be atomic in order for this function to work |
274 | | * reliably. |
275 | | * |
276 | | * Returns: %TRUE if the lock was acquired |
277 | | * |
278 | | * Since: 2.24 |
279 | | **/ |
280 | | gboolean |
281 | | g_bit_trylock (volatile gint *address, |
282 | | gint lock_bit) |
283 | 0 | { |
284 | | #ifdef USE_ASM_GOTO |
285 | | gboolean result; |
286 | | |
287 | | __asm__ volatile ("lock bts %2, (%1)\n" |
288 | | "setnc %%al\n" |
289 | | "movzx %%al, %0" |
290 | | : "=r" (result) |
291 | | : "r" (address), "r" (lock_bit) |
292 | | : "cc", "memory"); |
293 | | |
294 | | return result; |
295 | | #else |
296 | 0 | guint mask = 1u << lock_bit; |
297 | 0 | guint v; |
298 | |
299 | 0 | v = g_atomic_int_or (address, mask); |
300 | |
301 | 0 | return ~v & mask; |
302 | 0 | #endif |
303 | 0 | } |
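
/* Usage sketch (added, not part of the original source): the
 * non-blocking variant, reusing the hypothetical ExampleCounter from
 * the previous sketch.  On %FALSE the caller simply does not hold the
 * lock and can retry later or fall back to g_bit_lock().
 */
static gboolean
example_counter_try_inc (ExampleCounter *c)
{
  if (!g_bit_trylock (&c->flags, 0))
    return FALSE;               /* bit 0 already set by someone else */

  c->count++;
  g_bit_unlock (&c->flags, 0);
  return TRUE;
}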
304 | | |
305 | | /** |
306 | | * g_bit_unlock: |
307 | | * @address: a pointer to an integer |
308 | | * @lock_bit: a bit value between 0 and 31 |
309 | | * |
310 | | * Clears the indicated @lock_bit in @address. If another thread is |
311 | | * currently blocked in g_bit_lock() on this same bit then it will be |
312 | | * woken up. |
313 | | * |
314 | | * This function accesses @address atomically. All other accesses to |
315 | | * @address must be atomic in order for this function to work |
316 | | * reliably. |
317 | | * |
318 | | * Since: 2.24 |
319 | | **/ |
320 | | void |
321 | | g_bit_unlock (volatile gint *address, |
322 | | gint lock_bit) |
323 | 0 | { |
324 | | #ifdef USE_ASM_GOTO |
325 | | __asm__ volatile ("lock btr %1, (%0)" |
326 | | : /* no output */ |
327 | | : "r" (address), "r" (lock_bit) |
328 | | : "cc", "memory"); |
329 | | #else |
330 | 0 | guint mask = 1u << lock_bit; |
331 | |
332 | 0 | g_atomic_int_and (address, ~mask); |
333 | 0 | #endif |
334 | |
335 | 0 | { |
336 | 0 | guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); |
337 | |
338 | 0 | if (g_atomic_int_get (&g_bit_lock_contended[class])) |
339 | 0 | g_futex_wake (address); |
340 | 0 | } |
341 | 0 | } |
342 | | |
343 | | |
344 | | /* We emulate pointer-sized futex(2) because the kernel API only |
345 | | * supports integers. |
346 | | * |
347 | | * We assume that the 'interesting' part is always the lower order bits. |
348 | | * This assumption holds because pointer bitlocks are restricted to |
349 | | * using the low order bits of the pointer as the lock. |
350 | | * |
351 | | * On 32 bits, there is nothing to do since the pointer size is equal to |
352 | | * the integer size. On little endian the lower-order bits don't move, |
353 | | * so do nothing. Only on 64bit big endian do we need to do a bit of |
354 | | * pointer arithmetic: the low order bits are shifted by 4 bytes. We |
355 | | * have a helper function that always does the right thing here. |
356 | | * |
357 | | * Since we always consider the low-order bits of the integer value, a |
358 | | * simple cast from (gsize) to (guint) always takes care of that. |
359 | | * |
360 | | * After that, pointer-sized futex becomes as simple as: |
361 | | * |
362 | | * g_futex_wait (g_futex_int_address (address), (guint) value); |
363 | | * |
364 | | * and |
365 | | * |
366 | | * g_futex_wake (g_futex_int_address (int_address)); |
367 | | */ |
368 | | static const volatile gint * |
369 | | g_futex_int_address (const volatile void *address) |
370 | 0 | { |
371 | 0 | const volatile gint *int_address = address; |
372 | | |
373 | | /* this implementation makes these (reasonable) assumptions: */ |
374 | 0 | G_STATIC_ASSERT (G_BYTE_ORDER == G_LITTLE_ENDIAN || |
375 | 0 | (G_BYTE_ORDER == G_BIG_ENDIAN && |
376 | 0 | sizeof (int) == 4 && |
377 | 0 | (sizeof (gpointer) == 4 || sizeof (gpointer) == 8))); |
378 | |
379 | | #if G_BYTE_ORDER == G_BIG_ENDIAN && GLIB_SIZEOF_VOID_P == 8 |
380 | | int_address++; |
381 | | #endif |
382 | |
383 | 0 | return int_address; |
384 | 0 | } |
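
/* Worked illustration (added, not part of the original source),
 * assuming a 64-bit big-endian target: a gsize slot holding the value
 * 0x0000000000000005 is laid out in memory as 00 00 00 00 00 00 00 05,
 * so the low-order 32 bits -- the only ones a pointer bitlock ever
 * touches -- live in the second 4-byte half.  g_futex_int_address()
 * therefore advances the gint pointer by one; on little-endian and on
 * 32-bit targets the plain cast is already correct.  The helper below
 * is hypothetical and only shows how the two calls combine.
 */
static void
example_wait_on_pointer_sized_slot (const volatile gsize *slot,
                                    gsize                 seen)
{
  /* watch the gint that actually holds the low-order bits */
  g_futex_wait (g_futex_int_address (slot), (guint) seen);
}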
385 | | |
386 | | /** |
387 | | * g_pointer_bit_lock: |
388 | | * @address: (not nullable): a pointer to a #gpointer-sized value |
389 | | * @lock_bit: a bit value between 0 and 31 |
390 | | * |
391 | | * This is equivalent to g_bit_lock, but working on pointers (or other |
392 | | * pointer-sized values). |
393 | | * |
394 | | * For portability reasons, you may only lock on the bottom 32 bits of |
395 | | * the pointer. |
396 | | * |
397 | | * Since: 2.30 |
398 | | **/ |
399 | | void |
400 | | (g_pointer_bit_lock) (volatile void *address, |
401 | | gint lock_bit) |
402 | 140M | { |
403 | 140M | g_return_if_fail (lock_bit < 32); |
404 | | |
405 | 140M | { |
406 | | #ifdef USE_ASM_GOTO |
407 | | retry: |
408 | | __asm__ volatile goto ("lock bts %1, (%0)\n" |
409 | | "jc %l[contended]" |
410 | | : /* no output */ |
411 | | : "r" (address), "r" ((gsize) lock_bit) |
412 | | : "cc", "memory" |
413 | | : contended); |
414 | | return; |
415 | | |
416 | | contended: |
417 | | { |
418 | | volatile gsize *pointer_address = address; |
419 | | gsize mask = 1u << lock_bit; |
420 | | gsize v; |
421 | | |
422 | | v = (gsize) g_atomic_pointer_get (pointer_address); |
423 | | if (v & mask) |
424 | | { |
425 | | guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); |
426 | | |
427 | | g_atomic_int_add (&g_bit_lock_contended[class], +1); |
428 | | g_futex_wait (g_futex_int_address (address), v); |
429 | | g_atomic_int_add (&g_bit_lock_contended[class], -1); |
430 | | } |
431 | | } |
432 | | goto retry; |
433 | | #else |
434 | 140M | volatile gsize *pointer_address = address; |
435 | 140M | gsize mask = 1u << lock_bit; |
436 | 140M | gsize v; |
437 | | |
438 | 140M | retry: |
439 | 140M | v = g_atomic_pointer_or (pointer_address, mask); |
440 | 140M | if (v & mask) |
441 | | /* already locked */ |
442 | 0 | { |
443 | 0 | guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); |
444 | |
445 | 0 | g_atomic_int_add (&g_bit_lock_contended[class], +1); |
446 | 0 | g_futex_wait (g_futex_int_address (address), (guint) v); |
447 | 0 | g_atomic_int_add (&g_bit_lock_contended[class], -1); |
448 | |
449 | 0 | goto retry; |
450 | 0 | } |
451 | 140M | #endif |
452 | 140M | } |
453 | 140M | } |
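
/* Usage sketch (added, not part of the original source): the typical
 * reason these pointer variants exist -- bit 0 of a pointer-sized slot
 * serves as the lock while the remaining bits still hold a pointer.
 * 'ExampleNode', 'example_list' and 'example_prepend' are hypothetical;
 * the stored nodes must be at least 2-byte aligned so that bit 0 is
 * free to use.
 */
typedef struct _ExampleNode ExampleNode;
struct _ExampleNode { ExampleNode *next; };

static ExampleNode *example_list;  /* bit 0 of this slot is the lock */

static void
example_prepend (ExampleNode *node)
{
  g_pointer_bit_lock (&example_list, 0);

  /* bit 0 is now set in the slot itself, so mask it off when reading */
  node->next = (ExampleNode *)
    (((gsize) g_atomic_pointer_get (&example_list)) & ~(gsize) 1);

  /* store the new head with the lock bit still set... */
  g_atomic_pointer_set (&example_list,
                        (ExampleNode *) (((gsize) node) | 1));

  /* ...and clear it, waking any blocked g_pointer_bit_lock() caller */
  g_pointer_bit_unlock (&example_list, 0);
}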
454 | | |
455 | | /** |
456 | | * g_pointer_bit_trylock: |
457 | | * @address: (not nullable): a pointer to a #gpointer-sized value |
458 | | * @lock_bit: a bit value between 0 and 31 |
459 | | * |
460 | | * This is equivalent to g_bit_trylock, but working on pointers (or |
461 | | * other pointer-sized values). |
462 | | * |
463 | | * For portability reasons, you may only lock on the bottom 32 bits of |
464 | | * the pointer. |
465 | | * |
466 | | * Returns: %TRUE if the lock was acquired |
467 | | * |
468 | | * Since: 2.30 |
469 | | **/ |
470 | | gboolean |
471 | | (g_pointer_bit_trylock) (volatile void *address, |
472 | | gint lock_bit) |
473 | 0 | { |
474 | 0 | g_return_val_if_fail (lock_bit < 32, FALSE); |
475 | | |
476 | 0 | { |
477 | | #ifdef USE_ASM_GOTO |
478 | | gboolean result; |
479 | | |
480 | | __asm__ volatile ("lock bts %2, (%1)\n" |
481 | | "setnc %%al\n" |
482 | | "movzx %%al, %0" |
483 | | : "=r" (result) |
484 | | : "r" (address), "r" ((gsize) lock_bit) |
485 | | : "cc", "memory"); |
486 | | |
487 | | return result; |
488 | | #else |
489 | 0 | volatile gsize *pointer_address = address; |
490 | 0 | gsize mask = 1u << lock_bit; |
491 | 0 | gsize v; |
492 | |
493 | 0 | g_return_val_if_fail (lock_bit < 32, FALSE); |
494 | | |
495 | 0 | v = g_atomic_pointer_or (pointer_address, mask); |
496 | |
497 | 0 | return ~v & mask; |
498 | 0 | #endif |
499 | 0 | } |
500 | 0 | } |
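
/* Usage sketch (added, not part of the original source): the
 * non-blocking counterpart of the previous example.  If bit 0 is
 * already set the slot is left alone and %FALSE is returned; the
 * hypothetical caller can retry or fall back to g_pointer_bit_lock().
 */
static gboolean
example_try_prepend (ExampleNode *node)
{
  if (!g_pointer_bit_trylock (&example_list, 0))
    return FALSE;               /* another thread holds bit 0 */

  node->next = (ExampleNode *)
    (((gsize) g_atomic_pointer_get (&example_list)) & ~(gsize) 1);
  g_atomic_pointer_set (&example_list,
                        (ExampleNode *) (((gsize) node) | 1));

  g_pointer_bit_unlock (&example_list, 0);
  return TRUE;
}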
501 | | |
502 | | /** |
503 | | * g_pointer_bit_unlock: |
504 | | * @address: (not nullable): a pointer to a #gpointer-sized value |
505 | | * @lock_bit: a bit value between 0 and 31 |
506 | | * |
507 | | * This is equivalent to g_bit_unlock, but working on pointers (or other |
508 | | * pointer-sized values). |
509 | | * |
510 | | * For portability reasons, you may only lock on the bottom 32 bits of |
511 | | * the pointer. |
512 | | * |
513 | | * Since: 2.30 |
514 | | **/ |
515 | | void |
516 | | (g_pointer_bit_unlock) (volatile void *address, |
517 | | gint lock_bit) |
518 | 140M | { |
519 | 140M | g_return_if_fail (lock_bit < 32); |
520 | | |
521 | 140M | { |
522 | | #ifdef USE_ASM_GOTO |
523 | | __asm__ volatile ("lock btr %1, (%0)" |
524 | | : /* no output */ |
525 | | : "r" (address), "r" ((gsize) lock_bit) |
526 | | : "cc", "memory"); |
527 | | #else |
528 | 140M | volatile gsize *pointer_address = address; |
529 | 140M | gsize mask = 1u << lock_bit; |
530 | | |
531 | 140M | g_atomic_pointer_and (pointer_address, ~mask); |
532 | 140M | #endif |
533 | | |
534 | 140M | { |
535 | 140M | guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); |
536 | 140M | if (g_atomic_int_get (&g_bit_lock_contended[class])) |
537 | 0 | g_futex_wake (g_futex_int_address (address)); |
538 | 140M | } |
539 | 140M | } |
540 | 140M | } |