/src/tinysparql/subprojects/glib-2.80.3/glib/gthread-posix.c
Line | Count | Source
1 | | /* GLIB - Library of useful routines for C programming |
2 | | * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald |
3 | | * |
4 | | * gthread.c: posix thread system implementation |
5 | | * Copyright 1998 Sebastian Wilhelmi; University of Karlsruhe |
6 | | * |
7 | | * SPDX-License-Identifier: LGPL-2.1-or-later |
8 | | * |
9 | | * This library is free software; you can redistribute it and/or |
10 | | * modify it under the terms of the GNU Lesser General Public |
11 | | * License as published by the Free Software Foundation; either |
12 | | * version 2.1 of the License, or (at your option) any later version. |
13 | | * |
14 | | * This library is distributed in the hope that it will be useful, |
15 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
17 | | * Lesser General Public License for more details. |
18 | | * |
19 | | * You should have received a copy of the GNU Lesser General Public |
20 | | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
21 | | */ |
22 | | |
23 | | /* |
24 | | * Modified by the GLib Team and others 1997-2000. See the AUTHORS |
25 | | * file for a list of people on the GLib Team. See the ChangeLog |
26 | | * files for a list of changes. These files are distributed with |
27 | | * GLib at ftp://ftp.gtk.org/pub/gtk/. |
28 | | */ |
29 | | |
30 | | /* The GMutex, GCond and GPrivate implementations in this file are some |
31 | | * of the lowest-level code in GLib. All other parts of GLib (messages, |
32 | | * memory, slices, etc) assume that they can freely use these facilities |
33 | | * without risking recursion. |
34 | | * |
35 | | * As such, these functions are NOT permitted to call any other part of |
36 | | * GLib. |
37 | | * |
38 | | * The thread manipulation functions (create, exit, join, etc.) have |
39 | | * more freedom -- they can do as they please. |
40 | | */ |
41 | | |
42 | | #include "config.h" |
43 | | |
44 | | #include "gthread.h" |
45 | | |
46 | | #include "gmain.h" |
47 | | #include "gmessages.h" |
48 | | #include "gslice.h" |
49 | | #include "gstrfuncs.h" |
50 | | #include "gtestutils.h" |
51 | | #include "gthreadprivate.h" |
52 | | #include "gutils.h" |
53 | | |
54 | | #include <stdlib.h> |
55 | | #include <stdio.h> |
56 | | #include <string.h> |
57 | | #include <errno.h> |
58 | | #include <pthread.h> |
59 | | |
60 | | #include <sys/time.h> |
61 | | #include <unistd.h> |
62 | | |
63 | | #ifdef HAVE_PTHREAD_SET_NAME_NP |
64 | | #include <pthread_np.h> |
65 | | #endif |
66 | | #ifdef HAVE_SCHED_H |
67 | | #include <sched.h> |
68 | | #endif |
69 | | #ifdef G_OS_WIN32 |
70 | | #include <windows.h> |
71 | | #endif |
72 | | |
73 | | #if defined(HAVE_SYS_SCHED_GETATTR) |
74 | | #include <sys/syscall.h> |
75 | | #endif |
76 | | |
77 | | #if (defined(HAVE_FUTEX) || defined(HAVE_FUTEX_TIME64)) && \ |
78 | | (defined(HAVE_STDATOMIC_H) || defined(__ATOMIC_SEQ_CST)) |
79 | | #define USE_NATIVE_MUTEX |
80 | | #endif |
81 | | |
82 | | static void |
83 | | g_thread_abort (gint status, |
84 | | const gchar *function) |
85 | 0 | { |
86 | 0 | fprintf (stderr, "GLib (gthread-posix.c): Unexpected error from C library during '%s': %s. Aborting.\n", |
87 | 0 | function, strerror (status)); |
88 | 0 | g_abort (); |
89 | 0 | } |
90 | | |
91 | | /* {{{1 GMutex */ |
92 | | |
93 | | #if !defined(USE_NATIVE_MUTEX) |
94 | | |
95 | | static pthread_mutex_t * |
96 | | g_mutex_impl_new (void) |
97 | | { |
98 | | pthread_mutexattr_t *pattr = NULL; |
99 | | pthread_mutex_t *mutex; |
100 | | gint status; |
101 | | #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP |
102 | | pthread_mutexattr_t attr; |
103 | | #endif |
104 | | |
105 | | mutex = malloc (sizeof (pthread_mutex_t)); |
106 | | if G_UNLIKELY (mutex == NULL) |
107 | | g_thread_abort (errno, "malloc"); |
108 | | |
109 | | #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP |
110 | | pthread_mutexattr_init (&attr); |
111 | | pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ADAPTIVE_NP); |
112 | | pattr = &attr; |
113 | | #endif |
114 | | |
115 | | if G_UNLIKELY ((status = pthread_mutex_init (mutex, pattr)) != 0) |
116 | | g_thread_abort (status, "pthread_mutex_init"); |
117 | | |
118 | | #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP |
119 | | pthread_mutexattr_destroy (&attr); |
120 | | #endif |
121 | | |
122 | | return mutex; |
123 | | } |
124 | | |
125 | | static void |
126 | | g_mutex_impl_free (pthread_mutex_t *mutex) |
127 | | { |
128 | | pthread_mutex_destroy (mutex); |
129 | | free (mutex); |
130 | | } |
131 | | |
132 | | static inline pthread_mutex_t * |
133 | | g_mutex_get_impl (GMutex *mutex) |
134 | | { |
135 | | pthread_mutex_t *impl = g_atomic_pointer_get (&mutex->p); |
136 | | |
137 | | if G_UNLIKELY (impl == NULL) |
138 | | { |
139 | | impl = g_mutex_impl_new (); |
140 | | if (!g_atomic_pointer_compare_and_exchange (&mutex->p, NULL, impl)) |
141 | | g_mutex_impl_free (impl); |
142 | | impl = mutex->p; |
143 | | } |
144 | | |
145 | | return impl; |
146 | | } |
147 | | |
148 | | |
149 | | /** |
150 | | * g_mutex_init: |
151 | | * @mutex: an uninitialized #GMutex |
152 | | * |
153 | | * Initializes a #GMutex so that it can be used. |
154 | | * |
155 | | * This function is useful to initialize a mutex that has been |
156 | | * allocated on the stack, or as part of a larger structure. |
157 | | * It is not necessary to initialize a mutex that has been |
158 | | * statically allocated. |
159 | | * |
160 | | * |[<!-- language="C" --> |
161 | | * typedef struct { |
162 | | * GMutex m; |
163 | | * ... |
164 | | * } Blob; |
165 | | * |
166 | | * Blob *b; |
167 | | * |
168 | | * b = g_new (Blob, 1); |
169 | | * g_mutex_init (&b->m); |
170 | | * ]| |
171 | | * |
172 | | * To undo the effect of g_mutex_init() when a mutex is no longer |
173 | | * needed, use g_mutex_clear(). |
174 | | * |
175 | | * Calling g_mutex_init() on an already initialized #GMutex leads |
176 | | * to undefined behaviour. |
177 | | * |
178 | | * Since: 2.32 |
179 | | */ |
180 | | void |
181 | | g_mutex_init (GMutex *mutex) |
182 | | { |
183 | | mutex->p = g_mutex_impl_new (); |
184 | | } |
185 | | |
186 | | /** |
187 | | * g_mutex_clear: |
188 | | * @mutex: an initialized #GMutex |
189 | | * |
190 | | * Frees the resources allocated to a mutex with g_mutex_init(). |
191 | | * |
192 | | * This function should not be used with a #GMutex that has been |
193 | | * statically allocated. |
194 | | * |
195 | | * Calling g_mutex_clear() on a locked mutex leads to undefined |
196 | | * behaviour. |
197 | | * |
198 | | * Since: 2.32 |
199 | | */ |
200 | | void |
201 | | g_mutex_clear (GMutex *mutex) |
202 | | { |
203 | | g_mutex_impl_free (mutex->p); |
204 | | } |
205 | | |
206 | | /** |
207 | | * g_mutex_lock: |
208 | | * @mutex: a #GMutex |
209 | | * |
210 | | * Locks @mutex. If @mutex is already locked by another thread, the |
211 | | * current thread will block until @mutex is unlocked by the other |
212 | | * thread. |
213 | | * |
214 | | * #GMutex is neither guaranteed to be recursive nor to be |
215 | | * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has |
216 | | * already been locked by the same thread results in undefined behaviour |
217 | | * (including but not limited to deadlocks). |
218 | | */ |
219 | | void |
220 | | g_mutex_lock (GMutex *mutex) |
221 | | { |
222 | | gint status; |
223 | | |
224 | | if G_UNLIKELY ((status = pthread_mutex_lock (g_mutex_get_impl (mutex))) != 0) |
225 | | g_thread_abort (status, "pthread_mutex_lock"); |
226 | | } |
227 | | |
228 | | /** |
229 | | * g_mutex_unlock: |
230 | | * @mutex: a #GMutex |
231 | | * |
232 | | * Unlocks @mutex. If another thread is blocked in a g_mutex_lock() |
233 | | * call for @mutex, it will become unblocked and can lock @mutex itself. |
234 | | * |
235 | | * Calling g_mutex_unlock() on a mutex that is not locked by the |
236 | | * current thread leads to undefined behaviour. |
237 | | */ |
238 | | void |
239 | | g_mutex_unlock (GMutex *mutex) |
240 | | { |
241 | | gint status; |
242 | | |
243 | | if G_UNLIKELY ((status = pthread_mutex_unlock (g_mutex_get_impl (mutex))) != 0) |
244 | | g_thread_abort (status, "pthread_mutex_unlock"); |
245 | | } |
246 | | |
247 | | /** |
248 | | * g_mutex_trylock: |
249 | | * @mutex: a #GMutex |
250 | | * |
251 | | * Tries to lock @mutex. If @mutex is already locked by another thread, |
252 | | * it immediately returns %FALSE. Otherwise it locks @mutex and returns |
253 | | * %TRUE. |
254 | | * |
255 | | * #GMutex is neither guaranteed to be recursive nor to be |
256 | | * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has |
257 | | * already been locked by the same thread results in undefined behaviour |
258 | | * (including but not limited to deadlocks or arbitrary return values). |
259 | | * |
260 | | * Returns: %TRUE if @mutex could be locked |
261 | | */ |
262 | | gboolean |
263 | | g_mutex_trylock (GMutex *mutex) |
264 | | { |
265 | | gint status; |
266 | | |
267 | | if G_LIKELY ((status = pthread_mutex_trylock (g_mutex_get_impl (mutex))) == 0) |
268 | | return TRUE; |
269 | | |
270 | | if G_UNLIKELY (status != EBUSY) |
271 | | g_thread_abort (status, "pthread_mutex_trylock"); |
272 | | |
273 | | return FALSE; |
274 | | } |
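
A minimal usage sketch of the three calls above (not part of this file; counter and counter_lock are illustrative names):

    #include <glib.h>

    static GMutex counter_lock;   /* statically allocated: no g_mutex_init() needed */
    static int counter;

    static void
    increment (void)
    {
      g_mutex_lock (&counter_lock);
      counter++;
      g_mutex_unlock (&counter_lock);
    }

    static gboolean
    try_increment (void)
    {
      if (!g_mutex_trylock (&counter_lock))
        return FALSE;              /* another thread holds it; don't block */
      counter++;
      g_mutex_unlock (&counter_lock);
      return TRUE;
    }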
275 | | |
276 | | #endif /* !defined(USE_NATIVE_MUTEX) */ |
277 | | |
278 | | /* {{{1 GRecMutex */ |
279 | | |
280 | | static pthread_mutex_t * |
281 | | g_rec_mutex_impl_new (void) |
282 | 16 | { |
283 | 16 | pthread_mutexattr_t attr; |
284 | 16 | pthread_mutex_t *mutex; |
285 | | |
286 | 16 | mutex = malloc (sizeof (pthread_mutex_t)); |
287 | 16 | if G_UNLIKELY (mutex == NULL) |
288 | 0 | g_thread_abort (errno, "malloc"); |
289 | | |
290 | 16 | pthread_mutexattr_init (&attr); |
291 | 16 | pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE); |
292 | 16 | pthread_mutex_init (mutex, &attr); |
293 | 16 | pthread_mutexattr_destroy (&attr); |
294 | | |
295 | 16 | return mutex; |
296 | 16 | } |
297 | | |
298 | | static void |
299 | | g_rec_mutex_impl_free (pthread_mutex_t *mutex) |
300 | 0 | { |
301 | 0 | pthread_mutex_destroy (mutex); |
302 | 0 | free (mutex); |
303 | 0 | } |
304 | | |
305 | | static inline pthread_mutex_t * |
306 | | g_rec_mutex_get_impl (GRecMutex *rec_mutex) |
307 | 766k | { |
308 | 766k | pthread_mutex_t *impl = g_atomic_pointer_get (&rec_mutex->p); |
309 | | |
310 | 766k | if G_UNLIKELY (impl == NULL) |
311 | 16 | { |
312 | 16 | impl = g_rec_mutex_impl_new (); |
313 | 16 | if (!g_atomic_pointer_compare_and_exchange (&rec_mutex->p, NULL, impl)) |
314 | 0 | g_rec_mutex_impl_free (impl); |
315 | 16 | impl = rec_mutex->p; |
316 | 16 | } |
317 | | |
318 | 766k | return impl; |
319 | 766k | } |
320 | | |
321 | | /** |
322 | | * g_rec_mutex_init: |
323 | | * @rec_mutex: an uninitialized #GRecMutex |
324 | | * |
325 | | * Initializes a #GRecMutex so that it can be used. |
326 | | * |
327 | | * This function is useful to initialize a recursive mutex |
328 | | * that has been allocated on the stack, or as part of a larger |
329 | | * structure. |
330 | | * |
331 | | * It is not necessary to initialise a recursive mutex that has been |
332 | | * statically allocated. |
333 | | * |
334 | | * |[<!-- language="C" --> |
335 | | * typedef struct { |
336 | | * GRecMutex m; |
337 | | * ... |
338 | | * } Blob; |
339 | | * |
340 | | * Blob *b; |
341 | | * |
342 | | * b = g_new (Blob, 1); |
343 | | * g_rec_mutex_init (&b->m); |
344 | | * ]| |
345 | | * |
346 | | * Calling g_rec_mutex_init() on an already initialized #GRecMutex |
347 | | * leads to undefined behaviour. |
348 | | * |
349 | | * To undo the effect of g_rec_mutex_init() when a recursive mutex |
350 | | * is no longer needed, use g_rec_mutex_clear(). |
351 | | * |
352 | | * Since: 2.32 |
353 | | */ |
354 | | void |
355 | | g_rec_mutex_init (GRecMutex *rec_mutex) |
356 | 0 | { |
357 | 0 | rec_mutex->p = g_rec_mutex_impl_new (); |
358 | 0 | } |
359 | | |
360 | | /** |
361 | | * g_rec_mutex_clear: |
362 | | * @rec_mutex: an initialized #GRecMutex |
363 | | * |
364 | | * Frees the resources allocated to a recursive mutex with |
365 | | * g_rec_mutex_init(). |
366 | | * |
367 | | * This function should not be used with a #GRecMutex that has been |
368 | | * statically allocated. |
369 | | * |
370 | | * Calling g_rec_mutex_clear() on a locked recursive mutex leads |
371 | | * to undefined behaviour. |
372 | | * |
373 | | * Since: 2.32 |
374 | | */ |
375 | | void |
376 | | g_rec_mutex_clear (GRecMutex *rec_mutex) |
377 | 0 | { |
378 | 0 | g_rec_mutex_impl_free (rec_mutex->p); |
379 | 0 | } |
380 | | |
381 | | /** |
382 | | * g_rec_mutex_lock: |
383 | | * @rec_mutex: a #GRecMutex |
384 | | * |
385 | | * Locks @rec_mutex. If @rec_mutex is already locked by another |
386 | | * thread, the current thread will block until @rec_mutex is |
387 | | * unlocked by the other thread. If @rec_mutex is already locked |
388 | | * by the current thread, the 'lock count' of @rec_mutex is increased. |
389 | | * The mutex will only become available again when it is unlocked |
390 | | * as many times as it has been locked. |
391 | | * |
392 | | * Since: 2.32 |
393 | | */ |
394 | | void |
395 | | g_rec_mutex_lock (GRecMutex *mutex) |
396 | 766k | { |
397 | 766k | pthread_mutex_lock (g_rec_mutex_get_impl (mutex)); |
398 | 766k | } |
399 | | |
400 | | /** |
401 | | * g_rec_mutex_unlock: |
402 | | * @rec_mutex: a #GRecMutex |
403 | | * |
404 | | * Unlocks @rec_mutex. If another thread is blocked in a |
405 | | * g_rec_mutex_lock() call for @rec_mutex, it will become unblocked |
406 | | * and can lock @rec_mutex itself. |
407 | | * |
408 | | * Calling g_rec_mutex_unlock() on a recursive mutex that is not |
409 | | * locked by the current thread leads to undefined behaviour. |
410 | | * |
411 | | * Since: 2.32 |
412 | | */ |
413 | | void |
414 | | g_rec_mutex_unlock (GRecMutex *rec_mutex) |
415 | 766k | { |
416 | 766k | pthread_mutex_unlock (rec_mutex->p); |
417 | 766k | } |
418 | | |
419 | | /** |
420 | | * g_rec_mutex_trylock: |
421 | | * @rec_mutex: a #GRecMutex |
422 | | * |
423 | | * Tries to lock @rec_mutex. If @rec_mutex is already locked |
424 | | * by another thread, it immediately returns %FALSE. Otherwise |
425 | | * it locks @rec_mutex and returns %TRUE. |
426 | | * |
427 | | * Returns: %TRUE if @rec_mutex could be locked |
428 | | * |
429 | | * Since: 2.32 |
430 | | */ |
431 | | gboolean |
432 | | g_rec_mutex_trylock (GRecMutex *rec_mutex) |
433 | 0 | { |
434 | 0 | if (pthread_mutex_trylock (g_rec_mutex_get_impl (rec_mutex)) != 0) |
435 | 0 | return FALSE; |
436 | | |
437 | 0 | return TRUE; |
438 | 0 | } |
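
A sketch of the lock-count behaviour described above (illustrative names, not part of this file): each g_rec_mutex_lock() by the owning thread must be balanced by a g_rec_mutex_unlock() before other threads can acquire the mutex.

    #include <glib.h>

    static GRecMutex rec_lock;     /* statically allocated: no g_rec_mutex_init() needed */

    static void
    inner (void)
    {
      g_rec_mutex_lock (&rec_lock);    /* same thread: lock count rises to 2 */
      /* ... */
      g_rec_mutex_unlock (&rec_lock);  /* count back to 1; still held */
    }

    static void
    outer (void)
    {
      g_rec_mutex_lock (&rec_lock);    /* count 1 */
      inner ();
      g_rec_mutex_unlock (&rec_lock);  /* count 0; mutex available again */
    }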
439 | | |
440 | | /* {{{1 GRWLock */ |
441 | | |
442 | | static pthread_rwlock_t * |
443 | | g_rw_lock_impl_new (void) |
444 | 20 | { |
445 | 20 | pthread_rwlock_t *rwlock; |
446 | 20 | gint status; |
447 | | |
448 | 20 | rwlock = malloc (sizeof (pthread_rwlock_t)); |
449 | 20 | if G_UNLIKELY (rwlock == NULL) |
450 | 0 | g_thread_abort (errno, "malloc"); |
451 | | |
452 | 20 | if G_UNLIKELY ((status = pthread_rwlock_init (rwlock, NULL)) != 0) |
453 | 0 | g_thread_abort (status, "pthread_rwlock_init"); |
454 | | |
455 | 20 | return rwlock; |
456 | 20 | } |
457 | | |
458 | | static void |
459 | | g_rw_lock_impl_free (pthread_rwlock_t *rwlock) |
460 | 0 | { |
461 | 0 | pthread_rwlock_destroy (rwlock); |
462 | 0 | free (rwlock); |
463 | 0 | } |
464 | | |
465 | | static inline pthread_rwlock_t * |
466 | | g_rw_lock_get_impl (GRWLock *lock) |
467 | 35.5M | { |
468 | 35.5M | pthread_rwlock_t *impl = g_atomic_pointer_get (&lock->p); |
469 | | |
470 | 35.5M | if G_UNLIKELY (impl == NULL) |
471 | 20 | { |
472 | 20 | impl = g_rw_lock_impl_new (); |
473 | 20 | if (!g_atomic_pointer_compare_and_exchange (&lock->p, NULL, impl)) |
474 | 0 | g_rw_lock_impl_free (impl); |
475 | 20 | impl = lock->p; |
476 | 20 | } |
477 | | |
478 | 35.5M | return impl; |
479 | 35.5M | } |
480 | | |
481 | | /** |
482 | | * g_rw_lock_init: |
483 | | * @rw_lock: an uninitialized #GRWLock |
484 | | * |
485 | | * Initializes a #GRWLock so that it can be used. |
486 | | * |
487 | | * This function is useful to initialize a lock that has been |
488 | | * allocated on the stack, or as part of a larger structure. It is not |
489 | | * necessary to initialise a reader-writer lock that has been statically |
490 | | * allocated. |
491 | | * |
492 | | * |[<!-- language="C" --> |
493 | | * typedef struct { |
494 | | * GRWLock l; |
495 | | * ... |
496 | | * } Blob; |
497 | | * |
498 | | * Blob *b; |
499 | | * |
500 | | * b = g_new (Blob, 1); |
501 | | * g_rw_lock_init (&b->l); |
502 | | * ]| |
503 | | * |
504 | | * To undo the effect of g_rw_lock_init() when a lock is no longer |
505 | | * needed, use g_rw_lock_clear(). |
506 | | * |
507 | | * Calling g_rw_lock_init() on an already initialized #GRWLock leads |
508 | | * to undefined behaviour. |
509 | | * |
510 | | * Since: 2.32 |
511 | | */ |
512 | | void |
513 | | g_rw_lock_init (GRWLock *rw_lock) |
514 | 0 | { |
515 | 0 | rw_lock->p = g_rw_lock_impl_new (); |
516 | 0 | } |
517 | | |
518 | | /** |
519 | | * g_rw_lock_clear: |
520 | | * @rw_lock: an initialized #GRWLock |
521 | | * |
522 | | * Frees the resources allocated to a lock with g_rw_lock_init(). |
523 | | * |
524 | | * This function should not be used with a #GRWLock that has been |
525 | | * statically allocated. |
526 | | * |
527 | | * Calling g_rw_lock_clear() when any thread holds the lock |
528 | | * leads to undefined behaviour. |
529 | | * |
530 | | * Since: 2.32 |
531 | | */ |
532 | | void |
533 | | g_rw_lock_clear (GRWLock *rw_lock) |
534 | 0 | { |
535 | 0 | g_rw_lock_impl_free (rw_lock->p); |
536 | 0 | } |
537 | | |
538 | | /** |
539 | | * g_rw_lock_writer_lock: |
540 | | * @rw_lock: a #GRWLock |
541 | | * |
542 | | * Obtain a write lock on @rw_lock. If another thread currently holds |
543 | | * a read or write lock on @rw_lock, the current thread will block |
544 | | * until all other threads have dropped their locks on @rw_lock. |
545 | | * |
546 | | * Calling g_rw_lock_writer_lock() while the current thread already |
547 | | * owns a read or write lock on @rw_lock leads to undefined behaviour. |
548 | | * |
549 | | * Since: 2.32 |
550 | | */ |
551 | | void |
552 | | g_rw_lock_writer_lock (GRWLock *rw_lock) |
553 | 2.18k | { |
554 | 2.18k | int retval = pthread_rwlock_wrlock (g_rw_lock_get_impl (rw_lock)); |
555 | | |
556 | 2.18k | if (retval != 0) |
557 | 0 | g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval)); |
558 | 2.18k | } |
559 | | |
560 | | /** |
561 | | * g_rw_lock_writer_trylock: |
562 | | * @rw_lock: a #GRWLock |
563 | | * |
564 | | * Tries to obtain a write lock on @rw_lock. If another thread |
565 | | * currently holds a read or write lock on @rw_lock, it immediately |
566 | | * returns %FALSE. |
567 | | * Otherwise it locks @rw_lock and returns %TRUE. |
568 | | * |
569 | | * Returns: %TRUE if @rw_lock could be locked |
570 | | * |
571 | | * Since: 2.32 |
572 | | */ |
573 | | gboolean |
574 | | g_rw_lock_writer_trylock (GRWLock *rw_lock) |
575 | 0 | { |
576 | 0 | if (pthread_rwlock_trywrlock (g_rw_lock_get_impl (rw_lock)) != 0) |
577 | 0 | return FALSE; |
578 | | |
579 | 0 | return TRUE; |
580 | 0 | } |
581 | | |
582 | | /** |
583 | | * g_rw_lock_writer_unlock: |
584 | | * @rw_lock: a #GRWLock |
585 | | * |
586 | | * Release a write lock on @rw_lock. |
587 | | * |
588 | | * Calling g_rw_lock_writer_unlock() on a lock that is not held |
589 | | * by the current thread leads to undefined behaviour. |
590 | | * |
591 | | * Since: 2.32 |
592 | | */ |
593 | | void |
594 | | g_rw_lock_writer_unlock (GRWLock *rw_lock) |
595 | 2.18k | { |
596 | 2.18k | pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock)); |
597 | 2.18k | } |
598 | | |
599 | | /** |
600 | | * g_rw_lock_reader_lock: |
601 | | * @rw_lock: a #GRWLock |
602 | | * |
603 | | * Obtain a read lock on @rw_lock. If another thread currently holds |
604 | | * the write lock on @rw_lock, the current thread will block until the |
605 | | * write lock has been released. If another thread does not hold |
606 | | * the write lock, but is waiting for it, it is implementation defined |
607 | | * whether the reader or writer will block. Read locks can be taken |
608 | | * recursively. |
609 | | * |
610 | | * Calling g_rw_lock_reader_lock() while the current thread already |
611 | | * owns a write lock leads to undefined behaviour. Read locks however |
612 | | * can be taken recursively, in which case you need to make sure to |
613 | | * call g_rw_lock_reader_unlock() the same number of times. |
614 | | * |
615 | | * It is implementation-defined how many read locks are allowed to be |
616 | | * held on the same lock simultaneously. If the limit is hit, |
617 | | * or if a deadlock is detected, a critical warning will be emitted. |
618 | | * |
619 | | * Since: 2.32 |
620 | | */ |
621 | | void |
622 | | g_rw_lock_reader_lock (GRWLock *rw_lock) |
623 | 17.7M | { |
624 | 17.7M | int retval = pthread_rwlock_rdlock (g_rw_lock_get_impl (rw_lock)); |
625 | | |
626 | 17.7M | if (retval != 0) |
627 | 0 | g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval)); |
628 | 17.7M | } |
629 | | |
630 | | /** |
631 | | * g_rw_lock_reader_trylock: |
632 | | * @rw_lock: a #GRWLock |
633 | | * |
634 | | * Tries to obtain a read lock on @rw_lock and returns %TRUE if |
635 | | * the read lock was successfully obtained. Otherwise it |
636 | | * returns %FALSE. |
637 | | * |
638 | | * Returns: %TRUE if @rw_lock could be locked |
639 | | * |
640 | | * Since: 2.32 |
641 | | */ |
642 | | gboolean |
643 | | g_rw_lock_reader_trylock (GRWLock *rw_lock) |
644 | 0 | { |
645 | 0 | if (pthread_rwlock_tryrdlock (g_rw_lock_get_impl (rw_lock)) != 0) |
646 | 0 | return FALSE; |
647 | | |
648 | 0 | return TRUE; |
649 | 0 | } |
650 | | |
651 | | /** |
652 | | * g_rw_lock_reader_unlock: |
653 | | * @rw_lock: a #GRWLock |
654 | | * |
655 | | * Release a read lock on @rw_lock. |
656 | | * |
657 | | * Calling g_rw_lock_reader_unlock() on a lock that is not held |
658 | | * by the current thread leads to undefined behaviour. |
659 | | * |
660 | | * Since: 2.32 |
661 | | */ |
662 | | void |
663 | | g_rw_lock_reader_unlock (GRWLock *rw_lock) |
664 | 17.7M | { |
665 | 17.7M | pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock)); |
666 | 17.7M | } |
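
Putting the reader/writer calls together, a sketch of the usual pattern (illustrative names; the GHashTable is just an example of shared state):

    #include <glib.h>

    static GRWLock table_lock;
    static GHashTable *table;

    static gpointer
    lookup (gconstpointer key)
    {
      gpointer value;

      g_rw_lock_reader_lock (&table_lock);      /* many readers may enter at once */
      value = g_hash_table_lookup (table, key);
      g_rw_lock_reader_unlock (&table_lock);

      return value;
    }

    static void
    insert (gpointer key, gpointer value)
    {
      g_rw_lock_writer_lock (&table_lock);      /* exclusive: waits for all readers */
      g_hash_table_insert (table, key, value);
      g_rw_lock_writer_unlock (&table_lock);
    }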
667 | | |
668 | | /* {{{1 GCond */ |
669 | | |
670 | | #if !defined(USE_NATIVE_MUTEX) |
671 | | |
672 | | static pthread_cond_t * |
673 | | g_cond_impl_new (void) |
674 | | { |
675 | | pthread_condattr_t attr; |
676 | | pthread_cond_t *cond; |
677 | | gint status; |
678 | | |
679 | | pthread_condattr_init (&attr); |
680 | | |
681 | | #ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP |
682 | | #elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC) |
683 | | if G_UNLIKELY ((status = pthread_condattr_setclock (&attr, CLOCK_MONOTONIC)) != 0) |
684 | | g_thread_abort (status, "pthread_condattr_setclock"); |
685 | | #else |
686 | | #error Cannot support GCond on your platform. |
687 | | #endif |
688 | | |
689 | | cond = malloc (sizeof (pthread_cond_t)); |
690 | | if G_UNLIKELY (cond == NULL) |
691 | | g_thread_abort (errno, "malloc"); |
692 | | |
693 | | if G_UNLIKELY ((status = pthread_cond_init (cond, &attr)) != 0) |
694 | | g_thread_abort (status, "pthread_cond_init"); |
695 | | |
696 | | pthread_condattr_destroy (&attr); |
697 | | |
698 | | return cond; |
699 | | } |
700 | | |
701 | | static void |
702 | | g_cond_impl_free (pthread_cond_t *cond) |
703 | | { |
704 | | pthread_cond_destroy (cond); |
705 | | free (cond); |
706 | | } |
707 | | |
708 | | static inline pthread_cond_t * |
709 | | g_cond_get_impl (GCond *cond) |
710 | | { |
711 | | pthread_cond_t *impl = g_atomic_pointer_get (&cond->p); |
712 | | |
713 | | if G_UNLIKELY (impl == NULL) |
714 | | { |
715 | | impl = g_cond_impl_new (); |
716 | | if (!g_atomic_pointer_compare_and_exchange (&cond->p, NULL, impl)) |
717 | | g_cond_impl_free (impl); |
718 | | impl = cond->p; |
719 | | } |
720 | | |
721 | | return impl; |
722 | | } |
723 | | |
724 | | /** |
725 | | * g_cond_init: |
726 | | * @cond: an uninitialized #GCond |
727 | | * |
728 | | * Initialises a #GCond so that it can be used. |
729 | | * |
730 | | * This function is useful to initialise a #GCond that has been |
731 | | * allocated as part of a larger structure. It is not necessary to |
732 | | * initialise a #GCond that has been statically allocated. |
733 | | * |
734 | | * To undo the effect of g_cond_init() when a #GCond is no longer |
735 | | * needed, use g_cond_clear(). |
736 | | * |
737 | | * Calling g_cond_init() on an already-initialised #GCond leads |
738 | | * to undefined behaviour. |
739 | | * |
740 | | * Since: 2.32 |
741 | | */ |
742 | | void |
743 | | g_cond_init (GCond *cond) |
744 | | { |
745 | | cond->p = g_cond_impl_new (); |
746 | | } |
747 | | |
748 | | /** |
749 | | * g_cond_clear: |
750 | | * @cond: an initialised #GCond |
751 | | * |
752 | | * Frees the resources allocated to a #GCond with g_cond_init(). |
753 | | * |
754 | | * This function should not be used with a #GCond that has been |
755 | | * statically allocated. |
756 | | * |
757 | | * Calling g_cond_clear() for a #GCond on which threads are |
758 | | * blocking leads to undefined behaviour. |
759 | | * |
760 | | * Since: 2.32 |
761 | | */ |
762 | | void |
763 | | g_cond_clear (GCond *cond) |
764 | | { |
765 | | g_cond_impl_free (cond->p); |
766 | | } |
767 | | |
768 | | /** |
769 | | * g_cond_wait: |
770 | | * @cond: a #GCond |
771 | | * @mutex: a #GMutex that is currently locked |
772 | | * |
773 | | * Atomically releases @mutex and waits until @cond is signalled. |
774 | | * When this function returns, @mutex is locked again and owned by the |
775 | | * calling thread. |
776 | | * |
777 | | * When using condition variables, it is possible that a spurious wakeup |
778 | | * may occur (ie: g_cond_wait() returns even though g_cond_signal() was |
779 | | * not called). It's also possible that a stolen wakeup may occur. |
780 | | * This is when g_cond_signal() is called, but another thread acquires |
781 | | * @mutex before this thread and modifies the state of the program in |
782 | | * such a way that when g_cond_wait() is able to return, the expected |
783 | | * condition is no longer met. |
784 | | * |
785 | | * For this reason, g_cond_wait() must always be used in a loop. See |
786 | | * the documentation for #GCond for a complete example. |
787 | | **/ |
788 | | void |
789 | | g_cond_wait (GCond *cond, |
790 | | GMutex *mutex) |
791 | | { |
792 | | gint status; |
793 | | |
794 | | if G_UNLIKELY ((status = pthread_cond_wait (g_cond_get_impl (cond), g_mutex_get_impl (mutex))) != 0) |
795 | | g_thread_abort (status, "pthread_cond_wait"); |
796 | | } |
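
In practice the required loop looks like this (a sketch reusing the data_mutex, data_cond and current_data names from the g_cond_wait_until() example further below):

    static gpointer
    pop_data (void)
    {
      gpointer data;

      g_mutex_lock (&data_mutex);
      while (!current_data)                   /* re-check the predicate: wakeups
                                               * may be spurious or stolen */
        g_cond_wait (&data_cond, &data_mutex);

      data = current_data;
      current_data = NULL;
      g_mutex_unlock (&data_mutex);

      return data;
    }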
797 | | |
798 | | /** |
799 | | * g_cond_signal: |
800 | | * @cond: a #GCond |
801 | | * |
802 | | * If threads are waiting for @cond, at least one of them is unblocked. |
803 | | * If no threads are waiting for @cond, this function has no effect. |
804 | | * It is good practice to hold the same lock as the waiting thread |
805 | | * while calling this function, though not required. |
806 | | */ |
807 | | void |
808 | | g_cond_signal (GCond *cond) |
809 | | { |
810 | | gint status; |
811 | | |
812 | | if G_UNLIKELY ((status = pthread_cond_signal (g_cond_get_impl (cond))) != 0) |
813 | | g_thread_abort (status, "pthread_cond_signal"); |
814 | | } |
815 | | |
816 | | /** |
817 | | * g_cond_broadcast: |
818 | | * @cond: a #GCond |
819 | | * |
820 | | * If threads are waiting for @cond, all of them are unblocked. |
821 | | * If no threads are waiting for @cond, this function has no effect. |
822 | | * It is good practice to lock the same mutex as the waiting threads |
823 | | * while calling this function, though not required. |
824 | | */ |
825 | | void |
826 | | g_cond_broadcast (GCond *cond) |
827 | | { |
828 | | gint status; |
829 | | |
830 | | if G_UNLIKELY ((status = pthread_cond_broadcast (g_cond_get_impl (cond))) != 0) |
831 | | g_thread_abort (status, "pthread_cond_broadcast"); |
832 | | } |
833 | | |
834 | | /** |
835 | | * g_cond_wait_until: |
836 | | * @cond: a #GCond |
837 | | * @mutex: a #GMutex that is currently locked |
838 | | * @end_time: the monotonic time to wait until |
839 | | * |
840 | | * Waits until either @cond is signalled or @end_time has passed. |
841 | | * |
842 | | * As with g_cond_wait() it is possible that a spurious or stolen wakeup |
843 | | * could occur. For that reason, waiting on a condition variable should |
844 | | * always be in a loop, based on an explicitly-checked predicate. |
845 | | * |
846 | | * %TRUE is returned if the condition variable was signalled (or in the |
847 | | * case of a spurious wakeup). %FALSE is returned if @end_time has |
848 | | * passed. |
849 | | * |
850 | | * The following code shows how to correctly perform a timed wait on a |
851 | | * condition variable (extending the example presented in the |
852 | | * documentation for #GCond): |
853 | | * |
854 | | * |[<!-- language="C" --> |
855 | | * gpointer |
856 | | * pop_data_timed (void) |
857 | | * { |
858 | | * gint64 end_time; |
859 | | * gpointer data; |
860 | | * |
861 | | * g_mutex_lock (&data_mutex); |
862 | | * |
863 | | * end_time = g_get_monotonic_time () + 5 * G_TIME_SPAN_SECOND; |
864 | | * while (!current_data) |
865 | | * if (!g_cond_wait_until (&data_cond, &data_mutex, end_time)) |
866 | | * { |
867 | | * // timeout has passed. |
868 | | * g_mutex_unlock (&data_mutex); |
869 | | * return NULL; |
870 | | * } |
871 | | * |
872 | | * // there is data for us |
873 | | * data = current_data; |
874 | | * current_data = NULL; |
875 | | * |
876 | | * g_mutex_unlock (&data_mutex); |
877 | | * |
878 | | * return data; |
879 | | * } |
880 | | * ]| |
881 | | * |
882 | | * Notice that the end time is calculated once, before entering the |
883 | | * loop and reused. This is the motivation behind the use of absolute |
884 | | * time on this API -- if a relative time of 5 seconds were passed |
885 | | * directly to the call and a spurious wakeup occurred, the program would |
886 | | * have to start over waiting again (which would lead to a total wait |
887 | | * time of more than 5 seconds). |
888 | | * |
889 | | * Returns: %TRUE on a signal, %FALSE on a timeout |
890 | | * Since: 2.32 |
891 | | **/ |
892 | | gboolean |
893 | | g_cond_wait_until (GCond *cond, |
894 | | GMutex *mutex, |
895 | | gint64 end_time) |
896 | | { |
897 | | struct timespec ts; |
898 | | gint status; |
899 | | |
900 | | #ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP |
901 | | /* end_time is given relative to the monotonic clock as returned by |
902 | | * g_get_monotonic_time(). |
903 | | * |
904 | | * Since this pthreads API wants the relative time, convert it back again. |
905 | | */ |
906 | | { |
907 | | gint64 now = g_get_monotonic_time (); |
908 | | gint64 relative; |
909 | | |
910 | | if (end_time <= now) |
911 | | return FALSE; |
912 | | |
913 | | relative = end_time - now; |
914 | | |
915 | | ts.tv_sec = relative / 1000000; |
916 | | ts.tv_nsec = (relative % 1000000) * 1000; |
917 | | |
918 | | if ((status = pthread_cond_timedwait_relative_np (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0) |
919 | | return TRUE; |
920 | | } |
921 | | #elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC) |
922 | | /* This is the exact check we used during init to set the clock to |
923 | | * monotonic, so if we're in this branch, timedwait() will already be |
924 | | * expecting a monotonic clock. |
925 | | */ |
926 | | { |
927 | | ts.tv_sec = end_time / 1000000; |
928 | | ts.tv_nsec = (end_time % 1000000) * 1000; |
929 | | |
930 | | if ((status = pthread_cond_timedwait (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0) |
931 | | return TRUE; |
932 | | } |
933 | | #else |
934 | | #error Cannot support GCond on your platform. |
935 | | #endif |
936 | | |
937 | | if G_UNLIKELY (status != ETIMEDOUT) |
938 | | g_thread_abort (status, "pthread_cond_timedwait"); |
939 | | |
940 | | return FALSE; |
941 | | } |
942 | | |
943 | | #endif /* defined(USE_NATIVE_MUTEX) */ |
944 | | |
945 | | /* {{{1 GPrivate */ |
946 | | |
947 | | /** |
948 | | * GPrivate: |
949 | | * |
950 | | * The #GPrivate struct is an opaque data structure to represent a |
951 | | * thread-local data key. It is approximately equivalent to the |
952 | | * pthread_setspecific()/pthread_getspecific() APIs on POSIX and to |
953 | | * TlsSetValue()/TlsGetValue() on Windows. |
954 | | * |
955 | | * If you don't already know why you might want this functionality, |
956 | | * then you probably don't need it. |
957 | | * |
958 | | * #GPrivate is a very limited resource (as few as 128 per program, |
959 | | * shared between all libraries). It is also not possible to destroy a |
960 | | * #GPrivate after it has been used. As such, it is only ever acceptable |
961 | | * to use #GPrivate in static scope, and even then sparingly so. |
962 | | * |
963 | | * See G_PRIVATE_INIT() for a couple of examples. |
964 | | * |
965 | | * The #GPrivate structure should be considered opaque. It should only |
966 | | * be accessed via the g_private_ functions. |
967 | | */ |
968 | | |
969 | | /** |
970 | | * G_PRIVATE_INIT: |
971 | | * @notify: a #GDestroyNotify |
972 | | * |
973 | | * A macro to assist with the static initialisation of a #GPrivate. |
974 | | * |
975 | | * This macro is useful for the case that a #GDestroyNotify function |
976 | | * should be associated with the key. This is needed when the key will be |
977 | | * used to point at memory that should be deallocated when the thread |
978 | | * exits. |
979 | | * |
980 | | * Additionally, the #GDestroyNotify will also be called on the previous |
981 | | * value stored in the key when g_private_replace() is used. |
982 | | * |
983 | | * If no #GDestroyNotify is needed, then use of this macro is not |
984 | | * required -- if the #GPrivate is declared in static scope then it will |
985 | | * be properly initialised by default (ie: to all zeros). See the |
986 | | * examples below. |
987 | | * |
988 | | * |[<!-- language="C" --> |
989 | | * static GPrivate name_key = G_PRIVATE_INIT (g_free); |
990 | | * |
991 | | * // return value should not be freed |
992 | | * const gchar * |
993 | | * get_local_name (void) |
994 | | * { |
995 | | * return g_private_get (&name_key); |
996 | | * } |
997 | | * |
998 | | * void |
999 | | * set_local_name (const gchar *name) |
1000 | | * { |
1001 | | * g_private_replace (&name_key, g_strdup (name)); |
1002 | | * } |
1003 | | * |
1004 | | * |
1005 | | * static GPrivate count_key; // no free function |
1006 | | * |
1007 | | * gint |
1008 | | * get_local_count (void) |
1009 | | * { |
1010 | | * return GPOINTER_TO_INT (g_private_get (&count_key)); |
1011 | | * } |
1012 | | * |
1013 | | * void |
1014 | | * set_local_count (gint count) |
1015 | | * { |
1016 | | * g_private_set (&count_key, GINT_TO_POINTER (count)); |
1017 | | * } |
1018 | | * ]| |
1019 | | * |
1020 | | * Since: 2.32 |
1021 | | **/ |
1022 | | |
1023 | | static pthread_key_t * |
1024 | | g_private_impl_new (GDestroyNotify notify) |
1025 | 0 | { |
1026 | 0 | pthread_key_t *key; |
1027 | 0 | gint status; |
1028 | 0 |
1029 | 0 | key = malloc (sizeof (pthread_key_t)); |
1030 | 0 | if G_UNLIKELY (key == NULL) |
1031 | 0 | g_thread_abort (errno, "malloc"); |
1032 | 0 | status = pthread_key_create (key, notify); |
1033 | 0 | if G_UNLIKELY (status != 0) |
1034 | 0 | g_thread_abort (status, "pthread_key_create"); |
1035 | 0 |
1036 | 0 | return key; |
1037 | 0 | } |
1038 | | |
1039 | | static void |
1040 | | g_private_impl_free (pthread_key_t *key) |
1041 | 0 | { |
1042 | 0 | gint status; |
1043 | 0 |
1044 | 0 | status = pthread_key_delete (*key); |
1045 | 0 | if G_UNLIKELY (status != 0) |
1046 | 0 | g_thread_abort (status, "pthread_key_delete"); |
1047 | 0 | free (key); |
1048 | 0 | } |
1049 | | |
1050 | | static gpointer |
1051 | | g_private_impl_new_direct (GDestroyNotify notify) |
1052 | 25 | { |
1053 | 25 | gpointer impl = (void *) (gssize) -1; |
1054 | 25 | pthread_key_t key; |
1055 | 25 | gint status; |
1056 | | |
1057 | 25 | status = pthread_key_create (&key, notify); |
1058 | 25 | if G_UNLIKELY (status != 0) |
1059 | 0 | g_thread_abort (status, "pthread_key_create"); |
1060 | | |
1061 | 25 | memcpy (&impl, &key, sizeof (pthread_key_t)); |
1062 | | |
1063 | | /* pthread_key_create could theoretically put a NULL value into key. |
1064 | | * If that happens, waste the result and create a new one, since we |
1065 | | * use NULL to mean "not yet allocated". |
1066 | | * |
1067 | | * This will only happen once per program run. |
1068 | | * |
1069 | | * We completely avoid this problem for the case where pthread_key_t |
1070 | | * is smaller than void* (for example, on 64 bit Linux) by putting |
1071 | | * some high bits in the value of 'impl' to start with. Since we only |
1072 | | * overwrite part of the pointer, we will never end up with NULL. |
1073 | | */ |
1074 | 25 | if (sizeof (pthread_key_t) == sizeof (gpointer)) |
1075 | 0 | { |
1076 | 0 | if G_UNLIKELY (impl == NULL) |
1077 | 0 | { |
1078 | 0 | status = pthread_key_create (&key, notify); |
1079 | 0 | if G_UNLIKELY (status != 0) |
1080 | 0 | g_thread_abort (status, "pthread_key_create"); |
1081 | |
1082 | 0 | memcpy (&impl, &key, sizeof (pthread_key_t)); |
1083 | |
1084 | 0 | if G_UNLIKELY (impl == NULL) |
1085 | 0 | g_thread_abort (status, "pthread_key_create (gave NULL result twice)"); |
1086 | 0 | } |
1087 | 0 | } |
1088 | | |
1089 | 25 | return impl; |
1090 | 25 | } |
1091 | | |
1092 | | static void |
1093 | | g_private_impl_free_direct (gpointer impl) |
1094 | 0 | { |
1095 | 0 | pthread_key_t tmp; |
1096 | 0 | gint status; |
1097 | |
1098 | 0 | memcpy (&tmp, &impl, sizeof (pthread_key_t)); |
1099 | |
1100 | 0 | status = pthread_key_delete (tmp); |
1101 | 0 | if G_UNLIKELY (status != 0) |
1102 | 0 | g_thread_abort (status, "pthread_key_delete"); |
1103 | 0 | } |
1104 | | |
1105 | | static inline pthread_key_t |
1106 | | g_private_get_impl (GPrivate *key) |
1107 | 2.07M | { |
1108 | 2.07M | if (sizeof (pthread_key_t) > sizeof (gpointer)) |
1109 | 0 | { |
1110 | 0 | pthread_key_t *impl = g_atomic_pointer_get (&key->p); |
1111 | |
1112 | 0 | if G_UNLIKELY (impl == NULL) |
1113 | 0 | { |
1114 | 0 | impl = g_private_impl_new (key->notify); |
1115 | 0 | if (!g_atomic_pointer_compare_and_exchange (&key->p, NULL, impl)) |
1116 | 0 | { |
1117 | 0 | g_private_impl_free (impl); |
1118 | 0 | impl = key->p; |
1119 | 0 | } |
1120 | 0 | } |
1121 | |
1122 | 0 | return *impl; |
1123 | 0 | } |
1124 | 2.07M | else |
1125 | 2.07M | { |
1126 | 2.07M | gpointer impl = g_atomic_pointer_get (&key->p); |
1127 | 2.07M | pthread_key_t tmp; |
1128 | | |
1129 | 2.07M | if G_UNLIKELY (impl == NULL) |
1130 | 25 | { |
1131 | 25 | impl = g_private_impl_new_direct (key->notify); |
1132 | 25 | if (!g_atomic_pointer_compare_and_exchange (&key->p, NULL, impl)) |
1133 | 0 | { |
1134 | 0 | g_private_impl_free_direct (impl); |
1135 | 0 | impl = key->p; |
1136 | 0 | } |
1137 | 25 | } |
1138 | | |
1139 | 2.07M | memcpy (&tmp, &impl, sizeof (pthread_key_t)); |
1140 | | |
1141 | 2.07M | return tmp; |
1142 | 2.07M | } |
1143 | 2.07M | } |
1144 | | |
1145 | | /** |
1146 | | * g_private_get: |
1147 | | * @key: a #GPrivate |
1148 | | * |
1149 | | * Returns the current value of the thread local variable @key. |
1150 | | * |
1151 | | * If the value has not yet been set in this thread, %NULL is returned. |
1152 | | * Values are never copied between threads (when a new thread is |
1153 | | * created, for example). |
1154 | | * |
1155 | | * Returns: the thread-local value |
1156 | | */ |
1157 | | gpointer |
1158 | | g_private_get (GPrivate *key) |
1159 | 697k | { |
1160 | | /* quote POSIX: No errors are returned from pthread_getspecific(). */ |
1161 | 697k | return pthread_getspecific (g_private_get_impl (key)); |
1162 | 697k | } |
1163 | | |
1164 | | /** |
1165 | | * g_private_set: |
1166 | | * @key: a #GPrivate |
1167 | | * @value: the new value |
1168 | | * |
1169 | | * Sets the thread local variable @key to have the value @value in the |
1170 | | * current thread. |
1171 | | * |
1172 | | * This function differs from g_private_replace() in the following way: |
1173 | | * the #GDestroyNotify for @key is not called on the old value. |
1174 | | */ |
1175 | | void |
1176 | | g_private_set (GPrivate *key, |
1177 | | gpointer value) |
1178 | 1.37M | { |
1179 | 1.37M | gint status; |
1180 | | |
1181 | 1.37M | if G_UNLIKELY ((status = pthread_setspecific (g_private_get_impl (key), value)) != 0) |
1182 | 0 | g_thread_abort (status, "pthread_setspecific"); |
1183 | 1.37M | } |
1184 | | |
1185 | | /** |
1186 | | * g_private_replace: |
1187 | | * @key: a #GPrivate |
1188 | | * @value: the new value |
1189 | | * |
1190 | | * Sets the thread local variable @key to have the value @value in the |
1191 | | * current thread. |
1192 | | * |
1193 | | * This function differs from g_private_set() in the following way: if |
1194 | | * the previous value was non-%NULL then the #GDestroyNotify handler for |
1195 | | * @key is run on it. |
1196 | | * |
1197 | | * Since: 2.32 |
1198 | | **/ |
1199 | | void |
1200 | | g_private_replace (GPrivate *key, |
1201 | | gpointer value) |
1202 | 4 | { |
1203 | 4 | pthread_key_t impl = g_private_get_impl (key); |
1204 | 4 | gpointer old; |
1205 | 4 | gint status; |
1206 | | |
1207 | 4 | old = pthread_getspecific (impl); |
1208 | | |
1209 | 4 | if G_UNLIKELY ((status = pthread_setspecific (impl, value)) != 0) |
1210 | 0 | g_thread_abort (status, "pthread_setspecific"); |
1211 | | |
1212 | 4 | if (old && key->notify) |
1213 | 0 | key->notify (old); |
1214 | 4 | } |
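
The practical difference between the two setters, sketched with the name_key example from G_PRIVATE_INIT() above:

    static GPrivate name_key = G_PRIVATE_INIT (g_free);

    static void
    rename_twice (void)
    {
      gchar *old;

      /* g_private_set() does not free the previous value: take
       * ownership of it first, or it leaks. */
      old = g_private_get (&name_key);
      g_private_set (&name_key, g_strdup ("first"));
      g_free (old);

      /* g_private_replace() runs the GDestroyNotify (g_free here)
       * on the previous value automatically. */
      g_private_replace (&name_key, g_strdup ("second"));
    }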
1215 | | |
1216 | | /* {{{1 GThread */ |
1217 | | |
1218 | 24 | #define posix_check_err(err, name) G_STMT_START{ \ |
1219 | 24 | int error = (err); \ |
1220 | 24 | if (error) \ |
1221 | 24 | g_error ("file %s: line %d (%s): error '%s' during '%s'", \ |
1222 | 24 | __FILE__, __LINE__, G_STRFUNC, \ |
1223 | 24 | g_strerror (error), name); \ |
1224 | 24 | }G_STMT_END |
1225 | | |
1226 | 16 | #define posix_check_cmd(cmd) posix_check_err (cmd, #cmd) |
1227 | | |
1228 | | typedef struct |
1229 | | { |
1230 | | GRealThread thread; |
1231 | | |
1232 | | pthread_t system_thread; |
1233 | | gboolean joined; |
1234 | | GMutex lock; |
1235 | | |
1236 | | void *(*proxy) (void *); |
1237 | | } GThreadPosix; |
1238 | | |
1239 | | void |
1240 | | g_system_thread_free (GRealThread *thread) |
1241 | 0 | { |
1242 | 0 | GThreadPosix *pt = (GThreadPosix *) thread; |
1243 | |
1244 | 0 | if (!pt->joined) |
1245 | 0 | pthread_detach (pt->system_thread); |
1246 | |
1247 | 0 | g_mutex_clear (&pt->lock); |
1248 | |
1249 | 0 | g_slice_free (GThreadPosix, pt); |
1250 | 0 | } |
1251 | | |
1252 | | GRealThread * |
1253 | | g_system_thread_new (GThreadFunc proxy, |
1254 | | gulong stack_size, |
1255 | | const char *name, |
1256 | | GThreadFunc func, |
1257 | | gpointer data, |
1258 | | GError **error) |
1259 | 8 | { |
1260 | 8 | GThreadPosix *thread; |
1261 | 8 | GRealThread *base_thread; |
1262 | 8 | pthread_attr_t attr; |
1263 | 8 | gint ret; |
1264 | | |
1265 | 8 | thread = g_slice_new0 (GThreadPosix); |
1266 | 8 | base_thread = (GRealThread*)thread; |
1267 | 8 | base_thread->ref_count = 2; |
1268 | 8 | base_thread->ours = TRUE; |
1269 | 8 | base_thread->thread.joinable = TRUE; |
1270 | 8 | base_thread->thread.func = func; |
1271 | 8 | base_thread->thread.data = data; |
1272 | 8 | base_thread->name = g_strdup (name); |
1273 | 8 | thread->proxy = proxy; |
1274 | | |
1275 | 8 | posix_check_cmd (pthread_attr_init (&attr)); |
1276 | | |
1277 | 8 | #ifdef HAVE_PTHREAD_ATTR_SETSTACKSIZE |
1278 | 8 | if (stack_size) |
1279 | 0 | { |
1280 | 0 | #ifdef _SC_THREAD_STACK_MIN |
1281 | 0 | long min_stack_size = sysconf (_SC_THREAD_STACK_MIN); |
1282 | 0 | if (min_stack_size >= 0) |
1283 | 0 | stack_size = MAX ((gulong) min_stack_size, stack_size); |
1284 | 0 | #endif /* _SC_THREAD_STACK_MIN */ |
1285 | | /* No error check here, because some systems can't do it and |
1286 | | * we simply don't want threads to fail because of that. */ |
1287 | 0 | pthread_attr_setstacksize (&attr, stack_size); |
1288 | 0 | } |
1289 | 8 | #endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */ |
1290 | | |
1291 | 8 | #ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED |
1292 | 8 | { |
1293 | | /* While this is the default, better be explicit about it */ |
1294 | 8 | pthread_attr_setinheritsched (&attr, PTHREAD_INHERIT_SCHED); |
1295 | 8 | } |
1296 | 8 | #endif /* HAVE_PTHREAD_ATTR_SETINHERITSCHED */ |
1297 | | |
1298 | 8 | ret = pthread_create (&thread->system_thread, &attr, (void* (*)(void*))proxy, thread); |
1299 | | |
1300 | 8 | posix_check_cmd (pthread_attr_destroy (&attr)); |
1301 | | |
1302 | 8 | if (ret == EAGAIN) |
1303 | 0 | { |
1304 | 0 | g_set_error (error, G_THREAD_ERROR, G_THREAD_ERROR_AGAIN, |
1305 | 0 | "Error creating thread: %s", g_strerror (ret)); |
1306 | 0 | g_free (thread->thread.name); |
1307 | 0 | g_slice_free (GThreadPosix, thread); |
1308 | 0 | return NULL; |
1309 | 0 | } |
1310 | | |
1311 | 8 | posix_check_err (ret, "pthread_create"); |
1312 | | |
1313 | 8 | g_mutex_init (&thread->lock); |
1314 | | |
1315 | 8 | return (GRealThread *) thread; |
1316 | 8 | } |
1317 | | |
1318 | | /** |
1319 | | * g_thread_yield: |
1320 | | * |
1321 | | * Causes the calling thread to voluntarily relinquish the CPU, so |
1322 | | * that other threads can run. |
1323 | | * |
1324 | | * This function is often used as a method to make busy waiting less evil. |
1325 | | */ |
1326 | | void |
1327 | | g_thread_yield (void) |
1328 | 0 | { |
1329 | 0 | sched_yield (); |
1330 | 0 | } |
1331 | | |
1332 | | void |
1333 | | g_system_thread_wait (GRealThread *thread) |
1334 | 0 | { |
1335 | 0 | GThreadPosix *pt = (GThreadPosix *) thread; |
1336 | |
1337 | 0 | g_mutex_lock (&pt->lock); |
1338 | |
1339 | 0 | if (!pt->joined) |
1340 | 0 | { |
1341 | 0 | posix_check_cmd (pthread_join (pt->system_thread, NULL)); |
1342 | 0 | pt->joined = TRUE; |
1343 | 0 | } |
1344 | |
1345 | 0 | g_mutex_unlock (&pt->lock); |
1346 | 0 | } |
1347 | | |
1348 | | void |
1349 | | g_system_thread_exit (void) |
1350 | 0 | { |
1351 | 0 | pthread_exit (NULL); |
1352 | 0 | } |
1353 | | |
1354 | | void |
1355 | | g_system_thread_set_name (const gchar *name) |
1356 | 8 | { |
1357 | | #if defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID) |
1358 | | pthread_setname_np (name); /* on OS X and iOS */ |
1359 | | #elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID) |
1360 | | pthread_setname_np (pthread_self (), name); /* on Linux and Solaris */ |
1361 | | #elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID_AND_ARG) |
1362 | | pthread_setname_np (pthread_self (), "%s", (gchar *) name); /* on NetBSD */ |
1363 | | #elif defined(HAVE_PTHREAD_SET_NAME_NP) |
1364 | | pthread_set_name_np (pthread_self (), name); /* on FreeBSD, DragonFlyBSD, OpenBSD */ |
1365 | | #endif |
1366 | 8 | } |
1367 | | |
1368 | | /* {{{1 GMutex and GCond futex implementation */ |
1369 | | |
1370 | | #if defined(USE_NATIVE_MUTEX) |
1371 | | /* We should expand the set of operations available in gatomic once we |
1372 | | * have better C11 support in GCC in common distributions (ie: 4.9). |
1373 | | * |
1374 | | * Before then, let's define a couple of useful things for our own |
1375 | | * purposes... |
1376 | | */ |
1377 | | |
1378 | | #ifdef HAVE_STDATOMIC_H |
1379 | | |
1380 | | #include <stdatomic.h> |
1381 | | |
1382 | | #define exchange_acquire(ptr, new) \ |
1383 | 2 | atomic_exchange_explicit((atomic_uint *) (ptr), (new), __ATOMIC_ACQUIRE) |
1384 | | #define compare_exchange_acquire(ptr, old, new) \ |
1385 | 0 | atomic_compare_exchange_strong_explicit((atomic_uint *) (ptr), (old), (new), \ |
1386 | 0 | __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) |
1387 | | |
1388 | | #define exchange_release(ptr, new) \ |
1389 | 82.5M | atomic_exchange_explicit((atomic_uint *) (ptr), (new), __ATOMIC_RELEASE) |
1390 | | #define store_release(ptr, new) \ |
1391 | | atomic_store_explicit((atomic_uint *) (ptr), (new), __ATOMIC_RELEASE) |
1392 | | |
1393 | | #else |
1394 | | |
1395 | | #define exchange_acquire(ptr, new) \ |
1396 | | __atomic_exchange_4((ptr), (new), __ATOMIC_ACQUIRE) |
1397 | | #define compare_exchange_acquire(ptr, old, new) \ |
1398 | | __atomic_compare_exchange_4((ptr), (old), (new), 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) |
1399 | | |
1400 | | #define exchange_release(ptr, new) \ |
1401 | | __atomic_exchange_4((ptr), (new), __ATOMIC_RELEASE) |
1402 | | #define store_release(ptr, new) \ |
1403 | | __atomic_store_4((ptr), (new), __ATOMIC_RELEASE) |
1404 | | |
1405 | | #endif |
1406 | | |
1407 | | /* Our strategy for the mutex is pretty simple: |
1408 | | * |
1409 | | * 0: not in use |
1410 | | * |
1411 | | * 1: acquired by one thread only, no contention |
1412 | | * |
1413 | | * 2: contended |
1414 | | */ |
1415 | | |
1416 | | typedef enum { |
1417 | | G_MUTEX_STATE_EMPTY = 0, |
1418 | | G_MUTEX_STATE_OWNED, |
1419 | | G_MUTEX_STATE_CONTENDED, |
1420 | | } GMutexState; |
1421 | | |
1422 | | /* |
1423 | | * As such, attempting to acquire the lock should involve an increment. |
1424 | | * If we find that the previous value was 0 then we can return |
1425 | | * immediately. |
1426 | | * |
1427 | | * On unlock, we always store 0 to indicate that the lock is available. |
1428 | | * If the value there was 1 before then we didn't have contention and |
1429 | | * can return immediately. If the value was something other than 1 then |
1430 | | * we have the contended case and need to wake a waiter. |
1431 | | * |
1432 | | * If it was not 0 then there is another thread holding it and we must |
1433 | | * wait. We must always ensure that we mark a value >1 while we are |
1434 | | * waiting in order to instruct the holder to do a wake operation on |
1435 | | * unlock. |
1436 | | */ |
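
As a self-contained illustration of this empty/owned/contended protocol, here is a toy version (a sketch assuming Linux, <linux/futex.h> and a raw syscall(); the real implementation below goes through GLib's atomics and g_futex_simple() instead):

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdatomic.h>

    static atomic_uint state;  /* 0 = empty, 1 = owned, 2 = contended */

    static void
    toy_lock (void)
    {
      unsigned int empty = 0;

      /* fast path: empty -> owned */
      if (atomic_compare_exchange_strong (&state, &empty, 1))
        return;

      /* slow path: advertise contention, sleep while someone else holds it */
      while (atomic_exchange (&state, 2) != 0)
        syscall (SYS_futex, &state, FUTEX_WAIT_PRIVATE, 2, NULL, NULL, 0);
    }

    static void
    toy_unlock (void)
    {
      /* release; wake a waiter unless we saw the uncontended value 1 */
      if (atomic_exchange (&state, 0) != 1)
        syscall (SYS_futex, &state, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);
    }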
1437 | | |
1438 | | void |
1439 | | g_mutex_init (GMutex *mutex) |
1440 | 50.2k | { |
1441 | 50.2k | mutex->i[0] = G_MUTEX_STATE_EMPTY; |
1442 | 50.2k | } |
1443 | | |
1444 | | void |
1445 | | g_mutex_clear (GMutex *mutex) |
1446 | 38.2k | { |
1447 | 38.2k | if G_UNLIKELY (mutex->i[0] != G_MUTEX_STATE_EMPTY) |
1448 | 0 | { |
1449 | 0 | fprintf (stderr, "g_mutex_clear() called on uninitialised or locked mutex\n"); |
1450 | 0 | g_abort (); |
1451 | 0 | } |
1452 | 38.2k | } |
1453 | | |
1454 | | G_GNUC_NO_INLINE |
1455 | | static void |
1456 | | g_mutex_lock_slowpath (GMutex *mutex) |
1457 | 1 | { |
1458 | | /* Set to contended. If it was empty before then we |
1459 | | * just acquired the lock. |
1460 | | * |
1461 | | * Otherwise, sleep for as long as the contended state remains... |
1462 | | */ |
1463 | 2 | while (exchange_acquire (&mutex->i[0], G_MUTEX_STATE_CONTENDED) != G_MUTEX_STATE_EMPTY) |
1464 | 1 | { |
1465 | 1 | g_futex_simple (&mutex->i[0], (gsize) FUTEX_WAIT_PRIVATE, |
1466 | 1 | G_MUTEX_STATE_CONTENDED, NULL); |
1467 | 1 | } |
1468 | 1 | } |
1469 | | |
1470 | | G_GNUC_NO_INLINE |
1471 | | static void |
1472 | | g_mutex_unlock_slowpath (GMutex *mutex, |
1473 | | guint prev) |
1474 | 2 | { |
1475 | | /* We seem to get better code for the uncontended case by splitting |
1476 | | * this out... |
1477 | | */ |
1478 | 2 | if G_UNLIKELY (prev == G_MUTEX_STATE_EMPTY) |
1479 | 0 | { |
1480 | 0 | fprintf (stderr, "Attempt to unlock mutex that was not locked\n"); |
1481 | 0 | g_abort (); |
1482 | 0 | } |
1483 | | |
1484 | 2 | g_futex_simple (&mutex->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL); |
1485 | 2 | } |
1486 | | |
1487 | | void |
1488 | | g_mutex_lock (GMutex *mutex) |
1489 | 82.5M | { |
1490 | | /* empty -> owned and we're done. Anything else, and we need to wait... */ |
1491 | 82.5M | if G_UNLIKELY (!g_atomic_int_compare_and_exchange (&mutex->i[0], |
1492 | 82.5M | G_MUTEX_STATE_EMPTY, |
1493 | 82.5M | G_MUTEX_STATE_OWNED)) |
1494 | 1 | g_mutex_lock_slowpath (mutex); |
1495 | 82.5M | } |
1496 | | |
1497 | | void |
1498 | | g_mutex_unlock (GMutex *mutex) |
1499 | 82.5M | { |
1500 | 82.5M | guint prev; |
1501 | | |
1502 | 82.5M | prev = exchange_release (&mutex->i[0], G_MUTEX_STATE_EMPTY); |
1503 | | |
1504 | | /* 1-> 0 and we're done. Anything else and we need to signal... */ |
1505 | 82.5M | if G_UNLIKELY (prev != G_MUTEX_STATE_OWNED) |
1506 | 2 | g_mutex_unlock_slowpath (mutex, prev); |
1507 | 82.5M | } |
1508 | | |
1509 | | gboolean |
1510 | | g_mutex_trylock (GMutex *mutex) |
1511 | 0 | { |
1512 | 0 | GMutexState empty = G_MUTEX_STATE_EMPTY; |
1513 | | |
1514 | | /* We don't want to touch the value at all unless we can move it from |
1515 | | * exactly empty to owned. |
1516 | | */ |
1517 | 0 | return compare_exchange_acquire (&mutex->i[0], &empty, G_MUTEX_STATE_OWNED); |
1518 | 0 | } |
1519 | | |
1520 | | /* Condition variables are implemented in a rather simple way as well. |
1521 | | * In many ways, futex() as an abstraction is even more ideally suited |
1522 | | * to condition variables than it is to mutexes. |
1523 | | * |
1524 | | * We store a generation counter. We sample it with the lock held and |
1525 | | * unlock before sleeping on the futex. |
1526 | | * |
1527 | | * Signalling simply involves increasing the counter and making the |
1528 | | * appropriate futex call. |
1529 | | * |
1530 | | * The only thing that is the slightest bit complicated is timed waits |
1531 | | * because we must convert our absolute time to relative. |
1532 | | */ |
1533 | | |
1534 | | void |
1535 | | g_cond_init (GCond *cond) |
1536 | 38.3k | { |
1537 | 38.3k | cond->i[0] = 0; |
1538 | 38.3k | } |
1539 | | |
1540 | | void |
1541 | | g_cond_clear (GCond *cond) |
1542 | 38.2k | { |
1543 | 38.2k | } |
1544 | | |
1545 | | void |
1546 | | g_cond_wait (GCond *cond, |
1547 | | GMutex *mutex) |
1548 | 7.66k | { |
1549 | 7.66k | guint sampled = (guint) g_atomic_int_get (&cond->i[0]); |
1550 | | |
1551 | 7.66k | g_mutex_unlock (mutex); |
1552 | 7.66k | g_futex_simple (&cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, NULL); |
1553 | 7.66k | g_mutex_lock (mutex); |
1554 | 7.66k | } |
1555 | | |
1556 | | void |
1557 | | g_cond_signal (GCond *cond) |
1558 | 15.3k | { |
1559 | 15.3k | g_atomic_int_inc (&cond->i[0]); |
1560 | | |
1561 | 15.3k | g_futex_simple (&cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL); |
1562 | 15.3k | } |
1563 | | |
1564 | | void |
1565 | | g_cond_broadcast (GCond *cond) |
1566 | 436 | { |
1567 | 436 | g_atomic_int_inc (&cond->i[0]); |
1568 | | |
1569 | 436 | g_futex_simple (&cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) INT_MAX, NULL); |
1570 | 436 | } |
1571 | | |
1572 | | gboolean |
1573 | | g_cond_wait_until (GCond *cond, |
1574 | | GMutex *mutex, |
1575 | | gint64 end_time) |
1576 | 7.65k | { |
1577 | 7.65k | struct timespec now; |
1578 | 7.65k | struct timespec span; |
1579 | | |
1580 | 7.65k | guint sampled; |
1581 | 7.65k | int res; |
1582 | 7.65k | gboolean success; |
1583 | | |
1584 | 7.65k | if (end_time < 0) |
1585 | 0 | return FALSE; |
1586 | | |
1587 | 7.65k | clock_gettime (CLOCK_MONOTONIC, &now); |
1588 | 7.65k | span.tv_sec = (end_time / 1000000) - now.tv_sec; |
1589 | 7.65k | span.tv_nsec = ((end_time % 1000000) * 1000) - now.tv_nsec; |
1590 | 7.65k | if (span.tv_nsec < 0) |
1591 | 7.65k | { |
1592 | 7.65k | span.tv_nsec += 1000000000; |
1593 | 7.65k | span.tv_sec--; |
1594 | 7.65k | } |
1595 | | |
1596 | 7.65k | if (span.tv_sec < 0) |
1597 | 0 | return FALSE; |
1598 | | |
1599 | | /* `struct timespec` as defined by the libc headers does not necessarily |
1600 | | * have any relation to the one used by the kernel for the `futex` syscall. |
1601 | | * |
1602 | | * Specifically, the libc headers might use 64-bit `time_t` while the kernel |
1603 | | * headers use 32-bit types on certain systems. |
1604 | | * |
1605 | | * To get around this problem we |
1606 | | * a) check if `futex_time64` is available, which only exists on 32-bit |
1607 | | * platforms and always uses 64-bit `time_t`. |
1608 | | * b) otherwise (or if that returns `ENOSYS`), we call the normal `futex` |
1609 | | * syscall with the `struct timespec` used by the kernel. By default, we |
1610 | | * use `__kernel_long_t` for both its fields, which is equivalent to |
1611 | | * `__kernel_old_time_t` and is available in the kernel headers for a |
1612 | | * longer time. |
1613 | | * c) With very old headers (~2.6.x), `__kernel_long_t` is not available, and |
1614 | | * we use an older definition that uses `__kernel_time_t` and `long`. |
1615 | | * |
1616 | | * Also some 32-bit systems do not define `__NR_futex` at all and only |
1617 | | * define `__NR_futex_time64`. |
1618 | | */ |
1619 | | |
1620 | 7.65k | sampled = cond->i[0]; |
1621 | 7.65k | g_mutex_unlock (mutex); |
1622 | | |
1623 | | #ifdef __NR_futex_time64 |
1624 | | { |
1625 | | struct |
1626 | | { |
1627 | | gint64 tv_sec; |
1628 | | gint64 tv_nsec; |
1629 | | } span_arg; |
1630 | | |
1631 | | span_arg.tv_sec = span.tv_sec; |
1632 | | span_arg.tv_nsec = span.tv_nsec; |
1633 | | |
1634 | | res = syscall (__NR_futex_time64, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, &span_arg); |
1635 | | |
1636 | | /* If the syscall does not exist (`ENOSYS`), we retry below with the |
1637 | | * normal `futex` syscall. This can happen if newer kernel headers are |
1638 | | * used than the kernel that is actually running. |
1639 | | */ |
1640 | | # ifdef __NR_futex |
1641 | | if (res >= 0 || errno != ENOSYS) |
1642 | | # endif /* defined(__NR_futex) */ |
1643 | | { |
1644 | | success = (res < 0 && errno == ETIMEDOUT) ? FALSE : TRUE; |
1645 | | g_mutex_lock (mutex); |
1646 | | |
1647 | | return success; |
1648 | | } |
1649 | | } |
1650 | | #endif |
1651 | | |
1652 | 7.65k | #ifdef __NR_futex |
1653 | 7.65k | { |
1654 | | # ifdef __kernel_long_t |
1655 | | # define KERNEL_SPAN_SEC_TYPE __kernel_long_t |
1656 | | struct |
1657 | | { |
1658 | | __kernel_long_t tv_sec; |
1659 | | __kernel_long_t tv_nsec; |
1660 | | } span_arg; |
1661 | | # else |
1662 | | /* Very old kernel headers: version 2.6.32 and thereabouts */ |
1663 | 7.65k | # define KERNEL_SPAN_SEC_TYPE __kernel_time_t |
1664 | 7.65k | struct |
1665 | 7.65k | { |
1666 | 7.65k | __kernel_time_t tv_sec; |
1667 | 7.65k | long tv_nsec; |
1668 | 7.65k | } span_arg; |
1669 | 7.65k | # endif |
1670 | | /* Make sure to only ever call this if the end time actually fits into the target type */ |
1671 | 7.65k | if (G_UNLIKELY (sizeof (KERNEL_SPAN_SEC_TYPE) < 8 && span.tv_sec > G_MAXINT32)) |
1672 | 7.65k | g_error ("%s: Can’t wait for more than %us", G_STRFUNC, G_MAXINT32); |
1673 | | |
1674 | 7.65k | span_arg.tv_sec = span.tv_sec; |
1675 | 7.65k | span_arg.tv_nsec = span.tv_nsec; |
1676 | | |
1677 | 7.65k | res = syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, &span_arg); |
1678 | 7.65k | success = (res < 0 && errno == ETIMEDOUT) ? FALSE : TRUE; |
1679 | 7.65k | g_mutex_lock (mutex); |
1680 | | |
1681 | 7.65k | return success; |
1682 | 7.65k | } |
1683 | 0 | # undef KERNEL_SPAN_SEC_TYPE |
1684 | 0 | #endif /* defined(__NR_futex) */ |
1685 | | |
1686 | | /* We can't end up here because of the checks above */ |
1687 | 7.65k | g_assert_not_reached (); |
1688 | 0 | } |
1689 | | |
1690 | | #endif |
1691 | | |
1692 | | /* {{{1 Epilogue */ |
1693 | | /* vim:set foldmethod=marker: */ |