/src/glib/glib/gthread-posix.c
Line | Count | Source |
1 | | /* GLIB - Library of useful routines for C programming |
2 | | * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald |
3 | | * |
4 | | * gthread.c: posix thread system implementation |
5 | | * Copyright 1998 Sebastian Wilhelmi; University of Karlsruhe |
6 | | * |
7 | | * This library is free software; you can redistribute it and/or |
8 | | * modify it under the terms of the GNU Lesser General Public |
9 | | * License as published by the Free Software Foundation; either |
10 | | * version 2.1 of the License, or (at your option) any later version. |
11 | | * |
12 | | * This library is distributed in the hope that it will be useful, |
13 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 | | * Lesser General Public License for more details. |
16 | | * |
17 | | * You should have received a copy of the GNU Lesser General Public |
18 | | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
19 | | */ |
20 | | |
21 | | /* |
22 | | * Modified by the GLib Team and others 1997-2000. See the AUTHORS |
23 | | * file for a list of people on the GLib Team. See the ChangeLog |
24 | | * files for a list of changes. These files are distributed with |
25 | | * GLib at ftp://ftp.gtk.org/pub/gtk/. |
26 | | */ |
27 | | |
28 | | /* The GMutex, GCond and GPrivate implementations in this file are some |
29 | | * of the lowest-level code in GLib. All other parts of GLib (messages, |
30 | | * memory, slices, etc) assume that they can freely use these facilities |
31 | | * without risking recursion. |
32 | | * |
33 | | * As such, these functions are NOT permitted to call any other part of |
34 | | * GLib. |
35 | | * |
36 | | * The thread manipulation functions (create, exit, join, etc.) have |
37 | | * more freedom -- they can do as they please. |
38 | | */ |
39 | | |
40 | | #include "config.h" |
41 | | |
42 | | #include "gthread.h" |
43 | | |
44 | | #include "gmain.h" |
45 | | #include "gmessages.h" |
46 | | #include "gslice.h" |
47 | | #include "gstrfuncs.h" |
48 | | #include "gtestutils.h" |
49 | | #include "gthreadprivate.h" |
50 | | #include "gutils.h" |
51 | | |
52 | | #include <stdlib.h> |
53 | | #include <stdio.h> |
54 | | #include <string.h> |
55 | | #include <errno.h> |
56 | | #include <pthread.h> |
57 | | |
58 | | #include <sys/time.h> |
59 | | #include <unistd.h> |
60 | | |
61 | | #ifdef HAVE_PTHREAD_SET_NAME_NP |
62 | | #include <pthread_np.h> |
63 | | #endif |
64 | | #ifdef HAVE_SCHED_H |
65 | | #include <sched.h> |
66 | | #endif |
67 | | #ifdef G_OS_WIN32 |
68 | | #include <windows.h> |
69 | | #endif |
70 | | |
71 | | #if defined(HAVE_SYS_SCHED_GETATTR) |
72 | | #include <sys/syscall.h> |
73 | | #endif |
74 | | |
75 | | #if defined(HAVE_FUTEX) && \ |
76 | | (defined(HAVE_STDATOMIC_H) || defined(__ATOMIC_SEQ_CST)) |
77 | | #define USE_NATIVE_MUTEX |
78 | | #endif |
79 | | |
80 | | static void |
81 | | g_thread_abort (gint status, |
82 | | const gchar *function) |
83 | 0 | { |
84 | 0 | fprintf (stderr, "GLib (gthread-posix.c): Unexpected error from C library during '%s': %s. Aborting.\n", |
85 | 0 | function, strerror (status)); |
86 | 0 | g_abort (); |
87 | 0 | } |
88 | | |
89 | | /* {{{1 GMutex */ |
90 | | |
91 | | #if !defined(USE_NATIVE_MUTEX) |
92 | | |
93 | | static pthread_mutex_t * |
94 | | g_mutex_impl_new (void) |
95 | | { |
96 | | pthread_mutexattr_t *pattr = NULL; |
97 | | pthread_mutex_t *mutex; |
98 | | gint status; |
99 | | #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP |
100 | | pthread_mutexattr_t attr; |
101 | | #endif |
102 | | |
103 | | mutex = malloc (sizeof (pthread_mutex_t)); |
104 | | if G_UNLIKELY (mutex == NULL) |
105 | | g_thread_abort (errno, "malloc"); |
106 | | |
107 | | #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP |
108 | | pthread_mutexattr_init (&attr); |
109 | | pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ADAPTIVE_NP); |
110 | | pattr = &attr; |
111 | | #endif |
112 | | |
113 | | if G_UNLIKELY ((status = pthread_mutex_init (mutex, pattr)) != 0) |
114 | | g_thread_abort (status, "pthread_mutex_init"); |
115 | | |
116 | | #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP |
117 | | pthread_mutexattr_destroy (&attr); |
118 | | #endif |
119 | | |
120 | | return mutex; |
121 | | } |
122 | | |
123 | | static void |
124 | | g_mutex_impl_free (pthread_mutex_t *mutex) |
125 | | { |
126 | | pthread_mutex_destroy (mutex); |
127 | | free (mutex); |
128 | | } |
129 | | |
130 | | static inline pthread_mutex_t * |
131 | | g_mutex_get_impl (GMutex *mutex) |
132 | | { |
133 | | pthread_mutex_t *impl = g_atomic_pointer_get (&mutex->p); |
134 | | |
135 | | if G_UNLIKELY (impl == NULL) |
136 | | { |
137 | | impl = g_mutex_impl_new (); |
138 | | if (!g_atomic_pointer_compare_and_exchange (&mutex->p, NULL, impl)) |
139 | | g_mutex_impl_free (impl); |
140 | | impl = mutex->p; |
141 | | } |
142 | | |
143 | | return impl; |
144 | | } |
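
This allocate-then-compare-and-exchange pattern recurs throughout this
file: g_rec_mutex_get_impl(), g_rw_lock_get_impl(), g_cond_get_impl()
and g_private_get_impl() below all follow it. A minimal sketch of the
idea in isolation -- "Resource", resource_new() and resource_free() are
hypothetical placeholders, not GLib API:

|[<!-- language="C" -->
/* Lazily create a shared Resource exactly once, without a lock.
 * Resource, resource_new() and resource_free() are illustrative. */
static inline Resource *
get_impl_once (Resource **slot)
{
  Resource *impl = g_atomic_pointer_get (slot);

  if (impl == NULL)
    {
      /* Every racing thread allocates; exactly one CAS succeeds. */
      impl = resource_new ();
      if (!g_atomic_pointer_compare_and_exchange (slot, NULL, impl))
        resource_free (impl);              /* lost the race: discard our copy */
      impl = g_atomic_pointer_get (slot);  /* re-read the winner's pointer */
    }

  return impl;
}
]|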
145 | | |
146 | | |
147 | | /** |
148 | | * g_mutex_init: |
149 | | * @mutex: an uninitialized #GMutex |
150 | | * |
151 | | * Initializes a #GMutex so that it can be used. |
152 | | * |
153 | | * This function is useful to initialize a mutex that has been |
154 | | * allocated on the stack, or as part of a larger structure. |
155 | | * It is not necessary to initialize a mutex that has been |
156 | | * statically allocated. |
157 | | * |
158 | | * |[<!-- language="C" --> |
159 | | * typedef struct { |
160 | | * GMutex m; |
161 | | * ... |
162 | | * } Blob; |
163 | | * |
164 | | * Blob *b; |
165 | | * |
166 | | * b = g_new (Blob, 1); |
167 | | * g_mutex_init (&b->m); |
168 | | * ]| |
169 | | * |
170 | | * To undo the effect of g_mutex_init() when a mutex is no longer |
171 | | * needed, use g_mutex_clear(). |
172 | | * |
173 | | * Calling g_mutex_init() on an already initialized #GMutex leads |
174 | | * to undefined behaviour. |
175 | | * |
176 | | * Since: 2.32 |
177 | | */ |
178 | | void |
179 | | g_mutex_init (GMutex *mutex) |
180 | | { |
181 | | mutex->p = g_mutex_impl_new (); |
182 | | } |
183 | | |
184 | | /** |
185 | | * g_mutex_clear: |
186 | | * @mutex: an initialized #GMutex |
187 | | * |
188 | | * Frees the resources allocated to a mutex with g_mutex_init(). |
189 | | * |
190 | | * This function should not be used with a #GMutex that has been |
191 | | * statically allocated. |
192 | | * |
193 | | * Calling g_mutex_clear() on a locked mutex leads to undefined |
194 | | * behaviour. |
195 | | * |
196 | | * Since: 2.32 |
197 | | */ |
198 | | void |
199 | | g_mutex_clear (GMutex *mutex) |
200 | | { |
201 | | g_mutex_impl_free (mutex->p); |
202 | | } |
203 | | |
204 | | /** |
205 | | * g_mutex_lock: |
206 | | * @mutex: a #GMutex |
207 | | * |
208 | | * Locks @mutex. If @mutex is already locked by another thread, the |
209 | | * current thread will block until @mutex is unlocked by the other |
210 | | * thread. |
211 | | * |
212 | | * #GMutex is neither guaranteed to be recursive nor to be |
213 | | * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has |
214 | | * already been locked by the same thread results in undefined behaviour |
215 | | * (including but not limited to deadlocks). |
216 | | */ |
217 | | void |
218 | | g_mutex_lock (GMutex *mutex) |
219 | | { |
220 | | gint status; |
221 | | |
222 | | if G_UNLIKELY ((status = pthread_mutex_lock (g_mutex_get_impl (mutex))) != 0) |
223 | | g_thread_abort (status, "pthread_mutex_lock"); |
224 | | } |
225 | | |
226 | | /** |
227 | | * g_mutex_unlock: |
228 | | * @mutex: a #GMutex |
229 | | * |
230 | | * Unlocks @mutex. If another thread is blocked in a g_mutex_lock() |
231 | | * call for @mutex, it will become unblocked and can lock @mutex itself. |
232 | | * |
233 | | * Calling g_mutex_unlock() on a mutex that is not locked by the |
234 | | * current thread leads to undefined behaviour. |
235 | | */ |
236 | | void |
237 | | g_mutex_unlock (GMutex *mutex) |
238 | | { |
239 | | gint status; |
240 | | |
241 | | if G_UNLIKELY ((status = pthread_mutex_unlock (g_mutex_get_impl (mutex))) != 0) |
242 | | g_thread_abort (status, "pthread_mutex_unlock"); |
243 | | } |
244 | | |
245 | | /** |
246 | | * g_mutex_trylock: |
247 | | * @mutex: a #GMutex |
248 | | * |
249 | | * Tries to lock @mutex. If @mutex is already locked by another thread, |
250 | | * it immediately returns %FALSE. Otherwise it locks @mutex and returns |
251 | | * %TRUE. |
252 | | * |
253 | | * #GMutex is neither guaranteed to be recursive nor to be |
254 | | * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has |
255 | | * already been locked by the same thread results in undefined behaviour |
256 | | * (including but not limited to deadlocks or arbitrary return values). |
257 | | * |
258 | | * Returns: %TRUE if @mutex could be locked |
259 | | */ |
260 | | gboolean |
261 | | g_mutex_trylock (GMutex *mutex) |
262 | | { |
263 | | gint status; |
264 | | |
265 | | if G_LIKELY ((status = pthread_mutex_trylock (g_mutex_get_impl (mutex))) == 0) |
266 | | return TRUE; |
267 | | |
268 | | if G_UNLIKELY (status != EBUSY) |
269 | | g_thread_abort (status, "pthread_mutex_trylock"); |
270 | | |
271 | | return FALSE; |
272 | | } |
273 | | |
274 | | #endif /* !defined(USE_NATIVE_MUTEX) */ |
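
Whichever implementation is compiled in, g_mutex_trylock() enables
opportunistic locking. A small sketch, assuming hypothetical
cache_lock/flush_cache() names:

|[<!-- language="C" -->
static GMutex cache_lock;

static void
maybe_flush (void)
{
  /* Flush only if the lock is free right now; otherwise skip this
   * round instead of blocking. */
  if (g_mutex_trylock (&cache_lock))
    {
      flush_cache ();
      g_mutex_unlock (&cache_lock);
    }
}
]|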
275 | | |
276 | | /* {{{1 GRecMutex */ |
277 | | |
278 | | static pthread_mutex_t * |
279 | | g_rec_mutex_impl_new (void) |
280 | 39 | { |
281 | 39 | pthread_mutexattr_t attr; |
282 | 39 | pthread_mutex_t *mutex; |
283 | | |
284 | 39 | mutex = malloc (sizeof (pthread_mutex_t)); |
285 | 39 | if G_UNLIKELY (mutex == NULL) |
286 | 0 | g_thread_abort (errno, "malloc"); |
287 | | |
288 | 39 | pthread_mutexattr_init (&attr); |
289 | 39 | pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE); |
290 | 39 | pthread_mutex_init (mutex, &attr); |
291 | 39 | pthread_mutexattr_destroy (&attr); |
292 | | |
293 | 39 | return mutex; |
294 | 39 | } |
295 | | |
296 | | static void |
297 | | g_rec_mutex_impl_free (pthread_mutex_t *mutex) |
298 | 0 | { |
299 | 0 | pthread_mutex_destroy (mutex); |
300 | 0 | free (mutex); |
301 | 0 | } |
302 | | |
303 | | static inline pthread_mutex_t * |
304 | | g_rec_mutex_get_impl (GRecMutex *rec_mutex) |
305 | 697 | { |
306 | 697 | pthread_mutex_t *impl = g_atomic_pointer_get (&rec_mutex->p); |
307 | | |
308 | 697 | if G_UNLIKELY (impl == NULL) |
309 | 39 | { |
310 | 39 | impl = g_rec_mutex_impl_new (); |
311 | 39 | if (!g_atomic_pointer_compare_and_exchange (&rec_mutex->p, NULL, impl)) |
312 | 0 | g_rec_mutex_impl_free (impl); |
313 | 39 | impl = rec_mutex->p; |
314 | 39 | } |
315 | | |
316 | 697 | return impl; |
317 | 697 | } |
318 | | |
319 | | /** |
320 | | * g_rec_mutex_init: |
321 | | * @rec_mutex: an uninitialized #GRecMutex |
322 | | * |
323 | | * Initializes a #GRecMutex so that it can be used. |
324 | | * |
325 | | * This function is useful to initialize a recursive mutex |
326 | | * that has been allocated on the stack, or as part of a larger |
327 | | * structure. |
328 | | * |
329 | | * It is not necessary to initialise a recursive mutex that has been |
330 | | * statically allocated. |
331 | | * |
332 | | * |[<!-- language="C" --> |
333 | | * typedef struct { |
334 | | * GRecMutex m; |
335 | | * ... |
336 | | * } Blob; |
337 | | * |
338 | | * Blob *b; |
339 | | * |
340 | | * b = g_new (Blob, 1); |
341 | | * g_rec_mutex_init (&b->m); |
342 | | * ]| |
343 | | * |
344 | | * Calling g_rec_mutex_init() on an already initialized #GRecMutex |
345 | | * leads to undefined behaviour. |
346 | | * |
347 | | * To undo the effect of g_rec_mutex_init() when a recursive mutex |
348 | | * is no longer needed, use g_rec_mutex_clear(). |
349 | | * |
350 | | * Since: 2.32 |
351 | | */ |
352 | | void |
353 | | g_rec_mutex_init (GRecMutex *rec_mutex) |
354 | 0 | { |
355 | 0 | rec_mutex->p = g_rec_mutex_impl_new (); |
356 | 0 | } |
357 | | |
358 | | /** |
359 | | * g_rec_mutex_clear: |
360 | | * @rec_mutex: an initialized #GRecMutex |
361 | | * |
362 | | * Frees the resources allocated to a recursive mutex with |
363 | | * g_rec_mutex_init(). |
364 | | * |
365 | | * This function should not be used with a #GRecMutex that has been |
366 | | * statically allocated. |
367 | | * |
368 | | * Calling g_rec_mutex_clear() on a locked recursive mutex leads |
369 | | * to undefined behaviour. |
370 | | * |
371 | | * Since: 2.32 |
372 | | */ |
373 | | void |
374 | | g_rec_mutex_clear (GRecMutex *rec_mutex) |
375 | 0 | { |
376 | 0 | g_rec_mutex_impl_free (rec_mutex->p); |
377 | 0 | } |
378 | | |
379 | | /** |
380 | | * g_rec_mutex_lock: |
381 | | * @rec_mutex: a #GRecMutex |
382 | | * |
383 | | * Locks @rec_mutex. If @rec_mutex is already locked by another |
384 | | * thread, the current thread will block until @rec_mutex is |
385 | | * unlocked by the other thread. If @rec_mutex is already locked |
386 | | * by the current thread, the 'lock count' of @rec_mutex is increased. |
387 | | * The mutex will only become available again when it is unlocked |
388 | | * as many times as it has been locked. |
389 | | * |
390 | | * Since: 2.32 |
391 | | */ |
392 | | void |
393 | | g_rec_mutex_lock (GRecMutex *mutex) |
394 | 697 | { |
395 | 697 | pthread_mutex_lock (g_rec_mutex_get_impl (mutex)); |
396 | 697 | } |
397 | | |
398 | | /** |
399 | | * g_rec_mutex_unlock: |
400 | | * @rec_mutex: a #GRecMutex |
401 | | * |
402 | | * Unlocks @rec_mutex. If another thread is blocked in a |
403 | | * g_rec_mutex_lock() call for @rec_mutex, it will become unblocked |
404 | | * and can lock @rec_mutex itself. |
405 | | * |
406 | | * Calling g_rec_mutex_unlock() on a recursive mutex that is not |
407 | | * locked by the current thread leads to undefined behaviour. |
408 | | * |
409 | | * Since: 2.32 |
410 | | */ |
411 | | void |
412 | | g_rec_mutex_unlock (GRecMutex *rec_mutex) |
413 | 697 | { |
414 | 697 | pthread_mutex_unlock (rec_mutex->p); |
415 | 697 | } |
416 | | |
417 | | /** |
418 | | * g_rec_mutex_trylock: |
419 | | * @rec_mutex: a #GRecMutex |
420 | | * |
421 | | * Tries to lock @rec_mutex. If @rec_mutex is already locked |
422 | | * by another thread, it immediately returns %FALSE. Otherwise |
423 | | * it locks @rec_mutex and returns %TRUE. |
424 | | * |
425 | | * Returns: %TRUE if @rec_mutex could be locked |
426 | | * |
427 | | * Since: 2.32 |
428 | | */ |
429 | | gboolean |
430 | | g_rec_mutex_trylock (GRecMutex *rec_mutex) |
431 | 0 | { |
432 | 0 | if (pthread_mutex_trylock (g_rec_mutex_get_impl (rec_mutex)) != 0) |
433 | 0 | return FALSE; |
434 | | |
435 | 0 | return TRUE; |
436 | 0 | } |
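
A sketch of the lock counting described above; every
g_rec_mutex_lock() must be balanced by a g_rec_mutex_unlock() before
the mutex becomes available to other threads (outer() and inner() are
illustrative names):

|[<!-- language="C" -->
static GRecMutex rec_lock;

static void
inner (void)
{
  g_rec_mutex_lock (&rec_lock);    /* lock count: 2 */
  /* ... */
  g_rec_mutex_unlock (&rec_lock);  /* lock count: 1 */
}

static void
outer (void)
{
  g_rec_mutex_lock (&rec_lock);    /* lock count: 1 */
  inner ();                        /* re-entry by the same thread is safe */
  g_rec_mutex_unlock (&rec_lock);  /* lock count: 0 -- released */
}
]|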
437 | | |
438 | | /* {{{1 GRWLock */ |
439 | | |
440 | | static pthread_rwlock_t * |
441 | | g_rw_lock_impl_new (void) |
442 | 89 | { |
443 | 89 | pthread_rwlock_t *rwlock; |
444 | 89 | gint status; |
445 | | |
446 | 89 | rwlock = malloc (sizeof (pthread_rwlock_t)); |
447 | 89 | if G_UNLIKELY (rwlock == NULL) |
448 | 0 | g_thread_abort (errno, "malloc"); |
449 | | |
450 | 89 | if G_UNLIKELY ((status = pthread_rwlock_init (rwlock, NULL)) != 0) |
451 | 0 | g_thread_abort (status, "pthread_rwlock_init"); |
452 | | |
453 | 89 | return rwlock; |
454 | 89 | } |
455 | | |
456 | | static void |
457 | | g_rw_lock_impl_free (pthread_rwlock_t *rwlock) |
458 | 0 | { |
459 | 0 | pthread_rwlock_destroy (rwlock); |
460 | 0 | free (rwlock); |
461 | 0 | } |
462 | | |
463 | | static inline pthread_rwlock_t * |
464 | | g_rw_lock_get_impl (GRWLock *lock) |
465 | 937k | { |
466 | 937k | pthread_rwlock_t *impl = g_atomic_pointer_get (&lock->p); |
467 | | |
468 | 937k | if G_UNLIKELY (impl == NULL) |
469 | 89 | { |
470 | 89 | impl = g_rw_lock_impl_new (); |
471 | 89 | if (!g_atomic_pointer_compare_and_exchange (&lock->p, NULL, impl)) |
472 | 0 | g_rw_lock_impl_free (impl); |
473 | 89 | impl = lock->p; |
474 | 89 | } |
475 | | |
476 | 937k | return impl; |
477 | 937k | } |
478 | | |
479 | | /** |
480 | | * g_rw_lock_init: |
481 | | * @rw_lock: an uninitialized #GRWLock |
482 | | * |
483 | | * Initializes a #GRWLock so that it can be used. |
484 | | * |
485 | | * This function is useful to initialize a lock that has been |
486 | | * allocated on the stack, or as part of a larger structure. It is not |
487 | | * necessary to initialise a reader-writer lock that has been statically |
488 | | * allocated. |
489 | | * |
490 | | * |[<!-- language="C" --> |
491 | | * typedef struct { |
492 | | * GRWLock l; |
493 | | * ... |
494 | | * } Blob; |
495 | | * |
496 | | * Blob *b; |
497 | | * |
498 | | * b = g_new (Blob, 1); |
499 | | * g_rw_lock_init (&b->l); |
500 | | * ]| |
501 | | * |
502 | | * To undo the effect of g_rw_lock_init() when a lock is no longer |
503 | | * needed, use g_rw_lock_clear(). |
504 | | * |
505 | | * Calling g_rw_lock_init() on an already initialized #GRWLock leads |
506 | | * to undefined behaviour. |
507 | | * |
508 | | * Since: 2.32 |
509 | | */ |
510 | | void |
511 | | g_rw_lock_init (GRWLock *rw_lock) |
512 | 0 | { |
513 | 0 | rw_lock->p = g_rw_lock_impl_new (); |
514 | 0 | } |
515 | | |
516 | | /** |
517 | | * g_rw_lock_clear: |
518 | | * @rw_lock: an initialized #GRWLock |
519 | | * |
520 | | * Frees the resources allocated to a lock with g_rw_lock_init(). |
521 | | * |
522 | | * This function should not be used with a #GRWLock that has been |
523 | | * statically allocated. |
524 | | * |
525 | | * Calling g_rw_lock_clear() when any thread holds the lock |
526 | | * leads to undefined behaviour. |
527 | | * |
528 | | * Since: 2.32 |
529 | | */ |
530 | | void |
531 | | g_rw_lock_clear (GRWLock *rw_lock) |
532 | 0 | { |
533 | 0 | g_rw_lock_impl_free (rw_lock->p); |
534 | 0 | } |
535 | | |
536 | | /** |
537 | | * g_rw_lock_writer_lock: |
538 | | * @rw_lock: a #GRWLock |
539 | | * |
540 | | * Obtain a write lock on @rw_lock. If another thread currently holds |
541 | | * a read or write lock on @rw_lock, the current thread will block |
542 | | * until all other threads have dropped their locks on @rw_lock. |
543 | | * |
544 | | * Calling g_rw_lock_writer_lock() while the current thread already |
545 | | * owns a read or write lock on @rw_lock leads to undefined behaviour. |
546 | | * |
547 | | * Since: 2.32 |
548 | | */ |
549 | | void |
550 | | g_rw_lock_writer_lock (GRWLock *rw_lock) |
551 | 6.22k | { |
552 | 6.22k | int retval = pthread_rwlock_wrlock (g_rw_lock_get_impl (rw_lock)); |
553 | | |
554 | 6.22k | if (retval != 0) |
555 | 0 | g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval)); |
556 | 6.22k | } |
557 | | |
558 | | /** |
559 | | * g_rw_lock_writer_trylock: |
560 | | * @rw_lock: a #GRWLock |
561 | | * |
562 | | * Tries to obtain a write lock on @rw_lock. If another thread |
563 | | * currently holds a read or write lock on @rw_lock, it immediately |
564 | | * returns %FALSE. |
565 | | * Otherwise it locks @rw_lock and returns %TRUE. |
566 | | * |
567 | | * Returns: %TRUE if @rw_lock could be locked |
568 | | * |
569 | | * Since: 2.32 |
570 | | */ |
571 | | gboolean |
572 | | g_rw_lock_writer_trylock (GRWLock *rw_lock) |
573 | 0 | { |
574 | 0 | if (pthread_rwlock_trywrlock (g_rw_lock_get_impl (rw_lock)) != 0) |
575 | 0 | return FALSE; |
576 | | |
577 | 0 | return TRUE; |
578 | 0 | } |
579 | | |
580 | | /** |
581 | | * g_rw_lock_writer_unlock: |
582 | | * @rw_lock: a #GRWLock |
583 | | * |
584 | | * Release a write lock on @rw_lock. |
585 | | * |
586 | | * Calling g_rw_lock_writer_unlock() on a lock that is not held |
587 | | * by the current thread leads to undefined behaviour. |
588 | | * |
589 | | * Since: 2.32 |
590 | | */ |
591 | | void |
592 | | g_rw_lock_writer_unlock (GRWLock *rw_lock) |
593 | 6.22k | { |
594 | 6.22k | pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock)); |
595 | 6.22k | } |
596 | | |
597 | | /** |
598 | | * g_rw_lock_reader_lock: |
599 | | * @rw_lock: a #GRWLock |
600 | | * |
601 | | * Obtain a read lock on @rw_lock. If another thread currently holds |
602 | | * the write lock on @rw_lock, the current thread will block until the |
603 | | * write lock was (held and) released. If another thread does not hold |
604 | | * the write lock, but is waiting for it, it is implementation-defined |
605 | | * whether the reader or writer will block. Read locks can be taken |
606 | | * recursively. |
607 | | * |
608 | | * Calling g_rw_lock_reader_lock() while the current thread already |
609 | | * owns a write lock leads to undefined behaviour. Read locks however |
610 | | * can be taken recursively, in which case you need to make sure to |
611 | | * call g_rw_lock_reader_unlock() the same number of times. |
612 | | * |
613 | | * It is implementation-defined how many read locks are allowed to be |
614 | | * held on the same lock simultaneously. If the limit is hit, |
615 | | * or if a deadlock is detected, a critical warning will be emitted. |
616 | | * |
617 | | * Since: 2.32 |
618 | | */ |
619 | | void |
620 | | g_rw_lock_reader_lock (GRWLock *rw_lock) |
621 | 462k | { |
622 | 462k | int retval = pthread_rwlock_rdlock (g_rw_lock_get_impl (rw_lock)); |
623 | | |
624 | 462k | if (retval != 0) |
625 | 0 | g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval)); |
626 | 462k | } |
627 | | |
628 | | /** |
629 | | * g_rw_lock_reader_trylock: |
630 | | * @rw_lock: a #GRWLock |
631 | | * |
632 | | * Tries to obtain a read lock on @rw_lock and returns %TRUE if |
633 | | * the read lock was successfully obtained. Otherwise it |
634 | | * returns %FALSE. |
635 | | * |
636 | | * Returns: %TRUE if @rw_lock could be locked |
637 | | * |
638 | | * Since: 2.32 |
639 | | */ |
640 | | gboolean |
641 | | g_rw_lock_reader_trylock (GRWLock *rw_lock) |
642 | 0 | { |
643 | 0 | if (pthread_rwlock_tryrdlock (g_rw_lock_get_impl (rw_lock)) != 0) |
644 | 0 | return FALSE; |
645 | | |
646 | 0 | return TRUE; |
647 | 0 | } |
648 | | |
649 | | /** |
650 | | * g_rw_lock_reader_unlock: |
651 | | * @rw_lock: a #GRWLock |
652 | | * |
653 | | * Release a read lock on @rw_lock. |
654 | | * |
655 | | * Calling g_rw_lock_reader_unlock() on a lock that is not held |
656 | | * by the current thread leads to undefined behaviour. |
657 | | * |
658 | | * Since: 2.32 |
659 | | */ |
660 | | void |
661 | | g_rw_lock_reader_unlock (GRWLock *rw_lock) |
662 | 462k | { |
663 | 462k | pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock)); |
664 | 462k | } |
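
A typical reader/writer pattern tying these functions together, in the
spirit of the #GRWLock overview documentation (my_array_get() and
my_array_set() are illustrative):

|[<!-- language="C" -->
static GRWLock lock;
static GPtrArray *array;

gpointer
my_array_get (guint index)
{
  gpointer retval = NULL;

  g_rw_lock_reader_lock (&lock);
  if (array != NULL && index < array->len)
    retval = g_ptr_array_index (array, index);
  g_rw_lock_reader_unlock (&lock);

  return retval;
}

void
my_array_set (guint index, gpointer data)
{
  g_rw_lock_writer_lock (&lock);

  if (array == NULL)
    array = g_ptr_array_new ();
  if (index >= array->len)
    g_ptr_array_set_size (array, index + 1);
  g_ptr_array_index (array, index) = data;

  g_rw_lock_writer_unlock (&lock);
}
]|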
665 | | |
666 | | /* {{{1 GCond */ |
667 | | |
668 | | #if !defined(USE_NATIVE_MUTEX) |
669 | | |
670 | | static pthread_cond_t * |
671 | | g_cond_impl_new (void) |
672 | | { |
673 | | pthread_condattr_t attr; |
674 | | pthread_cond_t *cond; |
675 | | gint status; |
676 | | |
677 | | pthread_condattr_init (&attr); |
678 | | |
679 | | #ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP |
680 | | #elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC) |
681 | | if G_UNLIKELY ((status = pthread_condattr_setclock (&attr, CLOCK_MONOTONIC)) != 0) |
682 | | g_thread_abort (status, "pthread_condattr_setclock"); |
683 | | #else |
684 | | #error Cannot support GCond on your platform. |
685 | | #endif |
686 | | |
687 | | cond = malloc (sizeof (pthread_cond_t)); |
688 | | if G_UNLIKELY (cond == NULL) |
689 | | g_thread_abort (errno, "malloc"); |
690 | | |
691 | | if G_UNLIKELY ((status = pthread_cond_init (cond, &attr)) != 0) |
692 | | g_thread_abort (status, "pthread_cond_init"); |
693 | | |
694 | | pthread_condattr_destroy (&attr); |
695 | | |
696 | | return cond; |
697 | | } |
698 | | |
699 | | static void |
700 | | g_cond_impl_free (pthread_cond_t *cond) |
701 | | { |
702 | | pthread_cond_destroy (cond); |
703 | | free (cond); |
704 | | } |
705 | | |
706 | | static inline pthread_cond_t * |
707 | | g_cond_get_impl (GCond *cond) |
708 | | { |
709 | | pthread_cond_t *impl = g_atomic_pointer_get (&cond->p); |
710 | | |
711 | | if G_UNLIKELY (impl == NULL) |
712 | | { |
713 | | impl = g_cond_impl_new (); |
714 | | if (!g_atomic_pointer_compare_and_exchange (&cond->p, NULL, impl)) |
715 | | g_cond_impl_free (impl); |
716 | | impl = cond->p; |
717 | | } |
718 | | |
719 | | return impl; |
720 | | } |
721 | | |
722 | | /** |
723 | | * g_cond_init: |
724 | | * @cond: an uninitialized #GCond |
725 | | * |
726 | | * Initialises a #GCond so that it can be used. |
727 | | * |
728 | | * This function is useful to initialise a #GCond that has been |
729 | | * allocated as part of a larger structure. It is not necessary to |
730 | | * initialise a #GCond that has been statically allocated. |
731 | | * |
732 | | * To undo the effect of g_cond_init() when a #GCond is no longer |
733 | | * needed, use g_cond_clear(). |
734 | | * |
735 | | * Calling g_cond_init() on an already-initialised #GCond leads |
736 | | * to undefined behaviour. |
737 | | * |
738 | | * Since: 2.32 |
739 | | */ |
740 | | void |
741 | | g_cond_init (GCond *cond) |
742 | | { |
743 | | cond->p = g_cond_impl_new (); |
744 | | } |
745 | | |
746 | | /** |
747 | | * g_cond_clear: |
748 | | * @cond: an initialised #GCond |
749 | | * |
750 | | * Frees the resources allocated to a #GCond with g_cond_init(). |
751 | | * |
752 | | * This function should not be used with a #GCond that has been |
753 | | * statically allocated. |
754 | | * |
755 | | * Calling g_cond_clear() for a #GCond on which threads are |
756 | | * blocking leads to undefined behaviour. |
757 | | * |
758 | | * Since: 2.32 |
759 | | */ |
760 | | void |
761 | | g_cond_clear (GCond *cond) |
762 | | { |
763 | | g_cond_impl_free (cond->p); |
764 | | } |
765 | | |
766 | | /** |
767 | | * g_cond_wait: |
768 | | * @cond: a #GCond |
769 | | * @mutex: a #GMutex that is currently locked |
770 | | * |
771 | | * Atomically releases @mutex and waits until @cond is signalled. |
772 | | * When this function returns, @mutex is locked again and owned by the |
773 | | * calling thread. |
774 | | * |
775 | | * When using condition variables, it is possible that a spurious wakeup |
776 | | * may occur (ie: g_cond_wait() returns even though g_cond_signal() was |
777 | | * not called). It's also possible that a stolen wakeup may occur. |
778 | | * This is when g_cond_signal() is called, but another thread acquires |
779 | | * @mutex before this thread and modifies the state of the program in |
780 | | * such a way that when g_cond_wait() is able to return, the expected |
781 | | * condition is no longer met. |
782 | | * |
783 | | * For this reason, g_cond_wait() must always be used in a loop. See |
784 | | * the documentation for #GCond for a complete example. |
785 | | **/ |
786 | | void |
787 | | g_cond_wait (GCond *cond, |
788 | | GMutex *mutex) |
789 | | { |
790 | | gint status; |
791 | | |
792 | | if G_UNLIKELY ((status = pthread_cond_wait (g_cond_get_impl (cond), g_mutex_get_impl (mutex))) != 0) |
793 | | g_thread_abort (status, "pthread_cond_wait"); |
794 | | } |
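
The canonical wait-in-a-loop pattern, along the lines of the #GCond
overview documentation (current_data, data_mutex and data_cond are the
same illustrative globals used by the g_cond_wait_until() example
below):

|[<!-- language="C" -->
static GMutex data_mutex;
static GCond data_cond;
static gpointer current_data;

void
push_data (gpointer data)
{
  g_mutex_lock (&data_mutex);
  current_data = data;
  g_cond_signal (&data_cond);
  g_mutex_unlock (&data_mutex);
}

gpointer
pop_data (void)
{
  gpointer data;

  g_mutex_lock (&data_mutex);
  /* The loop guards against spurious and stolen wakeups. */
  while (!current_data)
    g_cond_wait (&data_cond, &data_mutex);
  data = current_data;
  current_data = NULL;
  g_mutex_unlock (&data_mutex);

  return data;
}
]|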
795 | | |
796 | | /** |
797 | | * g_cond_signal: |
798 | | * @cond: a #GCond |
799 | | * |
800 | | * If threads are waiting for @cond, at least one of them is unblocked. |
801 | | * If no threads are waiting for @cond, this function has no effect. |
802 | | * It is good practice to hold the same lock as the waiting thread |
803 | | * while calling this function, though not required. |
804 | | */ |
805 | | void |
806 | | g_cond_signal (GCond *cond) |
807 | | { |
808 | | gint status; |
809 | | |
810 | | if G_UNLIKELY ((status = pthread_cond_signal (g_cond_get_impl (cond))) != 0) |
811 | | g_thread_abort (status, "pthread_cond_signal"); |
812 | | } |
813 | | |
814 | | /** |
815 | | * g_cond_broadcast: |
816 | | * @cond: a #GCond |
817 | | * |
818 | | * If threads are waiting for @cond, all of them are unblocked. |
819 | | * If no threads are waiting for @cond, this function has no effect. |
820 | | * It is good practice to lock the same mutex as the waiting threads |
821 | | * while calling this function, though not required. |
822 | | */ |
823 | | void |
824 | | g_cond_broadcast (GCond *cond) |
825 | | { |
826 | | gint status; |
827 | | |
828 | | if G_UNLIKELY ((status = pthread_cond_broadcast (g_cond_get_impl (cond))) != 0) |
829 | | g_thread_abort (status, "pthread_cond_broadcast"); |
830 | | } |
831 | | |
832 | | /** |
833 | | * g_cond_wait_until: |
834 | | * @cond: a #GCond |
835 | | * @mutex: a #GMutex that is currently locked |
836 | | * @end_time: the monotonic time to wait until |
837 | | * |
838 | | * Waits until either @cond is signalled or @end_time has passed. |
839 | | * |
840 | | * As with g_cond_wait() it is possible that a spurious or stolen wakeup |
841 | | * could occur. For that reason, waiting on a condition variable should |
842 | | * always be in a loop, based on an explicitly-checked predicate. |
843 | | * |
844 | | * %TRUE is returned if the condition variable was signalled (or in the |
845 | | * case of a spurious wakeup). %FALSE is returned if @end_time has |
846 | | * passed. |
847 | | * |
848 | | * The following code shows how to correctly perform a timed wait on a |
849 | | * condition variable (extending the example presented in the |
850 | | * documentation for #GCond): |
851 | | * |
852 | | * |[<!-- language="C" --> |
853 | | * gpointer |
854 | | * pop_data_timed (void) |
855 | | * { |
856 | | * gint64 end_time; |
857 | | * gpointer data; |
858 | | * |
859 | | * g_mutex_lock (&data_mutex); |
860 | | * |
861 | | * end_time = g_get_monotonic_time () + 5 * G_TIME_SPAN_SECOND; |
862 | | * while (!current_data) |
863 | | * if (!g_cond_wait_until (&data_cond, &data_mutex, end_time)) |
864 | | * { |
865 | | * // timeout has passed. |
866 | | * g_mutex_unlock (&data_mutex); |
867 | | * return NULL; |
868 | | * } |
869 | | * |
870 | | * // there is data for us |
871 | | * data = current_data; |
872 | | * current_data = NULL; |
873 | | * |
874 | | * g_mutex_unlock (&data_mutex); |
875 | | * |
876 | | * return data; |
877 | | * } |
878 | | * ]| |
879 | | * |
880 | | * Notice that the end time is calculated once, before entering the |
881 | | * loop, and reused. This is the motivation behind the use of absolute |
882 | | * time on this API -- if a relative time of 5 seconds were passed |
883 | | * directly to the call and a spurious wakeup occurred, the program would |
884 | | * have to start over and wait again (which would lead to a total wait |
885 | | * time of more than 5 seconds). |
886 | | * |
887 | | * Returns: %TRUE on a signal, %FALSE on a timeout |
888 | | * Since: 2.32 |
889 | | **/ |
890 | | gboolean |
891 | | g_cond_wait_until (GCond *cond, |
892 | | GMutex *mutex, |
893 | | gint64 end_time) |
894 | | { |
895 | | struct timespec ts; |
896 | | gint status; |
897 | | |
898 | | #ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP |
899 | | /* end_time is given relative to the monotonic clock as returned by |
900 | | * g_get_monotonic_time(). |
901 | | * |
902 | | * Since this pthreads function wants the relative time, convert it back again. |
903 | | */ |
904 | | { |
905 | | gint64 now = g_get_monotonic_time (); |
906 | | gint64 relative; |
907 | | |
908 | | if (end_time <= now) |
909 | | return FALSE; |
910 | | |
911 | | relative = end_time - now; |
912 | | |
913 | | ts.tv_sec = relative / 1000000; |
914 | | ts.tv_nsec = (relative % 1000000) * 1000; |
915 | | |
916 | | if ((status = pthread_cond_timedwait_relative_np (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0) |
917 | | return TRUE; |
918 | | } |
919 | | #elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC) |
920 | | /* This is the exact check we used during init to set the clock to |
921 | | * monotonic, so if we're in this branch, timedwait() will already be |
922 | | * expecting a monotonic clock. |
923 | | */ |
924 | | { |
925 | | ts.tv_sec = end_time / 1000000; |
926 | | ts.tv_nsec = (end_time % 1000000) * 1000; |
927 | | |
928 | | if ((status = pthread_cond_timedwait (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0) |
929 | | return TRUE; |
930 | | } |
931 | | #else |
932 | | #error Cannot support GCond on your platform. |
933 | | #endif |
934 | | |
935 | | if G_UNLIKELY (status != ETIMEDOUT) |
936 | | g_thread_abort (status, "pthread_cond_timedwait"); |
937 | | |
938 | | return FALSE; |
939 | | } |
940 | | |
941 | | #endif /* defined(USE_NATIVE_MUTEX) */ |
942 | | |
943 | | /* {{{1 GPrivate */ |
944 | | |
945 | | /** |
946 | | * GPrivate: |
947 | | * |
948 | | * The #GPrivate struct is an opaque data structure to represent a |
949 | | * thread-local data key. It is approximately equivalent to the |
950 | | * pthread_setspecific()/pthread_getspecific() APIs on POSIX and to |
951 | | * TlsSetValue()/TlsGetValue() on Windows. |
952 | | * |
953 | | * If you don't already know why you might want this functionality, |
954 | | * then you probably don't need it. |
955 | | * |
956 | | * #GPrivate is a very limited resource (as few as 128 per program, |
957 | | * shared between all libraries). It is also not possible to destroy a |
958 | | * #GPrivate after it has been used. As such, it is only ever acceptable |
959 | | * to use #GPrivate in static scope, and even then sparingly so. |
960 | | * |
961 | | * See G_PRIVATE_INIT() for a couple of examples. |
962 | | * |
963 | | * The #GPrivate structure should be considered opaque. It should only |
964 | | * be accessed via the g_private_ functions. |
965 | | */ |
966 | | |
967 | | /** |
968 | | * G_PRIVATE_INIT: |
969 | | * @notify: a #GDestroyNotify |
970 | | * |
971 | | * A macro to assist with the static initialisation of a #GPrivate. |
972 | | * |
973 | | * This macro is useful for the case that a #GDestroyNotify function |
974 | | * should be associated with the key. This is needed when the key will be |
975 | | * used to point at memory that should be deallocated when the thread |
976 | | * exits. |
977 | | * |
978 | | * Additionally, the #GDestroyNotify will also be called on the previous |
979 | | * value stored in the key when g_private_replace() is used. |
980 | | * |
981 | | * If no #GDestroyNotify is needed, then use of this macro is not |
982 | | * required -- if the #GPrivate is declared in static scope then it will |
983 | | * be properly initialised by default (ie: to all zeros). See the |
984 | | * examples below. |
985 | | * |
986 | | * |[<!-- language="C" --> |
987 | | * static GPrivate name_key = G_PRIVATE_INIT (g_free); |
988 | | * |
989 | | * // return value should not be freed |
990 | | * const gchar * |
991 | | * get_local_name (void) |
992 | | * { |
993 | | * return g_private_get (&name_key); |
994 | | * } |
995 | | * |
996 | | * void |
997 | | * set_local_name (const gchar *name) |
998 | | * { |
999 | | * g_private_replace (&name_key, g_strdup (name)); |
1000 | | * } |
1001 | | * |
1002 | | * |
1003 | | * static GPrivate count_key; // no free function |
1004 | | * |
1005 | | * gint |
1006 | | * get_local_count (void) |
1007 | | * { |
1008 | | * return GPOINTER_TO_INT (g_private_get (&count_key)); |
1009 | | * } |
1010 | | * |
1011 | | * void |
1012 | | * set_local_count (gint count) |
1013 | | * { |
1014 | | * g_private_set (&count_key, GINT_TO_POINTER (count)); |
1015 | | * } |
1016 | | * ]| |
1017 | | * |
1018 | | * Since: 2.32 |
1019 | | **/ |
1020 | | |
1021 | | static pthread_key_t * |
1022 | | g_private_impl_new (GDestroyNotify notify) |
1023 | 140 | { |
1024 | 140 | pthread_key_t *key; |
1025 | 140 | gint status; |
1026 | | |
1027 | 140 | key = malloc (sizeof (pthread_key_t)); |
1028 | 140 | if G_UNLIKELY (key == NULL) |
1029 | 0 | g_thread_abort (errno, "malloc"); |
1030 | 140 | status = pthread_key_create (key, notify); |
1031 | 140 | if G_UNLIKELY (status != 0) |
1032 | 0 | g_thread_abort (status, "pthread_key_create"); |
1033 | | |
1034 | 140 | return key; |
1035 | 140 | } |
1036 | | |
1037 | | static void |
1038 | | g_private_impl_free (pthread_key_t *key) |
1039 | 0 | { |
1040 | 0 | gint status; |
1041 | |
1042 | 0 | status = pthread_key_delete (*key); |
1043 | 0 | if G_UNLIKELY (status != 0) |
1044 | 0 | g_thread_abort (status, "pthread_key_delete"); |
1045 | 0 | free (key); |
1046 | 0 | } |
1047 | | |
1048 | | static inline pthread_key_t * |
1049 | | g_private_get_impl (GPrivate *key) |
1050 | 263M | { |
1051 | 263M | pthread_key_t *impl = g_atomic_pointer_get (&key->p); |
1052 | | |
1053 | 263M | if G_UNLIKELY (impl == NULL) |
1054 | 140 | { |
1055 | 140 | impl = g_private_impl_new (key->notify); |
1056 | 140 | if (!g_atomic_pointer_compare_and_exchange (&key->p, NULL, impl)) |
1057 | 0 | { |
1058 | 0 | g_private_impl_free (impl); |
1059 | 0 | impl = key->p; |
1060 | 0 | } |
1061 | 140 | } |
1062 | | |
1063 | 263M | return impl; |
1064 | 263M | } |
1065 | | |
1066 | | /** |
1067 | | * g_private_get: |
1068 | | * @key: a #GPrivate |
1069 | | * |
1070 | | * Returns the current value of the thread local variable @key. |
1071 | | * |
1072 | | * If the value has not yet been set in this thread, %NULL is returned. |
1073 | | * Values are never copied between threads (when a new thread is |
1074 | | * created, for example). |
1075 | | * |
1076 | | * Returns: the thread-local value |
1077 | | */ |
1078 | | gpointer |
1079 | | g_private_get (GPrivate *key) |
1080 | 252M | { |
1081 | | /* quote POSIX: No errors are returned from pthread_getspecific(). */ |
1082 | 252M | return pthread_getspecific (*g_private_get_impl (key)); |
1083 | 252M | } |
1084 | | |
1085 | | /** |
1086 | | * g_private_set: |
1087 | | * @key: a #GPrivate |
1088 | | * @value: the new value |
1089 | | * |
1090 | | * Sets the thread local variable @key to have the value @value in the |
1091 | | * current thread. |
1092 | | * |
1093 | | * This function differs from g_private_replace() in the following way: |
1094 | | * the #GDestroyNotify for @key is not called on the old value. |
1095 | | */ |
1096 | | void |
1097 | | g_private_set (GPrivate *key, |
1098 | | gpointer value) |
1099 | 11.4M | { |
1100 | 11.4M | gint status; |
1101 | | |
1102 | 11.4M | if G_UNLIKELY ((status = pthread_setspecific (*g_private_get_impl (key), value)) != 0) |
1103 | 0 | g_thread_abort (status, "pthread_setspecific"); |
1104 | 11.4M | } |
1105 | | |
1106 | | /** |
1107 | | * g_private_replace: |
1108 | | * @key: a #GPrivate |
1109 | | * @value: the new value |
1110 | | * |
1111 | | * Sets the thread local variable @key to have the value @value in the |
1112 | | * current thread. |
1113 | | * |
1114 | | * This function differs from g_private_set() in the following way: if |
1115 | | * the previous value was non-%NULL then the #GDestroyNotify handler for |
1116 | | * @key is run on it. |
1117 | | * |
1118 | | * Since: 2.32 |
1119 | | **/ |
1120 | | void |
1121 | | g_private_replace (GPrivate *key, |
1122 | | gpointer value) |
1123 | 0 | { |
1124 | 0 | pthread_key_t *impl = g_private_get_impl (key); |
1125 | 0 | gpointer old; |
1126 | 0 | gint status; |
1127 | |
1128 | 0 | old = pthread_getspecific (*impl); |
1129 | |
1130 | 0 | if G_UNLIKELY ((status = pthread_setspecific (*impl, value)) != 0) |
1131 | 0 | g_thread_abort (status, "pthread_setspecific"); |
1132 | |
1133 | 0 | if (old && key->notify) |
1134 | 0 | key->notify (old); |
1135 | 0 | } |
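
The practical difference from g_private_set() when the key carries a
#GDestroyNotify, as a hypothetical sketch:

|[<!-- language="C" -->
static GPrivate name_key = G_PRIVATE_INIT (g_free);

static void
rename_worker (void)
{
  g_private_set (&name_key, g_strdup ("worker-1"));

  /* replace: g_free() is first called on the old "worker-1" */
  g_private_replace (&name_key, g_strdup ("worker-2"));

  /* set: the old "worker-2" string would be leaked */
  g_private_set (&name_key, g_strdup ("worker-3"));
}
]|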
1136 | | |
1137 | | /* {{{1 GThread */ |
1138 | | |
1139 | 0 | #define posix_check_err(err, name) G_STMT_START{ \ |
1140 | 0 | int error = (err); \ |
1141 | 0 | if (error) \ |
1142 | 0 | g_error ("file %s: line %d (%s): error '%s' during '%s'", \ |
1143 | 0 | __FILE__, __LINE__, G_STRFUNC, \ |
1144 | 0 | g_strerror (error), name); \ |
1145 | 0 | }G_STMT_END |
1146 | | |
1147 | 0 | #define posix_check_cmd(cmd) posix_check_err (cmd, #cmd) |
1148 | | |
1149 | | typedef struct |
1150 | | { |
1151 | | GRealThread thread; |
1152 | | |
1153 | | pthread_t system_thread; |
1154 | | gboolean joined; |
1155 | | GMutex lock; |
1156 | | |
1157 | | void *(*proxy) (void *); |
1158 | | |
1159 | | /* Must be statically allocated and valid forever */ |
1160 | | const GThreadSchedulerSettings *scheduler_settings; |
1161 | | } GThreadPosix; |
1162 | | |
1163 | | void |
1164 | | g_system_thread_free (GRealThread *thread) |
1165 | 0 | { |
1166 | 0 | GThreadPosix *pt = (GThreadPosix *) thread; |
1167 | |
1168 | 0 | if (!pt->joined) |
1169 | 0 | pthread_detach (pt->system_thread); |
1170 | |
1171 | 0 | g_mutex_clear (&pt->lock); |
1172 | |
1173 | 0 | g_slice_free (GThreadPosix, pt); |
1174 | 0 | } |
1175 | | |
1176 | | gboolean |
1177 | | g_system_thread_get_scheduler_settings (GThreadSchedulerSettings *scheduler_settings) |
1178 | 0 | { |
1179 | | /* FIXME: Implement the same for macOS and the BSDs so it doesn't go through |
1180 | | * the fallback code using an additional thread. */ |
1181 | 0 | #if defined(HAVE_SYS_SCHED_GETATTR) |
1182 | 0 | pid_t tid; |
1183 | 0 | int res; |
1184 | | /* FIXME: The struct definition does not seem to be possible to pull in |
1185 | | * via any of the normal system headers and it's only declared in the |
1186 | | * kernel headers. That's why we hardcode 56 here right now. */ |
1187 | 0 | guint size = 56; /* Size as of Linux 5.3.9 */ |
1188 | 0 | guint flags = 0; |
1189 | |
1190 | 0 | tid = (pid_t) syscall (SYS_gettid); |
1191 | |
1192 | 0 | scheduler_settings->attr = g_malloc0 (size); |
1193 | |
1194 | 0 | do |
1195 | 0 | { |
1196 | 0 | int errsv; |
1197 | |
1198 | 0 | res = syscall (SYS_sched_getattr, tid, scheduler_settings->attr, size, flags); |
1199 | 0 | errsv = errno; |
1200 | 0 | if (res == -1) |
1201 | 0 | { |
1202 | 0 | if (errsv == EAGAIN) |
1203 | 0 | { |
1204 | 0 | continue; |
1205 | 0 | } |
1206 | 0 | else if (errsv == E2BIG) |
1207 | 0 | { |
1208 | 0 | g_assert (size < G_MAXINT); |
1209 | 0 | size *= 2; |
1210 | 0 | scheduler_settings->attr = g_realloc (scheduler_settings->attr, size); |
1211 | | /* Needs to be zero-initialized */ |
1212 | 0 | memset (scheduler_settings->attr, 0, size); |
1213 | 0 | } |
1214 | 0 | else |
1215 | 0 | { |
1216 | 0 | g_debug ("Failed to get thread scheduler attributes: %s", g_strerror (errsv)); |
1217 | 0 | g_free (scheduler_settings->attr); |
1218 | |
1219 | 0 | return FALSE; |
1220 | 0 | } |
1221 | 0 | } |
1222 | 0 | } |
1223 | 0 | while (res == -1); |
1224 | | |
1225 | | /* Try setting them on the current thread to see if any system policies are |
1226 | | * in place that would disallow doing so */ |
1227 | 0 | res = syscall (SYS_sched_setattr, tid, scheduler_settings->attr, flags); |
1228 | 0 | if (res == -1) |
1229 | 0 | { |
1230 | 0 | int errsv = errno; |
1231 | |
1232 | 0 | g_debug ("Failed to set thread scheduler attributes: %s", g_strerror (errsv)); |
1233 | 0 | g_free (scheduler_settings->attr); |
1234 | |
1235 | 0 | return FALSE; |
1236 | 0 | } |
1237 | | |
1238 | 0 | return TRUE; |
1239 | | #else |
1240 | | return FALSE; |
1241 | | #endif |
1242 | 0 | } |
1243 | | |
1244 | | #if defined(HAVE_SYS_SCHED_GETATTR) |
1245 | | static void * |
1246 | | linux_pthread_proxy (void *data) |
1247 | 0 | { |
1248 | 0 | GThreadPosix *thread = data; |
1249 | 0 | static gboolean printed_scheduler_warning = FALSE; /* (atomic) */ |
1250 | | |
1251 | | /* Set scheduler settings first if requested */ |
1252 | 0 | if (thread->scheduler_settings) |
1253 | 0 | { |
1254 | 0 | pid_t tid = 0; |
1255 | 0 | guint flags = 0; |
1256 | 0 | int res; |
1257 | 0 | int errsv; |
1258 | |
1259 | 0 | tid = (pid_t) syscall (SYS_gettid); |
1260 | 0 | res = syscall (SYS_sched_setattr, tid, thread->scheduler_settings->attr, flags); |
1261 | 0 | errsv = errno; |
1262 | 0 | if (res == -1 && g_atomic_int_compare_and_exchange (&printed_scheduler_warning, FALSE, TRUE)) |
1263 | 0 | g_critical ("Failed to set scheduler settings: %s", g_strerror (errsv)); |
1264 | 0 | else if (res == -1) |
1265 | 0 | g_debug ("Failed to set scheduler settings: %s", g_strerror (errsv)); |
1267 | 0 | } |
1268 | |
1269 | 0 | return thread->proxy (data); |
1270 | 0 | } |
1271 | | #endif |
1272 | | |
1273 | | GRealThread * |
1274 | | g_system_thread_new (GThreadFunc proxy, |
1275 | | gulong stack_size, |
1276 | | const GThreadSchedulerSettings *scheduler_settings, |
1277 | | const char *name, |
1278 | | GThreadFunc func, |
1279 | | gpointer data, |
1280 | | GError **error) |
1281 | 0 | { |
1282 | 0 | GThreadPosix *thread; |
1283 | 0 | GRealThread *base_thread; |
1284 | 0 | pthread_attr_t attr; |
1285 | 0 | gint ret; |
1286 | |
1287 | 0 | thread = g_slice_new0 (GThreadPosix); |
1288 | 0 | base_thread = (GRealThread*)thread; |
1289 | 0 | base_thread->ref_count = 2; |
1290 | 0 | base_thread->ours = TRUE; |
1291 | 0 | base_thread->thread.joinable = TRUE; |
1292 | 0 | base_thread->thread.func = func; |
1293 | 0 | base_thread->thread.data = data; |
1294 | 0 | base_thread->name = g_strdup (name); |
1295 | 0 | thread->scheduler_settings = scheduler_settings; |
1296 | 0 | thread->proxy = proxy; |
1297 | |
1298 | 0 | posix_check_cmd (pthread_attr_init (&attr)); |
1299 | |
1300 | 0 | #ifdef HAVE_PTHREAD_ATTR_SETSTACKSIZE |
1301 | 0 | if (stack_size) |
1302 | 0 | { |
1303 | 0 | #ifdef _SC_THREAD_STACK_MIN |
1304 | 0 | long min_stack_size = sysconf (_SC_THREAD_STACK_MIN); |
1305 | 0 | if (min_stack_size >= 0) |
1306 | 0 | stack_size = MAX ((gulong) min_stack_size, stack_size); |
1307 | 0 | #endif /* _SC_THREAD_STACK_MIN */ |
1308 | | /* No error check here, because some systems can't do it and |
1309 | | * we simply don't want threads to fail because of that. */ |
1310 | 0 | pthread_attr_setstacksize (&attr, stack_size); |
1311 | 0 | } |
1312 | 0 | #endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */ |
1313 | |
1314 | 0 | #ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED |
1315 | 0 | if (!scheduler_settings) |
1316 | 0 | { |
1317 | | /* While this is the default, better be explicit about it */ |
1318 | 0 | pthread_attr_setinheritsched (&attr, PTHREAD_INHERIT_SCHED); |
1319 | 0 | } |
1320 | 0 | #endif /* HAVE_PTHREAD_ATTR_SETINHERITSCHED */ |
1321 | |
1322 | 0 | #if defined(HAVE_SYS_SCHED_GETATTR) |
1323 | 0 | ret = pthread_create (&thread->system_thread, &attr, linux_pthread_proxy, thread); |
1324 | | #else |
1325 | | ret = pthread_create (&thread->system_thread, &attr, (void* (*)(void*))proxy, thread); |
1326 | | #endif |
1327 | |
1328 | 0 | posix_check_cmd (pthread_attr_destroy (&attr)); |
1329 | |
1330 | 0 | if (ret == EAGAIN) |
1331 | 0 | { |
1332 | 0 | g_set_error (error, G_THREAD_ERROR, G_THREAD_ERROR_AGAIN, |
1333 | 0 | "Error creating thread: %s", g_strerror (ret)); |
1334 | 0 | g_slice_free (GThreadPosix, thread); |
1335 | 0 | return NULL; |
1336 | 0 | } |
1337 | | |
1338 | 0 | posix_check_err (ret, "pthread_create"); |
1339 | |
1340 | 0 | g_mutex_init (&thread->lock); |
1341 | |
1342 | 0 | return (GRealThread *) thread; |
1343 | 0 | } |
1344 | | |
1345 | | /** |
1346 | | * g_thread_yield: |
1347 | | * |
1348 | | * Causes the calling thread to voluntarily relinquish the CPU, so |
1349 | | * that other threads can run. |
1350 | | * |
1351 | | * This function is often used as a method to make busy wait less evil. |
1352 | | */ |
1353 | | void |
1354 | | g_thread_yield (void) |
1355 | 0 | { |
1356 | 0 | sched_yield (); |
1357 | 0 | } |
1358 | | |
1359 | | void |
1360 | | g_system_thread_wait (GRealThread *thread) |
1361 | 0 | { |
1362 | 0 | GThreadPosix *pt = (GThreadPosix *) thread; |
1363 | |
1364 | 0 | g_mutex_lock (&pt->lock); |
1365 | |
1366 | 0 | if (!pt->joined) |
1367 | 0 | { |
1368 | 0 | posix_check_cmd (pthread_join (pt->system_thread, NULL)); |
1369 | 0 | pt->joined = TRUE; |
1370 | 0 | } |
1371 | |
1372 | 0 | g_mutex_unlock (&pt->lock); |
1373 | 0 | } |
1374 | | |
1375 | | void |
1376 | | g_system_thread_exit (void) |
1377 | 0 | { |
1378 | 0 | pthread_exit (NULL); |
1379 | 0 | } |
1380 | | |
1381 | | void |
1382 | | g_system_thread_set_name (const gchar *name) |
1383 | 0 | { |
1384 | | #if defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID) |
1385 | | pthread_setname_np (name); /* on OS X and iOS */ |
1386 | | #elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID) |
1387 | | pthread_setname_np (pthread_self (), name); /* on Linux and Solaris */ |
1388 | | #elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID_AND_ARG) |
1389 | | pthread_setname_np (pthread_self (), "%s", (gchar *) name); /* on NetBSD */ |
1390 | | #elif defined(HAVE_PTHREAD_SET_NAME_NP) |
1391 | | pthread_set_name_np (pthread_self (), name); /* on FreeBSD, DragonFlyBSD, OpenBSD */ |
1392 | | #endif |
1393 | 0 | } |
1394 | | |
1395 | | /* {{{1 GMutex and GCond futex implementation */ |
1396 | | |
1397 | | #if defined(USE_NATIVE_MUTEX) |
1398 | | |
1399 | | #include <linux/futex.h> |
1400 | | #include <sys/syscall.h> |
1401 | | |
1402 | | #ifndef FUTEX_WAIT_PRIVATE |
1403 | | #define FUTEX_WAIT_PRIVATE FUTEX_WAIT |
1404 | | #define FUTEX_WAKE_PRIVATE FUTEX_WAKE |
1405 | | #endif |
1406 | | |
1407 | | /* We should expand the set of operations available in gatomic once we |
1408 | | * have better C11 support in GCC in common distributions (ie: 4.9). |
1409 | | * |
1410 | | * Before then, let's define a couple of useful things for our own |
1411 | | * purposes... |
1412 | | */ |
1413 | | |
1414 | | #ifdef HAVE_STDATOMIC_H |
1415 | | |
1416 | | #include <stdatomic.h> |
1417 | | |
1418 | | #define exchange_acquire(ptr, new) \ |
1419 | 0 | atomic_exchange_explicit((atomic_uint *) (ptr), (new), __ATOMIC_ACQUIRE) |
1420 | | #define compare_exchange_acquire(ptr, old, new) \ |
1421 | 753k | atomic_compare_exchange_strong_explicit((atomic_uint *) (ptr), (old), (new), \ |
1422 | 753k | __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) |
1423 | | |
1424 | | #define exchange_release(ptr, new) \ |
1425 | 71.4M | atomic_exchange_explicit((atomic_uint *) (ptr), (new), __ATOMIC_RELEASE) |
1426 | | #define store_release(ptr, new) \ |
1427 | | atomic_store_explicit((atomic_uint *) (ptr), (new), __ATOMIC_RELEASE) |
1428 | | |
1429 | | #else |
1430 | | |
1431 | | #define exchange_acquire(ptr, new) \ |
1432 | | __atomic_exchange_4((ptr), (new), __ATOMIC_ACQUIRE) |
1433 | | #define compare_exchange_acquire(ptr, old, new) \ |
1434 | | __atomic_compare_exchange_4((ptr), (old), (new), 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) |
1435 | | |
1436 | | #define exchange_release(ptr, new) \ |
1437 | | __atomic_exchange_4((ptr), (new), __ATOMIC_RELEASE) |
1438 | | #define store_release(ptr, new) \ |
1439 | | __atomic_store_4((ptr), (new), __ATOMIC_RELEASE) |
1440 | | |
1441 | | #endif |
1442 | | |
1443 | | /* Our strategy for the mutex is pretty simple: |
1444 | | * |
1445 | | * 0: not in use |
1446 | | * |
1447 | | * 1: acquired by one thread only, no contention |
1448 | | * |
1449 | | * > 1: contended |
1450 | | * |
1451 | | * |
1452 | | * As such, attempting to acquire the lock should involve an increment. |
1453 | | * If we find that the previous value was 0 then we can return |
1454 | | * immediately. |
1455 | | * |
1456 | | * On unlock, we always store 0 to indicate that the lock is available. |
1457 | | * If the value there was 1 before then we didn't have contention and |
1458 | | * can return immediately. If the value was something other than 1 then |
1459 | | * we have the contended case and need to wake a waiter. |
1460 | | * |
1461 | | * If it was not 0 then there is another thread holding it and we must |
1462 | | * wait. We must always ensure that we mark a value >1 while we are |
1463 | | * waiting in order to instruct the holder to do a wake operation on |
1464 | | * unlock. |
1465 | | */ |
1466 | | |
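One possible two-thread interleaving under this strategy, as a worked
trace (not code):

  A: g_mutex_lock     atomic add 0 -> 1, previous 0: fast path, A holds the lock
  B: g_mutex_lock     atomic add 1 -> 2, previous 1: enter the slow path
  B: slow path        exchange to 2 returns 2 (non-zero): futex-wait while value is 2
  A: g_mutex_unlock   exchange to 0 returns 2 (!= 1): futex-wake one waiter
  B: wakes, retries   exchange to 2 returns 0: B now holds the lock (value left at 2)
  B: g_mutex_unlock   exchange to 0 returns 2 (!= 1): one extra, harmless wake
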
1467 | | void |
1468 | | g_mutex_init (GMutex *mutex) |
1469 | 0 | { |
1470 | 0 | mutex->i[0] = 0; |
1471 | 0 | } |
1472 | | |
1473 | | void |
1474 | | g_mutex_clear (GMutex *mutex) |
1475 | 0 | { |
1476 | 0 | if G_UNLIKELY (mutex->i[0] != 0) |
1477 | 0 | { |
1478 | 0 | fprintf (stderr, "g_mutex_clear() called on uninitialised or locked mutex\n"); |
1479 | 0 | g_abort (); |
1480 | 0 | } |
1481 | 0 | } |
1482 | | |
1483 | | static void __attribute__((noinline)) |
1484 | | g_mutex_lock_slowpath (GMutex *mutex) |
1485 | 0 | { |
1486 | | /* Set to 2 to indicate contention. If it was zero before then we |
1487 | | * just acquired the lock. |
1488 | | * |
1489 | | * Otherwise, sleep for as long as the 2 remains... |
1490 | | */ |
1491 | 0 | while (exchange_acquire (&mutex->i[0], 2) != 0) |
1492 | 0 | syscall (__NR_futex, &mutex->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) 2, NULL); |
1493 | 0 | } |
1494 | | |
1495 | | static void __attribute__((noinline)) |
1496 | | g_mutex_unlock_slowpath (GMutex *mutex, |
1497 | | guint prev) |
1498 | 0 | { |
1499 | | /* We seem to get better code for the uncontended case by splitting |
1500 | | * this out... |
1501 | | */ |
1502 | 0 | if G_UNLIKELY (prev == 0) |
1503 | 0 | { |
1504 | 0 | fprintf (stderr, "Attempt to unlock mutex that was not locked\n"); |
1505 | 0 | g_abort (); |
1506 | 0 | } |
1507 | | |
1508 | 0 | syscall (__NR_futex, &mutex->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL); |
1509 | 0 | } |
1510 | | |
1511 | | void |
1512 | | g_mutex_lock (GMutex *mutex) |
1513 | 70.6M | { |
1514 | | /* 0 -> 1 and we're done. Anything else, and we need to wait... */ |
1515 | 70.6M | if G_UNLIKELY (g_atomic_int_add (&mutex->i[0], 1) != 0) |
1516 | 0 | g_mutex_lock_slowpath (mutex); |
1517 | 70.6M | } |
1518 | | |
1519 | | void |
1520 | | g_mutex_unlock (GMutex *mutex) |
1521 | 71.4M | { |
1522 | 71.4M | guint prev; |
1523 | | |
1524 | 71.4M | prev = exchange_release (&mutex->i[0], 0); |
1525 | | |
1526 | | /* 1-> 0 and we're done. Anything else and we need to signal... */ |
1527 | 71.4M | if G_UNLIKELY (prev != 1) |
1528 | 0 | g_mutex_unlock_slowpath (mutex, prev); |
1529 | 71.4M | } |
1530 | | |
1531 | | gboolean |
1532 | | g_mutex_trylock (GMutex *mutex) |
1533 | 753k | { |
1534 | 753k | guint zero = 0; |
1535 | | |
1536 | | /* We don't want to touch the value at all unless we can move it from |
1537 | | * exactly 0 to 1. |
1538 | | */ |
1539 | 753k | return compare_exchange_acquire (&mutex->i[0], &zero, 1); |
1540 | 753k | } |
1541 | | |
1542 | | /* Condition variables are implemented in a rather simple way as well. |
1543 | | * In many ways, futex() as an abstraction is even more ideally suited |
1544 | | * to condition variables than it is to mutexes. |
1545 | | * |
1546 | | * We store a generation counter. We sample it with the lock held and |
1547 | | * unlock before sleeping on the futex. |
1548 | | * |
1549 | | * Signalling simply involves increasing the counter and making the |
1550 | | * appropriate futex call. |
1551 | | * |
1552 | | * The only thing that is the slightest bit complicated is timed waits |
1553 | | * because we must convert our absolute time to relative. |
1554 | | */ |
1555 | | |
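A worked trace of why sampling the counter while the mutex is still
held cannot miss a signal:

  Waiter:    sampled = i[0]               (mutex held)
  Waiter:    g_mutex_unlock (mutex)
  Signaller: i[0]++; futex-wake           (may find no waiter yet)
  Waiter:    futex-wait (&i[0], sampled)  returns immediately,
                                          because i[0] != sampled
  Waiter:    g_mutex_lock (mutex); recheck the predicate

FUTEX_WAIT only blocks if the futex word still equals the expected
value, so a signal arriving between the unlock and the wait is never
lost.
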
1556 | | void |
1557 | | g_cond_init (GCond *cond) |
1558 | 0 | { |
1559 | 0 | cond->i[0] = 0; |
1560 | 0 | } |
1561 | | |
1562 | | void |
1563 | | g_cond_clear (GCond *cond) |
1564 | 0 | { |
1565 | 0 | } |
1566 | | |
1567 | | void |
1568 | | g_cond_wait (GCond *cond, |
1569 | | GMutex *mutex) |
1570 | 0 | { |
1571 | 0 | guint sampled = (guint) g_atomic_int_get (&cond->i[0]); |
1572 | |
|
1573 | 0 | g_mutex_unlock (mutex); |
1574 | 0 | syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, NULL); |
1575 | 0 | g_mutex_lock (mutex); |
1576 | 0 | } |
1577 | | |
1578 | | void |
1579 | | g_cond_signal (GCond *cond) |
1580 | 0 | { |
1581 | 0 | g_atomic_int_inc (&cond->i[0]); |
1582 | |
1583 | 0 | syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL); |
1584 | 0 | } |
1585 | | |
1586 | | void |
1587 | | g_cond_broadcast (GCond *cond) |
1588 | 667 | { |
1589 | 667 | g_atomic_int_inc (&cond->i[0]); |
1590 | | |
1591 | 667 | syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) INT_MAX, NULL); |
1592 | 667 | } |
1593 | | |
1594 | | gboolean |
1595 | | g_cond_wait_until (GCond *cond, |
1596 | | GMutex *mutex, |
1597 | | gint64 end_time) |
1598 | 0 | { |
1599 | 0 | struct timespec now; |
1600 | 0 | struct timespec span; |
1601 | 0 | guint sampled; |
1602 | 0 | int res; |
1603 | 0 | gboolean success; |
1604 | |
1605 | 0 | if (end_time < 0) |
1606 | 0 | return FALSE; |
1607 | | |
1608 | 0 | clock_gettime (CLOCK_MONOTONIC, &now); |
1609 | 0 | span.tv_sec = (end_time / 1000000) - now.tv_sec; |
1610 | 0 | span.tv_nsec = ((end_time % 1000000) * 1000) - now.tv_nsec; |
1611 | 0 | if (span.tv_nsec < 0) |
1612 | 0 | { |
1613 | 0 | span.tv_nsec += 1000000000; |
1614 | 0 | span.tv_sec--; |
1615 | 0 | } |
1616 | |
1617 | 0 | if (span.tv_sec < 0) |
1618 | 0 | return FALSE; |
1619 | | |
1620 | 0 | sampled = cond->i[0]; |
1621 | 0 | g_mutex_unlock (mutex); |
1622 | 0 | res = syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, &span); |
1623 | 0 | success = (res < 0 && errno == ETIMEDOUT) ? FALSE : TRUE; |
1624 | 0 | g_mutex_lock (mutex); |
1625 | |
1626 | 0 | return success; |
1627 | 0 | } |
1628 | | |
1629 | | #endif |
1630 | | |
1631 | | /* {{{1 Epilogue */ |
1632 | | /* vim:set foldmethod=marker: */ |