/src/pango/subprojects/glib/glib/gthread-posix.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* GLIB - Library of useful routines for C programming |
2 | | * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald |
3 | | * |
4 | | * gthread.c: posix thread system implementation |
5 | | * Copyright 1998 Sebastian Wilhelmi; University of Karlsruhe |
6 | | * |
7 | | * SPDX-License-Identifier: LGPL-2.1-or-later |
8 | | * |
9 | | * This library is free software; you can redistribute it and/or |
10 | | * modify it under the terms of the GNU Lesser General Public |
11 | | * License as published by the Free Software Foundation; either |
12 | | * version 2.1 of the License, or (at your option) any later version. |
13 | | * |
14 | | * This library is distributed in the hope that it will be useful, |
15 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
17 | | * Lesser General Public License for more details. |
18 | | * |
19 | | * You should have received a copy of the GNU Lesser General Public |
20 | | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
21 | | */ |
22 | | |
23 | | /* |
24 | | * Modified by the GLib Team and others 1997-2000. See the AUTHORS |
25 | | * file for a list of people on the GLib Team. See the ChangeLog |
26 | | * files for a list of changes. These files are distributed with |
27 | | * GLib at ftp://ftp.gtk.org/pub/gtk/. |
28 | | */ |
29 | | |
30 | | /* The GMutex, GCond and GPrivate implementations in this file are some |
31 | | * of the lowest-level code in GLib. All other parts of GLib (messages, |
32 | | * memory, slices, etc) assume that they can freely use these facilities |
33 | | * without risking recursion. |
34 | | * |
35 | | * As such, these functions are NOT permitted to call any other part of |
36 | | * GLib. |
37 | | * |
38 | | * The thread manipulation functions (create, exit, join, etc.) have |
39 | | * more freedom -- they can do as they please. |
40 | | */ |
41 | | |
42 | | #include "config.h" |
43 | | |
44 | | #include "gthread.h" |
45 | | |
46 | | #include "gmain.h" |
47 | | #include "gmessages.h" |
48 | | #include "gslice.h" |
49 | | #include "gstrfuncs.h" |
50 | | #include "gtestutils.h" |
51 | | #include "gthreadprivate.h" |
52 | | #include "gutils.h" |
53 | | |
54 | | #include <stdlib.h> |
55 | | #include <stdio.h> |
56 | | #include <string.h> |
57 | | #include <errno.h> |
58 | | #include <pthread.h> |
59 | | |
60 | | #include <sys/time.h> |
61 | | #include <unistd.h> |
62 | | |
63 | | #ifdef HAVE_PTHREAD_SET_NAME_NP |
64 | | #include <pthread_np.h> |
65 | | #endif |
66 | | #ifdef HAVE_SCHED_H |
67 | | #include <sched.h> |
68 | | #endif |
69 | | #ifdef G_OS_WIN32 |
70 | | #include <windows.h> |
71 | | #endif |
72 | | |
73 | | #if defined(HAVE_SYS_SCHED_GETATTR) |
74 | | #include <sys/syscall.h> |
75 | | #endif |
76 | | |
77 | | #if (defined(HAVE_FUTEX) || defined(HAVE_FUTEX_TIME64)) && \ |
78 | | (defined(HAVE_STDATOMIC_H) || defined(__ATOMIC_SEQ_CST)) |
79 | | #define USE_NATIVE_MUTEX |
80 | | #endif |
81 | | |
/* Report an unexpected, unrecoverable error from a C-library/pthreads call
 * and abort the process.
 *
 * @status: the errno-style error code returned by the failing call
 * @function: name of the failing call, for the diagnostic message
 *
 * Never returns.  This is the only failure mode available down here:
 * per the file header, this code may not call back into GLib proper.
 */
static void
g_thread_abort (gint status,
                const gchar *function)
{
  fprintf (stderr, "GLib (gthread-posix.c): Unexpected error from C library during '%s': %s. Aborting.\n",
           function, strerror (status));
  g_abort ();
}
90 | | |
91 | | /* {{{1 GMutex */ |
92 | | |
93 | | #if !defined(USE_NATIVE_MUTEX) |
94 | | |
95 | | static pthread_mutex_t * |
96 | | g_mutex_impl_new (void) |
97 | | { |
98 | | pthread_mutexattr_t *pattr = NULL; |
99 | | pthread_mutex_t *mutex; |
100 | | gint status; |
101 | | #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP |
102 | | pthread_mutexattr_t attr; |
103 | | #endif |
104 | | |
105 | | mutex = malloc (sizeof (pthread_mutex_t)); |
106 | | if G_UNLIKELY (mutex == NULL) |
107 | | g_thread_abort (errno, "malloc"); |
108 | | |
109 | | #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP |
110 | | pthread_mutexattr_init (&attr); |
111 | | pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ADAPTIVE_NP); |
112 | | pattr = &attr; |
113 | | #endif |
114 | | |
115 | | if G_UNLIKELY ((status = pthread_mutex_init (mutex, pattr)) != 0) |
116 | | g_thread_abort (status, "pthread_mutex_init"); |
117 | | |
118 | | #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP |
119 | | pthread_mutexattr_destroy (&attr); |
120 | | #endif |
121 | | |
122 | | return mutex; |
123 | | } |
124 | | |
/* Destroy and release a mutex previously created by g_mutex_impl_new(). */
static void
g_mutex_impl_free (pthread_mutex_t *mutex)
{
  pthread_mutex_destroy (mutex);
  free (mutex);
}
131 | | |
/* Return the pthread mutex backing @mutex, lazily allocating it on first
 * use.  On a lost allocation race the loser frees its copy and adopts the
 * winner's pointer; the plain re-read of mutex->p is safe because the
 * failed compare-and-exchange just synchronised with the winning store.
 */
static inline pthread_mutex_t *
g_mutex_get_impl (GMutex *mutex)
{
  pthread_mutex_t *impl = g_atomic_pointer_get (&mutex->p);

  if G_UNLIKELY (impl == NULL)
    {
      impl = g_mutex_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&mutex->p, NULL, impl))
        g_mutex_impl_free (impl);
      impl = mutex->p;
    }

  return impl;
}
147 | | |
148 | | |
/* Eagerly allocate the backing pthread mutex (non-lazy initialisation). */
G_ALWAYS_INLINE static inline void
g_mutex_init_impl (GMutex *mutex)
{
  mutex->p = g_mutex_impl_new ();
}
154 | | |
/* Release the backing pthread mutex; @mutex must be unlocked and unused. */
G_ALWAYS_INLINE static inline void
g_mutex_clear_impl (GMutex *mutex)
{
  g_mutex_impl_free (mutex->p);
}
160 | | |
161 | | G_ALWAYS_INLINE static inline void |
162 | | g_mutex_lock_impl (GMutex *mutex) |
163 | | { |
164 | | gint status; |
165 | | |
166 | | if G_UNLIKELY ((status = pthread_mutex_lock (g_mutex_get_impl (mutex))) != 0) |
167 | | g_thread_abort (status, "pthread_mutex_lock"); |
168 | | } |
169 | | |
170 | | G_ALWAYS_INLINE static inline void |
171 | | g_mutex_unlock_impl (GMutex *mutex) |
172 | | { |
173 | | gint status; |
174 | | |
175 | | if G_UNLIKELY ((status = pthread_mutex_unlock (g_mutex_get_impl (mutex))) != 0) |
176 | | g_thread_abort (status, "pthread_mutex_unlock"); |
177 | | } |
178 | | |
179 | | G_ALWAYS_INLINE static inline gboolean |
180 | | g_mutex_trylock_impl (GMutex *mutex) |
181 | | { |
182 | | gint status; |
183 | | |
184 | | if G_LIKELY ((status = pthread_mutex_trylock (g_mutex_get_impl (mutex))) == 0) |
185 | | return TRUE; |
186 | | |
187 | | if G_UNLIKELY (status != EBUSY) |
188 | | g_thread_abort (status, "pthread_mutex_trylock"); |
189 | | |
190 | | return FALSE; |
191 | | } |
192 | | |
193 | | #endif /* !defined(USE_NATIVE_MUTEX) */ |
194 | | |
195 | | /* {{{1 GRecMutex */ |
196 | | |
197 | | static pthread_mutex_t * |
198 | | g_rec_mutex_impl_new (void) |
199 | 12 | { |
200 | 12 | pthread_mutexattr_t attr; |
201 | 12 | pthread_mutex_t *mutex; |
202 | | |
203 | 12 | mutex = malloc (sizeof (pthread_mutex_t)); |
204 | 12 | if G_UNLIKELY (mutex == NULL) |
205 | 0 | g_thread_abort (errno, "malloc"); |
206 | | |
207 | 12 | pthread_mutexattr_init (&attr); |
208 | 12 | pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE); |
209 | 12 | pthread_mutex_init (mutex, &attr); |
210 | 12 | pthread_mutexattr_destroy (&attr); |
211 | | |
212 | 12 | return mutex; |
213 | 12 | } |
214 | | |
/* Destroy and release a recursive mutex made by g_rec_mutex_impl_new(). */
static void
g_rec_mutex_impl_free (pthread_mutex_t *mutex)
{
  pthread_mutex_destroy (mutex);
  free (mutex);
}
221 | | |
/* Return the recursive pthread mutex backing @rec_mutex, lazily creating
 * it on first use.  A racing loser frees its allocation and adopts the
 * winner's; the plain re-read is ordered by the preceding failed CAS.
 */
static inline pthread_mutex_t *
g_rec_mutex_get_impl (GRecMutex *rec_mutex)
{
  pthread_mutex_t *impl = g_atomic_pointer_get (&rec_mutex->p);

  if G_UNLIKELY (impl == NULL)
    {
      impl = g_rec_mutex_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&rec_mutex->p, NULL, impl))
        g_rec_mutex_impl_free (impl);
      impl = rec_mutex->p;
    }

  return impl;
}
237 | | |
/* Eagerly allocate the backing recursive mutex. */
G_ALWAYS_INLINE static inline void
g_rec_mutex_init_impl (GRecMutex *rec_mutex)
{
  rec_mutex->p = g_rec_mutex_impl_new ();
}
243 | | |
/* Release the backing recursive mutex; must be fully unlocked first. */
G_ALWAYS_INLINE static inline void
g_rec_mutex_clear_impl (GRecMutex *rec_mutex)
{
  g_rec_mutex_impl_free (rec_mutex->p);
}
249 | | |
/* Recursively lock @mutex (same thread may lock multiple times).
 * Errors are ignored here, matching the lax init in g_rec_mutex_impl_new(). */
G_ALWAYS_INLINE static inline void
g_rec_mutex_lock_impl (GRecMutex *mutex)
{
  pthread_mutex_lock (g_rec_mutex_get_impl (mutex));
}
255 | | |
/* Drop one recursion level.  Uses rec_mutex->p directly (no lazy-init
 * path): a lock must already be held, so the impl necessarily exists. */
G_ALWAYS_INLINE static inline void
g_rec_mutex_unlock_impl (GRecMutex *rec_mutex)
{
  pthread_mutex_unlock (rec_mutex->p);
}
261 | | |
262 | | G_ALWAYS_INLINE static inline gboolean |
263 | | g_rec_mutex_trylock_impl (GRecMutex *rec_mutex) |
264 | 0 | { |
265 | 0 | if (pthread_mutex_trylock (g_rec_mutex_get_impl (rec_mutex)) != 0) |
266 | 0 | return FALSE; |
267 | | |
268 | 0 | return TRUE; |
269 | 0 | } |
270 | | |
271 | | /* {{{1 GRWLock */ |
272 | | |
273 | | static pthread_rwlock_t * |
274 | | g_rw_lock_impl_new (void) |
275 | 22 | { |
276 | 22 | pthread_rwlock_t *rwlock; |
277 | 22 | gint status; |
278 | | |
279 | 22 | rwlock = malloc (sizeof (pthread_rwlock_t)); |
280 | 22 | if G_UNLIKELY (rwlock == NULL) |
281 | 0 | g_thread_abort (errno, "malloc"); |
282 | | |
283 | 22 | if G_UNLIKELY ((status = pthread_rwlock_init (rwlock, NULL)) != 0) |
284 | 0 | g_thread_abort (status, "pthread_rwlock_init"); |
285 | | |
286 | 22 | return rwlock; |
287 | 22 | } |
288 | | |
/* Destroy and release an rwlock made by g_rw_lock_impl_new(). */
static void
g_rw_lock_impl_free (pthread_rwlock_t *rwlock)
{
  pthread_rwlock_destroy (rwlock);
  free (rwlock);
}
295 | | |
/* Return the pthread rwlock backing @lock, lazily creating it on first
 * use.  Lost races free their allocation and adopt the winner's pointer;
 * the plain re-read is ordered by the failed CAS just before it.
 */
static inline pthread_rwlock_t *
g_rw_lock_get_impl (GRWLock *lock)
{
  pthread_rwlock_t *impl = g_atomic_pointer_get (&lock->p);

  if G_UNLIKELY (impl == NULL)
    {
      impl = g_rw_lock_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&lock->p, NULL, impl))
        g_rw_lock_impl_free (impl);
      impl = lock->p;
    }

  return impl;
}
311 | | |
/* Eagerly allocate the backing rwlock. */
G_ALWAYS_INLINE static inline void
g_rw_lock_init_impl (GRWLock *rw_lock)
{
  rw_lock->p = g_rw_lock_impl_new ();
}
317 | | |
/* Release the backing rwlock; must not be held by anyone. */
G_ALWAYS_INLINE static inline void
g_rw_lock_clear_impl (GRWLock *rw_lock)
{
  g_rw_lock_impl_free (rw_lock->p);
}
323 | | |
324 | | G_ALWAYS_INLINE static inline void |
325 | | g_rw_lock_writer_lock_impl (GRWLock *rw_lock) |
326 | 1.61k | { |
327 | 1.61k | int retval = pthread_rwlock_wrlock (g_rw_lock_get_impl (rw_lock)); |
328 | | |
329 | 1.61k | if (retval != 0) |
330 | 0 | g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval)); |
331 | 1.61k | } |
332 | | |
333 | | G_ALWAYS_INLINE static inline gboolean |
334 | | g_rw_lock_writer_trylock_impl (GRWLock *rw_lock) |
335 | 0 | { |
336 | 0 | if (pthread_rwlock_trywrlock (g_rw_lock_get_impl (rw_lock)) != 0) |
337 | 0 | return FALSE; |
338 | | |
339 | 0 | return TRUE; |
340 | 0 | } |
341 | | |
/* Release a writer hold on @rw_lock. */
G_ALWAYS_INLINE static inline void
g_rw_lock_writer_unlock_impl (GRWLock *rw_lock)
{
  pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
}
347 | | |
348 | | G_ALWAYS_INLINE static inline void |
349 | | g_rw_lock_reader_lock_impl (GRWLock *rw_lock) |
350 | 190M | { |
351 | 190M | int retval = pthread_rwlock_rdlock (g_rw_lock_get_impl (rw_lock)); |
352 | | |
353 | 190M | if (retval != 0) |
354 | 0 | g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval)); |
355 | 190M | } |
356 | | |
357 | | G_ALWAYS_INLINE static inline gboolean |
358 | | g_rw_lock_reader_trylock_impl (GRWLock *rw_lock) |
359 | 0 | { |
360 | 0 | if (pthread_rwlock_tryrdlock (g_rw_lock_get_impl (rw_lock)) != 0) |
361 | 0 | return FALSE; |
362 | | |
363 | 0 | return TRUE; |
364 | 0 | } |
365 | | |
/* Release a reader hold on @rw_lock. */
G_ALWAYS_INLINE static inline void
g_rw_lock_reader_unlock_impl (GRWLock *rw_lock)
{
  pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
}
371 | | |
372 | | /* {{{1 GCond */ |
373 | | |
374 | | #if !defined(USE_NATIVE_MUTEX) |
375 | | |
/* Allocate and initialise a heap pthread condition variable.
 *
 * Where the platform lacks pthread_cond_timedwait_relative_np(), the
 * condvar clock is switched to CLOCK_MONOTONIC so that
 * g_cond_wait_until()'s monotonic deadlines work; a platform with
 * neither facility cannot support GCond at all (#error).
 * Aborts on any failure; never returns NULL.
 */
static pthread_cond_t *
g_cond_impl_new (void)
{
  pthread_condattr_t attr;
  pthread_cond_t *cond;
  gint status;

  pthread_condattr_init (&attr);

#ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
  /* Relative timedwait available: default clock is fine. */
#elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
  if G_UNLIKELY ((status = pthread_condattr_setclock (&attr, CLOCK_MONOTONIC)) != 0)
    g_thread_abort (status, "pthread_condattr_setclock");
#else
#error Cannot support GCond on your platform.
#endif

  cond = malloc (sizeof (pthread_cond_t));
  if G_UNLIKELY (cond == NULL)
    g_thread_abort (errno, "malloc");

  if G_UNLIKELY ((status = pthread_cond_init (cond, &attr)) != 0)
    g_thread_abort (status, "pthread_cond_init");

  pthread_condattr_destroy (&attr);

  return cond;
}
404 | | |
/* Destroy and release a condvar made by g_cond_impl_new(). */
static void
g_cond_impl_free (pthread_cond_t *cond)
{
  pthread_cond_destroy (cond);
  free (cond);
}
411 | | |
/* Return the pthread condvar backing @cond, lazily creating it on first
 * use.  Lost races free their copy and adopt the winner's pointer; the
 * plain re-read is ordered by the failed CAS.
 */
static inline pthread_cond_t *
g_cond_get_impl (GCond *cond)
{
  pthread_cond_t *impl = g_atomic_pointer_get (&cond->p);

  if G_UNLIKELY (impl == NULL)
    {
      impl = g_cond_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&cond->p, NULL, impl))
        g_cond_impl_free (impl);
      impl = cond->p;
    }

  return impl;
}
427 | | |
/* Eagerly allocate the backing condvar. */
G_ALWAYS_INLINE static inline void
g_cond_init_impl (GCond *cond)
{
  cond->p = g_cond_impl_new ();
}
433 | | |
/* Release the backing condvar; no thread may be waiting on it. */
G_ALWAYS_INLINE static inline void
g_cond_clear_impl (GCond *cond)
{
  g_cond_impl_free (cond->p);
}
439 | | |
440 | | G_ALWAYS_INLINE static inline void |
441 | | g_cond_wait_impl (GCond *cond, |
442 | | GMutex *mutex) |
443 | | { |
444 | | gint status; |
445 | | |
446 | | if G_UNLIKELY ((status = pthread_cond_wait (g_cond_get_impl (cond), g_mutex_get_impl (mutex))) != 0) |
447 | | g_thread_abort (status, "pthread_cond_wait"); |
448 | | } |
449 | | |
450 | | G_ALWAYS_INLINE static inline void |
451 | | g_cond_signal_impl (GCond *cond) |
452 | | { |
453 | | gint status; |
454 | | |
455 | | if G_UNLIKELY ((status = pthread_cond_signal (g_cond_get_impl (cond))) != 0) |
456 | | g_thread_abort (status, "pthread_cond_signal"); |
457 | | } |
458 | | |
459 | | G_ALWAYS_INLINE static inline void |
460 | | g_cond_broadcast_impl (GCond *cond) |
461 | | { |
462 | | gint status; |
463 | | |
464 | | if G_UNLIKELY ((status = pthread_cond_broadcast (g_cond_get_impl (cond))) != 0) |
465 | | g_thread_abort (status, "pthread_cond_broadcast"); |
466 | | } |
467 | | |
/* Wait on @cond (with @mutex held) until signalled or until the
 * monotonic-clock deadline @end_time (microseconds, as from
 * g_get_monotonic_time()) passes.
 *
 * Returns: TRUE if the condvar was signalled, FALSE on timeout.
 * Any pthreads error other than ETIMEDOUT aborts.
 *
 * NOTE(review): on platforms with 32-bit time_t the tv_sec assignment
 * can truncate very large deadlines — presumably accepted upstream;
 * confirm before relying on far-future timeouts.
 */
G_ALWAYS_INLINE static inline gboolean
g_cond_wait_until_impl (GCond *cond,
                        GMutex *mutex,
                        gint64 end_time)
{
  struct timespec ts;
  gint status;

#ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
  /* end_time is given relative to the monotonic clock as returned by
   * g_get_monotonic_time().
   *
   * Since this pthreads wants the relative time, convert it back again.
   */
  {
    gint64 now = g_get_monotonic_time ();
    gint64 relative;

    /* Deadline already passed: report timeout without sleeping. */
    if (end_time <= now)
      return FALSE;

    relative = end_time - now;

    ts.tv_sec = relative / 1000000;
    ts.tv_nsec = (relative % 1000000) * 1000;

    if ((status = pthread_cond_timedwait_relative_np (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
      return TRUE;
  }
#elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
  /* This is the exact check we used during init to set the clock to
   * monotonic, so if we're in this branch, timedwait() will already be
   * expecting a monotonic clock.
   */
  {
    ts.tv_sec = end_time / 1000000;
    ts.tv_nsec = (end_time % 1000000) * 1000;

    if ((status = pthread_cond_timedwait (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
      return TRUE;
  }
#else
#error Cannot support GCond on your platform.
#endif

  /* Only a timeout is an acceptable non-zero status. */
  if G_UNLIKELY (status != ETIMEDOUT)
    g_thread_abort (status, "pthread_cond_timedwait");

  return FALSE;
}
518 | | |
519 | | #endif /* defined(USE_NATIVE_MUTEX) */ |
520 | | |
521 | | /* {{{1 GPrivate */ |
522 | | |
523 | | static pthread_key_t * |
524 | | g_private_impl_new (GDestroyNotify notify) |
525 | 0 | { |
526 | 0 | pthread_key_t *key; |
527 | 0 | gint status; |
528 | 0 |
|
529 | 0 | key = malloc (sizeof (pthread_key_t)); |
530 | 0 | if G_UNLIKELY (key == NULL) |
531 | 0 | g_thread_abort (errno, "malloc"); |
532 | 0 | status = pthread_key_create (key, notify); |
533 | 0 | if G_UNLIKELY (status != 0) |
534 | 0 | g_thread_abort (status, "pthread_key_create"); |
535 | 0 |
|
536 | 0 | return key; |
537 | 0 | } |
538 | | |
539 | | static void |
540 | | g_private_impl_free (pthread_key_t *key) |
541 | 0 | { |
542 | 0 | gint status; |
543 | 0 |
|
544 | 0 | status = pthread_key_delete (*key); |
545 | 0 | if G_UNLIKELY (status != 0) |
546 | 0 | g_thread_abort (status, "pthread_key_delete"); |
547 | 0 | free (key); |
548 | 0 | } |
549 | | |
550 | | static gpointer |
551 | | g_private_impl_new_direct (GDestroyNotify notify) |
552 | 11 | { |
553 | 11 | gpointer impl = (void *) (gssize) -1; |
554 | 11 | pthread_key_t key; |
555 | 11 | gint status; |
556 | | |
557 | 11 | status = pthread_key_create (&key, notify); |
558 | 11 | if G_UNLIKELY (status != 0) |
559 | 0 | g_thread_abort (status, "pthread_key_create"); |
560 | | |
561 | 11 | memcpy (&impl, &key, sizeof (pthread_key_t)); |
562 | | |
563 | | /* pthread_key_create could theoretically put a NULL value into key. |
564 | | * If that happens, waste the result and create a new one, since we |
565 | | * use NULL to mean "not yet allocated". |
566 | | * |
567 | | * This will only happen once per program run. |
568 | | * |
569 | | * We completely avoid this problem for the case where pthread_key_t |
570 | | * is smaller than void* (for example, on 64 bit Linux) by putting |
571 | | * some high bits in the value of 'impl' to start with. Since we only |
572 | | * overwrite part of the pointer, we will never end up with NULL. |
573 | | */ |
574 | 11 | if (sizeof (pthread_key_t) == sizeof (gpointer)) |
575 | 0 | { |
576 | 0 | if G_UNLIKELY (impl == NULL) |
577 | 0 | { |
578 | 0 | status = pthread_key_create (&key, notify); |
579 | 0 | if G_UNLIKELY (status != 0) |
580 | 0 | g_thread_abort (status, "pthread_key_create"); |
581 | |
|
582 | 0 | memcpy (&impl, &key, sizeof (pthread_key_t)); |
583 | |
|
584 | 0 | if G_UNLIKELY (impl == NULL) |
585 | 0 | g_thread_abort (status, "pthread_key_create (gave NULL result twice)"); |
586 | 0 | } |
587 | 0 | } |
588 | | |
589 | 11 | return impl; |
590 | 11 | } |
591 | | |
592 | | static void |
593 | | g_private_impl_free_direct (gpointer impl) |
594 | 0 | { |
595 | 0 | pthread_key_t tmp; |
596 | 0 | gint status; |
597 | |
|
598 | 0 | memcpy (&tmp, &impl, sizeof (pthread_key_t)); |
599 | |
|
600 | 0 | status = pthread_key_delete (tmp); |
601 | 0 | if G_UNLIKELY (status != 0) |
602 | 0 | g_thread_abort (status, "pthread_key_delete"); |
603 | 0 | } |
604 | | |
605 | | static inline pthread_key_t |
606 | | _g_private_get_impl (GPrivate *key) |
607 | 1.14G | { |
608 | 1.14G | if (sizeof (pthread_key_t) > sizeof (gpointer)) |
609 | 0 | { |
610 | 0 | pthread_key_t *impl = g_atomic_pointer_get (&key->p); |
611 | |
|
612 | 0 | if G_UNLIKELY (impl == NULL) |
613 | 0 | { |
614 | 0 | impl = g_private_impl_new (key->notify); |
615 | 0 | if (!g_atomic_pointer_compare_and_exchange (&key->p, NULL, impl)) |
616 | 0 | { |
617 | 0 | g_private_impl_free (impl); |
618 | 0 | impl = key->p; |
619 | 0 | } |
620 | 0 | } |
621 | |
|
622 | 0 | return *impl; |
623 | 0 | } |
624 | 1.14G | else |
625 | 1.14G | { |
626 | 1.14G | gpointer impl = g_atomic_pointer_get (&key->p); |
627 | 1.14G | pthread_key_t tmp; |
628 | | |
629 | 1.14G | if G_UNLIKELY (impl == NULL) |
630 | 11 | { |
631 | 11 | impl = g_private_impl_new_direct (key->notify); |
632 | 11 | if (!g_atomic_pointer_compare_and_exchange (&key->p, NULL, impl)) |
633 | 0 | { |
634 | 0 | g_private_impl_free_direct (impl); |
635 | 0 | impl = key->p; |
636 | 0 | } |
637 | 11 | } |
638 | | |
639 | 1.14G | memcpy (&tmp, &impl, sizeof (pthread_key_t)); |
640 | | |
641 | 1.14G | return tmp; |
642 | 1.14G | } |
643 | 1.14G | } |
644 | | |
645 | | G_ALWAYS_INLINE static inline gpointer |
646 | | g_private_get_impl (GPrivate *key) |
647 | 381M | { |
648 | | /* quote POSIX: No errors are returned from pthread_getspecific(). */ |
649 | 381M | return pthread_getspecific (_g_private_get_impl (key)); |
650 | 381M | } |
651 | | |
652 | | G_ALWAYS_INLINE static inline void |
653 | | g_private_set_impl (GPrivate *key, |
654 | | gpointer value) |
655 | 762M | { |
656 | 762M | gint status; |
657 | | |
658 | 762M | if G_UNLIKELY ((status = pthread_setspecific (_g_private_get_impl (key), value)) != 0) |
659 | 0 | g_thread_abort (status, "pthread_setspecific"); |
660 | 762M | } |
661 | | |
662 | | G_ALWAYS_INLINE static inline void |
663 | | g_private_replace_impl (GPrivate *key, |
664 | | gpointer value) |
665 | 0 | { |
666 | 0 | pthread_key_t impl = _g_private_get_impl (key); |
667 | 0 | gpointer old; |
668 | 0 | gint status; |
669 | |
|
670 | 0 | old = pthread_getspecific (impl); |
671 | |
|
672 | 0 | if G_UNLIKELY ((status = pthread_setspecific (impl, value)) != 0) |
673 | 0 | g_thread_abort (status, "pthread_setspecific"); |
674 | |
|
675 | 0 | if (old && key->notify) |
676 | 0 | key->notify (old); |
677 | 0 | } |
678 | | |
679 | | /* {{{1 GThread */ |
680 | | |
681 | 0 | #define posix_check_err(err, name) G_STMT_START{ \ |
682 | 0 | int error = (err); \ |
683 | 0 | if (error) \ |
684 | 0 | g_error ("file %s: line %d (%s): error '%s' during '%s'", \ |
685 | 0 | __FILE__, __LINE__, G_STRFUNC, \ |
686 | 0 | g_strerror (error), name); \ |
687 | 0 | }G_STMT_END |
688 | | |
689 | 0 | #define posix_check_cmd(cmd) posix_check_err (cmd, #cmd) |
690 | | |
/* POSIX-specific thread record: the portable GRealThread plus the
 * pthread handle and join bookkeeping. */
typedef struct
{
  GRealThread thread;       /* must be first: cast to/from GRealThread* */

  pthread_t system_thread;  /* handle from pthread_create() */
  gboolean joined;          /* TRUE once pthread_join() has completed */
  GMutex lock;              /* serialises concurrent join attempts */

  void *(*proxy) (void *);  /* trampoline run on the new thread */
} GThreadPosix;
701 | | |
/* Release a GThreadPosix once its last reference is dropped.  If the
 * thread was never joined, detach it so the kernel can reclaim its
 * resources when it exits. */
void
g_system_thread_free (GRealThread *thread)
{
  GThreadPosix *pt = (GThreadPosix *) thread;

  if (!pt->joined)
    pthread_detach (pt->system_thread);

  g_mutex_clear (&pt->lock);

  g_slice_free (GThreadPosix, pt);
}
714 | | |
/* Create a new system thread running @proxy(thread-record).
 *
 * @proxy: trampoline actually started on the new thread
 * @stack_size: requested stack size, 0 for the platform default
 * @name: optional thread name, copied into the record
 * @func/@data: the user function and argument, stored for @proxy to run
 * @error: set (G_THREAD_ERROR_AGAIN) if the system is out of threads
 *
 * Returns: the new thread record with ref_count 2 (one for the caller,
 * one for the thread itself), or NULL with @error set on EAGAIN.
 * Other pthread_create() failures are fatal via posix_check_err().
 */
GRealThread *
g_system_thread_new (GThreadFunc proxy,
                     gulong stack_size,
                     const char *name,
                     GThreadFunc func,
                     gpointer data,
                     GError **error)
{
  GThreadPosix *thread;
  GRealThread *base_thread;
  pthread_attr_t attr;
  gint ret;

  thread = g_slice_new0 (GThreadPosix);
  base_thread = (GRealThread*)thread;
  base_thread->ref_count = 2;
  base_thread->ours = TRUE;
  base_thread->thread.joinable = TRUE;
  base_thread->thread.func = func;
  base_thread->thread.data = data;
  if (name)
    g_strlcpy (base_thread->name, name, sizeof (base_thread->name));
  thread->proxy = proxy;

  posix_check_cmd (pthread_attr_init (&attr));

#ifdef HAVE_PTHREAD_ATTR_SETSTACKSIZE
  if (stack_size)
    {
#ifdef _SC_THREAD_STACK_MIN
      /* Clamp the request up to the platform's minimum thread stack. */
      long min_stack_size = sysconf (_SC_THREAD_STACK_MIN);
      if (min_stack_size >= 0)
        stack_size = MAX ((gulong) min_stack_size, stack_size);
#endif /* _SC_THREAD_STACK_MIN */
      /* No error check here, because some systems can't do it and
       * we simply don't want threads to fail because of that. */
      pthread_attr_setstacksize (&attr, stack_size);
    }
#endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */

#ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
  {
    /* While this is the default, better be explicit about it */
    pthread_attr_setinheritsched (&attr, PTHREAD_INHERIT_SCHED);
  }
#endif /* HAVE_PTHREAD_ATTR_SETINHERITSCHED */

  ret = pthread_create (&thread->system_thread, &attr, (void* (*)(void*))proxy, thread);

  posix_check_cmd (pthread_attr_destroy (&attr));

  if (ret == EAGAIN)
    {
      /* Resource exhaustion is the one recoverable failure: report it. */
      g_set_error (error, G_THREAD_ERROR, G_THREAD_ERROR_AGAIN,
                   "Error creating thread: %s", g_strerror (ret));
      g_slice_free (GThreadPosix, thread);
      return NULL;
    }

  posix_check_err (ret, "pthread_create");

  /* NOTE(review): the lock is initialised after pthread_create(); this
   * presumably relies on zero-initialised GMutex being valid here —
   * confirm against g_mutex_get_impl()'s lazy-init behaviour. */
  g_mutex_init (&thread->lock);

  return (GRealThread *) thread;
}
780 | | |
/* Yield the processor to another runnable thread, if any. */
G_ALWAYS_INLINE static inline void
g_thread_yield_impl (void)
{
  sched_yield ();
}
786 | | |
/* Join @thread, blocking until it exits.  The per-thread lock and the
 * 'joined' flag make this idempotent and safe against concurrent
 * waiters: pthread_join() runs at most once per thread. */
void
g_system_thread_wait (GRealThread *thread)
{
  GThreadPosix *pt = (GThreadPosix *) thread;

  g_mutex_lock (&pt->lock);

  if (!pt->joined)
    {
      posix_check_cmd (pthread_join (pt->system_thread, NULL));
      pt->joined = TRUE;
    }

  g_mutex_unlock (&pt->lock);
}
802 | | |
/* Terminate the calling thread.  Does not return. */
void
g_system_thread_exit (void)
{
  pthread_exit (NULL);
}
808 | | |
/* Set the calling thread's name using whichever non-portable pthreads
 * variant the platform offers; silently a no-op if none exists.  On
 * Linux the kernel limit is 16 bytes including NUL, so the name is
 * truncated via g_strlcpy() first. */
void
g_system_thread_set_name (const gchar *name)
{
#if defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID)
  pthread_setname_np (name);  /* on OS X and iOS */
#elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID)
#ifdef __linux__
#define MAX_THREADNAME_LEN 16
#else
#define MAX_THREADNAME_LEN 32
#endif
  char name_[MAX_THREADNAME_LEN];
  g_strlcpy (name_, name, MAX_THREADNAME_LEN);
  pthread_setname_np (pthread_self (), name_);  /* on Linux and Solaris */
#elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID_AND_ARG)
  pthread_setname_np (pthread_self (), "%s", (gchar *) name);  /* on NetBSD */
#elif defined(HAVE_PTHREAD_SET_NAME_NP)
  pthread_set_name_np (pthread_self (), name);  /* on FreeBSD, DragonFlyBSD, OpenBSD */
#endif
}
829 | | |
/* Copy the calling thread's name into @buffer (at most @length bytes).
 * Where pthread_getname_np() is unavailable the result is always "". */
void
g_system_thread_get_name (char  *buffer,
                          gsize  length)
{
#ifdef HAVE_PTHREAD_GETNAME_NP
  pthread_getname_np (pthread_self (), buffer, length);
#else
  g_assert (length >= 1);
  buffer[0] = '\0';
#endif
}
841 | | |
842 | | /* {{{1 GMutex and GCond futex implementation */ |
843 | | |
844 | | #if defined(USE_NATIVE_MUTEX) |
845 | | /* We should expand the set of operations available in gatomic once we |
846 | | * have better C11 support in GCC in common distributions (ie: 4.9). |
847 | | * |
848 | | * Before then, let's define a couple of useful things for our own |
849 | | * purposes... |
850 | | */ |
851 | | |
#ifdef HAVE_STDATOMIC_H

#include <stdatomic.h>

/* C11 <stdatomic.h> flavour: acquire ordering on the lock-taking
 * operations, release ordering on the lock-dropping ones. */
#define exchange_acquire(ptr, new) \
  atomic_exchange_explicit((atomic_uint *) (ptr), (new), __ATOMIC_ACQUIRE)
#define compare_exchange_acquire(ptr, old, new) \
  atomic_compare_exchange_strong_explicit((atomic_uint *) (ptr), (old), (new), \
                                          __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)

#define exchange_release(ptr, new) \
  atomic_exchange_explicit((atomic_uint *) (ptr), (new), __ATOMIC_RELEASE)
#define store_release(ptr, new) \
  atomic_store_explicit((atomic_uint *) (ptr), (new), __ATOMIC_RELEASE)

#else

/* Fallback on the GCC __atomic builtins with identical semantics. */
#define exchange_acquire(ptr, new) \
  __atomic_exchange_4((ptr), (new), __ATOMIC_ACQUIRE)
#define compare_exchange_acquire(ptr, old, new) \
  __atomic_compare_exchange_4((ptr), (old), (new), 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)

#define exchange_release(ptr, new) \
  __atomic_exchange_4((ptr), (new), __ATOMIC_RELEASE)
#define store_release(ptr, new) \
  __atomic_store_4((ptr), (new), __ATOMIC_RELEASE)

#endif
880 | | |
881 | | /* Our strategy for the mutex is pretty simple: |
882 | | * |
883 | | * 0: not in use |
884 | | * |
885 | | * 1: acquired by one thread only, no contention |
886 | | * |
887 | | * 2: contended |
888 | | */ |
889 | | |
/* Futex mutex states (see the strategy comment above): 0 = free,
 * 1 = held with no waiters, 2 = held with (possible) waiters. */
typedef enum {
  G_MUTEX_STATE_EMPTY = 0,
  G_MUTEX_STATE_OWNED,
  G_MUTEX_STATE_CONTENDED,
} GMutexState;
895 | | |
896 | | /* |
897 | | * As such, attempting to acquire the lock should involve an increment. |
898 | | * If we find that the previous value was 0 then we can return |
899 | | * immediately. |
900 | | * |
901 | | * On unlock, we always store 0 to indicate that the lock is available. |
902 | | * If the value there was 1 before then we didn't have contention and |
903 | | * can return immediately. If the value was something other than 1 then |
904 | | * we have the contended case and need to wake a waiter. |
905 | | * |
906 | | * If it was not 0 then there is another thread holding it and we must |
907 | | * wait. We must always ensure that we mark a value >1 while we are |
908 | | * waiting in order to instruct the holder to do a wake operation on |
909 | | * unlock. |
910 | | */ |
911 | | |
/* Futex-based mutex: initialisation is just storing the EMPTY state. */
void
g_mutex_init_impl (GMutex *mutex)
{
  mutex->i[0] = G_MUTEX_STATE_EMPTY;
}
917 | | |
/* Futex-based mutex: nothing to free, but clearing a locked mutex is a
 * programming error, so diagnose it loudly and abort. */
void
g_mutex_clear_impl (GMutex *mutex)
{
  if G_UNLIKELY (mutex->i[0] != G_MUTEX_STATE_EMPTY)
    {
      fprintf (stderr, "g_mutex_clear() called on uninitialised or locked mutex\n");
      g_abort ();
    }
}
927 | | |
/* Contended-lock path, kept out of line so the fast path stays small.
 * Marks the mutex CONTENDED (forcing the holder to futex-wake on
 * unlock) and sleeps until the exchange observes EMPTY, i.e. we
 * acquired the lock. */
G_GNUC_NO_INLINE
static void
g_mutex_lock_slowpath (GMutex *mutex)
{
  /* Set to contended.  If it was empty before then we
   * just acquired the lock.
   *
   * Otherwise, sleep for as long as the contended state remains...
   */
  while (exchange_acquire (&mutex->i[0], G_MUTEX_STATE_CONTENDED) != G_MUTEX_STATE_EMPTY)
    {
      g_futex_simple (&mutex->i[0], (gsize) FUTEX_WAIT_PRIVATE,
                      G_MUTEX_STATE_CONTENDED, NULL);
    }
}
943 | | |
/* Out-of-line unlock path for the contended (or erroneous) cases:
 * @prev is the state the fast path observed before storing EMPTY.
 * EMPTY means the mutex was never locked — fatal; otherwise it was
 * CONTENDED, so wake exactly one futex waiter. */
G_GNUC_NO_INLINE
static void
g_mutex_unlock_slowpath (GMutex *mutex,
                         guint   prev)
{
  /* We seem to get better code for the uncontended case by splitting
   * this out...
   */
  if G_UNLIKELY (prev == G_MUTEX_STATE_EMPTY)
    {
      fprintf (stderr, "Attempt to unlock mutex that was not locked\n");
      g_abort ();
    }

  g_futex_simple (&mutex->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
}
960 | | |
961 | | inline void |
962 | | g_mutex_lock_impl (GMutex *mutex) |
963 | 381M | { |
964 | | /* empty -> owned and we're done. Anything else, and we need to wait... */ |
965 | 381M | if G_UNLIKELY (!g_atomic_int_compare_and_exchange (&mutex->i[0], |
966 | 381M | G_MUTEX_STATE_EMPTY, |
967 | 381M | G_MUTEX_STATE_OWNED)) |
968 | 0 | g_mutex_lock_slowpath (mutex); |
969 | 381M | } |
970 | | |
971 | | void |
972 | | g_mutex_unlock_impl (GMutex *mutex) |
973 | 381M | { |
974 | 381M | guint prev; |
975 | | |
976 | 381M | prev = exchange_release (&mutex->i[0], G_MUTEX_STATE_EMPTY); |
977 | | |
978 | | /* 1-> 0 and we're done. Anything else and we need to signal... */ |
979 | 381M | if G_UNLIKELY (prev != G_MUTEX_STATE_OWNED) |
980 | 0 | g_mutex_unlock_slowpath (mutex, prev); |
981 | 381M | } |
982 | | |
983 | | gboolean |
984 | | g_mutex_trylock_impl (GMutex *mutex) |
985 | 0 | { |
986 | 0 | GMutexState empty = G_MUTEX_STATE_EMPTY; |
987 | | |
988 | | /* We don't want to touch the value at all unless we can move it from |
989 | | * exactly empty to owned. |
990 | | */ |
991 | 0 | return compare_exchange_acquire (&mutex->i[0], &empty, G_MUTEX_STATE_OWNED); |
992 | 0 | } |
993 | | |
994 | | /* Condition variables are implemented in a rather simple way as well. |
995 | | * In many ways, futex() as an abstraction is even more ideally suited |
996 | | * to condition variables than it is to mutexes. |
997 | | * |
998 | | * We store a generation counter. We sample it with the lock held and |
999 | | * unlock before sleeping on the futex. |
1000 | | * |
1001 | | * Signalling simply involves increasing the counter and making the |
1002 | | * appropriate futex call. |
1003 | | * |
1004 | | * The only thing that is the slightest bit complicated is timed waits |
1005 | | * because we must convert our absolute time to relative. |
1006 | | */ |
1007 | | |
1008 | | void |
1009 | | g_cond_init_impl (GCond *cond) |
1010 | 0 | { |
1011 | 0 | cond->i[0] = 0; |
1012 | 0 | } |
1013 | | |
void
g_cond_clear_impl (GCond *cond)
{
  /* Deliberately empty: a futex-based GCond holds no kernel resources,
   * so there is nothing to release.
   */
}
1018 | | |
1019 | | void |
1020 | | g_cond_wait_impl (GCond *cond, |
1021 | | GMutex *mutex) |
1022 | 0 | { |
1023 | 0 | guint sampled = (guint) g_atomic_int_get (&cond->i[0]); |
1024 | |
|
1025 | 0 | g_mutex_unlock (mutex); |
1026 | 0 | g_futex_simple (&cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, NULL); |
1027 | 0 | g_mutex_lock (mutex); |
1028 | 0 | } |
1029 | | |
1030 | | void |
1031 | | g_cond_signal_impl (GCond *cond) |
1032 | 0 | { |
1033 | 0 | g_atomic_int_inc (&cond->i[0]); |
1034 | |
|
1035 | 0 | g_futex_simple (&cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL); |
1036 | 0 | } |
1037 | | |
1038 | | void |
1039 | | g_cond_broadcast_impl (GCond *cond) |
1040 | 94 | { |
1041 | 94 | g_atomic_int_inc (&cond->i[0]); |
1042 | | |
1043 | 94 | g_futex_simple (&cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) INT_MAX, NULL); |
1044 | 94 | } |
1045 | | |
1046 | | gboolean |
1047 | | g_cond_wait_until_impl (GCond *cond, |
1048 | | GMutex *mutex, |
1049 | | gint64 end_time) |
1050 | 0 | { |
1051 | 0 | struct timespec now; |
1052 | 0 | struct timespec span; |
1053 | |
|
1054 | 0 | guint sampled; |
1055 | 0 | int res; |
1056 | 0 | gboolean success; |
1057 | |
|
1058 | 0 | if (end_time < 0) |
1059 | 0 | return FALSE; |
1060 | | |
1061 | 0 | clock_gettime (CLOCK_MONOTONIC, &now); |
1062 | 0 | span.tv_sec = (end_time / 1000000) - now.tv_sec; |
1063 | 0 | span.tv_nsec = ((end_time % 1000000) * 1000) - now.tv_nsec; |
1064 | 0 | if (span.tv_nsec < 0) |
1065 | 0 | { |
1066 | 0 | span.tv_nsec += 1000000000; |
1067 | 0 | span.tv_sec--; |
1068 | 0 | } |
1069 | |
|
1070 | 0 | if (span.tv_sec < 0) |
1071 | 0 | return FALSE; |
1072 | | |
1073 | | /* `struct timespec` as defined by the libc headers does not necessarily |
1074 | | * have any relation to the one used by the kernel for the `futex` syscall. |
1075 | | * |
1076 | | * Specifically, the libc headers might use 64-bit `time_t` while the kernel |
1077 | | * headers use 32-bit types on certain systems. |
1078 | | * |
1079 | | * To get around this problem we |
1080 | | * a) check if `futex_time64` is available, which only exists on 32-bit |
1081 | | * platforms and always uses 64-bit `time_t`. |
1082 | | * b) if `futex_time64` is available, but the Android runtime's API level |
1083 | | * is < 30, `futex_time64` is blocked by seccomp and using it will cause |
1084 | | * the app to be terminated. Skip to c). |
1085 | | * https://android-review.googlesource.com/c/platform/bionic/+/1094758 |
1086 | | * c) otherwise (or if that returns `ENOSYS`), we call the normal `futex` |
1087 | | * syscall with the `struct timespec` used by the kernel. By default, we |
1088 | | * use `__kernel_long_t` for both its fields, which is equivalent to |
1089 | | * `__kernel_old_time_t` and is available in the kernel headers for a |
1090 | | * longer time. |
1091 | | * d) With very old headers (~2.6.x), `__kernel_long_t` is not available, and |
1092 | | * we use an older definition that uses `__kernel_time_t` and `long`. |
1093 | | * |
1094 | | * Also some 32-bit systems do not define `__NR_futex` at all and only |
1095 | | * define `__NR_futex_time64`. |
1096 | | */ |
1097 | | |
1098 | 0 | sampled = cond->i[0]; |
1099 | 0 | g_mutex_unlock (mutex); |
1100 | |
|
1101 | | #if defined(HAVE_FUTEX_TIME64) |
1102 | | #if defined(__ANDROID__) |
1103 | | if (__builtin_available (android 30, *)) { |
1104 | | #else |
1105 | | { |
1106 | | #endif |
1107 | | struct |
1108 | | { |
1109 | | gint64 tv_sec; |
1110 | | gint64 tv_nsec; |
1111 | | } span_arg; |
1112 | | |
1113 | | span_arg.tv_sec = span.tv_sec; |
1114 | | span_arg.tv_nsec = span.tv_nsec; |
1115 | | |
1116 | | res = syscall (__NR_futex_time64, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, &span_arg); |
1117 | | |
1118 | | /* If the syscall does not exist (`ENOSYS`), we retry again below with the |
1119 | | * normal `futex` syscall. This can happen if newer kernel headers are |
1120 | | * used than the kernel that is actually running. |
1121 | | */ |
1122 | | # if defined(HAVE_FUTEX) |
1123 | | if (res >= 0 || errno != ENOSYS) |
1124 | | # endif /* defined(HAVE_FUTEX) */ |
1125 | | { |
1126 | | success = (res < 0 && errno == ETIMEDOUT) ? FALSE : TRUE; |
1127 | | g_mutex_lock (mutex); |
1128 | | |
1129 | | return success; |
1130 | | } |
1131 | | } |
1132 | | #endif |
1133 | |
|
1134 | 0 | #if defined(HAVE_FUTEX) |
1135 | 0 | { |
1136 | | # ifdef __kernel_long_t |
1137 | | # define KERNEL_SPAN_SEC_TYPE __kernel_long_t |
1138 | | struct |
1139 | | { |
1140 | | __kernel_long_t tv_sec; |
1141 | | __kernel_long_t tv_nsec; |
1142 | | } span_arg; |
1143 | | # else |
1144 | | /* Very old kernel headers: version 2.6.32 and thereabouts */ |
1145 | 0 | # define KERNEL_SPAN_SEC_TYPE __kernel_time_t |
1146 | 0 | struct |
1147 | 0 | { |
1148 | 0 | __kernel_time_t tv_sec; |
1149 | 0 | long tv_nsec; |
1150 | 0 | } span_arg; |
1151 | 0 | # endif |
1152 | | /* Make sure to only ever call this if the end time actually fits into the target type */ |
1153 | 0 | if (G_UNLIKELY (sizeof (KERNEL_SPAN_SEC_TYPE) < 8 && span.tv_sec > G_MAXINT32)) |
1154 | 0 | g_error ("%s: Can’t wait for more than %us", G_STRFUNC, G_MAXINT32); |
1155 | |
|
1156 | 0 | span_arg.tv_sec = span.tv_sec; |
1157 | 0 | span_arg.tv_nsec = span.tv_nsec; |
1158 | |
|
1159 | 0 | res = syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, &span_arg); |
1160 | 0 | success = (res < 0 && errno == ETIMEDOUT) ? FALSE : TRUE; |
1161 | 0 | g_mutex_lock (mutex); |
1162 | |
|
1163 | 0 | return success; |
1164 | 0 | } |
1165 | 0 | # undef KERNEL_SPAN_SEC_TYPE |
1166 | 0 | #endif /* defined(HAVE_FUTEX) */ |
1167 | | |
1168 | | /* We can't end up here because of the checks above */ |
1169 | 0 | g_assert_not_reached (); |
1170 | 0 | } |
1171 | | |
1172 | | #endif |
1173 | | |
1174 | | /* {{{1 Epilogue */ |
1175 | | /* vim:set foldmethod=marker: */ |