Coverage Report

Created: 2026-02-26 06:23

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/irssi/subprojects/glib-2.74.7/glib/gthread-posix.c
Line
Count
Source
1
/* GLIB - Library of useful routines for C programming
2
 * Copyright (C) 1995-1997  Peter Mattis, Spencer Kimball and Josh MacDonald
3
 *
4
 * gthread.c: posix thread system implementation
5
 * Copyright 1998 Sebastian Wilhelmi; University of Karlsruhe
6
 *
7
 * SPDX-License-Identifier: LGPL-2.1-or-later
8
 *
9
 * This library is free software; you can redistribute it and/or
10
 * modify it under the terms of the GNU Lesser General Public
11
 * License as published by the Free Software Foundation; either
12
 * version 2.1 of the License, or (at your option) any later version.
13
 *
14
 * This library is distributed in the hope that it will be useful,
15
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17
 * Lesser General Public License for more details.
18
 *
19
 * You should have received a copy of the GNU Lesser General Public
20
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21
 */
22
23
/*
24
 * Modified by the GLib Team and others 1997-2000.  See the AUTHORS
25
 * file for a list of people on the GLib Team.  See the ChangeLog
26
 * files for a list of changes.  These files are distributed with
27
 * GLib at ftp://ftp.gtk.org/pub/gtk/.
28
 */
29
30
/* The GMutex, GCond and GPrivate implementations in this file are some
31
 * of the lowest-level code in GLib.  All other parts of GLib (messages,
32
 * memory, slices, etc) assume that they can freely use these facilities
33
 * without risking recursion.
34
 *
35
 * As such, these functions are NOT permitted to call any other part of
36
 * GLib.
37
 *
38
 * The thread manipulation functions (create, exit, join, etc.) have
39
 * more freedom -- they can do as they please.
40
 */
41
42
#include "config.h"
43
44
#include "gthread.h"
45
46
#include "gmain.h"
47
#include "gmessages.h"
48
#include "gslice.h"
49
#include "gstrfuncs.h"
50
#include "gtestutils.h"
51
#include "gthreadprivate.h"
52
#include "gutils.h"
53
54
#include <stdlib.h>
55
#include <stdio.h>
56
#include <string.h>
57
#include <errno.h>
58
#include <pthread.h>
59
60
#include <sys/time.h>
61
#include <unistd.h>
62
63
#ifdef HAVE_PTHREAD_SET_NAME_NP
64
#include <pthread_np.h>
65
#endif
66
#ifdef HAVE_SCHED_H
67
#include <sched.h>
68
#endif
69
#ifdef G_OS_WIN32
70
#include <windows.h>
71
#endif
72
73
#if defined(HAVE_SYS_SCHED_GETATTR)
74
#include <sys/syscall.h>
75
#endif
76
77
#if (defined(HAVE_FUTEX) || defined(HAVE_FUTEX_TIME64)) && \
78
    (defined(HAVE_STDATOMIC_H) || defined(__ATOMIC_SEQ_CST))
79
#define USE_NATIVE_MUTEX
80
#endif
81
82
static void
83
g_thread_abort (gint         status,
84
                const gchar *function)
85
0
{
86
0
  fprintf (stderr, "GLib (gthread-posix.c): Unexpected error from C library during '%s': %s.  Aborting.\n",
87
0
           function, strerror (status));
88
0
  g_abort ();
89
0
}
90
91
/* {{{1 GMutex */
92
93
#if !defined(USE_NATIVE_MUTEX)
94
95
static pthread_mutex_t *
96
g_mutex_impl_new (void)
97
{
98
  pthread_mutexattr_t *pattr = NULL;
99
  pthread_mutex_t *mutex;
100
  gint status;
101
#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
102
  pthread_mutexattr_t attr;
103
#endif
104
105
  mutex = malloc (sizeof (pthread_mutex_t));
106
  if G_UNLIKELY (mutex == NULL)
107
    g_thread_abort (errno, "malloc");
108
109
#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
110
  pthread_mutexattr_init (&attr);
111
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
112
  pattr = &attr;
113
#endif
114
115
  if G_UNLIKELY ((status = pthread_mutex_init (mutex, pattr)) != 0)
116
    g_thread_abort (status, "pthread_mutex_init");
117
118
#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
119
  pthread_mutexattr_destroy (&attr);
120
#endif
121
122
  return mutex;
123
}
124
125
static void
126
g_mutex_impl_free (pthread_mutex_t *mutex)
127
{
128
  pthread_mutex_destroy (mutex);
129
  free (mutex);
130
}
131
132
static inline pthread_mutex_t *
133
g_mutex_get_impl (GMutex *mutex)
134
{
135
  pthread_mutex_t *impl = g_atomic_pointer_get (&mutex->p);
136
137
  if G_UNLIKELY (impl == NULL)
138
    {
139
      impl = g_mutex_impl_new ();
140
      if (!g_atomic_pointer_compare_and_exchange (&mutex->p, NULL, impl))
141
        g_mutex_impl_free (impl);
142
      impl = mutex->p;
143
    }
144
145
  return impl;
146
}
147
148
149
/**
150
 * g_mutex_init:
151
 * @mutex: an uninitialized #GMutex
152
 *
153
 * Initializes a #GMutex so that it can be used.
154
 *
155
 * This function is useful to initialize a mutex that has been
156
 * allocated on the stack, or as part of a larger structure.
157
 * It is not necessary to initialize a mutex that has been
158
 * statically allocated.
159
 *
160
 * |[<!-- language="C" --> 
161
 *   typedef struct {
162
 *     GMutex m;
163
 *     ...
164
 *   } Blob;
165
 *
166
 * Blob *b;
167
 *
168
 * b = g_new (Blob, 1);
169
 * g_mutex_init (&b->m);
170
 * ]|
171
 *
172
 * To undo the effect of g_mutex_init() when a mutex is no longer
173
 * needed, use g_mutex_clear().
174
 *
175
 * Calling g_mutex_init() on an already initialized #GMutex leads
176
 * to undefined behaviour.
177
 *
178
 * Since: 2.32
179
 */
180
void
181
g_mutex_init (GMutex *mutex)
182
{
183
  mutex->p = g_mutex_impl_new ();
184
}
185
186
/**
187
 * g_mutex_clear:
188
 * @mutex: an initialized #GMutex
189
 *
190
 * Frees the resources allocated to a mutex with g_mutex_init().
191
 *
192
 * This function should not be used with a #GMutex that has been
193
 * statically allocated.
194
 *
195
 * Calling g_mutex_clear() on a locked mutex leads to undefined
196
 * behaviour.
197
 *
198
 * Since: 2.32
199
 */
200
void
201
g_mutex_clear (GMutex *mutex)
202
{
203
  g_mutex_impl_free (mutex->p);
204
}
205
206
/**
207
 * g_mutex_lock:
208
 * @mutex: a #GMutex
209
 *
210
 * Locks @mutex. If @mutex is already locked by another thread, the
211
 * current thread will block until @mutex is unlocked by the other
212
 * thread.
213
 *
214
 * #GMutex is neither guaranteed to be recursive nor to be
215
 * non-recursive.  As such, calling g_mutex_lock() on a #GMutex that has
216
 * already been locked by the same thread results in undefined behaviour
217
 * (including but not limited to deadlocks).
218
 */
219
void
220
g_mutex_lock (GMutex *mutex)
221
{
222
  gint status;
223
224
  if G_UNLIKELY ((status = pthread_mutex_lock (g_mutex_get_impl (mutex))) != 0)
225
    g_thread_abort (status, "pthread_mutex_lock");
226
}
227
228
/**
229
 * g_mutex_unlock:
230
 * @mutex: a #GMutex
231
 *
232
 * Unlocks @mutex. If another thread is blocked in a g_mutex_lock()
233
 * call for @mutex, it will become unblocked and can lock @mutex itself.
234
 *
235
 * Calling g_mutex_unlock() on a mutex that is not locked by the
236
 * current thread leads to undefined behaviour.
237
 */
238
void
239
g_mutex_unlock (GMutex *mutex)
240
{
241
  gint status;
242
243
  if G_UNLIKELY ((status = pthread_mutex_unlock (g_mutex_get_impl (mutex))) != 0)
244
    g_thread_abort (status, "pthread_mutex_unlock");
245
}
246
247
/**
248
 * g_mutex_trylock:
249
 * @mutex: a #GMutex
250
 *
251
 * Tries to lock @mutex. If @mutex is already locked by another thread,
252
 * it immediately returns %FALSE. Otherwise it locks @mutex and returns
253
 * %TRUE.
254
 *
255
 * #GMutex is neither guaranteed to be recursive nor to be
256
 * non-recursive.  As such, calling g_mutex_lock() on a #GMutex that has
257
 * already been locked by the same thread results in undefined behaviour
258
 * (including but not limited to deadlocks or arbitrary return values).
259
 *
260
 * Returns: %TRUE if @mutex could be locked
261
 */
262
gboolean
263
g_mutex_trylock (GMutex *mutex)
264
{
265
  gint status;
266
267
  if G_LIKELY ((status = pthread_mutex_trylock (g_mutex_get_impl (mutex))) == 0)
268
    return TRUE;
269
270
  if G_UNLIKELY (status != EBUSY)
271
    g_thread_abort (status, "pthread_mutex_trylock");
272
273
  return FALSE;
274
}
275
276
#endif /* !defined(USE_NATIVE_MUTEX) */
277
278
/* {{{1 GRecMutex */
279
280
static pthread_mutex_t *
281
g_rec_mutex_impl_new (void)
282
8
{
283
8
  pthread_mutexattr_t attr;
284
8
  pthread_mutex_t *mutex;
285
286
8
  mutex = malloc (sizeof (pthread_mutex_t));
287
8
  if G_UNLIKELY (mutex == NULL)
288
0
    g_thread_abort (errno, "malloc");
289
290
8
  pthread_mutexattr_init (&attr);
291
8
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
292
8
  pthread_mutex_init (mutex, &attr);
293
8
  pthread_mutexattr_destroy (&attr);
294
295
8
  return mutex;
296
8
}
297
298
static void
299
g_rec_mutex_impl_free (pthread_mutex_t *mutex)
300
0
{
301
0
  pthread_mutex_destroy (mutex);
302
0
  free (mutex);
303
0
}
304
305
static inline pthread_mutex_t *
306
g_rec_mutex_get_impl (GRecMutex *rec_mutex)
307
168
{
308
168
  pthread_mutex_t *impl = g_atomic_pointer_get (&rec_mutex->p);
309
310
168
  if G_UNLIKELY (impl == NULL)
311
8
    {
312
8
      impl = g_rec_mutex_impl_new ();
313
8
      if (!g_atomic_pointer_compare_and_exchange (&rec_mutex->p, NULL, impl))
314
0
        g_rec_mutex_impl_free (impl);
315
8
      impl = rec_mutex->p;
316
8
    }
317
318
168
  return impl;
319
168
}
320
321
/**
322
 * g_rec_mutex_init:
323
 * @rec_mutex: an uninitialized #GRecMutex
324
 *
325
 * Initializes a #GRecMutex so that it can be used.
326
 *
327
 * This function is useful to initialize a recursive mutex
328
 * that has been allocated on the stack, or as part of a larger
329
 * structure.
330
 *
331
 * It is not necessary to initialise a recursive mutex that has been
332
 * statically allocated.
333
 *
334
 * |[<!-- language="C" --> 
335
 *   typedef struct {
336
 *     GRecMutex m;
337
 *     ...
338
 *   } Blob;
339
 *
340
 * Blob *b;
341
 *
342
 * b = g_new (Blob, 1);
343
 * g_rec_mutex_init (&b->m);
344
 * ]|
345
 *
346
 * Calling g_rec_mutex_init() on an already initialized #GRecMutex
347
 * leads to undefined behaviour.
348
 *
349
 * To undo the effect of g_rec_mutex_init() when a recursive mutex
350
 * is no longer needed, use g_rec_mutex_clear().
351
 *
352
 * Since: 2.32
353
 */
354
void
355
g_rec_mutex_init (GRecMutex *rec_mutex)
356
0
{
357
0
  rec_mutex->p = g_rec_mutex_impl_new ();
358
0
}
359
360
/**
361
 * g_rec_mutex_clear:
362
 * @rec_mutex: an initialized #GRecMutex
363
 *
364
 * Frees the resources allocated to a recursive mutex with
365
 * g_rec_mutex_init().
366
 *
367
 * This function should not be used with a #GRecMutex that has been
368
 * statically allocated.
369
 *
370
 * Calling g_rec_mutex_clear() on a locked recursive mutex leads
371
 * to undefined behaviour.
372
 *
373
 * Since: 2.32
374
 */
375
void
376
g_rec_mutex_clear (GRecMutex *rec_mutex)
377
0
{
378
0
  g_rec_mutex_impl_free (rec_mutex->p);
379
0
}
380
381
/**
382
 * g_rec_mutex_lock:
383
 * @rec_mutex: a #GRecMutex
384
 *
385
 * Locks @rec_mutex. If @rec_mutex is already locked by another
386
 * thread, the current thread will block until @rec_mutex is
387
 * unlocked by the other thread. If @rec_mutex is already locked
388
 * by the current thread, the 'lock count' of @rec_mutex is increased.
389
 * The mutex will only become available again when it is unlocked
390
 * as many times as it has been locked.
391
 *
392
 * Since: 2.32
393
 */
394
void
395
g_rec_mutex_lock (GRecMutex *mutex)
396
168
{
397
168
  pthread_mutex_lock (g_rec_mutex_get_impl (mutex));
398
168
}
399
400
/**
401
 * g_rec_mutex_unlock:
402
 * @rec_mutex: a #GRecMutex
403
 *
404
 * Unlocks @rec_mutex. If another thread is blocked in a
405
 * g_rec_mutex_lock() call for @rec_mutex, it will become unblocked
406
 * and can lock @rec_mutex itself.
407
 *
408
 * Calling g_rec_mutex_unlock() on a recursive mutex that is not
409
 * locked by the current thread leads to undefined behaviour.
410
 *
411
 * Since: 2.32
412
 */
413
void
414
g_rec_mutex_unlock (GRecMutex *rec_mutex)
415
168
{
416
168
  pthread_mutex_unlock (rec_mutex->p);
417
168
}
418
419
/**
420
 * g_rec_mutex_trylock:
421
 * @rec_mutex: a #GRecMutex
422
 *
423
 * Tries to lock @rec_mutex. If @rec_mutex is already locked
424
 * by another thread, it immediately returns %FALSE. Otherwise
425
 * it locks @rec_mutex and returns %TRUE.
426
 *
427
 * Returns: %TRUE if @rec_mutex could be locked
428
 *
429
 * Since: 2.32
430
 */
431
gboolean
432
g_rec_mutex_trylock (GRecMutex *rec_mutex)
433
0
{
434
0
  if (pthread_mutex_trylock (g_rec_mutex_get_impl (rec_mutex)) != 0)
435
0
    return FALSE;
436
437
0
  return TRUE;
438
0
}
439
440
/* {{{1 GRWLock */
441
442
static pthread_rwlock_t *
443
g_rw_lock_impl_new (void)
444
9
{
445
9
  pthread_rwlock_t *rwlock;
446
9
  gint status;
447
448
9
  rwlock = malloc (sizeof (pthread_rwlock_t));
449
9
  if G_UNLIKELY (rwlock == NULL)
450
0
    g_thread_abort (errno, "malloc");
451
452
9
  if G_UNLIKELY ((status = pthread_rwlock_init (rwlock, NULL)) != 0)
453
0
    g_thread_abort (status, "pthread_rwlock_init");
454
455
9
  return rwlock;
456
9
}
457
458
static void
459
g_rw_lock_impl_free (pthread_rwlock_t *rwlock)
460
0
{
461
0
  pthread_rwlock_destroy (rwlock);
462
0
  free (rwlock);
463
0
}
464
465
static inline pthread_rwlock_t *
466
g_rw_lock_get_impl (GRWLock *lock)
467
35.6k
{
468
35.6k
  pthread_rwlock_t *impl = g_atomic_pointer_get (&lock->p);
469
470
35.6k
  if G_UNLIKELY (impl == NULL)
471
9
    {
472
9
      impl = g_rw_lock_impl_new ();
473
9
      if (!g_atomic_pointer_compare_and_exchange (&lock->p, NULL, impl))
474
0
        g_rw_lock_impl_free (impl);
475
9
      impl = lock->p;
476
9
    }
477
478
35.6k
  return impl;
479
35.6k
}
480
481
/**
482
 * g_rw_lock_init:
483
 * @rw_lock: an uninitialized #GRWLock
484
 *
485
 * Initializes a #GRWLock so that it can be used.
486
 *
487
 * This function is useful to initialize a lock that has been
488
 * allocated on the stack, or as part of a larger structure.  It is not
489
 * necessary to initialise a reader-writer lock that has been statically
490
 * allocated.
491
 *
492
 * |[<!-- language="C" --> 
493
 *   typedef struct {
494
 *     GRWLock l;
495
 *     ...
496
 *   } Blob;
497
 *
498
 * Blob *b;
499
 *
500
 * b = g_new (Blob, 1);
501
 * g_rw_lock_init (&b->l);
502
 * ]|
503
 *
504
 * To undo the effect of g_rw_lock_init() when a lock is no longer
505
 * needed, use g_rw_lock_clear().
506
 *
507
 * Calling g_rw_lock_init() on an already initialized #GRWLock leads
508
 * to undefined behaviour.
509
 *
510
 * Since: 2.32
511
 */
512
void
513
g_rw_lock_init (GRWLock *rw_lock)
514
0
{
515
0
  rw_lock->p = g_rw_lock_impl_new ();
516
0
}
517
518
/**
519
 * g_rw_lock_clear:
520
 * @rw_lock: an initialized #GRWLock
521
 *
522
 * Frees the resources allocated to a lock with g_rw_lock_init().
523
 *
524
 * This function should not be used with a #GRWLock that has been
525
 * statically allocated.
526
 *
527
 * Calling g_rw_lock_clear() when any thread holds the lock
528
 * leads to undefined behaviour.
529
 *
530
 * Since: 2.32
531
 */
532
void
533
g_rw_lock_clear (GRWLock *rw_lock)
534
0
{
535
0
  g_rw_lock_impl_free (rw_lock->p);
536
0
}
537
538
/**
539
 * g_rw_lock_writer_lock:
540
 * @rw_lock: a #GRWLock
541
 *
542
 * Obtain a write lock on @rw_lock. If another thread currently holds
543
 * a read or write lock on @rw_lock, the current thread will block
544
 * until all other threads have dropped their locks on @rw_lock.
545
 *
546
 * Calling g_rw_lock_writer_lock() while the current thread already
547
 * owns a read or write lock on @rw_lock leads to undefined behaviour.
548
 *
549
 * Since: 2.32
550
 */
551
void
552
g_rw_lock_writer_lock (GRWLock *rw_lock)
553
880
{
554
880
  int retval = pthread_rwlock_wrlock (g_rw_lock_get_impl (rw_lock));
555
556
880
  if (retval != 0)
557
0
    g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval));
558
880
}
559
560
/**
561
 * g_rw_lock_writer_trylock:
562
 * @rw_lock: a #GRWLock
563
 *
564
 * Tries to obtain a write lock on @rw_lock. If another thread
565
 * currently holds a read or write lock on @rw_lock, it immediately
566
 * returns %FALSE.
567
 * Otherwise it locks @rw_lock and returns %TRUE.
568
 *
569
 * Returns: %TRUE if @rw_lock could be locked
570
 *
571
 * Since: 2.32
572
 */
573
gboolean
574
g_rw_lock_writer_trylock (GRWLock *rw_lock)
575
0
{
576
0
  if (pthread_rwlock_trywrlock (g_rw_lock_get_impl (rw_lock)) != 0)
577
0
    return FALSE;
578
579
0
  return TRUE;
580
0
}
581
582
/**
583
 * g_rw_lock_writer_unlock:
584
 * @rw_lock: a #GRWLock
585
 *
586
 * Release a write lock on @rw_lock.
587
 *
588
 * Calling g_rw_lock_writer_unlock() on a lock that is not held
589
 * by the current thread leads to undefined behaviour.
590
 *
591
 * Since: 2.32
592
 */
593
void
594
g_rw_lock_writer_unlock (GRWLock *rw_lock)
595
880
{
596
880
  pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
597
880
}
598
599
/**
600
 * g_rw_lock_reader_lock:
601
 * @rw_lock: a #GRWLock
602
 *
603
 * Obtain a read lock on @rw_lock. If another thread currently holds
604
 * the write lock on @rw_lock, the current thread will block until the
605
 * write lock was (held and) released. If another thread does not hold
606
 * the write lock, but is waiting for it, it is implementation defined
607
 * whether the reader or writer will block. Read locks can be taken
608
 * recursively.
609
 *
610
 * Calling g_rw_lock_reader_lock() while the current thread already
611
 * owns a write lock leads to undefined behaviour. Read locks however
612
 * can be taken recursively, in which case you need to make sure to
613
 * call g_rw_lock_reader_unlock() the same amount of times.
614
 *
615
 * It is implementation-defined how many read locks are allowed to be
616
 * held on the same lock simultaneously. If the limit is hit,
617
 * or if a deadlock is detected, a critical warning will be emitted.
618
 *
619
 * Since: 2.32
620
 */
621
void
622
g_rw_lock_reader_lock (GRWLock *rw_lock)
623
16.9k
{
624
16.9k
  int retval = pthread_rwlock_rdlock (g_rw_lock_get_impl (rw_lock));
625
626
16.9k
  if (retval != 0)
627
0
    g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval));
628
16.9k
}
629
630
/**
631
 * g_rw_lock_reader_trylock:
632
 * @rw_lock: a #GRWLock
633
 *
634
 * Tries to obtain a read lock on @rw_lock and returns %TRUE if
635
 * the read lock was successfully obtained. Otherwise it
636
 * returns %FALSE.
637
 *
638
 * Returns: %TRUE if @rw_lock could be locked
639
 *
640
 * Since: 2.32
641
 */
642
gboolean
643
g_rw_lock_reader_trylock (GRWLock *rw_lock)
644
0
{
645
0
  if (pthread_rwlock_tryrdlock (g_rw_lock_get_impl (rw_lock)) != 0)
646
0
    return FALSE;
647
648
0
  return TRUE;
649
0
}
650
651
/**
652
 * g_rw_lock_reader_unlock:
653
 * @rw_lock: a #GRWLock
654
 *
655
 * Release a read lock on @rw_lock.
656
 *
657
 * Calling g_rw_lock_reader_unlock() on a lock that is not held
658
 * by the current thread leads to undefined behaviour.
659
 *
660
 * Since: 2.32
661
 */
662
void
663
g_rw_lock_reader_unlock (GRWLock *rw_lock)
664
16.9k
{
665
16.9k
  pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
666
16.9k
}
667
668
/* {{{1 GCond */
669
670
#if !defined(USE_NATIVE_MUTEX)
671
672
static pthread_cond_t *
673
g_cond_impl_new (void)
674
{
675
  pthread_condattr_t attr;
676
  pthread_cond_t *cond;
677
  gint status;
678
679
  pthread_condattr_init (&attr);
680
681
#ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
682
#elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
683
  if G_UNLIKELY ((status = pthread_condattr_setclock (&attr, CLOCK_MONOTONIC)) != 0)
684
    g_thread_abort (status, "pthread_condattr_setclock");
685
#else
686
#error Cannot support GCond on your platform.
687
#endif
688
689
  cond = malloc (sizeof (pthread_cond_t));
690
  if G_UNLIKELY (cond == NULL)
691
    g_thread_abort (errno, "malloc");
692
693
  if G_UNLIKELY ((status = pthread_cond_init (cond, &attr)) != 0)
694
    g_thread_abort (status, "pthread_cond_init");
695
696
  pthread_condattr_destroy (&attr);
697
698
  return cond;
699
}
700
701
static void
702
g_cond_impl_free (pthread_cond_t *cond)
703
{
704
  pthread_cond_destroy (cond);
705
  free (cond);
706
}
707
708
static inline pthread_cond_t *
709
g_cond_get_impl (GCond *cond)
710
{
711
  pthread_cond_t *impl = g_atomic_pointer_get (&cond->p);
712
713
  if G_UNLIKELY (impl == NULL)
714
    {
715
      impl = g_cond_impl_new ();
716
      if (!g_atomic_pointer_compare_and_exchange (&cond->p, NULL, impl))
717
        g_cond_impl_free (impl);
718
      impl = cond->p;
719
    }
720
721
  return impl;
722
}
723
724
/**
725
 * g_cond_init:
726
 * @cond: an uninitialized #GCond
727
 *
728
 * Initialises a #GCond so that it can be used.
729
 *
730
 * This function is useful to initialise a #GCond that has been
731
 * allocated as part of a larger structure.  It is not necessary to
732
 * initialise a #GCond that has been statically allocated.
733
 *
734
 * To undo the effect of g_cond_init() when a #GCond is no longer
735
 * needed, use g_cond_clear().
736
 *
737
 * Calling g_cond_init() on an already-initialised #GCond leads
738
 * to undefined behaviour.
739
 *
740
 * Since: 2.32
741
 */
742
void
743
g_cond_init (GCond *cond)
744
{
745
  cond->p = g_cond_impl_new ();
746
}
747
748
/**
749
 * g_cond_clear:
750
 * @cond: an initialised #GCond
751
 *
752
 * Frees the resources allocated to a #GCond with g_cond_init().
753
 *
754
 * This function should not be used with a #GCond that has been
755
 * statically allocated.
756
 *
757
 * Calling g_cond_clear() for a #GCond on which threads are
758
 * blocking leads to undefined behaviour.
759
 *
760
 * Since: 2.32
761
 */
762
void
763
g_cond_clear (GCond *cond)
764
{
765
  g_cond_impl_free (cond->p);
766
}
767
768
/**
769
 * g_cond_wait:
770
 * @cond: a #GCond
771
 * @mutex: a #GMutex that is currently locked
772
 *
773
 * Atomically releases @mutex and waits until @cond is signalled.
774
 * When this function returns, @mutex is locked again and owned by the
775
 * calling thread.
776
 *
777
 * When using condition variables, it is possible that a spurious wakeup
778
 * may occur (ie: g_cond_wait() returns even though g_cond_signal() was
779
 * not called).  It's also possible that a stolen wakeup may occur.
780
 * This is when g_cond_signal() is called, but another thread acquires
781
 * @mutex before this thread and modifies the state of the program in
782
 * such a way that when g_cond_wait() is able to return, the expected
783
 * condition is no longer met.
784
 *
785
 * For this reason, g_cond_wait() must always be used in a loop.  See
786
 * the documentation for #GCond for a complete example.
787
 **/
788
void
789
g_cond_wait (GCond  *cond,
790
             GMutex *mutex)
791
{
792
  gint status;
793
794
  if G_UNLIKELY ((status = pthread_cond_wait (g_cond_get_impl (cond), g_mutex_get_impl (mutex))) != 0)
795
    g_thread_abort (status, "pthread_cond_wait");
796
}
797
798
/**
799
 * g_cond_signal:
800
 * @cond: a #GCond
801
 *
802
 * If threads are waiting for @cond, at least one of them is unblocked.
803
 * If no threads are waiting for @cond, this function has no effect.
804
 * It is good practice to hold the same lock as the waiting thread
805
 * while calling this function, though not required.
806
 */
807
void
808
g_cond_signal (GCond *cond)
809
{
810
  gint status;
811
812
  if G_UNLIKELY ((status = pthread_cond_signal (g_cond_get_impl (cond))) != 0)
813
    g_thread_abort (status, "pthread_cond_signal");
814
}
815
816
/**
817
 * g_cond_broadcast:
818
 * @cond: a #GCond
819
 *
820
 * If threads are waiting for @cond, all of them are unblocked.
821
 * If no threads are waiting for @cond, this function has no effect.
822
 * It is good practice to lock the same mutex as the waiting threads
823
 * while calling this function, though not required.
824
 */
825
void
826
g_cond_broadcast (GCond *cond)
827
{
828
  gint status;
829
830
  if G_UNLIKELY ((status = pthread_cond_broadcast (g_cond_get_impl (cond))) != 0)
831
    g_thread_abort (status, "pthread_cond_broadcast");
832
}
833
834
/**
835
 * g_cond_wait_until:
836
 * @cond: a #GCond
837
 * @mutex: a #GMutex that is currently locked
838
 * @end_time: the monotonic time to wait until
839
 *
840
 * Waits until either @cond is signalled or @end_time has passed.
841
 *
842
 * As with g_cond_wait() it is possible that a spurious or stolen wakeup
843
 * could occur.  For that reason, waiting on a condition variable should
844
 * always be in a loop, based on an explicitly-checked predicate.
845
 *
846
 * %TRUE is returned if the condition variable was signalled (or in the
847
 * case of a spurious wakeup).  %FALSE is returned if @end_time has
848
 * passed.
849
 *
850
 * The following code shows how to correctly perform a timed wait on a
851
 * condition variable (extending the example presented in the
852
 * documentation for #GCond):
853
 *
854
 * |[<!-- language="C" --> 
855
 * gpointer
856
 * pop_data_timed (void)
857
 * {
858
 *   gint64 end_time;
859
 *   gpointer data;
860
 *
861
 *   g_mutex_lock (&data_mutex);
862
 *
863
 *   end_time = g_get_monotonic_time () + 5 * G_TIME_SPAN_SECOND;
864
 *   while (!current_data)
865
 *     if (!g_cond_wait_until (&data_cond, &data_mutex, end_time))
866
 *       {
867
 *         // timeout has passed.
868
 *         g_mutex_unlock (&data_mutex);
869
 *         return NULL;
870
 *       }
871
 *
872
 *   // there is data for us
873
 *   data = current_data;
874
 *   current_data = NULL;
875
 *
876
 *   g_mutex_unlock (&data_mutex);
877
 *
878
 *   return data;
879
 * }
880
 * ]|
881
 *
882
 * Notice that the end time is calculated once, before entering the
883
 * loop and reused.  This is the motivation behind the use of absolute
884
 * time on this API -- if a relative time of 5 seconds were passed
885
 * directly to the call and a spurious wakeup occurred, the program would
886
 * have to start over waiting again (which would lead to a total wait
887
 * time of more than 5 seconds).
888
 *
889
 * Returns: %TRUE on a signal, %FALSE on a timeout
890
 * Since: 2.32
891
 **/
892
gboolean
893
g_cond_wait_until (GCond  *cond,
894
                   GMutex *mutex,
895
                   gint64  end_time)
896
{
897
  struct timespec ts;
898
  gint status;
899
900
#ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
901
  /* end_time is given relative to the monotonic clock as returned by
902
   * g_get_monotonic_time().
903
   *
904
   * Since this pthreads wants the relative time, convert it back again.
905
   */
906
  {
907
    gint64 now = g_get_monotonic_time ();
908
    gint64 relative;
909
910
    if (end_time <= now)
911
      return FALSE;
912
913
    relative = end_time - now;
914
915
    ts.tv_sec = relative / 1000000;
916
    ts.tv_nsec = (relative % 1000000) * 1000;
917
918
    if ((status = pthread_cond_timedwait_relative_np (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
919
      return TRUE;
920
  }
921
#elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
922
  /* This is the exact check we used during init to set the clock to
923
   * monotonic, so if we're in this branch, timedwait() will already be
924
   * expecting a monotonic clock.
925
   */
926
  {
927
    ts.tv_sec = end_time / 1000000;
928
    ts.tv_nsec = (end_time % 1000000) * 1000;
929
930
    if ((status = pthread_cond_timedwait (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
931
      return TRUE;
932
  }
933
#else
934
#error Cannot support GCond on your platform.
935
#endif
936
937
  if G_UNLIKELY (status != ETIMEDOUT)
938
    g_thread_abort (status, "pthread_cond_timedwait");
939
940
  return FALSE;
941
}
942
943
#endif /* defined(USE_NATIVE_MUTEX) */
944
945
/* {{{1 GPrivate */
946
947
/**
948
 * GPrivate:
949
 *
950
 * The #GPrivate struct is an opaque data structure to represent a
951
 * thread-local data key. It is approximately equivalent to the
952
 * pthread_setspecific()/pthread_getspecific() APIs on POSIX and to
953
 * TlsSetValue()/TlsGetValue() on Windows.
954
 *
955
 * If you don't already know why you might want this functionality,
956
 * then you probably don't need it.
957
 *
958
 * #GPrivate is a very limited resource (as far as 128 per program,
959
 * shared between all libraries). It is also not possible to destroy a
960
 * #GPrivate after it has been used. As such, it is only ever acceptable
961
 * to use #GPrivate in static scope, and even then sparingly so.
962
 *
963
 * See G_PRIVATE_INIT() for a couple of examples.
964
 *
965
 * The #GPrivate structure should be considered opaque.  It should only
966
 * be accessed via the g_private_ functions.
967
 */
968
969
/**
970
 * G_PRIVATE_INIT:
971
 * @notify: a #GDestroyNotify
972
 *
973
 * A macro to assist with the static initialisation of a #GPrivate.
974
 *
975
 * This macro is useful for the case that a #GDestroyNotify function
976
 * should be associated with the key.  This is needed when the key will be
977
 * used to point at memory that should be deallocated when the thread
978
 * exits.
979
 *
980
 * Additionally, the #GDestroyNotify will also be called on the previous
981
 * value stored in the key when g_private_replace() is used.
982
 *
983
 * If no #GDestroyNotify is needed, then use of this macro is not
984
 * required -- if the #GPrivate is declared in static scope then it will
985
 * be properly initialised by default (ie: to all zeros).  See the
986
 * examples below.
987
 *
988
 * |[<!-- language="C" --> 
989
 * static GPrivate name_key = G_PRIVATE_INIT (g_free);
990
 *
991
 * // return value should not be freed
992
 * const gchar *
993
 * get_local_name (void)
994
 * {
995
 *   return g_private_get (&name_key);
996
 * }
997
 *
998
 * void
999
 * set_local_name (const gchar *name)
1000
 * {
1001
 *   g_private_replace (&name_key, g_strdup (name));
1002
 * }
1003
 *
1004
 *
1005
 * static GPrivate count_key;   // no free function
1006
 *
1007
 * gint
1008
 * get_local_count (void)
1009
 * {
1010
 *   return GPOINTER_TO_INT (g_private_get (&count_key));
1011
 * }
1012
 *
1013
 * void
1014
 * set_local_count (gint count)
1015
 * {
1016
 *   g_private_set (&count_key, GINT_TO_POINTER (count));
1017
 * }
1018
 * ]|
1019
 *
1020
 * Since: 2.32
1021
 **/
1022
1023
static pthread_key_t *
1024
g_private_impl_new (GDestroyNotify notify)
1025
19
{
1026
19
  pthread_key_t *key;
1027
19
  gint status;
1028
1029
19
  key = malloc (sizeof (pthread_key_t));
1030
19
  if G_UNLIKELY (key == NULL)
1031
0
    g_thread_abort (errno, "malloc");
1032
19
  status = pthread_key_create (key, notify);
1033
19
  if G_UNLIKELY (status != 0)
1034
0
    g_thread_abort (status, "pthread_key_create");
1035
1036
19
  return key;
1037
19
}
1038
1039
static void
1040
g_private_impl_free (pthread_key_t *key)
1041
0
{
1042
0
  gint status;
1043
1044
0
  status = pthread_key_delete (*key);
1045
0
  if G_UNLIKELY (status != 0)
1046
0
    g_thread_abort (status, "pthread_key_delete");
1047
0
  free (key);
1048
0
}
1049
1050
static inline pthread_key_t *
1051
g_private_get_impl (GPrivate *key)
1052
719M
{
1053
719M
  pthread_key_t *impl = g_atomic_pointer_get (&key->p);
1054
1055
719M
  if G_UNLIKELY (impl == NULL)
1056
19
    {
1057
19
      impl = g_private_impl_new (key->notify);
1058
19
      if (!g_atomic_pointer_compare_and_exchange (&key->p, NULL, impl))
1059
0
        {
1060
0
          g_private_impl_free (impl);
1061
0
          impl = key->p;
1062
0
        }
1063
19
    }
1064
1065
719M
  return impl;
1066
719M
}
1067
1068
/**
1069
 * g_private_get:
1070
 * @key: a #GPrivate
1071
 *
1072
 * Returns the current value of the thread local variable @key.
1073
 *
1074
 * If the value has not yet been set in this thread, %NULL is returned.
1075
 * Values are never copied between threads (when a new thread is
1076
 * created, for example).
1077
 *
1078
 * Returns: the thread-local value
1079
 */
1080
gpointer
1081
g_private_get (GPrivate *key)
1082
717M
{
1083
  /* quote POSIX: No errors are returned from pthread_getspecific(). */
1084
717M
  return pthread_getspecific (*g_private_get_impl (key));
1085
717M
}
1086
1087
/**
1088
 * g_private_set:
1089
 * @key: a #GPrivate
1090
 * @value: the new value
1091
 *
1092
 * Sets the thread local variable @key to have the value @value in the
1093
 * current thread.
1094
 *
1095
 * This function differs from g_private_replace() in the following way:
1096
 * the #GDestroyNotify for @key is not called on the old value.
1097
 */
1098
void
1099
g_private_set (GPrivate *key,
1100
               gpointer  value)
1101
2.15M
{
1102
2.15M
  gint status;
1103
1104
2.15M
  if G_UNLIKELY ((status = pthread_setspecific (*g_private_get_impl (key), value)) != 0)
1105
0
    g_thread_abort (status, "pthread_setspecific");
1106
2.15M
}
1107
1108
/**
1109
 * g_private_replace:
1110
 * @key: a #GPrivate
1111
 * @value: the new value
1112
 *
1113
 * Sets the thread local variable @key to have the value @value in the
1114
 * current thread.
1115
 *
1116
 * This function differs from g_private_set() in the following way: if
1117
 * the previous value was non-%NULL then the #GDestroyNotify handler for
1118
 * @key is run on it.
1119
 *
1120
 * Since: 2.32
1121
 **/
1122
void
1123
g_private_replace (GPrivate *key,
1124
                   gpointer  value)
1125
0
{
1126
0
  pthread_key_t *impl = g_private_get_impl (key);
1127
0
  gpointer old;
1128
0
  gint status;
1129
1130
0
  old = pthread_getspecific (*impl);
1131
1132
0
  if G_UNLIKELY ((status = pthread_setspecific (*impl, value)) != 0)
1133
0
    g_thread_abort (status, "pthread_setspecific");
1134
1135
0
  if (old && key->notify)
1136
0
    key->notify (old);
1137
0
}
1138
1139
/* {{{1 GThread */
1140
1141
0
/* Abort via g_error() if a pthreads call returned a non-zero error
 * code; @name identifies the failing call in the message. */
#define posix_check_err(err, name) G_STMT_START{     \
  int error = (err);              \
  if (error)               \
    g_error ("file %s: line %d (%s): error '%s' during '%s'",    \
           __FILE__, __LINE__, G_STRFUNC,       \
           g_strerror (error), name);         \
  }G_STMT_END

/* Run @cmd and check its return code, using the stringified call
 * itself as the error-message name. */
#define posix_check_cmd(cmd) posix_check_err (cmd, #cmd)
1150
1151
/* POSIX-specific extension of GRealThread: the base struct must come
 * first so a GThreadPosix* can be cast to GRealThread*. */
typedef struct
{
  GRealThread thread;

  pthread_t system_thread;  /* underlying pthread handle */
  gboolean  joined;         /* set under 'lock' once pthread_join() completed */
  GMutex    lock;           /* serialises the join-vs-detach decision */

  void *(*proxy) (void *);  /* GLib proxy run on the new thread */

  /* Must be statically allocated and valid forever */
  const GThreadSchedulerSettings *scheduler_settings;
} GThreadPosix;
1164
1165
void
1166
g_system_thread_free (GRealThread *thread)
1167
0
{
1168
0
  GThreadPosix *pt = (GThreadPosix *) thread;
1169
1170
0
  if (!pt->joined)
1171
0
    pthread_detach (pt->system_thread);
1172
1173
0
  g_mutex_clear (&pt->lock);
1174
1175
0
  g_slice_free (GThreadPosix, pt);
1176
0
}
1177
1178
/* Snapshot the calling thread's Linux scheduler attributes into
 * @scheduler_settings->attr (heap-allocated, caller-owned) and verify
 * they can be re-applied.  Returns %FALSE on non-Linux builds or if
 * the get/set syscalls fail.
 */
gboolean
g_system_thread_get_scheduler_settings (GThreadSchedulerSettings *scheduler_settings)
{
  /* FIXME: Implement the same for macOS and the BSDs so it doesn't go through
   * the fallback code using an additional thread. */
#if defined(HAVE_SYS_SCHED_GETATTR)
  pid_t tid;
  int res;
  /* FIXME: The struct definition does not seem to be possible to pull in
   * via any of the normal system headers and it's only declared in the
   * kernel headers. That's why we hardcode 56 here right now. */
  guint size = 56; /* Size as of Linux 5.3.9 */
  guint flags = 0;

  tid = (pid_t) syscall (SYS_gettid);

  scheduler_settings->attr = g_malloc0 (size);

  /* Retry loop: EAGAIN means simply try again; E2BIG means the kernel's
   * sched_attr is larger than our guess, so grow the buffer and retry. */
  do
    {
      int errsv;

      res = syscall (SYS_sched_getattr, tid, scheduler_settings->attr, size, flags);
      errsv = errno;  /* save errno before any call that may clobber it */
      if (res == -1)
        {
          if (errsv == EAGAIN)
            {
              continue;
            }
          else if (errsv == E2BIG)
            {
              g_assert (size < G_MAXINT);
              size *= 2;
              scheduler_settings->attr = g_realloc (scheduler_settings->attr, size);
              /* Needs to be zero-initialized */
              memset (scheduler_settings->attr, 0, size);
            }
          else
            {
              g_debug ("Failed to get thread scheduler attributes: %s", g_strerror (errsv));
              g_free (scheduler_settings->attr);

              return FALSE;
            }
        }
    }
  while (res == -1);

  /* Try setting them on the current thread to see if any system policies are
   * in place that would disallow doing so */
  res = syscall (SYS_sched_setattr, tid, scheduler_settings->attr, flags);
  if (res == -1)
    {
      int errsv = errno;

      g_debug ("Failed to set thread scheduler attributes: %s", g_strerror (errsv));
      g_free (scheduler_settings->attr);

      return FALSE;
    }

  return TRUE;
#else
  return FALSE;
#endif
}
1245
1246
#if defined(HAVE_SYS_SCHED_GETATTR)
1247
/* Thread entry trampoline used on Linux: applies the requested
 * scheduler attributes to the new thread (which must be done from the
 * thread itself, via its tid) before chaining to the real proxy.
 */
static void *
linux_pthread_proxy (void *data)
{
  GThreadPosix *thread = data;
  static gboolean printed_scheduler_warning = FALSE;  /* (atomic) */

  /* Set scheduler settings first if requested */
  if (thread->scheduler_settings)
    {
      pid_t tid = 0;
      guint flags = 0;
      int res;
      int errsv;

      tid = (pid_t) syscall (SYS_gettid);
      res = syscall (SYS_sched_setattr, tid, thread->scheduler_settings->attr, flags);
      errsv = errno;  /* capture errno immediately after the syscall */
      /* Warn loudly only once process-wide; later failures are demoted
       * to debug messages. */
      if (res == -1 && g_atomic_int_compare_and_exchange (&printed_scheduler_warning, FALSE, TRUE))
        g_critical ("Failed to set scheduler settings: %s", g_strerror (errsv));
      else if (res == -1)
        g_debug ("Failed to set scheduler settings: %s", g_strerror (errsv));
    }

  return thread->proxy (data);
}
1272
#endif
1273
1274
/* Create a new system thread running @proxy (which will eventually
 * invoke @func with @data).  Returns a GRealThread with ref_count 2
 * (one for the caller, one for the thread itself), or %NULL with
 * @error set to %G_THREAD_ERROR_AGAIN if the system is out of thread
 * resources.  Other pthread_create() failures abort the process.
 */
GRealThread *
g_system_thread_new (GThreadFunc proxy,
                     gulong stack_size,
                     const GThreadSchedulerSettings *scheduler_settings,
                     const char *name,
                     GThreadFunc func,
                     gpointer data,
                     GError **error)
{
  GThreadPosix *thread;
  GRealThread *base_thread;
  pthread_attr_t attr;
  gint ret;

  thread = g_slice_new0 (GThreadPosix);
  base_thread = (GRealThread*)thread;
  base_thread->ref_count = 2;
  base_thread->ours = TRUE;
  base_thread->thread.joinable = TRUE;
  base_thread->thread.func = func;
  base_thread->thread.data = data;
  base_thread->name = g_strdup (name);
  thread->scheduler_settings = scheduler_settings;
  thread->proxy = proxy;

  posix_check_cmd (pthread_attr_init (&attr));

#ifdef HAVE_PTHREAD_ATTR_SETSTACKSIZE
  if (stack_size)
    {
#ifdef _SC_THREAD_STACK_MIN
      /* Clamp the requested stack up to the system minimum, if known. */
      long min_stack_size = sysconf (_SC_THREAD_STACK_MIN);
      if (min_stack_size >= 0)
        stack_size = MAX ((gulong) min_stack_size, stack_size);
#endif /* _SC_THREAD_STACK_MIN */
      /* No error check here, because some systems can't do it and
       * we simply don't want threads to fail because of that. */
      pthread_attr_setstacksize (&attr, stack_size);
    }
#endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */

#ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
  if (!scheduler_settings)
    {
      /* While this is the default, better be explicit about it */
      pthread_attr_setinheritsched (&attr, PTHREAD_INHERIT_SCHED);
    }
#endif /* HAVE_PTHREAD_ATTR_SETINHERITSCHED */

#if defined(HAVE_SYS_SCHED_GETATTR)
  /* On Linux, go through the trampoline so scheduler attributes can be
   * applied from inside the new thread. */
  ret = pthread_create (&thread->system_thread, &attr, linux_pthread_proxy, thread);
#else
  ret = pthread_create (&thread->system_thread, &attr, (void* (*)(void*))proxy, thread);
#endif

  posix_check_cmd (pthread_attr_destroy (&attr));

  if (ret == EAGAIN)
    {
      /* Resource exhaustion is the one recoverable failure mode. */
      g_set_error (error, G_THREAD_ERROR, G_THREAD_ERROR_AGAIN,
                   "Error creating thread: %s", g_strerror (ret));
      g_free (thread->thread.name);
      g_slice_free (GThreadPosix, thread);
      return NULL;
    }

  posix_check_err (ret, "pthread_create");

  g_mutex_init (&thread->lock);

  return (GRealThread *) thread;
}
1346
1347
/**
1348
 * g_thread_yield:
1349
 *
1350
 * Causes the calling thread to voluntarily relinquish the CPU, so
1351
 * that other threads can run.
1352
 *
1353
 * This function is often used as a method to make busy wait less evil.
1354
 */
1355
void
g_thread_yield (void)
{
  /* Directly maps onto the POSIX scheduler yield primitive. */
  sched_yield ();
}
1360
1361
void
1362
g_system_thread_wait (GRealThread *thread)
1363
0
{
1364
0
  GThreadPosix *pt = (GThreadPosix *) thread;
1365
1366
0
  g_mutex_lock (&pt->lock);
1367
1368
0
  if (!pt->joined)
1369
0
    {
1370
0
      posix_check_cmd (pthread_join (pt->system_thread, NULL));
1371
0
      pt->joined = TRUE;
1372
0
    }
1373
1374
0
  g_mutex_unlock (&pt->lock);
1375
0
}
1376
1377
/* Terminate the calling thread without a return value. */
void
g_system_thread_exit (void)
{
  pthread_exit (NULL);
}
1382
1383
/* Set the calling thread's name for debuggers/tools, dispatching on
 * whichever pthread naming API configure detected.  A no-op when no
 * variant is available. */
void
g_system_thread_set_name (const gchar *name)
{
#if defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID)
  pthread_setname_np (name); /* on OS X and iOS */
#elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID)
  pthread_setname_np (pthread_self (), name); /* on Linux and Solaris */
#elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID_AND_ARG)
  pthread_setname_np (pthread_self (), "%s", (gchar *) name); /* on NetBSD */
#elif defined(HAVE_PTHREAD_SET_NAME_NP)
  pthread_set_name_np (pthread_self (), name); /* on FreeBSD, DragonFlyBSD, OpenBSD */
#endif
}
1396
1397
/* {{{1 GMutex and GCond futex implementation */
1398
1399
#if defined(USE_NATIVE_MUTEX)
1400
/* We should expand the set of operations available in gatomic once we
1401
 * have better C11 support in GCC in common distributions (ie: 4.9).
1402
 *
1403
 * Before then, let's define a couple of useful things for our own
1404
 * purposes...
1405
 */
1406
1407
/* Acquire/release atomic helpers for the futex-based GMutex/GCond:
 * implemented with C11 <stdatomic.h> when available, otherwise with
 * the GCC 32-bit __atomic builtins.  Both variants operate on the
 * guint stored in mutex->i[0] / cond->i[0]. */
#ifdef HAVE_STDATOMIC_H

#include <stdatomic.h>

#define exchange_acquire(ptr, new) \
  atomic_exchange_explicit((atomic_uint *) (ptr), (new), __ATOMIC_ACQUIRE)
#define compare_exchange_acquire(ptr, old, new) \
  atomic_compare_exchange_strong_explicit((atomic_uint *) (ptr), (old), (new), \
                                          __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)

#define exchange_release(ptr, new) \
  atomic_exchange_explicit((atomic_uint *) (ptr), (new), __ATOMIC_RELEASE)
#define store_release(ptr, new) \
  atomic_store_explicit((atomic_uint *) (ptr), (new), __ATOMIC_RELEASE)

#else

#define exchange_acquire(ptr, new) \
  __atomic_exchange_4((ptr), (new), __ATOMIC_ACQUIRE)
#define compare_exchange_acquire(ptr, old, new) \
  __atomic_compare_exchange_4((ptr), (old), (new), 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)

#define exchange_release(ptr, new) \
  __atomic_exchange_4((ptr), (new), __ATOMIC_RELEASE)
#define store_release(ptr, new) \
  __atomic_store_4((ptr), (new), __ATOMIC_RELEASE)

#endif
1435
1436
/* Our strategy for the mutex is pretty simple:
1437
 *
1438
 *  0: not in use
1439
 *
1440
 *  1: acquired by one thread only, no contention
1441
 *
1442
 *  2: contended
1443
 */
1444
1445
/* The three futex-word states of a GMutex; the numeric ordering
 * matters (EMPTY must be 0 so a zeroed mutex is unlocked). */
typedef enum {
  G_MUTEX_STATE_EMPTY = 0,      /* unlocked */
  G_MUTEX_STATE_OWNED,          /* locked, no waiters */
  G_MUTEX_STATE_CONTENDED,      /* locked, at least one (possible) waiter */
} GMutexState;
1450
1451
 /*
1452
 * As such, attempting to acquire the lock should involve an increment.
1453
 * If we find that the previous value was 0 then we can return
1454
 * immediately.
1455
 *
1456
 * On unlock, we always store 0 to indicate that the lock is available.
1457
 * If the value there was 1 before then we didn't have contention and
1458
 * can return immediately.  If the value was something other than 1 then
1459
 * we have the contended case and need to wake a waiter.
1460
 *
1461
 * If it was not 0 then there is another thread holding it and we must
1462
 * wait.  We must always ensure that we mark a value >1 while we are
1463
 * waiting in order to instruct the holder to do a wake operation on
1464
 * unlock.
1465
 */
1466
1467
void
g_mutex_init (GMutex *mutex)
{
  /* An unlocked mutex is simply the EMPTY (zero) futex word. */
  mutex->i[0] = G_MUTEX_STATE_EMPTY;
}
1472
1473
/* Nothing to free for a futex-based mutex; clearing one that is still
 * locked (or was never initialised) is a programming error, caught here
 * with an abort rather than silently ignored. */
void
g_mutex_clear (GMutex *mutex)
{
  if G_UNLIKELY (mutex->i[0] != G_MUTEX_STATE_EMPTY)
    {
      fprintf (stderr, "g_mutex_clear() called on uninitialised or locked mutex\n");
      g_abort ();
    }
}
1482
1483
/* Contended-lock path, kept out of line so the fast path stays small. */
G_GNUC_NO_INLINE
static void
g_mutex_lock_slowpath (GMutex *mutex)
{
  /* Set to contended.  If it was empty before then we
   * just acquired the lock.
   *
   * Otherwise, sleep for as long as the contended state remains...
   */
  while (exchange_acquire (&mutex->i[0], G_MUTEX_STATE_CONTENDED) != G_MUTEX_STATE_EMPTY)
    {
      /* FUTEX_WAIT only sleeps if the word still equals CONTENDED,
       * so a wake-up between the exchange and the syscall is not lost. */
      g_futex_simple (&mutex->i[0], (gsize) FUTEX_WAIT_PRIVATE,
                      G_MUTEX_STATE_CONTENDED, NULL);
    }
}
1498
1499
/* Out-of-line unlock path: diagnoses unlock-of-unlocked and wakes one
 * waiter after a contended release.  @prev is the futex word's value
 * before the caller stored EMPTY. */
G_GNUC_NO_INLINE
static void
g_mutex_unlock_slowpath (GMutex *mutex,
                         guint   prev)
{
  /* We seem to get better code for the uncontended case by splitting
   * this out...
   */
  if G_UNLIKELY (prev == G_MUTEX_STATE_EMPTY)
    {
      fprintf (stderr, "Attempt to unlock mutex that was not locked\n");
      g_abort ();
    }

  /* prev was CONTENDED: someone may be sleeping, wake exactly one. */
  g_futex_simple (&mutex->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
}
1515
1516
void
g_mutex_lock (GMutex *mutex)
{
  /* empty -> owned and we're done.  Anything else, and we need to wait... */
  if G_UNLIKELY (!g_atomic_int_compare_and_exchange (&mutex->i[0],
                                                     G_MUTEX_STATE_EMPTY,
                                                     G_MUTEX_STATE_OWNED))
    g_mutex_lock_slowpath (mutex);
}
1525
1526
void
g_mutex_unlock (GMutex *mutex)
{
  guint prev;

  /* Release the lock unconditionally; 'prev' tells us whether anyone
   * may have been waiting. */
  prev = exchange_release (&mutex->i[0], G_MUTEX_STATE_EMPTY);

  /* 1-> 0 and we're done.  Anything else and we need to signal... */
  if G_UNLIKELY (prev != G_MUTEX_STATE_OWNED)
    g_mutex_unlock_slowpath (mutex, prev);
}
1537
1538
/* Returns %TRUE iff the lock was acquired without blocking. */
gboolean
g_mutex_trylock (GMutex *mutex)
{
  GMutexState empty = G_MUTEX_STATE_EMPTY;

  /* We don't want to touch the value at all unless we can move it from
   * exactly empty to owned.
   */
  return compare_exchange_acquire (&mutex->i[0], &empty, G_MUTEX_STATE_OWNED);
}
1548
1549
/* Condition variables are implemented in a rather simple way as well.
1550
 * In many ways, futex() as an abstraction is even more ideally suited
1551
 * to condition variables than it is to mutexes.
1552
 *
1553
 * We store a generation counter.  We sample it with the lock held and
1554
 * unlock before sleeping on the futex.
1555
 *
1556
 * Signalling simply involves increasing the counter and making the
1557
 * appropriate futex call.
1558
 *
1559
 * The only thing that is the slightest bit complicated is timed waits
1560
 * because we must convert our absolute time to relative.
1561
 */
1562
1563
void
g_cond_init (GCond *cond)
{
  /* The condition variable is just a generation counter, starting at 0. */
  cond->i[0] = 0;
}
1568
1569
/* Intentionally empty: a futex-based GCond owns no resources. */
void
g_cond_clear (GCond *cond)
{
}
1573
1574
/* Atomically release @mutex and wait for @cond to be signalled.
 *
 * Sampling the generation counter *before* unlocking means a signal
 * that arrives between the unlock and the FUTEX_WAIT changes the word,
 * so the wait returns immediately — no lost wake-ups. */
void
g_cond_wait (GCond  *cond,
             GMutex *mutex)
{
  guint sampled = (guint) g_atomic_int_get (&cond->i[0]);

  g_mutex_unlock (mutex);
  g_futex_simple (&cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, NULL);
  g_mutex_lock (mutex);
}
1584
1585
/* Bump the generation counter and wake (at most) one waiter. */
void
g_cond_signal (GCond *cond)
{
  g_atomic_int_inc (&cond->i[0]);

  g_futex_simple (&cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
}
1592
1593
/* Bump the generation counter and wake every waiter (INT_MAX = all). */
void
g_cond_broadcast (GCond *cond)
{
  g_atomic_int_inc (&cond->i[0]);

  g_futex_simple (&cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) INT_MAX, NULL);
}
1600
1601
/* Wait on @cond (releasing @mutex) until signalled or until the
 * monotonic time @end_time (in microseconds) passes.  Returns %FALSE
 * only on timeout; %TRUE on wake-up.  The mutex is re-acquired before
 * returning in all cases.
 */
gboolean
g_cond_wait_until (GCond  *cond,
                   GMutex *mutex,
                   gint64  end_time)
{
  struct timespec now;
  struct timespec span;

  guint sampled;
  int res;
  gboolean success;

  if (end_time < 0)
    return FALSE;

  /* Convert the absolute monotonic end time into a relative timespec,
   * normalising a negative nanosecond part by borrowing a second. */
  clock_gettime (CLOCK_MONOTONIC, &now);
  span.tv_sec = (end_time / 1000000) - now.tv_sec;
  span.tv_nsec = ((end_time % 1000000) * 1000) - now.tv_nsec;
  if (span.tv_nsec < 0)
    {
      span.tv_nsec += 1000000000;
      span.tv_sec--;
    }

  /* Deadline already passed: report timeout without sleeping. */
  if (span.tv_sec < 0)
    return FALSE;

  /* `struct timespec` as defined by the libc headers does not necessarily
   * have any relation to the one used by the kernel for the `futex` syscall.
   *
   * Specifically, the libc headers might use 64-bit `time_t` while the kernel
   * headers use 32-bit `__kernel_old_time_t` on certain systems.
   *
   * To get around this problem we
   *   a) check if `futex_time64` is available, which only exists on 32-bit
   *      platforms and always uses 64-bit `time_t`.
   *   b) otherwise (or if that returns `ENOSYS`), we call the normal `futex`
   *      syscall with the `struct timespec` used by the kernel, which uses
   *      `__kernel_long_t` for both its fields. We use that instead of
   *      `__kernel_old_time_t` because it is equivalent and available in the
   *      kernel headers for a longer time.
   *
   * Also some 32-bit systems do not define `__NR_futex` at all and only
   * define `__NR_futex_time64`.
   */

  /* Sample the generation counter before unlocking (see g_cond_wait()). */
  sampled = cond->i[0];
  g_mutex_unlock (mutex);

#ifdef __NR_futex_time64
  {
    struct
    {
      gint64 tv_sec;
      gint64 tv_nsec;
    } span_arg;

    span_arg.tv_sec = span.tv_sec;
    span_arg.tv_nsec = span.tv_nsec;

    res = syscall (__NR_futex_time64, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, &span_arg);

    /* If the syscall does not exist (`ENOSYS`), we retry again below with the
     * normal `futex` syscall. This can happen if newer kernel headers are
     * used than the kernel that is actually running.
     */
#  ifdef __NR_futex
    if (res >= 0 || errno != ENOSYS)
#  endif /* defined(__NR_futex) */
      {
        success = (res < 0 && errno == ETIMEDOUT) ? FALSE : TRUE;
        g_mutex_lock (mutex);

        return success;
      }
  }
#endif

#ifdef __NR_futex
  {
    struct
    {
      __kernel_long_t tv_sec;
      __kernel_long_t tv_nsec;
    } span_arg;

    /* Make sure to only ever call this if the end time actually fits into the target type */
    if (G_UNLIKELY (sizeof (__kernel_long_t) < 8 && span.tv_sec > G_MAXINT32))
      g_error ("%s: Can’t wait for more than %us", G_STRFUNC, G_MAXINT32);

    span_arg.tv_sec = span.tv_sec;
    span_arg.tv_nsec = span.tv_nsec;

    res = syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, &span_arg);
    success = (res < 0 && errno == ETIMEDOUT) ? FALSE : TRUE;
    g_mutex_lock (mutex);

    return success;
  }
#endif /* defined(__NR_futex) */

  /* We can't end up here because of the checks above */
  g_assert_not_reached ();
}
1705
1706
#endif
1707
1708
  /* {{{1 Epilogue */
1709
/* vim:set foldmethod=marker: */