/src/glib-2.80.0/glib/gthread.c
Line | Count | Source |
1 | | /* GLIB - Library of useful routines for C programming |
2 | | * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald |
3 | | * |
4 | | * gthread.c: MT safety related functions |
5 | | * Copyright 1998 Sebastian Wilhelmi; University of Karlsruhe |
6 | | * Owen Taylor |
7 | | * |
8 | | * SPDX-License-Identifier: LGPL-2.1-or-later |
9 | | * |
10 | | * This library is free software; you can redistribute it and/or |
11 | | * modify it under the terms of the GNU Lesser General Public |
12 | | * License as published by the Free Software Foundation; either |
13 | | * version 2.1 of the License, or (at your option) any later version. |
14 | | * |
15 | | * This library is distributed in the hope that it will be useful, |
16 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
17 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
18 | | * Lesser General Public License for more details. |
19 | | * |
20 | | * You should have received a copy of the GNU Lesser General Public |
21 | | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
22 | | */ |
23 | | |
24 | | /* Prelude {{{1 ----------------------------------------------------------- */ |
25 | | |
26 | | /* |
27 | | * Modified by the GLib Team and others 1997-2000. See the AUTHORS |
28 | | * file for a list of people on the GLib Team. See the ChangeLog |
29 | | * files for a list of changes. These files are distributed with |
30 | | * GLib at ftp://ftp.gtk.org/pub/gtk/. |
31 | | */ |
32 | | |
33 | | /* |
34 | | * MT safe |
35 | | */ |
36 | | |
37 | | /* implement gthread.h's inline functions */ |
38 | | #define G_IMPLEMENT_INLINES 1 |
39 | | #define __G_THREAD_C__ |
40 | | |
41 | | #include "config.h" |
42 | | |
43 | | #include "gthread.h" |
44 | | #include "gthreadprivate.h" |
45 | | |
46 | | #include <string.h> |
47 | | |
48 | | #ifdef G_OS_UNIX |
49 | | #include <unistd.h> |
50 | | |
51 | | #if defined(THREADS_POSIX) && defined(HAVE_PTHREAD_GETAFFINITY_NP) |
52 | | #include <pthread.h> |
53 | | #endif |
54 | | #endif /* G_OS_UNIX */ |
55 | | |
56 | | #ifndef G_OS_WIN32 |
57 | | #include <sys/time.h> |
58 | | #include <time.h> |
59 | | #else |
60 | | #include <windows.h> |
61 | | #endif /* G_OS_WIN32 */ |
62 | | |
63 | | #include "gslice.h" |
64 | | #include "gstrfuncs.h" |
65 | | #include "gtestutils.h" |
66 | | #include "glib_trace.h" |
67 | | #include "gtrace-private.h" |
68 | | |
69 | | /* G_LOCK Documentation {{{1 ---------------------------------------------- */ |
70 | | |
71 | | /** |
72 | | * G_LOCK_DEFINE: |
73 | | * @name: the name of the lock |
74 | | * |
75 | | * The `G_LOCK_` macros provide a convenient interface to #GMutex. |
76 | | * %G_LOCK_DEFINE defines a lock. It can appear in any place where |
77 | | * variable definitions may appear in programs, i.e. in the first block |
78 | | * of a function or outside of functions. The @name parameter will be |
79 | | * mangled to get the name of the #GMutex. This means that you |
80 | | * can use names of existing variables as the parameter - e.g. the name |
81 | | * of the variable you intend to protect with the lock. |
82 | | * |
83 | | * Here is an example of using the `G_LOCK` convenience macros in a |
84 | | * give_me_next_number() function: |
85 | | * |
86 | | * |[<!-- language="C" --> |
87 | | * G_LOCK_DEFINE (current_number); |
88 | | * |
89 | | * int |
90 | | * give_me_next_number (void) |
91 | | * { |
92 | | * static int current_number = 0; |
93 | | * int ret_val; |
94 | | * |
95 | | * G_LOCK (current_number); |
96 | | * ret_val = current_number = calc_next_number (current_number); |
97 | | * G_UNLOCK (current_number); |
98 | | * |
99 | | * return ret_val; |
100 | | * } |
101 | | * ]| |
102 | | */ |
103 | | |
104 | | /** |
105 | | * G_LOCK_DEFINE_STATIC: |
106 | | * @name: the name of the lock |
107 | | * |
108 | | * This works like %G_LOCK_DEFINE, but it creates a static object. |
109 | | */ |
110 | | |
111 | | /** |
112 | | * G_LOCK_EXTERN: |
113 | | * @name: the name of the lock |
114 | | * |
115 | | * This declares a lock that is defined with %G_LOCK_DEFINE in another |
116 | | * module. |
117 | | */ |
118 | | |
119 | | /** |
120 | | * G_LOCK: |
121 | | * @name: the name of the lock |
122 | | * |
123 | | * Works like g_mutex_lock(), but for a lock defined with |
124 | | * %G_LOCK_DEFINE. |
125 | | */ |
126 | | |
127 | | /** |
128 | | * G_TRYLOCK: |
129 | | * @name: the name of the lock |
130 | | * |
131 | | * Works like g_mutex_trylock(), but for a lock defined with |
132 | | * %G_LOCK_DEFINE. |
133 | | * |
134 | | * Returns: %TRUE if the lock could be locked. |
135 | | */ |
136 | | |
137 | | /** |
138 | | * G_UNLOCK: |
139 | | * @name: the name of the lock |
140 | | * |
141 | | * Works like g_mutex_unlock(), but for a lock defined with |
142 | | * %G_LOCK_DEFINE. |
143 | | */ |
144 | | |
145 | | /** |
146 | | * G_AUTO_LOCK: |
147 | | * @name: the name of the lock |
148 | | * |
149 | | * Works like [func@GLib.MUTEX_AUTO_LOCK], but for a lock defined with |
150 | | * [func@GLib.LOCK_DEFINE]. |
151 | | * |
152 | | * This feature is only supported on GCC and clang. This macro is not defined on |
153 | | * other compilers and should not be used in programs that are intended to be |
154 | | * portable to those compilers. |
155 | | * |
156 | | * Since: 2.80 |
157 | | */ |
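 | | |
 | | /* A minimal sketch (not part of the original sources) of G_AUTO_LOCK guarding |
 | |  * a critical section: the lock is acquired when the guard is declared and is |
 | |  * released automatically when the enclosing scope is left, even on an early |
 | |  * return. calc_next_number() is a hypothetical helper. |
 | |  */ |
 | | G_LOCK_DEFINE_STATIC (next_number_lock); |
 | | |
 | | static int |
 | | give_me_next_number_auto (void) |
 | | { |
 | |   static int current_number = 0; |
 | | |
 | |   G_AUTO_LOCK (next_number_lock); |
 | | |
 | |   current_number = calc_next_number (current_number); |
 | |   return current_number; |
 | | } |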
158 | | |
159 | | /* GMutex Documentation {{{1 ------------------------------------------ */ |
160 | | |
161 | | /** |
162 | | * GMutex: |
163 | | * |
164 | | * The #GMutex struct is an opaque data structure to represent a mutex |
165 | | * (mutual exclusion). It can be used to protect data against shared |
166 | | * access. |
167 | | * |
168 | | * Take for example the following function: |
169 | | * |[<!-- language="C" --> |
170 | | * int |
171 | | * give_me_next_number (void) |
172 | | * { |
173 | | * static int current_number = 0; |
174 | | * |
175 | | * // now do a very complicated calculation to calculate the new |
176 | | * // number, this might for example be a random number generator |
177 | | * current_number = calc_next_number (current_number); |
178 | | * |
179 | | * return current_number; |
180 | | * } |
181 | | * ]| |
182 | | * It is easy to see that this won't work in a multi-threaded |
183 | | * application. Here, current_number must be protected against shared |
184 | | * access. A #GMutex can be used as a solution to this problem: |
185 | | * |[<!-- language="C" --> |
186 | | * int |
187 | | * give_me_next_number (void) |
188 | | * { |
189 | | * static GMutex mutex; |
190 | | * static int current_number = 0; |
191 | | * int ret_val; |
192 | | * |
193 | | * g_mutex_lock (&mutex); |
194 | | * ret_val = current_number = calc_next_number (current_number); |
195 | | * g_mutex_unlock (&mutex); |
196 | | * |
197 | | * return ret_val; |
198 | | * } |
199 | | * ]| |
200 | | * Notice that the #GMutex is not initialised to any particular value. |
201 | | * Its placement in static storage ensures that it will be initialised |
202 | | * to all-zeros, which is appropriate. |
203 | | * |
204 | | * If a #GMutex is placed in other contexts (eg: embedded in a struct) |
205 | | * then it must be explicitly initialised using g_mutex_init(). |
206 | | * |
207 | | * A #GMutex should only be accessed via g_mutex_ functions. |
208 | | */ |
209 | | |
210 | | /* GRecMutex Documentation {{{1 -------------------------------------- */ |
211 | | |
212 | | /** |
213 | | * GRecMutex: |
214 | | * |
215 | | * The GRecMutex struct is an opaque data structure to represent a |
216 | | * recursive mutex. It is similar to a #GMutex with the difference |
217 | | * that it is possible to lock a GRecMutex multiple times in the same |
218 | | * thread without deadlock. When doing so, care has to be taken to |
219 | | * unlock the recursive mutex as often as it has been locked. |
220 | | * |
221 | | * If a #GRecMutex is allocated in static storage then it can be used |
222 | | * without initialisation. Otherwise, you should call |
223 | | * g_rec_mutex_init() on it and g_rec_mutex_clear() when done. |
224 | | * |
225 | | * A GRecMutex should only be accessed with the |
226 | | * g_rec_mutex_ functions. |
227 | | * |
228 | | * Since: 2.32 |
229 | | */ |
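 | | |
 | | /* A minimal sketch (assumed code, not from the original sources) of why a |
 | |  * recursive mutex is useful: cache_insert() calls cache_lookup() while it is |
 | |  * already holding cache_mutex, which would deadlock with a plain #GMutex but |
 | |  * is fine with a #GRecMutex, as long as every lock is matched by an unlock. |
 | |  */ |
 | | static GRecMutex cache_mutex; |
 | | static GHashTable *cache; |
 | | |
 | | static gpointer |
 | | cache_lookup (const char *key) |
 | | { |
 | |   gpointer value; |
 | | |
 | |   g_rec_mutex_lock (&cache_mutex); |
 | |   value = cache ? g_hash_table_lookup (cache, key) : NULL; |
 | |   g_rec_mutex_unlock (&cache_mutex); |
 | | |
 | |   return value; |
 | | } |
 | | |
 | | static void |
 | | cache_insert (const char *key, gpointer value) |
 | | { |
 | |   g_rec_mutex_lock (&cache_mutex); |
 | | |
 | |   if (cache == NULL) |
 | |     cache = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); |
 | | |
 | |   if (cache_lookup (key) == NULL)   /* re-locks cache_mutex in this thread */ |
 | |     g_hash_table_insert (cache, g_strdup (key), value); |
 | | |
 | |   g_rec_mutex_unlock (&cache_mutex); |
 | | } |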
230 | | |
231 | | /* GRWLock Documentation {{{1 ---------------------------------------- */ |
232 | | |
233 | | /** |
234 | | * GRWLock: |
235 | | * |
236 | | * The GRWLock struct is an opaque data structure to represent a |
237 | | * reader-writer lock. It is similar to a #GMutex in that it allows |
238 | | * multiple threads to coordinate access to a shared resource. |
239 | | * |
240 | | * The difference to a mutex is that a reader-writer lock discriminates |
241 | | * between read-only ('reader') and full ('writer') access. While only |
242 | | * one thread at a time is allowed write access (by holding the 'writer' |
243 | | * lock via g_rw_lock_writer_lock()), multiple threads can gain |
244 | | * simultaneous read-only access (by holding the 'reader' lock via |
245 | | * g_rw_lock_reader_lock()). |
246 | | * |
247 | | * It is unspecified whether readers or writers have priority in acquiring the |
248 | | * lock when a reader already holds the lock and a writer is queued to acquire |
249 | | * it. |
250 | | * |
251 | | * Here is an example for an array with access functions: |
252 | | * |[<!-- language="C" --> |
253 | | * GRWLock lock; |
254 | | * GPtrArray *array; |
255 | | * |
256 | | * gpointer |
257 | | * my_array_get (guint index) |
258 | | * { |
259 | | * gpointer retval = NULL; |
260 | | * |
261 | | * if (!array) |
262 | | * return NULL; |
263 | | * |
264 | | * g_rw_lock_reader_lock (&lock); |
265 | | * if (index < array->len) |
266 | | * retval = g_ptr_array_index (array, index); |
267 | | * g_rw_lock_reader_unlock (&lock); |
268 | | * |
269 | | * return retval; |
270 | | * } |
271 | | * |
272 | | * void |
273 | | * my_array_set (guint index, gpointer data) |
274 | | * { |
275 | | * g_rw_lock_writer_lock (&lock); |
276 | | * |
277 | | * if (!array) |
278 | | * array = g_ptr_array_new (); |
279 | | * |
280 | | * if (index >= array->len) |
281 | | * g_ptr_array_set_size (array, index+1); |
282 | | * g_ptr_array_index (array, index) = data; |
283 | | * |
284 | | * g_rw_lock_writer_unlock (&lock); |
285 | | * } |
286 | | * ]| |
287 | | * This example shows an array which can be accessed by many readers |
288 | | * (the my_array_get() function) simultaneously, whereas the writers |
289 | | * (the my_array_set() function) will only be allowed one at a time |
290 | | * and only if no readers currently access the array. This is because |
291 | | * of the potentially dangerous resizing of the array. With the lock in |
292 | | * place, using these functions is fully multi-thread safe. |
293 | | * |
294 | | * If a #GRWLock is allocated in static storage then it can be used |
295 | | * without initialisation. Otherwise, you should call |
296 | | * g_rw_lock_init() on it and g_rw_lock_clear() when done. |
297 | | * |
298 | | * A GRWLock should only be accessed with the g_rw_lock_ functions. |
299 | | * |
300 | | * Since: 2.32 |
301 | | */ |
302 | | |
303 | | /* GCond Documentation {{{1 ------------------------------------------ */ |
304 | | |
305 | | /** |
306 | | * GCond: |
307 | | * |
308 | | * The #GCond struct is an opaque data structure that represents a |
309 | | * condition. Threads can block on a #GCond if they find a certain |
310 | | * condition to be false. If other threads change the state of this |
311 | | * condition they signal the #GCond, and that causes the waiting |
312 | | * threads to be woken up. |
313 | | * |
314 | | * Consider the following example of a shared variable. One or more |
315 | | * threads can wait for data to be published to the variable and when |
316 | | * another thread publishes the data, it can signal one of the waiting |
317 | | * threads to wake up to collect the data. |
318 | | * |
319 | | * Here is an example for using GCond to block a thread until a condition |
320 | | * is satisfied: |
321 | | * |[<!-- language="C" --> |
322 | | * gpointer current_data = NULL; |
323 | | * GMutex data_mutex; |
324 | | * GCond data_cond; |
325 | | * |
326 | | * void |
327 | | * push_data (gpointer data) |
328 | | * { |
329 | | * g_mutex_lock (&data_mutex); |
330 | | * current_data = data; |
331 | | * g_cond_signal (&data_cond); |
332 | | * g_mutex_unlock (&data_mutex); |
333 | | * } |
334 | | * |
335 | | * gpointer |
336 | | * pop_data (void) |
337 | | * { |
338 | | * gpointer data; |
339 | | * |
340 | | * g_mutex_lock (&data_mutex); |
341 | | * while (!current_data) |
342 | | * g_cond_wait (&data_cond, &data_mutex); |
343 | | * data = current_data; |
344 | | * current_data = NULL; |
345 | | * g_mutex_unlock (&data_mutex); |
346 | | * |
347 | | * return data; |
348 | | * } |
349 | | * ]| |
350 | | * Whenever a thread calls pop_data() now, it will wait until |
351 | | * current_data is non-%NULL, i.e. until some other thread |
352 | | * has called push_data(). |
353 | | * |
354 | | * The example shows that use of a condition variable must always be |
355 | | * paired with a mutex. Without the use of a mutex, there would be a |
356 | | * race between the check of @current_data by the while loop in |
357 | | * pop_data() and waiting. Specifically, another thread could set |
358 | | * @current_data after the check, and signal the cond (with nobody |
359 | | * waiting on it) before the first thread goes to sleep. #GCond is |
360 | | * specifically useful for its ability to release the mutex and go |
361 | | * to sleep atomically. |
362 | | * |
363 | | * It is also important to use the g_cond_wait() and g_cond_wait_until() |
364 | | * functions only inside a loop which checks for the condition to be |
365 | | * true. See g_cond_wait() for an explanation of why the condition may |
366 | | * not be true even after it returns. |
367 | | * |
368 | | * If a #GCond is allocated in static storage then it can be used |
369 | | * without initialisation. Otherwise, you should call g_cond_init() |
370 | | * on it and g_cond_clear() when done. |
371 | | * |
372 | | * A #GCond should only be accessed via the g_cond_ functions. |
373 | | */ |
374 | | |
375 | | /* GThread Documentation {{{1 ---------------------------------------- */ |
376 | | |
377 | | /** |
378 | | * GThread: |
379 | | * |
380 | | * The #GThread struct represents a running thread. This struct |
381 | | * is returned by g_thread_new() or g_thread_try_new(). You can |
382 | | * obtain the #GThread struct representing the current thread by |
383 | | * calling g_thread_self(). |
384 | | * |
385 | | * GThread is refcounted, see g_thread_ref() and g_thread_unref(). |
386 | | * The thread represented by it holds a reference while it is running, |
387 | | * and g_thread_join() consumes the reference that it is given, so |
388 | | * it is normally not necessary to manage GThread references |
389 | | * explicitly. |
390 | | * |
391 | | * The structure is opaque -- none of its fields may be directly |
392 | | * accessed. |
393 | | */ |
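 | | |
 | | /* A minimal sketch (assumed code, not from the original sources) of the |
 | |  * lifecycle described above: g_thread_new() returns a reference to the new |
 | |  * #GThread and g_thread_join() consumes it, so no explicit unref is needed. |
 | |  */ |
 | | static gpointer |
 | | count_words (gpointer data) |
 | | { |
 | |   char **tokens = g_strsplit ((const char *) data, " ", -1); |
 | |   guint n_words = g_strv_length (tokens); |
 | | |
 | |   g_strfreev (tokens); |
 | |   return GUINT_TO_POINTER (n_words); |
 | | } |
 | | |
 | | static void |
 | | run_word_counter (void) |
 | | { |
 | |   GThread *thread = g_thread_new ("word-counter", count_words, |
 | |                                   (gpointer) "some text to count"); |
 | |   guint n_words = GPOINTER_TO_UINT (g_thread_join (thread)); |
 | | |
 | |   g_print ("%u words\n", n_words); |
 | | } |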
394 | | |
395 | | /** |
396 | | * GThreadFunc: |
397 | | * @data: data passed to the thread |
398 | | * |
399 | | * Specifies the type of the @func functions passed to g_thread_new() |
400 | | * or g_thread_try_new(). |
401 | | * |
402 | | * Returns: the return value of the thread |
403 | | */ |
404 | | |
405 | | /** |
406 | | * g_thread_supported: |
407 | | * |
408 | | * This macro returns %TRUE if the thread system is initialized, |
409 | | * and %FALSE if it is not. |
410 | | * |
411 | | * For language bindings, g_thread_get_initialized() provides |
412 | | * the same functionality as a function. |
413 | | * |
414 | | * Returns: %TRUE if the thread system is initialized |
415 | | */ |
416 | | |
417 | | /* GThreadError {{{1 ------------------------------------------------------- */ |
418 | | /** |
419 | | * GThreadError: |
420 | | * @G_THREAD_ERROR_AGAIN: a thread couldn't be created due to resource |
421 | | * shortage. Try again later. |
422 | | * |
423 | | * Possible errors of thread related functions. |
424 | | **/ |
425 | | |
426 | | /** |
427 | | * G_THREAD_ERROR: |
428 | | * |
429 | | * The error domain of the GLib thread subsystem. |
430 | | **/ |
431 | | G_DEFINE_QUARK (g_thread_error, g_thread_error) |
432 | | |
433 | | /* Local Data {{{1 -------------------------------------------------------- */ |
434 | | |
435 | | static GMutex g_once_mutex; |
436 | | static GCond g_once_cond; |
437 | | static GSList *g_once_init_list = NULL; |
438 | | |
439 | | static guint g_thread_n_created_counter = 0; /* (atomic) */ |
440 | | |
441 | | static void g_thread_cleanup (gpointer data); |
442 | | static GPrivate g_thread_specific_private = G_PRIVATE_INIT (g_thread_cleanup); |
443 | | |
444 | | /* |
445 | | * g_private_set_alloc0: |
446 | | * @key: a #GPrivate |
447 | | * @size: size of the allocation, in bytes |
448 | | * |
449 | | * Sets the thread local variable @key to have a newly-allocated and zero-filled |
450 | | * value of given @size, and returns a pointer to that memory. Allocations made |
451 | | * using this API will be suppressed in valgrind: it is intended to be used for |
452 | | * one-time allocations which are known to be leaked, such as those for |
453 | | * per-thread initialisation data. Otherwise, this function behaves the same as |
454 | | * g_private_set(). |
455 | | * |
456 | | * Returns: (transfer full): new thread-local heap allocation of size @size |
457 | | * Since: 2.60 |
458 | | */ |
459 | | /*< private >*/ |
460 | | gpointer |
461 | | g_private_set_alloc0 (GPrivate *key, |
462 | | gsize size) |
463 | 1 | { |
464 | 1 | gpointer allocated = g_malloc0 (size); |
465 | | |
466 | 1 | g_private_set (key, allocated); |
467 | | |
468 | 1 | return g_steal_pointer (&allocated); |
469 | 1 | } |
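 | | |
 | | /* A minimal sketch (assumed code, not from the original sources) of how this |
 | |  * private helper can be used: lazily allocate zero-filled per-thread storage |
 | |  * the first time a thread needs it and reuse it afterwards. The key name and |
 | |  * buffer size are illustrative only. |
 | |  */ |
 | | static GPrivate scratch_key = G_PRIVATE_INIT (NULL); |
 | | |
 | | static char * |
 | | get_thread_scratch (void) |
 | | { |
 | |   char *buf = g_private_get (&scratch_key); |
 | | |
 | |   if (buf == NULL) |
 | |     buf = g_private_set_alloc0 (&scratch_key, 256); |
 | | |
 | |   return buf; |
 | | } |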
470 | | |
471 | | /* GOnce {{{1 ------------------------------------------------------------- */ |
472 | | |
473 | | /** |
474 | | * GOnce: |
475 | | * @status: the status of the #GOnce |
476 | | * @retval: the value returned by the call to the function, if @status |
477 | | * is %G_ONCE_STATUS_READY |
478 | | * |
479 | | * A #GOnce struct controls a one-time initialization function. Any |
480 | | * one-time initialization function must have its own unique #GOnce |
481 | | * struct. |
482 | | * |
483 | | * Since: 2.4 |
484 | | */ |
485 | | |
486 | | /** |
487 | | * G_ONCE_INIT: |
488 | | * |
489 | | * A #GOnce must be initialized with this macro before it can be used. |
490 | | * |
491 | | * |[<!-- language="C" --> |
492 | | * GOnce my_once = G_ONCE_INIT; |
493 | | * ]| |
494 | | * |
495 | | * Since: 2.4 |
496 | | */ |
497 | | |
498 | | /** |
499 | | * GOnceStatus: |
500 | | * @G_ONCE_STATUS_NOTCALLED: the function has not been called yet. |
501 | | * @G_ONCE_STATUS_PROGRESS: the function call is currently in progress. |
502 | | * @G_ONCE_STATUS_READY: the function has been called. |
503 | | * |
504 | | * The possible statuses of a one-time initialization function |
505 | | * controlled by a #GOnce struct. |
506 | | * |
507 | | * Since: 2.4 |
508 | | */ |
509 | | |
510 | | /** |
511 | | * g_once: |
512 | | * @once: a #GOnce structure |
513 | | * @func: the #GThreadFunc function associated to @once. This function |
514 | | * is called only once, regardless of the number of times it and |
515 | | * its associated #GOnce struct are passed to g_once(). |
516 | | * @arg: data to be passed to @func |
517 | | * |
518 | | * The first call to this routine by a process with a given #GOnce |
519 | | * struct calls @func with the given argument. Thereafter, subsequent |
520 | | * calls to g_once() with the same #GOnce struct do not call @func |
521 | | * again, but return the stored result of the first call. On return |
522 | | * from g_once(), the status of @once will be %G_ONCE_STATUS_READY. |
523 | | * |
524 | | * For example, a mutex or a thread-specific data key must be created |
525 | | * exactly once. In a threaded environment, calling g_once() ensures |
526 | | * that the initialization is serialized across multiple threads. |
527 | | * |
528 | | * Calling g_once() recursively on the same #GOnce struct in |
529 | | * @func will lead to a deadlock. |
530 | | * |
531 | | * |[<!-- language="C" --> |
532 | | * gpointer |
533 | | * get_debug_flags (void) |
534 | | * { |
535 | | * static GOnce my_once = G_ONCE_INIT; |
536 | | * |
537 | | * g_once (&my_once, parse_debug_flags, NULL); |
538 | | * |
539 | | * return my_once.retval; |
540 | | * } |
541 | | * ]| |
542 | | * |
543 | | * Since: 2.4 |
544 | | */ |
545 | | gpointer |
546 | | g_once_impl (GOnce *once, |
547 | | GThreadFunc func, |
548 | | gpointer arg) |
549 | 0 | { |
550 | 0 | g_mutex_lock (&g_once_mutex); |
551 | | |
552 | 0 | while (once->status == G_ONCE_STATUS_PROGRESS) |
553 | 0 | g_cond_wait (&g_once_cond, &g_once_mutex); |
554 | | |
555 | 0 | if (once->status != G_ONCE_STATUS_READY) |
556 | 0 | { |
557 | 0 | gpointer retval; |
558 | | |
559 | 0 | once->status = G_ONCE_STATUS_PROGRESS; |
560 | 0 | g_mutex_unlock (&g_once_mutex); |
561 | | |
562 | 0 | retval = func (arg); |
563 | | |
564 | 0 | g_mutex_lock (&g_once_mutex); |
565 | | /* We prefer the new C11-style atomic extension of GCC if available. If not, |
566 | | * fall back to always locking. */ |
567 | 0 | #if defined(G_ATOMIC_LOCK_FREE) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) && defined(__ATOMIC_SEQ_CST) |
568 | | /* Only the second store needs to be atomic, as the two writes are related |
569 | | * by a happens-before relationship here. */ |
570 | 0 | once->retval = retval; |
571 | 0 | __atomic_store_n (&once->status, G_ONCE_STATUS_READY, __ATOMIC_RELEASE); |
572 | | #else |
573 | | once->retval = retval; |
574 | | once->status = G_ONCE_STATUS_READY; |
575 | | #endif |
576 | 0 | g_cond_broadcast (&g_once_cond); |
577 | 0 | } |
578 | | |
579 | 0 | g_mutex_unlock (&g_once_mutex); |
580 | | |
581 | 0 | return once->retval; |
582 | 0 | } |
583 | | |
584 | | /** |
585 | | * g_once_init_enter: |
586 | | * @location: (inout) (not optional): location of a static initializable variable |
587 | | * containing 0 |
588 | | * |
589 | | * Function to be called when starting a critical initialization |
590 | | * section. The argument @location must point to a static |
591 | | * 0-initialized variable that will be set to a value other than 0 at |
592 | | * the end of the initialization section. In combination with |
593 | | * g_once_init_leave() and the unique address @location, it can |
594 | | * be ensured that an initialization section will be executed only once |
595 | | * during a program's life time, and that concurrent threads are |
596 | | * blocked until initialization completed. To be used in constructs |
597 | | * like this: |
598 | | * |
599 | | * |[<!-- language="C" --> |
600 | | * static gsize initialization_value = 0; |
601 | | * |
602 | | * if (g_once_init_enter (&initialization_value)) |
603 | | * { |
604 | | * gsize setup_value = 42; // initialization code here |
605 | | * |
606 | | * g_once_init_leave (&initialization_value, setup_value); |
607 | | * } |
608 | | * |
609 | | * // use initialization_value here |
610 | | * ]| |
611 | | * |
612 | | * While @location has a `volatile` qualifier, this is a historical artifact and |
613 | | * the pointer passed to it should not be `volatile`. |
614 | | * |
615 | | * Returns: %TRUE if the initialization section should be entered, |
616 | | * %FALSE otherwise (after blocking until initialization has completed) |
617 | | * |
618 | | * Since: 2.14 |
619 | | */ |
620 | | gboolean |
621 | | (g_once_init_enter) (volatile void *location) |
622 | 48 | { |
623 | 48 | gsize *value_location = (gsize *) location; |
624 | 48 | gboolean need_init = FALSE; |
625 | 48 | g_mutex_lock (&g_once_mutex); |
626 | 48 | if (g_atomic_pointer_get (value_location) == 0) |
627 | 48 | { |
628 | 48 | if (!g_slist_find (g_once_init_list, (void*) value_location)) |
629 | 48 | { |
630 | 48 | need_init = TRUE; |
631 | 48 | g_once_init_list = g_slist_prepend (g_once_init_list, (void*) value_location); |
632 | 48 | } |
633 | 0 | else |
634 | 0 | do |
635 | 0 | g_cond_wait (&g_once_cond, &g_once_mutex); |
636 | 0 | while (g_slist_find (g_once_init_list, (void*) value_location)); |
637 | 48 | } |
638 | 48 | g_mutex_unlock (&g_once_mutex); |
639 | 48 | return need_init; |
640 | 48 | } |
641 | | |
642 | | /** |
643 | | * g_once_init_enter_pointer: |
644 | | * @location: (not nullable): location of a static initializable variable |
645 | | * containing `NULL` |
646 | | * |
647 | | * This function behaves in the same way as g_once_init_enter(), but |
648 | | * can be used to initialize pointers (or #guintptr) instead of #gsize. |
649 | | * |
650 | | * |[<!-- language="C" --> |
651 | | * static MyStruct *interesting_struct = NULL; |
652 | | * |
653 | | * if (g_once_init_enter_pointer (&interesting_struct)) |
654 | | * { |
655 | | * MyStruct *setup_value = allocate_my_struct (); // initialization code here |
656 | | * |
657 | | * g_once_init_leave_pointer (&interesting_struct, g_steal_pointer (&setup_value)); |
658 | | * } |
659 | | * |
660 | | * // use interesting_struct here |
661 | | * ]| |
662 | | * |
663 | | * Returns: %TRUE if the initialization section should be entered, |
664 | | * %FALSE otherwise (after blocking until initialization has completed) |
665 | | * |
666 | | * Since: 2.80 |
667 | | */ |
668 | | gboolean |
669 | | (g_once_init_enter_pointer) (gpointer location) |
670 | 41 | { |
671 | 41 | gpointer *value_location = (gpointer *) location; |
672 | 41 | gboolean need_init = FALSE; |
673 | 41 | g_mutex_lock (&g_once_mutex); |
674 | 41 | if (g_atomic_pointer_get (value_location) == 0) |
675 | 41 | { |
676 | 41 | if (!g_slist_find (g_once_init_list, (void *) value_location)) |
677 | 41 | { |
678 | 41 | need_init = TRUE; |
679 | 41 | g_once_init_list = g_slist_prepend (g_once_init_list, (void *) value_location); |
680 | 41 | } |
681 | 0 | else |
682 | 0 | do |
683 | 0 | g_cond_wait (&g_once_cond, &g_once_mutex); |
684 | 0 | while (g_slist_find (g_once_init_list, (void *) value_location)); |
685 | 41 | } |
686 | 41 | g_mutex_unlock (&g_once_mutex); |
687 | 41 | return need_init; |
688 | 41 | } |
689 | | |
690 | | /** |
691 | | * g_once_init_leave: |
692 | | * @location: (inout) (not optional): location of a static initializable variable |
693 | | * containing 0 |
694 | | * @result: new non-0 value for *@location |
695 | | * |
696 | | * Counterpart to g_once_init_enter(). Expects a location of a static |
697 | | * 0-initialized initialization variable, and an initialization value |
698 | | * other than 0. Sets the variable to the initialization value, and |
699 | | * releases concurrent threads blocking in g_once_init_enter() on this |
700 | | * initialization variable. |
701 | | * |
702 | | * While @location has a `volatile` qualifier, this is a historical artifact and |
703 | | * the pointer passed to it should not be `volatile`. |
704 | | * |
705 | | * Since: 2.14 |
706 | | */ |
707 | | void |
708 | | (g_once_init_leave) (volatile void *location, |
709 | | gsize result) |
710 | 48 | { |
711 | 48 | gsize *value_location = (gsize *) location; |
712 | 48 | gsize old_value; |
713 | | |
714 | 48 | g_return_if_fail (result != 0); |
715 | | |
716 | 48 | old_value = (gsize) g_atomic_pointer_exchange (value_location, result); |
717 | 48 | g_return_if_fail (old_value == 0); |
718 | | |
719 | 48 | g_mutex_lock (&g_once_mutex); |
720 | 48 | g_return_if_fail (g_once_init_list != NULL); |
721 | 48 | g_once_init_list = g_slist_remove (g_once_init_list, (void*) value_location); |
722 | 48 | g_cond_broadcast (&g_once_cond); |
723 | 48 | g_mutex_unlock (&g_once_mutex); |
724 | 48 | } |
725 | | |
726 | | /** |
727 | | * g_once_init_leave_pointer: |
728 | | * @location: (not nullable): location of a static initializable variable |
729 | | * containing `NULL` |
730 | | * @result: new non-`NULL` value for `*location` |
731 | | * |
732 | | * Counterpart to g_once_init_enter_pointer(). Expects a location of a static |
733 | | * `NULL`-initialized initialization variable, and an initialization value |
734 | | * other than `NULL`. Sets the variable to the initialization value, and |
735 | | * releases concurrent threads blocking in g_once_init_enter_pointer() on this |
736 | | * initialization variable. |
737 | | * |
738 | | * This function behaves in the same way as g_once_init_leave(), but |
739 | | * can be used to initialize pointers (or #guintptr) instead of #gsize. |
740 | | * |
741 | | * Since: 2.80 |
742 | | */ |
743 | | void |
744 | | (g_once_init_leave_pointer) (gpointer location, |
745 | | gpointer result) |
746 | 41 | { |
747 | 41 | gpointer *value_location = (gpointer *) location; |
748 | 41 | gpointer old_value; |
749 | | |
750 | 41 | g_return_if_fail (result != 0); |
751 | | |
752 | 41 | old_value = g_atomic_pointer_exchange (value_location, result); |
753 | 41 | g_return_if_fail (old_value == 0); |
754 | | |
755 | 41 | g_mutex_lock (&g_once_mutex); |
756 | 41 | g_return_if_fail (g_once_init_list != NULL); |
757 | 41 | g_once_init_list = g_slist_remove (g_once_init_list, (void *) value_location); |
758 | 41 | g_cond_broadcast (&g_once_cond); |
759 | 41 | g_mutex_unlock (&g_once_mutex); |
760 | 41 | } |
761 | | |
762 | | /* GThread {{{1 -------------------------------------------------------- */ |
763 | | |
764 | | /** |
765 | | * g_thread_ref: |
766 | | * @thread: a #GThread |
767 | | * |
768 | | * Increase the reference count on @thread. |
769 | | * |
770 | | * Returns: (transfer full): a new reference to @thread |
771 | | * |
772 | | * Since: 2.32 |
773 | | */ |
774 | | GThread * |
775 | | g_thread_ref (GThread *thread) |
776 | 0 | { |
777 | 0 | GRealThread *real = (GRealThread *) thread; |
778 | | |
779 | 0 | g_atomic_int_inc (&real->ref_count); |
780 | | |
781 | 0 | return thread; |
782 | 0 | } |
783 | | |
784 | | /** |
785 | | * g_thread_unref: |
786 | | * @thread: (transfer full): a #GThread |
787 | | * |
788 | | * Decrease the reference count on @thread, possibly freeing all |
789 | | * resources associated with it. |
790 | | * |
791 | | * Note that each thread holds a reference to its #GThread while |
792 | | * it is running, so it is safe to drop your own reference to it |
793 | | * if you don't need it anymore. |
794 | | * |
795 | | * Since: 2.32 |
796 | | */ |
797 | | void |
798 | | g_thread_unref (GThread *thread) |
799 | 0 | { |
800 | 0 | GRealThread *real = (GRealThread *) thread; |
801 | | |
802 | 0 | if (g_atomic_int_dec_and_test (&real->ref_count)) |
803 | 0 | { |
804 | 0 | if (real->ours) |
805 | 0 | g_system_thread_free (real); |
806 | 0 | else |
807 | 0 | g_slice_free (GRealThread, real); |
808 | 0 | } |
809 | 0 | } |
810 | | |
811 | | static void |
812 | | g_thread_cleanup (gpointer data) |
813 | 0 | { |
814 | 0 | g_thread_unref (data); |
815 | 0 | } |
816 | | |
817 | | gpointer |
818 | | g_thread_proxy (gpointer data) |
819 | 0 | { |
820 | 0 | GRealThread* thread = data; |
821 | | |
822 | 0 | g_assert (data); |
823 | 0 | g_private_set (&g_thread_specific_private, data); |
824 | | |
825 | 0 | TRACE (GLIB_THREAD_SPAWNED (thread->thread.func, thread->thread.data, |
826 | 0 | thread->name)); |
827 | | |
828 | 0 | if (thread->name) |
829 | 0 | { |
830 | 0 | g_system_thread_set_name (thread->name); |
831 | 0 | g_free (thread->name); |
832 | 0 | thread->name = NULL; |
833 | 0 | } |
834 | | |
835 | 0 | thread->retval = thread->thread.func (thread->thread.data); |
836 | | |
837 | 0 | return NULL; |
838 | 0 | } |
839 | | |
840 | | guint |
841 | | g_thread_n_created (void) |
842 | 0 | { |
843 | 0 | return g_atomic_int_get (&g_thread_n_created_counter); |
844 | 0 | } |
845 | | |
846 | | /** |
847 | | * g_thread_new: |
848 | | * @name: (nullable): an (optional) name for the new thread |
849 | | * @func: (closure data) (scope async): a function to execute in the new thread |
850 | | * @data: (nullable): an argument to supply to the new thread |
851 | | * |
852 | | * This function creates a new thread. The new thread starts by invoking |
853 | | * @func with the argument data. The thread will run until @func returns |
854 | | * or until g_thread_exit() is called from the new thread. The return value |
855 | | * of @func becomes the return value of the thread, which can be obtained |
856 | | * with g_thread_join(). |
857 | | * |
858 | | * The @name can be useful for discriminating threads in a debugger. |
859 | | * It is not used for other purposes and does not have to be unique. |
860 | | * Some systems restrict the length of @name to 16 bytes. |
861 | | * |
862 | | * If the thread cannot be created, the program aborts. See |
863 | | * g_thread_try_new() if you want to attempt to deal with failures. |
864 | | * |
865 | | * If you are using threads to offload (potentially many) short-lived tasks, |
866 | | * #GThreadPool may be more appropriate than manually spawning and tracking |
867 | | * multiple #GThreads. |
868 | | * |
869 | | * To free the struct returned by this function, use g_thread_unref(). |
870 | | * Note that g_thread_join() implicitly unrefs the #GThread as well. |
871 | | * |
872 | | * New threads by default inherit the scheduler policy (POSIX) or thread |
873 | | * priority (Windows) of the thread that created them. |
874 | | * |
875 | | * This behaviour changed in GLib 2.64: before that, threads on Windows did |
876 | | * not inherit the thread priority but were spawned with the default priority. |
877 | | * Starting with GLib 2.64, the behaviour is consistent between Windows and |
878 | | * POSIX, and all threads inherit their parent thread's priority. |
879 | | * |
880 | | * Returns: (transfer full): the new #GThread |
881 | | * |
882 | | * Since: 2.32 |
883 | | */ |
884 | | GThread * |
885 | | g_thread_new (const gchar *name, |
886 | | GThreadFunc func, |
887 | | gpointer data) |
888 | 0 | { |
889 | 0 | GError *error = NULL; |
890 | 0 | GThread *thread; |
891 | | |
892 | 0 | thread = g_thread_new_internal (name, g_thread_proxy, func, data, 0, &error); |
893 | | |
894 | 0 | if G_UNLIKELY (thread == NULL) |
895 | 0 | g_error ("creating thread '%s': %s", name ? name : "", error->message); |
896 | | |
897 | 0 | return thread; |
898 | 0 | } |
899 | | |
900 | | /** |
901 | | * g_thread_try_new: |
902 | | * @name: (nullable): an (optional) name for the new thread |
903 | | * @func: (closure data) (scope async): a function to execute in the new thread |
904 | | * @data: (nullable): an argument to supply to the new thread |
905 | | * @error: return location for error, or %NULL |
906 | | * |
907 | | * This function is the same as g_thread_new() except that |
908 | | * it allows for the possibility of failure. |
909 | | * |
910 | | * If a thread cannot be created (due to resource limits), |
911 | | * @error is set and %NULL is returned. |
912 | | * |
913 | | * Returns: (transfer full): the new #GThread, or %NULL if an error occurred |
914 | | * |
915 | | * Since: 2.32 |
916 | | */ |
917 | | GThread * |
918 | | g_thread_try_new (const gchar *name, |
919 | | GThreadFunc func, |
920 | | gpointer data, |
921 | | GError **error) |
922 | 0 | { |
923 | 0 | return g_thread_new_internal (name, g_thread_proxy, func, data, 0, error); |
924 | 0 | } |
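 | | |
 | | /* A minimal sketch (assumed code, not from the original sources) of handling |
 | |  * thread-creation failure with g_thread_try_new() instead of aborting. |
 | |  */ |
 | | static GThread * |
 | | spawn_worker_or_warn (GThreadFunc func, gpointer data) |
 | | { |
 | |   GError *error = NULL; |
 | |   GThread *thread = g_thread_try_new ("worker", func, data, &error); |
 | | |
 | |   if (thread == NULL) |
 | |     { |
 | |       g_warning ("could not spawn worker: %s", error->message); |
 | |       g_clear_error (&error); |
 | |     } |
 | | |
 | |   return thread; |
 | | } |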
925 | | |
926 | | GThread * |
927 | | g_thread_new_internal (const gchar *name, |
928 | | GThreadFunc proxy, |
929 | | GThreadFunc func, |
930 | | gpointer data, |
931 | | gsize stack_size, |
932 | | GError **error) |
933 | 0 | { |
934 | 0 | g_return_val_if_fail (func != NULL, NULL); |
935 | | |
936 | 0 | g_atomic_int_inc (&g_thread_n_created_counter); |
937 | | |
938 | 0 | g_trace_mark (G_TRACE_CURRENT_TIME, 0, "GLib", "GThread created", "%s", name ? name : "(unnamed)"); |
939 | 0 | return (GThread *) g_system_thread_new (proxy, stack_size, name, func, data, error); |
940 | 0 | } |
941 | | |
942 | | /** |
943 | | * g_thread_exit: |
944 | | * @retval: the return value of this thread |
945 | | * |
946 | | * Terminates the current thread. |
947 | | * |
948 | | * If another thread is waiting for us using g_thread_join() then the |
949 | | * waiting thread will be woken up and get @retval as the return value |
950 | | * of g_thread_join(). |
951 | | * |
952 | | * Calling g_thread_exit() with a parameter @retval is equivalent to |
953 | | * returning @retval from the function @func, as given to g_thread_new(). |
954 | | * |
955 | | * You must only call g_thread_exit() from a thread that you created |
956 | | * yourself with g_thread_new() or related APIs. You must not call |
957 | | * this function from a thread created with another threading library |
958 | | * or from within a #GThreadPool. |
959 | | */ |
960 | | void |
961 | | g_thread_exit (gpointer retval) |
962 | 0 | { |
963 | 0 | GRealThread* real = (GRealThread*) g_thread_self (); |
964 | | |
965 | 0 | if G_UNLIKELY (!real->ours) |
966 | 0 | g_error ("attempt to g_thread_exit() a thread not created by GLib"); |
967 | | |
968 | 0 | real->retval = retval; |
969 | | |
970 | 0 | g_system_thread_exit (); |
971 | 0 | } |
972 | | |
973 | | /** |
974 | | * g_thread_join: |
975 | | * @thread: (transfer full): a #GThread |
976 | | * |
977 | | * Waits until @thread finishes, i.e. the function @func, as |
978 | | * given to g_thread_new(), returns or g_thread_exit() is called. |
979 | | * If @thread has already terminated, then g_thread_join() |
980 | | * returns immediately. |
981 | | * |
982 | | * Any thread can wait for any other thread by calling g_thread_join(), |
983 | | * not just its 'creator'. Calling g_thread_join() from multiple threads |
984 | | * for the same @thread leads to undefined behaviour. |
985 | | * |
986 | | * The value returned by @func or given to g_thread_exit() is |
987 | | * returned by this function. |
988 | | * |
989 | | * g_thread_join() consumes the reference to the passed-in @thread. |
990 | | * This will usually cause the #GThread struct and associated resources |
991 | | * to be freed. Use g_thread_ref() to obtain an extra reference if you |
992 | | * want to keep the GThread alive beyond the g_thread_join() call. |
993 | | * |
994 | | * Returns: (transfer full): the return value of the thread |
995 | | */ |
996 | | gpointer |
997 | | g_thread_join (GThread *thread) |
998 | 0 | { |
999 | 0 | GRealThread *real = (GRealThread*) thread; |
1000 | 0 | gpointer retval; |
1001 | | |
1002 | 0 | g_return_val_if_fail (thread, NULL); |
1003 | 0 | g_return_val_if_fail (real->ours, NULL); |
1004 | | |
1005 | 0 | g_system_thread_wait (real); |
1006 | | |
1007 | 0 | retval = real->retval; |
1008 | | |
1009 | | /* Just to make sure, this isn't used any more */ |
1010 | 0 | thread->joinable = 0; |
1011 | | |
1012 | 0 | g_thread_unref (thread); |
1013 | | |
1014 | 0 | return retval; |
1015 | 0 | } |
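 | | |
 | | /* A minimal sketch (assumed code) of the pattern described above: taking an |
 | |  * extra reference with g_thread_ref() so the #GThread struct remains valid |
 | |  * after g_thread_join() has consumed the reference passed to it. The caller |
 | |  * must eventually drop the kept reference with g_thread_unref(). |
 | |  */ |
 | | static gpointer |
 | | join_but_keep_handle (GThread *thread, GThread **kept) |
 | | { |
 | |   *kept = g_thread_ref (thread);   /* survives the join below */ |
 | |   return g_thread_join (thread);   /* consumes the original reference */ |
 | | } |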
1016 | | |
1017 | | /** |
1018 | | * g_thread_self: |
1019 | | * |
1020 | | * This function returns the #GThread corresponding to the |
1021 | | * current thread. Note that this function does not increase |
1022 | | * the reference count of the returned struct. |
1023 | | * |
1024 | | * This function will return a #GThread even for threads that |
1025 | | * were not created by GLib (i.e. those created by other threading |
1026 | | * APIs). This may be useful for thread identification purposes |
1027 | | * (i.e. comparisons) but you must not use GLib functions (such |
1028 | | * as g_thread_join()) on these threads. |
1029 | | * |
1030 | | * Returns: (transfer none): the #GThread representing the current thread |
1031 | | */ |
1032 | | GThread* |
1033 | | g_thread_self (void) |
1034 | 0 | { |
1035 | 0 | GRealThread* thread = g_private_get (&g_thread_specific_private); |
1036 | | |
1037 | 0 | if (!thread) |
1038 | 0 | { |
1039 | | /* If no thread data is available, provide and set one. |
1040 | | * This can happen for the main thread and for threads |
1041 | | * that are not created by GLib. |
1042 | | */ |
1043 | 0 | thread = g_slice_new0 (GRealThread); |
1044 | 0 | thread->ref_count = 1; |
1045 | | |
1046 | 0 | g_private_set (&g_thread_specific_private, thread); |
1047 | 0 | } |
1048 | | |
1049 | 0 | return (GThread*) thread; |
1050 | 0 | } |
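 | | |
 | | /* A minimal sketch (assumed code) of using g_thread_self() for thread |
 | |  * identification as suggested above: remember the #GThread that called |
 | |  * init_on_main() and later check whether the caller is still that thread. |
 | |  * No reference counting is needed for a simple comparison. |
 | |  */ |
 | | static GThread *main_thread; |
 | | |
 | | static void |
 | | init_on_main (void) |
 | | { |
 | |   main_thread = g_thread_self (); |
 | | } |
 | | |
 | | static gboolean |
 | | is_main_thread (void) |
 | | { |
 | |   return g_thread_self () == main_thread; |
 | | } |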
1051 | | |
1052 | | /** |
1053 | | * g_get_num_processors: |
1054 | | * |
1055 | | * Determine the approximate number of threads that the system will |
1056 | | * schedule simultaneously for this process. This is intended to be |
1057 | | * used as a parameter to g_thread_pool_new() for CPU bound tasks and |
1058 | | * similar cases. |
1059 | | * |
1060 | | * Returns: Number of schedulable threads, always greater than 0 |
1061 | | * |
1062 | | * Since: 2.36 |
1063 | | */ |
1064 | | guint |
1065 | | g_get_num_processors (void) |
1066 | 0 | { |
1067 | | #ifdef G_OS_WIN32 |
1068 | | unsigned int count; |
1069 | | SYSTEM_INFO sysinfo; |
1070 | | DWORD_PTR process_cpus; |
1071 | | DWORD_PTR system_cpus; |
1072 | | |
1073 | | /* This *never* fails, use it as fallback */ |
1074 | | GetNativeSystemInfo (&sysinfo); |
1075 | | count = (int) sysinfo.dwNumberOfProcessors; |
1076 | | |
1077 | | if (GetProcessAffinityMask (GetCurrentProcess (), |
1078 | | &process_cpus, &system_cpus)) |
1079 | | { |
1080 | | unsigned int af_count; |
1081 | | |
1082 | | for (af_count = 0; process_cpus != 0; process_cpus >>= 1) |
1083 | | if (process_cpus & 1) |
1084 | | af_count++; |
1085 | | |
1086 | | /* Prefer affinity-based result, if available */ |
1087 | | if (af_count > 0) |
1088 | | count = af_count; |
1089 | | } |
1090 | | |
1091 | | if (count > 0) |
1092 | | return count; |
1093 | | #elif defined(_SC_NPROCESSORS_ONLN) && defined(THREADS_POSIX) && defined(HAVE_PTHREAD_GETAFFINITY_NP) |
1094 | | { |
1095 | 0 | int idx; |
1096 | 0 | int ncores = MIN (sysconf (_SC_NPROCESSORS_ONLN), CPU_SETSIZE); |
1097 | 0 | cpu_set_t cpu_mask; |
1098 | 0 | CPU_ZERO (&cpu_mask); |
1099 | | |
1100 | 0 | int af_count = 0; |
1101 | 0 | int err = pthread_getaffinity_np (pthread_self (), sizeof (cpu_mask), &cpu_mask); |
1102 | 0 | if (!err) |
1103 | 0 | for (idx = 0; idx < ncores && idx < CPU_SETSIZE; ++idx) |
1104 | 0 | af_count += CPU_ISSET (idx, &cpu_mask); |
1105 | | |
1106 | 0 | int count = (af_count > 0) ? af_count : ncores; |
1107 | 0 | return count; |
1108 | 0 | } |
1109 | | #elif defined(_SC_NPROCESSORS_ONLN) |
1110 | | { |
1111 | | int count; |
1112 | | |
1113 | | count = sysconf (_SC_NPROCESSORS_ONLN); |
1114 | | if (count > 0) |
1115 | | return count; |
1116 | | } |
1117 | | #elif defined HW_NCPU |
1118 | | { |
1119 | | int mib[2], count = 0; |
1120 | | size_t len; |
1121 | | |
1122 | | mib[0] = CTL_HW; |
1123 | | mib[1] = HW_NCPU; |
1124 | | len = sizeof(count); |
1125 | | |
1126 | | if (sysctl (mib, 2, &count, &len, NULL, 0) == 0 && count > 0) |
1127 | | return count; |
1128 | | } |
1129 | | #endif |
1130 | | |
1131 | 0 | return 1; /* Fallback */ |
1132 | 0 | } |
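 | | |
 | | /* A minimal sketch (assumed code) of the use suggested above: sizing a |
 | |  * GThreadPool for CPU-bound work from the number of schedulable threads. |
 | |  * process_item() is a hypothetical GFunc. |
 | |  */ |
 | | static GThreadPool * |
 | | make_cpu_bound_pool (GError **error) |
 | | { |
 | |   return g_thread_pool_new (process_item, |
 | |                             NULL,                            /* user data */ |
 | |                             (gint) g_get_num_processors (),  /* max threads */ |
 | |                             FALSE,                           /* not exclusive */ |
 | |                             error); |
 | | } |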
1133 | | |
1134 | | /* Epilogue {{{1 */ |
1135 | | /* vim: set foldmethod=marker: */ |