/src/Python-3.8.3/Python/thread_pthread.h
Line | Count | Source |
1 | | |
2 | | /* Posix threads interface */ |
3 | | |
4 | | #include <stdlib.h> |
5 | | #include <string.h> |
6 | | #if defined(__APPLE__) || defined(HAVE_PTHREAD_DESTRUCTOR) |
7 | | #define destructor xxdestructor |
8 | | #endif |
9 | | #include <pthread.h> |
10 | | #if defined(__APPLE__) || defined(HAVE_PTHREAD_DESTRUCTOR) |
11 | | #undef destructor |
12 | | #endif |
13 | | #include <signal.h> |
14 | | |
15 | | #if defined(__linux__) |
16 | | # include <sys/syscall.h> /* syscall(SYS_gettid) */ |
17 | | #elif defined(__FreeBSD__) |
18 | | # include <pthread_np.h> /* pthread_getthreadid_np() */ |
19 | | #elif defined(__OpenBSD__) |
20 | | # include <unistd.h> /* getthrid() */ |
21 | | #elif defined(_AIX) |
22 | | # include <sys/thread.h> /* thread_self() */ |
23 | | #elif defined(__NetBSD__) |
24 | | # include <lwp.h> /* _lwp_self() */ |
25 | | #endif |
26 | | |
27 | | /* The POSIX spec requires that use of pthread_attr_setstacksize |
28 | | be conditional on _POSIX_THREAD_ATTR_STACKSIZE being defined. */ |
29 | | #ifdef _POSIX_THREAD_ATTR_STACKSIZE |
30 | | #ifndef THREAD_STACK_SIZE |
31 | 0 | #define THREAD_STACK_SIZE 0 /* use default stack size */ |
32 | | #endif |
33 | | |
34 | | /* The default stack size for new threads on OSX and BSD is small enough that |
35 | | * we'll get hard crashes instead of 'maximum recursion depth exceeded' |
36 | | * exceptions. |
37 | | * |
38 | | * The default stack sizes below are the empirically determined minimal stack |
39 | | * sizes where a simple recursive function doesn't cause a hard crash. |
40 | | */ |
41 | | #if defined(__APPLE__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0 |
42 | | #undef THREAD_STACK_SIZE |
43 | | /* Note: This matches the value of -Wl,-stack_size in configure.ac */ |
44 | | #define THREAD_STACK_SIZE 0x1000000 |
45 | | #endif |
46 | | #if defined(__FreeBSD__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0 |
47 | | #undef THREAD_STACK_SIZE |
48 | | #define THREAD_STACK_SIZE 0x400000 |
49 | | #endif |
50 | | #if defined(_AIX) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0 |
51 | | #undef THREAD_STACK_SIZE |
52 | | #define THREAD_STACK_SIZE 0x200000 |
53 | | #endif |
54 | | /* for safety, ensure a viable minimum stacksize */ |
55 | 0 | #define THREAD_STACK_MIN 0x8000 /* 32 KiB */ |
56 | | #else /* !_POSIX_THREAD_ATTR_STACKSIZE */ |
57 | | #ifdef THREAD_STACK_SIZE |
58 | | #error "THREAD_STACK_SIZE defined but _POSIX_THREAD_ATTR_STACKSIZE undefined" |
59 | | #endif |
60 | | #endif |
61 | | |
62 | | /* The POSIX spec says that implementations supporting the sem_* |
63 | | family of functions must indicate this by defining |
64 | | _POSIX_SEMAPHORES. */ |
65 | | #ifdef _POSIX_SEMAPHORES |
66 | | /* On FreeBSD 4.x, _POSIX_SEMAPHORES is defined empty, so |
67 | | we need to add 0 to make it work there as well. */ |
68 | | #if (_POSIX_SEMAPHORES+0) == -1 |
69 | | #define HAVE_BROKEN_POSIX_SEMAPHORES |
70 | | #else |
71 | | #include <semaphore.h> |
72 | | #include <errno.h> |
73 | | #endif |
74 | | #endif |
75 | | |
76 | | |
77 | | /* Whether or not to use semaphores directly rather than emulating them with |
78 | | * mutexes and condition variables: |
79 | | */ |
80 | | #if (defined(_POSIX_SEMAPHORES) && !defined(HAVE_BROKEN_POSIX_SEMAPHORES) && \ |
81 | | defined(HAVE_SEM_TIMEDWAIT)) |
82 | | # define USE_SEMAPHORES |
83 | | #else |
84 | | # undef USE_SEMAPHORES |
85 | | #endif |
86 | | |
87 | | |
88 | | /* On platforms that don't use standard POSIX threads, pthread_sigmask() |
89 | | * isn't present. DEC threads uses sigprocmask() instead, as do most |
90 | | * other UNIX International compliant systems that don't have the full |
91 | | * pthread implementation. |
92 | | */ |
93 | | #if defined(HAVE_PTHREAD_SIGMASK) && !defined(HAVE_BROKEN_PTHREAD_SIGMASK) |
94 | | # define SET_THREAD_SIGMASK pthread_sigmask |
95 | | #else |
96 | | # define SET_THREAD_SIGMASK sigprocmask |
97 | | #endif |
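
As an aside, a minimal, self-contained sketch of the call SET_THREAD_SIGMASK resolves to on full pthread systems; blocking SIGINT here is only an illustrative choice, not something this file does (compile with -pthread):

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    sigset_t set;
    sigemptyset(&set);
    sigaddset(&set, SIGINT);

    /* pthread_sigmask() masks the signal for the calling thread only;
       the sigprocmask() fallback has process-wide, implementation-defined
       behaviour in multithreaded programs. */
    int rc = pthread_sigmask(SIG_BLOCK, &set, NULL);
    if (rc != 0) {
        fprintf(stderr, "pthread_sigmask: %s\n", strerror(rc));
        return 1;
    }
    printf("SIGINT is now blocked in this thread\n");
    return 0;
}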
98 | | |
99 | | |
100 | | /* We assume all modern POSIX systems have gettimeofday() */ |
101 | | #ifdef GETTIMEOFDAY_NO_TZ |
102 | | #define GETTIMEOFDAY(ptv) gettimeofday(ptv) |
103 | | #else |
104 | 0 | #define GETTIMEOFDAY(ptv) gettimeofday(ptv, (struct timezone *)NULL) |
105 | | #endif |
106 | | |
107 | 0 | #define MICROSECONDS_TO_TIMESPEC(microseconds, ts) \ |
108 | 0 | do { \ |
109 | 0 | struct timeval tv; \ |
110 | 0 | GETTIMEOFDAY(&tv); \ |
111 | 0 | tv.tv_usec += microseconds % 1000000; \ |
112 | 0 | tv.tv_sec += microseconds / 1000000; \ |
113 | 0 | tv.tv_sec += tv.tv_usec / 1000000; \ |
114 | 0 | tv.tv_usec %= 1000000; \ |
115 | 0 | ts.tv_sec = tv.tv_sec; \ |
116 | 0 | ts.tv_nsec = tv.tv_usec * 1000; \ |
117 | 0 | } while(0) |
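
A self-contained sketch of what the macro computes, assuming POSIX semaphores (Linux, compile with -pthread; unnamed semaphores are unavailable on macOS): the relative timeout is turned into the absolute CLOCK_REALTIME deadline that sem_timedwait() expects.

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <sys/time.h>
#include <time.h>

int main(void)
{
    sem_t sem;
    sem_init(&sem, 0, 0);                 /* demo semaphore, never posted */

    /* Same arithmetic as MICROSECONDS_TO_TIMESPEC: deadline = now + 100 ms. */
    long long us = 100000;
    struct timeval tv;
    struct timespec ts;
    gettimeofday(&tv, NULL);
    tv.tv_usec += us % 1000000;
    tv.tv_sec  += us / 1000000 + tv.tv_usec / 1000000;
    tv.tv_usec %= 1000000;
    ts.tv_sec  = tv.tv_sec;
    ts.tv_nsec = tv.tv_usec * 1000;

    if (sem_timedwait(&sem, &ts) == -1 && errno == ETIMEDOUT)
        printf("timed out after ~100 ms, as expected\n");

    sem_destroy(&sem);
    return 0;
}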
118 | | |
119 | | |
120 | | /* |
121 | | * pthread_cond support |
122 | | */ |
123 | | |
124 | | #if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC) |
125 | | // Monotonic clock support is detected at compile time; it may still be unavailable at runtime. |
126 | | #define CONDATTR_MONOTONIC |
127 | | #endif |
128 | | |
129 | | // NULL when pthread_condattr_setclock(CLOCK_MONOTONIC) is not supported. |
130 | | static pthread_condattr_t *condattr_monotonic = NULL; |
131 | | |
132 | | static void |
133 | | init_condattr() |
134 | 14 | { |
135 | 14 | #ifdef CONDATTR_MONOTONIC |
136 | 14 | static pthread_condattr_t ca; |
137 | 14 | pthread_condattr_init(&ca); |
138 | 14 | if (pthread_condattr_setclock(&ca, CLOCK_MONOTONIC) == 0) { |
139 | 14 | condattr_monotonic = &ca; // Use monotonic clock |
140 | 14 | } |
141 | 14 | #endif |
142 | 14 | } |
143 | | |
144 | | int |
145 | | _PyThread_cond_init(PyCOND_T *cond) |
146 | 28 | { |
147 | 28 | return pthread_cond_init(cond, condattr_monotonic); |
148 | 28 | } |
149 | | |
150 | | void |
151 | | _PyThread_cond_after(long long us, struct timespec *abs) |
152 | 0 | { |
153 | 0 | #ifdef CONDATTR_MONOTONIC |
154 | 0 | if (condattr_monotonic) { |
155 | 0 | clock_gettime(CLOCK_MONOTONIC, abs); |
156 | 0 | abs->tv_sec += us / 1000000; |
157 | 0 | abs->tv_nsec += (us % 1000000) * 1000; |
158 | 0 | abs->tv_sec += abs->tv_nsec / 1000000000; |
159 | 0 | abs->tv_nsec %= 1000000000; |
160 | 0 | return; |
161 | 0 | } |
162 | 0 | #endif |
163 | | |
164 | 0 | struct timespec ts; |
165 | 0 | MICROSECONDS_TO_TIMESPEC(us, ts); |
166 | 0 | *abs = ts; |
167 | 0 | } |
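
A self-contained sketch of the same pattern, assuming pthread_condattr_setclock() and CLOCK_MONOTONIC are available (compile with -pthread): the condition variable is bound to the monotonic clock, so the absolute deadline must come from clock_gettime(CLOCK_MONOTONIC), just as _PyThread_cond_after() does when condattr_monotonic is set.

#include <pthread.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
    pthread_condattr_t ca;
    pthread_cond_t cond;
    pthread_mutex_t mut = PTHREAD_MUTEX_INITIALIZER;

    pthread_condattr_init(&ca);
    pthread_condattr_setclock(&ca, CLOCK_MONOTONIC);  /* may fail; check in real code */
    pthread_cond_init(&cond, &ca);

    /* Absolute deadline on the same clock: now + 50 ms. */
    struct timespec abs;
    clock_gettime(CLOCK_MONOTONIC, &abs);
    abs.tv_nsec += 50 * 1000000L;
    abs.tv_sec  += abs.tv_nsec / 1000000000;
    abs.tv_nsec %= 1000000000;

    pthread_mutex_lock(&mut);
    int rc = pthread_cond_timedwait(&cond, &mut, &abs);  /* no signaler: ETIMEDOUT */
    pthread_mutex_unlock(&mut);
    printf("pthread_cond_timedwait returned %d\n", rc);

    pthread_cond_destroy(&cond);
    pthread_condattr_destroy(&ca);
    return 0;
}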
168 | | |
169 | | |
170 | | /* A pthread mutex isn't sufficient to model the Python lock type |
171 | | * because, according to Draft 5 of the docs (P1003.4a/D5), both of the |
172 | | * following are undefined: |
173 | | * -> a thread tries to lock a mutex it already has locked |
174 | | * -> a thread tries to unlock a mutex locked by a different thread |
175 | | * pthread mutexes are designed for serializing threads over short pieces |
176 | | * of code anyway, so wouldn't be an appropriate implementation of |
177 | | * Python's locks regardless. |
178 | | * |
179 | | * The pthread_lock struct implements a Python lock as a "locked?" bit |
180 | | * and a <condition, mutex> pair. In general, if the bit can be acquired |
181 | | * instantly, it is, else the pair is used to block the thread until the |
182 | | * bit is cleared. 9 May 1994 tim@ksr.com |
183 | | */ |
184 | | |
185 | | typedef struct { |
186 | | char locked; /* 0=unlocked, 1=locked */ |
187 | | /* a <cond, mutex> pair to handle an acquire of a locked lock */ |
188 | | pthread_cond_t lock_released; |
189 | | pthread_mutex_t mut; |
190 | | } pthread_lock; |
191 | | |
192 | 8.73k | #define CHECK_STATUS(name) if (status != 0) { perror(name); error = 1; } |
193 | | #define CHECK_STATUS_PTHREAD(name) if (status != 0) { fprintf(stderr, \ |
194 | | "%s: %s\n", name, strerror(status)); error = 1; } |
195 | | |
196 | | /* |
197 | | * Initialization. |
198 | | */ |
199 | | static void |
200 | | PyThread__init_thread(void) |
201 | 14 | { |
202 | | #if defined(_AIX) && defined(__GNUC__) |
203 | | extern void pthread_init(void); |
204 | | pthread_init(); |
205 | | #endif |
206 | 14 | init_condattr(); |
207 | 14 | } |
208 | | |
209 | | /* |
210 | | * Thread support. |
211 | | */ |
212 | | |
213 | | /* bpo-33015: pythread_callback struct and pythread_wrapper() cast |
214 | | "void func(void *)" to "void* func(void *)": always return NULL. |
215 | | |
216 | | PyThread_start_new_thread() uses "void func(void *)" type, whereas |
217 | | pthread_create() requires a void* return value. */ |
218 | | typedef struct { |
219 | | void (*func) (void *); |
220 | | void *arg; |
221 | | } pythread_callback; |
222 | | |
223 | | static void * |
224 | | pythread_wrapper(void *arg) |
225 | 0 | { |
226 | | /* copy func and func_arg and free the temporary structure */ |
227 | 0 | pythread_callback *callback = arg; |
228 | 0 | void (*func)(void *) = callback->func; |
229 | 0 | void *func_arg = callback->arg; |
230 | 0 | PyMem_RawFree(arg); |
231 | |
232 | 0 | func(func_arg); |
233 | 0 | return NULL; |
234 | 0 | } |
235 | | |
236 | | unsigned long |
237 | | PyThread_start_new_thread(void (*func)(void *), void *arg) |
238 | 0 | { |
239 | 0 | pthread_t th; |
240 | 0 | int status; |
241 | 0 | #if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) |
242 | 0 | pthread_attr_t attrs; |
243 | 0 | #endif |
244 | 0 | #if defined(THREAD_STACK_SIZE) |
245 | 0 | size_t tss; |
246 | 0 | #endif |
247 | |
248 | 0 | dprintf(("PyThread_start_new_thread called\n")); |
249 | 0 | if (!initialized) |
250 | 0 | PyThread_init_thread(); |
251 | |
252 | 0 | #if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) |
253 | 0 | if (pthread_attr_init(&attrs) != 0) |
254 | 0 | return PYTHREAD_INVALID_THREAD_ID; |
255 | 0 | #endif |
256 | 0 | #if defined(THREAD_STACK_SIZE) |
257 | 0 | PyThreadState *tstate = _PyThreadState_GET(); |
258 | 0 | size_t stacksize = tstate ? tstate->interp->pythread_stacksize : 0; |
259 | 0 | tss = (stacksize != 0) ? stacksize : THREAD_STACK_SIZE; |
260 | 0 | if (tss != 0) { |
261 | 0 | if (pthread_attr_setstacksize(&attrs, tss) != 0) { |
262 | 0 | pthread_attr_destroy(&attrs); |
263 | 0 | return PYTHREAD_INVALID_THREAD_ID; |
264 | 0 | } |
265 | 0 | } |
266 | 0 | #endif |
267 | 0 | #if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) |
268 | 0 | pthread_attr_setscope(&attrs, PTHREAD_SCOPE_SYSTEM); |
269 | 0 | #endif |
270 | |
271 | 0 | pythread_callback *callback = PyMem_RawMalloc(sizeof(pythread_callback)); |
272 | |
273 | 0 | if (callback == NULL) { |
274 | 0 | return PYTHREAD_INVALID_THREAD_ID; |
275 | 0 | } |
276 | | |
277 | 0 | callback->func = func; |
278 | 0 | callback->arg = arg; |
279 | |
280 | 0 | status = pthread_create(&th, |
281 | 0 | #if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) |
282 | 0 | &attrs, |
283 | | #else |
284 | | (pthread_attr_t*)NULL, |
285 | | #endif |
286 | 0 | pythread_wrapper, callback); |
287 | |
288 | 0 | #if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) |
289 | 0 | pthread_attr_destroy(&attrs); |
290 | 0 | #endif |
291 | |
292 | 0 | if (status != 0) { |
293 | 0 | PyMem_RawFree(callback); |
294 | 0 | return PYTHREAD_INVALID_THREAD_ID; |
295 | 0 | } |
296 | | |
297 | 0 | pthread_detach(th); |
298 | |
299 | 0 | #if SIZEOF_PTHREAD_T <= SIZEOF_LONG |
300 | 0 | return (unsigned long) th; |
301 | | #else |
302 | | return (unsigned long) *(unsigned long *) &th; |
303 | | #endif |
304 | 0 | } |
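
The same trampoline can be sketched with plain pthreads (hypothetical names; malloc/free stand in for PyMem_RawMalloc/PyMem_RawFree, and the sleep() only gives the detached thread time to finish; compile with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

typedef struct {
    void (*func)(void *);
    void *arg;
} callback_t;

static void *wrapper(void *arg)
{
    callback_t *cb = arg;
    void (*func)(void *) = cb->func;
    void *func_arg = cb->arg;
    free(cb);                       /* the wrapper owns the temporary struct */
    func(func_arg);
    return NULL;                    /* satisfies pthread_create's void *(*)(void *) */
}

static void greet(void *arg)
{
    printf("hello from thread: %s\n", (const char *)arg);
}

int main(void)
{
    callback_t *cb = malloc(sizeof(*cb));
    if (cb == NULL)
        return 1;
    cb->func = greet;
    cb->arg = "detached worker";

    pthread_t th;
    if (pthread_create(&th, NULL, wrapper, cb) != 0) {
        free(cb);
        return 1;
    }
    pthread_detach(th);
    sleep(1);
    return 0;
}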
305 | | |
306 | | /* XXX This implementation is considered (to quote Tim Peters) "inherently |
307 | | hosed" because: |
308 | | - It does not guarantee the promise that a non-zero integer is returned. |
309 | | - The cast to unsigned long is inherently unsafe. |
310 | | - It is not clear that the 'volatile' (for AIX?) are any longer necessary. |
311 | | */ |
312 | | unsigned long |
313 | | PyThread_get_thread_ident(void) |
314 | 8.80k | { |
315 | 8.80k | volatile pthread_t threadid; |
316 | 8.80k | if (!initialized) |
317 | 0 | PyThread_init_thread(); |
318 | 8.80k | threadid = pthread_self(); |
319 | 8.80k | return (unsigned long) threadid; |
320 | 8.80k | } |
321 | | |
322 | | #ifdef PY_HAVE_THREAD_NATIVE_ID |
323 | | unsigned long |
324 | | PyThread_get_thread_native_id(void) |
325 | 1 | { |
326 | 1 | if (!initialized) |
327 | 0 | PyThread_init_thread(); |
328 | | #ifdef __APPLE__ |
329 | | uint64_t native_id; |
330 | | (void) pthread_threadid_np(NULL, &native_id); |
331 | | #elif defined(__linux__) |
332 | | pid_t native_id; |
333 | 1 | native_id = syscall(SYS_gettid); |
334 | | #elif defined(__FreeBSD__) |
335 | | int native_id; |
336 | | native_id = pthread_getthreadid_np(); |
337 | | #elif defined(__OpenBSD__) |
338 | | pid_t native_id; |
339 | | native_id = getthrid(); |
340 | | #elif defined(_AIX) |
341 | | tid_t native_id; |
342 | | native_id = thread_self(); |
343 | | #elif defined(__NetBSD__) |
344 | | lwpid_t native_id; |
345 | | native_id = _lwp_self(); |
346 | | #endif |
347 | 1 | return (unsigned long) native_id; |
348 | 1 | } |
349 | | #endif |
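
For comparison, a minimal Linux-only sketch of the call used in the __linux__ branch (assumes glibc and <sys/syscall.h>):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    /* The kernel-level thread id, distinct from pthread_self()'s opaque handle. */
    pid_t tid = (pid_t)syscall(SYS_gettid);
    printf("native thread id: %ld\n", (long)tid);
    return 0;
}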
350 | | |
351 | | void _Py_NO_RETURN |
352 | | PyThread_exit_thread(void) |
353 | 0 | { |
354 | 0 | dprintf(("PyThread_exit_thread called\n")); |
355 | 0 | if (!initialized) |
356 | 0 | exit(0); |
357 | 0 | pthread_exit(0); |
358 | 0 | } |
359 | | |
360 | | #ifdef USE_SEMAPHORES |
361 | | |
362 | | /* |
363 | | * Lock support. |
364 | | */ |
365 | | |
366 | | PyThread_type_lock |
367 | | PyThread_allocate_lock(void) |
368 | 1.09k | { |
369 | 1.09k | sem_t *lock; |
370 | 1.09k | int status, error = 0; |
371 | | |
372 | 1.09k | dprintf(("PyThread_allocate_lock called\n")); |
373 | 1.09k | if (!initialized) |
374 | 14 | PyThread_init_thread(); |
375 | | |
376 | 1.09k | lock = (sem_t *)PyMem_RawMalloc(sizeof(sem_t)); |
377 | | |
378 | 1.09k | if (lock) { |
379 | 1.09k | status = sem_init(lock,0,1); |
380 | 1.09k | CHECK_STATUS("sem_init"); |
381 | | |
382 | 1.09k | if (error) { |
383 | 0 | PyMem_RawFree((void *)lock); |
384 | 0 | lock = NULL; |
385 | 0 | } |
386 | 1.09k | } |
387 | | |
388 | 1.09k | dprintf(("PyThread_allocate_lock() -> %p\n", (void *)lock)); |
389 | 1.09k | return (PyThread_type_lock)lock; |
390 | 1.09k | } |
391 | | |
392 | | void |
393 | | PyThread_free_lock(PyThread_type_lock lock) |
394 | 960 | { |
395 | 960 | sem_t *thelock = (sem_t *)lock; |
396 | 960 | int status, error = 0; |
397 | | |
398 | 960 | (void) error; /* silence unused-but-set-variable warning */ |
399 | 960 | dprintf(("PyThread_free_lock(%p) called\n", lock)); |
400 | | |
401 | 960 | if (!thelock) |
402 | 0 | return; |
403 | | |
404 | 960 | status = sem_destroy(thelock); |
405 | 960 | CHECK_STATUS("sem_destroy"); |
406 | | |
407 | 960 | PyMem_RawFree((void *)thelock); |
408 | 960 | } |
409 | | |
410 | | /* |
411 | | * As of February 2002, Cygwin thread implementations mistakenly report error |
412 | | * codes in the return value of the sem_ calls (like the pthread_ functions). |
413 | | * Correct implementations return -1 and put the code in errno. This supports |
414 | | * either. |
415 | | */ |
416 | | static int |
417 | | fix_status(int status) |
418 | 3.34k | { |
419 | 3.34k | return (status == -1) ? errno : status; |
420 | 3.34k | } |
421 | | |
422 | | PyLockStatus |
423 | | PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds, |
424 | | int intr_flag) |
425 | 3.34k | { |
426 | 3.34k | PyLockStatus success; |
427 | 3.34k | sem_t *thelock = (sem_t *)lock; |
428 | 3.34k | int status, error = 0; |
429 | 3.34k | struct timespec ts; |
430 | 3.34k | _PyTime_t deadline = 0; |
431 | | |
432 | 3.34k | (void) error; /* silence unused-but-set-variable warning */ |
433 | 3.34k | dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) called\n", |
434 | 3.34k | lock, microseconds, intr_flag)); |
435 | | |
436 | 3.34k | if (microseconds > PY_TIMEOUT_MAX) { |
437 | 0 | Py_FatalError("Timeout larger than PY_TIMEOUT_MAX"); |
438 | 0 | } |
439 | | |
440 | 3.34k | if (microseconds > 0) { |
441 | 0 | MICROSECONDS_TO_TIMESPEC(microseconds, ts); |
442 | |
443 | 0 | if (!intr_flag) { |
444 | | /* cannot overflow thanks to (microseconds > PY_TIMEOUT_MAX) |
445 | | check done above */ |
446 | 0 | _PyTime_t timeout = _PyTime_FromNanoseconds(microseconds * 1000); |
447 | 0 | deadline = _PyTime_GetMonotonicClock() + timeout; |
448 | 0 | } |
449 | 0 | } |
450 | | |
451 | 3.34k | while (1) { |
452 | 3.34k | if (microseconds > 0) { |
453 | 0 | status = fix_status(sem_timedwait(thelock, &ts)); |
454 | 0 | } |
455 | 3.34k | else if (microseconds == 0) { |
456 | 3.30k | status = fix_status(sem_trywait(thelock)); |
457 | 3.30k | } |
458 | 42 | else { |
459 | 42 | status = fix_status(sem_wait(thelock)); |
460 | 42 | } |
461 | | |
462 | | /* Retry if interrupted by a signal, unless the caller wants to be |
463 | | notified. */ |
464 | 3.34k | if (intr_flag || status != EINTR) { |
465 | 3.34k | break; |
466 | 3.34k | } |
467 | | |
468 | 0 | if (microseconds > 0) { |
469 | | /* wait interrupted by a signal (EINTR): recompute the timeout */ |
470 | 0 | _PyTime_t dt = deadline - _PyTime_GetMonotonicClock(); |
471 | 0 | if (dt < 0) { |
472 | 0 | status = ETIMEDOUT; |
473 | 0 | break; |
474 | 0 | } |
475 | 0 | else if (dt > 0) { |
476 | 0 | _PyTime_t realtime_deadline = _PyTime_GetSystemClock() + dt; |
477 | 0 | if (_PyTime_AsTimespec(realtime_deadline, &ts) < 0) { |
478 | | /* Cannot occur thanks to (microseconds > PY_TIMEOUT_MAX) |
479 | | check done above */ |
480 | 0 | Py_UNREACHABLE(); |
481 | 0 | } |
482 | | /* no need to update the microseconds value: the code only cares |
483 | | whether (microseconds > 0) or (microseconds == 0). */ |
484 | 0 | } |
485 | 0 | else { |
486 | 0 | microseconds = 0; |
487 | 0 | } |
488 | 0 | } |
489 | 0 | } |
490 | | |
491 | | /* Don't check the status if we're stopping because of an interrupt. */ |
492 | 3.34k | if (!(intr_flag && status == EINTR)) { |
493 | 3.34k | if (microseconds > 0) { |
494 | 0 | if (status != ETIMEDOUT) |
495 | 0 | CHECK_STATUS("sem_timedwait"); |
496 | 0 | } |
497 | 3.34k | else if (microseconds == 0) { |
498 | 3.30k | if (status != EAGAIN) |
499 | 3.30k | CHECK_STATUS("sem_trywait"); |
500 | 3.30k | } |
501 | 42 | else { |
502 | 42 | CHECK_STATUS("sem_wait"); |
503 | 42 | } |
504 | 3.34k | } |
505 | | |
506 | 3.34k | if (status == 0) { |
507 | 3.34k | success = PY_LOCK_ACQUIRED; |
508 | 3.34k | } else if (intr_flag && status == EINTR) { |
509 | 0 | success = PY_LOCK_INTR; |
510 | 1 | } else { |
511 | 1 | success = PY_LOCK_FAILURE; |
512 | 1 | } |
513 | | |
514 | 3.34k | dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) -> %d\n", |
515 | 3.34k | lock, microseconds, intr_flag, success)); |
516 | 3.34k | return success; |
517 | 3.34k | } |
518 | | |
519 | | void |
520 | | PyThread_release_lock(PyThread_type_lock lock) |
521 | 3.33k | { |
522 | 3.33k | sem_t *thelock = (sem_t *)lock; |
523 | 3.33k | int status, error = 0; |
524 | | |
525 | 3.33k | (void) error; /* silence unused-but-set-variable warning */ |
526 | 3.33k | dprintf(("PyThread_release_lock(%p) called\n", lock)); |
527 | | |
528 | 3.33k | status = sem_post(thelock); |
529 | 3.33k | CHECK_STATUS("sem_post"); |
530 | 3.33k | } |
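
A self-contained sketch of the underlying idea: a POSIX semaphore initialized to 1 behaves as a binary lock, with sem_trywait() as the non-blocking acquire and sem_post() as the release (Linux, compile with -pthread; unnamed semaphores are unavailable on macOS).

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>

int main(void)
{
    sem_t lock;
    sem_init(&lock, 0, 1);                   /* count 1 == unlocked */

    sem_wait(&lock);                         /* acquire; would block if held */

    if (sem_trywait(&lock) == -1 && errno == EAGAIN)
        printf("second acquire fails immediately, as expected\n");

    sem_post(&lock);                         /* release */
    sem_destroy(&lock);
    return 0;
}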
531 | | |
532 | | #else /* USE_SEMAPHORES */ |
533 | | |
534 | | /* |
535 | | * Lock support. |
536 | | */ |
537 | | PyThread_type_lock |
538 | | PyThread_allocate_lock(void) |
539 | | { |
540 | | pthread_lock *lock; |
541 | | int status, error = 0; |
542 | | |
543 | | dprintf(("PyThread_allocate_lock called\n")); |
544 | | if (!initialized) |
545 | | PyThread_init_thread(); |
546 | | |
547 | | lock = (pthread_lock *) PyMem_RawMalloc(sizeof(pthread_lock)); |
548 | | if (lock) { |
549 | | memset((void *)lock, '\0', sizeof(pthread_lock)); |
550 | | lock->locked = 0; |
551 | | |
552 | | status = pthread_mutex_init(&lock->mut, NULL); |
553 | | CHECK_STATUS_PTHREAD("pthread_mutex_init"); |
554 | | /* Mark the pthread mutex underlying a Python mutex as |
555 | | pure happens-before. We can't simply mark the |
556 | | Python-level mutex as a mutex because it can be |
557 | | acquired and released in different threads, which |
558 | | will cause errors. */ |
559 | | _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(&lock->mut); |
560 | | |
561 | | status = _PyThread_cond_init(&lock->lock_released); |
562 | | CHECK_STATUS_PTHREAD("pthread_cond_init"); |
563 | | |
564 | | if (error) { |
565 | | PyMem_RawFree((void *)lock); |
566 | | lock = 0; |
567 | | } |
568 | | } |
569 | | |
570 | | dprintf(("PyThread_allocate_lock() -> %p\n", (void *)lock)); |
571 | | return (PyThread_type_lock) lock; |
572 | | } |
573 | | |
574 | | void |
575 | | PyThread_free_lock(PyThread_type_lock lock) |
576 | | { |
577 | | pthread_lock *thelock = (pthread_lock *)lock; |
578 | | int status, error = 0; |
579 | | |
580 | | (void) error; /* silence unused-but-set-variable warning */ |
581 | | dprintf(("PyThread_free_lock(%p) called\n", lock)); |
582 | | |
583 | | /* some pthread-like implementations tie the mutex to the cond |
584 | | * and must have the cond destroyed first. |
585 | | */ |
586 | | status = pthread_cond_destroy( &thelock->lock_released ); |
587 | | CHECK_STATUS_PTHREAD("pthread_cond_destroy"); |
588 | | |
589 | | status = pthread_mutex_destroy( &thelock->mut ); |
590 | | CHECK_STATUS_PTHREAD("pthread_mutex_destroy"); |
591 | | |
592 | | PyMem_RawFree((void *)thelock); |
593 | | } |
594 | | |
595 | | PyLockStatus |
596 | | PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds, |
597 | | int intr_flag) |
598 | | { |
599 | | PyLockStatus success = PY_LOCK_FAILURE; |
600 | | pthread_lock *thelock = (pthread_lock *)lock; |
601 | | int status, error = 0; |
602 | | |
603 | | dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) called\n", |
604 | | lock, microseconds, intr_flag)); |
605 | | |
606 | | if (microseconds == 0) { |
607 | | status = pthread_mutex_trylock( &thelock->mut ); |
608 | | if (status != EBUSY) |
609 | | CHECK_STATUS_PTHREAD("pthread_mutex_trylock[1]"); |
610 | | } |
611 | | else { |
612 | | status = pthread_mutex_lock( &thelock->mut ); |
613 | | CHECK_STATUS_PTHREAD("pthread_mutex_lock[1]"); |
614 | | } |
615 | | if (status == 0) { |
616 | | if (thelock->locked == 0) { |
617 | | success = PY_LOCK_ACQUIRED; |
618 | | } |
619 | | else if (microseconds != 0) { |
620 | | struct timespec abs; |
621 | | if (microseconds > 0) { |
622 | | _PyThread_cond_after(microseconds, &abs); |
623 | | } |
624 | | /* continue trying until we get the lock */ |
625 | | |
626 | | /* mut must be locked by me -- part of the condition |
627 | | * protocol */ |
628 | | while (success == PY_LOCK_FAILURE) { |
629 | | if (microseconds > 0) { |
630 | | status = pthread_cond_timedwait( |
631 | | &thelock->lock_released, |
632 | | &thelock->mut, &abs); |
633 | | if (status == 1) { |
634 | | break; |
635 | | } |
636 | | if (status == ETIMEDOUT) |
637 | | break; |
638 | | CHECK_STATUS_PTHREAD("pthread_cond_timedwait"); |
639 | | } |
640 | | else { |
641 | | status = pthread_cond_wait( |
642 | | &thelock->lock_released, |
643 | | &thelock->mut); |
644 | | CHECK_STATUS_PTHREAD("pthread_cond_wait"); |
645 | | } |
646 | | |
647 | | if (intr_flag && status == 0 && thelock->locked) { |
648 | | /* We were woken up, but didn't get the lock. We probably received |
649 | | * a signal. Return PY_LOCK_INTR to allow the caller to handle |
650 | | * it and retry. */ |
651 | | success = PY_LOCK_INTR; |
652 | | break; |
653 | | } |
654 | | else if (status == 0 && !thelock->locked) { |
655 | | success = PY_LOCK_ACQUIRED; |
656 | | } |
657 | | } |
658 | | } |
659 | | if (success == PY_LOCK_ACQUIRED) thelock->locked = 1; |
660 | | status = pthread_mutex_unlock( &thelock->mut ); |
661 | | CHECK_STATUS_PTHREAD("pthread_mutex_unlock[1]"); |
662 | | } |
663 | | |
664 | | if (error) success = PY_LOCK_FAILURE; |
665 | | dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) -> %d\n", |
666 | | lock, microseconds, intr_flag, success)); |
667 | | return success; |
668 | | } |
669 | | |
670 | | void |
671 | | PyThread_release_lock(PyThread_type_lock lock) |
672 | | { |
673 | | pthread_lock *thelock = (pthread_lock *)lock; |
674 | | int status, error = 0; |
675 | | |
676 | | (void) error; /* silence unused-but-set-variable warning */ |
677 | | dprintf(("PyThread_release_lock(%p) called\n", lock)); |
678 | | |
679 | | status = pthread_mutex_lock( &thelock->mut ); |
680 | | CHECK_STATUS_PTHREAD("pthread_mutex_lock[3]"); |
681 | | |
682 | | thelock->locked = 0; |
683 | | |
684 | | /* wake up someone (anyone, if any) waiting on the lock */ |
685 | | status = pthread_cond_signal( &thelock->lock_released ); |
686 | | CHECK_STATUS_PTHREAD("pthread_cond_signal"); |
687 | | |
688 | | status = pthread_mutex_unlock( &thelock->mut ); |
689 | | CHECK_STATUS_PTHREAD("pthread_mutex_unlock[3]"); |
690 | | } |
691 | | |
692 | | #endif /* USE_SEMAPHORES */ |
693 | | |
694 | | int |
695 | | PyThread_acquire_lock(PyThread_type_lock lock, int waitflag) |
696 | 2.49k | { |
697 | 2.49k | return PyThread_acquire_lock_timed(lock, waitflag ? -1 : 0, /*intr_flag=*/0); |
698 | 2.49k | } |
699 | | |
700 | | /* set the thread stack size. |
701 | | * Return 0 if size is valid, -1 if size is invalid, |
702 | | * -2 if setting stack size is not supported. |
703 | | */ |
704 | | static int |
705 | | _pythread_pthread_set_stacksize(size_t size) |
706 | 0 | { |
707 | 0 | #if defined(THREAD_STACK_SIZE) |
708 | 0 | pthread_attr_t attrs; |
709 | 0 | size_t tss_min; |
710 | 0 | int rc = 0; |
711 | 0 | #endif |
712 | | |
713 | | /* set to default */ |
714 | 0 | if (size == 0) { |
715 | 0 | _PyInterpreterState_GET_UNSAFE()->pythread_stacksize = 0; |
716 | 0 | return 0; |
717 | 0 | } |
718 | | |
719 | 0 | #if defined(THREAD_STACK_SIZE) |
720 | 0 | #if defined(PTHREAD_STACK_MIN) |
721 | 0 | tss_min = PTHREAD_STACK_MIN > THREAD_STACK_MIN ? PTHREAD_STACK_MIN |
722 | 0 | : THREAD_STACK_MIN; |
723 | | #else |
724 | | tss_min = THREAD_STACK_MIN; |
725 | | #endif |
726 | 0 | if (size >= tss_min) { |
727 | | /* validate stack size by setting thread attribute */ |
728 | 0 | if (pthread_attr_init(&attrs) == 0) { |
729 | 0 | rc = pthread_attr_setstacksize(&attrs, size); |
730 | 0 | pthread_attr_destroy(&attrs); |
731 | 0 | if (rc == 0) { |
732 | 0 | _PyInterpreterState_GET_UNSAFE()->pythread_stacksize = size; |
733 | 0 | return 0; |
734 | 0 | } |
735 | 0 | } |
736 | 0 | } |
737 | 0 | return -1; |
738 | | #else |
739 | | return -2; |
740 | | #endif |
741 | 0 | } |
742 | | |
743 | 0 | #define THREAD_SET_STACKSIZE(x) _pythread_pthread_set_stacksize(x) |
744 | | |
745 | | |
746 | | /* Thread Local Storage (TLS) API |
747 | | |
748 | | This API is DEPRECATED since Python 3.7. See PEP 539 for details. |
749 | | */ |
750 | | |
751 | | /* Issue #25658: On platforms where native TLS key is defined in a way that |
752 | | cannot be safely cast to int, PyThread_create_key returns immediately a |
753 | | failure status and other TLS functions all are no-ops. This indicates |
754 | | clearly that the old API is not supported on platforms where it cannot be |
755 | | used reliably, and that no effort will be made to add such support. |
756 | | |
757 | | Note: PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT will be unnecessary after |
758 | | removing this API. |
759 | | */ |
760 | | |
761 | | int |
762 | | PyThread_create_key(void) |
763 | 0 | { |
764 | 0 | #ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT |
765 | 0 | pthread_key_t key; |
766 | 0 | int fail = pthread_key_create(&key, NULL); |
767 | 0 | if (fail) |
768 | 0 | return -1; |
769 | 0 | if (key > INT_MAX) { |
770 | | /* Issue #22206: handle integer overflow */ |
771 | 0 | pthread_key_delete(key); |
772 | 0 | errno = ENOMEM; |
773 | 0 | return -1; |
774 | 0 | } |
775 | 0 | return (int)key; |
776 | | #else |
777 | | return -1; /* never return valid key value. */ |
778 | | #endif |
779 | 0 | } |
780 | | |
781 | | void |
782 | | PyThread_delete_key(int key) |
783 | 0 | { |
784 | 0 | #ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT |
785 | 0 | pthread_key_delete(key); |
786 | 0 | #endif |
787 | 0 | } |
788 | | |
789 | | void |
790 | | PyThread_delete_key_value(int key) |
791 | 0 | { |
792 | 0 | #ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT |
793 | 0 | pthread_setspecific(key, NULL); |
794 | 0 | #endif |
795 | 0 | } |
796 | | |
797 | | int |
798 | | PyThread_set_key_value(int key, void *value) |
799 | 0 | { |
800 | 0 | #ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT |
801 | 0 | int fail = pthread_setspecific(key, value); |
802 | 0 | return fail ? -1 : 0; |
803 | | #else |
804 | | return -1; |
805 | | #endif |
806 | 0 | } |
807 | | |
808 | | void * |
809 | | PyThread_get_key_value(int key) |
810 | 0 | { |
811 | 0 | #ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT |
812 | 0 | return pthread_getspecific(key); |
813 | | #else |
814 | | return NULL; |
815 | | #endif |
816 | 0 | } |
817 | | |
818 | | |
819 | | void |
820 | | PyThread_ReInitTLS(void) |
821 | 0 | { |
822 | 0 | } |
823 | | |
824 | | |
825 | | /* Thread Specific Storage (TSS) API |
826 | | |
827 | | Platform-specific components of TSS API implementation. |
828 | | */ |
829 | | |
830 | | int |
831 | | PyThread_tss_create(Py_tss_t *key) |
832 | 14 | { |
833 | 14 | assert(key != NULL); |
834 | | /* If the key has already been created, the function is silently skipped. */ |
835 | 14 | if (key->_is_initialized) { |
836 | 0 | return 0; |
837 | 0 | } |
838 | | |
839 | 14 | int fail = pthread_key_create(&(key->_key), NULL); |
840 | 14 | if (fail) { |
841 | 0 | return -1; |
842 | 0 | } |
843 | 14 | key->_is_initialized = 1; |
844 | 14 | return 0; |
845 | 14 | } |
846 | | |
847 | | void |
848 | | PyThread_tss_delete(Py_tss_t *key) |
849 | 0 | { |
850 | 0 | assert(key != NULL); |
851 | | /* If the key has not been created, the function is silently skipped. */ |
852 | 0 | if (!key->_is_initialized) { |
853 | 0 | return; |
854 | 0 | } |
855 | | |
856 | 0 | pthread_key_delete(key->_key); |
857 | | /* pthread does not provide a defined invalid value for the key. */ |
858 | 0 | key->_is_initialized = 0; |
859 | 0 | } |
860 | | |
861 | | int |
862 | | PyThread_tss_set(Py_tss_t *key, void *value) |
863 | 14 | { |
864 | 14 | assert(key != NULL); |
865 | 14 | int fail = pthread_setspecific(key->_key, value); |
866 | 14 | return fail ? -1 : 0; |
867 | 14 | } |
868 | | |
869 | | void * |
870 | | PyThread_tss_get(Py_tss_t *key) |
871 | 14 | { |
872 | 14 | assert(key != NULL); |
873 | 14 | return pthread_getspecific(key->_key); |
874 | 14 | } |
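
The TSS functions above are thin wrappers over pthread keys; a self-contained sketch of the underlying calls, using its own hypothetical key rather than CPython's (compile with -pthread):

#include <pthread.h>
#include <stdio.h>

int main(void)
{
    pthread_key_t key;
    if (pthread_key_create(&key, NULL) != 0)   /* NULL destructor, like the wrappers */
        return 1;

    int value = 42;
    pthread_setspecific(key, &value);          /* per-thread slot */
    int *seen = pthread_getspecific(key);      /* the same thread reads it back */
    printf("stored %d, read back %d\n", value, *seen);

    pthread_key_delete(key);
    return 0;
}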