/src/CMake/Utilities/cmlibuv/src/unix/thread.c
Line | Count | Source |
1 | | /* Copyright Joyent, Inc. and other Node contributors. All rights reserved. |
2 | | * |
3 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
4 | | * of this software and associated documentation files (the "Software"), to |
5 | | * deal in the Software without restriction, including without limitation the |
6 | | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
7 | | * sell copies of the Software, and to permit persons to whom the Software is |
8 | | * furnished to do so, subject to the following conditions: |
9 | | * |
10 | | * The above copyright notice and this permission notice shall be included in |
11 | | * all copies or substantial portions of the Software. |
12 | | * |
13 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
14 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
15 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
16 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
17 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
18 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
19 | | * IN THE SOFTWARE. |
20 | | */ |
21 | | |
22 | | #include "uv.h" |
23 | | #include "internal.h" |
24 | | |
25 | | #include <pthread.h> |
26 | | #ifdef __OpenBSD__ |
27 | | #include <pthread_np.h> |
28 | | #endif |
29 | | #include <assert.h> |
30 | | #include <errno.h> |
31 | | |
32 | | #include <sys/time.h> |
33 | | #include <sys/resource.h> /* getrlimit() */ |
34 | | #include <unistd.h> /* getpagesize() */ |
35 | | |
36 | | #include <limits.h> |
37 | | |
38 | | #ifdef __MVS__ |
39 | | #include <sys/ipc.h> |
40 | | #include <sys/sem.h> |
41 | | #endif |
42 | | |
43 | | #if defined(__GLIBC__) && !defined(__UCLIBC__) |
44 | | #include <gnu/libc-version.h> /* gnu_get_libc_version() */ |
45 | | #endif |
46 | | |
47 | | #if defined(__linux__) |
48 | | # include <sched.h> |
49 | 0 | # define uv__cpu_set_t cpu_set_t |
50 | | #elif defined(__FreeBSD__) |
51 | | # include <sys/param.h> |
52 | | # include <sys/cpuset.h> |
53 | | # include <pthread_np.h> |
54 | | # define uv__cpu_set_t cpuset_t |
55 | | #endif |
56 | | |
57 | | |
58 | | #undef NANOSEC |
59 | 0 | #define NANOSEC ((uint64_t) 1e9) |
60 | | |
61 | | /* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is |
62 | | * too small to safely receive signals on. |
63 | | * |
64 | | * Musl's PTHREAD_STACK_MIN + MINSIGSTKSZ == 8192 on arm64 (which has |
65 | | * the largest MINSIGSTKSZ of the architectures that musl supports) so |
66 | | * let's use that as a lower bound. |
67 | | * |
68 | | * We use a hardcoded value because PTHREAD_STACK_MIN + MINSIGSTKSZ |
69 | | * is between 28 and 133 KB when compiling against glibc, depending |
70 | | * on the architecture. |
71 | | */ |
/* Smallest stack a thread may safely run on: a hardcoded 8 kB floor
 * (see rationale comment above), raised to PTHREAD_STACK_MIN when the
 * platform defines a larger value. */
static size_t uv__min_stack_size(void) {
  size_t lower_bound = 8192;

#ifdef PTHREAD_STACK_MIN  /* Not defined on NetBSD. */
  if ((size_t) PTHREAD_STACK_MIN > lower_bound)
    lower_bound = (size_t) PTHREAD_STACK_MIN;
#endif  /* PTHREAD_STACK_MIN */

  return lower_bound;
}
82 | | |
83 | | |
84 | | /* On Linux, threads created by musl have a much smaller stack than threads |
85 | | * created by glibc (80 vs. 2048 or 4096 kB.) Follow glibc for consistency. |
86 | | */ |
/* Default stack size for new threads: mirror glibc's per-architecture
 * defaults on Linux; 0 (platform default) everywhere else. */
static size_t uv__default_stack_size(void) {
#if !defined(__linux__)
  return 0;
#else
  const size_t megabyte = (size_t) 1 << 20;

#if defined(__PPC__) || defined(__ppc__) || defined(__powerpc__)
  return 4 * megabyte;  /* glibc default. */
#else
  return 2 * megabyte;  /* glibc default. */
#endif
#endif
}
96 | | |
97 | | |
98 | | /* On MacOS, threads other than the main thread are created with a reduced |
99 | | * stack size by default. Adjust to RLIMIT_STACK aligned to the page size. |
100 | | */ |
/* Pick a stack size for worker threads. On macOS and Linux, cap it at
 * RLIMIT_STACK rounded down to a page boundary; otherwise (or when the
 * limit is unusable) fall back to uv__default_stack_size(). */
size_t uv__thread_stack_size(void) {
#if defined(__APPLE__) || defined(__linux__)
  struct rlimit lim;
  rlim_t aligned;

  /* getrlimit() can fail on some aarch64 systems due to a glibc bug where
   * the system call wrapper invokes the wrong system call. Don't treat
   * that as fatal, just use the default stack size instead. */
  if (getrlimit(RLIMIT_STACK, &lim) != 0)
    return uv__default_stack_size();

  if (lim.rlim_cur == RLIM_INFINITY)
    return uv__default_stack_size();

  /* pthread_attr_setstacksize() expects page-aligned values. */
  aligned = lim.rlim_cur - lim.rlim_cur % (rlim_t) getpagesize();

  if (aligned >= (rlim_t) uv__min_stack_size())
    return aligned;
#endif

  return uv__default_stack_size();
}
124 | | |
125 | | |
126 | 0 | int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) { |
127 | 0 | uv_thread_options_t params; |
128 | 0 | params.flags = UV_THREAD_NO_FLAGS; |
129 | 0 | return uv_thread_create_ex(tid, ¶ms, entry, arg); |
130 | 0 | } |
131 | | |
132 | | |
/* Detach a thread so its resources are reclaimed automatically when it
 * exits. Returns 0 or a negative UV_* error code. */
int uv_thread_detach(uv_thread_t *tid) {
  return UV__ERR(pthread_detach(*tid));
}
136 | | |
137 | | |
/* Create a thread running entry(arg).
 *
 * A caller-supplied stack size (params->flags & UV_THREAD_HAS_STACK_SIZE)
 * is rounded up to a page boundary and clamped to uv__min_stack_size();
 * otherwise uv__thread_stack_size() picks one. pthread attribute failures
 * abort, since they indicate programmer error or resource exhaustion that
 * cannot be handled meaningfully. Returns 0 or a negative UV_* error code. */
int uv_thread_create_ex(uv_thread_t* tid,
                        const uv_thread_options_t* params,
                        void (*entry)(void *arg),
                        void *arg) {
  int err;
  pthread_attr_t* attr;
  pthread_attr_t attr_storage;
  size_t pagesize;
  size_t stack_size;
  size_t min_stack_size;

  /* Used to squelch a -Wcast-function-type warning. */
  union {
    void (*in)(void*);
    void* (*out)(void*);
  } f;

  stack_size =
      params->flags & UV_THREAD_HAS_STACK_SIZE ? params->stack_size : 0;

  attr = NULL;
  if (stack_size == 0) {
    stack_size = uv__thread_stack_size();
  } else {
    pagesize = (size_t)getpagesize();
    /* Round up to the nearest page boundary. NOTE(review): assumes the
     * page size is a power of two, which holds on supported platforms. */
    stack_size = (stack_size + pagesize - 1) &~ (pagesize - 1);
    min_stack_size = uv__min_stack_size();
    if (stack_size < min_stack_size)
      stack_size = min_stack_size;
  }

  if (stack_size > 0) {
    attr = &attr_storage;

    if (pthread_attr_init(attr))
      abort();

    if (pthread_attr_setstacksize(attr, stack_size))
      abort();
  }

  /* Launder void(*)(void*) into pthread's void*(*)(void*) via the union. */
  f.in = entry;
  err = pthread_create(tid, attr, f.out, arg);

  if (attr != NULL)
    pthread_attr_destroy(attr);

  return UV__ERR(err);
}
188 | | |
189 | | #if UV__CPU_AFFINITY_SUPPORTED |
190 | | |
/* Bind a thread to the CPUs whose bytes are nonzero in cpumask.
 * If oldmask is non-NULL, the previous affinity is stored there first.
 * mask_size must be at least uv_cpumask_size() bytes.
 * Returns 0 or a negative UV_* error code. */
int uv_thread_setaffinity(uv_thread_t* tid,
                          char* cpumask,
                          char* oldmask,
                          size_t mask_size) {
  int i;
  int r;
  uv__cpu_set_t cpuset;
  int cpumasksize;

  cpumasksize = uv_cpumask_size();
  if (cpumasksize < 0)
    return cpumasksize;
  if (mask_size < (size_t)cpumasksize)
    return UV_EINVAL;

  /* Capture the current affinity before changing it. */
  if (oldmask != NULL) {
    r = uv_thread_getaffinity(tid, oldmask, mask_size);
    if (r < 0)
      return r;
  }

  /* Translate the byte-per-CPU mask into the kernel's bitset. */
  CPU_ZERO(&cpuset);
  for (i = 0; i < cpumasksize; i++)
    if (cpumask[i])
      CPU_SET(i, &cpuset);

#if defined(__ANDROID__) || defined(__OHOS__)
  /* Bionic lacks pthread_setaffinity_np; go through sched_setaffinity()
   * with the kernel tid instead. */
  if (sched_setaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset))
    r = errno;
  else
    r = 0;
#else
  r = pthread_setaffinity_np(*tid, sizeof(cpuset), &cpuset);
#endif

  return UV__ERR(r);
}
228 | | |
229 | | |
/* Fetch a thread's CPU affinity as one byte per CPU (1 = allowed).
 * mask_size must be at least uv_cpumask_size() bytes.
 * Returns 0 or a negative UV_* error code. */
int uv_thread_getaffinity(uv_thread_t* tid,
                          char* cpumask,
                          size_t mask_size) {
  int r;
  int i;
  uv__cpu_set_t cpuset;
  int cpumasksize;

  cpumasksize = uv_cpumask_size();
  if (cpumasksize < 0)
    return cpumasksize;
  if (mask_size < (size_t)cpumasksize)
    return UV_EINVAL;

  CPU_ZERO(&cpuset);
#if defined(__ANDROID__) || defined(__OHOS__)
  /* Bionic lacks pthread_getaffinity_np; use sched_getaffinity(). */
  if (sched_getaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset))
    r = errno;
  else
    r = 0;
#else
  r = pthread_getaffinity_np(*tid, sizeof(cpuset), &cpuset);
#endif
  if (r)
    return UV__ERR(r);
  /* Expand the kernel bitset into the byte-per-CPU output format. */
  for (i = 0; i < cpumasksize; i++)
    cpumask[i] = !!CPU_ISSET(i, &cpuset);

  return 0;
}
260 | | #else |
/* CPU affinity is not supported on this platform. */
int uv_thread_setaffinity(uv_thread_t* tid,
                          char* cpumask,
                          char* oldmask,
                          size_t mask_size) {
  return UV_ENOTSUP;
}


/* CPU affinity is not supported on this platform. */
int uv_thread_getaffinity(uv_thread_t* tid,
                          char* cpumask,
                          size_t mask_size) {
  return UV_ENOTSUP;
}
274 | | #endif /* defined(__linux__) || defined(UV_BSD_H) */ |
275 | | |
/* Return the CPU number the calling thread is currently running on,
 * a negative UV_* error code, or UV_ENOTSUP where unavailable. */
int uv_thread_getcpu(void) {
#if UV__CPU_AFFINITY_SUPPORTED
  int cpu;

  cpu = sched_getcpu();
  if (cpu < 0)
    return UV__ERR(errno);

  return cpu;
#else
  return UV_ENOTSUP;
#endif
}
289 | | |
/* Return the handle of the calling thread. */
uv_thread_t uv_thread_self(void) {
  return pthread_self();
}
293 | | |
/* Block until the given thread exits; its return value is discarded.
 * Returns 0 or a negative UV_* error code. */
int uv_thread_join(uv_thread_t *tid) {
  return UV__ERR(pthread_join(*tid, NULL));
}
297 | | |
298 | | |
/* Nonzero when both handles refer to the same thread. */
int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
  return pthread_equal(*t1, *t2);
}
302 | | |
303 | 0 | int uv_thread_setname(const char* name) { |
304 | 0 | if (name == NULL) |
305 | 0 | return UV_EINVAL; |
306 | 0 | return uv__thread_setname(name); |
307 | 0 | } |
308 | | |
309 | 0 | int uv_thread_getname(uv_thread_t* tid, char* name, size_t size) { |
310 | 0 | if (name == NULL || size == 0) |
311 | 0 | return UV_EINVAL; |
312 | | |
313 | 0 | return uv__thread_getname(tid, name, size); |
314 | 0 | } |
315 | | |
/* Initialize a mutex. Debug builds use PTHREAD_MUTEX_ERRORCHECK (when
 * available) so recursive locking and unlock-by-non-owner abort instead
 * of deadlocking silently. Returns 0 or a negative UV_* error code. */
int uv_mutex_init(uv_mutex_t* mutex) {
#if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK)
  return UV__ERR(pthread_mutex_init(mutex, NULL));
#else
  pthread_mutexattr_t attr;
  int err;

  if (pthread_mutexattr_init(&attr))
    abort();

  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK))
    abort();

  err = pthread_mutex_init(mutex, &attr);

  if (pthread_mutexattr_destroy(&attr))
    abort();

  return UV__ERR(err);
#endif
}
337 | | |
338 | | |
339 | 0 | int uv_mutex_init_recursive(uv_mutex_t* mutex) { |
340 | 0 | pthread_mutexattr_t attr; |
341 | 0 | int err; |
342 | |
|
343 | 0 | if (pthread_mutexattr_init(&attr)) |
344 | 0 | abort(); |
345 | | |
346 | 0 | if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) |
347 | 0 | abort(); |
348 | | |
349 | 0 | err = pthread_mutex_init(mutex, &attr); |
350 | |
|
351 | 0 | if (pthread_mutexattr_destroy(&attr)) |
352 | 0 | abort(); |
353 | | |
354 | 0 | return UV__ERR(err); |
355 | 0 | } |
356 | | |
357 | | |
358 | 0 | void uv_mutex_destroy(uv_mutex_t* mutex) { |
359 | 0 | if (pthread_mutex_destroy(mutex)) |
360 | 0 | abort(); |
361 | 0 | } |
362 | | |
363 | | |
364 | 0 | void uv_mutex_lock(uv_mutex_t* mutex) { |
365 | 0 | if (pthread_mutex_lock(mutex)) |
366 | 0 | abort(); |
367 | 0 | } |
368 | | |
369 | | |
370 | 0 | int uv_mutex_trylock(uv_mutex_t* mutex) { |
371 | 0 | int err; |
372 | |
|
373 | 0 | err = pthread_mutex_trylock(mutex); |
374 | 0 | if (err) { |
375 | 0 | if (err != EBUSY && err != EAGAIN) |
376 | 0 | abort(); |
377 | 0 | return UV_EBUSY; |
378 | 0 | } |
379 | | |
380 | 0 | return 0; |
381 | 0 | } |
382 | | |
383 | | |
384 | 0 | void uv_mutex_unlock(uv_mutex_t* mutex) { |
385 | 0 | if (pthread_mutex_unlock(mutex)) |
386 | 0 | abort(); |
387 | 0 | } |
388 | | |
389 | | |
390 | 0 | int uv_rwlock_init(uv_rwlock_t* rwlock) { |
391 | 0 | return UV__ERR(pthread_rwlock_init(rwlock, NULL)); |
392 | 0 | } |
393 | | |
394 | | |
395 | 0 | void uv_rwlock_destroy(uv_rwlock_t* rwlock) { |
396 | 0 | if (pthread_rwlock_destroy(rwlock)) |
397 | 0 | abort(); |
398 | 0 | } |
399 | | |
400 | | |
401 | 0 | void uv_rwlock_rdlock(uv_rwlock_t* rwlock) { |
402 | 0 | if (pthread_rwlock_rdlock(rwlock)) |
403 | 0 | abort(); |
404 | 0 | } |
405 | | |
406 | | |
407 | 0 | int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) { |
408 | 0 | int err; |
409 | |
|
410 | 0 | err = pthread_rwlock_tryrdlock(rwlock); |
411 | 0 | if (err) { |
412 | 0 | if (err != EBUSY && err != EAGAIN) |
413 | 0 | abort(); |
414 | 0 | return UV_EBUSY; |
415 | 0 | } |
416 | | |
417 | 0 | return 0; |
418 | 0 | } |
419 | | |
420 | | |
421 | 0 | void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) { |
422 | 0 | if (pthread_rwlock_unlock(rwlock)) |
423 | 0 | abort(); |
424 | 0 | } |
425 | | |
426 | | |
427 | 0 | void uv_rwlock_wrlock(uv_rwlock_t* rwlock) { |
428 | 0 | if (pthread_rwlock_wrlock(rwlock)) |
429 | 0 | abort(); |
430 | 0 | } |
431 | | |
432 | | |
433 | 0 | int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) { |
434 | 0 | int err; |
435 | |
|
436 | 0 | err = pthread_rwlock_trywrlock(rwlock); |
437 | 0 | if (err) { |
438 | 0 | if (err != EBUSY && err != EAGAIN) |
439 | 0 | abort(); |
440 | 0 | return UV_EBUSY; |
441 | 0 | } |
442 | | |
443 | 0 | return 0; |
444 | 0 | } |
445 | | |
446 | | |
447 | 0 | void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) { |
448 | 0 | if (pthread_rwlock_unlock(rwlock)) |
449 | 0 | abort(); |
450 | 0 | } |
451 | | |
452 | | |
453 | 0 | void uv_once(uv_once_t* guard, void (*callback)(void)) { |
454 | 0 | if (pthread_once(guard, callback)) |
455 | 0 | abort(); |
456 | 0 | } |
457 | | |
458 | | #if defined(__APPLE__) && defined(__MACH__) |
459 | | |
/* macOS: initialize a Mach semaphore with the given count.
 * Returns 0, UV_EINVAL, or UV_ENOMEM; other kernel errors abort. */
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
  kern_return_t err;

  err = semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value);
  if (err == KERN_SUCCESS)
    return 0;
  if (err == KERN_INVALID_ARGUMENT)
    return UV_EINVAL;
  if (err == KERN_RESOURCE_SHORTAGE)
    return UV_ENOMEM;

  abort();
  return UV_EINVAL;  /* Satisfy the compiler. */
}
474 | | |
475 | | |
/* macOS: destroy a Mach semaphore; aborts on failure. */
void uv_sem_destroy(uv_sem_t* sem) {
  if (semaphore_destroy(mach_task_self(), *sem))
    abort();
}
480 | | |
481 | | |
/* macOS: increment the semaphore, waking one waiter; aborts on failure. */
void uv_sem_post(uv_sem_t* sem) {
  if (semaphore_signal(*sem))
    abort();
}
486 | | |
487 | | |
/* macOS: decrement the semaphore, blocking until it is positive.
 * Retries when the wait is interrupted (KERN_ABORTED); any other
 * failure aborts. */
void uv_sem_wait(uv_sem_t* sem) {
  int r;

  do
    r = semaphore_wait(*sem);
  while (r == KERN_ABORTED);

  if (r != KERN_SUCCESS)
    abort();
}
498 | | |
499 | | |
/* macOS: non-blocking semaphore decrement, implemented as a timed wait
 * with a zero timeout. Returns 0 on success, UV_EAGAIN when the count
 * is zero; anything else aborts. */
int uv_sem_trywait(uv_sem_t* sem) {
  mach_timespec_t interval;
  kern_return_t err;

  interval.tv_sec = 0;
  interval.tv_nsec = 0;

  err = semaphore_timedwait(*sem, interval);
  if (err == KERN_SUCCESS)
    return 0;
  if (err == KERN_OPERATION_TIMED_OUT)
    return UV_EAGAIN;

  abort();
  return UV_EINVAL;  /* Satisfy the compiler. */
}
516 | | |
517 | | #else /* !(defined(__APPLE__) && defined(__MACH__)) */ |
518 | | |
519 | | #if defined(__GLIBC__) && !defined(__UCLIBC__) |
520 | | |
521 | | /* Hack around https://sourceware.org/bugzilla/show_bug.cgi?id=12674 |
522 | | * by providing a custom implementation for glibc < 2.21 in terms of other |
523 | | * concurrency primitives. |
524 | | * Refs: https://github.com/nodejs/node/issues/19903 */ |
525 | | |
526 | | /* To preserve ABI compatibility, we treat the uv_sem_t as storage for |
527 | | * a pointer to the actual struct we're using underneath. */ |
528 | | |
529 | | static uv_once_t glibc_version_check_once = UV_ONCE_INIT; |
530 | | static int platform_needs_custom_semaphore = 0; |
531 | | |
532 | 0 | static void glibc_version_check(void) { |
533 | 0 | const char* version = gnu_get_libc_version(); |
534 | 0 | platform_needs_custom_semaphore = |
535 | 0 | version[0] == '2' && version[1] == '.' && |
536 | 0 | atoi(version + 2) < 21; |
537 | 0 | } |
538 | | |
539 | | #elif defined(__MVS__) |
540 | | |
541 | | #define platform_needs_custom_semaphore 1 |
542 | | |
543 | | #else /* !defined(__GLIBC__) && !defined(__MVS__) */ |
544 | | |
545 | | #define platform_needs_custom_semaphore 0 |
546 | | |
547 | | #endif |
548 | | |
/* Portable semaphore built from a mutex + condition variable; used when
 * the native sem_t is unavailable or broken (see notes above). */
typedef struct uv_semaphore_s {
  uv_mutex_t mutex;   /* Guards value. */
  uv_cond_t cond;     /* Signalled when value becomes nonzero. */
  unsigned int value; /* Current semaphore count. */
} uv_semaphore_t;
554 | | |
555 | | #if (defined(__GLIBC__) && !defined(__UCLIBC__)) || \ |
556 | | platform_needs_custom_semaphore |
557 | | STATIC_ASSERT(sizeof(uv_sem_t) >= sizeof(uv_semaphore_t*)); |
558 | | #endif |
559 | | |
560 | 0 | static int uv__custom_sem_init(uv_sem_t* sem_, unsigned int value) { |
561 | 0 | int err; |
562 | 0 | uv_semaphore_t* sem; |
563 | |
|
564 | 0 | sem = uv__malloc(sizeof(*sem)); |
565 | 0 | if (sem == NULL) |
566 | 0 | return UV_ENOMEM; |
567 | | |
568 | 0 | if ((err = uv_mutex_init(&sem->mutex)) != 0) { |
569 | 0 | uv__free(sem); |
570 | 0 | return err; |
571 | 0 | } |
572 | | |
573 | 0 | if ((err = uv_cond_init(&sem->cond)) != 0) { |
574 | 0 | uv_mutex_destroy(&sem->mutex); |
575 | 0 | uv__free(sem); |
576 | 0 | return err; |
577 | 0 | } |
578 | | |
579 | 0 | sem->value = value; |
580 | 0 | *(uv_semaphore_t**)sem_ = sem; |
581 | 0 | return 0; |
582 | 0 | } |
583 | | |
584 | | |
585 | 0 | static void uv__custom_sem_destroy(uv_sem_t* sem_) { |
586 | 0 | uv_semaphore_t* sem; |
587 | |
|
588 | 0 | sem = *(uv_semaphore_t**)sem_; |
589 | 0 | uv_cond_destroy(&sem->cond); |
590 | 0 | uv_mutex_destroy(&sem->mutex); |
591 | 0 | uv__free(sem); |
592 | 0 | } |
593 | | |
594 | | |
595 | 0 | static void uv__custom_sem_post(uv_sem_t* sem_) { |
596 | 0 | uv_semaphore_t* sem; |
597 | |
|
598 | 0 | sem = *(uv_semaphore_t**)sem_; |
599 | 0 | uv_mutex_lock(&sem->mutex); |
600 | 0 | sem->value++; |
601 | 0 | if (sem->value == 1) |
602 | 0 | uv_cond_signal(&sem->cond); /* Release one to replace us. */ |
603 | 0 | uv_mutex_unlock(&sem->mutex); |
604 | 0 | } |
605 | | |
606 | | |
607 | 0 | static void uv__custom_sem_wait(uv_sem_t* sem_) { |
608 | 0 | uv_semaphore_t* sem; |
609 | |
|
610 | 0 | sem = *(uv_semaphore_t**)sem_; |
611 | 0 | uv_mutex_lock(&sem->mutex); |
612 | 0 | while (sem->value == 0) |
613 | 0 | uv_cond_wait(&sem->cond, &sem->mutex); |
614 | 0 | sem->value--; |
615 | 0 | uv_mutex_unlock(&sem->mutex); |
616 | 0 | } |
617 | | |
618 | | |
619 | 0 | static int uv__custom_sem_trywait(uv_sem_t* sem_) { |
620 | 0 | uv_semaphore_t* sem; |
621 | |
|
622 | 0 | sem = *(uv_semaphore_t**)sem_; |
623 | 0 | if (uv_mutex_trylock(&sem->mutex) != 0) |
624 | 0 | return UV_EAGAIN; |
625 | | |
626 | 0 | if (sem->value == 0) { |
627 | 0 | uv_mutex_unlock(&sem->mutex); |
628 | 0 | return UV_EAGAIN; |
629 | 0 | } |
630 | | |
631 | 0 | sem->value--; |
632 | 0 | uv_mutex_unlock(&sem->mutex); |
633 | |
|
634 | 0 | return 0; |
635 | 0 | } |
636 | | |
/* Native POSIX semaphore init (process-private).
 * Returns 0 or a negative UV_* error code from errno. */
static int uv__sem_init(uv_sem_t* sem, unsigned int value) {
  if (sem_init(sem, 0, value))
    return UV__ERR(errno);
  return 0;
}
642 | | |
643 | | |
/* Native POSIX semaphore destroy; aborts on failure. */
static void uv__sem_destroy(uv_sem_t* sem) {
  if (sem_destroy(sem))
    abort();
}
648 | | |
649 | | |
/* Native POSIX semaphore post; aborts on failure. */
static void uv__sem_post(uv_sem_t* sem) {
  if (sem_post(sem))
    abort();
}
654 | | |
655 | | |
/* Native POSIX semaphore wait, retrying when interrupted by a signal
 * (EINTR); any other failure aborts. */
static void uv__sem_wait(uv_sem_t* sem) {
  int r;

  do
    r = sem_wait(sem);
  while (r == -1 && errno == EINTR);

  if (r)
    abort();
}
666 | | |
667 | | |
/* Native POSIX non-blocking wait, retrying on EINTR. Returns 0 on
 * success, UV_EAGAIN when the count is zero; anything else aborts. */
static int uv__sem_trywait(uv_sem_t* sem) {
  int r;

  do
    r = sem_trywait(sem);
  while (r == -1 && errno == EINTR);

  if (r) {
    if (errno == EAGAIN)
      return UV_EAGAIN;
    abort();
  }

  return 0;
}
683 | | |
684 | 0 | int uv_sem_init(uv_sem_t* sem, unsigned int value) { |
685 | 0 | #if defined(__GLIBC__) && !defined(__UCLIBC__) |
686 | 0 | uv_once(&glibc_version_check_once, glibc_version_check); |
687 | 0 | #endif |
688 | |
|
689 | 0 | if (platform_needs_custom_semaphore) |
690 | 0 | return uv__custom_sem_init(sem, value); |
691 | 0 | else |
692 | 0 | return uv__sem_init(sem, value); |
693 | 0 | } |
694 | | |
695 | | |
696 | 0 | void uv_sem_destroy(uv_sem_t* sem) { |
697 | 0 | if (platform_needs_custom_semaphore) |
698 | 0 | uv__custom_sem_destroy(sem); |
699 | 0 | else |
700 | 0 | uv__sem_destroy(sem); |
701 | 0 | } |
702 | | |
703 | | |
704 | 0 | void uv_sem_post(uv_sem_t* sem) { |
705 | 0 | if (platform_needs_custom_semaphore) |
706 | 0 | uv__custom_sem_post(sem); |
707 | 0 | else |
708 | 0 | uv__sem_post(sem); |
709 | 0 | } |
710 | | |
711 | | |
712 | 0 | void uv_sem_wait(uv_sem_t* sem) { |
713 | 0 | if (platform_needs_custom_semaphore) |
714 | 0 | uv__custom_sem_wait(sem); |
715 | 0 | else |
716 | 0 | uv__sem_wait(sem); |
717 | 0 | } |
718 | | |
719 | | |
720 | 0 | int uv_sem_trywait(uv_sem_t* sem) { |
721 | 0 | if (platform_needs_custom_semaphore) |
722 | 0 | return uv__custom_sem_trywait(sem); |
723 | 0 | else |
724 | 0 | return uv__sem_trywait(sem); |
725 | 0 | } |
726 | | |
727 | | #endif /* defined(__APPLE__) && defined(__MACH__) */ |
728 | | |
729 | | |
730 | | #if defined(__APPLE__) && defined(__MACH__) || defined(__MVS__) |
731 | | |
/* macOS / z/OS: plain condvar init; these platforms lack (or don't need)
 * pthread_condattr_setclock(). Returns 0 or a negative UV_* error code. */
int uv_cond_init(uv_cond_t* cond) {
  return UV__ERR(pthread_cond_init(cond, NULL));
}
735 | | |
736 | | #else /* !(defined(__APPLE__) && defined(__MACH__)) */ |
737 | | |
/* Initialize a condition variable bound to CLOCK_MONOTONIC (where
 * supported) so uv_cond_timedwait() is immune to wall-clock jumps.
 * Returns 0 or a negative UV_* error code; cleans up on every path. */
int uv_cond_init(uv_cond_t* cond) {
  pthread_condattr_t attr;
  int err;

  err = pthread_condattr_init(&attr);
  if (err)
    return UV__ERR(err);

#if !defined(__hpux)
  err = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
  if (err)
    goto error2;
#endif

  err = pthread_cond_init(cond, &attr);
  if (err)
    goto error2;

  err = pthread_condattr_destroy(&attr);
  if (err)
    goto error;

  return 0;

error:
  /* Condvar was created but the attr couldn't be destroyed; undo both. */
  pthread_cond_destroy(cond);
error2:
  pthread_condattr_destroy(&attr);
  return UV__ERR(err);
}
768 | | |
769 | | #endif /* defined(__APPLE__) && defined(__MACH__) */ |
770 | | |
/* Destroy a condition variable; aborts on failure. On macOS, first
 * perform a short timed wait to drain pending signals (workaround for
 * crashes when destroying signalled-but-unwaited condvars). */
void uv_cond_destroy(uv_cond_t* cond) {
#if defined(__APPLE__) && defined(__MACH__)
  /* It has been reported that destroying condition variables that have been
   * signalled but not waited on can sometimes result in application crashes.
   * See https://codereview.chromium.org/1323293005.
   */
  pthread_mutex_t mutex;
  struct timespec ts;
  int err;

  if (pthread_mutex_init(&mutex, NULL))
    abort();

  if (pthread_mutex_lock(&mutex))
    abort();

  /* 1 ns timeout: we only want to consume a pending signal, not block. */
  ts.tv_sec = 0;
  ts.tv_nsec = 1;

  err = pthread_cond_timedwait_relative_np(cond, &mutex, &ts);
  if (err != 0 && err != ETIMEDOUT)
    abort();

  if (pthread_mutex_unlock(&mutex))
    abort();

  if (pthread_mutex_destroy(&mutex))
    abort();
#endif /* defined(__APPLE__) && defined(__MACH__) */

  if (pthread_cond_destroy(cond))
    abort();
}
804 | | |
805 | 0 | void uv_cond_signal(uv_cond_t* cond) { |
806 | 0 | if (pthread_cond_signal(cond)) |
807 | 0 | abort(); |
808 | 0 | } |
809 | | |
810 | 0 | void uv_cond_broadcast(uv_cond_t* cond) { |
811 | 0 | if (pthread_cond_broadcast(cond)) |
812 | 0 | abort(); |
813 | 0 | } |
814 | | |
815 | | #if defined(__APPLE__) && defined(__MACH__) |
816 | | |
/* macOS: wait on a condvar, tolerating a spurious EINVAL/EBUSY pair that
 * some OS X releases return from a valid wait (see linked issue). Any
 * other error aborts. */
void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  int r;

  errno = 0;
  r = pthread_cond_wait(cond, mutex);

  /* Workaround for a bug in OS X at least up to 13.6
   * See https://github.com/libuv/libuv/issues/4165
   */
  if (r == EINVAL)
    if (errno == EBUSY)
      return;

  if (r)
    abort();
}
833 | | |
834 | | #else /* !(defined(__APPLE__) && defined(__MACH__)) */ |
835 | | |
836 | 0 | void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) { |
837 | 0 | if (pthread_cond_wait(cond, mutex)) |
838 | 0 | abort(); |
839 | 0 | } |
840 | | |
841 | | #endif |
842 | | |
/* Wait on a condvar for at most `timeout` nanoseconds (relative).
 * macOS uses the native relative wait; elsewhere the relative timeout is
 * converted to an absolute deadline on the clock the condvar was
 * initialized with (monotonic via uv__hrtime, wall clock on z/OS).
 * Returns 0, UV_ETIMEDOUT, or aborts on any other error. */
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
  int r;
  struct timespec ts;
#if defined(__MVS__)
  struct timeval tv;
#endif

#if defined(__APPLE__) && defined(__MACH__)
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
#if defined(__MVS__)
  /* z/OS: deadline is relative to the wall clock. NOTE(review): the
   * `* 1e3` promotes to floating point before converting back — confirm
   * no precision loss is intended here. */
  if (gettimeofday(&tv, NULL))
    abort();
  timeout += tv.tv_sec * NANOSEC + tv.tv_usec * 1e3;
#else
  timeout += uv__hrtime(UV_CLOCK_PRECISE);
#endif
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait(cond, mutex, &ts);
#endif


  if (r == 0)
    return 0;

  if (r == ETIMEDOUT)
    return UV_ETIMEDOUT;

  abort();
  return UV_EINVAL;  /* Satisfy the compiler. */
}
877 | | |
878 | | |
879 | 0 | int uv_key_create(uv_key_t* key) { |
880 | 0 | return UV__ERR(pthread_key_create(key, NULL)); |
881 | 0 | } |
882 | | |
883 | | |
884 | 0 | void uv_key_delete(uv_key_t* key) { |
885 | 0 | if (pthread_key_delete(*key)) |
886 | 0 | abort(); |
887 | 0 | } |
888 | | |
889 | | |
/* Return the calling thread's value for the key (NULL when unset). */
void* uv_key_get(uv_key_t* key) {
  return pthread_getspecific(*key);
}
893 | | |
894 | | |
895 | 0 | void uv_key_set(uv_key_t* key, void* value) { |
896 | 0 | if (pthread_setspecific(*key, value)) |
897 | 0 | abort(); |
898 | 0 | } |
899 | | |
900 | | #if defined(_AIX) || defined(__MVS__) || defined(__PASE__) || \ |
901 | | defined(SUNOS_NO_PTHREAD_NAME) |
/* Thread naming is unavailable on this platform. */
int uv__thread_setname(const char* name) {
  return UV_ENOSYS;
}
905 | | #elif defined(__APPLE__) |
906 | | int uv__thread_setname(const char* name) { |
907 | | char namebuf[UV_PTHREAD_MAX_NAMELEN_NP]; |
908 | | strncpy(namebuf, name, sizeof(namebuf) - 1); |
909 | | namebuf[sizeof(namebuf) - 1] = '\0'; |
910 | | int err = pthread_setname_np(namebuf); |
911 | | if (err) |
912 | | return UV__ERR(errno); |
913 | | return 0; |
914 | | } |
915 | | #elif defined(__NetBSD__) |
/* NetBSD: set the calling thread's name, truncated to the platform
 * limit; NetBSD's pthread_setname_np() takes a printf-style format. */
int uv__thread_setname(const char* name) {
  char namebuf[UV_PTHREAD_MAX_NAMELEN_NP];
  strncpy(namebuf, name, sizeof(namebuf) - 1);
  namebuf[sizeof(namebuf) - 1] = '\0';
  return UV__ERR(pthread_setname_np(pthread_self(), "%s", namebuf));
}
922 | | #elif defined(__OpenBSD__) |
/* OpenBSD: set the calling thread's name; pthread_set_name_np()
 * returns void, so this cannot fail. */
int uv__thread_setname(const char* name) {
  char namebuf[UV_PTHREAD_MAX_NAMELEN_NP];
  strncpy(namebuf, name, sizeof(namebuf) - 1);
  namebuf[sizeof(namebuf) - 1] = '\0';
  pthread_set_name_np(pthread_self(), namebuf);
  return 0;
}
930 | | #else |
/* Default (glibc-style): set the calling thread's name, truncated to
 * the platform limit. Returns 0 or a negative UV_* error code. */
int uv__thread_setname(const char* name) {
  char namebuf[UV_PTHREAD_MAX_NAMELEN_NP];
  strncpy(namebuf, name, sizeof(namebuf) - 1);
  namebuf[sizeof(namebuf) - 1] = '\0';
  return UV__ERR(pthread_setname_np(pthread_self(), namebuf));
}
937 | | #endif |
938 | | |
939 | | #if (defined(__ANDROID_API__) && __ANDROID_API__ < 26) || \ |
940 | | defined(_AIX) || \ |
941 | | defined(__MVS__) || \ |
942 | | defined(__PASE__) || \ |
943 | | defined(SUNOS_NO_PTHREAD_NAME) |
/* Thread name retrieval is unavailable on this platform. */
int uv__thread_getname(uv_thread_t* tid, char* name, size_t size) {
  return UV_ENOSYS;
}
947 | | #elif defined(__OpenBSD__) |
/* OpenBSD: copy the thread's name into `name` (truncated to size-1,
 * always NUL-terminated); pthread_get_name_np() cannot fail.
 * Caller (uv_thread_getname) guarantees size > 0. */
int uv__thread_getname(uv_thread_t* tid, char* name, size_t size) {
  char thread_name[UV_PTHREAD_MAX_NAMELEN_NP];
  pthread_get_name_np(*tid, thread_name, sizeof(thread_name));
  strncpy(name, thread_name, size - 1);
  name[size - 1] = '\0';
  return 0;
}
955 | | #elif defined(__APPLE__) |
956 | | int uv__thread_getname(uv_thread_t* tid, char* name, size_t size) { |
957 | | char thread_name[UV_PTHREAD_MAX_NAMELEN_NP]; |
958 | | if (pthread_getname_np(*tid, thread_name, sizeof(thread_name)) != 0) |
959 | | return UV__ERR(errno); |
960 | | |
961 | | strncpy(name, thread_name, size - 1); |
962 | | name[size - 1] = '\0'; |
963 | | return 0; |
964 | | } |
965 | | #else |
/* Default (glibc-style): copy the thread's name into `name` (truncated
 * to size-1, always NUL-terminated). Returns 0 or a negative UV_* error
 * code. Caller (uv_thread_getname) guarantees size > 0. */
int uv__thread_getname(uv_thread_t* tid, char* name, size_t size) {
  int r;
  char thread_name[UV_PTHREAD_MAX_NAMELEN_NP];
  r = pthread_getname_np(*tid, thread_name, sizeof(thread_name));
  if (r != 0)
    return UV__ERR(r);

  strncpy(name, thread_name, size - 1);
  name[size - 1] = '\0';
  return 0;
}
977 | | #endif |