Line | Count | Source |
1 | | /* npth.c - a lightweight implementation of pth over pthread. |
2 | | * Copyright (C) 2011 g10 Code GmbH |
3 | | * |
4 | | * This file is part of nPth. |
5 | | * |
6 | | * nPth is free software; you can redistribute it and/or modify |
7 | | * it under the terms of the GNU Lesser General Public License as |
8 | | * published by the Free Software Foundation; either version 2.1 of |
9 | | * the License, or (at your option) any later version. |
10 | | * |
11 | | * nPth is distributed in the hope that it will be useful, but |
12 | | * WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See |
14 | | * the GNU Lesser General Public License for more details. |
15 | | * |
16 | | * You should have received a copy of the GNU Lesser General Public |
17 | | * License along with this program; if not, see <https://www.gnu.org/licenses/>. |
18 | | */ |
19 | | |
20 | | #ifdef HAVE_CONFIG_H |
21 | | #include <config.h> |
22 | | #endif |
23 | | |
24 | | #include <stdlib.h> |
25 | | #include <stdio.h> |
26 | | #include <string.h> |
27 | | #include <assert.h> |
28 | | #include <errno.h> |
29 | | #include <pthread.h> |
30 | | #include <fcntl.h> |
31 | | #include <sys/stat.h> |
32 | | #ifdef HAVE_LIB_DISPATCH |
33 | | # include <dispatch/dispatch.h> |
34 | | typedef dispatch_semaphore_t sem_t; |
35 | | |
36 | | /* This glue code is for macOS, which does not have a full |
37 | | implementation of POSIX semaphores.  On macOS, using the semaphore of |
38 | | the Grand Central Dispatch library is better than using the partial |
39 | | POSIX implementation, where sem_init does not work well. |
40 | | */ |
41 | | |
42 | | static int |
43 | | sem_init (sem_t *sem, int is_shared, unsigned int value) |
44 | | { |
45 | | (void)is_shared; |
46 | | if ((*sem = dispatch_semaphore_create (value)) == NULL) |
47 | | return -1; |
48 | | else |
49 | | return 0; |
50 | | } |
51 | | |
52 | | static int |
53 | | sem_post (sem_t *sem) |
54 | | { |
55 | | dispatch_semaphore_signal (*sem); |
56 | | return 0; |
57 | | } |
58 | | |
59 | | static int |
60 | | sem_wait (sem_t *sem) |
61 | | { |
62 | | dispatch_semaphore_wait (*sem, DISPATCH_TIME_FOREVER); |
63 | | return 0; |
64 | | } |
65 | | #else |
66 | | # include <semaphore.h> |
67 | | #endif |
68 | | #ifdef HAVE_UNISTD_H |
69 | | # include <unistd.h> |
70 | | #endif |
71 | | #ifndef HAVE_PSELECT |
72 | | # include <signal.h> |
73 | | #endif |
74 | | #ifdef HAVE_POLL_H |
75 | | #include <poll.h> |
76 | | #endif |
77 | | |
78 | | #include "npth.h" |
79 | | |
80 | | |
81 | | /* The global lock that excludes all threads but one. This is a |
82 | | semaphore, because these can be safely used in a library even if |
83 | | the application or other libraries call fork(), including from a |
84 | | signal handler. sem_post is async-signal-safe. (The reason a |
85 | | semaphore is safe and a mutex is not safe is that a mutex has an |
86 | | owner, while a semaphore does not.)  We initialize SCEPTRE to a |
87 | | static buffer for use by sem_init; in case sem_open is used |
88 | | instead, SCEPTRE will be changed to the value returned by |
89 | | sem_open.  GOT_SCEPTRE is a flag used for debugging to tell |
90 | | whether we hold SCEPTRE. */ |
91 | | static sem_t sceptre_buffer; |
92 | | static sem_t *sceptre = &sceptre_buffer; |
93 | | static int got_sceptre; |
94 | | |
95 | | /* Configure defines HAVE_FORK_UNSAFE_SEMAPHORE if a child process |
96 | | cannot access a non-shared unnamed semaphore created by its |
97 | | parent. |
98 | |
99 | | We use an unnamed semaphore (if available) for the global lock. |
100 | | Such a semaphore is only valid for the threads of one process |
101 | | and is of no use to other processes.  Thus, the PSHARED argument |
102 | | of sem_init is naturally 0. |
103 | |
104 | | However, there are daemon-like applications which call fork after |
105 | | nPth has been initialized by npth_init.  In this case, a child |
106 | | process uses the semaphore which was created by its parent |
107 | | process, while the parent does nothing with the semaphore.  On |
108 | | some systems (e.g. AIX), access by a child process to a |
109 | | non-shared unnamed semaphore is prohibited.  For such a system, |
110 | | HAVE_FORK_UNSAFE_SEMAPHORE should be defined, so that the unnamed |
111 | | semaphore will be created with the option PSHARED=1.  Its only |
112 | | purpose is to allow the child process to access the lock; for |
113 | | nPth it does not imply any other interaction between processes. |
114 | | (An illustrative sketch of this scenario follows below.)  */ |
115 | | #ifdef HAVE_FORK_UNSAFE_SEMAPHORE |
116 | | #define NPTH_SEMAPHORE_PSHARED 1 |
117 | | #else |
118 | 0 | #define NPTH_SEMAPHORE_PSHARED 0 |
119 | | #endif |
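/* Illustrative sketch, not part of npth.c: the daemon-style flow
   described in the comment above.  A parent process initializes nPth,
   forks, and only the child keeps using the nPth API.  On systems
   where a child cannot use a non-shared unnamed semaphore created by
   its parent, HAVE_FORK_UNSAFE_SEMAPHORE (and thus PSHARED=1) is
   needed for this pattern to work.  */
#include <npth.h>
#include <stdlib.h>
#include <unistd.h>

int
main (void)
{
  pid_t pid;

  npth_init ();        /* The parent creates the global semaphore.  */

  pid = fork ();
  if (pid < 0)
    exit (1);
  if (pid > 0)
    _exit (0);         /* The parent exits and never touches nPth again.  */

  /* Child: keeps using nPth with the semaphore inherited from the
     parent process.  */
  npth_sleep (1);
  return 0;
}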
120 | | |
121 | | /* The main thread is the active thread at the time npth_init was |
122 | | called.  As of now it is only useful for debugging.  The volatile |
123 | | makes sure the compiler does not eliminate this set-but-unused |
124 | | variable.  */ |
125 | | static volatile pthread_t main_thread; |
126 | | |
127 | | /* This flag is set as soon as npth_init has been called or if any |
128 | | * thread has been created. It will never be cleared again. The only |
129 | | * purpose is to make npth_protect and npth_unprotect more robust in |
130 | | * that they can be shortcut when npth_init has not yet been called. |
131 | | * This is important for libraries which want to support nPth by using |
132 | | * those two functions but may have been initialized before nPth. */ |
133 | | static int initialized_or_any_threads; |
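/* Illustrative sketch, not part of npth.c: how a library that may run
   under nPth uses npth_unprotect/npth_protect around a blocking system
   call.  The wrapper do_blocking_read is made up for this example;
   thanks to the shortcut described above it is also safe to call it
   before npth_init has been run.  */
#include <npth.h>
#include <unistd.h>

static ssize_t
do_blocking_read (int fd, void *buf, size_t len)
{
  ssize_t nread;

  npth_unprotect ();            /* Release the sceptre so other threads can run.  */
  nread = read (fd, buf, len);
  npth_protect ();              /* Re-acquire the sceptre before returning.  */
  return nread;
}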
134 | | |
135 | | /* Systems that don't have pthread_mutex_timedlock get a busy wait |
136 | | implementation that probes the lock every BUSY_WAIT_INTERVAL |
137 | | milliseconds. */ |
138 | | #define BUSY_WAIT_INTERVAL 200 |
139 | | |
140 | | typedef int (*trylock_func_t) (void *); |
141 | | |
142 | | #ifndef HAVE_PTHREAD_MUTEX_TIMEDLOCK |
143 | | static int |
144 | | busy_wait_for (trylock_func_t trylock, void *lock, |
145 | | const struct timespec *abstime) |
146 | | { |
147 | | int err; |
148 | | |
149 | | /* This is not great, but better than nothing. Only works for locks |
150 | | which are mostly uncontested. Provides absolutely no fairness at |
151 | | all. Creates many wake-ups. */ |
152 | | while (1) |
153 | | { |
154 | | struct timespec ts; |
155 | | err = npth_clock_gettime (&ts); |
156 | | if (err < 0) |
157 | | { |
158 | | /* Just for safety make sure we return some error. */ |
159 | | err = errno ? errno : EINVAL; |
160 | | break; |
161 | | } |
162 | | |
163 | | if (npth_timercmp (abstime, &ts, <)) |
164 | | { |
165 | | err = ETIMEDOUT; |
166 | | break; |
167 | | } |
168 | | |
169 | | err = (*trylock) (lock); |
170 | | if (err != EBUSY) |
171 | | break; |
172 | | |
173 | | /* Try again after waiting a bit. We could calculate the |
174 | | maximum wait time from ts and abstime, but we don't |
175 | | bother, as our granularity is pretty fine. */ |
176 | | usleep (BUSY_WAIT_INTERVAL * 1000); |
177 | | } |
178 | | |
179 | | return err; |
180 | | } |
181 | | #endif |
182 | | |
183 | | static void |
184 | | enter_npth (void) |
185 | 0 | { |
186 | 0 | int res; |
187 | |
|
188 | 0 | got_sceptre = 0; |
189 | 0 | res = sem_post (sceptre); |
190 | 0 | assert (res == 0); |
191 | 0 | } |
192 | | |
193 | | |
194 | | static void |
195 | | leave_npth (void) |
196 | 0 | { |
197 | 0 | int res; |
198 | 0 | int save_errno = errno; |
199 | |
|
200 | 0 | do { |
201 | 0 | res = sem_wait (sceptre); |
202 | 0 | } while (res < 0 && errno == EINTR); |
203 | |
|
204 | 0 | assert (!res); |
205 | 0 | got_sceptre = 1; |
206 | 0 | errno = save_errno; |
207 | 0 | } |
208 | | |
209 | 0 | #define ENTER() enter_npth () |
210 | 0 | #define LEAVE() leave_npth () |
211 | | |
212 | | |
213 | | int |
214 | | npth_init (void) |
215 | 0 | { |
216 | 0 | int res; |
217 | |
|
218 | 0 | main_thread = pthread_self(); |
219 | | |
220 | | /* Track that we have been initialized. */ |
221 | 0 | initialized_or_any_threads |= 1; |
222 | | |
223 | | /* Better reset ERRNO so that we know that it has been set by |
224 | | sem_init. */ |
225 | 0 | errno = 0; |
226 | | |
227 | | /* The semaphore is binary. */ |
228 | 0 | res = sem_init (sceptre, NPTH_SEMAPHORE_PSHARED, 1); |
229 | | /* Some operating system versions have the sem_init symbol defined, |
230 | | but the call actually returns ENOSYS at runtime.  This problem is |
231 | | known for older versions of AIX (<= 4.3.3) and macOS.  On macOS we |
232 | | use the semaphore of the Grand Central Dispatch library, so ENOSYS |
233 | | does not happen.  We only support AIX >= 5.2, where sem_init is |
234 | | supported. |
235 | | */ |
236 | 0 | if (res < 0) |
237 | 0 | { |
238 | | /* POSIX.1-2001 defines the semaphore interface but does not |
239 | | specify the return value for success. Thus we better |
240 | | bail out on error only on a POSIX.1-2008 system. */ |
241 | 0 | #if _POSIX_C_SOURCE >= 200809L |
242 | 0 | return errno; |
243 | 0 | #endif |
244 | 0 | } |
245 | | |
246 | 0 | LEAVE(); |
247 | 0 | return 0; |
248 | 0 | } |
249 | | |
250 | | |
251 | | int |
252 | | npth_getname_np (npth_t target_thread, char *buf, size_t buflen) |
253 | 0 | { |
254 | 0 | #ifdef HAVE_PTHREAD_GETNAME_NP |
255 | 0 | return pthread_getname_np (target_thread, buf, buflen); |
256 | | #else |
257 | | (void)target_thread; |
258 | | (void)buf; |
259 | | (void)buflen; |
260 | | return ENOSYS; |
261 | | #endif |
262 | 0 | } |
263 | | |
264 | | |
265 | | int |
266 | | npth_setname_np (npth_t target_thread, const char *name) |
267 | 0 | { |
268 | 0 | #ifdef HAVE_PTHREAD_SETNAME_NP |
269 | | #ifdef __NetBSD__ |
270 | | return pthread_setname_np (target_thread, "%s", (void*) name); |
271 | | #else |
272 | | #ifdef __APPLE__ |
273 | | if (target_thread == npth_self ()) |
274 | | return pthread_setname_np (name); |
275 | | else |
276 | | return ENOTSUP; |
277 | | #else |
278 | 0 | return pthread_setname_np (target_thread, name); |
279 | 0 | #endif |
280 | 0 | #endif |
281 | | #else |
282 | | (void)target_thread; |
283 | | (void)name; |
284 | | return ENOSYS; |
285 | | #endif |
286 | 0 | } |
287 | | |
288 | | |
289 | | |
290 | | struct startup_s |
291 | | { |
292 | | void *(*start_routine) (void *); |
293 | | void *arg; |
294 | | }; |
295 | | |
296 | | |
297 | | static void * |
298 | | thread_start (void *startup_arg) |
299 | 0 | { |
300 | 0 | struct startup_s *startup = startup_arg; |
301 | 0 | void *(*start_routine) (void *); |
302 | 0 | void *arg; |
303 | 0 | void *result; |
304 | |
|
305 | 0 | start_routine = startup->start_routine; |
306 | 0 | arg = startup->arg; |
307 | 0 | free (startup); |
308 | |
|
309 | 0 | LEAVE(); |
310 | 0 | result = (*start_routine) (arg); |
311 | | /* Note: instead of returning here, we might end up in |
312 | | npth_exit(). */ |
313 | 0 | ENTER(); |
314 | |
|
315 | 0 | return result; |
316 | 0 | } |
317 | | |
318 | | |
319 | | int |
320 | | npth_create (npth_t *thread, const npth_attr_t *attr, |
321 | | void *(*start_routine) (void *), void *arg) |
322 | 0 | { |
323 | 0 | int err; |
324 | 0 | struct startup_s *startup; |
325 | |
|
326 | 0 | startup = malloc (sizeof (*startup)); |
327 | 0 | if (!startup) |
328 | 0 | return errno; |
329 | | |
330 | 0 | initialized_or_any_threads |= 2; |
331 | |
|
332 | 0 | startup->start_routine = start_routine; |
333 | 0 | startup->arg = arg; |
334 | 0 | err = pthread_create (thread, attr, thread_start, startup); |
335 | 0 | if (err) |
336 | 0 | { |
337 | 0 | free (startup); |
338 | 0 | return err; |
339 | 0 | } |
340 | | |
341 | | /* Memory is released in thread_start. */ |
342 | 0 | return 0; |
343 | 0 | } |
344 | | |
345 | | |
346 | | int |
347 | | npth_join (npth_t thread, void **retval) |
348 | 0 | { |
349 | 0 | int err; |
350 | |
|
351 | 0 | #ifdef HAVE_PTHREAD_TRYJOIN_NP |
352 | | /* No need to allow competing threads to enter when we can get the |
353 | | lock immediately. pthread_tryjoin_np is a GNU extension. */ |
354 | 0 | err = pthread_tryjoin_np (thread, retval); |
355 | 0 | if (err != EBUSY) |
356 | 0 | return err; |
357 | 0 | #endif /*HAVE_PTHREAD_TRYJOIN_NP*/ |
358 | | |
359 | 0 | ENTER(); |
360 | 0 | err = pthread_join (thread, retval); |
361 | 0 | LEAVE(); |
362 | 0 | return err; |
363 | 0 | } |
364 | | |
365 | | |
366 | | void |
367 | | npth_exit (void *retval) |
368 | 0 | { |
369 | 0 | ENTER(); |
370 | 0 | pthread_exit (retval); |
371 | | /* Never reached. But just in case pthread_exit does return... */ |
372 | 0 | LEAVE(); |
373 | 0 | } |
374 | | |
375 | | |
376 | | int |
377 | | npth_mutex_lock (npth_mutex_t *mutex) |
378 | 0 | { |
379 | 0 | int err; |
380 | | |
381 | | /* No need to allow competing threads to enter when we can get the |
382 | | lock immediately. */ |
383 | 0 | err = pthread_mutex_trylock (mutex); |
384 | 0 | if (err != EBUSY) |
385 | 0 | return err; |
386 | | |
387 | 0 | ENTER(); |
388 | 0 | err = pthread_mutex_lock (mutex); |
389 | 0 | LEAVE(); |
390 | 0 | return err; |
391 | 0 | } |
392 | | |
393 | | |
394 | | int |
395 | | npth_mutex_timedlock (npth_mutex_t *mutex, const struct timespec *abstime) |
396 | 0 | { |
397 | 0 | int err; |
398 | | |
399 | | /* No need to allow competing threads to enter when we can get the |
400 | | lock immediately. */ |
401 | 0 | err = pthread_mutex_trylock (mutex); |
402 | 0 | if (err != EBUSY) |
403 | 0 | return err; |
404 | | |
405 | 0 | ENTER(); |
406 | 0 | #if HAVE_PTHREAD_MUTEX_TIMEDLOCK |
407 | 0 | err = pthread_mutex_timedlock (mutex, abstime); |
408 | | #else |
409 | | err = busy_wait_for ((trylock_func_t) pthread_mutex_trylock, mutex, abstime); |
410 | | #endif |
411 | 0 | LEAVE(); |
412 | 0 | return err; |
413 | 0 | } |
414 | | |
415 | | |
416 | | #ifndef _NPTH_NO_RWLOCK |
417 | | int |
418 | | npth_rwlock_rdlock (npth_rwlock_t *rwlock) |
419 | 0 | { |
420 | 0 | int err; |
421 | |
|
422 | 0 | #ifdef HAVE_PTHREAD_RWLOCK_TRYRDLOCK |
423 | | /* No need to allow competing threads to enter when we can get the |
424 | | lock immediately. */ |
425 | 0 | err = pthread_rwlock_tryrdlock (rwlock); |
426 | 0 | if (err != EBUSY) |
427 | 0 | return err; |
428 | 0 | #endif |
429 | | |
430 | 0 | ENTER(); |
431 | 0 | err = pthread_rwlock_rdlock (rwlock); |
432 | 0 | LEAVE(); |
433 | 0 | return err; |
434 | 0 | } |
435 | | |
436 | | |
437 | | int |
438 | | npth_rwlock_timedrdlock (npth_rwlock_t *rwlock, const struct timespec *abstime) |
439 | 0 | { |
440 | 0 | int err; |
441 | |
|
442 | 0 | #ifdef HAVE_PTHREAD_RWLOCK_TRYRDLOCK |
443 | | /* No need to allow competing threads to enter when we can get the |
444 | | lock immediately. */ |
445 | 0 | err = pthread_rwlock_tryrdlock (rwlock); |
446 | 0 | if (err != EBUSY) |
447 | 0 | return err; |
448 | 0 | #endif |
449 | | |
450 | 0 | ENTER(); |
451 | 0 | #if HAVE_PTHREAD_RWLOCK_TIMEDRDLOCK |
452 | 0 | err = pthread_rwlock_timedrdlock (rwlock, abstime); |
453 | | #else |
454 | | err = busy_wait_for ((trylock_func_t) pthread_rwlock_tryrdlock, rwlock, |
455 | | abstime); |
456 | | #endif |
457 | 0 | LEAVE(); |
458 | 0 | return err; |
459 | 0 | } |
460 | | |
461 | | |
462 | | int |
463 | | npth_rwlock_wrlock (npth_rwlock_t *rwlock) |
464 | 0 | { |
465 | 0 | int err; |
466 | |
|
467 | 0 | #ifdef HAVE_PTHREAD_RWLOCK_TRYWRLOCK |
468 | | /* No need to allow competing threads to enter when we can get the |
469 | | lock immediately. */ |
470 | 0 | err = pthread_rwlock_trywrlock (rwlock); |
471 | 0 | if (err != EBUSY) |
472 | 0 | return err; |
473 | 0 | #endif |
474 | | |
475 | 0 | ENTER(); |
476 | 0 | err = pthread_rwlock_wrlock (rwlock); |
477 | 0 | LEAVE(); |
478 | 0 | return err; |
479 | 0 | } |
480 | | |
481 | | |
482 | | int |
483 | | npth_rwlock_timedwrlock (npth_rwlock_t *rwlock, const struct timespec *abstime) |
484 | 0 | { |
485 | 0 | int err; |
486 | |
|
487 | 0 | #ifdef HAVE_PTHREAD_RWLOCK_TRYWRLOCK |
488 | | /* No need to allow competing threads to enter when we can get the |
489 | | lock immediately. */ |
490 | 0 | err = pthread_rwlock_trywrlock (rwlock); |
491 | 0 | if (err != EBUSY) |
492 | 0 | return err; |
493 | 0 | #endif |
494 | | |
495 | 0 | ENTER(); |
496 | 0 | #if HAVE_PTHREAD_RWLOCK_TIMEDWRLOCK |
497 | 0 | err = pthread_rwlock_timedwrlock (rwlock, abstime); |
498 | | #elif HAVE_PTHREAD_RWLOCK_TRYRDLOCK |
499 | | err = busy_wait_for ((trylock_func_t) pthread_rwlock_trywrlock, rwlock, |
500 | | abstime); |
501 | | #else |
502 | | err = ENOSYS; |
503 | | #endif |
504 | 0 | LEAVE(); |
505 | 0 | return err; |
506 | 0 | } |
507 | | #endif |
508 | | |
509 | | |
510 | | int |
511 | | npth_cond_wait (npth_cond_t *cond, npth_mutex_t *mutex) |
512 | 0 | { |
513 | 0 | int err; |
514 | |
|
515 | 0 | ENTER(); |
516 | 0 | err = pthread_cond_wait (cond, mutex); |
517 | 0 | LEAVE(); |
518 | 0 | return err; |
519 | 0 | } |
520 | | |
521 | | |
522 | | int |
523 | | npth_cond_timedwait (npth_cond_t *cond, npth_mutex_t *mutex, |
524 | | const struct timespec *abstime) |
525 | 0 | { |
526 | 0 | int err; |
527 | |
|
528 | 0 | ENTER(); |
529 | 0 | err = pthread_cond_timedwait (cond, mutex, abstime); |
530 | 0 | LEAVE(); |
531 | 0 | return err; |
532 | 0 | } |
533 | | |
534 | | |
535 | | /* Standard POSIX Replacement API */ |
536 | | |
537 | | int |
538 | | npth_usleep(unsigned int usec) |
539 | 0 | { |
540 | 0 | int res; |
541 | |
|
542 | 0 | ENTER(); |
543 | 0 | res = usleep(usec); |
544 | 0 | LEAVE(); |
545 | 0 | return res; |
546 | 0 | } |
547 | | |
548 | | |
549 | | unsigned int |
550 | | npth_sleep(unsigned int sec) |
551 | 0 | { |
552 | 0 | unsigned res; |
553 | |
|
554 | 0 | ENTER(); |
555 | 0 | res = sleep(sec); |
556 | 0 | LEAVE(); |
557 | 0 | return res; |
558 | 0 | } |
559 | | |
560 | | |
561 | | int |
562 | | npth_system(const char *cmd) |
563 | 0 | { |
564 | 0 | int res; |
565 | |
|
566 | 0 | ENTER(); |
567 | 0 | res = system(cmd); |
568 | 0 | LEAVE(); |
569 | 0 | return res; |
570 | 0 | } |
571 | | |
572 | | |
573 | | pid_t |
574 | | npth_waitpid(pid_t pid, int *status, int options) |
575 | 0 | { |
576 | 0 | pid_t res; |
577 | |
|
578 | 0 | ENTER(); |
579 | 0 | res = waitpid(pid, status, options);
580 | 0 | LEAVE(); |
581 | 0 | return res; |
582 | 0 | } |
583 | | |
584 | | |
585 | | int |
586 | | npth_connect(int s, const struct sockaddr *addr, socklen_t addrlen) |
587 | 0 | { |
588 | 0 | int res; |
589 | |
|
590 | 0 | ENTER(); |
591 | 0 | res = connect(s, addr, addrlen); |
592 | 0 | LEAVE(); |
593 | 0 | return res; |
594 | 0 | } |
595 | | |
596 | | |
597 | | int |
598 | | npth_accept(int s, struct sockaddr *addr, socklen_t *addrlen) |
599 | 0 | { |
600 | 0 | int res; |
601 | |
|
602 | 0 | ENTER(); |
603 | 0 | res = accept(s, addr, addrlen); |
604 | 0 | LEAVE(); |
605 | 0 | return res; |
606 | 0 | } |
607 | | |
608 | | |
609 | | int |
610 | | npth_select(int nfd, fd_set *rfds, fd_set *wfds, fd_set *efds, |
611 | | struct timeval *timeout) |
612 | 0 | { |
613 | 0 | int res; |
614 | |
|
615 | 0 | ENTER(); |
616 | 0 | res = select(nfd, rfds, wfds, efds, timeout); |
617 | 0 | LEAVE(); |
618 | 0 | return res; |
619 | 0 | } |
620 | | |
621 | | |
622 | | int |
623 | | npth_pselect(int nfd, fd_set *rfds, fd_set *wfds, fd_set *efds, |
624 | | const struct timespec *timeout, const sigset_t *sigmask) |
625 | 0 | { |
626 | 0 | int res; |
627 | |
|
628 | 0 | ENTER(); |
629 | 0 | #ifdef HAVE_PSELECT |
630 | 0 | res = pselect (nfd, rfds, wfds, efds, timeout, sigmask); |
631 | | #else /*!HAVE_PSELECT*/ |
632 | | { |
633 | | /* A better emulation of pselect would be to create a pipe, wait |
634 | | in the select for one end and have a signal handler write to |
635 | | the other end. However, this is non-trivial to implement and |
636 | | thus we only print a compile time warning. */ |
637 | | # ifdef __GNUC__ |
638 | | # warning Using a non race free pselect emulation. |
639 | | # endif |
640 | | |
641 | | struct timeval t, *tp; |
642 | | |
643 | | tp = NULL; |
644 | | if (!timeout) |
645 | | ; |
646 | | else if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) |
647 | | { |
648 | | t.tv_sec = timeout->tv_sec; |
649 | | t.tv_usec = (timeout->tv_nsec + 999) / 1000; |
650 | | tp = &t; |
651 | | } |
652 | | else |
653 | | { |
654 | | errno = EINVAL; |
655 | | res = -1; |
656 | | goto leave; |
657 | | } |
658 | | |
659 | | if (sigmask) |
660 | | { |
661 | | int save_errno; |
662 | | sigset_t savemask; |
663 | | |
664 | | pthread_sigmask (SIG_SETMASK, sigmask, &savemask); |
665 | | res = select (nfd, rfds, wfds, efds, tp); |
666 | | save_errno = errno; |
667 | | pthread_sigmask (SIG_SETMASK, &savemask, NULL); |
668 | | errno = save_errno; |
669 | | } |
670 | | else |
671 | | res = select (nfd, rfds, wfds, efds, tp); |
672 | | |
673 | | leave: |
674 | | ; |
675 | | } |
676 | | #endif /*!HAVE_PSELECT*/ |
677 | 0 | LEAVE(); |
678 | 0 | return res; |
679 | 0 | } |
680 | | |
681 | | |
682 | | int |
683 | | npth_poll (struct pollfd *fds, unsigned long nfds, int timeout) |
684 | 0 | { |
685 | 0 | int res; |
686 | |
|
687 | 0 | ENTER(); |
688 | 0 | res = poll (fds, (nfds_t)nfds, timeout); |
689 | 0 | LEAVE(); |
690 | 0 | return res; |
691 | 0 | } |
692 | | |
693 | | |
694 | | int |
695 | | npth_ppoll (struct pollfd *fds, unsigned long nfds, |
696 | | const struct timespec *timeout, const sigset_t *sigmask) |
697 | 0 | { |
698 | 0 | int res; |
699 | |
|
700 | 0 | ENTER(); |
701 | 0 | #ifdef HAVE_PPOLL |
702 | 0 | res = ppoll (fds, (nfds_t)nfds, timeout, sigmask); |
703 | | #else /*!HAVE_PPOLL*/ |
704 | | { |
705 | | # ifdef __GNUC__ |
706 | | # warning Using a non race free ppoll emulation. |
707 | | # endif |
708 | | |
709 | | int t; |
710 | | |
711 | | if (!timeout) |
712 | | t = -1; |
713 | | else if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) |
714 | | t = timeout->tv_sec * 1000 + (timeout->tv_nsec + 999999) / 1000000; |
715 | | else |
716 | | { |
717 | | errno = EINVAL; |
718 | | res = -1; |
719 | | goto leave; |
720 | | } |
721 | | |
722 | | if (sigmask) |
723 | | { |
724 | | int save_errno; |
725 | | sigset_t savemask; |
726 | | |
727 | | pthread_sigmask (SIG_SETMASK, sigmask, &savemask); |
728 | | res = poll (fds, (nfds_t)nfds, t); |
729 | | save_errno = errno; |
730 | | pthread_sigmask (SIG_SETMASK, &savemask, NULL); |
731 | | errno = save_errno; |
732 | | } |
733 | | else |
734 | | res = poll (fds, (nfds_t)nfds, t); |
735 | | |
736 | | leave: |
737 | | ; |
738 | | } |
739 | | #endif |
740 | 0 | LEAVE(); |
741 | 0 | return res; |
742 | 0 | } |
743 | | |
744 | | |
745 | | ssize_t |
746 | | npth_read(int fd, void *buf, size_t nbytes) |
747 | 0 | { |
748 | 0 | ssize_t res; |
749 | |
|
750 | 0 | ENTER(); |
751 | 0 | res = read(fd, buf, nbytes); |
752 | 0 | LEAVE(); |
753 | 0 | return res; |
754 | 0 | } |
755 | | |
756 | | |
757 | | ssize_t |
758 | | npth_write(int fd, const void *buf, size_t nbytes) |
759 | 0 | { |
760 | 0 | ssize_t res; |
761 | |
|
762 | 0 | ENTER(); |
763 | 0 | res = write(fd, buf, nbytes); |
764 | 0 | LEAVE(); |
765 | 0 | return res; |
766 | 0 | } |
767 | | |
768 | | |
769 | | int |
770 | | npth_recvmsg (int fd, struct msghdr *msg, int flags) |
771 | 0 | { |
772 | 0 | int res; |
773 | |
|
774 | 0 | ENTER(); |
775 | 0 | res = recvmsg (fd, msg, flags); |
776 | 0 | LEAVE(); |
777 | 0 | return res; |
778 | 0 | } |
779 | | |
780 | | |
781 | | int |
782 | | npth_sendmsg (int fd, const struct msghdr *msg, int flags) |
783 | 0 | { |
784 | 0 | int res; |
785 | |
|
786 | 0 | ENTER(); |
787 | 0 | res = sendmsg (fd, msg, flags); |
788 | 0 | LEAVE(); |
789 | 0 | return res; |
790 | 0 | } |
791 | | |
792 | | |
793 | | void |
794 | | npth_unprotect (void) |
795 | 0 | { |
796 | | /* If we are not initialized we may not access the semaphore and |
797 | | * thus we shortcut it. Note that in this case the unprotect/protect |
798 | | * is not needed.  For failsafe reasons, if an nPth thread has ever |
799 | | * been created but nPth has accidentally not been initialized, we |
800 | | * do not shortcut, so that a stack backtrace (due to the access of |
801 | | * the uninitialized semaphore) is more expressive. */ |
802 | 0 | if (initialized_or_any_threads) |
803 | 0 | ENTER(); |
804 | 0 | } |
805 | | |
806 | | |
807 | | void |
808 | | npth_protect (void) |
809 | 0 | { |
810 | | /* See npth_unprotect for commentary. */ |
811 | 0 | if (initialized_or_any_threads) |
812 | 0 | LEAVE(); |
813 | 0 | } |
814 | | |
815 | | |
816 | | int |
817 | | npth_is_protected (void) |
818 | 0 | { |
819 | 0 | return got_sceptre; |
820 | 0 | } |
821 | | |
822 | | |
823 | | int |
824 | | npth_clock_gettime (struct timespec *ts) |
825 | 0 | { |
826 | 0 | #if defined(CLOCK_REALTIME) && HAVE_CLOCK_GETTIME |
827 | 0 | return clock_gettime (CLOCK_REALTIME, ts); |
828 | | #elif HAVE_GETTIMEOFDAY |
829 | | { |
830 | | struct timeval tv; |
831 | | |
832 | | if (gettimeofday (&tv, NULL)) |
833 | | return -1; |
834 | | ts->tv_sec = tv.tv_sec; |
835 | | ts->tv_nsec = tv.tv_usec * 1000; |
836 | | return 0; |
837 | | } |
838 | | #else |
839 | | /* FIXME: fall back on time() with seconds resolution. */ |
840 | | # error clock_gettime not available - please provide a fallback. |
841 | | #endif |
842 | 0 | } |