/src/tarantool/third_party/libev/ev.c
Line  | Count  | Source (jump to first uncovered line)  | 
1  |  | /*  | 
2  |  |  * libev event processing core, watcher management  | 
3  |  |  *  | 
4  |  |  * Copyright (c) 2007-2020 Marc Alexander Lehmann <libev@schmorp.de>  | 
5  |  |  * All rights reserved.  | 
6  |  |  *  | 
7  |  |  * Redistribution and use in source and binary forms, with or without modifica-  | 
8  |  |  * tion, are permitted provided that the following conditions are met:  | 
9  |  |  *  | 
10  |  |  *   1.  Redistributions of source code must retain the above copyright notice,  | 
11  |  |  *       this list of conditions and the following disclaimer.  | 
12  |  |  *  | 
13  |  |  *   2.  Redistributions in binary form must reproduce the above copyright  | 
14  |  |  *       notice, this list of conditions and the following disclaimer in the  | 
15  |  |  *       documentation and/or other materials provided with the distribution.  | 
16  |  |  *  | 
17  |  |  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED  | 
18  |  |  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-  | 
19  |  |  * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO  | 
20  |  |  * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-  | 
21  |  |  * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,  | 
22  |  |  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;  | 
23  |  |  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,  | 
24  |  |  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-  | 
25  |  |  * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED  | 
26  |  |  * OF THE POSSIBILITY OF SUCH DAMAGE.  | 
27  |  |  *  | 
28  |  |  * Alternatively, the contents of this file may be used under the terms of  | 
29  |  |  * the GNU General Public License ("GPL") version 2 or any later version, | 
30  |  |  * in which case the provisions of the GPL are applicable instead of  | 
31  |  |  * the above. If you wish to allow the use of your version of this file  | 
32  |  |  * only under the terms of the GPL and not to allow others to use your  | 
33  |  |  * version of this file under the BSD license, indicate your decision  | 
34  |  |  * by deleting the provisions above and replace them with the notice  | 
35  |  |  * and other provisions required by the GPL. If you do not delete the  | 
36  |  |  * provisions above, a recipient may use your version of this file under  | 
37  |  |  * either the BSD or the GPL.  | 
38  |  |  */  | 
39  |  |  | 
40  |  | /* this big block deduces configuration from config.h */  | 
41  |  | #ifndef EV_STANDALONE  | 
42  |  | # ifdef EV_CONFIG_H  | 
43  |  | #  include EV_CONFIG_H  | 
44  |  | # else  | 
45  |  | #  include "config.h"  | 
46  |  | # endif  | 
47  |  |  | 
48  |  | # if HAVE_FLOOR  | 
49  |  | #  ifndef EV_USE_FLOOR  | 
50  |  | #   define EV_USE_FLOOR 1  | 
51  |  | #  endif  | 
52  |  | # endif  | 
53  |  |  | 
54  |  | # if HAVE_CLOCK_SYSCALL  | 
55  |  | #  ifndef EV_USE_CLOCK_SYSCALL  | 
56  |  | #   define EV_USE_CLOCK_SYSCALL 1  | 
57  |  | #   ifndef EV_USE_REALTIME  | 
58  |  | #    define EV_USE_REALTIME  0  | 
59  |  | #   endif  | 
60  |  | #   ifndef EV_USE_MONOTONIC  | 
61  |  | #    define EV_USE_MONOTONIC 1  | 
62  |  | #   endif  | 
63  |  | #  endif  | 
64  |  | # elif !defined EV_USE_CLOCK_SYSCALL  | 
65  |  | #  define EV_USE_CLOCK_SYSCALL 0  | 
66  |  | # endif  | 
67  |  |  | 
68  |  | # if HAVE_CLOCK_GETTIME  | 
69  |  | #  ifndef EV_USE_MONOTONIC  | 
70  |  | #   define EV_USE_MONOTONIC 1  | 
71  |  | #  endif  | 
72  |  | #  ifndef EV_USE_REALTIME  | 
73  |  | #   define EV_USE_REALTIME  0  | 
74  |  | #  endif  | 
75  |  | # else  | 
76  |  | #  ifndef EV_USE_MONOTONIC  | 
77  |  | #   define EV_USE_MONOTONIC 0  | 
78  |  | #  endif  | 
79  |  | #  ifndef EV_USE_REALTIME  | 
80  |  | #   define EV_USE_REALTIME  0  | 
81  |  | #  endif  | 
82  |  | # endif  | 
83  |  |  | 
84  |  | # if HAVE_NANOSLEEP  | 
85  |  | #  ifndef EV_USE_NANOSLEEP  | 
86  |  | #    define EV_USE_NANOSLEEP EV_FEATURE_OS  | 
87  |  | #  endif  | 
88  |  | # else  | 
89  |  | #   undef EV_USE_NANOSLEEP  | 
90  |  | #   define EV_USE_NANOSLEEP 0  | 
91  |  | # endif  | 
92  |  |  | 
93  |  | # if HAVE_SELECT && HAVE_SYS_SELECT_H  | 
94  |  | #  ifndef EV_USE_SELECT  | 
95  |  | #   define EV_USE_SELECT EV_FEATURE_BACKENDS  | 
96  |  | #  endif  | 
97  |  | # else  | 
98  |  | #  undef EV_USE_SELECT  | 
99  |  | #  define EV_USE_SELECT 0  | 
100  |  | # endif  | 
101  |  |  | 
102  |  | # if HAVE_POLL && HAVE_POLL_H  | 
103  |  | #  ifndef EV_USE_POLL  | 
104  |  | #   define EV_USE_POLL EV_FEATURE_BACKENDS  | 
105  |  | #  endif  | 
106  |  | # else  | 
107  |  | #  undef EV_USE_POLL  | 
108  |  | #  define EV_USE_POLL 0  | 
109  |  | # endif  | 
110  |  |    | 
111  |  | # if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H  | 
112  |  | #  ifndef EV_USE_EPOLL  | 
113  |  | #   define EV_USE_EPOLL EV_FEATURE_BACKENDS  | 
114  |  | #  endif  | 
115  |  | # else  | 
116  |  | #  undef EV_USE_EPOLL  | 
117  |  | #  define EV_USE_EPOLL 0  | 
118  |  | # endif  | 
119  |  |  | 
120  |  | # if HAVE_LINUX_AIO_ABI_H  | 
121  |  | #  ifndef EV_USE_LINUXAIO  | 
122  |  | #   define EV_USE_LINUXAIO 0 /* was: EV_FEATURE_BACKENDS, always off by default */  | 
123  |  | #  endif  | 
124  |  | # else  | 
125  |  | #  undef EV_USE_LINUXAIO  | 
126  |  | #  define EV_USE_LINUXAIO 0  | 
127  |  | # endif  | 
128  |  |  | 
129  |  | # if HAVE_LINUX_FS_H && HAVE_SYS_TIMERFD_H && HAVE_KERNEL_RWF_T  | 
130  |  | #  ifndef EV_USE_IOURING  | 
131  |  | #   define EV_USE_IOURING EV_FEATURE_BACKENDS  | 
132  |  | #  endif  | 
133  |  | # else  | 
134  |  | #  undef EV_USE_IOURING  | 
135  |  | #  define EV_USE_IOURING 0  | 
136  |  | # endif  | 
137  |  |  | 
138  |  | # if HAVE_KQUEUE && HAVE_SYS_EVENT_H  | 
139  |  | #  ifndef EV_USE_KQUEUE  | 
140  |  | #   define EV_USE_KQUEUE EV_FEATURE_BACKENDS  | 
141  |  | #  endif  | 
142  |  | # else  | 
143  |  | #  undef EV_USE_KQUEUE  | 
144  |  | #  define EV_USE_KQUEUE 0  | 
145  |  | # endif  | 
146  |  |    | 
147  |  | # if HAVE_PORT_H && HAVE_PORT_CREATE  | 
148  |  | #  ifndef EV_USE_PORT  | 
149  |  | #   define EV_USE_PORT EV_FEATURE_BACKENDS  | 
150  |  | #  endif  | 
151  |  | # else  | 
152  |  | #  undef EV_USE_PORT  | 
153  |  | #  define EV_USE_PORT 0  | 
154  |  | # endif  | 
155  |  |  | 
156  |  | # if HAVE_INOTIFY_INIT && HAVE_SYS_INOTIFY_H  | 
157  |  | #  ifndef EV_USE_INOTIFY  | 
158  |  | #   define EV_USE_INOTIFY EV_FEATURE_OS  | 
159  |  | #  endif  | 
160  |  | # else  | 
161  |  | #  undef EV_USE_INOTIFY  | 
162  |  | #  define EV_USE_INOTIFY 0  | 
163  |  | # endif  | 
164  |  |  | 
165  |  | # if HAVE_SIGNALFD && HAVE_SYS_SIGNALFD_H  | 
166  |  | #  ifndef EV_USE_SIGNALFD  | 
167  |  | #   define EV_USE_SIGNALFD EV_FEATURE_OS  | 
168  |  | #  endif  | 
169  |  | # else  | 
170  |  | #  undef EV_USE_SIGNALFD  | 
171  |  | #  define EV_USE_SIGNALFD 0  | 
172  |  | # endif  | 
173  |  |  | 
174  |  | # if HAVE_EVENTFD  | 
175  |  | #  ifndef EV_USE_EVENTFD  | 
176  |  | #   define EV_USE_EVENTFD EV_FEATURE_OS  | 
177  |  | #  endif  | 
178  |  | # else  | 
179  |  | #  undef EV_USE_EVENTFD  | 
180  |  | #  define EV_USE_EVENTFD 0  | 
181  |  | # endif  | 
182  |  |  | 
183  |  | # if HAVE_SYS_TIMERFD_H  | 
184  |  | #  ifndef EV_USE_TIMERFD  | 
185  |  | #   define EV_USE_TIMERFD EV_FEATURE_OS  | 
186  |  | #  endif  | 
187  |  | # else  | 
188  |  | #  undef EV_USE_TIMERFD  | 
189  |  | #  define EV_USE_TIMERFD 0  | 
190  |  | # endif  | 
191  |  |  | 
192  |  | #endif  | 
193  |  |  | 
194  |  | /* OS X, in its infinite idiocy, actually HARDCODES  | 
195  |  |  * a limit of 1024 into their select. Where people have brains,  | 
196  |  |  * OS X engineers apparently have a vacuum. Or maybe they were  | 
197  |  |  * ordered to have a vacuum, or they do anything for money.  | 
198  |  |  * This might help. Or not.  | 
199  |  |  * Note that this must be defined early, as other include files  | 
200  |  |  * will rely on this define as well.  | 
201  |  |  */  | 
202  |  | #define _DARWIN_UNLIMITED_SELECT 1  | 
203  |  |  | 
204  |  | #include <stdlib.h>  | 
205  |  | #include <string.h>  | 
206  |  | #include <fcntl.h>  | 
207  |  | #include <stddef.h>  | 
208  |  |  | 
209  |  | #include <stdio.h>  | 
210  |  |  | 
211  |  | #include <assert.h>  | 
212  |  | #include <errno.h>  | 
213  |  | #include <sys/types.h>  | 
214  |  | #include <time.h>  | 
215  |  | #include <limits.h>  | 
216  |  |  | 
217  |  | #include <signal.h>  | 
218  |  |  | 
219  |  | #ifdef EV_H  | 
220  |  | # include EV_H  | 
221  |  | #else  | 
222  |  | # include "ev.h"  | 
223  |  | #endif  | 
224  |  |  | 
225  |  | #if EV_NO_THREADS  | 
226  |  | # undef EV_NO_SMP  | 
227  |  | # define EV_NO_SMP 1  | 
228  |  | # undef ECB_NO_THREADS  | 
229  |  | # define ECB_NO_THREADS 1  | 
230  |  | #endif  | 
231  |  | #if EV_NO_SMP  | 
232  |  | # undef EV_NO_SMP  | 
233  |  | # define ECB_NO_SMP 1  | 
234  |  | #endif  | 
235  |  |  | 
236  |  | #ifndef _WIN32  | 
237  |  | # include <sys/time.h>  | 
238  |  | # include <sys/wait.h>  | 
239  |  | # include <unistd.h>  | 
240  |  | #else  | 
241  |  | # include <io.h>  | 
242  |  | # define WIN32_LEAN_AND_MEAN  | 
243  |  | # include <winsock2.h>  | 
244  |  | # include <windows.h>  | 
245  |  | # ifndef EV_SELECT_IS_WINSOCKET  | 
246  |  | #  define EV_SELECT_IS_WINSOCKET 1  | 
247  |  | # endif  | 
248  |  | # undef EV_AVOID_STDIO  | 
249  |  | #endif  | 
250  |  |  | 
251  |  | /* this block tries to deduce configuration from header-defined symbols and defaults */  | 
252  |  |  | 
253  |  | /* try to deduce the maximum number of signals on this platform */  | 
254  |  | #if defined EV_NSIG  | 
255  |  | /* use what's provided */  | 
256  |  | #elif defined NSIG  | 
257  | 0  | # define EV_NSIG (NSIG)  | 
258  |  | #elif defined _NSIG  | 
259  |  | # define EV_NSIG (_NSIG)  | 
260  |  | #elif defined SIGMAX  | 
261  |  | # define EV_NSIG (SIGMAX+1)  | 
262  |  | #elif defined SIG_MAX  | 
263  |  | # define EV_NSIG (SIG_MAX+1)  | 
264  |  | #elif defined _SIG_MAX  | 
265  |  | # define EV_NSIG (_SIG_MAX+1)  | 
266  |  | #elif defined MAXSIG  | 
267  |  | # define EV_NSIG (MAXSIG+1)  | 
268  |  | #elif defined MAX_SIG  | 
269  |  | # define EV_NSIG (MAX_SIG+1)  | 
270  |  | #elif defined SIGARRAYSIZE  | 
271  |  | # define EV_NSIG (SIGARRAYSIZE) /* Assume ary[SIGARRAYSIZE] */  | 
272  |  | #elif defined _sys_nsig  | 
273  |  | # define EV_NSIG (_sys_nsig) /* Solaris 2.5 */  | 
274  |  | #else  | 
275  |  | # define EV_NSIG (8 * sizeof (sigset_t) + 1)  | 
276  |  | #endif  | 
277  |  |  | 
278  |  | #ifndef EV_USE_FLOOR  | 
279  |  | # define EV_USE_FLOOR 0  | 
280  |  | #endif  | 
281  |  |  | 
282  |  | #ifndef EV_USE_CLOCK_SYSCALL  | 
283  |  | # if __linux && __GLIBC__ == 2 && __GLIBC_MINOR__ < 17  | 
284  |  | #  define EV_USE_CLOCK_SYSCALL EV_FEATURE_OS  | 
285  |  | # else  | 
286  |  | #  define EV_USE_CLOCK_SYSCALL 0  | 
287  |  | # endif  | 
288  |  | #endif  | 
289  |  |  | 
290  |  | #if !(_POSIX_TIMERS > 0)  | 
291  |  | # ifndef EV_USE_MONOTONIC  | 
292  |  | #  define EV_USE_MONOTONIC 0  | 
293  |  | # endif  | 
294  |  | # ifndef EV_USE_REALTIME  | 
295  |  | #  define EV_USE_REALTIME 0  | 
296  |  | # endif  | 
297  |  | #endif  | 
298  |  |  | 
299  |  | #ifndef EV_USE_MONOTONIC  | 
300  |  | # if defined _POSIX_MONOTONIC_CLOCK && _POSIX_MONOTONIC_CLOCK >= 0  | 
301  |  | #  define EV_USE_MONOTONIC EV_FEATURE_OS  | 
302  |  | # else  | 
303  |  | #  define EV_USE_MONOTONIC 0  | 
304  |  | # endif  | 
305  |  | #endif  | 
306  |  |  | 
307  |  | #ifndef EV_USE_REALTIME  | 
308  |  | # define EV_USE_REALTIME !EV_USE_CLOCK_SYSCALL  | 
309  |  | #endif  | 
310  |  |  | 
311  |  | #ifndef EV_USE_NANOSLEEP  | 
312  |  | # if _POSIX_C_SOURCE >= 199309L  | 
313  |  | #  define EV_USE_NANOSLEEP EV_FEATURE_OS  | 
314  |  | # else  | 
315  |  | #  define EV_USE_NANOSLEEP 0  | 
316  |  | # endif  | 
317  |  | #endif  | 
318  |  |  | 
319  |  | #ifndef EV_USE_SELECT  | 
320  |  | # define EV_USE_SELECT EV_FEATURE_BACKENDS  | 
321  |  | #endif  | 
322  |  |  | 
323  |  | #ifndef EV_USE_POLL  | 
324  |  | # ifdef _WIN32  | 
325  |  | #  define EV_USE_POLL 0  | 
326  |  | # else  | 
327  |  | #  define EV_USE_POLL EV_FEATURE_BACKENDS  | 
328  |  | # endif  | 
329  |  | #endif  | 
330  |  |  | 
331  |  | #ifndef EV_USE_EPOLL  | 
332  |  | # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))  | 
333  | 1.67k  | #  define EV_USE_EPOLL EV_FEATURE_BACKENDS  | 
334  |  | # else  | 
335  |  | #  define EV_USE_EPOLL 0  | 
336  |  | # endif  | 
337  |  | #endif  | 
338  |  |  | 
339  |  | #ifndef EV_USE_KQUEUE  | 
340  | 1.67k  | # define EV_USE_KQUEUE 0  | 
341  |  | #endif  | 
342  |  |  | 
343  |  | #ifndef EV_USE_PORT  | 
344  | 1.67k  | # define EV_USE_PORT 0  | 
345  |  | #endif  | 
346  |  |  | 
347  |  | #ifndef EV_USE_LINUXAIO  | 
348  |  | # if __linux /* libev currently assumes linux/aio_abi.h is always available on linux */  | 
349  | 1.67k  | #  define EV_USE_LINUXAIO 0 /* was: 1, always off by default */  | 
350  |  | # else  | 
351  |  | #  define EV_USE_LINUXAIO 0  | 
352  |  | # endif  | 
353  |  | #endif  | 
354  |  |  | 
355  |  | #ifndef EV_USE_IOURING  | 
356  |  | # if __linux /* later checks might disable again */  | 
357  | 3.34k  | #  define EV_USE_IOURING 1  | 
358  |  | # else  | 
359  |  | #  define EV_USE_IOURING 0  | 
360  |  | # endif  | 
361  |  | #endif  | 
362  |  |  | 
363  |  | #ifndef EV_USE_INOTIFY  | 
364  |  | # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))  | 
365  |  | #  define EV_USE_INOTIFY EV_FEATURE_OS  | 
366  |  | # else  | 
367  |  | #  define EV_USE_INOTIFY 0  | 
368  |  | # endif  | 
369  |  | #endif  | 
370  |  |  | 
371  |  | #ifndef EV_PID_HASHSIZE  | 
372  | 0  | # define EV_PID_HASHSIZE EV_FEATURE_DATA ? 16 : 1  | 
373  |  | #endif  | 
374  |  |  | 
375  |  | #ifndef EV_INOTIFY_HASHSIZE  | 
376  | 0  | # define EV_INOTIFY_HASHSIZE EV_FEATURE_DATA ? 16 : 1  | 
377  |  | #endif  | 
378  |  |  | 
379  |  | #ifndef EV_USE_EVENTFD  | 
380  |  | # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))  | 
381  |  | #  define EV_USE_EVENTFD EV_FEATURE_OS  | 
382  |  | # else  | 
383  |  | #  define EV_USE_EVENTFD 0  | 
384  |  | # endif  | 
385  |  | #endif  | 
386  |  |  | 
387  |  | #ifndef EV_USE_SIGNALFD  | 
388  |  | # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))  | 
389  |  | #  define EV_USE_SIGNALFD EV_FEATURE_OS  | 
390  |  | # else  | 
391  |  | #  define EV_USE_SIGNALFD 0  | 
392  |  | # endif  | 
393  |  | #endif  | 
394  |  |  | 
395  |  | #ifndef EV_USE_TIMERFD  | 
396  |  | # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8))  | 
397  |  | #  define EV_USE_TIMERFD EV_FEATURE_OS  | 
398  |  | # else  | 
399  |  | #  define EV_USE_TIMERFD 0  | 
400  |  | # endif  | 
401  |  | #endif  | 
402  |  |  | 
403  |  | #if 0 /* debugging */  | 
404  |  | # define EV_VERIFY 3  | 
405  |  | # define EV_USE_4HEAP 1  | 
406  |  | # define EV_HEAP_CACHE_AT 1  | 
407  |  | #endif  | 
408  |  |  | 
409  |  | #ifndef EV_VERIFY  | 
410  |  | # define EV_VERIFY (EV_FEATURE_API ? 1 : 0)  | 
411  |  | #endif  | 
412  |  |  | 
413  |  | #ifndef EV_USE_4HEAP  | 
414  |  | # define EV_USE_4HEAP EV_FEATURE_DATA  | 
415  |  | #endif  | 
416  |  |  | 
417  |  | #ifndef EV_HEAP_CACHE_AT  | 
418  |  | # define EV_HEAP_CACHE_AT EV_FEATURE_DATA  | 
419  |  | #endif  | 
420  |  |  | 
421  |  | #ifdef __ANDROID__  | 
422  |  | /* supposedly, android doesn't typedef fd_mask */  | 
423  |  | # undef EV_USE_SELECT  | 
424  |  | # define EV_USE_SELECT 0  | 
425  |  | /* supposedly, we need to include syscall.h, not sys/syscall.h, so just disable */  | 
426  |  | # undef EV_USE_CLOCK_SYSCALL  | 
427  |  | # define EV_USE_CLOCK_SYSCALL 0  | 
428  |  | #endif  | 
429  |  |  | 
430  |  | /* aix's poll.h seems to cause lots of trouble */  | 
431  |  | #ifdef _AIX  | 
432  |  | /* AIX has a completely broken poll.h header */  | 
433  |  | # undef EV_USE_POLL  | 
434  |  | # define EV_USE_POLL 0  | 
435  |  | #endif  | 
436  |  |  | 
437  |  | /* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */  | 
438  |  | /* which makes programs even slower. might work on other unices, too. */  | 
439  |  | #if EV_USE_CLOCK_SYSCALL  | 
440  |  | # include <sys/syscall.h>  | 
441  |  | # ifdef SYS_clock_gettime  | 
442  |  | #  define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))  | 
443  |  | #  undef EV_USE_MONOTONIC  | 
444  |  | #  define EV_USE_MONOTONIC 1  | 
445  |  | #  define EV_NEED_SYSCALL 1  | 
446  |  | # else  | 
447  |  | #  undef EV_USE_CLOCK_SYSCALL  | 
448  |  | #  define EV_USE_CLOCK_SYSCALL 0  | 
449  |  | # endif  | 
450  |  | #endif  | 
451  |  |  | 
452  |  | /* this block fixes any misconfiguration where we know we run into trouble otherwise */  | 
453  |  |  | 
454  |  | #ifndef CLOCK_MONOTONIC  | 
455  |  | # undef EV_USE_MONOTONIC  | 
456  |  | # define EV_USE_MONOTONIC 0  | 
457  |  | #endif  | 
458  |  |  | 
459  |  | #ifndef CLOCK_REALTIME  | 
460  |  | # undef EV_USE_REALTIME  | 
461  |  | # define EV_USE_REALTIME 0  | 
462  |  | #endif  | 
463  |  |  | 
464  |  | #if !EV_STAT_ENABLE  | 
465  |  | # undef EV_USE_INOTIFY  | 
466  |  | # define EV_USE_INOTIFY 0  | 
467  |  | #endif  | 
468  |  |  | 
469  |  | #if __linux && EV_USE_IOURING  | 
470  |  | # include <linux/version.h>  | 
471  |  | # if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)  | 
472  |  | #  undef EV_USE_IOURING  | 
473  |  | #  define EV_USE_IOURING 0  | 
474  |  | # endif  | 
475  |  | #endif  | 
476  |  |  | 
477  |  | #if !EV_USE_NANOSLEEP  | 
478  |  | /* hp-ux has it in sys/time.h, which we unconditionally include above */  | 
479  |  | # if !defined _WIN32 && !defined __hpux  | 
480  |  | #  include <sys/select.h>  | 
481  |  | # endif  | 
482  |  | #endif  | 
483  |  |  | 
484  |  | #if EV_USE_LINUXAIO  | 
485  |  | # include <sys/syscall.h>  | 
486  |  | # if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */  | 
487  |  | #  define EV_NEED_SYSCALL 1  | 
488  |  | # else  | 
489  |  | #  undef EV_USE_LINUXAIO  | 
490  |  | #  define EV_USE_LINUXAIO 0  | 
491  |  | # endif  | 
492  |  | #endif  | 
493  |  |  | 
494  |  | #if EV_USE_IOURING  | 
495  |  | # include <sys/syscall.h>  | 
496  |  | # if !SYS_io_uring_register && __linux && !__alpha  | 
497  |  | #  define SYS_io_uring_setup    425  | 
498  |  | #  define SYS_io_uring_enter    426  | 
499  |  | #  define SYS_io_uring_register 427  | 
500  |  | # endif  | 
501  |  | # if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */  | 
502  |  | #  define EV_NEED_SYSCALL 1  | 
503  |  | # else  | 
504  |  | #  undef EV_USE_IOURING  | 
505  |  | #  define EV_USE_IOURING 0  | 
506  |  | # endif  | 
507  |  | #endif  | 
508  |  |  | 
509  |  | #if EV_USE_INOTIFY  | 
510  |  | # include <sys/statfs.h>  | 
511  |  | # include <sys/inotify.h>  | 
512  |  | /* some very old inotify.h headers don't have IN_DONT_FOLLOW */  | 
513  |  | # ifndef IN_DONT_FOLLOW  | 
514  |  | #  undef EV_USE_INOTIFY  | 
515  |  | #  define EV_USE_INOTIFY 0  | 
516  |  | # endif  | 
517  |  | #endif  | 
518  |  |  | 
519  |  | #if EV_USE_EVENTFD  | 
520  |  | /* our minimum requirement is glibc 2.7 which has the stub, but not the full header */  | 
521  |  | # include <stdint.h>  | 
522  |  | # ifndef EFD_NONBLOCK  | 
523  | 1.67k  | #  define EFD_NONBLOCK O_NONBLOCK  | 
524  |  | # endif  | 
525  |  | # ifndef EFD_CLOEXEC  | 
526  |  | #  ifdef O_CLOEXEC  | 
527  | 1.67k  | #   define EFD_CLOEXEC O_CLOEXEC  | 
528  |  | #  else  | 
529  |  | #   define EFD_CLOEXEC 02000000  | 
530  |  | #  endif  | 
531  |  | # endif  | 
532  |  | EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);  | 
533  |  | #endif  | 
534  |  |  | 
535  |  | #if EV_USE_SIGNALFD  | 
536  |  | /* our minimum requirement is glibc 2.7 which has the stub, but not the full header */  | 
537  |  | # include <stdint.h>  | 
538  |  | # ifndef SFD_NONBLOCK  | 
539  | 0  | #  define SFD_NONBLOCK O_NONBLOCK  | 
540  |  | # endif  | 
541  |  | # ifndef SFD_CLOEXEC  | 
542  |  | #  ifdef O_CLOEXEC  | 
543  | 0  | #   define SFD_CLOEXEC O_CLOEXEC  | 
544  |  | #  else  | 
545  |  | #   define SFD_CLOEXEC 02000000  | 
546  |  | #  endif  | 
547  |  | # endif  | 
548  |  | EV_CPP (extern "C") int (signalfd) (int fd, const sigset_t *mask, int flags);  | 
549  |  |  | 
550  |  | struct signalfd_siginfo  | 
551  |  | { | 
552  |  |   uint32_t ssi_signo;  | 
553  |  |   char pad[128 - sizeof (uint32_t)];  | 
554  |  | };  | 
555  |  | #endif  | 
556  |  |  | 
557  |  | /* for timerfd, libev core requires TFD_TIMER_CANCEL_ON_SET &c */  | 
558  |  | #if EV_USE_TIMERFD  | 
559  |  | # include <sys/timerfd.h>  | 
560  |  | /* timerfd is only used for periodics */  | 
561  |  | # if !(defined (TFD_TIMER_CANCEL_ON_SET) && defined (TFD_CLOEXEC) && defined (TFD_NONBLOCK)) || !EV_PERIODIC_ENABLE  | 
562  |  | #  undef EV_USE_TIMERFD  | 
563  |  | #  define EV_USE_TIMERFD 0  | 
564  |  | # endif  | 
565  |  | #endif  | 
566  |  |  | 
567  |  | /*****************************************************************************/  | 
568  |  |  | 
569  |  | #if EV_VERIFY >= 3  | 
570  |  | # define EV_FREQUENT_CHECK ev_verify (EV_A)  | 
571  |  | #else  | 
572  | 10.0k  | # define EV_FREQUENT_CHECK do { } while (0) | 
573  |  | #endif  | 
574  |  |  | 
575  |  | /*  | 
576  |  |  * This is used to work around floating point rounding problems.  | 
577  |  |  * This value is good at least till the year 4000.  | 
578  |  |  */  | 
579  | 0  | #define MIN_INTERVAL  0.0001220703125 /* 1/2**13, good till 4000 */  | 
580  |  | /*#define MIN_INTERVAL  0.00000095367431640625 /* 1/2**20, good till 2200 */  | 
581  |  |  | 
582  |  | #define MIN_TIMEJUMP   1. /* minimum timejump that gets detected (if monotonic clock available) */  | 
583  |  | #define MAX_BLOCKTIME  59.743 /* never wait longer than this time (to detect time jumps) */  | 
584  | 0  | #define MAX_BLOCKTIME2 1500001.07 /* same, but when timerfd is used to detect jumps, also safe delay to not overflow */  | 
585  |  |  | 
586  |  | /* find a portable timestamp that is "always" in the future but fits into time_t.  | 
587  |  |  * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t,  | 
588  |  |  * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */  | 
589  |  | #define EV_TSTAMP_HUGE \  | 
590  | 0  |   (sizeof (time_t) >= 8     ? 10000000000000.  \  | 
591  | 0  |    : 0 < (time_t)4294967295 ?     4294967295.  \  | 
592  | 0  |    :                              2147483647.) \  | 
593  |  |  | 
594  |  | #ifndef EV_TS_CONST  | 
595  | 1.67k  | # define EV_TS_CONST(nv) nv  | 
596  | 0  | # define EV_TS_TO_MSEC(a) a * 1e3 + 0.9999  | 
597  |  | # define EV_TS_FROM_USEC(us) us * 1e-6  | 
598  | 0  | # define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0) | 
599  | 0  | # define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0) | 
600  | 0  | # define EV_TV_GET(tv) ((tv).tv_sec + (tv).tv_usec * 1e-6)  | 
601  | 3.34k  | # define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e-9)  | 
602  |  | #endif  | 
603  |  |  | 
604  |  | /* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */  | 
605  |  | /* ECB.H BEGIN */  | 
606  |  | /*  | 
607  |  |  * libecb - http://software.schmorp.de/pkg/libecb  | 
608  |  |  *  | 
609  |  |  * Copyright (©) 2009-2015,2018-2020 Marc Alexander Lehmann <libecb@schmorp.de>  | 
610  |  |  * Copyright (©) 2011 Emanuele Giaquinta  | 
611  |  |  * All rights reserved.  | 
612  |  |  *  | 
613  |  |  * Redistribution and use in source and binary forms, with or without modifica-  | 
614  |  |  * tion, are permitted provided that the following conditions are met:  | 
615  |  |  *  | 
616  |  |  *   1.  Redistributions of source code must retain the above copyright notice,  | 
617  |  |  *       this list of conditions and the following disclaimer.  | 
618  |  |  *  | 
619  |  |  *   2.  Redistributions in binary form must reproduce the above copyright  | 
620  |  |  *       notice, this list of conditions and the following disclaimer in the  | 
621  |  |  *       documentation and/or other materials provided with the distribution.  | 
622  |  |  *  | 
623  |  |  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED  | 
624  |  |  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-  | 
625  |  |  * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO  | 
626  |  |  * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-  | 
627  |  |  * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,  | 
628  |  |  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;  | 
629  |  |  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,  | 
630  |  |  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-  | 
631  |  |  * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED  | 
632  |  |  * OF THE POSSIBILITY OF SUCH DAMAGE.  | 
633  |  |  *  | 
634  |  |  * Alternatively, the contents of this file may be used under the terms of  | 
635  |  |  * the GNU General Public License ("GPL") version 2 or any later version, | 
636  |  |  * in which case the provisions of the GPL are applicable instead of  | 
637  |  |  * the above. If you wish to allow the use of your version of this file  | 
638  |  |  * only under the terms of the GPL and not to allow others to use your  | 
639  |  |  * version of this file under the BSD license, indicate your decision  | 
640  |  |  * by deleting the provisions above and replace them with the notice  | 
641  |  |  * and other provisions required by the GPL. If you do not delete the  | 
642  |  |  * provisions above, a recipient may use your version of this file under  | 
643  |  |  * either the BSD or the GPL.  | 
644  |  |  */  | 
645  |  |  | 
646  |  | #ifndef ECB_H  | 
647  |  | #define ECB_H  | 
648  |  |  | 
649  |  | /* 16 bits major, 16 bits minor */  | 
650  |  | #define ECB_VERSION 0x00010008  | 
651  |  |  | 
652  |  | #include <string.h> /* for memcpy */  | 
653  |  |  | 
654  |  | #if defined (_WIN32) && !defined (__MINGW32__)  | 
655  |  |   typedef   signed char   int8_t;  | 
656  |  |   typedef unsigned char  uint8_t;  | 
657  |  |   typedef   signed char   int_fast8_t;  | 
658  |  |   typedef unsigned char  uint_fast8_t;  | 
659  |  |   typedef   signed short  int16_t;  | 
660  |  |   typedef unsigned short uint16_t;  | 
661  |  |   typedef   signed int    int_fast16_t;  | 
662  |  |   typedef unsigned int   uint_fast16_t;  | 
663  |  |   typedef   signed int    int32_t;  | 
664  |  |   typedef unsigned int   uint32_t;  | 
665  |  |   typedef   signed int    int_fast32_t;  | 
666  |  |   typedef unsigned int   uint_fast32_t;  | 
667  |  |   #if __GNUC__  | 
668  |  |     typedef   signed long long int64_t;  | 
669  |  |     typedef unsigned long long uint64_t;  | 
670  |  |   #else /* _MSC_VER || __BORLANDC__ */  | 
671  |  |     typedef   signed __int64   int64_t;  | 
672  |  |     typedef unsigned __int64   uint64_t;  | 
673  |  |   #endif  | 
674  |  |   typedef  int64_t  int_fast64_t;  | 
675  |  |   typedef uint64_t uint_fast64_t;  | 
676  |  |   #ifdef _WIN64  | 
677  |  |     #define ECB_PTRSIZE 8  | 
678  |  |     typedef uint64_t uintptr_t;  | 
679  |  |     typedef  int64_t  intptr_t;  | 
680  |  |   #else  | 
681  |  |     #define ECB_PTRSIZE 4  | 
682  |  |     typedef uint32_t uintptr_t;  | 
683  |  |     typedef  int32_t  intptr_t;  | 
684  |  |   #endif  | 
685  |  | #else  | 
686  |  |   #include <inttypes.h>  | 
687  |  |   #if (defined INTPTR_MAX ? INTPTR_MAX : ULONG_MAX) > 0xffffffffU  | 
688  |  |     #define ECB_PTRSIZE 8  | 
689  |  |   #else  | 
690  |  |     #define ECB_PTRSIZE 4  | 
691  |  |   #endif  | 
692  |  | #endif  | 
693  |  |  | 
694  |  | #define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)  | 
695  |  | #define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)  | 
696  |  |  | 
697  |  | #ifndef ECB_OPTIMIZE_SIZE  | 
698  |  |   #if __OPTIMIZE_SIZE__  | 
699  |  |     #define ECB_OPTIMIZE_SIZE 1  | 
700  |  |   #else  | 
701  |  |     #define ECB_OPTIMIZE_SIZE 0  | 
702  |  |   #endif  | 
703  |  | #endif  | 
704  |  |  | 
705  |  | /* work around x32 idiocy by defining proper macros */  | 
706  |  | #if ECB_GCC_AMD64 || ECB_MSVC_AMD64  | 
707  |  |   #if _ILP32  | 
708  |  |     #define ECB_AMD64_X32 1  | 
709  |  |   #else  | 
710  |  |     #define ECB_AMD64 1  | 
711  |  |   #endif  | 
712  |  | #endif  | 
713  |  |  | 
714  |  | /* many compilers define _GNUC_ to some versions but then only implement  | 
715  |  |  * what their idiot authors think are the "more important" extensions,  | 
716  |  |  * causing enormous grief in return for some better fake benchmark numbers.  | 
717  |  |  * or so.  | 
718  |  |  * we try to detect these and simply assume they are not gcc - if they have  | 
719  |  |  * an issue with that they should have done it right in the first place.  | 
720  |  |  */  | 
721  |  | #if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__  | 
722  |  |   #define ECB_GCC_VERSION(major,minor) 0  | 
723  |  | #else  | 
724  |  |   #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))  | 
725  |  | #endif  | 
726  |  |  | 
727  |  | #define ECB_CLANG_VERSION(major,minor) (__clang_major__ > (major) || (__clang_major__ == (major) && __clang_minor__ >= (minor)))  | 
728  |  |  | 
729  |  | #if __clang__ && defined __has_builtin  | 
730  |  |   #define ECB_CLANG_BUILTIN(x) __has_builtin (x)  | 
731  |  | #else  | 
732  |  |   #define ECB_CLANG_BUILTIN(x) 0  | 
733  |  | #endif  | 
734  |  |  | 
735  |  | #if __clang__ && defined __has_extension  | 
736  |  |   #define ECB_CLANG_EXTENSION(x) __has_extension (x)  | 
737  |  | #else  | 
738  |  |   #define ECB_CLANG_EXTENSION(x) 0  | 
739  |  | #endif  | 
740  |  |  | 
741  |  | #define ECB_CPP   (__cplusplus+0)  | 
742  |  | #define ECB_CPP11 (__cplusplus >= 201103L)  | 
743  |  | #define ECB_CPP14 (__cplusplus >= 201402L)  | 
744  |  | #define ECB_CPP17 (__cplusplus >= 201703L)  | 
745  |  |  | 
746  |  | #if ECB_CPP  | 
747  |  |   #define ECB_C            0  | 
748  |  |   #define ECB_STDC_VERSION 0  | 
749  |  | #else  | 
750  |  |   #define ECB_C            1  | 
751  |  |   #define ECB_STDC_VERSION __STDC_VERSION__  | 
752  |  | #endif  | 
753  |  |  | 
754  |  | #define ECB_C99   (ECB_STDC_VERSION >= 199901L)  | 
755  |  | #define ECB_C11   (ECB_STDC_VERSION >= 201112L)  | 
756  |  | #define ECB_C17   (ECB_STDC_VERSION >= 201710L)  | 
757  |  |  | 
758  |  | #if ECB_CPP  | 
759  |  |   #define ECB_EXTERN_C extern "C"  | 
760  |  |   #define ECB_EXTERN_C_BEG ECB_EXTERN_C { | 
761  |  |   #define ECB_EXTERN_C_END }  | 
762  |  | #else  | 
763  |  |   #define ECB_EXTERN_C extern  | 
764  |  |   #define ECB_EXTERN_C_BEG  | 
765  |  |   #define ECB_EXTERN_C_END  | 
766  |  | #endif  | 
767  |  |  | 
768  |  | /*****************************************************************************/  | 
769  |  |  | 
770  |  | /* ECB_NO_THREADS - ecb is not used by multiple threads, ever */  | 
771  |  | /* ECB_NO_SMP     - ecb might be used in multiple threads, but only on a single cpu */  | 
772  |  |  | 
773  |  | #if ECB_NO_THREADS  | 
774  |  |   #define ECB_NO_SMP 1  | 
775  |  | #endif  | 
776  |  |  | 
777  |  | #if ECB_NO_SMP  | 
778  |  |   #define ECB_MEMORY_FENCE do { } while (0) | 
779  |  | #endif  | 
780  |  |  | 
781  |  | /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */  | 
782  |  | #if __xlC__ && ECB_CPP  | 
783  |  |   #include <builtins.h>  | 
784  |  | #endif  | 
785  |  |  | 
786  |  | #if 1400 <= _MSC_VER  | 
787  |  |   #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */  | 
788  |  | #endif  | 
789  |  |  | 
790  |  | #ifndef ECB_MEMORY_FENCE  | 
791  |  |   #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110  | 
792  |  |     #define ECB_MEMORY_FENCE_RELAXED __asm__ __volatile__ ("" : : : "memory") | 
793  |  |     #if __i386 || __i386__  | 
794  |  |       #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") | 
795  |  |       #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ (""                        : : : "memory") | 
796  |  |       #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ (""                        : : : "memory") | 
797  |  |     #elif ECB_GCC_AMD64  | 
798  | 18  |       #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mfence"   : : : "memory") | 
799  | 9  |       #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ (""         : : : "memory") | 
800  | 1.68k  |       #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ (""         : : : "memory") | 
801  |  |     #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__  | 
802  |  |       #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("sync"     : : : "memory") | 
803  |  |     #elif defined __ARM_ARCH_2__ \  | 
804  |  |       || defined __ARM_ARCH_3__  || defined __ARM_ARCH_3M__  \  | 
805  |  |       || defined __ARM_ARCH_4__  || defined __ARM_ARCH_4T__  \  | 
806  |  |       || defined __ARM_ARCH_5__  || defined __ARM_ARCH_5E__  \  | 
807  |  |       || defined __ARM_ARCH_5T__ || defined __ARM_ARCH_5TE__ \  | 
808  |  |       || defined __ARM_ARCH_5TEJ__  | 
809  |  |       /* should not need any, unless running old code on newer cpu - arm doesn't support that */  | 
810  |  |     #elif defined __ARM_ARCH_6__  || defined __ARM_ARCH_6J__  \  | 
811  |  |        || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ \  | 
812  |  |        || defined __ARM_ARCH_6T2__  | 
813  |  |       #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory") | 
814  |  |     #elif defined __ARM_ARCH_7__  || defined __ARM_ARCH_7A__  \  | 
815  |  |        || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__  | 
816  |  |       #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("dmb"      : : : "memory") | 
817  |  |     #elif __aarch64__  | 
818  |  |       #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("dmb ish"  : : : "memory") | 
819  |  |     #elif (__sparc || __sparc__) && !(__sparc_v8__ || defined __sparcv8)  | 
820  |  |       #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory") | 
821  |  |       #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad"                            : : : "memory") | 
822  |  |       #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore             | #StoreStore") | 
823  |  |     #elif defined __s390__ || defined __s390x__  | 
824  |  |       #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("bcr 15,0" : : : "memory") | 
825  |  |     #elif defined __mips__  | 
826  |  |       /* GNU/Linux emulates sync on mips1 architectures, so we force its use */  | 
827  |  |       /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */  | 
828  |  |       #define ECB_MEMORY_FENCE         __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory") | 
829  |  |     #elif defined __alpha__  | 
830  |  |       #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mb"       : : : "memory") | 
831  |  |     #elif defined __hppa__  | 
832  |  |       #define ECB_MEMORY_FENCE         __asm__ __volatile__ (""         : : : "memory") | 
833  |  |       #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("") | 
834  |  |     #elif defined __ia64__  | 
835  |  |       #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mf"       : : : "memory") | 
836  |  |     #elif defined __m68k__  | 
837  |  |       #define ECB_MEMORY_FENCE         __asm__ __volatile__ (""         : : : "memory") | 
838  |  |     #elif defined __m88k__  | 
839  |  |       #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory") | 
840  |  |     #elif defined __sh__  | 
841  |  |       #define ECB_MEMORY_FENCE         __asm__ __volatile__ (""         : : : "memory") | 
842  |  |     #endif  | 
843  |  |   #endif  | 
844  |  | #endif  | 
845  |  |  | 
846  |  | #ifndef ECB_MEMORY_FENCE  | 
847  |  |   #if ECB_GCC_VERSION(4,7)  | 
848  |  |     /* see comment below (stdatomic.h) about the C11 memory model. */  | 
849  |  |     #define ECB_MEMORY_FENCE         __atomic_thread_fence (__ATOMIC_SEQ_CST)  | 
850  |  |     #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)  | 
851  |  |     #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)  | 
852  |  |     #define ECB_MEMORY_FENCE_RELAXED __atomic_thread_fence (__ATOMIC_RELAXED)  | 
853  |  |  | 
854  |  |   #elif ECB_CLANG_EXTENSION(c_atomic)  | 
855  |  |     /* see comment below (stdatomic.h) about the C11 memory model. */  | 
856  |  |     #define ECB_MEMORY_FENCE         __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)  | 
857  |  |     #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)  | 
858  |  |     #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)  | 
859  |  |     #define ECB_MEMORY_FENCE_RELAXED __c11_atomic_thread_fence (__ATOMIC_RELAXED)  | 
860  |  |  | 
861  |  |   #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__  | 
862  |  |     #define ECB_MEMORY_FENCE         __sync_synchronize ()  | 
863  |  |   #elif _MSC_VER >= 1500 /* VC++ 2008 */  | 
864  |  |     /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */  | 
865  |  |     #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)  | 
866  |  |     #define ECB_MEMORY_FENCE         _ReadWriteBarrier (); MemoryBarrier()  | 
867  |  |     #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */  | 
868  |  |     #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()  | 
869  |  |   #elif _MSC_VER >= 1400 /* VC++ 2005 */  | 
870  |  |     #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)  | 
871  |  |     #define ECB_MEMORY_FENCE         _ReadWriteBarrier ()  | 
872  |  |     #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */  | 
873  |  |     #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()  | 
874  |  |   #elif defined _WIN32  | 
875  |  |     #include <WinNT.h>  | 
876  |  |     #define ECB_MEMORY_FENCE         MemoryBarrier () /* actually just xchg on x86... scary */  | 
877  |  |   #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110  | 
878  |  |     #include <mbarrier.h>  | 
879  |  |     #define ECB_MEMORY_FENCE         __machine_rw_barrier  ()  | 
880  |  |     #define ECB_MEMORY_FENCE_ACQUIRE __machine_acq_barrier ()  | 
881  |  |     #define ECB_MEMORY_FENCE_RELEASE __machine_rel_barrier ()  | 
882  |  |     #define ECB_MEMORY_FENCE_RELAXED __compiler_barrier ()  | 
883  |  |   #elif __xlC__  | 
884  |  |     #define ECB_MEMORY_FENCE         __sync ()  | 
885  |  |   #endif  | 
886  |  | #endif  | 
887  |  |  | 
888  |  | #ifndef ECB_MEMORY_FENCE  | 
889  |  |   #if ECB_C11 && !defined __STDC_NO_ATOMICS__  | 
890  |  |     /* we assume that these memory fences work on all variables/all memory accesses, */  | 
891  |  |     /* not just C11 atomics and atomic accesses */  | 
892  |  |     #include <stdatomic.h>  | 
893  |  |     #define ECB_MEMORY_FENCE         atomic_thread_fence (memory_order_seq_cst)  | 
894  |  |     #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)  | 
895  |  |     #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)  | 
896  |  |   #endif  | 
897  |  | #endif  | 
898  |  |  | 
899  |  | #ifndef ECB_MEMORY_FENCE  | 
900  |  |   #if !ECB_AVOID_PTHREADS  | 
901  |  |     /*  | 
902  |  |      * if you get undefined symbol references to pthread_mutex_lock,  | 
903  |  |      * or failure to find pthread.h, then you should implement  | 
904  |  |      * the ECB_MEMORY_FENCE operations for your cpu/compiler  | 
905  |  |      * OR provide pthread.h and link against the posix thread library  | 
906  |  |      * of your system.  | 
907  |  |      */  | 
908  |  |     #include <pthread.h>  | 
909  |  |     #define ECB_NEEDS_PTHREADS 1  | 
910  |  |     #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1  | 
911  |  |  | 
912  |  |     static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;  | 
913  |  |     #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0) | 
914  |  |   #endif  | 
915  |  | #endif  | 
916  |  |  | 
917  |  | #if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE  | 
918  |  |   #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE  | 
919  |  | #endif  | 
920  |  |  | 
921  |  | #if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE  | 
922  |  |   #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE  | 
923  |  | #endif  | 
924  |  |  | 
925  |  | #if !defined ECB_MEMORY_FENCE_RELAXED && defined ECB_MEMORY_FENCE  | 
926  |  |   #define ECB_MEMORY_FENCE_RELAXED ECB_MEMORY_FENCE /* very heavy-handed */  | 
927  |  | #endif  | 
928  |  |  | 
929  |  | /*****************************************************************************/  | 
930  |  |  | 
931  |  | #if ECB_CPP  | 
932  |  |   #define ecb_inline static inline  | 
933  |  | #elif ECB_GCC_VERSION(2,5)  | 
934  |  |   #define ecb_inline static __inline__  | 
935  |  | #elif ECB_C99  | 
936  |  |   #define ecb_inline static inline  | 
937  |  | #else  | 
938  |  |   #define ecb_inline static  | 
939  |  | #endif  | 
940  |  |  | 
941  |  | #if ECB_GCC_VERSION(3,3)  | 
942  |  |   #define ecb_restrict __restrict__  | 
943  |  | #elif ECB_C99  | 
944  |  |   #define ecb_restrict restrict  | 
945  |  | #else  | 
946  |  |   #define ecb_restrict  | 
947  |  | #endif  | 
948  |  |  | 
949  |  | typedef int ecb_bool;  | 
950  |  |  | 
951  |  | #define ECB_CONCAT_(a, b) a ## b  | 
952  |  | #define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)  | 
953  |  | #define ECB_STRINGIFY_(a) # a  | 
954  |  | #define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)  | 
955  |  | #define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr))  | 
956  |  |  | 
957  |  | #define ecb_function_ ecb_inline  | 
958  |  |  | 
959  |  | #if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8)  | 
960  | 3.34k  |   #define ecb_attribute(attrlist)        __attribute__ (attrlist)  | 
961  |  | #else  | 
962  |  |   #define ecb_attribute(attrlist)  | 
963  |  | #endif  | 
964  |  |  | 
965  |  | #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_constant_p)  | 
966  |  |   #define ecb_is_constant(expr)          __builtin_constant_p (expr)  | 
967  |  | #else  | 
968  |  |   /* possible C11 impl for integral types  | 
969  |  |   typedef struct ecb_is_constant_struct ecb_is_constant_struct;  | 
970  |  |   #define ecb_is_constant(expr)          _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */  | 
971  |  |  | 
972  |  |   #define ecb_is_constant(expr)          0  | 
973  |  | #endif  | 
974  |  |  | 
975  |  | #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_expect)  | 
976  | 16.6k  |   #define ecb_expect(expr,value)         __builtin_expect ((expr),(value))  | 
977  |  | #else  | 
978  |  |   #define ecb_expect(expr,value)         (expr)  | 
979  |  | #endif  | 
980  |  |  | 
981  |  | #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_prefetch)  | 
982  |  |   #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)  | 
983  |  | #else  | 
984  |  |   #define ecb_prefetch(addr,rw,locality)  | 
985  |  | #endif  | 
986  |  |  | 
987  |  | /* no emulation for ecb_decltype */  | 
988  |  | #if ECB_CPP11  | 
989  |  |   // older implementations might have problems with decltype(x)::type, work around it  | 
990  |  |   template<class T> struct ecb_decltype_t { typedef T type; }; | 
991  |  |   #define ecb_decltype(x) ecb_decltype_t<decltype (x)>::type  | 
992  |  | #elif ECB_GCC_VERSION(3,0) || ECB_CLANG_VERSION(2,8)  | 
993  |  |   #define ecb_decltype(x) __typeof__ (x)  | 
994  |  | #endif  | 
995  |  |  | 
996  |  | #if _MSC_VER >= 1300  | 
997  |  |   #define ecb_deprecated __declspec (deprecated)  | 
998  |  | #else  | 
999  |  |   #define ecb_deprecated ecb_attribute ((__deprecated__))  | 
1000  |  | #endif  | 
1001  |  |  | 
1002  |  | #if _MSC_VER >= 1500  | 
1003  |  |   #define ecb_deprecated_message(msg) __declspec (deprecated (msg))  | 
1004  |  | #elif ECB_GCC_VERSION(4,5)  | 
1005  |  |   #define ecb_deprecated_message(msg) ecb_attribute ((__deprecated__ (msg))  | 
1006  |  | #else  | 
1007  |  |   #define ecb_deprecated_message(msg) ecb_deprecated  | 
1008  |  | #endif  | 
1009  |  |  | 
1010  |  | #if _MSC_VER >= 1400  | 
1011  |  |   #define ecb_noinline __declspec (noinline)  | 
1012  |  | #else  | 
1013  |  |   #define ecb_noinline ecb_attribute ((__noinline__))  | 
1014  |  | #endif  | 
1015  |  |  | 
1016  | 3.34k  | #define ecb_unused     ecb_attribute ((__unused__))  | 
1017  |  | #define ecb_const      ecb_attribute ((__const__))  | 
1018  |  | #define ecb_pure       ecb_attribute ((__pure__))  | 
1019  |  |  | 
1020  |  | #if ECB_C11 || __IBMC_NORETURN  | 
1021  |  |   /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/language_ref/noreturn.html */  | 
1022  |  |   #define ecb_noreturn   _Noreturn  | 
1023  |  | #elif ECB_CPP11  | 
1024  |  |   #define ecb_noreturn   [[noreturn]]  | 
1025  |  | #elif _MSC_VER >= 1200  | 
1026  |  |   /* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx */  | 
1027  |  |   #define ecb_noreturn   __declspec (noreturn)  | 
1028  |  | #else  | 
1029  |  |   #define ecb_noreturn   ecb_attribute ((__noreturn__))  | 
1030  |  | #endif  | 
1031  |  |  | 
1032  |  | #if ECB_GCC_VERSION(4,3)  | 
1033  |  |   #define ecb_artificial ecb_attribute ((__artificial__))  | 
1034  |  |   #define ecb_hot        ecb_attribute ((__hot__))  | 
1035  |  |   #define ecb_cold       ecb_attribute ((__cold__))  | 
1036  |  | #else  | 
1037  |  |   #define ecb_artificial  | 
1038  |  |   #define ecb_hot  | 
1039  |  |   #define ecb_cold  | 
1040  |  | #endif  | 
1041  |  |  | 
1042  |  | /* put around conditional expressions if you are very sure that the  */  | 
1043  |  | /* expression is mostly true or mostly false. note that these return */  | 
1044  |  | /* booleans, not the expression.                                     */  | 
1045  | 10.0k  | #define ecb_expect_false(expr) ecb_expect (!!(expr), 0)  | 
1046  | 6.68k  | #define ecb_expect_true(expr)  ecb_expect (!!(expr), 1)  | 
1047  |  | /* for compatibility to the rest of the world */  | 
1048  |  | #define ecb_likely(expr)   ecb_expect_true  (expr)  | 
1049  |  | #define ecb_unlikely(expr) ecb_expect_false (expr)  | 
1050  |  |  | 
1051  |  | /* count trailing zero bits and count # of one bits */  | 
1052  |  | #if ECB_GCC_VERSION(3,4) \  | 
1053  |  |     || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \  | 
1054  |  |         && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \  | 
1055  |  |         && ECB_CLANG_BUILTIN(__builtin_popcount))  | 
1056  |  |   /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */  | 
1057  |  |   #define ecb_ld32(x)      (__builtin_clz      (x) ^ 31)  | 
1058  |  |   #define ecb_ld64(x)      (__builtin_clzll    (x) ^ 63)  | 
1059  |  |   #define ecb_ctz32(x)      __builtin_ctz      (x)  | 
1060  |  |   #define ecb_ctz64(x)      __builtin_ctzll    (x)  | 
1061  |  |   #define ecb_popcount32(x) __builtin_popcount (x)  | 
1062  |  |   /* no popcountll */  | 
1063  |  | #else  | 
1064  |  |   ecb_function_ ecb_const int ecb_ctz32 (uint32_t x);  | 
1065  |  |   ecb_function_ ecb_const int  | 
1066  |  |   ecb_ctz32 (uint32_t x)  | 
1067  |  |   { | 
1068  |  | #if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)  | 
1069  |  |     unsigned long r;  | 
1070  |  |     _BitScanForward (&r, x);  | 
1071  |  |     return (int)r;  | 
1072  |  | #else  | 
1073  |  |     int r = 0;  | 
1074  |  |  | 
1075  |  |     x &= ~x + 1; /* this isolates the lowest bit */  | 
1076  |  |  | 
1077  |  | #if ECB_branchless_on_i386  | 
1078  |  |     r += !!(x & 0xaaaaaaaa) << 0;  | 
1079  |  |     r += !!(x & 0xcccccccc) << 1;  | 
1080  |  |     r += !!(x & 0xf0f0f0f0) << 2;  | 
1081  |  |     r += !!(x & 0xff00ff00) << 3;  | 
1082  |  |     r += !!(x & 0xffff0000) << 4;  | 
1083  |  | #else  | 
1084  |  |     if (x & 0xaaaaaaaa) r +=  1;  | 
1085  |  |     if (x & 0xcccccccc) r +=  2;  | 
1086  |  |     if (x & 0xf0f0f0f0) r +=  4;  | 
1087  |  |     if (x & 0xff00ff00) r +=  8;  | 
1088  |  |     if (x & 0xffff0000) r += 16;  | 
1089  |  | #endif  | 
1090  |  |  | 
1091  |  |     return r;  | 
1092  |  | #endif  | 
1093  |  |   }  | 
1094  |  |  | 
1095  |  |   ecb_function_ ecb_const int ecb_ctz64 (uint64_t x);  | 
1096  |  |   ecb_function_ ecb_const int  | 
1097  |  |   ecb_ctz64 (uint64_t x)  | 
1098  |  |   { | 
1099  |  | #if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)  | 
1100  |  |     unsigned long r;  | 
1101  |  |     _BitScanForward64 (&r, x);  | 
1102  |  |     return (int)r;  | 
1103  |  | #else  | 
1104  |  |     int shift = x & 0xffffffff ? 0 : 32;  | 
1105  |  |     return ecb_ctz32 (x >> shift) + shift;  | 
1106  |  | #endif  | 
1107  |  |   }  | 
1108  |  |  | 
1109  |  |   ecb_function_ ecb_const int ecb_popcount32 (uint32_t x);  | 
1110  |  |   ecb_function_ ecb_const int  | 
1111  |  |   ecb_popcount32 (uint32_t x)  | 
1112  |  |   { | 
1113  |  |     x -=  (x >> 1) & 0x55555555;  | 
1114  |  |     x  = ((x >> 2) & 0x33333333) + (x & 0x33333333);  | 
1115  |  |     x  = ((x >> 4) + x) & 0x0f0f0f0f;  | 
1116  |  |     x *= 0x01010101;  | 
1117  |  |  | 
1118  |  |     return x >> 24;  | 
1119  |  |   }  | 
1120  |  |  | 
1121  |  |   ecb_function_ ecb_const int ecb_ld32 (uint32_t x);  | 
1122  |  |   ecb_function_ ecb_const int ecb_ld32 (uint32_t x)  | 
1123  |  |   { | 
1124  |  | #if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)  | 
1125  |  |     unsigned long r;  | 
1126  |  |     _BitScanReverse (&r, x);  | 
1127  |  |     return (int)r;  | 
1128  |  | #else  | 
1129  |  |     int r = 0;  | 
1130  |  |  | 
1131  |  |     if (x >> 16) { x >>= 16; r += 16; } | 
1132  |  |     if (x >>  8) { x >>=  8; r +=  8; } | 
1133  |  |     if (x >>  4) { x >>=  4; r +=  4; } | 
1134  |  |     if (x >>  2) { x >>=  2; r +=  2; } | 
1135  |  |     if (x >>  1) {           r +=  1; } | 
1136  |  |  | 
1137  |  |     return r;  | 
1138  |  | #endif  | 
1139  |  |   }  | 
1140  |  |  | 
1141  |  |   ecb_function_ ecb_const int ecb_ld64 (uint64_t x);  | 
1142  |  |   ecb_function_ ecb_const int ecb_ld64 (uint64_t x)  | 
1143  |  |   { | 
1144  |  | #if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)  | 
1145  |  |     unsigned long r;  | 
1146  |  |     _BitScanReverse64 (&r, x);  | 
1147  |  |     return (int)r;  | 
1148  |  | #else  | 
1149  |  |     int r = 0;  | 
1150  |  |  | 
1151  |  |     if (x >> 32) { x >>= 32; r += 32; } | 
1152  |  |  | 
1153  |  |     return r + ecb_ld32 (x);  | 
1154  |  | #endif  | 
1155  |  |   }  | 
1156  |  | #endif  | 
1157  |  |  | 
1158  |  | ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x);  | 
1159  | 0  | ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); } | 
1160  |  | ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x);  | 
1161  | 0  | ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); } | 
1162  |  |  | 
1163  |  | ecb_function_ ecb_const uint8_t  ecb_bitrev8  (uint8_t  x);  | 
1164  |  | ecb_function_ ecb_const uint8_t  ecb_bitrev8  (uint8_t  x)  | 
1165  | 0  | { | 
1166  | 0  |   return (  (x * 0x0802U & 0x22110U)  | 
1167  | 0  |           | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;  | 
1168  | 0  | }  | 
1169  |  |  | 
1170  |  | ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x);  | 
1171  |  | ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x)  | 
1172  | 0  | { | 
1173  | 0  |   x = ((x >>  1) &     0x5555) | ((x &     0x5555) <<  1);  | 
1174  | 0  |   x = ((x >>  2) &     0x3333) | ((x &     0x3333) <<  2);  | 
1175  | 0  |   x = ((x >>  4) &     0x0f0f) | ((x &     0x0f0f) <<  4);  | 
1176  | 0  |   x = ( x >>  8              ) | ( x               <<  8);  | 
1177  | 0  | 
  | 
1178  | 0  |   return x;  | 
1179  | 0  | }  | 
1180  |  |  | 
1181  |  | ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x);  | 
1182  |  | ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x)  | 
1183  | 0  | { | 
1184  | 0  |   x = ((x >>  1) & 0x55555555) | ((x & 0x55555555) <<  1);  | 
1185  | 0  |   x = ((x >>  2) & 0x33333333) | ((x & 0x33333333) <<  2);  | 
1186  | 0  |   x = ((x >>  4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) <<  4);  | 
1187  | 0  |   x = ((x >>  8) & 0x00ff00ff) | ((x & 0x00ff00ff) <<  8);  | 
1188  | 0  |   x = ( x >> 16              ) | ( x               << 16);  | 
1189  | 0  | 
  | 
1190  | 0  |   return x;  | 
1191  | 0  | }  | 
1192  |  |  | 
1193  |  | /* popcount64 is only available on 64 bit cpus as gcc builtin */  | 
1194  |  | /* so for this version we are lazy */  | 
1195  |  | ecb_function_ ecb_const int ecb_popcount64 (uint64_t x);  | 
1196  |  | ecb_function_ ecb_const int  | 
1197  |  | ecb_popcount64 (uint64_t x)  | 
1198  | 0  | { | 
1199  | 0  |   return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);  | 
1200  | 0  | }  | 
1201  |  |  | 
1202  |  | ecb_inline ecb_const uint8_t  ecb_rotl8  (uint8_t  x, unsigned int count);  | 
1203  |  | ecb_inline ecb_const uint8_t  ecb_rotr8  (uint8_t  x, unsigned int count);  | 
1204  |  | ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count);  | 
1205  |  | ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count);  | 
1206  |  | ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count);  | 
1207  |  | ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count);  | 
1208  |  | ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count);  | 
1209  |  | ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count);  | 
1210  |  |  | 
1211  | 0  | ecb_inline ecb_const uint8_t  ecb_rotl8  (uint8_t  x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); } | 
1212  | 0  | ecb_inline ecb_const uint8_t  ecb_rotr8  (uint8_t  x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); } | 
1213  | 0  | ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); } | 
1214  | 0  | ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); } | 
1215  | 0  | ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); } | 
1216  | 0  | ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); } | 
1217  | 0  | ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); } | 
1218  | 0  | ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); } | 
1219  |  |  | 
1220  |  | #if ECB_CPP  | 
1221  |  |  | 
1222  |  | inline uint8_t  ecb_ctz (uint8_t  v) { return ecb_ctz32 (v); } | 
1223  |  | inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); } | 
1224  |  | inline uint32_t ecb_ctz (uint32_t v) { return ecb_ctz32 (v); } | 
1225  |  | inline uint64_t ecb_ctz (uint64_t v) { return ecb_ctz64 (v); } | 
1226  |  |  | 
1227  |  | inline bool ecb_is_pot (uint8_t  v) { return ecb_is_pot32 (v); } | 
1228  |  | inline bool ecb_is_pot (uint16_t v) { return ecb_is_pot32 (v); } | 
1229  |  | inline bool ecb_is_pot (uint32_t v) { return ecb_is_pot32 (v); } | 
1230  |  | inline bool ecb_is_pot (uint64_t v) { return ecb_is_pot64 (v); } | 
1231  |  |  | 
1232  |  | inline int ecb_ld (uint8_t  v) { return ecb_ld32 (v); } | 
1233  |  | inline int ecb_ld (uint16_t v) { return ecb_ld32 (v); } | 
1234  |  | inline int ecb_ld (uint32_t v) { return ecb_ld32 (v); } | 
1235  |  | inline int ecb_ld (uint64_t v) { return ecb_ld64 (v); } | 
1236  |  |  | 
1237  |  | inline int ecb_popcount (uint8_t  v) { return ecb_popcount32 (v); } | 
1238  |  | inline int ecb_popcount (uint16_t v) { return ecb_popcount32 (v); } | 
1239  |  | inline int ecb_popcount (uint32_t v) { return ecb_popcount32 (v); } | 
1240  |  | inline int ecb_popcount (uint64_t v) { return ecb_popcount64 (v); } | 
1241  |  |  | 
1242  |  | inline uint8_t  ecb_bitrev (uint8_t  v) { return ecb_bitrev8  (v); } | 
1243  |  | inline uint16_t ecb_bitrev (uint16_t v) { return ecb_bitrev16 (v); } | 
1244  |  | inline uint32_t ecb_bitrev (uint32_t v) { return ecb_bitrev32 (v); } | 
1245  |  |  | 
1246  |  | inline uint8_t  ecb_rotl (uint8_t  v, unsigned int count) { return ecb_rotl8  (v, count); } | 
1247  |  | inline uint16_t ecb_rotl (uint16_t v, unsigned int count) { return ecb_rotl16 (v, count); } | 
1248  |  | inline uint32_t ecb_rotl (uint32_t v, unsigned int count) { return ecb_rotl32 (v, count); } | 
1249  |  | inline uint64_t ecb_rotl (uint64_t v, unsigned int count) { return ecb_rotl64 (v, count); } | 
1250  |  |  | 
1251  |  | inline uint8_t  ecb_rotr (uint8_t  v, unsigned int count) { return ecb_rotr8  (v, count); } | 
1252  |  | inline uint16_t ecb_rotr (uint16_t v, unsigned int count) { return ecb_rotr16 (v, count); } | 
1253  |  | inline uint32_t ecb_rotr (uint32_t v, unsigned int count) { return ecb_rotr32 (v, count); } | 
1254  |  | inline uint64_t ecb_rotr (uint64_t v, unsigned int count) { return ecb_rotr64 (v, count); } | 
1255  |  |  | 
1256  |  | #endif  | 
1257  |  |  | 
1258  |  | #if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))  | 
1259  |  |   #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)  | 
1260  |  |   #define ecb_bswap16(x)  __builtin_bswap16 (x)  | 
1261  |  |   #else  | 
1262  |  |   #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)  | 
1263  |  |   #endif  | 
1264  |  |   #define ecb_bswap32(x)  __builtin_bswap32 (x)  | 
1265  |  |   #define ecb_bswap64(x)  __builtin_bswap64 (x)  | 
1266  |  | #elif _MSC_VER  | 
1267  |  |   #include <stdlib.h>  | 
1268  |  |   #define ecb_bswap16(x) ((uint16_t)_byteswap_ushort ((uint16_t)(x)))  | 
1269  |  |   #define ecb_bswap32(x) ((uint32_t)_byteswap_ulong  ((uint32_t)(x)))  | 
1270  |  |   #define ecb_bswap64(x) ((uint64_t)_byteswap_uint64 ((uint64_t)(x)))  | 
1271  |  | #else  | 
1272  |  |   ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x);  | 
1273  |  |   ecb_function_ ecb_const uint16_t  | 
1274  |  |   ecb_bswap16 (uint16_t x)  | 
1275  |  |   { | 
1276  |  |     return ecb_rotl16 (x, 8);  | 
1277  |  |   }  | 
1278  |  |  | 
1279  |  |   ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x);  | 
1280  |  |   ecb_function_ ecb_const uint32_t  | 
1281  |  |   ecb_bswap32 (uint32_t x)  | 
1282  |  |   { | 
1283  |  |     return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);  | 
1284  |  |   }  | 
1285  |  |  | 
1286  |  |   ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x);  | 
1287  |  |   ecb_function_ ecb_const uint64_t  | 
1288  |  |   ecb_bswap64 (uint64_t x)  | 
1289  |  |   { | 
1290  |  |     return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);  | 
1291  |  |   }  | 
1292  |  | #endif  | 
1293  |  |  | 
1294  |  | #if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable)  | 
1295  |  |   #define ecb_unreachable() __builtin_unreachable ()  | 
1296  |  | #else  | 
1297  |  |   /* this seems to work fine, but gcc always emits a warning for it :/ */  | 
1298  |  |   ecb_inline ecb_noreturn void ecb_unreachable (void);  | 
1299  |  |   ecb_inline ecb_noreturn void ecb_unreachable (void) { } | 
1300  |  | #endif  | 
1301  |  |  | 
1302  |  | /* try to tell the compiler that some condition is definitely true */  | 
1303  |  | #define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0  | 
1304  |  |  | 
1305  |  | ecb_inline ecb_const uint32_t ecb_byteorder_helper (void);  | 
1306  |  | ecb_inline ecb_const uint32_t  | 
1307  |  | ecb_byteorder_helper (void)  | 
1308  | 0  | { | 
1309  | 0  |   /* the union code still generates code under pressure in gcc, */  | 
1310  | 0  |   /* but less than using pointers, and always seems to */  | 
1311  | 0  |   /* successfully return a constant. */  | 
1312  | 0  |   /* the reason why we have this horrible preprocessor mess */  | 
1313  | 0  |   /* is to avoid it in all cases, at least on common architectures */  | 
1314  | 0  |   /* or when using a recent enough gcc version (>= 4.6) */  | 
1315  | 0  | #if (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \  | 
1316  | 0  |     || ((__i386 || __i386__ || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64) && !__VOS__)  | 
1317  | 0  |   #define ECB_LITTLE_ENDIAN 1  | 
1318  | 0  |   return 0x44332211;  | 
1319  | 0  | #elif (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) \  | 
1320  | 0  |       || ((__AARCH64EB__ || __MIPSEB__ || __ARMEB__) && !__VOS__)  | 
1321  | 0  |   #define ECB_BIG_ENDIAN 1  | 
1322  | 0  |   return 0x11223344;  | 
1323  | 0  | #else  | 
1324  | 0  |   union  | 
1325  | 0  |   { | 
1326  | 0  |     uint8_t c[4];  | 
1327  | 0  |     uint32_t u;  | 
1328  | 0  |   } u = { 0x11, 0x22, 0x33, 0x44 }; | 
1329  | 0  |   return u.u;  | 
1330  | 0  | #endif  | 
1331  | 0  | }  | 
1332  |  |  | 
1333  |  | ecb_inline ecb_const ecb_bool ecb_big_endian    (void);  | 
1334  | 0  | ecb_inline ecb_const ecb_bool ecb_big_endian    (void) { return ecb_byteorder_helper () == 0x11223344; } | 
1335  |  | ecb_inline ecb_const ecb_bool ecb_little_endian (void);  | 
1336  | 0  | ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; } | 
1337  |  |  | 
1338  |  | /*****************************************************************************/  | 
1339  |  | /* unaligned load/store */  | 
1340  |  |  | 
1341  | 0  | ecb_inline uint_fast16_t ecb_be_u16_to_host (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; } | 
1342  | 0  | ecb_inline uint_fast32_t ecb_be_u32_to_host (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; } | 
1343  | 0  | ecb_inline uint_fast64_t ecb_be_u64_to_host (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; } | 
1344  |  |  | 
1345  | 0  | ecb_inline uint_fast16_t ecb_le_u16_to_host (uint_fast16_t v) { return ecb_big_endian    () ? ecb_bswap16 (v) : v; } | 
1346  | 0  | ecb_inline uint_fast32_t ecb_le_u32_to_host (uint_fast32_t v) { return ecb_big_endian    () ? ecb_bswap32 (v) : v; } | 
1347  | 0  | ecb_inline uint_fast64_t ecb_le_u64_to_host (uint_fast64_t v) { return ecb_big_endian    () ? ecb_bswap64 (v) : v; } | 
1348  |  |  | 
1349  | 0  | ecb_inline uint_fast16_t ecb_peek_u16_u (const void *ptr) { uint16_t v; memcpy (&v, ptr, sizeof (v)); return v; } | 
1350  | 0  | ecb_inline uint_fast32_t ecb_peek_u32_u (const void *ptr) { uint32_t v; memcpy (&v, ptr, sizeof (v)); return v; } | 
1351  | 0  | ecb_inline uint_fast64_t ecb_peek_u64_u (const void *ptr) { uint64_t v; memcpy (&v, ptr, sizeof (v)); return v; } | 
1352  |  |  | 
1353  | 0  | ecb_inline uint_fast16_t ecb_peek_be_u16_u (const void *ptr) { return ecb_be_u16_to_host (ecb_peek_u16_u (ptr)); } | 
1354  | 0  | ecb_inline uint_fast32_t ecb_peek_be_u32_u (const void *ptr) { return ecb_be_u32_to_host (ecb_peek_u32_u (ptr)); } | 
1355  | 0  | ecb_inline uint_fast64_t ecb_peek_be_u64_u (const void *ptr) { return ecb_be_u64_to_host (ecb_peek_u64_u (ptr)); } | 
1356  |  |  | 
1357  | 0  | ecb_inline uint_fast16_t ecb_peek_le_u16_u (const void *ptr) { return ecb_le_u16_to_host (ecb_peek_u16_u (ptr)); } | 
1358  | 0  | ecb_inline uint_fast32_t ecb_peek_le_u32_u (const void *ptr) { return ecb_le_u32_to_host (ecb_peek_u32_u (ptr)); } | 
1359  | 0  | ecb_inline uint_fast64_t ecb_peek_le_u64_u (const void *ptr) { return ecb_le_u64_to_host (ecb_peek_u64_u (ptr)); } | 
1360  |  |  | 
1361  | 0  | ecb_inline uint_fast16_t ecb_host_to_be_u16 (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; } | 
1362  | 0  | ecb_inline uint_fast32_t ecb_host_to_be_u32 (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; } | 
1363  | 0  | ecb_inline uint_fast64_t ecb_host_to_be_u64 (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; } | 
1364  |  |  | 
1365  | 0  | ecb_inline uint_fast16_t ecb_host_to_le_u16 (uint_fast16_t v) { return ecb_big_endian    () ? ecb_bswap16 (v) : v; } | 
1366  | 0  | ecb_inline uint_fast32_t ecb_host_to_le_u32 (uint_fast32_t v) { return ecb_big_endian    () ? ecb_bswap32 (v) : v; } | 
1367  | 0  | ecb_inline uint_fast64_t ecb_host_to_le_u64 (uint_fast64_t v) { return ecb_big_endian    () ? ecb_bswap64 (v) : v; } | 
1368  |  |  | 
1369  | 0  | ecb_inline void ecb_poke_u16_u (void *ptr, uint16_t v) { memcpy (ptr, &v, sizeof (v)); } | 
1370  | 0  | ecb_inline void ecb_poke_u32_u (void *ptr, uint32_t v) { memcpy (ptr, &v, sizeof (v)); } | 
1371  | 0  | ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); } | 
1372  |  |  | 
1373  | 0  | ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); } | 
1374  | 0  | ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); } | 
1375  | 0  | ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); } | 
1376  |  |  | 
1377  | 0  | ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); } | 
1378  | 0  | ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); } | 
1379  | 0  | ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); } | 
1380  |  |  | 
1381  |  | #if ECB_CPP  | 
1382  |  |  | 
1383  |  | inline uint8_t  ecb_bswap (uint8_t  v) { return v; } | 
1384  |  | inline uint16_t ecb_bswap (uint16_t v) { return ecb_bswap16 (v); } | 
1385  |  | inline uint32_t ecb_bswap (uint32_t v) { return ecb_bswap32 (v); } | 
1386  |  | inline uint64_t ecb_bswap (uint64_t v) { return ecb_bswap64 (v); } | 
1387  |  |  | 
1388  |  | template<typename T> inline T ecb_be_to_host (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; } | 
1389  |  | template<typename T> inline T ecb_le_to_host (T v) { return ecb_big_endian    () ? ecb_bswap (v) : v; } | 
1390  |  | template<typename T> inline T ecb_peek       (const void *ptr) { return *(const T *)ptr; } | 
1391  |  | template<typename T> inline T ecb_peek_be    (const void *ptr) { return ecb_be_to_host (ecb_peek  <T> (ptr)); } | 
1392  |  | template<typename T> inline T ecb_peek_le    (const void *ptr) { return ecb_le_to_host (ecb_peek  <T> (ptr)); } | 
1393  |  | template<typename T> inline T ecb_peek_u     (const void *ptr) { T v; memcpy (&v, ptr, sizeof (v)); return v; } | 
1394  |  | template<typename T> inline T ecb_peek_be_u  (const void *ptr) { return ecb_be_to_host (ecb_peek_u<T> (ptr)); } | 
1395  |  | template<typename T> inline T ecb_peek_le_u  (const void *ptr) { return ecb_le_to_host (ecb_peek_u<T> (ptr)); } | 
1396  |  |  | 
1397  |  | template<typename T> inline T ecb_host_to_be (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; } | 
1398  |  | template<typename T> inline T ecb_host_to_le (T v) { return ecb_big_endian    () ? ecb_bswap (v) : v; } | 
1399  |  | template<typename T> inline void ecb_poke      (void *ptr, T v) { *(T *)ptr = v; } | 
1400  |  | template<typename T> inline void ecb_poke_be   (void *ptr, T v) { return ecb_poke  <T> (ptr, ecb_host_to_be (v)); } | 
1401  |  | template<typename T> inline void ecb_poke_le   (void *ptr, T v) { return ecb_poke  <T> (ptr, ecb_host_to_le (v)); } | 
1402  |  | template<typename T> inline void ecb_poke_u    (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); } | 
1403  |  | template<typename T> inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_be (v)); } | 
1404  |  | template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); } | 
1405  |  |  | 
1406  |  | #endif  | 
1407  |  |  | 
1408  |  | /*****************************************************************************/  | 
1409  |  |  | 
1410  |  | #if ECB_GCC_VERSION(3,0) || ECB_C99  | 
1411  |  |   #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))  | 
1412  |  | #else  | 
1413  |  |   #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))  | 
1414  |  | #endif  | 
1415  |  |  | 
1416  |  | #if ECB_CPP  | 
1417  |  |   template<typename T>  | 
1418  |  |   static inline T ecb_div_rd (T val, T div)  | 
1419  |  |   { | 
1420  |  |     return val < 0 ? - ((-val + div - 1) / div) : (val          ) / div;  | 
1421  |  |   }  | 
1422  |  |   template<typename T>  | 
1423  |  |   static inline T ecb_div_ru (T val, T div)  | 
1424  |  |   { | 
1425  |  |     return val < 0 ? - ((-val          ) / div) : (val + div - 1) / div;  | 
1426  |  |   }  | 
1427  |  | #else  | 
1428  |  |   #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val)            ) / (div))  | 
1429  |  |   #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val)            ) / (div)) : ((val) + (div) - 1) / (div))  | 
1430  |  | #endif  | 
1431  |  |  | 
1432  |  | #if ecb_cplusplus_does_not_suck  | 
1433  |  |   /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */  | 
1434  |  |   template<typename T, int N>  | 
1435  |  |   static inline int ecb_array_length (const T (&arr)[N])  | 
1436  |  |   { | 
1437  |  |     return N;  | 
1438  |  |   }  | 
1439  |  | #else  | 
1440  |  |   #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))  | 
1441  |  | #endif  | 
1442  |  |  | 
1443  |  | /*****************************************************************************/  | 
1444  |  |  | 
1445  |  | ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x);  | 
1446  |  | ecb_function_ ecb_const uint32_t  | 
1447  |  | ecb_binary16_to_binary32 (uint32_t x)  | 
1448  | 0  | { | 
1449  | 0  |   unsigned int s = (x & 0x8000) << (31 - 15);  | 
1450  | 0  |   int          e = (x >> 10) & 0x001f;  | 
1451  | 0  |   unsigned int m =  x        & 0x03ff;  | 
1452  | 0  | 
  | 
1453  | 0  |   if (ecb_expect_false (e == 31))  | 
1454  | 0  |     /* infinity or NaN */  | 
1455  | 0  |     e = 255 - (127 - 15);  | 
1456  | 0  |   else if (ecb_expect_false (!e))  | 
1457  | 0  |     { | 
1458  | 0  |       if (ecb_expect_true (!m))  | 
1459  | 0  |         /* zero, handled by code below by forcing e to 0 */  | 
1460  | 0  |         e = 0 - (127 - 15);  | 
1461  | 0  |       else  | 
1462  | 0  |         { | 
1463  | 0  |           /* subnormal, renormalise */  | 
1464  | 0  |           unsigned int s = 10 - ecb_ld32 (m);  | 
1465  | 0  | 
  | 
1466  | 0  |           m = (m << s) & 0x3ff; /* mask implicit bit */  | 
1467  | 0  |           e -= s - 1;  | 
1468  | 0  |         }  | 
1469  | 0  |     }  | 
1470  | 0  | 
  | 
1471  | 0  |   /* e and m now are normalised, or zero, (or inf or nan) */  | 
1472  | 0  |   e += 127 - 15;  | 
1473  | 0  | 
  | 
1474  | 0  |   return s | (e << 23) | (m << (23 - 10));  | 
1475  | 0  | }  | 
1476  |  |  | 
1477  |  | ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x);  | 
1478  |  | ecb_function_ ecb_const uint16_t  | 
1479  |  | ecb_binary32_to_binary16 (uint32_t x)  | 
1480  | 0  | { | 
1481  | 0  |   unsigned int s =  (x >> 16) & 0x00008000; /* sign bit, the easy part */  | 
1482  | 0  |   unsigned int e = ((x >> 23) & 0x000000ff) - (127 - 15); /* the desired exponent */  | 
1483  | 0  |   unsigned int m =   x        & 0x007fffff;  | 
1484  | 0  | 
  | 
1485  | 0  |   x &= 0x7fffffff;  | 
1486  | 0  | 
  | 
1487  | 0  |   /* if it's within range of binary16 normals, use fast path */  | 
1488  | 0  |   if (ecb_expect_true (0x38800000 <= x && x <= 0x477fefff))  | 
1489  | 0  |     { | 
1490  | 0  |       /* mantissa round-to-even */  | 
1491  | 0  |       m += 0x00000fff + ((m >> (23 - 10)) & 1);  | 
1492  | 0  | 
  | 
1493  | 0  |       /* handle overflow */  | 
1494  | 0  |       if (ecb_expect_false (m >= 0x00800000))  | 
1495  | 0  |         { | 
1496  | 0  |           m >>= 1;  | 
1497  | 0  |           e +=  1;  | 
1498  | 0  |         }  | 
1499  | 0  | 
  | 
1500  | 0  |       return s | (e << 10) | (m >> (23 - 10));  | 
1501  | 0  |     }  | 
1502  | 0  | 
  | 
1503  | 0  |   /* handle large numbers and infinity */  | 
1504  | 0  |   if (ecb_expect_true (0x477fefff < x && x <= 0x7f800000))  | 
1505  | 0  |     return s | 0x7c00;  | 
1506  | 0  | 
  | 
1507  | 0  |   /* handle zero, subnormals and small numbers */  | 
1508  | 0  |   if (ecb_expect_true (x < 0x38800000))  | 
1509  | 0  |     { | 
1510  | 0  |       /* zero */  | 
1511  | 0  |       if (ecb_expect_true (!x))  | 
1512  | 0  |         return s;  | 
1513  | 0  | 
  | 
1514  | 0  |       /* handle subnormals */  | 
1515  | 0  | 
  | 
1516  | 0  |       /* too small, will be zero */  | 
1517  | 0  |       if (e < (14 - 24)) /* might not be sharp, but is good enough */  | 
1518  | 0  |         return s;  | 
1519  | 0  | 
  | 
1520  | 0  |       m |= 0x00800000; /* make implicit bit explicit */  | 
1521  | 0  | 
  | 
1522  | 0  |       /* very tricky - we need to round to the nearest e (+10) bit value */  | 
1523  | 0  |       { | 
1524  | 0  |         unsigned int bits = 14 - e;  | 
1525  | 0  |         unsigned int half = (1 << (bits - 1)) - 1;  | 
1526  | 0  |         unsigned int even = (m >> bits) & 1;  | 
1527  | 0  | 
  | 
1528  | 0  |         /* if this overflows, we will end up with a normalised number */  | 
1529  | 0  |         m = (m + half + even) >> bits;  | 
1530  | 0  |       }  | 
1531  | 0  | 
  | 
1532  | 0  |       return s | m;  | 
1533  | 0  |     }  | 
1534  | 0  | 
  | 
1535  | 0  |   /* handle NaNs, preserve leftmost nan bits, but make sure we don't turn them into infinities */  | 
1536  | 0  |   m >>= 13;  | 
1537  | 0  | 
  | 
1538  | 0  |   return s | 0x7c00 | m | !m;  | 
1539  | 0  | }  | 
1540  |  |  | 
1541  |  | /*******************************************************************************/  | 
1542  |  | /* floating point stuff, can be disabled by defining ECB_NO_LIBM */  | 
1543  |  |  | 
1544  |  | /* basically, everything uses "ieee pure-endian" floating point numbers */  | 
1545  |  | /* the only noteworthy exception is ancient armle, which uses order 43218765 */  | 
1546  |  | #if 0 \  | 
1547  |  |     || __i386 || __i386__ \  | 
1548  |  |     || ECB_GCC_AMD64 \  | 
1549  |  |     || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \  | 
1550  |  |     || defined __s390__ || defined __s390x__ \  | 
1551  |  |     || defined __mips__ \  | 
1552  |  |     || defined __alpha__ \  | 
1553  |  |     || defined __hppa__ \  | 
1554  |  |     || defined __ia64__ \  | 
1555  |  |     || defined __m68k__ \  | 
1556  |  |     || defined __m88k__ \  | 
1557  |  |     || defined __sh__ \  | 
1558  |  |     || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \  | 
1559  |  |     || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \  | 
1560  |  |     || defined __aarch64__  | 
1561  |  |   #define ECB_STDFP 1  | 
1562  |  | #else  | 
1563  |  |   #define ECB_STDFP 0  | 
1564  |  | #endif  | 
1565  |  |  | 
1566  |  | #ifndef ECB_NO_LIBM  | 
1567  |  |  | 
1568  |  |   #include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */  | 
1569  |  |  | 
1570  |  |   /* only the oldest of old doesn't have this one. solaris. */  | 
1571  |  |   #ifdef INFINITY  | 
1572  |  |     #define ECB_INFINITY INFINITY  | 
1573  |  |   #else  | 
1574  |  |     #define ECB_INFINITY HUGE_VAL  | 
1575  |  |   #endif  | 
1576  |  |  | 
1577  |  |   #ifdef NAN  | 
1578  |  |     #define ECB_NAN NAN  | 
1579  |  |   #else  | 
1580  |  |     #define ECB_NAN ECB_INFINITY  | 
1581  |  |   #endif  | 
1582  |  |  | 
1583  |  |   #if ECB_C99 || _XOPEN_VERSION >= 600 || _POSIX_VERSION >= 200112L  | 
1584  |  |     #define ecb_ldexpf(x,e) ldexpf ((x), (e))  | 
1585  |  |     #define ecb_frexpf(x,e) frexpf ((x), (e))  | 
1586  |  |   #else  | 
1587  |  |     #define ecb_ldexpf(x,e) (float) ldexp ((double) (x), (e))  | 
1588  |  |     #define ecb_frexpf(x,e) (float) frexp ((double) (x), (e))  | 
1589  |  |   #endif  | 
1590  |  |  | 
1591  |  |   /* convert a float to ieee single/binary32 */  | 
1592  |  |   ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x);  | 
1593  |  |   ecb_function_ ecb_const uint32_t  | 
1594  |  |   ecb_float_to_binary32 (float x)  | 
1595  | 0  |   { | 
1596  | 0  |     uint32_t r;  | 
1597  | 0  | 
  | 
1598  | 0  |     #if ECB_STDFP  | 
1599  | 0  |       memcpy (&r, &x, 4);  | 
1600  | 0  |     #else  | 
1601  | 0  |       /* slow emulation, works for anything but -0 */  | 
1602  | 0  |       uint32_t m;  | 
1603  | 0  |       int e;  | 
1604  | 0  | 
  | 
1605  | 0  |       if (x == 0e0f                    ) return 0x00000000U;  | 
1606  | 0  |       if (x > +3.40282346638528860e+38f) return 0x7f800000U;  | 
1607  | 0  |       if (x < -3.40282346638528860e+38f) return 0xff800000U;  | 
1608  | 0  |       if (x != x                       ) return 0x7fbfffffU;  | 
1609  | 0  | 
  | 
1610  | 0  |       m = ecb_frexpf (x, &e) * 0x1000000U;  | 
1611  | 0  | 
  | 
1612  | 0  |       r = m & 0x80000000U;  | 
1613  | 0  | 
  | 
1614  | 0  |       if (r)  | 
1615  | 0  |         m = -m;  | 
1616  | 0  | 
  | 
1617  | 0  |       if (e <= -126)  | 
1618  | 0  |         { | 
1619  | 0  |           m &= 0xffffffU;  | 
1620  | 0  |           m >>= (-125 - e);  | 
1621  | 0  |           e = -126;  | 
1622  | 0  |         }  | 
1623  | 0  | 
  | 
1624  | 0  |       r |= (e + 126) << 23;  | 
1625  | 0  |       r |= m & 0x7fffffU;  | 
1626  | 0  |     #endif  | 
1627  | 0  | 
  | 
1628  | 0  |     return r;  | 
1629  | 0  |   }  | 
1630  |  |  | 
1631  |  |   /* converts an ieee single/binary32 to a float */  | 
1632  |  |   ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x);  | 
1633  |  |   ecb_function_ ecb_const float  | 
1634  |  |   ecb_binary32_to_float (uint32_t x)  | 
1635  | 0  |   { | 
1636  | 0  |     float r;  | 
1637  | 0  | 
  | 
1638  | 0  |     #if ECB_STDFP  | 
1639  | 0  |       memcpy (&r, &x, 4);  | 
1640  | 0  |     #else  | 
1641  | 0  |       /* emulation, only works for normals and subnormals and +0 */  | 
1642  | 0  |       int neg = x >> 31;  | 
1643  | 0  |       int e = (x >> 23) & 0xffU;  | 
1644  | 0  | 
  | 
1645  | 0  |       x &= 0x7fffffU;  | 
1646  | 0  | 
  | 
1647  | 0  |       if (e)  | 
1648  | 0  |         x |= 0x800000U;  | 
1649  | 0  |       else  | 
1650  | 0  |         e = 1;  | 
1651  | 0  | 
  | 
1652  | 0  |       /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */  | 
1653  | 0  |       r = ecb_ldexpf (x * (0.5f / 0x800000U), e - 126);  | 
1654  | 0  | 
  | 
1655  | 0  |       r = neg ? -r : r;  | 
1656  | 0  |     #endif  | 
1657  | 0  | 
  | 
1658  | 0  |     return r;  | 
1659  | 0  |   }  | 
1660  |  |  | 
1661  |  |   /* convert a double to ieee double/binary64 */  | 
1662  |  |   ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x);  | 
1663  |  |   ecb_function_ ecb_const uint64_t  | 
1664  |  |   ecb_double_to_binary64 (double x)  | 
1665  | 0  |   { | 
1666  | 0  |     uint64_t r;  | 
1667  | 0  | 
  | 
1668  | 0  |     #if ECB_STDFP  | 
1669  | 0  |       memcpy (&r, &x, 8);  | 
1670  | 0  |     #else  | 
1671  | 0  |       /* slow emulation, works for anything but -0 */  | 
1672  | 0  |       uint64_t m;  | 
1673  | 0  |       int e;  | 
1674  | 0  | 
  | 
1675  | 0  |       if (x == 0e0                     ) return 0x0000000000000000U;  | 
1676  | 0  |       if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;  | 
1677  | 0  |       if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;  | 
1678  | 0  |       if (x != x                       ) return 0X7ff7ffffffffffffU;  | 
1679  | 0  | 
  | 
1680  | 0  |       m = frexp (x, &e) * 0x20000000000000U;  | 
1681  | 0  | 
  | 
1682  | 0  |       r = m & 0x8000000000000000;;  | 
1683  | 0  | 
  | 
1684  | 0  |       if (r)  | 
1685  | 0  |         m = -m;  | 
1686  | 0  | 
  | 
1687  | 0  |       if (e <= -1022)  | 
1688  | 0  |         { | 
1689  | 0  |           m &= 0x1fffffffffffffU;  | 
1690  | 0  |           m >>= (-1021 - e);  | 
1691  | 0  |           e = -1022;  | 
1692  | 0  |         }  | 
1693  | 0  | 
  | 
1694  | 0  |       r |= ((uint64_t)(e + 1022)) << 52;  | 
1695  | 0  |       r |= m & 0xfffffffffffffU;  | 
1696  | 0  |     #endif  | 
1697  | 0  | 
  | 
1698  | 0  |     return r;  | 
1699  | 0  |   }  | 
1700  |  |  | 
1701  |  |   /* converts an ieee double/binary64 to a double */  | 
1702  |  |   ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x);  | 
1703  |  |   ecb_function_ ecb_const double  | 
1704  |  |   ecb_binary64_to_double (uint64_t x)  | 
1705  | 0  |   { | 
1706  | 0  |     double r;  | 
1707  | 0  | 
  | 
1708  | 0  |     #if ECB_STDFP  | 
1709  | 0  |       memcpy (&r, &x, 8);  | 
1710  | 0  |     #else  | 
1711  | 0  |       /* emulation, only works for normals and subnormals and +0 */  | 
1712  | 0  |       int neg = x >> 63;  | 
1713  | 0  |       int e = (x >> 52) & 0x7ffU;  | 
1714  | 0  | 
  | 
1715  | 0  |       x &= 0xfffffffffffffU;  | 
1716  | 0  | 
  | 
1717  | 0  |       if (e)  | 
1718  | 0  |         x |= 0x10000000000000U;  | 
1719  | 0  |       else  | 
1720  | 0  |         e = 1;  | 
1721  | 0  | 
  | 
1722  | 0  |       /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */  | 
1723  | 0  |       r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);  | 
1724  | 0  | 
  | 
1725  | 0  |       r = neg ? -r : r;  | 
1726  | 0  |     #endif  | 
1727  | 0  | 
  | 
1728  | 0  |     return r;  | 
1729  | 0  |   }  | 
1730  |  |  | 
1731  |  |   /* convert a float to ieee half/binary16 */  | 
1732  |  |   ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x);  | 
1733  |  |   ecb_function_ ecb_const uint16_t  | 
1734  |  |   ecb_float_to_binary16 (float x)  | 
1735  | 0  |   { | 
1736  | 0  |     return ecb_binary32_to_binary16 (ecb_float_to_binary32 (x));  | 
1737  | 0  |   }  | 
1738  |  |  | 
1739  |  |   /* convert an ieee half/binary16 to float */  | 
1740  |  |   ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x);  | 
1741  |  |   ecb_function_ ecb_const float  | 
1742  |  |   ecb_binary16_to_float (uint16_t x)  | 
1743  | 0  |   { | 
1744  | 0  |     return ecb_binary32_to_float (ecb_binary16_to_binary32 (x));  | 
1745  | 0  |   }  | 
1746  |  |  | 
1747  |  | #endif  | 
1748  |  |  | 
1749  |  | #endif  | 
1750  |  |  | 
1751  |  | /* ECB.H END */  | 
1752  |  |  | 
1753  |  | #if ECB_MEMORY_FENCE_NEEDS_PTHREADS  | 
1754  |  | /* if your architecture doesn't need memory fences, e.g. because it is  | 
1755  |  |  * single-cpu/core, or if you use libev in a project that doesn't use libev  | 
1756  |  |  * from multiple threads, then you can define ECB_NO_THREADS when compiling  | 
1757  |  |  * libev, in which cases the memory fences become nops.  | 
1758  |  |  * alternatively, you can remove this #error and link against libpthread,  | 
1759  |  |  * which will then provide the memory fences.  | 
1760  |  |  */  | 
1761  |  | # error "memory fences not defined for your architecture, please report"  | 
1762  |  | #endif  | 
1763  |  |  | 
1764  |  | #ifndef ECB_MEMORY_FENCE  | 
1765  |  | # define ECB_MEMORY_FENCE do { } while (0) | 
1766  |  | # define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE  | 
1767  |  | # define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE  | 
1768  |  | #endif  | 
1769  |  |  | 
1770  |  | #define inline_size        ecb_inline  | 
1771  |  |  | 
1772  |  | #if EV_FEATURE_CODE  | 
1773  |  | # define inline_speed      ecb_inline  | 
1774  |  | #else  | 
1775  |  | # define inline_speed      ecb_noinline static  | 
1776  |  | #endif  | 
1777  |  |  | 
1778  |  | /*****************************************************************************/  | 
1779  |  | /* raw syscall wrappers */  | 
1780  |  |  | 
1781  |  | #if EV_NEED_SYSCALL  | 
1782  |  |  | 
1783  |  | #include <sys/syscall.h>  | 
1784  |  |  | 
1785  |  | /*  | 
1786  |  |  * define some syscall wrappers for common architectures  | 
1787  |  |  * this is mostly for nice looks during debugging, not performance.  | 
1788  |  |  * our syscalls return < 0, not == -1, on error. which is good  | 
1789  |  |  * enough for linux aio.  | 
1790  |  |  * TODO: arm is also common nowadays, maybe even mips and x86  | 
1791  |  |  * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove...  | 
1792  |  |  */  | 
1793  |  | #if __GNUC__ && __linux && ECB_AMD64 && !EV_FEATURE_CODE  | 
1794  |  |   /* the costly errno access probably kills this for size optimisation */  | 
1795  |  |  | 
1796  |  |   #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6)            \  | 
1797  |  |     ({                                                                 \ | 
1798  |  |         long res;                                                      \  | 
1799  |  |         register unsigned long r6 __asm__ ("r9" );                     \ | 
1800  |  |         register unsigned long r5 __asm__ ("r8" );                     \ | 
1801  |  |         register unsigned long r4 __asm__ ("r10");                     \ | 
1802  |  |         register unsigned long r3 __asm__ ("rdx");                     \ | 
1803  |  |         register unsigned long r2 __asm__ ("rsi");                     \ | 
1804  |  |         register unsigned long r1 __asm__ ("rdi");                     \ | 
1805  |  |         if (narg >= 6) r6 = (unsigned long)(arg6);                     \  | 
1806  |  |         if (narg >= 5) r5 = (unsigned long)(arg5);                     \  | 
1807  |  |         if (narg >= 4) r4 = (unsigned long)(arg4);                     \  | 
1808  |  |         if (narg >= 3) r3 = (unsigned long)(arg3);                     \  | 
1809  |  |         if (narg >= 2) r2 = (unsigned long)(arg2);                     \  | 
1810  |  |         if (narg >= 1) r1 = (unsigned long)(arg1);                     \  | 
1811  |  |         __asm__ __volatile__ (                                         \  | 
1812  |  |           "syscall\n\t"                                                \  | 
1813  |  |           : "=a" (res)                                                 \  | 
1814  |  |           : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \  | 
1815  |  |           : "cc", "r11", "cx", "memory");                              \  | 
1816  |  |         errno = -res;                                                  \  | 
1817  |  |         res;                                                           \  | 
1818  |  |     })  | 
1819  |  |  | 
1820  |  | #endif  | 
1821  |  |  | 
1822  |  | #ifdef ev_syscall  | 
1823  |  |   #define ev_syscall0(nr)                               ev_syscall (nr, 0,    0,    0,    0,    0,    0,   0)  | 
1824  |  |   #define ev_syscall1(nr,arg1)                          ev_syscall (nr, 1, arg1,    0,    0,    0,    0,   0)  | 
1825  |  |   #define ev_syscall2(nr,arg1,arg2)                     ev_syscall (nr, 2, arg1, arg2,    0,    0,    0,   0)  | 
1826  |  |   #define ev_syscall3(nr,arg1,arg2,arg3)                ev_syscall (nr, 3, arg1, arg2, arg3,    0,    0,   0)  | 
1827  |  |   #define ev_syscall4(nr,arg1,arg2,arg3,arg4)           ev_syscall (nr, 3, arg1, arg2, arg3, arg4,    0,   0)  | 
1828  |  |   #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5)      ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5,   0)  | 
1829  |  |   #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5,arg6)  | 
1830  |  | #else  | 
1831  |  |   #define ev_syscall0(nr)                               syscall (nr)  | 
1832  |  |   #define ev_syscall1(nr,arg1)                          syscall (nr, arg1)  | 
1833  | 0  |   #define ev_syscall2(nr,arg1,arg2)                     syscall (nr, arg1, arg2)  | 
1834  |  |   #define ev_syscall3(nr,arg1,arg2,arg3)                syscall (nr, arg1, arg2, arg3)  | 
1835  |  |   #define ev_syscall4(nr,arg1,arg2,arg3,arg4)           syscall (nr, arg1, arg2, arg3, arg4)  | 
1836  |  |   #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5)      syscall (nr, arg1, arg2, arg3, arg4, arg5)  | 
1837  | 0  |   #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5,arg6)  | 
1838  |  | #endif  | 
1839  |  |  | 
1840  |  | #endif  | 
1841  |  |  | 
1842  |  | /*****************************************************************************/  | 
1843  |  |  | 
1844  | 1.65k  | #define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)  | 
1845  |  |  | 
1846  |  | #if EV_MINPRI == EV_MAXPRI  | 
1847  |  | # define ABSPRI(w) (((W)w), 0)  | 
1848  |  | #else  | 
1849  | 2  | # define ABSPRI(w) (((W)w)->priority - EV_MINPRI)  | 
1850  |  | #endif  | 
1851  |  |  | 
1852  |  | #define EMPTY /* required for microsofts broken pseudo-c compiler */  | 
1853  |  |  | 
1854  |  | typedef ev_watcher *W;  | 
1855  |  | typedef ev_watcher_list *WL;  | 
1856  |  | typedef ev_watcher_time *WT;  | 
1857  |  |  | 
1858  | 0  | #define ev_active(w) ((W)(w))->active  | 
1859  | 0  | #define ev_at(w) ((WT)(w))->at  | 
1860  |  |  | 
1861  |  | #if EV_USE_REALTIME  | 
1862  |  | /* sig_atomic_t is used to avoid per-thread variables or locking but still */  | 
1863  |  | /* giving it a reasonably high chance of working on typical architectures */  | 
1864  |  | static EV_ATOMIC_T have_realtime; /* did clock_gettime (CLOCK_REALTIME) work? */  | 
1865  |  | #endif  | 
1866  |  |  | 
1867  |  | #if EV_USE_MONOTONIC  | 
1868  |  | static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */  | 
1869  |  | #endif  | 
1870  |  |  | 
1871  |  | #ifndef EV_FD_TO_WIN32_HANDLE  | 
1872  |  | # define EV_FD_TO_WIN32_HANDLE(fd) _get_osfhandle (fd)  | 
1873  |  | #endif  | 
1874  |  | #ifndef EV_WIN32_HANDLE_TO_FD  | 
1875  |  | # define EV_WIN32_HANDLE_TO_FD(handle) _open_osfhandle (handle, 0)  | 
1876  |  | #endif  | 
1877  |  | #ifndef EV_WIN32_CLOSE_FD  | 
1878  | 1.65k  | # define EV_WIN32_CLOSE_FD(fd) close (fd)  | 
1879  |  | #endif  | 
1880  |  |  | 
1881  |  | #ifdef _WIN32  | 
1882  |  | # include "ev_win32.c"  | 
1883  |  | #endif  | 
1884  |  |  | 
1885  |  | /*****************************************************************************/  | 
1886  |  |  | 
1887  |  | #if EV_USE_LINUXAIO  | 
1888  |  | # include <linux/aio_abi.h> /* probably only needed for aio_context_t */  | 
1889  |  | #endif  | 
1890  |  |  | 
1891  |  | /* define a suitable floor function (only used by periodics atm) */  | 
1892  |  |  | 
1893  |  | #if EV_USE_FLOOR  | 
1894  |  | # include <math.h>  | 
1895  | 0  | # define ev_floor(v) floor (v)  | 
1896  |  | #else  | 
1897  |  |  | 
1898  |  | #include <float.h>  | 
1899  |  |  | 
1900  |  | /* a floor() replacement function, should be independent of ev_tstamp type */  | 
1901  |  | ecb_noinline  | 
1902  |  | static ev_tstamp  | 
1903  |  | ev_floor (ev_tstamp v)  | 
1904  |  | { | 
1905  |  |   /* the choice of shift factor is not terribly important */  | 
1906  |  | #if FLT_RADIX != 2 /* assume FLT_RADIX == 10 */  | 
1907  |  |   const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.;  | 
1908  |  | #else  | 
1909  |  |   const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;  | 
1910  |  | #endif  | 
1911  |  |  | 
1912  |  |   /* special treatment for negative arguments */  | 
1913  |  |   if (ecb_expect_false (v < 0.))  | 
1914  |  |     { | 
1915  |  |       ev_tstamp f = -ev_floor (-v);  | 
1916  |  |  | 
1917  |  |       return f - (f == v ? 0 : 1);  | 
1918  |  |     }  | 
1919  |  |  | 
1920  |  |   /* argument too large for an unsigned long? then reduce it */  | 
1921  |  |   if (ecb_expect_false (v >= shift))  | 
1922  |  |     { | 
1923  |  |       ev_tstamp f;  | 
1924  |  |  | 
1925  |  |       if (v == v - 1.)  | 
1926  |  |         return v; /* very large numbers are assumed to be integer */  | 
1927  |  |  | 
1928  |  |       f = shift * ev_floor (v * (1. / shift));  | 
1929  |  |       return f + ev_floor (v - f);  | 
1930  |  |     }  | 
1931  |  |  | 
1932  |  |   /* fits into an unsigned long */  | 
1933  |  |   return (unsigned long)v;  | 
1934  |  | }  | 
1935  |  |  | 
1936  |  | #endif  | 
1937  |  |  | 
1938  |  | /*****************************************************************************/  | 
1939  |  |  | 
1940  |  | #ifdef __linux  | 
1941  |  | # include <sys/utsname.h>  | 
1942  |  | #endif  | 
1943  |  |  | 
1944  |  | ecb_noinline ecb_cold  | 
1945  |  | static unsigned int  | 
1946  |  | ev_linux_version (void)  | 
1947  | 1.67k  | { | 
1948  | 1.67k  | #ifdef __linux  | 
1949  | 1.67k  |   unsigned int v = 0;  | 
1950  | 1.67k  |   struct utsname buf;  | 
1951  | 1.67k  |   int i;  | 
1952  | 1.67k  |   char *p = buf.release;  | 
1953  |  |  | 
1954  | 1.67k  |   if (uname (&buf))  | 
1955  | 0  |     return 0;  | 
1956  |  |  | 
1957  | 6.69k  |   for (i = 3+1; --i; )  | 
1958  | 5.01k  |     { | 
1959  | 5.01k  |       unsigned int c = 0;  | 
1960  |  |  | 
1961  | 5.01k  |       for (;;)  | 
1962  | 11.7k  |         { | 
1963  | 11.7k  |           if (*p >= '0' && *p <= '9')  | 
1964  | 6.69k  |             c = c * 10 + *p++ - '0';  | 
1965  | 5.01k  |           else  | 
1966  | 5.01k  |             { | 
1967  | 5.01k  |               p += *p == '.';  | 
1968  | 5.01k  |               break;  | 
1969  | 5.01k  |             }  | 
1970  | 11.7k  |         }  | 
1971  |  |  | 
1972  | 5.01k  |       v = (v << 8) | c;  | 
1973  | 5.01k  |     }  | 
1974  |  |  | 
1975  | 1.67k  |   return v;  | 
1976  |  | #else  | 
1977  |  |   return 0;  | 
1978  |  | #endif  | 
1979  | 1.67k  | }  | 
1980  |  |  | 
1981  |  | /*****************************************************************************/  | 
1982  |  |  | 
1983  |  | #if EV_AVOID_STDIO  | 
1984  |  | ecb_noinline ecb_cold  | 
1985  |  | static void  | 
1986  |  | ev_printerr (const char *msg)  | 
1987  |  | { | 
1988  |  |   write (STDERR_FILENO, msg, strlen (msg));  | 
1989  |  | }  | 
1990  |  | #endif  | 
1991  |  |  | 
1992  |  | static void (*syserr_cb)(const char *msg) EV_NOEXCEPT;  | 
1993  |  |  | 
1994  |  | ecb_cold  | 
1995  |  | void  | 
1996  |  | ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT  | 
1997  | 0  | { | 
1998  | 0  |   syserr_cb = cb;  | 
1999  | 0  | }  | 
2000  |  |  | 
2001  |  | ecb_noinline ecb_cold  | 
2002  |  | static void  | 
2003  |  | ev_syserr (const char *msg)  | 
2004  | 0  | { | 
2005  | 0  |   if (!msg)  | 
2006  | 0  |     msg = "(libev) system error";  | 
2007  |  | 
  | 
2008  | 0  |   if (syserr_cb)  | 
2009  | 0  |     syserr_cb (msg);  | 
2010  | 0  |   else  | 
2011  | 0  |     { | 
2012  |  | #if EV_AVOID_STDIO  | 
2013  |  |       ev_printerr (msg);  | 
2014  |  |       ev_printerr (": "); | 
2015  |  |       ev_printerr (strerror (errno));  | 
2016  |  |       ev_printerr ("\n"); | 
2017  |  | #else  | 
2018  | 0  |       perror (msg);  | 
2019  | 0  | #endif  | 
2020  | 0  |       abort ();  | 
2021  | 0  |     }  | 
2022  | 0  | }  | 
2023  |  |  | 
2024  |  | static void *  | 
2025  |  | ev_realloc_emul (void *ptr, long size) EV_NOEXCEPT  | 
2026  | 41.4k  | { | 
2027  |  |   /* some systems, notably openbsd and darwin, fail to properly  | 
2028  |  |    * implement realloc (x, 0) (as required by both ansi c-89 and  | 
2029  |  |    * the single unix specification, so work around them here.  | 
2030  |  |    * recently, also (at least) fedora and debian started breaking it,  | 
2031  |  |    * despite documenting it otherwise.  | 
2032  |  |    */  | 
2033  |  |  | 
2034  | 41.4k  |   if (size)  | 
2035  | 5.02k  |     return realloc (ptr, size);  | 
2036  |  |  | 
2037  | 36.4k  |   free (ptr);  | 
2038  | 36.4k  |   return 0;  | 
2039  | 41.4k  | }  | 
2040  |  |  | 
2041  |  | static void *(*alloc)(void *ptr, long size) EV_NOEXCEPT = ev_realloc_emul;  | 
2042  |  |  | 
2043  |  | ecb_cold  | 
2044  |  | void  | 
2045  |  | ev_set_allocator (void *(*cb)(void *ptr, long size) EV_NOEXCEPT) EV_NOEXCEPT  | 
2046  | 0  | { | 
2047  | 0  |   alloc = cb;  | 
2048  | 0  | }  | 
2049  |  |  | 
2050  |  | inline_speed void *  | 
2051  |  | ev_realloc (void *ptr, long size)  | 
2052  | 41.4k  | { | 
2053  | 41.4k  |   ptr = alloc (ptr, size);  | 
2054  |  |  | 
2055  | 41.4k  |   if (!ptr && size)  | 
2056  | 0  |     { | 
2057  |  | #if EV_AVOID_STDIO  | 
2058  |  |       ev_printerr ("(libev) memory allocation failed, aborting.\n"); | 
2059  |  | #else  | 
2060  | 0  |       fprintf (stderr, "(libev) cannot allocate %ld bytes, aborting.", size);  | 
2061  | 0  | #endif  | 
2062  | 0  |       abort ();  | 
2063  | 0  |     }  | 
2064  |  |  | 
2065  | 41.4k  |   return ptr;  | 
2066  | 41.4k  | }  | 
2067  |  |  | 
2068  | 1.67k  | #define ev_malloc(size) ev_realloc (0, (size))  | 
2069  | 36.4k  | #define ev_free(ptr)    ev_realloc ((ptr), 0)  | 
2070  |  |  | 
2071  |  | /*****************************************************************************/  | 
2072  |  |  | 
2073  |  | /* set in reify when reification needed */  | 
2074  | 1.67k  | #define EV_ANFD_REIFY 1  | 
2075  |  |  | 
2076  |  | /* file descriptor info structure */  | 
2077  |  | typedef struct  | 
2078  |  | { | 
2079  |  |   WL head;  | 
2080  |  |   unsigned char events; /* the events watched for */  | 
2081  |  |   unsigned char reify;  /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */  | 
2082  |  |   unsigned char emask;  /* some backends store the actual kernel mask in here */  | 
2083  |  |   unsigned char eflags; /* flags field for use by backends */  | 
2084  |  | #if EV_USE_EPOLL  | 
2085  |  |   unsigned int egen;    /* generation counter to counter epoll bugs */  | 
2086  |  | #endif  | 
2087  |  | #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP  | 
2088  |  |   SOCKET handle;  | 
2089  |  | #endif  | 
2090  |  | #if EV_USE_IOCP  | 
2091  |  |   OVERLAPPED or, ow;  | 
2092  |  | #endif  | 
2093  |  | } ANFD;  | 
2094  |  |  | 
2095  |  | /* stores the pending event set for a given watcher */  | 
2096  |  | typedef struct  | 
2097  |  | { | 
2098  |  |   W w;  | 
2099  |  |   int events; /* the pending event set for the given watcher */  | 
2100  |  | } ANPENDING;  | 
2101  |  |  | 
2102  |  | #if EV_USE_INOTIFY  | 
2103  |  | /* hash table entry per inotify-id */  | 
2104  |  | typedef struct  | 
2105  |  | { | 
2106  |  |   WL head;  | 
2107  |  | } ANFS;  | 
2108  |  | #endif  | 
2109  |  |  | 
2110  |  | /* Heap Entry */  | 
2111  |  | #if EV_HEAP_CACHE_AT  | 
2112  |  |   /* a heap element */  | 
2113  |  |   typedef struct { | 
2114  |  |     ev_tstamp at;  | 
2115  |  |     WT w;  | 
2116  |  |   } ANHE;  | 
2117  |  |  | 
2118  | 0  |   #define ANHE_w(he)        (he).w     /* access watcher, read-write */  | 
2119  | 0  |   #define ANHE_at(he)       (he).at    /* access cached at, read-only */  | 
2120  | 0  |   #define ANHE_at_cache(he) (he).at = (he).w->at /* update at from watcher */  | 
2121  |  | #else  | 
2122  |  |   /* a heap element */  | 
2123  |  |   typedef WT ANHE;  | 
2124  |  |  | 
2125  |  |   #define ANHE_w(he)        (he)  | 
2126  |  |   #define ANHE_at(he)       (he)->at  | 
2127  |  |   #define ANHE_at_cache(he)  | 
2128  |  | #endif  | 
2129  |  |  | 
2130  |  | #if EV_MULTIPLICITY  | 
2131  |  |  | 
2132  |  |   struct ev_loop  | 
2133  |  |   { | 
2134  |  |     ev_tstamp ev_rt_now;  | 
2135  | 3.34k  |     #define ev_rt_now ((loop)->ev_rt_now)  | 
2136  |  |     #define VAR(name,decl) decl;  | 
2137  |  |       #include "ev_vars.h"  | 
2138  |  |     #undef VAR  | 
2139  |  |   };  | 
2140  |  |   #include "ev_wrap.h"  | 
2141  |  |  | 
2142  |  |   static struct ev_loop default_loop_struct;  | 
2143  |  |   EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */  | 
2144  |  |  | 
2145  |  | #else  | 
2146  |  |  | 
2147  |  |   EV_API_DECL ev_tstamp ev_rt_now = EV_TS_CONST (0.); /* needs to be initialised to make it a definition despite extern */  | 
2148  |  |   #define VAR(name,decl) static decl;  | 
2149  |  |     #include "ev_vars.h"  | 
2150  |  |   #undef VAR  | 
2151  |  |  | 
2152  |  |   static int ev_default_loop_ptr;  | 
2153  |  |  | 
2154  |  | #endif  | 
2155  |  |  | 
2156  |  | #if EV_FEATURE_API  | 
2157  | 0  | # define EV_RELEASE_CB if (ecb_expect_false (release_cb)) release_cb (EV_A)  | 
2158  | 0  | # define EV_ACQUIRE_CB if (ecb_expect_false (acquire_cb)) acquire_cb (EV_A)  | 
2159  | 0  | # define EV_INVOKE_PENDING invoke_cb (EV_A)  | 
2160  |  | #else  | 
2161  |  | # define EV_RELEASE_CB (void)0  | 
2162  |  | # define EV_ACQUIRE_CB (void)0  | 
2163  |  | # define EV_INVOKE_PENDING ev_invoke_pending (EV_A)  | 
2164  |  | #endif  | 
2165  |  |  | 
2166  |  | #define EVBREAK_RECURSE 0x80  | 
2167  |  |  | 
2168  |  | /*****************************************************************************/  | 
2169  |  |  | 
2170  |  | #ifndef EV_HAVE_EV_TIME  | 
2171  |  | ev_tstamp  | 
2172  |  | ev_time (void) EV_NOEXCEPT  | 
2173  | 1.67k  | { | 
2174  | 1.67k  | #if EV_USE_REALTIME  | 
2175  | 1.67k  |   if (ecb_expect_true (have_realtime))  | 
2176  | 1.67k  |     { | 
2177  | 1.67k  |       struct timespec ts;  | 
2178  | 1.67k  |       clock_gettime (CLOCK_REALTIME, &ts);  | 
2179  | 1.67k  |       return EV_TS_GET (ts);  | 
2180  | 1.67k  |     }  | 
2181  | 0  | #endif  | 
2182  |  |  | 
2183  | 0  |   { | 
2184  | 0  |     struct timeval tv;  | 
2185  | 0  |     gettimeofday (&tv, 0);  | 
2186  | 0  |     return EV_TV_GET (tv);  | 
2187  | 1.67k  |   }  | 
2188  | 1.67k  | }  | 
2189  |  | #endif  | 
2190  |  |  | 
2191  |  | inline_size ev_tstamp  | 
2192  |  | get_clock (void)  | 
2193  | 1.67k  | { | 
2194  | 1.67k  | #if EV_USE_MONOTONIC  | 
2195  | 1.67k  |   if (ecb_expect_true (have_monotonic))  | 
2196  | 1.67k  |     { | 
2197  | 1.67k  |       struct timespec ts;  | 
2198  | 1.67k  |       clock_gettime (CLOCK_MONOTONIC, &ts);  | 
2199  | 1.67k  |       return EV_TS_GET (ts);  | 
2200  | 1.67k  |     }  | 
2201  | 0  | #endif  | 
2202  |  |  | 
2203  | 0  |   return ev_time ();  | 
2204  | 1.67k  | }  | 
2205  |  |  | 
2206  |  | #if EV_MULTIPLICITY  | 
2207  |  | ev_tstamp  | 
2208  |  | ev_now (EV_P) EV_NOEXCEPT  | 
2209  | 0  | { | 
2210  | 0  |   return ev_rt_now;  | 
2211  | 0  | }  | 
2212  |  | #endif  | 
2213  |  |  | 
2214  |  | ev_tstamp  | 
2215  |  | ev_monotonic_now (EV_P) EV_THROW  | 
2216  | 0  | { | 
2217  | 0  |   return mn_now;  | 
2218  | 0  | }  | 
2219  |  |  | 
2220  |  | ev_tstamp  | 
2221  |  | ev_monotonic_time (void) EV_THROW  | 
2222  | 0  | { | 
2223  | 0  |   return get_clock();  | 
2224  | 0  | }  | 
2225  |  |  | 
2226  |  | void  | 
2227  |  | ev_sleep (ev_tstamp delay) EV_NOEXCEPT  | 
2228  | 0  | { | 
2229  | 0  |   if (delay > EV_TS_CONST (0.))  | 
2230  | 0  |     { | 
2231  | 0  | #if EV_USE_NANOSLEEP  | 
2232  | 0  |       struct timespec ts;  | 
2233  |  | 
  | 
2234  | 0  |       EV_TS_SET (ts, delay);  | 
2235  | 0  |       nanosleep (&ts, 0);  | 
2236  |  | #elif defined _WIN32  | 
2237  |  |       /* maybe this should round up, as ms is very low resolution */  | 
2238  |  |       /* compared to select (µs) or nanosleep (ns) */  | 
2239  |  |       Sleep ((unsigned long)(EV_TS_TO_MSEC (delay)));  | 
2240  |  | #else  | 
2241  |  |       struct timeval tv;  | 
2242  |  |  | 
2243  |  |       /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */  | 
2244  |  |       /* something not guaranteed by newer posix versions, but guaranteed */  | 
2245  |  |       /* by older ones */  | 
2246  |  |       EV_TV_SET (tv, delay);  | 
2247  |  |       select (0, 0, 0, 0, &tv);  | 
2248  |  | #endif  | 
2249  | 0  |     }  | 
2250  | 0  | }  | 
2251  |  |  | 
2252  |  | /*****************************************************************************/  | 
2253  |  |  | 
2254  | 3.34k  | #define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */  | 
2255  |  |  | 
2256  |  | /* find a suitable new size for the given array, */  | 
2257  |  | /* hopefully by rounding to a nice-to-malloc size */  | 
2258  |  | inline_size int  | 
2259  |  | array_nextsize (int elem, int cur, int cnt)  | 
2260  | 3.34k  | { | 
2261  | 3.34k  |   int ncur = cur + 1;  | 
2262  |  |  | 
2263  | 3.34k  |   do  | 
2264  | 6.67k  |     ncur <<= 1;  | 
2265  | 6.67k  |   while (cnt > ncur);  | 
2266  |  |  | 
2267  |  |   /* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */  | 
2268  | 3.34k  |   if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)  | 
2269  | 0  |     { | 
2270  | 0  |       ncur *= elem;  | 
2271  | 0  |       ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1);  | 
2272  | 0  |       ncur = ncur - sizeof (void *) * 4;  | 
2273  | 0  |       ncur /= elem;  | 
2274  | 0  |     }  | 
2275  |  |  | 
2276  | 3.34k  |   return ncur;  | 
2277  | 3.34k  | }  | 
2278  |  |  | 
2279  |  | ecb_noinline ecb_cold  | 
2280  |  | static void *  | 
2281  |  | array_realloc (int elem, void *base, int *cur, int cnt)  | 
2282  | 3.34k  | { | 
2283  | 3.34k  |   *cur = array_nextsize (elem, *cur, cnt);  | 
2284  | 3.34k  |   return ev_realloc (base, elem * *cur);  | 
2285  | 3.34k  | }  | 
2286  |  |  | 
2287  |  | #define array_needsize_noinit(base,offset,count)  | 
2288  |  |  | 
2289  |  | #define array_needsize_zerofill(base,offset,count)  \  | 
2290  | 1.67k  |   memset ((void *)(base + offset), 0, sizeof (*(base)) * (count))  | 
2291  |  |  | 
2292  |  | #define array_needsize(type,base,cur,cnt,init)      \  | 
2293  | 3.34k  |   if (ecb_expect_false ((cnt) > (cur)))       \  | 
2294  | 3.34k  |     {               \ | 
2295  | 3.34k  |       ecb_unused int ocur_ = (cur);        \  | 
2296  | 3.34k  |       (base) = (type *)array_realloc       \  | 
2297  | 3.34k  |          (sizeof (type), (base), &(cur), (cnt));    \  | 
2298  | 3.34k  |       init ((base), ocur_, ((cur) - ocur_));      \  | 
2299  | 3.34k  |     }  | 
2300  |  |  | 
2301  |  | #if 0  | 
2302  |  | #define array_slim(type,stem)         \  | 
2303  |  |   if (stem ## max < array_roundsize (stem ## cnt >> 2))   \  | 
2304  |  |     {               \ | 
2305  |  |       stem ## max = array_roundsize (stem ## cnt >> 1);   \  | 
2306  |  |       base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\  | 
2307  |  |       fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\  | 
2308  |  |     }  | 
2309  |  | #endif  | 
2310  |  |  | 
2311  |  | #define array_free(stem, idx) \  | 
2312  | 33.1k  |   ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; stem ## s idx = 0  | 
2313  |  |  | 
2314  |  | /*****************************************************************************/  | 
2315  |  |  | 
2316  |  | /* dummy callback for pending events */  | 
2317  |  | ecb_noinline  | 
2318  |  | static void  | 
2319  |  | pendingcb (EV_P_ ev_prepare *w, int revents)  | 
2320  | 0  | { | 
2321  | 0  | }  | 
2322  |  |  | 
2323  |  | ecb_noinline  | 
2324  |  | void  | 
2325  |  | ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT  | 
2326  | 2  | { | 
2327  | 2  |   W w_ = (W)w;  | 
2328  | 2  |   int pri = ABSPRI (w_);  | 
2329  |  |  | 
2330  | 2  |   if (ecb_expect_false (w_->pending))  | 
2331  | 0  |     pendings [pri][w_->pending - 1].events |= revents;  | 
2332  | 2  |   else  | 
2333  | 2  |     { | 
2334  | 2  |       w_->pending = ++pendingcnt [pri];  | 
2335  | 2  |       array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, array_needsize_noinit);  | 
2336  | 2  |       pendings [pri][w_->pending - 1].w      = w_;  | 
2337  | 2  |       pendings [pri][w_->pending - 1].events = revents;  | 
2338  | 2  |     }  | 
2339  |  |  | 
2340  | 2  |   pendingpri = NUMPRI - 1;  | 
2341  | 2  | }  | 
2342  |  |  | 
2343  |  | inline_speed void  | 
2344  |  | feed_reverse (EV_P_ W w)  | 
2345  | 0  | { | 
2346  | 0  |   array_needsize (W, rfeeds, rfeedmax, rfeedcnt + 1, array_needsize_noinit);  | 
2347  | 0  |   rfeeds [rfeedcnt++] = w;  | 
2348  | 0  | }  | 
2349  |  |  | 
2350  |  | inline_size void  | 
2351  |  | feed_reverse_done (EV_P_ int revents)  | 
2352  | 0  | { | 
2353  | 0  |   do  | 
2354  | 0  |     ev_feed_event (EV_A_ rfeeds [--rfeedcnt], revents);  | 
2355  | 0  |   while (rfeedcnt);  | 
2356  | 0  | }  | 
2357  |  |  | 
2358  |  | inline_speed void  | 
2359  |  | queue_events (EV_P_ W *events, int eventcnt, int type)  | 
2360  | 0  | { | 
2361  | 0  |   int i;  | 
2362  |  | 
  | 
2363  | 0  |   for (i = 0; i < eventcnt; ++i)  | 
2364  | 0  |     ev_feed_event (EV_A_ events [i], type);  | 
2365  | 0  | }  | 
2366  |  |  | 
2367  |  | /*****************************************************************************/  | 
2368  |  |  | 
2369  |  | inline_speed void  | 
2370  |  | fd_event_nocheck (EV_P_ int fd, int revents)  | 
2371  | 0  | { | 
2372  | 0  |   ANFD *anfd = anfds + fd;  | 
2373  | 0  |   ev_io *w;  | 
2374  |  | 
  | 
2375  | 0  |   for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)  | 
2376  | 0  |     { | 
2377  | 0  |       int ev = w->events & revents;  | 
2378  |  | 
  | 
2379  | 0  |       if (ev)  | 
2380  | 0  |         ev_feed_event (EV_A_ (W)w, ev);  | 
2381  | 0  |     }  | 
2382  | 0  | }  | 
2383  |  |  | 
2384  |  | /* do not submit kernel events for fds that have reify set */  | 
2385  |  | /* because that means they changed while we were polling for new events */  | 
2386  |  | inline_speed void  | 
2387  |  | fd_event (EV_P_ int fd, int revents)  | 
2388  | 0  | { | 
2389  | 0  |   ANFD *anfd = anfds + fd;  | 
2390  |  | 
  | 
2391  | 0  |   if (ecb_expect_true (!anfd->reify))  | 
2392  | 0  |     fd_event_nocheck (EV_A_ fd, revents);  | 
2393  | 0  | }  | 
2394  |  |  | 
2395  |  | void  | 
2396  |  | ev_feed_fd_event (EV_P_ int fd, int revents) EV_NOEXCEPT  | 
2397  | 0  | { | 
2398  | 0  |   if (fd >= 0 && fd < anfdmax)  | 
2399  | 0  |     fd_event_nocheck (EV_A_ fd, revents);  | 
2400  | 0  | }  | 
2401  |  |  | 
2402  |  | /* make sure the external fd watch events are in-sync */  | 
2403  |  | /* with the kernel/libev internal state */  | 
2404  |  | inline_size void  | 
2405  |  | fd_reify (EV_P)  | 
2406  | 0  | { | 
2407  | 0  |   int i;  | 
2408  |  |  | 
2409  |  |   /* most backends do not modify the fdchanges list in backend_modfiy.  | 
2410  |  |    * except io_uring, which has fixed-size buffers which might force us  | 
2411  |  |    * to handle events in backend_modify, causing fdchanges to be amended,  | 
2412  |  |    * which could result in an endless loop.  | 
2413  |  |    * to avoid this, we do not dynamically handle fds that were added  | 
2414  |  |    * during fd_reify. that means that for those backends, fdchangecnt  | 
2415  |  |    * might be non-zero during poll, which must cause them to not block.  | 
2416  |  |    * to not put too much of a burden on other backends, this detail  | 
2417  |  |    * needs to be handled in the backend.  | 
2418  |  |    */  | 
2419  | 0  |   int changecnt = fdchangecnt;  | 
2420  |  | 
  | 
2421  |  | #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP  | 
2422  |  |   for (i = 0; i < changecnt; ++i)  | 
2423  |  |     { | 
2424  |  |       int fd = fdchanges [i];  | 
2425  |  |       ANFD *anfd = anfds + fd;  | 
2426  |  |  | 
2427  |  |       if (anfd->reify & EV__IOFDSET && anfd->head)  | 
2428  |  |         { | 
2429  |  |           SOCKET handle = EV_FD_TO_WIN32_HANDLE (fd);  | 
2430  |  |  | 
2431  |  |           if (handle != anfd->handle)  | 
2432  |  |             { | 
2433  |  |               unsigned long arg;  | 
2434  |  |  | 
2435  |  |               assert (("libev: only socket fds supported in this configuration", ioctlsocket (handle, FIONREAD, &arg) == 0)); | 
2436  |  |  | 
2437  |  |               /* handle changed, but fd didn't - we need to do it in two steps */  | 
2438  |  |               backend_modify (EV_A_ fd, anfd->events, 0);  | 
2439  |  |               anfd->events = 0;  | 
2440  |  |               anfd->handle = handle;  | 
2441  |  |             }  | 
2442  |  |         }  | 
2443  |  |     }  | 
2444  |  | #endif  | 
2445  |  | 
  | 
2446  | 0  |   for (i = 0; i < changecnt; ++i)  | 
2447  | 0  |     { | 
2448  | 0  |       int fd = fdchanges [i];  | 
2449  | 0  |       ANFD *anfd = anfds + fd;  | 
2450  | 0  |       ev_io *w;  | 
2451  |  | 
  | 
2452  | 0  |       unsigned char o_events = anfd->events;  | 
2453  | 0  |       unsigned char o_reify  = anfd->reify;  | 
2454  |  | 
  | 
2455  | 0  |       anfd->reify = 0;  | 
2456  |  |  | 
2457  |  |       /*if (ecb_expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */  | 
2458  | 0  |         { | 
2459  | 0  |           anfd->events = 0;  | 
2460  |  | 
  | 
2461  | 0  |           for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)  | 
2462  | 0  |             anfd->events |= (unsigned char)w->events;  | 
2463  |  | 
  | 
2464  | 0  |           if (o_events != anfd->events)  | 
2465  | 0  |             o_reify = EV__IOFDSET; /* actually |= */  | 
2466  | 0  |         }  | 
2467  |  | 
  | 
2468  | 0  |       if (o_reify & EV__IOFDSET)  | 
2469  | 0  |         backend_modify (EV_A_ fd, o_events, anfd->events);  | 
2470  | 0  |     }  | 
2471  |  |  | 
2472  |  |   /* normally, fdchangecnt hasn't changed. if it has, then new fds have been added.  | 
2473  |  |    * this is a rare case (see beginning comment in this function), so we copy them to the  | 
2474  |  |    * front and hope the backend handles this case.  | 
2475  |  |    */  | 
2476  | 0  |   if (ecb_expect_false (fdchangecnt != changecnt))  | 
2477  | 0  |     memmove (fdchanges, fdchanges + changecnt, (fdchangecnt - changecnt) * sizeof (*fdchanges));  | 
2478  |  | 
  | 
2479  | 0  |   fdchangecnt -= changecnt;  | 
2480  | 0  | }  | 
2481  |  |  | 
2482  |  | /* something about the given fd changed */  | 
2483  |  | inline_size  | 
2484  |  | void  | 
2485  |  | fd_change (EV_P_ int fd, int flags)  | 
2486  | 1.67k  | { | 
2487  | 1.67k  |   unsigned char reify = anfds [fd].reify;  | 
2488  | 1.67k  |   anfds [fd].reify = reify | flags;  | 
2489  |  |  | 
2490  | 1.67k  |   if (ecb_expect_true (!reify))  | 
2491  | 1.67k  |     { | 
2492  | 1.67k  |       ++fdchangecnt;  | 
2493  | 1.67k  |       array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit);  | 
2494  | 1.67k  |       fdchanges [fdchangecnt - 1] = fd;  | 
2495  | 1.67k  |     }  | 
2496  | 1.67k  | }  | 
2497  |  |  | 
2498  |  | /* the given fd is invalid/unusable, so make sure it doesn't hurt us anymore */  | 
2499  |  | inline_speed ecb_cold void  | 
2500  |  | fd_kill (EV_P_ int fd)  | 
2501  | 0  | { | 
2502  | 0  |   ev_io *w;  | 
2503  |  | 
  | 
2504  | 0  |   while ((w = (ev_io *)anfds [fd].head))  | 
2505  | 0  |     { | 
2506  | 0  |       ev_io_stop (EV_A_ w);  | 
2507  | 0  |       ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);  | 
2508  | 0  |     }  | 
2509  | 0  | }  | 
2510  |  |  | 
2511  |  | /* check whether the given fd is actually valid, for error recovery */  | 
2512  |  | inline_size ecb_cold int  | 
2513  |  | fd_valid (int fd)  | 
2514  | 0  | { | 
2515  |  | #ifdef _WIN32  | 
2516  |  |   return EV_FD_TO_WIN32_HANDLE (fd) != -1;  | 
2517  |  | #else  | 
2518  | 0  |   return fcntl (fd, F_GETFD) != -1;  | 
2519  | 0  | #endif  | 
2520  | 0  | }  | 
2521  |  |  | 
2522  |  | /* called on EBADF to verify fds */  | 
2523  |  | ecb_noinline ecb_cold  | 
2524  |  | static void  | 
2525  |  | fd_ebadf (EV_P)  | 
2526  | 0  | { | 
2527  | 0  |   int fd;  | 
2528  |  | 
  | 
2529  | 0  |   for (fd = 0; fd < anfdmax; ++fd)  | 
2530  | 0  |     if (anfds [fd].events)  | 
2531  | 0  |       if (!fd_valid (fd) && errno == EBADF)  | 
2532  | 0  |         fd_kill (EV_A_ fd);  | 
2533  | 0  | }  | 
2534  |  |  | 
2535  |  | /* called on ENOMEM in select/poll to kill some fds and retry */  | 
2536  |  | ecb_noinline ecb_cold  | 
2537  |  | static void  | 
2538  |  | fd_enomem (EV_P)  | 
2539  | 0  | { | 
2540  | 0  |   int fd;  | 
2541  |  | 
  | 
2542  | 0  |   for (fd = anfdmax; fd--; )  | 
2543  | 0  |     if (anfds [fd].events)  | 
2544  | 0  |       { | 
2545  | 0  |         fd_kill (EV_A_ fd);  | 
2546  | 0  |         break;  | 
2547  | 0  |       }  | 
2548  | 0  | }  | 
2549  |  |  | 
2550  |  | /* usually called after fork if backend needs to re-arm all fds from scratch */  | 
2551  |  | ecb_noinline  | 
2552  |  | static void  | 
2553  |  | fd_rearm_all (EV_P)  | 
2554  | 0  | { | 
2555  | 0  |   int fd;  | 
2556  |  | 
  | 
2557  | 0  |   for (fd = 0; fd < anfdmax; ++fd)  | 
2558  | 0  |     if (anfds [fd].events)  | 
2559  | 0  |       { | 
2560  | 0  |         anfds [fd].events = 0;  | 
2561  | 0  |         anfds [fd].emask  = 0;  | 
2562  | 0  |         fd_change (EV_A_ fd, EV__IOFDSET | EV_ANFD_REIFY);  | 
2563  | 0  |       }  | 
2564  | 0  | }  | 
2565  |  |  | 
2566  |  | /* used to prepare libev internal fd's */  | 
2567  |  | /* this is not fork-safe */  | 
2568  |  | inline_speed void  | 
2569  |  | fd_intern (int fd)  | 
2570  | 1.67k  | { | 
2571  |  | #ifdef _WIN32  | 
2572  |  |   unsigned long arg = 1;  | 
2573  |  |   ioctlsocket (EV_FD_TO_WIN32_HANDLE (fd), FIONBIO, &arg);  | 
2574  |  | #else  | 
2575  | 1.67k  |   fcntl (fd, F_SETFD, FD_CLOEXEC);  | 
2576  | 1.67k  |   fcntl (fd, F_SETFL, O_NONBLOCK);  | 
2577  | 1.67k  | #endif  | 
2578  | 1.67k  | }  | 
2579  |  |  | 
2580  |  | /*****************************************************************************/  | 
2581  |  |  | 
2582  |  | /*  | 
2583  |  |  * the heap functions want a real array index. array index 0 is guaranteed to not  | 
2584  |  |  * be in-use at any time. the first heap entry is at array [HEAP0]. DHEAP gives  | 
2585  |  |  * the branching factor of the d-tree.  | 
2586  |  |  */  | 
2587  |  |  | 
2588  |  | /*  | 
2589  |  |  * at the moment we allow libev the luxury of two heaps,  | 
2590  |  |  * a small-code-size 2-heap one and a ~1.5kb larger 4-heap  | 
2591  |  |  * which is more cache-efficient.  | 
2592  |  |  * the difference is about 5% with 50000+ watchers.  | 
2593  |  |  */  | 
2594  |  | #if EV_USE_4HEAP  | 
2595  |  |  | 
2596  | 0  | #define DHEAP 4  | 
2597  | 0  | #define HEAP0 (DHEAP - 1) /* index of first element in heap */  | 
2598  | 0  | #define HPARENT(k) ((((k) - HEAP0 - 1) / DHEAP) + HEAP0)  | 
2599  | 0  | #define UPHEAP_DONE(p,k) ((p) == (k))  | 
2600  |  |  | 
2601  |  | /* away from the root */  | 
2602  |  | inline_speed void  | 
2603  |  | downheap (ANHE *heap, int N, int k)  | 
2604  | 0  | { | 
2605  | 0  |   ANHE he = heap [k];  | 
2606  | 0  |   ANHE *E = heap + N + HEAP0;  | 
2607  |  | 
  | 
2608  | 0  |   for (;;)  | 
2609  | 0  |     { | 
2610  | 0  |       ev_tstamp minat;  | 
2611  | 0  |       ANHE *minpos;  | 
2612  | 0  |       ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1;  | 
2613  |  |  | 
2614  |  |       /* find minimum child */  | 
2615  | 0  |       if (ecb_expect_true (pos + DHEAP - 1 < E))  | 
2616  | 0  |         { | 
2617  | 0  |           /* fast path */                               (minpos = pos + 0), (minat = ANHE_at (*minpos));  | 
2618  | 0  |           if (               minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));  | 
2619  | 0  |           if (               minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));  | 
2620  | 0  |           if (               minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));  | 
2621  | 0  |         }  | 
2622  | 0  |       else if (pos < E)  | 
2623  | 0  |         { | 
2624  | 0  |           /* slow path */                               (minpos = pos + 0), (minat = ANHE_at (*minpos));  | 
2625  | 0  |           if (pos + 1 < E && minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));  | 
2626  | 0  |           if (pos + 2 < E && minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));  | 
2627  | 0  |           if (pos + 3 < E && minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));  | 
2628  | 0  |         }  | 
2629  | 0  |       else  | 
2630  | 0  |         break;  | 
2631  |  |  | 
2632  | 0  |       if (ANHE_at (he) <= minat)  | 
2633  | 0  |         break;  | 
2634  |  |  | 
2635  | 0  |       heap [k] = *minpos;  | 
2636  | 0  |       ev_active (ANHE_w (*minpos)) = k;  | 
2637  |  | 
  | 
2638  | 0  |       k = minpos - heap;  | 
2639  | 0  |     }  | 
2640  |  | 
  | 
2641  | 0  |   heap [k] = he;  | 
2642  | 0  |   ev_active (ANHE_w (he)) = k;  | 
2643  | 0  | }  | 
2644  |  |  | 
2645  |  | #else /* not 4HEAP */  | 
2646  |  |  | 
2647  |  | #define HEAP0 1  | 
2648  |  | #define HPARENT(k) ((k) >> 1)  | 
2649  |  | #define UPHEAP_DONE(p,k) (!(p))  | 
2650  |  |  | 
2651  |  | /* away from the root */  | 
2652  |  | inline_speed void  | 
2653  |  | downheap (ANHE *heap, int N, int k)  | 
2654  |  | { | 
2655  |  |   ANHE he = heap [k];  | 
2656  |  |  | 
2657  |  |   for (;;)  | 
2658  |  |     { | 
2659  |  |       int c = k << 1;  | 
2660  |  |  | 
2661  |  |       if (c >= N + HEAP0)  | 
2662  |  |         break;  | 
2663  |  |  | 
2664  |  |       c += c + 1 < N + HEAP0 && ANHE_at (heap [c]) > ANHE_at (heap [c + 1])  | 
2665  |  |            ? 1 : 0;  | 
2666  |  |  | 
2667  |  |       if (ANHE_at (he) <= ANHE_at (heap [c]))  | 
2668  |  |         break;  | 
2669  |  |  | 
2670  |  |       heap [k] = heap [c];  | 
2671  |  |       ev_active (ANHE_w (heap [k])) = k;  | 
2672  |  |  | 
2673  |  |       k = c;  | 
2674  |  |     }  | 
2675  |  |  | 
2676  |  |   heap [k] = he;  | 
2677  |  |   ev_active (ANHE_w (he)) = k;  | 
2678  |  | }  | 
2679  |  | #endif  | 
2680  |  |  | 
2681  |  | /* towards the root */  | 
2682  |  | inline_speed void  | 
2683  |  | upheap (ANHE *heap, int k)  | 
2684  | 0  | { | 
2685  | 0  |   ANHE he = heap [k];  | 
2686  |  | 
  | 
2687  | 0  |   for (;;)  | 
2688  | 0  |     { | 
2689  | 0  |       int p = HPARENT (k);  | 
2690  |  | 
  | 
2691  | 0  |       if (UPHEAP_DONE (p, k) || ANHE_at (heap [p]) <= ANHE_at (he))  | 
2692  | 0  |         break;  | 
2693  |  |  | 
2694  | 0  |       heap [k] = heap [p];  | 
2695  | 0  |       ev_active (ANHE_w (heap [k])) = k;  | 
2696  | 0  |       k = p;  | 
2697  | 0  |     }  | 
2698  |  | 
  | 
2699  | 0  |   heap [k] = he;  | 
2700  | 0  |   ev_active (ANHE_w (he)) = k;  | 
2701  | 0  | }  | 
2702  |  |  | 
2703  |  | /* move an element suitably so it is in a correct place */  | 
2704  |  | inline_size void  | 
2705  |  | adjustheap (ANHE *heap, int N, int k)  | 
2706  | 0  | { | 
2707  | 0  |   if (k > HEAP0 && ANHE_at (heap [k]) <= ANHE_at (heap [HPARENT (k)]))  | 
2708  | 0  |     upheap (heap, k);  | 
2709  | 0  |   else  | 
2710  | 0  |     downheap (heap, N, k);  | 
2711  | 0  | }  | 
2712  |  |  | 
2713  |  | /* rebuild the heap: this function is used only once and executed rarely */  | 
2714  |  | inline_size void  | 
2715  |  | reheap (ANHE *heap, int N)  | 
2716  | 0  | { | 
2717  | 0  |   int i;  | 
2718  |  |  | 
2719  |  |   /* we don't use floyds algorithm, upheap is simpler and is more cache-efficient */  | 
2720  |  |   /* also, this is easy to implement and correct for both 2-heaps and 4-heaps */  | 
2721  | 0  |   for (i = 0; i < N; ++i)  | 
2722  | 0  |     upheap (heap, i + HEAP0);  | 
2723  | 0  | }  | 
2724  |  |  | 
2725  |  | /*****************************************************************************/  | 
2726  |  |  | 
2727  |  | /* associate signal watchers to a signal */  | 
2728  |  | typedef struct  | 
2729  |  | { | 
2730  |  |   EV_ATOMIC_T pending;  | 
2731  |  | #if EV_MULTIPLICITY  | 
2732  |  |   EV_P;  | 
2733  |  | #endif  | 
2734  |  |   WL head;  | 
2735  |  | } ANSIG;  | 
2736  |  |  | 
2737  |  | static ANSIG signals [EV_NSIG - 1];  | 
2738  |  |  | 
2739  |  | /*****************************************************************************/  | 
2740  |  |  | 
2741  |  | #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE  | 
2742  |  |  | 
2743  |  | static int  | 
2744  |  | evpipe_alloc(EV_P)  | 
2745  | 1.67k  | { | 
2746  | 1.67k  |   int fds [2];  | 
2747  |  |  | 
2748  | 1.67k  | # if EV_USE_EVENTFD  | 
2749  | 1.67k  |   fds [0] = -1;  | 
2750  | 1.67k  |   fds [1] = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC);  | 
2751  | 1.67k  |   if (fds [1] < 0 && errno == EINVAL)  | 
2752  | 0  |     fds [1] = eventfd (0, 0);  | 
2753  |  |  | 
2754  | 1.67k  |   if (fds [1] < 0)  | 
2755  | 0  | # endif  | 
2756  | 0  |     { | 
2757  | 0  |       if (pipe(fds))  | 
2758  | 0  |          return -1;  | 
2759  | 0  |       fd_intern (fds [0]);  | 
2760  | 0  |     }  | 
2761  |  |  | 
2762  | 1.67k  |   evpipe [0] = fds [0];  | 
2763  |  |  | 
2764  | 1.67k  |   if (evpipe [1] < 0)  | 
2765  | 1.67k  |     evpipe [1] = fds [1]; /* first call, set write fd */  | 
2766  | 0  |   else  | 
2767  | 0  |     { | 
2768  |  |       /* on subsequent calls, do not change evpipe [1] */  | 
2769  |  |       /* so that evpipe_write can always rely on its value. */  | 
2770  |  |       /* this branch does not do anything sensible on windows, */  | 
2771  |  |       /* so must not be executed on windows */  | 
2772  | 0  |       dup2 (fds [1], evpipe [1]);  | 
2773  | 0  |       close (fds [1]);  | 
2774  | 0  |     }  | 
2775  |  |  | 
2776  | 1.67k  |   fd_intern (evpipe [1]);  | 
2777  | 1.67k  |   return 1;  | 
2778  | 1.67k  | }  | 
2779  |  |  | 
2780  |  | ecb_noinline ecb_cold  | 
2781  |  | static void  | 
2782  |  | evpipe_init (EV_P)  | 
2783  | 1.67k  | { | 
2784  | 1.67k  |   if (!ev_is_active (&pipe_w))  | 
2785  | 0  |     { | 
2786  | 0  |       while (evpipe_alloc(loop) == -1)  | 
2787  | 0  |         ev_syserr("(libev) error creating signal/async pipe"); | 
2788  | 0  |       ev_io_set (&pipe_w, evpipe [0] < 0 ? evpipe [1] : evpipe [0], EV_READ);  | 
2789  | 0  |       ev_io_start (EV_A_ &pipe_w);  | 
2790  | 0  |       ev_unref (EV_A); /* watcher should not keep loop alive */  | 
2791  | 0  |     }  | 
2792  | 1.67k  | }  | 
2793  |  |  | 
2794  |  | inline_speed void  | 
2795  |  | evpipe_write (EV_P_ EV_ATOMIC_T *flag)  | 
2796  | 9  | { | 
2797  | 9  |   ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */  | 
2798  |  |  | 
2799  | 9  |   if (ecb_expect_true (*flag))  | 
2800  | 0  |     return;  | 
2801  |  |  | 
2802  | 9  |   *flag = 1;  | 
2803  | 9  |   ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */  | 
2804  |  |  | 
2805  | 9  |   pipe_write_skipped = 1;  | 
2806  |  |  | 
2807  | 9  |   ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */  | 
2808  |  |  | 
2809  | 9  |   if (pipe_write_wanted)  | 
2810  | 0  |     { | 
2811  | 0  |       int old_errno;  | 
2812  |  | 
  | 
2813  | 0  |       pipe_write_skipped = 0;  | 
2814  | 0  |       ECB_MEMORY_FENCE_RELEASE;  | 
2815  |  | 
  | 
2816  | 0  |       old_errno = errno; /* save errno because write will clobber it */  | 
2817  |  | 
  | 
2818  | 0  | #if EV_USE_EVENTFD  | 
2819  | 0  |       if (evpipe [0] < 0)  | 
2820  | 0  |         { | 
2821  | 0  |           uint64_t counter = 1;  | 
2822  | 0  |           write (evpipe [1], &counter, sizeof (uint64_t));  | 
2823  | 0  |         }  | 
2824  | 0  |       else  | 
2825  | 0  | #endif  | 
2826  | 0  |         { | 
2827  |  | #ifdef _WIN32  | 
2828  |  |           WSABUF buf;  | 
2829  |  |           DWORD sent;  | 
2830  |  |           buf.buf = (char *)&buf;  | 
2831  |  |           buf.len = 1;  | 
2832  |  |           WSASend (EV_FD_TO_WIN32_HANDLE (evpipe [1]), &buf, 1, &sent, 0, 0, 0);  | 
2833  |  | #else  | 
2834  | 0  |           write (evpipe [1], &(evpipe [1]), 1);  | 
2835  | 0  | #endif  | 
2836  | 0  |         }  | 
2837  |  | 
  | 
2838  | 0  |       errno = old_errno;  | 
2839  | 0  |     }  | 
2840  | 9  | }  | 
2841  |  |  | 
2842  |  | /* called whenever the libev signal pipe */  | 
2843  |  | /* got some events (signal, async) */  | 
2844  |  | static void  | 
2845  |  | pipecb (EV_P_ ev_io *iow, int revents)  | 
2846  | 0  | { | 
2847  | 0  |   int i;  | 
2848  |  | 
  | 
2849  | 0  |   if (revents & EV_READ)  | 
2850  | 0  |     { | 
2851  | 0  | #if EV_USE_EVENTFD  | 
2852  | 0  |       if (evpipe [0] < 0)  | 
2853  | 0  |         { | 
2854  | 0  |           uint64_t counter;  | 
2855  | 0  |           read (evpipe [1], &counter, sizeof (uint64_t));  | 
2856  | 0  |         }  | 
2857  | 0  |       else  | 
2858  | 0  | #endif  | 
2859  | 0  |         { | 
2860  | 0  |           char dummy[4];  | 
2861  |  | #ifdef _WIN32  | 
2862  |  |           WSABUF buf;  | 
2863  |  |           DWORD recvd;  | 
2864  |  |           DWORD flags = 0;  | 
2865  |  |           buf.buf = dummy;  | 
2866  |  |           buf.len = sizeof (dummy);  | 
2867  |  |           WSARecv (EV_FD_TO_WIN32_HANDLE (evpipe [0]), &buf, 1, &recvd, &flags, 0, 0);  | 
2868  |  | #else  | 
2869  | 0  |           read (evpipe [0], &dummy, sizeof (dummy));  | 
2870  | 0  | #endif  | 
2871  | 0  |         }  | 
2872  | 0  |     }  | 
2873  |  | 
  | 
2874  | 0  |   pipe_write_skipped = 0;  | 
2875  |  | 
  | 
2876  | 0  |   ECB_MEMORY_FENCE; /* push out skipped, acquire flags */  | 
2877  |  | 
  | 
2878  | 0  | #if EV_SIGNAL_ENABLE  | 
2879  | 0  |   if (sig_pending)  | 
2880  | 0  |     { | 
2881  | 0  |       sig_pending = 0;  | 
2882  |  | 
  | 
2883  | 0  |       ECB_MEMORY_FENCE;  | 
2884  |  | 
  | 
2885  | 0  |       for (i = EV_NSIG - 1; i--; )  | 
2886  | 0  |         if (ecb_expect_false (signals [i].pending))  | 
2887  | 0  |           ev_feed_signal_event (EV_A_ i + 1);  | 
2888  | 0  |     }  | 
2889  | 0  | #endif  | 
2890  |  | 
  | 
2891  | 0  | #if EV_ASYNC_ENABLE  | 
2892  | 0  |   if (async_pending)  | 
2893  | 0  |     { | 
2894  | 0  |       async_pending = 0;  | 
2895  |  | 
  | 
2896  | 0  |       ECB_MEMORY_FENCE;  | 
2897  |  | 
  | 
2898  | 0  |       for (i = asynccnt; i--; )  | 
2899  | 0  |         if (asyncs [i]->sent)  | 
2900  | 0  |           { | 
2901  | 0  |             asyncs [i]->sent = 0;  | 
2902  | 0  |             ECB_MEMORY_FENCE_RELEASE;  | 
2903  | 0  |             ev_feed_event (EV_A_ asyncs [i], EV_ASYNC);  | 
2904  | 0  |           }  | 
2905  | 0  |     }  | 
2906  | 0  | #endif  | 
2907  | 0  | }  | 
2908  |  |  | 
2909  |  | /*****************************************************************************/  | 
2910  |  |  | 
2911  |  | void  | 
2912  |  | ev_feed_signal (int signum) EV_NOEXCEPT  | 
2913  | 9  | { | 
2914  | 9  | #if EV_MULTIPLICITY  | 
2915  | 9  |   EV_P;  | 
2916  | 9  |   ECB_MEMORY_FENCE_ACQUIRE;  | 
2917  | 9  |   EV_A = signals [signum - 1].loop;  | 
2918  |  |  | 
2919  | 9  |   if (!EV_A)  | 
2920  | 0  |     return;  | 
2921  | 9  | #endif  | 
2922  |  |  | 
2923  | 9  |   signals [signum - 1].pending = 1;  | 
2924  | 9  |   evpipe_write (EV_A_ &sig_pending);  | 
2925  | 9  | }  | 
2926  |  |  | 
2927  |  | static void  | 
2928  |  | ev_sighandler (int signum)  | 
2929  | 9  | { | 
2930  |  | #ifdef _WIN32  | 
2931  |  |   signal (signum, ev_sighandler);  | 
2932  |  | #endif  | 
2933  |  |  | 
2934  | 9  |   ev_feed_signal (signum);  | 
2935  | 9  | }  | 
2936  |  |  | 
2937  |  | ecb_noinline  | 
2938  |  | void  | 
2939  |  | ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT  | 
2940  | 0  | { | 
2941  | 0  |   WL w;  | 
2942  |  | 
  | 
2943  | 0  |   if (ecb_expect_false (signum <= 0 || signum >= EV_NSIG))  | 
2944  | 0  |     return;  | 
2945  |  |  | 
2946  | 0  |   --signum;  | 
2947  |  | 
  | 
2948  | 0  | #if EV_MULTIPLICITY  | 
2949  |  |   /* it is permissible to try to feed a signal to the wrong loop */  | 
2950  |  |   /* or, likely more useful, feeding a signal nobody is waiting for */  | 
2951  |  | 
  | 
2952  | 0  |   if (ecb_expect_false (signals [signum].loop != EV_A))  | 
2953  | 0  |     return;  | 
2954  | 0  | #endif  | 
2955  |  |  | 
2956  | 0  |   signals [signum].pending = 0;  | 
2957  | 0  |   ECB_MEMORY_FENCE_RELEASE;  | 
2958  |  | 
  | 
2959  | 0  |   for (w = signals [signum].head; w; w = w->next)  | 
2960  | 0  |     ev_feed_event (EV_A_ (W)w, EV_SIGNAL);  | 
2961  | 0  | }  | 
2962  |  |  | 
2963  |  | #if EV_USE_SIGNALFD  | 
2964  |  | static void  | 
2965  |  | sigfdcb (EV_P_ ev_io *iow, int revents)  | 
2966  | 0  | { | 
2967  | 0  |   struct signalfd_siginfo si[2], *sip; /* these structs are big */  | 
2968  |  | 
  | 
2969  | 0  |   for (;;)  | 
2970  | 0  |     { | 
2971  | 0  |       ssize_t res = read (sigfd, si, sizeof (si));  | 
2972  |  |  | 
2973  |  |       /* not ISO-C, as res might be -1, but works with SuS */  | 
2974  | 0  |       for (sip = si; (char *)sip < (char *)si + res; ++sip)  | 
2975  | 0  |         ev_feed_signal_event (EV_A_ sip->ssi_signo);  | 
2976  |  | 
  | 
2977  | 0  |       if (res < (ssize_t)sizeof (si))  | 
2978  | 0  |         break;  | 
2979  | 0  |     }  | 
2980  | 0  | }  | 
2981  |  | #endif  | 
2982  |  |  | 
2983  |  | #endif  | 
2984  |  |  | 
2985  |  | /*****************************************************************************/  | 
2986  |  |  | 
2987  |  | #if EV_CHILD_ENABLE  | 
2988  |  | static WL childs [EV_PID_HASHSIZE];  | 
2989  |  |  | 
2990  |  | static ev_signal childev;  | 
2991  |  |  | 
2992  |  | #ifndef WIFCONTINUED  | 
2993  |  | # define WIFCONTINUED(status) 0  | 
2994  |  | #endif  | 
2995  |  |  | 
2996  |  | /* handle a single child status event */  | 
2997  |  | inline_speed void  | 
2998  |  | child_reap (EV_P_ int chain, int pid, int status)  | 
2999  | 0  | { | 
3000  | 0  |   ev_child *w;  | 
3001  | 0  |   int traced = WIFSTOPPED (status) || WIFCONTINUED (status);  | 
3002  |  | 
  | 
3003  | 0  |   for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next)  | 
3004  | 0  |     { | 
3005  | 0  |       if ((w->pid == pid || !w->pid)  | 
3006  | 0  |           && (!traced || (w->flags & 1)))  | 
3007  | 0  |         { | 
3008  | 0  |           ev_set_priority (w, EV_MAXPRI); /* need to do it *now*, this *must* be the same prio as the signal watcher itself */  | 
3009  | 0  |           w->rpid    = pid;  | 
3010  | 0  |           w->rstatus = status;  | 
3011  | 0  |           ev_feed_event (EV_A_ (W)w, EV_CHILD);  | 
3012  | 0  |         }  | 
3013  | 0  |     }  | 
3014  | 0  | }  | 
3015  |  |  | 
3016  |  | #ifndef WCONTINUED  | 
3017  |  | # define WCONTINUED 0  | 
3018  |  | #endif  | 
3019  |  |  | 
3020  |  | /* called on sigchld etc., calls waitpid */  | 
3021  |  | static void  | 
3022  |  | childcb (EV_P_ ev_signal *sw, int revents)  | 
3023  | 0  | { | 
3024  | 0  |   int pid, status;  | 
3025  |  |  | 
3026  |  |   /* some systems define WCONTINUED but then fail to support it (linux 2.4) */  | 
3027  | 0  |   if (0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED)))  | 
3028  | 0  |     if (!WCONTINUED  | 
3029  | 0  |         || errno != EINVAL  | 
3030  | 0  |         || 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED)))  | 
3031  | 0  |       return;  | 
3032  |  |  | 
3033  |  |   /* make sure we are called again until all children have been reaped */  | 
3034  |  |   /* we need to do it this way so that the callback gets called before we continue */  | 
3035  | 0  |   ev_feed_event (EV_A_ (W)sw, EV_SIGNAL);  | 
3036  |  | 
  | 
3037  | 0  |   child_reap (EV_A_ pid, pid, status);  | 
3038  | 0  |   if ((EV_PID_HASHSIZE) > 1)  | 
3039  | 0  |     child_reap (EV_A_ 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */  | 
3040  | 0  | }  | 
3041  |  |  | 
3042  |  | #endif  | 
3043  |  |  | 
3044  |  | /*****************************************************************************/  | 
3045  |  |  | 
3046  |  | #if EV_USE_TIMERFD  | 
3047  |  |  | 
3048  |  | static void periodics_reschedule (EV_P);  | 
3049  |  |  | 
3050  |  | static void  | 
3051  |  | timerfdcb (EV_P_ ev_io *iow, int revents)  | 
3052  | 0  | { | 
3053  | 0  |   struct itimerspec its = { 0 }; | 
3054  |  | 
  | 
3055  | 0  |   its.it_value.tv_sec = ev_rt_now + (int)MAX_BLOCKTIME2;  | 
3056  | 0  |   timerfd_settime (timerfd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0);  | 
3057  |  | 
  | 
3058  | 0  |   ev_rt_now = ev_time ();  | 
3059  |  |   /* periodics_reschedule only needs ev_rt_now */  | 
3060  |  |   /* but maybe in the future we want the full treatment. */  | 
3061  |  |   /*  | 
3062  |  |   now_floor = EV_TS_CONST (0.);  | 
3063  |  |   time_update (EV_A_ EV_TSTAMP_HUGE);  | 
3064  |  |   */  | 
3065  | 0  | #if EV_PERIODIC_ENABLE  | 
3066  | 0  |   periodics_reschedule (EV_A);  | 
3067  | 0  | #endif  | 
3068  | 0  | }  | 
3069  |  |  | 
3070  |  | ecb_noinline ecb_cold  | 
3071  |  | static void  | 
3072  |  | evtimerfd_init (EV_P)  | 
3073  | 0  | { | 
3074  | 0  |   if (!ev_is_active (&timerfd_w))  | 
3075  | 0  |     { | 
3076  | 0  |       timerfd = timerfd_create (CLOCK_REALTIME, TFD_NONBLOCK | TFD_CLOEXEC);  | 
3077  |  | 
  | 
3078  | 0  |       if (timerfd >= 0)  | 
3079  | 0  |         { | 
3080  | 0  |           fd_intern (timerfd); /* just to be sure */  | 
3081  |  | 
  | 
3082  | 0  |           ev_io_init (&timerfd_w, timerfdcb, timerfd, EV_READ);  | 
3083  | 0  |           ev_set_priority (&timerfd_w, EV_MINPRI);  | 
3084  | 0  |           ev_io_start (EV_A_ &timerfd_w);  | 
3085  | 0  |           ev_unref (EV_A); /* watcher should not keep loop alive */  | 
3086  |  |  | 
3087  |  |           /* (re-) arm timer */  | 
3088  | 0  |           timerfdcb (EV_A_ 0, 0);  | 
3089  | 0  |         }  | 
3090  | 0  |     }  | 
3091  | 0  | }  | 
3092  |  |  | 
3093  |  | #endif  | 
3094  |  |  | 
3095  |  | /*****************************************************************************/  | 
3096  |  |  | 
3097  |  | #if EV_USE_IOCP  | 
3098  |  | # include "ev_iocp.c"  | 
3099  |  | #endif  | 
3100  |  | #if EV_USE_PORT  | 
3101  |  | # include "ev_port.c"  | 
3102  |  | #endif  | 
3103  |  | #if EV_USE_KQUEUE  | 
3104  |  | # include "ev_kqueue.c"  | 
3105  |  | #endif  | 
3106  |  | #if EV_USE_EPOLL  | 
3107  |  | # include "ev_epoll.c"  | 
3108  |  | #endif  | 
3109  |  | #if EV_USE_LINUXAIO  | 
3110  |  | # include "ev_linuxaio.c"  | 
3111  |  | #endif  | 
3112  |  | #if EV_USE_IOURING  | 
3113  |  | # include "ev_iouring.c"  | 
3114  |  | #endif  | 
3115  |  | #if EV_USE_POLL  | 
3116  |  | # include "ev_poll.c"  | 
3117  |  | #endif  | 
3118  |  | #if EV_USE_SELECT  | 
3119  |  | # include "ev_select.c"  | 
3120  |  | #endif  | 
3121  |  |  | 
3122  |  | ecb_cold int  | 
3123  |  | ev_version_major (void) EV_NOEXCEPT  | 
3124  | 0  | { | 
3125  | 0  |   return EV_VERSION_MAJOR;  | 
3126  | 0  | }  | 
3127  |  |  | 
3128  |  | ecb_cold int  | 
3129  |  | ev_version_minor (void) EV_NOEXCEPT  | 
3130  | 0  | { | 
3131  | 0  |   return EV_VERSION_MINOR;  | 
3132  | 0  | }  | 
3133  |  |  | 
3134  |  | /* return true if we are running with elevated privileges and should ignore env variables */  | 
3135  |  | inline_size ecb_cold int  | 
3136  |  | enable_secure (void)  | 
3137  | 0  | { | 
3138  |  | #ifdef _WIN32  | 
3139  |  |   return 0;  | 
3140  |  | #else  | 
3141  | 0  |   return getuid () != geteuid ()  | 
3142  | 0  |       || getgid () != getegid ();  | 
3143  | 0  | #endif  | 
3144  | 0  | }  | 
3145  |  |  | 
3146  |  | ecb_cold  | 
3147  |  | unsigned int  | 
3148  |  | ev_supported_backends (void) EV_NOEXCEPT  | 
3149  | 1.67k  | { | 
3150  | 1.67k  |   unsigned int flags = 0;  | 
3151  |  |  | 
3152  | 1.67k  |   if (EV_USE_PORT                                      ) flags |= EVBACKEND_PORT;  | 
3153  | 1.67k  |   if (EV_USE_KQUEUE                                    ) flags |= EVBACKEND_KQUEUE;  | 
3154  | 1.67k  |   if (EV_USE_EPOLL                                     ) flags |= EVBACKEND_EPOLL;  | 
3155  | 1.67k  |   if (EV_USE_LINUXAIO                                  ) flags |= EVBACKEND_LINUXAIO;  | 
3156  | 1.67k  |   if (EV_USE_IOURING && ev_linux_version () >= 0x050601) flags |= EVBACKEND_IOURING; /* 5.6.1+ */  | 
3157  | 1.67k  |   if (EV_USE_POLL                                      ) flags |= EVBACKEND_POLL;  | 
3158  | 1.67k  |   if (EV_USE_SELECT                                    ) flags |= EVBACKEND_SELECT;  | 
3159  |  |  | 
3160  | 1.67k  |   return flags;  | 
3161  | 1.67k  | }  | 
3162  |  |  | 
3163  |  | ecb_cold  | 
3164  |  | unsigned int  | 
3165  |  | ev_recommended_backends (void) EV_NOEXCEPT  | 
3166  | 1.67k  | { | 
3167  | 1.67k  |   unsigned int flags = ev_supported_backends ();  | 
3168  |  |  | 
3169  | 1.67k  | #if !defined(__NetBSD__) && !defined(__FreeBSD__)  | 
3170  |  |   /* kqueue is borked on everything but netbsd apparently */  | 
3171  |  |   /* it usually doesn't work correctly on anything but sockets and pipes */  | 
3172  | 1.67k  |   flags &= ~EVBACKEND_KQUEUE;  | 
3173  | 1.67k  | #endif  | 
3174  |  | #ifdef __APPLE__  | 
3175  |  |   /* only select works correctly on that "unix-certified" platform */  | 
3176  |  |   flags &= ~EVBACKEND_KQUEUE; /* horribly broken, even for sockets */  | 
3177  |  |   flags &= ~EVBACKEND_POLL;   /* poll is based on kqueue from 10.5 onwards */  | 
3178  |  | #endif  | 
3179  |  | #ifdef __FreeBSD__  | 
3180  |  |   flags &= ~EVBACKEND_POLL;   /* poll return value is unusable (http://forums.freebsd.org/archive/index.php/t-10270.html) */  | 
3181  |  | #endif  | 
3182  |  |  | 
3183  |  |   /* TODO: linuxaio is very experimental */  | 
3184  | 1.67k  | #if !EV_RECOMMEND_LINUXAIO  | 
3185  | 1.67k  |   flags &= ~EVBACKEND_LINUXAIO;  | 
3186  | 1.67k  | #endif  | 
3187  |  |   /* TODO: iouring is super experimental */  | 
3188  | 1.67k  | #if !EV_RECOMMEND_IOURING  | 
3189  | 1.67k  |   flags &= ~EVBACKEND_IOURING;  | 
3190  | 1.67k  | #endif  | 
3191  |  |  | 
3192  | 1.67k  |   return flags;  | 
3193  | 1.67k  | }  | 
3194  |  |  | 
3195  |  | ecb_cold  | 
3196  |  | unsigned int  | 
3197  |  | ev_embeddable_backends (void) EV_NOEXCEPT  | 
3198  | 0  | { | 
3199  | 0  |   int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT | EVBACKEND_IOURING;  | 
3200  |  |  | 
3201  |  |   /* epoll embeddability broken on all linux versions up to at least 2.6.23 */  | 
3202  | 0  |   if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */  | 
3203  | 0  |     flags &= ~EVBACKEND_EPOLL;  | 
3204  |  |  | 
3205  |  |   /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */  | 
3206  |  | 
  | 
3207  | 0  |   return flags;  | 
3208  | 0  | }  | 
3209  |  |  | 
3210  |  | unsigned int  | 
3211  |  | ev_backend (EV_P) EV_NOEXCEPT  | 
3212  | 1.67k  | { | 
3213  | 1.67k  |   return backend;  | 
3214  | 1.67k  | }  | 
3215  |  |  | 
3216  |  | #if EV_FEATURE_API  | 
3217  |  | unsigned int  | 
3218  |  | ev_iteration (EV_P) EV_NOEXCEPT  | 
3219  | 0  | { | 
3220  | 0  |   return loop_count;  | 
3221  | 0  | }  | 
3222  |  |  | 
3223  |  | unsigned int  | 
3224  |  | ev_depth (EV_P) EV_NOEXCEPT  | 
3225  | 0  | { | 
3226  | 0  |   return loop_depth;  | 
3227  | 0  | }  | 
3228  |  |  | 
3229  |  | void  | 
3230  |  | ev_set_io_collect_interval (EV_P_ ev_tstamp interval) EV_NOEXCEPT  | 
3231  | 0  | { | 
3232  | 0  |   io_blocktime = interval;  | 
3233  | 0  | }  | 
3234  |  |  | 
3235  |  | void  | 
3236  |  | ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval) EV_NOEXCEPT  | 
3237  | 0  | { | 
3238  | 0  |   timeout_blocktime = interval;  | 
3239  | 0  | }  | 
3240  |  |  | 
3241  |  | void  | 
3242  |  | ev_set_userdata (EV_P_ void *data) EV_NOEXCEPT  | 
3243  | 0  | { | 
3244  | 0  |   userdata = data;  | 
3245  | 0  | }  | 
3246  |  |  | 
3247  |  | void *  | 
3248  |  | ev_userdata (EV_P) EV_NOEXCEPT  | 
3249  | 0  | { | 
3250  | 0  |   return userdata;  | 
3251  | 0  | }  | 
3252  |  |  | 
3253  |  | void  | 
3254  |  | ev_set_invoke_pending_cb (EV_P_ ev_loop_callback invoke_pending_cb) EV_NOEXCEPT  | 
3255  | 0  | { | 
3256  | 0  |   invoke_cb = invoke_pending_cb;  | 
3257  | 0  | }  | 
3258  |  |  | 
3259  |  | void  | 
3260  |  | ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_NOEXCEPT, void (*acquire)(EV_P) EV_NOEXCEPT) EV_NOEXCEPT  | 
3261  | 0  | { | 
3262  | 0  |   release_cb = release;  | 
3263  | 0  |   acquire_cb = acquire;  | 
3264  | 0  | }  | 
3265  |  | #endif  | 
3266  |  |  | 
3267  |  | /* initialise a loop structure, must be zero-initialised */  | 
3268  |  | ecb_noinline ecb_cold  | 
3269  |  | static void  | 
3270  |  | loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT  | 
3271  | 1.67k  | { | 
3272  | 1.67k  |   if (!backend)  | 
3273  | 1.67k  |     { | 
3274  | 1.67k  |       origflags = flags;  | 
3275  |  |  | 
3276  | 1.67k  | #if EV_USE_REALTIME  | 
3277  | 1.67k  |       if (!have_realtime)  | 
3278  | 20  |         { | 
3279  | 20  |           struct timespec ts;  | 
3280  |  |  | 
3281  | 20  |           if (!clock_gettime (CLOCK_REALTIME, &ts))  | 
3282  | 20  |             have_realtime = 1;  | 
3283  | 20  |         }  | 
3284  | 1.67k  | #endif  | 
3285  |  |  | 
3286  | 1.67k  | #if EV_USE_MONOTONIC  | 
3287  | 1.67k  |       if (!have_monotonic)  | 
3288  | 20  |         { | 
3289  | 20  |           struct timespec ts;  | 
3290  |  |  | 
3291  | 20  |           if (!clock_gettime (CLOCK_MONOTONIC, &ts))  | 
3292  | 20  |             have_monotonic = 1;  | 
3293  | 20  |         }  | 
3294  | 1.67k  | #endif  | 
3295  |  |  | 
3296  |  |       /* pid check not overridable via env */  | 
3297  | 1.67k  | #ifndef _WIN32  | 
3298  | 1.67k  |       if (flags & EVFLAG_FORKCHECK)  | 
3299  | 0  |         curpid = getpid ();  | 
3300  | 1.67k  | #endif  | 
3301  |  |  | 
3302  | 1.67k  |       if (!(flags & EVFLAG_NOENV)  | 
3303  | 1.67k  |           && !enable_secure ()  | 
3304  | 1.67k  |           && getenv ("LIBEV_FLAGS")) | 
3305  | 0  |         flags = atoi (getenv ("LIBEV_FLAGS")); | 
3306  |  |  | 
3307  | 1.67k  |       ev_rt_now          = ev_time ();  | 
3308  | 1.67k  |       mn_now             = get_clock ();  | 
3309  | 1.67k  |       now_floor          = mn_now;  | 
3310  | 1.67k  |       rtmn_diff          = ev_rt_now - mn_now;  | 
3311  | 1.67k  | #if EV_FEATURE_API  | 
3312  | 1.67k  |       invoke_cb          = ev_invoke_pending;  | 
3313  | 1.67k  | #endif  | 
3314  |  |  | 
3315  | 1.67k  |       io_blocktime       = 0.;  | 
3316  | 1.67k  |       timeout_blocktime  = 0.;  | 
3317  | 1.67k  |       backend            = 0;  | 
3318  | 1.67k  |       backend_fd         = -1;  | 
3319  | 1.67k  |       sig_pending        = 0;  | 
3320  | 1.67k  | #if EV_ASYNC_ENABLE  | 
3321  | 1.67k  |       async_pending      = 0;  | 
3322  | 1.67k  | #endif  | 
3323  | 1.67k  |       pipe_write_skipped = 0;  | 
3324  | 1.67k  |       pipe_write_wanted  = 0;  | 
3325  | 1.67k  |       evpipe [0]         = -1;  | 
3326  | 1.67k  |       evpipe [1]         = -1;  | 
3327  | 1.67k  | #if EV_USE_INOTIFY  | 
3328  | 1.67k  |       fs_fd              = flags & EVFLAG_NOINOTIFY ? -1 : -2;  | 
3329  | 1.67k  | #endif  | 
3330  | 1.67k  | #if EV_USE_SIGNALFD  | 
3331  | 1.67k  |       sigfd              = flags & EVFLAG_SIGNALFD  ? -2 : -1;  | 
3332  | 1.67k  | #endif  | 
3333  | 1.67k  | #if EV_USE_TIMERFD  | 
3334  | 1.67k  |       timerfd            = flags & EVFLAG_NOTIMERFD ? -1 : -2;  | 
3335  | 1.67k  | #endif  | 
3336  |  |  | 
3337  | 1.67k  |       if (!(flags & EVBACKEND_MASK))  | 
3338  | 1.67k  |         flags |= ev_recommended_backends ();  | 
3339  |  |  | 
3340  | 1.67k  |       if (flags & EVFLAG_ALLOCFD)  | 
3341  | 1.67k  |         if (evpipe_alloc(EV_A) < 0)  | 
3342  | 0  |           return;  | 
3343  |  |  | 
3344  |  | #if EV_USE_IOCP  | 
3345  |  |       if (!backend && (flags & EVBACKEND_IOCP    )) backend = iocp_init      (EV_A_ flags);  | 
3346  |  | #endif  | 
3347  |  | #if EV_USE_PORT  | 
3348  |  |       if (!backend && (flags & EVBACKEND_PORT    )) backend = port_init      (EV_A_ flags);  | 
3349  |  | #endif  | 
3350  |  | #if EV_USE_KQUEUE  | 
3351  |  |       if (!backend && (flags & EVBACKEND_KQUEUE  )) backend = kqueue_init    (EV_A_ flags);  | 
3352  |  | #endif  | 
3353  | 1.67k  | #if EV_USE_IOURING  | 
3354  | 1.67k  |       if (!backend && (flags & EVBACKEND_IOURING )) backend = iouring_init   (EV_A_ flags);  | 
3355  | 1.67k  | #endif  | 
3356  |  | #if EV_USE_LINUXAIO  | 
3357  |  |       if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init  (EV_A_ flags);  | 
3358  |  | #endif  | 
3359  | 1.67k  | #if EV_USE_EPOLL  | 
3360  | 1.67k  |       if (!backend && (flags & EVBACKEND_EPOLL   )) backend = epoll_init     (EV_A_ flags);  | 
3361  | 1.67k  | #endif  | 
3362  | 1.67k  | #if EV_USE_POLL  | 
3363  | 1.67k  |       if (!backend && (flags & EVBACKEND_POLL    )) backend = poll_init      (EV_A_ flags);  | 
3364  | 1.67k  | #endif  | 
3365  | 1.67k  | #if EV_USE_SELECT  | 
3366  | 1.67k  |       if (!backend && (flags & EVBACKEND_SELECT  )) backend = select_init    (EV_A_ flags);  | 
3367  | 1.67k  | #endif  | 
3368  |  |  | 
3369  | 1.67k  |       ev_prepare_init (&pending_w, pendingcb);  | 
3370  |  |  | 
3371  | 1.67k  | #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE  | 
3372  | 1.67k  |       ev_init (&pipe_w, pipecb);  | 
3373  | 1.67k  |       ev_set_priority (&pipe_w, EV_MAXPRI);  | 
3374  | 1.67k  |       if (flags & EVFLAG_ALLOCFD)  | 
3375  | 1.67k  |         { | 
3376  | 1.67k  |           ev_io_set (&pipe_w, evpipe [0] < 0 ? evpipe [1] : evpipe [0], EV_READ);  | 
3377  | 1.67k  |           ev_io_start (EV_A_ &pipe_w);  | 
3378  | 1.67k  |           ev_unref (EV_A);  | 
3379  | 1.67k  |         }  | 
3380  | 1.67k  | #endif  | 
3381  | 1.67k  |     }  | 
3382  | 1.67k  | }  | 
3383  |  |  | 
3384  |  | /* free up a loop structure */  | 
3385  |  | ecb_cold  | 
3386  |  | void  | 
3387  |  | ev_loop_destroy (EV_P)  | 
3388  | 1.65k  | { | 
3389  | 1.65k  |   int i;  | 
3390  |  |  | 
3391  | 1.65k  | #if EV_MULTIPLICITY  | 
3392  |  |   /* mimic free (0) */  | 
3393  | 1.65k  |   if (!EV_A)  | 
3394  | 0  |     return;  | 
3395  | 1.65k  | #endif  | 
3396  |  |  | 
3397  | 1.65k  | #if EV_CLEANUP_ENABLE  | 
3398  |  |   /* queue cleanup watchers (and execute them) */  | 
3399  | 1.65k  |   if (ecb_expect_false (cleanupcnt))  | 
3400  | 0  |     { | 
3401  | 0  |       queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP);  | 
3402  | 0  |       EV_INVOKE_PENDING;  | 
3403  | 0  |     }  | 
3404  | 1.65k  | #endif  | 
3405  |  |  | 
3406  | 1.65k  | #if EV_CHILD_ENABLE  | 
3407  | 1.65k  |   if (ev_is_default_loop (EV_A) && ev_is_active (&childev))  | 
3408  | 1.65k  |     { | 
3409  | 1.65k  |       ev_ref (EV_A); /* child watcher */  | 
3410  | 1.65k  |       ev_signal_stop (EV_A_ &childev);  | 
3411  | 1.65k  |     }  | 
3412  | 1.65k  | #endif  | 
3413  |  |  | 
3414  | 1.65k  |   if (ev_is_active (&pipe_w))  | 
3415  | 1.65k  |     { | 
3416  |  |       /*ev_ref (EV_A);*/  | 
3417  |  |       /*ev_io_stop (EV_A_ &pipe_w);*/  | 
3418  |  |  | 
3419  | 1.65k  |       if (evpipe [0] >= 0) EV_WIN32_CLOSE_FD (evpipe [0]);  | 
3420  | 1.65k  |       if (evpipe [1] >= 0) EV_WIN32_CLOSE_FD (evpipe [1]);  | 
3421  | 1.65k  |     }  | 
3422  |  |  | 
3423  | 1.65k  | #if EV_USE_SIGNALFD  | 
3424  | 1.65k  |   if (ev_is_active (&sigfd_w))  | 
3425  | 0  |     close (sigfd);  | 
3426  | 1.65k  | #endif  | 
3427  |  |  | 
3428  | 1.65k  | #if EV_USE_TIMERFD  | 
3429  | 1.65k  |   if (ev_is_active (&timerfd_w))  | 
3430  | 0  |     close (timerfd);  | 
3431  | 1.65k  | #endif  | 
3432  |  |  | 
3433  | 1.65k  | #if EV_USE_INOTIFY  | 
3434  | 1.65k  |   if (fs_fd >= 0)  | 
3435  | 0  |     close (fs_fd);  | 
3436  | 1.65k  | #endif  | 
3437  |  |  | 
3438  | 1.65k  |   if (backend_fd >= 0)  | 
3439  | 1.65k  |     close (backend_fd);  | 
3440  |  |  | 
3441  |  | #if EV_USE_IOCP  | 
3442  |  |   if (backend == EVBACKEND_IOCP    ) iocp_destroy     (EV_A);  | 
3443  |  | #endif  | 
3444  |  | #if EV_USE_PORT  | 
3445  |  |   if (backend == EVBACKEND_PORT    ) port_destroy     (EV_A);  | 
3446  |  | #endif  | 
3447  |  | #if EV_USE_KQUEUE  | 
3448  |  |   if (backend == EVBACKEND_KQUEUE  ) kqueue_destroy   (EV_A);  | 
3449  |  | #endif  | 
3450  | 1.65k  | #if EV_USE_IOURING  | 
3451  | 1.65k  |   if (backend == EVBACKEND_IOURING ) iouring_destroy  (EV_A);  | 
3452  | 1.65k  | #endif  | 
3453  |  | #if EV_USE_LINUXAIO  | 
3454  |  |   if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A);  | 
3455  |  | #endif  | 
3456  | 1.65k  | #if EV_USE_EPOLL  | 
3457  | 1.65k  |   if (backend == EVBACKEND_EPOLL   ) epoll_destroy    (EV_A);  | 
3458  | 1.65k  | #endif  | 
3459  | 1.65k  | #if EV_USE_POLL  | 
3460  | 1.65k  |   if (backend == EVBACKEND_POLL    ) poll_destroy     (EV_A);  | 
3461  | 1.65k  | #endif  | 
3462  | 1.65k  | #if EV_USE_SELECT  | 
3463  | 1.65k  |   if (backend == EVBACKEND_SELECT  ) select_destroy   (EV_A);  | 
3464  | 1.65k  | #endif  | 
3465  |  |  | 
3466  | 9.93k  |   for (i = NUMPRI; i--; )  | 
3467  | 8.27k  |     { | 
3468  | 8.27k  |       array_free (pending, [i]);  | 
3469  | 8.27k  | #if EV_IDLE_ENABLE  | 
3470  | 8.27k  |       array_free (idle, [i]);  | 
3471  | 8.27k  | #endif  | 
3472  | 8.27k  |     }  | 
3473  |  |  | 
3474  | 1.65k  |   ev_free (anfds); anfds = 0; anfdmax = 0;  | 
3475  |  |  | 
3476  |  |   /* have to use the microsoft-never-gets-it-right macro */  | 
3477  | 1.65k  |   array_free (rfeed, EMPTY);  | 
3478  | 1.65k  |   array_free (fdchange, EMPTY);  | 
3479  | 1.65k  |   array_free (timer, EMPTY);  | 
3480  | 1.65k  | #if EV_PERIODIC_ENABLE  | 
3481  | 1.65k  |   array_free (periodic, EMPTY);  | 
3482  | 1.65k  | #endif  | 
3483  | 1.65k  | #if EV_FORK_ENABLE  | 
3484  | 1.65k  |   array_free (fork, EMPTY);  | 
3485  | 1.65k  | #endif  | 
3486  | 1.65k  | #if EV_CLEANUP_ENABLE  | 
3487  | 1.65k  |   array_free (cleanup, EMPTY);  | 
3488  | 1.65k  | #endif  | 
3489  | 1.65k  |   array_free (prepare, EMPTY);  | 
3490  | 1.65k  |   array_free (check, EMPTY);  | 
3491  | 1.65k  | #if EV_ASYNC_ENABLE  | 
3492  | 1.65k  |   array_free (async, EMPTY);  | 
3493  | 1.65k  | #endif  | 
3494  |  |  | 
3495  | 1.65k  |   backend = 0;  | 
3496  |  |  | 
3497  | 1.65k  | #if EV_MULTIPLICITY  | 
3498  | 1.65k  |   if (ev_is_default_loop (EV_A))  | 
3499  | 1.65k  | #endif  | 
3500  | 1.65k  |     ev_default_loop_ptr = 0;  | 
3501  | 0  | #if EV_MULTIPLICITY  | 
3502  | 0  |   else  | 
3503  | 0  |     ev_free (EV_A);  | 
3504  | 1.65k  | #endif  | 
3505  | 1.65k  | }  | 
3506  |  |  | 
3507  |  | #if EV_USE_INOTIFY  | 
3508  |  | inline_size void infy_fork (EV_P);  | 
3509  |  | #endif  | 
3510  |  |  | 
3511  |  | inline_size void  | 
3512  |  | loop_fork (EV_P)  | 
3513  | 0  | { | 
3514  |  | #if EV_USE_PORT  | 
3515  |  |   if (backend == EVBACKEND_PORT    ) port_fork     (EV_A);  | 
3516  |  | #endif  | 
3517  |  | #if EV_USE_KQUEUE  | 
3518  |  |   if (backend == EVBACKEND_KQUEUE  ) kqueue_fork   (EV_A);  | 
3519  |  | #endif  | 
3520  | 0  | #if EV_USE_IOURING  | 
3521  | 0  |   if (backend == EVBACKEND_IOURING ) iouring_fork  (EV_A);  | 
3522  | 0  | #endif  | 
3523  |  | #if EV_USE_LINUXAIO  | 
3524  |  |   if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A);  | 
3525  |  | #endif  | 
3526  | 0  | #if EV_USE_EPOLL  | 
3527  | 0  |   if (backend == EVBACKEND_EPOLL   ) epoll_fork    (EV_A);  | 
3528  | 0  | #endif  | 
3529  | 0  | #if EV_USE_INOTIFY  | 
3530  | 0  |   infy_fork (EV_A);  | 
3531  | 0  | #endif  | 
3532  |  | 
  | 
3533  | 0  |   if (postfork != 2)  | 
3534  | 0  |     { | 
3535  | 0  |       #if EV_USE_SIGNALFD  | 
3536  |  |         /* surprisingly, nothing needs to be done for signalfd, accoridng to docs, it does the right thing on fork */  | 
3537  | 0  |       #endif  | 
3538  |  | 
  | 
3539  | 0  |       #if EV_USE_TIMERFD  | 
3540  | 0  |         if (ev_is_active (&timerfd_w))  | 
3541  | 0  |           { | 
3542  | 0  |             ev_ref (EV_A);  | 
3543  | 0  |             ev_io_stop (EV_A_ &timerfd_w);  | 
3544  |  | 
  | 
3545  | 0  |             close (timerfd);  | 
3546  | 0  |             timerfd = -2;  | 
3547  |  | 
  | 
3548  | 0  |             evtimerfd_init (EV_A);  | 
3549  |  |             /* reschedule periodics, in case we missed something */  | 
3550  | 0  |             ev_feed_event (EV_A_ &timerfd_w, EV_CUSTOM);  | 
3551  | 0  |           }  | 
3552  | 0  |       #endif  | 
3553  |  | 
  | 
3554  | 0  |       #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE  | 
3555  | 0  |         if (ev_is_active (&pipe_w))  | 
3556  | 0  |           { | 
3557  |  |             /* pipe_write_wanted must be false now, so modifying fd vars should be safe */  | 
3558  |  | 
  | 
3559  | 0  |             ev_ref (EV_A);  | 
3560  | 0  |             ev_io_stop (EV_A_ &pipe_w);  | 
3561  |  | 
  | 
3562  | 0  |             if (evpipe [0] >= 0)  | 
3563  | 0  |               EV_WIN32_CLOSE_FD (evpipe [0]);  | 
3564  |  | 
  | 
3565  | 0  |             evpipe_init (EV_A);  | 
3566  |  |             /* iterate over everything, in case we missed something before */  | 
3567  | 0  |             ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);  | 
3568  | 0  |           }  | 
3569  | 0  |       #endif  | 
3570  | 0  |     }  | 
3571  |  | 
  | 
3572  | 0  |   postfork = 0;  | 
3573  | 0  | }  | 
3574  |  |  | 
3575  |  | #if EV_MULTIPLICITY  | 
3576  |  |  | 
3577  |  | ecb_cold  | 
3578  |  | struct ev_loop *  | 
3579  |  | ev_loop_new (unsigned int flags) EV_NOEXCEPT  | 
3580  | 0  | { | 
3581  | 0  |   EV_P = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop));  | 
3582  |  | 
  | 
3583  | 0  |   memset (EV_A, 0, sizeof (struct ev_loop));  | 
3584  | 0  |   loop_init (EV_A_ flags);  | 
3585  |  | 
  | 
3586  | 0  |   if (ev_backend (EV_A))  | 
3587  | 0  |     return EV_A;  | 
3588  |  |  | 
3589  | 0  |   ev_free (EV_A);  | 
3590  | 0  |   return 0;  | 
3591  | 0  | }  | 
3592  |  |  | 
3593  |  | #endif /* multiplicity */  | 
3594  |  |  | 
3595  |  | #if EV_VERIFY  | 
3596  |  | ecb_noinline ecb_cold  | 
3597  |  | static void  | 
3598  |  | verify_watcher (EV_P_ W w)  | 
3599  | 0  | { | 
3600  | 0  |   assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI)); | 
3601  |  |  | 
3602  | 0  |   if (w->pending)  | 
3603  | 0  |     assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w)); | 
3604  | 0  | }  | 
3605  |  |  | 
3606  |  | ecb_noinline ecb_cold  | 
3607  |  | static void  | 
3608  |  | verify_heap (EV_P_ ANHE *heap, int N)  | 
3609  | 0  | { | 
3610  | 0  |   int i;  | 
3611  |  | 
  | 
3612  | 0  |   for (i = HEAP0; i < N + HEAP0; ++i)  | 
3613  | 0  |     { | 
3614  | 0  |       assert (("libev: active index mismatch in heap", ev_active (ANHE_w (heap [i])) == i)); | 
3615  | 0  |       assert (("libev: heap condition violated", i == HEAP0 || ANHE_at (heap [HPARENT (i)]) <= ANHE_at (heap [i]))); | 
3616  | 0  |       assert (("libev: heap at cache mismatch", ANHE_at (heap [i]) == ev_at (ANHE_w (heap [i])))); | 
3617  |  |  | 
3618  | 0  |       verify_watcher (EV_A_ (W)ANHE_w (heap [i]));  | 
3619  | 0  |     }  | 
3620  | 0  | }  | 
3621  |  |  | 
3622  |  | ecb_noinline ecb_cold  | 
3623  |  | static void  | 
3624  |  | array_verify (EV_P_ W *ws, int cnt)  | 
3625  | 0  | { | 
3626  | 0  |   while (cnt--)  | 
3627  | 0  |     { | 
3628  | 0  |       assert (("libev: active index mismatch", ev_active (ws [cnt]) == cnt + 1)); | 
3629  | 0  |       verify_watcher (EV_A_ ws [cnt]);  | 
3630  | 0  |     }  | 
3631  | 0  | }  | 
3632  |  | #endif  | 
3633  |  |  | 
3634  |  | #if EV_FEATURE_API  | 
3635  |  | void ecb_cold  | 
3636  |  | ev_verify (EV_P) EV_NOEXCEPT  | 
3637  | 0  | { | 
3638  | 0  | #if EV_VERIFY  | 
3639  | 0  |   int i;  | 
3640  | 0  |   WL w, w2;  | 
3641  |  | 
  | 
3642  | 0  |   assert (activecnt >= -1);  | 
3643  |  |  | 
3644  | 0  |   assert (fdchangemax >= fdchangecnt);  | 
3645  | 0  |   for (i = 0; i < fdchangecnt; ++i)  | 
3646  | 0  |     assert (("libev: negative fd in fdchanges", fdchanges [i] >= 0)); | 
3647  |  |  | 
3648  | 0  |   assert (anfdmax >= 0);  | 
3649  | 0  |   for (i = 0; i < anfdmax; ++i)  | 
3650  | 0  |     { | 
3651  | 0  |       int j = 0;  | 
3652  |  | 
  | 
3653  | 0  |       for (w = w2 = anfds [i].head; w; w = w->next)  | 
3654  | 0  |         { | 
3655  | 0  |           verify_watcher (EV_A_ (W)w);  | 
3656  |  | 
  | 
3657  | 0  |           if (j++ & 1)  | 
3658  | 0  |             { | 
3659  | 0  |               assert (("libev: io watcher list contains a loop", w != w2)); | 
3660  | 0  |               w2 = w2->next;  | 
3661  | 0  |             }  | 
3662  |  |  | 
3663  | 0  |           assert (("libev: inactive fd watcher on anfd list", ev_active (w) == 1)); | 
3664  | 0  |           assert (("libev: fd mismatch between watcher and anfd", ((ev_io *)w)->fd == i)); | 
3665  | 0  |         }  | 
3666  | 0  |     }  | 
3667  |  |  | 
3668  | 0  |   assert (timermax >= timercnt);  | 
3669  | 0  |   verify_heap (EV_A_ timers, timercnt);  | 
3670  |  | 
  | 
3671  | 0  | #if EV_PERIODIC_ENABLE  | 
3672  | 0  |   assert (periodicmax >= periodiccnt);  | 
3673  | 0  |   verify_heap (EV_A_ periodics, periodiccnt);  | 
3674  | 0  | #endif  | 
3675  |  | 
  | 
3676  | 0  |   for (i = NUMPRI; i--; )  | 
3677  | 0  |     { | 
3678  | 0  |       assert (pendingmax [i] >= pendingcnt [i]);  | 
3679  | 0  | #if EV_IDLE_ENABLE  | 
3680  | 0  |       assert (idleall >= 0);  | 
3681  | 0  |       assert (idlemax [i] >= idlecnt [i]);  | 
3682  | 0  |       array_verify (EV_A_ (W *)idles [i], idlecnt [i]);  | 
3683  | 0  | #endif  | 
3684  | 0  |     }  | 
3685  |  |  | 
3686  | 0  | #if EV_FORK_ENABLE  | 
3687  | 0  |   assert (forkmax >= forkcnt);  | 
3688  | 0  |   array_verify (EV_A_ (W *)forks, forkcnt);  | 
3689  | 0  | #endif  | 
3690  |  | 
  | 
3691  | 0  | #if EV_CLEANUP_ENABLE  | 
3692  | 0  |   assert (cleanupmax >= cleanupcnt);  | 
3693  | 0  |   array_verify (EV_A_ (W *)cleanups, cleanupcnt);  | 
3694  | 0  | #endif  | 
3695  |  | 
  | 
3696  | 0  | #if EV_ASYNC_ENABLE  | 
3697  | 0  |   assert (asyncmax >= asynccnt);  | 
3698  | 0  |   array_verify (EV_A_ (W *)asyncs, asynccnt);  | 
3699  | 0  | #endif  | 
3700  |  | 
  | 
3701  | 0  | #if EV_PREPARE_ENABLE  | 
3702  | 0  |   assert (preparemax >= preparecnt);  | 
3703  | 0  |   array_verify (EV_A_ (W *)prepares, preparecnt);  | 
3704  | 0  | #endif  | 
3705  |  | 
  | 
3706  | 0  | #if EV_CHECK_ENABLE  | 
3707  | 0  |   assert (checkmax >= checkcnt);  | 
3708  | 0  |   array_verify (EV_A_ (W *)checks, checkcnt);  | 
3709  | 0  | #endif  | 
3710  |  | 
  | 
3711  |  | # if 0  | 
3712  |  | #if EV_CHILD_ENABLE  | 
3713  |  |   for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next)  | 
3714  |  |   for (signum = EV_NSIG; signum--; ) if (signals [signum].pending)  | 
3715  |  | #endif  | 
3716  |  | # endif  | 
3717  | 0  | #endif  | 
3718  | 0  | }  | 
3719  |  | #endif  | 
3720  |  |  | 
3721  |  | #if EV_MULTIPLICITY  | 
3722  |  | ecb_cold  | 
3723  |  | struct ev_loop *  | 
3724  |  | #else  | 
3725  |  | int  | 
3726  |  | #endif  | 
3727  |  | ev_default_loop (unsigned int flags) EV_NOEXCEPT  | 
3728  | 1.67k  | { | 
3729  | 1.67k  |   if (!ev_default_loop_ptr)  | 
3730  | 1.67k  |     { | 
3731  | 1.67k  | #if EV_MULTIPLICITY  | 
3732  | 1.67k  |       EV_P = ev_default_loop_ptr = &default_loop_struct;  | 
3733  |  | #else  | 
3734  |  |       ev_default_loop_ptr = 1;  | 
3735  |  | #endif  | 
3736  |  |  | 
3737  | 1.67k  |       loop_init (EV_A_ flags);  | 
3738  |  |  | 
3739  | 1.67k  |       if (ev_backend (EV_A))  | 
3740  | 1.67k  |         { | 
3741  | 1.67k  | #if EV_CHILD_ENABLE  | 
3742  | 1.67k  |           ev_signal_init (&childev, childcb, SIGCHLD);  | 
3743  | 1.67k  |           ev_set_priority (&childev, EV_MAXPRI);  | 
3744  | 1.67k  |           ev_signal_start (EV_A_ &childev);  | 
3745  | 1.67k  |           ev_unref (EV_A); /* child watcher should not keep loop alive */  | 
3746  | 1.67k  | #endif  | 
3747  | 1.67k  |         }  | 
3748  | 0  |       else  | 
3749  | 0  |         ev_default_loop_ptr = 0;  | 
3750  | 1.67k  |     }  | 
3751  |  |  | 
3752  | 1.67k  |   return ev_default_loop_ptr;  | 
3753  | 1.67k  | }  | 
3754  |  |  | 
3755  |  | void  | 
3756  |  | ev_loop_fork (EV_P) EV_NOEXCEPT  | 
3757  | 0  | { | 
3758  | 0  |   postfork = 1;  | 
3759  | 0  | }  | 
3760  |  |  | 
3761  |  | /*****************************************************************************/  | 
3762  |  |  | 
3763  |  | void  | 
3764  |  | ev_invoke (EV_P_ void *w, int revents)  | 
3765  | 0  | { | 
3766  | 0  |   EV_CB_INVOKE ((W)w, revents);  | 
3767  | 0  | }  | 
3768  |  |  | 
3769  |  | unsigned int  | 
3770  |  | ev_pending_count (EV_P) EV_NOEXCEPT  | 
3771  | 0  | { | 
3772  | 0  |   int pri;  | 
3773  | 0  |   unsigned int count = 0;  | 
3774  |  | 
  | 
3775  | 0  |   for (pri = NUMPRI; pri--; )  | 
3776  | 0  |     count += pendingcnt [pri];  | 
3777  |  | 
  | 
3778  | 0  |   return count;  | 
3779  | 0  | }  | 
3780  |  |  | 
3781  |  | ecb_noinline  | 
3782  |  | void  | 
3783  |  | ev_invoke_pending (EV_P)  | 
3784  | 0  | { | 
3785  | 0  |   pendingpri = NUMPRI;  | 
3786  |  | 
  | 
3787  | 0  |   do  | 
3788  | 0  |     { | 
3789  | 0  |       --pendingpri;  | 
3790  |  |  | 
3791  |  |       /* pendingpri possibly gets modified in the inner loop */  | 
3792  | 0  |       while (pendingcnt [pendingpri])  | 
3793  | 0  |         { | 
3794  | 0  |           ANPENDING *p = pendings [pendingpri] + --pendingcnt [pendingpri];  | 
3795  |  | 
  | 
3796  | 0  |           p->w->pending = 0;  | 
3797  | 0  |           EV_CB_INVOKE (p->w, p->events);  | 
3798  | 0  |           EV_FREQUENT_CHECK;  | 
3799  | 0  |         }  | 
3800  | 0  |     }  | 
3801  | 0  |   while (pendingpri);  | 
3802  | 0  | }  | 
3803  |  |  | 
3804  |  | #if EV_IDLE_ENABLE  | 
3805  |  | /* make idle watchers pending. this handles the "call-idle */  | 
3806  |  | /* only when higher priorities are idle" logic */  | 
3807  |  | inline_size void  | 
3808  |  | idle_reify (EV_P)  | 
3809  | 0  | { | 
3810  | 0  |   if (ecb_expect_false (idleall))  | 
3811  | 0  |     { | 
3812  | 0  |       int pri;  | 
3813  |  | 
  | 
3814  | 0  |       for (pri = NUMPRI; pri--; )  | 
3815  | 0  |         { | 
3816  | 0  |           if (pendingcnt [pri])  | 
3817  | 0  |             break;  | 
3818  |  |  | 
3819  | 0  |           if (idlecnt [pri])  | 
3820  | 0  |             { | 
3821  | 0  |               queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE);  | 
3822  | 0  |               break;  | 
3823  | 0  |             }  | 
3824  | 0  |         }  | 
3825  | 0  |     }  | 
3826  | 0  | }  | 
3827  |  | #endif  | 
3828  |  |  | 
3829  |  | /* make timers pending */  | 
3830  |  | inline_size void  | 
3831  |  | timers_reify (EV_P)  | 
3832  | 0  | { | 
3833  | 0  |   EV_FREQUENT_CHECK;  | 
3834  |  | 
  | 
3835  | 0  |   if (timercnt && ANHE_at (timers [HEAP0]) < mn_now)  | 
3836  | 0  |     { | 
3837  | 0  |       do  | 
3838  | 0  |         { | 
3839  | 0  |           ev_timer *w = (ev_timer *)ANHE_w (timers [HEAP0]);  | 
3840  |  |  | 
3841  |  |           /*assert (("libev: inactive timer on timer heap detected", ev_is_active (w)));*/ | 
3842  |  |  | 
3843  |  |           /* first reschedule or stop timer */  | 
3844  | 0  |           if (w->repeat)  | 
3845  | 0  |             { | 
3846  | 0  |               ev_at (w) += w->repeat;  | 
3847  | 0  |               if (ev_at (w) < mn_now)  | 
3848  | 0  |                 ev_at (w) = mn_now;  | 
3849  |  | 
  | 
3850  | 0  |               assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > EV_TS_CONST (0.))); | 
3851  |  |  | 
3852  | 0  |               ANHE_at_cache (timers [HEAP0]);  | 
3853  | 0  |               downheap (timers, timercnt, HEAP0);  | 
3854  | 0  |             }  | 
3855  | 0  |           else  | 
3856  | 0  |             ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */  | 
3857  |  |  | 
3858  | 0  |           EV_FREQUENT_CHECK;  | 
3859  | 0  |           feed_reverse (EV_A_ (W)w);  | 
3860  | 0  |         }  | 
3861  | 0  |       while (timercnt && ANHE_at (timers [HEAP0]) < mn_now);  | 
3862  |  |  | 
3863  | 0  |       feed_reverse_done (EV_A_ EV_TIMER);  | 
3864  | 0  |     }  | 
3865  | 0  | }  | 
3866  |  |  | 
3867  |  | #if EV_PERIODIC_ENABLE  | 
3868  |  |  | 
3869  |  | ecb_noinline  | 
3870  |  | static void  | 
3871  |  | periodic_recalc (EV_P_ ev_periodic *w)  | 
3872  | 0  | { | 
3873  | 0  |   ev_tstamp interval = w->interval > MIN_INTERVAL ? w->interval : MIN_INTERVAL;  | 
3874  | 0  |   ev_tstamp at = w->offset + interval * ev_floor ((ev_rt_now - w->offset) / interval);  | 
3875  |  |  | 
3876  |  |   /* the above almost always errs on the low side */  | 
3877  | 0  |   while (at <= ev_rt_now)  | 
3878  | 0  |     { | 
3879  | 0  |       ev_tstamp nat = at + w->interval;  | 
3880  |  |  | 
3881  |  |       /* when resolution fails us, we use ev_rt_now */  | 
3882  | 0  |       if (ecb_expect_false (nat == at))  | 
3883  | 0  |         { | 
3884  | 0  |           at = ev_rt_now;  | 
3885  | 0  |           break;  | 
3886  | 0  |         }  | 
3887  |  |  | 
3888  | 0  |       at = nat;  | 
3889  | 0  |     }  | 
3890  |  | 
  | 
3891  | 0  |   ev_at (w) = at;  | 
3892  | 0  | }  | 
3893  |  |  | 
3894  |  | /* make periodics pending */  | 
3895  |  | inline_size void  | 
3896  |  | periodics_reify (EV_P)  | 
3897  | 0  | { | 
3898  | 0  |   EV_FREQUENT_CHECK;  | 
3899  |  | 
  | 
3900  | 0  |   while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now)  | 
3901  | 0  |     { | 
3902  | 0  |       do  | 
3903  | 0  |         { | 
3904  | 0  |           ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]);  | 
3905  |  |  | 
3906  |  |           /*assert (("libev: inactive timer on periodic heap detected", ev_is_active (w)));*/ | 
3907  |  |  | 
3908  |  |           /* first reschedule or stop timer */  | 
3909  | 0  |           if (w->reschedule_cb)  | 
3910  | 0  |             { | 
3911  | 0  |               ev_at (w) = w->reschedule_cb (w, ev_rt_now);  | 
3912  |  | 
  | 
3913  | 0  |               assert (("libev: ev_periodic reschedule callback returned time in the past", ev_at (w) >= ev_rt_now)); | 
3914  |  |  | 
3915  | 0  |               ANHE_at_cache (periodics [HEAP0]);  | 
3916  | 0  |               downheap (periodics, periodiccnt, HEAP0);  | 
3917  | 0  |             }  | 
3918  | 0  |           else if (w->interval)  | 
3919  | 0  |             { | 
3920  | 0  |               periodic_recalc (EV_A_ w);  | 
3921  | 0  |               ANHE_at_cache (periodics [HEAP0]);  | 
3922  | 0  |               downheap (periodics, periodiccnt, HEAP0);  | 
3923  | 0  |             }  | 
3924  | 0  |           else  | 
3925  | 0  |             ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */  | 
3926  |  |  | 
3927  | 0  |           EV_FREQUENT_CHECK;  | 
3928  | 0  |           feed_reverse (EV_A_ (W)w);  | 
3929  | 0  |         }  | 
3930  | 0  |       while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now);  | 
3931  |  |  | 
3932  | 0  |       feed_reverse_done (EV_A_ EV_PERIODIC);  | 
3933  | 0  |     }  | 
3934  | 0  | }  | 
3935  |  |  | 
3936  |  | /* simply recalculate all periodics */  | 
3937  |  | /* TODO: maybe ensure that at least one event happens when jumping forward? */  | 
3938  |  | ecb_noinline ecb_cold  | 
3939  |  | static void  | 
3940  |  | periodics_reschedule (EV_P)  | 
3941  | 0  | { | 
3942  | 0  |   int i;  | 
3943  |  |  | 
3944  |  |   /* adjust periodics after time jump */  | 
3945  | 0  |   for (i = HEAP0; i < periodiccnt + HEAP0; ++i)  | 
3946  | 0  |     { | 
3947  | 0  |       ev_periodic *w = (ev_periodic *)ANHE_w (periodics [i]);  | 
3948  |  | 
  | 
3949  | 0  |       if (w->reschedule_cb)  | 
3950  | 0  |         ev_at (w) = w->reschedule_cb (w, ev_rt_now);  | 
3951  | 0  |       else if (w->interval)  | 
3952  | 0  |         periodic_recalc (EV_A_ w);  | 
3953  |  | 
  | 
3954  | 0  |       ANHE_at_cache (periodics [i]);  | 
3955  | 0  |     }  | 
3956  |  | 
  | 
3957  | 0  |   reheap (periodics, periodiccnt);  | 
3958  | 0  | }  | 
3959  |  | #endif  | 
3960  |  |  | 
3961  |  | /* adjust all timers by a given offset */  | 
3962  |  | ecb_noinline ecb_cold  | 
3963  |  | static void  | 
3964  |  | timers_reschedule (EV_P_ ev_tstamp adjust)  | 
3965  | 0  | { | 
3966  | 0  |   int i;  | 
3967  |  | 
  | 
3968  | 0  |   for (i = 0; i < timercnt; ++i)  | 
3969  | 0  |     { | 
3970  | 0  |       ANHE *he = timers + i + HEAP0;  | 
3971  | 0  |       ANHE_w (*he)->at += adjust;  | 
3972  | 0  |       ANHE_at_cache (*he);  | 
3973  | 0  |     }  | 
3974  | 0  | }  | 
3975  |  |  | 
3976  |  | /* fetch new monotonic and realtime times from the kernel */  | 
3977  |  | /* also detect if there was a timejump, and act accordingly */  | 
3978  |  | inline_speed void  | 
3979  |  | time_update (EV_P_ ev_tstamp max_block)  | 
3980  | 0  | { | 
3981  | 0  | #if EV_USE_MONOTONIC  | 
3982  | 0  |   if (ecb_expect_true (have_monotonic))  | 
3983  | 0  |     { | 
3984  | 0  |       int i;  | 
3985  | 0  |       ev_tstamp odiff = rtmn_diff;  | 
3986  |  | 
  | 
3987  | 0  |       mn_now = get_clock ();  | 
3988  |  |  | 
3989  |  |       /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */  | 
3990  |  |       /* interpolate in the meantime */  | 
3991  | 0  |       if (ecb_expect_true (mn_now - now_floor < EV_TS_CONST (MIN_TIMEJUMP * .5)))  | 
3992  | 0  |         { | 
3993  | 0  |           ev_rt_now = rtmn_diff + mn_now;  | 
3994  | 0  |           return;  | 
3995  | 0  |         }  | 
3996  |  |  | 
3997  | 0  |       now_floor = mn_now;  | 
3998  | 0  |       ev_rt_now = ev_time ();  | 
3999  |  |  | 
4000  |  |       /* loop a few times, before making important decisions.  | 
4001  |  |        * on the choice of "4": one iteration isn't enough,  | 
4002  |  |        * in case we get preempted during the calls to  | 
4003  |  |        * ev_time and get_clock. a second call is almost guaranteed  | 
4004  |  |        * to succeed in that case, though. and looping a few more times  | 
4005  |  |        * doesn't hurt either as we only do this on time-jumps or  | 
4006  |  |        * in the unlikely event of having been preempted here.  | 
4007  |  |        */  | 
4008  | 0  |       for (i = 4; --i; )  | 
4009  | 0  |         { | 
4010  | 0  |           ev_tstamp diff;  | 
4011  | 0  |           rtmn_diff = ev_rt_now - mn_now;  | 
4012  |  | 
  | 
4013  | 0  |           diff = odiff - rtmn_diff;  | 
4014  |  | 
  | 
4015  | 0  |           if (ecb_expect_true ((diff < EV_TS_CONST (0.) ? -diff : diff) < EV_TS_CONST (MIN_TIMEJUMP)))  | 
4016  | 0  |             return; /* all is well */  | 
4017  |  |  | 
4018  | 0  |           ev_rt_now = ev_time ();  | 
4019  | 0  |           mn_now    = get_clock ();  | 
4020  | 0  |           now_floor = mn_now;  | 
4021  | 0  |         }  | 
4022  |  |  | 
4023  |  |       /* no timer adjustment, as the monotonic clock doesn't jump */  | 
4024  |  |       /* timers_reschedule (EV_A_ rtmn_diff - odiff) */  | 
4025  | 0  | # if EV_PERIODIC_ENABLE  | 
4026  | 0  |       periodics_reschedule (EV_A);  | 
4027  | 0  | # endif  | 
4028  | 0  |     }  | 
4029  | 0  |   else  | 
4030  | 0  | #endif  | 
4031  | 0  |     { | 
4032  | 0  |       ev_rt_now = ev_time ();  | 
4033  |  | 
  | 
4034  | 0  |       if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + EV_TS_CONST (MIN_TIMEJUMP)))  | 
4035  | 0  |         { | 
4036  |  |           /* adjust timers. this is easy, as the offset is the same for all of them */  | 
4037  | 0  |           timers_reschedule (EV_A_ ev_rt_now - mn_now);  | 
4038  | 0  | #if EV_PERIODIC_ENABLE  | 
4039  | 0  |           periodics_reschedule (EV_A);  | 
4040  | 0  | #endif  | 
4041  | 0  |         }  | 
4042  |  | 
  | 
4043  | 0  |       mn_now = ev_rt_now;  | 
4044  | 0  |     }  | 
4045  | 0  | }  | 
4046  |  |  | 
4047  |  | int  | 
4048  |  | ev_run (EV_P_ int flags)  | 
4049  | 0  | { | 
4050  | 0  | #if EV_FEATURE_API  | 
4051  | 0  |   ++loop_depth;  | 
4052  | 0  | #endif  | 
4053  |  | 
  | 
4054  | 0  |   assert (("libev: ev_loop recursion during release detected", loop_done != EVBREAK_RECURSE)); | 
4055  |  |  | 
4056  | 0  |   loop_done = EVBREAK_CANCEL;  | 
4057  |  | 
  | 
4058  | 0  |   EV_INVOKE_PENDING; /* in case we recurse, ensure ordering stays nice and clean */  | 
4059  |  | 
  | 
4060  | 0  |   do  | 
4061  | 0  |     { | 
4062  |  | #if EV_VERIFY >= 2  | 
4063  |  |       ev_verify (EV_A);  | 
4064  |  | #endif  | 
4065  |  | 
  | 
4066  | 0  | #ifndef _WIN32  | 
4067  | 0  |       if (ecb_expect_false (curpid)) /* penalise the forking check even more */  | 
4068  | 0  |         if (ecb_expect_false (getpid () != curpid))  | 
4069  | 0  |           { | 
4070  | 0  |             curpid = getpid ();  | 
4071  | 0  |             postfork = 1;  | 
4072  | 0  |           }  | 
4073  | 0  | #endif  | 
4074  |  | 
  | 
4075  | 0  | #if EV_FORK_ENABLE  | 
4076  |  |       /* we might have forked, so queue fork handlers */  | 
4077  | 0  |       if (ecb_expect_false (postfork))  | 
4078  | 0  |         if (forkcnt)  | 
4079  | 0  |           { | 
4080  | 0  |             queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);  | 
4081  | 0  |             EV_INVOKE_PENDING;  | 
4082  | 0  |           }  | 
4083  | 0  | #endif  | 
4084  |  | 
  | 
4085  | 0  | #if EV_PREPARE_ENABLE  | 
4086  |  |       /* queue prepare watchers (and execute them) */  | 
4087  | 0  |       if (ecb_expect_false (preparecnt))  | 
4088  | 0  |         { | 
4089  | 0  |           queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);  | 
4090  | 0  |           EV_INVOKE_PENDING;  | 
4091  | 0  |         }  | 
4092  | 0  | #endif  | 
4093  |  | 
  | 
4094  | 0  |       if (ecb_expect_false (loop_done))  | 
4095  | 0  |         break;  | 
4096  |  |  | 
4097  |  |       /* we might have forked, so reify kernel state if necessary */  | 
4098  | 0  |       if (ecb_expect_false (postfork))  | 
4099  | 0  |         loop_fork (EV_A);  | 
4100  |  |  | 
4101  |  |       /* update fd-related kernel structures */  | 
4102  | 0  |       fd_reify (EV_A);  | 
4103  |  |  | 
4104  |  |       /* calculate blocking time */  | 
4105  | 0  |       { | 
4106  | 0  |         ev_tstamp waittime  = 0.;  | 
4107  | 0  |         ev_tstamp sleeptime = 0.;  | 
4108  |  |  | 
4109  |  |         /* remember old timestamp for io_blocktime calculation */  | 
4110  | 0  |         ev_tstamp prev_mn_now = mn_now;  | 
4111  |  |  | 
4112  |  |         /* update time to cancel out callback processing overhead */  | 
4113  | 0  |         time_update (EV_A_ EV_TS_CONST (EV_TSTAMP_HUGE));  | 
4114  |  |  | 
4115  |  |         /* from now on, we want a pipe-wake-up */  | 
4116  | 0  |         pipe_write_wanted = 1;  | 
4117  |  | 
  | 
4118  | 0  |         ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */  | 
4119  |  | 
  | 
4120  | 0  |         if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))  | 
4121  | 0  |           { | 
4122  | 0  |             waittime = EV_TS_CONST (MAX_BLOCKTIME);  | 
4123  |  | 
  | 
4124  | 0  | #if EV_USE_MONOTONIC  | 
4125  | 0  |             if (ecb_expect_true (have_monotonic))  | 
4126  | 0  |               { | 
4127  | 0  | #if EV_USE_TIMERFD  | 
4128  |  |                 /* sleep a lot longer when we can reliably detect timejumps */  | 
4129  | 0  |                 if (ecb_expect_true (timerfd != -1))  | 
4130  | 0  |                   waittime = EV_TS_CONST (MAX_BLOCKTIME2);  | 
4131  | 0  | #endif  | 
4132  |  | #if !EV_PERIODIC_ENABLE  | 
4133  |  |                 /* without periodics but with monotonic clock there is no need */  | 
4134  |  |                 /* for any time jump detection, so sleep longer */  | 
4135  |  |                 waittime = EV_TS_CONST (MAX_BLOCKTIME2);  | 
4136  |  | #endif  | 
4137  | 0  |               }  | 
4138  | 0  | #endif  | 
4139  |  | 
  | 
4140  | 0  |             if (timercnt)  | 
4141  | 0  |               { | 
4142  | 0  |                 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now;  | 
4143  | 0  |                 if (waittime > to) waittime = to;  | 
4144  | 0  |               }  | 
4145  |  | 
  | 
4146  | 0  | #if EV_PERIODIC_ENABLE  | 
4147  | 0  |             if (periodiccnt)  | 
4148  | 0  |               { | 
4149  | 0  |                 ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now;  | 
4150  | 0  |                 if (waittime > to) waittime = to;  | 
4151  | 0  |               }  | 
4152  | 0  | #endif  | 
4153  |  |  | 
4154  |  |             /* don't let timeouts decrease the waittime below timeout_blocktime */  | 
4155  | 0  |             if (ecb_expect_false (waittime < timeout_blocktime))  | 
4156  | 0  |               waittime = timeout_blocktime;  | 
4157  |  |  | 
4158  |  |             /* now there are two more special cases left, either we have  | 
4159  |  |              * already-expired timers, so we should not sleep, or we have timers  | 
4160  |  |              * that expire very soon, in which case we need to wait for a minimum  | 
4161  |  |              * amount of time for some event loop backends.  | 
4162  |  |              */  | 
4163  | 0  |             if (ecb_expect_false (waittime < backend_mintime))  | 
4164  | 0  |               waittime = waittime <= EV_TS_CONST (0.)  | 
4165  | 0  |                  ? EV_TS_CONST (0.)  | 
4166  | 0  |                  : backend_mintime;  | 
4167  |  |  | 
4168  |  |             /* extra check because io_blocktime is commonly 0 */  | 
4169  | 0  |             if (ecb_expect_false (io_blocktime))  | 
4170  | 0  |               { | 
4171  | 0  |                 sleeptime = io_blocktime - (mn_now - prev_mn_now);  | 
4172  |  | 
  | 
4173  | 0  |                 if (sleeptime > waittime - backend_mintime)  | 
4174  | 0  |                   sleeptime = waittime - backend_mintime;  | 
4175  |  | 
  | 
4176  | 0  |                 if (ecb_expect_true (sleeptime > EV_TS_CONST (0.)))  | 
4177  | 0  |                   { | 
4178  | 0  |                     ev_sleep (sleeptime);  | 
4179  | 0  |                     waittime -= sleeptime;  | 
4180  | 0  |                   }  | 
4181  | 0  |               }  | 
4182  | 0  |           }  | 
4183  |  | 
  | 
4184  | 0  | #if EV_FEATURE_API  | 
4185  | 0  |         ++loop_count;  | 
4186  | 0  | #endif  | 
4187  | 0  |         assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */  | 
4188  | 0  |         backend_poll (EV_A_ waittime);  | 
4189  | 0  |         assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */  | 
4190  |  |  | 
4191  | 0  |         pipe_write_wanted = 0; /* just an optimisation, no fence needed */  | 
4192  |  | 
  | 
4193  | 0  |         ECB_MEMORY_FENCE_ACQUIRE;  | 
4194  | 0  |         if (pipe_write_skipped)  | 
4195  | 0  |           { | 
4196  | 0  |             assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w))); | 
4197  | 0  |             ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);  | 
4198  | 0  |           }  | 
4199  |  |  | 
4200  |  |         /* update ev_rt_now, do magic */  | 
4201  | 0  |         time_update (EV_A_ waittime + sleeptime);  | 
4202  | 0  |       }  | 
4203  |  |  | 
4204  |  |       /* queue pending timers and reschedule them */  | 
4205  | 0  |       timers_reify (EV_A); /* relative timers called last */  | 
4206  | 0  | #if EV_PERIODIC_ENABLE  | 
4207  | 0  |       periodics_reify (EV_A); /* absolute timers called first */  | 
4208  | 0  | #endif  | 
4209  |  | 
  | 
4210  | 0  | #if EV_IDLE_ENABLE  | 
4211  |  |       /* queue idle watchers unless other events are pending */  | 
4212  | 0  |       idle_reify (EV_A);  | 
4213  | 0  | #endif  | 
4214  |  | 
  | 
4215  | 0  | #if EV_CHECK_ENABLE  | 
4216  |  |       /* queue check watchers, to be executed first */  | 
4217  | 0  |       if (ecb_expect_false (checkcnt))  | 
4218  | 0  |         queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);  | 
4219  | 0  | #endif  | 
4220  |  | 
  | 
4221  | 0  |       EV_INVOKE_PENDING;  | 
4222  | 0  |     }  | 
4223  | 0  |   while (ecb_expect_true (  | 
4224  | 0  |     activecnt  | 
4225  | 0  |     && !loop_done  | 
4226  | 0  |     && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT))  | 
4227  | 0  |   ));  | 
4228  |  |  | 
4229  | 0  |   if (loop_done == EVBREAK_ONE)  | 
4230  | 0  |     loop_done = EVBREAK_CANCEL;  | 
4231  |  | 
  | 
4232  | 0  | #if EV_FEATURE_API  | 
4233  | 0  |   --loop_depth;  | 
4234  | 0  | #endif  | 
4235  |  | 
  | 
4236  | 0  |   return activecnt;  | 
4237  | 0  | }  | 
4238  |  |  | 
4239  |  | void  | 
4240  |  | ev_break (EV_P_ int how) EV_NOEXCEPT  | 
4241  | 0  | { | 
4242  | 0  |   loop_done = how;  | 
4243  | 0  | }  | 
4244  |  |  | 
4245  |  | void  | 
4246  |  | ev_ref (EV_P) EV_NOEXCEPT  | 
4247  | 5.00k  | { | 
4248  | 5.00k  |   ++activecnt;  | 
4249  | 5.00k  | }  | 
4250  |  |  | 
4251  |  | void  | 
4252  |  | ev_unref (EV_P) EV_NOEXCEPT  | 
4253  | 5.00k  | { | 
4254  | 5.00k  |   --activecnt;  | 
4255  | 5.00k  | }  | 
4256  |  |  | 
4257  |  | int  | 
4258  |  | ev_activecnt (EV_P) EV_NOEXCEPT  | 
4259  | 0  | { | 
4260  | 0  |   return activecnt;  | 
4261  | 0  | }  | 
4262  |  |  | 
4263  |  | void  | 
4264  |  | ev_now_update (EV_P) EV_NOEXCEPT  | 
4265  | 0  | { | 
4266  | 0  |   time_update (EV_A_ EV_TSTAMP_HUGE);  | 
4267  | 0  | }  | 
4268  |  |  | 
4269  |  | void  | 
4270  |  | ev_suspend (EV_P) EV_NOEXCEPT  | 
4271  | 0  | { | 
4272  | 0  |   ev_now_update (EV_A);  | 
4273  | 0  | }  | 
4274  |  |  | 
4275  |  | void  | 
4276  |  | ev_resume (EV_P) EV_NOEXCEPT  | 
4277  | 0  | { | 
4278  | 0  |   ev_tstamp mn_prev = mn_now;  | 
4279  |  | 
  | 
4280  | 0  |   ev_now_update (EV_A);  | 
4281  | 0  |   timers_reschedule (EV_A_ mn_now - mn_prev);  | 
4282  | 0  | #if EV_PERIODIC_ENABLE  | 
4283  |  |   /* TODO: really do this? */  | 
4284  | 0  |   periodics_reschedule (EV_A);  | 
4285  | 0  | #endif  | 
4286  | 0  | }  | 
4287  |  |  | 
4288  |  | /*****************************************************************************/  | 
4289  |  | /* singly-linked list management, used when the expected list length is short */  | 
4290  |  |  | 
4291  |  | inline_size void  | 
4292  |  | wlist_add (WL *head, WL elem)  | 
4293  | 3.34k  | { | 
4294  | 3.34k  |   elem->next = *head;  | 
4295  | 3.34k  |   *head = elem;  | 
4296  | 3.34k  | }  | 
4297  |  |  | 
4298  |  | inline_size void  | 
4299  |  | wlist_del (WL *head, WL elem)  | 
4300  | 1.65k  | { | 
4301  | 1.65k  |   while (*head)  | 
4302  | 1.65k  |     { | 
4303  | 1.65k  |       if (ecb_expect_true (*head == elem))  | 
4304  | 1.65k  |         { | 
4305  | 1.65k  |           *head = elem->next;  | 
4306  | 1.65k  |           break;  | 
4307  | 1.65k  |         }  | 
4308  |  |  | 
4309  | 0  |       head = &(*head)->next;  | 
4310  | 0  |     }  | 
4311  | 1.65k  | }  | 
4312  |  |  | 
4313  |  | /* internal, faster, version of ev_clear_pending */  | 
4314  |  | inline_speed void  | 
4315  |  | clear_pending (EV_P_ W w)  | 
4316  | 1.65k  | { | 
4317  | 1.65k  |   if (w->pending)  | 
4318  | 0  |     { | 
4319  | 0  |       pendings [ABSPRI (w)][w->pending - 1].w = (W)&pending_w;  | 
4320  | 0  |       w->pending = 0;  | 
4321  | 0  |     }  | 
4322  | 1.65k  | }  | 
4323  |  |  | 
4324  |  | int  | 
4325  |  | ev_clear_pending (EV_P_ void *w) EV_NOEXCEPT  | 
4326  | 0  | { | 
4327  | 0  |   W w_ = (W)w;  | 
4328  | 0  |   int pending = w_->pending;  | 
4329  |  | 
  | 
4330  | 0  |   if (ecb_expect_true (pending))  | 
4331  | 0  |     { | 
4332  | 0  |       ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;  | 
4333  | 0  |       p->w = (W)&pending_w;  | 
4334  | 0  |       w_->pending = 0;  | 
4335  | 0  |       return p->events;  | 
4336  | 0  |     }  | 
4337  | 0  |   else  | 
4338  | 0  |     return 0;  | 
4339  | 0  | }  | 
4340  |  |  | 
4341  |  | inline_size void  | 
4342  |  | pri_adjust (EV_P_ W w)  | 
4343  | 3.34k  | { | 
4344  | 3.34k  |   int pri = ev_priority (w);  | 
4345  | 3.34k  |   pri = pri < EV_MINPRI ? EV_MINPRI : pri;  | 
4346  | 3.34k  |   pri = pri > EV_MAXPRI ? EV_MAXPRI : pri;  | 
4347  | 3.34k  |   ev_set_priority (w, pri);  | 
4348  | 3.34k  | }  | 
4349  |  |  | 
4350  |  | inline_speed void  | 
4351  |  | ev_start (EV_P_ W w, int active)  | 
4352  | 3.34k  | { | 
4353  | 3.34k  |   pri_adjust (EV_A_ w);  | 
4354  | 3.34k  |   w->active = active;  | 
4355  | 3.34k  |   ev_ref (EV_A);  | 
4356  | 3.34k  | }  | 
4357  |  |  | 
4358  |  | inline_size void  | 
4359  |  | ev_stop (EV_P_ W w)  | 
4360  | 1.65k  | { | 
4361  | 1.65k  |   ev_unref (EV_A);  | 
4362  | 1.65k  |   w->active = 0;  | 
4363  | 1.65k  | }  | 
4364  |  |  | 
4365  |  | /*****************************************************************************/  | 
4366  |  |  | 
4367  |  | ecb_noinline  | 
4368  |  | void  | 
4369  |  | ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT  | 
4370  | 1.67k  | { | 
4371  | 1.67k  |   int fd = w->fd;  | 
4372  |  |  | 
4373  | 1.67k  |   if (ecb_expect_false (ev_is_active (w)))  | 
4374  | 0  |     return;  | 
4375  |  |  | 
4376  | 1.67k  |   assert (("libev: ev_io_start called with negative fd", fd >= 0)); | 
4377  | 1.67k  |   assert (("libev: ev_io_start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE)))); | 
4378  |  |  | 
4379  |  | #if EV_VERIFY >= 2  | 
4380  |  |   assert (("libev: ev_io_start called on watcher with invalid fd", fd_valid (fd))); | 
4381  |  | #endif  | 
4382  | 1.67k  |   EV_FREQUENT_CHECK;  | 
4383  |  |  | 
4384  | 1.67k  |   ev_start (EV_A_ (W)w, 1);  | 
4385  | 1.67k  |   array_needsize (ANFD, anfds, anfdmax, fd + 1, array_needsize_zerofill);  | 
4386  | 1.67k  |   wlist_add (&anfds[fd].head, (WL)w);  | 
4387  |  |  | 
4388  |  |   /* common bug, apparently */  | 
4389  | 1.67k  |   assert (("libev: ev_io_start called with corrupted watcher", ((WL)w)->next != (WL)w)); | 
4390  |  |  | 
4391  | 1.67k  |   fd_change (EV_A_ fd, w->events & EV__IOFDSET | EV_ANFD_REIFY);  | 
4392  | 1.67k  |   w->events &= ~EV__IOFDSET;  | 
4393  |  |  | 
4394  | 1.67k  |   EV_FREQUENT_CHECK;  | 
4395  | 1.67k  | }  | 
4396  |  |  | 
4397  |  | ecb_noinline  | 
4398  |  | void  | 
4399  |  | ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT  | 
4400  | 0  | { | 
4401  | 0  |   clear_pending (EV_A_ (W)w);  | 
4402  | 0  |   if (ecb_expect_false (!ev_is_active (w)))  | 
4403  | 0  |     return;  | 
4404  |  |  | 
4405  | 0  |   assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax)); | 
4406  |  |  | 
4407  |  | #if EV_VERIFY >= 2  | 
4408  |  |   assert (("libev: ev_io_stop called on watcher with invalid fd", fd_valid (w->fd))); | 
4409  |  | #endif  | 
4410  | 0  |   EV_FREQUENT_CHECK;  | 
4411  |  | 
  | 
4412  | 0  |   wlist_del (&anfds[w->fd].head, (WL)w);  | 
4413  | 0  |   ev_stop (EV_A_ (W)w);  | 
4414  |  | 
  | 
4415  | 0  |   fd_change (EV_A_ w->fd, EV_ANFD_REIFY);  | 
4416  |  | 
  | 
4417  | 0  |   EV_FREQUENT_CHECK;  | 
4418  | 0  | }  | 
4419  |  |  | 
4420  |  | /*  | 
4421  |  |  * Modelled after fd_kill(), which is called when the library detects an  | 
4422  |  |  * invalid fd. Feeding events into stopped watchers is ok.  | 
4423  |  |  * Since every watcher is stopped, the select/poll/epoll/whatever  | 
4424  |  |  * backend is properly updated.  | 
4425  |  |  */  | 
4426  |  | ecb_noinline  | 
4427  |  | void  | 
4428  |  | ev_io_closing (EV_P_ int fd) EV_NOEXCEPT  | 
4429  | 0  | { | 
4430  | 0  |   ev_io *w;  | 
4431  | 0  |   if (fd < 0 || fd >= anfdmax)  | 
4432  | 0  |     return;  | 
4433  |  |  | 
4434  | 0  |   fd_kill(EV_A_ fd);  | 
4435  | 0  | }  | 
4436  |  |  | 
4437  |  | ecb_noinline  | 
4438  |  | void  | 
4439  |  | ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT  | 
4440  | 0  | { | 
4441  | 0  |   if (ecb_expect_false (ev_is_active (w)))  | 
4442  | 0  |     return;  | 
4443  |  |  | 
4444  | 0  |   ev_at (w) += mn_now;  | 
4445  |  | 
  | 
4446  | 0  |   assert (("libev: ev_timer_start called with negative timer repeat value", w->repeat >= 0.)); | 
4447  |  |  | 
4448  | 0  |   EV_FREQUENT_CHECK;  | 
4449  |  | 
  | 
4450  | 0  |   ++timercnt;  | 
4451  | 0  |   ev_start (EV_A_ (W)w, timercnt + HEAP0 - 1);  | 
4452  | 0  |   array_needsize (ANHE, timers, timermax, ev_active (w) + 1, array_needsize_noinit);  | 
4453  | 0  |   ANHE_w (timers [ev_active (w)]) = (WT)w;  | 
4454  | 0  |   ANHE_at_cache (timers [ev_active (w)]);  | 
4455  | 0  |   upheap (timers, ev_active (w));  | 
4456  |  | 
  | 
4457  | 0  |   EV_FREQUENT_CHECK;  | 
4458  |  |  | 
4459  |  |   /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/ | 
4460  | 0  | }  | 
4461  |  |  | 
4462  |  | ecb_noinline  | 
4463  |  | void  | 
4464  |  | ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT  | 
4465  | 0  | { | 
4466  | 0  |   clear_pending (EV_A_ (W)w);  | 
4467  | 0  |   if (ecb_expect_false (!ev_is_active (w)))  | 
4468  | 0  |     return;  | 
4469  |  |  | 
4470  | 0  |   EV_FREQUENT_CHECK;  | 
4471  |  | 
  | 
4472  | 0  |   { | 
4473  | 0  |     int active = ev_active (w);  | 
4474  |  | 
  | 
4475  | 0  |     assert (("libev: internal timer heap corruption", ANHE_w (timers [active]) == (WT)w)); | 
4476  |  |  | 
4477  | 0  |     --timercnt;  | 
4478  |  | 
  | 
4479  | 0  |     if (ecb_expect_true (active < timercnt + HEAP0))  | 
4480  | 0  |       { | 
4481  | 0  |         timers [active] = timers [timercnt + HEAP0];  | 
4482  | 0  |         adjustheap (timers, timercnt, active);  | 
4483  | 0  |       }  | 
4484  | 0  |   }  | 
4485  |  |  | 
4486  | 0  |   ev_at (w) -= mn_now;  | 
4487  |  | 
  | 
4488  | 0  |   ev_stop (EV_A_ (W)w);  | 
4489  |  | 
  | 
4490  | 0  |   EV_FREQUENT_CHECK;  | 
4491  | 0  | }  | 
4492  |  |  | 
4493  |  | ecb_noinline  | 
4494  |  | void  | 
4495  |  | ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT  | 
4496  | 0  | { | 
4497  | 0  |   EV_FREQUENT_CHECK;  | 
4498  |  | 
  | 
4499  | 0  |   clear_pending (EV_A_ (W)w);  | 
4500  |  | 
  | 
4501  | 0  |   if (ev_is_active (w))  | 
4502  | 0  |     { | 
4503  | 0  |       if (w->repeat)  | 
4504  | 0  |         { | 
4505  | 0  |           ev_at (w) = mn_now + w->repeat;  | 
4506  | 0  |           ANHE_at_cache (timers [ev_active (w)]);  | 
4507  | 0  |           adjustheap (timers, timercnt, ev_active (w));  | 
4508  | 0  |         }  | 
4509  | 0  |       else  | 
4510  | 0  |         ev_timer_stop (EV_A_ w);  | 
4511  | 0  |     }  | 
4512  | 0  |   else if (w->repeat)  | 
4513  | 0  |     { | 
4514  | 0  |       ev_at (w) = w->repeat;  | 
4515  | 0  |       ev_timer_start (EV_A_ w);  | 
4516  | 0  |     }  | 
4517  |  | 
  | 
4518  | 0  |   EV_FREQUENT_CHECK;  | 
4519  | 0  | }  | 
4520  |  |  | 
4521  |  | ev_tstamp  | 
4522  |  | ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT  | 
4523  | 0  | { | 
4524  | 0  |   return ev_at (w) - (ev_is_active (w) ? mn_now : EV_TS_CONST (0.));  | 
4525  | 0  | }  | 
4526  |  |  | 
4527  |  | #if EV_PERIODIC_ENABLE  | 
4528  |  | ecb_noinline  | 
4529  |  | void  | 
4530  |  | ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT  | 
4531  | 0  | { | 
4532  | 0  |   if (ecb_expect_false (ev_is_active (w)))  | 
4533  | 0  |     return;  | 
4534  |  |  | 
4535  | 0  | #if EV_USE_TIMERFD  | 
4536  | 0  |   if (timerfd == -2)  | 
4537  | 0  |     evtimerfd_init (EV_A);  | 
4538  | 0  | #endif  | 
4539  |  | 
  | 
4540  | 0  |   if (w->reschedule_cb)  | 
4541  | 0  |     ev_at (w) = w->reschedule_cb (w, ev_rt_now);  | 
4542  | 0  |   else if (w->interval)  | 
4543  | 0  |     { | 
4544  | 0  |       assert (("libev: ev_periodic_start called with negative interval value", w->interval >= 0.)); | 
4545  | 0  |       periodic_recalc (EV_A_ w);  | 
4546  | 0  |     }  | 
4547  | 0  |   else  | 
4548  | 0  |     ev_at (w) = w->offset;  | 
4549  |  |  | 
4550  | 0  |   EV_FREQUENT_CHECK;  | 
4551  |  | 
  | 
4552  | 0  |   ++periodiccnt;  | 
4553  | 0  |   ev_start (EV_A_ (W)w, periodiccnt + HEAP0 - 1);  | 
4554  | 0  |   array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, array_needsize_noinit);  | 
4555  | 0  |   ANHE_w (periodics [ev_active (w)]) = (WT)w;  | 
4556  | 0  |   ANHE_at_cache (periodics [ev_active (w)]);  | 
4557  | 0  |   upheap (periodics, ev_active (w));  | 
4558  |  | 
  | 
4559  | 0  |   EV_FREQUENT_CHECK;  | 
4560  |  |  | 
4561  |  |   /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/ | 
4562  | 0  | }  | 
4563  |  |  | 
4564  |  | ecb_noinline  | 
4565  |  | void  | 
4566  |  | ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT  | 
4567  | 0  | { | 
4568  | 0  |   clear_pending (EV_A_ (W)w);  | 
4569  | 0  |   if (ecb_expect_false (!ev_is_active (w)))  | 
4570  | 0  |     return;  | 
4571  |  |  | 
4572  | 0  |   EV_FREQUENT_CHECK;  | 
4573  |  | 
  | 
4574  | 0  |   { | 
4575  | 0  |     int active = ev_active (w);  | 
4576  |  | 
  | 
4577  | 0  |     assert (("libev: internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w)); | 
4578  |  |  | 
4579  | 0  |     --periodiccnt;  | 
4580  |  | 
  | 
4581  | 0  |     if (ecb_expect_true (active < periodiccnt + HEAP0))  | 
4582  | 0  |       { | 
4583  | 0  |         periodics [active] = periodics [periodiccnt + HEAP0];  | 
4584  | 0  |         adjustheap (periodics, periodiccnt, active);  | 
4585  | 0  |       }  | 
4586  | 0  |   }  | 
4587  |  |  | 
4588  | 0  |   ev_stop (EV_A_ (W)w);  | 
4589  |  | 
  | 
4590  | 0  |   EV_FREQUENT_CHECK;  | 
4591  | 0  | }  | 
4592  |  |  | 
4593  |  | ecb_noinline  | 
4594  |  | void  | 
4595  |  | ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT  | 
4596  | 0  | { | 
4597  |  |   /* TODO: use adjustheap and recalculation */  | 
4598  | 0  |   ev_periodic_stop (EV_A_ w);  | 
4599  | 0  |   ev_periodic_start (EV_A_ w);  | 
4600  | 0  | }  | 
4601  |  | #endif  | 
4602  |  |  | 
4603  |  | #ifndef SA_RESTART  | 
4604  |  | # define SA_RESTART 0  | 
4605  |  | #endif  | 
4606  |  |  | 
4607  |  | #if EV_SIGNAL_ENABLE  | 
4608  |  |  | 
4609  |  | ecb_noinline  | 
4610  |  | void  | 
4611  |  | ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT  | 
4612  | 1.67k  | { | 
4613  | 1.67k  |   if (ecb_expect_false (ev_is_active (w)))  | 
4614  | 0  |     return;  | 
4615  |  |  | 
4616  | 1.67k  |   assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG)); | 
4617  |  |  | 
4618  | 1.67k  | #if EV_MULTIPLICITY  | 
4619  | 1.67k  |   assert (("libev: a signal must not be attached to two different loops", | 
4620  | 1.67k  |            !signals [w->signum - 1].loop || signals [w->signum - 1].loop == loop));  | 
4621  |  |  | 
4622  | 1.67k  |   signals [w->signum - 1].loop = EV_A;  | 
4623  | 1.67k  |   ECB_MEMORY_FENCE_RELEASE;  | 
4624  | 1.67k  | #endif  | 
4625  |  |  | 
4626  | 1.67k  |   EV_FREQUENT_CHECK;  | 
4627  |  |  | 
4628  | 1.67k  | #if EV_USE_SIGNALFD  | 
4629  | 1.67k  |   if (sigfd == -2)  | 
4630  | 0  |     { | 
4631  | 0  |       sigfd = signalfd (-1, &sigfd_set, SFD_NONBLOCK | SFD_CLOEXEC);  | 
4632  | 0  |       if (sigfd < 0 && errno == EINVAL)  | 
4633  | 0  |         sigfd = signalfd (-1, &sigfd_set, 0); /* retry without flags */  | 
4634  |  | 
  | 
4635  | 0  |       if (sigfd >= 0)  | 
4636  | 0  |         { | 
4637  | 0  |           fd_intern (sigfd); /* doing it twice will not hurt */  | 
4638  |  | 
  | 
4639  | 0  |           sigemptyset (&sigfd_set);  | 
4640  |  | 
  | 
4641  | 0  |           ev_io_init (&sigfd_w, sigfdcb, sigfd, EV_READ);  | 
4642  | 0  |           ev_set_priority (&sigfd_w, EV_MAXPRI);  | 
4643  | 0  |           ev_io_start (EV_A_ &sigfd_w);  | 
4644  | 0  |           ev_unref (EV_A); /* signalfd watcher should not keep loop alive */  | 
4645  | 0  |         }  | 
4646  | 0  |     }  | 
4647  |  |  | 
4648  | 1.67k  |   if (sigfd >= 0)  | 
4649  | 0  |     { | 
4650  |  |       /* TODO: check .head */  | 
4651  | 0  |       sigaddset (&sigfd_set, w->signum);  | 
4652  | 0  |       pthread_sigmask (SIG_BLOCK, &sigfd_set, 0);  | 
4653  |  | 
  | 
4654  | 0  |       signalfd (sigfd, &sigfd_set, 0);  | 
4655  | 0  |     }  | 
4656  | 1.67k  | #endif  | 
4657  |  |  | 
4658  | 1.67k  |   ev_start (EV_A_ (W)w, 1);  | 
4659  | 1.67k  |   wlist_add (&signals [w->signum - 1].head, (WL)w);  | 
4660  |  |  | 
4661  | 1.67k  |   if (!((WL)w)->next)  | 
4662  | 1.67k  | # if EV_USE_SIGNALFD  | 
4663  | 1.67k  |     if (sigfd < 0) /*TODO*/  | 
4664  | 1.67k  | # endif  | 
4665  | 1.67k  |       { | 
4666  |  | # ifdef _WIN32  | 
4667  |  |         evpipe_init (EV_A);  | 
4668  |  |  | 
4669  |  |         signal (w->signum, ev_sighandler);  | 
4670  |  | # else  | 
4671  | 1.67k  |         struct sigaction sa;  | 
4672  |  |  | 
4673  | 1.67k  |         evpipe_init (EV_A);  | 
4674  |  |  | 
4675  | 1.67k  |         sa.sa_handler = ev_sighandler;  | 
4676  | 1.67k  |         sigfillset (&sa.sa_mask);  | 
4677  | 1.67k  |         sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */  | 
4678  | 1.67k  |         sigaction (w->signum, &sa, 0);  | 
4679  |  |  | 
4680  | 1.67k  |         if (origflags & EVFLAG_NOSIGMASK)  | 
4681  | 0  |           { | 
4682  | 0  |             sigemptyset (&sa.sa_mask);  | 
4683  | 0  |             sigaddset (&sa.sa_mask, w->signum);  | 
4684  | 0  |             pthread_sigmask (SIG_UNBLOCK, &sa.sa_mask, 0);  | 
4685  | 0  |           }  | 
4686  | 1.67k  | #endif  | 
4687  | 1.67k  |       }  | 
4688  |  |  | 
4689  | 1.67k  |   EV_FREQUENT_CHECK;  | 
4690  | 1.67k  | }  | 
4691  |  |  | 
4692  |  | ecb_noinline  | 
4693  |  | void  | 
4694  |  | ev_signal_stop (EV_P_ ev_signal *w) EV_NOEXCEPT  | 
4695  | 1.65k  | { | 
4696  | 1.65k  |   clear_pending (EV_A_ (W)w);  | 
4697  | 1.65k  |   if (ecb_expect_false (!ev_is_active (w)))  | 
4698  | 0  |     return;  | 
4699  |  |  | 
4700  | 1.65k  |   EV_FREQUENT_CHECK;  | 
4701  |  |  | 
4702  | 1.65k  |   wlist_del (&signals [w->signum - 1].head, (WL)w);  | 
4703  | 1.65k  |   ev_stop (EV_A_ (W)w);  | 
4704  |  |  | 
4705  | 1.65k  |   if (!signals [w->signum - 1].head)  | 
4706  | 1.65k  |     { | 
4707  | 1.65k  | #if EV_MULTIPLICITY  | 
4708  | 1.65k  |       signals [w->signum - 1].loop = 0; /* unattach from signal */  | 
4709  | 1.65k  | #endif  | 
4710  | 1.65k  | #if EV_USE_SIGNALFD  | 
4711  | 1.65k  |       if (sigfd >= 0)  | 
4712  | 0  |         { | 
4713  | 0  |           sigset_t ss;  | 
4714  |  | 
  | 
4715  | 0  |           sigemptyset (&ss);  | 
4716  | 0  |           sigaddset (&ss, w->signum);  | 
4717  | 0  |           sigdelset (&sigfd_set, w->signum);  | 
4718  |  | 
  | 
4719  | 0  |           signalfd (sigfd, &sigfd_set, 0);  | 
4720  | 0  |           pthread_sigmask (SIG_UNBLOCK, &ss, 0);  | 
4721  | 0  |         }  | 
4722  | 1.65k  |       else  | 
4723  | 1.65k  | #endif  | 
4724  | 1.65k  |         signal (w->signum, SIG_DFL);  | 
4725  | 1.65k  |     }  | 
4726  |  |  | 
4727  | 1.65k  |   EV_FREQUENT_CHECK;  | 
4728  | 1.65k  | }  | 
4729  |  |  | 
4730  |  | #endif  | 
4731  |  |  | 
4732  |  | #if EV_CHILD_ENABLE  | 
4733  |  |  | 
4734  |  | void  | 
4735  |  | ev_child_start (EV_P_ ev_child *w) EV_NOEXCEPT  | 
4736  | 0  | { | 
4737  | 0  | #if EV_MULTIPLICITY  | 
4738  | 0  |   assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr)); | 
4739  | 0  | #endif  | 
4740  | 0  |   if (ecb_expect_false (ev_is_active (w)))  | 
4741  | 0  |     return;  | 
4742  |  |  | 
4743  | 0  |   EV_FREQUENT_CHECK;  | 
4744  |  | 
  | 
4745  | 0  |   ev_start (EV_A_ (W)w, 1);  | 
4746  | 0  |   wlist_add (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w);  | 
4747  |  | 
  | 
4748  | 0  |   EV_FREQUENT_CHECK;  | 
4749  | 0  | }  | 
4750  |  |  | 
4751  |  | void  | 
4752  |  | ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT  | 
4753  | 0  | { | 
4754  | 0  |   clear_pending (EV_A_ (W)w);  | 
4755  | 0  |   if (ecb_expect_false (!ev_is_active (w)))  | 
4756  | 0  |     return;  | 
4757  |  |  | 
4758  | 0  |   EV_FREQUENT_CHECK;  | 
4759  |  | 
  | 
4760  | 0  |   wlist_del (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w);  | 
4761  | 0  |   ev_stop (EV_A_ (W)w);  | 
4762  |  | 
  | 
4763  | 0  |   EV_FREQUENT_CHECK;  | 
4764  | 0  | }  | 
4765  |  |  | 
4766  |  | #endif  | 
4767  |  |  | 
4768  |  | #if EV_STAT_ENABLE  | 
4769  |  |  | 
4770  |  | # ifdef _WIN32  | 
4771  |  | #  undef lstat  | 
4772  |  | #  define lstat(a,b) _stati64 (a,b)  | 
4773  |  | # endif  | 
4774  |  |  | 
4775  | 0  | #define DEF_STAT_INTERVAL  5.0074891  | 
4776  | 0  | #define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */  | 
4777  | 0  | #define MIN_STAT_INTERVAL  0.1074891  | 
4778  |  |  | 
4779  |  | ecb_noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents);  | 
4780  |  |  | 
4781  |  | #if EV_USE_INOTIFY  | 
4782  |  |  | 
4783  |  | /* the * 2 is to allow for alignment padding, which for some reason is >> 8 */  | 
4784  |  | # define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX)  | 
4785  |  |  | 
4786  |  | ecb_noinline  | 
4787  |  | static void  | 
4788  |  | infy_add (EV_P_ ev_stat *w)  | 
4789  | 0  | { | 
4790  | 0  |   w->wd = inotify_add_watch (fs_fd, w->path,  | 
4791  | 0  |                              IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY  | 
4792  | 0  |                              | IN_CREATE | IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO  | 
4793  | 0  |                              | IN_DONT_FOLLOW | IN_MASK_ADD);  | 
4794  |  | 
  | 
4795  | 0  |   if (w->wd >= 0)  | 
4796  | 0  |     { | 
4797  | 0  |       struct statfs sfs;  | 
4798  |  |  | 
4799  |  |       /* now local changes will be tracked by inotify, but remote changes won't */  | 
4800  |  |       /* unless the filesystem is known to be local, we therefore still poll */  | 
4801  |  |       /* also do poll on <2.6.25, but with normal frequency */  | 
4802  |  | 
  | 
4803  | 0  |       if (!fs_2625)  | 
4804  | 0  |         w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;  | 
4805  | 0  |       else if (!statfs (w->path, &sfs)  | 
4806  | 0  |                && (sfs.f_type == 0x1373 /* devfs */  | 
4807  | 0  |                    || sfs.f_type == 0x4006 /* fat */  | 
4808  | 0  |                    || sfs.f_type == 0x4d44 /* msdos */  | 
4809  | 0  |                    || sfs.f_type == 0xEF53 /* ext2/3 */  | 
4810  | 0  |                    || sfs.f_type == 0x72b6 /* jffs2 */  | 
4811  | 0  |                    || sfs.f_type == 0x858458f6 /* ramfs */  | 
4812  | 0  |                    || sfs.f_type == 0x5346544e /* ntfs */  | 
4813  | 0  |                    || sfs.f_type == 0x3153464a /* jfs */  | 
4814  | 0  |                    || sfs.f_type == 0x9123683e /* btrfs */  | 
4815  | 0  |                    || sfs.f_type == 0x52654973 /* reiser3 */  | 
4816  | 0  |                    || sfs.f_type == 0x01021994 /* tmpfs */  | 
4817  | 0  |                    || sfs.f_type == 0x58465342 /* xfs */))  | 
4818  | 0  |         w->timer.repeat = 0.; /* filesystem is local, kernel new enough */  | 
4819  | 0  |       else  | 
4820  | 0  |         w->timer.repeat = w->interval ? w->interval : NFS_STAT_INTERVAL; /* remote, use reduced frequency */  | 
4821  | 0  |     }  | 
4822  | 0  |   else  | 
4823  | 0  |     { | 
4824  |  |       /* can't use inotify, continue to stat */  | 
4825  | 0  |       w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;  | 
4826  |  |  | 
4827  |  |       /* if path is not there, monitor some parent directory for speedup hints */  | 
4828  |  |       /* note that exceeding the hardcoded path limit is not a correctness issue, */  | 
4829  |  |       /* but an efficiency issue only */  | 
4830  | 0  |       if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096)  | 
4831  | 0  |         { | 
4832  | 0  |           char path [4096];  | 
4833  | 0  |           strcpy (path, w->path);  | 
4834  |  | 
  | 
4835  | 0  |           do  | 
4836  | 0  |             { | 
4837  | 0  |               int mask = IN_MASK_ADD | IN_DELETE_SELF | IN_MOVE_SELF  | 
4838  | 0  |                        | (errno == EACCES ? IN_ATTRIB : IN_CREATE | IN_MOVED_TO);  | 
4839  |  | 
  | 
4840  | 0  |               char *pend = strrchr (path, '/');  | 
4841  |  | 
  | 
4842  | 0  |               if (!pend || pend == path)  | 
4843  | 0  |                 break;  | 
4844  |  |  | 
4845  | 0  |               *pend = 0;  | 
4846  | 0  |               w->wd = inotify_add_watch (fs_fd, path, mask);  | 
4847  | 0  |             }  | 
4848  | 0  |           while (w->wd < 0 && (errno == ENOENT || errno == EACCES));  | 
4849  | 0  |         }  | 
4850  | 0  |     }  | 
4851  |  |  | 
4852  | 0  |   if (w->wd >= 0)  | 
4853  | 0  |     wlist_add (&fs_hash [w->wd & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w);  | 
4854  |  |  | 
4855  |  |   /* now re-arm timer, if required */  | 
4856  | 0  |   if (ev_is_active (&w->timer)) ev_ref (EV_A);  | 
4857  | 0  |   ev_timer_again (EV_A_ &w->timer);  | 
4858  | 0  |   if (ev_is_active (&w->timer)) ev_unref (EV_A);  | 
4859  | 0  | }  | 
4860  |  |  | 
4861  |  | ecb_noinline  | 
4862  |  | static void  | 
4863  |  | infy_del (EV_P_ ev_stat *w)  | 
4864  | 0  | { | 
4865  | 0  |   int slot;  | 
4866  | 0  |   int wd = w->wd;  | 
4867  |  | 
  | 
4868  | 0  |   if (wd < 0)  | 
4869  | 0  |     return;  | 
4870  |  |  | 
4871  | 0  |   w->wd = -2;  | 
4872  | 0  |   slot = wd & ((EV_INOTIFY_HASHSIZE) - 1);  | 
4873  | 0  |   wlist_del (&fs_hash [slot].head, (WL)w);  | 
4874  |  |  | 
4875  |  |   /* remove this watcher, if others are watching it, they will rearm */  | 
4876  | 0  |   inotify_rm_watch (fs_fd, wd);  | 
4877  | 0  | }  | 
4878  |  |  | 
4879  |  | ecb_noinline  | 
4880  |  | static void  | 
4881  |  | infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)  | 
4882  | 0  | { | 
4883  | 0  |   if (slot < 0)  | 
4884  |  |     /* overflow, need to check for all hash slots */  | 
4885  | 0  |     for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot)  | 
4886  | 0  |       infy_wd (EV_A_ slot, wd, ev);  | 
4887  | 0  |   else  | 
4888  | 0  |     { | 
4889  | 0  |       WL w_;  | 
4890  |  | 
  | 
4891  | 0  |       for (w_ = fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head; w_; )  | 
4892  | 0  |         { | 
4893  | 0  |           ev_stat *w = (ev_stat *)w_;  | 
4894  | 0  |           w_ = w_->next; /* lets us remove this watcher and all before it */  | 
4895  |  | 
  | 
4896  | 0  |           if (w->wd == wd || wd == -1)  | 
4897  | 0  |             { | 
4898  | 0  |               if (ev->mask & (IN_IGNORED | IN_UNMOUNT | IN_DELETE_SELF))  | 
4899  | 0  |                 { | 
4900  | 0  |                   wlist_del (&fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w);  | 
4901  | 0  |                   w->wd = -1;  | 
4902  | 0  |                   infy_add (EV_A_ w); /* re-add, no matter what */  | 
4903  | 0  |                 }  | 
4904  |  | 
  | 
4905  | 0  |               stat_timer_cb (EV_A_ &w->timer, 0);  | 
4906  | 0  |             }  | 
4907  | 0  |         }  | 
4908  | 0  |     }  | 
4909  | 0  | }  | 
4910  |  |  | 
4911  |  | static void  | 
4912  |  | infy_cb (EV_P_ ev_io *w, int revents)  | 
4913  | 0  | { | 
4914  | 0  |   char buf [EV_INOTIFY_BUFSIZE];  | 
4915  | 0  |   int ofs;  | 
4916  | 0  |   int len = read (fs_fd, buf, sizeof (buf));  | 
4917  |  | 
  | 
4918  | 0  |   for (ofs = 0; ofs < len; )  | 
4919  | 0  |     { | 
4920  | 0  |       struct inotify_event *ev = (struct inotify_event *)(buf + ofs);  | 
4921  | 0  |       infy_wd (EV_A_ ev->wd, ev->wd, ev);  | 
4922  | 0  |       ofs += sizeof (struct inotify_event) + ev->len;  | 
4923  | 0  |     }  | 
4924  | 0  | }  | 
4925  |  |  | 
4926  |  | inline_size ecb_cold  | 
4927  |  | void  | 
4928  |  | ev_check_2625 (EV_P)  | 
4929  | 0  | { | 
4930  |  |   /* kernels < 2.6.25 are borked  | 
4931  |  |    * http://www.ussg.indiana.edu/hypermail/linux/kernel/0711.3/1208.html  | 
4932  |  |    */  | 
4933  | 0  |   if (ev_linux_version () < 0x020619)  | 
4934  | 0  |     return;  | 
4935  |  |  | 
4936  | 0  |   fs_2625 = 1;  | 
4937  | 0  | }  | 
4938  |  |  | 
4939  |  | inline_size int  | 
4940  |  | infy_newfd (void)  | 
4941  | 0  | { | 
4942  | 0  | #if defined IN_CLOEXEC && defined IN_NONBLOCK  | 
4943  | 0  |   int fd = inotify_init1 (IN_CLOEXEC | IN_NONBLOCK);  | 
4944  | 0  |   if (fd >= 0)  | 
4945  | 0  |     return fd;  | 
4946  | 0  | #endif  | 
4947  | 0  |   return inotify_init ();  | 
4948  | 0  | }  | 
4949  |  |  | 
4950  |  | inline_size void  | 
4951  |  | infy_init (EV_P)  | 
4952  | 0  | { | 
4953  | 0  |   if (fs_fd != -2)  | 
4954  | 0  |     return;  | 
4955  |  |  | 
4956  | 0  |   fs_fd = -1;  | 
4957  |  | 
  | 
4958  | 0  |   ev_check_2625 (EV_A);  | 
4959  |  | 
  | 
4960  | 0  |   fs_fd = infy_newfd ();  | 
4961  |  | 
  | 
4962  | 0  |   if (fs_fd >= 0)  | 
4963  | 0  |     { | 
4964  | 0  |       fd_intern (fs_fd);  | 
4965  | 0  |       ev_io_init (&fs_w, infy_cb, fs_fd, EV_READ);  | 
4966  | 0  |       ev_set_priority (&fs_w, EV_MAXPRI);  | 
4967  | 0  |       ev_io_start (EV_A_ &fs_w);  | 
4968  | 0  |       ev_unref (EV_A);  | 
4969  | 0  |     }  | 
4970  | 0  | }  | 
4971  |  |  | 
4972  |  | inline_size void  | 
4973  |  | infy_fork (EV_P)  | 
4974  | 0  | { | 
4975  | 0  |   int slot;  | 
4976  |  | 
  | 
4977  | 0  |   if (fs_fd < 0)  | 
4978  | 0  |     return;  | 
4979  |  |  | 
4980  | 0  |   ev_ref (EV_A);  | 
4981  | 0  |   ev_io_stop (EV_A_ &fs_w);  | 
4982  | 0  |   close (fs_fd);  | 
4983  | 0  |   fs_fd = infy_newfd ();  | 
4984  |  | 
  | 
4985  | 0  |   if (fs_fd >= 0)  | 
4986  | 0  |     { | 
4987  | 0  |       fd_intern (fs_fd);  | 
4988  | 0  |       ev_io_set (&fs_w, fs_fd, EV_READ);  | 
4989  | 0  |       ev_io_start (EV_A_ &fs_w);  | 
4990  | 0  |       ev_unref (EV_A);  | 
4991  | 0  |     }  | 
4992  |  | 
  | 
4993  | 0  |   for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot)  | 
4994  | 0  |     { | 
4995  | 0  |       WL w_ = fs_hash [slot].head;  | 
4996  | 0  |       fs_hash [slot].head = 0;  | 
4997  |  | 
  | 
4998  | 0  |       while (w_)  | 
4999  | 0  |         { | 
5000  | 0  |           ev_stat *w = (ev_stat *)w_;  | 
5001  | 0  |           w_ = w_->next; /* lets us add this watcher */  | 
5002  |  | 
  | 
5003  | 0  |           w->wd = -1;  | 
5004  |  | 
  | 
5005  | 0  |           if (fs_fd >= 0)  | 
5006  | 0  |             infy_add (EV_A_ w); /* re-add, no matter what */  | 
5007  | 0  |           else  | 
5008  | 0  |             { | 
5009  | 0  |               w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;  | 
5010  | 0  |               if (ev_is_active (&w->timer)) ev_ref (EV_A);  | 
5011  | 0  |               ev_timer_again (EV_A_ &w->timer);  | 
5012  | 0  |               if (ev_is_active (&w->timer)) ev_unref (EV_A);  | 
5013  | 0  |             }  | 
5014  | 0  |         }  | 
5015  | 0  |     }  | 
5016  | 0  | }  | 
5017  |  |  | 
5018  |  | #endif  | 
5019  |  |  | 
5020  |  | #ifdef _WIN32  | 
5021  |  | # define EV_LSTAT(p,b) _stati64 (p, b)  | 
5022  |  | #else  | 
5023  |  | # define EV_LSTAT(p,b) lstat (p, b)  | 
5024  |  | #endif  | 
5025  |  |  | 
5026  |  | void  | 
5027  |  | ev_stat_stat (EV_P_ ev_stat *w) EV_NOEXCEPT  | 
5028  | 0  | { | 
5029  | 0  |   if (lstat (w->path, &w->attr) < 0)  | 
5030  | 0  |     w->attr.st_nlink = 0;  | 
5031  | 0  |   else if (!w->attr.st_nlink)  | 
5032  | 0  |     w->attr.st_nlink = 1;  | 
5033  | 0  | }  | 
5034  |  |  | 
5035  |  | ecb_noinline  | 
5036  |  | static void  | 
5037  |  | stat_timer_cb (EV_P_ ev_timer *w_, int revents)  | 
5038  | 0  | { | 
5039  | 0  |   ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));  | 
5040  |  | 
  | 
5041  | 0  |   ev_statdata prev = w->attr;  | 
5042  | 0  |   ev_stat_stat (EV_A_ w);  | 
5043  |  |  | 
5044  |  |   /* memcmp doesn't work on netbsd, they.... do stuff to their struct stat */  | 
5045  | 0  |   if (  | 
5046  | 0  |     prev.st_dev      != w->attr.st_dev  | 
5047  | 0  |     || prev.st_ino   != w->attr.st_ino  | 
5048  | 0  |     || prev.st_mode  != w->attr.st_mode  | 
5049  | 0  |     || prev.st_nlink != w->attr.st_nlink  | 
5050  | 0  |     || prev.st_uid   != w->attr.st_uid  | 
5051  | 0  |     || prev.st_gid   != w->attr.st_gid  | 
5052  | 0  |     || prev.st_rdev  != w->attr.st_rdev  | 
5053  | 0  |     || prev.st_size  != w->attr.st_size  | 
5054  | 0  |     || prev.st_atime != w->attr.st_atime  | 
5055  | 0  |     || prev.st_mtime != w->attr.st_mtime  | 
5056  | 0  |     || prev.st_ctime != w->attr.st_ctime  | 
5057  | 0  | # if HAVE_STRUCT_STAT_ST_MTIM  | 
5058  | 0  |     || prev.st_atim.tv_nsec != w->attr.st_atim.tv_nsec  | 
5059  | 0  |     || prev.st_mtim.tv_nsec != w->attr.st_mtim.tv_nsec  | 
5060  | 0  |     || prev.st_ctim.tv_nsec != w->attr.st_ctim.tv_nsec  | 
5061  |  | # elif HAVE_STRUCT_STAT_ST_MTIMENSEC  | 
5062  |  |     || prev.st_atimensec != w->attr.st_atimensec  | 
5063  |  |     || prev.st_mtimensec != w->attr.st_mtimensec  | 
5064  |  |     || prev.st_ctimensec != w->attr.st_ctimensec  | 
5065  |  | # endif  | 
5066  | 0  |   ) { | 
5067  |  |       /* we only update w->prev on actual differences */  | 
5068  |  |       /* in case we test more often than invoke the callback, */  | 
5069  |  |       /* to ensure that prev is always different to attr */  | 
5070  | 0  |       w->prev = prev;  | 
5071  |  | 
  | 
5072  | 0  |       #if EV_USE_INOTIFY  | 
5073  | 0  |         if (fs_fd >= 0)  | 
5074  | 0  |           { | 
5075  | 0  |             infy_del (EV_A_ w);  | 
5076  | 0  |             infy_add (EV_A_ w);  | 
5077  | 0  |             ev_stat_stat (EV_A_ w); /* avoid race... */  | 
5078  | 0  |           }  | 
5079  | 0  |       #endif  | 
5080  |  | 
  | 
5081  | 0  |       ev_feed_event (EV_A_ w, EV_STAT);  | 
5082  | 0  |     }  | 
5083  | 0  | }  | 
5084  |  |  | 
5085  |  | void  | 
5086  |  | ev_stat_start (EV_P_ ev_stat *w) EV_NOEXCEPT  | 
5087  | 0  | { | 
5088  | 0  |   if (ecb_expect_false (ev_is_active (w)))  | 
5089  | 0  |     return;  | 
5090  |  |  | 
5091  | 0  |   ev_stat_stat (EV_A_ w);  | 
5092  |  | 
  | 
5093  | 0  |   if (w->interval < MIN_STAT_INTERVAL && w->interval)  | 
5094  | 0  |     w->interval = MIN_STAT_INTERVAL;  | 
5095  |  | 
  | 
5096  | 0  |   ev_timer_init (&w->timer, stat_timer_cb, 0., w->interval ? w->interval : DEF_STAT_INTERVAL);  | 
5097  | 0  |   ev_set_priority (&w->timer, ev_priority (w));  | 
5098  |  | 
  | 
5099  | 0  | #if EV_USE_INOTIFY  | 
5100  | 0  |   infy_init (EV_A);  | 
5101  |  | 
  | 
5102  | 0  |   if (fs_fd >= 0)  | 
5103  | 0  |     infy_add (EV_A_ w);  | 
5104  | 0  |   else  | 
5105  | 0  | #endif  | 
5106  | 0  |     { | 
5107  | 0  |       ev_timer_again (EV_A_ &w->timer);  | 
5108  | 0  |       ev_unref (EV_A);  | 
5109  | 0  |     }  | 
5110  |  | 
  | 
5111  | 0  |   ev_start (EV_A_ (W)w, 1);  | 
5112  |  | 
  | 
5113  | 0  |   EV_FREQUENT_CHECK;  | 
5114  | 0  | }  | 
5115  |  |  | 
5116  |  | void  | 
5117  |  | ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT  | 
5118  | 0  | { | 
5119  | 0  |   clear_pending (EV_A_ (W)w);  | 
5120  | 0  |   if (ecb_expect_false (!ev_is_active (w)))  | 
5121  | 0  |     return;  | 
5122  |  |  | 
5123  | 0  |   EV_FREQUENT_CHECK;  | 
5124  |  | 
  | 
5125  | 0  | #if EV_USE_INOTIFY  | 
5126  | 0  |   infy_del (EV_A_ w);  | 
5127  | 0  | #endif  | 
5128  |  | 
  | 
5129  | 0  |   if (ev_is_active (&w->timer))  | 
5130  | 0  |     { | 
5131  | 0  |       ev_ref (EV_A);  | 
5132  | 0  |       ev_timer_stop (EV_A_ &w->timer);  | 
5133  | 0  |     }  | 
5134  |  | 
  | 
5135  | 0  |   ev_stop (EV_A_ (W)w);  | 
5136  |  | 
  | 
5137  | 0  |   EV_FREQUENT_CHECK;  | 
5138  | 0  | }  | 
5139  |  | #endif  | 
5140  |  |  | 
5141  |  | #if EV_IDLE_ENABLE  | 
5142  |  | void  | 
5143  |  | ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT  | 
5144  | 0  | { | 
5145  | 0  |   if (ecb_expect_false (ev_is_active (w)))  | 
5146  | 0  |     return;  | 
5147  |  |  | 
5148  | 0  |   pri_adjust (EV_A_ (W)w);  | 
5149  |  | 
  | 
5150  | 0  |   EV_FREQUENT_CHECK;  | 
5151  |  | 
  | 
5152  | 0  |   { | 
5153  | 0  |     int active = ++idlecnt [ABSPRI (w)];  | 
5154  |  | 
  | 
5155  | 0  |     ++idleall;  | 
5156  | 0  |     ev_start (EV_A_ (W)w, active);  | 
5157  |  | 
  | 
5158  | 0  |     array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active, array_needsize_noinit);  | 
5159  | 0  |     idles [ABSPRI (w)][active - 1] = w;  | 
5160  | 0  |   }  | 
5161  |  | 
  | 
5162  | 0  |   EV_FREQUENT_CHECK;  | 
5163  | 0  | }  | 
5164  |  |  | 
5165  |  | void  | 
5166  |  | ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT  | 
5167  | 0  | { | 
5168  | 0  |   clear_pending (EV_A_ (W)w);  | 
5169  | 0  |   if (ecb_expect_false (!ev_is_active (w)))  | 
5170  | 0  |     return;  | 
5171  |  |  | 
5172  | 0  |   EV_FREQUENT_CHECK;  | 
5173  |  | 
  | 
5174  | 0  |   { | 
5175  | 0  |     int active = ev_active (w);  | 
5176  |  | 
  | 
5177  | 0  |     idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]];  | 
5178  | 0  |     ev_active (idles [ABSPRI (w)][active - 1]) = active;  | 
5179  |  | 
  | 
5180  | 0  |     ev_stop (EV_A_ (W)w);  | 
5181  | 0  |     --idleall;  | 
5182  | 0  |   }  | 
5183  |  | 
  | 
5184  | 0  |   EV_FREQUENT_CHECK;  | 
5185  | 0  | }  | 
5186  |  | #endif  | 
5187  |  |  | 
5188  |  | #if EV_PREPARE_ENABLE  | 
5189  |  | void  | 
5190  |  | ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT  | 
5191  | 0  | { | 
5192  | 0  |   if (ecb_expect_false (ev_is_active (w)))  | 
5193  | 0  |     return;  | 
5194  |  |  | 
5195  | 0  |   EV_FREQUENT_CHECK;  | 
5196  |  | 
  | 
5197  | 0  |   ev_start (EV_A_ (W)w, ++preparecnt);  | 
5198  | 0  |   array_needsize (ev_prepare *, prepares, preparemax, preparecnt, array_needsize_noinit);  | 
5199  | 0  |   prepares [preparecnt - 1] = w;  | 
5200  |  | 
  | 
5201  | 0  |   EV_FREQUENT_CHECK;  | 
5202  | 0  | }  | 
5203  |  |  | 
5204  |  | void  | 
5205  |  | ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT  | 
5206  | 0  | { | 
5207  | 0  |   clear_pending (EV_A_ (W)w);  | 
5208  | 0  |   if (ecb_expect_false (!ev_is_active (w)))  | 
5209  | 0  |     return;  | 
5210  |  |  | 
5211  | 0  |   EV_FREQUENT_CHECK;  | 
5212  |  | 
  | 
5213  | 0  |   { | 
5214  | 0  |     int active = ev_active (w);  | 
5215  |  | 
  | 
5216  | 0  |     prepares [active - 1] = prepares [--preparecnt];  | 
5217  | 0  |     ev_active (prepares [active - 1]) = active;  | 
5218  | 0  |   }  | 
5219  |  | 
  | 
5220  | 0  |   ev_stop (EV_A_ (W)w);  | 
5221  |  | 
  | 
5222  | 0  |   EV_FREQUENT_CHECK;  | 
5223  | 0  | }  | 
5224  |  | #endif  | 
5225  |  |  | 
5226  |  | #if EV_CHECK_ENABLE  | 
5227  |  | void  | 
5228  |  | ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT  | 
5229  | 0  | { | 
5230  | 0  |   if (ecb_expect_false (ev_is_active (w)))  | 
5231  | 0  |     return;  | 
5232  |  |  | 
5233  | 0  |   EV_FREQUENT_CHECK;  | 
5234  |  | 
  | 
5235  | 0  |   ev_start (EV_A_ (W)w, ++checkcnt);  | 
5236  | 0  |   array_needsize (ev_check *, checks, checkmax, checkcnt, array_needsize_noinit);  | 
5237  | 0  |   checks [checkcnt - 1] = w;  | 
5238  |  | 
  | 
5239  | 0  |   EV_FREQUENT_CHECK;  | 
5240  | 0  | }  | 
5241  |  |  | 
5242  |  | void  | 
5243  |  | ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT  | 
5244  | 0  | { | 
5245  | 0  |   clear_pending (EV_A_ (W)w);  | 
5246  | 0  |   if (ecb_expect_false (!ev_is_active (w)))  | 
5247  | 0  |     return;  | 
5248  |  |  | 
5249  | 0  |   EV_FREQUENT_CHECK;  | 
5250  |  | 
  | 
5251  | 0  |   { | 
5252  | 0  |     int active = ev_active (w);  | 
5253  |  | 
  | 
5254  | 0  |     checks [active - 1] = checks [--checkcnt];  | 
5255  | 0  |     ev_active (checks [active - 1]) = active;  | 
5256  | 0  |   }  | 
5257  |  | 
  | 
5258  | 0  |   ev_stop (EV_A_ (W)w);  | 
5259  |  | 
  | 
5260  | 0  |   EV_FREQUENT_CHECK;  | 
5261  | 0  | }  | 
5262  |  | #endif  | 
5263  |  |  | 
5264  |  | #if EV_EMBED_ENABLE  | 
5265  |  | ecb_noinline  | 
5266  |  | void  | 
5267  |  | ev_embed_sweep (EV_P_ ev_embed *w) EV_NOEXCEPT  | 
5268  | 0  | { | 
5269  | 0  |   ev_run (w->other, EVRUN_NOWAIT);  | 
5270  | 0  | }  | 
5271  |  |  | 
5272  |  | static void  | 
5273  |  | embed_io_cb (EV_P_ ev_io *io, int revents)  | 
5274  | 0  | { | 
5275  | 0  |   ev_embed *w = (ev_embed *)(((char *)io) - offsetof (ev_embed, io));  | 
5276  |  | 
  | 
5277  | 0  |   if (ev_cb (w))  | 
5278  | 0  |     ev_feed_event (EV_A_ (W)w, EV_EMBED);  | 
5279  | 0  |   else  | 
5280  | 0  |     ev_run (w->other, EVRUN_NOWAIT);  | 
5281  | 0  | }  | 
5282  |  |  | 
5283  |  | static void  | 
5284  |  | embed_prepare_cb (EV_P_ ev_prepare *prepare, int revents)  | 
5285  | 0  | { | 
5286  | 0  |   ev_embed *w = (ev_embed *)(((char *)prepare) - offsetof (ev_embed, prepare));  | 
5287  |  | 
  | 
5288  | 0  |   { | 
5289  | 0  |     EV_P = w->other;  | 
5290  |  | 
  | 
5291  | 0  |     while (fdchangecnt)  | 
5292  | 0  |       { | 
5293  | 0  |         fd_reify (EV_A);  | 
5294  | 0  |         ev_run (EV_A_ EVRUN_NOWAIT);  | 
5295  | 0  |       }  | 
5296  | 0  |   }  | 
5297  | 0  | }  | 
5298  |  |  | 
5299  |  | #if EV_FORK_ENABLE  | 
5300  |  | static void  | 
5301  |  | embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)  | 
5302  | 0  | { | 
5303  | 0  |   ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork));  | 
5304  |  | 
  | 
5305  | 0  |   ev_embed_stop (EV_A_ w);  | 
5306  |  | 
  | 
5307  | 0  |   { | 
5308  | 0  |     EV_P = w->other;  | 
5309  |  | 
  | 
5310  | 0  |     ev_loop_fork (EV_A);  | 
5311  | 0  |     ev_run (EV_A_ EVRUN_NOWAIT);  | 
5312  | 0  |   }  | 
5313  |  | 
  | 
5314  | 0  |   ev_embed_start (EV_A_ w);  | 
5315  | 0  | }  | 
5316  |  | #endif  | 
5317  |  |  | 
5318  |  | #if 0  | 
5319  |  | static void  | 
5320  |  | embed_idle_cb (EV_P_ ev_idle *idle, int revents)  | 
5321  |  | { | 
5322  |  |   ev_idle_stop (EV_A_ idle);  | 
5323  |  | }  | 
5324  |  | #endif  | 
5325  |  |  | 
5326  |  | void  | 
5327  |  | ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT  | 
5328  | 0  | { | 
5329  | 0  |   if (ecb_expect_false (ev_is_active (w)))  | 
5330  | 0  |     return;  | 
5331  |  |  | 
5332  | 0  |   { | 
5333  | 0  |     EV_P = w->other;  | 
5334  | 0  |     assert (("libev: loop to be embedded is not embeddable", backend & ev_embeddable_backends ())); | 
5335  | 0  |     ev_io_init (&w->io, embed_io_cb, backend_fd, EV_READ);  | 
5336  | 0  |   }  | 
5337  |  |  | 
5338  | 0  |   EV_FREQUENT_CHECK;  | 
5339  |  | 
  | 
5340  | 0  |   ev_set_priority (&w->io, ev_priority (w));  | 
5341  | 0  |   ev_io_start (EV_A_ &w->io);  | 
5342  |  | 
  | 
5343  | 0  |   ev_prepare_init (&w->prepare, embed_prepare_cb);  | 
5344  | 0  |   ev_set_priority (&w->prepare, EV_MINPRI);  | 
5345  | 0  |   ev_prepare_start (EV_A_ &w->prepare);  | 
5346  |  | 
  | 
5347  | 0  | #if EV_FORK_ENABLE  | 
5348  | 0  |   ev_fork_init (&w->fork, embed_fork_cb);  | 
5349  | 0  |   ev_fork_start (EV_A_ &w->fork);  | 
5350  | 0  | #endif  | 
5351  |  |  | 
5352  |  |   /*ev_idle_init (&w->idle, e,bed_idle_cb);*/  | 
5353  |  | 
  | 
5354  | 0  |   ev_start (EV_A_ (W)w, 1);  | 
5355  |  | 
  | 
5356  | 0  |   EV_FREQUENT_CHECK;  | 
5357  | 0  | }  | 
5358  |  |  | 
5359  |  | void  | 
5360  |  | ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT  | 
5361  | 0  | { | 
5362  | 0  |   clear_pending (EV_A_ (W)w);  | 
5363  | 0  |   if (ecb_expect_false (!ev_is_active (w)))  | 
5364  | 0  |     return;  | 
5365  |  |  | 
5366  | 0  |   EV_FREQUENT_CHECK;  | 
5367  |  | 
  | 
5368  | 0  |   ev_io_stop      (EV_A_ &w->io);  | 
5369  | 0  |   ev_prepare_stop (EV_A_ &w->prepare);  | 
5370  | 0  | #if EV_FORK_ENABLE  | 
5371  | 0  |   ev_fork_stop    (EV_A_ &w->fork);  | 
5372  | 0  | #endif  | 
5373  |  | 
  | 
5374  | 0  |   ev_stop (EV_A_ (W)w);  | 
5375  |  | 
  | 
5376  | 0  |   EV_FREQUENT_CHECK;  | 
5377  | 0  | }  | 
5378  |  | #endif  | 
5379  |  |  | 
5380  |  | #if EV_FORK_ENABLE  | 
5381  |  | void  | 
5382  |  | ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT  | 
5383  | 0  | { | 
5384  | 0  |   if (ecb_expect_false (ev_is_active (w)))  | 
5385  | 0  |     return;  | 
5386  |  |  | 
5387  | 0  |   EV_FREQUENT_CHECK;  | 
5388  |  | 
  | 
5389  | 0  |   ev_start (EV_A_ (W)w, ++forkcnt);  | 
5390  | 0  |   array_needsize (ev_fork *, forks, forkmax, forkcnt, array_needsize_noinit);  | 
5391  | 0  |   forks [forkcnt - 1] = w;  | 
5392  |  | 
  | 
5393  | 0  |   EV_FREQUENT_CHECK;  | 
5394  | 0  | }  | 
5395  |  |  | 
5396  |  | void  | 
5397  |  | ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT  | 
5398  | 0  | { | 
5399  | 0  |   clear_pending (EV_A_ (W)w);  | 
5400  | 0  |   if (ecb_expect_false (!ev_is_active (w)))  | 
5401  | 0  |     return;  | 
5402  |  |  | 
5403  | 0  |   EV_FREQUENT_CHECK;  | 
5404  |  | 
  | 
5405  | 0  |   { | 
5406  | 0  |     int active = ev_active (w);  | 
5407  |  | 
  | 
5408  | 0  |     forks [active - 1] = forks [--forkcnt];  | 
5409  | 0  |     ev_active (forks [active - 1]) = active;  | 
5410  | 0  |   }  | 
5411  |  | 
  | 
5412  | 0  |   ev_stop (EV_A_ (W)w);  | 
5413  |  | 
  | 
5414  | 0  |   EV_FREQUENT_CHECK;  | 
5415  | 0  | }  | 
5416  |  | #endif  | 
5417  |  |  | 
5418  |  | #if EV_CLEANUP_ENABLE  | 
5419  |  | void  | 
5420  |  | ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT  | 
5421  | 0  | { | 
5422  | 0  |   if (ecb_expect_false (ev_is_active (w)))  | 
5423  | 0  |     return;  | 
5424  |  |  | 
5425  | 0  |   EV_FREQUENT_CHECK;  | 
5426  |  | 
  | 
5427  | 0  |   ev_start (EV_A_ (W)w, ++cleanupcnt);  | 
5428  | 0  |   array_needsize (ev_cleanup *, cleanups, cleanupmax, cleanupcnt, array_needsize_noinit);  | 
5429  | 0  |   cleanups [cleanupcnt - 1] = w;  | 
5430  |  |  | 
5431  |  |   /* cleanup watchers should never keep a refcount on the loop */  | 
5432  | 0  |   ev_unref (EV_A);  | 
5433  | 0  |   EV_FREQUENT_CHECK;  | 
5434  | 0  | }  | 
5435  |  |  | 
5436  |  | void  | 
5437  |  | ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT  | 
5438  | 0  | { | 
5439  | 0  |   clear_pending (EV_A_ (W)w);  | 
5440  | 0  |   if (ecb_expect_false (!ev_is_active (w)))  | 
5441  | 0  |     return;  | 
5442  |  |  | 
5443  | 0  |   EV_FREQUENT_CHECK;  | 
5444  | 0  |   ev_ref (EV_A);  | 
5445  |  | 
  | 
5446  | 0  |   { | 
5447  | 0  |     int active = ev_active (w);  | 
5448  |  | 
  | 
5449  | 0  |     cleanups [active - 1] = cleanups [--cleanupcnt];  | 
5450  | 0  |     ev_active (cleanups [active - 1]) = active;  | 
5451  | 0  |   }  | 
5452  |  | 
  | 
5453  | 0  |   ev_stop (EV_A_ (W)w);  | 
5454  |  | 
  | 
5455  | 0  |   EV_FREQUENT_CHECK;  | 
5456  | 0  | }  | 
5457  |  | #endif  | 
5458  |  |  | 
5459  |  | #if EV_ASYNC_ENABLE  | 
5460  |  | void  | 
5461  |  | ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT  | 
5462  | 0  | { | 
5463  | 0  |   if (ecb_expect_false (ev_is_active (w)))  | 
5464  | 0  |     return;  | 
5465  |  |  | 
5466  | 0  |   w->sent = 0;  | 
5467  |  | 
  | 
5468  | 0  |   evpipe_init (EV_A);  | 
5469  |  | 
  | 
5470  | 0  |   EV_FREQUENT_CHECK;  | 
5471  |  | 
  | 
5472  | 0  |   ev_start (EV_A_ (W)w, ++asynccnt);  | 
5473  | 0  |   array_needsize (ev_async *, asyncs, asyncmax, asynccnt, array_needsize_noinit);  | 
5474  | 0  |   asyncs [asynccnt - 1] = w;  | 
5475  |  | 
  | 
5476  | 0  |   EV_FREQUENT_CHECK;  | 
5477  | 0  | }  | 
5478  |  |  | 
5479  |  | void  | 
5480  |  | ev_async_stop (EV_P_ ev_async *w) EV_NOEXCEPT  | 
5481  | 0  | { | 
5482  | 0  |   clear_pending (EV_A_ (W)w);  | 
5483  | 0  |   if (ecb_expect_false (!ev_is_active (w)))  | 
5484  | 0  |     return;  | 
5485  |  |  | 
5486  | 0  |   EV_FREQUENT_CHECK;  | 
5487  |  | 
  | 
5488  | 0  |   { | 
5489  | 0  |     int active = ev_active (w);  | 
5490  |  | 
  | 
5491  | 0  |     asyncs [active - 1] = asyncs [--asynccnt];  | 
5492  | 0  |     ev_active (asyncs [active - 1]) = active;  | 
5493  | 0  |   }  | 
5494  |  | 
  | 
5495  | 0  |   ev_stop (EV_A_ (W)w);  | 
5496  |  | 
  | 
5497  | 0  |   EV_FREQUENT_CHECK;  | 
5498  | 0  | }  | 
5499  |  |  | 
5500  |  | void  | 
5501  |  | ev_async_send (EV_P_ ev_async *w) EV_NOEXCEPT  | 
5502  | 0  | { | 
5503  | 0  |   w->sent = 1;  | 
5504  | 0  |   evpipe_write (EV_A_ &async_pending);  | 
5505  | 0  | }  | 
5506  |  | #endif  | 
5507  |  |  | 
5508  |  | /*****************************************************************************/  | 
5509  |  |  | 
5510  |  | struct ev_once  | 
5511  |  | { | 
5512  |  |   ev_io io;  | 
5513  |  |   ev_timer to;  | 
5514  |  |   void (*cb)(int revents, void *arg);  | 
5515  |  |   void *arg;  | 
5516  |  | };  | 
5517  |  |  | 
5518  |  | static void  | 
5519  |  | once_cb (EV_P_ struct ev_once *once, int revents)  | 
5520  | 0  | { | 
5521  | 0  |   void (*cb)(int revents, void *arg) = once->cb;  | 
5522  | 0  |   void *arg = once->arg;  | 
5523  |  | 
  | 
5524  | 0  |   ev_io_stop    (EV_A_ &once->io);  | 
5525  | 0  |   ev_timer_stop (EV_A_ &once->to);  | 
5526  | 0  |   ev_free (once);  | 
5527  |  | 
  | 
5528  | 0  |   cb (revents, arg);  | 
5529  | 0  | }  | 
5530  |  |  | 
5531  |  | static void  | 
5532  |  | once_cb_io (EV_P_ ev_io *w, int revents)  | 
5533  | 0  | { | 
5534  | 0  |   struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, io));  | 
5535  |  | 
  | 
5536  | 0  |   once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->to));  | 
5537  | 0  | }  | 
5538  |  |  | 
5539  |  | static void  | 
5540  |  | once_cb_to (EV_P_ ev_timer *w, int revents)  | 
5541  | 0  | { | 
5542  | 0  |   struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, to));  | 
5543  |  | 
  | 
5544  | 0  |   once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->io));  | 
5545  | 0  | }  | 
5546  |  |  | 
5547  |  | void  | 
5548  |  | ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) EV_NOEXCEPT  | 
5549  | 0  | { | 
5550  | 0  |   struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once));  | 
5551  |  | 
  | 
5552  | 0  |   once->cb  = cb;  | 
5553  | 0  |   once->arg = arg;  | 
5554  |  | 
  | 
5555  | 0  |   ev_init (&once->io, once_cb_io);  | 
5556  | 0  |   if (fd >= 0)  | 
5557  | 0  |     { | 
5558  | 0  |       ev_io_set (&once->io, fd, events);  | 
5559  | 0  |       ev_io_start (EV_A_ &once->io);  | 
5560  | 0  |     }  | 
5561  |  | 
  | 
5562  | 0  |   ev_init (&once->to, once_cb_to);  | 
5563  | 0  |   if (timeout >= 0.)  | 
5564  | 0  |     { | 
5565  | 0  |       ev_timer_set (&once->to, timeout, 0.);  | 
5566  | 0  |       ev_timer_start (EV_A_ &once->to);  | 
5567  | 0  |     }  | 
5568  | 0  | }  | 
5569  |  |  | 
5570  |  | /*****************************************************************************/  | 
5571  |  |  | 
5572  |  | #if EV_WALK_ENABLE  | 
5573  |  | ecb_cold  | 
5574  |  | void  | 
5575  |  | ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_NOEXCEPT  | 
5576  |  | { | 
5577  |  |   int i, j;  | 
5578  |  |   ev_watcher_list *wl, *wn;  | 
5579  |  |  | 
5580  |  |   if (types & (EV_IO | EV_EMBED))  | 
5581  |  |     for (i = 0; i < anfdmax; ++i)  | 
5582  |  |       for (wl = anfds [i].head; wl; )  | 
5583  |  |         { | 
5584  |  |           wn = wl->next;  | 
5585  |  |  | 
5586  |  | #if EV_EMBED_ENABLE  | 
5587  |  |           if (ev_cb ((ev_io *)wl) == embed_io_cb)  | 
5588  |  |             { | 
5589  |  |               if (types & EV_EMBED)  | 
5590  |  |                 cb (EV_A_ EV_EMBED, ((char *)wl) - offsetof (struct ev_embed, io));  | 
5591  |  |             }  | 
5592  |  |           else  | 
5593  |  | #endif  | 
5594  |  | #if EV_USE_INOTIFY  | 
5595  |  |           if (ev_cb ((ev_io *)wl) == infy_cb)  | 
5596  |  |             ;  | 
5597  |  |           else  | 
5598  |  | #endif  | 
5599  |  |           if ((ev_io *)wl != &pipe_w)  | 
5600  |  |             if (types & EV_IO)  | 
5601  |  |               cb (EV_A_ EV_IO, wl);  | 
5602  |  |  | 
5603  |  |           wl = wn;  | 
5604  |  |         }  | 
5605  |  |  | 
5606  |  |   if (types & (EV_TIMER | EV_STAT))  | 
5607  |  |     for (i = timercnt + HEAP0; i-- > HEAP0; )  | 
5608  |  | #if EV_STAT_ENABLE  | 
5609  |  |       /*TODO: timer is not always active*/  | 
5610  |  |       if (ev_cb ((ev_timer *)ANHE_w (timers [i])) == stat_timer_cb)  | 
5611  |  |         { | 
5612  |  |           if (types & EV_STAT)  | 
5613  |  |             cb (EV_A_ EV_STAT, ((char *)ANHE_w (timers [i])) - offsetof (struct ev_stat, timer));  | 
5614  |  |         }  | 
5615  |  |       else  | 
5616  |  | #endif  | 
5617  |  |       if (types & EV_TIMER)  | 
5618  |  |         cb (EV_A_ EV_TIMER, ANHE_w (timers [i]));  | 
5619  |  |  | 
5620  |  | #if EV_PERIODIC_ENABLE  | 
5621  |  |   if (types & EV_PERIODIC)  | 
5622  |  |     for (i = periodiccnt + HEAP0; i-- > HEAP0; )  | 
5623  |  |       cb (EV_A_ EV_PERIODIC, ANHE_w (periodics [i]));  | 
5624  |  | #endif  | 
5625  |  |  | 
5626  |  | #if EV_IDLE_ENABLE  | 
5627  |  |   if (types & EV_IDLE)  | 
5628  |  |     for (j = NUMPRI; j--; )  | 
5629  |  |       for (i = idlecnt [j]; i--; )  | 
5630  |  |         cb (EV_A_ EV_IDLE, idles [j][i]);  | 
5631  |  | #endif  | 
5632  |  |  | 
5633  |  | #if EV_FORK_ENABLE  | 
5634  |  |   if (types & EV_FORK)  | 
5635  |  |     for (i = forkcnt; i--; )  | 
5636  |  |       if (ev_cb (forks [i]) != embed_fork_cb)  | 
5637  |  |         cb (EV_A_ EV_FORK, forks [i]);  | 
5638  |  | #endif  | 
5639  |  |  | 
5640  |  | #if EV_ASYNC_ENABLE  | 
5641  |  |   if (types & EV_ASYNC)  | 
5642  |  |     for (i = asynccnt; i--; )  | 
5643  |  |       cb (EV_A_ EV_ASYNC, asyncs [i]);  | 
5644  |  | #endif  | 
5645  |  |  | 
5646  |  | #if EV_PREPARE_ENABLE  | 
5647  |  |   if (types & EV_PREPARE)  | 
5648  |  |     for (i = preparecnt; i--; )  | 
5649  |  | # if EV_EMBED_ENABLE  | 
5650  |  |       if (ev_cb (prepares [i]) != embed_prepare_cb)  | 
5651  |  | # endif  | 
5652  |  |         cb (EV_A_ EV_PREPARE, prepares [i]);  | 
5653  |  | #endif  | 
5654  |  |  | 
5655  |  | #if EV_CHECK_ENABLE  | 
5656  |  |   if (types & EV_CHECK)  | 
5657  |  |     for (i = checkcnt; i--; )  | 
5658  |  |       cb (EV_A_ EV_CHECK, checks [i]);  | 
5659  |  | #endif  | 
5660  |  |  | 
5661  |  | #if EV_SIGNAL_ENABLE  | 
5662  |  |   if (types & EV_SIGNAL)  | 
5663  |  |     for (i = 0; i < EV_NSIG - 1; ++i)  | 
5664  |  |       for (wl = signals [i].head; wl; )  | 
5665  |  |         { | 
5666  |  |           wn = wl->next;  | 
5667  |  |           cb (EV_A_ EV_SIGNAL, wl);  | 
5668  |  |           wl = wn;  | 
5669  |  |         }  | 
5670  |  | #endif  | 
5671  |  |  | 
5672  |  | #if EV_CHILD_ENABLE  | 
5673  |  |   if (types & EV_CHILD)  | 
5674  |  |     for (i = (EV_PID_HASHSIZE); i--; )  | 
5675  |  |       for (wl = childs [i]; wl; )  | 
5676  |  |         { | 
5677  |  |           wn = wl->next;  | 
5678  |  |           cb (EV_A_ EV_CHILD, wl);  | 
5679  |  |           wl = wn;  | 
5680  |  |         }  | 
5681  |  | #endif  | 
5682  |  | /* EV_STAT     0x00001000 /* stat data changed */  | 
5683  |  | /* EV_EMBED    0x00010000 /* embedded event loop needs sweep */  | 
5684  |  | }  | 
5685  |  | #endif  | 
5686  |  |  | 
5687  |  | #if EV_MULTIPLICITY  | 
5688  |  |   #include "ev_wrap.h"  | 
5689  |  | #endif  | 
5690  |  |  |