/src/freeradius-server/src/lib/util/event.c
Line | Count | Source |
1 | | /* |
2 | | * This program is free software; you can redistribute it and/or modify |
3 | | * it under the terms of the GNU General Public License as published by |
4 | | * the Free Software Foundation; either version 2 of the License, or |
5 | | * (at your option) any later version. |
6 | | * |
7 | | * This program is distributed in the hope that it will be useful, |
8 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
9 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
10 | | * GNU General Public License for more details. |
11 | | * |
12 | | * You should have received a copy of the GNU General Public License |
13 | | * along with this program; if not, write to the Free Software |
14 | | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA |
15 | | */ |
16 | | |
17 | | /** Wrapper around libkqueue to make managing events easier |
18 | | * |
19 | | * Non-thread-safe event handling specific to FreeRADIUS. |
20 | | * |
21 | | * By non-thread-safe we mean multiple threads can't insert/delete |
22 | | * events concurrently into the same event list without synchronization. |
23 | | * |
24 | | * @file src/lib/util/event.c |
25 | | * |
26 | | * @copyright 2007-2016 The FreeRADIUS server project |
27 | | * @copyright 2016 Arran Cudbard-Bell (a.cudbardb@freeradius.org) |
28 | | * @copyright 2007 Alan DeKok (aland@freeradius.org) |
29 | | */ |
30 | | RCSID("$Id: 21252acfb9902b8ea03afc14212c538119506754 $") |
31 | | |
32 | | #define _EVENT_LIST_PRIVATE 1 |
33 | | typedef struct fr_event_list_s fr_event_list_t; |
34 | | |
35 | | #include <freeradius-devel/util/dlist.h> |
36 | | #include <freeradius-devel/util/event.h> |
37 | | #include <freeradius-devel/util/log.h> |
38 | | #include <freeradius-devel/util/rb.h> |
39 | | #include <freeradius-devel/util/strerror.h> |
40 | | #include <freeradius-devel/util/syserror.h> |
41 | | #include <freeradius-devel/util/token.h> |
42 | | #include <freeradius-devel/util/atexit.h> |
43 | | |
44 | | #include <sys/stat.h> |
45 | | #include <sys/wait.h> |
46 | | |
47 | | #ifdef NDEBUG |
48 | | /* |
49 | | * Turn off documentation warnings as file/line |
50 | | * args aren't used for non-debug builds. |
51 | | */ |
52 | | DIAG_OFF(DIAG_UNKNOWN_PRAGMAS) |
53 | | DIAG_OFF(documentation) |
54 | | DIAG_ON(DIAG_UNKNOWN_PRAGMAS) |
55 | | #endif |
56 | | |
57 | 0 | #define FR_EV_BATCH_FDS (256) |
58 | | |
59 | | DIAG_OFF(unused-macros) |
60 | | #define fr_time() static_assert(0, "Use el->time for event loop timing") |
61 | | DIAG_ON(unused-macros) |
62 | | |
63 | | #if !defined(SO_GET_FILTER) && defined(SO_ATTACH_FILTER) |
64 | | # define SO_GET_FILTER SO_ATTACH_FILTER |
65 | | #endif |
66 | | |
/** Sorted table mapping kevent filter constants to their names
 *
 * Used to produce human-readable filter names in error and debug messages.
 */
static fr_table_num_sorted_t const kevent_filter_table[] = {
#ifdef EVFILT_AIO
	{ L("EVFILT_AIO"),	EVFILT_AIO },
#endif
#ifdef EVFILT_EXCEPT
	{ L("EVFILT_EXCEPT"),	EVFILT_EXCEPT },
#endif
#ifdef EVFILT_MACHPORT
	{ L("EVFILT_MACHPORT"),	EVFILT_MACHPORT },
#endif
	{ L("EVFILT_PROC"),	EVFILT_PROC },
	{ L("EVFILT_READ"),	EVFILT_READ },
	{ L("EVFILT_SIGNAL"),	EVFILT_SIGNAL },
	{ L("EVFILT_TIMER"),	EVFILT_TIMER },
	{ L("EVFILT_VNODE"),	EVFILT_VNODE },
	{ L("EVFILT_WRITE"),	EVFILT_WRITE }
};
static size_t kevent_filter_table_len = NUM_ELEMENTS(kevent_filter_table);
85 | | |
86 | | #ifdef EVFILT_LIBKQUEUE |
87 | | static int log_conf_kq; |
88 | | #endif |
89 | | |
/** The type of file descriptor, discovered by fr_event_fd_type_set()
 *
 * Values are bit flags so they can be OR'd together in
 * #fr_event_func_map_entry_t.type to describe which FD types a filter
 * may be applied to.
 */
typedef enum {
	FR_EVENT_FD_SOCKET	= 1,	//!< is a socket.
	FR_EVENT_FD_FILE	= 2,	//!< is a file.
	FR_EVENT_FD_DIRECTORY	= 4,	//!< is a directory.

#ifdef SO_GET_FILTER
	FR_EVENT_FD_PCAP	= 8,	//!< is a socket with a packet filter attached
					///< (see fr_event_fd_type_set()).
#endif
} fr_event_fd_type_t;
99 | | |
/** How kevents are mapped back to entries in a #fr_event_func_map_t
 */
typedef enum {
	FR_EVENT_FUNC_IDX_NONE = 0,	//!< The map is not indexed.

	FR_EVENT_FUNC_IDX_FILTER,	//!< The kevent filter value (bitwise complemented,
					///< i.e. -1 becomes 0) is used as the index into
					///< the ev to func map.
	FR_EVENT_FUNC_IDX_FFLAGS	//!< The bit position of the flags in FFLAGS
					///< is used to provide the index.
					///< i.e. 0x01 -> 0, 0x02 -> 1, 0x08 -> 3 etc..
} fr_event_func_idx_type_t;
109 | | |
110 | | #ifndef SO_GET_FILTER |
111 | | # define FR_EVENT_FD_PCAP 0 |
112 | | #endif |
113 | | |
/** Specifies a mapping between a function pointer in a structure and its respective event
 *
 * If the function pointer at the specified offset is set, then a matching event
 * will be added.
 *
 * If the function pointer is NULL, then any existing events will be removed.
 */
typedef struct {
	size_t		offset;		//!< Offset of function pointer in structure.
	char const	*name;		//!< Name of the event.
	int16_t		filter;		//!< Filter to apply.
	uint16_t	flags;		//!< Flags to use for inserting event.
	uint32_t	fflags;		//!< fflags to pass to filter.
	int		type;		//!< Type this filter applies to
					///< (bitmask of #fr_event_fd_type_t values).
	bool		coalesce;	//!< Coalesce this map with the next, i.e. OR
					///< the fflags of consecutive entries into a
					///< single kevent (see fr_event_build_evset()).
} fr_event_func_map_entry_t;
130 | | |
/** A bidirectional mapping between callback functions and kevents
 */
typedef struct {
	fr_event_func_idx_type_t	idx_type;	//!< What type of index we use for
							///< event to function mapping.
	fr_event_func_map_entry_t	*func_to_ev;	//!< Function -> Event maps coalesced, out of order.
	fr_event_func_map_entry_t	**ev_to_func;	//!< Function -> Event maps in index order.
							///< Built lazily by event_fd_func_index_build().
} fr_event_func_map_t;
137 | | |
/** Maps the callbacks of each #fr_event_filter_t to the kevents which implement them
 *
 * Indexed by #fr_event_filter_t.  The reverse (event to function) index for
 * each map is built on demand by event_fd_func_index_build().
 */
static fr_event_func_map_t filter_maps[] = {
	[FR_EVENT_FILTER_IO] = {
		.idx_type = FR_EVENT_FUNC_IDX_FILTER,
		.func_to_ev = (fr_event_func_map_entry_t[]){
			{
				.offset = offsetof(fr_event_io_func_t, read),
				.name = "read",
				.filter = EVFILT_READ,
				.flags = EV_ADD | EV_ENABLE,
#ifdef NOTE_NONE
				.fflags = NOTE_NONE,
#else
				.fflags = 0,
#endif
				.type = FR_EVENT_FD_SOCKET | FR_EVENT_FD_FILE | FR_EVENT_FD_PCAP
			},
			{
				.offset = offsetof(fr_event_io_func_t, write),
				.name = "write",
				.filter = EVFILT_WRITE,
				.flags = EV_ADD | EV_ENABLE,
				.fflags = 0,
				.type = FR_EVENT_FD_SOCKET | FR_EVENT_FD_FILE | FR_EVENT_FD_PCAP
			},
			{ 0 }	/* Terminator - entries are iterated until name == NULL */
		}
	},
	[FR_EVENT_FILTER_VNODE] = {
		.idx_type = FR_EVENT_FUNC_IDX_FFLAGS,
		.func_to_ev = (fr_event_func_map_entry_t[]){
			{
				.offset = offsetof(fr_event_vnode_func_t, delete),
				.name = "delete",
				.filter = EVFILT_VNODE,
				.flags = EV_ADD | EV_ENABLE | EV_CLEAR,
				.fflags = NOTE_DELETE,
				.type = FR_EVENT_FD_FILE | FR_EVENT_FD_DIRECTORY,
				.coalesce = true
			},
			{
				.offset = offsetof(fr_event_vnode_func_t, write),
				.name = "write",
				.filter = EVFILT_VNODE,
				.flags = EV_ADD | EV_ENABLE | EV_CLEAR,
				.fflags = NOTE_WRITE,
				.type = FR_EVENT_FD_FILE,
				.coalesce = true
			},
			{
				.offset = offsetof(fr_event_vnode_func_t, extend),
				.name = "extend",
				.filter = EVFILT_VNODE,
				.flags = EV_ADD | EV_ENABLE | EV_CLEAR,
				.fflags = NOTE_EXTEND,
				.type = FR_EVENT_FD_FILE | FR_EVENT_FD_DIRECTORY,
				.coalesce = true
			},
			{
				.offset = offsetof(fr_event_vnode_func_t, attrib),
				.name = "attrib",
				.filter = EVFILT_VNODE,
				.flags = EV_ADD | EV_ENABLE | EV_CLEAR,
				.fflags = NOTE_ATTRIB,
				.type = FR_EVENT_FD_FILE,
				.coalesce = true
			},
			{
				.offset = offsetof(fr_event_vnode_func_t, link),
				.name = "link",
				.filter = EVFILT_VNODE,
				.flags = EV_ADD | EV_ENABLE | EV_CLEAR,
				.fflags = NOTE_LINK,
				.type = FR_EVENT_FD_FILE,
				.coalesce = true
			},
			{
				.offset = offsetof(fr_event_vnode_func_t, rename),
				.name = "rename",
				.filter = EVFILT_VNODE,
				.flags = EV_ADD | EV_ENABLE | EV_CLEAR,
				.fflags = NOTE_RENAME,
				.type = FR_EVENT_FD_FILE,
				.coalesce = true
			},
#ifdef NOTE_REVOKE
			{
				.offset = offsetof(fr_event_vnode_func_t, revoke),
				.name = "revoke",
				.filter = EVFILT_VNODE,
				.flags = EV_ADD | EV_ENABLE | EV_CLEAR,
				.fflags = NOTE_REVOKE,
				.type = FR_EVENT_FD_FILE,
				.coalesce = true
			},
#endif
#ifdef NOTE_FUNLOCK
			{
				.offset = offsetof(fr_event_vnode_func_t, funlock),
				.name = "funlock",
				.filter = EVFILT_VNODE,
				.flags = EV_ADD | EV_ENABLE | EV_CLEAR,
				.fflags = NOTE_FUNLOCK,
				.type = FR_EVENT_FD_FILE,
				.coalesce = true
			},
#endif
			{ 0 }	/* Terminator - entries are iterated until name == NULL */
		}
	}
};
248 | | |
/** Sorted table mapping #fr_event_fd_type_t values to their names
 *
 * Used to produce human-readable FD type names in error messages.
 */
static fr_table_num_sorted_t const fr_event_fd_type_table[] = {
	{ L("directory"),	FR_EVENT_FD_DIRECTORY },
	{ L("file"),		FR_EVENT_FD_FILE },
	{ L("pcap"),		FR_EVENT_FD_PCAP },
	{ L("socket"),		FR_EVENT_FD_SOCKET }
};
static size_t fr_event_fd_type_table_len = NUM_ELEMENTS(fr_event_fd_type_table);
256 | | |
/** A file descriptor/filter event
 *
 */
struct fr_event_fd {
	fr_rb_node_t		node;		//!< Entry in the tree of file descriptor handles.
						///< this should really go away and we should pass around
						///< handles directly.

	fr_event_list_t		*el;		//!< Event list this event belongs to.
	fr_event_filter_t	filter;		//!< Filter this handle was registered for.
						///< Together with fd this forms the tree key
						///< (see fr_event_fd_cmp()).
	int			fd;		//!< File descriptor we're listening for events on.

	fr_event_fd_type_t	type;		//!< Type of events we're interested in.

	int			sock_type;	//!< The type of socket SOCK_STREAM, SOCK_RAW etc...

	fr_event_funcs_t	active;		//!< Active filter functions.
	fr_event_funcs_t	stored;		//!< Stored (set, but inactive) filter functions.

	fr_event_error_cb_t	error;		//!< Callback for when an error occurs on the FD.

	fr_event_func_map_t const *map;		//!< Function map between #fr_event_funcs_t and kevent filters.

	bool			is_registered;	//!< Whether this fr_event_fd_t's FD has been registered with
						///< kevent.  Mostly for debugging.

	void			*uctx;		//!< Context pointer to pass to each file descriptor callback.
	TALLOC_CTX		*linked_ctx;	//!< talloc ctx this event was bound to.

	fr_dlist_t		entry;		//!< Entry in free list (el->fd_to_free).

#ifndef NDEBUG
	uintptr_t		armour;		//!< protection flag from being deleted.
#endif

#ifndef NDEBUG
	char const		*file;		//!< Source file this event was last updated in.
	int			line;		//!< Line this event was last updated on.
#endif
};
297 | | |
/** State for a child process (PID) exit event
 */
struct fr_event_pid {
	fr_event_list_t		*el;		//!< Event list this event belongs to.

	bool			is_registered;	//!< Whether this user event has been registered
						///< with the event loop.

	pid_t			pid;		//!< child to wait for

	fr_event_pid_t const	**parent;	//!< NOTE(review): presumably the caller's pointer to
						///< this event, so it can be cleared when the event
						///< fires - confirm against users of this struct.

	fr_event_pid_cb_t	callback;	//!< callback to run when the child exits
	void			*uctx;		//!< Context pointer to pass to each file descriptor callback.

	/** Fields that are only used if we're being triggered by a user event
	 */
	struct {
		fr_event_user_t		*ev;	//!< Fallback user event we use to raise a PID event when
						///< a race occurs with kevent.
		int			status;	//!< Status we got from waitid.
	} early_exit;
#ifndef NDEBUG
	char const		*file;		//!< Source file this event was last updated in.
	int			line;		//!< Line this event was last updated on.
#endif
};
322 | | |
/** Hold additional information for automatically reaped PIDs
 */
typedef struct {
	fr_event_list_t		*el;		//!< Event list this event belongs to.
	fr_event_pid_t const	*pid_ev;	//!< pid_ev this reaper is bound to.

	fr_dlist_t		entry;		//!< If the fr_event_pid is in the detached, reap state,
						///< it's inserted into a list associated with the event.
						///< We then send SIGKILL, and forcefully reap the process
						///< on exit.

	fr_event_pid_cb_t	callback;	//!< callback to run when the child exits
	void			*uctx;		//!< Context pointer to pass to each file descriptor callback.
} fr_event_pid_reap_t;
337 | | |
/** Callbacks for kevent() user events
 *
 */
struct fr_event_user_s {
	fr_event_list_t		*el;		//!< Event list this event belongs to.

	bool			is_registered;	//!< Whether this user event has been registered
						///< with the event loop.

	fr_event_user_cb_t	callback;	//!< The callback to call when the user event fires.
	void			*uctx;		//!< Context for the callback.

#ifndef NDEBUG
	char const		*file;		//!< Source file this event was last updated in.
	int			line;		//!< Line this event was last updated on.
#endif
};
355 | | |
/** Callbacks to perform when the event handler is about to check the events
 *
 */
typedef struct {
	fr_dlist_t		entry;		//!< Linked list of callbacks.
	fr_event_status_cb_t	callback;	//!< The callback to call.
	void			*uctx;		//!< Context for the callback.
} fr_event_pre_t;
364 | | |
/** Callbacks to perform after all timers and FDs have been checked
 *
 */
typedef struct {
	fr_dlist_t		entry;		//!< Linked list of callbacks.
	fr_event_post_cb_t	callback;	//!< The callback to call.
	void			*uctx;		//!< Context for the callback.
} fr_event_post_t;
373 | | |
/** Stores all information relating to an event list
 *
 */
struct fr_event_list_s {
	struct fr_event_list_pub_s pub;		//!< Public fields of the event list,
						///< exposed to callers.
	fr_rb_tree_t		*fds;		//!< Tree used to track FDs with filters in kqueue.

	int			will_exit;	//!< Will exit on next call to fr_event_corral.
	int			exit;		//!< If non-zero event loop will prevent the addition
						///< of new events, and will return immediately
						///< from the corral/service function.

	bool			dispatch;	//!< Whether the event list is currently dispatching events.

	int			num_fd_events;	//!< Number of events in this event list.

	int			kq;		//!< kqueue instance associated with this event list.

	fr_dlist_head_t		pre_callbacks;	//!< callbacks when we may be idle...
	fr_dlist_head_t		post_callbacks;	//!< post-processing callbacks

	fr_dlist_head_t		pid_to_reap;	//!< A list of all orphaned child processes we're
						///< waiting to reap.

	struct kevent		events[FR_EV_BATCH_FDS]; /* so it doesn't go on the stack every time */

	bool			in_handler;	//!< Deletes should be deferred until after the
						///< handlers complete.

	fr_dlist_head_t		fd_to_free;	//!< File descriptor events pending deletion.

#ifdef WITH_EVENT_DEBUG
	fr_timer_t		*report;	//!< Report event.
#endif
};
409 | | |
/** Build the event-to-function index (ev_to_func) for a function map
 *
 * Allocates and populates map->ev_to_func so that an incoming kevent's
 * filter (or one of its fflags bits) can be mapped directly back to the
 * #fr_event_func_map_entry_t it belongs to, without searching.
 *
 * @param[in] map	to build the index for.  Maps with an idx_type
 *			other than FILTER/FFLAGS are left untouched.
 */
static void event_fd_func_index_build(fr_event_func_map_t *map)
{
	switch (map->idx_type) {
	default:
		return;

	/*
	 *	- Figure out the lowest filter value
	 *	- Invert it
	 *	- Allocate an array
	 *	- Populate the array
	 */
	case FR_EVENT_FUNC_IDX_FILTER:
	{
		int low = 0;
		fr_event_func_map_entry_t *entry;

		/* kevent filters are negative (e.g. EVFILT_READ == -1), so "lowest" is largest magnitude */
		for (entry = map->func_to_ev; entry->name; entry++) if (entry->filter < low) low = entry->filter;

		map->ev_to_func = talloc_zero_array(NULL, fr_event_func_map_entry_t *, ~low + 1);
		if (unlikely(!map->ev_to_func)) abort();

		/* ~filter maps -1, -2, ... to array indexes 0, 1, ... */
		for (entry = map->func_to_ev; entry->name; entry++) map->ev_to_func[~entry->filter] = entry;
	}
		break;

	/*
	 *	- Figure out the highest bit position
	 *	- Allocate an array
	 *	- Populate the array
	 */
	case FR_EVENT_FUNC_IDX_FFLAGS:
	{
		uint8_t high = 0, pos;
		fr_event_func_map_entry_t *entry;

		for (entry = map->func_to_ev; entry->name; entry++) {
			pos = fr_high_bit_pos(entry->fflags);
			if (pos > high) high = pos;
		}

		map->ev_to_func = talloc_zero_array(NULL, fr_event_func_map_entry_t *, high);
		if (unlikely(!map->ev_to_func)) abort();

		for (entry = map->func_to_ev; entry->name; entry++) {
			typeof_field(fr_event_func_map_entry_t, fflags) fflags = entry->fflags;

			/*
			 *	Multiple notes can be associated
			 *	with the same function.
			 */
			while ((pos = fr_high_bit_pos(fflags))) {
				pos -= 1;	/* Bit positions are 1-based, indexes 0-based */
				map->ev_to_func[pos] = entry;
				/*
				 *	Coverity thinks that after this decrement, pos
				 *	can be 255 even though the loop condition precludes
				 *	it.  Adding a Coverity-only check won't change that,
				 *	so we're stuck with annotation.
				 */
				/* coverity[overflow_const] */
				fflags &= ~(1 << pos);
			}
		}
	}
		break;
	}
}
478 | | |
/** Figure out which function to call given a kevent
 *
 * This function should be called in a loop until it returns NULL.
 *
 * @param[in] ef	File descriptor state handle.
 * @param[in] filter	from the kevent.
 * @param[in,out] fflags	from the kevent.  Each call will return the function
 *			from the next most significant NOTE_*, with each
 *			NOTE_* before unset from fflags.
 * @return
 *	- NULL there are no more callbacks to call.
 *	- The next callback to call.
 */
static inline CC_HINT(always_inline) fr_event_fd_cb_t event_fd_func(fr_event_fd_t *ef, int *filter, int *fflags)
{
	fr_event_func_map_t const *map = ef->map;

/* Fetch the callback function pointer stored at _offset bytes into ef->active */
#define GET_FUNC(_ef, _offset) *((fr_event_fd_cb_t const *)((uint8_t const *)&(_ef)->active + _offset))

	switch (map->idx_type) {
	default:
		fr_assert_fail("Invalid index type %u", map->idx_type);
		return NULL;

	case FR_EVENT_FUNC_IDX_FILTER:
	{
		int idx;

		if (!*filter) return NULL;

		idx = ~*filter;		/* Consume the filter - ~filter maps -1, -2, ... to 0, 1, ... */
		*filter = 0;

		return GET_FUNC(ef, map->ev_to_func[idx]->offset);
	}

	case FR_EVENT_FUNC_IDX_FFLAGS:
	{
		int our_fflags = *fflags;
		uint8_t pos = fr_high_bit_pos(our_fflags);

		if (!pos) return NULL;	/* No more fflags to consume */
		pos -= 1;		/* Saves an array element */

		*fflags = our_fflags & ~(1 << pos);	/* Consume the knote */

		return GET_FUNC(ef, map->ev_to_func[pos]->offset);
	}
	}
}
529 | | |
530 | | /** Compare two file descriptor handles |
531 | | * |
532 | | * @param[in] one the first file descriptor handle. |
533 | | * @param[in] two the second file descriptor handle. |
534 | | * @return CMP(one, two) |
535 | | */ |
536 | | static int8_t fr_event_fd_cmp(void const *one, void const *two) |
537 | 0 | { |
538 | 0 | fr_event_fd_t const *a = one, *b = two; |
539 | |
|
540 | 0 | CMP_RETURN(a, b, fd); |
541 | | |
542 | 0 | return CMP(a->filter, b->filter); |
543 | 0 | } |
544 | | |
545 | | /** Return the number of file descriptors is_registered with this event loop |
546 | | * |
547 | | */ |
548 | | uint64_t fr_event_list_num_fds(fr_event_list_t *el) |
549 | 0 | { |
550 | 0 | if (unlikely(!el)) return 0; |
551 | | |
552 | 0 | return fr_rb_num_elements(el->fds); |
553 | 0 | } |
554 | | |
555 | | /** Return the number of timer events currently scheduled |
556 | | * |
557 | | * @param[in] el to return timer events for. |
558 | | * @return number of timer events. |
559 | | */ |
560 | | uint64_t fr_event_list_num_timers(fr_event_list_t *el) |
561 | 0 | { |
562 | 0 | if (unlikely(!el)) return 0; |
563 | | |
564 | 0 | return fr_timer_list_num_events(el->pub.tl); |
565 | 0 | } |
566 | | |
567 | | /** Return the kq associated with an event list. |
568 | | * |
569 | | * @param[in] el to return timer events for. |
570 | | * @return kq |
571 | | */ |
572 | | int fr_event_list_kq(fr_event_list_t *el) |
573 | 0 | { |
574 | 0 | if (unlikely(!el)) return -1; |
575 | | |
576 | 0 | return el->kq; |
577 | 0 | } |
578 | | |
/** Get the current server time according to the event list
 *
 * Delegates to the time function of the event list's timer list.
 *
 * NOTE(review): the doc below describes the expected contract of that time
 * function - confirm against the fr_timer_list implementation.
 *
 * If the event list is currently dispatching events, we return the time
 * this iteration of the event list started.
 *
 * If the event list is not currently dispatching events, we return the
 * current system time.
 *
 * @param[in] el	to get time from.
 * @return the current time according to the event list.
 */
fr_time_t fr_event_list_time(fr_event_list_t *el)
{
	return el->pub.tl->time();
}
594 | | |
595 | | /** Placeholder callback to avoid branches in service loop |
596 | | * |
597 | | * This is set in place of any NULL function pointers, so that the event loop doesn't |
598 | | * SEGV if a filter callback function is unset between corral and service. |
599 | | */ |
600 | | static void fr_event_fd_noop(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, UNUSED void *uctx) |
601 | 0 | { |
602 | 0 | return; |
603 | 0 | } |
604 | | |
/** Build a new evset based on function pointers present
 *
 * Compares the 'new' and 'prev' function sets, producing EV_ADD entries for
 * filters that were added or whose fflags changed, and EV_DELETE entries for
 * filters that were removed.  Deletes are written before adds so libkqueue
 * doesn't report EEXIST (see comment at the bottom).
 *
 * @note The contents of active functions may be inconsistent if this function errors.  But the
 *	 only time that will occur is if the caller passed invalid arguments.
 *
 * @param[in] el	we're building events for.
 * @param[out] out_kev	where to write the evset.
 * @param[in] outlen	length of output buffer.
 * @param[out] active	The set of function pointers with active filters.
 * @param[in] ef	event to insert.
 * @param[in] new	Functions to map to filters.
 * @param[in] prev	Previous set of functions mapped to filters.
 * @return
 *	- >= 0 the number of changes written to out.
 *	- < 0 an error occurred.
 */
static ssize_t fr_event_build_evset(
#ifndef WITH_EVENT_DEBUG
				    UNUSED
#endif
				    fr_event_list_t *el,
				    struct kevent out_kev[], size_t outlen, fr_event_funcs_t *active,
				    fr_event_fd_t *ef,
				    fr_event_funcs_t const *new, fr_event_funcs_t const *prev)
{
	struct kevent			*out = out_kev, *end = out + outlen;
	fr_event_func_map_entry_t const	*map;
	struct kevent			add[10], *add_p = add;
	size_t				i;

	EVENT_DEBUG("%p - Building new evset for FD %i (new %p, prev %p)", el, ef->fd, new, prev);

	/*
	 *	Iterate over the function map, setting/unsetting
	 *	filters and filter flags.
	 */
	for (map = ef->map->func_to_ev; map->name; map++) {
		bool		has_current_func = false;
		bool		has_prev_func = false;
		uint32_t	current_fflags = 0;
		uint32_t	prev_fflags = 0;

		/*
		 *	Process this entry plus any subsequent entries
		 *	marked 'coalesce', OR'ing their fflags together
		 *	into a single kevent.
		 */
		do {
			fr_event_fd_cb_t prev_func;
			fr_event_fd_cb_t new_func;

			/*
			 *	If the previous value was the 'noop'
			 *	callback, it's identical to being unset.
			 */
			prev_func = *(fr_event_fd_cb_t const *)((uint8_t const *)prev + map->offset);
			if (prev_func && (prev_func != fr_event_fd_noop)) {
				EVENT_DEBUG("\t%s prev set (%p)", map->name, prev_func);
				prev_fflags |= map->fflags;
				has_prev_func = true;
			} else {
				EVENT_DEBUG("\t%s prev unset", map->name);
			}

			new_func = *(fr_event_fd_cb_t const *)((uint8_t const *)new + map->offset);
			if (new_func && (new_func != fr_event_fd_noop)) {
				EVENT_DEBUG("\t%s curr set (%p)", map->name, new_func);
				current_fflags |= map->fflags;
				has_current_func = true;

				/*
				 *	Check the filter will work for the
				 *	type of file descriptor specified.
				 */
				if (!(map->type & ef->type)) {
					fr_strerror_printf("kevent %s (%s), can't be applied to fd of type %s",
							   map->name,
							   fr_table_str_by_value(kevent_filter_table, map->filter, "<INVALID>"),
							   fr_table_str_by_value(fr_event_fd_type_table,
										 map->type, "<INVALID>"));
					return -1;
				}

				/*
				 *	Mark this filter function as active
				 */
				memcpy((uint8_t *)active + map->offset, (uint8_t const *)new + map->offset,
				       sizeof(fr_event_fd_cb_t));
			} else {
				EVENT_DEBUG("\t%s curr unset", map->name);

				/*
				 *	Mark this filter function as inactive
				 *	by setting it to the 'noop' callback.
				 */
				*((fr_event_fd_cb_t *)((uint8_t *)active + map->offset)) = fr_event_fd_noop;
			}

			if (!(map + 1)->coalesce) break;
			map++;
		} while (1);

		if (out >= end) {
			fr_strerror_const("Out of memory to store kevent filters");
			return -1;
		}

		/*
		 *	Upsert if we add a function or change the flags.
		 */
		if (has_current_func &&
		    (!has_prev_func || (current_fflags != prev_fflags))) {
			if ((size_t)(add_p - add) >= (NUM_ELEMENTS(add))) {
				fr_strerror_const("Out of memory to store kevent EV_ADD filters");
				return -1;
			}
			EVENT_DEBUG("\tEV_SET EV_ADD filter %s (%i), flags %i, fflags %i",
				    fr_table_str_by_value(kevent_filter_table, map->filter, "<INVALID>"),
				    map->filter, map->flags, current_fflags);
			EV_SET(add_p++, ef->fd, map->filter, map->flags, current_fflags, 0, ef);

		/*
		 *	Delete if we remove a function.
		 */
		} else if (!has_current_func && has_prev_func) {
			EVENT_DEBUG("\tEV_SET EV_DELETE filter %s (%i), flags %i, fflags %i",
				    fr_table_str_by_value(kevent_filter_table, map->filter, "<INVALID>"),
				    map->filter, EV_DELETE, 0);
			EV_SET(out++, ef->fd, map->filter, EV_DELETE, 0, 0, ef);
		}
	}

	/*
	 *	kevent is fine with adds/deletes in the same operation
	 *	on the same file descriptor, but libkqueue doesn't do
	 *	any kind of coalescing or ordering so you get an EEXIST
	 *	error.
	 */
	for (i = 0; i < (size_t)(add_p - add); i++) memcpy(out++, &add[i], sizeof(*out));

	return out - out_kev;
}
742 | | |
/** Discover the type of a file descriptor
 *
 * This function writes the result of the discovery to the ef->type,
 * and ef->sock_type fields.  It also records fd in ef->fd.
 *
 * @param[out] ef	to write type data to.
 * @param[in] fd	to discover the type of.
 * @return
 *	- 0 on success.
 *	- -1 on failure.
 */
static int fr_event_fd_type_set(fr_event_fd_t *ef, int fd)
{
	socklen_t opt_len = sizeof(ef->sock_type);

	/*
	 *	It's a socket or PCAP socket
	 */
	if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &ef->sock_type, &opt_len) == 0) {
#ifdef SO_GET_FILTER
		opt_len = 0;

		/*
		 *	NOTE(review): zero-length getsockopt here presumably
		 *	probes for an attached socket filter - a non-zero
		 *	returned length is treated as "filter attached",
		 *	i.e. a PCAP style socket.  Confirm against the
		 *	SO_GET_FILTER/SO_ATTACH_FILTER documentation.
		 */
		if (unlikely(getsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, NULL, &opt_len) < 0)) {
			fr_strerror_printf("Failed determining PF status: %s", fr_syserror(errno));
			return -1;
		}
		if (opt_len) {
			ef->type = FR_EVENT_FD_PCAP;
		} else
#endif
		{
			ef->type = FR_EVENT_FD_SOCKET;
		}

	/*
	 *	It's a file or directory
	 */
	} else {
		struct stat buf;

		/* Any error other than "not a socket" is a real failure */
		if (errno != ENOTSOCK) {
			fr_strerror_printf("Failed retrieving socket type: %s", fr_syserror(errno));
			return -1;
		}

		if (fstat(fd, &buf) < 0) {
			fr_strerror_printf("Failed calling stat() on file: %s", fr_syserror(errno));
			return -1;
		}

		if (S_ISDIR(buf.st_mode)) {
			ef->type = FR_EVENT_FD_DIRECTORY;
		} else {
			ef->type = FR_EVENT_FD_FILE;
		}
	}
	ef->fd = fd;

	return 0;
}
802 | | |
/** Remove a file descriptor from the event loop and rbtree but don't explicitly free it
 *
 * NOTE(review): the leading underscore and int return suggest this is used
 * as a talloc destructor - confirm at the registration site.
 *
 * @param[in] ef	to remove.
 * @return
 *	- 0 on success.
 *	- -1 on error;
 */
static int _event_fd_delete(fr_event_fd_t *ef)
{
	struct kevent		evset[10];
	int			count = 0;
	fr_event_list_t		*el = ef->el;
	fr_event_funcs_t	funcs;

	/*
	 *	Already been removed from the various trees and
	 *	the event loop.
	 */
	if (ef->is_registered) {
		memset(&funcs, 0, sizeof(funcs));	/* Empty function set == delete all filters */

		fr_assert(ef->armour == 0);

		/*
		 *	If this fails, it's a pretty catastrophic error.
		 */
		count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset),
					     &ef->active, ef, &funcs, &ef->active);
		if (count > 0) {
			int ret;

			/*
			 *	If this fails, assert on debug builds.
			 */
			ret = kevent(el->kq, evset, count, NULL, 0, NULL);
			if (!fr_cond_assert_msg(ret >= 0,
						"FD %i was closed without being removed from the KQ: %s",
						ef->fd, fr_syserror(errno))) {
				return -1;	/* Prevent the free, and leave the fd in the trees */
			}
		}

		fr_rb_delete(el->fds, ef);
		ef->is_registered = false;
	}

	/*
	 *	Insert into the deferred free list, event will be
	 *	freed later.
	 */
	if (el->in_handler) {
		/*
		 *	Don't allow the same event to be
		 *	inserted into the free list multiple
		 *	times.
		 *
		 *	This can happen if the same ef is
		 *	delivered by multiple filters, i.e.
		 *	if EVFILT_READ and EVFILT_WRITE
		 *	were both high, and both handlers
		 *	attempted to delete the event
		 *	we'd need to prevent the event being
		 *	inserted into the free list multiple
		 *	times.
		 */
		if (!fr_dlist_entry_in_list(&ef->entry)) fr_dlist_insert_tail(&el->fd_to_free, ef);
		return -1;	/* Will be freed later */
	} else if (fr_dlist_entry_in_list(&ef->entry)) {
		fr_dlist_remove(&el->fd_to_free, ef);
	}

	return 0;
}
877 | | |
878 | | /** Move a file descriptor event from one event list to another |
879 | | * |
880 | | * FIXME - Move suspended events too. |
881 | | * |
882 | | * @note Any pending events will not be transferred. |
883 | | * |
884 | | * @param[in] dst Event list to move file descriptor event to. |
885 | | * @param[in] src Event list to move file descriptor from. |
886 | | * @param[in] fd of the event to move. |
887 | | * @param[in] filter of the event to move. |
888 | | * @return |
889 | | * - 0 on success. |
890 | | * - -1 on failure. The event will remain active in the src list. |
891 | | */ |
892 | | int _fr_event_fd_move(NDEBUG_LOCATION_ARGS |
893 | | fr_event_list_t *dst, fr_event_list_t *src, int fd, fr_event_filter_t filter) |
894 | 0 | { |
895 | 0 | fr_event_fd_t *ef; |
896 | 0 | int ret; |
897 | |
|
898 | 0 | if (fr_event_loop_exiting(dst)) { |
899 | 0 | fr_strerror_const("Destination event loop exiting"); |
900 | 0 | return -1; |
901 | 0 | } |
902 | | |
903 | | /* |
904 | | * Ensure this exists |
905 | | */ |
906 | 0 | ef = fr_rb_find(src->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter }); |
907 | 0 | if (unlikely(!ef)) { |
908 | 0 | fr_strerror_printf("No events are registered for fd %i", fd); |
909 | 0 | return -1; |
910 | 0 | } |
911 | | |
912 | 0 | ret = _fr_event_filter_insert(NDEBUG_LOCATION_VALS |
913 | 0 | ef->linked_ctx, NULL, |
914 | 0 | dst, ef->fd, ef->filter, &ef->active, ef->error, ef->uctx); |
915 | 0 | if (ret < 0) return -1; |
916 | | |
917 | 0 | (void)fr_event_fd_delete(src, ef->fd, ef->filter); |
918 | |
|
919 | 0 | return ret; |
920 | 0 | } |
921 | | |
922 | | |
/** Suspend/resume a subset of filters
 *
 * This function trades producing useful errors for speed.
 *
 * An example of suspending the read filter for an FD would be:
 @code {.c}
   static fr_event_update_t pause_read[] = {
   	FR_EVENT_SUSPEND(fr_event_io_func_t, read),
   	{ 0 }
   }

   fr_event_filter_update(el, fd, FR_EVENT_FILTER_IO, pause_read);
 @endcode
 *
 * @param[in] el	to update descriptor in.
 * @param[in] fd	to update filters for.
 * @param[in] filter	The type of filter to update.
 * @param[in] updates	An array of updates to toggle filters on/off without removing
 *			the callback function.  Terminated by an entry with .op == 0.
 * @return
 *	- 0 on success.
 *	- -1 on failure (active/stored sets are restored to their prior state).
 */
int _fr_event_filter_update(NDEBUG_LOCATION_ARGS
			    fr_event_list_t *el, int fd, fr_event_filter_t filter, fr_event_update_t const updates[])
{
	fr_event_fd_t		*ef;
	size_t			i;
	fr_event_funcs_t	curr_active, curr_stored;
	struct kevent		evset[10];
	int			count = 0;

	ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
	if (unlikely(!ef)) {
		fr_strerror_printf("No events are registered for fd %i", fd);
		return -1;
	}

#ifndef NDEBUG
	/* Record the caller's location for event-list debugging */
	ef->file = file;
	ef->line = line;
#endif

	/*
	 *	Cheapest way of ensuring this function can error without
	 *	leaving everything in an inconsistent state.
	 */
	memcpy(&curr_active, &ef->active, sizeof(curr_active));
	memcpy(&curr_stored, &ef->stored, sizeof(curr_stored));

	/*
	 *	Apply modifications to our copies of the active/stored array.
	 *
	 *	Each update entry carries a byte offset of a single callback
	 *	pointer within the funcs union; suspend moves the pointer
	 *	active -> stored (zeroing active), resume moves it back.
	 *	NOTE(review): unknown op values fall through to SUSPEND.
	 */
	for (i = 0; updates[i].op; i++) {
		switch (updates[i].op) {
		default:
		case FR_EVENT_OP_SUSPEND:
			fr_assert(ef->armour == 0); /* can't suspend protected FDs */
			memcpy((uint8_t *)&ef->stored + updates[i].offset,
			       (uint8_t *)&ef->active + updates[i].offset, sizeof(fr_event_fd_cb_t));
			memset((uint8_t *)&ef->active + updates[i].offset, 0, sizeof(fr_event_fd_cb_t));
			break;

		case FR_EVENT_OP_RESUME:
			memcpy((uint8_t *)&ef->active + updates[i].offset,
			       (uint8_t *)&ef->stored + updates[i].offset, sizeof(fr_event_fd_cb_t));
			memset((uint8_t *)&ef->stored + updates[i].offset, 0, sizeof(fr_event_fd_cb_t));
			break;
		}
	}

	/*
	 *	Diff the new active set against the previous one to
	 *	produce the minimal set of kevent changes.
	 */
	count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset), &ef->active,
				     ef, &ef->active, &curr_active);
	if (unlikely(count < 0)) {
	error:
		/* Roll back to the pre-call state */
		memcpy(&ef->active, &curr_active, sizeof(curr_active));
		memcpy(&ef->stored, &curr_stored, sizeof(curr_stored));
		return -1;
	}

	if (count && unlikely(kevent(el->kq, evset, count, NULL, 0, NULL) < 0)) {
		fr_strerror_printf("Failed updating filters for FD %i: %s", ef->fd, fr_syserror(errno));
		goto error;
	}

	return 0;
}
1007 | | |
/** Insert a filter for the specified fd
 *
 * Either creates a new event for the fd/filter pair, or updates the
 * functions installed on an existing one.
 *
 * @param[in] ctx	to bind lifetime of the event to.
 * @param[out] ef_out	Previously allocated ef, or NULL.
 * @param[in] el	to insert fd callback into.
 * @param[in] fd	to install filters for.
 * @param[in] filter	one of the #fr_event_filter_t values.
 * @param[in] funcs	Structure containing callback functions. If a function pointer
 *			is set, the equivalent kevent filter will be installed.
 * @param[in] error	function to call when an error occurs on the fd.
 * @param[in] uctx	to pass to handler.
 * @return
 *	- 0 on success.
 *	- -1 on failure.
 */
int _fr_event_filter_insert(NDEBUG_LOCATION_ARGS
			    TALLOC_CTX *ctx, fr_event_fd_t **ef_out,
			    fr_event_list_t *el, int fd,
			    fr_event_filter_t filter,
			    void *funcs, fr_event_error_cb_t error,
			    void *uctx)
{
	ssize_t			count;
	fr_event_fd_t		*ef;
	fr_event_funcs_t	active;
	struct kevent		evset[10];

	if (unlikely(!el)) {
		fr_strerror_const("Invalid argument: NULL event list");
		return -1;
	}

	if (unlikely(fd < 0)) {
		fr_strerror_printf("Invalid arguments: Bad FD %i", fd);
		return -1;
	}

	if (unlikely(el->exit)) {
		fr_strerror_const("Event loop exiting");
		return -1;
	}

	/*
	 *	Resolve the event - either via the caller-provided handle,
	 *	or by looking up the fd/filter pair in the tree.
	 */
	if (!ef_out || !*ef_out) {
		ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
	} else {
		ef = *ef_out;
		fr_assert((fd < 0) || (ef->fd == fd));
	}

	/*
	 *	Need to free the event to change the talloc link.
	 *
	 *	This is generally bad.  If you hit this
	 *	code path you probably screwed up somewhere.
	 */
	if (unlikely(ef && (ef->linked_ctx != ctx))) TALLOC_FREE(ef);

	/*
	 *	No pre-existing event.  Allocate an entry
	 *	for insertion into the rbtree.
	 */
	if (!ef) {
		ef = talloc_zero(el, fr_event_fd_t);
		if (unlikely(!ef)) {
			fr_strerror_const("Out of memory");
			return -1;
		}
		talloc_set_destructor(ef, _event_fd_delete);

		/*
		 *	Bind the lifetime of the event to the specified
		 *	talloc ctx.  If the talloc ctx is freed, the
		 *	event will also be freed.
		 */
		if (ctx != el) talloc_link_ctx(ctx, ef);
		ef->linked_ctx = ctx;
		ef->el = el;

		/*
		 *	Determine what type of file descriptor
		 *	this is.
		 */
		if (fr_event_fd_type_set(ef, fd) < 0) {
		free:
			talloc_free(ef);
			return -1;
		}

		/*
		 *	Check the filter value is valid
		 */
		if ((filter > (NUM_ELEMENTS(filter_maps) - 1))) {
		not_supported:
			fr_strerror_printf("Filter %u not supported", filter);
			goto free;
		}
		ef->map = &filter_maps[filter];
		if (ef->map->idx_type == FR_EVENT_FUNC_IDX_NONE) goto not_supported;

		/*
		 *	ef->active is all zeros here, so the diff installs
		 *	every function the caller provided.
		 */
		count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset),
					     &ef->active, ef, funcs, &ef->active);
		if (count < 0) goto free;
		if (count && (unlikely(kevent(el->kq, evset, count, NULL, 0, NULL) < 0))) {
			fr_strerror_printf("Failed inserting filters for FD %i: %s", fd, fr_syserror(errno));
			goto free;
		}

		ef->filter = filter;
		fr_rb_insert(el->fds, ef);
		ef->is_registered = true;

	/*
	 *	Pre-existing event, update the filters and
	 *	functions associated with the file descriptor.
	 */
	} else {
		fr_assert(ef->is_registered == true);

		/*
		 *	Take a copy of the current set of active
		 *	functions, so we can error out in a
		 *	consistent state.
		 */
		memcpy(&active, &ef->active, sizeof(ef->active));

		fr_assert((ef->armour == 0) || ef->active.io.read);

		count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset),
					     &ef->active, ef, funcs, &ef->active);
		if (count < 0) {
		error:
			memcpy(&ef->active, &active, sizeof(ef->active));
			return -1;
		}
		if (count && (unlikely(kevent(el->kq, evset, count, NULL, 0, NULL) < 0))) {
			fr_strerror_printf("Failed modifying filters for FD %i: %s", fd, fr_syserror(errno));
			goto error;
		}

		/*
		 *	Clear any previously suspended functions
		 */
		memset(&ef->stored, 0, sizeof(ef->stored));
	}

#ifndef NDEBUG
	/* Record the caller's location for event-list debugging */
	ef->file = file;
	ef->line = line;
#endif
	ef->error = error;
	ef->uctx = uctx;

	if (ef_out) *ef_out = ef;

	return 0;
}
1161 | | |
1162 | | /** Associate I/O callbacks with a file descriptor |
1163 | | * |
1164 | | * @param[in] ctx to bind lifetime of the event to. |
1165 | | * @param[out] ef_out Where to store the output event |
1166 | | * @param[in] el to insert fd callback into. |
1167 | | * @param[in] fd to install filters for. |
1168 | | * @param[in] read_fn function to call when fd is readable. |
1169 | | * @param[in] write_fn function to call when fd is writable. |
1170 | | * @param[in] error function to call when an error occurs on the fd. |
1171 | | * @param[in] uctx to pass to handler. |
1172 | | * @return |
1173 | | * - 0 on success. |
1174 | | * - -1 on failure. |
1175 | | */ |
1176 | | int _fr_event_fd_insert(NDEBUG_LOCATION_ARGS |
1177 | | TALLOC_CTX *ctx, fr_event_fd_t **ef_out, fr_event_list_t *el, int fd, |
1178 | | fr_event_fd_cb_t read_fn, |
1179 | | fr_event_fd_cb_t write_fn, |
1180 | | fr_event_error_cb_t error, |
1181 | | void *uctx) |
1182 | 0 | { |
1183 | 0 | fr_event_io_func_t funcs = { .read = read_fn, .write = write_fn }; |
1184 | |
|
1185 | 0 | if (unlikely(!read_fn && !write_fn)) { |
1186 | 0 | fr_strerror_const("Invalid arguments: All callbacks are NULL"); |
1187 | 0 | return -1; |
1188 | 0 | } |
1189 | | |
1190 | 0 | return _fr_event_filter_insert(NDEBUG_LOCATION_VALS |
1191 | 0 | ctx, ef_out, el, fd, FR_EVENT_FILTER_IO, &funcs, error, uctx); |
1192 | 0 | } |
1193 | | |
1194 | | /** Remove a file descriptor from the event loop |
1195 | | * |
1196 | | * @param[in] el to remove file descriptor from. |
1197 | | * @param[in] fd to remove. |
1198 | | * @param[in] filter The type of filter to remove. |
1199 | | * @return |
1200 | | * - 0 if file descriptor was removed. |
1201 | | * - <0 on error. |
1202 | | */ |
1203 | | int fr_event_fd_delete(fr_event_list_t *el, int fd, fr_event_filter_t filter) |
1204 | 0 | { |
1205 | 0 | fr_event_fd_t *ef; |
1206 | |
|
1207 | 0 | ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter }); |
1208 | 0 | if (unlikely(!ef)) { |
1209 | 0 | fr_strerror_printf("No events are registered for fd %d, filter %u", fd, filter); |
1210 | 0 | return -1; |
1211 | 0 | } |
1212 | | |
1213 | | /* |
1214 | | * Free will normally fail if it's |
1215 | | * a deferred free. There is a special |
1216 | | * case for kevent failures though. |
1217 | | * |
1218 | | * We distinguish between the two by |
1219 | | * looking to see if the ef is still |
1220 | | * in the even tree. |
1221 | | * |
1222 | | * Talloc returning -1 guarantees the |
1223 | | * memory has not been freed. |
1224 | | */ |
1225 | 0 | if ((talloc_free(ef) == -1) && ef->is_registered) return -1; |
1226 | | |
1227 | 0 | return 0; |
1228 | 0 | } |
1229 | | |
1230 | | /** Get the opaque event handle from a file descriptor |
1231 | | * |
1232 | | * @param[in] el to search for fd/filter in. |
1233 | | * @param[in] fd to search for. |
1234 | | * @param[in] filter to search for. |
1235 | | * @return |
1236 | | * - NULL if no event could be found. |
1237 | | * - The opaque handle representing an fd event. |
1238 | | */ |
1239 | | fr_event_fd_t *fr_event_fd_handle(fr_event_list_t *el, int fd, fr_event_filter_t filter) |
1240 | 0 | { |
1241 | 0 | fr_event_fd_t *ef; |
1242 | |
|
1243 | 0 | ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter }); |
1244 | 0 | if (unlikely(!ef)) { |
1245 | 0 | fr_strerror_printf("No events are registered for fd %i", fd); |
1246 | 0 | return NULL; |
1247 | 0 | } |
1248 | | |
1249 | 0 | return ef; |
1250 | 0 | } |
1251 | | |
1252 | | /** Returns the appropriate callback function for a given event |
1253 | | * |
1254 | | * @param[in] ef the event filter fd handle. |
1255 | | * @param[in] kq_filter If the callbacks are indexed by filter. |
1256 | | * @param[in] kq_fflags If the callbacks are indexed by NOTES (fflags). |
1257 | | * @return |
1258 | | * - NULL if no event it associated with the given ef/kq_filter or kq_fflags combo. |
1259 | | * - The callback that would be called if an event with this filter/fflag combo was received. |
1260 | | */ |
1261 | | fr_event_fd_cb_t fr_event_fd_cb(fr_event_fd_t *ef, int kq_filter, int kq_fflags) |
1262 | 0 | { |
1263 | 0 | return event_fd_func(ef, &kq_filter, &kq_fflags); |
1264 | 0 | } |
1265 | | |
1266 | | /** Returns the uctx associated with an fr_event_fd_t handle |
1267 | | * |
1268 | | */ |
1269 | | void *fr_event_fd_uctx(fr_event_fd_t *ef) |
1270 | 0 | { |
1271 | 0 | return ef->uctx; |
1272 | 0 | } |
1273 | | |
1274 | | #ifndef NDEBUG |
1275 | | /** Armour an FD |
1276 | | * |
1277 | | * @param[in] el to remove file descriptor from. |
1278 | | * @param[in] fd to remove. |
1279 | | * @param[in] filter The type of filter to remove. |
1280 | | * @param[in] armour The armour to add. |
1281 | | * @return |
1282 | | * - 0 if file descriptor was armoured |
1283 | | * - <0 on error. |
1284 | | */ |
1285 | | int fr_event_fd_armour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour) |
1286 | 0 | { |
1287 | 0 | fr_event_fd_t *ef; |
1288 | |
|
1289 | 0 | ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter }); |
1290 | 0 | if (unlikely(!ef)) { |
1291 | 0 | fr_strerror_printf("No events are registered for fd %i", fd); |
1292 | 0 | return -1; |
1293 | 0 | } |
1294 | | |
1295 | 0 | if (ef->armour != 0) { |
1296 | 0 | fr_strerror_printf("FD %i is already armoured", fd); |
1297 | 0 | return -1; |
1298 | 0 | } |
1299 | | |
1300 | 0 | ef->armour = armour; |
1301 | |
|
1302 | 0 | return 0; |
1303 | 0 | } |
1304 | | |
1305 | | /** Unarmour an FD |
1306 | | * |
1307 | | * @param[in] el to remove file descriptor from. |
1308 | | * @param[in] fd to remove. |
1309 | | * @param[in] filter The type of filter to remove. |
1310 | | * @param[in] armour The armour to remove |
1311 | | * @return |
1312 | | * - 0 if file descriptor was unarmoured |
1313 | | * - <0 on error. |
1314 | | */ |
1315 | | int fr_event_fd_unarmour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour) |
1316 | 0 | { |
1317 | 0 | fr_event_fd_t *ef; |
1318 | |
|
1319 | 0 | ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter }); |
1320 | 0 | if (unlikely(!ef)) { |
1321 | 0 | fr_strerror_printf("No events are registered for fd %i", fd); |
1322 | 0 | return -1; |
1323 | 0 | } |
1324 | | |
1325 | 0 | fr_assert(ef->armour == armour); |
1326 | |
|
1327 | 0 | ef->armour = 0; |
1328 | 0 | return 0; |
1329 | 0 | } |
1330 | | #endif |
1331 | | |
/** Remove PID wait event from kevent if the fr_event_pid_t is freed
 *
 * Talloc destructor for #fr_event_pid_t.  Also clears the caller's
 * handle (ev->parent) so they know the event is no longer valid.
 *
 * @param[in] ev	to free.
 * @return 0
 */
static int _event_pid_free(fr_event_pid_t *ev)
{
	struct kevent evset;

	if (ev->parent) *ev->parent = NULL;
	if (!ev->is_registered || (ev->pid < 0)) return 0; /* already deleted from kevent */

	EVENT_DEBUG("%p - Disabling event for PID %u - %p was freed", ev->el, (unsigned int)ev->pid, ev);

	EV_SET(&evset, ev->pid, EVFILT_PROC, EV_DELETE, NOTE_EXIT, 0, ev);

	/*
	 *	Best effort - the kevent may already have fired (NOTE_EXIT
	 *	is effectively oneshot) so a failure here is expected and
	 *	deliberately ignored.
	 */
	(void) kevent(ev->el->kq, &evset, 1, NULL, 0, NULL);

	return 0;
}
1352 | | |
/** Evaluate a EVFILT_PROC event
 *
 * Frees the monitoring event, then runs the user's callback with the
 * PID and exit status carried in the kevent.
 */
CC_NO_UBSAN(function) /* UBSAN: false positive - Public/private version of fr_event_list_t trips -fsanitize=function */
static inline CC_HINT(always_inline)
void event_pid_eval(fr_event_list_t *el, struct kevent *kev)
{
	pid_t			pid;
	fr_event_pid_t		*ev;
	fr_event_pid_cb_t	callback;
	void			*uctx;

	EVENT_DEBUG("%p - PID %u exited with status %i",
		    el, (unsigned int)kev->ident, (unsigned int)kev->data);

	ev = talloc_get_type_abort((void *)kev->udata, fr_event_pid_t);

	fr_assert(ev->pid == (pid_t) kev->ident);
	fr_assert((kev->fflags & NOTE_EXIT) != 0);

	/*
	 *	Stash everything the callback needs before ev is freed.
	 */
	pid = ev->pid;
	callback = ev->callback;
	uctx = ev->uctx;

	ev->is_registered = false;	/* so we won't hit kevent again when it's freed */

	/*
	 *	Delete the event before calling it.
	 *
	 *	This also sets the parent pointer
	 *	to NULL, so the thing that started
	 *	monitoring the process knows the
	 *	handle is no longer valid.
	 *
	 *	EVFILT_PROC NOTE_EXIT events are always
	 *	oneshot no matter what flags we pass,
	 *	so we're just reflecting the state of
	 *	the kqueue.
	 */
	talloc_free(ev);

	if (callback) callback(el, pid, (int) kev->data, uctx);
}
1396 | | |
1397 | | /** Called on the next loop through the event loop when inserting an EVFILT_PROC event fails |
1398 | | * |
1399 | | * This is just a trampoleen function which takes the user event and simulates |
1400 | | * an EVFILT_PROC event from it. |
1401 | | * |
1402 | | * @param[in] el That received the event. |
1403 | | * @param[in] uctx An fr_event_pid_t to process. |
1404 | | */ |
1405 | | static void _fr_event_pid_early_exit(fr_event_list_t *el, void *uctx) |
1406 | 0 | { |
1407 | 0 | fr_event_pid_t *ev = talloc_get_type_abort(uctx, fr_event_pid_t); |
1408 | |
|
1409 | 0 | EVENT_DEBUG("%p - PID %ld exited early, triggered through user event", el, (long)ev->pid); |
1410 | | |
1411 | | /* |
1412 | | * Simulate a real struct kevent with the values we |
1413 | | * recorded in fr_event_pid_wait. |
1414 | | */ |
1415 | 0 | event_pid_eval(el, &(struct kevent){ .ident = ev->pid, .data = ev->early_exit.status, .fflags = NOTE_EXIT, .udata = ev }); |
1416 | 0 | } |
1417 | | |
1418 | | /** Insert a PID event into an event list |
1419 | | * |
1420 | | * @note The talloc parent of the memory returned in ev_p must not be changed. |
1421 | | * If the lifetime of the event needs to be bound to another context |
1422 | | * this function should be called with the existing event pointed to by |
1423 | | * ev_p. |
1424 | | * |
1425 | | * @param[in] ctx to bind lifetime of the event to. |
1426 | | * @param[in] el to insert event into. |
1427 | | * @param[in,out] ev_p If not NULL modify this event instead of creating a new one. This is a parent |
1428 | | * in a temporal sense, not in a memory structure or dependency sense. |
1429 | | * @param[in] pid child PID to wait for |
1430 | | * @param[in] callback function to execute if the event fires. |
1431 | | * @param[in] uctx user data to pass to the event. |
1432 | | * @return |
1433 | | * - 0 on success. |
1434 | | * - -1 on failure. |
1435 | | */ |
1436 | | int _fr_event_pid_wait(NDEBUG_LOCATION_ARGS |
1437 | | TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_pid_t const **ev_p, |
1438 | | pid_t pid, fr_event_pid_cb_t callback, void *uctx) |
1439 | 0 | { |
1440 | 0 | fr_event_pid_t *ev; |
1441 | 0 | struct kevent evset; |
1442 | |
|
1443 | 0 | ev = talloc(ctx, fr_event_pid_t); |
1444 | 0 | if (unlikely(ev == NULL)) { |
1445 | 0 | fr_strerror_const("Out of memory"); |
1446 | 0 | return -1; |
1447 | 0 | } |
1448 | 0 | *ev = (fr_event_pid_t) { |
1449 | 0 | .el = el, |
1450 | 0 | .pid = pid, |
1451 | 0 | .callback = callback, |
1452 | 0 | .uctx = uctx, |
1453 | 0 | .parent = ev_p, |
1454 | 0 | #ifndef NDEBUG |
1455 | 0 | .file = file, |
1456 | 0 | .line = line, |
1457 | 0 | #endif |
1458 | 0 | }; |
1459 | 0 | talloc_set_destructor(ev, _event_pid_free); |
1460 | | |
1461 | | /* |
1462 | | * macOS only, on FreeBSD NOTE_EXIT always provides |
1463 | | * the status anyway. |
1464 | | */ |
1465 | 0 | #ifndef NOTE_EXITSTATUS |
1466 | 0 | #define NOTE_EXITSTATUS (0) |
1467 | 0 | #endif |
1468 | |
|
1469 | 0 | EVENT_DEBUG("%p - Adding exit waiter for PID %u", el, (unsigned int)pid); |
1470 | |
|
1471 | 0 | EV_SET(&evset, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT, NOTE_EXIT | NOTE_EXITSTATUS, 0, ev); |
1472 | 0 | ev->is_registered = true; |
1473 | | |
1474 | | /* |
1475 | | * This deals with the race where the process exited |
1476 | | * before we could add it to the kqueue. |
1477 | | * |
1478 | | * Unless our caller is broken, the process should |
1479 | | * still be available for reaping, so we check |
1480 | | * waitid to see if there is a pending process and |
1481 | | * then call the callback as kqueue would have done. |
1482 | | */ |
1483 | 0 | if (unlikely(kevent(el->kq, &evset, 1, NULL, 0, NULL) < 0)) { |
1484 | 0 | siginfo_t info; |
1485 | 0 | int ret; |
1486 | | |
1487 | | /* |
1488 | | * Ensure we don't accidentally pick up the error |
1489 | | * from kevent. |
1490 | | */ |
1491 | 0 | fr_strerror_clear(); |
1492 | |
|
1493 | 0 | ev->is_registered = false; |
1494 | | |
1495 | | /* |
1496 | | * If the child exited before kevent() was |
1497 | | * called, we need to get its status via |
1498 | | * waitid(). |
1499 | | * |
1500 | | * We don't reap the process here to emulate |
1501 | | * what kqueue does (notify but not reap). |
1502 | | * |
1503 | | * waitid returns >0 on success, 0 if the |
1504 | | * process is still running, and -1 on failure. |
1505 | | * |
1506 | | * If we get a 0, then that's extremely strange |
1507 | | * as adding the kevent failed for a reason |
1508 | | * other than the process already having exited. |
1509 | | * |
1510 | | * On Linux waitid will always return 1 to |
1511 | | * indicate the process exited. |
1512 | | * |
1513 | | * On macOS we seem to get a mix of 1 or 0, |
1514 | | * even if the si_code is one of the values |
1515 | | * we'd consider to indicate that the process |
1516 | | * had completed. |
1517 | | */ |
1518 | 0 | ret = waitid(P_PID, pid, &info, WEXITED | WNOHANG | WNOWAIT); |
1519 | 0 | if (ret > 0) { |
1520 | 0 | static fr_table_num_sorted_t const si_codes[] = { |
1521 | 0 | { L("exited"), CLD_EXITED }, |
1522 | 0 | { L("killed"), CLD_KILLED }, |
1523 | 0 | { L("dumped"), CLD_DUMPED }, |
1524 | 0 | { L("trapped"), CLD_TRAPPED }, |
1525 | 0 | { L("stopped"), CLD_STOPPED }, |
1526 | 0 | { L("continued"), CLD_CONTINUED } |
1527 | 0 | }; |
1528 | 0 | static size_t si_codes_len = NUM_ELEMENTS(si_codes); |
1529 | |
|
1530 | 0 | switch (info.si_code) { |
1531 | 0 | case CLD_EXITED: |
1532 | 0 | case CLD_KILLED: |
1533 | 0 | case CLD_DUMPED: |
1534 | 0 | EVENT_DEBUG("%p - PID %ld early exit - code %s (%d), status %d", |
1535 | 0 | el, (long)pid, fr_table_str_by_value(si_codes, info.si_code, "<UNKOWN>"), |
1536 | 0 | info.si_code, info.si_status); |
1537 | | |
1538 | | /* |
1539 | | * Record the status for later |
1540 | | */ |
1541 | 0 | ev->early_exit.status = info.si_status; |
1542 | | |
1543 | | /* |
1544 | | * The user event acts as a surrogate for |
1545 | | * an EVFILT_PROC event, and will be evaluated |
1546 | | * during the next loop through the event loop. |
1547 | | * |
1548 | | * It will be automatically deleted when the |
1549 | | * fr_event_pid_t is freed. |
1550 | | * |
1551 | | * Previously we tried to evaluate the proc |
1552 | | * callback here directly, but this lead to |
1553 | | * multiple problems, the biggest being that |
1554 | | * setting requests back to resumable failed |
1555 | | * because they were not yet yielded, |
1556 | | * leading to hangs. |
1557 | | */ |
1558 | 0 | early_exit: |
1559 | 0 | if (fr_event_user_insert(ev, el, &ev->early_exit.ev, true, _fr_event_pid_early_exit, ev) < 0) { |
1560 | 0 | fr_strerror_printf_push("Failed adding wait for PID %ld, and failed adding " |
1561 | 0 | "backup user event", (long) pid); |
1562 | 0 | error: |
1563 | 0 | talloc_free(ev); |
1564 | 0 | return -1; |
1565 | 0 | } |
1566 | 0 | break; |
1567 | | |
1568 | 0 | default: |
1569 | 0 | fr_strerror_printf("Unexpected code %s (%d) whilst waiting on PID %ld", |
1570 | 0 | fr_table_str_by_value(si_codes, info.si_code, "<UNKOWN>"), |
1571 | 0 | info.si_code, (long) pid); |
1572 | |
|
1573 | 0 | goto error; |
1574 | 0 | } |
1575 | | /* |
1576 | | * Failed adding waiter for process, but process has not completed... |
1577 | | * |
1578 | | * This weird, but seems to happen on macOS occasionally. |
1579 | | * |
1580 | | * Add an event to run early exit... |
1581 | | * |
1582 | | * Man pages for waitid say if it returns 0 the info struct can be in |
1583 | | * a nondeterministic state, so there's nothing more to do. |
1584 | | */ |
1585 | 0 | } else if (ret == 0) { |
1586 | 0 | goto early_exit; |
1587 | 0 | } else { |
1588 | | /* |
1589 | | * Print this error here, so that the caller gets |
1590 | | * the error from kevent(), and not waitpid(). |
1591 | | */ |
1592 | 0 | fr_strerror_printf("Failed adding waiter for PID %ld - kevent %s, waitid %s", |
1593 | 0 | (long) pid, fr_syserror(evset.flags), fr_syserror(errno)); |
1594 | |
|
1595 | 0 | goto error; |
1596 | 0 | } |
1597 | 0 | } |
1598 | | |
1599 | | /* |
1600 | | * Sometimes the caller doesn't care about getting the |
1601 | | * PID. But we still want to clean it up. |
1602 | | */ |
1603 | 0 | if (ev_p) *ev_p = ev; |
1604 | |
|
1605 | 0 | return 0; |
1606 | 0 | } |
1607 | | |
1608 | | /** Saves some boilerplate... |
1609 | | * |
1610 | | */ |
1611 | | static inline CC_HINT(always_inline) |
1612 | | void event_list_reap_run_callback(fr_event_pid_reap_t *reap, pid_t pid, int status) |
1613 | 0 | { |
1614 | 0 | if (reap->callback) reap->callback(reap->el, pid, status, reap->uctx); |
1615 | 0 | } |
1616 | | |
/** Does the actual reaping of PIDs
 *
 * Runs when the monitored process exits: reaps the zombie, notifies the
 * user's callback, then frees the reap entry.
 */
static void _fr_event_pid_reap_cb(UNUSED fr_event_list_t *el, pid_t pid, int status, void *uctx)
{
	fr_event_pid_reap_t *reap = talloc_get_type_abort(uctx, fr_event_pid_reap_t);

	/*
	 *	NOTE(review): this overwrites the kevent-provided status
	 *	with the one waitpid reports (they should agree) - and if
	 *	waitpid fails the original value is retained.
	 */
	waitpid(pid, &status, WNOHANG); /* Don't block the process if there's a logic error somewhere */

	EVENT_DEBUG("%s - Reaper reaped PID %u, status %u - %p", __FUNCTION__, pid, status, reap);

	event_list_reap_run_callback(reap, pid, status);

	/* Destructor removes the entry from el->pid_to_reap */
	talloc_free(reap);
}
1632 | | |
1633 | | static int _fr_event_reap_free(fr_event_pid_reap_t *reap) |
1634 | 0 | { |
1635 | | /* |
1636 | | * Clear out the entry in the pid_to_reap |
1637 | | * list if the event was inserted. |
1638 | | */ |
1639 | 0 | if (fr_dlist_entry_in_list(&reap->entry)) { |
1640 | 0 | EVENT_DEBUG("%s - Removing entry from pid_to_reap %i - %p", __FUNCTION__, |
1641 | 0 | reap->pid_ev ? reap->pid_ev->pid : -1, reap); |
1642 | 0 | fr_dlist_remove(&reap->el->pid_to_reap, reap); |
1643 | 0 | } |
1644 | |
|
1645 | 0 | return 0; |
1646 | 0 | } |
1647 | | |
1648 | | /** Asynchronously wait for a PID to exit, then reap it |
1649 | | * |
1650 | | * This is intended to be used when we no longer care about a process |
1651 | | * exiting, but we still want to clean up its state so we don't have |
1652 | | * zombie processes sticking around. |
1653 | | * |
1654 | | * @param[in] el to use to reap the process. |
1655 | | * @param[in] pid to reap. |
1656 | | * @param[in] callback to call when the process is reaped. |
1657 | | * May be NULL. |
1658 | | * @param[in] uctx to pass to callback. |
1659 | | * @return |
1660 | | * - -1 if we couldn't find the process or it has already exited/been reaped. |
1661 | | * - 0 on success (we setup a process handler). |
1662 | | */ |
1663 | | int _fr_event_pid_reap(NDEBUG_LOCATION_ARGS fr_event_list_t *el, pid_t pid, fr_event_pid_cb_t callback, void *uctx) |
1664 | 0 | { |
1665 | 0 | int ret; |
1666 | 0 | fr_event_pid_reap_t *reap; |
1667 | |
|
1668 | 0 | reap = talloc_zero(NULL, fr_event_pid_reap_t); |
1669 | 0 | if (unlikely(!reap)) { |
1670 | 0 | fr_strerror_const("Out of memory"); |
1671 | 0 | return -1; |
1672 | 0 | } |
1673 | 0 | talloc_set_destructor(reap, _fr_event_reap_free); |
1674 | |
|
1675 | 0 | ret = _fr_event_pid_wait(NDEBUG_LOCATION_VALS reap, el, &reap->pid_ev, pid, _fr_event_pid_reap_cb, reap); |
1676 | 0 | if (ret < 0) { |
1677 | 0 | talloc_free(reap); |
1678 | 0 | return ret; |
1679 | 0 | } |
1680 | | |
1681 | 0 | reap->el = el; |
1682 | 0 | reap->callback = callback; |
1683 | 0 | reap->uctx = uctx; |
1684 | |
|
1685 | 0 | EVENT_DEBUG("%s - Adding reaper for PID %u - %p", __FUNCTION__, pid, reap); |
1686 | |
|
1687 | 0 | fr_dlist_insert_tail(&el->pid_to_reap, reap); |
1688 | |
|
1689 | 0 | return ret; |
1690 | 0 | } |
1691 | | |
/** Send a signal to all the processes we have in our reap list, and reap them
 *
 * Best-effort shutdown helper: first waits (up to timeout) on a private,
 * temporary kqueue for processes to exit on their own, reaping each as it
 * goes, then signals and synchronously reaps whatever is left.
 *
 * @param[in] el	containing the processes to reap.
 * @param[in] timeout	how long to wait before we signal the processes.
 * @param[in] signal	to send to processes.  Should be a fatal signal.
 * @return The number of processes reaped.
 */
unsigned int fr_event_list_reap_signal(fr_event_list_t *el, fr_time_delta_t timeout, int signal)
{
	/* Snapshot taken before any entries are removed - this is the return value */
	unsigned int processed = fr_dlist_num_elements(&el->pid_to_reap);
	fr_event_pid_reap_t *reap = NULL;

	/*
	 *	If we've got a timeout, our best option
	 *	is to use a kqueue instance to monitor
	 *	for process exit.
	 */
	if (fr_time_delta_ispos(timeout) && fr_dlist_num_elements(&el->pid_to_reap)) {
		int status;
		struct kevent evset;
		int waiting = 0;
		int kq = kqueue();	/* temporary, private to this function */
		fr_time_t now, start = el->pub.tl->time(), end = fr_time_add(start, timeout);

		if (unlikely(kq < 0)) goto force;

		fr_dlist_foreach(&el->pid_to_reap, fr_event_pid_reap_t, i) {
			/*
			 *	A reap entry with no PID event should not exist
			 *	on this list - run the callback and discard it.
			 */
			if (!i->pid_ev) {
				EVENT_DEBUG("%p - %s - Reaper already called (logic error)... - %p",
					    el, __FUNCTION__, i);

				event_list_reap_run_callback(i, -1, SIGKILL);
				talloc_free(i);
				continue;
			}

			/*
			 *	See if any processes have exited already
			 */
			if (waitpid(i->pid_ev->pid, &status, WNOHANG) == i->pid_ev->pid) {	/* reap */
				EVENT_DEBUG("%p - %s - Reaper PID %u already exited - %p",
					    el, __FUNCTION__, i->pid_ev->pid, i);
				event_list_reap_run_callback(i, i->pid_ev->pid, SIGKILL);
				talloc_free(i);
				continue;
			}

			/*
			 *	Add the rest to a temporary event loop
			 */
			EV_SET(&evset, i->pid_ev->pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, i);
			if (kevent(kq, &evset, 1, NULL, 0, NULL) < 0) {
				EVENT_DEBUG("%p - %s - Failed adding reaper PID %u to tmp event loop - %p",
					    el, __FUNCTION__, i->pid_ev->pid, i);
				event_list_reap_run_callback(i, i->pid_ev->pid, SIGKILL);
				talloc_free(i);
				continue;
			}
			waiting++;
		}

		/*
		 *	Keep draining process exits as they come in...
		 *	The kevent timeout shrinks each iteration so the
		 *	overall wait never exceeds 'end'.
		 */
		while ((waiting > 0) && fr_time_gt(end, (now = el->pub.tl->time()))) {
			struct kevent kev;
			int ret;

			ret = kevent(kq, NULL, 0, &kev, 1, &fr_time_delta_to_timespec(fr_time_sub(end, now)));
			switch (ret) {
			default:	/* ret < 0, i.e. kevent error */
				EVENT_DEBUG("%p - %s - Reaper tmp loop error %s, forcing process reaping",
					    el, __FUNCTION__, fr_syserror(errno));
				close(kq);
				goto force;

			case 0:		/* timeout expired with processes still running */
				EVENT_DEBUG("%p - %s - Reaper timeout waiting for process exit, forcing process reaping",
					    el, __FUNCTION__);
				close(kq);
				goto force;

			case 1:
				reap = talloc_get_type_abort(kev.udata, fr_event_pid_reap_t);

				EVENT_DEBUG("%p - %s - Reaper reaped PID %u, status %u - %p",
					    el, __FUNCTION__, (unsigned int)kev.ident, (unsigned int)kev.data, reap);
				waitpid(reap->pid_ev->pid, &status, WNOHANG);	/* reap */

				event_list_reap_run_callback(reap, reap->pid_ev->pid, status);
				talloc_free(reap);	/* also removes it from el->pid_to_reap */
				break;
			}
			waiting--;
		}

		close(kq);
	}

force:
	/*
	 *	Deal with any lingering reap requests
	 */
	while ((reap = fr_dlist_head(&el->pid_to_reap))) {
		int status;

		EVENT_DEBUG("%s - Reaper forcefully reaping PID %u - %p", __FUNCTION__, reap->pid_ev->pid, reap);

		if (kill(reap->pid_ev->pid, signal) < 0) {
			/*
			 *	Make sure we don't hang if the
			 *	process has actually exited.
			 *
			 *	We could check for ESRCH but it's
			 *	not clear if that'd be returned
			 *	for a PID in the unreaped state
			 *	or not...
			 *
			 *	NOTE(review): if this waitpid() also fails,
			 *	'status' is passed to the callback
			 *	uninitialized - worth confirming upstream.
			 */
			waitpid(reap->pid_ev->pid, &status, WNOHANG);
			event_list_reap_run_callback(reap, reap->pid_ev->pid, status);
			talloc_free(reap);
			continue;
		}

		/*
		 *	Wait until the child process exits
		 */
		waitpid(reap->pid_ev->pid, &status, 0);
		event_list_reap_run_callback(reap, reap->pid_ev->pid, status);
		talloc_free(reap);
	}

	return processed;
}
1826 | | |
1827 | | /** Memory will not be freed if we fail to remove the event from the kqueue |
1828 | | * |
1829 | | * It's easier to debug memory leaks with modern tooling than it is |
1830 | | * to determine why we get random failures and event leaks inside of kqueue. |
1831 | | * |
1832 | | * @return |
1833 | | * - 0 on success. |
1834 | | * - -1 on failure. |
1835 | | */ |
1836 | | static int _event_user_delete(fr_event_user_t *ev) |
1837 | 0 | { |
1838 | 0 | if (ev->is_registered) { |
1839 | 0 | struct kevent evset; |
1840 | |
|
1841 | 0 | EV_SET(&evset, (uintptr_t)ev, EVFILT_USER, EV_DELETE, 0, 0, 0); |
1842 | |
|
1843 | 0 | if (unlikely(kevent(ev->el->kq, &evset, 1, NULL, 0, NULL) < 0)) { |
1844 | 0 | fr_strerror_printf("Failed removing user event - kevent %s", fr_syserror(evset.flags)); |
1845 | 0 | return -1; |
1846 | 0 | } |
1847 | 0 | ev->is_registered = false; |
1848 | 0 | } |
1849 | | |
1850 | 0 | return 0; |
1851 | 0 | } |
1852 | | |
1853 | | static inline CC_HINT(always_inline) |
1854 | | void event_user_eval(fr_event_list_t *el, struct kevent *kev) |
1855 | 0 | { |
1856 | 0 | fr_event_user_t *ev; |
1857 | | |
1858 | | /* |
1859 | | * This is just a "wakeup" event, which |
1860 | | * is always ignored. |
1861 | | */ |
1862 | 0 | if (kev->ident == 0) return; |
1863 | | |
1864 | 0 | ev = talloc_get_type_abort((void *)kev->ident, fr_event_user_t); |
1865 | 0 | fr_assert((uintptr_t)ev == kev->ident); |
1866 | |
|
1867 | 0 | ev->callback(el, ev->uctx); |
1868 | 0 | } |
1869 | | |
1870 | | /** Add a user callback to the event list. |
1871 | | * |
1872 | | * @param[in] ctx to allocate the event in. |
1873 | | * @param[in] el Containing the timer events. |
1874 | | * @param[out] ev_p Where to write a pointer. |
1875 | | * @param[in] trigger Whether the user event is triggered initially. |
1876 | | * @param[in] callback for EVFILT_USER. |
1877 | | * @param[in] uctx for the callback. |
1878 | | * @return |
1879 | | * - 0 on success. |
1880 | | * - -1 on error. |
1881 | | */ |
1882 | | int _fr_event_user_insert(NDEBUG_LOCATION_ARGS |
1883 | | TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_user_t **ev_p, |
1884 | | bool trigger, fr_event_user_cb_t callback, void *uctx) |
1885 | 0 | { |
1886 | 0 | fr_event_user_t *ev; |
1887 | 0 | struct kevent evset; |
1888 | |
|
1889 | 0 | ev = talloc(ctx, fr_event_user_t); |
1890 | 0 | if (unlikely(ev == NULL)) { |
1891 | 0 | fr_strerror_const("Out of memory"); |
1892 | 0 | return -1; |
1893 | 0 | } |
1894 | 0 | *ev = (fr_event_user_t) { |
1895 | 0 | .el = el, |
1896 | 0 | .callback = callback, |
1897 | 0 | .uctx = uctx, |
1898 | 0 | #ifndef NDEBUG |
1899 | 0 | .file = file, |
1900 | 0 | .line = line, |
1901 | 0 | #endif |
1902 | 0 | }; |
1903 | |
|
1904 | 0 | EV_SET(&evset, (uintptr_t)ev, |
1905 | 0 | EVFILT_USER, EV_ADD | EV_DISPATCH, (trigger * NOTE_TRIGGER), 0, ev); |
1906 | |
|
1907 | 0 | if (unlikely(kevent(el->kq, &evset, 1, NULL, 0, NULL) < 0)) { |
1908 | 0 | fr_strerror_printf("Failed adding user event - kevent %s", fr_syserror(evset.flags)); |
1909 | 0 | talloc_free(ev); |
1910 | 0 | return -1; |
1911 | 0 | } |
1912 | 0 | ev->is_registered = true; |
1913 | 0 | talloc_set_destructor(ev, _event_user_delete); |
1914 | |
|
1915 | 0 | if (ev_p) *ev_p = ev; |
1916 | |
|
1917 | 0 | return 0; |
1918 | 0 | } |
1919 | | |
1920 | | /** Trigger a user event |
1921 | | * |
1922 | | * @param[in] ev Handle for the user event. |
1923 | | * @return |
1924 | | * - 0 on success. |
1925 | | * - -1 on error. |
1926 | | */ |
1927 | | int fr_event_user_trigger(fr_event_user_t *ev) |
1928 | 0 | { |
1929 | 0 | struct kevent evset; |
1930 | |
|
1931 | 0 | EV_SET(&evset, (uintptr_t)ev, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL); |
1932 | |
|
1933 | 0 | if (unlikely(kevent(ev->el->kq, &evset, 1, NULL, 0, NULL) < 0)) { |
1934 | 0 | fr_strerror_printf("Failed triggering user event - kevent %s", fr_syserror(evset.flags)); |
1935 | 0 | return -1; |
1936 | 0 | } |
1937 | | |
1938 | 0 | return 0; |
1939 | 0 | } |
1940 | | |
1941 | | /** Add a pre-event callback to the event list. |
1942 | | * |
1943 | | * Events are serviced in insert order. i.e. insert A, B, we then |
1944 | | * have A running before B. |
1945 | | * |
1946 | | * @param[in] el Containing the timer events. |
1947 | | * @param[in] callback The pre-processing callback. |
1948 | | * @param[in] uctx for the callback. |
1949 | | * @return |
1950 | | * - < 0 on error |
1951 | | * - 0 on success |
1952 | | */ |
1953 | | int fr_event_pre_insert(fr_event_list_t *el, fr_event_status_cb_t callback, void *uctx) |
1954 | 0 | { |
1955 | 0 | fr_event_pre_t *pre; |
1956 | |
|
1957 | 0 | pre = talloc(el, fr_event_pre_t); |
1958 | 0 | pre->callback = callback; |
1959 | 0 | pre->uctx = uctx; |
1960 | |
|
1961 | 0 | fr_dlist_insert_tail(&el->pre_callbacks, pre); |
1962 | |
|
1963 | 0 | return 0; |
1964 | 0 | } |
1965 | | |
1966 | | /** Delete a pre-event callback from the event list. |
1967 | | * |
1968 | | * @param[in] el Containing the timer events. |
1969 | | * @param[in] callback The pre-processing callback. |
1970 | | * @param[in] uctx for the callback. |
1971 | | * @return |
1972 | | * - < 0 on error |
1973 | | * - 0 on success |
1974 | | */ |
1975 | | int fr_event_pre_delete(fr_event_list_t *el, fr_event_status_cb_t callback, void *uctx) |
1976 | 0 | { |
1977 | 0 | fr_dlist_foreach(&el->pre_callbacks, fr_event_pre_t, pre) { |
1978 | 0 | if ((pre->callback == callback) && |
1979 | 0 | (pre->uctx == uctx)) { |
1980 | 0 | fr_dlist_remove(&el->pre_callbacks, pre); |
1981 | 0 | return talloc_free(pre); |
1982 | 0 | } |
1983 | 0 | } |
1984 | | |
1985 | 0 | return -1; |
1986 | 0 | } |
1987 | | |
1988 | | /** Add a post-event callback to the event list. |
1989 | | * |
1990 | | * Events are serviced in insert order. i.e. insert A, B, we then |
1991 | | * have A running before B. |
1992 | | * |
1993 | | * @param[in] el Containing the timer events. |
1994 | | * @param[in] callback The post-processing callback. |
1995 | | * @param[in] uctx for the callback. |
1996 | | * @return |
1997 | | * - < 0 on error |
1998 | | * - 0 on success |
1999 | | */ |
2000 | | int fr_event_post_insert(fr_event_list_t *el, fr_event_post_cb_t callback, void *uctx) |
2001 | 0 | { |
2002 | 0 | fr_event_post_t *post; |
2003 | |
|
2004 | 0 | post = talloc(el, fr_event_post_t); |
2005 | 0 | if (!post) return -1; |
2006 | 0 | post->callback = callback; |
2007 | 0 | post->uctx = uctx; |
2008 | |
|
2009 | 0 | fr_dlist_insert_tail(&el->post_callbacks, post); |
2010 | |
|
2011 | 0 | return 0; |
2012 | 0 | } |
2013 | | |
2014 | | /** Delete a post-event callback from the event list. |
2015 | | * |
2016 | | * @param[in] el Containing the timer events. |
2017 | | * @param[in] callback The post-processing callback. |
2018 | | * @param[in] uctx for the callback. |
2019 | | * @return |
2020 | | * - < 0 on error |
2021 | | * - 0 on success |
2022 | | */ |
2023 | | int fr_event_post_delete(fr_event_list_t *el, fr_event_post_cb_t callback, void *uctx) |
2024 | 0 | { |
2025 | 0 | fr_dlist_foreach(&el->post_callbacks, fr_event_post_t, post) { |
2026 | 0 | if ((post->callback == callback) && |
2027 | 0 | (post->uctx == uctx)) { |
2028 | 0 | fr_dlist_remove(&el->post_callbacks, post); |
2029 | 0 | return talloc_free(post); |
2030 | 0 | } |
2031 | 0 | } |
2032 | | |
2033 | 0 | return -1; |
2034 | 0 | } |
2035 | | |
/** Gather outstanding timer and file descriptor events
 *
 * Computes how long kevent() may block based on the next pending timer,
 * runs any pre-callbacks (which can force an immediate return), then
 * collects ready FD events into el->events for fr_event_service().
 *
 * @param[in] el	to process events for.
 * @param[in] now	The current time.
 * @param[in] wait	if true, block on the kevent() call until a timer or file descriptor event occurs.
 * @return
 *	- <0 error, or the event loop is exiting
 *	- the number of outstanding I/O events, +1 if at least one timer will fire.
 */
int fr_event_corral(fr_event_list_t *el, fr_time_t now, bool wait)
{
	fr_time_delta_t		when, *wake;		/* wake == NULL means "block forever" */
	struct timespec		ts_when, *ts_wake;
	int			num_fd_events;
	bool			timer_event_ready = false;
	fr_time_t		next;

	el->num_fd_events = 0;

	if (el->will_exit || el->exit) {
		el->exit = el->will_exit;

		fr_strerror_const("Event loop exiting");
		return -1;
	}

	/*
	 *	By default we wait for 0ns, which means returning
	 *	immediately from kevent().
	 */
	when = fr_time_delta_wrap(0);
	wake = &when;

	/*
	 *	See when we have to wake up.  Either now, if the timer
	 *	events are in the past.  Or, we wait for a future
	 *	timer event.
	 */
	next = fr_timer_list_when(el->pub.tl);
	if (fr_time_neq(next, fr_time_wrap(0))) {
		if (fr_time_lteq(next, now)) {
			timer_event_ready = true;

		} else if (wait) {
			when = fr_time_sub(next, now);

		} /* else we're not waiting, leave "when == 0" */

	} else if (wait) {
		/*
		 *	We're asked to wait, but there's no timer
		 *	event.  We can then sleep forever.
		 */
		wake = NULL;
	}

	/*
	 *	Run the status callbacks.  It may tell us that the
	 *	application has more work to do, in which case we
	 *	re-set the timeout to be instant.
	 *
	 *	We only run these callbacks if the caller is otherwise
	 *	idle.
	 */
	if (wait) {
		fr_dlist_foreach(&el->pre_callbacks, fr_event_pre_t, pre) {
			if (pre->callback(now, wake ? *wake : fr_time_delta_wrap(0), pre->uctx) > 0) {
				wake = &when;
				when = fr_time_delta_wrap(0);
			}
		}
	}

	/*
	 *	Wake is the delta between el->now
	 *	(the event loops view of the current time)
	 *	and when the event should occur.
	 */
	if (wake) {
		ts_when = fr_time_delta_to_timespec(when);
		ts_wake = &ts_when;
	} else {
		ts_wake = NULL;	/* NULL timespec == kevent blocks indefinitely */
	}

	/*
	 *	Populate el->events with the list of I/O events
	 *	that occurred since this function was last called
	 *	or wait for the next timer event.
	 */
	num_fd_events = kevent(el->kq, NULL, 0, el->events, FR_EV_BATCH_FDS, ts_wake);

	/*
	 *	Interrupt is different from timeout / FD events.
	 */
	if (unlikely(num_fd_events < 0)) {
		if (errno == EINTR) {
			return 0;	/* signal - not an error, just nothing to service */
		} else {
			fr_strerror_printf("Failed calling kevent: %s", fr_syserror(errno));
			return -1;
		}
	}

	el->num_fd_events = num_fd_events;

	EVENT_DEBUG("%p - %s - kevent returned %u FD events", el, __FUNCTION__, el->num_fd_events);

	/*
	 *	If there are no FD events, we must have woken up from a timer
	 */
	if (!num_fd_events) {
		if (wait) timer_event_ready = true;
	}
	/*
	 *	The caller doesn't really care what the value of the
	 *	return code is.  Just that it's greater than zero if
	 *	events needs servicing.
	 *
	 *	num_fd_events	  > 0 - if kevent() returns FD events
	 *	timer_event_ready > 0 - if there were timers ready BEFORE or AFTER calling kevent()
	 */
	return num_fd_events + timer_event_ready;
}
2160 | | |
2161 | | CC_NO_UBSAN(function) /* UBSAN: false positive - public vs private fr_event_list_t trips --fsanitize=function*/ |
2162 | | static inline CC_HINT(always_inline) |
2163 | | void event_callback(fr_event_list_t *el, fr_event_fd_t *ef, int *filter, int flags, int *fflags) |
2164 | 0 | { |
2165 | 0 | fr_event_fd_cb_t fd_cb; |
2166 | |
|
2167 | 0 | while ((fd_cb = event_fd_func(ef, filter, fflags))) { |
2168 | 0 | fd_cb(el, ef->fd, flags, ef->uctx); |
2169 | 0 | } |
2170 | 0 | } |
2171 | | |
/** Service any outstanding timer or file descriptor events
 *
 * Runs, in order: FD/user/proc events gathered by fr_event_corral(),
 * deferred frees queued by the I/O handlers, ready timers, and finally
 * the post-processing callbacks.
 *
 * @param[in] el containing events to service.
 */
CC_NO_UBSAN(function) /* UBSAN: false positive - Public/private version of fr_event_list_t trips -fsanitize=function */
void fr_event_service(fr_event_list_t *el)
{
	fr_timer_list_t	*etl = el->pub.tl;
	int		i;
	fr_event_post_t	*post;
	fr_time_t	when, now;

	if (unlikely(el->exit)) return;

	EVENT_DEBUG("%p - %s - Servicing %u FD events", el, __FUNCTION__, el->num_fd_events);

	/*
	 *	Run all of the file descriptor events.
	 *
	 *	in_handler defers any frees requested by the callbacks
	 *	(see the fd_to_free drain below).
	 */
	el->in_handler = true;
	for (i = 0; i < el->num_fd_events; i++) {
		/*
		 *	Process any user events
		 */
		switch (el->events[i].filter) {
		case EVFILT_USER:
			event_user_eval(el, &el->events[i]);
			continue;

		/*
		 *	Process proc events
		 */
		case EVFILT_PROC:
			event_pid_eval(el, &el->events[i]);
			continue;

		/*
		 *	Process various types of file descriptor events
		 */
		default:
		{
			fr_event_fd_t	*ef = talloc_get_type_abort(el->events[i].udata, fr_event_fd_t);
			int		fd_errno = 0;

			int		fflags = el->events[i].fflags;	/* mutable */
			int		filter = el->events[i].filter;
			int		flags = el->events[i].flags;

			if (!ef->is_registered) continue;	/* Was deleted between corral and service */

			if (unlikely(flags & EV_ERROR)) {
				fd_errno = el->events[i].data;
			ev_error:
				/*
				 *	Call the error handler, but only if the socket hasn't been deleted at EOF
				 *	below.
				 */
				if (ef->is_registered && ef->error) ef->error(el, ef->fd, flags, fd_errno, ef->uctx);
				TALLOC_FREE(ef);
				continue;
			}

			/*
			 *	EOF can indicate we've actually reached
			 *	the end of a file, but for sockets it usually
			 *	indicates the other end of the connection
			 *	has gone away.
			 */
			if (flags & EV_EOF) {
				/*
				 *	This is fine, the callback will get notified
				 *	via the flags field.
				 */
				if (ef->type == FR_EVENT_FD_FILE) goto service;
#if defined(__linux__) && defined(SO_GET_FILTER)
				/*
				 *	There seems to be an issue with the
				 *	ioctl(...SIOCNQ...) call libkqueue
				 *	uses to determine the number of bytes
				 *	readable.  When ioctl returns, the number
				 *	of bytes available is set to zero, which
				 *	libkqueue interprets as EOF.
				 *
				 *	As a workaround, if we're not reading
				 *	a file, and are operating on a raw socket
				 *	with a packet filter attached, we ignore
				 *	the EOF flag and continue.
				 */
				if ((ef->sock_type == SOCK_RAW) && (ef->type == FR_EVENT_FD_PCAP)) goto service;
#endif

				/*
				 *	If we see an EV_EOF flag that means the
				 *	read side of the socket has been closed
				 *	but there may still be pending data.
				 *
				 *	Dispatch the read event and then error.
				 */
				if ((el->events[i].filter == EVFILT_READ) && (el->events[i].data > 0)) {
					event_callback(el, ef, &filter, flags, &fflags);
				}

				fd_errno = el->events[i].fflags;

				goto ev_error;
			}

		service:
#ifndef NDEBUG
			EVENT_DEBUG("Running event for fd %d, from %s[%d]", ef->fd, ef->file, ef->line);
#endif

			/*
			 *	Service the event_fd events
			 */
			event_callback(el, ef, &filter, flags, &fflags);
		}
		}
	}

	/*
	 *	Process any deferred frees performed
	 *	by the I/O handlers.
	 *
	 *	The events are removed from the FD rbtree
	 *	and kevent immediately, but frees are
	 *	deferred to allow stale events to be
	 *	skipped sans SEGV.
	 */
	el->in_handler = false;	/* Allow events to be deleted */
	{
		fr_event_fd_t *ef;

		while ((ef = fr_dlist_head(&el->fd_to_free))) talloc_free(ef);
	}

	/*
	 *	We must call el->time() again here, else the event
	 *	list's time gets updated too infrequently, and we
	 *	can end up with a situation where timers are
	 *	serviced much later than they should be, which can
	 *	cause strange interaction effects, spurious calls
	 *	to kevent, and busy loops.
	 */
	now = etl->time();

	/*
	 *	Run all of the timer events.  Note that these can add
	 *	new timers!
	 */
	if (fr_time_neq(fr_timer_list_when(el->pub.tl), fr_time_wrap(0))) {
		int ret;

		when = now;

		ret = fr_timer_list_run(etl, &when);
		if (!fr_cond_assert(ret >= 0)) { /* catastrophic error, trigger event loop exit */
			el->exit = 1;
			return;
		}

		EVENT_DEBUG("%p - %s - Serviced %u timer(s)", el, __FUNCTION__, (unsigned int)ret);
	}

	/* Refresh again - timer callbacks may have taken time to run */
	now = etl->time();

	/*
	 *	Run all of the post-processing events.
	 */
	for (post = fr_dlist_head(&el->post_callbacks);
	     post != NULL;
	     post = fr_dlist_next(&el->post_callbacks, post)) {
		post->callback(el, now, post->uctx);
	}
}
2347 | | |
2348 | | /** Signal an event loop exit with the specified code |
2349 | | * |
2350 | | * The event loop will complete its current iteration, and then exit with the specified code. |
2351 | | * |
2352 | | * @param[in] el to signal to exit. |
2353 | | * @param[in] code for #fr_event_loop to return. |
2354 | | */ |
2355 | | void fr_event_loop_exit(fr_event_list_t *el, int code) |
2356 | 0 | { |
2357 | 0 | if (unlikely(!el)) return; |
2358 | | |
2359 | 0 | el->will_exit = code; |
2360 | 0 | } |
2361 | | |
2362 | | /** Check to see whether the event loop is in the process of exiting |
2363 | | * |
2364 | | * @param[in] el to check. |
2365 | | */ |
2366 | | bool fr_event_loop_exiting(fr_event_list_t *el) |
2367 | 0 | { |
2368 | 0 | return ((el->will_exit != 0) || (el->exit != 0)); |
2369 | 0 | } |
2370 | | |
2371 | | /** Run an event loop |
2372 | | * |
2373 | | * @note Will not return until #fr_event_loop_exit is called. |
2374 | | * |
2375 | | * @param[in] el to start processing. |
2376 | | */ |
2377 | | CC_HINT(flatten) int fr_event_loop(fr_event_list_t *el) |
2378 | 0 | { |
2379 | 0 | el->will_exit = el->exit = 0; |
2380 | |
|
2381 | 0 | el->dispatch = true; |
2382 | 0 | while (!el->exit) { |
2383 | 0 | if (unlikely(fr_event_corral(el, el->pub.tl->time(), true)) < 0) break; |
2384 | 0 | fr_event_service(el); |
2385 | 0 | } |
2386 | | |
2387 | | /* |
2388 | | * Give processes five seconds to exit. |
2389 | | * This means any triggers that we may |
2390 | | * have issued when the server exited |
2391 | | * have a chance to complete. |
2392 | | */ |
2393 | 0 | fr_event_list_reap_signal(el, fr_time_delta_from_sec(5), SIGKILL); |
2394 | 0 | el->dispatch = false; |
2395 | |
|
2396 | 0 | return el->exit; |
2397 | 0 | } |
2398 | | |
/** Cleanup an event list
 *
 * Frees/destroys any resources associated with an event list
 *
 * Talloc destructor.  Ordering here matters - see inline comments.
 *
 * @param[in] el to free resources for.
 */
static int _event_list_free(fr_event_list_t *el)
{
	/*
	 *	Reap any outstanding child processes immediately
	 *	(zero timeout), killing them so we don't leave
	 *	zombies behind.
	 */
	fr_event_list_reap_signal(el, fr_time_delta_wrap(0), SIGKILL);

	/*
	 *	Free child events while el->kq is still open -
	 *	their destructors (e.g. _event_user_delete) call
	 *	kevent() on el->kq to deregister themselves.
	 */
	talloc_free_children(el);

	/*
	 *	kq is initialised to -1 before kqueue() is called in
	 *	fr_event_list_alloc(), so this destructor is safe to
	 *	run on a partially-constructed list.
	 */
	if (el->kq >= 0) close(el->kq);

	return 0;
}
2415 | | |
2416 | | /** Free any memory we allocated for indexes |
2417 | | * |
2418 | | */ |
2419 | | static int _event_free_indexes(UNUSED void *uctx) |
2420 | 0 | { |
2421 | 0 | unsigned int i; |
2422 | |
|
2423 | 0 | for (i = 0; i < NUM_ELEMENTS(filter_maps); i++) if (talloc_free(filter_maps[i].ev_to_func) < 0) return -1; |
2424 | 0 | return 0; |
2425 | 0 | } |
2426 | | |
2427 | | static int _event_build_indexes(UNUSED void *uctx) |
2428 | 0 | { |
2429 | 0 | unsigned int i; |
2430 | |
|
2431 | 0 | for (i = 0; i < NUM_ELEMENTS(filter_maps); i++) event_fd_func_index_build(&filter_maps[i]); |
2432 | 0 | return 0; |
2433 | 0 | } |
2434 | | |
2435 | | #ifdef EVFILT_LIBKQUEUE |
2436 | | /** kqueue logging wrapper function |
2437 | | * |
2438 | | */ |
2439 | | static CC_HINT(format (printf, 1, 2)) CC_HINT(nonnull) |
2440 | | void _event_kqueue_log(char const *fmt, ...) |
2441 | 0 | { |
2442 | 0 | va_list ap; |
2443 | |
|
2444 | 0 | va_start(ap, fmt); |
2445 | 0 | fr_vlog(&default_log, L_DBG, __FILE__, __LINE__, fmt, ap); |
2446 | 0 | va_end(ap); |
2447 | 0 | } |
2448 | | |
/** If we're building with libkqueue, and at debug level 4 or higher, enable libkqueue debugging output
 *
 * This requires a debug build of libkqueue
 *
 * Run-once init hook.  Uses a dedicated kqueue (log_conf_kq) to send
 * EVFILT_LIBKQUEUE configuration commands, with receipt events to
 * confirm each command was accepted.
 */
static int _event_kqueue_logging(UNUSED void *uctx)
{
	struct kevent kev, receipt;

	log_conf_kq = kqueue();
	if (unlikely(log_conf_kq < 0)) {
		fr_strerror_const("Failed initialising logging configuration kqueue");
		return -1;
	}

	/*
	 *	Register our log function.  The zero timespec makes this
	 *	a non-blocking receipt check; != 1 means the command was
	 *	not acknowledged (e.g. non-debug libkqueue build) - not
	 *	fatal, so return 1 rather than -1.
	 */
	EV_SET(&kev, 0, EVFILT_LIBKQUEUE, EV_ADD, NOTE_DEBUG_FUNC, (intptr_t)_event_kqueue_log, NULL);
	if (kevent(log_conf_kq, &kev, 1, &receipt, 1, &(struct timespec){}) != 1) {
		close(log_conf_kq);
		log_conf_kq = -1;
		return 1;
	}

	/*
	 *	Only turn the (verbose) debug stream on when the server
	 *	itself is running at a high enough debug level.
	 */
	if (fr_debug_lvl >= L_DBG_LVL_3) {
		EV_SET(&kev, 0, EVFILT_LIBKQUEUE, EV_ADD, NOTE_DEBUG, 1, NULL);
		if (kevent(log_conf_kq, &kev, 1, &receipt, 1, &(struct timespec){}) != 1) {
			fr_strerror_const("Failed enabling libkqueue debug logging");
			close(log_conf_kq);
			log_conf_kq = -1;
			return -1;
		}
	}

	return 0;
}
2482 | | |
2483 | | static int _event_kqueue_logging_stop(UNUSED void *uctx) |
2484 | 0 | { |
2485 | 0 | struct kevent kev, receipt; |
2486 | |
|
2487 | 0 | EV_SET(&kev, 0, EVFILT_LIBKQUEUE, EV_ADD, NOTE_DEBUG_FUNC, 0, NULL); |
2488 | 0 | (void)kevent(log_conf_kq, &kev, 1, &receipt, 1, &(struct timespec){}); |
2489 | |
|
2490 | 0 | close(log_conf_kq); |
2491 | 0 | log_conf_kq = -1; |
2492 | |
|
2493 | 0 | return 0; |
2494 | 0 | } |
2495 | | #endif |
2496 | | |
/** Initialise a new event list
 *
 * @param[in] ctx		to allocate memory in.
 * @param[in] status		callback, called on each iteration of the event list.
 * @param[in] status_uctx	context for the status callback
 * @return
 *	- A pointer to a new event list on success (free with talloc_free).
 *	- NULL on error.
 */
fr_event_list_t *fr_event_list_alloc(TALLOC_CTX *ctx, fr_event_status_cb_t status, void *status_uctx)
{
	fr_event_list_t		*el;
	struct kevent		kev;
	int			ret;

	/*
	 *	Build the map indexes the first time this
	 *	function is called.
	 */
	fr_atexit_global_once_ret(&ret, _event_build_indexes, _event_free_indexes, NULL);
#ifdef EVFILT_LIBKQUEUE
	fr_atexit_global_once_ret(&ret, _event_kqueue_logging, _event_kqueue_logging_stop, NULL);
#endif

	el = talloc_zero(ctx, fr_event_list_t);
	if (!fr_cond_assert(el)) {
		fr_strerror_const("Out of memory");
		return NULL;
	}
	el->kq = -1;	/* So destructor can be used before kqueue() provides us with fd */
	talloc_set_destructor(el, _event_list_free);

	el->pub.tl = fr_timer_list_lst_alloc(el, NULL);
	if (!el->pub.tl) {
		fr_strerror_const("Failed allocating timer list");
	error:
		/* Destructor handles partial construction (kq may still be -1) */
		talloc_free(el);
		return NULL;
	}

	el->fds = fr_rb_inline_talloc_alloc(el, fr_event_fd_t, node, fr_event_fd_cmp, NULL);
	if (!el->fds) {
		fr_strerror_const("Failed allocating FD tree");
		goto error;
	}

	el->kq = kqueue();
	if (el->kq < 0) {
		fr_strerror_printf("Failed allocating kqueue: %s", fr_syserror(errno));
		goto error;
	}

	fr_dlist_talloc_init(&el->pre_callbacks, fr_event_pre_t, entry);
	fr_dlist_talloc_init(&el->post_callbacks, fr_event_post_t, entry);
	fr_dlist_talloc_init(&el->pid_to_reap, fr_event_pid_reap_t, entry);
	fr_dlist_talloc_init(&el->fd_to_free, fr_event_fd_t, entry);
	if (status) (void) fr_event_pre_insert(el, status, status_uctx);

	/*
	 *	Set our "exit" callback as ident 0.
	 *	(ident 0 is the reserved wakeup event ignored by
	 *	event_user_eval())
	 */
	EV_SET(&kev, 0, EVFILT_USER, EV_ADD | EV_CLEAR, NOTE_FFNOP, 0, NULL);
	if (kevent(el->kq, &kev, 1, NULL, 0, NULL) < 0) {
		fr_strerror_printf("Failed adding exit callback to kqueue: %s", fr_syserror(errno));
		goto error;
	}

	return el;
}
2566 | | |
2567 | | /** Return whether the event loop has any active events |
2568 | | * |
2569 | | */ |
2570 | | bool fr_event_list_empty(fr_event_list_t *el) |
2571 | 0 | { |
2572 | 0 | return fr_time_eq(fr_timer_list_when(el->pub.tl), fr_time_wrap(0)) && (fr_rb_num_elements(el->fds) == 0); |
2573 | 0 | } |
2574 | | #ifdef TESTING |
2575 | | /* |
2576 | | * cc -g -I .. -c rb.c -o rbtree.o && cc -g -I .. -c isaac.c -o isaac.o && cc -DTESTING -I .. -c event.c -o event_mine.o && cc event_mine.o rbtree.o isaac.o -o event |
2577 | | * |
2578 | | * ./event |
2579 | | * |
2580 | | * And hit CTRL-S to stop the output, CTRL-Q to continue. |
2581 | | * It normally alternates printing the time and sleeping, |
2582 | | * but when you hit CTRL-S/CTRL-Q, you should see a number |
2583 | | * of events run right after each other. |
2584 | | * |
2585 | | * OR |
2586 | | * |
2587 | | * valgrind --tool=memcheck --leak-check=full --show-reachable=yes ./event |
2588 | | */ |
2589 | | |
2590 | | static void print_time(void *ctx) |
2591 | | { |
2592 | | fr_time_t when; |
2593 | | int64_t usec; |
2594 | | |
2595 | | when = *(fr_time_t *) ctx; |
2596 | | usec = fr_time_to_usec(when); |
2597 | | |
2598 | | printf("%d.%06d\n", usec / USEC, usec % USEC); |
2599 | | fflush(stdout); |
2600 | | } |
2601 | | |
2602 | | static fr_randctx rand_pool; |
2603 | | |
2604 | | static uint32_t event_rand(void) |
2605 | | { |
2606 | | uint32_t num; |
2607 | | |
2608 | | num = rand_pool.randrsl[rand_pool.randcnt++]; |
2609 | | if (rand_pool.randcnt == 256) { |
2610 | | fr_isaac(&rand_pool); |
2611 | | rand_pool.randcnt = 0; |
2612 | | } |
2613 | | |
2614 | | return num; |
2615 | | } |
2616 | | |
2617 | | |
2618 | | #define MAX 100 |
2619 | | int main(int argc, char **argv) |
2620 | | { |
2621 | | int i, rcode; |
2622 | | fr_time_t array[MAX]; |
2623 | | fr_time_t now, when; |
2624 | | fr_event_list_t *el; |
2625 | | |
2626 | | el = fr_event_list_alloc(NULL, NULL); |
2627 | | if (!el) fr_exit_now(1); |
2628 | | |
2629 | | memset(&rand_pool, 0, sizeof(rand_pool)); |
2630 | | rand_pool.randrsl[1] = time(NULL); |
2631 | | |
2632 | | fr_rand_init(&rand_pool, 1); |
2633 | | rand_pool.randcnt = 0; |
2634 | | |
2635 | | array[0] = el->time(); |
2636 | | for (i = 1; i < MAX; i++) { |
2637 | | array[i] = array[i - 1]; |
2638 | | array[i] += event_rand() & 0xffff; |
2639 | | |
2640 | | fr_timer_at(NULL, el, array[i], false, print_time, array[i]); |
2641 | | } |
2642 | | |
2643 | | while (fr_event_list_num_timers(el)) { |
2644 | | now = el->time(); |
2645 | | when = now; |
2646 | | if (!fr_timer_run(el, &when)) { |
2647 | | int delay = (when - now) / 1000; /* nanoseconds to microseconds */ |
2648 | | |
2649 | | printf("\tsleep %d microseconds\n", delay); |
2650 | | fflush(stdout); |
2651 | | usleep(delay); |
2652 | | } |
2653 | | } |
2654 | | |
2655 | | talloc_free(el); |
2656 | | |
2657 | | return 0; |
2658 | | } |
2659 | | #endif |