Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson |
3 | | * |
4 | | * Redistribution and use in source and binary forms, with or without |
5 | | * modification, are permitted provided that the following conditions |
6 | | * are met: |
7 | | * 1. Redistributions of source code must retain the above copyright |
8 | | * notice, this list of conditions and the following disclaimer. |
9 | | * 2. Redistributions in binary form must reproduce the above copyright |
10 | | * notice, this list of conditions and the following disclaimer in the |
11 | | * documentation and/or other materials provided with the distribution. |
12 | | * 3. The name of the author may not be used to endorse or promote products |
13 | | * derived from this software without specific prior written permission. |
14 | | * |
15 | | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
16 | | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
17 | | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
18 | | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
19 | | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
20 | | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
21 | | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
22 | | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
23 | | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
24 | | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
25 | | */ |
#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#ifndef _WIN32
#include <unistd.h>
#endif
49 | | |
50 | | #include "event-internal.h" |
51 | | #include "evmap-internal.h" |
52 | | #include "mm-internal.h" |
53 | | #include "changelist-internal.h" |
54 | | |
55 | | /** An entry for an evmap_io list: notes all the events that want to read or |
56 | | write on a given fd, and the number of each. |
57 | | */ |
struct evmap_io {
	struct event_dlist events;	/* all events registered on this fd */
	ev_uint16_t nread;		/* number of EV_READ events on this fd */
	ev_uint16_t nwrite;		/* number of EV_WRITE events on this fd */
	ev_uint16_t nclose;		/* number of EV_CLOSED events on this fd */
};
64 | | |
/* An entry for an evmap_signal list: notes all the events that want to know
   when a signal triggers. */
struct evmap_signal {
	struct event_dlist events;	/* all events registered for this signal */
};
70 | | |
71 | | /* On some platforms, fds start at 0 and increment by 1 as they are |
72 | | allocated, and old numbers get used. For these platforms, we |
73 | | implement io maps just like signal maps: as an array of pointers to |
74 | | struct evmap_io. But on other platforms (windows), sockets are not |
75 | | 0-indexed, not necessarily consecutive, and not necessarily reused. |
76 | | There, we use a hashtable to implement evmap_io. |
77 | | */ |
78 | | #ifdef EVMAP_USE_HT |
79 | | struct event_map_entry { |
80 | | HT_ENTRY(event_map_entry) map_node; |
81 | | evutil_socket_t fd; |
82 | | union { /* This is a union in case we need to make more things that can |
83 | | be in the hashtable. */ |
84 | | struct evmap_io evmap_io; |
85 | | } ent; |
86 | | }; |
87 | | |
88 | | /* Helper used by the event_io_map hashtable code; tries to return a good hash |
89 | | * of the fd in e->fd. */ |
90 | | static inline unsigned |
91 | | hashsocket(struct event_map_entry *e) |
92 | | { |
93 | | /* On win32, in practice, the low 2-3 bits of a SOCKET seem not to |
94 | | * matter. Our hashtable implementation really likes low-order bits, |
95 | | * though, so let's do the rotate-and-add trick. */ |
96 | | unsigned h = (unsigned) e->fd; |
97 | | h += (h >> 2) | (h << 30); |
98 | | return h; |
99 | | } |
100 | | |
101 | | /* Helper used by the event_io_map hashtable code; returns true iff e1 and e2 |
102 | | * have the same e->fd. */ |
103 | | static inline int |
104 | | eqsocket(struct event_map_entry *e1, struct event_map_entry *e2) |
105 | | { |
106 | | return e1->fd == e2->fd; |
107 | | } |
108 | | |
109 | | HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket) |
110 | | HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket, |
111 | | 0.5, mm_malloc, mm_realloc, mm_free) |
112 | | |
/* Set 'x' to the evmap_io for 'slot' in the hashtable 'map', or to NULL if
 * there is no entry for 'slot'.
 * Fix: dropped the stray semicolon after "while (0)" — with it, a use like
 * "if (c) GET_IO_SLOT(...); else ..." would not compile. */
#define GET_IO_SLOT(x, map, slot, type)					\
	do {								\
		struct event_map_entry key_, *ent_;			\
		key_.fd = slot;						\
		ent_ = HT_FIND(event_io_map, map, &key_);		\
		(x) = ent_ ? &ent_->ent.type : NULL;			\
	} while (0)
120 | | |
/* As GET_IO_SLOT, but insert (and construct via 'ctor') the entry for 'slot'
 * if it is not already present.  Allocates 'fdinfo_len' extra bytes after the
 * entry for the backend's per-fd info.  Makes the enclosing function return
 * -1 on allocation failure. */
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		struct event_map_entry key_, *ent_;			\
		key_.fd = slot;						\
		HT_FIND_OR_INSERT_(event_io_map, map_node, hashsocket, map, \
		    event_map_entry, &key_, ptr,			\
		    {							\
			    ent_ = *ptr;				\
		    },							\
		    {							\
			    ent_ = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
			    if (EVUTIL_UNLIKELY(ent_ == NULL))		\
				    return (-1);			\
			    ent_->fd = slot;				\
			    (ctor)(&ent_->ent.type);			\
			    HT_FOI_INSERT_(map_node, map, &key_, ent_, ptr) \
		    });							\
	(x) = &ent_->ent.type;						\
	} while (0)
140 | | |
141 | | void evmap_io_initmap_(struct event_io_map *ctx) |
142 | | { |
143 | | HT_INIT(event_io_map, ctx); |
144 | | } |
145 | | |
/** Free every entry in an event_io_map and release the storage held by the
 * table itself (hashtable backend). */
void evmap_io_clear_(struct event_io_map *ctx)
{
	struct event_map_entry **ent, **next, *this;
	for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
		this = *ent;
		/* Advance (and unlink) before freeing: 'this' is dead once
		 * mm_free() runs. */
		next = HT_NEXT_RMV(event_io_map, ctx, ent);
		mm_free(this);
	}
	HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
}
156 | | #endif |
157 | | |
/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking: the caller
   must ensure 'slot' < map->nentries. */
#define GET_SIGNAL_SLOT(x, map, slot, type)			\
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SIGNAL_SLOT, but construct the entry for 'slot' if it is not
   present, by allocating enough memory for a 'struct type' (plus
   'fdinfo_len' extra bytes of backend per-fd info), and initializing the new
   value by calling the function 'ctor' on it.  Makes the enclosing function
   return -1 on allocation failure.
 */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		if ((map)->entries[slot] == NULL) {			\
			(map)->entries[slot] =				\
			    mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
				return (-1);				\
			(ctor)((struct type *)(map)->entries[slot]);	\
		}							\
		(x) = (struct type *)((map)->entries[slot]);		\
	} while (0)
179 | | |
/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions: an io map is then just an
   array of evmap_io pointers indexed by fd. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)	\
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
#define FDINFO_OFFSET sizeof(struct evmap_io)
/** Initialize an event_io_map (array backend). */
void
evmap_io_initmap_(struct event_io_map* ctx)
{
	evmap_signal_initmap_(ctx);
}
/** Free every entry in an event_io_map (array backend). */
void
evmap_io_clear_(struct event_io_map* ctx)
{
	evmap_signal_clear_(ctx);
}
#endif
198 | | |
199 | | |
200 | | /** Expand 'map' with new entries of width 'msize' until it is big enough |
201 | | to store a value in 'slot'. |
202 | | */ |
203 | | static int |
204 | | evmap_make_space(struct event_signal_map *map, int slot, int msize) |
205 | 0 | { |
206 | 0 | if (map->nentries <= slot) { |
207 | 0 | int nentries = map->nentries ? map->nentries : 32; |
208 | 0 | void **tmp; |
209 | 0 |
|
210 | 0 | while (nentries <= slot) |
211 | 0 | nentries <<= 1; |
212 | 0 |
|
213 | 0 | tmp = (void **)mm_realloc(map->entries, nentries * msize); |
214 | 0 | if (tmp == NULL) |
215 | 0 | return (-1); |
216 | 0 | |
217 | 0 | memset(&tmp[map->nentries], 0, |
218 | 0 | (nentries - map->nentries) * msize); |
219 | 0 |
|
220 | 0 | map->nentries = nentries; |
221 | 0 | map->entries = tmp; |
222 | 0 | } |
223 | 0 |
|
224 | 0 | return (0); |
225 | 0 | } |
226 | | |
227 | | void |
228 | | evmap_signal_initmap_(struct event_signal_map *ctx) |
229 | 0 | { |
230 | 0 | ctx->nentries = 0; |
231 | 0 | ctx->entries = NULL; |
232 | 0 | } |
233 | | |
234 | | void |
235 | | evmap_signal_clear_(struct event_signal_map *ctx) |
236 | 0 | { |
237 | 0 | if (ctx->entries != NULL) { |
238 | 0 | int i; |
239 | 0 | for (i = 0; i < ctx->nentries; ++i) { |
240 | 0 | if (ctx->entries[i] != NULL) |
241 | 0 | mm_free(ctx->entries[i]); |
242 | 0 | } |
243 | 0 | mm_free(ctx->entries); |
244 | 0 | ctx->entries = NULL; |
245 | 0 | } |
246 | 0 | ctx->nentries = 0; |
247 | 0 | } |
248 | | |
249 | | |
250 | | /* code specific to file descriptors */ |
251 | | |
252 | | /** Constructor for struct evmap_io */ |
253 | | static void |
254 | | evmap_io_init(struct evmap_io *entry) |
255 | 0 | { |
256 | 0 | LIST_INIT(&entry->events); |
257 | 0 | entry->nread = 0; |
258 | 0 | entry->nwrite = 0; |
259 | 0 | entry->nclose = 0; |
260 | 0 | } |
261 | | |
262 | | |
/* Register io event 'ev' for fd 'fd' in 'base', telling the backend about
 * any condition (read/write/closed) that becomes newly watched.
 * return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
int
evmap_io_add_(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx = NULL;
	int nread, nwrite, nclose, retval = 0;
	short res = 0, old = 0;
	struct event *old_ev;

	EVUTIL_ASSERT(fd == ev->ev_fd);

	if (fd < 0)
		return 0;

#ifndef EVMAP_USE_HT
	/* Array backend: grow the table so 'fd' has a slot. */
	if (fd >= io->nentries) {
		if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
			return (-1);
	}
#endif
	/* Find (or create) the evmap_io for this fd; on allocation failure
	 * this macro makes us return -1. */
	GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
						 evsel->fdinfo_len);

	/* Work on int copies so the 16-bit overflow check below is exact. */
	nread = ctx->nread;
	nwrite = ctx->nwrite;
	nclose = ctx->nclose;

	/* 'old' = conditions already registered with the backend. */
	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;
	if (nclose)
		old |= EV_CLOSED;

	/* 'res' = conditions whose count goes 0 -> 1, i.e. the ones the
	 * backend must now start watching. */
	if (ev->ev_events & EV_READ) {
		if (++nread == 1)
			res |= EV_READ;
	}
	if (ev->ev_events & EV_WRITE) {
		if (++nwrite == 1)
			res |= EV_WRITE;
	}
	if (ev->ev_events & EV_CLOSED) {
		if (++nclose == 1)
			res |= EV_CLOSED;
	}
	/* The counters are stored as ev_uint16_t; refuse to overflow them. */
	if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff || nclose > 0xffff)) {
		event_warnx("Too many events reading or writing on fd %d",
		    (int)fd);
		return -1;
	}
	/* In debug mode, reject mixing edge-triggered and non-edge-triggered
	 * events on the same fd. */
	if (EVENT_DEBUG_MODE_IS_ON() &&
	    (old_ev = LIST_FIRST(&ctx->events)) &&
	    (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
		event_warnx("Tried to mix edge-triggered and non-edge-triggered"
		    " events on fd %d", (int)fd);
		return -1;
	}

	if (res) {
		/* Backend-private per-fd data lives just past the evmap_io. */
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		/* XXX(niels): we cannot mix edge-triggered and
		 * level-triggered, we should probably assert on
		 * this. */
		if (evsel->add(base, ev->ev_fd,
			old, (ev->ev_events & EV_ET) | res, extra) == -1)
			return (-1);
		retval = 1;
	}

	/* Commit the new counts only after the backend add succeeded, so a
	 * failed add leaves the map unchanged. */
	ctx->nread = (ev_uint16_t) nread;
	ctx->nwrite = (ev_uint16_t) nwrite;
	ctx->nclose = (ev_uint16_t) nclose;
	LIST_INSERT_HEAD(&ctx->events, ev, ev_io_next);

	return (retval);
}
343 | | |
344 | | /* return -1 on error, 0 on success if nothing changed in the event backend, |
345 | | * and 1 on success if something did. */ |
346 | | int |
347 | | evmap_io_del_(struct event_base *base, evutil_socket_t fd, struct event *ev) |
348 | 0 | { |
349 | 0 | const struct eventop *evsel = base->evsel; |
350 | 0 | struct event_io_map *io = &base->io; |
351 | 0 | struct evmap_io *ctx; |
352 | 0 | int nread, nwrite, nclose, retval = 0; |
353 | 0 | short res = 0, old = 0; |
354 | 0 |
|
355 | 0 | if (fd < 0) |
356 | 0 | return 0; |
357 | 0 | |
358 | 0 | EVUTIL_ASSERT(fd == ev->ev_fd); |
359 | 0 |
|
360 | 0 | #ifndef EVMAP_USE_HT |
361 | 0 | if (fd >= io->nentries) |
362 | 0 | return (-1); |
363 | 0 | #endif |
364 | 0 | |
365 | 0 | GET_IO_SLOT(ctx, io, fd, evmap_io); |
366 | 0 |
|
367 | 0 | nread = ctx->nread; |
368 | 0 | nwrite = ctx->nwrite; |
369 | 0 | nclose = ctx->nclose; |
370 | 0 |
|
371 | 0 | if (nread) |
372 | 0 | old |= EV_READ; |
373 | 0 | if (nwrite) |
374 | 0 | old |= EV_WRITE; |
375 | 0 | if (nclose) |
376 | 0 | old |= EV_CLOSED; |
377 | 0 |
|
378 | 0 | if (ev->ev_events & EV_READ) { |
379 | 0 | if (--nread == 0) |
380 | 0 | res |= EV_READ; |
381 | 0 | EVUTIL_ASSERT(nread >= 0); |
382 | 0 | } |
383 | 0 | if (ev->ev_events & EV_WRITE) { |
384 | 0 | if (--nwrite == 0) |
385 | 0 | res |= EV_WRITE; |
386 | 0 | EVUTIL_ASSERT(nwrite >= 0); |
387 | 0 | } |
388 | 0 | if (ev->ev_events & EV_CLOSED) { |
389 | 0 | if (--nclose == 0) |
390 | 0 | res |= EV_CLOSED; |
391 | 0 | EVUTIL_ASSERT(nclose >= 0); |
392 | 0 | } |
393 | 0 |
|
394 | 0 | if (res) { |
395 | 0 | void *extra = ((char*)ctx) + sizeof(struct evmap_io); |
396 | 0 | if (evsel->del(base, ev->ev_fd, old, res, extra) == -1) { |
397 | 0 | retval = -1; |
398 | 0 | } else { |
399 | 0 | retval = 1; |
400 | 0 | } |
401 | 0 | } |
402 | 0 |
|
403 | 0 | ctx->nread = nread; |
404 | 0 | ctx->nwrite = nwrite; |
405 | 0 | ctx->nclose = nclose; |
406 | 0 | LIST_REMOVE(ev, ev_io_next); |
407 | 0 |
|
408 | 0 | return (retval); |
409 | 0 | } |
410 | | |
/** Activate every event on 'fd' whose requested events overlap 'events'.
 * NOTE(review): presumably called with the base lock held — it calls
 * event_active_nolock_(); confirm against callers. */
void
evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events)
{
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	struct event *ev;

#ifndef EVMAP_USE_HT
	/* Array backend: out-of-range fds simply have no events. */
	if (fd < 0 || fd >= io->nentries)
		return;
#endif
	GET_IO_SLOT(ctx, io, fd, evmap_io);

	if (NULL == ctx)
		return;
	LIST_FOREACH(ev, &ctx->events, ev_io_next) {
		/* Only wake events that asked for a triggered condition. */
		if (ev->ev_events & events)
			event_active_nolock_(ev, ev->ev_events & events, 1);
	}
}
431 | | |
/* code specific to signals */

/** Constructor for struct evmap_signal: an empty list of events. */
static void
evmap_signal_init(struct evmap_signal *entry)
{
	LIST_INIT(&entry->events);
}
439 | | |
440 | | |
441 | | int |
442 | | evmap_signal_add_(struct event_base *base, int sig, struct event *ev) |
443 | 0 | { |
444 | 0 | const struct eventop *evsel = base->evsigsel; |
445 | 0 | struct event_signal_map *map = &base->sigmap; |
446 | 0 | struct evmap_signal *ctx = NULL; |
447 | 0 |
|
448 | 0 | if (sig >= map->nentries) { |
449 | 0 | if (evmap_make_space( |
450 | 0 | map, sig, sizeof(struct evmap_signal *)) == -1) |
451 | 0 | return (-1); |
452 | 0 | } |
453 | 0 | GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init, |
454 | 0 | base->evsigsel->fdinfo_len); |
455 | 0 |
|
456 | 0 | if (LIST_EMPTY(&ctx->events)) { |
457 | 0 | if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL) |
458 | 0 | == -1) |
459 | 0 | return (-1); |
460 | 0 | } |
461 | 0 | |
462 | 0 | LIST_INSERT_HEAD(&ctx->events, ev, ev_signal_next); |
463 | 0 |
|
464 | 0 | return (1); |
465 | 0 | } |
466 | | |
467 | | int |
468 | | evmap_signal_del_(struct event_base *base, int sig, struct event *ev) |
469 | 0 | { |
470 | 0 | const struct eventop *evsel = base->evsigsel; |
471 | 0 | struct event_signal_map *map = &base->sigmap; |
472 | 0 | struct evmap_signal *ctx; |
473 | 0 |
|
474 | 0 | if (sig >= map->nentries) |
475 | 0 | return (-1); |
476 | 0 | |
477 | 0 | GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal); |
478 | 0 |
|
479 | 0 | LIST_REMOVE(ev, ev_signal_next); |
480 | 0 |
|
481 | 0 | if (LIST_FIRST(&ctx->events) == NULL) { |
482 | 0 | if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1) |
483 | 0 | return (-1); |
484 | 0 | } |
485 | 0 | |
486 | 0 | return (1); |
487 | 0 | } |
488 | | |
489 | | void |
490 | | evmap_signal_active_(struct event_base *base, evutil_socket_t sig, int ncalls) |
491 | 0 | { |
492 | 0 | struct event_signal_map *map = &base->sigmap; |
493 | 0 | struct evmap_signal *ctx; |
494 | 0 | struct event *ev; |
495 | 0 |
|
496 | 0 | if (sig < 0 || sig >= map->nentries) |
497 | 0 | return; |
498 | 0 | GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal); |
499 | 0 |
|
500 | 0 | if (!ctx) |
501 | 0 | return; |
502 | 0 | LIST_FOREACH(ev, &ctx->events, ev_signal_next) |
503 | 0 | event_active_nolock_(ev, EV_SIGNAL, ncalls); |
504 | 0 | } |
505 | | |
506 | | void * |
507 | | evmap_io_get_fdinfo_(struct event_io_map *map, evutil_socket_t fd) |
508 | 0 | { |
509 | 0 | struct evmap_io *ctx; |
510 | 0 | GET_IO_SLOT(ctx, map, fd, evmap_io); |
511 | 0 | if (ctx) |
512 | 0 | return ((char*)ctx) + sizeof(struct evmap_io); |
513 | 0 | else |
514 | 0 | return NULL; |
515 | 0 | } |
516 | | |
/* Callback type for evmap_io_foreach_fd */
typedef int (*evmap_io_foreach_fd_cb)(
	struct event_base *, evutil_socket_t, struct evmap_io *, void *);

/* Multipurpose helper function: Iterate over every file descriptor in the
 * event_base for which we could have EV_READ or EV_WRITE events.  For each
 * such fd, call fn(base, fd, evmap_io, arg), where fn is the user-provided
 * function, base is the event_base, fd is the file descriptor, evmap_io
 * is an evmap_io structure containing a list of events pending on the
 * file descriptor, and arg is the user-supplied argument.
 *
 * If fn returns 0, continue on to the next fd.  Otherwise, return the same
 * value that fn returned.
 *
 * Note that there is no guarantee that the file descriptors will be processed
 * in any particular order.
 */
static int
evmap_io_foreach_fd(struct event_base *base,
    evmap_io_foreach_fd_cb fn,
    void *arg)
{
	evutil_socket_t fd;
	struct event_io_map *iomap = &base->io;
	int r = 0;
#ifdef EVMAP_USE_HT
	struct event_map_entry **mapent;
	HT_FOREACH(mapent, event_io_map, iomap) {
		struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
		fd = (*mapent)->fd;
#else
	for (fd = 0; fd < iomap->nentries; ++fd) {
		struct evmap_io *ctx = iomap->entries[fd];
		/* Skip fds nothing ever registered for. */
		if (!ctx)
			continue;
#endif
		if ((r = fn(base, fd, ctx, arg)))
			break;
	}
	return r;
}
558 | | |
/* Callback type for evmap_signal_foreach_signal */
typedef int (*evmap_signal_foreach_signal_cb)(
	struct event_base *, int, struct evmap_signal *, void *);

/* Multipurpose helper function: Iterate over every signal number in the
 * event_base for which we could have signal events.  For each such signal,
 * call fn(base, signum, evmap_signal, arg), where fn is the user-provided
 * function, base is the event_base, signum is the signal number, evmap_signal
 * is an evmap_signal structure containing a list of events pending on the
 * signal, and arg is the user-supplied argument.
 *
 * If fn returns 0, continue on to the next signal. Otherwise, return the same
 * value that fn returned.
 */
static int
evmap_signal_foreach_signal(struct event_base *base,
    evmap_signal_foreach_signal_cb fn,
    void *arg)
{
	struct event_signal_map *sigmap = &base->sigmap;
	int r = 0;
	int signum;

	for (signum = 0; signum < sigmap->nentries; ++signum) {
		struct evmap_signal *ctx = sigmap->entries[signum];
		/* Skip signals nothing ever registered for. */
		if (!ctx)
			continue;
		if ((r = fn(base, signum, ctx, arg)))
			break;
	}
	return r;
}
591 | | |
/* Helper for evmap_reinit_: tell the backend to add every fd for which we have
 * pending events, with the appropriate combination of EV_READ, EV_WRITE, and
 * EV_ET. */
static int
evmap_io_reinit_iter_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *ctx, void *arg)
{
	const struct eventop *evsel = base->evsel;
	void *extra;
	int *result = arg;
	short events = 0;
	struct event *ev;
	EVUTIL_ASSERT(ctx);

	/* Backend-private per-fd data lives just past the evmap_io. */
	extra = ((char*)ctx) + sizeof(struct evmap_io);
	if (ctx->nread)
		events |= EV_READ;
	if (ctx->nwrite)
		events |= EV_WRITE;
	if (ctx->nclose)
		events |= EV_CLOSED;
	/* The previous backend's per-fd info is meaningless now: clear it. */
	if (evsel->fdinfo_len)
		memset(extra, 0, evsel->fdinfo_len);
	/* Only the first event is consulted for EV_ET; evmap_io_add_ rejects
	 * mixing ET and non-ET on one fd (in debug mode). */
	if (events &&
	    (ev = LIST_FIRST(&ctx->events)) &&
	    (ev->ev_events & EV_ET))
		events |= EV_ET;
	/* Record failure in *result but keep iterating over other fds. */
	if (evsel->add(base, fd, 0, events, extra) == -1)
		*result = -1;

	return 0;
}
624 | | |
625 | | /* Helper for evmap_reinit_: tell the backend to add every signal for which we |
626 | | * have pending events. */ |
627 | | static int |
628 | | evmap_signal_reinit_iter_fn(struct event_base *base, |
629 | | int signum, struct evmap_signal *ctx, void *arg) |
630 | 0 | { |
631 | 0 | const struct eventop *evsel = base->evsigsel; |
632 | 0 | int *result = arg; |
633 | 0 |
|
634 | 0 | if (!LIST_EMPTY(&ctx->events)) { |
635 | 0 | if (evsel->add(base, signum, 0, EV_SIGNAL, NULL) == -1) |
636 | 0 | *result = -1; |
637 | 0 | } |
638 | 0 | return 0; |
639 | 0 | } |
640 | | |
641 | | int |
642 | | evmap_reinit_(struct event_base *base) |
643 | 0 | { |
644 | 0 | int result = 0; |
645 | 0 |
|
646 | 0 | evmap_io_foreach_fd(base, evmap_io_reinit_iter_fn, &result); |
647 | 0 | if (result < 0) |
648 | 0 | return -1; |
649 | 0 | evmap_signal_foreach_signal(base, evmap_signal_reinit_iter_fn, &result); |
650 | 0 | if (result < 0) |
651 | 0 | return -1; |
652 | 0 | return 0; |
653 | 0 | } |
654 | | |
655 | | /* Helper for evmap_delete_all_: delete every event in an event_dlist. */ |
656 | | static int |
657 | | delete_all_in_dlist(struct event_dlist *dlist) |
658 | 0 | { |
659 | 0 | struct event *ev; |
660 | 0 | while ((ev = LIST_FIRST(dlist))) |
661 | 0 | event_del(ev); |
662 | 0 | return 0; |
663 | 0 | } |
664 | | |
/* Helper for evmap_delete_all_: delete every event pending on an fd.
 * 'base', 'fd', and 'arg' are unused; the iterator signature requires them.
 * Returns 0 so iteration continues over every fd. */
static int
evmap_io_delete_all_iter_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	return delete_all_in_dlist(&io_info->events);
}
672 | | |
/* Helper for evmap_delete_all_: delete every event pending on a signal.
 * 'base', 'signum', and 'arg' are unused; the iterator signature requires
 * them.  Returns 0 so iteration continues over every signal. */
static int
evmap_signal_delete_all_iter_fn(struct event_base *base, int signum,
    struct evmap_signal *sig_info, void *arg)
{
	return delete_all_in_dlist(&sig_info->events);
}
680 | | |
/** Delete every pending event in 'base': signal events first, then io
 * events. */
void
evmap_delete_all_(struct event_base *base)
{
	evmap_signal_foreach_signal(base, evmap_signal_delete_all_iter_fn, NULL);
	evmap_io_foreach_fd(base, evmap_io_delete_all_iter_fn, NULL);
}
687 | | |
/** Per-fd structure for use with changelists. It keeps track, for each fd or
 * signal using the changelist, of where its entry in the changelist is.
 * It is stored in the fdinfo bytes just past the evmap_io / evmap_signal.
 */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};
695 | | |
/** Set up an empty, unallocated changelist. */
void
event_changelist_init_(struct event_changelist *changelist)
{
	changelist->changes = NULL;
	changelist->changes_size = 0;
	changelist->n_changes = 0;
}
703 | | |
/** Helper: return the changelist_fdinfo corresponding to a given change. */
static inline struct event_changelist_fdinfo *
event_change_get_fdinfo(struct event_base *base,
    const struct event_change *change)
{
	char *ptr;
	if (change->read_change & EV_CHANGE_SIGNAL) {
		/* Signal change: the fdinfo lives just past the
		 * evmap_signal for this signal number. */
		struct evmap_signal *ctx;
		GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
		ptr = ((char*)ctx) + sizeof(struct evmap_signal);
	} else {
		/* IO change: the fdinfo lives just past the evmap_io. */
		struct evmap_io *ctx;
		GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
		ptr = ((char*)ctx) + sizeof(struct evmap_io);
	}
	return (void*)ptr;
}
721 | | |
/** Callback helper for event_changelist_assert_ok */
static int
event_changelist_assert_ok_foreach_iter_fn(
	struct event_base *base,
	evutil_socket_t fd, struct evmap_io *io, void *arg)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *f;
	/* The per-fd changelist info is stored just past the evmap_io. */
	f = (void*)
	    ( ((char*)io) + sizeof(struct evmap_io) );
	if (f->idxplus1) {
		/* idxplus1 is the change index + 1; verify that the change
		 * it points at is really for this fd. */
		struct event_change *c = &changelist->changes[f->idxplus1 - 1];
		EVUTIL_ASSERT(c->fd == fd);
	}
	return 0;
}
738 | | |
/** Make sure that the changelist is consistent with the evmap structures. */
static void
event_changelist_assert_ok(struct event_base *base)
{
	int i;
	struct event_changelist *changelist = &base->changelist;

	EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
	/* Forward check: every change's fdinfo points back at it. */
	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *c = &changelist->changes[i];
		struct event_changelist_fdinfo *f;
		EVUTIL_ASSERT(c->fd >= 0);
		f = event_change_get_fdinfo(base, c);
		EVUTIL_ASSERT(f);
		EVUTIL_ASSERT(f->idxplus1 == i + 1);
	}

	/* Reverse check: every fd's fdinfo refers to a change for that fd. */
	evmap_io_foreach_fd(base,
	    event_changelist_assert_ok_foreach_iter_fn,
	    NULL);
}
760 | | |
#ifdef DEBUG_CHANGELIST
#define event_changelist_check(base) event_changelist_assert_ok((base))
#else
/* Consistency checking compiles to nothing unless DEBUG_CHANGELIST is set. */
#define event_changelist_check(base) ((void)0)
#endif
766 | | |
/** Empty 'changelist', resetting every entry's per-fd back-pointer
 * (idxplus1) to "no entry". */
void
event_changelist_remove_all_(struct event_changelist *changelist,
    struct event_base *base)
{
	int i;

	event_changelist_check(base);

	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *ch = &changelist->changes[i];
		struct event_changelist_fdinfo *fdinfo =
		    event_change_get_fdinfo(base, ch);
		/* idxplus1 stores index+1 so that zero means "no entry". */
		EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
		fdinfo->idxplus1 = 0;
	}

	changelist->n_changes = 0;

	event_changelist_check(base);
}
787 | | |
/** Release the storage held by 'changelist' and reset it to empty. */
void
event_changelist_freemem_(struct event_changelist *changelist)
{
	if (changelist->changes)
		mm_free(changelist->changes);
	event_changelist_init_(changelist); /* zero it all out. */
}
795 | | |
796 | | /** Increase the size of 'changelist' to hold more changes. */ |
797 | | static int |
798 | | event_changelist_grow(struct event_changelist *changelist) |
799 | 0 | { |
800 | 0 | int new_size; |
801 | 0 | struct event_change *new_changes; |
802 | 0 | if (changelist->changes_size < 64) |
803 | 0 | new_size = 64; |
804 | 0 | else |
805 | 0 | new_size = changelist->changes_size * 2; |
806 | 0 |
|
807 | 0 | new_changes = mm_realloc(changelist->changes, |
808 | 0 | new_size * sizeof(struct event_change)); |
809 | 0 |
|
810 | 0 | if (EVUTIL_UNLIKELY(new_changes == NULL)) |
811 | 0 | return (-1); |
812 | 0 | |
813 | 0 | changelist->changes = new_changes; |
814 | 0 | changelist->changes_size = new_size; |
815 | 0 |
|
816 | 0 | return (0); |
817 | 0 | } |
818 | | |
/** Return a pointer to the changelist entry for the file descriptor or signal
 * 'fd', whose fdinfo is 'fdinfo'.  If none exists, construct it, setting its
 * old_events field to old_events.  Returns NULL if the changelist could not
 * be grown.
 */
static struct event_change *
event_changelist_get_or_construct(struct event_changelist *changelist,
    evutil_socket_t fd,
    short old_events,
    struct event_changelist_fdinfo *fdinfo)
{
	struct event_change *change;

	if (fdinfo->idxplus1 == 0) {
		/* No pending change for this fd yet; append a fresh one. */
		int idx;
		EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);

		if (changelist->n_changes == changelist->changes_size) {
			if (event_changelist_grow(changelist) < 0)
				return NULL;
		}

		idx = changelist->n_changes++;
		change = &changelist->changes[idx];
		/* Store index+1 so that 0 can mean "no entry". */
		fdinfo->idxplus1 = idx + 1;

		memset(change, 0, sizeof(struct event_change));
		change->fd = fd;
		change->old_events = old_events;
	} else {
		/* Reuse the pending change already recorded for this fd. */
		change = &changelist->changes[fdinfo->idxplus1 - 1];
		EVUTIL_ASSERT(change->fd == fd);
	}
	return change;
}
853 | | |
/** Changelist implementation of the backend 'add' operation: record that
 * 'events' should be added for 'fd', merging into any pending change.
 * 'p' is the per-fd event_changelist_fdinfo.  Returns 0 on success, -1 if
 * the changelist could not be grown. */
int
event_changelist_add_(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;

	event_changelist_check(base);

	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	if (!change)
		return -1;

	/* An add replaces any previous delete, but doesn't result in a no-op,
	 * since the delete might fail (because the fd had been closed since
	 * the last add, for instance). */

	if (events & (EV_READ|EV_SIGNAL)) {
		change->read_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}
	if (events & EV_WRITE) {
		change->write_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}
	if (events & EV_CLOSED) {
		change->close_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}

	event_changelist_check(base);
	return (0);
}
888 | | |
889 | | int |
890 | | event_changelist_del_(struct event_base *base, evutil_socket_t fd, short old, short events, |
891 | | void *p) |
892 | 0 | { |
893 | 0 | struct event_changelist *changelist = &base->changelist; |
894 | 0 | struct event_changelist_fdinfo *fdinfo = p; |
895 | 0 | struct event_change *change; |
896 | 0 |
|
897 | 0 | event_changelist_check(base); |
898 | 0 | change = event_changelist_get_or_construct(changelist, fd, old, fdinfo); |
899 | 0 | event_changelist_check(base); |
900 | 0 | if (!change) |
901 | 0 | return -1; |
902 | 0 | |
903 | 0 | /* A delete on an event set that doesn't contain the event to be |
904 | 0 | deleted produces a no-op. This effectively emoves any previous |
905 | 0 | uncommitted add, rather than replacing it: on those platforms where |
906 | 0 | "add, delete, dispatch" is not the same as "no-op, dispatch", we |
907 | 0 | want the no-op behavior. |
908 | 0 | |
909 | 0 | If we have a no-op item, we could remove it it from the list |
910 | 0 | entirely, but really there's not much point: skipping the no-op |
911 | 0 | change when we do the dispatch later is far cheaper than rejuggling |
912 | 0 | the array now. |
913 | 0 | |
914 | 0 | As this stands, it also lets through deletions of events that are |
915 | 0 | not currently set. |
916 | 0 | */ |
917 | 0 | |
918 | 0 | if (events & (EV_READ|EV_SIGNAL)) { |
919 | 0 | if (!(change->old_events & (EV_READ | EV_SIGNAL))) |
920 | 0 | change->read_change = 0; |
921 | 0 | else |
922 | 0 | change->read_change = EV_CHANGE_DEL; |
923 | 0 | } |
924 | 0 | if (events & EV_WRITE) { |
925 | 0 | if (!(change->old_events & EV_WRITE)) |
926 | 0 | change->write_change = 0; |
927 | 0 | else |
928 | 0 | change->write_change = EV_CHANGE_DEL; |
929 | 0 | } |
930 | 0 | if (events & EV_CLOSED) { |
931 | 0 | if (!(change->old_events & EV_CLOSED)) |
932 | 0 | change->close_change = 0; |
933 | 0 | else |
934 | 0 | change->close_change = EV_CHANGE_DEL; |
935 | 0 | } |
936 | 0 |
|
937 | 0 | event_changelist_check(base); |
938 | 0 | return (0); |
939 | 0 | } |
940 | | |
941 | | /* Helper for evmap_check_integrity_: verify that all of the events pending on |
942 | | * given fd are set up correctly, and that the nread and nwrite counts on that |
943 | | * fd are correct. */ |
944 | | static int |
945 | | evmap_io_check_integrity_fn(struct event_base *base, evutil_socket_t fd, |
946 | | struct evmap_io *io_info, void *arg) |
947 | 0 | { |
948 | 0 | struct event *ev; |
949 | 0 | int n_read = 0, n_write = 0, n_close = 0; |
950 | 0 |
|
951 | 0 | /* First, make sure the list itself isn't corrupt. Otherwise, |
952 | 0 | * running LIST_FOREACH could be an exciting adventure. */ |
953 | 0 | EVUTIL_ASSERT_LIST_OK(&io_info->events, event, ev_io_next); |
954 | 0 |
|
955 | 0 | LIST_FOREACH(ev, &io_info->events, ev_io_next) { |
956 | 0 | EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED); |
957 | 0 | EVUTIL_ASSERT(ev->ev_fd == fd); |
958 | 0 | EVUTIL_ASSERT(!(ev->ev_events & EV_SIGNAL)); |
959 | 0 | EVUTIL_ASSERT((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))); |
960 | 0 | if (ev->ev_events & EV_READ) |
961 | 0 | ++n_read; |
962 | 0 | if (ev->ev_events & EV_WRITE) |
963 | 0 | ++n_write; |
964 | 0 | if (ev->ev_events & EV_CLOSED) |
965 | 0 | ++n_close; |
966 | 0 | } |
967 | 0 |
|
968 | 0 | EVUTIL_ASSERT(n_read == io_info->nread); |
969 | 0 | EVUTIL_ASSERT(n_write == io_info->nwrite); |
970 | 0 | EVUTIL_ASSERT(n_close == io_info->nclose); |
971 | 0 |
|
972 | 0 | return 0; |
973 | 0 | } |
974 | | |
975 | | /* Helper for evmap_check_integrity_: verify that all of the events pending |
976 | | * on given signal are set up correctly. */ |
977 | | static int |
978 | | evmap_signal_check_integrity_fn(struct event_base *base, |
979 | | int signum, struct evmap_signal *sig_info, void *arg) |
980 | 0 | { |
981 | 0 | struct event *ev; |
982 | 0 | /* First, make sure the list itself isn't corrupt. */ |
983 | 0 | EVUTIL_ASSERT_LIST_OK(&sig_info->events, event, ev_signal_next); |
984 | 0 |
|
985 | 0 | LIST_FOREACH(ev, &sig_info->events, ev_io_next) { |
986 | 0 | EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED); |
987 | 0 | EVUTIL_ASSERT(ev->ev_fd == signum); |
988 | 0 | EVUTIL_ASSERT((ev->ev_events & EV_SIGNAL)); |
989 | 0 | EVUTIL_ASSERT(!(ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))); |
990 | 0 | } |
991 | 0 | return 0; |
992 | 0 | } |
993 | | |
994 | | void |
995 | | evmap_check_integrity_(struct event_base *base) |
996 | 0 | { |
997 | 0 | evmap_io_foreach_fd(base, evmap_io_check_integrity_fn, NULL); |
998 | 0 | evmap_signal_foreach_signal(base, evmap_signal_check_integrity_fn, NULL); |
999 | 0 |
|
1000 | 0 | if (base->evsel->add == event_changelist_add_) |
1001 | 0 | event_changelist_assert_ok(base); |
1002 | 0 | } |
1003 | | |
1004 | | /* Helper type for evmap_foreach_event_: Bundles a function to call on every |
1005 | | * event, and the user-provided void* to use as its third argument. */ |
struct evmap_foreach_event_helper {
	event_base_foreach_event_cb fn; /* callback invoked once per event */
	void *arg; /* opaque user pointer passed as fn's third argument */
};
1010 | | |
1011 | | /* Helper for evmap_foreach_event_: calls a provided function on every event |
1012 | | * pending on a given fd. */ |
1013 | | static int |
1014 | | evmap_io_foreach_event_fn(struct event_base *base, evutil_socket_t fd, |
1015 | | struct evmap_io *io_info, void *arg) |
1016 | 0 | { |
1017 | 0 | struct evmap_foreach_event_helper *h = arg; |
1018 | 0 | struct event *ev; |
1019 | 0 | int r; |
1020 | 0 | LIST_FOREACH(ev, &io_info->events, ev_io_next) { |
1021 | 0 | if ((r = h->fn(base, ev, h->arg))) |
1022 | 0 | return r; |
1023 | 0 | } |
1024 | 0 | return 0; |
1025 | 0 | } |
1026 | | |
1027 | | /* Helper for evmap_foreach_event_: calls a provided function on every event |
1028 | | * pending on a given signal. */ |
1029 | | static int |
1030 | | evmap_signal_foreach_event_fn(struct event_base *base, int signum, |
1031 | | struct evmap_signal *sig_info, void *arg) |
1032 | 0 | { |
1033 | 0 | struct event *ev; |
1034 | 0 | struct evmap_foreach_event_helper *h = arg; |
1035 | 0 | int r; |
1036 | 0 | LIST_FOREACH(ev, &sig_info->events, ev_signal_next) { |
1037 | 0 | if ((r = h->fn(base, ev, h->arg))) |
1038 | 0 | return r; |
1039 | 0 | } |
1040 | 0 | return 0; |
1041 | 0 | } |
1042 | | |
1043 | | int |
1044 | | evmap_foreach_event_(struct event_base *base, |
1045 | | event_base_foreach_event_cb fn, void *arg) |
1046 | 0 | { |
1047 | 0 | struct evmap_foreach_event_helper h; |
1048 | 0 | int r; |
1049 | 0 | h.fn = fn; |
1050 | 0 | h.arg = arg; |
1051 | 0 | if ((r = evmap_io_foreach_fd(base, evmap_io_foreach_event_fn, &h))) |
1052 | 0 | return r; |
1053 | 0 | return evmap_signal_foreach_signal(base, evmap_signal_foreach_event_fn, &h); |
1054 | 0 | } |
1055 | | |