Line  | Count  | Source  |
1  |  | /*  | 
2  |  |  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson  | 
3  |  |  *  | 
4  |  |  * Redistribution and use in source and binary forms, with or without  | 
5  |  |  * modification, are permitted provided that the following conditions  | 
6  |  |  * are met:  | 
7  |  |  * 1. Redistributions of source code must retain the above copyright  | 
8  |  |  *    notice, this list of conditions and the following disclaimer.  | 
9  |  |  * 2. Redistributions in binary form must reproduce the above copyright  | 
10  |  |  *    notice, this list of conditions and the following disclaimer in the  | 
11  |  |  *    documentation and/or other materials provided with the distribution.  | 
12  |  |  * 3. The name of the author may not be used to endorse or promote products  | 
13  |  |  *    derived from this software without specific prior written permission.  | 
14  |  |  *  | 
15  |  |  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR  | 
16  |  |  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES  | 
17  |  |  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  | 
18  |  |  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,  | 
19  |  |  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT  | 
20  |  |  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,  | 
21  |  |  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY  | 
22  |  |  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT  | 
23  |  |  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF  | 
24  |  |  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.  | 
25  |  |  */  | 
26  |  | #include "event2/event-config.h"  | 
27  |  | #include "evconfig-private.h"  | 
28  |  |  | 
29  |  | #ifdef _WIN32  | 
30  |  | #include <winsock2.h>  | 
31  |  | #define WIN32_LEAN_AND_MEAN  | 
32  |  | #include <windows.h>  | 
33  |  | #undef WIN32_LEAN_AND_MEAN  | 
34  |  | #endif  | 
35  |  | #include <sys/types.h>  | 
36  |  | #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)  | 
37  |  | #include <sys/time.h>  | 
38  |  | #endif  | 
39  |  | #include <sys/queue.h>  | 
40  |  | #include <stdio.h>  | 
41  |  | #include <stdlib.h>  | 
42  |  | #ifndef _WIN32  | 
43  |  | #include <unistd.h>  | 
44  |  | #endif  | 
45  |  | #include <errno.h>  | 
46  |  | #include <limits.h>  | 
47  |  | #include <signal.h>  | 
48  |  | #include <string.h>  | 
49  |  | #include <time.h>  | 
50  |  |  | 
51  |  | #include "event-internal.h"  | 
52  |  | #include "evmap-internal.h"  | 
53  |  | #include "mm-internal.h"  | 
54  |  | #include "changelist-internal.h"  | 
55  |  |  | 
56  |  | /** An entry for an evmap_io list: notes all the events that want to read or  | 
57  |  |   write on a given fd, and the number of each.  | 
58  |  |   */  | 
59  |  | struct evmap_io { | 
60  |  |   struct event_dlist events;  | 
61  |  |   ev_uint16_t nread;  | 
62  |  |   ev_uint16_t nwrite;  | 
63  |  |   ev_uint16_t nclose;  | 
64  |  | };  | 
65  |  |  | 
66  |  | /* An entry for an evmap_signal list: notes all the events that want to know  | 
67  |  |    when a signal triggers. */  | 
68  |  | struct evmap_signal { | 
69  |  |   struct event_dlist events;  | 
70  |  | };  | 
71  |  |  | 
72  |  | /* On some platforms, fds start at 0 and increment by 1 as they are  | 
73  |  |    allocated, and old numbers get used.  For these platforms, we  | 
74  |  |    implement io maps just like signal maps: as an array of pointers to  | 
75  |  |    struct evmap_io.  But on other platforms (windows), sockets are not  | 
76  |  |    0-indexed, not necessarily consecutive, and not necessarily reused.  | 
77  |  |    There, we use a hashtable to implement evmap_io.  | 
78  |  | */  | 
79  |  | #ifdef EVMAP_USE_HT  | 
80  |  | struct event_map_entry { | 
81  |  |   HT_ENTRY(event_map_entry) map_node;  | 
82  |  |   evutil_socket_t fd;  | 
83  |  |   union { /* This is a union in case we need to make more things that can | 
84  |  |          be in the hashtable. */  | 
85  |  |     struct evmap_io evmap_io;  | 
86  |  |   } ent;  | 
87  |  | };  | 
88  |  |  | 
89  |  | /* Helper used by the event_io_map hashtable code; tries to return a good hash  | 
90  |  |  * of the fd in e->fd. */  | 
91  |  | static inline unsigned  | 
92  |  | hashsocket(struct event_map_entry *e)  | 
93  |  | { | 
94  |  |   /* On win32, in practice, the low 2-3 bits of a SOCKET seem not to  | 
95  |  |    * matter.  Our hashtable implementation really likes low-order bits,  | 
96  |  |    * though, so let's do the rotate-and-add trick. */  | 
97  |  |   unsigned h = (unsigned) e->fd;  | 
98  |  |   h += (h >> 2) | (h << 30);  | 
99  |  |   return h;  | 
100  |  | }  | 
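A small standalone illustration (not part of evmap.c; the sample values are made up) of the rotate-and-add mixing step used by hashsocket() above. As in the original, it assumes a 32-bit unsigned, so (h >> 2) | (h << 30) is a rotate by two bits:

    #include <stdio.h>

    /* Same mixing step as hashsocket(): rotate right by 2 (for a 32-bit
     * unsigned) and add the result back, so the low-order bits that the
     * hashtable cares about pick up information from higher bits. */
    static unsigned mix_fd(unsigned h)
    {
        h += (h >> 2) | (h << 30);
        return h;
    }

    int main(void)
    {
        unsigned samples[] = { 0x1f4, 0x1f8, 0x1fc }; /* made-up SOCKET-like values */
        size_t i;
        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("0x%x -> 0x%x\n", samples[i], mix_fd(samples[i]));
        return 0;
    }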
101  |  |  | 
102  |  | /* Helper used by the event_io_map hashtable code; returns true iff e1 and e2  | 
103  |  |  * have the same e->fd. */  | 
104  |  | static inline int  | 
105  |  | eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)  | 
106  |  | { | 
107  |  |   return e1->fd == e2->fd;  | 
108  |  | }  | 
109  |  |  | 
110  |  | HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)  | 
111  |  | HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,  | 
112  |  |       0.5, mm_malloc, mm_realloc, mm_free)  | 
113  |  |  | 
114  |  | #define GET_IO_SLOT(x, map, slot, type)         \  | 
115  |  |   do {                \ | 
116  |  |     struct event_map_entry key_, *ent_;     \  | 
117  |  |     key_.fd = slot;           \  | 
118  |  |     ent_ = HT_FIND(event_io_map, map, &key_);   \  | 
119  |  |     (x) = ent_ ? &ent_->ent.type : NULL;      \  | 
120  |  |   } while (0)  | 
121  |  |  | 
122  |  | #define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)  \  | 
123  |  |   do {                \ | 
124  |  |     struct event_map_entry key_, *ent_;     \  | 
125  |  |     key_.fd = slot;           \  | 
126  |  |     HT_FIND_OR_INSERT_(event_io_map, map_node, hashsocket, map, \  | 
127  |  |         event_map_entry, &key_, ptr,      \  | 
128  |  |         {             \ | 
129  |  |           ent_ = *ptr;        \  | 
130  |  |         },              \  | 
131  |  |         {             \ | 
132  |  |           ent_ = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \  | 
133  |  |           if (EVUTIL_UNLIKELY(ent_ == NULL))    \  | 
134  |  |             return (-1);      \  | 
135  |  |           ent_->fd = slot;        \  | 
136  |  |           (ctor)(&ent_->ent.type);      \  | 
137  |  |           HT_FOI_INSERT_(map_node, map, &key_, ent_, ptr) \  | 
138  |  |         });         \  | 
139  |  |     (x) = &ent_->ent.type;          \  | 
140  |  |   } while (0)  | 
141  |  |  | 
142  |  | void evmap_io_initmap_(struct event_io_map *ctx)  | 
143  |  | { | 
144  |  |   HT_INIT(event_io_map, ctx);  | 
145  |  | }  | 
146  |  |  | 
147  |  | void evmap_io_clear_(struct event_io_map *ctx)  | 
148  |  | { | 
149  |  |   struct event_map_entry **ent, **next, *this;  | 
150  |  |   for (ent = HT_START(event_io_map, ctx); ent; ent = next) { | 
151  |  |     this = *ent;  | 
152  |  |     next = HT_NEXT_RMV(event_io_map, ctx, ent);  | 
153  |  |     mm_free(this);  | 
154  |  |   }  | 
155  |  |   HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */  | 
156  |  | }  | 
157  |  | #endif  | 
158  |  |  | 
159  |  | /* Set the variable 'x' to the field in event_map 'map' with fields of type  | 
160  |  |    'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL  | 
161  |  |    if there are no entries for 'slot'.  Does no bounds-checking. */  | 
162  |  | #define GET_SIGNAL_SLOT(x, map, slot, type)     \  | 
163  | 0  |   (x) = (struct type *)((map)->entries[slot])  | 
164  |  | /* As GET_SIGNAL_SLOT, but construct the entry for 'slot' if it is not present,  | 
165  |  |    by allocating enough memory for a 'struct type', and initializing the new  | 
166  |  |    value by calling the function 'ctor' on it.  Makes the function  | 
167  |  |    return -1 on allocation failure.  | 
168  |  |  */  | 
169  |  | #define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)  \  | 
170  | 0  |   do {               \ | 
171  | 0  |     if ((map)->entries[slot] == NULL) {     \ | 
172  | 0  |       (map)->entries[slot] =        \  | 
173  | 0  |           mm_calloc(1,sizeof(struct type)+fdinfo_len); \  | 
174  | 0  |       if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \  | 
175  | 0  |         return (-1);       \  | 
176  | 0  |       (ctor)((struct type *)(map)->entries[slot]);  \  | 
177  | 0  |     }              \  | 
178  | 0  |     (x) = (struct type *)((map)->entries[slot]);    \  | 
179  | 0  |   } while (0)  | 
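A minimal sketch of the allocation layout the *_AND_CTOR macros above set up (the demo_* names are hypothetical, not libevent API): a single calloc of sizeof(struct type) + fdinfo_len, with the backend's per-fd info living in the trailing bytes, which is what the later ((char*)ctx) + sizeof(struct evmap_io) arithmetic in this file relies on:

    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-ins; not libevent types. */
    struct demo_slot {
        int nread, nwrite;
    };

    /* Allocate a slot plus 'fdinfo_len' trailing bytes in one block,
     * mirroring mm_calloc(1, sizeof(struct type) + fdinfo_len). */
    static struct demo_slot *demo_slot_new(size_t fdinfo_len)
    {
        return calloc(1, sizeof(struct demo_slot) + fdinfo_len);
    }

    /* The backend's private per-fd data starts right after the struct,
     * which is what ((char*)ctx) + sizeof(struct evmap_io) computes. */
    static void *demo_slot_fdinfo(struct demo_slot *s)
    {
        return (char *)s + sizeof(struct demo_slot);
    }

    int main(void)
    {
        struct demo_slot *s = demo_slot_new(16);
        if (!s)
            return 1;
        memset(demo_slot_fdinfo(s), 0xAB, 16); /* backend scratch space */
        free(s);
        return 0;
    }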
180  |  |  | 
181  |  | /* If we aren't using hashtables, then define the IO_SLOT macros and functions  | 
182  |  |    as thin aliases over the SIGNAL_SLOT versions. */  | 
183  |  | #ifndef EVMAP_USE_HT  | 
184  | 0  | #define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)  | 
185  |  | #define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len) \  | 
186  | 0  |   GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)  | 
187  |  | #define FDINFO_OFFSET sizeof(struct evmap_io)  | 
188  |  | void  | 
189  |  | evmap_io_initmap_(struct event_io_map* ctx)  | 
190  | 0  | { | 
191  | 0  |   evmap_signal_initmap_(ctx);  | 
192  | 0  | }  | 
193  |  | void  | 
194  |  | evmap_io_clear_(struct event_io_map* ctx)  | 
195  | 0  | { | 
196  | 0  |   evmap_signal_clear_(ctx);  | 
197  | 0  | }  | 
198  |  | #endif  | 
199  |  |  | 
200  |  |  | 
201  |  | /** Expand 'map' with new entries of width 'msize' until it is big enough  | 
202  |  |   to store a value in 'slot'.  | 
203  |  |  */  | 
204  |  | static int  | 
205  |  | evmap_make_space(struct event_signal_map *map, int slot, int msize)  | 
206  | 0  | { | 
207  | 0  |   if (map->nentries <= slot) { | 
208  | 0  |     int nentries = map->nentries ? map->nentries : 32;  | 
209  | 0  |     void **tmp;  | 
210  |  | 
  | 
211  | 0  |     if (slot > INT_MAX / 2)  | 
212  | 0  |       return (-1);  | 
213  |  |  | 
214  | 0  |     while (nentries <= slot)  | 
215  | 0  |       nentries <<= 1;  | 
216  |  |  | 
217  | 0  |     if (nentries > INT_MAX / msize)  | 
218  | 0  |       return (-1);  | 
219  |  |  | 
220  | 0  |     tmp = (void **)mm_realloc(map->entries, nentries * msize);  | 
221  | 0  |     if (tmp == NULL)  | 
222  | 0  |       return (-1);  | 
223  |  |  | 
224  | 0  |     memset(&tmp[map->nentries], 0,  | 
225  | 0  |         (nentries - map->nentries) * msize);  | 
226  |  |  | 
227  | 0  |     map->nentries = nentries;  | 
228  | 0  |     map->entries = tmp;  | 
229  | 0  |   }  | 
230  |  |  | 
231  | 0  |   return (0);  | 
232  | 0  | }  | 
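A minimal sketch of evmap_make_space()'s sizing rule in isolation (demo_grow is a hypothetical name, not from libevent): start at 32 entries, double until the requested slot fits, and bail out when either the doubling or the final byte count would overflow an int:

    #include <assert.h>
    #include <limits.h>

    /* Hypothetical demo of the sizing rule: returns the new entry count,
     * or -1 if 'slot' cannot be accommodated without integer overflow. */
    static int demo_grow(int nentries, int slot, int msize)
    {
        if (nentries > slot)
            return nentries;        /* already big enough */
        if (slot > INT_MAX / 2)
            return -1;              /* doubling would overflow */
        if (nentries == 0)
            nentries = 32;
        while (nentries <= slot)
            nentries <<= 1;
        if (nentries > INT_MAX / msize)
            return -1;              /* nentries * msize would overflow */
        return nentries;
    }

    int main(void)
    {
        assert(demo_grow(0, 100, (int)sizeof(void *)) == 128);
        assert(demo_grow(128, 5, (int)sizeof(void *)) == 128);
        assert(demo_grow(0, INT_MAX, (int)sizeof(void *)) == -1);
        return 0;
    }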
233  |  |  | 
234  |  | void  | 
235  |  | evmap_signal_initmap_(struct event_signal_map *ctx)  | 
236  | 0  | { | 
237  | 0  |   ctx->nentries = 0;  | 
238  | 0  |   ctx->entries = NULL;  | 
239  | 0  | }  | 
240  |  |  | 
241  |  | void  | 
242  |  | evmap_signal_clear_(struct event_signal_map *ctx)  | 
243  | 0  | { | 
244  | 0  |   if (ctx->entries != NULL) { | 
245  | 0  |     int i;  | 
246  | 0  |     for (i = 0; i < ctx->nentries; ++i) { | 
247  | 0  |       if (ctx->entries[i] != NULL)  | 
248  | 0  |         mm_free(ctx->entries[i]);  | 
249  | 0  |     }  | 
250  | 0  |     mm_free(ctx->entries);  | 
251  | 0  |     ctx->entries = NULL;  | 
252  | 0  |   }  | 
253  | 0  |   ctx->nentries = 0;  | 
254  | 0  | }  | 
255  |  |  | 
256  |  |  | 
257  |  | /* code specific to file descriptors */  | 
258  |  |  | 
259  |  | /** Constructor for struct evmap_io */  | 
260  |  | static void  | 
261  |  | evmap_io_init(struct evmap_io *entry)  | 
262  | 0  | { | 
263  | 0  |   LIST_INIT(&entry->events);  | 
264  | 0  |   entry->nread = 0;  | 
265  | 0  |   entry->nwrite = 0;  | 
266  | 0  |   entry->nclose = 0;  | 
267  | 0  | }  | 
268  |  |  | 
269  |  |  | 
270  |  | /* return -1 on error, 0 on success if nothing changed in the event backend,  | 
271  |  |  * and 1 on success if something did. */  | 
272  |  | int  | 
273  |  | evmap_io_add_(struct event_base *base, evutil_socket_t fd, struct event *ev)  | 
274  | 0  | { | 
275  | 0  |   const struct eventop *evsel = base->evsel;  | 
276  | 0  |   struct event_io_map *io = &base->io;  | 
277  | 0  |   struct evmap_io *ctx = NULL;  | 
278  | 0  |   int nread, nwrite, nclose, retval = 0;  | 
279  | 0  |   short res = 0, old = 0;  | 
280  | 0  |   struct event *old_ev;  | 
281  |  | 
  | 
282  | 0  |   EVUTIL_ASSERT(fd == ev->ev_fd);  | 
283  |  | 
  | 
284  | 0  |   if (fd < 0)  | 
285  | 0  |     return 0;  | 
286  |  |  | 
287  | 0  | #ifndef EVMAP_USE_HT  | 
288  | 0  |   if (fd >= io->nentries) { | 
289  | 0  |     if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)  | 
290  | 0  |       return (-1);  | 
291  | 0  |   }  | 
292  | 0  | #endif  | 
293  | 0  |   GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,  | 
294  | 0  |              evsel->fdinfo_len);  | 
295  |  |  | 
296  | 0  |   nread = ctx->nread;  | 
297  | 0  |   nwrite = ctx->nwrite;  | 
298  | 0  |   nclose = ctx->nclose;  | 
299  |  |  | 
300  | 0  |   if (nread)  | 
301  | 0  |     old |= EV_READ;  | 
302  | 0  |   if (nwrite)  | 
303  | 0  |     old |= EV_WRITE;  | 
304  | 0  |   if (nclose)  | 
305  | 0  |     old |= EV_CLOSED;  | 
306  |  |  | 
307  | 0  |   if (ev->ev_events & EV_READ) { | 
308  | 0  |     if (++nread == 1)  | 
309  | 0  |       res |= EV_READ;  | 
310  | 0  |   }  | 
311  | 0  |   if (ev->ev_events & EV_WRITE) { | 
312  | 0  |     if (++nwrite == 1)  | 
313  | 0  |       res |= EV_WRITE;  | 
314  | 0  |   }  | 
315  | 0  |   if (ev->ev_events & EV_CLOSED) { | 
316  | 0  |     if (++nclose == 1)  | 
317  | 0  |       res |= EV_CLOSED;  | 
318  | 0  |   }  | 
319  | 0  |   if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff || nclose > 0xffff)) { | 
320  | 0  |     event_warnx("Too many events reading or writing on fd %d", | 
321  | 0  |         (int)fd);  | 
322  | 0  |     return -1;  | 
323  | 0  |   }  | 
324  | 0  |   if (EVENT_DEBUG_MODE_IS_ON() &&  | 
325  | 0  |       (old_ev = LIST_FIRST(&ctx->events)) &&  | 
326  | 0  |       (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) { | 
327  | 0  |     event_warnx("Tried to mix edge-triggered and non-edge-triggered" | 
328  | 0  |         " events on fd %d", (int)fd);  | 
329  | 0  |     return -1;  | 
330  | 0  |   }  | 
331  |  |  | 
332  | 0  |   if (res) { | 
333  | 0  |     void *extra = ((char*)ctx) + sizeof(struct evmap_io);  | 
334  |  |     /* XXX(niels): we cannot mix edge-triggered and  | 
335  |  |      * level-triggered, we should probably assert on  | 
336  |  |      * this. */  | 
337  | 0  |     if (evsel->add(base, ev->ev_fd,  | 
338  | 0  |       old, (ev->ev_events & EV_ET) | res, extra) == -1)  | 
339  | 0  |       return (-1);  | 
340  | 0  |     retval = 1;  | 
341  | 0  |   }  | 
342  |  |  | 
343  | 0  |   ctx->nread = (ev_uint16_t) nread;  | 
344  | 0  |   ctx->nwrite = (ev_uint16_t) nwrite;  | 
345  | 0  |   ctx->nclose = (ev_uint16_t) nclose;  | 
346  | 0  |   LIST_INSERT_HEAD(&ctx->events, ev, ev_io_next);  | 
347  |  |  | 
348  | 0  |   return (retval);  | 
349  | 0  | }  | 
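A standalone sketch of the bookkeeping idea in evmap_io_add_()/evmap_io_del_() (the DEMO_* and demo_* names are hypothetical; no libevent types are used): each fd keeps a counter per kind of interest, and the backend only needs to hear about a kind when its counter crosses between 0 and 1, so additional events on an already-watched fd do not trigger another backend add:

    #include <assert.h>

    #define DEMO_READ  0x1
    #define DEMO_WRITE 0x2

    struct demo_io {
        unsigned nread, nwrite;   /* how many events want each kind */
    };

    /* Returns the kinds that just became interesting (count went 0 -> 1),
     * i.e. the only ones worth passing to a backend's add(). */
    static int demo_add(struct demo_io *io, int events)
    {
        int newly = 0;
        if ((events & DEMO_READ) && ++io->nread == 1)
            newly |= DEMO_READ;
        if ((events & DEMO_WRITE) && ++io->nwrite == 1)
            newly |= DEMO_WRITE;
        return newly;
    }

    int main(void)
    {
        struct demo_io io = { 0, 0 };
        assert(demo_add(&io, DEMO_READ) == DEMO_READ);   /* first reader: tell backend */
        assert(demo_add(&io, DEMO_READ) == 0);           /* second reader: backend already knows */
        assert(demo_add(&io, DEMO_WRITE) == DEMO_WRITE);
        return 0;
    }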
350  |  |  | 
351  |  | /* return -1 on error, 0 on success if nothing changed in the event backend,  | 
352  |  |  * and 1 on success if something did. */  | 
353  |  | int  | 
354  |  | evmap_io_del_(struct event_base *base, evutil_socket_t fd, struct event *ev)  | 
355  | 0  | { | 
356  | 0  |   const struct eventop *evsel = base->evsel;  | 
357  | 0  |   struct event_io_map *io = &base->io;  | 
358  | 0  |   struct evmap_io *ctx;  | 
359  | 0  |   int nread, nwrite, nclose, retval = 0;  | 
360  | 0  |   short res = 0, old = 0;  | 
361  |  |  | 
362  | 0  |   if (fd < 0)  | 
363  | 0  |     return 0;  | 
364  |  |  | 
365  | 0  |   EVUTIL_ASSERT(fd == ev->ev_fd);  | 
366  |  |  | 
367  | 0  | #ifndef EVMAP_USE_HT  | 
368  | 0  |   if (fd >= io->nentries)  | 
369  | 0  |     return (-1);  | 
370  | 0  | #endif  | 
371  |  |  | 
372  | 0  |   GET_IO_SLOT(ctx, io, fd, evmap_io);  | 
373  |  |  | 
374  | 0  |   nread = ctx->nread;  | 
375  | 0  |   nwrite = ctx->nwrite;  | 
376  | 0  |   nclose = ctx->nclose;  | 
377  |  |  | 
378  | 0  |   if (nread)  | 
379  | 0  |     old |= EV_READ;  | 
380  | 0  |   if (nwrite)  | 
381  | 0  |     old |= EV_WRITE;  | 
382  | 0  |   if (nclose)  | 
383  | 0  |     old |= EV_CLOSED;  | 
384  |  |  | 
385  | 0  |   if (ev->ev_events & EV_READ) { | 
386  | 0  |     if (--nread == 0)  | 
387  | 0  |       res |= EV_READ;  | 
388  | 0  |     EVUTIL_ASSERT(nread >= 0);  | 
389  | 0  |   }  | 
390  | 0  |   if (ev->ev_events & EV_WRITE) { | 
391  | 0  |     if (--nwrite == 0)  | 
392  | 0  |       res |= EV_WRITE;  | 
393  | 0  |     EVUTIL_ASSERT(nwrite >= 0);  | 
394  | 0  |   }  | 
395  | 0  |   if (ev->ev_events & EV_CLOSED) { | 
396  | 0  |     if (--nclose == 0)  | 
397  | 0  |       res |= EV_CLOSED;  | 
398  | 0  |     EVUTIL_ASSERT(nclose >= 0);  | 
399  | 0  |   }  | 
400  |  |  | 
401  | 0  |   if (res) { | 
402  | 0  |     void *extra = ((char*)ctx) + sizeof(struct evmap_io);  | 
403  | 0  |     if (evsel->del(base, ev->ev_fd,  | 
404  | 0  |       old, (ev->ev_events & EV_ET) | res, extra) == -1) { | 
405  | 0  |       retval = -1;  | 
406  | 0  |     } else { | 
407  | 0  |       retval = 1;  | 
408  | 0  |     }  | 
409  | 0  |   }  | 
410  |  |  | 
411  | 0  |   ctx->nread = nread;  | 
412  | 0  |   ctx->nwrite = nwrite;  | 
413  | 0  |   ctx->nclose = nclose;  | 
414  | 0  |   LIST_REMOVE(ev, ev_io_next);  | 
415  |  |  | 
416  | 0  |   return (retval);  | 
417  | 0  | }  | 
418  |  |  | 
419  |  | void  | 
420  |  | evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events)  | 
421  | 0  | { | 
422  | 0  |   struct event_io_map *io = &base->io;  | 
423  | 0  |   struct evmap_io *ctx;  | 
424  | 0  |   struct event *ev;  | 
425  |  |  | 
426  | 0  | #ifndef EVMAP_USE_HT  | 
427  | 0  |   if (fd < 0 || fd >= io->nentries)  | 
428  | 0  |     return;  | 
429  | 0  | #endif  | 
430  | 0  |   GET_IO_SLOT(ctx, io, fd, evmap_io);  | 
431  |  |  | 
432  | 0  |   if (NULL == ctx)  | 
433  | 0  |     return;  | 
434  | 0  |   LIST_FOREACH(ev, &ctx->events, ev_io_next) { | 
435  | 0  |     if (ev->ev_events & (events & ~EV_ET))  | 
436  | 0  |       event_active_nolock_(ev, ev->ev_events & events, 1);  | 
437  | 0  |   }  | 
438  | 0  | }  | 
439  |  |  | 
440  |  | /* code specific to signals */  | 
441  |  |  | 
442  |  | static void  | 
443  |  | evmap_signal_init(struct evmap_signal *entry)  | 
444  | 0  | { | 
445  | 0  |   LIST_INIT(&entry->events);  | 
446  | 0  | }  | 
447  |  |  | 
448  |  |  | 
449  |  | int  | 
450  |  | evmap_signal_add_(struct event_base *base, int sig, struct event *ev)  | 
451  | 0  | { | 
452  | 0  |   const struct eventop *evsel = base->evsigsel;  | 
453  | 0  |   struct event_signal_map *map = &base->sigmap;  | 
454  | 0  |   struct evmap_signal *ctx = NULL;  | 
455  |  |  | 
456  | 0  |   if (sig < 0 || sig >= NSIG)  | 
457  | 0  |     return (-1);  | 
458  |  |  | 
459  | 0  |   if (sig >= map->nentries) { | 
460  | 0  |     if (evmap_make_space(  | 
461  | 0  |       map, sig, sizeof(struct evmap_signal *)) == -1)  | 
462  | 0  |       return (-1);  | 
463  | 0  |   }  | 
464  | 0  |   GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,  | 
465  | 0  |       base->evsigsel->fdinfo_len);  | 
466  |  |  | 
467  | 0  |   if (LIST_EMPTY(&ctx->events)) { | 
468  | 0  |     if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, ev)  | 
469  | 0  |         == -1)  | 
470  | 0  |       return (-1);  | 
471  | 0  |   }  | 
472  |  |  | 
473  | 0  |   LIST_INSERT_HEAD(&ctx->events, ev, ev_signal_next);  | 
474  |  |  | 
475  | 0  |   return (1);  | 
476  | 0  | }  | 
477  |  |  | 
478  |  | int  | 
479  |  | evmap_signal_del_(struct event_base *base, int sig, struct event *ev)  | 
480  | 0  | { | 
481  | 0  |   const struct eventop *evsel = base->evsigsel;  | 
482  | 0  |   struct event_signal_map *map = &base->sigmap;  | 
483  | 0  |   struct evmap_signal *ctx;  | 
484  |  |  | 
485  | 0  |   if (sig < 0 || sig >= map->nentries)  | 
486  | 0  |     return (-1);  | 
487  |  |  | 
488  | 0  |   GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);  | 
489  |  |  | 
490  | 0  |   LIST_REMOVE(ev, ev_signal_next);  | 
491  |  |  | 
492  | 0  |   if (LIST_FIRST(&ctx->events) == NULL) { | 
493  | 0  |     if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)  | 
494  | 0  |       return (-1);  | 
495  | 0  |   }  | 
496  |  |  | 
497  | 0  |   return (1);  | 
498  | 0  | }  | 
499  |  |  | 
500  |  | void  | 
501  |  | evmap_signal_active_(struct event_base *base, evutil_socket_t sig, int ncalls)  | 
502  | 0  | { | 
503  | 0  |   struct event_signal_map *map = &base->sigmap;  | 
504  | 0  |   struct evmap_signal *ctx;  | 
505  | 0  |   struct event *ev;  | 
506  |  |  | 
507  | 0  |   if (sig < 0 || sig >= map->nentries)  | 
508  | 0  |     return;  | 
509  | 0  |   GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);  | 
510  |  |  | 
511  | 0  |   if (!ctx)  | 
512  | 0  |     return;  | 
513  | 0  |   LIST_FOREACH(ev, &ctx->events, ev_signal_next)  | 
514  | 0  |     event_active_nolock_(ev, EV_SIGNAL, ncalls);  | 
515  | 0  | }  | 
516  |  |  | 
517  |  | void *  | 
518  |  | evmap_io_get_fdinfo_(struct event_io_map *map, evutil_socket_t fd)  | 
519  | 0  | { | 
520  | 0  |   struct evmap_io *ctx;  | 
521  | 0  |   GET_IO_SLOT(ctx, map, fd, evmap_io);  | 
522  | 0  |   if (ctx)  | 
523  | 0  |     return ((char*)ctx) + sizeof(struct evmap_io);  | 
524  | 0  |   else  | 
525  | 0  |     return NULL;  | 
526  | 0  | }  | 
527  |  |  | 
528  |  | /* Callback type for evmap_io_foreach_fd */  | 
529  |  | typedef int (*evmap_io_foreach_fd_cb)(  | 
530  |  |   struct event_base *, evutil_socket_t, struct evmap_io *, void *);  | 
531  |  |  | 
532  |  | /* Multipurpose helper function: Iterate over every file descriptor in the  | 
533  |  |  * event_base for which we could have EV_READ or EV_WRITE events.  For each  | 
534  |  |  * such fd, call fn(base, fd, evmap_io, arg), where fn is the user-provided  | 
535  |  |  * function, base is the event_base, fd is the file descriptor, evmap_io  | 
536  |  |  * is an evmap_io structure containing a list of events pending on the  | 
537  |  |  * file descriptor, and arg is the user-supplied argument.  | 
538  |  |  *  | 
539  |  |  * If fn returns 0, continue on to the next fd. Otherwise, return the same  | 
540  |  |  * value that fn returned.  | 
541  |  |  *  | 
542  |  |  * Note that there is no guarantee that the file descriptors will be processed  | 
543  |  |  * in any particular order.  | 
544  |  |  */  | 
545  |  | static int  | 
546  |  | evmap_io_foreach_fd(struct event_base *base,  | 
547  |  |     evmap_io_foreach_fd_cb fn,  | 
548  |  |     void *arg)  | 
549  | 0  | { | 
550  | 0  |   evutil_socket_t fd;  | 
551  | 0  |   struct event_io_map *iomap = &base->io;  | 
552  | 0  |   int r = 0;  | 
553  |  | #ifdef EVMAP_USE_HT  | 
554  |  |   struct event_map_entry **mapent;  | 
555  |  |   HT_FOREACH(mapent, event_io_map, iomap) { | 
556  |  |     struct evmap_io *ctx = &(*mapent)->ent.evmap_io;  | 
557  |  |     fd = (*mapent)->fd;  | 
558  |  | #else  | 
559  | 0  |   for (fd = 0; fd < iomap->nentries; ++fd) { | 
560  | 0  |     struct evmap_io *ctx = iomap->entries[fd];  | 
561  | 0  |     if (!ctx)  | 
562  | 0  |       continue;  | 
563  | 0  | #endif  | 
564  | 0  |     if ((r = fn(base, fd, ctx, arg)))  | 
565  | 0  |       break;  | 
566  | 0  |   }  | 
567  | 0  |   return r;  | 
568  | 0  | }  | 
569  |  |  | 
570  |  | /* Callback type for evmap_signal_foreach_signal */  | 
571  |  | typedef int (*evmap_signal_foreach_signal_cb)(  | 
572  |  |   struct event_base *, int, struct evmap_signal *, void *);  | 
573  |  |  | 
574  |  | /* Multipurpose helper function: Iterate over every signal number in the  | 
575  |  |  * event_base for which we could have signal events.  For each such signal,  | 
576  |  |  * call fn(base, signum, evmap_signal, arg), where fn is the user-provided  | 
577  |  |  * function, base is the event_base, signum is the signal number, evmap_signal  | 
578  |  |  * is an evmap_signal structure containing a list of events pending on the  | 
579  |  |  * signal, and arg is the user-supplied argument.  | 
580  |  |  *  | 
581  |  |  * If fn returns 0, continue on to the next signal. Otherwise, return the same  | 
582  |  |  * value that fn returned.  | 
583  |  |  */  | 
584  |  | static int  | 
585  |  | evmap_signal_foreach_signal(struct event_base *base,  | 
586  |  |     evmap_signal_foreach_signal_cb fn,  | 
587  |  |     void *arg)  | 
588  | 0  | { | 
589  | 0  |   struct event_signal_map *sigmap = &base->sigmap;  | 
590  | 0  |   int r = 0;  | 
591  | 0  |   int signum;  | 
592  |  |  | 
593  | 0  |   for (signum = 0; signum < sigmap->nentries; ++signum) { | 
594  | 0  |     struct evmap_signal *ctx = sigmap->entries[signum];  | 
595  | 0  |     if (!ctx)  | 
596  | 0  |       continue;  | 
597  | 0  |     if ((r = fn(base, signum, ctx, arg)))  | 
598  | 0  |       break;  | 
599  | 0  |   }  | 
600  | 0  |   return r;  | 
601  | 0  | }  | 
602  |  |  | 
603  |  | /* Helper for evmap_reinit_: tell the backend to add every fd for which we have  | 
604  |  |  * pending events, with the appropriate combination of EV_READ, EV_WRITE, and  | 
605  |  |  * EV_ET. */  | 
606  |  | static int  | 
607  |  | evmap_io_reinit_iter_fn(struct event_base *base, evutil_socket_t fd,  | 
608  |  |     struct evmap_io *ctx, void *arg)  | 
609  | 0  | { | 
610  | 0  |   const struct eventop *evsel = base->evsel;  | 
611  | 0  |   void *extra;  | 
612  | 0  |   int *result = arg;  | 
613  | 0  |   short events = 0;  | 
614  | 0  |   struct event *ev;  | 
615  | 0  |   EVUTIL_ASSERT(ctx);  | 
616  |  |  | 
617  | 0  |   extra = ((char*)ctx) + sizeof(struct evmap_io);  | 
618  | 0  |   if (ctx->nread)  | 
619  | 0  |     events |= EV_READ;  | 
620  | 0  |   if (ctx->nwrite)  | 
621  | 0  |     events |= EV_WRITE;  | 
622  | 0  |   if (ctx->nclose)  | 
623  | 0  |     events |= EV_CLOSED;  | 
624  | 0  |   if (evsel->fdinfo_len)  | 
625  | 0  |     memset(extra, 0, evsel->fdinfo_len);  | 
626  | 0  |   if (events &&  | 
627  | 0  |       (ev = LIST_FIRST(&ctx->events)) &&  | 
628  | 0  |       (ev->ev_events & EV_ET))  | 
629  | 0  |     events |= EV_ET;  | 
630  | 0  |   if (evsel->add(base, fd, 0, events, extra) == -1)  | 
631  | 0  |     *result = -1;  | 
632  |  |  | 
633  | 0  |   return 0;  | 
634  | 0  | }  | 
635  |  |  | 
636  |  | /* Helper for evmap_reinit_: tell the backend to add every signal for which we  | 
637  |  |  * have pending events.  */  | 
638  |  | static int  | 
639  |  | evmap_signal_reinit_iter_fn(struct event_base *base,  | 
640  |  |     int signum, struct evmap_signal *ctx, void *arg)  | 
641  | 0  | { | 
642  | 0  |   const struct eventop *evsel = base->evsigsel;  | 
643  | 0  |   int *result = arg;  | 
644  |  |  | 
645  | 0  |   if (!LIST_EMPTY(&ctx->events)) { | 
646  | 0  |     if (evsel->add(base, signum, 1, EV_SIGNAL,  | 
647  | 0  |              LIST_FIRST(&ctx->events)) == -1)  | 
648  | 0  |       *result = -1;  | 
649  | 0  |   }  | 
650  | 0  |   return 0;  | 
651  | 0  | }  | 
652  |  |  | 
653  |  | int  | 
654  |  | evmap_reinit_(struct event_base *base)  | 
655  | 0  | { | 
656  | 0  |   int result = 0;  | 
657  |  |  | 
658  | 0  |   evmap_io_foreach_fd(base, evmap_io_reinit_iter_fn, &result);  | 
659  | 0  |   if (result < 0)  | 
660  | 0  |     return -1;  | 
661  | 0  |   evmap_signal_foreach_signal(base, evmap_signal_reinit_iter_fn, &result);  | 
662  | 0  |   if (result < 0)  | 
663  | 0  |     return -1;  | 
664  | 0  |   return 0;  | 
665  | 0  | }  | 
666  |  |  | 
667  |  | /* Helper for evmap_delete_all_: delete every event in an event_dlist. */  | 
668  |  | static int  | 
669  |  | delete_all_in_dlist(struct event_dlist *dlist)  | 
670  | 0  | { | 
671  | 0  |   struct event *ev;  | 
672  | 0  |   while ((ev = LIST_FIRST(dlist)))  | 
673  | 0  |     event_del(ev);  | 
674  | 0  |   return 0;  | 
675  | 0  | }  | 
676  |  |  | 
677  |  | /* Helper for evmap_delete_all_: delete every event pending on an fd. */  | 
678  |  | static int  | 
679  |  | evmap_io_delete_all_iter_fn(struct event_base *base, evutil_socket_t fd,  | 
680  |  |     struct evmap_io *io_info, void *arg)  | 
681  | 0  | { | 
682  | 0  |   return delete_all_in_dlist(&io_info->events);  | 
683  | 0  | }  | 
684  |  |  | 
685  |  | /* Helper for evmap_delete_all_: delete every event pending on a signal. */  | 
686  |  | static int  | 
687  |  | evmap_signal_delete_all_iter_fn(struct event_base *base, int signum,  | 
688  |  |     struct evmap_signal *sig_info, void *arg)  | 
689  | 0  | { | 
690  | 0  |   return delete_all_in_dlist(&sig_info->events);  | 
691  | 0  | }  | 
692  |  |  | 
693  |  | void  | 
694  |  | evmap_delete_all_(struct event_base *base)  | 
695  | 0  | { | 
696  | 0  |   evmap_signal_foreach_signal(base, evmap_signal_delete_all_iter_fn, NULL);  | 
697  | 0  |   evmap_io_foreach_fd(base, evmap_io_delete_all_iter_fn, NULL);  | 
698  | 0  | }  | 
699  |  |  | 
700  |  | /** Per-fd structure for use with changelists.  It keeps track, for each fd or  | 
701  |  |  * signal using the changelist, of where its entry in the changelist is.  | 
702  |  |  */  | 
703  |  | struct event_changelist_fdinfo { | 
704  |  |   int idxplus1; /* this is the index +1, so that memset(0) will make it  | 
705  |  |            * a no-such-element */  | 
706  |  | };  | 
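A minimal sketch of the index-plus-one convention described above (the demo_* names are hypothetical): because the stored value is idx + 1, memory freshly zeroed by calloc/memset already reads as "no changelist entry", no separate presence flag is needed, and index 0 remains representable:

    #include <assert.h>
    #include <string.h>

    struct demo_fdinfo {
        int idxplus1;   /* 0 means "no entry"; otherwise entry index + 1 */
    };

    static void demo_set_index(struct demo_fdinfo *f, int idx) { f->idxplus1 = idx + 1; }
    static int  demo_has_entry(const struct demo_fdinfo *f)    { return f->idxplus1 != 0; }
    static int  demo_get_index(const struct demo_fdinfo *f)    { return f->idxplus1 - 1; }

    int main(void)
    {
        struct demo_fdinfo f;
        memset(&f, 0, sizeof(f));   /* freshly zeroed => "no entry", for free */
        assert(!demo_has_entry(&f));
        demo_set_index(&f, 0);      /* even index 0 is representable */
        assert(demo_has_entry(&f) && demo_get_index(&f) == 0);
        return 0;
    }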
707  |  |  | 
708  |  | void  | 
709  |  | event_changelist_init_(struct event_changelist *changelist)  | 
710  | 0  | { | 
711  | 0  |   changelist->changes = NULL;  | 
712  | 0  |   changelist->changes_size = 0;  | 
713  | 0  |   changelist->n_changes = 0;  | 
714  | 0  | }  | 
715  |  |  | 
716  |  | /** Helper: return the changelist_fdinfo corresponding to a given change. */  | 
717  |  | static inline struct event_changelist_fdinfo *  | 
718  |  | event_change_get_fdinfo(struct event_base *base,  | 
719  |  |     const struct event_change *change)  | 
720  | 0  | { | 
721  | 0  |   char *ptr;  | 
722  | 0  |   if (change->read_change & EV_CHANGE_SIGNAL) { | 
723  | 0  |     struct evmap_signal *ctx;  | 
724  | 0  |     GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);  | 
725  | 0  |     ptr = ((char*)ctx) + sizeof(struct evmap_signal);  | 
726  | 0  |   } else { | 
727  | 0  |     struct evmap_io *ctx;  | 
728  | 0  |     GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);  | 
729  | 0  |     ptr = ((char*)ctx) + sizeof(struct evmap_io);  | 
730  | 0  |   }  | 
731  | 0  |   return (void*)ptr;  | 
732  | 0  | }  | 
733  |  |  | 
734  |  | /** Callback helper for event_changelist_assert_ok */  | 
735  |  | static int  | 
736  |  | event_changelist_assert_ok_foreach_iter_fn(  | 
737  |  |   struct event_base *base,  | 
738  |  |   evutil_socket_t fd, struct evmap_io *io, void *arg)  | 
739  | 0  | { | 
740  | 0  |   struct event_changelist *changelist = &base->changelist;  | 
741  | 0  |   struct event_changelist_fdinfo *f;  | 
742  | 0  |   f = (void*)  | 
743  | 0  |       ( ((char*)io) + sizeof(struct evmap_io) );  | 
744  | 0  |   if (f->idxplus1) { | 
745  | 0  |     struct event_change *c = &changelist->changes[f->idxplus1 - 1];  | 
746  | 0  |     EVUTIL_ASSERT(c->fd == fd);  | 
747  | 0  |   }  | 
748  | 0  |   return 0;  | 
749  | 0  | }  | 
750  |  |  | 
751  |  | /** Make sure that the changelist is consistent with the evmap structures. */  | 
752  |  | static void  | 
753  |  | event_changelist_assert_ok(struct event_base *base)  | 
754  | 0  | { | 
755  | 0  |   int i;  | 
756  | 0  |   struct event_changelist *changelist = &base->changelist;  | 
757  |  |  | 
758  | 0  |   EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);  | 
759  | 0  |   for (i = 0; i < changelist->n_changes; ++i) { | 
760  | 0  |     struct event_change *c = &changelist->changes[i];  | 
761  | 0  |     struct event_changelist_fdinfo *f;  | 
762  | 0  |     EVUTIL_ASSERT(c->fd >= 0);  | 
763  | 0  |     f = event_change_get_fdinfo(base, c);  | 
764  | 0  |     EVUTIL_ASSERT(f);  | 
765  | 0  |     EVUTIL_ASSERT(f->idxplus1 == i + 1);  | 
766  | 0  |   }  | 
767  |  |  | 
768  | 0  |   evmap_io_foreach_fd(base,  | 
769  | 0  |       event_changelist_assert_ok_foreach_iter_fn,  | 
770  | 0  |       NULL);  | 
771  | 0  | }  | 
772  |  |  | 
773  |  | #ifdef DEBUG_CHANGELIST  | 
774  |  | #define event_changelist_check(base)  event_changelist_assert_ok((base))  | 
775  |  | #else  | 
776  | 0  | #define event_changelist_check(base)  ((void)0)  | 
777  |  | #endif  | 
778  |  |  | 
779  |  | void  | 
780  |  | event_changelist_remove_all_(struct event_changelist *changelist,  | 
781  |  |     struct event_base *base)  | 
782  | 0  | { | 
783  | 0  |   int i;  | 
784  |  |  | 
785  | 0  |   event_changelist_check(base);  | 
786  |  |  | 
787  | 0  |   for (i = 0; i < changelist->n_changes; ++i) { | 
788  | 0  |     struct event_change *ch = &changelist->changes[i];  | 
789  | 0  |     struct event_changelist_fdinfo *fdinfo =  | 
790  | 0  |         event_change_get_fdinfo(base, ch);  | 
791  | 0  |     EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);  | 
792  | 0  |     fdinfo->idxplus1 = 0;  | 
793  | 0  |   }  | 
794  |  |  | 
795  | 0  |   changelist->n_changes = 0;  | 
796  |  |  | 
797  | 0  |   event_changelist_check(base);  | 
798  | 0  | }  | 
799  |  |  | 
800  |  | void  | 
801  |  | event_changelist_freemem_(struct event_changelist *changelist)  | 
802  | 0  | { | 
803  | 0  |   if (changelist->changes)  | 
804  | 0  |     mm_free(changelist->changes);  | 
805  | 0  |   event_changelist_init_(changelist); /* zero it all out. */  | 
806  | 0  | }  | 
807  |  |  | 
808  |  | /** Increase the size of 'changelist' to hold more changes. */  | 
809  |  | static int  | 
810  |  | event_changelist_grow(struct event_changelist *changelist)  | 
811  | 0  | { | 
812  | 0  |   int new_size;  | 
813  | 0  |   struct event_change *new_changes;  | 
814  | 0  |   if (changelist->changes_size < 64)  | 
815  | 0  |     new_size = 64;  | 
816  | 0  |   else  | 
817  | 0  |     new_size = changelist->changes_size * 2;  | 
818  |  |  | 
819  | 0  |   new_changes = mm_realloc(changelist->changes,  | 
820  | 0  |       new_size * sizeof(struct event_change));  | 
821  |  |  | 
822  | 0  |   if (EVUTIL_UNLIKELY(new_changes == NULL))  | 
823  | 0  |     return (-1);  | 
824  |  |  | 
825  | 0  |   changelist->changes = new_changes;  | 
826  | 0  |   changelist->changes_size = new_size;  | 
827  |  |  | 
828  | 0  |   return (0);  | 
829  | 0  | }  | 
830  |  |  | 
831  |  | /** Return a pointer to the changelist entry for the file descriptor or signal  | 
832  |  |  * 'fd', whose fdinfo is 'fdinfo'.  If none exists, construct it, setting its  | 
833  |  |  * old_events field to old_events.  | 
834  |  |  */  | 
835  |  | static struct event_change *  | 
836  |  | event_changelist_get_or_construct(struct event_changelist *changelist,  | 
837  |  |     evutil_socket_t fd,  | 
838  |  |     short old_events,  | 
839  |  |     struct event_changelist_fdinfo *fdinfo)  | 
840  | 0  | { | 
841  | 0  |   struct event_change *change;  | 
842  |  |  | 
843  | 0  |   if (fdinfo->idxplus1 == 0) { | 
844  | 0  |     int idx;  | 
845  | 0  |     EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);  | 
846  |  |  | 
847  | 0  |     if (changelist->n_changes == changelist->changes_size) { | 
848  | 0  |       if (event_changelist_grow(changelist) < 0)  | 
849  | 0  |         return NULL;  | 
850  | 0  |     }  | 
851  |  |  | 
852  | 0  |     idx = changelist->n_changes++;  | 
853  | 0  |     change = &changelist->changes[idx];  | 
854  | 0  |     fdinfo->idxplus1 = idx + 1;  | 
855  |  |  | 
856  | 0  |     memset(change, 0, sizeof(struct event_change));  | 
857  | 0  |     change->fd = fd;  | 
858  | 0  |     change->old_events = old_events;  | 
859  | 0  |   } else { | 
860  | 0  |     change = &changelist->changes[fdinfo->idxplus1 - 1];  | 
861  | 0  |     EVUTIL_ASSERT(change->fd == fd);  | 
862  | 0  |   }  | 
863  | 0  |   return change;  | 
864  | 0  | }  | 
865  |  |  | 
866  |  | int  | 
867  |  | event_changelist_add_(struct event_base *base, evutil_socket_t fd, short old, short events,  | 
868  |  |     void *p)  | 
869  | 0  | { | 
870  | 0  |   struct event_changelist *changelist = &base->changelist;  | 
871  | 0  |   struct event_changelist_fdinfo *fdinfo = p;  | 
872  | 0  |   struct event_change *change;  | 
873  | 0  |   ev_uint8_t evchange = EV_CHANGE_ADD | (events & (EV_ET|EV_PERSIST|EV_SIGNAL));  | 
874  |  |  | 
875  | 0  |   event_changelist_check(base);  | 
876  |  |  | 
877  | 0  |   change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);  | 
878  | 0  |   if (!change)  | 
879  | 0  |     return -1;  | 
880  |  |  | 
881  |  |   /* An add replaces any previous delete, but doesn't result in a no-op,  | 
882  |  |    * since the delete might fail (because the fd had been closed since  | 
883  |  |    * the last add, for instance). */  | 
884  |  |  | 
885  | 0  |   if (events & (EV_READ|EV_SIGNAL))  | 
886  | 0  |     change->read_change = evchange;  | 
887  | 0  |   if (events & EV_WRITE)  | 
888  | 0  |     change->write_change = evchange;  | 
889  | 0  |   if (events & EV_CLOSED)  | 
890  | 0  |     change->close_change = evchange;  | 
891  |  |  | 
892  | 0  |   event_changelist_check(base);  | 
893  | 0  |   return (0);  | 
894  | 0  | }  | 
895  |  |  | 
896  |  | int  | 
897  |  | event_changelist_del_(struct event_base *base, evutil_socket_t fd, short old, short events,  | 
898  |  |     void *p)  | 
899  | 0  | { | 
900  | 0  |   struct event_changelist *changelist = &base->changelist;  | 
901  | 0  |   struct event_changelist_fdinfo *fdinfo = p;  | 
902  | 0  |   struct event_change *change;  | 
903  | 0  |   ev_uint8_t del = EV_CHANGE_DEL | (events & EV_ET);  | 
904  |  |  | 
905  | 0  |   event_changelist_check(base);  | 
906  | 0  |   change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);  | 
907  | 0  |   event_changelist_check(base);  | 
908  | 0  |   if (!change)  | 
909  | 0  |     return -1;  | 
910  |  |  | 
911  |  |   /* A delete on an event set that doesn't contain the event to be  | 
912  |  |      deleted produces a no-op.  This effectively removes any previous  | 
913  |  |      uncommitted add, rather than replacing it: on those platforms where  | 
914  |  |      "add, delete, dispatch" is not the same as "no-op, dispatch", we  | 
915  |  |      want the no-op behavior.  | 
916  |  |  | 
917  |  |      If we have a no-op item, we could remove it from the list  | 
918  |  |      entirely, but really there's not much point: skipping the no-op  | 
919  |  |      change when we do the dispatch later is far cheaper than rejuggling  | 
920  |  |      the array now.  | 
921  |  |  | 
922  |  |      As this stands, it also lets through deletions of events that are  | 
923  |  |      not currently set.  | 
924  |  |    */  | 
925  |  |  | 
926  | 0  |   if (events & (EV_READ|EV_SIGNAL)) { | 
927  | 0  |     if (!(change->old_events & (EV_READ | EV_SIGNAL)))  | 
928  | 0  |       change->read_change = 0;  | 
929  | 0  |     else  | 
930  | 0  |       change->read_change = del;  | 
931  | 0  |   }  | 
932  | 0  |   if (events & EV_WRITE) { | 
933  | 0  |     if (!(change->old_events & EV_WRITE))  | 
934  | 0  |       change->write_change = 0;  | 
935  | 0  |     else  | 
936  | 0  |       change->write_change = del;  | 
937  | 0  |   }  | 
938  | 0  |   if (events & EV_CLOSED) { | 
939  | 0  |     if (!(change->old_events & EV_CLOSED))  | 
940  | 0  |       change->close_change = 0;  | 
941  | 0  |     else  | 
942  | 0  |       change->close_change = del;  | 
943  | 0  |   }  | 
944  |  |  | 
945  | 0  |   event_changelist_check(base);  | 
946  | 0  |   return (0);  | 
947  | 0  | }  | 
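A standalone sketch of the add/delete interaction the two comments above describe (the DEMO_* and demo_* names are hypothetical): an add is always recorded because an earlier uncommitted delete might still fail at dispatch time, while a delete of something the backend never saw simply cancels the pending add:

    #include <assert.h>

    #define DEMO_NONE 0
    #define DEMO_ADD  1
    #define DEMO_DEL  2

    struct demo_change {
        int old_had_read;   /* was READ already registered before this batch? */
        int read_change;    /* DEMO_NONE, DEMO_ADD, or DEMO_DEL */
    };

    /* An add always records DEMO_ADD: even if it follows an uncommitted
     * delete, that delete might fail, so the add cannot be dropped. */
    static void demo_add_read(struct demo_change *c)
    {
        c->read_change = DEMO_ADD;
    }

    /* A delete of something the backend never saw cancels the pending add;
     * otherwise it records a real DEMO_DEL for dispatch time. */
    static void demo_del_read(struct demo_change *c)
    {
        c->read_change = c->old_had_read ? DEMO_DEL : DEMO_NONE;
    }

    int main(void)
    {
        struct demo_change c = { 0, DEMO_NONE };    /* READ not yet registered */
        demo_add_read(&c);
        demo_del_read(&c);                          /* add then del with no old state... */
        assert(c.read_change == DEMO_NONE);         /* ...collapses to a no-op */

        c.old_had_read = 1;                         /* backend already watches READ */
        demo_del_read(&c);
        assert(c.read_change == DEMO_DEL);          /* real delete survives */
        return 0;
    }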
948  |  |  | 
949  |  | /* Helper for evmap_check_integrity_: verify that all of the events pending on  | 
950  |  |  * a given fd are set up correctly, and that the nread, nwrite, and nclose counts on that  | 
951  |  |  * fd are correct. */  | 
952  |  | static int  | 
953  |  | evmap_io_check_integrity_fn(struct event_base *base, evutil_socket_t fd,  | 
954  |  |     struct evmap_io *io_info, void *arg)  | 
955  | 0  | { | 
956  | 0  |   struct event *ev;  | 
957  | 0  |   int n_read = 0, n_write = 0, n_close = 0;  | 
958  |  |  | 
959  |  |   /* First, make sure the list itself isn't corrupt. Otherwise,  | 
960  |  |    * running LIST_FOREACH could be an exciting adventure. */  | 
961  | 0  |   EVUTIL_ASSERT_LIST_OK(&io_info->events, event, ev_io_next);  | 
962  |  |  | 
963  | 0  |   LIST_FOREACH(ev, &io_info->events, ev_io_next) { | 
964  | 0  |     EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);  | 
965  | 0  |     EVUTIL_ASSERT(ev->ev_fd == fd);  | 
966  | 0  |     EVUTIL_ASSERT(!(ev->ev_events & EV_SIGNAL));  | 
967  | 0  |     EVUTIL_ASSERT((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));  | 
968  | 0  |     if (ev->ev_events & EV_READ)  | 
969  | 0  |       ++n_read;  | 
970  | 0  |     if (ev->ev_events & EV_WRITE)  | 
971  | 0  |       ++n_write;  | 
972  | 0  |     if (ev->ev_events & EV_CLOSED)  | 
973  | 0  |       ++n_close;  | 
974  | 0  |   }  | 
975  |  |  | 
976  | 0  |   EVUTIL_ASSERT(n_read == io_info->nread);  | 
977  | 0  |   EVUTIL_ASSERT(n_write == io_info->nwrite);  | 
978  | 0  |   EVUTIL_ASSERT(n_close == io_info->nclose);  | 
979  |  |  | 
980  | 0  |   return 0;  | 
981  | 0  | }  | 
982  |  |  | 
983  |  | /* Helper for evmap_check_integrity_: verify that all of the events pending  | 
984  |  |  * on a given signal are set up correctly. */  | 
985  |  | static int  | 
986  |  | evmap_signal_check_integrity_fn(struct event_base *base,  | 
987  |  |     int signum, struct evmap_signal *sig_info, void *arg)  | 
988  | 0  | { | 
989  | 0  |   struct event *ev;  | 
990  |  |   /* First, make sure the list itself isn't corrupt. */  | 
991  | 0  |   EVUTIL_ASSERT_LIST_OK(&sig_info->events, event, ev_signal_next);  | 
992  |  |  | 
993  | 0  |   LIST_FOREACH(ev, &sig_info->events, ev_io_next) { | 
994  | 0  |     EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);  | 
995  | 0  |     EVUTIL_ASSERT(ev->ev_fd == signum);  | 
996  | 0  |     EVUTIL_ASSERT((ev->ev_events & EV_SIGNAL));  | 
997  | 0  |     EVUTIL_ASSERT(!(ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));  | 
998  | 0  |   }  | 
999  | 0  |   return 0;  | 
1000  | 0  | }  | 
1001  |  |  | 
1002  |  | void  | 
1003  |  | evmap_check_integrity_(struct event_base *base)  | 
1004  | 0  | { | 
1005  | 0  |   evmap_io_foreach_fd(base, evmap_io_check_integrity_fn, NULL);  | 
1006  | 0  |   evmap_signal_foreach_signal(base, evmap_signal_check_integrity_fn, NULL);  | 
1007  |  |  | 
1008  | 0  |   if (base->evsel->add == event_changelist_add_)  | 
1009  | 0  |     event_changelist_assert_ok(base);  | 
1010  | 0  | }  | 
1011  |  |  | 
1012  |  | /* Helper type for evmap_foreach_event_: Bundles a function to call on every  | 
1013  |  |  * event, and the user-provided void* to use as its third argument. */  | 
1014  |  | struct evmap_foreach_event_helper { | 
1015  |  |   event_base_foreach_event_cb fn;  | 
1016  |  |   void *arg;  | 
1017  |  | };  | 
1018  |  |  | 
1019  |  | /* Helper for evmap_foreach_event_: calls a provided function on every event  | 
1020  |  |  * pending on a given fd.  */  | 
1021  |  | static int  | 
1022  |  | evmap_io_foreach_event_fn(struct event_base *base, evutil_socket_t fd,  | 
1023  |  |     struct evmap_io *io_info, void *arg)  | 
1024  | 0  | { | 
1025  | 0  |   struct evmap_foreach_event_helper *h = arg;  | 
1026  | 0  |   struct event *ev;  | 
1027  | 0  |   int r;  | 
1028  | 0  |   LIST_FOREACH(ev, &io_info->events, ev_io_next) { | 
1029  | 0  |     if ((r = h->fn(base, ev, h->arg)))  | 
1030  | 0  |       return r;  | 
1031  | 0  |   }  | 
1032  | 0  |   return 0;  | 
1033  | 0  | }  | 
1034  |  |  | 
1035  |  | /* Helper for evmap_foreach_event_: calls a provided function on every event  | 
1036  |  |  * pending on a given signal.  */  | 
1037  |  | static int  | 
1038  |  | evmap_signal_foreach_event_fn(struct event_base *base, int signum,  | 
1039  |  |     struct evmap_signal *sig_info, void *arg)  | 
1040  | 0  | { | 
1041  | 0  |   struct event *ev;  | 
1042  | 0  |   struct evmap_foreach_event_helper *h = arg;  | 
1043  | 0  |   int r;  | 
1044  | 0  |   LIST_FOREACH(ev, &sig_info->events, ev_signal_next) { | 
1045  | 0  |     if ((r = h->fn(base, ev, h->arg)))  | 
1046  | 0  |       return r;  | 
1047  | 0  |   }  | 
1048  | 0  |   return 0;  | 
1049  | 0  | }  | 
1050  |  |  | 
1051  |  | int  | 
1052  |  | evmap_foreach_event_(struct event_base *base,  | 
1053  |  |     event_base_foreach_event_cb fn, void *arg)  | 
1054  | 0  | { | 
1055  | 0  |   struct evmap_foreach_event_helper h;  | 
1056  | 0  |   int r;  | 
1057  | 0  |   h.fn = fn;  | 
1058  | 0  |   h.arg = arg;  | 
1059  | 0  |   if ((r = evmap_io_foreach_fd(base, evmap_io_foreach_event_fn, &h)))  | 
1060  | 0  |     return r;  | 
1061  | 0  |   return evmap_signal_foreach_signal(base, evmap_signal_foreach_event_fn, &h);  | 
1062  | 0  | }  | 
1063  |  |  |