Coverage Report

Created: 2025-07-01 07:02

/src/lldpd/libevent/evmap.c
Line
Count
Source
1
/*
2
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
3
 *
4
 * Redistribution and use in source and binary forms, with or without
5
 * modification, are permitted provided that the following conditions
6
 * are met:
7
 * 1. Redistributions of source code must retain the above copyright
8
 *    notice, this list of conditions and the following disclaimer.
9
 * 2. Redistributions in binary form must reproduce the above copyright
10
 *    notice, this list of conditions and the following disclaimer in the
11
 *    documentation and/or other materials provided with the distribution.
12
 * 3. The name of the author may not be used to endorse or promote products
13
 *    derived from this software without specific prior written permission.
14
 *
15
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
 */
26
#include "event2/event-config.h"
27
#include "evconfig-private.h"
28
29
#ifdef _WIN32
30
#include <winsock2.h>
31
#define WIN32_LEAN_AND_MEAN
32
#include <windows.h>
33
#undef WIN32_LEAN_AND_MEAN
34
#endif
35
#include <sys/types.h>
36
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
37
#include <sys/time.h>
38
#endif
39
#include <sys/queue.h>
40
#include <stdio.h>
41
#include <stdlib.h>
42
#ifndef _WIN32
43
#include <unistd.h>
44
#endif
45
#include <errno.h>
46
#include <limits.h>
47
#include <signal.h>
48
#include <string.h>
49
#include <time.h>
50
51
#include "event-internal.h"
52
#include "evmap-internal.h"
53
#include "mm-internal.h"
54
#include "changelist-internal.h"
55
56
/** An entry for an evmap_io list: notes all the events that want to read or
57
  write on a given fd, and the number of each.
58
  */
59
struct evmap_io {
60
  struct event_dlist events;
61
  ev_uint16_t nread;
62
  ev_uint16_t nwrite;
63
  ev_uint16_t nclose;
64
};
65
66
/* An entry for an evmap_signal list: notes all the events that want to know
67
   when a signal triggers. */
68
struct evmap_signal {
69
  struct event_dlist events;
70
};
71
72
/* On some platforms, fds start at 0 and increment by 1 as they are
73
   allocated, and old numbers get reused.  For these platforms, we
74
   implement io maps just like signal maps: as an array of pointers to
75
   struct evmap_io.  But on other platforms (windows), sockets are not
76
   0-indexed, not necessarily consecutive, and not necessarily reused.
77
   There, we use a hashtable to implement evmap_io.
78
*/
79
#ifdef EVMAP_USE_HT
80
struct event_map_entry {
81
  HT_ENTRY(event_map_entry) map_node;
82
  evutil_socket_t fd;
83
  union { /* This is a union in case we need to make more things that can
84
         be in the hashtable. */
85
    struct evmap_io evmap_io;
86
  } ent;
87
};
88
89
/* Helper used by the event_io_map hashtable code; tries to return a good hash
90
 * of the fd in e->fd. */
91
static inline unsigned
92
hashsocket(struct event_map_entry *e)
93
{
94
  /* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
95
   * matter.  Our hashtable implementation really likes low-order bits,
96
   * though, so let's do the rotate-and-add trick. */
97
  unsigned h = (unsigned) e->fd;
98
  h += (h >> 2) | (h << 30);
99
  return h;
100
}
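
The rotate-and-add step in hashsocket() can be exercised on its own. Below is a minimal standalone sketch, assuming a 32-bit unsigned int (so (h >> 2) | (h << 30) is a rotate-right by 2); the sample fd values are hypothetical:

#include <stdio.h>

/* Same mixing step as hashsocket() above: with a 32-bit unsigned,
 * (h >> 2) | (h << 30) is a rotate-right by 2, so adding it folds the
 * high-order bits of the fd into the low-order bits the table indexes on. */
static unsigned mix_fd(unsigned h)
{
  h += (h >> 2) | (h << 30);
  return h;
}

int main(void)
{
  /* Hypothetical SOCKET-like values that share their low-order bits. */
  unsigned samples[] = { 0x1f0, 0x2f0, 0x3f0, 0x4f0 };
  int i;
  for (i = 0; i < 4; ++i)
    printf("fd=0x%x  bucket(mod 256)=%u\n", samples[i], mix_fd(samples[i]) & 0xffu);
  return 0;
}
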
101
102
/* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
103
 * have the same e->fd. */
104
static inline int
105
eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
106
{
107
  return e1->fd == e2->fd;
108
}
109
110
HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
111
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
112
      0.5, mm_malloc, mm_realloc, mm_free)
113
114
#define GET_IO_SLOT(x, map, slot, type)         \
115
  do {                \
116
    struct event_map_entry key_, *ent_;     \
117
    key_.fd = slot;           \
118
    ent_ = HT_FIND(event_io_map, map, &key_);   \
119
    (x) = ent_ ? &ent_->ent.type : NULL;      \
120
  } while (0);
121
122
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)  \
123
  do {                \
124
    struct event_map_entry key_, *ent_;     \
125
    key_.fd = slot;           \
126
    HT_FIND_OR_INSERT_(event_io_map, map_node, hashsocket, map, \
127
        event_map_entry, &key_, ptr,      \
128
        {             \
129
          ent_ = *ptr;        \
130
        },              \
131
        {             \
132
          ent_ = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
133
          if (EVUTIL_UNLIKELY(ent_ == NULL))    \
134
            return (-1);      \
135
          ent_->fd = slot;        \
136
          (ctor)(&ent_->ent.type);      \
137
          HT_FOI_INSERT_(map_node, map, &key_, ent_, ptr) \
138
        });         \
139
    (x) = &ent_->ent.type;          \
140
  } while (0)
141
142
void evmap_io_initmap_(struct event_io_map *ctx)
143
{
144
  HT_INIT(event_io_map, ctx);
145
}
146
147
void evmap_io_clear_(struct event_io_map *ctx)
148
{
149
  struct event_map_entry **ent, **next, *this;
150
  for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
151
    this = *ent;
152
    next = HT_NEXT_RMV(event_io_map, ctx, ent);
153
    mm_free(this);
154
  }
155
  HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
156
}
157
#endif
158
159
/* Set the variable 'x' to the field in event_map 'map' with fields of type
160
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
161
   if there are no entries for 'slot'.  Does no bounds-checking. */
162
#define GET_SIGNAL_SLOT(x, map, slot, type)     \
163
0
  (x) = (struct type *)((map)->entries[slot])
164
/* As GET_SIGNAL_SLOT, but construct the entry for 'slot' if it is not present,
165
   by allocating enough memory for a 'struct type', and initializing the new
166
   value by calling the function 'ctor' on it.  Makes the enclosing function
167
   return -1 on allocation failure.
168
 */
169
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)  \
170
0
  do {               \
171
0
    if ((map)->entries[slot] == NULL) {     \
172
0
      (map)->entries[slot] =        \
173
0
          mm_calloc(1,sizeof(struct type)+fdinfo_len); \
174
0
      if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
175
0
        return (-1);       \
176
0
      (ctor)((struct type *)(map)->entries[slot]);  \
177
0
    }              \
178
0
    (x) = (struct type *)((map)->entries[slot]);    \
179
0
  } while (0)
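
A compile-ready sketch of the same lazy-construction idea as a plain function rather than a macro; the array, entry type, and names below are hypothetical, not part of evmap. The macro form matters because its "return (-1)" returns from the enclosing function, whereas a plain helper has to report failure through its own return value:

#include <stdio.h>
#include <stdlib.h>

struct demo_entry { int initialized; };  /* hypothetical payload type */

static void demo_entry_init(struct demo_entry *e) { e->initialized = 1; }

/* Plain-function equivalent of GET_SIGNAL_SLOT_AND_CTOR: construct the
 * entry for 'slot' on first use, report -1 on allocation failure. */
static int
get_slot_and_ctor(void **entries, int slot, struct demo_entry **out)
{
  if (entries[slot] == NULL) {
    entries[slot] = calloc(1, sizeof(struct demo_entry));
    if (entries[slot] == NULL)
      return -1;
    demo_entry_init(entries[slot]);
  }
  *out = entries[slot];
  return 0;
}

int main(void)
{
  void *entries[4] = { NULL, NULL, NULL, NULL };
  struct demo_entry *e = NULL;
  if (get_slot_and_ctor(entries, 2, &e) == 0)
    printf("slot 2 initialized: %d\n", e->initialized);  /* 1 */
  free(entries[2]);
  return 0;
}
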
180
181
/* If we aren't using hashtables, then define the IO_SLOT macros and functions
182
   as thin aliases over the SIGNAL_SLOT versions. */
183
#ifndef EVMAP_USE_HT
184
0
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
185
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len) \
186
0
  GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
187
#define FDINFO_OFFSET sizeof(struct evmap_io)
188
void
189
evmap_io_initmap_(struct event_io_map* ctx)
190
0
{
191
0
  evmap_signal_initmap_(ctx);
192
0
}
193
void
194
evmap_io_clear_(struct event_io_map* ctx)
195
0
{
196
0
  evmap_signal_clear_(ctx);
197
0
}
198
#endif
199
200
201
/** Expand 'map' with new entries of width 'msize' until it is big enough
202
  to store a value in 'slot'.
203
 */
204
static int
205
evmap_make_space(struct event_signal_map *map, int slot, int msize)
206
0
{
207
0
  if (map->nentries <= slot) {
208
0
    int nentries = map->nentries ? map->nentries : 32;
209
0
    void **tmp;
210
211
0
    if (slot > INT_MAX / 2)
212
0
      return (-1);
213
214
0
    while (nentries <= slot)
215
0
      nentries <<= 1;
216
217
0
    if (nentries > INT_MAX / msize)
218
0
      return (-1);
219
220
0
    tmp = (void **)mm_realloc(map->entries, nentries * msize);
221
0
    if (tmp == NULL)
222
0
      return (-1);
223
224
0
    memset(&tmp[map->nentries], 0,
225
0
        (nentries - map->nentries) * msize);
226
227
0
    map->nentries = nentries;
228
0
    map->entries = tmp;
229
0
  }
230
231
0
  return (0);
232
0
}
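
The sizing policy in evmap_make_space() (start at 32, double until the slot fits, and refuse sizes that would overflow nentries * msize) can be checked in isolation. This is a hypothetical standalone version that only computes the resulting capacity:

#include <limits.h>
#include <stdio.h>

/* Mirror of evmap_make_space()'s sizing logic: double from 32 until
 * 'slot' fits, refusing sizes that would overflow nentries * msize. */
static int grow_to_fit(int nentries, int slot, int msize)
{
  if (nentries == 0)
    nentries = 32;
  if (slot > INT_MAX / 2)
    return -1;
  while (nentries <= slot)
    nentries <<= 1;
  if (nentries > INT_MAX / msize)
    return -1;
  return nentries;
}

int main(void)
{
  printf("%d\n", grow_to_fit(0, 100, (int)sizeof(void *)));    /* 128 */
  printf("%d\n", grow_to_fit(128, 5000, (int)sizeof(void *))); /* 8192 */
  return 0;
}
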
233
234
void
235
evmap_signal_initmap_(struct event_signal_map *ctx)
236
0
{
237
0
  ctx->nentries = 0;
238
0
  ctx->entries = NULL;
239
0
}
240
241
void
242
evmap_signal_clear_(struct event_signal_map *ctx)
243
0
{
244
0
  if (ctx->entries != NULL) {
245
0
    int i;
246
0
    for (i = 0; i < ctx->nentries; ++i) {
247
0
      if (ctx->entries[i] != NULL)
248
0
        mm_free(ctx->entries[i]);
249
0
    }
250
0
    mm_free(ctx->entries);
251
0
    ctx->entries = NULL;
252
0
  }
253
0
  ctx->nentries = 0;
254
0
}
255
256
257
/* code specific to file descriptors */
258
259
/** Constructor for struct evmap_io */
260
static void
261
evmap_io_init(struct evmap_io *entry)
262
0
{
263
0
  LIST_INIT(&entry->events);
264
0
  entry->nread = 0;
265
0
  entry->nwrite = 0;
266
0
  entry->nclose = 0;
267
0
}
268
269
270
/* return -1 on error, 0 on success if nothing changed in the event backend,
271
 * and 1 on success if something did. */
272
int
273
evmap_io_add_(struct event_base *base, evutil_socket_t fd, struct event *ev)
274
0
{
275
0
  const struct eventop *evsel = base->evsel;
276
0
  struct event_io_map *io = &base->io;
277
0
  struct evmap_io *ctx = NULL;
278
0
  int nread, nwrite, nclose, retval = 0;
279
0
  short res = 0, old = 0;
280
0
  struct event *old_ev;
281
282
0
  EVUTIL_ASSERT(fd == ev->ev_fd);
283
284
0
  if (fd < 0)
285
0
    return 0;
286
287
0
#ifndef EVMAP_USE_HT
288
0
  if (fd >= io->nentries) {
289
0
    if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
290
0
      return (-1);
291
0
  }
292
0
#endif
293
0
  GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
294
0
             evsel->fdinfo_len);
295
296
0
  nread = ctx->nread;
297
0
  nwrite = ctx->nwrite;
298
0
  nclose = ctx->nclose;
299
300
0
  if (nread)
301
0
    old |= EV_READ;
302
0
  if (nwrite)
303
0
    old |= EV_WRITE;
304
0
  if (nclose)
305
0
    old |= EV_CLOSED;
306
307
0
  if (ev->ev_events & EV_READ) {
308
0
    if (++nread == 1)
309
0
      res |= EV_READ;
310
0
  }
311
0
  if (ev->ev_events & EV_WRITE) {
312
0
    if (++nwrite == 1)
313
0
      res |= EV_WRITE;
314
0
  }
315
0
  if (ev->ev_events & EV_CLOSED) {
316
0
    if (++nclose == 1)
317
0
      res |= EV_CLOSED;
318
0
  }
319
0
  if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff || nclose > 0xffff)) {
320
0
    event_warnx("Too many events reading or writing on fd %d",
321
0
        (int)fd);
322
0
    return -1;
323
0
  }
324
0
  if (EVENT_DEBUG_MODE_IS_ON() &&
325
0
      (old_ev = LIST_FIRST(&ctx->events)) &&
326
0
      (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
327
0
    event_warnx("Tried to mix edge-triggered and non-edge-triggered"
328
0
        " events on fd %d", (int)fd);
329
0
    return -1;
330
0
  }
331
332
0
  if (res) {
333
0
    void *extra = ((char*)ctx) + sizeof(struct evmap_io);
334
    /* XXX(niels): we cannot mix edge-triggered and
335
     * level-triggered, we should probably assert on
336
     * this. */
337
0
    if (evsel->add(base, ev->ev_fd,
338
0
      old, (ev->ev_events & EV_ET) | res, extra) == -1)
339
0
      return (-1);
340
0
    retval = 1;
341
0
  }
342
343
0
  ctx->nread = (ev_uint16_t) nread;
344
0
  ctx->nwrite = (ev_uint16_t) nwrite;
345
0
  ctx->nclose = (ev_uint16_t) nclose;
346
0
  LIST_INSERT_HEAD(&ctx->events, ev, ev_io_next);
347
348
0
  return (retval);
349
0
}
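
The counter bookkeeping above is what keeps backend calls to a minimum: the backend only needs to hear about 0 -> 1 transitions (the first reader or writer on an fd), not about every event added. A small sketch of that rule, using hypothetical names and flag values rather than libevent's EV_* constants:

#include <stdio.h>

#define X_READ  0x02
#define X_WRITE 0x04

/* Hypothetical per-fd counters, mirroring evmap_io's nread/nwrite. */
struct fd_counts { int nread, nwrite; };

/* Return the events that become newly wanted when one more event with
 * mask 'want' is registered: only a 0 -> 1 transition needs to reach
 * the kernel backend, exactly as in evmap_io_add_(). */
static int add_and_report(struct fd_counts *c, int want)
{
  int newly = 0;
  if ((want & X_READ) && ++c->nread == 1)
    newly |= X_READ;
  if ((want & X_WRITE) && ++c->nwrite == 1)
    newly |= X_WRITE;
  return newly;
}

int main(void)
{
  struct fd_counts c = { 0, 0 };
  printf("%d\n", add_and_report(&c, X_READ));            /* 2: tell the backend */
  printf("%d\n", add_and_report(&c, X_READ | X_WRITE));  /* 4: only write is new */
  printf("%d\n", add_and_report(&c, X_READ));            /* 0: nothing new */
  return 0;
}
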
350
351
/* return -1 on error, 0 on success if nothing changed in the event backend,
352
 * and 1 on success if something did. */
353
int
354
evmap_io_del_(struct event_base *base, evutil_socket_t fd, struct event *ev)
355
0
{
356
0
  const struct eventop *evsel = base->evsel;
357
0
  struct event_io_map *io = &base->io;
358
0
  struct evmap_io *ctx;
359
0
  int nread, nwrite, nclose, retval = 0;
360
0
  short res = 0, old = 0;
361
362
0
  if (fd < 0)
363
0
    return 0;
364
365
0
  EVUTIL_ASSERT(fd == ev->ev_fd);
366
367
0
#ifndef EVMAP_USE_HT
368
0
  if (fd >= io->nentries)
369
0
    return (-1);
370
0
#endif
371
372
0
  GET_IO_SLOT(ctx, io, fd, evmap_io);
373
374
0
  nread = ctx->nread;
375
0
  nwrite = ctx->nwrite;
376
0
  nclose = ctx->nclose;
377
378
0
  if (nread)
379
0
    old |= EV_READ;
380
0
  if (nwrite)
381
0
    old |= EV_WRITE;
382
0
  if (nclose)
383
0
    old |= EV_CLOSED;
384
385
0
  if (ev->ev_events & EV_READ) {
386
0
    if (--nread == 0)
387
0
      res |= EV_READ;
388
0
    EVUTIL_ASSERT(nread >= 0);
389
0
  }
390
0
  if (ev->ev_events & EV_WRITE) {
391
0
    if (--nwrite == 0)
392
0
      res |= EV_WRITE;
393
0
    EVUTIL_ASSERT(nwrite >= 0);
394
0
  }
395
0
  if (ev->ev_events & EV_CLOSED) {
396
0
    if (--nclose == 0)
397
0
      res |= EV_CLOSED;
398
0
    EVUTIL_ASSERT(nclose >= 0);
399
0
  }
400
401
0
  if (res) {
402
0
    void *extra = ((char*)ctx) + sizeof(struct evmap_io);
403
0
    if (evsel->del(base, ev->ev_fd,
404
0
      old, (ev->ev_events & EV_ET) | res, extra) == -1) {
405
0
      retval = -1;
406
0
    } else {
407
0
      retval = 1;
408
0
    }
409
0
  }
410
411
0
  ctx->nread = nread;
412
0
  ctx->nwrite = nwrite;
413
0
  ctx->nclose = nclose;
414
0
  LIST_REMOVE(ev, ev_io_next);
415
416
0
  return (retval);
417
0
}
418
419
void
420
evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events)
421
0
{
422
0
  struct event_io_map *io = &base->io;
423
0
  struct evmap_io *ctx;
424
0
  struct event *ev;
425
426
0
#ifndef EVMAP_USE_HT
427
0
  if (fd < 0 || fd >= io->nentries)
428
0
    return;
429
0
#endif
430
0
  GET_IO_SLOT(ctx, io, fd, evmap_io);
431
432
0
  if (NULL == ctx)
433
0
    return;
434
0
  LIST_FOREACH(ev, &ctx->events, ev_io_next) {
435
0
    if (ev->ev_events & (events & ~EV_ET))
436
0
      event_active_nolock_(ev, ev->ev_events & events, 1);
437
0
  }
438
0
}
439
440
/* code specific to signals */
441
442
static void
443
evmap_signal_init(struct evmap_signal *entry)
444
0
{
445
0
  LIST_INIT(&entry->events);
446
0
}
447
448
449
int
450
evmap_signal_add_(struct event_base *base, int sig, struct event *ev)
451
0
{
452
0
  const struct eventop *evsel = base->evsigsel;
453
0
  struct event_signal_map *map = &base->sigmap;
454
0
  struct evmap_signal *ctx = NULL;
455
456
0
  if (sig < 0 || sig >= NSIG)
457
0
    return (-1);
458
459
0
  if (sig >= map->nentries) {
460
0
    if (evmap_make_space(
461
0
      map, sig, sizeof(struct evmap_signal *)) == -1)
462
0
      return (-1);
463
0
  }
464
0
  GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
465
0
      base->evsigsel->fdinfo_len);
466
467
0
  if (LIST_EMPTY(&ctx->events)) {
468
0
    if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
469
0
        == -1)
470
0
      return (-1);
471
0
  }
472
473
0
  LIST_INSERT_HEAD(&ctx->events, ev, ev_signal_next);
474
475
0
  return (1);
476
0
}
477
478
int
479
evmap_signal_del_(struct event_base *base, int sig, struct event *ev)
480
0
{
481
0
  const struct eventop *evsel = base->evsigsel;
482
0
  struct event_signal_map *map = &base->sigmap;
483
0
  struct evmap_signal *ctx;
484
485
0
  if (sig < 0 || sig >= map->nentries)
486
0
    return (-1);
487
488
0
  GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
489
490
0
  LIST_REMOVE(ev, ev_signal_next);
491
492
0
  if (LIST_FIRST(&ctx->events) == NULL) {
493
0
    if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
494
0
      return (-1);
495
0
  }
496
497
0
  return (1);
498
0
}
499
500
void
501
evmap_signal_active_(struct event_base *base, evutil_socket_t sig, int ncalls)
502
0
{
503
0
  struct event_signal_map *map = &base->sigmap;
504
0
  struct evmap_signal *ctx;
505
0
  struct event *ev;
506
507
0
  if (sig < 0 || sig >= map->nentries)
508
0
    return;
509
0
  GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
510
511
0
  if (!ctx)
512
0
    return;
513
0
  LIST_FOREACH(ev, &ctx->events, ev_signal_next)
514
0
    event_active_nolock_(ev, EV_SIGNAL, ncalls);
515
0
}
516
517
void *
518
evmap_io_get_fdinfo_(struct event_io_map *map, evutil_socket_t fd)
519
0
{
520
0
  struct evmap_io *ctx;
521
0
  GET_IO_SLOT(ctx, map, fd, evmap_io);
522
0
  if (ctx)
523
0
    return ((char*)ctx) + sizeof(struct evmap_io);
524
0
  else
525
0
    return NULL;
526
0
}
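
evmap_io_get_fdinfo_() works because each evmap_io slot is allocated with the backend's per-fd bytes placed immediately after the structure in the same allocation (the "+ fdinfo_len" in the slot constructors), so the extra region can be recovered by pointer arithmetic. A minimal sketch of that trailing-allocation layout, with hypothetical types:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct head { int nread, nwrite; };  /* stands in for struct evmap_io */

/* Allocate the header plus 'extra_len' backend-private bytes in one
 * block; the per-fd info lives directly after the header. */
static struct head *head_new(size_t extra_len)
{
  return calloc(1, sizeof(struct head) + extra_len);
}

/* Recover the trailing region by pointer arithmetic, the same way
 * evmap_io_get_fdinfo_() does. */
static void *head_fdinfo(struct head *h)
{
  return (char *)h + sizeof(struct head);
}

int main(void)
{
  struct head *h = head_new(16);
  if (!h)
    return 1;
  memset(head_fdinfo(h), 0xab, 16);  /* the backend scribbles in its area */
  printf("header at %p, fdinfo at %p\n", (void *)h, head_fdinfo(h));
  free(h);
  return 0;
}
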
527
528
/* Callback type for evmap_io_foreach_fd */
529
typedef int (*evmap_io_foreach_fd_cb)(
530
  struct event_base *, evutil_socket_t, struct evmap_io *, void *);
531
532
/* Multipurpose helper function: Iterate over every file descriptor in the event_base
533
 * for which we could have EV_READ or EV_WRITE events.  For each such fd, call
534
 * fn(base, fd, evmap_io, arg), where fn is the user-provided
535
 * function, base is the event_base, fd is the file descriptor, evmap_io
536
 * is an evmap_io structure containing a list of events pending on the
537
 * file descriptor, and arg is the user-supplied argument.
538
 *
539
 * If fn returns 0, continue on to the next fd. Otherwise, return the same
540
 * value that fn returned.
541
 *
542
 * Note that there is no guarantee that the file descriptors will be processed
543
 * in any particular order.
544
 */
545
static int
546
evmap_io_foreach_fd(struct event_base *base,
547
    evmap_io_foreach_fd_cb fn,
548
    void *arg)
549
0
{
550
0
  evutil_socket_t fd;
551
0
  struct event_io_map *iomap = &base->io;
552
0
  int r = 0;
553
#ifdef EVMAP_USE_HT
554
  struct event_map_entry **mapent;
555
  HT_FOREACH(mapent, event_io_map, iomap) {
556
    struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
557
    fd = (*mapent)->fd;
558
#else
559
0
  for (fd = 0; fd < iomap->nentries; ++fd) {
560
0
    struct evmap_io *ctx = iomap->entries[fd];
561
0
    if (!ctx)
562
0
      continue;
563
0
#endif
564
0
    if ((r = fn(base, fd, ctx, arg)))
565
0
      break;
566
0
  }
567
0
  return r;
568
0
}
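
evmap_io_foreach_fd() and evmap_signal_foreach_signal() share the same shape: walk a container, hand each element plus a caller-supplied void * to a callback, and stop at the first nonzero return. A self-contained sketch of that pattern over a plain array, with hypothetical names:

#include <stdio.h>

typedef int (*visit_cb)(int value, void *arg);

/* Iterate like evmap_io_foreach_fd(): call cb for each element and stop
 * early, propagating the first nonzero return value. */
static int for_each(const int *vals, int n, visit_cb cb, void *arg)
{
  int i, r = 0;
  for (i = 0; i < n; ++i) {
    if ((r = cb(vals[i], arg)))
      break;
  }
  return r;
}

static int sum_until_negative(int value, void *arg)
{
  if (value < 0)
    return -1;            /* a nonzero return aborts the walk */
  *(int *)arg += value;
  return 0;
}

int main(void)
{
  int vals[] = { 3, 4, -1, 100 };
  int total = 0;
  int r = for_each(vals, 4, sum_until_negative, &total);
  printf("r=%d total=%d\n", r, total);  /* r=-1 total=7 */
  return 0;
}
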
569
570
/* Callback type for evmap_signal_foreach_signal */
571
typedef int (*evmap_signal_foreach_signal_cb)(
572
  struct event_base *, int, struct evmap_signal *, void *);
573
574
/* Multipurpose helper function: Iterate over every signal number in the
575
 * event_base for which we could have signal events.  For each such signal,
576
 * call fn(base, signum, evmap_signal, arg), where fn is the user-provided
577
 * function, base is the event_base, signum is the signal number, evmap_signal
578
 * is an evmap_signal structure containing a list of events pending on the
579
 * signal, and arg is the user-supplied argument.
580
 *
581
 * If fn returns 0, continue on to the next signal. Otherwise, return the same
582
 * value that fn returned.
583
 */
584
static int
585
evmap_signal_foreach_signal(struct event_base *base,
586
    evmap_signal_foreach_signal_cb fn,
587
    void *arg)
588
0
{
589
0
  struct event_signal_map *sigmap = &base->sigmap;
590
0
  int r = 0;
591
0
  int signum;
592
593
0
  for (signum = 0; signum < sigmap->nentries; ++signum) {
594
0
    struct evmap_signal *ctx = sigmap->entries[signum];
595
0
    if (!ctx)
596
0
      continue;
597
0
    if ((r = fn(base, signum, ctx, arg)))
598
0
      break;
599
0
  }
600
0
  return r;
601
0
}
602
603
/* Helper for evmap_reinit_: tell the backend to add every fd for which we have
604
 * pending events, with the appropriate combination of EV_READ, EV_WRITE,
605
 * EV_CLOSED, and EV_ET. */
606
static int
607
evmap_io_reinit_iter_fn(struct event_base *base, evutil_socket_t fd,
608
    struct evmap_io *ctx, void *arg)
609
0
{
610
0
  const struct eventop *evsel = base->evsel;
611
0
  void *extra;
612
0
  int *result = arg;
613
0
  short events = 0;
614
0
  struct event *ev;
615
0
  EVUTIL_ASSERT(ctx);
616
617
0
  extra = ((char*)ctx) + sizeof(struct evmap_io);
618
0
  if (ctx->nread)
619
0
    events |= EV_READ;
620
0
  if (ctx->nwrite)
621
0
    events |= EV_WRITE;
622
0
  if (ctx->nclose)
623
0
    events |= EV_CLOSED;
624
0
  if (evsel->fdinfo_len)
625
0
    memset(extra, 0, evsel->fdinfo_len);
626
0
  if (events &&
627
0
      (ev = LIST_FIRST(&ctx->events)) &&
628
0
      (ev->ev_events & EV_ET))
629
0
    events |= EV_ET;
630
0
  if (evsel->add(base, fd, 0, events, extra) == -1)
631
0
    *result = -1;
632
633
0
  return 0;
634
0
}
635
636
/* Helper for evmap_reinit_: tell the backend to add every signal for which we
637
 * have pending events.  */
638
static int
639
evmap_signal_reinit_iter_fn(struct event_base *base,
640
    int signum, struct evmap_signal *ctx, void *arg)
641
0
{
642
0
  const struct eventop *evsel = base->evsigsel;
643
0
  int *result = arg;
644
645
0
  if (!LIST_EMPTY(&ctx->events)) {
646
0
    if (evsel->add(base, signum, 0, EV_SIGNAL, NULL) == -1)
647
0
      *result = -1;
648
0
  }
649
0
  return 0;
650
0
}
651
652
int
653
evmap_reinit_(struct event_base *base)
654
0
{
655
0
  int result = 0;
656
657
0
  evmap_io_foreach_fd(base, evmap_io_reinit_iter_fn, &result);
658
0
  if (result < 0)
659
0
    return -1;
660
0
  evmap_signal_foreach_signal(base, evmap_signal_reinit_iter_fn, &result);
661
0
  if (result < 0)
662
0
    return -1;
663
0
  return 0;
664
0
}
665
666
/* Helper for evmap_delete_all_: delete every event in an event_dlist. */
667
static int
668
delete_all_in_dlist(struct event_dlist *dlist)
669
0
{
670
0
  struct event *ev;
671
0
  while ((ev = LIST_FIRST(dlist)))
672
0
    event_del(ev);
673
0
  return 0;
674
0
}
675
676
/* Helper for evmap_delete_all_: delete every event pending on an fd. */
677
static int
678
evmap_io_delete_all_iter_fn(struct event_base *base, evutil_socket_t fd,
679
    struct evmap_io *io_info, void *arg)
680
0
{
681
0
  return delete_all_in_dlist(&io_info->events);
682
0
}
683
684
/* Helper for evmap_delete_all_: delete every event pending on a signal. */
685
static int
686
evmap_signal_delete_all_iter_fn(struct event_base *base, int signum,
687
    struct evmap_signal *sig_info, void *arg)
688
0
{
689
0
  return delete_all_in_dlist(&sig_info->events);
690
0
}
691
692
void
693
evmap_delete_all_(struct event_base *base)
694
0
{
695
0
  evmap_signal_foreach_signal(base, evmap_signal_delete_all_iter_fn, NULL);
696
0
  evmap_io_foreach_fd(base, evmap_io_delete_all_iter_fn, NULL);
697
0
}
698
699
/** Per-fd structure for use with changelists.  It keeps track, for each fd or
700
 * signal using the changelist, of where its entry in the changelist is.
701
 */
702
struct event_changelist_fdinfo {
703
  int idxplus1; /* this is the index +1, so that memset(0) will make it
704
           * a no-such-element */
705
};
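
Storing the changelist position as index + 1 means a freshly zeroed fdinfo (from mm_calloc or memset(0)) already reads as "no changelist entry", with no separate initialization pass. A small sketch of the convention, with hypothetical helper names:

#include <stdio.h>
#include <string.h>

struct fdinfo { int idxplus1; };  /* same convention as above: 0 == none */

static int has_entry(const struct fdinfo *f) { return f->idxplus1 != 0; }
static int entry_index(const struct fdinfo *f) { return f->idxplus1 - 1; }
static void set_entry(struct fdinfo *f, int idx) { f->idxplus1 = idx + 1; }

int main(void)
{
  struct fdinfo f;
  memset(&f, 0, sizeof(f));             /* zeroing == "no entry", for free */
  printf("%d\n", has_entry(&f));        /* 0 */
  set_entry(&f, 0);                     /* index 0 stays representable too */
  printf("%d %d\n", has_entry(&f), entry_index(&f));  /* 1 0 */
  return 0;
}
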
706
707
void
708
event_changelist_init_(struct event_changelist *changelist)
709
0
{
710
0
  changelist->changes = NULL;
711
0
  changelist->changes_size = 0;
712
0
  changelist->n_changes = 0;
713
0
}
714
715
/** Helper: return the changelist_fdinfo corresponding to a given change. */
716
static inline struct event_changelist_fdinfo *
717
event_change_get_fdinfo(struct event_base *base,
718
    const struct event_change *change)
719
0
{
720
0
  char *ptr;
721
0
  if (change->read_change & EV_CHANGE_SIGNAL) {
722
0
    struct evmap_signal *ctx;
723
0
    GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
724
0
    ptr = ((char*)ctx) + sizeof(struct evmap_signal);
725
0
  } else {
726
0
    struct evmap_io *ctx;
727
0
    GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
728
0
    ptr = ((char*)ctx) + sizeof(struct evmap_io);
729
0
  }
730
0
  return (void*)ptr;
731
0
}
732
733
/** Callback helper for event_changelist_assert_ok */
734
static int
735
event_changelist_assert_ok_foreach_iter_fn(
736
  struct event_base *base,
737
  evutil_socket_t fd, struct evmap_io *io, void *arg)
738
0
{
739
0
  struct event_changelist *changelist = &base->changelist;
740
0
  struct event_changelist_fdinfo *f;
741
0
  f = (void*)
742
0
      ( ((char*)io) + sizeof(struct evmap_io) );
743
0
  if (f->idxplus1) {
744
0
    struct event_change *c = &changelist->changes[f->idxplus1 - 1];
745
0
    EVUTIL_ASSERT(c->fd == fd);
746
0
  }
747
0
  return 0;
748
0
}
749
750
/** Make sure that the changelist is consistent with the evmap structures. */
751
static void
752
event_changelist_assert_ok(struct event_base *base)
753
0
{
754
0
  int i;
755
0
  struct event_changelist *changelist = &base->changelist;
756
757
0
  EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
758
0
  for (i = 0; i < changelist->n_changes; ++i) {
759
0
    struct event_change *c = &changelist->changes[i];
760
0
    struct event_changelist_fdinfo *f;
761
0
    EVUTIL_ASSERT(c->fd >= 0);
762
0
    f = event_change_get_fdinfo(base, c);
763
0
    EVUTIL_ASSERT(f);
764
0
    EVUTIL_ASSERT(f->idxplus1 == i + 1);
765
0
  }
766
767
0
  evmap_io_foreach_fd(base,
768
0
      event_changelist_assert_ok_foreach_iter_fn,
769
0
      NULL);
770
0
}
771
772
#ifdef DEBUG_CHANGELIST
773
#define event_changelist_check(base)  event_changelist_assert_ok((base))
774
#else
775
0
#define event_changelist_check(base)  ((void)0)
776
#endif
777
778
void
779
event_changelist_remove_all_(struct event_changelist *changelist,
780
    struct event_base *base)
781
0
{
782
0
  int i;
783
784
0
  event_changelist_check(base);
785
786
0
  for (i = 0; i < changelist->n_changes; ++i) {
787
0
    struct event_change *ch = &changelist->changes[i];
788
0
    struct event_changelist_fdinfo *fdinfo =
789
0
        event_change_get_fdinfo(base, ch);
790
0
    EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
791
0
    fdinfo->idxplus1 = 0;
792
0
  }
793
794
0
  changelist->n_changes = 0;
795
796
0
  event_changelist_check(base);
797
0
}
798
799
void
800
event_changelist_freemem_(struct event_changelist *changelist)
801
0
{
802
0
  if (changelist->changes)
803
0
    mm_free(changelist->changes);
804
0
  event_changelist_init_(changelist); /* zero it all out. */
805
0
}
806
807
/** Increase the size of 'changelist' to hold more changes. */
808
static int
809
event_changelist_grow(struct event_changelist *changelist)
810
0
{
811
0
  int new_size;
812
0
  struct event_change *new_changes;
813
0
  if (changelist->changes_size < 64)
814
0
    new_size = 64;
815
0
  else
816
0
    new_size = changelist->changes_size * 2;
817
818
0
  new_changes = mm_realloc(changelist->changes,
819
0
      new_size * sizeof(struct event_change));
820
821
0
  if (EVUTIL_UNLIKELY(new_changes == NULL))
822
0
    return (-1);
823
824
0
  changelist->changes = new_changes;
825
0
  changelist->changes_size = new_size;
826
827
0
  return (0);
828
0
}
829
830
/** Return a pointer to the changelist entry for the file descriptor or signal
831
 * 'fd', whose fdinfo is 'fdinfo'.  If none exists, construct it, setting its
832
 * old_events field to old_events.
833
 */
834
static struct event_change *
835
event_changelist_get_or_construct(struct event_changelist *changelist,
836
    evutil_socket_t fd,
837
    short old_events,
838
    struct event_changelist_fdinfo *fdinfo)
839
0
{
840
0
  struct event_change *change;
841
842
0
  if (fdinfo->idxplus1 == 0) {
843
0
    int idx;
844
0
    EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);
845
846
0
    if (changelist->n_changes == changelist->changes_size) {
847
0
      if (event_changelist_grow(changelist) < 0)
848
0
        return NULL;
849
0
    }
850
851
0
    idx = changelist->n_changes++;
852
0
    change = &changelist->changes[idx];
853
0
    fdinfo->idxplus1 = idx + 1;
854
855
0
    memset(change, 0, sizeof(struct event_change));
856
0
    change->fd = fd;
857
0
    change->old_events = old_events;
858
0
  } else {
859
0
    change = &changelist->changes[fdinfo->idxplus1 - 1];
860
0
    EVUTIL_ASSERT(change->fd == fd);
861
0
  }
862
0
  return change;
863
0
}
864
865
int
866
event_changelist_add_(struct event_base *base, evutil_socket_t fd, short old, short events,
867
    void *p)
868
0
{
869
0
  struct event_changelist *changelist = &base->changelist;
870
0
  struct event_changelist_fdinfo *fdinfo = p;
871
0
  struct event_change *change;
872
0
  ev_uint8_t evchange = EV_CHANGE_ADD | (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
873
874
0
  event_changelist_check(base);
875
876
0
  change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
877
0
  if (!change)
878
0
    return -1;
879
880
  /* An add replaces any previous delete, but doesn't result in a no-op,
881
   * since the delete might fail (because the fd had been closed since
882
   * the last add, for instance). */
883
884
0
  if (events & (EV_READ|EV_SIGNAL))
885
0
    change->read_change = evchange;
886
0
  if (events & EV_WRITE)
887
0
    change->write_change = evchange;
888
0
  if (events & EV_CLOSED)
889
0
    change->close_change = evchange;
890
891
0
  event_changelist_check(base);
892
0
  return (0);
893
0
}
894
895
int
896
event_changelist_del_(struct event_base *base, evutil_socket_t fd, short old, short events,
897
    void *p)
898
0
{
899
0
  struct event_changelist *changelist = &base->changelist;
900
0
  struct event_changelist_fdinfo *fdinfo = p;
901
0
  struct event_change *change;
902
0
  ev_uint8_t del = EV_CHANGE_DEL | (events & EV_ET);
903
904
0
  event_changelist_check(base);
905
0
  change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
906
0
  event_changelist_check(base);
907
0
  if (!change)
908
0
    return -1;
909
910
  /* A delete on an event set that doesn't contain the event to be
911
     deleted produces a no-op.  This effectively removes any previous
912
     uncommitted add, rather than replacing it: on those platforms where
913
     "add, delete, dispatch" is not the same as "no-op, dispatch", we
914
     want the no-op behavior.
915
916
     If we have a no-op item, we could remove it from the list
917
     entirely, but really there's not much point: skipping the no-op
918
     change when we do the dispatch later is far cheaper than rejuggling
919
     the array now.
920
921
     As this stands, it also lets through deletions of events that are
922
     not currently set.
923
   */
924
925
0
  if (events & (EV_READ|EV_SIGNAL)) {
926
0
    if (!(change->old_events & (EV_READ | EV_SIGNAL)))
927
0
      change->read_change = 0;
928
0
    else
929
0
      change->read_change = del;
930
0
  }
931
0
  if (events & EV_WRITE) {
932
0
    if (!(change->old_events & EV_WRITE))
933
0
      change->write_change = 0;
934
0
    else
935
0
      change->write_change = del;
936
0
  }
937
0
  if (events & EV_CLOSED) {
938
0
    if (!(change->old_events & EV_CLOSED))
939
0
      change->close_change = 0;
940
0
    else
941
0
      change->close_change = del;
942
0
  }
943
944
0
  event_changelist_check(base);
945
0
  return (0);
946
0
}
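
Per the comments in event_changelist_add_() and event_changelist_del_(), for each flag an add always records a pending ADD (replacing any uncommitted delete), while a delete records a pending DEL only if the flag was in old_events; otherwise it cancels the uncommitted add so the pair becomes a no-op. A toy sketch of that per-flag rule, with hypothetical constants:

#include <stdio.h>

#define C_NONE 0  /* no pending change for this flag */
#define C_ADD  1
#define C_DEL  2

/* What the pending change for a single flag becomes after one operation:
 * an add always records C_ADD (replacing a previous delete); a delete
 * records C_DEL only if the flag was in old_events, and otherwise
 * cancels any uncommitted add, leaving a no-op. */
static int apply(int is_add, int old_had_flag)
{
  if (is_add)
    return C_ADD;
  return old_had_flag ? C_DEL : C_NONE;
}

int main(void)
{
  int pending;

  /* Flag not previously registered: add then delete ends as a no-op. */
  pending = apply(1, 0);
  pending = apply(0, 0);
  printf("%d\n", pending);  /* 0 == C_NONE */

  /* Flag previously registered: delete then add ends as an add. */
  pending = apply(0, 1);
  pending = apply(1, 1);
  printf("%d\n", pending);  /* 1 == C_ADD */
  return 0;
}
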
947
948
/* Helper for evmap_check_integrity_: verify that all of the events pending on
949
 * given fd are set up correctly, and that the nread and nwrite counts on that
950
 * fd are correct. */
951
static int
952
evmap_io_check_integrity_fn(struct event_base *base, evutil_socket_t fd,
953
    struct evmap_io *io_info, void *arg)
954
0
{
955
0
  struct event *ev;
956
0
  int n_read = 0, n_write = 0, n_close = 0;
957
958
  /* First, make sure the list itself isn't corrupt. Otherwise,
959
   * running LIST_FOREACH could be an exciting adventure. */
960
0
  EVUTIL_ASSERT_LIST_OK(&io_info->events, event, ev_io_next);
961
962
0
  LIST_FOREACH(ev, &io_info->events, ev_io_next) {
963
0
    EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
964
0
    EVUTIL_ASSERT(ev->ev_fd == fd);
965
0
    EVUTIL_ASSERT(!(ev->ev_events & EV_SIGNAL));
966
0
    EVUTIL_ASSERT((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));
967
0
    if (ev->ev_events & EV_READ)
968
0
      ++n_read;
969
0
    if (ev->ev_events & EV_WRITE)
970
0
      ++n_write;
971
0
    if (ev->ev_events & EV_CLOSED)
972
0
      ++n_close;
973
0
  }
974
975
0
  EVUTIL_ASSERT(n_read == io_info->nread);
976
0
  EVUTIL_ASSERT(n_write == io_info->nwrite);
977
0
  EVUTIL_ASSERT(n_close == io_info->nclose);
978
979
0
  return 0;
980
0
}
981
982
/* Helper for evmap_check_integrity_: verify that all of the events pending
983
 * on given signal are set up correctly. */
984
static int
985
evmap_signal_check_integrity_fn(struct event_base *base,
986
    int signum, struct evmap_signal *sig_info, void *arg)
987
0
{
988
0
  struct event *ev;
989
  /* First, make sure the list itself isn't corrupt. */
990
0
  EVUTIL_ASSERT_LIST_OK(&sig_info->events, event, ev_signal_next);
991
992
0
  LIST_FOREACH(ev, &sig_info->events, ev_signal_next) {
993
0
    EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
994
0
    EVUTIL_ASSERT(ev->ev_fd == signum);
995
0
    EVUTIL_ASSERT((ev->ev_events & EV_SIGNAL));
996
0
    EVUTIL_ASSERT(!(ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));
997
0
  }
998
0
  return 0;
999
0
}
1000
1001
void
1002
evmap_check_integrity_(struct event_base *base)
1003
0
{
1004
0
  evmap_io_foreach_fd(base, evmap_io_check_integrity_fn, NULL);
1005
0
  evmap_signal_foreach_signal(base, evmap_signal_check_integrity_fn, NULL);
1006
1007
0
  if (base->evsel->add == event_changelist_add_)
1008
0
    event_changelist_assert_ok(base);
1009
0
}
1010
1011
/* Helper type for evmap_foreach_event_: Bundles a function to call on every
1012
 * event, and the user-provided void* to use as its third argument. */
1013
struct evmap_foreach_event_helper {
1014
  event_base_foreach_event_cb fn;
1015
  void *arg;
1016
};
1017
1018
/* Helper for evmap_foreach_event_: calls a provided function on every event
1019
 * pending on a given fd.  */
1020
static int
1021
evmap_io_foreach_event_fn(struct event_base *base, evutil_socket_t fd,
1022
    struct evmap_io *io_info, void *arg)
1023
0
{
1024
0
  struct evmap_foreach_event_helper *h = arg;
1025
0
  struct event *ev;
1026
0
  int r;
1027
0
  LIST_FOREACH(ev, &io_info->events, ev_io_next) {
1028
0
    if ((r = h->fn(base, ev, h->arg)))
1029
0
      return r;
1030
0
  }
1031
0
  return 0;
1032
0
}
1033
1034
/* Helper for evmap_foreach_event_: calls a provided function on every event
1035
 * pending on a given signal.  */
1036
static int
1037
evmap_signal_foreach_event_fn(struct event_base *base, int signum,
1038
    struct evmap_signal *sig_info, void *arg)
1039
0
{
1040
0
  struct event *ev;
1041
0
  struct evmap_foreach_event_helper *h = arg;
1042
0
  int r;
1043
0
  LIST_FOREACH(ev, &sig_info->events, ev_signal_next) {
1044
0
    if ((r = h->fn(base, ev, h->arg)))
1045
0
      return r;
1046
0
  }
1047
0
  return 0;
1048
0
}
1049
1050
int
1051
evmap_foreach_event_(struct event_base *base,
1052
    event_base_foreach_event_cb fn, void *arg)
1053
0
{
1054
0
  struct evmap_foreach_event_helper h;
1055
0
  int r;
1056
0
  h.fn = fn;
1057
0
  h.arg = arg;
1058
0
  if ((r = evmap_io_foreach_fd(base, evmap_io_foreach_event_fn, &h)))
1059
0
    return r;
1060
0
  return evmap_signal_foreach_signal(base, evmap_signal_foreach_event_fn, &h);
1061
0
}
1062