Coverage Report

Created: 2026-04-01 06:31

/src/libevent/event.c
 Line| Count|Source
    1|      |/*
    2|      | * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
    3|      | * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
    4|      | *
    5|      | * Redistribution and use in source and binary forms, with or without
    6|      | * modification, are permitted provided that the following conditions
    7|      | * are met:
    8|      | * 1. Redistributions of source code must retain the above copyright
    9|      | *    notice, this list of conditions and the following disclaimer.
   10|      | * 2. Redistributions in binary form must reproduce the above copyright
   11|      | *    notice, this list of conditions and the following disclaimer in the
   12|      | *    documentation and/or other materials provided with the distribution.
   13|      | * 3. The name of the author may not be used to endorse or promote products
   14|      | *    derived from this software without specific prior written permission.
   15|      | *
   16|      | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   17|      | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   18|      | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   19|      | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   20|      | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   21|      | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   22|      | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   23|      | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   24|      | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   25|      | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   26|      | */
   27|      |#include "event2/event-config.h"
   28|      |#include "evconfig-private.h"
   29|      |
   30|      |#ifdef _WIN32
   31|      |#include <winsock2.h>
   32|      |#define WIN32_LEAN_AND_MEAN
   33|      |#include <windows.h>
   34|      |#undef WIN32_LEAN_AND_MEAN
   35|      |#endif
   36|      |#include <sys/types.h>
   37|      |#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
   38|      |#include <sys/time.h>
   39|      |#endif
   40|      |#include <sys/queue.h>
   41|      |#ifdef EVENT__HAVE_SYS_SOCKET_H
   42|      |#include <sys/socket.h>
   43|      |#endif
   44|      |#include <stdio.h>
   45|      |#include <stdlib.h>
   46|      |#ifdef EVENT__HAVE_UNISTD_H
   47|      |#include <unistd.h>
   48|      |#endif
   49|      |#include <ctype.h>
   50|      |#include <errno.h>
   51|      |#include <signal.h>
   52|      |#include <string.h>
   53|      |#include <time.h>
   54|      |#include <limits.h>
   55|      |#ifdef EVENT__HAVE_FCNTL_H
   56|      |#include <fcntl.h>
   57|      |#endif
   58|      |
   59|      |#include "event2/event.h"
   60|      |#include "event2/event_struct.h"
   61|      |#include "event2/event_compat.h"
   62|      |#include "event2/watch.h"
   63|      |#include "event-internal.h"
   64|      |#include "defer-internal.h"
   65|      |#include "evthread-internal.h"
   66|      |#include "event2/thread.h"
   67|      |#include "event2/util.h"
   68|      |#include "log-internal.h"
   69|      |#include "evmap-internal.h"
   70|      |#include "iocp-internal.h"
   71|      |#include "changelist-internal.h"
   72|      |#define HT_NO_CACHE_HASH_VALUES
   73|      |#include "ht-internal.h"
   74|      |#include "util-internal.h"
   75|      |
   76|      |
   77|      |#ifdef EVENT__HAVE_WORKING_KQUEUE
   78|      |#include "kqueue-internal.h"
   79|      |#endif
   80|      |
   81|      |#ifdef EVENT__HAVE_EVENT_PORTS
   82|      |extern const struct eventop evportops;
   83|      |#endif
   84|      |#ifdef EVENT__HAVE_SELECT
   85|      |extern const struct eventop selectops;
   86|      |#endif
   87|      |#ifdef EVENT__HAVE_POLL
   88|      |extern const struct eventop pollops;
   89|      |#endif
   90|      |#ifdef EVENT__HAVE_EPOLL
   91|      |extern const struct eventop epollops;
   92|      |#endif
   93|      |#ifdef EVENT__HAVE_WORKING_KQUEUE
   94|      |extern const struct eventop kqops;
   95|      |#endif
   96|      |#ifdef EVENT__HAVE_DEVPOLL
   97|      |extern const struct eventop devpollops;
   98|      |#endif
   99|      |#ifdef EVENT__HAVE_WEPOLL
  100|      |extern const struct eventop wepollops;
  101|      |#endif
  102|      |#ifdef _WIN32
  103|      |extern const struct eventop win32ops;
  104|      |#endif
  105|      |
  106|      |/* Array of backends in order of preference. */
  107|      |static const struct eventop *eventops[] = {
  108|      |#ifdef EVENT__HAVE_EVENT_PORTS
  109|      |  &evportops,
  110|      |#endif
  111|      |#ifdef EVENT__HAVE_WORKING_KQUEUE
  112|      |  &kqops,
  113|      |#endif
  114|      |#ifdef EVENT__HAVE_EPOLL
  115|      |  &epollops,
  116|      |#endif
  117|      |#ifdef EVENT__HAVE_DEVPOLL
  118|      |  &devpollops,
  119|      |#endif
  120|      |#ifdef EVENT__HAVE_POLL
  121|      |  &pollops,
  122|      |#endif
  123|      |#ifdef EVENT__HAVE_SELECT
  124|      |  &selectops,
  125|      |#endif
  126|      |#ifdef _WIN32
  127|      |  &win32ops,
  128|      |#endif
  129|      |#ifdef EVENT__HAVE_WEPOLL
  130|      |  &wepollops,
  131|      |#endif
  132|      |  NULL
  133|      |};
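
The array above fixes backend preference at compile time, but selection can still be steered at run time. A minimal sketch of both knobs this file consults, using only public libevent API; the choice of "epoll" as the avoided method is illustrative:

    /* Sketch: skip one backend so selection falls through to the next one. */
    #include <event2/event.h>
    #include <stdio.h>

    int main(void)
    {
        struct event_config *cfg = event_config_new();
        struct event_base *base;

        if (!cfg)
            return 1;
        event_config_avoid_method(cfg, "epoll"); /* same effect as EVENT_NOEPOLL=1 */
        base = event_base_new_with_config(cfg);
        event_config_free(cfg);
        if (!base)
            return 1;
        printf("using: %s\n", event_base_get_method(base));
        event_base_free(base);
        return 0;
    }
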
  134|      |
  135|      |/* Global state; deprecated */
  136|      |EVENT2_EXPORT_SYMBOL
  137|      |struct event_base *event_global_current_base_ = NULL;
  138|     0|#define current_base event_global_current_base_
  139|      |
  140|      |/* Global state */
  141|      |
  142|      |static void *event_self_cbarg_ptr_ = NULL;
  143|      |
  144|      |/* Prototypes */
  145|      |static void event_queue_insert_active(struct event_base *, struct event_callback *);
  146|      |static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
  147|      |static void event_queue_insert_timeout(struct event_base *, struct event *);
  148|      |static void event_queue_insert_inserted(struct event_base *, struct event *);
  149|      |static void event_queue_remove_active(struct event_base *, struct event_callback *);
  150|      |static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
  151|      |static void event_queue_remove_timeout(struct event_base *, struct event *);
  152|      |static void event_queue_remove_inserted(struct event_base *, struct event *);
  153|      |static void event_queue_make_later_events_active(struct event_base *base);
  154|      |
  155|      |static int evthread_make_base_notifiable_nolock_(struct event_base *base);
  156|      |static int event_del_(struct event *ev, int blocking);
  157|      |
  158|      |#ifdef USE_REINSERT_TIMEOUT
  159|      |/* This code seems buggy; only turn it on if we find out what the trouble is. */
  160|      |static void event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
  161|      |#endif
  162|      |
  163|      |static int  event_haveevents(struct event_base *);
  164|      |
  165|      |static int  event_process_active(struct event_base *);
  166|      |
  167|      |static int  timeout_next(struct event_base *, struct timeval **);
  168|      |static void timeout_process(struct event_base *);
  169|      |
  170|      |static inline void  event_signal_closure(struct event_base *, struct event *ev);
  171|      |static inline void  event_persist_closure(struct event_base *, struct event *ev);
  172|      |
  173|      |static int  evthread_notify_base(struct event_base *base);
  174|      |
  175|      |static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
  176|      |    struct event *ev);
  177|      |
  178|      |#ifndef EVENT__DISABLE_DEBUG_MODE
  179|      |/* These functions implement a hashtable of which 'struct event *' structures
  180|      | * have been setup or added.  We don't want to trust the content of the struct
  181|      | * event itself, since we're trying to work through cases where an event gets
  182|      | * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
  183|      | */
  184|      |
  185|      |struct event_debug_entry {
  186|      |  HT_ENTRY(event_debug_entry) node;
  187|      |  const struct event *ptr;
  188|      |  unsigned added : 1;
  189|      |};
  190|      |
  191|      |static inline unsigned
  192|      |hash_debug_entry(const struct event_debug_entry *e)
  193|     0|{
  194|      |  /* We need to do this silliness to convince compilers that we
  195|      |   * honestly mean to cast e->ptr to an integer, and discard any
  196|      |   * part of it that doesn't fit in an unsigned.
  197|      |   */
  198|     0|  unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
  199|      |  /* Our hashtable implementation is pretty sensitive to low bits,
  200|      |   * and every struct event is over 64 bytes in size, so we can
  201|      |   * just say >>6. */
  202|     0|  return (u >> 6);
  203|     0|}
  204|      |
  205|      |static inline int
  206|      |eq_debug_entry(const struct event_debug_entry *a,
  207|      |    const struct event_debug_entry *b)
  208|     0|{
  209|     0|  return a->ptr == b->ptr;
  210|     0|}
  211|      |
  212|      |int event_debug_mode_on_ = 0;
  213|      |
  214|      |
  215|      |#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
  216|      |/**
  217|      | * @brief debug mode variable which is set for any function/structure that needs
  218|      | *        to be shared across threads (if thread support is enabled).
  219|      | *
  220|      | *        When and if evthreads are initialized, this variable will be evaluated,
  221|      | *        and if set to something other than zero, this means the evthread setup
  222|      | *        functions were called out of order.
  223|      | *
  224|      | *        See: "Locks and threading" in the documentation.
  225|      | */
  226|      |int event_debug_created_threadable_ctx_ = 0;
  227|      |#endif
  228|      |
  229|      |/* Set if it's too late to enable event_debug_mode. */
  230|      |static int event_debug_mode_too_late = 0;
  231|      |#ifndef EVENT__DISABLE_THREAD_SUPPORT
  232|      |static void *event_debug_map_lock_ = NULL;
  233|      |#endif
  234|      |static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
  235|      |  HT_INITIALIZER();
  236|      |
  237|     0|HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
     |      |  Unexecuted instantiation: event.c:event_debug_map_HT_INIT
     |      |  Unexecuted instantiation: event.c:event_debug_map_HT_START
     |      |  Unexecuted instantiation: event.c:event_debug_map_HT_NEXT_RMV
     |      |  Unexecuted instantiation: event.c:event_debug_map_HT_FIND
     |      |  Unexecuted instantiation: event.c:event_debug_map_HT_FIND_P_
     |      |  Unexecuted instantiation: event.c:event_debug_map_HT_REMOVE
  238|      |    eq_debug_entry)
  239|     0|HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
  240|     0|    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
  241|      |
  242|      |static void event_debug_mode_too_late_set(void)
  243|     0|{
  244|      |  /* event_base_new_with_config() should have already set event_debug_mode_too_late under lock */
  245|     0|#ifndef EVENT__DISABLE_DEBUG_MODE
  246|     0|  if (!event_debug_mode_too_late) {
  247|     0|    EVLOCK_LOCK(event_debug_map_lock_, 0);
  248|     0|    if (!event_debug_mode_too_late)
  249|     0|      event_debug_mode_too_late = 1;
  250|     0|    EVLOCK_UNLOCK(event_debug_map_lock_, 0);
  251|     0|  }
  252|     0|#endif
  253|     0|}
  254|      |
  255|      |/* record that ev is now setup (that is, ready for an add) */
  256|      |static void event_debug_note_setup_(const struct event *ev)
  257|     0|{
  258|     0|  struct event_debug_entry *dent, find;
  259|      |
  260|     0|  if (!event_debug_mode_on_)
  261|     0|    goto out;
  262|      |
  263|     0|  find.ptr = ev;
  264|     0|  EVLOCK_LOCK(event_debug_map_lock_, 0);
  265|     0|  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
  266|     0|  if (dent) {
  267|     0|    dent->added = 0;
  268|     0|  } else {
  269|     0|    dent = mm_malloc(sizeof(*dent));
  270|     0|    if (!dent)
  271|     0|      event_err(1,
  272|     0|          "Out of memory in debugging code");
  273|     0|    dent->ptr = ev;
  274|     0|    dent->added = 0;
  275|     0|    HT_INSERT(event_debug_map, &global_debug_map, dent);
  276|     0|  }
  277|     0|  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
  278|      |
  279|     0|out:
  280|     0|  event_debug_mode_too_late_set();
  281|     0|}
  282|      |/* record that ev is no longer setup */
  283|      |static void event_debug_note_teardown_(const struct event *ev)
  284|     0|{
  285|     0|  struct event_debug_entry *dent, find;
  286|      |
  287|     0|  if (!event_debug_mode_on_)
  288|     0|    goto out;
  289|      |
  290|     0|  find.ptr = ev;
  291|     0|  EVLOCK_LOCK(event_debug_map_lock_, 0);
  292|     0|  dent = HT_REMOVE(event_debug_map, &global_debug_map, &find);
  293|     0|  if (dent)
  294|     0|    mm_free(dent);
  295|     0|  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
  296|      |
  297|     0|out:
  298|     0|  event_debug_mode_too_late_set();
  299|     0|}
  300|      |/* Macro: record that ev is now added */
  301|      |static void event_debug_note_add_(const struct event *ev)
  302|     0|{
  303|     0|  struct event_debug_entry *dent,find;
  304|      |
  305|     0|  if (!event_debug_mode_on_)
  306|     0|    goto out;
  307|      |
  308|     0|  find.ptr = ev;
  309|     0|  EVLOCK_LOCK(event_debug_map_lock_, 0);
  310|     0|  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
  311|     0|  if (dent) {
  312|     0|    dent->added = 1;
  313|     0|  } else {
  314|     0|    event_errx(EVENT_ERR_ABORT_,
  315|     0|        "%s: noting an add on a non-setup event %p"
  316|     0|        " (events: 0x%x, fd: "EV_SOCK_FMT
  317|     0|        ", flags: 0x%x)",
  318|     0|        __func__, (void *)ev, ev->ev_events,
  319|     0|        EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
  320|     0|  }
  321|     0|  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
  322|      |
  323|     0|out:
  324|     0|  event_debug_mode_too_late_set();
  325|     0|}
  326|      |/* record that ev is no longer added */
  327|      |static void event_debug_note_del_(const struct event *ev)
  328|     0|{
  329|     0|  struct event_debug_entry *dent, find;
  330|      |
  331|     0|  if (!event_debug_mode_on_)
  332|     0|    goto out;
  333|      |
  334|     0|  find.ptr = ev;
  335|     0|  EVLOCK_LOCK(event_debug_map_lock_, 0);
  336|     0|  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
  337|     0|  if (dent) {
  338|     0|    dent->added = 0;
  339|     0|  } else {
  340|     0|    event_errx(EVENT_ERR_ABORT_,
  341|     0|        "%s: noting a del on a non-setup event %p"
  342|     0|        " (events: 0x%x, fd: "EV_SOCK_FMT
  343|     0|        ", flags: 0x%x)",
  344|     0|        __func__, (void *)ev, ev->ev_events,
  345|     0|        EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
  346|     0|  }
  347|     0|  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
  348|      |
  349|     0|out:
  350|     0|  event_debug_mode_too_late_set();
  351|     0|}
  352|      |/* assert that ev is setup (i.e., okay to add or inspect) */
  353|      |static void event_debug_assert_is_setup_(const struct event *ev)
  354|     0|{
  355|     0|  struct event_debug_entry *dent, find;
  356|      |
  357|     0|  if (!event_debug_mode_on_)
  358|     0|    return;
  359|      |
  360|     0|  find.ptr = ev;
  361|     0|  EVLOCK_LOCK(event_debug_map_lock_, 0);
  362|     0|  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
  363|     0|  if (!dent) {
  364|     0|    event_errx(EVENT_ERR_ABORT_,
  365|     0|        "%s called on a non-initialized event %p"
  366|     0|        " (events: 0x%x, fd: "EV_SOCK_FMT
  367|     0|        ", flags: 0x%x)",
  368|     0|        __func__, (void *)ev, ev->ev_events,
  369|     0|        EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
  370|     0|  }
  371|     0|  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
  372|     0|}
  373|      |/* assert that ev is not added (i.e., okay to tear down or set up again) */
  374|      |static void event_debug_assert_not_added_(const struct event *ev)
  375|     0|{
  376|     0|  struct event_debug_entry *dent, find;
  377|      |
  378|     0|  if (!event_debug_mode_on_)
  379|     0|    return;
  380|      |
  381|     0|  find.ptr = ev;
  382|     0|  EVLOCK_LOCK(event_debug_map_lock_, 0);
  383|     0|  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
  384|     0|  if (dent && dent->added) {
  385|     0|    event_errx(EVENT_ERR_ABORT_,
  386|     0|        "%s called on an already added event %p"
  387|     0|        " (events: 0x%x, fd: "EV_SOCK_FMT", "
  388|     0|        "flags: 0x%x)",
  389|     0|        __func__, (void *)ev, ev->ev_events,
  390|     0|        EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
  391|     0|  }
  392|     0|  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
  393|     0|}
  394|      |static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd)
  395|     0|{
  396|     0|  if (!event_debug_mode_on_)
  397|     0|    return;
  398|     0|  if (fd < 0)
  399|     0|    return;
  400|      |
  401|     0|#ifndef _WIN32
  402|     0|  {
  403|     0|    int flags;
  404|     0|    if ((flags = fcntl(fd, F_GETFL, NULL)) >= 0) {
  405|     0|      EVUTIL_ASSERT(flags & O_NONBLOCK);
  406|     0|    }
  407|     0|  }
  408|     0|#endif
  409|     0|}
  410|      |#else
  411|      |static void event_debug_note_setup_(const struct event *ev) { (void)ev; }
  412|      |static void event_debug_note_teardown_(const struct event *ev) { (void)ev; }
  413|      |static void event_debug_note_add_(const struct event *ev) { (void)ev; }
  414|      |static void event_debug_note_del_(const struct event *ev) { (void)ev; }
  415|      |static void event_debug_assert_is_setup_(const struct event *ev) { (void)ev; }
  416|      |static void event_debug_assert_not_added_(const struct event *ev) { (void)ev; }
  417|      |static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd) { (void)fd; }
  418|      |#endif
  419|      |
  420|      |#define EVENT_BASE_ASSERT_LOCKED(base)    \
  421|     0|  EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
  422|      |
  423|      |/* How often (in seconds) do we check for changes in wall clock time relative
  424|      | * to monotonic time?  Set this to -1 for 'never.' */
  425|     0|#define CLOCK_SYNC_INTERVAL -1
  426|      |
  427|      |/** Set 'tp' to the current time according to 'base'.  We must hold the lock
  428|      | * on 'base'.  If there is a cached time, return it.  Otherwise, use
  429|      | * clock_gettime or gettimeofday as appropriate to find out the right time.
  430|      | * Return 0 on success, -1 on failure.
  431|      | */
  432|      |static int
  433|      |gettime(struct event_base *base, struct timeval *tp)
  434|     0|{
  435|     0|  EVENT_BASE_ASSERT_LOCKED(base);
  436|      |
  437|     0|  if (base->tv_cache.tv_sec) {
  438|     0|    *tp = base->tv_cache;
  439|     0|    return (0);
  440|     0|  }
  441|      |
  442|     0|  if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
  443|     0|    return -1;
  444|     0|  }
  445|      |
  446|     0|  if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
  447|     0|      < tp->tv_sec) {
  448|     0|    struct timeval tv;
  449|     0|    evutil_gettimeofday(&tv,NULL);
  450|     0|    evutil_timersub(&tv, tp, &base->tv_clock_diff);
  451|     0|    base->last_updated_clock_diff = tp->tv_sec;
  452|     0|  }
  453|      |
  454|     0|  return 0;
  455|     0|}
  456|      |
  457|      |int
  458|      |event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
  459|     0|{
  460|     0|  int r;
  461|     0|  if (!base) {
  462|     0|    base = current_base;
  463|     0|    if (!current_base)
  464|     0|      return evutil_gettimeofday(tv, NULL);
  465|     0|  }
  466|      |
  467|     0|  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  468|     0|  if (base->tv_cache.tv_sec == 0) {
  469|     0|    r = evutil_gettimeofday(tv, NULL);
  470|     0|  } else {
  471|     0|    evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
  472|     0|    r = 0;
  473|     0|  }
  474|     0|  EVBASE_RELEASE_LOCK(base, th_base_lock);
  475|     0|  return r;
  476|     0|}
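
event_base_gettimeofday_cached() exists so that callbacks needing only an approximate "now" can reuse the timestamp the loop cached at the top of its iteration instead of paying a syscall each time. A minimal sketch of the intended call pattern; the callback name and the use of arg to carry the base are assumptions:

    /* Sketch: read the loop's cached wall-clock time inside a callback. */
    static void
    on_ready(evutil_socket_t fd, short what, void *arg)
    {
        struct event_base *base = arg;
        struct timeval tv;

        /* Falls back to a real evutil_gettimeofday() if nothing is cached. */
        event_base_gettimeofday_cached(base, &tv);
        /* ... use tv ... */
    }
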
  477|      |
  478|      |/** Make 'base' have no current cached time. */
  479|      |static inline void
  480|      |clear_time_cache(struct event_base *base)
  481|     0|{
  482|     0|  base->tv_cache.tv_sec = 0;
  483|     0|}
  484|      |
  485|      |/** Replace the cached time in 'base' with the current time. */
  486|      |static inline void
  487|      |update_time_cache(struct event_base *base)
  488|     0|{
  489|     0|  base->tv_cache.tv_sec = 0;
  490|     0|  if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
  491|     0|    gettime(base, &base->tv_cache);
  492|     0|}
  493|      |
  494|      |int
  495|      |event_base_update_cache_time(struct event_base *base)
  496|     0|{
  497|      |
  498|     0|  if (!base) {
  499|     0|    base = current_base;
  500|     0|    if (!current_base)
  501|     0|      return -1;
  502|     0|  }
  503|      |
  504|     0|  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  505|     0|  if (base->running_loop)
  506|     0|    update_time_cache(base);
  507|     0|  EVBASE_RELEASE_LOCK(base, th_base_lock);
  508|     0|  return 0;
  509|     0|}
  510|      |
  511|      |static inline struct event *
  512|      |event_callback_to_event(struct event_callback *evcb)
  513|     0|{
  514|     0|  EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
  515|     0|  return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
  516|     0|}
  517|      |
  518|      |static inline struct event_callback *
  519|      |event_to_event_callback(struct event *ev)
  520|     0|{
  521|     0|  return &ev->ev_evcallback;
  522|     0|}
  523|      |
  524|      |struct event_base *
  525|      |event_init(void)
  526|     0|{
  527|     0|  struct event_base *base = event_base_new_with_config(NULL);
  528|      |
  529|     0|  if (base == NULL) {
  530|     0|    event_errx(1, "%s: Unable to construct event_base", __func__);
  531|     0|    return NULL;
  532|     0|  }
  533|      |
  534|     0|  current_base = base;
  535|      |
  536|     0|  return (base);
  537|     0|}
  538|      |
  539|      |struct event_base *
  540|      |event_base_new(void)
  541|     0|{
  542|     0|  struct event_base *base = NULL;
  543|     0|  struct event_config *cfg = event_config_new();
  544|     0|  if (cfg) {
  545|     0|    base = event_base_new_with_config(cfg);
  546|     0|    event_config_free(cfg);
  547|     0|  }
  548|     0|  return base;
  549|     0|}
  550|      |
  551|      |/** Return true iff 'method' is the name of a method that 'cfg' tells us to
  552|      | * avoid. */
  553|      |static int
  554|      |event_config_is_avoided_method(const struct event_config *cfg,
  555|      |    const char *method)
  556|     0|{
  557|     0|  struct event_config_entry *entry;
  558|      |
  559|     0|  TAILQ_FOREACH(entry, &cfg->entries, next) {
  560|     0|    if (entry->avoid_method != NULL &&
  561|     0|        strcmp(entry->avoid_method, method) == 0)
  562|     0|      return (1);
  563|     0|  }
  564|      |
  565|     0|  return (0);
  566|     0|}
  567|      |
  568|      |/** Return true iff 'method' is disabled according to the environment. */
  569|      |static int
  570|      |event_is_method_disabled(const char *name)
  571|     0|{
  572|     0|  char environment[64];
  573|     0|  int i;
  574|      |
  575|     0|  evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
  576|     0|  for (i = 8; environment[i] != '\0'; ++i)
  577|     0|    environment[i] = EVUTIL_TOUPPER_(environment[i]);
  578|      |  /* Note that evutil_getenv_() ignores the environment entirely if
  579|      |   * we're setuid */
  580|     0|  return (evutil_getenv_(environment) != NULL);
  581|     0|}
  582|      |
  583|      |int
  584|      |event_base_get_features(const struct event_base *base)
  585|     0|{
  586|     0|  return base->evsel->features;
  587|     0|}
  588|      |
  589|      |void
  590|      |event_enable_debug_mode(void)
  591|     0|{
  592|     0|#ifndef EVENT__DISABLE_DEBUG_MODE
  593|     0|  if (event_debug_mode_on_)
  594|     0|    event_errx(1, "%s was called twice!", __func__);
  595|     0|  if (event_debug_mode_too_late)
  596|     0|    event_errx(1, "%s must be called *before* creating any events "
  597|     0|        "or event_bases",__func__);
  598|      |
  599|     0|  event_debug_mode_on_ = 1;
  600|      |
  601|     0|  HT_INIT(event_debug_map, &global_debug_map);
  602|     0|#endif
  603|     0|}
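
Because event_debug_mode_too_late is set by the first event_base_new_with_config() call, debug mode only takes effect if it is enabled before anything else touches libevent. A minimal sketch:

    /* Sketch: debug mode must be enabled before any base or event exists. */
    #include <event2/event.h>

    int main(void)
    {
        event_enable_debug_mode();  /* must precede event_base_new() */
        struct event_base *base = event_base_new();
        /* double-add, non-setup, and use-after-teardown bugs now abort loudly */
        event_base_free(base);
        return 0;
    }
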
  604|      |
  605|      |void
  606|      |event_disable_debug_mode(void)
  607|     0|{
  608|     0|#ifndef EVENT__DISABLE_DEBUG_MODE
  609|     0|  struct event_debug_entry **ent, *victim;
  610|      |
  611|     0|  EVLOCK_LOCK(event_debug_map_lock_, 0);
  612|     0|  for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
  613|     0|    victim = *ent;
  614|     0|    ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
  615|     0|    mm_free(victim);
  616|     0|  }
  617|     0|  HT_CLEAR(event_debug_map, &global_debug_map);
  618|     0|  EVLOCK_UNLOCK(event_debug_map_lock_ , 0);
  619|      |
  620|     0|  event_debug_mode_on_  = 0;
  621|     0|#endif
  622|     0|}
  623|      |
  624|      |struct event_base *
  625|      |event_base_new_with_config(const struct event_config *cfg)
  626|     0|{
  627|     0|  int i;
  628|     0|  struct event_base *base;
  629|     0|  int should_check_environment;
  630|      |  /* event_base_new_with_config() should always be first to set event_debug_mode_too_late */
  631|     0|#ifndef EVENT__DISABLE_DEBUG_MODE
  632|     0|  EVLOCK_LOCK(event_debug_map_lock_, 0);
  633|     0|  if (!event_debug_mode_too_late)
  634|     0|      event_debug_mode_too_late = 1;
  635|     0|  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
  636|     0|#endif
  637|      |
  638|     0|  if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
  639|     0|    event_warn("%s: calloc", __func__);
  640|     0|    return NULL;
  641|     0|  }
  642|      |
  643|     0|  if (cfg)
  644|     0|    base->flags = cfg->flags;
  645|      |
  646|     0|  should_check_environment =
  647|     0|      !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
  648|      |
  649|     0|  {
  650|     0|    struct timeval tmp;
  651|     0|    int precise_time =
  652|     0|        cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
  653|     0|    int flags;
  654|     0|    if (should_check_environment && !precise_time) {
  655|     0|      precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
  656|     0|      if (precise_time) {
  657|     0|        base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
  658|     0|      }
  659|     0|    }
  660|     0|    flags = precise_time ? EV_MONOT_PRECISE : 0;
  661|     0|    evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
  662|      |
  663|     0|    gettime(base, &tmp);
  664|     0|  }
  665|      |
  666|     0|  min_heap_ctor_(&base->timeheap);
  667|      |
  668|     0|  base->sig.ev_signal_pair[0] = -1;
  669|     0|  base->sig.ev_signal_pair[1] = -1;
  670|     0|  base->th_notify_fd[0] = -1;
  671|     0|  base->th_notify_fd[1] = -1;
  672|      |
  673|     0|  TAILQ_INIT(&base->active_later_queue);
  674|      |
  675|     0|  evmap_io_initmap_(&base->io);
  676|     0|  evmap_signal_initmap_(&base->sigmap);
  677|     0|  event_changelist_init_(&base->changelist);
  678|      |
  679|     0|  base->evbase = NULL;
  680|      |
  681|     0|  if (cfg) {
  682|     0|    memcpy(&base->max_dispatch_time,
  683|     0|        &cfg->max_dispatch_interval, sizeof(struct timeval));
  684|     0|    base->limit_callbacks_after_prio =
  685|     0|        cfg->limit_callbacks_after_prio;
  686|     0|  } else {
  687|     0|    base->max_dispatch_time.tv_sec = -1;
  688|     0|    base->limit_callbacks_after_prio = 1;
  689|     0|  }
  690|     0|  if (cfg && cfg->max_dispatch_callbacks >= 0) {
  691|     0|    base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
  692|     0|  } else {
  693|     0|    base->max_dispatch_callbacks = INT_MAX;
  694|     0|  }
  695|     0|  if (base->max_dispatch_callbacks == INT_MAX &&
  696|     0|      base->max_dispatch_time.tv_sec == -1)
  697|     0|    base->limit_callbacks_after_prio = INT_MAX;
  698|      |
  699|     0|  for (i = 0; eventops[i] && !base->evbase; i++) {
  700|     0|    if (cfg != NULL) {
  701|      |      /* determine if this backend should be avoided */
  702|     0|      if (event_config_is_avoided_method(cfg,
  703|     0|        eventops[i]->name))
  704|     0|        continue;
  705|     0|      if ((eventops[i]->features & cfg->require_features)
  706|     0|          != cfg->require_features)
  707|     0|        continue;
  708|     0|    }
  709|      |
  710|      |    /* also obey the environment variables */
  711|     0|    if (should_check_environment &&
  712|     0|        event_is_method_disabled(eventops[i]->name))
  713|     0|      continue;
  714|      |
  715|     0|    base->evsel = eventops[i];
  716|      |
  717|     0|    base->evbase = base->evsel->init(base);
  718|     0|  }
  719|      |
  720|     0|  if (base->evbase == NULL) {
  721|     0|    event_warnx("%s: no event mechanism available",
  722|     0|        __func__);
  723|     0|    base->evsel = NULL;
  724|     0|    event_base_free(base);
  725|     0|    return NULL;
  726|     0|  }
  727|      |
  728|     0|  if (evutil_getenv_("EVENT_SHOW_METHOD"))
  729|     0|    event_msgx("libevent using: %s", base->evsel->name);
  730|      |
  731|      |  /* allocate a single active event queue */
  732|     0|  if (event_base_priority_init(base, 1) < 0) {
  733|     0|    event_base_free(base);
  734|     0|    return NULL;
  735|     0|  }
  736|      |
  737|      |  /* prepare for threading */
  738|      |
  739|     0|#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
  740|     0|  event_debug_created_threadable_ctx_ = 1;
  741|     0|#endif
  742|      |
  743|     0|#ifndef EVENT__DISABLE_THREAD_SUPPORT
  744|     0|  if (EVTHREAD_LOCKING_ENABLED() &&
  745|     0|      (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
  746|     0|    int r;
  747|     0|    EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
  748|     0|    EVTHREAD_ALLOC_COND(base->current_event_cond);
  749|     0|    r = evthread_make_base_notifiable(base);
  750|     0|    if (r<0) {
  751|     0|      event_warnx("%s: Unable to make base notifiable.", __func__);
  752|     0|      event_base_free(base);
  753|     0|      return NULL;
  754|     0|    }
  755|     0|  }
  756|     0|#endif
  757|      |
  758|      |#ifdef _WIN32
  759|      |  if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
  760|      |    event_base_start_iocp_(base, cfg->n_cpus_hint);
  761|      |#endif
  762|      |
  763|      |  /* initialize watcher lists */
  764|     0|  for (i = 0; i < EVWATCH_MAX; ++i)
  765|     0|    TAILQ_INIT(&base->watchers[i]);
  766|      |
  767|     0|  return (base);
  768|     0|}
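
The cfg argument drives the backend loop above through require_features and the avoid list. A minimal sketch of requesting an O(1) backend plus precise timers, with a plain fallback when no backend qualifies (the fallback policy is an assumption, not something this file imposes):

    /* Sketch: require an O(1) backend; fall back to defaults if unavailable. */
    struct event_config *cfg = event_config_new();
    struct event_base *base = NULL;

    if (cfg) {
        event_config_require_features(cfg, EV_FEATURE_O1);
        event_config_set_flag(cfg, EVENT_BASE_FLAG_PRECISE_TIMER);
        base = event_base_new_with_config(cfg);  /* NULL if nothing matches */
        event_config_free(cfg);
    }
    if (!base)
        base = event_base_new();
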
  769|      |
  770|      |int
  771|      |event_base_start_iocp_(struct event_base *base, int n_cpus)
  772|     0|{
  773|      |#ifdef _WIN32
  774|      |  if (base->iocp)
  775|      |    return 0;
  776|      |  base->iocp = event_iocp_port_launch_(n_cpus);
  777|      |  if (!base->iocp) {
  778|      |    event_warnx("%s: Couldn't launch IOCP", __func__);
  779|      |    return -1;
  780|      |  }
  781|      |  return 0;
  782|      |#else
  783|     0|  return -1;
  784|     0|#endif
  785|     0|}
  786|      |
  787|      |void
  788|      |event_base_stop_iocp_(struct event_base *base)
  789|     0|{
  790|      |#ifdef _WIN32
  791|      |  int rv;
  792|      |
  793|      |  if (!base->iocp)
  794|      |    return;
  795|      |  rv = event_iocp_shutdown_(base->iocp, -1);
  796|      |  EVUTIL_ASSERT(rv >= 0);
  797|      |  base->iocp = NULL;
  798|      |#endif
  799|     0|}
  800|      |
  801|      |static int
  802|      |event_base_cancel_single_callback_(struct event_base *base,
  803|      |    struct event_callback *evcb,
  804|      |    int run_finalizers)
  805|     0|{
  806|     0|  int result = 0;
  807|      |
  808|     0|  if (evcb->evcb_flags & EVLIST_INIT) {
  809|     0|    struct event *ev = event_callback_to_event(evcb);
  810|     0|    if (!(ev->ev_flags & EVLIST_INTERNAL)) {
  811|     0|      event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
  812|     0|      result = 1;
  813|     0|    }
  814|     0|  } else {
  815|     0|    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  816|     0|    event_callback_cancel_nolock_(base, evcb, 1);
  817|     0|    EVBASE_RELEASE_LOCK(base, th_base_lock);
  818|     0|    result = 1;
  819|     0|  }
  820|      |
  821|     0|  if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
  822|     0|    switch (evcb->evcb_closure) {
  823|     0|    case EV_CLOSURE_EVENT_FINALIZE:
  824|     0|    case EV_CLOSURE_EVENT_FINALIZE_FREE: {
  825|     0|      struct event *ev = event_callback_to_event(evcb);
  826|     0|      ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
  827|     0|      if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
  828|     0|        mm_free(ev);
  829|     0|      break;
  830|     0|    }
  831|     0|    case EV_CLOSURE_CB_FINALIZE:
  832|     0|      evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
  833|     0|      break;
  834|     0|    default:
  835|     0|      break;
  836|     0|    }
  837|     0|  }
  838|     0|  return result;
  839|     0|}
  840|      |
  841|      |static int event_base_free_queues_(struct event_base *base, int run_finalizers)
  842|     0|{
  843|     0|  int deleted = 0, i;
  844|      |
  845|     0|  for (i = 0; i < base->nactivequeues; ++i) {
  846|     0|    struct event_callback *evcb, *next;
  847|     0|    for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
  848|     0|      next = TAILQ_NEXT(evcb, evcb_active_next);
  849|     0|      deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
  850|     0|      evcb = next;
  851|     0|    }
  852|     0|  }
  853|      |
  854|     0|  {
  855|     0|    struct event_callback *evcb;
  856|     0|    while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
  857|     0|      deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
  858|     0|    }
  859|     0|  }
  860|      |
  861|     0|  return deleted;
  862|     0|}
  863|      |
  864|      |static void
  865|      |event_base_free_(struct event_base *base, int run_finalizers)
  866|     0|{
  867|     0|  int i;
  868|     0|  size_t n_deleted=0;
  869|     0|  struct event *ev;
  870|     0|  struct evwatch *watcher;
  871|      |  /* XXXX grab the lock? If there is contention when one thread frees
  872|      |   * the base, then the contending thread will be very sad soon. */
  873|      |
  874|      |  /* event_base_free(NULL) is how to free the current_base if we
  875|      |   * made it with event_init and forgot to hold a reference to it. */
  876|     0|  if (base == NULL && current_base)
  877|     0|    base = current_base;
  878|      |  /* Don't actually free NULL. */
  879|     0|  if (base == NULL) {
  880|     0|    event_warnx("%s: no base to free", __func__);
  881|     0|    return;
  882|     0|  }
  883|      |  /* XXX(niels) - check for internal events first */
  884|      |
  885|      |#ifdef _WIN32
  886|      |  event_base_stop_iocp_(base);
  887|      |#endif
  888|      |
  889|      |  /* threading fds if we have them */
  890|     0|  if (base->th_notify_fd[0] != -1) {
  891|     0|    event_del(&base->th_notify);
  892|     0|    EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
  893|     0|    if (base->th_notify_fd[1] != -1)
  894|     0|      EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
  895|     0|    base->th_notify_fd[0] = -1;
  896|     0|    base->th_notify_fd[1] = -1;
  897|     0|    event_debug_unassign(&base->th_notify);
  898|     0|  }
  899|      |
  900|      |  /* Delete all non-internal events. */
  901|     0|  evmap_delete_all_(base);
  902|      |
  903|     0|  while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
  904|     0|    event_del(ev);
  905|     0|    ++n_deleted;
  906|     0|  }
  907|     0|  for (i = 0; i < base->n_common_timeouts; ++i) {
  908|     0|    struct common_timeout_list *ctl =
  909|     0|        base->common_timeout_queues[i];
  910|     0|    event_del(&ctl->timeout_event); /* Internal; doesn't count */
  911|     0|    event_debug_unassign(&ctl->timeout_event);
  912|     0|    for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
  913|     0|      struct event *next = TAILQ_NEXT(ev,
  914|     0|          ev_timeout_pos.ev_next_with_common_timeout);
  915|     0|      if (!(ev->ev_flags & EVLIST_INTERNAL)) {
  916|     0|        event_del(ev);
  917|     0|        ++n_deleted;
  918|     0|      }
  919|     0|      ev = next;
  920|     0|    }
  921|     0|    mm_free(ctl);
  922|     0|  }
  923|     0|  if (base->common_timeout_queues)
  924|     0|    mm_free(base->common_timeout_queues);
  925|      |
  926|     0|  for (;;) {
  927|      |    /* For finalizers we can register yet another finalizer out from
  928|      |     * finalizer, and iff finalizer will be in active_later_queue we can
  929|      |     * add finalizer to activequeues, and we will have events in
  930|      |     * activequeues after this function returns, which is not what we want
  931|      |     * (we even have an assertion for this).
  932|      |     *
  933|      |     * A simple case is bufferevent with underlying (i.e. filters).
  934|      |     */
  935|     0|    int i = event_base_free_queues_(base, run_finalizers);
  936|     0|    event_debug(("%s: %d events freed", __func__, i));
  937|     0|    if (!i) {
  938|     0|      break;
  939|     0|    }
  940|     0|    n_deleted += i;
  941|     0|  }
  942|      |
  943|     0|  if (n_deleted)
  944|     0|    event_debug(("%s: "EV_SIZE_FMT" events were still set in base",
  945|     0|      __func__, n_deleted));
  946|      |
  947|     0|  while (LIST_FIRST(&base->once_events)) {
  948|     0|    struct event_once *eonce = LIST_FIRST(&base->once_events);
  949|     0|    LIST_REMOVE(eonce, next_once);
  950|     0|    mm_free(eonce);
  951|     0|  }
  952|      |
  953|     0|  if (base->evsel != NULL && base->evsel->dealloc != NULL)
  954|     0|    base->evsel->dealloc(base);
  955|      |
  956|     0|  for (i = 0; i < base->nactivequeues; ++i)
  957|     0|    EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
  958|      |
  959|     0|  EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
  960|     0|  min_heap_dtor_(&base->timeheap);
  961|      |
  962|     0|  mm_free(base->activequeues);
  963|      |
  964|     0|  evmap_io_clear_(&base->io);
  965|     0|  evmap_signal_clear_(&base->sigmap);
  966|     0|  event_changelist_freemem_(&base->changelist);
  967|      |
  968|     0|  EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
  969|     0|  EVTHREAD_FREE_COND(base->current_event_cond);
  970|      |
  971|      |  /* Free all event watchers */
  972|     0|  for (i = 0; i < EVWATCH_MAX; ++i) {
  973|     0|    while (!TAILQ_EMPTY(&base->watchers[i])) {
  974|     0|      watcher = TAILQ_FIRST(&base->watchers[i]);
  975|     0|      TAILQ_REMOVE(&base->watchers[i], watcher, next);
  976|     0|      mm_free(watcher);
  977|     0|    }
  978|     0|  }
  979|      |
  980|      |  /* If we're freeing current_base, there won't be a current_base. */
  981|     0|  if (base == current_base)
  982|     0|    current_base = NULL;
  983|     0|  mm_free(base);
  984|     0|}
  985|      |
  986|      |void
  987|      |event_base_free_nofinalize(struct event_base *base)
  988|     0|{
  989|     0|  event_base_free_(base, 0);
  990|     0|}
  991|      |
  992|      |void
  993|      |event_base_free(struct event_base *base)
  994|     0|{
  995|     0|  event_base_free_(base, 1);
  996|     0|}
  997|      |
  998|      |/* Fake eventop; used to disable the backend temporarily inside event_reinit
  999|      | * so that we can call event_del() on an event without telling the backend.
 1000|      | */
 1001|      |static int
 1002|      |nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
 1003|      |    short events, void *fdinfo)
 1004|     0|{
 1005|     0|  return 0;
 1006|     0|}
 1007|      |const struct eventop nil_eventop = {
 1008|      |  "nil",
 1009|      |  NULL, /* init: unused. */
 1010|      |  NULL, /* add: unused. */
 1011|      |  nil_backend_del, /* del: used, so needs to be killed. */
 1012|      |  NULL, /* dispatch: unused. */
 1013|      |  NULL, /* dealloc: unused. */
 1014|      |  0, 0, 0
 1015|      |};
 1016|      |
 1017|      |/* reinitialize the event base after a fork */
 1018|      |int
 1019|      |event_reinit(struct event_base *base)
 1020|     0|{
 1021|     0|  const struct eventop *evsel;
 1022|     0|  int res = 0;
 1023|     0|  int was_notifiable = 0;
 1024|     0|  int had_signal_added = 0;
 1025|      |
 1026|     0|  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
 1027|      |
 1028|     0|  evsel = base->evsel;
 1029|      |
 1030|      |  /* check if this event mechanism requires reinit on the backend */
 1031|     0|  if (evsel->need_reinit) {
 1032|      |    /* We're going to call event_del() on our notify events (the
 1033|      |     * ones that tell about signals and wakeup events).  But we
 1034|      |     * don't actually want to tell the backend to change its
 1035|      |     * state, since it might still share some resource (a kqueue,
 1036|      |     * an epoll fd) with the parent process, and we don't want to
 1037|      |     * delete the fds from _that_ backend, so we temporarily stub out
 1038|      |     * the evsel with a replacement.
 1039|      |     */
 1040|     0|    base->evsel = &nil_eventop;
 1041|     0|  }
 1042|      |
 1043|      |  /* We need to re-create a new signal-notification fd and a new
 1044|      |   * thread-notification fd.  Otherwise, we'll still share those with
 1045|      |   * the parent process, which would make any notification sent to them
 1046|      |   * get received by one or both of the event loops, more or less at
 1047|      |   * random.
 1048|      |   */
 1049|     0|  if (base->sig.ev_signal_added) {
 1050|     0|    event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
 1051|     0|    event_debug_unassign(&base->sig.ev_signal);
 1052|     0|    memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
 1053|     0|    had_signal_added = 1;
 1054|     0|    base->sig.ev_signal_added = 0;
 1055|     0|  }
 1056|     0|  if (base->sig.ev_signal_pair[0] != -1)
 1057|     0|    EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
 1058|     0|  if (base->sig.ev_signal_pair[1] != -1)
 1059|     0|    EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
 1060|     0|  if (base->th_notify_fn != NULL) {
 1061|     0|    was_notifiable = 1;
 1062|     0|    base->th_notify_fn = NULL;
 1063|     0|  }
 1064|     0|  if (base->th_notify_fd[0] != -1) {
 1065|     0|    event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
 1066|     0|    EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
 1067|     0|    if (base->th_notify_fd[1] != -1)
 1068|     0|      EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
 1069|     0|    base->th_notify_fd[0] = -1;
 1070|     0|    base->th_notify_fd[1] = -1;
 1071|     0|    event_debug_unassign(&base->th_notify);
 1072|     0|  }
 1073|      |
 1074|      |  /* Replace the original evsel. */
 1075|     0|        base->evsel = evsel;
 1076|      |
 1077|     0|  if (evsel->need_reinit) {
 1078|      |    /* Reconstruct the backend through brute-force, so that we do
 1079|      |     * not share any structures with the parent process. For some
 1080|      |     * backends, this is necessary: epoll and kqueue, for
 1081|      |     * instance, have events associated with a kernel
 1082|      |     * structure. If we didn't reinitialize, we'd share that
 1083|      |     * structure with the parent process, and any changes made by
 1084|      |     * the parent would affect our backend's behavior (and vice
 1085|      |     * versa).
 1086|      |     */
 1087|     0|    if (base->evsel->dealloc != NULL)
 1088|     0|      base->evsel->dealloc(base);
 1089|     0|    base->evbase = evsel->init(base);
 1090|     0|    if (base->evbase == NULL) {
 1091|     0|      event_errx(1,
 1092|     0|         "%s: could not reinitialize event mechanism",
 1093|     0|         __func__);
 1094|     0|      res = -1;
 1095|     0|      goto done;
 1096|     0|    }
 1097|      |
 1098|      |    /* Empty out the changelist (if any): we are starting from a
 1099|      |     * blank slate. */
 1100|     0|    event_changelist_freemem_(&base->changelist);
 1101|      |
 1102|      |    /* Tell the event maps to re-inform the backend about all
 1103|      |     * pending events. This will make the signal notification
 1104|      |     * event get re-created if necessary. */
 1105|     0|    if (evmap_reinit_(base) < 0)
 1106|     0|      res = -1;
 1107|     0|  } else {
 1108|     0|    res = evsig_init_(base);
 1109|     0|    if (res == 0 && had_signal_added) {
 1110|     0|      res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
 1111|     0|      if (res == 0)
 1112|     0|        base->sig.ev_signal_added = 1;
 1113|     0|    }
 1114|     0|  }
 1115|      |
 1116|      |  /* If we were notifiable before, and nothing just exploded, become
 1117|      |   * notifiable again. */
 1118|     0|  if (was_notifiable && res == 0)
 1119|     0|    res = evthread_make_base_notifiable_nolock_(base);
 1120|      |
 1121|     0|done:
 1122|     0|  EVBASE_RELEASE_LOCK(base, th_base_lock);
 1123|     0|  return (res);
 1124|     0|}
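
The comments above explain why a forked child must not keep using the parent's backend state. The caller-side pattern is a single call in the child; a minimal sketch (error handling elided):

    /* Sketch: give the child its own epoll/kqueue fd and notification pipes. */
    pid_t pid = fork();
    if (pid == 0) {
        if (event_reinit(base) == -1)
            exit(1);  /* could not rebuild the backend */
        /* the child may now run event_base_dispatch(base) safely */
    }
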
 1125|      |
 1126|      |/* Get the monotonic time for this event_base's timer */
 1127|      |int
 1128|      |event_gettime_monotonic(struct event_base *base, struct timeval *tv)
 1129|     0|{
 1130|     0|  int rv = -1;
 1131|      |
 1132|     0|  if (base && tv) {
 1133|     0|    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
 1134|     0|    rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
 1135|     0|    EVBASE_RELEASE_LOCK(base, th_base_lock);
 1136|     0|  }
 1137|      |
 1138|     0|  return rv;
 1139|     0|}
 1140|      |
 1141|      |const char **
 1142|      |event_get_supported_methods(void)
 1143|     0|{
 1144|     0|  static const char **methods = NULL;
 1145|     0|  const struct eventop **method;
 1146|     0|  const char **tmp;
 1147|     0|  int i = 0, k;
 1148|      |
 1149|      |  /* count all methods */
 1150|     0|  for (method = &eventops[0]; *method != NULL; ++method) {
 1151|     0|    ++i;
 1152|     0|  }
 1153|      |
 1154|      |  /* allocate one more than we need for the NULL pointer */
 1155|     0|  tmp = mm_calloc((i + 1), sizeof(char *));
 1156|     0|  if (tmp == NULL)
 1157|     0|    return (NULL);
 1158|      |
 1159|      |  /* populate the array with the supported methods */
 1160|     0|  for (k = 0, i = 0; eventops[k] != NULL; ++k) {
 1161|     0|    tmp[i++] = eventops[k]->name;
 1162|     0|  }
 1163|     0|  tmp[i] = NULL;
 1164|      |
 1165|     0|  if (methods != NULL)
 1166|     0|    mm_free((char**)methods);
 1167|      |
 1168|     0|  methods = tmp;
 1169|      |
 1170|     0|  return (methods);
 1171|     0|}
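
Note that the returned array is kept in a function-local static and freed on the next call, so callers must not free it themselves. A minimal usage sketch:

    /* Sketch: list the backends this build of libevent can offer. */
    const char **methods = event_get_supported_methods();
    int i;

    for (i = 0; methods != NULL && methods[i] != NULL; ++i)
        printf("available method: %s\n", methods[i]);
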
 1172|      |
 1173|      |struct event_config *
 1174|      |event_config_new(void)
 1175|     0|{
 1176|     0|  struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
 1177|      |
 1178|     0|  if (cfg == NULL)
 1179|     0|    return (NULL);
 1180|      |
 1181|     0|  TAILQ_INIT(&cfg->entries);
 1182|     0|  cfg->max_dispatch_interval.tv_sec = -1;
 1183|     0|  cfg->max_dispatch_callbacks = INT_MAX;
 1184|     0|  cfg->limit_callbacks_after_prio = 1;
 1185|      |
 1186|     0|  return (cfg);
 1187|     0|}
 1188|      |
 1189|      |static void
 1190|      |event_config_entry_free(struct event_config_entry *entry)
 1191|     0|{
 1192|     0|  if (entry->avoid_method != NULL)
 1193|     0|    mm_free((char *)entry->avoid_method);
 1194|     0|  mm_free(entry);
 1195|     0|}
 1196|      |
 1197|      |void
 1198|      |event_config_free(struct event_config *cfg)
 1199|     0|{
 1200|     0|  struct event_config_entry *entry;
 1201|      |
 1202|     0|  while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
 1203|     0|    TAILQ_REMOVE(&cfg->entries, entry, next);
 1204|     0|    event_config_entry_free(entry);
 1205|     0|  }
 1206|     0|  mm_free(cfg);
 1207|     0|}
 1208|      |
 1209|      |int
 1210|      |event_config_set_flag(struct event_config *cfg, int flag)
 1211|     0|{
 1212|     0|  if (!cfg)
 1213|     0|    return -1;
 1214|     0|  cfg->flags |= flag;
 1215|     0|  return 0;
 1216|     0|}
 1217|      |
 1218|      |int
 1219|      |event_config_avoid_method(struct event_config *cfg, const char *method)
 1220|     0|{
 1221|     0|  struct event_config_entry *entry = mm_malloc(sizeof(*entry));
 1222|     0|  if (entry == NULL)
 1223|     0|    return (-1);
 1224|      |
 1225|     0|  if ((entry->avoid_method = mm_strdup(method)) == NULL) {
 1226|     0|    mm_free(entry);
 1227|     0|    return (-1);
 1228|     0|  }
 1229|      |
 1230|     0|  TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
 1231|      |
 1232|     0|  return (0);
 1233|     0|}
 1234|      |
 1235|      |int
 1236|      |event_config_require_features(struct event_config *cfg,
 1237|      |    int features)
 1238|     0|{
 1239|     0|  if (!cfg)
 1240|     0|    return (-1);
 1241|     0|  cfg->require_features = features;
 1242|     0|  return (0);
 1243|     0|}
 1244|      |
 1245|      |int
 1246|      |event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
 1247|     0|{
 1248|     0|  if (!cfg)
 1249|     0|    return (-1);
 1250|     0|  cfg->n_cpus_hint = cpus;
 1251|     0|  return (0);
 1252|     0|}
 1253|      |
 1254|      |int
 1255|      |event_config_set_max_dispatch_interval(struct event_config *cfg,
 1256|      |    const struct timeval *max_interval, int max_callbacks, int min_priority)
 1257|     0|{
 1258|     0|  if (max_interval)
 1259|     0|    memcpy(&cfg->max_dispatch_interval, max_interval,
 1260|     0|        sizeof(struct timeval));
 1261|     0|  else
 1262|     0|    cfg->max_dispatch_interval.tv_sec = -1;
 1263|     0|  cfg->max_dispatch_callbacks =
 1264|     0|      max_callbacks >= 0 ? max_callbacks : INT_MAX;
 1265|     0|  if (min_priority < 0)
 1266|     0|    min_priority = 0;
 1267|     0|  cfg->limit_callbacks_after_prio = min_priority;
 1268|     0|  return (0);
 1269|     0|}
 1270|      |
 1271|      |int
 1272|      |event_priority_init(int npriorities)
 1273|     0|{
 1274|     0|  return event_base_priority_init(current_base, npriorities);
 1275|     0|}
 1276|      |
 1277|      |int
 1278|      |event_base_priority_init(struct event_base *base, int npriorities)
 1279|     0|{
 1280|     0|  int i, r;
 1281|     0|  r = -1;
 1282|      |
 1283|     0|  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
 1284|      |
 1285|     0|  if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
 1286|     0|      || npriorities >= EVENT_MAX_PRIORITIES)
 1287|     0|    goto err;
 1288|      |
 1289|     0|  if (npriorities == base->nactivequeues)
 1290|     0|    goto ok;
 1291|      |
 1292|     0|  if (base->nactivequeues) {
 1293|     0|    mm_free(base->activequeues);
 1294|     0|    base->nactivequeues = 0;
 1295|     0|  }
 1296|      |
 1297|      |  /* Allocate our priority queues */
 1298|     0|  base->activequeues = (struct evcallback_list *)
 1299|     0|    mm_calloc(npriorities, sizeof(struct evcallback_list));
 1300|     0|  if (base->activequeues == NULL) {
 1301|     0|    event_warn("%s: calloc", __func__);
 1302|     0|    goto err;
 1303|     0|  }
 1304|     0|  base->nactivequeues = npriorities;
 1305|      |
 1306|     0|  for (i = 0; i < base->nactivequeues; ++i) {
 1307|     0|    TAILQ_INIT(&base->activequeues[i]);
 1308|     0|  }
 1309|      |
 1310|     0|ok:
 1311|     0|  r = 0;
 1312|     0|err:
 1313|     0|  EVBASE_RELEASE_LOCK(base, th_base_lock);
 1314|     0|  return (r);
 1315|     0|}
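
Priorities only matter once a base has more than the single queue allocated by default, and the queues must be set up before events become pending (the N_ACTIVE_CALLBACKS check above enforces part of that). A minimal sketch; fd and on_read are placeholders:

    /* Sketch: three priority levels; lower numbers run first. */
    event_base_priority_init(base, 3);

    struct event *ev = event_new(base, fd, EV_READ | EV_PERSIST, on_read, NULL);
    event_priority_set(ev, 0);  /* put this event in the most urgent queue */
    event_add(ev, NULL);
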
 1316|      |
 1317|      |int
 1318|      |event_base_get_npriorities(struct event_base *base)
 1319|     0|{
 1320|      |
 1321|     0|  int n;
 1322|     0|  if (base == NULL)
 1323|     0|    base = current_base;
 1324|      |
 1325|     0|  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
 1326|     0|  n = base->nactivequeues;
 1327|     0|  EVBASE_RELEASE_LOCK(base, th_base_lock);
 1328|     0|  return (n);
 1329|     0|}
 1330|      |
 1331|      |int
 1332|      |event_base_get_num_events(struct event_base *base, unsigned int type)
 1333|     0|{
 1334|     0|  int r = 0;
 1335|      |
 1336|     0|  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
 1337|      |
 1338|     0|  if (type & EVENT_BASE_COUNT_ACTIVE)
 1339|     0|    r += base->event_count_active;
 1340|      |
 1341|     0|  if (type & EVENT_BASE_COUNT_VIRTUAL)
 1342|     0|    r += base->virtual_event_count;
 1343|      |
 1344|     0|  if (type & EVENT_BASE_COUNT_ADDED)
 1345|     0|    r += base->event_count;
 1346|      |
 1347|     0|  EVBASE_RELEASE_LOCK(base, th_base_lock);
 1348|      |
 1349|     0|  return r;
 1350|     0|}
 1351|      |
 1352|      |int
 1353|      |event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
 1354|     0|{
 1355|     0|  int r = 0;
 1356|      |
 1357|     0|  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
 1358|      |
 1359|     0|  if (type & EVENT_BASE_COUNT_ACTIVE) {
 1360|     0|    r += base->event_count_active_max;
 1361|     0|    if (clear)
 1362|     0|      base->event_count_active_max = 0;
 1363|     0|  }
 1364|      |
 1365|     0|  if (type & EVENT_BASE_COUNT_VIRTUAL) {
 1366|     0|    r += base->virtual_event_count_max;
 1367|     0|    if (clear)
 1368|     0|      base->virtual_event_count_max = 0;
 1369|     0|  }
 1370|      |
 1371|     0|  if (type & EVENT_BASE_COUNT_ADDED) {
 1372|     0|    r += base->event_count_max;
 1373|     0|    if (clear)
 1374|     0|      base->event_count_max = 0;
 1375|     0|  }
 1376|      |
 1377|     0|  EVBASE_RELEASE_LOCK(base, th_base_lock);
 1378|      |
 1379|     0|  return r;
 1380|     0|}
 1381|      |
 1382|      |/* Returns true iff we're currently watching any events. */
 1383|      |static int
 1384|      |event_haveevents(struct event_base *base)
 1385|     0|{
 1386|      |  /* Caller must hold th_base_lock */
 1387|     0|  return (base->virtual_event_count > 0 || base->event_count > 0);
 1388|     0|}
 1389|      |
 1390|      |/* "closure" function called when processing active signal events */
 1391|      |static inline void
 1392|      |event_signal_closure(struct event_base *base, struct event *ev)
 1393|     0|{
 1394|     0|#if defined(__clang__)
 1395|      |#elif defined(__GNUC__)
 1396|      |#pragma GCC diagnostic push
 1397|      |/* NOTE: it is better to avoid such code altogether, by using a separate
 1398|      | * variable to break the loop in the event structure, but for now this code is safe
 1399|      | * */
 1400|      |#pragma GCC diagnostic ignored "-Wdangling-pointer"
 1401|      |#endif
 1402|      |
 1403|     0|  short ncalls;
 1404|     0|  int should_break;
 1405|      |
 1406|      |  /* Allows deletes to work, see also event_del_nolock_() that has
 1407|      |   * special treatment for signals */
 1408|     0|  ncalls = ev->ev_ncalls;
 1409|     0|  if (ncalls != 0)
 1410|     0|    ev->ev_pncalls = &ncalls;
 1411|     0|  EVBASE_RELEASE_LOCK(base, th_base_lock);
 1412|     0|  while (ncalls) {
 1413|     0|    ncalls--;
 1414|     0|    ev->ev_ncalls = ncalls;
 1415|     0|    if (ncalls == 0)
 1416|     0|      ev->ev_pncalls = NULL;
 1417|     0|    (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
 1418|      |
 1419|     0|    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
 1420|     0|    should_break = base->event_break;
 1421|     0|    EVBASE_RELEASE_LOCK(base, th_base_lock);
 1422|      |
 1423|     0|    if (should_break) {
 1424|     0|      if (ncalls != 0)
 1425|     0|        ev->ev_pncalls = NULL;
 1426|     0|      return;
 1427|     0|    }
 1428|     0|  }
 1429|      |
 1430|     0|#if defined(__clang__)
 1431|      |#elif defined(__GNUC__)
 1432|      |#pragma GCC diagnostic pop
 1433|      |#endif
 1434|     0|}
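
The ncalls/ev_pncalls dance above exists because one delivery of a signal event may stand for several raised signals, and the handler may delete the event mid-loop. From the user's side this is just an ordinary signal event; a minimal sketch:

    /* Sketch: a SIGINT event whose handler stops the loop via event_break. */
    static void
    on_sigint(evutil_socket_t sig, short what, void *arg)
    {
        struct event_base *base = arg;
        event_base_loopbreak(base);  /* sets the event_break checked above */
    }

    struct event *sigev = evsignal_new(base, SIGINT, on_sigint, base);
    event_add(sigev, NULL);
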
 1435|      |
 1436|      |/* Common timeouts are special timeouts that are handled as queues rather than
 1437|      | * in the minheap.  This is more efficient than the minheap if we happen to
 1438|      | * know that we're going to get several thousands of timeout events all with
 1439|      | * the same timeout value.
 1440|      | *
 1441|      | * Since all our timeout handling code assumes timevals can be copied,
 1442|      | * assigned, etc, we can't use "magic pointer" to encode these common
 1443|      | * timeouts.  Searching through a list to see if every timeout is common could
 1444|      | * also get inefficient.  Instead, we take advantage of the fact that tv_usec
 1445|      | * is 32 bits long, but only uses 20 of those bits (since it can never be over
 1446|      | * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
 1447|      | * of index into the event_base's array of common timeouts.
 1448|      | */
 1449|      |
 1450|     0|#define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
 1451|     0|#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
 1452|     0|#define COMMON_TIMEOUT_IDX_SHIFT 20
 1453|     0|#define COMMON_TIMEOUT_MASK     0xf0000000
 1454|     0|#define COMMON_TIMEOUT_MAGIC    0x50000000
 1455|      |
 1456|      |#define COMMON_TIMEOUT_IDX(tv) \
 1457|     0|  (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
 1458|      |
 1459|      |/** Return true iff 'tv' is a common timeout in 'base' */
 1460|      |static inline int
 1461|      |is_common_timeout(const struct timeval *tv,
 1462|      |    const struct event_base *base)
 1463|     0|{
 1464|     0|  int idx;
 1465|     0|  if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
 1466|     0|    return 0;
 1467|     0|  idx = COMMON_TIMEOUT_IDX(tv);
 1468|     0|  return idx < base->n_common_timeouts;
 1469|     0|}
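
Concretely, the layout is: bits 31-28 hold the 0x5 magic, bits 27-20 the queue index, and bits 19-0 the real microseconds. A worked example with an assumed queue index of 3 and 500000 us:

    /* Sketch: encoding/decoding one common-timeout tv_usec with the masks above. */
    ev_uint32_t usec = 500000 | COMMON_TIMEOUT_MAGIC
        | (3 << COMMON_TIMEOUT_IDX_SHIFT);                  /* 0x5037a120 */
    ev_uint32_t real_usec = usec & MICROSECONDS_MASK;       /* 500000 */
    int idx = (usec & COMMON_TIMEOUT_IDX_MASK) >> COMMON_TIMEOUT_IDX_SHIFT;  /* 3 */
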
 1470|      |
 1471|      |/* True iff tv1 and tv2 have the same common-timeout index, or if neither
 1472|      | * one is a common timeout. */
 1473|      |static inline int
 1474|      |is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
 1475|     0|{
 1476|     0|  return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
 1477|     0|      (tv2->tv_usec & ~MICROSECONDS_MASK);
 1478|     0|}
 1479|      |
 1480|      |/** Requires that 'tv' is a common timeout.  Return the corresponding
 1481|      | * common_timeout_list. */
 1482|      |static inline struct common_timeout_list *
 1483|      |get_common_timeout_list(struct event_base *base, const struct timeval *tv)
 1484|     0|{
 1485|     0|  return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
 1486|     0|}
 1487|      |
 1488|      |#if 0
 1489|      |static inline int
 1490|      |common_timeout_ok(const struct timeval *tv,
 1491|      |    struct event_base *base)
 1492|      |{
 1493|      |  const struct timeval *expect =
 1494|      |      &get_common_timeout_list(base, tv)->duration;
 1495|      |  return tv->tv_sec == expect->tv_sec &&
 1496|      |      tv->tv_usec == expect->tv_usec;
 1497|      |}
 1498|      |#endif
 1499|      |
 1500|      |/* Add the timeout for the first event in given common timeout list to the
 1501|      | * event_base's minheap. */
 1502|      |static void
 1503|      |common_timeout_schedule(struct common_timeout_list *ctl,
 1504|      |    const struct timeval *now, struct event *head)
 1505|     0|{
 1506|     0|  struct timeval timeout = head->ev_timeout;
 1507|     0|  timeout.tv_usec &= MICROSECONDS_MASK;
 1508|     0|  event_add_nolock_(&ctl->timeout_event, &timeout, 1);
 1509|     0|}
 1510|      |
 1511|      |/* Callback: invoked when the timeout for a common timeout queue triggers.
 1512|      | * This means that (at least) the first event in that queue should be run,
 1513|      | * and the timeout should be rescheduled if there are more events. */
 1514|      |static void
 1515|      |common_timeout_callback(evutil_socket_t fd, short what, void *arg)
 1516|     0|{
 1517|     0|  struct timeval now;
 1518|     0|  struct common_timeout_list *ctl = arg;
 1519|     0|  struct event_base *base = ctl->base;
 1520|     0|  struct event *ev = NULL;
 1521|     0|  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
 1522|     0|  gettime(base, &now);
 1523|     0|  while (1) {
 1524|     0|    int was_active;
 1525|     0|    ev = TAILQ_FIRST(&ctl->events);
 1526|     0|    if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
 1527|     0|        (ev->ev_timeout.tv_sec == now.tv_sec &&
 1528|     0|      (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
 1529|     0|      break;
 1530|     0|    was_active = ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER);
 1531|     0|    if (!was_active)
 1532|     0|      event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
 1533|     0|    else
 1534|     0|      event_queue_remove_timeout(base, ev);
 1535|     0|    event_active_nolock_(ev, EV_TIMEOUT, 1);
 1536|     0|  }
 1537|     0|  if (ev)
 1538|     0|    common_timeout_schedule(ctl, &now, ev);
 1539|     0|  EVBASE_RELEASE_LOCK(base, th_base_lock);
 1540|     0|}
 1541|      |
 1542|     0|#define MAX_COMMON_TIMEOUTS 256
 1543|      |
 1544|      |const struct timeval *
 1545|      |event_base_init_common_timeout(struct event_base *base,
 1546|      |    const struct timeval *duration)
 1547|     0|{
 1548|     0|  int i;
 1549|     0|  struct timeval tv;
 1550|     0|  const struct timeval *result=NULL;
 1551|     0|  struct common_timeout_list *new_ctl;
 1552|      |
 1553|     0|  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
 1554|     0|  if (duration->tv_usec > 1000000) {
 1555|     0|    memcpy(&tv, duration, sizeof(struct timeval));
 1556|     0|    if (is_common_timeout(duration, base))
 1557|     0|      tv.tv_usec &= MICROSECONDS_MASK;
 1558|     0|    tv.tv_sec += tv.tv_usec / 1000000;
 1559|     0|    tv.tv_usec %= 1000000;
 1560|     0|    duration = &tv;
 1561|     0|  }
 1562|     0|  for (i = 0; i < base->n_common_timeouts; ++i) {
 1563|     0|    const struct common_timeout_list *ctl =
 1564|     0|        base->common_timeout_queues[i];
 1565|     0|    if (duration->tv_sec == ctl->duration.tv_sec &&
 1566|     0|        duration->tv_usec ==
 1567|     0|        (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
 1568|     0|      EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
 1569|     0|      result = &ctl->duration;
 1570|     0|      goto done;
 1571|     0|    }
 1572|     0|  }
 1573|     0|  if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
 1574|     0|    event_warnx("%s: Too many common timeouts already in use; "
 1575|     0|        "we only support %d per event_base", __func__,
 1576|     0|        MAX_COMMON_TIMEOUTS);
 1577|     0|    goto done;
 1578|     0|  }
 1579|     0|  if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
 1580|     0|    int n = base->n_common_timeouts < 16 ? 16 :
 1581|     0|        base->n_common_timeouts*2;
 1582|     0|    struct common_timeout_list **newqueues =
 1583|     0|        mm_realloc(base->common_timeout_queues,
 1584|     0|      n*sizeof(struct common_timeout_queue *));
 1585|     0|    if (!newqueues) {
 1586|     0|      event_warn("%s: realloc",__func__);
 1587|     0|      goto done;
 1588|     0|    }
 1589|     0|    base->n_common_timeouts_allocated = n;
 1590|     0|    base->common_timeout_queues = newqueues;
 1591|     0|  }
 1592|     0|  new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
 1593|     0|  if (!new_ctl) {
 1594|     0|    event_warn("%s: calloc",__func__);
 1595|     0|    goto done;
 1596|     0|  }
 1597|     0|  TAILQ_INIT(&new_ctl->events);
 1598|     0|  new_ctl->duration.tv_sec = duration->tv_sec;
 1599|     0|  new_ctl->duration.tv_usec =
 1600|     0|      duration->tv_usec | COMMON_TIMEOUT_MAGIC |
 1601|     0|      (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
 1602|     0|  evtimer_assign(&new_ctl->timeout_event, base,
 1603|     0|      common_timeout_callback, new_ctl);
 1604|     0|  new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
 1605|     0|  event_priority_set(&new_ctl->timeout_event, 0);
 1606|     0|  new_ctl->base = base;
 1607|     0|  base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
 1608|     0|  result = &new_ctl->duration;
 1609|      |
 1610|     0|done:
 1611|     0|  if (result)
 1612|     0|    EVUTIL_ASSERT(is_common_timeout(result, base));
 1613|      |
 1614|     0|  EVBASE_RELEASE_LOCK(base, th_base_lock);
 1615|     0|  return result;
 1616|     0|}
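
Usage from the caller's side: ask the base once for the magic timeval, then hand that same pointer to every event_add() that shares the duration. A minimal sketch; on_idle and conn are placeholders:

    /* Sketch: many events sharing one 10-second timeout queue. */
    struct timeval tv = { 10, 0 };
    const struct timeval *common = event_base_init_common_timeout(base, &tv);

    struct event *timeout_ev = evtimer_new(base, on_idle, conn);
    event_add(timeout_ev, common ? common : &tv);  /* fall back to a plain timeout */
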
 1617|      |
 1618|      |/* Closure function invoked when we're activating a persistent event. */
 1619|      |static inline void
 1620|      |event_persist_closure(struct event_base *base, struct event *ev)
 1621|     0|{
 1622|     0|  void (*evcb_callback)(evutil_socket_t, short, void *);
 1623|      |
 1624|      |  // Other fields of *ev that must be stored before executing
 1625|     0|  evutil_socket_t evcb_fd;
 1626|     0|  short evcb_res;
 1627|     0|  void *evcb_arg;
 1628|      |
 1629|      |  /* reschedule the persistent event if we have a timeout. */
 1630|     0|  if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
 1631|      |    /* If there was a timeout, we want it to run at an interval of
 1632|      |     * ev_io_timeout after the last time it was _scheduled_ for,
 1633|      |     * not ev_io_timeout after _now_.  If it fired for another
 1634|      |     * reason, though, the timeout ought to start ticking _now_. */
 1635|     0|    struct timeval run_at, relative_to, delay, now;
 1636|     0|    ev_uint32_t usec_mask = 0;
 1637|     0|    EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
 1638|     0|      &ev->ev_io_timeout));
 1639|     0|    gettime(base, &now);
 1640|     0|    if (is_common_timeout(&ev->ev_timeout, base)) {
 1641|     0|      delay = ev->ev_io_timeout;
 1642|     0|      usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
 1643|     0|      delay.tv_usec &= MICROSECONDS_MASK;
 1644|     0|      if (ev->ev_res & EV_TIMEOUT) {
 1645|     0|        relative_to = ev->ev_timeout;
 1646|     0|        relative_to.tv_usec &= MICROSECONDS_MASK;
 1647|     0|      } else {
 1648|     0|        relative_to = now;
 1649|     0|      }
 1650|     0|    } else {
 1651|     0|      delay = ev->ev_io_timeout;
 1652|     0|      if (ev->ev_res & EV_TIMEOUT) {
 1653|     0|        relative_to = ev->ev_timeout;
 1654|     0|      } else {
 1655|     0|        relative_to = now;
 1656|     0|      }
 1657|     0|    }
 1658|     0|    evutil_timeradd(&relative_to, &delay, &run_at);
 1659|     0|    if (evutil_timercmp(&run_at, &now, <)) {
 1660|      |      /* Looks like we missed at least one invocation due to
 1661|      |       * a clock jump, not running the event loop for a
 1662|      |       * while, really slow callbacks, or
 1663|      |       * something. Reschedule relative to now.
 1664|      |       */
 1665|     0|      evutil_timeradd(&now, &delay, &run_at);
 1666|     0|    }
 1667|     0|    run_at.tv_usec |= usec_mask;
 1668|     0|    event_add_nolock_(ev, &run_at, 1);
 1669|     0|  }
 1670|      |
 1671|      |  // Save our callback before we release the lock
 1672|     0|  evcb_callback = ev->ev_callback;
 1673|     0|  evcb_fd = ev->ev_fd;
 1674|     0|  evcb_res = ev->ev_res;
 1675|     0|  evcb_arg = ev->ev_arg;
 1676|      |
 1677|      |  // Release the lock
 1678|     0|  EVBASE_RELEASE_LOCK(base, th_base_lock);
 1679|      |
 1680|      |  // Execute the callback
 1681|     0|  (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
 1682|     0|}
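
The closure above re-arms relative to the previous deadline rather than to "now", which is what keeps an EV_PERSIST timer from drifting when callbacks run late. The user-visible pattern is simply a persistent event added with a timeout; a minimal sketch:

    /* Sketch: a 1-second periodic tick; re-adding is done by the closure above. */
    static void
    on_tick(evutil_socket_t fd, short what, void *arg)
    {
        /* no event_add() needed here: EV_PERSIST + timeout re-arms itself */
    }

    struct timeval one_sec = { 1, 0 };
    struct event *tick = event_new(base, -1, EV_PERSIST, on_tick, NULL);
    event_add(tick, &one_sec);
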
1683
1684
/*
1685
  Helper for event_process_active to process all the events in a single queue,
1686
  releasing the lock as we go.  This function requires that the lock be held
1687
  when it's invoked.  Returns -1 if we get a signal or an event_break that
1688
  means we should stop processing any active events now.  Otherwise returns
1689
  the number of non-internal event_callbacks that we processed.
1690
*/
1691
static int
1692
event_process_active_single_queue(struct event_base *base,
1693
    struct evcallback_list *activeq,
1694
    int max_to_process, const struct timeval *endtime)
1695
0
{
1696
0
  struct event_callback *evcb;
1697
0
  int count = 0;
1698
1699
0
  EVUTIL_ASSERT(activeq != NULL);
1700
1701
0
  for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
1702
0
    struct event *ev = NULL;
1703
0
    if (evcb->evcb_flags & EVLIST_INIT) {
1704
0
      ev = event_callback_to_event(evcb);
1705
1706
0
      if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
1707
0
        event_queue_remove_active(base, evcb);
1708
0
      else
1709
0
        event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1710
0
      event_debug((
1711
0
          "event_process_active: event: %p, %s%s%scall %p",
1712
0
          (void *)ev,
1713
0
          ev->ev_res & EV_READ ? "EV_READ " : " ",
1714
0
          ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1715
0
          ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
1716
0
          (void *)ev->ev_callback));
1717
0
    } else {
1718
0
      event_queue_remove_active(base, evcb);
1719
0
      event_debug(("event_process_active: event_callback %p, "
1720
0
        "closure %d, call %p",
1721
0
        (void *)evcb, evcb->evcb_closure, (void *)evcb->evcb_cb_union.evcb_callback));
1722
0
    }
1723
    // We don't want an infinite loop or use of memory after it is freed.
1724
    // Hence, for the next loop iteration, `event_queue_remove_active` or `event_del_nolock_` must already have removed the current event from the queue at this point.
1725
0
    EVUTIL_ASSERT(evcb != TAILQ_FIRST(activeq));
1726
1727
0
    if (!(evcb->evcb_flags & EVLIST_INTERNAL))
1728
0
      ++count;
1729
1730
1731
0
    base->current_event = evcb;
1732
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
1733
0
    base->current_event_waiters = 0;
1734
0
#endif
1735
1736
0
    switch (evcb->evcb_closure) {
1737
0
    case EV_CLOSURE_EVENT_SIGNAL:
1738
0
      EVUTIL_ASSERT(ev != NULL);
1739
0
      event_signal_closure(base, ev);
1740
0
      break;
1741
0
    case EV_CLOSURE_EVENT_PERSIST:
1742
0
      EVUTIL_ASSERT(ev != NULL);
1743
0
      event_persist_closure(base, ev);
1744
0
      break;
1745
0
    case EV_CLOSURE_EVENT: {
1746
0
      void (*evcb_callback)(evutil_socket_t, short, void *);
1747
0
      short res;
1748
0
      EVUTIL_ASSERT(ev != NULL);
1749
0
      evcb_callback = *ev->ev_callback;
1750
0
      res = ev->ev_res;
1751
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
1752
0
      evcb_callback(ev->ev_fd, res, ev->ev_arg);
1753
0
    }
1754
0
    break;
1755
0
    case EV_CLOSURE_CB_SELF: {
1756
0
      void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
1757
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
1758
0
      evcb_selfcb(evcb, evcb->evcb_arg);
1759
0
    }
1760
0
    break;
1761
0
    case EV_CLOSURE_EVENT_FINALIZE:
1762
0
    case EV_CLOSURE_EVENT_FINALIZE_FREE: {
1763
0
      void (*evcb_evfinalize)(struct event *, void *);
1764
0
      int evcb_closure = evcb->evcb_closure;
1765
0
      EVUTIL_ASSERT(ev != NULL);
1766
0
      base->current_event = NULL;
1767
0
      evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
1768
0
      EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1769
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
1770
0
      event_debug_note_teardown_(ev);
1771
0
      evcb_evfinalize(ev, ev->ev_arg);
1772
0
      if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
1773
0
        mm_free(ev);
1774
0
    }
1775
0
    break;
1776
0
    case EV_CLOSURE_CB_FINALIZE: {
1777
0
      void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
1778
0
      base->current_event = NULL;
1779
0
      EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1780
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
1781
0
      evcb_cbfinalize(evcb, evcb->evcb_arg);
1782
0
    }
1783
0
    break;
1784
0
    default:
1785
0
      EVUTIL_ASSERT(0);
1786
0
    }
1787
1788
0
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1789
0
    base->current_event = NULL;
1790
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
1791
0
    if (base->current_event_waiters) {
1792
0
      base->current_event_waiters = 0;
1793
0
      EVTHREAD_COND_BROADCAST(base->current_event_cond);
1794
0
    }
1795
0
#endif
1796
1797
0
    if (base->event_break)
1798
0
      return -1;
1799
0
    if (count >= max_to_process)
1800
0
      return count;
1801
0
    if (count && endtime) {
1802
0
      struct timeval now;
1803
0
      update_time_cache(base);
1804
0
      gettime(base, &now);
1805
0
      if (evutil_timercmp(&now, endtime, >=))
1806
0
        return count;
1807
0
    }
1808
0
    if (base->event_continue)
1809
0
      break;
1810
0
  }
1811
0
  return count;
1812
0
}
1813
1814
/*
1815
 * Active events are stored in priority queues.  Lower priorities are always
1816
 * processed before higher priorities.  Low priority events can starve high
1817
 * priority ones.
1818
 */
1819
1820
static int
1821
event_process_active(struct event_base *base)
1822
0
{
1823
  /* Caller must hold th_base_lock */
1824
0
  struct evcallback_list *activeq = NULL;
1825
0
  int i, c = 0;
1826
0
  const struct timeval *endtime;
1827
0
  struct timeval tv;
1828
0
  const int maxcb = base->max_dispatch_callbacks;
1829
0
  const int limit_after_prio = base->limit_callbacks_after_prio;
1830
0
  if (base->max_dispatch_time.tv_sec >= 0) {
1831
0
    update_time_cache(base);
1832
0
    gettime(base, &tv);
1833
0
    evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
1834
0
    endtime = &tv;
1835
0
  } else {
1836
0
    endtime = NULL;
1837
0
  }
1838
1839
0
  for (i = 0; i < base->nactivequeues; ++i) {
1840
0
    if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1841
0
      base->event_running_priority = i;
1842
0
      activeq = &base->activequeues[i];
1843
0
      if (i < limit_after_prio)
1844
0
        c = event_process_active_single_queue(base, activeq,
1845
0
            INT_MAX, NULL);
1846
0
      else
1847
0
        c = event_process_active_single_queue(base, activeq,
1848
0
            maxcb, endtime);
1849
0
      if (c < 0) {
1850
0
        goto done;
1851
0
      } else if (c > 0)
1852
0
        break; /* Processed a real event; do not
1853
          * consider lower-priority events */
1854
      /* If we get here, all of the events we processed
1855
       * were internal.  Continue. */
1856
0
    }
1857
0
  }
1858
1859
0
done:
1860
0
  base->event_running_priority = -1;
1861
1862
0
  return c;
1863
0
}
1864
1865
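Because one processed event at priority i makes the loop above skip queues
i+1 and beyond for that iteration, a busy high-priority (low-numbered) queue
really can starve the rest, as the comment warns. A hedged sketch of setting
up priorities with the public API (helper name illustrative):

#include <event2/event.h>

static void setup_priorities(struct event_base *base,
    struct event *urgent, struct event *bulk)
{
  event_base_priority_init(base, 2); /* queues 0 (high) and 1 (low) */
  event_priority_set(urgent, 0);
  event_priority_set(bulk, 1);
}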
/*
1866
 * Wait continuously for events.  We exit only if no events are left.
1867
 */
1868
1869
int
1870
event_dispatch(void)
1871
0
{
1872
0
  return (event_loop(0));
1873
0
}
1874
1875
int
1876
event_base_dispatch(struct event_base *event_base)
1877
0
{
1878
0
  return (event_base_loop(event_base, 0));
1879
0
}
1880
1881
const char *
1882
event_base_get_method(const struct event_base *base)
1883
0
{
1884
0
  EVUTIL_ASSERT(base);
1885
0
  return (base->evsel->name);
1886
0
}
1887
1888
const char *
1889
event_base_get_signal_method(const struct event_base *base)
1890
0
{
1891
0
  EVUTIL_ASSERT(base);
1892
0
  return (base->evsigsel->name);
1893
0
}
1894
1895
/** Callback: used to implement event_base_loopexit by telling the event_base
1896
 * that it's time to exit its loop. */
1897
static void
1898
event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1899
0
{
1900
0
  struct event_base *base = arg;
1901
0
  base->event_gotterm = 1;
1902
0
}
1903
1904
int
1905
event_loopexit(const struct timeval *tv)
1906
0
{
1907
0
  return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1908
0
        current_base, tv));
1909
0
}
1910
1911
int
1912
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1913
0
{
1914
0
  return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1915
0
        event_base, tv));
1916
0
}
1917
1918
int
1919
event_loopbreak(void)
1920
0
{
1921
0
  return (event_base_loopbreak(current_base));
1922
0
}
1923
1924
int
1925
event_base_loopbreak(struct event_base *event_base)
1926
0
{
1927
0
  int r = 0;
1928
0
  if (event_base == NULL)
1929
0
    return (-1);
1930
1931
0
  EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1932
0
  event_base->event_break = 1;
1933
1934
0
  if (EVBASE_NEED_NOTIFY(event_base)) {
1935
0
    r = evthread_notify_base(event_base);
1936
0
  } else {
1937
0
    r = (0);
1938
0
  }
1939
0
  EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1940
0
  return r;
1941
0
}
1942
1943
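The difference between the two exit paths: event_base_loopbreak() sets
event_break, so the loop stops right after the current callback, while
event_base_loopexit() (above) schedules an internal one-shot event, letting
callbacks already activated in this iteration run first. A short sketch:

#include <event2/event.h>

/* Stop immediately after this callback returns. */
static void on_fatal(evutil_socket_t fd, short what, void *arg)
{
  event_base_loopbreak((struct event_base *)arg);
}

/* Let already-active callbacks finish, then exit about 2s from now. */
static void on_shutdown(evutil_socket_t fd, short what, void *arg)
{
  struct timeval grace = { 2, 0 };
  event_base_loopexit((struct event_base *)arg, &grace);
}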
int
1944
event_base_loopcontinue(struct event_base *event_base)
1945
0
{
1946
0
  int r = 0;
1947
0
  if (event_base == NULL)
1948
0
    return (-1);
1949
1950
0
  EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1951
0
  event_base->event_continue = 1;
1952
1953
0
  if (EVBASE_NEED_NOTIFY(event_base)) {
1954
0
    r = evthread_notify_base(event_base);
1955
0
  } else {
1956
0
    r = (0);
1957
0
  }
1958
0
  EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1959
0
  return r;
1960
0
}
1961
1962
int
1963
event_base_got_break(struct event_base *event_base)
1964
0
{
1965
0
  int res;
1966
0
  EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1967
0
  res = event_base->event_break;
1968
0
  EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1969
0
  return res;
1970
0
}
1971
1972
int
1973
event_base_got_exit(struct event_base *event_base)
1974
0
{
1975
0
  int res;
1976
0
  EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1977
0
  res = event_base->event_gotterm;
1978
0
  EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1979
0
  return res;
1980
0
}
1981
1982
/* not thread safe */
1983
1984
int
1985
event_loop(int flags)
1986
0
{
1987
0
  return event_base_loop(current_base, flags);
1988
0
}
1989
1990
int
1991
event_base_loop(struct event_base *base, int flags)
1992
0
{
1993
0
  const struct eventop *evsel = base->evsel;
1994
0
  struct timeval *tv_p;
1995
0
  int res, done, retval = 0;
1996
0
  struct evwatch_prepare_cb_info prepare_info;
1997
0
  struct evwatch_check_cb_info check_info = { NULL };
1998
0
  struct evwatch *watcher;
1999
2000
  /* Grab the lock.  We will release it inside evsel.dispatch, and again
2001
   * as we invoke watchers and user callbacks. */
2002
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2003
2004
0
  if (base->running_loop) {
2005
0
    event_warnx("%s: reentrant invocation.  Only one event_base_loop"
2006
0
        " can run on each event_base at once.", __func__);
2007
0
    EVBASE_RELEASE_LOCK(base, th_base_lock);
2008
0
    return -1;
2009
0
  }
2010
2011
0
  base->running_loop = 1;
2012
2013
0
  clear_time_cache(base);
2014
2015
0
  if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
2016
0
    evsig_set_base_(base);
2017
2018
0
  done = 0;
2019
2020
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
2021
0
  base->th_owner_id = EVTHREAD_GET_ID();
2022
0
#endif
2023
2024
0
  base->event_gotterm = base->event_break = 0;
2025
2026
0
  while (!done) {
2027
0
    struct timeval tv;
2028
2029
0
    base->event_continue = 0;
2030
0
    base->n_deferreds_queued = 0;
2031
2032
    /* Terminate the loop if we have been asked to */
2033
0
    if (base->event_gotterm) {
2034
0
      break;
2035
0
    }
2036
2037
0
    if (base->event_break) {
2038
0
      break;
2039
0
    }
2040
2041
0
    tv_p = &tv;
2042
0
    if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
2043
0
      timeout_next(base, &tv_p);
2044
0
    } else {
2045
      /*
2046
       * if we have active events, we just poll new events
2047
       * without waiting.
2048
       */
2049
0
      evutil_timerclear(&tv);
2050
0
    }
2051
2052
    /* If we have no events, we just exit */
2053
0
    if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
2054
0
        !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
2055
0
      event_debug(("%s: no events registered.", __func__));
2056
0
      retval = 1;
2057
0
      goto done;
2058
0
    }
2059
2060
0
    event_queue_make_later_events_active(base);
2061
2062
    /* Invoke prepare watchers before polling for events */
2063
0
    prepare_info.timeout = tv_p;
2064
0
    TAILQ_FOREACH(watcher, &base->watchers[EVWATCH_PREPARE], next) {
2065
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
2066
0
      (*watcher->callback.prepare)(watcher, &prepare_info, watcher->arg);
2067
0
      EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2068
0
    }
2069
2070
0
    clear_time_cache(base);
2071
2072
0
    res = evsel->dispatch(base, tv_p);
2073
2074
0
    if (res == -1) {
2075
0
      event_debug(("%s: dispatch returned unsuccessfully.",
2076
0
        __func__));
2077
0
      retval = -1;
2078
0
      goto done;
2079
0
    }
2080
2081
0
    update_time_cache(base);
2082
2083
    /* Invoke check watchers after polling for events, and before
2084
     * processing them */
2085
0
    TAILQ_FOREACH(watcher, &base->watchers[EVWATCH_CHECK], next) {
2086
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
2087
0
      (*watcher->callback.check)(watcher, &check_info, watcher->arg);
2088
0
      EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2089
0
    }
2090
2091
0
    timeout_process(base);
2092
2093
0
    if (N_ACTIVE_CALLBACKS(base)) {
2094
0
      int n = event_process_active(base);
2095
0
      if ((flags & EVLOOP_ONCE)
2096
0
          && N_ACTIVE_CALLBACKS(base) == 0
2097
0
          && n != 0)
2098
0
        done = 1;
2099
0
    } else if (flags & EVLOOP_NONBLOCK)
2100
0
      done = 1;
2101
0
  }
2102
0
  event_debug(("%s: asked to terminate loop.", __func__));
2103
2104
0
done:
2105
0
  clear_time_cache(base);
2106
0
  base->running_loop = 0;
2107
2108
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2109
2110
0
  return (retval);
2111
0
}
2112
2113
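The prepare and check hooks invoked around evsel->dispatch() above are
registered through the evwatch API from <event2/watch.h> (included at the
top of this file). A minimal sketch, assuming the evwatch_prepare_new() and
evwatch_check_new() constructors declared there:

#include <event2/event.h>
#include <event2/watch.h>
#include <stdio.h>

static void on_prepare(struct evwatch *w,
    const struct evwatch_prepare_cb_info *info, void *arg)
{
  puts("about to poll the kernel for events");
}

static void on_check(struct evwatch *w,
    const struct evwatch_check_cb_info *info, void *arg)
{
  puts("back from the kernel, before callbacks run");
}

static void install_watchers(struct event_base *base)
{
  evwatch_prepare_new(base, on_prepare, NULL);
  evwatch_check_new(base, on_check, NULL);
}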
/* One-time callback to implement event_base_once: invokes the user callback,
2114
 * then deletes the allocated storage */
2115
static void
2116
event_once_cb(evutil_socket_t fd, short events, void *arg)
2117
0
{
2118
0
  struct event_once *eonce = arg;
2119
2120
0
  (*eonce->cb)(fd, events, eonce->arg);
2121
0
  EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
2122
0
  LIST_REMOVE(eonce, next_once);
2123
0
  EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
2124
0
  event_debug_unassign(&eonce->ev);
2125
0
  mm_free(eonce);
2126
0
}
2127
2128
/* Not threadsafe; schedules the event once on the global current_base. */
2129
int
2130
event_once(evutil_socket_t fd, short events,
2131
    void (*callback)(evutil_socket_t, short, void *),
2132
    void *arg, const struct timeval *tv)
2133
0
{
2134
0
  return event_base_once(current_base, fd, events, callback, arg, tv);
2135
0
}
2136
2137
/* Schedules an event once */
2138
int
2139
event_base_once(struct event_base *base, evutil_socket_t fd, short events,
2140
    void (*callback)(evutil_socket_t, short, void *),
2141
    void *arg, const struct timeval *tv)
2142
0
{
2143
0
  struct event_once *eonce;
2144
0
  int res = 0;
2145
0
  int activate = 0;
2146
2147
0
  if (!base)
2148
0
    return (-1);
2149
2150
  /* We cannot support signals that just fire once, or persistent
2151
   * events. */
2152
0
  if (events & (EV_SIGNAL|EV_PERSIST))
2153
0
    return (-1);
2154
2155
0
  if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
2156
0
    return (-1);
2157
2158
0
  eonce->cb = callback;
2159
0
  eonce->arg = arg;
2160
2161
0
  if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
2162
0
    evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
2163
2164
0
    if (tv == NULL || ! evutil_timerisset(tv)) {
2165
      /* If the event is going to become active immediately,
2166
       * don't put it on the timeout queue.  This is one
2167
       * idiom for scheduling a callback, so let's make
2168
       * it fast (and order-preserving). */
2169
0
      activate = 1;
2170
0
    }
2171
0
  } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
2172
0
    events &= EV_READ|EV_WRITE|EV_CLOSED;
2173
2174
0
    event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
2175
0
  } else {
2176
    /* Bad event combination */
2177
0
    mm_free(eonce);
2178
0
    return (-1);
2179
0
  }
2180
2181
2182
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2183
0
  if (activate)
2184
0
    event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
2185
0
  else
2186
0
    res = event_add_nolock_(&eonce->ev, tv, 0);
2187
2188
0
  if (res != 0) {
2189
0
    mm_free(eonce);
2190
0
    return (res);
2191
0
  } else {
2192
0
    LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
2193
0
  }
2194
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2195
2196
0
  return (0);
2197
0
}
2198
2199
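A typical use of event_base_once() is a fire-and-forget timer: the eonce
bookkeeping above frees itself after the callback runs, so the caller never
holds a struct event at all. A short sketch (helper name illustrative):

#include <event2/event.h>
#include <stdio.h>

static void later(evutil_socket_t fd, short what, void *arg)
{
  printf("one-shot timer fired (%s)\n", (const char *)arg);
  /* the library frees its internal event after we return */
}

static void schedule_once(struct event_base *base)
{
  struct timeval half_sec = { 0, 500000 };
  /* fd = -1 with EV_TIMEOUT: a pure timeout, no socket involved */
  event_base_once(base, -1, EV_TIMEOUT, later, (void *)"hello", &half_sec);
}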
int
2200
/* workaround for -Werror=maybe-uninitialized bug in gcc 11/12 */
2201
#if defined(__GNUC__) && (__GNUC__ == 11 || __GNUC__ == 12)
2202
__attribute__((noinline))
2203
#endif
2204
event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
2205
0
{
2206
0
  if (!base)
2207
0
    base = current_base;
2208
0
  if (arg == &event_self_cbarg_ptr_)
2209
0
    arg = ev;
2210
2211
0
  if (!(events & EV_SIGNAL))
2212
0
    event_debug_assert_socket_nonblocking_(fd);
2213
0
  event_debug_assert_not_added_(ev);
2214
2215
0
  ev->ev_base = base;
2216
2217
0
  ev->ev_callback = callback;
2218
0
  ev->ev_arg = arg;
2219
0
  ev->ev_fd = fd;
2220
0
  ev->ev_events = events;
2221
0
  ev->ev_res = 0;
2222
0
  ev->ev_flags = EVLIST_INIT;
2223
0
  ev->ev_ncalls = 0;
2224
0
  ev->ev_pncalls = NULL;
2225
2226
0
  if (events & EV_SIGNAL) {
2227
0
    if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
2228
0
      event_warnx("%s: EV_SIGNAL is not compatible with "
2229
0
          "EV_READ, EV_WRITE or EV_CLOSED", __func__);
2230
0
      return -1;
2231
0
    }
2232
0
    ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
2233
0
  } else {
2234
0
    if (events & EV_PERSIST) {
2235
0
      evutil_timerclear(&ev->ev_io_timeout);
2236
0
      ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
2237
0
    } else {
2238
0
      ev->ev_closure = EV_CLOSURE_EVENT;
2239
0
    }
2240
0
  }
2241
2242
0
  min_heap_elem_init_(ev);
2243
2244
0
  if (base != NULL) {
2245
    /* by default, we put new events into the middle priority */
2246
0
    ev->ev_pri = base->nactivequeues / 2;
2247
0
  }
2248
2249
0
  event_debug_note_setup_(ev);
2250
2251
0
  return 0;
2252
0
}
2253
2254
int
2255
event_base_set(struct event_base *base, struct event *ev)
2256
0
{
2257
  /* Only innocent events may be assigned to a different base */
2258
0
  if (ev->ev_flags != EVLIST_INIT)
2259
0
    return (-1);
2260
2261
0
  event_debug_assert_is_setup_(ev);
2262
2263
0
  ev->ev_base = base;
2264
0
  ev->ev_pri = base->nactivequeues/2;
2265
2266
0
  return (0);
2267
0
}
2268
2269
void
2270
event_set(struct event *ev, evutil_socket_t fd, short events,
2271
    void (*callback)(evutil_socket_t, short, void *), void *arg)
2272
0
{
2273
0
  int r;
2274
0
  r = event_assign(ev, current_base, fd, events, callback, arg);
2275
0
  EVUTIL_ASSERT(r == 0);
2276
0
}
2277
2278
void *
2279
event_self_cbarg(void)
2280
0
{
2281
0
  return &event_self_cbarg_ptr_;
2282
0
}
2283
2284
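event_self_cbarg() returns the sentinel that event_assign() checks for above
(arg == &event_self_cbarg_ptr_) and replaces with the event itself; that is
how a callback can receive its own struct event, e.g. to free it. A sketch:

#include <event2/event.h>

/* A one-shot event that frees itself from its own callback. */
static void self_freeing_cb(evutil_socket_t fd, short what, void *arg)
{
  struct event *self = arg; /* arg is the event itself */
  event_free(self);
}

static void fire_and_forget(struct event_base *base)
{
  struct timeval tv = { 1, 0 };
  struct event *ev = event_new(base, -1, EV_TIMEOUT,
      self_freeing_cb, event_self_cbarg());
  event_add(ev, &tv);
}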
struct event *
2285
event_base_get_running_event(struct event_base *base)
2286
0
{
2287
0
  struct event *ev = NULL;
2288
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2289
0
  if (EVBASE_IN_THREAD(base)) {
2290
0
    struct event_callback *evcb = base->current_event;
2291
0
    if (evcb->evcb_flags & EVLIST_INIT)
2292
0
      ev = event_callback_to_event(evcb);
2293
0
  }
2294
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2295
0
  return ev;
2296
0
}
2297
2298
struct event *
2299
event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
2300
0
{
2301
0
  struct event *ev;
2302
0
  ev = mm_malloc(sizeof(struct event));
2303
0
  if (ev == NULL)
2304
0
    return (NULL);
2305
0
  if (event_assign(ev, base, fd, events, cb, arg) < 0) {
2306
0
    mm_free(ev);
2307
0
    return (NULL);
2308
0
  }
2309
2310
0
  return (ev);
2311
0
}
2312
2313
void
2314
event_free(struct event *ev)
2315
0
{
2316
  /* This is disabled so that events which have been finalized remain a
2317
   * valid target for event_free(). */
2318
  // event_debug_assert_is_setup_(ev);
2319
2320
  /* make sure that this event won't be coming back to haunt us. */
2321
0
  event_del(ev);
2322
0
  event_debug_note_teardown_(ev);
2323
0
  mm_free(ev);
2324
2325
0
}
2326
2327
void
2328
event_debug_unassign(struct event *ev)
2329
0
{
2330
0
  event_debug_assert_not_added_(ev);
2331
0
  event_debug_note_teardown_(ev);
2332
2333
0
  ev->ev_flags &= ~EVLIST_INIT;
2334
0
}
2335
2336
0
#define EVENT_FINALIZE_FREE_ 0x10000
2337
static int
2338
event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2339
0
{
2340
0
  ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
2341
0
      EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
2342
2343
0
  event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2344
0
  ev->ev_closure = closure;
2345
0
  ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
2346
0
  event_active_nolock_(ev, EV_FINALIZE, 1);
2347
0
  ev->ev_flags |= EVLIST_FINALIZING;
2348
0
  return 0;
2349
0
}
2350
2351
static int
2352
event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2353
0
{
2354
0
  int r;
2355
0
  struct event_base *base = ev->ev_base;
2356
0
  if (EVUTIL_FAILURE_CHECK(!base)) {
2357
0
    event_warnx("%s: event has no event_base set.", __func__);
2358
0
    return -1;
2359
0
  }
2360
2361
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2362
0
  r = event_finalize_nolock_(base, flags, ev, cb);
2363
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2364
0
  return r;
2365
0
}
2366
2367
int
2368
event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2369
0
{
2370
0
  return event_finalize_impl_(flags, ev, cb);
2371
0
}
2372
2373
int
2374
event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2375
0
{
2376
0
  return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
2377
0
}
2378
2379
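Finalization exists for multithreaded teardown, where a plain event_del() +
event_free() could free state that a callback running on the loop thread is
still using: the event is deleted, marked EVLIST_FINALIZING, and the
finalizer runs from the loop itself. A hedged sketch (struct and helper
names are illustrative):

#include <event2/event.h>
#include <stdlib.h>

struct conn { struct event *ev; /* ... per-connection state ... */ };

/* Runs on the loop thread once the event can no longer fire, so tearing
 * down state the callback may have touched is safe here. */
static void conn_finalize(struct event *ev, void *arg)
{
  struct conn *c = arg;
  free(c);
  /* 'ev' itself is freed by the library (EVENT_FINALIZE_FREE_ path). */
}

static void conn_close(struct conn *c)
{
  /* Instead of event_del() + event_free() from another thread: */
  event_free_finalize(0, c->ev, conn_finalize);
}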
void
2380
event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2381
0
{
2382
0
  struct event *ev = NULL;
2383
0
  if (evcb->evcb_flags & EVLIST_INIT) {
2384
0
    ev = event_callback_to_event(evcb);
2385
0
    event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2386
0
  } else {
2387
0
    event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
2388
0
  }
2389
2390
0
  evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
2391
0
  evcb->evcb_cb_union.evcb_cbfinalize = cb;
2392
0
  event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
2393
0
  evcb->evcb_flags |= EVLIST_FINALIZING;
2394
0
}
2395
2396
void
2397
event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2398
0
{
2399
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2400
0
  event_callback_finalize_nolock_(base, flags, evcb, cb);
2401
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2402
0
}
2403
2404
/** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
2405
 * callback will be invoked on *one of them*, after they have *all* been
2406
 * finalized. */
2407
int
2408
event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
2409
0
{
2410
0
  int n_pending = 0, i;
2411
2412
0
  if (base == NULL)
2413
0
    base = current_base;
2414
2415
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2416
2417
0
  event_debug(("%s: %d events finalizing", __func__, n_cbs));
2418
2419
  /* At most one can be currently executing; the rest we just
2420
   * cancel... But we always make sure that the finalize callback
2421
   * runs. */
2422
0
  for (i = 0; i < n_cbs; ++i) {
2423
0
    struct event_callback *evcb = evcbs[i];
2424
0
    if (evcb == base->current_event) {
2425
0
      event_callback_finalize_nolock_(base, 0, evcb, cb);
2426
0
      ++n_pending;
2427
0
    } else {
2428
0
      event_callback_cancel_nolock_(base, evcb, 0);
2429
0
    }
2430
0
  }
2431
2432
0
  if (n_pending == 0) {
2433
    /* Just do the first one. */
2434
0
    event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
2435
0
  }
2436
2437
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2438
0
  return 0;
2439
0
}
2440
2441
/*
2442
 * Sets the priority of an event - if an event is already scheduled
2443
 * changing the priority is going to fail.
2444
 */
2445
2446
int
2447
event_priority_set(struct event *ev, int pri)
2448
0
{
2449
0
  event_debug_assert_is_setup_(ev);
2450
2451
0
  if (ev->ev_flags & EVLIST_ACTIVE)
2452
0
    return (-1);
2453
0
  if (pri < 0 || pri >= ev->ev_base->nactivequeues)
2454
0
    return (-1);
2455
2456
0
  ev->ev_pri = pri;
2457
2458
0
  return (0);
2459
0
}
2460
2461
/*
2462
 * Checks if a specific event is pending or scheduled.
2463
 */
2464
2465
int
2466
event_pending(const struct event *ev, short event, struct timeval *tv)
2467
0
{
2468
0
  int flags = 0;
2469
2470
0
  if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2471
0
    event_warnx("%s: event has no event_base set.", __func__);
2472
0
    return 0;
2473
0
  }
2474
2475
0
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2476
0
  event_debug_assert_is_setup_(ev);
2477
2478
0
  if (ev->ev_flags & EVLIST_INSERTED)
2479
0
    flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2480
0
  if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2481
0
    flags |= ev->ev_res;
2482
0
  if (ev->ev_flags & EVLIST_TIMEOUT)
2483
0
    flags |= EV_TIMEOUT;
2484
2485
0
  event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
2486
2487
  /* See if there is a timeout that we should report */
2488
0
  if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2489
0
    struct timeval tmp = ev->ev_timeout;
2490
0
    tmp.tv_usec &= MICROSECONDS_MASK;
2491
    /* correctly remap to real time */
2492
0
    evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
2493
0
  }
2494
2495
0
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2496
2497
0
  return (flags & event);
2498
0
}
2499
2500
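event_pending() is the usual way to ask "is this event already added, and
for what?"; a non-NULL tv additionally recovers the absolute expiry time,
remapped to the real clock as shown above. A small sketch (helper name
illustrative):

#include <event2/event.h>
#include <stdio.h>

/* (Re)arm 'ev' only if it is not already pending. */
static void ensure_scheduled(struct event *ev)
{
  struct timeval expiry;
  int pending = event_pending(ev, EV_READ | EV_TIMEOUT, &expiry);
  if (pending & EV_TIMEOUT) {
    /* expiry was filled in only because a timeout is pending */
    printf("times out at %ld.%06ld\n",
        (long)expiry.tv_sec, (long)expiry.tv_usec);
  } else if (!pending) {
    struct timeval tv = { 5, 0 };
    event_add(ev, &tv);
  }
}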
int
2501
event_initialized(const struct event *ev)
2502
0
{
2503
0
  if (!(ev->ev_flags & EVLIST_INIT))
2504
0
    return 0;
2505
2506
0
  return 1;
2507
0
}
2508
2509
void
2510
event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2511
0
{
2512
0
  event_debug_assert_is_setup_(event);
2513
2514
0
  if (base_out)
2515
0
    *base_out = event->ev_base;
2516
0
  if (fd_out)
2517
0
    *fd_out = event->ev_fd;
2518
0
  if (events_out)
2519
0
    *events_out = event->ev_events;
2520
0
  if (callback_out)
2521
0
    *callback_out = event->ev_callback;
2522
0
  if (arg_out)
2523
0
    *arg_out = event->ev_arg;
2524
0
}
2525
2526
size_t
2527
event_get_struct_event_size(void)
2528
0
{
2529
0
  return sizeof(struct event);
2530
0
}
2531
2532
evutil_socket_t
2533
event_get_fd(const struct event *ev)
2534
0
{
2535
0
  event_debug_assert_is_setup_(ev);
2536
0
  return ev->ev_fd;
2537
0
}
2538
2539
struct event_base *
2540
event_get_base(const struct event *ev)
2541
0
{
2542
0
  event_debug_assert_is_setup_(ev);
2543
0
  return ev->ev_base;
2544
0
}
2545
2546
short
2547
event_get_events(const struct event *ev)
2548
0
{
2549
0
  event_debug_assert_is_setup_(ev);
2550
0
  return ev->ev_events;
2551
0
}
2552
2553
event_callback_fn
2554
event_get_callback(const struct event *ev)
2555
0
{
2556
0
  event_debug_assert_is_setup_(ev);
2557
0
  return ev->ev_callback;
2558
0
}
2559
2560
void *
2561
event_get_callback_arg(const struct event *ev)
2562
0
{
2563
0
  event_debug_assert_is_setup_(ev);
2564
0
  return ev->ev_arg;
2565
0
}
2566
2567
int
2568
event_get_priority(const struct event *ev)
2569
0
{
2570
0
  event_debug_assert_is_setup_(ev);
2571
0
  return ev->ev_pri;
2572
0
}
2573
2574
int
2575
event_add(struct event *ev, const struct timeval *tv)
2576
0
{
2577
0
  int res;
2578
2579
0
  if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2580
0
    event_warnx("%s: event has no event_base set.", __func__);
2581
0
    return -1;
2582
0
  }
2583
2584
0
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2585
2586
0
  res = event_add_nolock_(ev, tv, 0);
2587
2588
0
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2589
2590
0
  return (res);
2591
0
}
2592
2593
/* Helper callback: wake an event_base from another thread.  This version
2594
 * works by writing a byte to one end of a socketpair, so that the event_base
2595
 * listening on the other end will wake up as the corresponding event
2596
 * triggers */
2597
static int
2598
evthread_notify_base_default(struct event_base *base)
2599
0
{
2600
0
  char buf[1];
2601
0
  ev_ssize_t r;
2602
0
  buf[0] = (char) 0;
2603
#ifdef _WIN32
2604
  r = send(base->th_notify_fd[1], buf, 1, 0);
2605
#else
2606
0
  r = write(base->th_notify_fd[1], buf, 1);
2607
0
#endif
2608
0
  return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
2609
0
}
2610
2611
#ifdef EVENT__HAVE_EVENTFD
2612
/* Helper callback: wake an event_base from another thread.  This version
2613
 * assumes that you have a working eventfd() implementation. */
2614
static int
2615
evthread_notify_base_eventfd(struct event_base *base)
2616
0
{
2617
0
  int efd = base->th_notify_fd[0];
2618
0
  eventfd_t val;
2619
0
  int ret;
2620
0
  for (val=1;;val=1) {
2621
0
    ret = eventfd_write(efd, val);
2622
0
    if (ret < 0) {
2623
      // When EAGAIN occurs, the eventfd counter has hit the maximum value an unsigned 64-bit integer can hold.
2624
      // We need to first drain the eventfd and then write again.
2625
      //
2626
      // Check out https://man7.org/linux/man-pages/man2/eventfd.2.html for details.
2627
0
      if (errno == EAGAIN) {
2628
        // Drained successfully (or another thread already did): retry the write.
2629
0
        if (eventfd_read(efd, &val) == 0 || errno == EAGAIN) {
2630
0
          continue;
2631
0
        }
2632
0
      }
2633
      // Some other error occurred: give up.
2634
0
      ret = -1;
2635
0
    }
2636
0
    break;
2637
0
  }
2638
2639
0
  return ret;
2640
0
}
2641
#endif
2642
2643
2644
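The drain-then-retry dance above is specific to eventfd semantics:
eventfd_write() adds to a 64-bit counter and fails with EAGAIN once it would
overflow, while eventfd_read() resets the counter. A stand-alone Linux
sketch of the same pattern, independent of libevent:

#include <sys/eventfd.h>
#include <errno.h>

static int notify_eventfd(int efd)
{
  eventfd_t val = 1;
  for (;;) {
    if (eventfd_write(efd, val) == 0)
      return 0;
    if (errno != EAGAIN)
      return -1;
    /* Counter saturated: drain it, then retry the write. */
    if (eventfd_read(efd, &val) != 0 && errno != EAGAIN)
      return -1;
    val = 1;
  }
}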
/** Tell the thread currently running the event_loop for base (if any) that it
2645
 * needs to stop waiting in its dispatch function (if it is) and process all
2646
 * active callbacks. */
2647
static int
2648
evthread_notify_base(struct event_base *base)
2649
0
{
2650
0
  EVENT_BASE_ASSERT_LOCKED(base);
2651
0
  if (!base->th_notify_fn)
2652
0
    return -1;
2653
0
  if (base->is_notify_pending)
2654
0
    return 0;
2655
0
  base->is_notify_pending = 1;
2656
0
  return base->th_notify_fn(base);
2657
0
}
2658
2659
/* Implementation function to remove a timeout on a currently pending event.
2660
 */
2661
int
2662
event_remove_timer_nolock_(struct event *ev)
2663
0
{
2664
0
  struct event_base *base = ev->ev_base;
2665
2666
0
  EVENT_BASE_ASSERT_LOCKED(base);
2667
0
  event_debug_assert_is_setup_(ev);
2668
2669
0
  event_debug(("event_remove_timer_nolock: event: %p", (void *)ev));
2670
2671
  /* If it's not pending on a timeout, we don't need to do anything. */
2672
0
  if (ev->ev_flags & EVLIST_TIMEOUT) {
2673
0
    event_queue_remove_timeout(base, ev);
2674
0
    evutil_timerclear(&ev->ev_io_timeout);
2675
0
  }
2676
2677
0
  return (0);
2678
0
}
2679
2680
int
2681
event_remove_timer(struct event *ev)
2682
0
{
2683
0
  int res;
2684
2685
0
  if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2686
0
    event_warnx("%s: event has no event_base set.", __func__);
2687
0
    return -1;
2688
0
  }
2689
2690
0
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2691
2692
0
  res = event_remove_timer_nolock_(ev);
2693
2694
0
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2695
2696
0
  return (res);
2697
0
}
2698
2699
/* Implementation function to add an event.  Works just like event_add,
2700
 * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
2701
 * we treat tv as an absolute time, not as an interval to add to the current
2702
 * time */
2703
int
2704
event_add_nolock_(struct event *ev, const struct timeval *tv,
2705
    int tv_is_absolute)
2706
0
{
2707
0
  struct event_base *base = ev->ev_base;
2708
0
  int res = 0;
2709
0
  int notify = 0;
2710
2711
0
  EVENT_BASE_ASSERT_LOCKED(base);
2712
0
  event_debug_assert_is_setup_(ev);
2713
2714
0
  event_debug((
2715
0
     "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
2716
0
     (void *)ev,
2717
0
     EV_SOCK_ARG(ev->ev_fd),
2718
0
     ev->ev_events & EV_READ ? "EV_READ " : " ",
2719
0
     ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2720
0
     ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
2721
0
     tv ? "EV_TIMEOUT " : " ",
2722
0
     (void *)ev->ev_callback));
2723
2724
0
  EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2725
2726
0
  if (ev->ev_flags & EVLIST_FINALIZING) {
2727
    /* XXXX debug */
2728
0
    return (-1);
2729
0
  }
2730
2731
  /*
2732
   * prepare for timeout insertion further below; if we get a
2733
   * failure on any step, we should not change any state.
2734
   */
2735
0
  if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2736
0
    if (min_heap_reserve_(&base->timeheap,
2737
0
      1 + min_heap_size_(&base->timeheap)) == -1)
2738
0
      return (-1);  /* ENOMEM == errno */
2739
0
  }
2740
2741
  /* If the main thread is currently executing a signal event's
2742
   * callback, and we are not the main thread, then we want to wait
2743
   * until the callback is done before we mess with the event, or else
2744
   * we can race on ev_ncalls and ev_pncalls below. */
2745
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
2746
0
  if (base->current_event == event_to_event_callback(ev) &&
2747
0
      (ev->ev_events & EV_SIGNAL)
2748
0
      && !EVBASE_IN_THREAD(base)) {
2749
0
    ++base->current_event_waiters;
2750
0
    EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2751
0
  }
2752
0
#endif
2753
2754
0
  if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
2755
0
      !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2756
0
    if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2757
0
      res = evmap_io_add_(base, ev->ev_fd, ev);
2758
0
    else if (ev->ev_events & EV_SIGNAL)
2759
0
      res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
2760
0
    if (res != -1)
2761
0
      event_queue_insert_inserted(base, ev);
2762
0
    if (res == 1) {
2763
      /* evmap says we need to notify the main thread. */
2764
0
      notify = 1;
2765
0
      res = 0;
2766
0
    }
2767
0
  }
2768
2769
  /*
2770
   * we should change the timeout state only if the previous event
2771
   * addition succeeded.
2772
   */
2773
0
  if (res != -1 && tv != NULL) {
2774
0
    struct timeval now;
2775
0
    int common_timeout;
2776
#ifdef USE_REINSERT_TIMEOUT
2777
    int was_common;
2778
    int old_timeout_idx;
2779
#endif
2780
2781
    /*
2782
     * for persistent timeout events, we remember the
2783
     * timeout value and re-add the event.
2784
     *
2785
     * If tv_is_absolute, this was already set.
2786
     */
2787
0
    if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
2788
0
      ev->ev_io_timeout = *tv;
2789
2790
0
#ifndef USE_REINSERT_TIMEOUT
2791
0
    if (ev->ev_flags & EVLIST_TIMEOUT) {
2792
0
      event_queue_remove_timeout(base, ev);
2793
0
    }
2794
0
#endif
2795
2796
    /* Check if it is active due to a timeout.  Rescheduling
2797
     * this timeout before the callback can be executed
2798
     * removes it from the active list. */
2799
0
    if ((ev->ev_flags & EVLIST_ACTIVE) &&
2800
0
        (ev->ev_res & EV_TIMEOUT)) {
2801
0
      if (ev->ev_events & EV_SIGNAL) {
2802
        /* See if we are just active executing
2803
         * this event in a loop
2804
         */
2805
0
        if (ev->ev_ncalls && ev->ev_pncalls) {
2806
          /* Abort loop */
2807
0
          *ev->ev_pncalls = 0;
2808
0
        }
2809
0
      }
2810
2811
0
      event_queue_remove_active(base, event_to_event_callback(ev));
2812
0
    }
2813
2814
0
    gettime(base, &now);
2815
2816
0
    common_timeout = is_common_timeout(tv, base);
2817
#ifdef USE_REINSERT_TIMEOUT
2818
    was_common = is_common_timeout(&ev->ev_timeout, base);
2819
    old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2820
#endif
2821
2822
0
    if (tv_is_absolute) {
2823
0
      ev->ev_timeout = *tv;
2824
0
    } else if (common_timeout) {
2825
0
      struct timeval tmp = *tv;
2826
0
      tmp.tv_usec &= MICROSECONDS_MASK;
2827
0
      evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2828
0
      ev->ev_timeout.tv_usec |=
2829
0
          (tv->tv_usec & ~MICROSECONDS_MASK);
2830
0
    } else {
2831
0
      evutil_timeradd(&now, tv, &ev->ev_timeout);
2832
0
    }
2833
2834
0
    event_debug((
2835
0
       "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2836
0
       (void *)ev, (int)tv->tv_sec, (int)tv->tv_usec, (void *)ev->ev_callback));
2837
2838
#ifdef USE_REINSERT_TIMEOUT
2839
    event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2840
#else
2841
0
    event_queue_insert_timeout(base, ev);
2842
0
#endif
2843
2844
0
    if (common_timeout) {
2845
0
      struct common_timeout_list *ctl =
2846
0
          get_common_timeout_list(base, &ev->ev_timeout);
2847
0
      if (ev == TAILQ_FIRST(&ctl->events)) {
2848
0
        common_timeout_schedule(ctl, &now, ev);
2849
0
      }
2850
0
    } else {
2851
0
      struct event* top = NULL;
2852
      /* See if the earliest timeout is now earlier than it
2853
       * was before: if so, we will need to tell the main
2854
       * thread to wake up earlier than it would otherwise.
2855
       * We double check the timeout of the top element to
2856
       * handle time distortions due to system suspension.
2857
       */
2858
0
      if (min_heap_elt_is_top_(ev))
2859
0
        notify = 1;
2860
0
      else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
2861
0
           evutil_timercmp(&top->ev_timeout, &now, <))
2862
0
        notify = 1;
2863
0
    }
2864
0
  }
2865
2866
  /* if we are not in the right thread, we need to wake up the loop */
2867
0
  if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2868
0
    evthread_notify_base(base);
2869
2870
0
  event_debug_note_add_(ev);
2871
2872
0
  return (res);
2873
0
}
2874
2875
static int
2876
event_del_(struct event *ev, int blocking)
2877
0
{
2878
0
  int res;
2879
0
  struct event_base *base = ev->ev_base;
2880
2881
0
  if (EVUTIL_FAILURE_CHECK(!base)) {
2882
0
    event_warnx("%s: event has no event_base set.", __func__);
2883
0
    return -1;
2884
0
  }
2885
2886
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2887
0
  res = event_del_nolock_(ev, blocking);
2888
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2889
2890
0
  return (res);
2891
0
}
2892
2893
int
2894
event_del(struct event *ev)
2895
0
{
2896
0
  return event_del_(ev, EVENT_DEL_AUTOBLOCK);
2897
0
}
2898
2899
int
2900
event_del_block(struct event *ev)
2901
0
{
2902
0
  return event_del_(ev, EVENT_DEL_BLOCK);
2903
0
}
2904
2905
int
2906
event_del_noblock(struct event *ev)
2907
0
{
2908
0
  return event_del_(ev, EVENT_DEL_NOBLOCK);
2909
0
}
2910
2911
/** Helper for event_del: always called with th_base_lock held.
2912
 *
2913
 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2914
 * EVEN_IF_FINALIZING} values. See those for more information.
2915
 */
2916
int
2917
event_del_nolock_(struct event *ev, int blocking)
2918
0
{
2919
0
  struct event_base *base;
2920
0
  int res = 0, notify = 0;
2921
2922
0
  event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2923
0
    (void *)ev, EV_SOCK_ARG(ev->ev_fd), (void *)ev->ev_callback));
2924
2925
  /* An event without a base has not been added */
2926
0
  if (ev->ev_base == NULL)
2927
0
    return (-1);
2928
2929
0
  EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2930
2931
0
  if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
2932
0
    if (ev->ev_flags & EVLIST_FINALIZING) {
2933
      /* XXXX Debug */
2934
0
      return 0;
2935
0
    }
2936
0
  }
2937
2938
0
  base = ev->ev_base;
2939
2940
0
  EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2941
2942
  /* See if we are just active executing this event in a loop */
2943
0
  if (ev->ev_events & EV_SIGNAL) {
2944
0
    if (ev->ev_ncalls && ev->ev_pncalls) {
2945
      /* Abort loop */
2946
0
      *ev->ev_pncalls = 0;
2947
0
    }
2948
0
  }
2949
2950
0
  if (ev->ev_flags & EVLIST_TIMEOUT) {
2951
    /* Notify the base if this was the minimal timeout */
2952
0
    if (min_heap_top_(&base->timeheap) == ev)
2953
0
      notify = 1;
2954
0
    event_queue_remove_timeout(base, ev);
2955
0
  }
2956
2957
0
  if (ev->ev_flags & EVLIST_ACTIVE)
2958
0
    event_queue_remove_active(base, event_to_event_callback(ev));
2959
0
  else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
2960
0
    event_queue_remove_active_later(base, event_to_event_callback(ev));
2961
2962
0
  if (ev->ev_flags & EVLIST_INSERTED) {
2963
0
    event_queue_remove_inserted(base, ev);
2964
0
    if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2965
0
      res = evmap_io_del_(base, ev->ev_fd, ev);
2966
0
    else
2967
0
      res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
2968
0
    if (res == 1) {
2969
      /* evmap says we need to notify the main thread. */
2970
0
      notify = 1;
2971
0
      res = 0;
2972
0
    }
2973
    /* If we do not have events, let's notify event base so it can
2974
     * exit without waiting */
2975
0
    if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base))
2976
0
      notify = 1;
2977
0
  }
2978
2979
  /* if we are not in the right thread, we need to wake up the loop */
2980
0
  if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2981
0
    evthread_notify_base(base);
2982
2983
0
  event_debug_note_del_(ev);
2984
2985
  /* If the main thread is currently executing this event's callback,
2986
   * and we are not the main thread, then we want to wait until the
2987
   * callback is done before returning. That way, when this function
2988
   * returns, it will be safe to free the user-supplied argument.
2989
   */
2990
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
2991
0
  if (blocking != EVENT_DEL_NOBLOCK &&
2992
0
      base->current_event == event_to_event_callback(ev) &&
2993
0
      !EVBASE_IN_THREAD(base) &&
2994
0
      (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
2995
0
    ++base->current_event_waiters;
2996
0
    EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2997
0
  }
2998
0
#endif
2999
3000
0
  return (res);
3001
0
}
3002
3003
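The tail of the function above is why event_del() can block: with thread
support, a deleter on another thread waits until a running callback
finishes, so it is safe to free the callback argument afterwards;
event_del_block() and event_del_noblock() select that behavior explicitly.
A sketch (struct and helper names are illustrative):

#include <event2/event.h>
#include <stdlib.h>

struct ctx { struct event *ev; char *buf; };

/* Called from a thread that is NOT running the event loop. */
static void teardown(struct ctx *c)
{
  /* Waits if c->ev's callback is executing right now, so freeing
   * 'buf' below cannot race with the callback still using it. */
  event_del_block(c->ev);
  event_free(c->ev);
  free(c->buf);
  free(c);
}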
void
3004
event_active(struct event *ev, int res, short ncalls)
3005
0
{
3006
0
  if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
3007
0
    event_warnx("%s: event has no event_base set.", __func__);
3008
0
    return;
3009
0
  }
3010
3011
0
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
3012
3013
0
  event_debug_assert_is_setup_(ev);
3014
3015
0
  event_active_nolock_(ev, res, ncalls);
3016
3017
0
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
3018
0
}
3019
3020
3021
void
3022
event_active_nolock_(struct event *ev, int res, short ncalls)
3023
0
{
3024
0
  struct event_base *base;
3025
3026
0
  event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
3027
0
    (void *)ev, EV_SOCK_ARG(ev->ev_fd), (int)res, (void *)ev->ev_callback));
3028
3029
0
  base = ev->ev_base;
3030
0
  EVENT_BASE_ASSERT_LOCKED(base);
3031
3032
0
  if (ev->ev_flags & EVLIST_FINALIZING) {
3033
    /* XXXX debug */
3034
0
    return;
3035
0
  }
3036
3037
0
  switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
3038
0
  default:
3039
0
  case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
3040
0
    EVUTIL_ASSERT(0);
3041
0
    break;
3042
0
  case EVLIST_ACTIVE:
3043
    /* We get different kinds of events, add them together */
3044
0
    ev->ev_res |= res;
3045
0
    return;
3046
0
  case EVLIST_ACTIVE_LATER:
3047
0
    ev->ev_res |= res;
3048
0
    break;
3049
0
  case 0:
3050
0
    ev->ev_res = res;
3051
0
    break;
3052
0
  }
3053
3054
0
  if (ev->ev_pri < base->event_running_priority)
3055
0
    base->event_continue = 1;
3056
3057
0
  if (ev->ev_events & EV_SIGNAL) {
3058
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
3059
0
    if (base->current_event == event_to_event_callback(ev) &&
3060
0
        !EVBASE_IN_THREAD(base)) {
3061
0
      ++base->current_event_waiters;
3062
0
      EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
3063
0
    }
3064
0
#endif
3065
0
    ev->ev_ncalls = ncalls;
3066
0
    ev->ev_pncalls = NULL;
3067
0
  }
3068
3069
0
  event_callback_activate_nolock_(base, event_to_event_callback(ev));
3070
0
}
3071
3072
void
3073
event_active_later_(struct event *ev, int res)
3074
0
{
3075
0
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
3076
0
  event_active_later_nolock_(ev, res);
3077
0
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
3078
0
}
3079
3080
void
3081
event_active_later_nolock_(struct event *ev, int res)
3082
0
{
3083
0
  struct event_base *base = ev->ev_base;
3084
0
  EVENT_BASE_ASSERT_LOCKED(base);
3085
3086
0
  if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
3087
    /* We get different kinds of events, add them together */
3088
0
    ev->ev_res |= res;
3089
0
    return;
3090
0
  }
3091
3092
0
  ev->ev_res = res;
3093
3094
0
  event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
3095
0
}
3096
3097
int
3098
event_callback_activate_(struct event_base *base,
3099
    struct event_callback *evcb)
3100
0
{
3101
0
  int r;
3102
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3103
0
  r = event_callback_activate_nolock_(base, evcb);
3104
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3105
0
  return r;
3106
0
}
3107
3108
int
3109
event_callback_activate_nolock_(struct event_base *base,
3110
    struct event_callback *evcb)
3111
0
{
3112
0
  int r = 1;
3113
3114
0
  if (evcb->evcb_flags & EVLIST_FINALIZING)
3115
0
    return 0;
3116
3117
0
  switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
3118
0
  default:
3119
0
    EVUTIL_ASSERT(0);
3120
0
    EVUTIL_FALLTHROUGH;
3121
0
  case EVLIST_ACTIVE_LATER:
3122
0
    event_queue_remove_active_later(base, evcb);
3123
0
    r = 0;
3124
0
    break;
3125
0
  case EVLIST_ACTIVE:
3126
0
    return 0;
3127
0
  case 0:
3128
0
    break;
3129
0
  }
3130
3131
0
  event_queue_insert_active(base, evcb);
3132
3133
0
  if (EVBASE_NEED_NOTIFY(base))
3134
0
    evthread_notify_base(base);
3135
3136
0
  return r;
3137
0
}
3138
3139
int
3140
event_callback_activate_later_nolock_(struct event_base *base,
3141
    struct event_callback *evcb)
3142
0
{
3143
0
  if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
3144
0
    return 0;
3145
3146
0
  event_queue_insert_active_later(base, evcb);
3147
0
  if (EVBASE_NEED_NOTIFY(base))
3148
0
    evthread_notify_base(base);
3149
0
  return 1;
3150
0
}
3151
3152
void
3153
event_callback_init_(struct event_base *base,
3154
    struct event_callback *cb)
3155
0
{
3156
0
  memset(cb, 0, sizeof(*cb));
3157
0
  cb->evcb_pri = base->nactivequeues - 1;
3158
0
}
3159
3160
int
3161
event_callback_cancel_(struct event_base *base,
3162
    struct event_callback *evcb)
3163
0
{
3164
0
  int r;
3165
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3166
0
  r = event_callback_cancel_nolock_(base, evcb, 0);
3167
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3168
0
  return r;
3169
0
}
3170
3171
int
3172
event_callback_cancel_nolock_(struct event_base *base,
3173
    struct event_callback *evcb, int even_if_finalizing)
3174
0
{
3175
0
  if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
3176
0
    return 0;
3177
3178
0
  if (evcb->evcb_flags & EVLIST_INIT)
3179
0
    return event_del_nolock_(event_callback_to_event(evcb),
3180
0
        even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
3181
3182
0
  switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
3183
0
  default:
3184
0
  case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
3185
0
    EVUTIL_ASSERT(0);
3186
0
    break;
3187
0
  case EVLIST_ACTIVE:
3188
    /* The callback is on the active queue; remove it there. */
3189
0
    event_queue_remove_active(base, evcb);
3190
0
    return 0;
3191
0
  case EVLIST_ACTIVE_LATER:
3192
0
    event_queue_remove_active_later(base, evcb);
3193
0
    break;
3194
0
  case 0:
3195
0
    break;
3196
0
  }
3197
3198
0
  return 0;
3199
0
}
3200
3201
void
3202
event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
3203
0
{
3204
0
  memset(cb, 0, sizeof(*cb));
3205
0
  cb->evcb_cb_union.evcb_selfcb = fn;
3206
0
  cb->evcb_arg = arg;
3207
0
  cb->evcb_pri = priority;
3208
0
  cb->evcb_closure = EV_CLOSURE_CB_SELF;
3209
0
}
3210
3211
void
3212
event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
3213
0
{
3214
0
  cb->evcb_pri = priority;
3215
0
}
3216
3217
void
3218
event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
3219
0
{
3220
0
  if (!base)
3221
0
    base = current_base;
3222
0
  event_callback_cancel_(base, cb);
3223
0
}
3224
3225
0
#define MAX_DEFERREDS_QUEUED 32
3226
int
3227
event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
3228
0
{
3229
0
  int r = 1;
3230
0
  if (!base)
3231
0
    base = current_base;
3232
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3233
0
  if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3234
0
    r = event_callback_activate_later_nolock_(base, cb);
3235
0
  } else {
3236
0
    r = event_callback_activate_nolock_(base, cb);
3237
0
    if (r) {
3238
0
      ++base->n_deferreds_queued;
3239
0
    }
3240
0
  }
3241
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3242
0
  return r;
3243
0
}
3244
3245
static int
3246
timeout_next(struct event_base *base, struct timeval **tv_p)
3247
0
{
3248
  /* Caller must hold th_base_lock */
3249
0
  struct timeval now;
3250
0
  struct event *ev;
3251
0
  struct timeval *tv = *tv_p;
3252
0
  int res = 0;
3253
3254
0
  ev = min_heap_top_(&base->timeheap);
3255
3256
0
  if (ev == NULL) {
3257
    /* if no time-based events are active wait for I/O */
3258
0
    *tv_p = NULL;
3259
0
    goto out;
3260
0
  }
3261
3262
0
  if (gettime(base, &now) == -1) {
3263
0
    res = -1;
3264
0
    goto out;
3265
0
  }
3266
3267
0
  if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
3268
0
    evutil_timerclear(tv);
3269
0
    goto out;
3270
0
  }
3271
3272
0
  evutil_timersub(&ev->ev_timeout, &now, tv);
3273
3274
0
  EVUTIL_ASSERT(tv->tv_sec >= 0);
3275
0
  EVUTIL_ASSERT(tv->tv_usec >= 0);
3276
0
  event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", (void *)ev, (int)tv->tv_sec, (int)tv->tv_usec));
3277
3278
0
out:
3279
0
  return (res);
3280
0
}
3281
3282
/* Activate every event whose timeout has elapsed. */
3283
static void
3284
timeout_process(struct event_base *base)
3285
0
{
3286
  /* Caller must hold lock. */
3287
0
  struct timeval now;
3288
0
  struct event *ev;
3289
3290
0
  if (min_heap_empty_(&base->timeheap)) {
3291
0
    return;
3292
0
  }
3293
3294
0
  gettime(base, &now);
3295
3296
0
  while ((ev = min_heap_top_(&base->timeheap))) {
3297
0
    int was_active = ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER);
3298
3299
0
    if (evutil_timercmp(&ev->ev_timeout, &now, >))
3300
0
      break;
3301
3302
0
    if (!was_active)
3303
0
      event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
3304
0
    else
3305
0
      event_queue_remove_timeout(base, ev);
3306
3307
0
    event_debug(("timeout_process: event: %p, call %p (was active: %i)",
3308
0
       (void *)ev, (void *)ev->ev_callback, was_active));
3309
0
    event_active_nolock_(ev, EV_TIMEOUT, 1);
3310
0
  }
3311
0
}
3312
3313
#ifndef MAX
3314
0
#define MAX(a,b) (((a)>(b))?(a):(b))
3315
#endif
3316
3317
0
#define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3318
3319
/* These are a fancy way to spell
3320
     if (~flags & EVLIST_INTERNAL)
3321
         base->event_count--/++;
3322
*/
3323
#define DECR_EVENT_COUNT(base,flags) \
3324
0
  ((base)->event_count -= !((flags) & EVLIST_INTERNAL))
3325
0
#define INCR_EVENT_COUNT(base,flags) do {         \
3326
0
  ((base)->event_count += !((flags) & EVLIST_INTERNAL));     \
3327
0
  MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);   \
3328
0
} while (0)
3329
3330
static void
3331
event_queue_remove_inserted(struct event_base *base, struct event *ev)
3332
0
{
3333
0
  EVENT_BASE_ASSERT_LOCKED(base);
3334
0
  if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
3335
0
    event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3336
0
                   (void *)ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
3337
0
    return;
3338
0
  }
3339
0
  DECR_EVENT_COUNT(base, ev->ev_flags);
3340
0
  ev->ev_flags &= ~EVLIST_INSERTED;
3341
0
}
3342
static void
3343
event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
3344
0
{
3345
0
  EVENT_BASE_ASSERT_LOCKED(base);
3346
0
  if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
3347
0
    event_errx(1, "%s: %p not on queue %x", __func__,
3348
0
                          (void *)evcb, EVLIST_ACTIVE);
3349
0
    return;
3350
0
  }
3351
0
  DECR_EVENT_COUNT(base, evcb->evcb_flags);
3352
0
  evcb->evcb_flags &= ~EVLIST_ACTIVE;
3353
0
  base->event_count_active--;
3354
3355
0
  TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
3356
0
      evcb, evcb_active_next);
3357
0
}
3358
static void
3359
event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
3360
0
{
3361
0
  EVENT_BASE_ASSERT_LOCKED(base);
3362
0
  if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
3363
0
    event_errx(1, "%s: %p not on queue %x", __func__,
3364
0
                          (void *)evcb, EVLIST_ACTIVE_LATER);
3365
0
    return;
3366
0
  }
3367
0
  DECR_EVENT_COUNT(base, evcb->evcb_flags);
3368
0
  evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
3369
0
  base->event_count_active--;
3370
3371
0
  TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3372
0
}
3373
static void
3374
event_queue_remove_timeout(struct event_base *base, struct event *ev)
3375
0
{
3376
0
  EVENT_BASE_ASSERT_LOCKED(base);
3377
0
  if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
3378
0
    event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3379
0
                   (void *)ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
3380
0
    return;
3381
0
  }
3382
0
  DECR_EVENT_COUNT(base, ev->ev_flags);
3383
0
  ev->ev_flags &= ~EVLIST_TIMEOUT;
3384
3385
0
  if (is_common_timeout(&ev->ev_timeout, base)) {
3386
0
    struct common_timeout_list *ctl =
3387
0
        get_common_timeout_list(base, &ev->ev_timeout);
3388
0
    TAILQ_REMOVE(&ctl->events, ev,
3389
0
        ev_timeout_pos.ev_next_with_common_timeout);
3390
0
  } else {
3391
0
    min_heap_erase_(&base->timeheap, ev);
3392
0
  }
3393
0
}
3394
3395
#ifdef USE_REINSERT_TIMEOUT
3396
/* Remove and reinsert 'ev' into the timeout queue. */
3397
static void
3398
event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
3399
    int was_common, int is_common, int old_timeout_idx)
3400
{
3401
  struct common_timeout_list *ctl;
3402
  if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
3403
    event_queue_insert_timeout(base, ev);
3404
    return;
3405
  }
3406
3407
  switch ((was_common<<1) | is_common) {
3408
  case 3: /* Changing from one common timeout to another */
3409
    ctl = base->common_timeout_queues[old_timeout_idx];
3410
    TAILQ_REMOVE(&ctl->events, ev,
3411
        ev_timeout_pos.ev_next_with_common_timeout);
3412
    ctl = get_common_timeout_list(base, &ev->ev_timeout);
3413
    insert_common_timeout_inorder(ctl, ev);
3414
    break;
3415
  case 2: /* Was common; is no longer common */
3416
    ctl = base->common_timeout_queues[old_timeout_idx];
3417
    TAILQ_REMOVE(&ctl->events, ev,
3418
        ev_timeout_pos.ev_next_with_common_timeout);
3419
    min_heap_push_(&base->timeheap, ev);
3420
    break;
3421
  case 1: /* Wasn't common; has become common. */
3422
    min_heap_erase_(&base->timeheap, ev);
3423
    ctl = get_common_timeout_list(base, &ev->ev_timeout);
3424
    insert_common_timeout_inorder(ctl, ev);
3425
    break;
3426
  case 0: /* was in heap; is still on heap. */
3427
    min_heap_adjust_(&base->timeheap, ev);
3428
    break;
3429
  default:
3430
    EVUTIL_ASSERT(0); /* unreachable */
3431
    break;
3432
  }
3433
}
3434
#endif
3435
3436
/* Insert 'ev' into the common timeout list 'ctl', in timeout order. */
3437
static void
3438
insert_common_timeout_inorder(struct common_timeout_list *ctl,
3439
    struct event *ev)
3440
0
{
3441
0
  struct event *e;
3442
  /* By all logic, we should just be able to append 'ev' to the end of
3443
   * ctl->events, since the timeout on each 'ev' is set to {the common
3444
   * timeout} + {the time when we add the event}, and so the events
3445
   * should arrive in order of their timeouts.  But just in case
3446
   * there's some wacky threading issue going on, we do a search from
3447
   * the end of 'ctl->events' to find the right insertion point.
3448
   */
3449
0
  TAILQ_FOREACH_REVERSE(e, &ctl->events,
3450
0
      event_list, ev_timeout_pos.ev_next_with_common_timeout) {
3451
    /* This timercmp is a little sneaky, since both ev and e have
3452
     * magic values in tv_usec.  Fortunately, they ought to have
3453
     * the _same_ magic values in tv_usec.  Let's assert for that.
3454
     */
3455
0
    EVUTIL_ASSERT(
3456
0
      is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
3457
0
    if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
3458
0
      TAILQ_INSERT_AFTER(&ctl->events, e, ev,
3459
0
          ev_timeout_pos.ev_next_with_common_timeout);
3460
0
      return;
3461
0
    }
3462
0
  }
3463
0
  TAILQ_INSERT_HEAD(&ctl->events, ev,
3464
0
      ev_timeout_pos.ev_next_with_common_timeout);
3465
0
}
3466
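The ordered insert above is only reachable through the public common-timeout API. A minimal sketch of that entry point, assuming a standard libevent build (error handling omitted for brevity):

#include <event2/event.h>
#include <stdio.h>

static void cb(evutil_socket_t fd, short what, void *arg)
{
	(void)fd; (void)what; (void)arg;
	puts("common timeout fired");
}

int main(void)
{
	struct event_base *base = event_base_new();
	struct timeval five_sec = { 5, 0 };
	/* Returns a "magic" timeval; passing it to event_add() sends the
	 * event through the common-timeout list rather than the min-heap. */
	const struct timeval *common =
	    event_base_init_common_timeout(base, &five_sec);
	struct event *ev = event_new(base, -1, 0, cb, NULL);

	event_add(ev, common);
	event_base_dispatch(base);

	event_free(ev);
	event_base_free(base);
	return 0;
}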
3467
static void
3468
event_queue_insert_inserted(struct event_base *base, struct event *ev)
3469
0
{
3470
0
  EVENT_BASE_ASSERT_LOCKED(base);
3471
3472
0
  if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
3473
0
    event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
3474
0
                   (void *)ev, EV_SOCK_ARG(ev->ev_fd));
3475
0
    return;
3476
0
  }
3477
3478
0
  INCR_EVENT_COUNT(base, ev->ev_flags);
3479
3480
0
  ev->ev_flags |= EVLIST_INSERTED;
3481
0
}
3482
3483
static void
3484
event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
3485
0
{
3486
0
  EVENT_BASE_ASSERT_LOCKED(base);
3487
3488
0
  if (evcb->evcb_flags & EVLIST_ACTIVE) {
3489
    /* Double insertion is possible for active events */
3490
0
    return;
3491
0
  }
3492
3493
0
  INCR_EVENT_COUNT(base, evcb->evcb_flags);
3494
3495
0
  evcb->evcb_flags |= EVLIST_ACTIVE;
3496
3497
0
  base->event_count_active++;
3498
0
  MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3499
0
  EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3500
0
  TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
3501
0
      evcb, evcb_active_next);
3502
0
}
3503
3504
static void
3505
event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
3506
0
{
3507
0
  EVENT_BASE_ASSERT_LOCKED(base);
3508
0
  if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
3509
    /* Double insertion is possible */
3510
0
    return;
3511
0
  }
3512
3513
0
  INCR_EVENT_COUNT(base, evcb->evcb_flags);
3514
0
  evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
3515
0
  base->event_count_active++;
3516
0
  MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3517
0
  EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3518
0
  TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
3519
0
}
3520
3521
static void
3522
event_queue_insert_timeout(struct event_base *base, struct event *ev)
3523
0
{
3524
0
  EVENT_BASE_ASSERT_LOCKED(base);
3525
3526
0
  if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3527
0
    event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3528
0
                   (void *)ev, EV_SOCK_ARG(ev->ev_fd));
3529
0
    return;
3530
0
  }
3531
3532
0
  INCR_EVENT_COUNT(base, ev->ev_flags);
3533
3534
0
  ev->ev_flags |= EVLIST_TIMEOUT;
3535
3536
0
  if (is_common_timeout(&ev->ev_timeout, base)) {
3537
0
    struct common_timeout_list *ctl =
3538
0
        get_common_timeout_list(base, &ev->ev_timeout);
3539
0
    insert_common_timeout_inorder(ctl, ev);
3540
0
  } else {
3541
0
    min_heap_push_(&base->timeheap, ev);
3542
0
  }
3543
0
}
3544
3545
static void
3546
event_queue_make_later_events_active(struct event_base *base)
3547
0
{
3548
0
  struct event_callback *evcb;
3549
0
  EVENT_BASE_ASSERT_LOCKED(base);
3550
3551
0
  while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
3552
0
    TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3553
0
    evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3554
0
    EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3555
0
    TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3556
0
    base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
3557
0
  }
3558
0
}
3559
3560
/* Functions for debugging */
3561
3562
const char *
3563
event_get_version(void)
3564
0
{
3565
0
  return (EVENT__VERSION);
3566
0
}
3567
3568
ev_uint32_t
3569
event_get_version_number(void)
3570
0
{
3571
0
  return (EVENT__NUMERIC_VERSION);
3572
0
}
3573
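A minimal sketch pairing these two getters with the compile-time macros from event2/event.h, the usual sanity check when libevent is loaded as a shared library:

#include <event2/event.h>
#include <stdio.h>

int main(void)
{
	printf("compiled against %s (0x%08x)\n",
	    LIBEVENT_VERSION, (unsigned)LIBEVENT_VERSION_NUMBER);
	printf("running against  %s (0x%08x)\n",
	    event_get_version(), (unsigned)event_get_version_number());
	return 0;
}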
3574
/*
3575
 * No thread-safe interface needed - the information should be the same
3576
 * for all threads.
3577
 */
3578
3579
const char *
3580
event_get_method(void)
3581
0
{
3582
0
  return (current_base->evsel->name);
3583
0
}
3584
3585
#ifndef EVENT__DISABLE_MM_REPLACEMENT
3586
static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3587
static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3588
static void (*mm_free_fn_)(void *p) = NULL;
3589
3590
void *
3591
event_mm_malloc_(size_t sz)
3592
119k
{
3593
119k
  if (sz == 0)
3594
0
    return NULL;
3595
3596
119k
  if (mm_malloc_fn_)
3597
0
    return mm_malloc_fn_(sz);
3598
119k
  else
3599
119k
    return malloc(sz);
3600
119k
}
3601
3602
void *
3603
event_mm_calloc_(size_t count, size_t size)
3604
32.5k
{
3605
32.5k
  if (count == 0 || size == 0)
3606
0
    return NULL;
3607
3608
32.5k
  if (mm_malloc_fn_) {
3609
0
    size_t sz = count * size;
3610
0
    void *p = NULL;
3611
0
    if (count > EV_SIZE_MAX / size)
3612
0
      goto error;
3613
0
    p = mm_malloc_fn_(sz);
3614
0
    if (p)
3615
0
      return memset(p, 0, sz);
3616
32.5k
  } else {
3617
32.5k
    void *p = calloc(count, size);
3618
#ifdef _WIN32
3619
    /* Windows calloc doesn't reliably set ENOMEM */
3620
    if (p == NULL)
3621
      goto error;
3622
#endif
3623
32.5k
    return p;
3624
32.5k
  }
3625
3626
0
error:
3627
0
  errno = ENOMEM;
3628
0
  return NULL;
3629
32.5k
}
3630
3631
char *
3632
event_mm_strdup_(const char *str)
3633
11.6k
{
3634
11.6k
  if (!str) {
3635
0
    errno = EINVAL;
3636
0
    return NULL;
3637
0
  }
3638
3639
11.6k
  if (mm_malloc_fn_) {
3640
0
    size_t ln = strlen(str);
3641
0
    void *p = NULL;
3642
0
    if (ln == EV_SIZE_MAX)
3643
0
      goto error;
3644
0
    p = mm_malloc_fn_(ln+1);
3645
0
    if (p)
3646
0
      return memcpy(p, str, ln+1);
3647
0
  } else
3648
#ifdef _WIN32
3649
    return _strdup(str);
3650
#else
3651
11.6k
    return strdup(str);
3652
0
#endif
3653
3654
0
error:
3655
0
  errno = ENOMEM;
3656
0
  return NULL;
3657
11.6k
}
3658
3659
void *
3660
event_mm_realloc_(void *ptr, size_t sz)
3661
4.84k
{
3662
4.84k
  if (mm_realloc_fn_)
3663
0
    return mm_realloc_fn_(ptr, sz);
3664
4.84k
  else
3665
4.84k
    return realloc(ptr, sz);
3666
4.84k
}
3667
3668
void
3669
event_mm_free_(void *ptr)
3670
157k
{
3671
157k
  if (mm_free_fn_)
3672
0
    mm_free_fn_(ptr);
3673
157k
  else
3674
157k
    free(ptr);
3675
157k
}
3676
3677
void
3678
event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3679
      void *(*realloc_fn)(void *ptr, size_t sz),
3680
      void (*free_fn)(void *ptr))
3681
0
{
3682
0
  mm_malloc_fn_ = malloc_fn;
3683
0
  mm_realloc_fn_ = realloc_fn;
3684
0
  mm_free_fn_ = free_fn;
3685
0
}
3686
#endif
3687
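These hooks are process-wide and must be installed before any other libevent call, since memory handed out by one allocator must not be released by another. A minimal sketch of a counting allocator, assuming a build without EVENT__DISABLE_MM_REPLACEMENT:

#include <event2/event.h>
#include <stdio.h>
#include <stdlib.h>

static size_t n_allocs;

static void *count_malloc(size_t sz) { ++n_allocs; return malloc(sz); }
static void *count_realloc(void *p, size_t sz) { return realloc(p, sz); }
static void count_free(void *p) { free(p); }

int main(void)
{
	/* Note that event_mm_calloc_() and event_mm_strdup_() also route
	 * through the malloc replacement, so this counter sees them too. */
	event_set_mem_functions(count_malloc, count_realloc, count_free);

	struct event_base *base = event_base_new();
	event_base_free(base);
	printf("libevent made %zu allocations\n", n_allocs);
	return 0;
}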
3688
#ifdef EVENT__HAVE_EVENTFD
3689
static void
3690
evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3691
0
{
3692
0
  struct event_base *base = arg;
3693
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3694
0
  base->is_notify_pending = 0;
3695
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3696
0
}
3697
#endif
3698
3699
static void
3700
evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3701
0
{
3702
0
  unsigned char buf[1024];
3703
0
  struct event_base *base = arg;
3704
#ifdef _WIN32
3705
  while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3706
    ;
3707
#else
3708
0
  while (read(fd, (char*)buf, sizeof(buf)) > 0)
3709
0
    ;
3710
0
#endif
3711
3712
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3713
0
  base->is_notify_pending = 0;
3714
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3715
0
}
3716
3717
int
3718
evthread_make_base_notifiable(struct event_base *base)
3719
0
{
3720
0
  int r;
3721
0
  if (!base)
3722
0
    return -1;
3723
3724
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3725
0
  r = evthread_make_base_notifiable_nolock_(base);
3726
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3727
0
  return r;
3728
0
}
3729
3730
static int
3731
evthread_make_base_notifiable_nolock_(struct event_base *base)
3732
0
{
3733
0
  void (*cb)(evutil_socket_t, short, void *);
3734
0
  int (*notify)(struct event_base *);
3735
3736
0
  if (base->th_notify_fn != NULL) {
3737
    /* The base is already notifiable: we're doing fine. */
3738
0
    return 0;
3739
0
  }
3740
3741
#if defined(EVENT__HAVE_WORKING_KQUEUE)
3742
  if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3743
    base->th_notify_fn = event_kq_notify_base_;
3744
    /* No need to add an event here; the backend can wake
3745
     * itself up just fine. */
3746
    return 0;
3747
  }
3748
#endif
3749
3750
0
#ifdef EVENT__HAVE_EVENTFD
3751
0
  base->th_notify_fd[0] = evutil_eventfd_(0,
3752
0
      EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
3753
0
  if (base->th_notify_fd[0] >= 0) {
3754
0
    base->th_notify_fd[1] = -1;
3755
0
    notify = evthread_notify_base_eventfd;
3756
0
    cb = evthread_notify_drain_eventfd;
3757
0
  } else
3758
0
#endif
3759
0
  if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
3760
0
    notify = evthread_notify_base_default;
3761
0
    cb = evthread_notify_drain_default;
3762
0
  } else {
3763
0
    return -1;
3764
0
  }
3765
3766
0
  base->th_notify_fn = notify;
3767
3768
  /* prepare an event that we can use for wakeup */
3769
0
  event_assign(&base->th_notify, base, base->th_notify_fd[0],
3770
0
         EV_READ|EV_PERSIST|EV_ET, cb, base);
3771
3772
  /* we need to mark this as an internal event */
3773
0
  base->th_notify.ev_flags |= EVLIST_INTERNAL;
3774
0
  event_priority_set(&base->th_notify, 0);
3775
3776
0
  return event_add_nolock_(&base->th_notify, NULL, 0);
3777
0
}
3778
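The notification plumbing above (eventfd where available, otherwise an internal pipe) is what lets another thread interrupt a blocked event_base_dispatch(). A minimal sketch of the cross-thread wakeup it enables, assuming a libevent built with pthreads support (link with -levent_pthreads):

#include <event2/event.h>
#include <event2/thread.h>
#include <pthread.h>
#include <stdio.h>

static void cb(evutil_socket_t fd, short what, void *arg)
{
	(void)fd; (void)what;
	puts("woken from another thread");
	event_base_loopbreak(arg);
}

static void *poker(void *ev)
{
	/* Runs off the loop thread; libevent uses th_notify_fn to wake
	 * the dispatching thread so the callback runs promptly. */
	event_active(ev, EV_TIMEOUT, 1);
	return NULL;
}

int main(void)
{
	evthread_use_pthreads(); /* must precede event_base_new() */
	struct event_base *base = event_base_new();
	struct event *ev = event_new(base, -1, EV_PERSIST, cb, base);
	pthread_t t;

	event_add(ev, NULL);
	pthread_create(&t, NULL, poker, ev);
	event_base_dispatch(base);
	pthread_join(t, NULL);

	event_free(ev);
	event_base_free(base);
	return 0;
}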
3779
int
3780
event_base_foreach_event_nolock_(struct event_base *base,
3781
    event_base_foreach_event_cb fn, void *arg)
3782
0
{
3783
0
  int r, i;
3784
0
  size_t u;
3785
0
  struct event *ev;
3786
3787
  /* Start out with all the EVLIST_INSERTED events. */
3788
0
  if ((r = evmap_foreach_event_(base, fn, arg)))
3789
0
    return r;
3790
3791
  /* Okay, now we deal with those events that have timeouts and are in
3792
   * the min-heap. */
3793
0
  for (u = 0; u < base->timeheap.n; ++u) {
3794
0
    ev = base->timeheap.p[u];
3795
0
    if (ev->ev_flags & EVLIST_INSERTED) {
3796
      /* we already processed this one */
3797
0
      continue;
3798
0
    }
3799
0
    if ((r = fn(base, ev, arg)))
3800
0
      return r;
3801
0
  }
3802
3803
  /* Now for the events in one of the common-timeout queues,
3804
   * rather than in the min-heap. */
3805
0
  for (i = 0; i < base->n_common_timeouts; ++i) {
3806
0
    struct common_timeout_list *ctl =
3807
0
        base->common_timeout_queues[i];
3808
0
    TAILQ_FOREACH(ev, &ctl->events,
3809
0
        ev_timeout_pos.ev_next_with_common_timeout) {
3810
0
      if (ev->ev_flags & EVLIST_INSERTED) {
3811
        /* we already processed this one */
3812
0
        continue;
3813
0
      }
3814
0
      if ((r = fn(base, ev, arg)))
3815
0
        return r;
3816
0
    }
3817
0
  }
3818
3819
  /* Finally, we deal with all the active events that we haven't touched
3820
   * yet. */
3821
0
  for (i = 0; i < base->nactivequeues; ++i) {
3822
0
    struct event_callback *evcb;
3823
0
    TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3824
0
      if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
3825
        /* This isn't an event (EVLIST_INIT is clear), or
3826
         * we already processed it (EVLIST_INSERTED or
3827
         * EVLIST_TIMEOUT is set). */
3828
0
        continue;
3829
0
      }
3830
0
      ev = event_callback_to_event(evcb);
3831
0
      if ((r = fn(base, ev, arg)))
3832
0
        return r;
3833
0
    }
3834
0
  }
3835
3836
0
  return 0;
3837
0
}
3838
3839
/* Helper for event_base_dump_events: called on each event in the event base;
3840
 * dumps only the inserted events. */
3841
static int
3842
dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
3843
0
{
3844
0
  FILE *output = arg;
3845
0
  const char *gloss = (e->ev_events & EV_SIGNAL) ?
3846
0
      "sig" : "fd ";
3847
3848
0
  if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
3849
0
    return 0;
3850
3851
0
  fprintf(output, "  %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s%s",
3852
0
      (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
3853
0
      (e->ev_events&EV_READ)?" Read":"",
3854
0
      (e->ev_events&EV_WRITE)?" Write":"",
3855
0
      (e->ev_events&EV_CLOSED)?" EOF":"",
3856
0
      (e->ev_events&EV_SIGNAL)?" Signal":"",
3857
0
      (e->ev_events&EV_PERSIST)?" Persist":"",
3858
0
      (e->ev_events&EV_ET)?" ET":"",
3859
0
      (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
3860
0
  if (e->ev_flags & EVLIST_TIMEOUT) {
3861
0
    struct timeval tv;
3862
0
    tv.tv_sec = e->ev_timeout.tv_sec;
3863
0
    tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
3864
0
    evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
3865
0
    fprintf(output, " Timeout=%ld.%06d",
3866
0
        (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
3867
0
  }
3868
0
  fputc('\n', output);
3869
3870
0
  return 0;
3871
0
}
3872
3873
/* Helper for event_base_dump_events: called on each event in the event base;
3874
 * dumps only the active events. */
3875
static int
3876
dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
3877
0
{
3878
0
  FILE *output = arg;
3879
0
  const char *gloss = (e->ev_events & EV_SIGNAL) ?
3880
0
      "sig" : "fd ";
3881
3882
0
  if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
3883
0
    return 0;
3884
3885
0
  fprintf(output, "  %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
3886
0
      (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
3887
0
      (e->ev_res&EV_READ)?" Read":"",
3888
0
      (e->ev_res&EV_WRITE)?" Write":"",
3889
0
      (e->ev_res&EV_CLOSED)?" EOF":"",
3890
0
      (e->ev_res&EV_SIGNAL)?" Signal":"",
3891
0
      (e->ev_res&EV_TIMEOUT)?" Timeout":"",
3892
0
      (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
3893
0
      (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
3894
3895
0
  return 0;
3896
0
}
3897
3898
int
3899
event_base_foreach_event(struct event_base *base,
3900
    event_base_foreach_event_cb fn, void *arg)
3901
0
{
3902
0
  int r;
3903
0
  if ((!fn) || (!base)) {
3904
0
    return -1;
3905
0
  }
3906
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3907
0
  r = event_base_foreach_event_nolock_(base, fn, arg);
3908
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3909
0
  return r;
3910
0
}
3911
3912
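A minimal sketch of this iteration API from the caller's side: count the pending events, then let event_base_dump_events() (defined next) render them through the two dump helpers above:

#include <event2/event.h>
#include <stdio.h>

static void noop(evutil_socket_t fd, short what, void *arg)
{
	(void)fd; (void)what; (void)arg;
}

static int count_cb(const struct event_base *base, const struct event *e,
    void *arg)
{
	(void)base; (void)e;
	++*(int *)arg;
	return 0; /* returning nonzero would stop the walk early */
}

int main(void)
{
	struct event_base *base = event_base_new();
	struct event *ev = event_new(base, -1, 0, noop, NULL);
	struct timeval tv = { 60, 0 };
	int n = 0;

	event_add(ev, &tv);
	event_base_foreach_event(base, count_cb, &n);
	printf("%d pending event(s)\n", n);
	event_base_dump_events(base, stdout);

	event_free(ev);
	event_base_free(base);
	return 0;
}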
3913
void
3914
event_base_dump_events(struct event_base *base, FILE *output)
3915
0
{
3916
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3917
0
  fprintf(output, "Inserted events:\n");
3918
0
  event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
3919
3920
0
  fprintf(output, "Active events:\n");
3921
0
  event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
3922
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3923
0
}
3924
3925
void
3926
event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3927
0
{
3928
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3929
3930
  /* Activate any non-timer events */
3931
0
  if (!(events & EV_TIMEOUT)) {
3932
0
    evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3933
0
  } else {
3934
    /* If we want to activate timer events, loop and activate each event with
3935
     * the same fd in both the timeheap and the common-timeout lists */
3936
0
    int i;
3937
0
    size_t u;
3938
0
    struct event *ev;
3939
3940
0
    for (u = 0; u < base->timeheap.n; ++u) {
3941
0
      ev = base->timeheap.p[u];
3942
0
      if (ev->ev_fd == fd) {
3943
0
        event_active_nolock_(ev, EV_TIMEOUT, 1);
3944
0
      }
3945
0
    }
3946
3947
0
    for (i = 0; i < base->n_common_timeouts; ++i) {
3948
0
      struct common_timeout_list *ctl = base->common_timeout_queues[i];
3949
0
      TAILQ_FOREACH(ev, &ctl->events,
3950
0
        ev_timeout_pos.ev_next_with_common_timeout) {
3951
0
        if (ev->ev_fd == fd) {
3952
0
          event_active_nolock_(ev, EV_TIMEOUT, 1);
3953
0
        }
3954
0
      }
3955
0
    }
3956
0
  }
3957
3958
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3959
0
}
3960
3961
void
3962
event_base_active_by_signal(struct event_base *base, int sig)
3963
0
{
3964
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3965
0
  evmap_signal_active_(base, sig, 1);
3966
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3967
0
}
3968
3969
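A POSIX-flavored sketch of event_base_active_by_fd(): fire a registered read event by hand even though no data was written to the socket (on Windows, evutil_socketpair() emulates socketpair over loopback):

#include <event2/event.h>
#include <event2/util.h>
#include <stdio.h>
#ifndef _WIN32
#include <sys/socket.h>
#endif

static void cb(evutil_socket_t fd, short what, void *arg)
{
	(void)fd;
	printf("fired:%s%s\n",
	    (what & EV_READ) ? " Read" : "",
	    (what & EV_TIMEOUT) ? " Timeout" : "");
	event_base_loopbreak(arg);
}

int main(void)
{
	evutil_socket_t fds[2];
	struct event_base *base = event_base_new();
	struct event *ev;

	if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0)
		return 1;
	ev = event_new(base, fds[0], EV_READ, cb, base);
	event_add(ev, NULL);

	/* Nothing was written to fds[1], so the backend would block
	 * forever; activate the registered read event manually instead. */
	event_base_active_by_fd(base, fds[0], EV_READ);
	event_base_dispatch(base);

	event_free(ev);
	event_base_free(base);
	return 0;
}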
3970
void
3971
event_base_add_virtual_(struct event_base *base)
3972
0
{
3973
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3974
0
  base->virtual_event_count++;
3975
0
  MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
3976
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3977
0
}
3978
3979
void
3980
event_base_del_virtual_(struct event_base *base)
3981
0
{
3982
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3983
0
  EVUTIL_ASSERT(base->virtual_event_count > 0);
3984
0
  base->virtual_event_count--;
3985
0
  if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
3986
0
    evthread_notify_base(base);
3987
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3988
0
}
3989
3990
static void
3991
event_free_debug_globals_locks(void)
3992
0
{
3993
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
3994
0
#ifndef EVENT__DISABLE_DEBUG_MODE
3995
0
  if (event_debug_map_lock_ != NULL) {
3996
0
    EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3997
0
    event_debug_map_lock_ = NULL;
3998
0
    evthreadimpl_disable_lock_debugging_();
3999
0
  }
4000
0
#endif /* EVENT__DISABLE_DEBUG_MODE */
4001
0
#endif /* EVENT__DISABLE_THREAD_SUPPORT */
4002
0
  return;
4003
0
}
4004
4005
static void
4006
event_free_debug_globals(void)
4007
0
{
4008
0
  event_free_debug_globals_locks();
4009
0
}
4010
4011
static void
4012
event_free_evsig_globals(void)
4013
0
{
4014
0
  evsig_free_globals_();
4015
0
}
4016
4017
static void
4018
event_free_evutil_globals(void)
4019
0
{
4020
0
  evutil_free_globals_();
4021
0
}
4022
4023
static void
4024
event_free_globals(void)
4025
0
{
4026
0
  event_free_debug_globals();
4027
0
  event_free_evsig_globals();
4028
0
  event_free_evutil_globals();
4029
0
}
4030
4031
void
4032
libevent_global_shutdown(void)
4033
0
{
4034
0
  event_disable_debug_mode();
4035
0
  event_free_globals();
4036
0
}
4037
4038
#ifndef EVENT__DISABLE_THREAD_SUPPORT
4039
int
4040
event_global_setup_locks_(const int enable_locks)
4041
0
{
4042
0
#ifndef EVENT__DISABLE_DEBUG_MODE
4043
0
  EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
4044
0
#endif
4045
0
  if (evsig_global_setup_locks_(enable_locks) < 0)
4046
0
    return -1;
4047
0
  if (evutil_global_setup_locks_(enable_locks) < 0)
4048
0
    return -1;
4049
0
  if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
4050
0
    return -1;
4051
0
  return 0;
4052
0
}
4053
#endif
4054
4055
void
4056
event_base_assert_ok_(struct event_base *base)
4057
0
{
4058
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
4059
0
  event_base_assert_ok_nolock_(base);
4060
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
4061
0
}
4062
4063
void
4064
event_base_assert_ok_nolock_(struct event_base *base)
4065
0
{
4066
0
  int i;
4067
0
  size_t u;
4068
0
  int count;
4069
4070
  /* First do checks on the per-fd and per-signal lists */
4071
0
  evmap_check_integrity_(base);
4072
4073
  /* Check the heap property */
4074
0
  for (u = 1; u < base->timeheap.n; ++u) {
4075
0
    size_t parent = (u - 1) / 2;
4076
0
    struct event *ev, *p_ev;
4077
0
    ev = base->timeheap.p[u];
4078
0
    p_ev = base->timeheap.p[parent];
4079
0
    EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
4080
0
    EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
4081
0
    EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == u);
4082
0
  }
4083
4084
  /* Check that the common timeouts are fine */
4085
0
  for (i = 0; i < base->n_common_timeouts; ++i) {
4086
0
    struct common_timeout_list *ctl = base->common_timeout_queues[i];
4087
0
    struct event *last=NULL, *ev;
4088
4089
0
    EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
4090
4091
0
    TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
4092
0
      if (last)
4093
0
        EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
4094
0
      EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
4095
0
      EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
4096
0
      EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
4097
0
      last = ev;
4098
0
    }
4099
0
  }
4100
4101
  /* Check the active queues. */
4102
0
  count = 0;
4103
0
  for (i = 0; i < base->nactivequeues; ++i) {
4104
0
    struct event_callback *evcb;
4105
0
    EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
4106
0
    TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
4107
0
      EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
4108
0
      EVUTIL_ASSERT(evcb->evcb_pri == i);
4109
0
      ++count;
4110
0
    }
4111
0
  }
4112
4113
0
  {
4114
0
    struct event_callback *evcb;
4115
0
    TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
4116
0
      EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
4117
0
      ++count;
4118
0
    }
4119
0
  }
4120
0
  EVUTIL_ASSERT(count == base->event_count_active);
4121
0
}
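The heap walk above relies on the standard array layout of a binary min-heap: the parent of slot u is (u - 1) / 2, so every non-root slot is either the left child 2p + 1 or the right child 2p + 2 of its parent. A minimal standalone sketch verifying that arithmetic:

#include <assert.h>
#include <stddef.h>

int main(void)
{
	size_t u;
	for (u = 1; u < 1000; ++u) {
		size_t parent = (u - 1) / 2;
		assert(u == 2 * parent + 1 || u == 2 * parent + 2);
	}
	return 0;
}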