Coverage Report

Created: 2025-07-11 06:57

/src/libevent/event.c
Line
Count
Source
1
/*
2
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
3
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
4
 *
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
7
 * are met:
8
 * 1. Redistributions of source code must retain the above copyright
9
 *    notice, this list of conditions and the following disclaimer.
10
 * 2. Redistributions in binary form must reproduce the above copyright
11
 *    notice, this list of conditions and the following disclaimer in the
12
 *    documentation and/or other materials provided with the distribution.
13
 * 3. The name of the author may not be used to endorse or promote products
14
 *    derived from this software without specific prior written permission.
15
 *
16
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 */
27
#include "event2/event-config.h"
28
#include "evconfig-private.h"
29
30
#ifdef _WIN32
31
#include <winsock2.h>
32
#define WIN32_LEAN_AND_MEAN
33
#include <windows.h>
34
#undef WIN32_LEAN_AND_MEAN
35
#endif
36
#include <sys/types.h>
37
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
38
#include <sys/time.h>
39
#endif
40
#include <sys/queue.h>
41
#ifdef EVENT__HAVE_SYS_SOCKET_H
42
#include <sys/socket.h>
43
#endif
44
#include <stdio.h>
45
#include <stdlib.h>
46
#ifdef EVENT__HAVE_UNISTD_H
47
#include <unistd.h>
48
#endif
49
#include <ctype.h>
50
#include <errno.h>
51
#include <signal.h>
52
#include <string.h>
53
#include <time.h>
54
#include <limits.h>
55
#ifdef EVENT__HAVE_FCNTL_H
56
#include <fcntl.h>
57
#endif
58
59
#include "event2/event.h"
60
#include "event2/event_struct.h"
61
#include "event2/event_compat.h"
62
#include "event2/watch.h"
63
#include "event-internal.h"
64
#include "defer-internal.h"
65
#include "evthread-internal.h"
66
#include "event2/thread.h"
67
#include "event2/util.h"
68
#include "log-internal.h"
69
#include "evmap-internal.h"
70
#include "iocp-internal.h"
71
#include "changelist-internal.h"
72
#define HT_NO_CACHE_HASH_VALUES
73
#include "ht-internal.h"
74
#include "util-internal.h"
75
76
77
#ifdef EVENT__HAVE_WORKING_KQUEUE
78
#include "kqueue-internal.h"
79
#endif
80
81
#ifdef EVENT__HAVE_EVENT_PORTS
82
extern const struct eventop evportops;
83
#endif
84
#ifdef EVENT__HAVE_SELECT
85
extern const struct eventop selectops;
86
#endif
87
#ifdef EVENT__HAVE_POLL
88
extern const struct eventop pollops;
89
#endif
90
#ifdef EVENT__HAVE_EPOLL
91
extern const struct eventop epollops;
92
#endif
93
#ifdef EVENT__HAVE_WORKING_KQUEUE
94
extern const struct eventop kqops;
95
#endif
96
#ifdef EVENT__HAVE_DEVPOLL
97
extern const struct eventop devpollops;
98
#endif
99
#ifdef EVENT__HAVE_WEPOLL
100
extern const struct eventop wepollops;
101
#endif
102
#ifdef _WIN32
103
extern const struct eventop win32ops;
104
#endif
105
106
/* Array of backends in order of preference. */
107
static const struct eventop *eventops[] = {
108
#ifdef EVENT__HAVE_EVENT_PORTS
109
  &evportops,
110
#endif
111
#ifdef EVENT__HAVE_WORKING_KQUEUE
112
  &kqops,
113
#endif
114
#ifdef EVENT__HAVE_EPOLL
115
  &epollops,
116
#endif
117
#ifdef EVENT__HAVE_DEVPOLL
118
  &devpollops,
119
#endif
120
#ifdef EVENT__HAVE_POLL
121
  &pollops,
122
#endif
123
#ifdef EVENT__HAVE_SELECT
124
  &selectops,
125
#endif
126
#ifdef _WIN32
127
  &win32ops,
128
#endif
129
#ifdef EVENT__HAVE_WEPOLL
130
  &wepollops,
131
#endif
132
  NULL
133
};
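The array above is only a preference order; the backend actually chosen also depends on the configuration and environment checks done in event_base_new_with_config() further down. A minimal sketch of steering that choice from application code, assuming only the public libevent 2.x headers (the backend name "epoll" is just an illustration):

#include <stdio.h>
#include <event2/event.h>

int main(void)
{
  struct event_config *cfg = event_config_new();
  struct event_base *base;

  if (!cfg)
    return 1;
  /* Skip a backend by name; the selection loop in
   * event_base_new_with_config() passes over it via
   * event_config_is_avoided_method(). */
  event_config_avoid_method(cfg, "epoll");

  base = event_base_new_with_config(cfg);
  event_config_free(cfg);
  if (!base)
    return 1;

  printf("selected backend: %s\n", event_base_get_method(base));
  event_base_free(base);
  return 0;
}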
134
135
/* Global state; deprecated */
136
EVENT2_EXPORT_SYMBOL
137
struct event_base *event_global_current_base_ = NULL;
138
0
#define current_base event_global_current_base_
139
140
/* Global state */
141
142
static void *event_self_cbarg_ptr_ = NULL;
143
144
/* Prototypes */
145
static void event_queue_insert_active(struct event_base *, struct event_callback *);
146
static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
147
static void event_queue_insert_timeout(struct event_base *, struct event *);
148
static void event_queue_insert_inserted(struct event_base *, struct event *);
149
static void event_queue_remove_active(struct event_base *, struct event_callback *);
150
static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
151
static void event_queue_remove_timeout(struct event_base *, struct event *);
152
static void event_queue_remove_inserted(struct event_base *, struct event *);
153
static void event_queue_make_later_events_active(struct event_base *base);
154
155
static int evthread_make_base_notifiable_nolock_(struct event_base *base);
156
static int event_del_(struct event *ev, int blocking);
157
158
#ifdef USE_REINSERT_TIMEOUT
159
/* This code seems buggy; only turn it on if we find out what the trouble is. */
160
static void event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
161
#endif
162
163
static int  event_haveevents(struct event_base *);
164
165
static int  event_process_active(struct event_base *);
166
167
static int  timeout_next(struct event_base *, struct timeval **);
168
static void timeout_process(struct event_base *);
169
170
static inline void  event_signal_closure(struct event_base *, struct event *ev);
171
static inline void  event_persist_closure(struct event_base *, struct event *ev);
172
173
static int  evthread_notify_base(struct event_base *base);
174
175
static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
176
    struct event *ev);
177
178
#ifndef EVENT__DISABLE_DEBUG_MODE
179
/* These functions implement a hashtable of which 'struct event *' structures
180
 * have been setup or added.  We don't want to trust the content of the struct
181
 * event itself, since we're trying to work through cases where an event gets
182
 * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
183
 */
184
185
struct event_debug_entry {
186
  HT_ENTRY(event_debug_entry) node;
187
  const struct event *ptr;
188
  unsigned added : 1;
189
};
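The debug map is only populated when debug mode is switched on before any event or event_base exists. A minimal sketch of enabling it from an application, assuming the public API; the no-op callback is purely illustrative:

#include <event2/event.h>

static void cb(evutil_socket_t fd, short what, void *arg)
{
  (void)fd; (void)what; (void)arg;
}

int main(void)
{
  struct event_base *base;
  struct event *ev;

  /* Must run before any event_base or event is created; otherwise
   * event_debug_mode_too_late triggers an abort. */
  event_enable_debug_mode();

  base = event_base_new();
  ev = event_new(base, -1, 0, cb, NULL); /* recorded as set up */
  event_free(ev);                        /* recorded as torn down */
  event_base_free(base);
  return 0;
}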
190
191
static inline unsigned
192
hash_debug_entry(const struct event_debug_entry *e)
193
0
{
194
  /* We need to do this silliness to convince compilers that we
195
   * honestly mean to cast e->ptr to an integer, and discard any
196
   * part of it that doesn't fit in an unsigned.
197
   */
198
0
  unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
199
  /* Our hashtable implementation is pretty sensitive to low bits,
200
   * and every struct event is over 64 bytes in size, so we can
201
   * just say >>6. */
202
0
  return (u >> 6);
203
0
}
204
205
static inline int
206
eq_debug_entry(const struct event_debug_entry *a,
207
    const struct event_debug_entry *b)
208
0
{
209
0
  return a->ptr == b->ptr;
210
0
}
211
212
int event_debug_mode_on_ = 0;
213
214
215
#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
216
/**
217
 * @brief debug mode variable which is set for any function/structure that needs
218
 *        to be shared across threads (if thread support is enabled).
219
 *
220
 *        When and if evthreads are initialized, this variable will be evaluated,
221
 *        and if set to something other than zero, this means the evthread setup
222
 *        functions were called out of order.
223
 *
224
 *        See: "Locks and threading" in the documentation.
225
 */
226
int event_debug_created_threadable_ctx_ = 0;
227
#endif
228
229
/* Set if it's too late to enable event_debug_mode. */
230
static int event_debug_mode_too_late = 0;
231
#ifndef EVENT__DISABLE_THREAD_SUPPORT
232
static void *event_debug_map_lock_ = NULL;
233
#endif
234
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
235
  HT_INITIALIZER();
236
237
HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
238
    eq_debug_entry)
239
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
240
0
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
241
242
/* record that ev is now setup (that is, ready for an add) */
243
static void event_debug_note_setup_(const struct event *ev)
244
0
{
245
0
  struct event_debug_entry *dent, find;
246
247
0
  if (!event_debug_mode_on_)
248
0
    goto out;
249
250
0
  find.ptr = ev;
251
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
252
0
  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
253
0
  if (dent) {
254
0
    dent->added = 0;
255
0
  } else {
256
0
    dent = mm_malloc(sizeof(*dent));
257
0
    if (!dent)
258
0
      event_err(1,
259
0
          "Out of memory in debugging code");
260
0
    dent->ptr = ev;
261
0
    dent->added = 0;
262
0
    HT_INSERT(event_debug_map, &global_debug_map, dent);
263
0
  }
264
0
  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
265
266
0
out:
267
0
  event_debug_mode_too_late = 1;
268
0
}
269
/* record that ev is no longer setup */
270
static void event_debug_note_teardown_(const struct event *ev)
271
0
{
272
0
  struct event_debug_entry *dent, find;
273
274
0
  if (!event_debug_mode_on_)
275
0
    goto out;
276
277
0
  find.ptr = ev;
278
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
279
0
  dent = HT_REMOVE(event_debug_map, &global_debug_map, &find);
280
0
  if (dent)
281
0
    mm_free(dent);
282
0
  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
283
284
0
out:
285
0
  event_debug_mode_too_late = 1;
286
0
}
287
/* record that ev is now added */
288
static void event_debug_note_add_(const struct event *ev)
289
0
{
290
0
  struct event_debug_entry *dent,find;
291
292
0
  if (!event_debug_mode_on_)
293
0
    goto out;
294
295
0
  find.ptr = ev;
296
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
297
0
  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
298
0
  if (dent) {
299
0
    dent->added = 1;
300
0
  } else {
301
0
    event_errx(EVENT_ERR_ABORT_,
302
0
        "%s: noting an add on a non-setup event %p"
303
0
        " (events: 0x%x, fd: "EV_SOCK_FMT
304
0
        ", flags: 0x%x)",
305
0
        __func__, (void *)ev, ev->ev_events,
306
0
        EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
307
0
  }
308
0
  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
309
310
0
out:
311
0
  event_debug_mode_too_late = 1;
312
0
}
313
/* record that ev is no longer added */
314
static void event_debug_note_del_(const struct event *ev)
315
0
{
316
0
  struct event_debug_entry *dent, find;
317
318
0
  if (!event_debug_mode_on_)
319
0
    goto out;
320
321
0
  find.ptr = ev;
322
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
323
0
  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
324
0
  if (dent) {
325
0
    dent->added = 0;
326
0
  } else {
327
0
    event_errx(EVENT_ERR_ABORT_,
328
0
        "%s: noting a del on a non-setup event %p"
329
0
        " (events: 0x%x, fd: "EV_SOCK_FMT
330
0
        ", flags: 0x%x)",
331
0
        __func__, (void *)ev, ev->ev_events,
332
0
        EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
333
0
  }
334
0
  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
335
336
0
out:
337
0
  event_debug_mode_too_late = 1;
338
0
}
339
/* assert that ev is setup (i.e., okay to add or inspect) */
340
static void event_debug_assert_is_setup_(const struct event *ev)
341
0
{
342
0
  struct event_debug_entry *dent, find;
343
344
0
  if (!event_debug_mode_on_)
345
0
    return;
346
347
0
  find.ptr = ev;
348
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
349
0
  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
350
0
  if (!dent) {
351
0
    event_errx(EVENT_ERR_ABORT_,
352
0
        "%s called on a non-initialized event %p"
353
0
        " (events: 0x%x, fd: "EV_SOCK_FMT
354
0
        ", flags: 0x%x)",
355
0
        __func__, (void *)ev, ev->ev_events,
356
0
        EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
357
0
  }
358
0
  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
359
0
}
360
/* assert that ev is not added (i.e., okay to tear down or set up again) */
361
static void event_debug_assert_not_added_(const struct event *ev)
362
0
{
363
0
  struct event_debug_entry *dent, find;
364
365
0
  if (!event_debug_mode_on_)
366
0
    return;
367
368
0
  find.ptr = ev;
369
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
370
0
  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
371
0
  if (dent && dent->added) {
372
0
    event_errx(EVENT_ERR_ABORT_,
373
0
        "%s called on an already added event %p"
374
0
        " (events: 0x%x, fd: "EV_SOCK_FMT", "
375
0
        "flags: 0x%x)",
376
0
        __func__, (void *)ev, ev->ev_events,
377
0
        EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
378
0
  }
379
0
  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
380
0
}
381
static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd)
382
0
{
383
0
  if (!event_debug_mode_on_)
384
0
    return;
385
0
  if (fd < 0)
386
0
    return;
387
388
0
#ifndef _WIN32
389
0
  {
390
0
    int flags;
391
0
    if ((flags = fcntl(fd, F_GETFL, NULL)) >= 0) {
392
0
      EVUTIL_ASSERT(flags & O_NONBLOCK);
393
0
    }
394
0
  }
395
0
#endif
396
0
}
397
#else
398
static void event_debug_note_setup_(const struct event *ev) { (void)ev; }
399
static void event_debug_note_teardown_(const struct event *ev) { (void)ev; }
400
static void event_debug_note_add_(const struct event *ev) { (void)ev; }
401
static void event_debug_note_del_(const struct event *ev) { (void)ev; }
402
static void event_debug_assert_is_setup_(const struct event *ev) { (void)ev; }
403
static void event_debug_assert_not_added_(const struct event *ev) { (void)ev; }
404
static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd) { (void)fd; }
405
#endif
406
407
#define EVENT_BASE_ASSERT_LOCKED(base)    \
408
0
  EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
409
410
/* How often (in seconds) do we check for changes in wall clock time relative
411
 * to monotonic time?  Set this to -1 for 'never.' */
412
0
#define CLOCK_SYNC_INTERVAL -1
413
414
/** Set 'tp' to the current time according to 'base'.  We must hold the lock
415
 * on 'base'.  If there is a cached time, return it.  Otherwise, use
416
 * clock_gettime or gettimeofday as appropriate to find out the right time.
417
 * Return 0 on success, -1 on failure.
418
 */
419
static int
420
gettime(struct event_base *base, struct timeval *tp)
421
0
{
422
0
  EVENT_BASE_ASSERT_LOCKED(base);
423
424
0
  if (base->tv_cache.tv_sec) {
425
0
    *tp = base->tv_cache;
426
0
    return (0);
427
0
  }
428
429
0
  if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
430
0
    return -1;
431
0
  }
432
433
0
  if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
434
0
      < tp->tv_sec) {
435
0
    struct timeval tv;
436
0
    evutil_gettimeofday(&tv,NULL);
437
0
    evutil_timersub(&tv, tp, &base->tv_clock_diff);
438
0
    base->last_updated_clock_diff = tp->tv_sec;
439
0
  }
440
441
0
  return 0;
442
0
}
443
444
int
445
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
446
0
{
447
0
  int r;
448
0
  if (!base) {
449
0
    base = current_base;
450
0
    if (!current_base)
451
0
      return evutil_gettimeofday(tv, NULL);
452
0
  }
453
454
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
455
0
  if (base->tv_cache.tv_sec == 0) {
456
0
    r = evutil_gettimeofday(tv, NULL);
457
0
  } else {
458
0
    evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
459
0
    r = 0;
460
0
  }
461
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
462
0
  return r;
463
0
}
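A small sketch of how the cached clock is typically consumed from a callback, assuming standard headers; it trades a little precision for avoiding a syscall while the loop is running:

#include <stdio.h>
#include <event2/event.h>

static void on_timer(evutil_socket_t fd, short what, void *arg)
{
  struct event_base *base = arg;
  struct timeval tv;
  (void)fd; (void)what;

  /* Uses base->tv_cache (plus the clock-diff correction) when the loop
   * has a cached time; falls back to gettimeofday() otherwise. */
  event_base_gettimeofday_cached(base, &tv);
  printf("cached now: %ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
}

int main(void)
{
  struct event_base *base = event_base_new();
  struct event *ev = evtimer_new(base, on_timer, base);
  struct timeval one_sec = {1, 0};

  evtimer_add(ev, &one_sec);
  event_base_loop(base, EVLOOP_ONCE);
  event_free(ev);
  event_base_free(base);
  return 0;
}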
464
465
/** Make 'base' have no current cached time. */
466
static inline void
467
clear_time_cache(struct event_base *base)
468
0
{
469
0
  base->tv_cache.tv_sec = 0;
470
0
}
471
472
/** Replace the cached time in 'base' with the current time. */
473
static inline void
474
update_time_cache(struct event_base *base)
475
0
{
476
0
  base->tv_cache.tv_sec = 0;
477
0
  if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
478
0
    gettime(base, &base->tv_cache);
479
0
}
480
481
int
482
event_base_update_cache_time(struct event_base *base)
483
0
{
484
485
0
  if (!base) {
486
0
    base = current_base;
487
0
    if (!current_base)
488
0
      return -1;
489
0
  }
490
491
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
492
0
  if (base->running_loop)
493
0
    update_time_cache(base);
494
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
495
0
  return 0;
496
0
}
497
498
static inline struct event *
499
event_callback_to_event(struct event_callback *evcb)
500
0
{
501
0
  EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
502
0
  return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
503
0
}
504
505
static inline struct event_callback *
506
event_to_event_callback(struct event *ev)
507
0
{
508
0
  return &ev->ev_evcallback;
509
0
}
510
511
struct event_base *
512
event_init(void)
513
0
{
514
0
  struct event_base *base = event_base_new_with_config(NULL);
515
516
0
  if (base == NULL) {
517
0
    event_errx(1, "%s: Unable to construct event_base", __func__);
518
0
    return NULL;
519
0
  }
520
521
0
  current_base = base;
522
523
0
  return (base);
524
0
}
525
526
struct event_base *
527
event_base_new(void)
528
0
{
529
0
  struct event_base *base = NULL;
530
0
  struct event_config *cfg = event_config_new();
531
0
  if (cfg) {
532
0
    base = event_base_new_with_config(cfg);
533
0
    event_config_free(cfg);
534
0
  }
535
0
  return base;
536
0
}
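For reference, a minimal sketch of the canonical lifecycle around event_base_new()/event_base_free(); nothing here is specific to this file beyond the public API it implements:

#include <event2/event.h>

int main(void)
{
  struct event_base *base = event_base_new();
  if (!base)
    return 1;
  /* Add events with event_new()/event_add(), then run the loop.
   * With nothing added, dispatch returns 1 immediately. */
  event_base_dispatch(base);
  event_base_free(base);
  return 0;
}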
537
538
/** Return true iff 'method' is the name of a method that 'cfg' tells us to
539
 * avoid. */
540
static int
541
event_config_is_avoided_method(const struct event_config *cfg,
542
    const char *method)
543
0
{
544
0
  struct event_config_entry *entry;
545
546
0
  TAILQ_FOREACH(entry, &cfg->entries, next) {
547
0
    if (entry->avoid_method != NULL &&
548
0
        strcmp(entry->avoid_method, method) == 0)
549
0
      return (1);
550
0
  }
551
552
0
  return (0);
553
0
}
554
555
/** Return true iff 'method' is disabled according to the environment. */
556
static int
557
event_is_method_disabled(const char *name)
558
0
{
559
0
  char environment[64];
560
0
  int i;
561
562
0
  evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
563
0
  for (i = 8; environment[i] != '\0'; ++i)
564
0
    environment[i] = EVUTIL_TOUPPER_(environment[i]);
565
  /* Note that evutil_getenv_() ignores the environment entirely if
566
   * we're setuid */
567
0
  return (evutil_getenv_(environment) != NULL);
568
0
}
569
570
int
571
event_base_get_features(const struct event_base *base)
572
0
{
573
0
  return base->evsel->features;
574
0
}
575
576
void
577
event_enable_debug_mode(void)
578
0
{
579
0
#ifndef EVENT__DISABLE_DEBUG_MODE
580
0
  if (event_debug_mode_on_)
581
0
    event_errx(1, "%s was called twice!", __func__);
582
0
  if (event_debug_mode_too_late)
583
0
    event_errx(1, "%s must be called *before* creating any events "
584
0
        "or event_bases",__func__);
585
586
0
  event_debug_mode_on_ = 1;
587
588
0
  HT_INIT(event_debug_map, &global_debug_map);
589
0
#endif
590
0
}
591
592
void
593
event_disable_debug_mode(void)
594
0
{
595
0
#ifndef EVENT__DISABLE_DEBUG_MODE
596
0
  struct event_debug_entry **ent, *victim;
597
598
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
599
0
  for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
600
0
    victim = *ent;
601
0
    ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
602
0
    mm_free(victim);
603
0
  }
604
0
  HT_CLEAR(event_debug_map, &global_debug_map);
605
0
  EVLOCK_UNLOCK(event_debug_map_lock_ , 0);
606
607
0
  event_debug_mode_on_  = 0;
608
0
#endif
609
0
}
610
611
struct event_base *
612
event_base_new_with_config(const struct event_config *cfg)
613
0
{
614
0
  int i;
615
0
  struct event_base *base;
616
0
  int should_check_environment;
617
618
0
#ifndef EVENT__DISABLE_DEBUG_MODE
619
0
  event_debug_mode_too_late = 1;
620
0
#endif
621
622
0
  if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
623
0
    event_warn("%s: calloc", __func__);
624
0
    return NULL;
625
0
  }
626
627
0
  if (cfg)
628
0
    base->flags = cfg->flags;
629
630
0
  should_check_environment =
631
0
      !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
632
633
0
  {
634
0
    struct timeval tmp;
635
0
    int precise_time =
636
0
        cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
637
0
    int flags;
638
0
    if (should_check_environment && !precise_time) {
639
0
      precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
640
0
      if (precise_time) {
641
0
        base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
642
0
      }
643
0
    }
644
0
    flags = precise_time ? EV_MONOT_PRECISE : 0;
645
0
    evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
646
647
0
    gettime(base, &tmp);
648
0
  }
649
650
0
  min_heap_ctor_(&base->timeheap);
651
652
0
  base->sig.ev_signal_pair[0] = -1;
653
0
  base->sig.ev_signal_pair[1] = -1;
654
0
  base->th_notify_fd[0] = -1;
655
0
  base->th_notify_fd[1] = -1;
656
657
0
  TAILQ_INIT(&base->active_later_queue);
658
659
0
  evmap_io_initmap_(&base->io);
660
0
  evmap_signal_initmap_(&base->sigmap);
661
0
  event_changelist_init_(&base->changelist);
662
663
0
  base->evbase = NULL;
664
665
0
  if (cfg) {
666
0
    memcpy(&base->max_dispatch_time,
667
0
        &cfg->max_dispatch_interval, sizeof(struct timeval));
668
0
    base->limit_callbacks_after_prio =
669
0
        cfg->limit_callbacks_after_prio;
670
0
  } else {
671
0
    base->max_dispatch_time.tv_sec = -1;
672
0
    base->limit_callbacks_after_prio = 1;
673
0
  }
674
0
  if (cfg && cfg->max_dispatch_callbacks >= 0) {
675
0
    base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
676
0
  } else {
677
0
    base->max_dispatch_callbacks = INT_MAX;
678
0
  }
679
0
  if (base->max_dispatch_callbacks == INT_MAX &&
680
0
      base->max_dispatch_time.tv_sec == -1)
681
0
    base->limit_callbacks_after_prio = INT_MAX;
682
683
0
  for (i = 0; eventops[i] && !base->evbase; i++) {
684
0
    if (cfg != NULL) {
685
      /* determine if this backend should be avoided */
686
0
      if (event_config_is_avoided_method(cfg,
687
0
        eventops[i]->name))
688
0
        continue;
689
0
      if ((eventops[i]->features & cfg->require_features)
690
0
          != cfg->require_features)
691
0
        continue;
692
0
    }
693
694
    /* also obey the environment variables */
695
0
    if (should_check_environment &&
696
0
        event_is_method_disabled(eventops[i]->name))
697
0
      continue;
698
699
0
    base->evsel = eventops[i];
700
701
0
    base->evbase = base->evsel->init(base);
702
0
  }
703
704
0
  if (base->evbase == NULL) {
705
0
    event_warnx("%s: no event mechanism available",
706
0
        __func__);
707
0
    base->evsel = NULL;
708
0
    event_base_free(base);
709
0
    return NULL;
710
0
  }
711
712
0
  if (evutil_getenv_("EVENT_SHOW_METHOD"))
713
0
    event_msgx("libevent using: %s", base->evsel->name);
714
715
  /* allocate a single active event queue */
716
0
  if (event_base_priority_init(base, 1) < 0) {
717
0
    event_base_free(base);
718
0
    return NULL;
719
0
  }
720
721
  /* prepare for threading */
722
723
0
#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
724
0
  event_debug_created_threadable_ctx_ = 1;
725
0
#endif
726
727
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
728
0
  if (EVTHREAD_LOCKING_ENABLED() &&
729
0
      (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
730
0
    int r;
731
0
    EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
732
0
    EVTHREAD_ALLOC_COND(base->current_event_cond);
733
0
    r = evthread_make_base_notifiable(base);
734
0
    if (r<0) {
735
0
      event_warnx("%s: Unable to make base notifiable.", __func__);
736
0
      event_base_free(base);
737
0
      return NULL;
738
0
    }
739
0
  }
740
0
#endif
741
742
#ifdef _WIN32
743
  if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
744
    event_base_start_iocp_(base, cfg->n_cpus_hint);
745
#endif
746
747
  /* initialize watcher lists */
748
0
  for (i = 0; i < EVWATCH_MAX; ++i)
749
0
    TAILQ_INIT(&base->watchers[i]);
750
751
0
  return (base);
752
0
}
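A hedged sketch of exercising the configuration knobs consumed above (flags and required features). EV_FEATURE_O1 is only an example requirement and may leave no usable backend on some platforms, which is why the NULL return must be handled:

#include <stdio.h>
#include <event2/event.h>

int main(void)
{
  struct event_config *cfg = event_config_new();
  struct event_base *base;

  if (!cfg)
    return 1;
  event_config_set_flag(cfg, EVENT_BASE_FLAG_PRECISE_TIMER);
  event_config_require_features(cfg, EV_FEATURE_O1);

  base = event_base_new_with_config(cfg);
  event_config_free(cfg);
  if (!base) {
    fprintf(stderr, "no backend satisfies the requested features\n");
    return 1;
  }
  event_base_free(base);
  return 0;
}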
753
754
int
755
event_base_start_iocp_(struct event_base *base, int n_cpus)
756
0
{
757
#ifdef _WIN32
758
  if (base->iocp)
759
    return 0;
760
  base->iocp = event_iocp_port_launch_(n_cpus);
761
  if (!base->iocp) {
762
    event_warnx("%s: Couldn't launch IOCP", __func__);
763
    return -1;
764
  }
765
  return 0;
766
#else
767
0
  return -1;
768
0
#endif
769
0
}
770
771
void
772
event_base_stop_iocp_(struct event_base *base)
773
0
{
774
#ifdef _WIN32
775
  int rv;
776
777
  if (!base->iocp)
778
    return;
779
  rv = event_iocp_shutdown_(base->iocp, -1);
780
  EVUTIL_ASSERT(rv >= 0);
781
  base->iocp = NULL;
782
#endif
783
0
}
784
785
static int
786
event_base_cancel_single_callback_(struct event_base *base,
787
    struct event_callback *evcb,
788
    int run_finalizers)
789
0
{
790
0
  int result = 0;
791
792
0
  if (evcb->evcb_flags & EVLIST_INIT) {
793
0
    struct event *ev = event_callback_to_event(evcb);
794
0
    if (!(ev->ev_flags & EVLIST_INTERNAL)) {
795
0
      event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
796
0
      result = 1;
797
0
    }
798
0
  } else {
799
0
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
800
0
    event_callback_cancel_nolock_(base, evcb, 1);
801
0
    EVBASE_RELEASE_LOCK(base, th_base_lock);
802
0
    result = 1;
803
0
  }
804
805
0
  if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
806
0
    switch (evcb->evcb_closure) {
807
0
    case EV_CLOSURE_EVENT_FINALIZE:
808
0
    case EV_CLOSURE_EVENT_FINALIZE_FREE: {
809
0
      struct event *ev = event_callback_to_event(evcb);
810
0
      ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
811
0
      if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
812
0
        mm_free(ev);
813
0
      break;
814
0
    }
815
0
    case EV_CLOSURE_CB_FINALIZE:
816
0
      evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
817
0
      break;
818
0
    default:
819
0
      break;
820
0
    }
821
0
  }
822
0
  return result;
823
0
}
824
825
static int event_base_free_queues_(struct event_base *base, int run_finalizers)
826
0
{
827
0
  int deleted = 0, i;
828
829
0
  for (i = 0; i < base->nactivequeues; ++i) {
830
0
    struct event_callback *evcb, *next;
831
0
    for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
832
0
      next = TAILQ_NEXT(evcb, evcb_active_next);
833
0
      deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
834
0
      evcb = next;
835
0
    }
836
0
  }
837
838
0
  {
839
0
    struct event_callback *evcb;
840
0
    while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
841
0
      deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
842
0
    }
843
0
  }
844
845
0
  return deleted;
846
0
}
847
848
static void
849
event_base_free_(struct event_base *base, int run_finalizers)
850
0
{
851
0
  int i;
852
0
  size_t n_deleted=0;
853
0
  struct event *ev;
854
0
  struct evwatch *watcher;
855
  /* XXXX grab the lock? If there is contention when one thread frees
856
   * the base, then the contending thread will be very sad soon. */
857
858
  /* event_base_free(NULL) is how to free the current_base if we
859
   * made it with event_init and forgot to hold a reference to it. */
860
0
  if (base == NULL && current_base)
861
0
    base = current_base;
862
  /* Don't actually free NULL. */
863
0
  if (base == NULL) {
864
0
    event_warnx("%s: no base to free", __func__);
865
0
    return;
866
0
  }
867
  /* XXX(niels) - check for internal events first */
868
869
#ifdef _WIN32
870
  event_base_stop_iocp_(base);
871
#endif
872
873
  /* threading fds if we have them */
874
0
  if (base->th_notify_fd[0] != -1) {
875
0
    event_del(&base->th_notify);
876
0
    EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
877
0
    if (base->th_notify_fd[1] != -1)
878
0
      EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
879
0
    base->th_notify_fd[0] = -1;
880
0
    base->th_notify_fd[1] = -1;
881
0
    event_debug_unassign(&base->th_notify);
882
0
  }
883
884
  /* Delete all non-internal events. */
885
0
  evmap_delete_all_(base);
886
887
0
  while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
888
0
    event_del(ev);
889
0
    ++n_deleted;
890
0
  }
891
0
  for (i = 0; i < base->n_common_timeouts; ++i) {
892
0
    struct common_timeout_list *ctl =
893
0
        base->common_timeout_queues[i];
894
0
    event_del(&ctl->timeout_event); /* Internal; doesn't count */
895
0
    event_debug_unassign(&ctl->timeout_event);
896
0
    for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
897
0
      struct event *next = TAILQ_NEXT(ev,
898
0
          ev_timeout_pos.ev_next_with_common_timeout);
899
0
      if (!(ev->ev_flags & EVLIST_INTERNAL)) {
900
0
        event_del(ev);
901
0
        ++n_deleted;
902
0
      }
903
0
      ev = next;
904
0
    }
905
0
    mm_free(ctl);
906
0
  }
907
0
  if (base->common_timeout_queues)
908
0
    mm_free(base->common_timeout_queues);
909
910
0
  for (;;) {
911
    /* A finalizer can register yet another finalizer from within
912
     * itself, and if that finalizer ends up in active_later_queue we can
913
     * add it to activequeues, leaving events in activequeues after this
914
     * function returns, which is not what we want (we even have an
915
     * assertion for this).
916
     *
917
     * A simple case is a bufferevent with an underlying bufferevent (i.e. filters).
918
     */
919
0
    int i = event_base_free_queues_(base, run_finalizers);
920
0
    event_debug(("%s: %d events freed", __func__, i));
921
0
    if (!i) {
922
0
      break;
923
0
    }
924
0
    n_deleted += i;
925
0
  }
926
927
0
  if (n_deleted)
928
0
    event_debug(("%s: "EV_SIZE_FMT" events were still set in base",
929
0
      __func__, n_deleted));
930
931
0
  while (LIST_FIRST(&base->once_events)) {
932
0
    struct event_once *eonce = LIST_FIRST(&base->once_events);
933
0
    LIST_REMOVE(eonce, next_once);
934
0
    mm_free(eonce);
935
0
  }
936
937
0
  if (base->evsel != NULL && base->evsel->dealloc != NULL)
938
0
    base->evsel->dealloc(base);
939
940
0
  for (i = 0; i < base->nactivequeues; ++i)
941
0
    EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
942
943
0
  EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
944
0
  min_heap_dtor_(&base->timeheap);
945
946
0
  mm_free(base->activequeues);
947
948
0
  evmap_io_clear_(&base->io);
949
0
  evmap_signal_clear_(&base->sigmap);
950
0
  event_changelist_freemem_(&base->changelist);
951
952
0
  EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
953
0
  EVTHREAD_FREE_COND(base->current_event_cond);
954
955
  /* Free all event watchers */
956
0
  for (i = 0; i < EVWATCH_MAX; ++i) {
957
0
    while (!TAILQ_EMPTY(&base->watchers[i])) {
958
0
      watcher = TAILQ_FIRST(&base->watchers[i]);
959
0
      TAILQ_REMOVE(&base->watchers[i], watcher, next);
960
0
      mm_free(watcher);
961
0
    }
962
0
  }
963
964
  /* If we're freeing current_base, there won't be a current_base. */
965
0
  if (base == current_base)
966
0
    current_base = NULL;
967
0
  mm_free(base);
968
0
}
969
970
void
971
event_base_free_nofinalize(struct event_base *base)
972
0
{
973
0
  event_base_free_(base, 0);
974
0
}
975
976
void
977
event_base_free(struct event_base *base)
978
0
{
979
0
  event_base_free_(base, 1);
980
0
}
981
982
/* Fake eventop; used to disable the backend temporarily inside event_reinit
983
 * so that we can call event_del() on an event without telling the backend.
984
 */
985
static int
986
nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
987
    short events, void *fdinfo)
988
0
{
989
0
  return 0;
990
0
}
991
const struct eventop nil_eventop = {
992
  "nil",
993
  NULL, /* init: unused. */
994
  NULL, /* add: unused. */
995
  nil_backend_del, /* del: used, so needs to be killed. */
996
  NULL, /* dispatch: unused. */
997
  NULL, /* dealloc: unused. */
998
  0, 0, 0
999
};
1000
1001
/* reinitialize the event base after a fork */
1002
int
1003
event_reinit(struct event_base *base)
1004
0
{
1005
0
  const struct eventop *evsel;
1006
0
  int res = 0;
1007
0
  int was_notifiable = 0;
1008
0
  int had_signal_added = 0;
1009
1010
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1011
1012
0
  evsel = base->evsel;
1013
1014
  /* check if this event mechanism requires reinit on the backend */
1015
0
  if (evsel->need_reinit) {
1016
    /* We're going to call event_del() on our notify events (the
1017
     * ones that tell about signals and wakeup events).  But we
1018
     * don't actually want to tell the backend to change its
1019
     * state, since it might still share some resource (a kqueue,
1020
     * an epoll fd) with the parent process, and we don't want to
1021
     * delete the fds from _that_ backend, we temporarily stub out
1022
     * the evsel with a replacement.
1023
     */
1024
0
    base->evsel = &nil_eventop;
1025
0
  }
1026
1027
  /* We need to re-create a new signal-notification fd and a new
1028
   * thread-notification fd.  Otherwise, we'll still share those with
1029
   * the parent process, which would make any notification sent to them
1030
   * get received by one or both of the event loops, more or less at
1031
   * random.
1032
   */
1033
0
  if (base->sig.ev_signal_added) {
1034
0
    event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
1035
0
    event_debug_unassign(&base->sig.ev_signal);
1036
0
    memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
1037
0
    had_signal_added = 1;
1038
0
    base->sig.ev_signal_added = 0;
1039
0
  }
1040
0
  if (base->sig.ev_signal_pair[0] != -1)
1041
0
    EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
1042
0
  if (base->sig.ev_signal_pair[1] != -1)
1043
0
    EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
1044
0
  if (base->th_notify_fn != NULL) {
1045
0
    was_notifiable = 1;
1046
0
    base->th_notify_fn = NULL;
1047
0
  }
1048
0
  if (base->th_notify_fd[0] != -1) {
1049
0
    event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
1050
0
    EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
1051
0
    if (base->th_notify_fd[1] != -1)
1052
0
      EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
1053
0
    base->th_notify_fd[0] = -1;
1054
0
    base->th_notify_fd[1] = -1;
1055
0
    event_debug_unassign(&base->th_notify);
1056
0
  }
1057
1058
  /* Replace the original evsel. */
1059
0
        base->evsel = evsel;
1060
1061
0
  if (evsel->need_reinit) {
1062
    /* Reconstruct the backend through brute-force, so that we do
1063
     * not share any structures with the parent process. For some
1064
     * backends, this is necessary: epoll and kqueue, for
1065
     * instance, have events associated with a kernel
1066
     * structure. If we didn't reinitialize, we'd share that
1067
     * structure with the parent process, and any changes made by
1068
     * the parent would affect our backend's behavior (and vice
1069
     * versa).
1070
     */
1071
0
    if (base->evsel->dealloc != NULL)
1072
0
      base->evsel->dealloc(base);
1073
0
    base->evbase = evsel->init(base);
1074
0
    if (base->evbase == NULL) {
1075
0
      event_errx(1,
1076
0
         "%s: could not reinitialize event mechanism",
1077
0
         __func__);
1078
0
      res = -1;
1079
0
      goto done;
1080
0
    }
1081
1082
    /* Empty out the changelist (if any): we are starting from a
1083
     * blank slate. */
1084
0
    event_changelist_freemem_(&base->changelist);
1085
1086
    /* Tell the event maps to re-inform the backend about all
1087
     * pending events. This will make the signal notification
1088
     * event get re-created if necessary. */
1089
0
    if (evmap_reinit_(base) < 0)
1090
0
      res = -1;
1091
0
  } else {
1092
0
    res = evsig_init_(base);
1093
0
    if (res == 0 && had_signal_added) {
1094
0
      res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
1095
0
      if (res == 0)
1096
0
        base->sig.ev_signal_added = 1;
1097
0
    }
1098
0
  }
1099
1100
  /* If we were notifiable before, and nothing just exploded, become
1101
   * notifiable again. */
1102
0
  if (was_notifiable && res == 0)
1103
0
    res = evthread_make_base_notifiable_nolock_(base);
1104
1105
0
done:
1106
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1107
0
  return (res);
1108
0
}
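A minimal sketch of the intended fork() pattern, assuming a POSIX platform; the child must call event_reinit() before running the loop so that it stops sharing the parent's backend and notification fds:

#include <sys/types.h>
#include <unistd.h>
#include <event2/event.h>

/* 'base' was created and populated before the fork. */
static int run_child_loop(struct event_base *base)
{
  pid_t pid = fork();
  if (pid < 0)
    return -1;
  if (pid == 0) {
    /* Child: rebuild backend state (epoll fd, kqueue, signal and
     * thread-notify sockets) before dispatching. */
    if (event_reinit(base) < 0)
      return -1;
    return event_base_dispatch(base);
  }
  return 0; /* parent continues with its own copy of the base */
}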
1109
1110
/* Get the monotonic time for this event_base's timer */
1111
int
1112
event_gettime_monotonic(struct event_base *base, struct timeval *tv)
1113
0
{
1114
0
  int rv = -1;
1115
1116
0
  if (base && tv) {
1117
0
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1118
0
    rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
1119
0
    EVBASE_RELEASE_LOCK(base, th_base_lock);
1120
0
  }
1121
1122
0
  return rv;
1123
0
}
1124
1125
const char **
1126
event_get_supported_methods(void)
1127
0
{
1128
0
  static const char **methods = NULL;
1129
0
  const struct eventop **method;
1130
0
  const char **tmp;
1131
0
  int i = 0, k;
1132
1133
  /* count all methods */
1134
0
  for (method = &eventops[0]; *method != NULL; ++method) {
1135
0
    ++i;
1136
0
  }
1137
1138
  /* allocate one more than we need for the NULL pointer */
1139
0
  tmp = mm_calloc((i + 1), sizeof(char *));
1140
0
  if (tmp == NULL)
1141
0
    return (NULL);
1142
1143
  /* populate the array with the supported methods */
1144
0
  for (k = 0, i = 0; eventops[k] != NULL; ++k) {
1145
0
    tmp[i++] = eventops[k]->name;
1146
0
  }
1147
0
  tmp[i] = NULL;
1148
1149
0
  if (methods != NULL)
1150
0
    mm_free((char**)methods);
1151
1152
0
  methods = tmp;
1153
1154
0
  return (methods);
1155
0
}
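A short sketch of listing the compiled-in backends provided by the eventops array at the top of this file, assuming the standard public header:

#include <stdio.h>
#include <event2/event.h>

int main(void)
{
  const char **methods = event_get_supported_methods();
  int i;

  if (!methods)
    return 1;
  for (i = 0; methods[i] != NULL; ++i)
    printf("supported: %s\n", methods[i]);
  return 0;
}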
1156
1157
struct event_config *
1158
event_config_new(void)
1159
0
{
1160
0
  struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
1161
1162
0
  if (cfg == NULL)
1163
0
    return (NULL);
1164
1165
0
  TAILQ_INIT(&cfg->entries);
1166
0
  cfg->max_dispatch_interval.tv_sec = -1;
1167
0
  cfg->max_dispatch_callbacks = INT_MAX;
1168
0
  cfg->limit_callbacks_after_prio = 1;
1169
1170
0
  return (cfg);
1171
0
}
1172
1173
static void
1174
event_config_entry_free(struct event_config_entry *entry)
1175
0
{
1176
0
  if (entry->avoid_method != NULL)
1177
0
    mm_free((char *)entry->avoid_method);
1178
0
  mm_free(entry);
1179
0
}
1180
1181
void
1182
event_config_free(struct event_config *cfg)
1183
0
{
1184
0
  struct event_config_entry *entry;
1185
1186
0
  while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
1187
0
    TAILQ_REMOVE(&cfg->entries, entry, next);
1188
0
    event_config_entry_free(entry);
1189
0
  }
1190
0
  mm_free(cfg);
1191
0
}
1192
1193
int
1194
event_config_set_flag(struct event_config *cfg, int flag)
1195
0
{
1196
0
  if (!cfg)
1197
0
    return -1;
1198
0
  cfg->flags |= flag;
1199
0
  return 0;
1200
0
}
1201
1202
int
1203
event_config_avoid_method(struct event_config *cfg, const char *method)
1204
0
{
1205
0
  struct event_config_entry *entry = mm_malloc(sizeof(*entry));
1206
0
  if (entry == NULL)
1207
0
    return (-1);
1208
1209
0
  if ((entry->avoid_method = mm_strdup(method)) == NULL) {
1210
0
    mm_free(entry);
1211
0
    return (-1);
1212
0
  }
1213
1214
0
  TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
1215
1216
0
  return (0);
1217
0
}
1218
1219
int
1220
event_config_require_features(struct event_config *cfg,
1221
    int features)
1222
0
{
1223
0
  if (!cfg)
1224
0
    return (-1);
1225
0
  cfg->require_features = features;
1226
0
  return (0);
1227
0
}
1228
1229
int
1230
event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
1231
0
{
1232
0
  if (!cfg)
1233
0
    return (-1);
1234
0
  cfg->n_cpus_hint = cpus;
1235
0
  return (0);
1236
0
}
1237
1238
int
1239
event_config_set_max_dispatch_interval(struct event_config *cfg,
1240
    const struct timeval *max_interval, int max_callbacks, int min_priority)
1241
0
{
1242
0
  if (max_interval)
1243
0
    memcpy(&cfg->max_dispatch_interval, max_interval,
1244
0
        sizeof(struct timeval));
1245
0
  else
1246
0
    cfg->max_dispatch_interval.tv_sec = -1;
1247
0
  cfg->max_dispatch_callbacks =
1248
0
      max_callbacks >= 0 ? max_callbacks : INT_MAX;
1249
0
  if (min_priority < 0)
1250
0
    min_priority = 0;
1251
0
  cfg->limit_callbacks_after_prio = min_priority;
1252
0
  return (0);
1253
0
}
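A sketch of how the three limits set here interact, with illustrative numbers only: after 10 ms or 8 callbacks in one pass (whichever comes first), dispatching is cut short for callbacks at priority >= 1 so higher-priority work gets another look at the queues. The helper name is hypothetical:

#include <event2/event.h>

struct event_base *make_latency_sensitive_base(void)
{
  struct event_config *cfg = event_config_new();
  struct event_base *base;
  struct timeval max_interval = {0, 10000}; /* 10 ms */

  if (!cfg)
    return NULL;
  /* arguments: max_interval, max_callbacks, min_priority */
  event_config_set_max_dispatch_interval(cfg, &max_interval, 8, 1);
  base = event_base_new_with_config(cfg);
  event_config_free(cfg);
  return base;
}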
1254
1255
int
1256
event_priority_init(int npriorities)
1257
0
{
1258
0
  return event_base_priority_init(current_base, npriorities);
1259
0
}
1260
1261
int
1262
event_base_priority_init(struct event_base *base, int npriorities)
1263
0
{
1264
0
  int i, r;
1265
0
  r = -1;
1266
1267
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1268
1269
0
  if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
1270
0
      || npriorities >= EVENT_MAX_PRIORITIES)
1271
0
    goto err;
1272
1273
0
  if (npriorities == base->nactivequeues)
1274
0
    goto ok;
1275
1276
0
  if (base->nactivequeues) {
1277
0
    mm_free(base->activequeues);
1278
0
    base->nactivequeues = 0;
1279
0
  }
1280
1281
  /* Allocate our priority queues */
1282
0
  base->activequeues = (struct evcallback_list *)
1283
0
    mm_calloc(npriorities, sizeof(struct evcallback_list));
1284
0
  if (base->activequeues == NULL) {
1285
0
    event_warn("%s: calloc", __func__);
1286
0
    goto err;
1287
0
  }
1288
0
  base->nactivequeues = npriorities;
1289
1290
0
  for (i = 0; i < base->nactivequeues; ++i) {
1291
0
    TAILQ_INIT(&base->activequeues[i]);
1292
0
  }
1293
1294
0
ok:
1295
0
  r = 0;
1296
0
err:
1297
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1298
0
  return (r);
1299
0
}
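A sketch of pairing this with event_priority_set(); priorities must be configured before events become active, and lower numbers run first. The callback and function names here are illustrative only:

#include <event2/event.h>

static void urgent_cb(evutil_socket_t fd, short what, void *arg)
{ (void)fd; (void)what; (void)arg; }
static void bulk_cb(evutil_socket_t fd, short what, void *arg)
{ (void)fd; (void)what; (void)arg; }

int setup_priorities(struct event_base *base,
    evutil_socket_t urgent_fd, evutil_socket_t bulk_fd)
{
  struct event *urgent, *bulk;

  if (event_base_priority_init(base, 2) < 0)
    return -1;

  urgent = event_new(base, urgent_fd, EV_READ | EV_PERSIST, urgent_cb, NULL);
  bulk = event_new(base, bulk_fd, EV_READ | EV_PERSIST, bulk_cb, NULL);
  event_priority_set(urgent, 0); /* runs before priority 1 */
  event_priority_set(bulk, 1);
  if (event_add(urgent, NULL) < 0 || event_add(bulk, NULL) < 0)
    return -1;
  return 0;
}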
1300
1301
int
1302
event_base_get_npriorities(struct event_base *base)
1303
0
{
1304
1305
0
  int n;
1306
0
  if (base == NULL)
1307
0
    base = current_base;
1308
1309
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1310
0
  n = base->nactivequeues;
1311
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1312
0
  return (n);
1313
0
}
1314
1315
int
1316
event_base_get_num_events(struct event_base *base, unsigned int type)
1317
0
{
1318
0
  int r = 0;
1319
1320
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1321
1322
0
  if (type & EVENT_BASE_COUNT_ACTIVE)
1323
0
    r += base->event_count_active;
1324
1325
0
  if (type & EVENT_BASE_COUNT_VIRTUAL)
1326
0
    r += base->virtual_event_count;
1327
1328
0
  if (type & EVENT_BASE_COUNT_ADDED)
1329
0
    r += base->event_count;
1330
1331
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1332
1333
0
  return r;
1334
0
}
1335
1336
int
1337
event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
1338
0
{
1339
0
  int r = 0;
1340
1341
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1342
1343
0
  if (type & EVENT_BASE_COUNT_ACTIVE) {
1344
0
    r += base->event_count_active_max;
1345
0
    if (clear)
1346
0
      base->event_count_active_max = 0;
1347
0
  }
1348
1349
0
  if (type & EVENT_BASE_COUNT_VIRTUAL) {
1350
0
    r += base->virtual_event_count_max;
1351
0
    if (clear)
1352
0
      base->virtual_event_count_max = 0;
1353
0
  }
1354
1355
0
  if (type & EVENT_BASE_COUNT_ADDED) {
1356
0
    r += base->event_count_max;
1357
0
    if (clear)
1358
0
      base->event_count_max = 0;
1359
0
  }
1360
1361
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1362
1363
0
  return r;
1364
0
}
1365
1366
/* Returns true iff we're currently watching any events. */
1367
static int
1368
event_haveevents(struct event_base *base)
1369
0
{
1370
  /* Caller must hold th_base_lock */
1371
0
  return (base->virtual_event_count > 0 || base->event_count > 0);
1372
0
}
1373
1374
/* "closure" function called when processing active signal events */
1375
static inline void
1376
event_signal_closure(struct event_base *base, struct event *ev)
1377
0
{
1378
0
#if defined(__clang__)
1379
#elif defined(__GNUC__)
1380
#pragma GCC diagnostic push
1381
/* NOTE: it is better to avoid such code altogether, by using a separate
1382
 * variable to break the loop in the event structure, but for now this code is safe
1383
 * */
1384
#pragma GCC diagnostic ignored "-Wdangling-pointer"
1385
#endif
1386
1387
0
  short ncalls;
1388
0
  int should_break;
1389
1390
  /* Allows deletes to work, see also event_del_nolock_() that has
1391
   * special treatment for signals */
1392
0
  ncalls = ev->ev_ncalls;
1393
0
  if (ncalls != 0)
1394
0
    ev->ev_pncalls = &ncalls;
1395
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1396
0
  while (ncalls) {
1397
0
    ncalls--;
1398
0
    ev->ev_ncalls = ncalls;
1399
0
    if (ncalls == 0)
1400
0
      ev->ev_pncalls = NULL;
1401
0
    (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1402
1403
0
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1404
0
    should_break = base->event_break;
1405
0
    EVBASE_RELEASE_LOCK(base, th_base_lock);
1406
1407
0
    if (should_break) {
1408
0
      if (ncalls != 0)
1409
0
        ev->ev_pncalls = NULL;
1410
0
      return;
1411
0
    }
1412
0
  }
1413
1414
0
#if defined(__clang__)
1415
#elif defined(__GNUC__)
1416
#pragma GCC diagnostic pop
1417
#endif
1418
0
}
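The ncalls/pncalls handling above exists because a signal callback may run several times per activation and may delete its own event mid-stream; from the application side the machinery is invisible. A minimal signal-event sketch (SIGINT chosen arbitrarily):

#include <signal.h>
#include <event2/event.h>

static void on_sigint(evutil_socket_t sig, short what, void *arg)
{
  struct event_base *base = arg;
  (void)sig; (void)what;
  event_base_loopbreak(base); /* sets base->event_break, checked above */
}

int main(void)
{
  struct event_base *base = event_base_new();
  struct event *sig_ev = evsignal_new(base, SIGINT, on_sigint, base);

  evsignal_add(sig_ev, NULL);
  event_base_dispatch(base);
  event_free(sig_ev);
  event_base_free(base);
  return 0;
}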
1419
1420
/* Common timeouts are special timeouts that are handled as queues rather than
1421
 * in the minheap.  This is more efficient than the minheap if we happen to
1422
 * know that we're going to get several thousands of timeout events all with
1423
 * the same timeout value.
1424
 *
1425
 * Since all our timeout handling code assumes timevals can be copied,
1426
 * assigned, etc, we can't use "magic pointer" to encode these common
1427
 * timeouts.  Searching through a list to see if every timeout is common could
1428
 * also get inefficient.  Instead, we take advantage of the fact that tv_usec
1429
 * is 32 bits long, but only uses 20 of those bits (since it can never be over
1430
 * 999999.)  We use the top bits to encode 4 bites of magic number, and 8 bits
1431
 * of index into the event_base's aray of common timeouts.
1432
 */
1433
1434
0
#define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
1435
0
#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1436
0
#define COMMON_TIMEOUT_IDX_SHIFT 20
1437
0
#define COMMON_TIMEOUT_MASK     0xf0000000
1438
0
#define COMMON_TIMEOUT_MAGIC    0x50000000
1439
1440
#define COMMON_TIMEOUT_IDX(tv) \
1441
0
  (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
1442
1443
/** Return true iff 'tv' is a common timeout in 'base' */
1444
static inline int
1445
is_common_timeout(const struct timeval *tv,
1446
    const struct event_base *base)
1447
0
{
1448
0
  int idx;
1449
0
  if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1450
0
    return 0;
1451
0
  idx = COMMON_TIMEOUT_IDX(tv);
1452
0
  return idx < base->n_common_timeouts;
1453
0
}
1454
1455
/* True iff tv1 and tv2 have the same common-timeout index, or if neither
1456
 * one is a common timeout. */
1457
static inline int
1458
is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1459
0
{
1460
0
  return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1461
0
      (tv2->tv_usec & ~MICROSECONDS_MASK);
1462
0
}
1463
1464
/** Requires that 'tv' is a common timeout.  Return the corresponding
1465
 * common_timeout_list. */
1466
static inline struct common_timeout_list *
1467
get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1468
0
{
1469
0
  return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1470
0
}
1471
1472
#if 0
1473
static inline int
1474
common_timeout_ok(const struct timeval *tv,
1475
    struct event_base *base)
1476
{
1477
  const struct timeval *expect =
1478
      &get_common_timeout_list(base, tv)->duration;
1479
  return tv->tv_sec == expect->tv_sec &&
1480
      tv->tv_usec == expect->tv_usec;
1481
}
1482
#endif
1483
1484
/* Add the timeout for the first event in given common timeout list to the
1485
 * event_base's minheap. */
1486
static void
1487
common_timeout_schedule(struct common_timeout_list *ctl,
1488
    const struct timeval *now, struct event *head)
1489
0
{
1490
0
  struct timeval timeout = head->ev_timeout;
1491
0
  timeout.tv_usec &= MICROSECONDS_MASK;
1492
0
  event_add_nolock_(&ctl->timeout_event, &timeout, 1);
1493
0
}
1494
1495
/* Callback: invoked when the timeout for a common timeout queue triggers.
1496
 * This means that (at least) the first event in that queue should be run,
1497
 * and the timeout should be rescheduled if there are more events. */
1498
static void
1499
common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1500
0
{
1501
0
  struct timeval now;
1502
0
  struct common_timeout_list *ctl = arg;
1503
0
  struct event_base *base = ctl->base;
1504
0
  struct event *ev = NULL;
1505
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1506
0
  gettime(base, &now);
1507
0
  while (1) {
1508
0
    int was_active;
1509
0
    ev = TAILQ_FIRST(&ctl->events);
1510
0
    if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1511
0
        (ev->ev_timeout.tv_sec == now.tv_sec &&
1512
0
      (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1513
0
      break;
1514
0
    was_active = ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER);
1515
0
    if (!was_active)
1516
0
      event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1517
0
    else
1518
0
      event_queue_remove_timeout(base, ev);
1519
0
    event_active_nolock_(ev, EV_TIMEOUT, 1);
1520
0
  }
1521
0
  if (ev)
1522
0
    common_timeout_schedule(ctl, &now, ev);
1523
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1524
0
}
1525
1526
0
#define MAX_COMMON_TIMEOUTS 256
1527
1528
const struct timeval *
1529
event_base_init_common_timeout(struct event_base *base,
1530
    const struct timeval *duration)
1531
0
{
1532
0
  int i;
1533
0
  struct timeval tv;
1534
0
  const struct timeval *result=NULL;
1535
0
  struct common_timeout_list *new_ctl;
1536
1537
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1538
0
  if (duration->tv_usec > 1000000) {
1539
0
    memcpy(&tv, duration, sizeof(struct timeval));
1540
0
    if (is_common_timeout(duration, base))
1541
0
      tv.tv_usec &= MICROSECONDS_MASK;
1542
0
    tv.tv_sec += tv.tv_usec / 1000000;
1543
0
    tv.tv_usec %= 1000000;
1544
0
    duration = &tv;
1545
0
  }
1546
0
  for (i = 0; i < base->n_common_timeouts; ++i) {
1547
0
    const struct common_timeout_list *ctl =
1548
0
        base->common_timeout_queues[i];
1549
0
    if (duration->tv_sec == ctl->duration.tv_sec &&
1550
0
        duration->tv_usec ==
1551
0
        (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1552
0
      EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1553
0
      result = &ctl->duration;
1554
0
      goto done;
1555
0
    }
1556
0
  }
1557
0
  if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1558
0
    event_warnx("%s: Too many common timeouts already in use; "
1559
0
        "we only support %d per event_base", __func__,
1560
0
        MAX_COMMON_TIMEOUTS);
1561
0
    goto done;
1562
0
  }
1563
0
  if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1564
0
    int n = base->n_common_timeouts < 16 ? 16 :
1565
0
        base->n_common_timeouts*2;
1566
0
    struct common_timeout_list **newqueues =
1567
0
        mm_realloc(base->common_timeout_queues,
1568
0
      n*sizeof(struct common_timeout_queue *));
1569
0
    if (!newqueues) {
1570
0
      event_warn("%s: realloc",__func__);
1571
0
      goto done;
1572
0
    }
1573
0
    base->n_common_timeouts_allocated = n;
1574
0
    base->common_timeout_queues = newqueues;
1575
0
  }
1576
0
  new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1577
0
  if (!new_ctl) {
1578
0
    event_warn("%s: calloc",__func__);
1579
0
    goto done;
1580
0
  }
1581
0
  TAILQ_INIT(&new_ctl->events);
1582
0
  new_ctl->duration.tv_sec = duration->tv_sec;
1583
0
  new_ctl->duration.tv_usec =
1584
0
      duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1585
0
      (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1586
0
  evtimer_assign(&new_ctl->timeout_event, base,
1587
0
      common_timeout_callback, new_ctl);
1588
0
  new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1589
0
  event_priority_set(&new_ctl->timeout_event, 0);
1590
0
  new_ctl->base = base;
1591
0
  base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1592
0
  result = &new_ctl->duration;
1593
1594
0
done:
1595
0
  if (result)
1596
0
    EVUTIL_ASSERT(is_common_timeout(result, base));
1597
1598
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1599
0
  return result;
1600
0
}
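A sketch of consuming the returned "magic" timeval; the pointer must be passed unmodified to event_add()/evtimer_add() so that COMMON_TIMEOUT_IDX() can recover the queue index. The 10-second duration and the helper name are arbitrary:

#include <event2/event.h>

static void on_timeout(evutil_socket_t fd, short what, void *arg)
{ (void)fd; (void)what; (void)arg; }

int add_many_identical_timeouts(struct event_base *base,
    struct event **evs, int n)
{
  struct timeval ten_sec = {10, 0};
  const struct timeval *common;
  int i;

  common = event_base_init_common_timeout(base, &ten_sec);
  if (!common)
    return -1;
  for (i = 0; i < n; ++i) {
    evs[i] = evtimer_new(base, on_timeout, NULL);
    /* Events sharing 'common' go into one ordered queue instead of
     * the minheap, which is cheaper at this scale. */
    evtimer_add(evs[i], common);
  }
  return 0;
}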
1601
1602
/* Closure function invoked when we're activating a persistent event. */
1603
static inline void
1604
event_persist_closure(struct event_base *base, struct event *ev)
1605
0
{
1606
0
  void (*evcb_callback)(evutil_socket_t, short, void *);
1607
1608
  // Other fields of *ev that must be stored before executing
1609
0
  evutil_socket_t evcb_fd;
1610
0
  short evcb_res;
1611
0
  void *evcb_arg;
1612
1613
  /* reschedule the persistent event if we have a timeout. */
1614
0
  if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1615
    /* If there was a timeout, we want it to run at an interval of
1616
     * ev_io_timeout after the last time it was _scheduled_ for,
1617
     * not ev_io_timeout after _now_.  If it fired for another
1618
     * reason, though, the timeout ought to start ticking _now_. */
1619
0
    struct timeval run_at, relative_to, delay, now;
1620
0
    ev_uint32_t usec_mask = 0;
1621
0
    EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1622
0
      &ev->ev_io_timeout));
1623
0
    gettime(base, &now);
1624
0
    if (is_common_timeout(&ev->ev_timeout, base)) {
1625
0
      delay = ev->ev_io_timeout;
1626
0
      usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1627
0
      delay.tv_usec &= MICROSECONDS_MASK;
1628
0
      if (ev->ev_res & EV_TIMEOUT) {
1629
0
        relative_to = ev->ev_timeout;
1630
0
        relative_to.tv_usec &= MICROSECONDS_MASK;
1631
0
      } else {
1632
0
        relative_to = now;
1633
0
      }
1634
0
    } else {
1635
0
      delay = ev->ev_io_timeout;
1636
0
      if (ev->ev_res & EV_TIMEOUT) {
1637
0
        relative_to = ev->ev_timeout;
1638
0
      } else {
1639
0
        relative_to = now;
1640
0
      }
1641
0
    }
1642
0
    evutil_timeradd(&relative_to, &delay, &run_at);
1643
0
    if (evutil_timercmp(&run_at, &now, <)) {
1644
      /* Looks like we missed at least one invocation due to
1645
       * a clock jump, not running the event loop for a
1646
       * while, really slow callbacks, or
1647
       * something. Reschedule relative to now.
1648
       */
1649
0
      evutil_timeradd(&now, &delay, &run_at);
1650
0
    }
1651
0
    run_at.tv_usec |= usec_mask;
1652
0
    event_add_nolock_(ev, &run_at, 1);
1653
0
  }
1654
1655
  // Save our callback before we release the lock
1656
0
  evcb_callback = ev->ev_callback;
1657
0
  evcb_fd = ev->ev_fd;
1658
0
  evcb_res = ev->ev_res;
1659
0
  evcb_arg = ev->ev_arg;
1660
1661
  // Release the lock
1662
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1663
1664
  // Execute the callback
1665
0
  (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
1666
0
}
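The rescheduling logic above is what makes an EV_PERSIST timeout repeat relative to its previous deadline rather than drift by the callback's run time. A minimal periodic-timer sketch (1-second period chosen arbitrarily):

#include <stdio.h>
#include <event2/event.h>

static void every_second(evutil_socket_t fd, short what, void *arg)
{
  (void)fd; (void)arg;
  /* 'what' includes EV_TIMEOUT, so event_persist_closure() re-adds the
   * event at last_deadline + 1s, keeping the period stable. */
  printf("tick (res=0x%x)\n", (unsigned)what);
}

int main(void)
{
  struct event_base *base = event_base_new();
  struct timeval one_sec = {1, 0};
  struct event *tick = event_new(base, -1, EV_PERSIST, every_second, NULL);

  event_add(tick, &one_sec);
  event_base_dispatch(base);
  event_free(tick);
  event_base_free(base);
  return 0;
}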
1667
1668
/*
1669
  Helper for event_process_active to process all the events in a single queue,
1670
  releasing the lock as we go.  This function requires that the lock be held
1671
  when it's invoked.  Returns -1 if we get a signal or an event_break that
1672
  means we should stop processing any active events now.  Otherwise returns
1673
  the number of non-internal event_callbacks that we processed.
1674
*/
1675
static int
1676
event_process_active_single_queue(struct event_base *base,
1677
    struct evcallback_list *activeq,
1678
    int max_to_process, const struct timeval *endtime)
1679
0
{
1680
0
  struct event_callback *evcb;
1681
0
  int count = 0;
1682
1683
0
  EVUTIL_ASSERT(activeq != NULL);
1684
1685
0
  for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
1686
0
    struct event *ev = NULL;
1687
0
    if (evcb->evcb_flags & EVLIST_INIT) {
1688
0
      ev = event_callback_to_event(evcb);
1689
1690
0
      if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
1691
0
        event_queue_remove_active(base, evcb);
1692
0
      else
1693
0
        event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1694
0
      event_debug((
1695
0
          "event_process_active: event: %p, %s%s%scall %p",
1696
0
          (void *)ev,
1697
0
          ev->ev_res & EV_READ ? "EV_READ " : " ",
1698
0
          ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1699
0
          ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
1700
0
          (void *)ev->ev_callback));
1701
0
    } else {
1702
0
      event_queue_remove_active(base, evcb);
1703
0
      event_debug(("event_process_active: event_callback %p, "
1704
0
        "closure %d, call %p",
1705
0
        (void *)evcb, evcb->evcb_closure, (void *)evcb->evcb_cb_union.evcb_callback));
1706
0
    }
1707
    // We don't want an infinite loop or a use-after-free.
1708
    // Hence, by this point `event_queue_remove_active` or `event_del_nolock_` must already have removed the current event from the queue, so the next loop iteration sees a different head.
1709
0
    EVUTIL_ASSERT(evcb != TAILQ_FIRST(activeq));
1710
1711
0
    if (!(evcb->evcb_flags & EVLIST_INTERNAL))
1712
0
      ++count;
1713
1714
1715
0
    base->current_event = evcb;
1716
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
1717
0
    base->current_event_waiters = 0;
1718
0
#endif
1719
1720
0
    switch (evcb->evcb_closure) {
1721
0
    case EV_CLOSURE_EVENT_SIGNAL:
1722
0
      EVUTIL_ASSERT(ev != NULL);
1723
0
      event_signal_closure(base, ev);
1724
0
      break;
1725
0
    case EV_CLOSURE_EVENT_PERSIST:
1726
0
      EVUTIL_ASSERT(ev != NULL);
1727
0
      event_persist_closure(base, ev);
1728
0
      break;
1729
0
    case EV_CLOSURE_EVENT: {
1730
0
      void (*evcb_callback)(evutil_socket_t, short, void *);
1731
0
      short res;
1732
0
      EVUTIL_ASSERT(ev != NULL);
1733
0
      evcb_callback = *ev->ev_callback;
1734
0
      res = ev->ev_res;
1735
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
1736
0
      evcb_callback(ev->ev_fd, res, ev->ev_arg);
1737
0
    }
1738
0
    break;
1739
0
    case EV_CLOSURE_CB_SELF: {
1740
0
      void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
1741
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
1742
0
      evcb_selfcb(evcb, evcb->evcb_arg);
1743
0
    }
1744
0
    break;
1745
0
    case EV_CLOSURE_EVENT_FINALIZE:
1746
0
    case EV_CLOSURE_EVENT_FINALIZE_FREE: {
1747
0
      void (*evcb_evfinalize)(struct event *, void *);
1748
0
      int evcb_closure = evcb->evcb_closure;
1749
0
      EVUTIL_ASSERT(ev != NULL);
1750
0
      base->current_event = NULL;
1751
0
      evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
1752
0
      EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1753
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
1754
0
      event_debug_note_teardown_(ev);
1755
0
      evcb_evfinalize(ev, ev->ev_arg);
1756
0
      if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
1757
0
        mm_free(ev);
1758
0
    }
1759
0
    break;
1760
0
    case EV_CLOSURE_CB_FINALIZE: {
1761
0
      void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
1762
0
      base->current_event = NULL;
1763
0
      EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1764
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
1765
0
      evcb_cbfinalize(evcb, evcb->evcb_arg);
1766
0
    }
1767
0
    break;
1768
0
    default:
1769
0
      EVUTIL_ASSERT(0);
1770
0
    }
1771
1772
0
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1773
0
    base->current_event = NULL;
1774
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
1775
0
    if (base->current_event_waiters) {
1776
0
      base->current_event_waiters = 0;
1777
0
      EVTHREAD_COND_BROADCAST(base->current_event_cond);
1778
0
    }
1779
0
#endif
1780
1781
0
    if (base->event_break)
1782
0
      return -1;
1783
0
    if (count >= max_to_process)
1784
0
      return count;
1785
0
    if (count && endtime) {
1786
0
      struct timeval now;
1787
0
      update_time_cache(base);
1788
0
      gettime(base, &now);
1789
0
      if (evutil_timercmp(&now, endtime, >=))
1790
0
        return count;
1791
0
    }
1792
0
    if (base->event_continue)
1793
0
      break;
1794
0
  }
1795
0
  return count;
1796
0
}
1797
1798
/*
1799
 * Active events are stored in priority queues.  Numerically lower priorities
1800
 * are always processed before higher ones, so events at a lower priority
1801
 * number can starve those at a higher number.
1802
 */
1803
1804
static int
1805
event_process_active(struct event_base *base)
1806
0
{
1807
  /* Caller must hold th_base_lock */
1808
0
  struct evcallback_list *activeq = NULL;
1809
0
  int i, c = 0;
1810
0
  const struct timeval *endtime;
1811
0
  struct timeval tv;
1812
0
  const int maxcb = base->max_dispatch_callbacks;
1813
0
  const int limit_after_prio = base->limit_callbacks_after_prio;
1814
0
  if (base->max_dispatch_time.tv_sec >= 0) {
1815
0
    update_time_cache(base);
1816
0
    gettime(base, &tv);
1817
0
    evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
1818
0
    endtime = &tv;
1819
0
  } else {
1820
0
    endtime = NULL;
1821
0
  }
1822
1823
0
  for (i = 0; i < base->nactivequeues; ++i) {
1824
0
    if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1825
0
      base->event_running_priority = i;
1826
0
      activeq = &base->activequeues[i];
1827
0
      if (i < limit_after_prio)
1828
0
        c = event_process_active_single_queue(base, activeq,
1829
0
            INT_MAX, NULL);
1830
0
      else
1831
0
        c = event_process_active_single_queue(base, activeq,
1832
0
            maxcb, endtime);
1833
0
      if (c < 0) {
1834
0
        goto done;
1835
0
      } else if (c > 0)
1836
0
        break; /* Processed a real event; do not
1837
          * consider lower-priority events */
1838
      /* If we get here, all of the events we processed
1839
       * were internal.  Continue. */
1840
0
    }
1841
0
  }
1842
1843
0
done:
1844
0
  base->event_running_priority = -1;
1845
1846
0
  return c;
1847
0
}
1848
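/* Illustrative sketch, not part of event.c: how a caller opts into the
 * priority behaviour used by event_process_active() above.  Numerically lower
 * priorities run first, so an event at priority 0 can starve one at priority 1.
 * The names urgent_cb, bulk_cb and setup_priorities are hypothetical. */
#include <event2/event.h>

static void urgent_cb(evutil_socket_t fd, short what, void *arg)
{ (void)fd; (void)what; (void)arg; /* handle control traffic */ }
static void bulk_cb(evutil_socket_t fd, short what, void *arg)
{ (void)fd; (void)what; (void)arg; /* handle bulk traffic */ }

static int
setup_priorities(struct event_base *base, evutil_socket_t ctl_fd, evutil_socket_t data_fd)
{
  struct event *urgent, *bulk;

  /* Pick the number of queues before adding events or running the loop. */
  if (event_base_priority_init(base, 2) < 0)
    return -1;

  urgent = event_new(base, ctl_fd, EV_READ | EV_PERSIST, urgent_cb, NULL);
  bulk = event_new(base, data_fd, EV_READ | EV_PERSIST, bulk_cb, NULL);
  if (urgent == NULL || bulk == NULL)
    return -1;

  /* event_priority_set() only works while the event is not active. */
  event_priority_set(urgent, 0);  /* runs before anything at priority 1 */
  event_priority_set(bulk, 1);

  if (event_add(urgent, NULL) < 0 || event_add(bulk, NULL) < 0)
    return -1;
  return 0;
}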
1849
/*
1850
 * Wait continuously for events.  We exit only if no events are left.
1851
 */
1852
1853
int
1854
event_dispatch(void)
1855
0
{
1856
0
  return (event_loop(0));
1857
0
}
1858
1859
int
1860
event_base_dispatch(struct event_base *event_base)
1861
0
{
1862
0
  return (event_base_loop(event_base, 0));
1863
0
}
1864
1865
const char *
1866
event_base_get_method(const struct event_base *base)
1867
0
{
1868
0
  EVUTIL_ASSERT(base);
1869
0
  return (base->evsel->name);
1870
0
}
1871
1872
const char *
1873
event_base_get_signal_method(const struct event_base *base)
1874
0
{
1875
0
  EVUTIL_ASSERT(base);
1876
0
  return (base->evsigsel->name);
1877
0
}
1878
1879
/** Callback: used to implement event_base_loopexit by telling the event_base
1880
 * that it's time to exit its loop. */
1881
static void
1882
event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1883
0
{
1884
0
  struct event_base *base = arg;
1885
0
  base->event_gotterm = 1;
1886
0
}
1887
1888
int
1889
event_loopexit(const struct timeval *tv)
1890
0
{
1891
0
  return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1892
0
        current_base, tv));
1893
0
}
1894
1895
int
1896
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1897
0
{
1898
0
  return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1899
0
        event_base, tv));
1900
0
}
1901
1902
int
1903
event_loopbreak(void)
1904
0
{
1905
0
  return (event_base_loopbreak(current_base));
1906
0
}
1907
1908
int
1909
event_base_loopbreak(struct event_base *event_base)
1910
0
{
1911
0
  int r = 0;
1912
0
  if (event_base == NULL)
1913
0
    return (-1);
1914
1915
0
  EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1916
0
  event_base->event_break = 1;
1917
1918
0
  if (EVBASE_NEED_NOTIFY(event_base)) {
1919
0
    r = evthread_notify_base(event_base);
1920
0
  } else {
1921
0
    r = (0);
1922
0
  }
1923
0
  EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1924
0
  return r;
1925
0
}
1926
1927
int
1928
event_base_loopcontinue(struct event_base *event_base)
1929
0
{
1930
0
  int r = 0;
1931
0
  if (event_base == NULL)
1932
0
    return (-1);
1933
1934
0
  EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1935
0
  event_base->event_continue = 1;
1936
1937
0
  if (EVBASE_NEED_NOTIFY(event_base)) {
1938
0
    r = evthread_notify_base(event_base);
1939
0
  } else {
1940
0
    r = (0);
1941
0
  }
1942
0
  EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1943
0
  return r;
1944
0
}
1945
1946
int
1947
event_base_got_break(struct event_base *event_base)
1948
0
{
1949
0
  int res;
1950
0
  EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1951
0
  res = event_base->event_break;
1952
0
  EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1953
0
  return res;
1954
0
}
1955
1956
int
1957
event_base_got_exit(struct event_base *event_base)
1958
0
{
1959
0
  int res;
1960
0
  EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1961
0
  res = event_base->event_gotterm;
1962
0
  EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1963
0
  return res;
1964
0
}
1965
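/* Illustrative sketch, not part of event.c: the two ways of stopping a loop
 * provided above.  event_base_loopexit() lets the active callbacks finish and
 * can be postponed with a timeout; event_base_loopbreak() stops right after
 * the callback that is currently running.  stop_now_cb and
 * run_for_ten_seconds are hypothetical names. */
#include <event2/event.h>

static void
stop_now_cb(evutil_socket_t fd, short what, void *arg)
{
  struct event_base *base = arg;
  (void)fd; (void)what;
  event_base_loopbreak(base);  /* do not drain the other active callbacks */
}

static int
run_for_ten_seconds(struct event_base *base)
{
  /* Ask the loop to exit ten seconds from now, then run it. */
  struct timeval ten_sec = { 10, 0 };

  if (event_base_loopexit(base, &ten_sec) < 0)
    return -1;
  return event_base_dispatch(base);
}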
1966
/* not thread safe */
1967
1968
int
1969
event_loop(int flags)
1970
0
{
1971
0
  return event_base_loop(current_base, flags);
1972
0
}
1973
1974
int
1975
event_base_loop(struct event_base *base, int flags)
1976
0
{
1977
0
  const struct eventop *evsel = base->evsel;
1978
0
  struct timeval *tv_p;
1979
0
  int res, done, retval = 0;
1980
0
  struct evwatch_prepare_cb_info prepare_info;
1981
0
  struct evwatch_check_cb_info check_info;
1982
0
  struct evwatch *watcher;
1983
1984
  /* Grab the lock.  We will release it inside evsel.dispatch, and again
1985
   * as we invoke watchers and user callbacks. */
1986
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1987
1988
0
  if (base->running_loop) {
1989
0
    event_warnx("%s: reentrant invocation.  Only one event_base_loop"
1990
0
        " can run on each event_base at once.", __func__);
1991
0
    EVBASE_RELEASE_LOCK(base, th_base_lock);
1992
0
    return -1;
1993
0
  }
1994
1995
0
  base->running_loop = 1;
1996
1997
0
  clear_time_cache(base);
1998
1999
0
  if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
2000
0
    evsig_set_base_(base);
2001
2002
0
  done = 0;
2003
2004
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
2005
0
  base->th_owner_id = EVTHREAD_GET_ID();
2006
0
#endif
2007
2008
0
  base->event_gotterm = base->event_break = 0;
2009
2010
0
  while (!done) {
2011
0
    struct timeval tv;
2012
2013
0
    base->event_continue = 0;
2014
0
    base->n_deferreds_queued = 0;
2015
2016
    /* Terminate the loop if we have been asked to */
2017
0
    if (base->event_gotterm) {
2018
0
      break;
2019
0
    }
2020
2021
0
    if (base->event_break) {
2022
0
      break;
2023
0
    }
2024
2025
0
    tv_p = &tv;
2026
0
    if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
2027
0
      timeout_next(base, &tv_p);
2028
0
    } else {
2029
      /*
2030
       * if we have active events, we just poll new events
2031
       * without waiting.
2032
       */
2033
0
      evutil_timerclear(&tv);
2034
0
    }
2035
2036
    /* If we have no events, we just exit */
2037
0
    if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
2038
0
        !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
2039
0
      event_debug(("%s: no events registered.", __func__));
2040
0
      retval = 1;
2041
0
      goto done;
2042
0
    }
2043
2044
0
    event_queue_make_later_events_active(base);
2045
2046
    /* Invoke prepare watchers before polling for events */
2047
0
    prepare_info.timeout = tv_p;
2048
0
    TAILQ_FOREACH(watcher, &base->watchers[EVWATCH_PREPARE], next) {
2049
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
2050
0
      (*watcher->callback.prepare)(watcher, &prepare_info, watcher->arg);
2051
0
      EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2052
0
    }
2053
2054
0
    clear_time_cache(base);
2055
2056
0
    res = evsel->dispatch(base, tv_p);
2057
2058
0
    if (res == -1) {
2059
0
      event_debug(("%s: dispatch returned unsuccessfully.",
2060
0
        __func__));
2061
0
      retval = -1;
2062
0
      goto done;
2063
0
    }
2064
2065
0
    update_time_cache(base);
2066
2067
    /* Invoke check watchers after polling for events, and before
2068
     * processing them */
2069
0
    TAILQ_FOREACH(watcher, &base->watchers[EVWATCH_CHECK], next) {
2070
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
2071
0
      (*watcher->callback.check)(watcher, &check_info, watcher->arg);
2072
0
      EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2073
0
    }
2074
2075
0
    timeout_process(base);
2076
2077
0
    if (N_ACTIVE_CALLBACKS(base)) {
2078
0
      int n = event_process_active(base);
2079
0
      if ((flags & EVLOOP_ONCE)
2080
0
          && N_ACTIVE_CALLBACKS(base) == 0
2081
0
          && n != 0)
2082
0
        done = 1;
2083
0
    } else if (flags & EVLOOP_NONBLOCK)
2084
0
      done = 1;
2085
0
  }
2086
0
  event_debug(("%s: asked to terminate loop.", __func__));
2087
2088
0
done:
2089
0
  clear_time_cache(base);
2090
0
  base->running_loop = 0;
2091
2092
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2093
2094
0
  return (retval);
2095
0
}
2096
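/* Illustrative sketch, not part of event.c: the EVLOOP_* flags accepted by
 * event_base_loop() above.  poll_ready() is a hypothetical helper for driving
 * libevent from a foreign main loop without blocking. */
#include <event2/event.h>

static int
poll_ready(struct event_base *base)
{
  /* EVLOOP_ONCE             block until something fires, run it, return.
   * EVLOOP_NONBLOCK         never block; run only what is ready right now.
   * EVLOOP_NO_EXIT_ON_EMPTY keep looping even when no events are pending. */
  return event_base_loop(base, EVLOOP_NONBLOCK);
}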
2097
/* One-time callback to implement event_base_once: invokes the user callback,
2098
 * then deletes the allocated storage */
2099
static void
2100
event_once_cb(evutil_socket_t fd, short events, void *arg)
2101
0
{
2102
0
  struct event_once *eonce = arg;
2103
2104
0
  (*eonce->cb)(fd, events, eonce->arg);
2105
0
  EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
2106
0
  LIST_REMOVE(eonce, next_once);
2107
0
  EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
2108
0
  event_debug_unassign(&eonce->ev);
2109
0
  mm_free(eonce);
2110
0
}
2111
2112
/* not threadsafe; schedules the event once. */
2113
int
2114
event_once(evutil_socket_t fd, short events,
2115
    void (*callback)(evutil_socket_t, short, void *),
2116
    void *arg, const struct timeval *tv)
2117
0
{
2118
0
  return event_base_once(current_base, fd, events, callback, arg, tv);
2119
0
}
2120
2121
/* Schedules an event once */
2122
int
2123
event_base_once(struct event_base *base, evutil_socket_t fd, short events,
2124
    void (*callback)(evutil_socket_t, short, void *),
2125
    void *arg, const struct timeval *tv)
2126
0
{
2127
0
  struct event_once *eonce;
2128
0
  int res = 0;
2129
0
  int activate = 0;
2130
2131
0
  if (!base)
2132
0
    return (-1);
2133
2134
  /* We cannot support signals that just fire once, or persistent
2135
   * events. */
2136
0
  if (events & (EV_SIGNAL|EV_PERSIST))
2137
0
    return (-1);
2138
2139
0
  if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
2140
0
    return (-1);
2141
2142
0
  eonce->cb = callback;
2143
0
  eonce->arg = arg;
2144
2145
0
  if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
2146
0
    evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
2147
2148
0
    if (tv == NULL || ! evutil_timerisset(tv)) {
2149
      /* If the event is going to become active immediately,
2150
       * don't put it on the timeout queue.  This is one
2151
       * idiom for scheduling a callback, so let's make
2152
       * it fast (and order-preserving). */
2153
0
      activate = 1;
2154
0
    }
2155
0
  } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
2156
0
    events &= EV_READ|EV_WRITE|EV_CLOSED;
2157
2158
0
    event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
2159
0
  } else {
2160
    /* Bad event combination */
2161
0
    mm_free(eonce);
2162
0
    return (-1);
2163
0
  }
2164
2165
2166
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2167
0
  if (activate)
2168
0
    event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
2169
0
  else
2170
0
    res = event_add_nolock_(&eonce->ev, tv, 0);
2171
2172
0
  if (res != 0) {
2173
0
    mm_free(eonce);
2174
0
    return (res);
2175
0
  } else {
2176
0
    LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
2177
0
  }
2178
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2179
2180
0
  return (0);
2181
0
}
2182
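/* Illustrative sketch, not part of event.c: event_base_once() above is a
 * fire-and-forget callback; libevent allocates and frees the internal event,
 * so the caller never handles a struct event.  delayed_hello_cb is a
 * hypothetical name. */
#include <event2/event.h>
#include <stdio.h>

static void
delayed_hello_cb(evutil_socket_t fd, short what, void *arg)
{
  (void)fd; (void)what;
  printf("hello, %s\n", (const char *)arg);
}

int
main(void)
{
  struct event_base *base = event_base_new();
  struct timeval two_sec = { 2, 0 };

  /* Runs once, two seconds from now; no event_free() needed afterwards. */
  event_base_once(base, -1, EV_TIMEOUT, delayed_hello_cb, "world", &two_sec);
  event_base_dispatch(base);
  event_base_free(base);
  return 0;
}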
2183
int
2184
/* workaround for -Werror=maybe-uninitialized bug in gcc 11/12 */
2185
#if defined(__GNUC__) && (__GNUC__ == 11 || __GNUC__ == 12)
2186
__attribute__((noinline))
2187
#endif
2188
event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
2189
0
{
2190
0
  if (!base)
2191
0
    base = current_base;
2192
0
  if (arg == &event_self_cbarg_ptr_)
2193
0
    arg = ev;
2194
2195
0
  if (!(events & EV_SIGNAL))
2196
0
    event_debug_assert_socket_nonblocking_(fd);
2197
0
  event_debug_assert_not_added_(ev);
2198
2199
0
  ev->ev_base = base;
2200
2201
0
  ev->ev_callback = callback;
2202
0
  ev->ev_arg = arg;
2203
0
  ev->ev_fd = fd;
2204
0
  ev->ev_events = events;
2205
0
  ev->ev_res = 0;
2206
0
  ev->ev_flags = EVLIST_INIT;
2207
0
  ev->ev_ncalls = 0;
2208
0
  ev->ev_pncalls = NULL;
2209
2210
0
  if (events & EV_SIGNAL) {
2211
0
    if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
2212
0
      event_warnx("%s: EV_SIGNAL is not compatible with "
2213
0
          "EV_READ, EV_WRITE or EV_CLOSED", __func__);
2214
0
      return -1;
2215
0
    }
2216
0
    ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
2217
0
  } else {
2218
0
    if (events & EV_PERSIST) {
2219
0
      evutil_timerclear(&ev->ev_io_timeout);
2220
0
      ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
2221
0
    } else {
2222
0
      ev->ev_closure = EV_CLOSURE_EVENT;
2223
0
    }
2224
0
  }
2225
2226
0
  min_heap_elem_init_(ev);
2227
2228
0
  if (base != NULL) {
2229
    /* by default, we put new events into the middle priority */
2230
0
    ev->ev_pri = base->nactivequeues / 2;
2231
0
  }
2232
2233
0
  event_debug_note_setup_(ev);
2234
2235
0
  return 0;
2236
0
}
2237
2238
int
2239
event_base_set(struct event_base *base, struct event *ev)
2240
0
{
2241
  /* Only innocent events may be assigned to a different base */
2242
0
  if (ev->ev_flags != EVLIST_INIT)
2243
0
    return (-1);
2244
2245
0
  event_debug_assert_is_setup_(ev);
2246
2247
0
  ev->ev_base = base;
2248
0
  ev->ev_pri = base->nactivequeues/2;
2249
2250
0
  return (0);
2251
0
}
2252
2253
void
2254
event_set(struct event *ev, evutil_socket_t fd, short events,
2255
    void (*callback)(evutil_socket_t, short, void *), void *arg)
2256
0
{
2257
0
  int r;
2258
0
  r = event_assign(ev, current_base, fd, events, callback, arg);
2259
0
  EVUTIL_ASSERT(r == 0);
2260
0
}
2261
2262
void *
2263
event_self_cbarg(void)
2264
0
{
2265
0
  return &event_self_cbarg_ptr_;
2266
0
}
2267
2268
struct event *
2269
event_base_get_running_event(struct event_base *base)
2270
0
{
2271
0
  struct event *ev = NULL;
2272
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2273
0
  if (EVBASE_IN_THREAD(base)) {
2274
0
    struct event_callback *evcb = base->current_event;
2275
0
    if (evcb->evcb_flags & EVLIST_INIT)
2276
0
      ev = event_callback_to_event(evcb);
2277
0
  }
2278
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2279
0
  return ev;
2280
0
}
2281
2282
struct event *
2283
event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
2284
0
{
2285
0
  struct event *ev;
2286
0
  ev = mm_malloc(sizeof(struct event));
2287
0
  if (ev == NULL)
2288
0
    return (NULL);
2289
0
  if (event_assign(ev, base, fd, events, cb, arg) < 0) {
2290
0
    mm_free(ev);
2291
0
    return (NULL);
2292
0
  }
2293
2294
0
  return (ev);
2295
0
}
2296
2297
void
2298
event_free(struct event *ev)
2299
0
{
2300
  /* This is disabled so that events which have been finalized remain a
2301
   * valid target for event_free(); that's why the assert below is commented out. */
2302
  // event_debug_assert_is_setup_(ev);
2303
2304
  /* make sure that this event won't be coming back to haunt us. */
2305
0
  event_del(ev);
2306
0
  event_debug_note_teardown_(ev);
2307
0
  mm_free(ev);
2308
2309
0
}
2310
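/* Illustrative sketch, not part of event.c: the usual allocate/add/free cycle
 * around event_new() and event_free() above, using event_self_cbarg() so the
 * callback receives a pointer to its own struct event.  echo_read_cb and
 * watch_fd are hypothetical names. */
#include <event2/event.h>

static void
echo_read_cb(evutil_socket_t fd, short what, void *arg)
{
  struct event *self = arg;  /* filled in because of event_self_cbarg() */
  (void)fd; (void)what;
  /* ... read from fd; once finished with it, tear the event down: */
  event_free(self);  /* event_free() also deletes the event first */
}

static struct event *
watch_fd(struct event_base *base, evutil_socket_t fd)
{
  struct event *ev = event_new(base, fd, EV_READ | EV_PERSIST,
      echo_read_cb, event_self_cbarg());

  if (ev != NULL && event_add(ev, NULL) < 0) {
    event_free(ev);
    ev = NULL;
  }
  return ev;
}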
2311
void
2312
event_debug_unassign(struct event *ev)
2313
0
{
2314
0
  event_debug_assert_not_added_(ev);
2315
0
  event_debug_note_teardown_(ev);
2316
2317
0
  ev->ev_flags &= ~EVLIST_INIT;
2318
0
}
2319
2320
0
#define EVENT_FINALIZE_FREE_ 0x10000
2321
static int
2322
event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2323
0
{
2324
0
  ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
2325
0
      EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
2326
2327
0
  event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2328
0
  ev->ev_closure = closure;
2329
0
  ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
2330
0
  event_active_nolock_(ev, EV_FINALIZE, 1);
2331
0
  ev->ev_flags |= EVLIST_FINALIZING;
2332
0
  return 0;
2333
0
}
2334
2335
static int
2336
event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2337
0
{
2338
0
  int r;
2339
0
  struct event_base *base = ev->ev_base;
2340
0
  if (EVUTIL_FAILURE_CHECK(!base)) {
2341
0
    event_warnx("%s: event has no event_base set.", __func__);
2342
0
    return -1;
2343
0
  }
2344
2345
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2346
0
  r = event_finalize_nolock_(base, flags, ev, cb);
2347
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2348
0
  return r;
2349
0
}
2350
2351
int
2352
event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2353
0
{
2354
0
  return event_finalize_impl_(flags, ev, cb);
2355
0
}
2356
2357
int
2358
event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2359
0
{
2360
0
  return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
2361
0
}
2362
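/* Illustrative sketch, not part of event.c: event_free_finalize() above is
 * meant for teardown where a plain event_del()/event_free() could race with a
 * callback running on another thread.  The finalizer runs from the loop once
 * the event can no longer fire, and it receives the event's callback argument
 * (ev->ev_arg).  struct conn and conn_finalize_cb are hypothetical; read_ev
 * is assumed to have been created with the struct conn pointer as its
 * argument. */
#include <event2/event.h>
#include <stdlib.h>

struct conn {
  struct event *read_ev;
  /* ... other per-connection state ... */
};

static void
conn_finalize_cb(struct event *ev, void *arg)
{
  struct conn *c = arg;
  (void)ev;  /* the event itself is freed by libevent (the _free_ variant) */
  free(c);   /* now it is safe to drop the state the callback was using */
}

static void
conn_close(struct conn *c)
{
  /* Does not block; conn_finalize_cb runs later from the event loop. */
  event_free_finalize(0, c->read_ev, conn_finalize_cb);
}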
2363
void
2364
event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2365
0
{
2366
0
  struct event *ev = NULL;
2367
0
  if (evcb->evcb_flags & EVLIST_INIT) {
2368
0
    ev = event_callback_to_event(evcb);
2369
0
    event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2370
0
  } else {
2371
0
    event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
2372
0
  }
2373
2374
0
  evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
2375
0
  evcb->evcb_cb_union.evcb_cbfinalize = cb;
2376
0
  event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
2377
0
  evcb->evcb_flags |= EVLIST_FINALIZING;
2378
0
}
2379
2380
void
2381
event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2382
0
{
2383
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2384
0
  event_callback_finalize_nolock_(base, flags, evcb, cb);
2385
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2386
0
}
2387
2388
/** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
2389
 * callback will be invoked on *one of them*, after they have *all* been
2390
 * finalized. */
2391
int
2392
event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
2393
0
{
2394
0
  int n_pending = 0, i;
2395
2396
0
  if (base == NULL)
2397
0
    base = current_base;
2398
2399
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2400
2401
0
  event_debug(("%s: %d events finalizing", __func__, n_cbs));
2402
2403
  /* At most one can be currently executing; the rest we just
2404
   * cancel... But we always make sure that the finalize callback
2405
   * runs. */
2406
0
  for (i = 0; i < n_cbs; ++i) {
2407
0
    struct event_callback *evcb = evcbs[i];
2408
0
    if (evcb == base->current_event) {
2409
0
      event_callback_finalize_nolock_(base, 0, evcb, cb);
2410
0
      ++n_pending;
2411
0
    } else {
2412
0
      event_callback_cancel_nolock_(base, evcb, 0);
2413
0
    }
2414
0
  }
2415
2416
0
  if (n_pending == 0) {
2417
    /* Just do the first one. */
2418
0
    event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
2419
0
  }
2420
2421
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2422
0
  return 0;
2423
0
}
2424
2425
/*
2426
 * Sets the priority of an event - if an event is already active,
2427
 * changing the priority is going to fail.
2428
 */
2429
2430
int
2431
event_priority_set(struct event *ev, int pri)
2432
0
{
2433
0
  event_debug_assert_is_setup_(ev);
2434
2435
0
  if (ev->ev_flags & EVLIST_ACTIVE)
2436
0
    return (-1);
2437
0
  if (pri < 0 || pri >= ev->ev_base->nactivequeues)
2438
0
    return (-1);
2439
2440
0
  ev->ev_pri = pri;
2441
2442
0
  return (0);
2443
0
}
2444
2445
/*
2446
 * Checks if a specific event is pending or scheduled.
2447
 */
2448
2449
int
2450
event_pending(const struct event *ev, short event, struct timeval *tv)
2451
0
{
2452
0
  int flags = 0;
2453
2454
0
  if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2455
0
    event_warnx("%s: event has no event_base set.", __func__);
2456
0
    return 0;
2457
0
  }
2458
2459
0
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2460
0
  event_debug_assert_is_setup_(ev);
2461
2462
0
  if (ev->ev_flags & EVLIST_INSERTED)
2463
0
    flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2464
0
  if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2465
0
    flags |= ev->ev_res;
2466
0
  if (ev->ev_flags & EVLIST_TIMEOUT)
2467
0
    flags |= EV_TIMEOUT;
2468
2469
0
  event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
2470
2471
  /* See if there is a timeout that we should report */
2472
0
  if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2473
0
    struct timeval tmp = ev->ev_timeout;
2474
0
    tmp.tv_usec &= MICROSECONDS_MASK;
2475
    /* correctly remap to real time */
2476
0
    evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
2477
0
  }
2478
2479
0
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2480
2481
0
  return (flags & event);
2482
0
}
2483
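/* Illustrative sketch, not part of event.c: querying an event with
 * event_pending() above.  When the event is pending on a timeout and a
 * timeval is supplied, the expiry is reported remapped to the real clock.
 * describe_event is a hypothetical helper. */
#include <event2/event.h>
#include <stdio.h>

static void
describe_event(const struct event *ev)
{
  struct timeval expiry;
  int what = event_pending(ev, EV_READ | EV_WRITE | EV_TIMEOUT, &expiry);

  if (what & EV_READ)
    printf("waiting for readability\n");
  if (what & EV_WRITE)
    printf("waiting for writability\n");
  if (what & EV_TIMEOUT)
    printf("times out at %ld.%06ld\n",
        (long)expiry.tv_sec, (long)expiry.tv_usec);
  if (what == 0)
    printf("not pending at all\n");
}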
2484
int
2485
event_initialized(const struct event *ev)
2486
0
{
2487
0
  if (!(ev->ev_flags & EVLIST_INIT))
2488
0
    return 0;
2489
2490
0
  return 1;
2491
0
}
2492
2493
void
2494
event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2495
0
{
2496
0
  event_debug_assert_is_setup_(event);
2497
2498
0
  if (base_out)
2499
0
    *base_out = event->ev_base;
2500
0
  if (fd_out)
2501
0
    *fd_out = event->ev_fd;
2502
0
  if (events_out)
2503
0
    *events_out = event->ev_events;
2504
0
  if (callback_out)
2505
0
    *callback_out = event->ev_callback;
2506
0
  if (arg_out)
2507
0
    *arg_out = event->ev_arg;
2508
0
}
2509
2510
size_t
2511
event_get_struct_event_size(void)
2512
0
{
2513
0
  return sizeof(struct event);
2514
0
}
2515
2516
evutil_socket_t
2517
event_get_fd(const struct event *ev)
2518
0
{
2519
0
  event_debug_assert_is_setup_(ev);
2520
0
  return ev->ev_fd;
2521
0
}
2522
2523
struct event_base *
2524
event_get_base(const struct event *ev)
2525
0
{
2526
0
  event_debug_assert_is_setup_(ev);
2527
0
  return ev->ev_base;
2528
0
}
2529
2530
short
2531
event_get_events(const struct event *ev)
2532
0
{
2533
0
  event_debug_assert_is_setup_(ev);
2534
0
  return ev->ev_events;
2535
0
}
2536
2537
event_callback_fn
2538
event_get_callback(const struct event *ev)
2539
0
{
2540
0
  event_debug_assert_is_setup_(ev);
2541
0
  return ev->ev_callback;
2542
0
}
2543
2544
void *
2545
event_get_callback_arg(const struct event *ev)
2546
0
{
2547
0
  event_debug_assert_is_setup_(ev);
2548
0
  return ev->ev_arg;
2549
0
}
2550
2551
int
2552
event_get_priority(const struct event *ev)
2553
0
{
2554
0
  event_debug_assert_is_setup_(ev);
2555
0
  return ev->ev_pri;
2556
0
}
2557
2558
int
2559
event_add(struct event *ev, const struct timeval *tv)
2560
0
{
2561
0
  int res;
2562
2563
0
  if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2564
0
    event_warnx("%s: event has no event_base set.", __func__);
2565
0
    return -1;
2566
0
  }
2567
2568
0
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2569
2570
0
  res = event_add_nolock_(ev, tv, 0);
2571
2572
0
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2573
2574
0
  return (res);
2575
0
}
2576
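/* Illustrative sketch, not part of event.c: event_add() above with both an fd
 * condition and a timeout, i.e. "readable within five seconds, whichever
 * comes first".  on_read_or_timeout and wait_for_data are hypothetical names;
 * the caller owns the returned event and must event_free() it eventually. */
#include <event2/event.h>

static void
on_read_or_timeout(evutil_socket_t fd, short what, void *arg)
{
  (void)fd; (void)arg;
  if (what & EV_TIMEOUT) {
    /* nothing arrived before the deadline */
  } else if (what & EV_READ) {
    /* data is ready on fd */
  }
}

static struct event *
wait_for_data(struct event_base *base, evutil_socket_t fd)
{
  /* Non-persistent: fires at most once, with EV_READ or EV_TIMEOUT. */
  struct timeval five_sec = { 5, 0 };
  struct event *ev = event_new(base, fd, EV_READ, on_read_or_timeout, NULL);

  if (ev == NULL)
    return NULL;
  if (event_add(ev, &five_sec) < 0) {
    event_free(ev);
    return NULL;
  }
  return ev;
}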
2577
/* Helper callback: wake an event_base from another thread.  This version
2578
 * works by writing a byte to one end of a socketpair, so that the event_base
2579
 * listening on the other end will wake up as the corresponding event
2580
 * triggers */
2581
static int
2582
evthread_notify_base_default(struct event_base *base)
2583
0
{
2584
0
  char buf[1];
2585
0
  ev_ssize_t r;
2586
0
  buf[0] = (char) 0;
2587
#ifdef _WIN32
2588
  r = send(base->th_notify_fd[1], buf, 1, 0);
2589
#else
2590
0
  r = write(base->th_notify_fd[1], buf, 1);
2591
0
#endif
2592
0
  return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
2593
0
}
2594
2595
#ifdef EVENT__HAVE_EVENTFD
2596
/* Helper callback: wake an event_base from another thread.  This version
2597
 * assumes that you have a working eventfd() implementation. */
2598
static int
2599
evthread_notify_base_eventfd(struct event_base *base)
2600
0
{
2601
0
  int efd = base->th_notify_fd[0];
2602
0
  eventfd_t val;
2603
0
  int ret;
2604
0
  for (val=1;;val=1) {
2605
0
    ret = eventfd_write(efd, val);
2606
0
    if (ret < 0) {
2607
      // When EAGAIN occurs, the eventfd counter hits the maximum value of the unsigned 64-bit.
2608
      // We need to first drain the eventfd and then write again.
2609
      //
2610
      // Check out https://man7.org/linux/man-pages/man2/eventfd.2.html for details.
2611
0
      if (errno == EAGAIN) {
2612
        // The counter was drained (possibly by another thread); retry the write.
2613
0
        if (eventfd_read(efd, &val) == 0 || errno == EAGAIN) {
2614
0
          continue;
2615
0
        }
2616
0
      }
2617
      // An unexpected error occurred; give up.
2618
0
      ret = -1;
2619
0
    }
2620
0
    break;
2621
0
  }
2622
2623
0
  return ret;
2624
0
}
2625
#endif
2626
2627
2628
/** Tell the thread currently running the event_loop for base (if any) that it
2629
 * needs to stop waiting in its dispatch function (if it is) and process all
2630
 * active callbacks. */
2631
static int
2632
evthread_notify_base(struct event_base *base)
2633
0
{
2634
0
  EVENT_BASE_ASSERT_LOCKED(base);
2635
0
  if (!base->th_notify_fn)
2636
0
    return -1;
2637
0
  if (base->is_notify_pending)
2638
0
    return 0;
2639
0
  base->is_notify_pending = 1;
2640
0
  return base->th_notify_fn(base);
2641
0
}
2642
2643
/* Implementation function to remove a timeout on a currently pending event.
2644
 */
2645
int
2646
event_remove_timer_nolock_(struct event *ev)
2647
0
{
2648
0
  struct event_base *base = ev->ev_base;
2649
2650
0
  EVENT_BASE_ASSERT_LOCKED(base);
2651
0
  event_debug_assert_is_setup_(ev);
2652
2653
0
  event_debug(("event_remove_timer_nolock: event: %p", (void *)ev));
2654
2655
  /* If it's not pending on a timeout, we don't need to do anything. */
2656
0
  if (ev->ev_flags & EVLIST_TIMEOUT) {
2657
0
    event_queue_remove_timeout(base, ev);
2658
0
    evutil_timerclear(&ev->ev_io_timeout);
2659
0
  }
2660
2661
0
  return (0);
2662
0
}
2663
2664
int
2665
event_remove_timer(struct event *ev)
2666
0
{
2667
0
  int res;
2668
2669
0
  if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2670
0
    event_warnx("%s: event has no event_base set.", __func__);
2671
0
    return -1;
2672
0
  }
2673
2674
0
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2675
2676
0
  res = event_remove_timer_nolock_(ev);
2677
2678
0
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2679
2680
0
  return (res);
2681
0
}
2682
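/* Illustrative sketch, not part of event.c: event_remove_timer() above drops
 * only the timeout part of a pending event, leaving any fd or signal part in
 * place.  cancel_deadline is a hypothetical helper. */
#include <event2/event.h>

static void
cancel_deadline(struct event *read_ev_with_timeout)
{
  /* The event keeps waiting for EV_READ but will no longer fire with
   * EV_TIMEOUT. */
  event_remove_timer(read_ev_with_timeout);
}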
2683
/* Implementation function to add an event.  Works just like event_add,
2684
 * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
2685
 * we treat tv as an absolute time, not as an interval to add to the current
2686
 * time */
2687
int
2688
event_add_nolock_(struct event *ev, const struct timeval *tv,
2689
    int tv_is_absolute)
2690
0
{
2691
0
  struct event_base *base = ev->ev_base;
2692
0
  int res = 0;
2693
0
  int notify = 0;
2694
2695
0
  EVENT_BASE_ASSERT_LOCKED(base);
2696
0
  event_debug_assert_is_setup_(ev);
2697
2698
0
  event_debug((
2699
0
     "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
2700
0
     (void *)ev,
2701
0
     EV_SOCK_ARG(ev->ev_fd),
2702
0
     ev->ev_events & EV_READ ? "EV_READ " : " ",
2703
0
     ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2704
0
     ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
2705
0
     tv ? "EV_TIMEOUT " : " ",
2706
0
     (void *)ev->ev_callback));
2707
2708
0
  EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2709
2710
0
  if (ev->ev_flags & EVLIST_FINALIZING) {
2711
    /* XXXX debug */
2712
0
    return (-1);
2713
0
  }
2714
2715
  /*
2716
   * prepare for timeout insertion further below; if we get a
2717
   * failure on any step, we should not change any state.
2718
   */
2719
0
  if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2720
0
    if (min_heap_reserve_(&base->timeheap,
2721
0
      1 + min_heap_size_(&base->timeheap)) == -1)
2722
0
      return (-1);  /* ENOMEM == errno */
2723
0
  }
2724
2725
  /* If the main thread is currently executing a signal event's
2726
   * callback, and we are not the main thread, then we want to wait
2727
   * until the callback is done before we mess with the event, or else
2728
   * we can race on ev_ncalls and ev_pncalls below. */
2729
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
2730
0
  if (base->current_event == event_to_event_callback(ev) &&
2731
0
      (ev->ev_events & EV_SIGNAL)
2732
0
      && !EVBASE_IN_THREAD(base)) {
2733
0
    ++base->current_event_waiters;
2734
0
    EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2735
0
  }
2736
0
#endif
2737
2738
0
  if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
2739
0
      !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2740
0
    if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2741
0
      res = evmap_io_add_(base, ev->ev_fd, ev);
2742
0
    else if (ev->ev_events & EV_SIGNAL)
2743
0
      res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
2744
0
    if (res != -1)
2745
0
      event_queue_insert_inserted(base, ev);
2746
0
    if (res == 1) {
2747
      /* evmap says we need to notify the main thread. */
2748
0
      notify = 1;
2749
0
      res = 0;
2750
0
    }
2751
0
  }
2752
2753
  /*
2754
   * we should change the timeout state only if the previous event
2755
   * addition succeeded.
2756
   */
2757
0
  if (res != -1 && tv != NULL) {
2758
0
    struct timeval now;
2759
0
    int common_timeout;
2760
#ifdef USE_REINSERT_TIMEOUT
2761
    int was_common;
2762
    int old_timeout_idx;
2763
#endif
2764
2765
    /*
2766
     * for persistent timeout events, we remember the
2767
     * timeout value and re-add the event.
2768
     *
2769
     * If tv_is_absolute, this was already set.
2770
     */
2771
0
    if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
2772
0
      ev->ev_io_timeout = *tv;
2773
2774
0
#ifndef USE_REINSERT_TIMEOUT
2775
0
    if (ev->ev_flags & EVLIST_TIMEOUT) {
2776
0
      event_queue_remove_timeout(base, ev);
2777
0
    }
2778
0
#endif
2779
2780
    /* Check if it is active due to a timeout.  Rescheduling
2781
     * this timeout before the callback can be executed
2782
     * removes it from the active list. */
2783
0
    if ((ev->ev_flags & EVLIST_ACTIVE) &&
2784
0
        (ev->ev_res & EV_TIMEOUT)) {
2785
0
      if (ev->ev_events & EV_SIGNAL) {
2786
        /* See if we are just active executing
2787
         * this event in a loop
2788
         */
2789
0
        if (ev->ev_ncalls && ev->ev_pncalls) {
2790
          /* Abort loop */
2791
0
          *ev->ev_pncalls = 0;
2792
0
        }
2793
0
      }
2794
2795
0
      event_queue_remove_active(base, event_to_event_callback(ev));
2796
0
    }
2797
2798
0
    gettime(base, &now);
2799
2800
0
    common_timeout = is_common_timeout(tv, base);
2801
#ifdef USE_REINSERT_TIMEOUT
2802
    was_common = is_common_timeout(&ev->ev_timeout, base);
2803
    old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2804
#endif
2805
2806
0
    if (tv_is_absolute) {
2807
0
      ev->ev_timeout = *tv;
2808
0
    } else if (common_timeout) {
2809
0
      struct timeval tmp = *tv;
2810
0
      tmp.tv_usec &= MICROSECONDS_MASK;
2811
0
      evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2812
0
      ev->ev_timeout.tv_usec |=
2813
0
          (tv->tv_usec & ~MICROSECONDS_MASK);
2814
0
    } else {
2815
0
      evutil_timeradd(&now, tv, &ev->ev_timeout);
2816
0
    }
2817
2818
0
    event_debug((
2819
0
       "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2820
0
       (void *)ev, (int)tv->tv_sec, (int)tv->tv_usec, (void *)ev->ev_callback));
2821
2822
#ifdef USE_REINSERT_TIMEOUT
2823
    event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2824
#else
2825
0
    event_queue_insert_timeout(base, ev);
2826
0
#endif
2827
2828
0
    if (common_timeout) {
2829
0
      struct common_timeout_list *ctl =
2830
0
          get_common_timeout_list(base, &ev->ev_timeout);
2831
0
      if (ev == TAILQ_FIRST(&ctl->events)) {
2832
0
        common_timeout_schedule(ctl, &now, ev);
2833
0
      }
2834
0
    } else {
2835
0
      struct event* top = NULL;
2836
      /* See if the earliest timeout is now earlier than it
2837
       * was before: if so, we will need to tell the main
2838
       * thread to wake up earlier than it would otherwise.
2839
       * We double check the timeout of the top element to
2840
       * handle time distortions due to system suspension.
2841
       */
2842
0
      if (min_heap_elt_is_top_(ev))
2843
0
        notify = 1;
2844
0
      else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
2845
0
           evutil_timercmp(&top->ev_timeout, &now, <))
2846
0
        notify = 1;
2847
0
    }
2848
0
  }
2849
2850
  /* if we are not in the right thread, we need to wake up the loop */
2851
0
  if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2852
0
    evthread_notify_base(base);
2853
2854
0
  event_debug_note_add_(ev);
2855
2856
0
  return (res);
2857
0
}
2858
2859
static int
2860
event_del_(struct event *ev, int blocking)
2861
0
{
2862
0
  int res;
2863
0
  struct event_base *base = ev->ev_base;
2864
2865
0
  if (EVUTIL_FAILURE_CHECK(!base)) {
2866
0
    event_warnx("%s: event has no event_base set.", __func__);
2867
0
    return -1;
2868
0
  }
2869
2870
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2871
0
  res = event_del_nolock_(ev, blocking);
2872
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2873
2874
0
  return (res);
2875
0
}
2876
2877
int
2878
event_del(struct event *ev)
2879
0
{
2880
0
  return event_del_(ev, EVENT_DEL_AUTOBLOCK);
2881
0
}
2882
2883
int
2884
event_del_block(struct event *ev)
2885
0
{
2886
0
  return event_del_(ev, EVENT_DEL_BLOCK);
2887
0
}
2888
2889
int
2890
event_del_noblock(struct event *ev)
2891
0
{
2892
0
  return event_del_(ev, EVENT_DEL_NOBLOCK);
2893
0
}
2894
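/* Illustrative sketch, not part of event.c: the event_del() variants above
 * differ only in how they treat a callback that is currently running on
 * another thread.  stop_watching is a hypothetical helper called from a
 * thread that is not running the event loop. */
#include <event2/event.h>
#include <stdlib.h>

static void
stop_watching(struct event *ev, void *arg_to_free)
{
  /* event_del_block() returns only after any in-flight callback for ev has
   * finished, so freeing its argument afterwards cannot race with it.
   * (event_del_noblock() would return immediately instead.) */
  event_del_block(ev);
  free(arg_to_free);
}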
2895
/** Helper for event_del: always called with th_base_lock held.
2896
 *
2897
 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2898
 * EVEN_IF_FINALIZING} values. See those for more information.
2899
 */
2900
int
2901
event_del_nolock_(struct event *ev, int blocking)
2902
0
{
2903
0
  struct event_base *base;
2904
0
  int res = 0, notify = 0;
2905
2906
0
  event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2907
0
    (void *)ev, EV_SOCK_ARG(ev->ev_fd), (void *)ev->ev_callback));
2908
2909
  /* An event without a base has not been added */
2910
0
  if (ev->ev_base == NULL)
2911
0
    return (-1);
2912
2913
0
  EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2914
2915
0
  if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
2916
0
    if (ev->ev_flags & EVLIST_FINALIZING) {
2917
      /* XXXX Debug */
2918
0
      return 0;
2919
0
    }
2920
0
  }
2921
2922
0
  base = ev->ev_base;
2923
2924
0
  EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2925
2926
  /* See if we are currently executing this signal event's callback in a loop */
2927
0
  if (ev->ev_events & EV_SIGNAL) {
2928
0
    if (ev->ev_ncalls && ev->ev_pncalls) {
2929
      /* Abort loop */
2930
0
      *ev->ev_pncalls = 0;
2931
0
    }
2932
0
  }
2933
2934
0
  if (ev->ev_flags & EVLIST_TIMEOUT) {
2935
    /* Notify the base if this was the minimal timeout */
2936
0
    if (min_heap_top_(&base->timeheap) == ev)
2937
0
      notify = 1;
2938
0
    event_queue_remove_timeout(base, ev);
2939
0
  }
2940
2941
0
  if (ev->ev_flags & EVLIST_ACTIVE)
2942
0
    event_queue_remove_active(base, event_to_event_callback(ev));
2943
0
  else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
2944
0
    event_queue_remove_active_later(base, event_to_event_callback(ev));
2945
2946
0
  if (ev->ev_flags & EVLIST_INSERTED) {
2947
0
    event_queue_remove_inserted(base, ev);
2948
0
    if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2949
0
      res = evmap_io_del_(base, ev->ev_fd, ev);
2950
0
    else
2951
0
      res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
2952
0
    if (res == 1) {
2953
      /* evmap says we need to notify the main thread. */
2954
0
      notify = 1;
2955
0
      res = 0;
2956
0
    }
2957
    /* If we do not have events, let's notify event base so it can
2958
     * exit without waiting */
2959
0
    if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base))
2960
0
      notify = 1;
2961
0
  }
2962
2963
  /* if we are not in the right thread, we need to wake up the loop */
2964
0
  if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2965
0
    evthread_notify_base(base);
2966
2967
0
  event_debug_note_del_(ev);
2968
2969
  /* If the main thread is currently executing this event's callback,
2970
   * and we are not the main thread, then we want to wait until the
2971
   * callback is done before returning. That way, when this function
2972
   * returns, it will be safe to free the user-supplied argument.
2973
   */
2974
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
2975
0
  if (blocking != EVENT_DEL_NOBLOCK &&
2976
0
      base->current_event == event_to_event_callback(ev) &&
2977
0
      !EVBASE_IN_THREAD(base) &&
2978
0
      (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
2979
0
    ++base->current_event_waiters;
2980
0
    EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2981
0
  }
2982
0
#endif
2983
2984
0
  return (res);
2985
0
}
2986
2987
void
2988
event_active(struct event *ev, int res, short ncalls)
2989
0
{
2990
0
  if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2991
0
    event_warnx("%s: event has no event_base set.", __func__);
2992
0
    return;
2993
0
  }
2994
2995
0
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2996
2997
0
  event_debug_assert_is_setup_(ev);
2998
2999
0
  event_active_nolock_(ev, res, ncalls);
3000
3001
0
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
3002
0
}
3003
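/* Illustrative sketch, not part of event.c: event_active() above injects a
 * result into an event by hand, without any fd or timer having fired.
 * wakeup_cb is a hypothetical name; with locking enabled this may also be
 * called from another thread to poke a running loop. */
#include <event2/event.h>
#include <stdio.h>

static void
wakeup_cb(evutil_socket_t fd, short what, void *arg)
{
  (void)fd; (void)arg;
  printf("manually activated with events 0x%x\n", (int)what);
}

int
main(void)
{
  struct event_base *base = event_base_new();
  /* fd = -1 and no I/O events: this event only runs when activated. */
  struct event *ev = event_new(base, -1, 0, wakeup_cb, NULL);

  event_active(ev, EV_READ, 0);  /* queue the callback with EV_READ set */
  event_base_dispatch(base);     /* runs wakeup_cb once, then returns */

  event_free(ev);
  event_base_free(base);
  return 0;
}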
3004
3005
void
3006
event_active_nolock_(struct event *ev, int res, short ncalls)
3007
0
{
3008
0
  struct event_base *base;
3009
3010
0
  event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
3011
0
    (void *)ev, EV_SOCK_ARG(ev->ev_fd), (int)res, (void *)ev->ev_callback));
3012
3013
0
  base = ev->ev_base;
3014
0
  EVENT_BASE_ASSERT_LOCKED(base);
3015
3016
0
  if (ev->ev_flags & EVLIST_FINALIZING) {
3017
    /* XXXX debug */
3018
0
    return;
3019
0
  }
3020
3021
0
  switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
3022
0
  default:
3023
0
  case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
3024
0
    EVUTIL_ASSERT(0);
3025
0
    break;
3026
0
  case EVLIST_ACTIVE:
3027
    /* We get different kinds of events, add them together */
3028
0
    ev->ev_res |= res;
3029
0
    return;
3030
0
  case EVLIST_ACTIVE_LATER:
3031
0
    ev->ev_res |= res;
3032
0
    break;
3033
0
  case 0:
3034
0
    ev->ev_res = res;
3035
0
    break;
3036
0
  }
3037
3038
0
  if (ev->ev_pri < base->event_running_priority)
3039
0
    base->event_continue = 1;
3040
3041
0
  if (ev->ev_events & EV_SIGNAL) {
3042
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
3043
0
    if (base->current_event == event_to_event_callback(ev) &&
3044
0
        !EVBASE_IN_THREAD(base)) {
3045
0
      ++base->current_event_waiters;
3046
0
      EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
3047
0
    }
3048
0
#endif
3049
0
    ev->ev_ncalls = ncalls;
3050
0
    ev->ev_pncalls = NULL;
3051
0
  }
3052
3053
0
  event_callback_activate_nolock_(base, event_to_event_callback(ev));
3054
0
}
3055
3056
void
3057
event_active_later_(struct event *ev, int res)
3058
0
{
3059
0
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
3060
0
  event_active_later_nolock_(ev, res);
3061
0
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
3062
0
}
3063
3064
void
3065
event_active_later_nolock_(struct event *ev, int res)
3066
0
{
3067
0
  struct event_base *base = ev->ev_base;
3068
0
  EVENT_BASE_ASSERT_LOCKED(base);
3069
3070
0
  if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
3071
    /* We get different kinds of events, add them together */
3072
0
    ev->ev_res |= res;
3073
0
    return;
3074
0
  }
3075
3076
0
  ev->ev_res = res;
3077
3078
0
  event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
3079
0
}
3080
3081
int
3082
event_callback_activate_(struct event_base *base,
3083
    struct event_callback *evcb)
3084
0
{
3085
0
  int r;
3086
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3087
0
  r = event_callback_activate_nolock_(base, evcb);
3088
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3089
0
  return r;
3090
0
}
3091
3092
int
3093
event_callback_activate_nolock_(struct event_base *base,
3094
    struct event_callback *evcb)
3095
0
{
3096
0
  int r = 1;
3097
3098
0
  if (evcb->evcb_flags & EVLIST_FINALIZING)
3099
0
    return 0;
3100
3101
0
  switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
3102
0
  default:
3103
0
    EVUTIL_ASSERT(0);
3104
0
    EVUTIL_FALLTHROUGH;
3105
0
  case EVLIST_ACTIVE_LATER:
3106
0
    event_queue_remove_active_later(base, evcb);
3107
0
    r = 0;
3108
0
    break;
3109
0
  case EVLIST_ACTIVE:
3110
0
    return 0;
3111
0
  case 0:
3112
0
    break;
3113
0
  }
3114
3115
0
  event_queue_insert_active(base, evcb);
3116
3117
0
  if (EVBASE_NEED_NOTIFY(base))
3118
0
    evthread_notify_base(base);
3119
3120
0
  return r;
3121
0
}
3122
3123
int
3124
event_callback_activate_later_nolock_(struct event_base *base,
3125
    struct event_callback *evcb)
3126
0
{
3127
0
  if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
3128
0
    return 0;
3129
3130
0
  event_queue_insert_active_later(base, evcb);
3131
0
  if (EVBASE_NEED_NOTIFY(base))
3132
0
    evthread_notify_base(base);
3133
0
  return 1;
3134
0
}
3135
3136
void
3137
event_callback_init_(struct event_base *base,
3138
    struct event_callback *cb)
3139
0
{
3140
0
  memset(cb, 0, sizeof(*cb));
3141
0
  cb->evcb_pri = base->nactivequeues - 1;
3142
0
}
3143
3144
int
3145
event_callback_cancel_(struct event_base *base,
3146
    struct event_callback *evcb)
3147
0
{
3148
0
  int r;
3149
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3150
0
  r = event_callback_cancel_nolock_(base, evcb, 0);
3151
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3152
0
  return r;
3153
0
}
3154
3155
int
3156
event_callback_cancel_nolock_(struct event_base *base,
3157
    struct event_callback *evcb, int even_if_finalizing)
3158
0
{
3159
0
  if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
3160
0
    return 0;
3161
3162
0
  if (evcb->evcb_flags & EVLIST_INIT)
3163
0
    return event_del_nolock_(event_callback_to_event(evcb),
3164
0
        even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
3165
3166
0
  switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
3167
0
  default:
3168
0
  case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
3169
0
    EVUTIL_ASSERT(0);
3170
0
    break;
3171
0
  case EVLIST_ACTIVE:
3172
    /* We get different kinds of events, add them together */
3173
0
    event_queue_remove_active(base, evcb);
3174
0
    return 0;
3175
0
  case EVLIST_ACTIVE_LATER:
3176
0
    event_queue_remove_active_later(base, evcb);
3177
0
    break;
3178
0
  case 0:
3179
0
    break;
3180
0
  }
3181
3182
0
  return 0;
3183
0
}
3184
3185
void
3186
event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
3187
0
{
3188
0
  memset(cb, 0, sizeof(*cb));
3189
0
  cb->evcb_cb_union.evcb_selfcb = fn;
3190
0
  cb->evcb_arg = arg;
3191
0
  cb->evcb_pri = priority;
3192
0
  cb->evcb_closure = EV_CLOSURE_CB_SELF;
3193
0
}
3194
3195
void
3196
event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
3197
0
{
3198
0
  cb->evcb_pri = priority;
3199
0
}
3200
3201
void
3202
event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
3203
0
{
3204
0
  if (!base)
3205
0
    base = current_base;
3206
0
  event_callback_cancel_(base, cb);
3207
0
}
3208
3209
0
#define MAX_DEFERREDS_QUEUED 32
3210
int
3211
event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
3212
0
{
3213
0
  int r = 1;
3214
0
  if (!base)
3215
0
    base = current_base;
3216
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3217
0
  if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3218
0
    r = event_callback_activate_later_nolock_(base, cb);
3219
0
  } else {
3220
0
    r = event_callback_activate_nolock_(base, cb);
3221
0
    if (r) {
3222
0
      ++base->n_deferreds_queued;
3223
0
    }
3224
0
  }
3225
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3226
0
  return r;
3227
0
}
3228
3229
static int
3230
timeout_next(struct event_base *base, struct timeval **tv_p)
3231
0
{
3232
  /* Caller must hold th_base_lock */
3233
0
  struct timeval now;
3234
0
  struct event *ev;
3235
0
  struct timeval *tv = *tv_p;
3236
0
  int res = 0;
3237
3238
0
  ev = min_heap_top_(&base->timeheap);
3239
3240
0
  if (ev == NULL) {
3241
    /* if no time-based events are active wait for I/O */
3242
0
    *tv_p = NULL;
3243
0
    goto out;
3244
0
  }
3245
3246
0
  if (gettime(base, &now) == -1) {
3247
0
    res = -1;
3248
0
    goto out;
3249
0
  }
3250
3251
0
  if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
3252
0
    evutil_timerclear(tv);
3253
0
    goto out;
3254
0
  }
3255
3256
0
  evutil_timersub(&ev->ev_timeout, &now, tv);
3257
3258
0
  EVUTIL_ASSERT(tv->tv_sec >= 0);
3259
0
  EVUTIL_ASSERT(tv->tv_usec >= 0);
3260
0
  event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", (void *)ev, (int)tv->tv_sec, (int)tv->tv_usec));
3261
3262
0
out:
3263
0
  return (res);
3264
0
}
3265
3266
/* Activate every event whose timeout has elapsed. */
3267
static void
3268
timeout_process(struct event_base *base)
3269
0
{
3270
  /* Caller must hold lock. */
3271
0
  struct timeval now;
3272
0
  struct event *ev;
3273
3274
0
  if (min_heap_empty_(&base->timeheap)) {
3275
0
    return;
3276
0
  }
3277
3278
0
  gettime(base, &now);
3279
3280
0
  while ((ev = min_heap_top_(&base->timeheap))) {
3281
0
    int was_active = ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER);
3282
3283
0
    if (evutil_timercmp(&ev->ev_timeout, &now, >))
3284
0
      break;
3285
3286
0
    if (!was_active)
3287
0
      event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
3288
0
    else
3289
0
      event_queue_remove_timeout(base, ev);
3290
3291
0
    event_debug(("timeout_process: event: %p, call %p (was active: %i)",
3292
0
       (void *)ev, (void *)ev->ev_callback, was_active));
3293
0
    event_active_nolock_(ev, EV_TIMEOUT, 1);
3294
0
  }
3295
0
}
3296
3297
#ifndef MAX
3298
0
#define MAX(a,b) (((a)>(b))?(a):(b))
3299
#endif
3300
3301
0
#define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3302
3303
/* These are a fancy way to spell
3304
     if (~flags & EVLIST_INTERNAL)
3305
         base->event_count--/++;
3306
*/
3307
#define DECR_EVENT_COUNT(base,flags) \
3308
0
  ((base)->event_count -= !((flags) & EVLIST_INTERNAL))
3309
0
#define INCR_EVENT_COUNT(base,flags) do {         \
3310
0
  ((base)->event_count += !((flags) & EVLIST_INTERNAL));     \
3311
0
  MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);   \
3312
0
} while (0)
3313
3314
static void
3315
event_queue_remove_inserted(struct event_base *base, struct event *ev)
3316
0
{
3317
0
  EVENT_BASE_ASSERT_LOCKED(base);
3318
0
  if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
3319
0
    event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3320
0
                   (void *)ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
3321
0
    return;
3322
0
  }
3323
0
  DECR_EVENT_COUNT(base, ev->ev_flags);
3324
0
  ev->ev_flags &= ~EVLIST_INSERTED;
3325
0
}
3326
static void
3327
event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
3328
0
{
3329
0
  EVENT_BASE_ASSERT_LOCKED(base);
3330
0
  if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
3331
0
    event_errx(1, "%s: %p not on queue %x", __func__,
3332
0
                          (void *)evcb, EVLIST_ACTIVE);
3333
0
    return;
3334
0
  }
3335
0
  DECR_EVENT_COUNT(base, evcb->evcb_flags);
3336
0
  evcb->evcb_flags &= ~EVLIST_ACTIVE;
3337
0
  base->event_count_active--;
3338
3339
0
  TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
3340
0
      evcb, evcb_active_next);
3341
0
}
3342
static void
3343
event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
3344
0
{
3345
0
  EVENT_BASE_ASSERT_LOCKED(base);
3346
0
  if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
3347
0
    event_errx(1, "%s: %p not on queue %x", __func__,
3348
0
                          (void *)evcb, EVLIST_ACTIVE_LATER);
3349
0
    return;
3350
0
  }
3351
0
  DECR_EVENT_COUNT(base, evcb->evcb_flags);
3352
0
  evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
3353
0
  base->event_count_active--;
3354
3355
0
  TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3356
0
}
3357
static void
3358
event_queue_remove_timeout(struct event_base *base, struct event *ev)
3359
0
{
3360
0
  EVENT_BASE_ASSERT_LOCKED(base);
3361
0
  if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
3362
0
    event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3363
0
                   (void *)ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
3364
0
    return;
3365
0
  }
3366
0
  DECR_EVENT_COUNT(base, ev->ev_flags);
3367
0
  ev->ev_flags &= ~EVLIST_TIMEOUT;
3368
3369
0
  if (is_common_timeout(&ev->ev_timeout, base)) {
3370
0
    struct common_timeout_list *ctl =
3371
0
        get_common_timeout_list(base, &ev->ev_timeout);
3372
0
    TAILQ_REMOVE(&ctl->events, ev,
3373
0
        ev_timeout_pos.ev_next_with_common_timeout);
3374
0
  } else {
3375
0
    min_heap_erase_(&base->timeheap, ev);
3376
0
  }
3377
0
}
3378
3379
#ifdef USE_REINSERT_TIMEOUT
3380
/* Remove and reinsert 'ev' into the timeout queue. */
3381
static void
3382
event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
3383
    int was_common, int is_common, int old_timeout_idx)
3384
{
3385
  struct common_timeout_list *ctl;
3386
  if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
3387
    event_queue_insert_timeout(base, ev);
3388
    return;
3389
  }
3390
3391
  switch ((was_common<<1) | is_common) {
3392
  case 3: /* Changing from one common timeout to another */
3393
    ctl = base->common_timeout_queues[old_timeout_idx];
3394
    TAILQ_REMOVE(&ctl->events, ev,
3395
        ev_timeout_pos.ev_next_with_common_timeout);
3396
    ctl = get_common_timeout_list(base, &ev->ev_timeout);
3397
    insert_common_timeout_inorder(ctl, ev);
3398
    break;
3399
  case 2: /* Was common; is no longer common */
3400
    ctl = base->common_timeout_queues[old_timeout_idx];
3401
    TAILQ_REMOVE(&ctl->events, ev,
3402
        ev_timeout_pos.ev_next_with_common_timeout);
3403
    min_heap_push_(&base->timeheap, ev);
3404
    break;
3405
  case 1: /* Wasn't common; has become common. */
3406
    min_heap_erase_(&base->timeheap, ev);
3407
    ctl = get_common_timeout_list(base, &ev->ev_timeout);
3408
    insert_common_timeout_inorder(ctl, ev);
3409
    break;
3410
  case 0: /* was in heap; is still on heap. */
3411
    min_heap_adjust_(&base->timeheap, ev);
3412
    break;
3413
  default:
3414
    EVUTIL_ASSERT(0); /* unreachable */
3415
    break;
3416
  }
3417
}
3418
#endif
3419
3420
/* Add 'ev' to the common timeout list 'ctl', keeping the list sorted by timeout. */
3421
static void
3422
insert_common_timeout_inorder(struct common_timeout_list *ctl,
3423
    struct event *ev)
3424
0
{
3425
0
  struct event *e;
3426
  /* By all logic, we should just be able to append 'ev' to the end of
3427
   * ctl->events, since the timeout on each 'ev' is set to {the common
3428
   * timeout} + {the time when we add the event}, and so the events
3429
   * should arrive in order of their timeouts.  But just in case
3430
   * there's some wacky threading issue going on, we do a search from
3431
   * the end of 'ctl->events' to find the right insertion point.
3432
   */
3433
0
  TAILQ_FOREACH_REVERSE(e, &ctl->events,
3434
0
      event_list, ev_timeout_pos.ev_next_with_common_timeout) {
3435
    /* This timercmp is a little sneaky, since both ev and e have
3436
     * magic values in tv_usec.  Fortunately, they ought to have
3437
     * the _same_ magic values in tv_usec.  Let's assert for that.
3438
     */
3439
0
    EVUTIL_ASSERT(
3440
0
      is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
3441
0
    if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
3442
0
      TAILQ_INSERT_AFTER(&ctl->events, e, ev,
3443
0
          ev_timeout_pos.ev_next_with_common_timeout);
3444
0
      return;
3445
0
    }
3446
0
  }
3447
0
  TAILQ_INSERT_HEAD(&ctl->events, ev,
3448
0
      ev_timeout_pos.ev_next_with_common_timeout);
3449
0
}
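
A minimal caller-side sketch of how events end up on a common_timeout_list in the first place, via the public event_base_init_common_timeout() API (assumes libevent 2.0+ headers; helper names are illustrative, error checks omitted):

#include <event2/event.h>

static void on_timeout(evutil_socket_t fd, short what, void *arg)
{ (void)fd; (void)what; (void)arg; }

int main(void)
{
    struct event_base *base = event_base_new();
    struct timeval ten_sec = { 10, 0 };

    /* Ask the base for a "common timeout": the returned timeval carries the
     * magic bits checked by is_common_timeout(), so events added with it go
     * on a sorted per-duration list instead of the min-heap. */
    const struct timeval *common =
        event_base_init_common_timeout(base, &ten_sec);

    struct event *ev1 = evtimer_new(base, on_timeout, NULL);
    struct event *ev2 = evtimer_new(base, on_timeout, NULL);
    evtimer_add(ev1, common);   /* both land on the same common_timeout_list, */
    evtimer_add(ev2, common);   /* appended by insert_common_timeout_inorder() */

    event_base_loop(base, EVLOOP_NONBLOCK);
    event_free(ev1);
    event_free(ev2);
    event_base_free(base);
    return 0;
}
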
3450
3451
static void
3452
event_queue_insert_inserted(struct event_base *base, struct event *ev)
3453
0
{
3454
0
  EVENT_BASE_ASSERT_LOCKED(base);
3455
3456
0
  if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
3457
0
    event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
3458
0
                   (void *)ev, EV_SOCK_ARG(ev->ev_fd));
3459
0
    return;
3460
0
  }
3461
3462
0
  INCR_EVENT_COUNT(base, ev->ev_flags);
3463
3464
0
  ev->ev_flags |= EVLIST_INSERTED;
3465
0
}
3466
3467
static void
3468
event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
3469
0
{
3470
0
  EVENT_BASE_ASSERT_LOCKED(base);
3471
3472
0
  if (evcb->evcb_flags & EVLIST_ACTIVE) {
3473
    /* Double insertion is possible for active events */
3474
0
    return;
3475
0
  }
3476
3477
0
  INCR_EVENT_COUNT(base, evcb->evcb_flags);
3478
3479
0
  evcb->evcb_flags |= EVLIST_ACTIVE;
3480
3481
0
  base->event_count_active++;
3482
0
  MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3483
0
  EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3484
0
  TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
3485
0
      evcb, evcb_active_next);
3486
0
}
3487
3488
static void
3489
event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
3490
0
{
3491
0
  EVENT_BASE_ASSERT_LOCKED(base);
3492
0
  if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
3493
    /* Double insertion is possible */
3494
0
    return;
3495
0
  }
3496
3497
0
  INCR_EVENT_COUNT(base, evcb->evcb_flags);
3498
0
  evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
3499
0
  base->event_count_active++;
3500
0
  MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3501
0
  EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3502
0
  TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
3503
0
}
3504
3505
static void
3506
event_queue_insert_timeout(struct event_base *base, struct event *ev)
3507
0
{
3508
0
  EVENT_BASE_ASSERT_LOCKED(base);
3509
3510
0
  if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3511
0
    event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3512
0
                   (void *)ev, EV_SOCK_ARG(ev->ev_fd));
3513
0
    return;
3514
0
  }
3515
3516
0
  INCR_EVENT_COUNT(base, ev->ev_flags);
3517
3518
0
  ev->ev_flags |= EVLIST_TIMEOUT;
3519
3520
0
  if (is_common_timeout(&ev->ev_timeout, base)) {
3521
0
    struct common_timeout_list *ctl =
3522
0
        get_common_timeout_list(base, &ev->ev_timeout);
3523
0
    insert_common_timeout_inorder(ctl, ev);
3524
0
  } else {
3525
0
    min_heap_push_(&base->timeheap, ev);
3526
0
  }
3527
0
}
3528
3529
static void
3530
event_queue_make_later_events_active(struct event_base *base)
3531
0
{
3532
0
  struct event_callback *evcb;
3533
0
  EVENT_BASE_ASSERT_LOCKED(base);
3534
3535
0
  while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
3536
0
    TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3537
0
    evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3538
0
    EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3539
0
    TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3540
0
    base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
3541
0
  }
3542
0
}
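
The insert helpers above are part of the machinery behind event_active() and event priorities. A minimal sketch of that caller-side view, assuming libevent 2.1+ (for event_self_cbarg() and event_get_priority()); helper names are illustrative:

#include <event2/event.h>
#include <stdio.h>

static void on_active(evutil_socket_t fd, short what, void *arg)
{
    (void)fd;
    printf("activated manually, what=%d priority=%d\n",
        (int)what, event_get_priority((struct event *)arg));
}

int main(void)
{
    struct event_base *base = event_base_new();
    event_base_priority_init(base, 2);      /* two priority levels: 0 (high), 1 (low) */

    struct event *ev = event_new(base, -1, 0, on_active, event_self_cbarg());
    event_priority_set(ev, 0);              /* run from activequeues[0] */

    /* Queue the callback directly, with no fd or timeout involved; this ends
     * up in event_queue_insert_active() under the base lock. */
    event_active(ev, EV_TIMEOUT, 1);

    event_base_loop(base, EVLOOP_NONBLOCK);
    event_free(ev);
    event_base_free(base);
    return 0;
}
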
3543
3544
/* Functions for debugging */
3545
3546
const char *
3547
event_get_version(void)
3548
0
{
3549
0
  return (EVENT__VERSION);
3550
0
}
3551
3552
ev_uint32_t
3553
event_get_version_number(void)
3554
0
{
3555
0
  return (EVENT__NUMERIC_VERSION);
3556
0
}
3557
3558
/*
3559
 * No thread-safe interface needed - the information should be the same
3560
 * for all threads.
3561
 */
3562
3563
const char *
3564
event_get_method(void)
3565
0
{
3566
0
  return (current_base->evsel->name);
3567
0
}
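
A small usage sketch of these introspection calls; event_base_get_method() is assumed available (libevent 2.0+) as the per-base alternative to the current_base-backed event_get_method():

#include <event2/event.h>
#include <stdio.h>

int main(void)
{
    struct event_base *base = event_base_new();

    /* Version as a string and as a packed hex number (major/minor/patch bytes). */
    printf("libevent %s (0x%08x)\n",
        event_get_version(), (unsigned)event_get_version_number());

    /* event_get_method() reads the global current_base; with multiple bases,
     * the per-base accessor is the safer choice. */
    printf("backend: %s\n", event_base_get_method(base));

    event_base_free(base);
    return 0;
}
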
3568
3569
#ifndef EVENT__DISABLE_MM_REPLACEMENT
3570
static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3571
static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3572
static void (*mm_free_fn_)(void *p) = NULL;
3573
3574
void *
3575
event_mm_malloc_(size_t sz)
3576
0
{
3577
0
  if (sz == 0)
3578
0
    return NULL;
3579
3580
0
  if (mm_malloc_fn_)
3581
0
    return mm_malloc_fn_(sz);
3582
0
  else
3583
0
    return malloc(sz);
3584
0
}
3585
3586
void *
3587
event_mm_calloc_(size_t count, size_t size)
3588
0
{
3589
0
  if (count == 0 || size == 0)
3590
0
    return NULL;
3591
3592
0
  if (mm_malloc_fn_) {
3593
0
    size_t sz = count * size;
3594
0
    void *p = NULL;
3595
0
    if (count > EV_SIZE_MAX / size)
3596
0
      goto error;
3597
0
    p = mm_malloc_fn_(sz);
3598
0
    if (p)
3599
0
      return memset(p, 0, sz);
3600
0
  } else {
3601
0
    void *p = calloc(count, size);
3602
#ifdef _WIN32
3603
    /* Windows calloc doesn't reliably set ENOMEM */
3604
    if (p == NULL)
3605
      goto error;
3606
#endif
3607
0
    return p;
3608
0
  }
3609
3610
0
error:
3611
0
  errno = ENOMEM;
3612
0
  return NULL;
3613
0
}
3614
3615
char *
3616
event_mm_strdup_(const char *str)
3617
0
{
3618
0
  if (!str) {
3619
0
    errno = EINVAL;
3620
0
    return NULL;
3621
0
  }
3622
3623
0
  if (mm_malloc_fn_) {
3624
0
    size_t ln = strlen(str);
3625
0
    void *p = NULL;
3626
0
    if (ln == EV_SIZE_MAX)
3627
0
      goto error;
3628
0
    p = mm_malloc_fn_(ln+1);
3629
0
    if (p)
3630
0
      return memcpy(p, str, ln+1);
3631
0
  } else
3632
#ifdef _WIN32
3633
    return _strdup(str);
3634
#else
3635
0
    return strdup(str);
3636
0
#endif
3637
3638
0
error:
3639
0
  errno = ENOMEM;
3640
0
  return NULL;
3641
0
}
3642
3643
void *
3644
event_mm_realloc_(void *ptr, size_t sz)
3645
0
{
3646
0
  if (mm_realloc_fn_)
3647
0
    return mm_realloc_fn_(ptr, sz);
3648
0
  else
3649
0
    return realloc(ptr, sz);
3650
0
}
3651
3652
void
3653
event_mm_free_(void *ptr)
3654
0
{
3655
0
  if (mm_free_fn_)
3656
0
    mm_free_fn_(ptr);
3657
0
  else
3658
0
    free(ptr);
3659
0
}
3660
3661
void
3662
event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3663
      void *(*realloc_fn)(void *ptr, size_t sz),
3664
      void (*free_fn)(void *ptr))
3665
0
{
3666
0
  mm_malloc_fn_ = malloc_fn;
3667
0
  mm_realloc_fn_ = realloc_fn;
3668
0
  mm_free_fn_ = free_fn;
3669
0
}
3670
#endif
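
A minimal sketch of installing replacement allocators through event_set_mem_functions(), assuming the library was built without EVENT__DISABLE_MM_REPLACEMENT; the counting helpers are illustrative only:

#include <event2/event.h>
#include <stdlib.h>
#include <stdio.h>

static size_t n_allocs;

static void *counting_malloc(size_t sz)           { n_allocs++; return malloc(sz); }
static void *counting_realloc(void *p, size_t sz) { return realloc(p, sz); }
static void  counting_free(void *p)               { free(p); }

int main(void)
{
    /* Must be installed before any other libevent call, so that every
     * event_mm_malloc_()/event_mm_free_() above routes through the hooks. */
    event_set_mem_functions(counting_malloc, counting_realloc, counting_free);

    struct event_base *base = event_base_new();
    event_base_free(base);

    printf("allocations routed through counting_malloc: %zu\n", n_allocs);
    return 0;
}
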
3671
3672
#ifdef EVENT__HAVE_EVENTFD
3673
static void
3674
evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3675
0
{
3676
0
  struct event_base *base = arg;
3677
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3678
0
  base->is_notify_pending = 0;
3679
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3680
0
}
3681
#endif
3682
3683
static void
3684
evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3685
0
{
3686
0
  unsigned char buf[1024];
3687
0
  struct event_base *base = arg;
3688
#ifdef _WIN32
3689
  while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3690
    ;
3691
#else
3692
0
  while (read(fd, (char*)buf, sizeof(buf)) > 0)
3693
0
    ;
3694
0
#endif
3695
3696
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3697
0
  base->is_notify_pending = 0;
3698
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3699
0
}
3700
3701
int
3702
evthread_make_base_notifiable(struct event_base *base)
3703
0
{
3704
0
  int r;
3705
0
  if (!base)
3706
0
    return -1;
3707
3708
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3709
0
  r = evthread_make_base_notifiable_nolock_(base);
3710
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3711
0
  return r;
3712
0
}
3713
3714
static int
3715
evthread_make_base_notifiable_nolock_(struct event_base *base)
3716
0
{
3717
0
  void (*cb)(evutil_socket_t, short, void *);
3718
0
  int (*notify)(struct event_base *);
3719
3720
0
  if (base->th_notify_fn != NULL) {
3721
    /* The base is already notifiable: we're doing fine. */
3722
0
    return 0;
3723
0
  }
3724
3725
#if defined(EVENT__HAVE_WORKING_KQUEUE)
3726
  if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3727
    base->th_notify_fn = event_kq_notify_base_;
3728
    /* No need to add an event here; the backend can wake
3729
     * itself up just fine. */
3730
    return 0;
3731
  }
3732
#endif
3733
3734
0
#ifdef EVENT__HAVE_EVENTFD
3735
0
  base->th_notify_fd[0] = evutil_eventfd_(0,
3736
0
      EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
3737
0
  if (base->th_notify_fd[0] >= 0) {
3738
0
    base->th_notify_fd[1] = -1;
3739
0
    notify = evthread_notify_base_eventfd;
3740
0
    cb = evthread_notify_drain_eventfd;
3741
0
  } else
3742
0
#endif
3743
0
  if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
3744
0
    notify = evthread_notify_base_default;
3745
0
    cb = evthread_notify_drain_default;
3746
0
  } else {
3747
0
    return -1;
3748
0
  }
3749
3750
0
  base->th_notify_fn = notify;
3751
3752
  /* prepare an event that we can use for wakeup */
3753
0
  event_assign(&base->th_notify, base, base->th_notify_fd[0],
3754
0
         EV_READ|EV_PERSIST|EV_ET, cb, base);
3755
3756
  /* we need to mark this as an internal event */
3757
0
  base->th_notify.ev_flags |= EVLIST_INTERNAL;
3758
0
  event_priority_set(&base->th_notify, 0);
3759
3760
0
  return event_add_nolock_(&base->th_notify, NULL, 0);
3761
0
}
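
A sketch of when a caller needs the notification machinery set up above: waking a dispatching thread from another thread. It assumes libevent was built with pthreads support and is linked against event_pthreads; error checks are omitted:

#include <event2/event.h>
#include <event2/thread.h>
#include <pthread.h>

static void *loop_thread(void *arg)
{
    event_base_dispatch(arg);
    return NULL;
}

int main(void)
{
    evthread_use_pthreads();                  /* enable locking before the base exists */

    struct event_base *base = event_base_new();
    /* Typically done automatically once locking is enabled; calling it
     * explicitly sets up the eventfd/pipe drained by the callbacks above. */
    evthread_make_base_notifiable(base);

    pthread_t tid;
    pthread_create(&tid, NULL, loop_thread, base);

    /* Stopping the loop from this thread relies on the notify fd to wake
     * the dispatching thread. */
    struct timeval right_now = { 0, 0 };
    event_base_loopexit(base, &right_now);

    pthread_join(tid, NULL);
    event_base_free(base);
    return 0;
}
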
3762
3763
int
3764
event_base_foreach_event_nolock_(struct event_base *base,
3765
    event_base_foreach_event_cb fn, void *arg)
3766
0
{
3767
0
  int r, i;
3768
0
  size_t u;
3769
0
  struct event *ev;
3770
3771
  /* Start out with all the EVLIST_INSERTED events. */
3772
0
  if ((r = evmap_foreach_event_(base, fn, arg)))
3773
0
    return r;
3774
3775
  /* Okay, now we deal with those events that have timeouts and are in
3776
   * the min-heap. */
3777
0
  for (u = 0; u < base->timeheap.n; ++u) {
3778
0
    ev = base->timeheap.p[u];
3779
0
    if (ev->ev_flags & EVLIST_INSERTED) {
3780
      /* we already processed this one */
3781
0
      continue;
3782
0
    }
3783
0
    if ((r = fn(base, ev, arg)))
3784
0
      return r;
3785
0
  }
3786
3787
  /* Now for the events in one of the common timeout queues,
3788
   * rather than the min-heap. */
3789
0
  for (i = 0; i < base->n_common_timeouts; ++i) {
3790
0
    struct common_timeout_list *ctl =
3791
0
        base->common_timeout_queues[i];
3792
0
    TAILQ_FOREACH(ev, &ctl->events,
3793
0
        ev_timeout_pos.ev_next_with_common_timeout) {
3794
0
      if (ev->ev_flags & EVLIST_INSERTED) {
3795
        /* we already processed this one */
3796
0
        continue;
3797
0
      }
3798
0
      if ((r = fn(base, ev, arg)))
3799
0
        return r;
3800
0
    }
3801
0
  }
3802
3803
  /* Finally, we deal with all the active events that we haven't touched
3804
   * yet. */
3805
0
  for (i = 0; i < base->nactivequeues; ++i) {
3806
0
    struct event_callback *evcb;
3807
0
    TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3808
0
      if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
3809
        /* This isn't an event (EVLIST_INIT clear), or
3810
         * we already processed it (EVLIST_INSERTED or
3811
         * EVLIST_TIMEOUT set). */
3812
0
        continue;
3813
0
      }
3814
0
      ev = event_callback_to_event(evcb);
3815
0
      if ((r = fn(base, ev, arg)))
3816
0
        return r;
3817
0
    }
3818
0
  }
3819
3820
0
  return 0;
3821
0
}
3822
3823
/* Helper for event_base_dump_events: called on each event in the event base;
3824
 * dumps only the inserted events. */
3825
static int
3826
dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
3827
0
{
3828
0
  FILE *output = arg;
3829
0
  const char *gloss = (e->ev_events & EV_SIGNAL) ?
3830
0
      "sig" : "fd ";
3831
3832
0
  if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
3833
0
    return 0;
3834
3835
0
  fprintf(output, "  %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s%s",
3836
0
      (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
3837
0
      (e->ev_events&EV_READ)?" Read":"",
3838
0
      (e->ev_events&EV_WRITE)?" Write":"",
3839
0
      (e->ev_events&EV_CLOSED)?" EOF":"",
3840
0
      (e->ev_events&EV_SIGNAL)?" Signal":"",
3841
0
      (e->ev_events&EV_PERSIST)?" Persist":"",
3842
0
      (e->ev_events&EV_ET)?" ET":"",
3843
0
      (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
3844
0
  if (e->ev_flags & EVLIST_TIMEOUT) {
3845
0
    struct timeval tv;
3846
0
    tv.tv_sec = e->ev_timeout.tv_sec;
3847
0
    tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
3848
0
    evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
3849
0
    fprintf(output, " Timeout=%ld.%06d",
3850
0
        (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
3851
0
  }
3852
0
  fputc('\n', output);
3853
3854
0
  return 0;
3855
0
}
3856
3857
/* Helper for event_base_dump_events: called on each event in the event base;
3858
 * dumps only the active events. */
3859
static int
3860
dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
3861
0
{
3862
0
  FILE *output = arg;
3863
0
  const char *gloss = (e->ev_events & EV_SIGNAL) ?
3864
0
      "sig" : "fd ";
3865
3866
0
  if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
3867
0
    return 0;
3868
3869
0
  fprintf(output, "  %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
3870
0
      (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
3871
0
      (e->ev_res&EV_READ)?" Read":"",
3872
0
      (e->ev_res&EV_WRITE)?" Write":"",
3873
0
      (e->ev_res&EV_CLOSED)?" EOF":"",
3874
0
      (e->ev_res&EV_SIGNAL)?" Signal":"",
3875
0
      (e->ev_res&EV_TIMEOUT)?" Timeout":"",
3876
0
      (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
3877
0
      (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
3878
3879
0
  return 0;
3880
0
}
3881
3882
int
3883
event_base_foreach_event(struct event_base *base,
3884
    event_base_foreach_event_cb fn, void *arg)
3885
0
{
3886
0
  int r;
3887
0
  if ((!fn) || (!base)) {
3888
0
    return -1;
3889
0
  }
3890
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3891
0
  r = event_base_foreach_event_nolock_(base, fn, arg);
3892
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3893
0
  return r;
3894
0
}
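
A usage sketch of the locked wrapper above, counting the events registered with a base (assumes libevent 2.1+, where event_base_foreach_event() is available; helper names are illustrative):

#include <event2/event.h>
#include <stdio.h>

static void noop_cb(evutil_socket_t fd, short what, void *arg)
{ (void)fd; (void)what; (void)arg; }

/* Matches event_base_foreach_event_cb; returning non-zero stops the walk. */
static int count_event(const struct event_base *base, const struct event *ev, void *arg)
{
    (void)base; (void)ev;
    ++*(int *)arg;
    return 0;
}

int main(void)
{
    struct event_base *base = event_base_new();
    struct event *t1 = evtimer_new(base, noop_cb, NULL);
    struct event *t2 = evtimer_new(base, noop_cb, NULL);
    struct timeval tv = { 60, 0 };
    evtimer_add(t1, &tv);
    evtimer_add(t2, &tv);

    int n = 0;
    event_base_foreach_event(base, count_event, &n);
    printf("events known to the base: %d\n", n);   /* expect 2, plus any internal events */

    event_free(t1);
    event_free(t2);
    event_base_free(base);
    return 0;
}
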
3895
3896
3897
void
3898
event_base_dump_events(struct event_base *base, FILE *output)
3899
0
{
3900
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3901
0
  fprintf(output, "Inserted events:\n");
3902
0
  event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
3903
3904
0
  fprintf(output, "Active events:\n");
3905
0
  event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
3906
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3907
0
}
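
A corresponding sketch for event_base_dump_events(), writing the inserted/active listings to stderr (helper names illustrative, error checks omitted):

#include <event2/event.h>
#include <stdio.h>

static void noop_cb(evutil_socket_t fd, short what, void *arg)
{ (void)fd; (void)what; (void)arg; }

int main(void)
{
    struct event_base *base = event_base_new();
    struct event *timer = evtimer_new(base, noop_cb, NULL);
    struct timeval tv = { 30, 0 };
    evtimer_add(timer, &tv);

    /* Writes the "Inserted events:" and "Active events:" sections produced by
     * dump_inserted_event_fn()/dump_active_event_fn() above. */
    event_base_dump_events(base, stderr);

    event_free(timer);
    event_base_free(base);
    return 0;
}
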
3908
3909
void
3910
event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3911
0
{
3912
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3913
3914
  /* Activate any non-timer events */
3915
0
  if (!(events & EV_TIMEOUT)) {
3916
0
    evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3917
0
  } else {
3918
    /* If we want to activate timer events, loop and activate each event with
3919
     * the same fd in both the timeheap and the common timeout lists */
3920
0
    int i;
3921
0
    size_t u;
3922
0
    struct event *ev;
3923
3924
0
    for (u = 0; u < base->timeheap.n; ++u) {
3925
0
      ev = base->timeheap.p[u];
3926
0
      if (ev->ev_fd == fd) {
3927
0
        event_active_nolock_(ev, EV_TIMEOUT, 1);
3928
0
      }
3929
0
    }
3930
3931
0
    for (i = 0; i < base->n_common_timeouts; ++i) {
3932
0
      struct common_timeout_list *ctl = base->common_timeout_queues[i];
3933
0
      TAILQ_FOREACH(ev, &ctl->events,
3934
0
        ev_timeout_pos.ev_next_with_common_timeout) {
3935
0
        if (ev->ev_fd == fd) {
3936
0
          event_active_nolock_(ev, EV_TIMEOUT, 1);
3937
0
        }
3938
0
      }
3939
0
    }
3940
0
  }
3941
3942
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3943
0
}
3944
3945
void
3946
event_base_active_by_signal(struct event_base *base, int sig)
3947
0
{
3948
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3949
0
  evmap_signal_active_(base, sig, 1);
3950
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3951
0
}
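
A usage sketch of event_base_active_by_fd(), assuming a POSIX socketpair is available via evutil_socketpair(); event_base_active_by_signal() is used the same way for pending signal events. Error checks are omitted:

#include <event2/event.h>
#include <event2/util.h>
#include <sys/socket.h>
#include <stdio.h>

static void on_read(evutil_socket_t fd, short what, void *arg)
{
    (void)arg;
    printf("callback for fd %d, what=%d\n", (int)fd, (int)what);
}

int main(void)
{
    struct event_base *base = event_base_new();
    evutil_socket_t pair[2];
    evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair);

    struct event *ev = event_new(base, pair[0], EV_READ | EV_PERSIST, on_read, NULL);
    event_add(ev, NULL);

    /* Force the EV_READ callback for pair[0] even though no data arrived;
     * internally this goes through evmap_io_active_() under the base lock. */
    event_base_active_by_fd(base, pair[0], EV_READ);

    event_base_loop(base, EVLOOP_NONBLOCK);

    event_free(ev);
    evutil_closesocket(pair[0]);
    evutil_closesocket(pair[1]);
    event_base_free(base);
    return 0;
}
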
3952
3953
3954
void
3955
event_base_add_virtual_(struct event_base *base)
3956
0
{
3957
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3958
0
  base->virtual_event_count++;
3959
0
  MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
3960
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3961
0
}
3962
3963
void
3964
event_base_del_virtual_(struct event_base *base)
3965
0
{
3966
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3967
0
  EVUTIL_ASSERT(base->virtual_event_count > 0);
3968
0
  base->virtual_event_count--;
3969
0
  if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
3970
0
    evthread_notify_base(base);
3971
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3972
0
}
3973
3974
static void
3975
event_free_debug_globals_locks(void)
3976
0
{
3977
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
3978
0
#ifndef EVENT__DISABLE_DEBUG_MODE
3979
0
  if (event_debug_map_lock_ != NULL) {
3980
0
    EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3981
0
    event_debug_map_lock_ = NULL;
3982
0
    evthreadimpl_disable_lock_debugging_();
3983
0
  }
3984
0
#endif /* EVENT__DISABLE_DEBUG_MODE */
3985
0
#endif /* EVENT__DISABLE_THREAD_SUPPORT */
3986
0
  return;
3987
0
}
3988
3989
static void
3990
event_free_debug_globals(void)
3991
0
{
3992
0
  event_free_debug_globals_locks();
3993
0
}
3994
3995
static void
3996
event_free_evsig_globals(void)
3997
0
{
3998
0
  evsig_free_globals_();
3999
0
}
4000
4001
static void
4002
event_free_evutil_globals(void)
4003
0
{
4004
0
  evutil_free_globals_();
4005
0
}
4006
4007
static void
4008
event_free_globals(void)
4009
0
{
4010
0
  event_free_debug_globals();
4011
0
  event_free_evsig_globals();
4012
0
  event_free_evutil_globals();
4013
0
}
4014
4015
void
4016
libevent_global_shutdown(void)
4017
0
{
4018
0
  event_disable_debug_mode();
4019
0
  event_free_globals();
4020
0
}
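
A minimal sketch of where libevent_global_shutdown() fits in a program's lifetime, assuming no libevent calls are made after it:

#include <event2/event.h>

int main(void)
{
    struct event_base *base = event_base_new();
    /* ... set up events and run the application ... */
    event_base_free(base);

    /* Release process-wide state (debug locks, evsig/evutil globals) after
     * every base and event has been freed; mainly useful to quiet leak
     * checkers, since the OS reclaims everything at exit anyway. */
    libevent_global_shutdown();
    return 0;
}
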
4021
4022
#ifndef EVENT__DISABLE_THREAD_SUPPORT
4023
int
4024
event_global_setup_locks_(const int enable_locks)
4025
0
{
4026
0
#ifndef EVENT__DISABLE_DEBUG_MODE
4027
0
  EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
4028
0
#endif
4029
0
  if (evsig_global_setup_locks_(enable_locks) < 0)
4030
0
    return -1;
4031
0
  if (evutil_global_setup_locks_(enable_locks) < 0)
4032
0
    return -1;
4033
0
  if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
4034
0
    return -1;
4035
0
  return 0;
4036
0
}
4037
#endif
4038
4039
void
4040
event_base_assert_ok_(struct event_base *base)
4041
0
{
4042
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
4043
0
  event_base_assert_ok_nolock_(base);
4044
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
4045
0
}
4046
4047
void
4048
event_base_assert_ok_nolock_(struct event_base *base)
4049
0
{
4050
0
  int i;
4051
0
  size_t u;
4052
0
  int count;
4053
4054
  /* First do checks on the per-fd and per-signal lists */
4055
0
  evmap_check_integrity_(base);
4056
4057
  /* Check the heap property */
4058
0
  for (u = 1; u < base->timeheap.n; ++u) {
4059
0
    size_t parent = (u - 1) / 2;
4060
0
    struct event *ev, *p_ev;
4061
0
    ev = base->timeheap.p[u];
4062
0
    p_ev = base->timeheap.p[parent];
4063
0
    EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
4064
0
    EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
4065
0
    EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == u);
4066
0
  }
4067
4068
  /* Check that the common timeouts are fine */
4069
0
  for (i = 0; i < base->n_common_timeouts; ++i) {
4070
0
    struct common_timeout_list *ctl = base->common_timeout_queues[i];
4071
0
    struct event *last=NULL, *ev;
4072
4073
0
    EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
4074
4075
0
    TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
4076
0
      if (last)
4077
0
        EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
4078
0
      EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
4079
0
      EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
4080
0
      EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
4081
0
      last = ev;
4082
0
    }
4083
0
  }
4084
4085
  /* Check the active queues. */
4086
0
  count = 0;
4087
0
  for (i = 0; i < base->nactivequeues; ++i) {
4088
0
    struct event_callback *evcb;
4089
0
    EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
4090
0
    TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
4091
0
      EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
4092
0
      EVUTIL_ASSERT(evcb->evcb_pri == i);
4093
0
      ++count;
4094
0
    }
4095
0
  }
4096
4097
0
  {
4098
0
    struct event_callback *evcb;
4099
0
    TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
4100
0
      EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
4101
0
      ++count;
4102
0
    }
4103
0
  }
4104
0
  EVUTIL_ASSERT(count == base->event_count_active);
4105
0
}