Coverage Report

Created: 2024-02-25 06:25

/src/libevent/event.c
Line
Count
Source
1
/*
2
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
3
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
4
 *
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
7
 * are met:
8
 * 1. Redistributions of source code must retain the above copyright
9
 *    notice, this list of conditions and the following disclaimer.
10
 * 2. Redistributions in binary form must reproduce the above copyright
11
 *    notice, this list of conditions and the following disclaimer in the
12
 *    documentation and/or other materials provided with the distribution.
13
 * 3. The name of the author may not be used to endorse or promote products
14
 *    derived from this software without specific prior written permission.
15
 *
16
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 */
27
#include "event2/event-config.h"
28
#include "evconfig-private.h"
29
30
#ifdef _WIN32
31
#include <winsock2.h>
32
#define WIN32_LEAN_AND_MEAN
33
#include <windows.h>
34
#undef WIN32_LEAN_AND_MEAN
35
#endif
36
#include <sys/types.h>
37
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
38
#include <sys/time.h>
39
#endif
40
#include <sys/queue.h>
41
#ifdef EVENT__HAVE_SYS_SOCKET_H
42
#include <sys/socket.h>
43
#endif
44
#include <stdio.h>
45
#include <stdlib.h>
46
#ifdef EVENT__HAVE_UNISTD_H
47
#include <unistd.h>
48
#endif
49
#include <ctype.h>
50
#include <errno.h>
51
#include <signal.h>
52
#include <string.h>
53
#include <time.h>
54
#include <limits.h>
55
#ifdef EVENT__HAVE_FCNTL_H
56
#include <fcntl.h>
57
#endif
58
59
#include "event2/event.h"
60
#include "event2/event_struct.h"
61
#include "event2/event_compat.h"
62
#include "event2/watch.h"
63
#include "event-internal.h"
64
#include "defer-internal.h"
65
#include "evthread-internal.h"
66
#include "event2/thread.h"
67
#include "event2/util.h"
68
#include "log-internal.h"
69
#include "evmap-internal.h"
70
#include "iocp-internal.h"
71
#include "changelist-internal.h"
72
#define HT_NO_CACHE_HASH_VALUES
73
#include "ht-internal.h"
74
#include "util-internal.h"
75
76
77
#ifdef EVENT__HAVE_WORKING_KQUEUE
78
#include "kqueue-internal.h"
79
#endif
80
81
#ifdef EVENT__HAVE_EVENT_PORTS
82
extern const struct eventop evportops;
83
#endif
84
#ifdef EVENT__HAVE_SELECT
85
extern const struct eventop selectops;
86
#endif
87
#ifdef EVENT__HAVE_POLL
88
extern const struct eventop pollops;
89
#endif
90
#ifdef EVENT__HAVE_EPOLL
91
extern const struct eventop epollops;
92
#endif
93
#ifdef EVENT__HAVE_WORKING_KQUEUE
94
extern const struct eventop kqops;
95
#endif
96
#ifdef EVENT__HAVE_DEVPOLL
97
extern const struct eventop devpollops;
98
#endif
99
#ifdef EVENT__HAVE_WEPOLL
100
extern const struct eventop wepollops;
101
#endif
102
#ifdef _WIN32
103
extern const struct eventop win32ops;
104
#endif
105
106
/* Array of backends in order of preference. */
107
static const struct eventop *eventops[] = {
108
#ifdef EVENT__HAVE_EVENT_PORTS
109
  &evportops,
110
#endif
111
#ifdef EVENT__HAVE_WORKING_KQUEUE
112
  &kqops,
113
#endif
114
#ifdef EVENT__HAVE_EPOLL
115
  &epollops,
116
#endif
117
#ifdef EVENT__HAVE_DEVPOLL
118
  &devpollops,
119
#endif
120
#ifdef EVENT__HAVE_POLL
121
  &pollops,
122
#endif
123
#ifdef EVENT__HAVE_SELECT
124
  &selectops,
125
#endif
126
#ifdef _WIN32
127
  &win32ops,
128
#endif
129
#ifdef EVENT__HAVE_WEPOLL
130
  &wepollops,
131
#endif
132
  NULL
133
};
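/* Illustrative sketch (not part of event.c): an application can list the
 * compiled-in backends and see which one a freshly created base selected from
 * the preference order above, using only the public API in <event2/event.h>.
 * Assumes <stdio.h> for printf(). */
static void show_backends_example(void)
{
  const char **methods = event_get_supported_methods();
  struct event_base *base = event_base_new();
  int i;

  for (i = 0; methods && methods[i] != NULL; ++i)
    printf("available backend: %s\n", methods[i]);
  if (base) {
    printf("selected backend: %s\n", event_base_get_method(base));
    event_base_free(base);
  }
}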
134
135
/* Global state; deprecated */
136
EVENT2_EXPORT_SYMBOL
137
struct event_base *event_global_current_base_ = NULL;
138
0
#define current_base event_global_current_base_
139
140
/* Global state */
141
142
static void *event_self_cbarg_ptr_ = NULL;
143
144
/* Prototypes */
145
static void event_queue_insert_active(struct event_base *, struct event_callback *);
146
static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
147
static void event_queue_insert_timeout(struct event_base *, struct event *);
148
static void event_queue_insert_inserted(struct event_base *, struct event *);
149
static void event_queue_remove_active(struct event_base *, struct event_callback *);
150
static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
151
static void event_queue_remove_timeout(struct event_base *, struct event *);
152
static void event_queue_remove_inserted(struct event_base *, struct event *);
153
static void event_queue_make_later_events_active(struct event_base *base);
154
155
static int evthread_make_base_notifiable_nolock_(struct event_base *base);
156
static int event_del_(struct event *ev, int blocking);
157
158
#ifdef USE_REINSERT_TIMEOUT
159
/* This code seems buggy; only turn it on if we find out what the trouble is. */
160
static void event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
161
#endif
162
163
static int  event_haveevents(struct event_base *);
164
165
static int  event_process_active(struct event_base *);
166
167
static int  timeout_next(struct event_base *, struct timeval **);
168
static void timeout_process(struct event_base *);
169
170
static inline void  event_signal_closure(struct event_base *, struct event *ev);
171
static inline void  event_persist_closure(struct event_base *, struct event *ev);
172
173
static int  evthread_notify_base(struct event_base *base);
174
175
static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
176
    struct event *ev);
177
178
#ifndef EVENT__DISABLE_DEBUG_MODE
179
/* These functions implement a hashtable of which 'struct event *' structures
180
 * have been setup or added.  We don't want to trust the content of the struct
181
 * event itself, since we're trying to work through cases where an event gets
182
 * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
183
 */
184
185
struct event_debug_entry {
186
  HT_ENTRY(event_debug_entry) node;
187
  const struct event *ptr;
188
  unsigned added : 1;
189
};
190
191
static inline unsigned
192
hash_debug_entry(const struct event_debug_entry *e)
193
0
{
194
  /* We need to do this silliness to convince compilers that we
195
   * honestly mean to cast e->ptr to an integer, and discard any
196
   * part of it that doesn't fit in an unsigned.
197
   */
198
0
  unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
199
  /* Our hashtable implementation is pretty sensitive to low bits,
200
   * and every struct event is over 64 bytes in size, so we can
201
   * just say >>6. */
202
0
  return (u >> 6);
203
0
}
204
205
static inline int
206
eq_debug_entry(const struct event_debug_entry *a,
207
    const struct event_debug_entry *b)
208
0
{
209
0
  return a->ptr == b->ptr;
210
0
}
211
212
int event_debug_mode_on_ = 0;
213
214
215
#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
216
/**
217
 * @brief debug mode variable which is set for any function/structure that needs
218
 *        to be shared across threads (if thread support is enabled).
219
 *
220
 *        When and if evthreads are initialized, this variable will be evaluated,
221
 *        and if set to something other than zero, this means the evthread setup 
222
 *        functions were called out of order.
223
 *
224
 *        See: "Locks and threading" in the documentation.
225
 */
226
int event_debug_created_threadable_ctx_ = 0;
227
#endif
228
229
/* Set if it's too late to enable event_debug_mode. */
230
static int event_debug_mode_too_late = 0;
231
#ifndef EVENT__DISABLE_THREAD_SUPPORT
232
static void *event_debug_map_lock_ = NULL;
233
#endif
234
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
235
  HT_INITIALIZER();
236
237
HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
238
    eq_debug_entry)
239
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
240
0
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
241
242
/* record that ev is now setup (that is, ready for an add) */
243
static void event_debug_note_setup_(const struct event *ev)
244
0
{
245
0
  struct event_debug_entry *dent, find;
246
247
0
  if (!event_debug_mode_on_)
248
0
    goto out;
249
250
0
  find.ptr = ev;
251
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
252
0
  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
253
0
  if (dent) {
254
0
    dent->added = 0;
255
0
  } else {
256
0
    dent = mm_malloc(sizeof(*dent));
257
0
    if (!dent)
258
0
      event_err(1,
259
0
          "Out of memory in debugging code");
260
0
    dent->ptr = ev;
261
0
    dent->added = 0;
262
0
    HT_INSERT(event_debug_map, &global_debug_map, dent);
263
0
  }
264
0
  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
265
266
0
out:
267
0
  event_debug_mode_too_late = 1;
268
0
}
269
/* record that ev is no longer setup */
270
static void event_debug_note_teardown_(const struct event *ev)
271
0
{
272
0
  struct event_debug_entry *dent, find;
273
274
0
  if (!event_debug_mode_on_)
275
0
    goto out;
276
277
0
  find.ptr = ev;
278
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
279
0
  dent = HT_REMOVE(event_debug_map, &global_debug_map, &find);
280
0
  if (dent)
281
0
    mm_free(dent);
282
0
  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
283
284
0
out:
285
0
  event_debug_mode_too_late = 1;
286
0
}
287
/* record that ev is now added */
288
static void event_debug_note_add_(const struct event *ev)
289
0
{
290
0
  struct event_debug_entry *dent,find;
291
292
0
  if (!event_debug_mode_on_)
293
0
    goto out;
294
295
0
  find.ptr = ev;
296
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
297
0
  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
298
0
  if (dent) {
299
0
    dent->added = 1;
300
0
  } else {
301
0
    event_errx(EVENT_ERR_ABORT_,
302
0
        "%s: noting an add on a non-setup event %p"
303
0
        " (events: 0x%x, fd: "EV_SOCK_FMT
304
0
        ", flags: 0x%x)",
305
0
        __func__, (void *)ev, ev->ev_events,
306
0
        EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
307
0
  }
308
0
  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
309
310
0
out:
311
0
  event_debug_mode_too_late = 1;
312
0
}
313
/* record that ev is no longer added */
314
static void event_debug_note_del_(const struct event *ev)
315
0
{
316
0
  struct event_debug_entry *dent, find;
317
318
0
  if (!event_debug_mode_on_)
319
0
    goto out;
320
321
0
  find.ptr = ev;
322
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
323
0
  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
324
0
  if (dent) {
325
0
    dent->added = 0;
326
0
  } else {
327
0
    event_errx(EVENT_ERR_ABORT_,
328
0
        "%s: noting a del on a non-setup event %p"
329
0
        " (events: 0x%x, fd: "EV_SOCK_FMT
330
0
        ", flags: 0x%x)",
331
0
        __func__, (void *)ev, ev->ev_events,
332
0
        EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
333
0
  }
334
0
  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
335
336
0
out:
337
0
  event_debug_mode_too_late = 1;
338
0
}
339
/* assert that ev is setup (i.e., okay to add or inspect) */
340
static void event_debug_assert_is_setup_(const struct event *ev)
341
0
{
342
0
  struct event_debug_entry *dent, find;
343
344
0
  if (!event_debug_mode_on_)
345
0
    return;
346
347
0
  find.ptr = ev;
348
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
349
0
  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
350
0
  if (!dent) {
351
0
    event_errx(EVENT_ERR_ABORT_,
352
0
        "%s called on a non-initialized event %p"
353
0
        " (events: 0x%x, fd: "EV_SOCK_FMT
354
0
        ", flags: 0x%x)",
355
0
        __func__, (void *)ev, ev->ev_events,
356
0
        EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
357
0
  }
358
0
  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
359
0
}
360
/* assert that ev is not added (i.e., okay to tear down or set up again) */
361
static void event_debug_assert_not_added_(const struct event *ev)
362
0
{
363
0
  struct event_debug_entry *dent, find;
364
365
0
  if (!event_debug_mode_on_)
366
0
    return;
367
368
0
  find.ptr = ev;
369
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
370
0
  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
371
0
  if (dent && dent->added) {
372
0
    event_errx(EVENT_ERR_ABORT_,
373
0
        "%s called on an already added event %p"
374
0
        " (events: 0x%x, fd: "EV_SOCK_FMT", "
375
0
        "flags: 0x%x)",
376
0
        __func__, (void *)ev, ev->ev_events,
377
0
        EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
378
0
  }
379
0
  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
380
0
}
381
static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd)
382
0
{
383
0
  if (!event_debug_mode_on_)
384
0
    return;
385
0
  if (fd < 0)
386
0
    return;
387
388
0
#ifndef _WIN32
389
0
  {
390
0
    int flags;
391
0
    if ((flags = fcntl(fd, F_GETFL, NULL)) >= 0) {
392
0
      EVUTIL_ASSERT(flags & O_NONBLOCK);
393
0
    }
394
0
  }
395
0
#endif
396
0
}
397
#else
398
static void event_debug_note_setup_(const struct event *ev) { (void)ev; }
399
static void event_debug_note_teardown_(const struct event *ev) { (void)ev; }
400
static void event_debug_note_add_(const struct event *ev) { (void)ev; }
401
static void event_debug_note_del_(const struct event *ev) { (void)ev; }
402
static void event_debug_assert_is_setup_(const struct event *ev) { (void)ev; }
403
static void event_debug_assert_not_added_(const struct event *ev) { (void)ev; }
404
static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd) { (void)fd; }
405
#endif
406
407
#define EVENT_BASE_ASSERT_LOCKED(base)    \
408
0
  EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
409
410
/* How often (in seconds) do we check for changes in wall clock time relative
411
 * to monotonic time?  Set this to -1 for 'never.' */
412
0
#define CLOCK_SYNC_INTERVAL -1
413
414
/** Set 'tp' to the current time according to 'base'.  We must hold the lock
415
 * on 'base'.  If there is a cached time, return it.  Otherwise, use
416
 * clock_gettime or gettimeofday as appropriate to find out the right time.
417
 * Return 0 on success, -1 on failure.
418
 */
419
static int
420
gettime(struct event_base *base, struct timeval *tp)
421
0
{
422
0
  EVENT_BASE_ASSERT_LOCKED(base);
423
424
0
  if (base->tv_cache.tv_sec) {
425
0
    *tp = base->tv_cache;
426
0
    return (0);
427
0
  }
428
429
0
  if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
430
0
    return -1;
431
0
  }
432
433
0
  if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
434
0
      < tp->tv_sec) {
435
0
    struct timeval tv;
436
0
    evutil_gettimeofday(&tv,NULL);
437
0
    evutil_timersub(&tv, tp, &base->tv_clock_diff);
438
0
    base->last_updated_clock_diff = tp->tv_sec;
439
0
  }
440
441
0
  return 0;
442
0
}
443
444
int
445
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
446
0
{
447
0
  int r;
448
0
  if (!base) {
449
0
    base = current_base;
450
0
    if (!current_base)
451
0
      return evutil_gettimeofday(tv, NULL);
452
0
  }
453
454
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
455
0
  if (base->tv_cache.tv_sec == 0) {
456
0
    r = evutil_gettimeofday(tv, NULL);
457
0
  } else {
458
0
    evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
459
0
    r = 0;
460
0
  }
461
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
462
0
  return r;
463
0
}
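/* Illustrative usage (not part of event.c): from inside a running callback,
 * the cached time lets us timestamp work without an extra system call.
 * 'stamp_cb' is a hypothetical callback; assumes <stdio.h>. */
static void stamp_cb(evutil_socket_t fd, short what, void *arg)
{
  struct event_base *base = arg;
  struct timeval now;
  (void)fd;
  (void)what;

  if (event_base_gettimeofday_cached(base, &now) == 0)
    printf("callback at %ld.%06ld\n",
        (long)now.tv_sec, (long)now.tv_usec);
}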
464
465
/** Make 'base' have no current cached time. */
466
static inline void
467
clear_time_cache(struct event_base *base)
468
0
{
469
0
  base->tv_cache.tv_sec = 0;
470
0
}
471
472
/** Replace the cached time in 'base' with the current time. */
473
static inline void
474
update_time_cache(struct event_base *base)
475
0
{
476
0
  base->tv_cache.tv_sec = 0;
477
0
  if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
478
0
    gettime(base, &base->tv_cache);
479
0
}
480
481
int
482
event_base_update_cache_time(struct event_base *base)
483
0
{
484
485
0
  if (!base) {
486
0
    base = current_base;
487
0
    if (!current_base)
488
0
      return -1;
489
0
  }
490
491
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
492
0
  if (base->running_loop)
493
0
    update_time_cache(base);
494
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
495
0
  return 0;
496
0
}
497
498
static inline struct event *
499
event_callback_to_event(struct event_callback *evcb)
500
0
{
501
0
  EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
502
0
  return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
503
0
}
504
505
static inline struct event_callback *
506
event_to_event_callback(struct event *ev)
507
0
{
508
0
  return &ev->ev_evcallback;
509
0
}
510
511
struct event_base *
512
event_init(void)
513
0
{
514
0
  struct event_base *base = event_base_new_with_config(NULL);
515
516
0
  if (base == NULL) {
517
0
    event_errx(1, "%s: Unable to construct event_base", __func__);
518
0
    return NULL;
519
0
  }
520
521
0
  current_base = base;
522
523
0
  return (base);
524
0
}
525
526
struct event_base *
527
event_base_new(void)
528
0
{
529
0
  struct event_base *base = NULL;
530
0
  struct event_config *cfg = event_config_new();
531
0
  if (cfg) {
532
0
    base = event_base_new_with_config(cfg);
533
0
    event_config_free(cfg);
534
0
  }
535
0
  return base;
536
0
}
537
538
/** Return true iff 'method' is the name of a method that 'cfg' tells us to
539
 * avoid. */
540
static int
541
event_config_is_avoided_method(const struct event_config *cfg,
542
    const char *method)
543
0
{
544
0
  struct event_config_entry *entry;
545
546
0
  TAILQ_FOREACH(entry, &cfg->entries, next) {
547
0
    if (entry->avoid_method != NULL &&
548
0
        strcmp(entry->avoid_method, method) == 0)
549
0
      return (1);
550
0
  }
551
552
0
  return (0);
553
0
}
554
555
/** Return true iff 'method' is disabled according to the environment. */
556
static int
557
event_is_method_disabled(const char *name)
558
0
{
559
0
  char environment[64];
560
0
  int i;
561
562
0
  evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
563
0
  for (i = 8; environment[i] != '\0'; ++i)
564
0
    environment[i] = EVUTIL_TOUPPER_(environment[i]);
565
  /* Note that evutil_getenv_() ignores the environment entirely if
566
   * we're setuid */
567
0
  return (evutil_getenv_(environment) != NULL);
568
0
}
569
570
int
571
event_base_get_features(const struct event_base *base)
572
0
{
573
0
  return base->evsel->features;
574
0
}
575
576
void
577
event_enable_debug_mode(void)
578
0
{
579
0
#ifndef EVENT__DISABLE_DEBUG_MODE
580
0
  if (event_debug_mode_on_)
581
0
    event_errx(1, "%s was called twice!", __func__);
582
0
  if (event_debug_mode_too_late)
583
0
    event_errx(1, "%s must be called *before* creating any events "
584
0
        "or event_bases",__func__);
585
586
0
  event_debug_mode_on_ = 1;
587
588
0
  HT_INIT(event_debug_map, &global_debug_map);
589
0
#endif
590
0
}
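/* Illustrative usage (not part of event.c): debug mode has to be the first
 * libevent call in the process; the checks above abort if any event or
 * event_base already exists.  'run_app_with_debugging' is a hypothetical
 * stand-in for the real program. */
static int run_app_with_debugging(void)
{
  struct event_base *base;

  event_enable_debug_mode();  /* before any event/event_base is created */
  base = event_base_new();    /* every event is now tracked in the map */
  if (!base)
    return -1;
  /* ... create events, event_base_dispatch(base) ... */
  event_base_free(base);
  return 0;
}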
591
592
void
593
event_disable_debug_mode(void)
594
0
{
595
0
#ifndef EVENT__DISABLE_DEBUG_MODE
596
0
  struct event_debug_entry **ent, *victim;
597
598
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
599
0
  for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
600
0
    victim = *ent;
601
0
    ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
602
0
    mm_free(victim);
603
0
  }
604
0
  HT_CLEAR(event_debug_map, &global_debug_map);
605
0
  EVLOCK_UNLOCK(event_debug_map_lock_ , 0);
606
607
0
  event_debug_mode_on_  = 0;
608
0
#endif
609
0
}
610
611
struct event_base *
612
event_base_new_with_config(const struct event_config *cfg)
613
0
{
614
0
  int i;
615
0
  struct event_base *base;
616
0
  int should_check_environment;
617
618
0
#ifndef EVENT__DISABLE_DEBUG_MODE
619
0
  event_debug_mode_too_late = 1;
620
0
#endif
621
622
0
  if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
623
0
    event_warn("%s: calloc", __func__);
624
0
    return NULL;
625
0
  }
626
627
0
  if (cfg)
628
0
    base->flags = cfg->flags;
629
630
0
  should_check_environment =
631
0
      !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
632
633
0
  {
634
0
    struct timeval tmp;
635
0
    int precise_time =
636
0
        cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
637
0
    int flags;
638
0
    if (should_check_environment && !precise_time) {
639
0
      precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
640
0
      if (precise_time) {
641
0
        base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
642
0
      }
643
0
    }
644
0
    flags = precise_time ? EV_MONOT_PRECISE : 0;
645
0
    evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
646
647
0
    gettime(base, &tmp);
648
0
  }
649
650
0
  min_heap_ctor_(&base->timeheap);
651
652
0
  base->sig.ev_signal_pair[0] = -1;
653
0
  base->sig.ev_signal_pair[1] = -1;
654
0
  base->th_notify_fd[0] = -1;
655
0
  base->th_notify_fd[1] = -1;
656
657
0
  TAILQ_INIT(&base->active_later_queue);
658
659
0
  evmap_io_initmap_(&base->io);
660
0
  evmap_signal_initmap_(&base->sigmap);
661
0
  event_changelist_init_(&base->changelist);
662
663
0
  base->evbase = NULL;
664
665
0
  if (cfg) {
666
0
    memcpy(&base->max_dispatch_time,
667
0
        &cfg->max_dispatch_interval, sizeof(struct timeval));
668
0
    base->limit_callbacks_after_prio =
669
0
        cfg->limit_callbacks_after_prio;
670
0
  } else {
671
0
    base->max_dispatch_time.tv_sec = -1;
672
0
    base->limit_callbacks_after_prio = 1;
673
0
  }
674
0
  if (cfg && cfg->max_dispatch_callbacks >= 0) {
675
0
    base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
676
0
  } else {
677
0
    base->max_dispatch_callbacks = INT_MAX;
678
0
  }
679
0
  if (base->max_dispatch_callbacks == INT_MAX &&
680
0
      base->max_dispatch_time.tv_sec == -1)
681
0
    base->limit_callbacks_after_prio = INT_MAX;
682
683
0
  for (i = 0; eventops[i] && !base->evbase; i++) {
684
0
    if (cfg != NULL) {
685
      /* determine if this backend should be avoided */
686
0
      if (event_config_is_avoided_method(cfg,
687
0
        eventops[i]->name))
688
0
        continue;
689
0
      if ((eventops[i]->features & cfg->require_features)
690
0
          != cfg->require_features)
691
0
        continue;
692
0
    }
693
694
    /* also obey the environment variables */
695
0
    if (should_check_environment &&
696
0
        event_is_method_disabled(eventops[i]->name))
697
0
      continue;
698
699
0
    base->evsel = eventops[i];
700
701
0
    base->evbase = base->evsel->init(base);
702
0
  }
703
704
0
  if (base->evbase == NULL) {
705
0
    event_warnx("%s: no event mechanism available",
706
0
        __func__);
707
0
    base->evsel = NULL;
708
0
    event_base_free(base);
709
0
    return NULL;
710
0
  }
711
712
0
  if (evutil_getenv_("EVENT_SHOW_METHOD"))
713
0
    event_msgx("libevent using: %s", base->evsel->name);
714
715
  /* allocate a single active event queue */
716
0
  if (event_base_priority_init(base, 1) < 0) {
717
0
    event_base_free(base);
718
0
    return NULL;
719
0
  }
720
721
  /* prepare for threading */
722
723
0
#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
724
0
  event_debug_created_threadable_ctx_ = 1;
725
0
#endif
726
727
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
728
0
  if (EVTHREAD_LOCKING_ENABLED() &&
729
0
      (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
730
0
    int r;
731
0
    EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
732
0
    EVTHREAD_ALLOC_COND(base->current_event_cond);
733
0
    r = evthread_make_base_notifiable(base);
734
0
    if (r<0) {
735
0
      event_warnx("%s: Unable to make base notifiable.", __func__);
736
0
      event_base_free(base);
737
0
      return NULL;
738
0
    }
739
0
  }
740
0
#endif
741
742
#ifdef _WIN32
743
  if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
744
    event_base_start_iocp_(base, cfg->n_cpus_hint);
745
#endif
746
747
  /* initialize watcher lists */
748
0
  for (i = 0; i < EVWATCH_MAX; ++i)
749
0
    TAILQ_INIT(&base->watchers[i]);
750
751
0
  return (base);
752
0
}
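/* Illustrative sketch (not part of event.c): the cfg fields consulted above
 * come from the public event_config API.  Requiring O(1) event handling and
 * avoiding "select" narrows the backend search loop in
 * event_base_new_with_config().  'make_tuned_base' is a hypothetical helper. */
static struct event_base *make_tuned_base(void)
{
  struct event_base *base = NULL;
  struct event_config *cfg = event_config_new();

  if (!cfg)
    return NULL;
  event_config_avoid_method(cfg, "select");
  event_config_require_features(cfg, EV_FEATURE_O1);
  base = event_base_new_with_config(cfg);  /* NULL if no backend matches */
  event_config_free(cfg);
  return base;
}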
753
754
int
755
event_base_start_iocp_(struct event_base *base, int n_cpus)
756
0
{
757
#ifdef _WIN32
758
  if (base->iocp)
759
    return 0;
760
  base->iocp = event_iocp_port_launch_(n_cpus);
761
  if (!base->iocp) {
762
    event_warnx("%s: Couldn't launch IOCP", __func__);
763
    return -1;
764
  }
765
  return 0;
766
#else
767
0
  return -1;
768
0
#endif
769
0
}
770
771
void
772
event_base_stop_iocp_(struct event_base *base)
773
0
{
774
#ifdef _WIN32
775
  int rv;
776
777
  if (!base->iocp)
778
    return;
779
  rv = event_iocp_shutdown_(base->iocp, -1);
780
  EVUTIL_ASSERT(rv >= 0);
781
  base->iocp = NULL;
782
#endif
783
0
}
784
785
static int
786
event_base_cancel_single_callback_(struct event_base *base,
787
    struct event_callback *evcb,
788
    int run_finalizers)
789
0
{
790
0
  int result = 0;
791
792
0
  if (evcb->evcb_flags & EVLIST_INIT) {
793
0
    struct event *ev = event_callback_to_event(evcb);
794
0
    if (!(ev->ev_flags & EVLIST_INTERNAL)) {
795
0
      event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
796
0
      result = 1;
797
0
    }
798
0
  } else {
799
0
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
800
0
    event_callback_cancel_nolock_(base, evcb, 1);
801
0
    EVBASE_RELEASE_LOCK(base, th_base_lock);
802
0
    result = 1;
803
0
  }
804
805
0
  if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
806
0
    switch (evcb->evcb_closure) {
807
0
    case EV_CLOSURE_EVENT_FINALIZE:
808
0
    case EV_CLOSURE_EVENT_FINALIZE_FREE: {
809
0
      struct event *ev = event_callback_to_event(evcb);
810
0
      ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
811
0
      if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
812
0
        mm_free(ev);
813
0
      break;
814
0
    }
815
0
    case EV_CLOSURE_CB_FINALIZE:
816
0
      evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
817
0
      break;
818
0
    default:
819
0
      break;
820
0
    }
821
0
  }
822
0
  return result;
823
0
}
824
825
static int event_base_free_queues_(struct event_base *base, int run_finalizers)
826
0
{
827
0
  int deleted = 0, i;
828
829
0
  for (i = 0; i < base->nactivequeues; ++i) {
830
0
    struct event_callback *evcb, *next;
831
0
    for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
832
0
      next = TAILQ_NEXT(evcb, evcb_active_next);
833
0
      deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
834
0
      evcb = next;
835
0
    }
836
0
  }
837
838
0
  {
839
0
    struct event_callback *evcb;
840
0
    while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
841
0
      deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
842
0
    }
843
0
  }
844
845
0
  return deleted;
846
0
}
847
848
static void
849
event_base_free_(struct event_base *base, int run_finalizers)
850
0
{
851
0
  int i;
852
0
  size_t n_deleted=0;
853
0
  struct event *ev;
854
0
  struct evwatch *watcher;
855
  /* XXXX grab the lock? If there is contention when one thread frees
856
   * the base, then the contending thread will be very sad soon. */
857
858
  /* event_base_free(NULL) is how to free the current_base if we
859
   * made it with event_init and forgot to hold a reference to it. */
860
0
  if (base == NULL && current_base)
861
0
    base = current_base;
862
  /* Don't actually free NULL. */
863
0
  if (base == NULL) {
864
0
    event_warnx("%s: no base to free", __func__);
865
0
    return;
866
0
  }
867
  /* XXX(niels) - check for internal events first */
868
869
#ifdef _WIN32
870
  event_base_stop_iocp_(base);
871
#endif
872
873
  /* threading fds if we have them */
874
0
  if (base->th_notify_fd[0] != -1) {
875
0
    event_del(&base->th_notify);
876
0
    EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
877
0
    if (base->th_notify_fd[1] != -1)
878
0
      EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
879
0
    base->th_notify_fd[0] = -1;
880
0
    base->th_notify_fd[1] = -1;
881
0
    event_debug_unassign(&base->th_notify);
882
0
  }
883
884
  /* Delete all non-internal events. */
885
0
  evmap_delete_all_(base);
886
887
0
  while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
888
0
    event_del(ev);
889
0
    ++n_deleted;
890
0
  }
891
0
  for (i = 0; i < base->n_common_timeouts; ++i) {
892
0
    struct common_timeout_list *ctl =
893
0
        base->common_timeout_queues[i];
894
0
    event_del(&ctl->timeout_event); /* Internal; doesn't count */
895
0
    event_debug_unassign(&ctl->timeout_event);
896
0
    for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
897
0
      struct event *next = TAILQ_NEXT(ev,
898
0
          ev_timeout_pos.ev_next_with_common_timeout);
899
0
      if (!(ev->ev_flags & EVLIST_INTERNAL)) {
900
0
        event_del(ev);
901
0
        ++n_deleted;
902
0
      }
903
0
      ev = next;
904
0
    }
905
0
    mm_free(ctl);
906
0
  }
907
0
  if (base->common_timeout_queues)
908
0
    mm_free(base->common_timeout_queues);
909
910
0
  for (;;) {
911
    /* For finalizers we can register yet another finalizer out from
912
     * finalizer, and iff finalizer will be in active_later_queue we can
913
     * add finalizer to activequeues, and we will have events in
914
     * activequeues after this function returns, which is not what we want
915
     * (we even have an assertion for this).
916
     *
917
     * A simple case is bufferevent with underlying (i.e. filters).
918
     */
919
0
    int i = event_base_free_queues_(base, run_finalizers);
920
0
    event_debug(("%s: %d events freed", __func__, i));
921
0
    if (!i) {
922
0
      break;
923
0
    }
924
0
    n_deleted += i;
925
0
  }
926
927
0
  if (n_deleted)
928
0
    event_debug(("%s: "EV_SIZE_FMT" events were still set in base",
929
0
      __func__, n_deleted));
930
931
0
  while (LIST_FIRST(&base->once_events)) {
932
0
    struct event_once *eonce = LIST_FIRST(&base->once_events);
933
0
    LIST_REMOVE(eonce, next_once);
934
0
    mm_free(eonce);
935
0
  }
936
937
0
  if (base->evsel != NULL && base->evsel->dealloc != NULL)
938
0
    base->evsel->dealloc(base);
939
940
0
  for (i = 0; i < base->nactivequeues; ++i)
941
0
    EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
942
943
0
  EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
944
0
  min_heap_dtor_(&base->timeheap);
945
946
0
  mm_free(base->activequeues);
947
948
0
  evmap_io_clear_(&base->io);
949
0
  evmap_signal_clear_(&base->sigmap);
950
0
  event_changelist_freemem_(&base->changelist);
951
952
0
  EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
953
0
  EVTHREAD_FREE_COND(base->current_event_cond);
954
955
  /* Free all event watchers */
956
0
  for (i = 0; i < EVWATCH_MAX; ++i) {
957
0
    while (!TAILQ_EMPTY(&base->watchers[i])) {
958
0
      watcher = TAILQ_FIRST(&base->watchers[i]);
959
0
      TAILQ_REMOVE(&base->watchers[i], watcher, next);
960
0
      mm_free(watcher);
961
0
    }
962
0
  }
963
964
  /* If we're freeing current_base, there won't be a current_base. */
965
0
  if (base == current_base)
966
0
    current_base = NULL;
967
0
  mm_free(base);
968
0
}
969
970
void
971
event_base_free_nofinalize(struct event_base *base)
972
0
{
973
0
  event_base_free_(base, 0);
974
0
}
975
976
void
977
event_base_free(struct event_base *base)
978
0
{
979
0
  event_base_free_(base, 1);
980
0
}
981
982
/* Fake eventop; used to disable the backend temporarily inside event_reinit
983
 * so that we can call event_del() on an event without telling the backend.
984
 */
985
static int
986
nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
987
    short events, void *fdinfo)
988
0
{
989
0
  return 0;
990
0
}
991
const struct eventop nil_eventop = {
992
  "nil",
993
  NULL, /* init: unused. */
994
  NULL, /* add: unused. */
995
  nil_backend_del, /* del: used, so needs to be killed. */
996
  NULL, /* dispatch: unused. */
997
  NULL, /* dealloc: unused. */
998
  0, 0, 0
999
};
1000
1001
/* reinitialize the event base after a fork */
1002
int
1003
event_reinit(struct event_base *base)
1004
0
{
1005
0
  const struct eventop *evsel;
1006
0
  int res = 0;
1007
0
  int was_notifiable = 0;
1008
0
  int had_signal_added = 0;
1009
1010
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1011
1012
0
  evsel = base->evsel;
1013
1014
  /* check if this event mechanism requires reinit on the backend */
1015
0
  if (evsel->need_reinit) {
1016
    /* We're going to call event_del() on our notify events (the
1017
     * ones that tell about signals and wakeup events).  But we
1018
     * don't actually want to tell the backend to change its
1019
     * state, since it might still share some resource (a kqueue,
1020
     * an epoll fd) with the parent process, and we don't want to
1021
     * delete the fds from _that_ backend, so we temporarily stub out
1022
     * the evsel with a replacement.
1023
     */
1024
0
    base->evsel = &nil_eventop;
1025
0
  }
1026
1027
  /* We need to re-create a new signal-notification fd and a new
1028
   * thread-notification fd.  Otherwise, we'll still share those with
1029
   * the parent process, which would make any notification sent to them
1030
   * get received by one or both of the event loops, more or less at
1031
   * random.
1032
   */
1033
0
  if (base->sig.ev_signal_added) {
1034
0
    event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
1035
0
    event_debug_unassign(&base->sig.ev_signal);
1036
0
    memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
1037
0
    had_signal_added = 1;
1038
0
    base->sig.ev_signal_added = 0;
1039
0
  }
1040
0
  if (base->sig.ev_signal_pair[0] != -1)
1041
0
    EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
1042
0
  if (base->sig.ev_signal_pair[1] != -1)
1043
0
    EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
1044
0
  if (base->th_notify_fn != NULL) {
1045
0
    was_notifiable = 1;
1046
0
    base->th_notify_fn = NULL;
1047
0
  }
1048
0
  if (base->th_notify_fd[0] != -1) {
1049
0
    event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
1050
0
    EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
1051
0
    if (base->th_notify_fd[1] != -1)
1052
0
      EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
1053
0
    base->th_notify_fd[0] = -1;
1054
0
    base->th_notify_fd[1] = -1;
1055
0
    event_debug_unassign(&base->th_notify);
1056
0
  }
1057
1058
  /* Replace the original evsel. */
1059
0
  base->evsel = evsel;
1060
1061
0
  if (evsel->need_reinit) {
1062
    /* Reconstruct the backend through brute-force, so that we do
1063
     * not share any structures with the parent process. For some
1064
     * backends, this is necessary: epoll and kqueue, for
1065
     * instance, have events associated with a kernel
1066
     * structure. If we didn't reinitialize, we'd share that
1067
     * structure with the parent process, and any changes made by
1068
     * the parent would affect our backend's behavior (and vice
1069
     * versa).
1070
     */
1071
0
    if (base->evsel->dealloc != NULL)
1072
0
      base->evsel->dealloc(base);
1073
0
    base->evbase = evsel->init(base);
1074
0
    if (base->evbase == NULL) {
1075
0
      event_errx(1,
1076
0
         "%s: could not reinitialize event mechanism",
1077
0
         __func__);
1078
0
      res = -1;
1079
0
      goto done;
1080
0
    }
1081
1082
    /* Empty out the changelist (if any): we are starting from a
1083
     * blank slate. */
1084
0
    event_changelist_freemem_(&base->changelist);
1085
1086
    /* Tell the event maps to re-inform the backend about all
1087
     * pending events. This will make the signal notification
1088
     * event get re-created if necessary. */
1089
0
    if (evmap_reinit_(base) < 0)
1090
0
      res = -1;
1091
0
  } else {
1092
0
    res = evsig_init_(base);
1093
0
    if (res == 0 && had_signal_added) {
1094
0
      res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
1095
0
      if (res == 0)
1096
0
        base->sig.ev_signal_added = 1;
1097
0
    }
1098
0
  }
1099
1100
  /* If we were notifiable before, and nothing just exploded, become
1101
   * notifiable again. */
1102
0
  if (was_notifiable && res == 0)
1103
0
    res = evthread_make_base_notifiable_nolock_(base);
1104
1105
0
done:
1106
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1107
0
  return (res);
1108
0
}
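/* Illustrative usage (not part of event.c): after fork(), the child must not
 * keep sharing the parent's kqueue/epoll state or notification fds, so it
 * calls event_reinit() before running the loop.  Assumes POSIX fork() from
 * <unistd.h> and <stdio.h>; error handling is abbreviated. */
static void dispatch_in_child(struct event_base *base)
{
  pid_t pid = fork();

  if (pid == 0) {
    if (event_reinit(base) == -1)
      fprintf(stderr, "event_reinit failed in child\n");
    event_base_dispatch(base);
    _exit(0);
  }
  /* the parent continues using 'base' unchanged */
}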
1109
1110
/* Get the monotonic time for this event_base' timer */
1111
int
1112
event_gettime_monotonic(struct event_base *base, struct timeval *tv)
1113
0
{
1114
0
  int rv = -1;
1115
1116
0
  if (base && tv) {
1117
0
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1118
0
    rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
1119
0
    EVBASE_RELEASE_LOCK(base, th_base_lock);
1120
0
  }
1121
1122
0
  return rv;
1123
0
}
1124
1125
const char **
1126
event_get_supported_methods(void)
1127
0
{
1128
0
  static const char **methods = NULL;
1129
0
  const struct eventop **method;
1130
0
  const char **tmp;
1131
0
  int i = 0, k;
1132
1133
  /* count all methods */
1134
0
  for (method = &eventops[0]; *method != NULL; ++method) {
1135
0
    ++i;
1136
0
  }
1137
1138
  /* allocate one more than we need for the NULL pointer */
1139
0
  tmp = mm_calloc((i + 1), sizeof(char *));
1140
0
  if (tmp == NULL)
1141
0
    return (NULL);
1142
1143
  /* populate the array with the supported methods */
1144
0
  for (k = 0, i = 0; eventops[k] != NULL; ++k) {
1145
0
    tmp[i++] = eventops[k]->name;
1146
0
  }
1147
0
  tmp[i] = NULL;
1148
1149
0
  if (methods != NULL)
1150
0
    mm_free((char**)methods);
1151
1152
0
  methods = tmp;
1153
1154
0
  return (methods);
1155
0
}
1156
1157
struct event_config *
1158
event_config_new(void)
1159
0
{
1160
0
  struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
1161
1162
0
  if (cfg == NULL)
1163
0
    return (NULL);
1164
1165
0
  TAILQ_INIT(&cfg->entries);
1166
0
  cfg->max_dispatch_interval.tv_sec = -1;
1167
0
  cfg->max_dispatch_callbacks = INT_MAX;
1168
0
  cfg->limit_callbacks_after_prio = 1;
1169
1170
0
  return (cfg);
1171
0
}
1172
1173
static void
1174
event_config_entry_free(struct event_config_entry *entry)
1175
0
{
1176
0
  if (entry->avoid_method != NULL)
1177
0
    mm_free((char *)entry->avoid_method);
1178
0
  mm_free(entry);
1179
0
}
1180
1181
void
1182
event_config_free(struct event_config *cfg)
1183
0
{
1184
0
  struct event_config_entry *entry;
1185
1186
0
  while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
1187
0
    TAILQ_REMOVE(&cfg->entries, entry, next);
1188
0
    event_config_entry_free(entry);
1189
0
  }
1190
0
  mm_free(cfg);
1191
0
}
1192
1193
int
1194
event_config_set_flag(struct event_config *cfg, int flag)
1195
0
{
1196
0
  if (!cfg)
1197
0
    return -1;
1198
0
  cfg->flags |= flag;
1199
0
  return 0;
1200
0
}
1201
1202
int
1203
event_config_avoid_method(struct event_config *cfg, const char *method)
1204
0
{
1205
0
  struct event_config_entry *entry = mm_malloc(sizeof(*entry));
1206
0
  if (entry == NULL)
1207
0
    return (-1);
1208
1209
0
  if ((entry->avoid_method = mm_strdup(method)) == NULL) {
1210
0
    mm_free(entry);
1211
0
    return (-1);
1212
0
  }
1213
1214
0
  TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
1215
1216
0
  return (0);
1217
0
}
1218
1219
int
1220
event_config_require_features(struct event_config *cfg,
1221
    int features)
1222
0
{
1223
0
  if (!cfg)
1224
0
    return (-1);
1225
0
  cfg->require_features = features;
1226
0
  return (0);
1227
0
}
1228
1229
int
1230
event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
1231
0
{
1232
0
  if (!cfg)
1233
0
    return (-1);
1234
0
  cfg->n_cpus_hint = cpus;
1235
0
  return (0);
1236
0
}
1237
1238
int
1239
event_config_set_max_dispatch_interval(struct event_config *cfg,
1240
    const struct timeval *max_interval, int max_callbacks, int min_priority)
1241
0
{
1242
0
  if (max_interval)
1243
0
    memcpy(&cfg->max_dispatch_interval, max_interval,
1244
0
        sizeof(struct timeval));
1245
0
  else
1246
0
    cfg->max_dispatch_interval.tv_sec = -1;
1247
0
  cfg->max_dispatch_callbacks =
1248
0
      max_callbacks >= 0 ? max_callbacks : INT_MAX;
1249
0
  if (min_priority < 0)
1250
0
    min_priority = 0;
1251
0
  cfg->limit_callbacks_after_prio = min_priority;
1252
0
  return (0);
1253
0
}
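/* Illustrative usage (not part of event.c): bound how long low-priority
 * callbacks may run before the loop re-checks for higher-priority work.
 * Here callbacks at priority >= 1 are limited to 10 ms or 16 callbacks per
 * pass; the numbers are arbitrary for the example. */
static struct event_config *make_low_latency_config(void)
{
  struct event_config *cfg = event_config_new();
  struct timeval ten_msec = { 0, 10000 };

  if (cfg)
    event_config_set_max_dispatch_interval(cfg, &ten_msec, 16, 1);
  return cfg;
}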
1254
1255
int
1256
event_priority_init(int npriorities)
1257
0
{
1258
0
  return event_base_priority_init(current_base, npriorities);
1259
0
}
1260
1261
int
1262
event_base_priority_init(struct event_base *base, int npriorities)
1263
0
{
1264
0
  int i, r;
1265
0
  r = -1;
1266
1267
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1268
1269
0
  if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
1270
0
      || npriorities >= EVENT_MAX_PRIORITIES)
1271
0
    goto err;
1272
1273
0
  if (npriorities == base->nactivequeues)
1274
0
    goto ok;
1275
1276
0
  if (base->nactivequeues) {
1277
0
    mm_free(base->activequeues);
1278
0
    base->nactivequeues = 0;
1279
0
  }
1280
1281
  /* Allocate our priority queues */
1282
0
  base->activequeues = (struct evcallback_list *)
1283
0
    mm_calloc(npriorities, sizeof(struct evcallback_list));
1284
0
  if (base->activequeues == NULL) {
1285
0
    event_warn("%s: calloc", __func__);
1286
0
    goto err;
1287
0
  }
1288
0
  base->nactivequeues = npriorities;
1289
1290
0
  for (i = 0; i < base->nactivequeues; ++i) {
1291
0
    TAILQ_INIT(&base->activequeues[i]);
1292
0
  }
1293
1294
0
ok:
1295
0
  r = 0;
1296
0
err:
1297
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1298
0
  return (r);
1299
0
}
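/* Illustrative usage (not part of event.c): give the base two priority
 * levels, then mark one event as more urgent.  Priority 0 runs before
 * priority 1, and event_priority_set() must come after
 * event_base_priority_init() and before event_add().  The helper name and
 * its arguments are hypothetical. */
static void add_urgent_reader(struct event_base *base, evutil_socket_t fd,
    event_callback_fn read_cb)
{
  struct event *urgent;

  event_base_priority_init(base, 2);  /* priorities 0 (runs first) and 1 */
  urgent = event_new(base, fd, EV_READ | EV_PERSIST, read_cb, NULL);
  if (urgent) {
    event_priority_set(urgent, 0);
    event_add(urgent, NULL);
  }
}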
1300
1301
int
1302
event_base_get_npriorities(struct event_base *base)
1303
0
{
1304
1305
0
  int n;
1306
0
  if (base == NULL)
1307
0
    base = current_base;
1308
1309
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1310
0
  n = base->nactivequeues;
1311
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1312
0
  return (n);
1313
0
}
1314
1315
int
1316
event_base_get_num_events(struct event_base *base, unsigned int type)
1317
0
{
1318
0
  int r = 0;
1319
1320
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1321
1322
0
  if (type & EVENT_BASE_COUNT_ACTIVE)
1323
0
    r += base->event_count_active;
1324
1325
0
  if (type & EVENT_BASE_COUNT_VIRTUAL)
1326
0
    r += base->virtual_event_count;
1327
1328
0
  if (type & EVENT_BASE_COUNT_ADDED)
1329
0
    r += base->event_count;
1330
1331
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1332
1333
0
  return r;
1334
0
}
1335
1336
int
1337
event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
1338
0
{
1339
0
  int r = 0;
1340
1341
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1342
1343
0
  if (type & EVENT_BASE_COUNT_ACTIVE) {
1344
0
    r += base->event_count_active_max;
1345
0
    if (clear)
1346
0
      base->event_count_active_max = 0;
1347
0
  }
1348
1349
0
  if (type & EVENT_BASE_COUNT_VIRTUAL) {
1350
0
    r += base->virtual_event_count_max;
1351
0
    if (clear)
1352
0
      base->virtual_event_count_max = 0;
1353
0
  }
1354
1355
0
  if (type & EVENT_BASE_COUNT_ADDED) {
1356
0
    r += base->event_count_max;
1357
0
    if (clear)
1358
0
      base->event_count_max = 0;
1359
0
  }
1360
1361
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1362
1363
0
  return r;
1364
0
}
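/* Illustrative usage (not part of event.c): a periodic stats hook can query
 * how many events are added and how deep the active queues got, clearing the
 * high-water mark each time.  Assumes <stdio.h>; the helper is hypothetical. */
static void log_base_stats(struct event_base *base)
{
  int added = event_base_get_num_events(base, EVENT_BASE_COUNT_ADDED);
  int max_active = event_base_get_max_events(base,
      EVENT_BASE_COUNT_ACTIVE, 1 /* clear the maximum */);

  fprintf(stderr, "events added=%d max active=%d\n", added, max_active);
}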
1365
1366
/* Returns true iff we're currently watching any events. */
1367
static int
1368
event_haveevents(struct event_base *base)
1369
0
{
1370
  /* Caller must hold th_base_lock */
1371
0
  return (base->virtual_event_count > 0 || base->event_count > 0);
1372
0
}
1373
1374
/* "closure" function called when processing active signal events */
1375
static inline void
1376
event_signal_closure(struct event_base *base, struct event *ev)
1377
0
{
1378
0
#if defined(__clang__)
1379
#elif defined(__GNUC__)
1380
#pragma GCC diagnostic push
1381
/* NOTE: it would be better to avoid such code altogether by using a separate
1381
 * variable to break the loop in the event structure, but for now this code is safe
1383
 * */
1384
#pragma GCC diagnostic ignored "-Wdangling-pointer"
1385
#endif
1386
1387
0
  short ncalls;
1388
0
  int should_break;
1389
1390
  /* Allows deletes to work */
1391
0
  ncalls = ev->ev_ncalls;
1392
0
  if (ncalls != 0)
1393
0
    ev->ev_pncalls = &ncalls;
1394
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1395
0
  while (ncalls) {
1396
0
    ncalls--;
1397
0
    ev->ev_ncalls = ncalls;
1398
0
    if (ncalls == 0)
1399
0
      ev->ev_pncalls = NULL;
1400
0
    (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1401
1402
0
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1403
0
    should_break = base->event_break;
1404
0
    EVBASE_RELEASE_LOCK(base, th_base_lock);
1405
1406
0
    if (should_break) {
1407
0
      if (ncalls != 0)
1408
0
        ev->ev_pncalls = NULL;
1409
0
      return;
1410
0
    }
1411
0
  }
1412
1413
0
#if defined(__clang__)
1414
#elif defined(__GNUC__)
1415
#pragma GCC diagnostic pop
1416
#endif
1417
0
}
1418
1419
/* Common timeouts are special timeouts that are handled as queues rather than
1420
 * in the minheap.  This is more efficient than the minheap if we happen to
1421
 * know that we're going to get several thousands of timeout events all with
1422
 * the same timeout value.
1423
 *
1424
 * Since all our timeout handling code assumes timevals can be copied,
1425
 * assigned, etc, we can't use "magic pointer" to encode these common
1426
 * timeouts.  Searching through a list to see if every timeout is common could
1427
 * also get inefficient.  Instead, we take advantage of the fact that tv_usec
1428
 * is 32 bits long, but only uses 20 of those bits (since it can never be over
1429
 * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
1430
 * of index into the event_base's array of common timeouts.
1431
 */
1432
1433
0
#define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
1434
0
#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1435
0
#define COMMON_TIMEOUT_IDX_SHIFT 20
1436
0
#define COMMON_TIMEOUT_MASK     0xf0000000
1437
0
#define COMMON_TIMEOUT_MAGIC    0x50000000
1438
1439
#define COMMON_TIMEOUT_IDX(tv) \
1440
0
  (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
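/* Illustrative usage (not part of event.c): callers never pack tv_usec by
 * hand; they ask the base for a common-timeout timeval (magic plus index in
 * the upper 12 bits, real microseconds in the lower 20) and pass it to
 * evtimer_add()/event_add().  The helper is hypothetical and assumes 'evs'
 * was filled with evtimer_new() events. */
static void add_many_5s_timers(struct event_base *base,
    struct event **evs, int n)
{
  struct timeval five_sec = { 5, 0 };
  const struct timeval *common =
      event_base_init_common_timeout(base, &five_sec);
  int i;

  for (i = 0; i < n; ++i)
    evtimer_add(evs[i], common ? common : &five_sec);
}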
1441
1442
/** Return true iff 'tv' is a common timeout in 'base' */
1443
static inline int
1444
is_common_timeout(const struct timeval *tv,
1445
    const struct event_base *base)
1446
0
{
1447
0
  int idx;
1448
0
  if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1449
0
    return 0;
1450
0
  idx = COMMON_TIMEOUT_IDX(tv);
1451
0
  return idx < base->n_common_timeouts;
1452
0
}
1453
1454
/* True iff tv1 and tv2 have the same common-timeout index, or if neither
1455
 * one is a common timeout. */
1456
static inline int
1457
is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1458
0
{
1459
0
  return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1460
0
      (tv2->tv_usec & ~MICROSECONDS_MASK);
1461
0
}
1462
1463
/** Requires that 'tv' is a common timeout.  Return the corresponding
1464
 * common_timeout_list. */
1465
static inline struct common_timeout_list *
1466
get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1467
0
{
1468
0
  return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1469
0
}
1470
1471
#if 0
1472
static inline int
1473
common_timeout_ok(const struct timeval *tv,
1474
    struct event_base *base)
1475
{
1476
  const struct timeval *expect =
1477
      &get_common_timeout_list(base, tv)->duration;
1478
  return tv->tv_sec == expect->tv_sec &&
1479
      tv->tv_usec == expect->tv_usec;
1480
}
1481
#endif
1482
1483
/* Add the timeout for the first event in given common timeout list to the
1484
 * event_base's minheap. */
1485
static void
1486
common_timeout_schedule(struct common_timeout_list *ctl,
1487
    const struct timeval *now, struct event *head)
1488
0
{
1489
0
  struct timeval timeout = head->ev_timeout;
1490
0
  timeout.tv_usec &= MICROSECONDS_MASK;
1491
0
  event_add_nolock_(&ctl->timeout_event, &timeout, 1);
1492
0
}
1493
1494
/* Callback: invoked when the timeout for a common timeout queue triggers.
1495
 * This means that (at least) the first event in that queue should be run,
1496
 * and the timeout should be rescheduled if there are more events. */
1497
static void
1498
common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1499
0
{
1500
0
  struct timeval now;
1501
0
  struct common_timeout_list *ctl = arg;
1502
0
  struct event_base *base = ctl->base;
1503
0
  struct event *ev = NULL;
1504
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1505
0
  gettime(base, &now);
1506
0
  while (1) {
1507
0
    ev = TAILQ_FIRST(&ctl->events);
1508
0
    if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1509
0
        (ev->ev_timeout.tv_sec == now.tv_sec &&
1510
0
      (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1511
0
      break;
1512
0
    event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1513
0
    event_active_nolock_(ev, EV_TIMEOUT, 1);
1514
0
  }
1515
0
  if (ev)
1516
0
    common_timeout_schedule(ctl, &now, ev);
1517
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1518
0
}
1519
1520
0
#define MAX_COMMON_TIMEOUTS 256
1521
1522
const struct timeval *
1523
event_base_init_common_timeout(struct event_base *base,
1524
    const struct timeval *duration)
1525
0
{
1526
0
  int i;
1527
0
  struct timeval tv;
1528
0
  const struct timeval *result=NULL;
1529
0
  struct common_timeout_list *new_ctl;
1530
1531
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1532
0
  if (duration->tv_usec > 1000000) {
1533
0
    memcpy(&tv, duration, sizeof(struct timeval));
1534
0
    if (is_common_timeout(duration, base))
1535
0
      tv.tv_usec &= MICROSECONDS_MASK;
1536
0
    tv.tv_sec += tv.tv_usec / 1000000;
1537
0
    tv.tv_usec %= 1000000;
1538
0
    duration = &tv;
1539
0
  }
1540
0
  for (i = 0; i < base->n_common_timeouts; ++i) {
1541
0
    const struct common_timeout_list *ctl =
1542
0
        base->common_timeout_queues[i];
1543
0
    if (duration->tv_sec == ctl->duration.tv_sec &&
1544
0
        duration->tv_usec ==
1545
0
        (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1546
0
      EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1547
0
      result = &ctl->duration;
1548
0
      goto done;
1549
0
    }
1550
0
  }
1551
0
  if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1552
0
    event_warnx("%s: Too many common timeouts already in use; "
1553
0
        "we only support %d per event_base", __func__,
1554
0
        MAX_COMMON_TIMEOUTS);
1555
0
    goto done;
1556
0
  }
1557
0
  if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1558
0
    int n = base->n_common_timeouts < 16 ? 16 :
1559
0
        base->n_common_timeouts*2;
1560
0
    struct common_timeout_list **newqueues =
1561
0
        mm_realloc(base->common_timeout_queues,
1562
0
      n*sizeof(struct common_timeout_queue *));
1563
0
    if (!newqueues) {
1564
0
      event_warn("%s: realloc",__func__);
1565
0
      goto done;
1566
0
    }
1567
0
    base->n_common_timeouts_allocated = n;
1568
0
    base->common_timeout_queues = newqueues;
1569
0
  }
1570
0
  new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1571
0
  if (!new_ctl) {
1572
0
    event_warn("%s: calloc",__func__);
1573
0
    goto done;
1574
0
  }
1575
0
  TAILQ_INIT(&new_ctl->events);
1576
0
  new_ctl->duration.tv_sec = duration->tv_sec;
1577
0
  new_ctl->duration.tv_usec =
1578
0
      duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1579
0
      (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1580
0
  evtimer_assign(&new_ctl->timeout_event, base,
1581
0
      common_timeout_callback, new_ctl);
1582
0
  new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1583
0
  event_priority_set(&new_ctl->timeout_event, 0);
1584
0
  new_ctl->base = base;
1585
0
  base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1586
0
  result = &new_ctl->duration;
1587
1588
0
done:
1589
0
  if (result)
1590
0
    EVUTIL_ASSERT(is_common_timeout(result, base));
1591
1592
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1593
0
  return result;
1594
0
}
1595
1596
/* Closure function invoked when we're activating a persistent event. */
1597
static inline void
1598
event_persist_closure(struct event_base *base, struct event *ev)
1599
0
{
1600
0
  void (*evcb_callback)(evutil_socket_t, short, void *);
1601
1602
  // Other fields of *ev that must be stored before executing
1603
0
  evutil_socket_t evcb_fd;
1604
0
  short evcb_res;
1605
0
  void *evcb_arg;
1606
1607
  /* reschedule the persistent event if we have a timeout. */
1608
0
  if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1609
    /* If there was a timeout, we want it to run at an interval of
1610
     * ev_io_timeout after the last time it was _scheduled_ for,
1611
     * not ev_io_timeout after _now_.  If it fired for another
1612
     * reason, though, the timeout ought to start ticking _now_. */
1613
0
    struct timeval run_at, relative_to, delay, now;
1614
0
    ev_uint32_t usec_mask = 0;
1615
0
    EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1616
0
      &ev->ev_io_timeout));
1617
0
    gettime(base, &now);
1618
0
    if (is_common_timeout(&ev->ev_timeout, base)) {
1619
0
      delay = ev->ev_io_timeout;
1620
0
      usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1621
0
      delay.tv_usec &= MICROSECONDS_MASK;
1622
0
      if (ev->ev_res & EV_TIMEOUT) {
1623
0
        relative_to = ev->ev_timeout;
1624
0
        relative_to.tv_usec &= MICROSECONDS_MASK;
1625
0
      } else {
1626
0
        relative_to = now;
1627
0
      }
1628
0
    } else {
1629
0
      delay = ev->ev_io_timeout;
1630
0
      if (ev->ev_res & EV_TIMEOUT) {
1631
0
        relative_to = ev->ev_timeout;
1632
0
      } else {
1633
0
        relative_to = now;
1634
0
      }
1635
0
    }
1636
0
    evutil_timeradd(&relative_to, &delay, &run_at);
1637
0
    if (evutil_timercmp(&run_at, &now, <)) {
1638
      /* Looks like we missed at least one invocation due to
1639
       * a clock jump, not running the event loop for a
1640
       * while, really slow callbacks, or
1641
       * something. Reschedule relative to now.
1642
       */
1643
0
      evutil_timeradd(&now, &delay, &run_at);
1644
0
    }
1645
0
    run_at.tv_usec |= usec_mask;
1646
0
    event_add_nolock_(ev, &run_at, 1);
1647
0
  }
1648
1649
  // Save our callback before we release the lock
1650
0
  evcb_callback = ev->ev_callback;
1651
0
  evcb_fd = ev->ev_fd;
1652
0
  evcb_res = ev->ev_res;
1653
0
  evcb_arg = ev->ev_arg;
1654
1655
  // Release the lock
1656
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1657
1658
  // Execute the callback
1659
0
  (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
1660
0
}
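/* Illustrative usage (not part of event.c): a persistent pure-timeout event
 * created with the public API is dispatched through this closure.  Because
 * rescheduling is relative to the previously scheduled expiry, 'tick_cb'
 * fires on a steady one-second grid instead of drifting by its own run time.
 * The helper and callback names are hypothetical. */
static void start_one_second_ticker(struct event_base *base,
    event_callback_fn tick_cb)
{
  struct timeval one_sec = { 1, 0 };
  struct event *tick = event_new(base, -1, EV_PERSIST, tick_cb, NULL);

  if (tick)
    event_add(tick, &one_sec);
}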
1661
1662
/*
1663
  Helper for event_process_active to process all the events in a single queue,
1664
  releasing the lock as we go.  This function requires that the lock be held
1665
  when it's invoked.  Returns -1 if we get a signal or an event_break that
1666
  means we should stop processing any active events now.  Otherwise returns
1667
  the number of non-internal event_callbacks that we processed.
1668
*/
1669
static int
1670
event_process_active_single_queue(struct event_base *base,
1671
    struct evcallback_list *activeq,
1672
    int max_to_process, const struct timeval *endtime)
1673
0
{
1674
0
  struct event_callback *evcb;
1675
0
  int count = 0;
1676
1677
0
  EVUTIL_ASSERT(activeq != NULL);
1678
1679
0
  for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
1680
0
    struct event *ev=NULL;
1681
0
    if (evcb->evcb_flags & EVLIST_INIT) {
1682
0
      ev = event_callback_to_event(evcb);
1683
1684
0
      if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
1685
0
        event_queue_remove_active(base, evcb);
1686
0
      else
1687
0
        event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1688
0
      event_debug((
1689
0
          "event_process_active: event: %p, %s%s%scall %p",
1690
0
          (void *)ev,
1691
0
          ev->ev_res & EV_READ ? "EV_READ " : " ",
1692
0
          ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1693
0
          ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
1694
0
          (void *)ev->ev_callback));
1695
0
    } else {
1696
0
      event_queue_remove_active(base, evcb);
1697
0
      event_debug(("event_process_active: event_callback %p, "
1698
0
        "closure %d, call %p",
1699
0
        (void *)evcb, evcb->evcb_closure, (void *)evcb->evcb_cb_union.evcb_callback));
1700
0
    }
1701
1702
0
    if (!(evcb->evcb_flags & EVLIST_INTERNAL))
1703
0
      ++count;
1704
1705
1706
0
    base->current_event = evcb;
1707
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
1708
0
    base->current_event_waiters = 0;
1709
0
#endif
1710
1711
0
    switch (evcb->evcb_closure) {
1712
0
    case EV_CLOSURE_EVENT_SIGNAL:
1713
0
      EVUTIL_ASSERT(ev != NULL);
1714
0
      event_signal_closure(base, ev);
1715
0
      break;
1716
0
    case EV_CLOSURE_EVENT_PERSIST:
1717
0
      EVUTIL_ASSERT(ev != NULL);
1718
0
      event_persist_closure(base, ev);
1719
0
      break;
1720
0
    case EV_CLOSURE_EVENT: {
1721
0
      void (*evcb_callback)(evutil_socket_t, short, void *);
1722
0
      short res;
1723
0
      EVUTIL_ASSERT(ev != NULL);
1724
0
      evcb_callback = *ev->ev_callback;
1725
0
      res = ev->ev_res;
1726
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
1727
0
      evcb_callback(ev->ev_fd, res, ev->ev_arg);
1728
0
    }
1729
0
    break;
1730
0
    case EV_CLOSURE_CB_SELF: {
1731
0
      void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
1732
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
1733
0
      evcb_selfcb(evcb, evcb->evcb_arg);
1734
0
    }
1735
0
    break;
1736
0
    case EV_CLOSURE_EVENT_FINALIZE:
1737
0
    case EV_CLOSURE_EVENT_FINALIZE_FREE: {
1738
0
      void (*evcb_evfinalize)(struct event *, void *);
1739
0
      int evcb_closure = evcb->evcb_closure;
1740
0
      EVUTIL_ASSERT(ev != NULL);
1741
0
      base->current_event = NULL;
1742
0
      evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
1743
0
      EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1744
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
1745
0
      event_debug_note_teardown_(ev);
1746
0
      evcb_evfinalize(ev, ev->ev_arg);
1747
0
      if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
1748
0
        mm_free(ev);
1749
0
    }
1750
0
    break;
1751
0
    case EV_CLOSURE_CB_FINALIZE: {
1752
0
      void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
1753
0
      base->current_event = NULL;
1754
0
      EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1755
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
1756
0
      evcb_cbfinalize(evcb, evcb->evcb_arg);
1757
0
    }
1758
0
    break;
1759
0
    default:
1760
0
      EVUTIL_ASSERT(0);
1761
0
    }
1762
1763
0
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1764
0
    base->current_event = NULL;
1765
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
1766
0
    if (base->current_event_waiters) {
1767
0
      base->current_event_waiters = 0;
1768
0
      EVTHREAD_COND_BROADCAST(base->current_event_cond);
1769
0
    }
1770
0
#endif
1771
1772
0
    if (base->event_break)
1773
0
      return -1;
1774
0
    if (count >= max_to_process)
1775
0
      return count;
1776
0
    if (count && endtime) {
1777
0
      struct timeval now;
1778
0
      update_time_cache(base);
1779
0
      gettime(base, &now);
1780
0
      if (evutil_timercmp(&now, endtime, >=))
1781
0
        return count;
1782
0
    }
1783
0
    if (base->event_continue)
1784
0
      break;
1785
0
  }
1786
0
  return count;
1787
0
}
1788
1789
/*
1790
 * Active events are stored in priority queues.  Lower-numbered priorities
1791
 * are always processed before higher-numbered ones, so events at a
1792
 * lower-numbered priority can starve those at a higher-numbered priority.
1793
 */
1794
1795
static int
1796
event_process_active(struct event_base *base)
1797
0
{
1798
  /* Caller must hold th_base_lock */
1799
0
  struct evcallback_list *activeq = NULL;
1800
0
  int i, c = 0;
1801
0
  const struct timeval *endtime;
1802
0
  struct timeval tv;
1803
0
  const int maxcb = base->max_dispatch_callbacks;
1804
0
  const int limit_after_prio = base->limit_callbacks_after_prio;
1805
0
  if (base->max_dispatch_time.tv_sec >= 0) {
1806
0
    update_time_cache(base);
1807
0
    gettime(base, &tv);
1808
0
    evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
1809
0
    endtime = &tv;
1810
0
  } else {
1811
0
    endtime = NULL;
1812
0
  }
1813
1814
0
  for (i = 0; i < base->nactivequeues; ++i) {
1815
0
    if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1816
0
      base->event_running_priority = i;
1817
0
      activeq = &base->activequeues[i];
1818
0
      if (i < limit_after_prio)
1819
0
        c = event_process_active_single_queue(base, activeq,
1820
0
            INT_MAX, NULL);
1821
0
      else
1822
0
        c = event_process_active_single_queue(base, activeq,
1823
0
            maxcb, endtime);
1824
0
      if (c < 0) {
1825
0
        goto done;
1826
0
      } else if (c > 0)
1827
0
        break; /* Processed a real event; do not
1828
          * consider lower-priority events */
1829
      /* If we get here, all of the events we processed
1830
       * were internal.  Continue. */
1831
0
    }
1832
0
  }
1833
1834
0
done:
1835
0
  base->event_running_priority = -1;
1836
1837
0
  return c;
1838
0
}
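/* [Editor's note -- illustrative sketch, not part of event.c.]  This shows
 * how the max_dispatch_time / max_dispatch_callbacks /
 * limit_callbacks_after_prio fields consulted by event_process_active()
 * above are normally configured, assuming the public libevent 2.1 API in
 * <event2/event.h>; the helper name is hypothetical. */
#include <event2/event.h>

static struct event_base *
make_bounded_base(void)
{
    struct event_config *cfg = event_config_new();
    struct event_base *base;
    struct timeval max_interval = { 0, 20000 };  /* 20 ms */

    if (!cfg)
        return NULL;
    /* After 20 ms or 16 callbacks, re-check timeouts and priorities, but
     * only for queues with priority >= 1; priority 0 runs unthrottled. */
    event_config_set_max_dispatch_interval(cfg, &max_interval, 16, 1);
    base = event_base_new_with_config(cfg);
    event_config_free(cfg);
    return base;
}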
1839
1840
/*
1841
 * Wait continuously for events.  We exit only if no events are left.
1842
 */
1843
1844
int
1845
event_dispatch(void)
1846
0
{
1847
0
  return (event_loop(0));
1848
0
}
1849
1850
int
1851
event_base_dispatch(struct event_base *event_base)
1852
0
{
1853
0
  return (event_base_loop(event_base, 0));
1854
0
}
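/* [Editor's note -- usage sketch, not part of event.c.]  Driving the
 * dispatcher above from an application, assuming the public libevent 2.x
 * API.  event_base_dispatch() returns only once no events remain, or after
 * event_base_loopexit()/event_base_loopbreak(). */
#include <event2/event.h>
#include <stdio.h>

static void on_timer(evutil_socket_t fd, short what, void *arg)
{
    (void)fd; (void)what;
    puts((const char *)arg);
}

int main(void)
{
    struct event_base *base = event_base_new();
    struct timeval one_sec = { 1, 0 };
    struct event *ev;

    if (!base)
        return 1;
    /* One-shot timer: it fires once, then the base has no events left and
     * event_base_dispatch() returns. */
    ev = evtimer_new(base, on_timer, (void *)"tick");
    if (ev)
        evtimer_add(ev, &one_sec);
    event_base_dispatch(base);
    if (ev)
        event_free(ev);
    event_base_free(base);
    return 0;
}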
1855
1856
const char *
1857
event_base_get_method(const struct event_base *base)
1858
0
{
1859
0
  EVUTIL_ASSERT(base);
1860
0
  return (base->evsel->name);
1861
0
}
1862
1863
const char *
1864
event_base_get_signal_method(const struct event_base *base)
1865
0
{
1866
0
  EVUTIL_ASSERT(base);
1867
0
  return (base->evsigsel->name);
1868
0
}
1869
1870
/** Callback: used to implement event_base_loopexit by telling the event_base
1871
 * that it's time to exit its loop. */
1872
static void
1873
event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1874
0
{
1875
0
  struct event_base *base = arg;
1876
0
  base->event_gotterm = 1;
1877
0
}
1878
1879
int
1880
event_loopexit(const struct timeval *tv)
1881
0
{
1882
0
  return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1883
0
        current_base, tv));
1884
0
}
1885
1886
int
1887
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1888
0
{
1889
0
  return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1890
0
        event_base, tv));
1891
0
}
1892
1893
int
1894
event_loopbreak(void)
1895
0
{
1896
0
  return (event_base_loopbreak(current_base));
1897
0
}
1898
1899
int
1900
event_base_loopbreak(struct event_base *event_base)
1901
0
{
1902
0
  int r = 0;
1903
0
  if (event_base == NULL)
1904
0
    return (-1);
1905
1906
0
  EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1907
0
  event_base->event_break = 1;
1908
1909
0
  if (EVBASE_NEED_NOTIFY(event_base)) {
1910
0
    r = evthread_notify_base(event_base);
1911
0
  } else {
1912
0
    r = (0);
1913
0
  }
1914
0
  EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1915
0
  return r;
1916
0
}
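/* [Editor's note -- illustrative sketch, not part of event.c.]  The usual
 * way to stop a running loop from inside a callback, assuming the public
 * libevent 2.x API.  loopexit lets pending callbacks finish (optionally
 * after a delay); loopbreak returns as soon as the current callback is
 * done.  The helper names are hypothetical. */
#include <event2/event.h>
#include <signal.h>

static void on_sigint(evutil_socket_t sig, short what, void *arg)
{
    struct event_base *base = arg;
    struct timeval grace = { 2, 0 };
    (void)sig; (void)what;
    /* Keep running callbacks for up to two more seconds, then exit. */
    event_base_loopexit(base, &grace);
    /* To stop immediately instead, one would call:
     *   event_base_loopbreak(base);
     */
}

static struct event *
install_sigint_handler(struct event_base *base)
{
    struct event *ev = evsignal_new(base, SIGINT, on_sigint, base);
    if (ev)
        evsignal_add(ev, NULL);
    return ev;
}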
1917
1918
int
1919
event_base_loopcontinue(struct event_base *event_base)
1920
0
{
1921
0
  int r = 0;
1922
0
  if (event_base == NULL)
1923
0
    return (-1);
1924
1925
0
  EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1926
0
  event_base->event_continue = 1;
1927
1928
0
  if (EVBASE_NEED_NOTIFY(event_base)) {
1929
0
    r = evthread_notify_base(event_base);
1930
0
  } else {
1931
0
    r = (0);
1932
0
  }
1933
0
  EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1934
0
  return r;
1935
0
}
1936
1937
int
1938
event_base_got_break(struct event_base *event_base)
1939
0
{
1940
0
  int res;
1941
0
  EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1942
0
  res = event_base->event_break;
1943
0
  EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1944
0
  return res;
1945
0
}
1946
1947
int
1948
event_base_got_exit(struct event_base *event_base)
1949
0
{
1950
0
  int res;
1951
0
  EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1952
0
  res = event_base->event_gotterm;
1953
0
  EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1954
0
  return res;
1955
0
}
1956
1957
/* not thread safe */
1958
1959
int
1960
event_loop(int flags)
1961
0
{
1962
0
  return event_base_loop(current_base, flags);
1963
0
}
1964
1965
int
1966
event_base_loop(struct event_base *base, int flags)
1967
0
{
1968
0
  const struct eventop *evsel = base->evsel;
1969
0
  struct timeval tv;
1970
0
  struct timeval *tv_p;
1971
0
  int res, done, retval = 0;
1972
0
  struct evwatch_prepare_cb_info prepare_info;
1973
0
  struct evwatch_check_cb_info check_info;
1974
0
  struct evwatch *watcher;
1975
1976
  /* Grab the lock.  We will release it inside evsel.dispatch, and again
1977
   * as we invoke watchers and user callbacks. */
1978
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1979
1980
0
  if (base->running_loop) {
1981
0
    event_warnx("%s: reentrant invocation.  Only one event_base_loop"
1982
0
        " can run on each event_base at once.", __func__);
1983
0
    EVBASE_RELEASE_LOCK(base, th_base_lock);
1984
0
    return -1;
1985
0
  }
1986
1987
0
  base->running_loop = 1;
1988
1989
0
  clear_time_cache(base);
1990
1991
0
  if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
1992
0
    evsig_set_base_(base);
1993
1994
0
  done = 0;
1995
1996
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
1997
0
  base->th_owner_id = EVTHREAD_GET_ID();
1998
0
#endif
1999
2000
0
  base->event_gotterm = base->event_break = 0;
2001
2002
0
  while (!done) {
2003
0
    base->event_continue = 0;
2004
0
    base->n_deferreds_queued = 0;
2005
2006
    /* Terminate the loop if we have been asked to */
2007
0
    if (base->event_gotterm) {
2008
0
      break;
2009
0
    }
2010
2011
0
    if (base->event_break) {
2012
0
      break;
2013
0
    }
2014
2015
0
    tv_p = &tv;
2016
0
    if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
2017
0
      timeout_next(base, &tv_p);
2018
0
    } else {
2019
      /*
2020
       * if we have active events, we just poll new events
2021
       * without waiting.
2022
       */
2023
0
      evutil_timerclear(&tv);
2024
0
    }
2025
2026
    /* If we have no events, we just exit */
2027
0
    if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
2028
0
        !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
2029
0
      event_debug(("%s: no events registered.", __func__));
2030
0
      retval = 1;
2031
0
      goto done;
2032
0
    }
2033
2034
0
    event_queue_make_later_events_active(base);
2035
2036
    /* Invoke prepare watchers before polling for events */
2037
0
    prepare_info.timeout = tv_p;
2038
0
    TAILQ_FOREACH(watcher, &base->watchers[EVWATCH_PREPARE], next) {
2039
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
2040
0
      (*watcher->callback.prepare)(watcher, &prepare_info, watcher->arg);
2041
0
      EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2042
0
    }
2043
2044
0
    clear_time_cache(base);
2045
2046
0
    res = evsel->dispatch(base, tv_p);
2047
2048
0
    if (res == -1) {
2049
0
      event_debug(("%s: dispatch returned unsuccessfully.",
2050
0
        __func__));
2051
0
      retval = -1;
2052
0
      goto done;
2053
0
    }
2054
2055
0
    update_time_cache(base);
2056
2057
    /* Invoke check watchers after polling for events, and before
2058
     * processing them */
2059
0
    TAILQ_FOREACH(watcher, &base->watchers[EVWATCH_CHECK], next) {
2060
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
2061
0
      (*watcher->callback.check)(watcher, &check_info, watcher->arg);
2062
0
      EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2063
0
    }
2064
2065
0
    timeout_process(base);
2066
2067
0
    if (N_ACTIVE_CALLBACKS(base)) {
2068
0
      int n = event_process_active(base);
2069
0
      if ((flags & EVLOOP_ONCE)
2070
0
          && N_ACTIVE_CALLBACKS(base) == 0
2071
0
          && n != 0)
2072
0
        done = 1;
2073
0
    } else if (flags & EVLOOP_NONBLOCK)
2074
0
      done = 1;
2075
0
  }
2076
0
  event_debug(("%s: asked to terminate loop.", __func__));
2077
2078
0
done:
2079
0
  clear_time_cache(base);
2080
0
  base->running_loop = 0;
2081
2082
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2083
2084
0
  return (retval);
2085
0
}
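/* [Editor's note -- sketch only, not part of event.c.]  Integrating
 * event_base_loop() into an external main loop via its flags, assuming the
 * public libevent 2.x API.  EVLOOP_NONBLOCK polls and returns immediately;
 * EVLOOP_ONCE blocks until at least one batch of callbacks has run. */
#include <event2/event.h>

static void poll_libevent_once(struct event_base *base)
{
    /* Run whatever callbacks are ready right now, without blocking. */
    event_base_loop(base, EVLOOP_NONBLOCK);
}

static void wait_for_one_batch(struct event_base *base)
{
    /* Block until something becomes active, run it, then return even if
     * more events remain registered. */
    event_base_loop(base, EVLOOP_ONCE);
}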
2086
2087
/* One-time callback to implement event_base_once: invokes the user callback,
2088
 * then deletes the allocated storage */
2089
static void
2090
event_once_cb(evutil_socket_t fd, short events, void *arg)
2091
0
{
2092
0
  struct event_once *eonce = arg;
2093
2094
0
  (*eonce->cb)(fd, events, eonce->arg);
2095
0
  EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
2096
0
  LIST_REMOVE(eonce, next_once);
2097
0
  EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
2098
0
  event_debug_unassign(&eonce->ev);
2099
0
  mm_free(eonce);
2100
0
}
2101
2102
/* not threadsafe, event scheduled once. */
2103
int
2104
event_once(evutil_socket_t fd, short events,
2105
    void (*callback)(evutil_socket_t, short, void *),
2106
    void *arg, const struct timeval *tv)
2107
0
{
2108
0
  return event_base_once(current_base, fd, events, callback, arg, tv);
2109
0
}
2110
2111
/* Schedules an event once */
2112
int
2113
event_base_once(struct event_base *base, evutil_socket_t fd, short events,
2114
    void (*callback)(evutil_socket_t, short, void *),
2115
    void *arg, const struct timeval *tv)
2116
0
{
2117
0
  struct event_once *eonce;
2118
0
  int res = 0;
2119
0
  int activate = 0;
2120
2121
0
  if (!base)
2122
0
    return (-1);
2123
2124
  /* We cannot support signals that just fire once, or persistent
2125
   * events. */
2126
0
  if (events & (EV_SIGNAL|EV_PERSIST))
2127
0
    return (-1);
2128
2129
0
  if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
2130
0
    return (-1);
2131
2132
0
  eonce->cb = callback;
2133
0
  eonce->arg = arg;
2134
2135
0
  if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
2136
0
    evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
2137
2138
0
    if (tv == NULL || ! evutil_timerisset(tv)) {
2139
      /* If the event is going to become active immediately,
2140
       * don't put it on the timeout queue.  This is one
2141
       * idiom for scheduling a callback, so let's make
2142
       * it fast (and order-preserving). */
2143
0
      activate = 1;
2144
0
    }
2145
0
  } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
2146
0
    events &= EV_READ|EV_WRITE|EV_CLOSED;
2147
2148
0
    event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
2149
0
  } else {
2150
    /* Bad event combination */
2151
0
    mm_free(eonce);
2152
0
    return (-1);
2153
0
  }
2154
2155
0
  if (res == 0) {
2156
0
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2157
0
    if (activate)
2158
0
      event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
2159
0
    else
2160
0
      res = event_add_nolock_(&eonce->ev, tv, 0);
2161
2162
0
    if (res != 0) {
2163
0
      mm_free(eonce);
2164
0
      return (res);
2165
0
    } else {
2166
0
      LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
2167
0
    }
2168
0
    EVBASE_RELEASE_LOCK(base, th_base_lock);
2169
0
  }
2170
2171
0
  return (0);
2172
0
}
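/* [Editor's note -- usage sketch, not part of event.c.]  Scheduling a
 * one-shot callback with event_base_once(), assuming the public libevent
 * 2.x API.  The library allocates and frees the backing event internally,
 * so the caller has nothing to clean up -- but there is also no way to
 * cancel the callback once it has been scheduled. */
#include <event2/event.h>
#include <stdio.h>

static void run_later(evutil_socket_t fd, short what, void *arg)
{
    (void)fd; (void)what;
    printf("deferred: %s\n", (const char *)arg);
}

static int schedule_deferred(struct event_base *base)
{
    struct timeval half_sec = { 0, 500000 };
    /* Passing NULL (or a zeroed timeval) would activate it immediately. */
    return event_base_once(base, -1, EV_TIMEOUT, run_later,
        (void *)"hello", &half_sec);
}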
2173
2174
int
2175
/* workaround for -Werror=maybe-uninitialized bug in gcc 11/12 */
2176
#if defined(__GNUC__) && (__GNUC__ == 11 || __GNUC__ == 12)
2177
__attribute__((noinline))
2178
#endif
2179
event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
2180
0
{
2181
0
  if (!base)
2182
0
    base = current_base;
2183
0
  if (arg == &event_self_cbarg_ptr_)
2184
0
    arg = ev;
2185
2186
0
  if (!(events & EV_SIGNAL))
2187
0
    event_debug_assert_socket_nonblocking_(fd);
2188
0
  event_debug_assert_not_added_(ev);
2189
2190
0
  ev->ev_base = base;
2191
2192
0
  ev->ev_callback = callback;
2193
0
  ev->ev_arg = arg;
2194
0
  ev->ev_fd = fd;
2195
0
  ev->ev_events = events;
2196
0
  ev->ev_res = 0;
2197
0
  ev->ev_flags = EVLIST_INIT;
2198
0
  ev->ev_ncalls = 0;
2199
0
  ev->ev_pncalls = NULL;
2200
2201
0
  if (events & EV_SIGNAL) {
2202
0
    if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
2203
0
      event_warnx("%s: EV_SIGNAL is not compatible with "
2204
0
          "EV_READ, EV_WRITE or EV_CLOSED", __func__);
2205
0
      return -1;
2206
0
    }
2207
0
    ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
2208
0
  } else {
2209
0
    if (events & EV_PERSIST) {
2210
0
      evutil_timerclear(&ev->ev_io_timeout);
2211
0
      ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
2212
0
    } else {
2213
0
      ev->ev_closure = EV_CLOSURE_EVENT;
2214
0
    }
2215
0
  }
2216
2217
0
  min_heap_elem_init_(ev);
2218
2219
0
  if (base != NULL) {
2220
    /* by default, we put new events into the middle priority */
2221
0
    ev->ev_pri = base->nactivequeues / 2;
2222
0
  }
2223
2224
0
  event_debug_note_setup_(ev);
2225
2226
0
  return 0;
2227
0
}
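/* [Editor's note -- sketch, not part of event.c.]  event_assign()
 * initializes an event whose storage the caller owns (here embedded in a
 * larger struct), as opposed to event_new(), which allocates it.  Assumes
 * the public libevent 2.x API; "struct connection" and the helper names are
 * hypothetical caller-side constructs. */
#include <event2/event.h>
#include <event2/event_struct.h>   /* exposes struct event for embedding */

struct connection {
    struct event read_ev;          /* storage owned by the connection */
    evutil_socket_t fd;
};

static void conn_readable(evutil_socket_t fd, short what, void *arg)
{
    (void)fd; (void)what; (void)arg;  /* real code would read from fd */
}

static int conn_start(struct event_base *base, struct connection *c)
{
    if (event_assign(&c->read_ev, base, c->fd, EV_READ | EV_PERSIST,
            conn_readable, c) < 0)
        return -1;
    return event_add(&c->read_ev, NULL);
}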
2228
2229
int
2230
event_base_set(struct event_base *base, struct event *ev)
2231
0
{
2232
  /* Only innocent events may be assigned to a different base */
2233
0
  if (ev->ev_flags != EVLIST_INIT)
2234
0
    return (-1);
2235
2236
0
  event_debug_assert_is_setup_(ev);
2237
2238
0
  ev->ev_base = base;
2239
0
  ev->ev_pri = base->nactivequeues/2;
2240
2241
0
  return (0);
2242
0
}
2243
2244
void
2245
event_set(struct event *ev, evutil_socket_t fd, short events,
2246
    void (*callback)(evutil_socket_t, short, void *), void *arg)
2247
0
{
2248
0
  int r;
2249
0
  r = event_assign(ev, current_base, fd, events, callback, arg);
2250
0
  EVUTIL_ASSERT(r == 0);
2251
0
}
2252
2253
void *
2254
event_self_cbarg(void)
2255
0
{
2256
0
  return &event_self_cbarg_ptr_;
2257
0
}
2258
2259
struct event *
2260
event_base_get_running_event(struct event_base *base)
2261
0
{
2262
0
  struct event *ev = NULL;
2263
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2264
0
  if (EVBASE_IN_THREAD(base)) {
2265
0
    struct event_callback *evcb = base->current_event;
2266
0
    if (evcb->evcb_flags & EVLIST_INIT)
2267
0
      ev = event_callback_to_event(evcb);
2268
0
  }
2269
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2270
0
  return ev;
2271
0
}
2272
2273
struct event *
2274
event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
2275
0
{
2276
0
  struct event *ev;
2277
0
  ev = mm_malloc(sizeof(struct event));
2278
0
  if (ev == NULL)
2279
0
    return (NULL);
2280
0
  if (event_assign(ev, base, fd, events, cb, arg) < 0) {
2281
0
    mm_free(ev);
2282
0
    return (NULL);
2283
0
  }
2284
2285
0
  return (ev);
2286
0
}
2287
2288
void
2289
event_free(struct event *ev)
2290
0
{
2291
  /* This check is disabled so that an event which has already been
2292
   * finalized remains a valid target for event_free(). */
2293
  // event_debug_assert_is_setup_(ev);
2294
2295
  /* make sure that this event won't be coming back to haunt us. */
2296
0
  event_del(ev);
2297
0
  event_debug_note_teardown_(ev);
2298
0
  mm_free(ev);
2299
2300
0
}
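/* [Editor's note -- sketch, not part of event.c.]  The allocate/add/free
 * life-cycle of a heap event, including event_self_cbarg() so the callback
 * receives its own struct event *.  Assumes the public libevent 2.x API;
 * the helper name is hypothetical. */
#include <event2/event.h>

static void on_readable(evutil_socket_t fd, short what, void *arg)
{
    struct event *self = arg;   /* filled in via event_self_cbarg() */
    (void)fd; (void)what;
    /* A non-persistent event is no longer pending here; re-add it if the
     * socket should keep being watched. */
    event_add(self, NULL);
}

static struct event *
watch_fd(struct event_base *base, evutil_socket_t fd)
{
    struct event *ev = event_new(base, fd, EV_READ, on_readable,
        event_self_cbarg());
    if (ev && event_add(ev, NULL) < 0) {
        event_free(ev);   /* event_free() also deletes a pending event */
        ev = NULL;
    }
    return ev;
}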
2301
2302
void
2303
event_debug_unassign(struct event *ev)
2304
0
{
2305
0
  event_debug_assert_not_added_(ev);
2306
0
  event_debug_note_teardown_(ev);
2307
2308
0
  ev->ev_flags &= ~EVLIST_INIT;
2309
0
}
2310
2311
0
#define EVENT_FINALIZE_FREE_ 0x10000
2312
static int
2313
event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2314
0
{
2315
0
  ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
2316
0
      EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
2317
2318
0
  event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2319
0
  ev->ev_closure = closure;
2320
0
  ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
2321
0
  event_active_nolock_(ev, EV_FINALIZE, 1);
2322
0
  ev->ev_flags |= EVLIST_FINALIZING;
2323
0
  return 0;
2324
0
}
2325
2326
static int
2327
event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2328
0
{
2329
0
  int r;
2330
0
  struct event_base *base = ev->ev_base;
2331
0
  if (EVUTIL_FAILURE_CHECK(!base)) {
2332
0
    event_warnx("%s: event has no event_base set.", __func__);
2333
0
    return -1;
2334
0
  }
2335
2336
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2337
0
  r = event_finalize_nolock_(base, flags, ev, cb);
2338
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2339
0
  return r;
2340
0
}
2341
2342
int
2343
event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2344
0
{
2345
0
  return event_finalize_impl_(flags, ev, cb);
2346
0
}
2347
2348
int
2349
event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2350
0
{
2351
0
  return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
2352
0
}
2353
2354
void
2355
event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2356
0
{
2357
0
  struct event *ev = NULL;
2358
0
  if (evcb->evcb_flags & EVLIST_INIT) {
2359
0
    ev = event_callback_to_event(evcb);
2360
0
    event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2361
0
  } else {
2362
0
    event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
2363
0
  }
2364
2365
0
  evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
2366
0
  evcb->evcb_cb_union.evcb_cbfinalize = cb;
2367
0
  event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
2368
0
  evcb->evcb_flags |= EVLIST_FINALIZING;
2369
0
}
2370
2371
void
2372
event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2373
0
{
2374
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2375
0
  event_callback_finalize_nolock_(base, flags, evcb, cb);
2376
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2377
0
}
2378
2379
/** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
2380
 * callback will be invoked on *one of them*, after they have *all* been
2381
 * finalized. */
2382
int
2383
event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
2384
0
{
2385
0
  int n_pending = 0, i;
2386
2387
0
  if (base == NULL)
2388
0
    base = current_base;
2389
2390
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2391
2392
0
  event_debug(("%s: %d events finalizing", __func__, n_cbs));
2393
2394
  /* At most one can be currently executing; the rest we just
2395
   * cancel... But we always make sure that the finalize callback
2396
   * runs. */
2397
0
  for (i = 0; i < n_cbs; ++i) {
2398
0
    struct event_callback *evcb = evcbs[i];
2399
0
    if (evcb == base->current_event) {
2400
0
      event_callback_finalize_nolock_(base, 0, evcb, cb);
2401
0
      ++n_pending;
2402
0
    } else {
2403
0
      event_callback_cancel_nolock_(base, evcb, 0);
2404
0
    }
2405
0
  }
2406
2407
0
  if (n_pending == 0) {
2408
    /* Just do the first one. */
2409
0
    event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
2410
0
  }
2411
2412
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2413
0
  return 0;
2414
0
}
2415
2416
/*
2417
 * Sets the priority of an event; changing the priority fails if the event
2418
 * is already active.
2419
 */
2420
2421
int
2422
event_priority_set(struct event *ev, int pri)
2423
0
{
2424
0
  event_debug_assert_is_setup_(ev);
2425
2426
0
  if (ev->ev_flags & EVLIST_ACTIVE)
2427
0
    return (-1);
2428
0
  if (pri < 0 || pri >= ev->ev_base->nactivequeues)
2429
0
    return (-1);
2430
2431
0
  ev->ev_pri = pri;
2432
2433
0
  return (0);
2434
0
}
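/* [Editor's note -- usage sketch, not part of event.c.]  Assigning
 * priorities, assuming the public libevent 2.x API.  The number of queues
 * is configured on the base before events are added, and (as the code above
 * enforces) an event's priority cannot be changed while it is active.  The
 * helper name is hypothetical. */
#include <event2/event.h>

static struct event *
make_urgent_timer(struct event_base *base,
    void (*cb)(evutil_socket_t, short, void *), void *arg)
{
    struct event *ev;
    /* Two queues: 0 is dispatched ahead of 1 (see event_process_active).
     * This must happen before events are added to the base. */
    event_base_priority_init(base, 2);
    ev = evtimer_new(base, cb, arg);
    if (ev)
        event_priority_set(ev, 0);   /* most urgent queue */
    return ev;
}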
2435
2436
/*
2437
 * Checks if a specific event is pending or scheduled.
2438
 */
2439
2440
int
2441
event_pending(const struct event *ev, short event, struct timeval *tv)
2442
0
{
2443
0
  int flags = 0;
2444
2445
0
  if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2446
0
    event_warnx("%s: event has no event_base set.", __func__);
2447
0
    return 0;
2448
0
  }
2449
2450
0
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2451
0
  event_debug_assert_is_setup_(ev);
2452
2453
0
  if (ev->ev_flags & EVLIST_INSERTED)
2454
0
    flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2455
0
  if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2456
0
    flags |= ev->ev_res;
2457
0
  if (ev->ev_flags & EVLIST_TIMEOUT)
2458
0
    flags |= EV_TIMEOUT;
2459
2460
0
  event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
2461
2462
  /* See if there is a timeout that we should report */
2463
0
  if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2464
0
    struct timeval tmp = ev->ev_timeout;
2465
0
    tmp.tv_usec &= MICROSECONDS_MASK;
2466
    /* correctly remap to real time */
2467
0
    evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
2468
0
  }
2469
2470
0
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2471
2472
0
  return (flags & event);
2473
0
}
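/* [Editor's note -- sketch, not part of event.c.]  Querying pending status
 * with event_pending(), assuming the public libevent 2.x API.  The timeval,
 * if requested, is reported as an absolute (real-time) expiry, as the
 * remapping above shows.  The helper name is hypothetical. */
#include <event2/event.h>
#include <stdio.h>

static void report_timer(const struct event *ev)
{
    struct timeval expires;
    if (event_pending(ev, EV_TIMEOUT, &expires)) {
        printf("timer pending, expires at %ld.%06ld\n",
            (long)expires.tv_sec, (long)expires.tv_usec);
    } else {
        printf("timer not pending\n");
    }
}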
2474
2475
int
2476
event_initialized(const struct event *ev)
2477
0
{
2478
0
  if (!(ev->ev_flags & EVLIST_INIT))
2479
0
    return 0;
2480
2481
0
  return 1;
2482
0
}
2483
2484
void
2485
event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2486
0
{
2487
0
  event_debug_assert_is_setup_(event);
2488
2489
0
  if (base_out)
2490
0
    *base_out = event->ev_base;
2491
0
  if (fd_out)
2492
0
    *fd_out = event->ev_fd;
2493
0
  if (events_out)
2494
0
    *events_out = event->ev_events;
2495
0
  if (callback_out)
2496
0
    *callback_out = event->ev_callback;
2497
0
  if (arg_out)
2498
0
    *arg_out = event->ev_arg;
2499
0
}
2500
2501
size_t
2502
event_get_struct_event_size(void)
2503
0
{
2504
0
  return sizeof(struct event);
2505
0
}
2506
2507
evutil_socket_t
2508
event_get_fd(const struct event *ev)
2509
0
{
2510
0
  event_debug_assert_is_setup_(ev);
2511
0
  return ev->ev_fd;
2512
0
}
2513
2514
struct event_base *
2515
event_get_base(const struct event *ev)
2516
0
{
2517
0
  event_debug_assert_is_setup_(ev);
2518
0
  return ev->ev_base;
2519
0
}
2520
2521
short
2522
event_get_events(const struct event *ev)
2523
0
{
2524
0
  event_debug_assert_is_setup_(ev);
2525
0
  return ev->ev_events;
2526
0
}
2527
2528
event_callback_fn
2529
event_get_callback(const struct event *ev)
2530
0
{
2531
0
  event_debug_assert_is_setup_(ev);
2532
0
  return ev->ev_callback;
2533
0
}
2534
2535
void *
2536
event_get_callback_arg(const struct event *ev)
2537
0
{
2538
0
  event_debug_assert_is_setup_(ev);
2539
0
  return ev->ev_arg;
2540
0
}
2541
2542
int
2543
event_get_priority(const struct event *ev)
2544
0
{
2545
0
  event_debug_assert_is_setup_(ev);
2546
0
  return ev->ev_pri;
2547
0
}
2548
2549
int
2550
event_add(struct event *ev, const struct timeval *tv)
2551
0
{
2552
0
  int res;
2553
2554
0
  if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2555
0
    event_warnx("%s: event has no event_base set.", __func__);
2556
0
    return -1;
2557
0
  }
2558
2559
0
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2560
2561
0
  res = event_add_nolock_(ev, tv, 0);
2562
2563
0
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2564
2565
0
  return (res);
2566
0
}
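/* [Editor's note -- sketch, not part of event.c.]  A persistent timeout,
 * i.e. the EV_CLOSURE_EVENT_PERSIST path handled by event_persist_closure()
 * earlier in this file.  Assumes the public libevent 2.x API; the helper
 * name is hypothetical. */
#include <event2/event.h>
#include <stdio.h>

static void every_second(evutil_socket_t fd, short what, void *arg)
{
    (void)fd; (void)what; (void)arg;
    /* With EV_PERSIST the library re-adds the event with the same interval
     * after each callback; no manual event_add() is needed here. */
    puts("tick");
}

static struct event *
start_ticker(struct event_base *base)
{
    struct timeval interval = { 1, 0 };
    struct event *ev = event_new(base, -1, EV_PERSIST, every_second, NULL);
    if (ev && event_add(ev, &interval) < 0) {
        event_free(ev);
        ev = NULL;
    }
    return ev;
}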
2567
2568
/* Helper callback: wake an event_base from another thread.  This version
2569
 * works by writing a byte to one end of a socketpair, so that the event_base
2570
 * listening on the other end will wake up as the corresponding event
2571
 * triggers */
2572
static int
2573
evthread_notify_base_default(struct event_base *base)
2574
0
{
2575
0
  char buf[1];
2576
0
  int r;
2577
0
  buf[0] = (char) 0;
2578
#ifdef _WIN32
2579
  r = send(base->th_notify_fd[1], buf, 1, 0);
2580
#else
2581
0
  r = write(base->th_notify_fd[1], buf, 1);
2582
0
#endif
2583
0
  return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
2584
0
}
2585
2586
#ifdef EVENT__HAVE_EVENTFD
2587
/* Helper callback: wake an event_base from another thread.  This version
2588
 * assumes that you have a working eventfd() implementation. */
2589
static int
2590
evthread_notify_base_eventfd(struct event_base *base)
2591
0
{
2592
0
  ev_uint64_t msg = 1;
2593
0
  int r;
2594
0
  do {
2595
0
    r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
2596
0
  } while (r < 0 && errno == EAGAIN);
2597
2598
0
  return (r < 0) ? -1 : 0;
2599
0
}
2600
#endif
2601
2602
2603
/** Tell the thread currently running the event_loop for base (if any) that it
2604
 * needs to stop waiting in its dispatch function (if it is) and process all
2605
 * active callbacks. */
2606
static int
2607
evthread_notify_base(struct event_base *base)
2608
0
{
2609
0
  EVENT_BASE_ASSERT_LOCKED(base);
2610
0
  if (!base->th_notify_fn)
2611
0
    return -1;
2612
0
  if (base->is_notify_pending)
2613
0
    return 0;
2614
0
  base->is_notify_pending = 1;
2615
0
  return base->th_notify_fn(base);
2616
0
}
2617
2618
/* Implementation function to remove a timeout on a currently pending event.
2619
 */
2620
int
2621
event_remove_timer_nolock_(struct event *ev)
2622
0
{
2623
0
  struct event_base *base = ev->ev_base;
2624
2625
0
  EVENT_BASE_ASSERT_LOCKED(base);
2626
0
  event_debug_assert_is_setup_(ev);
2627
2628
0
  event_debug(("event_remove_timer_nolock: event: %p", (void *)ev));
2629
2630
  /* If it's not pending on a timeout, we don't need to do anything. */
2631
0
  if (ev->ev_flags & EVLIST_TIMEOUT) {
2632
0
    event_queue_remove_timeout(base, ev);
2633
0
    evutil_timerclear(&ev->ev_io_timeout);
2634
0
  }
2635
2636
0
  return (0);
2637
0
}
2638
2639
int
2640
event_remove_timer(struct event *ev)
2641
0
{
2642
0
  int res;
2643
2644
0
  if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2645
0
    event_warnx("%s: event has no event_base set.", __func__);
2646
0
    return -1;
2647
0
  }
2648
2649
0
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2650
2651
0
  res = event_remove_timer_nolock_(ev);
2652
2653
0
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2654
2655
0
  return (res);
2656
0
}
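/* [Editor's note -- sketch, not part of event.c.]  event_remove_timer()
 * strips only the timeout from a pending event, leaving its I/O
 * registration in place.  Assumes the public libevent 2.1 API; the helper
 * name is hypothetical. */
#include <event2/event.h>

static void cancel_read_deadline(struct event *read_ev)
{
    /* The event stays registered for EV_READ, but will no longer fire with
     * EV_TIMEOUT if the peer goes quiet. */
    event_remove_timer(read_ev);
}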
2657
2658
/* Implementation function to add an event.  Works just like event_add,
2659
 * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
2660
 * we treat tv as an absolute time, not as an interval to add to the current
2661
 * time */
2662
int
2663
event_add_nolock_(struct event *ev, const struct timeval *tv,
2664
    int tv_is_absolute)
2665
0
{
2666
0
  struct event_base *base = ev->ev_base;
2667
0
  int res = 0;
2668
0
  int notify = 0;
2669
2670
0
  EVENT_BASE_ASSERT_LOCKED(base);
2671
0
  event_debug_assert_is_setup_(ev);
2672
2673
0
  event_debug((
2674
0
     "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
2675
0
     (void *)ev,
2676
0
     EV_SOCK_ARG(ev->ev_fd),
2677
0
     ev->ev_events & EV_READ ? "EV_READ " : " ",
2678
0
     ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2679
0
     ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
2680
0
     tv ? "EV_TIMEOUT " : " ",
2681
0
     (void *)ev->ev_callback));
2682
2683
0
  EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2684
2685
0
  if (ev->ev_flags & EVLIST_FINALIZING) {
2686
    /* XXXX debug */
2687
0
    return (-1);
2688
0
  }
2689
2690
  /*
2691
   * Prepare for timeout insertion further below.  If any step fails,
2692
   * we must not change any state.
2693
   */
2694
0
  if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2695
0
    if (min_heap_reserve_(&base->timeheap,
2696
0
      1 + min_heap_size_(&base->timeheap)) == -1)
2697
0
      return (-1);  /* ENOMEM == errno */
2698
0
  }
2699
2700
  /* If the main thread is currently executing a signal event's
2701
   * callback, and we are not the main thread, then we want to wait
2702
   * until the callback is done before we mess with the event, or else
2703
   * we can race on ev_ncalls and ev_pncalls below. */
2704
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
2705
0
  if (base->current_event == event_to_event_callback(ev) &&
2706
0
      (ev->ev_events & EV_SIGNAL)
2707
0
      && !EVBASE_IN_THREAD(base)) {
2708
0
    ++base->current_event_waiters;
2709
0
    EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2710
0
  }
2711
0
#endif
2712
2713
0
  if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
2714
0
      !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2715
0
    if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2716
0
      res = evmap_io_add_(base, ev->ev_fd, ev);
2717
0
    else if (ev->ev_events & EV_SIGNAL)
2718
0
      res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
2719
0
    if (res != -1)
2720
0
      event_queue_insert_inserted(base, ev);
2721
0
    if (res == 1) {
2722
      /* evmap says we need to notify the main thread. */
2723
0
      notify = 1;
2724
0
      res = 0;
2725
0
    }
2726
0
  }
2727
2728
  /*
2729
   * we should change the timeout state only if the previous event
2730
   * addition succeeded.
2731
   */
2732
0
  if (res != -1 && tv != NULL) {
2733
0
    struct timeval now;
2734
0
    int common_timeout;
2735
#ifdef USE_REINSERT_TIMEOUT
2736
    int was_common;
2737
    int old_timeout_idx;
2738
#endif
2739
2740
    /*
2741
     * for persistent timeout events, we remember the
2742
     * timeout value and re-add the event.
2743
     *
2744
     * If tv_is_absolute, this was already set.
2745
     */
2746
0
    if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
2747
0
      ev->ev_io_timeout = *tv;
2748
2749
0
#ifndef USE_REINSERT_TIMEOUT
2750
0
    if (ev->ev_flags & EVLIST_TIMEOUT) {
2751
0
      event_queue_remove_timeout(base, ev);
2752
0
    }
2753
0
#endif
2754
2755
    /* Check if it is active due to a timeout.  Rescheduling
2756
     * this timeout before the callback can be executed
2757
     * removes it from the active list. */
2758
0
    if ((ev->ev_flags & EVLIST_ACTIVE) &&
2759
0
        (ev->ev_res & EV_TIMEOUT)) {
2760
0
      if (ev->ev_events & EV_SIGNAL) {
2761
        /* See if we are just active executing
2762
         * this event in a loop
2763
         */
2764
0
        if (ev->ev_ncalls && ev->ev_pncalls) {
2765
          /* Abort loop */
2766
0
          *ev->ev_pncalls = 0;
2767
0
        }
2768
0
      }
2769
2770
0
      event_queue_remove_active(base, event_to_event_callback(ev));
2771
0
    }
2772
2773
0
    gettime(base, &now);
2774
2775
0
    common_timeout = is_common_timeout(tv, base);
2776
#ifdef USE_REINSERT_TIMEOUT
2777
    was_common = is_common_timeout(&ev->ev_timeout, base);
2778
    old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2779
#endif
2780
2781
0
    if (tv_is_absolute) {
2782
0
      ev->ev_timeout = *tv;
2783
0
    } else if (common_timeout) {
2784
0
      struct timeval tmp = *tv;
2785
0
      tmp.tv_usec &= MICROSECONDS_MASK;
2786
0
      evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2787
0
      ev->ev_timeout.tv_usec |=
2788
0
          (tv->tv_usec & ~MICROSECONDS_MASK);
2789
0
    } else {
2790
0
      evutil_timeradd(&now, tv, &ev->ev_timeout);
2791
0
    }
2792
2793
0
    event_debug((
2794
0
       "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2795
0
       (void *)ev, (int)tv->tv_sec, (int)tv->tv_usec, (void *)ev->ev_callback));
2796
2797
#ifdef USE_REINSERT_TIMEOUT
2798
    event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2799
#else
2800
0
    event_queue_insert_timeout(base, ev);
2801
0
#endif
2802
2803
0
    if (common_timeout) {
2804
0
      struct common_timeout_list *ctl =
2805
0
          get_common_timeout_list(base, &ev->ev_timeout);
2806
0
      if (ev == TAILQ_FIRST(&ctl->events)) {
2807
0
        common_timeout_schedule(ctl, &now, ev);
2808
0
      }
2809
0
    } else {
2810
0
      struct event* top = NULL;
2811
      /* See if the earliest timeout is now earlier than it
2812
       * was before: if so, we will need to tell the main
2813
       * thread to wake up earlier than it would otherwise.
2814
       * We double check the timeout of the top element to
2815
       * handle time distortions due to system suspension.
2816
       */
2817
0
      if (min_heap_elt_is_top_(ev))
2818
0
        notify = 1;
2819
0
      else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
2820
0
           evutil_timercmp(&top->ev_timeout, &now, <))
2821
0
        notify = 1;
2822
0
    }
2823
0
  }
2824
2825
  /* if we are not in the right thread, we need to wake up the loop */
2826
0
  if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2827
0
    evthread_notify_base(base);
2828
2829
0
  event_debug_note_add_(ev);
2830
2831
0
  return (res);
2832
0
}
2833
2834
static int
2835
event_del_(struct event *ev, int blocking)
2836
0
{
2837
0
  int res;
2838
0
  struct event_base *base = ev->ev_base;
2839
2840
0
  if (EVUTIL_FAILURE_CHECK(!base)) {
2841
0
    event_warnx("%s: event has no event_base set.", __func__);
2842
0
    return -1;
2843
0
  }
2844
2845
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2846
0
  res = event_del_nolock_(ev, blocking);
2847
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2848
2849
0
  return (res);
2850
0
}
2851
2852
int
2853
event_del(struct event *ev)
2854
0
{
2855
0
  return event_del_(ev, EVENT_DEL_AUTOBLOCK);
2856
0
}
2857
2858
int
2859
event_del_block(struct event *ev)
2860
0
{
2861
0
  return event_del_(ev, EVENT_DEL_BLOCK);
2862
0
}
2863
2864
int
2865
event_del_noblock(struct event *ev)
2866
0
{
2867
0
  return event_del_(ev, EVENT_DEL_NOBLOCK);
2868
0
}
2869
2870
/** Helper for event_del: always called with th_base_lock held.
2871
 *
2872
 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2873
 * EVEN_IF_FINALIZING} values. See those for more information.
2874
 */
2875
int
2876
event_del_nolock_(struct event *ev, int blocking)
2877
0
{
2878
0
  struct event_base *base;
2879
0
  int res = 0, notify = 0;
2880
2881
0
  event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2882
0
    (void *)ev, EV_SOCK_ARG(ev->ev_fd), (void *)ev->ev_callback));
2883
2884
  /* An event without a base has not been added */
2885
0
  if (ev->ev_base == NULL)
2886
0
    return (-1);
2887
2888
0
  EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2889
2890
0
  if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
2891
0
    if (ev->ev_flags & EVLIST_FINALIZING) {
2892
      /* XXXX Debug */
2893
0
      return 0;
2894
0
    }
2895
0
  }
2896
2897
0
  base = ev->ev_base;
2898
2899
0
  EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2900
2901
  /* See if we are just active executing this event in a loop */
2902
0
  if (ev->ev_events & EV_SIGNAL) {
2903
0
    if (ev->ev_ncalls && ev->ev_pncalls) {
2904
      /* Abort loop */
2905
0
      *ev->ev_pncalls = 0;
2906
0
    }
2907
0
  }
2908
2909
0
  if (ev->ev_flags & EVLIST_TIMEOUT) {
2910
    /* NOTE: We never need to notify the main thread because of a
2911
     * deleted timeout event: all that could happen if we don't is
2912
     * that the dispatch loop might wake up too early.  But the
2913
     * point of notifying the main thread _is_ to wake up the
2914
     * dispatch loop early anyway, so we wouldn't gain anything by
2915
     * doing it.
2916
     */
2917
0
    event_queue_remove_timeout(base, ev);
2918
0
  }
2919
2920
0
  if (ev->ev_flags & EVLIST_ACTIVE)
2921
0
    event_queue_remove_active(base, event_to_event_callback(ev));
2922
0
  else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
2923
0
    event_queue_remove_active_later(base, event_to_event_callback(ev));
2924
2925
0
  if (ev->ev_flags & EVLIST_INSERTED) {
2926
0
    event_queue_remove_inserted(base, ev);
2927
0
    if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2928
0
      res = evmap_io_del_(base, ev->ev_fd, ev);
2929
0
    else
2930
0
      res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
2931
0
    if (res == 1) {
2932
      /* evmap says we need to notify the main thread. */
2933
0
      notify = 1;
2934
0
      res = 0;
2935
0
    }
2936
    /* If we do not have events, let's notify event base so it can
2937
     * exit without waiting */
2938
0
    if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base))
2939
0
      notify = 1;
2940
0
  }
2941
2942
  /* if we are not in the right thread, we need to wake up the loop */
2943
0
  if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2944
0
    evthread_notify_base(base);
2945
2946
0
  event_debug_note_del_(ev);
2947
2948
  /* If the main thread is currently executing this event's callback,
2949
   * and we are not the main thread, then we want to wait until the
2950
   * callback is done before returning. That way, when this function
2951
   * returns, it will be safe to free the user-supplied argument.
2952
   */
2953
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
2954
0
  if (blocking != EVENT_DEL_NOBLOCK &&
2955
0
      base->current_event == event_to_event_callback(ev) &&
2956
0
      !EVBASE_IN_THREAD(base) &&
2957
0
      (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
2958
0
    ++base->current_event_waiters;
2959
0
    EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2960
0
  }
2961
0
#endif
2962
2963
0
  return (res);
2964
0
}
2965
2966
void
2967
event_active(struct event *ev, int res, short ncalls)
2968
0
{
2969
0
  if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2970
0
    event_warnx("%s: event has no event_base set.", __func__);
2971
0
    return;
2972
0
  }
2973
2974
0
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2975
2976
0
  event_debug_assert_is_setup_(ev);
2977
2978
0
  event_active_nolock_(ev, res, ncalls);
2979
2980
0
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2981
0
}
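/* [Editor's note -- sketch, not part of event.c.]  Manually activating an
 * event from outside the loop (e.g. another thread) with event_active(),
 * assuming the public libevent 2.x API and that locking was enabled first
 * (for instance via evthread_use_pthreads()).  The helper name is
 * hypothetical. */
#include <event2/event.h>

static void wake_worker(struct event *notify_ev)
{
    /* The event's callback runs on the next iteration of the loop that owns
     * it; if that loop is blocked in dispatch, it is woken up. */
    event_active(notify_ev, EV_TIMEOUT, 1);
}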
2982
2983
2984
void
2985
event_active_nolock_(struct event *ev, int res, short ncalls)
2986
0
{
2987
0
  struct event_base *base;
2988
2989
0
  event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
2990
0
    (void *)ev, EV_SOCK_ARG(ev->ev_fd), (int)res, (void *)ev->ev_callback));
2991
2992
0
  base = ev->ev_base;
2993
0
  EVENT_BASE_ASSERT_LOCKED(base);
2994
2995
0
  if (ev->ev_flags & EVLIST_FINALIZING) {
2996
    /* XXXX debug */
2997
0
    return;
2998
0
  }
2999
3000
0
  switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
3001
0
  default:
3002
0
  case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
3003
0
    EVUTIL_ASSERT(0);
3004
0
    break;
3005
0
  case EVLIST_ACTIVE:
3006
    /* We get different kinds of events, add them together */
3007
0
    ev->ev_res |= res;
3008
0
    return;
3009
0
  case EVLIST_ACTIVE_LATER:
3010
0
    ev->ev_res |= res;
3011
0
    break;
3012
0
  case 0:
3013
0
    ev->ev_res = res;
3014
0
    break;
3015
0
  }
3016
3017
0
  if (ev->ev_pri < base->event_running_priority)
3018
0
    base->event_continue = 1;
3019
3020
0
  if (ev->ev_events & EV_SIGNAL) {
3021
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
3022
0
    if (base->current_event == event_to_event_callback(ev) &&
3023
0
        !EVBASE_IN_THREAD(base)) {
3024
0
      ++base->current_event_waiters;
3025
0
      EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
3026
0
    }
3027
0
#endif
3028
0
    ev->ev_ncalls = ncalls;
3029
0
    ev->ev_pncalls = NULL;
3030
0
  }
3031
3032
0
  event_callback_activate_nolock_(base, event_to_event_callback(ev));
3033
0
}
3034
3035
void
3036
event_active_later_(struct event *ev, int res)
3037
0
{
3038
0
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
3039
0
  event_active_later_nolock_(ev, res);
3040
0
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
3041
0
}
3042
3043
void
3044
event_active_later_nolock_(struct event *ev, int res)
3045
0
{
3046
0
  struct event_base *base = ev->ev_base;
3047
0
  EVENT_BASE_ASSERT_LOCKED(base);
3048
3049
0
  if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
3050
    /* We get different kinds of events, add them together */
3051
0
    ev->ev_res |= res;
3052
0
    return;
3053
0
  }
3054
3055
0
  ev->ev_res = res;
3056
3057
0
  event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
3058
0
}
3059
3060
int
3061
event_callback_activate_(struct event_base *base,
3062
    struct event_callback *evcb)
3063
0
{
3064
0
  int r;
3065
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3066
0
  r = event_callback_activate_nolock_(base, evcb);
3067
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3068
0
  return r;
3069
0
}
3070
3071
int
3072
event_callback_activate_nolock_(struct event_base *base,
3073
    struct event_callback *evcb)
3074
0
{
3075
0
  int r = 1;
3076
3077
0
  if (evcb->evcb_flags & EVLIST_FINALIZING)
3078
0
    return 0;
3079
3080
0
  switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
3081
0
  default:
3082
0
    EVUTIL_ASSERT(0);
3083
0
    EVUTIL_FALLTHROUGH;
3084
0
  case EVLIST_ACTIVE_LATER:
3085
0
    event_queue_remove_active_later(base, evcb);
3086
0
    r = 0;
3087
0
    break;
3088
0
  case EVLIST_ACTIVE:
3089
0
    return 0;
3090
0
  case 0:
3091
0
    break;
3092
0
  }
3093
3094
0
  event_queue_insert_active(base, evcb);
3095
3096
0
  if (EVBASE_NEED_NOTIFY(base))
3097
0
    evthread_notify_base(base);
3098
3099
0
  return r;
3100
0
}
3101
3102
int
3103
event_callback_activate_later_nolock_(struct event_base *base,
3104
    struct event_callback *evcb)
3105
0
{
3106
0
  if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
3107
0
    return 0;
3108
3109
0
  event_queue_insert_active_later(base, evcb);
3110
0
  if (EVBASE_NEED_NOTIFY(base))
3111
0
    evthread_notify_base(base);
3112
0
  return 1;
3113
0
}
3114
3115
void
3116
event_callback_init_(struct event_base *base,
3117
    struct event_callback *cb)
3118
0
{
3119
0
  memset(cb, 0, sizeof(*cb));
3120
0
  cb->evcb_pri = base->nactivequeues - 1;
3121
0
}
3122
3123
int
3124
event_callback_cancel_(struct event_base *base,
3125
    struct event_callback *evcb)
3126
0
{
3127
0
  int r;
3128
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3129
0
  r = event_callback_cancel_nolock_(base, evcb, 0);
3130
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3131
0
  return r;
3132
0
}
3133
3134
int
3135
event_callback_cancel_nolock_(struct event_base *base,
3136
    struct event_callback *evcb, int even_if_finalizing)
3137
0
{
3138
0
  if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
3139
0
    return 0;
3140
3141
0
  if (evcb->evcb_flags & EVLIST_INIT)
3142
0
    return event_del_nolock_(event_callback_to_event(evcb),
3143
0
        even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
3144
3145
0
  switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
3146
0
  default:
3147
0
  case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
3148
0
    EVUTIL_ASSERT(0);
3149
0
    break;
3150
0
  case EVLIST_ACTIVE:
3151
    /* We get different kinds of events, add them together */
3152
0
    event_queue_remove_active(base, evcb);
3153
0
    return 0;
3154
0
  case EVLIST_ACTIVE_LATER:
3155
0
    event_queue_remove_active_later(base, evcb);
3156
0
    break;
3157
0
  case 0:
3158
0
    break;
3159
0
  }
3160
3161
0
  return 0;
3162
0
}
3163
3164
void
3165
event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
3166
0
{
3167
0
  memset(cb, 0, sizeof(*cb));
3168
0
  cb->evcb_cb_union.evcb_selfcb = fn;
3169
0
  cb->evcb_arg = arg;
3170
0
  cb->evcb_pri = priority;
3171
0
  cb->evcb_closure = EV_CLOSURE_CB_SELF;
3172
0
}
3173
3174
void
3175
event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
3176
0
{
3177
0
  cb->evcb_pri = priority;
3178
0
}
3179
3180
void
3181
event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
3182
0
{
3183
0
  if (!base)
3184
0
    base = current_base;
3185
0
  event_callback_cancel_(base, cb);
3186
0
}
3187
3188
0
#define MAX_DEFERREDS_QUEUED 32
3189
int
3190
event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
3191
0
{
3192
0
  int r = 1;
3193
0
  if (!base)
3194
0
    base = current_base;
3195
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3196
0
  if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3197
0
    r = event_callback_activate_later_nolock_(base, cb);
3198
0
  } else {
3199
0
    r = event_callback_activate_nolock_(base, cb);
3200
0
    if (r) {
3201
0
      ++base->n_deferreds_queued;
3202
0
    }
3203
0
  }
3204
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3205
0
  return r;
3206
0
}
3207
3208
static int
3209
timeout_next(struct event_base *base, struct timeval **tv_p)
3210
0
{
3211
  /* Caller must hold th_base_lock */
3212
0
  struct timeval now;
3213
0
  struct event *ev;
3214
0
  struct timeval *tv = *tv_p;
3215
0
  int res = 0;
3216
3217
0
  ev = min_heap_top_(&base->timeheap);
3218
3219
0
  if (ev == NULL) {
3220
    /* if no time-based events are active wait for I/O */
3221
0
    *tv_p = NULL;
3222
0
    goto out;
3223
0
  }
3224
3225
0
  if (gettime(base, &now) == -1) {
3226
0
    res = -1;
3227
0
    goto out;
3228
0
  }
3229
3230
0
  if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
3231
0
    evutil_timerclear(tv);
3232
0
    goto out;
3233
0
  }
3234
3235
0
  evutil_timersub(&ev->ev_timeout, &now, tv);
3236
3237
0
  EVUTIL_ASSERT(tv->tv_sec >= 0);
3238
0
  EVUTIL_ASSERT(tv->tv_usec >= 0);
3239
0
  event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", (void *)ev, (int)tv->tv_sec, (int)tv->tv_usec));
3240
3241
0
out:
3242
0
  return (res);
3243
0
}
3244
3245
/* Activate every event whose timeout has elapsed. */
3246
static void
3247
timeout_process(struct event_base *base)
3248
0
{
3249
  /* Caller must hold lock. */
3250
0
  struct timeval now;
3251
0
  struct event *ev;
3252
3253
0
  if (min_heap_empty_(&base->timeheap)) {
3254
0
    return;
3255
0
  }
3256
3257
0
  gettime(base, &now);
3258
3259
0
  while ((ev = min_heap_top_(&base->timeheap))) {
3260
0
    if (evutil_timercmp(&ev->ev_timeout, &now, >))
3261
0
      break;
3262
3263
    /* delete this event from the I/O queues */
3264
0
    event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
3265
3266
0
    event_debug(("timeout_process: event: %p, call %p",
3267
0
       (void *)ev, (void *)ev->ev_callback));
3268
0
    event_active_nolock_(ev, EV_TIMEOUT, 1);
3269
0
  }
3270
0
}
3271
3272
#ifndef MAX
3273
0
#define MAX(a,b) (((a)>(b))?(a):(b))
3274
#endif
3275
3276
0
#define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3277
3278
/* These are a fancy way to spell
3279
     if (~flags & EVLIST_INTERNAL)
3280
         base->event_count--/++;
3281
*/
3282
#define DECR_EVENT_COUNT(base,flags) \
3283
0
  ((base)->event_count -= !((flags) & EVLIST_INTERNAL))
3284
0
#define INCR_EVENT_COUNT(base,flags) do {         \
3285
0
  ((base)->event_count += !((flags) & EVLIST_INTERNAL));     \
3286
0
  MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);   \
3287
0
} while (0)
3288
3289
static void
3290
event_queue_remove_inserted(struct event_base *base, struct event *ev)
3291
0
{
3292
0
  EVENT_BASE_ASSERT_LOCKED(base);
3293
0
  if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
3294
0
    event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3295
0
                   (void *)ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
3296
0
    return;
3297
0
  }
3298
0
  DECR_EVENT_COUNT(base, ev->ev_flags);
3299
0
  ev->ev_flags &= ~EVLIST_INSERTED;
3300
0
}
3301
static void
3302
event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
3303
0
{
3304
0
  EVENT_BASE_ASSERT_LOCKED(base);
3305
0
  if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
3306
0
    event_errx(1, "%s: %p not on queue %x", __func__,
3307
0
                          (void *)evcb, EVLIST_ACTIVE);
3308
0
    return;
3309
0
  }
3310
0
  DECR_EVENT_COUNT(base, evcb->evcb_flags);
3311
0
  evcb->evcb_flags &= ~EVLIST_ACTIVE;
3312
0
  base->event_count_active--;
3313
3314
0
  TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
3315
0
      evcb, evcb_active_next);
3316
0
}
3317
static void
3318
event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
3319
0
{
3320
0
  EVENT_BASE_ASSERT_LOCKED(base);
3321
0
  if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
3322
0
    event_errx(1, "%s: %p not on queue %x", __func__,
3323
0
                          (void *)evcb, EVLIST_ACTIVE_LATER);
3324
0
    return;
3325
0
  }
3326
0
  DECR_EVENT_COUNT(base, evcb->evcb_flags);
3327
0
  evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
3328
0
  base->event_count_active--;
3329
3330
0
  TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3331
0
}
3332
static void
3333
event_queue_remove_timeout(struct event_base *base, struct event *ev)
3334
0
{
3335
0
  EVENT_BASE_ASSERT_LOCKED(base);
3336
0
  if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
3337
0
    event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3338
0
                   (void *)ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
3339
0
    return;
3340
0
  }
3341
0
  DECR_EVENT_COUNT(base, ev->ev_flags);
3342
0
  ev->ev_flags &= ~EVLIST_TIMEOUT;
3343
3344
0
  if (is_common_timeout(&ev->ev_timeout, base)) {
3345
0
    struct common_timeout_list *ctl =
3346
0
        get_common_timeout_list(base, &ev->ev_timeout);
3347
0
    TAILQ_REMOVE(&ctl->events, ev,
3348
0
        ev_timeout_pos.ev_next_with_common_timeout);
3349
0
  } else {
3350
0
    min_heap_erase_(&base->timeheap, ev);
3351
0
  }
3352
0
}
3353
3354
#ifdef USE_REINSERT_TIMEOUT
3355
/* Remove and reinsert 'ev' into the timeout queue. */
3356
static void
3357
event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
3358
    int was_common, int is_common, int old_timeout_idx)
3359
{
3360
  struct common_timeout_list *ctl;
3361
  if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
3362
    event_queue_insert_timeout(base, ev);
3363
    return;
3364
  }
3365
3366
  switch ((was_common<<1) | is_common) {
3367
  case 3: /* Changing from one common timeout to another */
3368
    ctl = base->common_timeout_queues[old_timeout_idx];
3369
    TAILQ_REMOVE(&ctl->events, ev,
3370
        ev_timeout_pos.ev_next_with_common_timeout);
3371
    ctl = get_common_timeout_list(base, &ev->ev_timeout);
3372
    insert_common_timeout_inorder(ctl, ev);
3373
    break;
3374
  case 2: /* Was common; is no longer common */
3375
    ctl = base->common_timeout_queues[old_timeout_idx];
3376
    TAILQ_REMOVE(&ctl->events, ev,
3377
        ev_timeout_pos.ev_next_with_common_timeout);
3378
    min_heap_push_(&base->timeheap, ev);
3379
    break;
3380
  case 1: /* Wasn't common; has become common. */
3381
    min_heap_erase_(&base->timeheap, ev);
3382
    ctl = get_common_timeout_list(base, &ev->ev_timeout);
3383
    insert_common_timeout_inorder(ctl, ev);
3384
    break;
3385
  case 0: /* was in heap; is still on heap. */
3386
    min_heap_adjust_(&base->timeheap, ev);
3387
    break;
3388
  default:
3389
    EVUTIL_ASSERT(0); /* unreachable */
3390
    break;
3391
  }
3392
}
3393
#endif
3394
3395
/* Add 'ev' to the common timeout list 'ctl'. */
3396
static void
3397
insert_common_timeout_inorder(struct common_timeout_list *ctl,
3398
    struct event *ev)
3399
0
{
3400
0
  struct event *e;
3401
  /* By all logic, we should just be able to append 'ev' to the end of
3402
   * ctl->events, since the timeout on each 'ev' is set to {the common
3403
   * timeout} + {the time when we add the event}, and so the events
3404
   * should arrive in order of their timeouts.  But just in case
3405
   * there's some wacky threading issue going on, we do a search from
3406
   * the end of 'ev' to find the right insertion point.
3407
   */
3408
0
  TAILQ_FOREACH_REVERSE(e, &ctl->events,
3409
0
      event_list, ev_timeout_pos.ev_next_with_common_timeout) {
3410
    /* This timercmp is a little sneaky, since both ev and e have
3411
     * magic values in tv_usec.  Fortunately, they ought to have
3412
     * the _same_ magic values in tv_usec.  Let's assert for that.
3413
     */
3414
0
    EVUTIL_ASSERT(
3415
0
      is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
3416
0
    if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
3417
0
      TAILQ_INSERT_AFTER(&ctl->events, e, ev,
3418
0
          ev_timeout_pos.ev_next_with_common_timeout);
3419
0
      return;
3420
0
    }
3421
0
  }
3422
0
  TAILQ_INSERT_HEAD(&ctl->events, ev,
3423
0
      ev_timeout_pos.ev_next_with_common_timeout);
3424
0
}
3425
3426
static void
3427
event_queue_insert_inserted(struct event_base *base, struct event *ev)
3428
0
{
3429
0
  EVENT_BASE_ASSERT_LOCKED(base);
3430
3431
0
  if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
3432
0
    event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
3433
0
                   (void *)ev, EV_SOCK_ARG(ev->ev_fd));
3434
0
    return;
3435
0
  }
3436
3437
0
  INCR_EVENT_COUNT(base, ev->ev_flags);
3438
3439
0
  ev->ev_flags |= EVLIST_INSERTED;
3440
0
}
3441
3442
static void
3443
event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
3444
0
{
3445
0
  EVENT_BASE_ASSERT_LOCKED(base);
3446
3447
0
  if (evcb->evcb_flags & EVLIST_ACTIVE) {
3448
    /* Double insertion is possible for active events */
3449
0
    return;
3450
0
  }
3451
3452
0
  INCR_EVENT_COUNT(base, evcb->evcb_flags);
3453
3454
0
  evcb->evcb_flags |= EVLIST_ACTIVE;
3455
3456
0
  base->event_count_active++;
3457
0
  MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3458
0
  EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3459
0
  TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
3460
0
      evcb, evcb_active_next);
3461
0
}
3462
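The usual way user code reaches event_queue_insert_active() is through the public event_active() call (the backends take the same path when an fd becomes ready). A small, hedged sketch, with a placeholder callback, of manually activating an event so that its callback is queued on the active queue for its priority:

#include <event2/event.h>
#include <stdio.h>

static void on_wakeup(evutil_socket_t fd, short what, void *arg)
{
    (void)fd; (void)arg;
    printf("activated, result flags 0x%x\n", (unsigned)what);
}

int poke_once(struct event_base *base)
{
    int r;
    /* An event with no fd; it only runs when activated by hand. */
    struct event *ev = event_new(base, -1, EV_PERSIST, on_wakeup, NULL);
    if (ev == NULL)
        return -1;
    /* Queues the callback with EV_READ as its result. */
    event_active(ev, EV_READ, 1);
    r = event_base_loop(base, EVLOOP_ONCE);   /* runs the pending callback */
    event_free(ev);
    return r;
}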
3463
static void
3464
event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
3465
0
{
3466
0
  EVENT_BASE_ASSERT_LOCKED(base);
3467
0
  if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
3468
    /* Double insertion is possible */
3469
0
    return;
3470
0
  }
3471
3472
0
  INCR_EVENT_COUNT(base, evcb->evcb_flags);
3473
0
  evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
3474
0
  base->event_count_active++;
3475
0
  MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3476
0
  EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3477
0
  TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
3478
0
}
3479
3480
static void
3481
event_queue_insert_timeout(struct event_base *base, struct event *ev)
3482
0
{
3483
0
  EVENT_BASE_ASSERT_LOCKED(base);
3484
3485
0
  if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3486
0
    event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3487
0
                   (void *)ev, EV_SOCK_ARG(ev->ev_fd));
3488
0
    return;
3489
0
  }
3490
3491
0
  INCR_EVENT_COUNT(base, ev->ev_flags);
3492
3493
0
  ev->ev_flags |= EVLIST_TIMEOUT;
3494
3495
0
  if (is_common_timeout(&ev->ev_timeout, base)) {
3496
0
    struct common_timeout_list *ctl =
3497
0
        get_common_timeout_list(base, &ev->ev_timeout);
3498
0
    insert_common_timeout_inorder(ctl, ev);
3499
0
  } else {
3500
0
    min_heap_push_(&base->timeheap, ev);
3501
0
  }
3502
0
}
3503
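From the public API the branch above is invisible: event_add() with an ordinary struct timeval ends up on the min-heap, while a timeval obtained from event_base_init_common_timeout() carries the magic bits that is_common_timeout() looks for and ends up on the per-duration list. A hedged sketch of both paths (error handling trimmed, the callback is a placeholder):

#include <event2/event.h>

static void cb(evutil_socket_t fd, short what, void *arg)
{
    (void)fd; (void)what; (void)arg;
}

void add_two_timers(struct event_base *base)
{
    struct timeval one_sec = { 1, 0 };

    /* Plain timeout: event_queue_insert_timeout() pushes the event onto
     * the min-heap via min_heap_push_(). */
    struct event *heap_timer = evtimer_new(base, cb, NULL);
    event_add(heap_timer, &one_sec);

    /* Common timeout: same duration, but the returned timeval is tagged,
     * so the event goes onto a common_timeout_list instead. */
    const struct timeval *common =
        event_base_init_common_timeout(base, &one_sec);
    struct event *list_timer = evtimer_new(base, cb, NULL);
    if (common != NULL)
        event_add(list_timer, common);
}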
3504
static void
3505
event_queue_make_later_events_active(struct event_base *base)
3506
0
{
3507
0
  struct event_callback *evcb;
3508
0
  EVENT_BASE_ASSERT_LOCKED(base);
3509
3510
0
  while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
3511
0
    TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3512
0
    evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3513
0
    EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3514
0
    TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3515
0
    base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
3516
0
  }
3517
0
}
3518
3519
/* Functions for debugging */
3520
3521
const char *
3522
event_get_version(void)
3523
0
{
3524
0
  return (EVENT__VERSION);
3525
0
}
3526
3527
ev_uint32_t
3528
event_get_version_number(void)
3529
0
{
3530
0
  return (EVENT__NUMERIC_VERSION);
3531
0
}
3532
3533
/*
3534
 * No thread-safe interface needed - the information should be the same
3535
 * for all threads.
3536
 */
3537
3538
const char *
3539
event_get_method(void)
3540
0
{
3541
0
  return (current_base->evsel->name);
3542
0
}
3543
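A small, hedged sketch of how these introspection helpers are typically used: compare the run-time library against the compile-time constants and report which backend a base selected. event_base_get_method(base) is the per-base variant; event_get_method() above reads the global current_base.

#include <event2/event.h>
#include <stdio.h>

void report_runtime(struct event_base *base)
{
    /* What we are running against (the library actually loaded). */
    printf("running with libevent %s (0x%08x)\n",
        event_get_version(), (unsigned)event_get_version_number());

    /* What we were compiled against. */
    printf("built against libevent %s (0x%08x)\n",
        LIBEVENT_VERSION, (unsigned)LIBEVENT_VERSION_NUMBER);

    /* Backend chosen for this base: "epoll", "kqueue", "poll", ... */
    printf("backend: %s\n", event_base_get_method(base));
}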
3544
#ifndef EVENT__DISABLE_MM_REPLACEMENT
3545
static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3546
static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3547
static void (*mm_free_fn_)(void *p) = NULL;
3548
3549
void *
3550
event_mm_malloc_(size_t sz)
3551
0
{
3552
0
  if (sz == 0)
3553
0
    return NULL;
3554
3555
0
  if (mm_malloc_fn_)
3556
0
    return mm_malloc_fn_(sz);
3557
0
  else
3558
0
    return malloc(sz);
3559
0
}
3560
3561
void *
3562
event_mm_calloc_(size_t count, size_t size)
3563
0
{
3564
0
  if (count == 0 || size == 0)
3565
0
    return NULL;
3566
3567
0
  if (mm_malloc_fn_) {
3568
0
    size_t sz = count * size;
3569
0
    void *p = NULL;
3570
0
    if (count > EV_SIZE_MAX / size)
3571
0
      goto error;
3572
0
    p = mm_malloc_fn_(sz);
3573
0
    if (p)
3574
0
      return memset(p, 0, sz);
3575
0
  } else {
3576
0
    void *p = calloc(count, size);
3577
#ifdef _WIN32
3578
    /* Windows calloc doesn't reliably set ENOMEM */
3579
    if (p == NULL)
3580
      goto error;
3581
#endif
3582
0
    return p;
3583
0
  }
3584
3585
0
error:
3586
0
  errno = ENOMEM;
3587
0
  return NULL;
3588
0
}
3589
3590
char *
3591
event_mm_strdup_(const char *str)
3592
0
{
3593
0
  if (!str) {
3594
0
    errno = EINVAL;
3595
0
    return NULL;
3596
0
  }
3597
3598
0
  if (mm_malloc_fn_) {
3599
0
    size_t ln = strlen(str);
3600
0
    void *p = NULL;
3601
0
    if (ln == EV_SIZE_MAX)
3602
0
      goto error;
3603
0
    p = mm_malloc_fn_(ln+1);
3604
0
    if (p)
3605
0
      return memcpy(p, str, ln+1);
3606
0
  } else
3607
#ifdef _WIN32
3608
    return _strdup(str);
3609
#else
3610
0
    return strdup(str);
3611
0
#endif
3612
3613
0
error:
3614
0
  errno = ENOMEM;
3615
0
  return NULL;
3616
0
}
3617
3618
void *
3619
event_mm_realloc_(void *ptr, size_t sz)
3620
0
{
3621
0
  if (mm_realloc_fn_)
3622
0
    return mm_realloc_fn_(ptr, sz);
3623
0
  else
3624
0
    return realloc(ptr, sz);
3625
0
}
3626
3627
void
3628
event_mm_free_(void *ptr)
3629
0
{
3630
0
  if (mm_free_fn_)
3631
0
    mm_free_fn_(ptr);
3632
0
  else
3633
0
    free(ptr);
3634
0
}
3635
3636
void
3637
event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3638
      void *(*realloc_fn)(void *ptr, size_t sz),
3639
      void (*free_fn)(void *ptr))
3640
0
{
3641
0
  mm_malloc_fn_ = malloc_fn;
3642
0
  mm_realloc_fn_ = realloc_fn;
3643
0
  mm_free_fn_ = free_fn;
3644
0
}
3645
#endif
3646
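event_set_mem_functions() must be called before any other libevent function so that every allocation and free goes through the replacements, and the three pointers should be supplied together. A hedged sketch with a counting allocator (the counter and function names are placeholders); this only works in builds where the MM replacement hooks are enabled.

#include <event2/event.h>
#include <stdio.h>
#include <stdlib.h>

static size_t live_blocks;

static void *counting_malloc(size_t sz)
{
    void *p = malloc(sz);
    if (p)
        live_blocks++;
    return p;
}

static void *counting_realloc(void *p, size_t sz)
{
    void *q = realloc(p, sz);
    if (q && p == NULL)          /* realloc(NULL, sz) creates a new block */
        live_blocks++;
    return q;
}

static void counting_free(void *p)
{
    if (p)
        live_blocks--;
    free(p);
}

int main(void)
{
    /* Must run before event_base_new() or anything else that allocates. */
    event_set_mem_functions(counting_malloc, counting_realloc, counting_free);

    struct event_base *base = event_base_new();
    event_base_free(base);

    printf("blocks still live: %zu\n", live_blocks);
    return 0;
}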
3647
#ifdef EVENT__HAVE_EVENTFD
3648
static void
3649
evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3650
0
{
3651
0
  ev_uint64_t msg;
3652
0
  ev_ssize_t r;
3653
0
  struct event_base *base = arg;
3654
3655
0
  r = read(fd, (void*) &msg, sizeof(msg));
3656
0
  if (r<0 && errno != EAGAIN) {
3657
0
    event_sock_warn(fd, "Error reading from eventfd");
3658
0
  }
3659
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3660
0
  base->is_notify_pending = 0;
3661
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3662
0
}
3663
#endif
3664
3665
static void
3666
evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3667
0
{
3668
0
  unsigned char buf[1024];
3669
0
  struct event_base *base = arg;
3670
#ifdef _WIN32
3671
  while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3672
    ;
3673
#else
3674
0
  while (read(fd, (char*)buf, sizeof(buf)) > 0)
3675
0
    ;
3676
0
#endif
3677
3678
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3679
0
  base->is_notify_pending = 0;
3680
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3681
0
}
3682
3683
int
3684
evthread_make_base_notifiable(struct event_base *base)
3685
0
{
3686
0
  int r;
3687
0
  if (!base)
3688
0
    return -1;
3689
3690
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3691
0
  r = evthread_make_base_notifiable_nolock_(base);
3692
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3693
0
  return r;
3694
0
}
3695
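In typical applications evthread_make_base_notifiable() is not called directly: once a threading implementation is registered, event_base_new() sets up the eventfd/pipe machinery itself, and the call above simply returns 0. A hedged sketch of the usual cross-thread wakeup pattern on POSIX (assumes linking against libevent_pthreads; names are placeholders):

#include <event2/event.h>
#include <event2/thread.h>
#include <pthread.h>
#include <stdio.h>

static void on_poke(evutil_socket_t fd, short what, void *arg)
{
    struct event_base *base = arg;
    (void)fd; (void)what;
    printf("woken from another thread\n");
    event_base_loopbreak(base);
}

static void *worker(void *arg)
{
    struct event *ev = arg;
    /* Safe from another thread once locking is enabled; this is what makes
     * the loop thread get notified through th_notify_fd. */
    event_active(ev, EV_READ, 1);
    return NULL;
}

int main(void)
{
    pthread_t th;
    struct event_base *base;
    struct event *ev;

    evthread_use_pthreads();              /* register pthread locks first */

    base = event_base_new();              /* already notifiable at this point */
    ev = event_new(base, -1, EV_PERSIST, on_poke, base);

    pthread_create(&th, NULL, worker, ev);
    event_base_loop(base, EVLOOP_NO_EXIT_ON_EMPTY);

    pthread_join(th, NULL);
    event_free(ev);
    event_base_free(base);
    return 0;
}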
3696
static int
3697
evthread_make_base_notifiable_nolock_(struct event_base *base)
3698
0
{
3699
0
  void (*cb)(evutil_socket_t, short, void *);
3700
0
  int (*notify)(struct event_base *);
3701
3702
0
  if (base->th_notify_fn != NULL) {
3703
    /* The base is already notifiable: we're doing fine. */
3704
0
    return 0;
3705
0
  }
3706
3707
#if defined(EVENT__HAVE_WORKING_KQUEUE)
3708
  if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3709
    base->th_notify_fn = event_kq_notify_base_;
3710
    /* No need to add an event here; the backend can wake
3711
     * itself up just fine. */
3712
    return 0;
3713
  }
3714
#endif
3715
3716
0
#ifdef EVENT__HAVE_EVENTFD
3717
0
  base->th_notify_fd[0] = evutil_eventfd_(0,
3718
0
      EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
3719
0
  if (base->th_notify_fd[0] >= 0) {
3720
0
    base->th_notify_fd[1] = -1;
3721
0
    notify = evthread_notify_base_eventfd;
3722
0
    cb = evthread_notify_drain_eventfd;
3723
0
  } else
3724
0
#endif
3725
0
  if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
3726
0
    notify = evthread_notify_base_default;
3727
0
    cb = evthread_notify_drain_default;
3728
0
  } else {
3729
0
    return -1;
3730
0
  }
3731
3732
0
  base->th_notify_fn = notify;
3733
3734
  /* prepare an event that we can use for wakeup */
3735
0
  event_assign(&base->th_notify, base, base->th_notify_fd[0],
3736
0
         EV_READ|EV_PERSIST, cb, base);
3737
3738
  /* we need to mark this as an internal event */
3739
0
  base->th_notify.ev_flags |= EVLIST_INTERNAL;
3740
0
  event_priority_set(&base->th_notify, 0);
3741
3742
0
  return event_add_nolock_(&base->th_notify, NULL, 0);
3743
0
}
3744
3745
int
3746
event_base_foreach_event_nolock_(struct event_base *base,
3747
    event_base_foreach_event_cb fn, void *arg)
3748
0
{
3749
0
  int r, i;
3750
0
  size_t u;
3751
0
  struct event *ev;
3752
3753
  /* Start out with all the EVLIST_INSERTED events. */
3754
0
  if ((r = evmap_foreach_event_(base, fn, arg)))
3755
0
    return r;
3756
3757
  /* Okay, now we deal with those events that have timeouts and are in
3758
   * the min-heap. */
3759
0
  for (u = 0; u < base->timeheap.n; ++u) {
3760
0
    ev = base->timeheap.p[u];
3761
0
    if (ev->ev_flags & EVLIST_INSERTED) {
3762
      /* we already processed this one */
3763
0
      continue;
3764
0
    }
3765
0
    if ((r = fn(base, ev, arg)))
3766
0
      return r;
3767
0
  }
3768
3769
  /* Now for the events that sit in one of the common timeout
3770
   * queues rather than in the min-heap. */
3771
0
  for (i = 0; i < base->n_common_timeouts; ++i) {
3772
0
    struct common_timeout_list *ctl =
3773
0
        base->common_timeout_queues[i];
3774
0
    TAILQ_FOREACH(ev, &ctl->events,
3775
0
        ev_timeout_pos.ev_next_with_common_timeout) {
3776
0
      if (ev->ev_flags & EVLIST_INSERTED) {
3777
        /* we already processed this one */
3778
0
        continue;
3779
0
      }
3780
0
      if ((r = fn(base, ev, arg)))
3781
0
        return r;
3782
0
    }
3783
0
  }
3784
3785
  /* Finally, we deal with all the active events that we haven't touched
3786
   * yet. */
3787
0
  for (i = 0; i < base->nactivequeues; ++i) {
3788
0
    struct event_callback *evcb;
3789
0
    TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3790
0
      if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
3791
        /* This isn't an event (evlist_init clear), or
3792
         * we already processed it. (inserted or
3793
         * timeout set). */
3794
0
        continue;
3795
0
      }
3796
0
      ev = event_callback_to_event(evcb);
3797
0
      if ((r = fn(base, ev, arg)))
3798
0
        return r;
3799
0
    }
3800
0
  }
3801
3802
0
  return 0;
3803
0
}
3804
3805
/* Helper for event_base_dump_events: called on each event in the event base;
3806
 * dumps only the inserted events. */
3807
static int
3808
dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
3809
0
{
3810
0
  FILE *output = arg;
3811
0
  const char *gloss = (e->ev_events & EV_SIGNAL) ?
3812
0
      "sig" : "fd ";
3813
3814
0
  if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
3815
0
    return 0;
3816
3817
0
  fprintf(output, "  %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s%s",
3818
0
      (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
3819
0
      (e->ev_events&EV_READ)?" Read":"",
3820
0
      (e->ev_events&EV_WRITE)?" Write":"",
3821
0
      (e->ev_events&EV_CLOSED)?" EOF":"",
3822
0
      (e->ev_events&EV_SIGNAL)?" Signal":"",
3823
0
      (e->ev_events&EV_PERSIST)?" Persist":"",
3824
0
      (e->ev_events&EV_ET)?" ET":"",
3825
0
      (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
3826
0
  if (e->ev_flags & EVLIST_TIMEOUT) {
3827
0
    struct timeval tv;
3828
0
    tv.tv_sec = e->ev_timeout.tv_sec;
3829
0
    tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
3830
0
    evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
3831
0
    fprintf(output, " Timeout=%ld.%06d",
3832
0
        (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
3833
0
  }
3834
0
  fputc('\n', output);
3835
3836
0
  return 0;
3837
0
}
3838
3839
/* Helper for event_base_dump_events: called on each event in the event base;
3840
 * dumps only the active events. */
3841
static int
3842
dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
3843
0
{
3844
0
  FILE *output = arg;
3845
0
  const char *gloss = (e->ev_events & EV_SIGNAL) ?
3846
0
      "sig" : "fd ";
3847
3848
0
  if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
3849
0
    return 0;
3850
3851
0
  fprintf(output, "  %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
3852
0
      (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
3853
0
      (e->ev_res&EV_READ)?" Read":"",
3854
0
      (e->ev_res&EV_WRITE)?" Write":"",
3855
0
      (e->ev_res&EV_CLOSED)?" EOF":"",
3856
0
      (e->ev_res&EV_SIGNAL)?" Signal":"",
3857
0
      (e->ev_res&EV_TIMEOUT)?" Timeout":"",
3858
0
      (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
3859
0
      (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
3860
3861
0
  return 0;
3862
0
}
3863
3864
int
3865
event_base_foreach_event(struct event_base *base,
3866
    event_base_foreach_event_cb fn, void *arg)
3867
0
{
3868
0
  int r;
3869
0
  if ((!fn) || (!base)) {
3870
0
    return -1;
3871
0
  }
3872
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3873
0
  r = event_base_foreach_event_nolock_(base, fn, arg);
3874
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3875
0
  return r;
3876
0
}
3877
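A hedged usage sketch of the public iterator: the callback matches event_base_foreach_event_cb, and a non-zero return stops the walk early, which is why the loops in event_base_foreach_event_nolock_() propagate the callback's return value. The example just counts pending read events; the names are placeholders.

#include <event2/event.h>

static int count_readers(const struct event_base *base,
    const struct event *ev, void *arg)
{
    int *n = arg;
    (void)base;
    if (event_get_events(ev) & EV_READ)
        (*n)++;
    return 0;                /* non-zero would stop the iteration */
}

int pending_read_events(struct event_base *base)
{
    int n = 0;
    /* Holds the base lock for the duration; the callback must not add or
     * remove events on this base. */
    event_base_foreach_event(base, count_readers, &n);
    return n;
}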
3878
3879
void
3880
event_base_dump_events(struct event_base *base, FILE *output)
3881
0
{
3882
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3883
0
  fprintf(output, "Inserted events:\n");
3884
0
  event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
3885
3886
0
  fprintf(output, "Active events:\n");
3887
0
  event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
3888
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3889
0
}
3890
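For completeness, the usual debugging invocation; the two sections it prints are produced by dump_inserted_event_fn() and dump_active_event_fn() above.

#include <event2/event.h>
#include <stdio.h>

void debug_dump(struct event_base *base)
{
    /* Writes an "Inserted events:" and an "Active events:" section. */
    event_base_dump_events(base, stderr);
}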
3891
void
3892
event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3893
0
{
3894
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3895
3896
  /* Activate any non-timer events */
3897
0
  if (!(events & EV_TIMEOUT)) {
3898
0
    evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3899
0
  } else {
3900
    /* If we want to activate timer events, loop and activate each event with
3901
     * the same fd in both the timeheap and the common timeout lists */
3902
0
    int i;
3903
0
    size_t u;
3904
0
    struct event *ev;
3905
3906
0
    for (u = 0; u < base->timeheap.n; ++u) {
3907
0
      ev = base->timeheap.p[u];
3908
0
      if (ev->ev_fd == fd) {
3909
0
        event_active_nolock_(ev, EV_TIMEOUT, 1);
3910
0
      }
3911
0
    }
3912
3913
0
    for (i = 0; i < base->n_common_timeouts; ++i) {
3914
0
      struct common_timeout_list *ctl = base->common_timeout_queues[i];
3915
0
      TAILQ_FOREACH(ev, &ctl->events,
3916
0
        ev_timeout_pos.ev_next_with_common_timeout) {
3917
0
        if (ev->ev_fd == fd) {
3918
0
          event_active_nolock_(ev, EV_TIMEOUT, 1);
3919
0
        }
3920
0
      }
3921
0
    }
3922
0
  }
3923
3924
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3925
0
}
3926
3927
void
3928
event_base_active_by_signal(struct event_base *base, int sig)
3929
0
{
3930
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3931
0
  evmap_signal_active_(base, sig, 1);
3932
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3933
0
}
3934
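A hedged sketch of the two injection helpers above: they make registered handlers run as if an fd had become readable, a timer had expired, or a signal had arrived, without anything happening at the OS level, which is mainly useful in tests and forced-shutdown paths. The function name is a placeholder.

#include <event2/event.h>
#include <signal.h>

void force_progress(struct event_base *base, evutil_socket_t fd)
{
    /* Run every EV_READ handler registered on 'fd' as if it were readable. */
    event_base_active_by_fd(base, fd, EV_READ);

    /* Run every timeout-pending event whose ev_fd equals 'fd' with an
     * EV_TIMEOUT result. */
    event_base_active_by_fd(base, fd, EV_TIMEOUT);

    /* Run the handlers registered for SIGTERM as if the signal had fired. */
    event_base_active_by_signal(base, SIGTERM);
}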
3935
3936
void
3937
event_base_add_virtual_(struct event_base *base)
3938
0
{
3939
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3940
0
  base->virtual_event_count++;
3941
0
  MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
3942
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3943
0
}
3944
3945
void
3946
event_base_del_virtual_(struct event_base *base)
3947
0
{
3948
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3949
0
  EVUTIL_ASSERT(base->virtual_event_count > 0);
3950
0
  base->virtual_event_count--;
3951
0
  if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
3952
0
    evthread_notify_base(base);
3953
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3954
0
}
3955
3956
static void
3957
event_free_debug_globals_locks(void)
3958
0
{
3959
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
3960
0
#ifndef EVENT__DISABLE_DEBUG_MODE
3961
0
  if (event_debug_map_lock_ != NULL) {
3962
0
    EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3963
0
    event_debug_map_lock_ = NULL;
3964
0
    evthreadimpl_disable_lock_debugging_();
3965
0
  }
3966
0
#endif /* EVENT__DISABLE_DEBUG_MODE */
3967
0
#endif /* EVENT__DISABLE_THREAD_SUPPORT */
3968
0
  return;
3969
0
}
3970
3971
static void
3972
event_free_debug_globals(void)
3973
0
{
3974
0
  event_free_debug_globals_locks();
3975
0
}
3976
3977
static void
3978
event_free_evsig_globals(void)
3979
0
{
3980
0
  evsig_free_globals_();
3981
0
}
3982
3983
static void
3984
event_free_evutil_globals(void)
3985
0
{
3986
0
  evutil_free_globals_();
3987
0
}
3988
3989
static void
3990
event_free_globals(void)
3991
0
{
3992
0
  event_free_debug_globals();
3993
0
  event_free_evsig_globals();
3994
0
  event_free_evutil_globals();
3995
0
}
3996
3997
void
3998
libevent_global_shutdown(void)
3999
0
{
4000
0
  event_disable_debug_mode();
4001
0
  event_free_globals();
4002
0
}
4003
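libevent_global_shutdown() only releases the library-wide state torn down by the helpers above; it does not free bases, events, or anything else the application allocated, so it belongs at the very end of the process, after every base has been freed. A short ordering sketch:

#include <event2/event.h>

int main(void)
{
    struct event_base *base = event_base_new();

    /* ... set up events, run event_base_dispatch(base), ... */

    event_base_free(base);          /* per-base cleanup first */
    libevent_global_shutdown();     /* then the global singletons, once */
    return 0;
}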
4004
#ifndef EVENT__DISABLE_THREAD_SUPPORT
4005
int
4006
event_global_setup_locks_(const int enable_locks)
4007
0
{
4008
0
#ifndef EVENT__DISABLE_DEBUG_MODE
4009
0
  EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
4010
0
#endif
4011
0
  if (evsig_global_setup_locks_(enable_locks) < 0)
4012
0
    return -1;
4013
0
  if (evutil_global_setup_locks_(enable_locks) < 0)
4014
0
    return -1;
4015
0
  if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
4016
0
    return -1;
4017
0
  return 0;
4018
0
}
4019
#endif
4020
4021
void
4022
event_base_assert_ok_(struct event_base *base)
4023
0
{
4024
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
4025
0
  event_base_assert_ok_nolock_(base);
4026
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
4027
0
}
4028
4029
void
4030
event_base_assert_ok_nolock_(struct event_base *base)
4031
0
{
4032
0
  int i;
4033
0
  size_t u;
4034
0
  int count;
4035
4036
  /* First do checks on the per-fd and per-signal lists */
4037
0
  evmap_check_integrity_(base);
4038
4039
  /* Check the heap property */
4040
0
  for (u = 1; u < base->timeheap.n; ++u) {
4041
0
    size_t parent = (u - 1) / 2;
4042
0
    struct event *ev, *p_ev;
4043
0
    ev = base->timeheap.p[u];
4044
0
    p_ev = base->timeheap.p[parent];
4045
0
    EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
4046
0
    EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
4047
0
    EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == u);
4048
0
  }
4049
4050
  /* Check that the common timeouts are fine */
4051
0
  for (i = 0; i < base->n_common_timeouts; ++i) {
4052
0
    struct common_timeout_list *ctl = base->common_timeout_queues[i];
4053
0
    struct event *last=NULL, *ev;
4054
4055
0
    EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
4056
4057
0
    TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
4058
0
      if (last)
4059
0
        EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
4060
0
      EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
4061
0
      EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
4062
0
      EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
4063
0
      last = ev;
4064
0
    }
4065
0
  }
4066
4067
  /* Check the active queues. */
4068
0
  count = 0;
4069
0
  for (i = 0; i < base->nactivequeues; ++i) {
4070
0
    struct event_callback *evcb;
4071
0
    EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
4072
0
    TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
4073
0
      EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
4074
0
      EVUTIL_ASSERT(evcb->evcb_pri == i);
4075
0
      ++count;
4076
0
    }
4077
0
  }
4078
4079
0
  {
4080
0
    struct event_callback *evcb;
4081
0
    TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
4082
0
      EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
4083
0
      ++count;
4084
0
    }
4085
0
  }
4086
0
  EVUTIL_ASSERT(count == base->event_count_active);
4087
0
}
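The heap check above relies only on the standard array layout of a binary min-heap: the parent of slot u lives at (u - 1) / 2 and may not have a later deadline than its child. A self-contained sketch of the same invariant over a plain array of timevals (an illustration, not libevent code):

#include <event2/util.h>    /* evutil_timercmp; also pulls in struct timeval */
#include <stddef.h>

/* Return 1 if tv[0..n-1], read as an array-backed binary min-heap, satisfies
 * the parent <= child ordering that event_base_assert_ok_nolock_() checks. */
int timeval_heap_ok(const struct timeval *tv, size_t n)
{
    size_t u;
    for (u = 1; u < n; ++u) {
        size_t parent = (u - 1) / 2;
        if (evutil_timercmp(&tv[parent], &tv[u], >))
            return 0;
    }
    return 1;
}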