/src/dovecot/src/lib/ioloop.c
Line | Count | Source |
1 | | /* Copyright (c) 2002-2018 Dovecot authors, see the included COPYING file */ |
2 | | |
3 | | #include "lib.h" |
4 | | #include "array.h" |
5 | | #include "backtrace-string.h" |
6 | | #include "llist.h" |
7 | | #include "time-util.h" |
8 | | #include "istream-private.h" |
9 | | #include "ioloop-private.h" |
10 | | |
11 | | #include <unistd.h> |
12 | | |
13 | | /* Dovecot attempts to detect also when time suddenly jumps forwards. |
14 | | This is done by getting the minimum timeout wait in epoll() (or similar) |
15 | | and then seeing if the current time after epoll() is past the timeout. |
16 | | This can't be very exact, so likely the difference is always at least |
17 | | 1 microsecond. In high load situations it can be somewhat higher. |
18 | | Dovecot generally doesn't have very important short timeouts, so to avoid |
19 | | logging many warnings about this, use a rather high value. */ |
20 | 0 | #define IOLOOP_TIME_MOVED_FORWARDS_MIN_USECS (100000) |
21 | | /* When the ioloop wait time is large, the "time moved forwards" detection |
22 | | can't be done as reliably. Apparently if we ask the kernel to wait for |
23 | | 10000ms, it might think it's okay to stop after 10100ms or more. So use |
24 | | a larger value for larger timeouts. */ |
25 | 0 | #define IOLOOP_TIME_MOVED_FORWARDS_MIN_USECS_LARGE (1000000) |
26 | | |
/* Cached wall-clock second, refreshed at io_loop_create(),
   io_loop_time_refresh() and each timeout-handling round. */
time_t ioloop_time = 0;
/* Same timestamp with microsecond precision. */
struct timeval ioloop_timeval;
/* Top of the ioloop stack; newly created ioloops link to the previous
   one via ->prev. */
struct ioloop *current_ioloop = NULL;
/* Total microseconds spent waiting across all ioloops (see
   ioloop_add_wait_time()). */
uint64_t ioloop_global_wait_usecs = 0;

/* Globally registered callback lists. io_destroy_callbacks are invoked
   from io_loop_destroy(); io_switch_callbacks are registered here but
   invoked elsewhere (presumably on current_ioloop switches — not
   visible in this chunk). */
static ARRAY(io_switch_callback_t *) io_switch_callbacks = ARRAY_INIT;
static ARRAY(io_destroy_callback_t *) io_destroy_callbacks = ARRAY_INIT;
/* When CORE_IO_LEAK is set in the environment (checked once in
   io_loop_create()), leaked ios/timeouts panic instead of warning. */
static bool panic_on_leak = FALSE, panic_on_leak_set = FALSE;

/* ioloop_time of the last data_stack_free_unused() call; used to free
   unused data stack memory at most once per second. */
static time_t data_stack_last_free_unused = 0;
38 | | static void io_loop_initialize_handler(struct ioloop *ioloop) |
39 | 0 | { |
40 | 0 | unsigned int initial_fd_count; |
41 | |
|
42 | 0 | initial_fd_count = ioloop->max_fd_count > 0 && |
43 | 0 | ioloop->max_fd_count < IOLOOP_INITIAL_FD_COUNT ? |
44 | 0 | ioloop->max_fd_count : IOLOOP_INITIAL_FD_COUNT; |
45 | 0 | io_loop_handler_init(ioloop, initial_fd_count); |
46 | 0 | } |
47 | | |
/* Allocate a new file I/O watcher on the given ioloop and link it to
   the head of the ioloop's io_files list. fd may be -1 for
   istream-backed ios that are triggered only via
   i_stream_set_input_pending(). The returned io starts with
   refcount 1. */
static struct io_file *
io_add_file(struct ioloop *ioloop, int fd, enum io_condition condition,
	    const char *source_filename,
	    unsigned int source_linenum,
	    io_callback_t *callback, void *context)
{
	struct io_file *io;

	i_assert(callback != NULL);
	/* notify ios aren't created through this path */
	i_assert((condition & IO_NOTIFY) == 0);

	io = i_new(struct io_file, 1);
	io->io.condition = condition;
	io->io.callback = callback;
	io->io.context = context;
	io->io.ioloop = ioloop;
	/* remember where the io was added from, for leak reports */
	io->io.source_filename = source_filename;
	io->io.source_linenum = source_linenum;
	io->refcount = 1;
	io->fd = fd;

	/* inherit the currently active ioloop context, if any */
	if (io->io.ioloop->cur_ctx != NULL) {
		io->io.ctx = io->io.ioloop->cur_ctx;
		io_loop_context_ref(io->io.ctx);
	}

	/* lazily initialize the poll backend on the first io */
	if (io->io.ioloop->handler_context == NULL)
		io_loop_initialize_handler(io->io.ioloop);
	if (fd != -1)
		io_loop_handle_add(io);
	else {
		/* we're adding an istream whose only way to get notified
		   is to call i_stream_set_input_pending() */
	}

	/* push onto the head of the doubly-linked io_files list */
	if (io->io.ioloop->io_files != NULL) {
		io->io.ioloop->io_files->prev = io;
		io->next = io->io.ioloop->io_files;
	}
	io->io.ioloop->io_files = io;
	return io;
}
90 | | |
91 | | #undef io_add_to |
92 | | struct io *io_add_to(struct ioloop *ioloop, int fd, enum io_condition condition, |
93 | | const char *source_filename, unsigned int source_linenum, |
94 | | io_callback_t *callback, void *context) |
95 | 0 | { |
96 | 0 | struct io_file *io; |
97 | |
|
98 | 0 | i_assert(fd >= 0); |
99 | 0 | io = io_add_file(ioloop, fd, condition, |
100 | 0 | source_filename, source_linenum, |
101 | 0 | callback, context); |
102 | 0 | return &io->io; |
103 | 0 | } |
104 | | |
105 | | #undef io_add |
106 | | struct io *io_add(int fd, enum io_condition condition, |
107 | | const char *source_filename, |
108 | | unsigned int source_linenum, |
109 | | io_callback_t *callback, void *context) |
110 | 0 | { |
111 | 0 | return io_add_to(current_ioloop, fd, condition, |
112 | 0 | source_filename, source_linenum, |
113 | 0 | callback, context); |
114 | 0 | } |
115 | | |
116 | | #undef io_add_istream_to |
117 | | struct io *io_add_istream_to(struct ioloop *ioloop, struct istream *input, |
118 | | const char *source_filename, |
119 | | unsigned int source_linenum, |
120 | | io_callback_t *callback, void *context) |
121 | 0 | { |
122 | 0 | struct io_file *io; |
123 | |
|
124 | 0 | io = io_add_file(ioloop, i_stream_get_fd(input), IO_READ, |
125 | 0 | source_filename, source_linenum, callback, context); |
126 | 0 | io->istream = input; |
127 | 0 | i_stream_ref(io->istream); |
128 | 0 | i_stream_set_io(io->istream, &io->io); |
129 | 0 | return &io->io; |
130 | 0 | } |
131 | | |
132 | | #undef io_add_istream |
133 | | struct io *io_add_istream(struct istream *input, const char *source_filename, |
134 | | unsigned int source_linenum, |
135 | | io_callback_t *callback, void *context) |
136 | 0 | { |
137 | 0 | return io_add_istream_to(current_ioloop, input, |
138 | 0 | source_filename, source_linenum, |
139 | 0 | callback, context); |
140 | 0 | } |
141 | | |
/* Detach an io from its ioloop's doubly-linked io_files list, keeping
   the in-progress iteration pointer (next_io_file) valid. */
static void io_file_unlink(struct io_file *io)
{
	if (io->prev != NULL)
		io->prev->next = io->next;
	else
		io->io.ioloop->io_files = io->next;

	if (io->next != NULL)
		io->next->prev = io->prev;

	/* if we got here from an I/O handler callback, make sure we
	   don't try to handle this one next. */
	if (io->io.ioloop->next_io_file == io)
		io->io.ioloop->next_io_file = io->next;
}
157 | | |
/* Shared implementation of io_remove() and io_remove_closed().
   closed=TRUE tells the poll backend that the fd was already closed by
   the caller. */
static void io_remove_full(struct io **_io, bool closed)
{
	struct io *io = *_io;

	/* double-remove would have cleared the callback below */
	i_assert(io->callback != NULL);

	*_io = NULL;

	/* make sure the callback doesn't get called anymore.
	   kqueue code relies on this. */
	io->callback = NULL;

	/* drop an undelivered pending event from the count */
	if (io->pending) {
		i_assert(io->ioloop->io_pending_count > 0);
		io->ioloop->io_pending_count--;
	}

	if (io->ctx != NULL)
		io_loop_context_unref(&io->ctx);

	if ((io->condition & IO_NOTIFY) != 0)
		io_loop_notify_remove(io);
	else {
		struct io_file *io_file = (struct io_file *)io;
		struct istream *istream = io_file->istream;

		if (istream != NULL) {
			/* remove io before it's freed */
			i_stream_unset_io(istream, io);
		}

		io_file_unlink(io_file);
		if (io_file->fd != -1)
			io_loop_handle_remove(io_file, closed);
		else {
			/* istream-only io: nothing registered with the
			   backend, free directly */
			i_free(io);
		}

		/* remove io from the ioloop before unreferencing the istream,
		   because a destroyed istream may automatically close the
		   fd. */
		i_stream_unref(&istream);
	}
}
201 | | |
202 | | void io_remove(struct io **io) |
203 | 0 | { |
204 | 0 | if (*io == NULL) |
205 | 0 | return; |
206 | | |
207 | 0 | io_remove_full(io, FALSE); |
208 | 0 | } |
209 | | |
210 | | void io_remove_closed(struct io **io) |
211 | 0 | { |
212 | 0 | if (*io == NULL) |
213 | 0 | return; |
214 | | |
215 | 0 | i_assert(((*io)->condition & IO_NOTIFY) == 0); |
216 | | |
217 | 0 | io_remove_full(io, TRUE); |
218 | 0 | } |
219 | | |
220 | | void io_set_pending(struct io *io) |
221 | 0 | { |
222 | 0 | i_assert((io->condition & IO_NOTIFY) == 0); |
223 | | |
224 | 0 | if (!io->pending) { |
225 | 0 | io->pending = TRUE; |
226 | 0 | io->ioloop->io_pending_count++; |
227 | 0 | } |
228 | 0 | } |
229 | | |
230 | | bool io_is_pending(struct io *io) |
231 | 0 | { |
232 | 0 | return io->pending; |
233 | 0 | } |
234 | | |
235 | | void io_set_never_wait_alone(struct io *io, bool set) |
236 | 0 | { |
237 | 0 | io->never_wait_alone = set; |
238 | 0 | } |
239 | | |
240 | | static void timeout_update_next(struct timeout *timeout, struct timeval *tv_now) |
241 | 0 | { |
242 | 0 | if (tv_now == NULL) |
243 | 0 | i_gettimeofday(&timeout->next_run); |
244 | 0 | else { |
245 | 0 | timeout->next_run.tv_sec = tv_now->tv_sec; |
246 | 0 | timeout->next_run.tv_usec = tv_now->tv_usec; |
247 | 0 | } |
248 | | |
249 | | /* we don't want microsecond accuracy or this function will be |
250 | | called all the time - millisecond is more than enough */ |
251 | 0 | timeout->next_run.tv_usec -= timeout->next_run.tv_usec % 1000; |
252 | |
|
253 | 0 | timeout->next_run.tv_sec += timeout->msecs/1000; |
254 | 0 | timeout->next_run.tv_usec += (timeout->msecs%1000)*1000; |
255 | |
|
256 | 0 | if (timeout->next_run.tv_usec >= 1000000) { |
257 | 0 | timeout->next_run.tv_sec++; |
258 | 0 | timeout->next_run.tv_usec -= 1000000; |
259 | 0 | } |
260 | 0 | } |
261 | | |
262 | | static struct timeout * |
263 | | timeout_add_common(struct ioloop *ioloop, const char *source_filename, |
264 | | unsigned int source_linenum, |
265 | | timeout_callback_t *callback, void *context) |
266 | 0 | { |
267 | 0 | struct timeout *timeout; |
268 | |
|
269 | 0 | timeout = i_new(struct timeout, 1); |
270 | 0 | timeout->item.idx = UINT_MAX; |
271 | 0 | timeout->source_filename = source_filename; |
272 | 0 | timeout->source_linenum = source_linenum; |
273 | 0 | timeout->ioloop = ioloop; |
274 | |
|
275 | 0 | timeout->callback = callback; |
276 | 0 | timeout->context = context; |
277 | |
|
278 | 0 | if (timeout->ioloop->cur_ctx != NULL) { |
279 | 0 | timeout->ctx = timeout->ioloop->cur_ctx; |
280 | 0 | io_loop_context_ref(timeout->ctx); |
281 | 0 | } |
282 | |
|
283 | 0 | return timeout; |
284 | 0 | } |
285 | | |
#undef timeout_add_to
/* Add a repeating relative timeout of msecs milliseconds to the given
   ioloop. Non-zero timeouts start counting at the next run cycle
   (io_loop_timeouts_start_new()); zero timeouts are queued right away. */
struct timeout *timeout_add_to(struct ioloop *ioloop, unsigned int msecs,
			       const char *source_filename,
			       unsigned int source_linenum,
			       timeout_callback_t *callback, void *context)
{
	struct timeout *timeout;

	timeout = timeout_add_common(ioloop, source_filename, source_linenum,
				     callback, context);
	timeout->msecs = msecs;

	if (msecs > 0) {
		/* start this timeout in the next run cycle */
		array_push_back(&timeout->ioloop->timeouts_new, &timeout);
	} else {
		/* Trigger zero timeouts as soon as possible. When ioloop is
		   running, refresh the timestamp to prevent infinite loops
		   in case a timeout callback keeps recreating the 0-timeout. */
		timeout_update_next(timeout, timeout->ioloop->running ?
				    NULL : &ioloop_timeval);
		priorityq_add(timeout->ioloop->timeouts, &timeout->item);
	}
	return timeout;
}
311 | | |
312 | | #undef timeout_add |
313 | | struct timeout *timeout_add(unsigned int msecs, const char *source_filename, |
314 | | unsigned int source_linenum, |
315 | | timeout_callback_t *callback, void *context) |
316 | 0 | { |
317 | 0 | return timeout_add_to(current_ioloop, msecs, |
318 | 0 | source_filename, source_linenum, |
319 | 0 | callback, context); |
320 | 0 | } |
321 | | |
322 | | #undef timeout_add_short_to |
323 | | struct timeout * |
324 | | timeout_add_short_to(struct ioloop *ioloop, unsigned int msecs, |
325 | | const char *source_filename, unsigned int source_linenum, |
326 | | timeout_callback_t *callback, void *context) |
327 | 0 | { |
328 | 0 | return timeout_add_to(ioloop, msecs, |
329 | 0 | source_filename, source_linenum, |
330 | 0 | callback, context); |
331 | 0 | } |
332 | | |
333 | | #undef timeout_add_short |
334 | | struct timeout * |
335 | | timeout_add_short(unsigned int msecs, const char *source_filename, |
336 | | unsigned int source_linenum, |
337 | | timeout_callback_t *callback, void *context) |
338 | 0 | { |
339 | 0 | return timeout_add(msecs, source_filename, source_linenum, |
340 | 0 | callback, context); |
341 | 0 | } |
342 | | |
343 | | #undef timeout_add_absolute_to |
344 | | struct timeout * |
345 | | timeout_add_absolute_to(struct ioloop *ioloop, const struct timeval *time, |
346 | | const char *source_filename, |
347 | | unsigned int source_linenum, |
348 | | timeout_callback_t *callback, void *context) |
349 | 0 | { |
350 | 0 | struct timeout *timeout; |
351 | |
|
352 | 0 | timeout = timeout_add_common(ioloop, source_filename, source_linenum, |
353 | 0 | callback, context); |
354 | 0 | timeout->one_shot = TRUE; |
355 | 0 | timeout->next_run = *time; |
356 | |
|
357 | 0 | priorityq_add(timeout->ioloop->timeouts, &timeout->item); |
358 | 0 | return timeout; |
359 | 0 | } |
360 | | |
361 | | #undef timeout_add_absolute |
362 | | struct timeout * |
363 | | timeout_add_absolute(const struct timeval *time, |
364 | | const char *source_filename, |
365 | | unsigned int source_linenum, |
366 | | timeout_callback_t *callback, void *context) |
367 | 0 | { |
368 | 0 | return timeout_add_absolute_to(current_ioloop, time, |
369 | 0 | source_filename, source_linenum, |
370 | 0 | callback, context); |
371 | 0 | } |
372 | | |
/* Duplicate a timeout onto (possibly another) ioloop, preserving its
   scheduling state: already-queued timeouts are queued immediately,
   unstarted repeating timeouts go to timeouts_new. */
static struct timeout *
timeout_copy(const struct timeout *old_to, struct ioloop *ioloop)
{
	struct timeout *new_to;

	new_to = timeout_add_common(ioloop,
		old_to->source_filename, old_to->source_linenum,
		old_to->callback, old_to->context);
	new_to->one_shot = old_to->one_shot;
	new_to->msecs = old_to->msecs;
	new_to->next_run = old_to->next_run;

	/* item.idx != UINT_MAX means the old timeout was already in the
	   priority queue */
	if (old_to->item.idx != UINT_MAX)
		priorityq_add(new_to->ioloop->timeouts, &new_to->item);
	else if (!new_to->one_shot) {
		/* relative timeout that hasn't started yet */
		i_assert(new_to->msecs > 0);
		array_push_back(&new_to->ioloop->timeouts_new, &new_to);
	}

	return new_to;
}
394 | | |
395 | | static void timeout_free(struct timeout *timeout) |
396 | 0 | { |
397 | 0 | if (timeout->ctx != NULL) |
398 | 0 | io_loop_context_unref(&timeout->ctx); |
399 | 0 | i_free(timeout); |
400 | 0 | } |
401 | | |
/* Cancel and free a timeout. Accepts *_timeout == NULL as a no-op and
   NULLs the caller's pointer. */
void timeout_remove(struct timeout **_timeout)
{
	struct timeout *timeout = *_timeout;
	struct ioloop *ioloop;

	if (timeout == NULL)
		return;

	ioloop = timeout->ioloop;

	*_timeout = NULL;
	/* item.idx != UINT_MAX means the timeout is in the priority queue */
	if (timeout->item.idx != UINT_MAX)
		priorityq_remove(timeout->ioloop->timeouts, &timeout->item);
	else if (!timeout->one_shot && timeout->msecs > 0) {
		/* an unstarted repeating timeout must still be in the
		   timeouts_new array */
		unsigned int idx;

		if (!array_lsearch_ptr_idx(&ioloop->timeouts_new, timeout, &idx))
			i_unreached();
		array_delete(&ioloop->timeouts_new, idx, 1);
	}
	timeout_free(timeout);
}
424 | | |
/* Reschedule a queued timeout to run timeout->msecs from tv_now (or
   from the current time when tv_now is NULL), then re-sort it in the
   priority queue. Does nothing if the timeout isn't queued. */
static void ATTR_NULL(2)
timeout_reset_timeval(struct timeout *timeout, struct timeval *tv_now)
{
	if (timeout->item.idx == UINT_MAX)
		return;

	timeout_update_next(timeout, tv_now);
	/* If we came here from io_loop_handle_timeouts_real(), next_run must
	   be larger than tv_now or it can go to infinite loop. This would
	   mainly happen with 0 ms timeouts. Avoid this by making sure
	   next_run is at least 1 us higher than tv_now.

	   Note that some callers (like master process's process_min_avail
	   preforking timeout) really do want the 0 ms timeout to trigger
	   multiple times as rapidly as it can (but in separate ioloop runs).
	   So don't increase it more than by 1 us. */
	if (tv_now != NULL && timeval_cmp(&timeout->next_run, tv_now) <= 0) {
		timeout->next_run = *tv_now;
		timeval_add_usecs(&timeout->next_run, 1);
	}
	/* reposition in the queue by removing and re-adding */
	priorityq_remove(timeout->ioloop->timeouts, &timeout->item);
	priorityq_add(timeout->ioloop->timeouts, &timeout->item);
}
448 | | |
449 | | void timeout_reset(struct timeout *timeout) |
450 | 0 | { |
451 | 0 | i_assert(!timeout->one_shot); |
452 | 0 | timeout_reset_timeval(timeout, NULL); |
453 | 0 | } |
454 | | |
/* Calculate how long to wait before the timeout should run. Returns
   the wait in milliseconds (rounded up to the next full millisecond)
   and writes the exact remaining time to tv_r. tv_now is read from the
   clock first if its tv_sec is still 0. */
static int timeout_get_wait_time(struct timeout *timeout, struct timeval *tv_r,
				 struct timeval *tv_now, bool in_timeout_loop)
{
	int ret;

	if (tv_now->tv_sec == 0)
		i_gettimeofday(tv_now);
	tv_r->tv_sec = tv_now->tv_sec;
	tv_r->tv_usec = tv_now->tv_usec;

	i_assert(tv_r->tv_sec > 0);
	i_assert(timeout->next_run.tv_sec > 0);

	/* tv_r = next_run - now */
	tv_r->tv_sec = timeout->next_run.tv_sec - tv_r->tv_sec;
	tv_r->tv_usec = timeout->next_run.tv_usec - tv_r->tv_usec;
	if (tv_r->tv_usec < 0) {
		tv_r->tv_sec--;
		tv_r->tv_usec += 1000000;
	}

	if (tv_r->tv_sec < 0) {
		/* The timeout should have been called already */
		tv_r->tv_sec = 0;
		tv_r->tv_usec = 0;
		return 0;
	}
	if (tv_r->tv_sec == 0 && tv_r->tv_usec == 1 && !in_timeout_loop) {
		/* Possibly 0 ms timeout. Don't wait for a full millisecond
		   for it to trigger. */
		tv_r->tv_usec = 0;
		return 0;
	}
	/* cap so the msecs calculation below stays within int range */
	if (tv_r->tv_sec > INT_MAX/1000-1)
		tv_r->tv_sec = INT_MAX/1000-1;

	/* round wait times up to next millisecond */
	ret = tv_r->tv_sec * 1000 + (tv_r->tv_usec + 999) / 1000;
	i_assert(ret >= 0 && tv_r->tv_sec >= 0 && tv_r->tv_usec >= 0);
	return ret;
}
495 | | |
/* Compute how long the poll backend should wait: time until the
   earliest timeout, 0 if pending ios exist, or -1 (infinite) when
   there's nothing to wait for. Also sets next_max_time (used by the
   forward clock-jump detection) and refreshes the global ioloop time
   variables. */
static int io_loop_get_wait_time(struct ioloop *ioloop, struct timeval *tv_r)
{
	struct timeval tv_now;
	struct priorityq_item *item;
	struct timeout *timeout;
	int msecs;

	item = priorityq_peek(ioloop->timeouts);
	timeout = (struct timeout *)item;

	/* we need to see if there are pending IO waiting,
	   if there is, we set msecs = 0 to ensure they are
	   processed without delay */
	if (timeout == NULL && ioloop->io_pending_count == 0) {
		/* no timeouts. use INT_MAX msecs for timeval and
		   return -1 for poll/epoll infinity. */
		tv_r->tv_sec = INT_MAX / 1000;
		tv_r->tv_usec = 0;
		ioloop->next_max_time.tv_sec = (1ULL << (TIME_T_MAX_BITS-1)) - 1;
		ioloop->next_max_time.tv_usec = 0;
		return -1;
	}

	if (ioloop->io_pending_count > 0) {
		i_gettimeofday(&tv_now);
		msecs = 0;
		tv_r->tv_sec = 0;
		tv_r->tv_usec = 0;
	} else {
		/* tv_sec == 0 makes timeout_get_wait_time() read the clock */
		tv_now.tv_sec = 0;
		msecs = timeout_get_wait_time(timeout, tv_r, &tv_now, FALSE);
	}
	ioloop->next_max_time = tv_now;
	timeval_add_msecs(&ioloop->next_max_time, msecs);

	/* update ioloop_timeval - this is meant for io_loop_handle_timeouts()'s
	   ioloop_wait_usecs calculation. normally after this we go to the
	   ioloop and after that we update ioloop_timeval immediately again. */
	ioloop_timeval = tv_now;
	ioloop_time = tv_now.tv_sec;
	i_assert(msecs == 0 || timeout->msecs > 0 || timeout->one_shot);
	return msecs;
}
539 | | |
540 | | static bool io_loop_have_waitable_io_files(struct ioloop *ioloop) |
541 | 0 | { |
542 | 0 | struct io_file *io; |
543 | |
|
544 | 0 | for (io = ioloop->io_files; io != NULL; io = io->next) { |
545 | 0 | if (io->io.callback != NULL && !io->io.never_wait_alone) |
546 | 0 | return TRUE; |
547 | 0 | } |
548 | 0 | return FALSE; |
549 | 0 | } |
550 | | |
/* Like io_loop_get_wait_time(), but panics instead of waiting forever
   when there is nothing that could ever wake the loop up. */
int io_loop_run_get_wait_time(struct ioloop *ioloop, struct timeval *tv_r)
{
	int msecs = io_loop_get_wait_time(ioloop, tv_r);

	if (msecs < 0 && !io_loop_have_waitable_io_files(ioloop))
		i_panic("BUG: No IOs or timeouts set. Not waiting for infinity.");
	return msecs;
}
558 | | |
559 | | static int timeout_cmp(const void *p1, const void *p2) |
560 | 0 | { |
561 | 0 | const struct timeout *to1 = p1, *to2 = p2; |
562 | |
|
563 | 0 | return timeval_cmp(&to1->next_run, &to2->next_run); |
564 | 0 | } |
565 | | |
/* Default time_moved_callback: warn when the clock jumped backwards
   (old_time later than new_time); forward jumps stay silent. */
static void
io_loop_default_time_moved(const struct timeval *old_time,
			   const struct timeval *new_time)
{
	long long diff = timeval_diff_usecs(old_time, new_time);

	if (diff <= 0)
		return;
	i_warning("Time moved backwards by %lld.%06lld seconds.",
		  diff / 1000000, diff % 1000000);
}
576 | | |
/* Move timeouts created since the previous run cycle from the
   timeouts_new array into the priority queue, stamping each with its
   first run time relative to the freshly refreshed current time. */
static void io_loop_timeouts_start_new(struct ioloop *ioloop)
{
	struct timeout *timeout;

	if (array_count(&ioloop->timeouts_new) == 0)
		return;

	io_loop_time_refresh();

	array_foreach_elem(&ioloop->timeouts_new, timeout) {
		/* only unstarted, repeating, non-zero timeouts may be here */
		i_assert(timeout->next_run.tv_sec == 0 &&
			 timeout->next_run.tv_usec == 0);
		i_assert(!timeout->one_shot);
		i_assert(timeout->msecs > 0);
		timeout_update_next(timeout, &ioloop_timeval);
		priorityq_add(ioloop->timeouts, &timeout->item);
	}
	array_clear(&ioloop->timeouts_new);
}
596 | | |
597 | | static void io_loop_timeouts_update(struct ioloop *ioloop, long long diff_usecs) |
598 | 0 | { |
599 | 0 | struct priorityq_item *const *items; |
600 | 0 | unsigned int i, count; |
601 | |
|
602 | 0 | count = priorityq_count(ioloop->timeouts); |
603 | 0 | items = priorityq_items(ioloop->timeouts); |
604 | 0 | for (i = 0; i < count; i++) { |
605 | 0 | struct timeout *to = (struct timeout *)items[i]; |
606 | |
|
607 | 0 | if (diff_usecs > 0) |
608 | 0 | timeval_add_usecs(&to->next_run, diff_usecs); |
609 | 0 | else |
610 | 0 | timeval_sub_usecs(&to->next_run, -diff_usecs); |
611 | 0 | } |
612 | 0 | } |
613 | | |
614 | | static void io_loops_timeouts_update(long long diff_usecs) |
615 | 0 | { |
616 | 0 | struct ioloop *ioloop; |
617 | |
|
618 | 0 | for (ioloop = current_ioloop; ioloop != NULL; ioloop = ioloop->prev) |
619 | 0 | io_loop_timeouts_update(ioloop, diff_usecs); |
620 | 0 | } |
621 | | |
622 | | static void ioloop_add_wait_time(struct ioloop *ioloop) |
623 | 0 | { |
624 | 0 | struct io_wait_timer *timer; |
625 | 0 | long long diff; |
626 | |
|
627 | 0 | diff = timeval_diff_usecs(&ioloop_timeval, &ioloop->wait_started); |
628 | 0 | if (diff < 0) { |
629 | | /* time moved backwards */ |
630 | 0 | diff = 0; |
631 | 0 | } |
632 | |
|
633 | 0 | ioloop->ioloop_wait_usecs += diff; |
634 | 0 | ioloop_global_wait_usecs += diff; |
635 | |
|
636 | 0 | for (timer = ioloop->wait_timers; timer != NULL; timer = timer->next) |
637 | 0 | timer->usecs += diff; |
638 | 0 | } |
639 | | |
/* Refresh ioloop_timeval, detect and compensate for clock jumps in
   either direction, then invoke all timeout callbacks whose next_run
   has been reached. */
static void io_loop_handle_timeouts_real(struct ioloop *ioloop)
{
	struct priorityq_item *item;
	struct timeval tv_old, tv, tv_call;
	long long diff_usecs;
	data_stack_frame_t t_id;

	tv_old = ioloop_timeval;
	i_gettimeofday(&ioloop_timeval);

	diff_usecs = timeval_diff_usecs(&ioloop_timeval, &tv_old);
	if (unlikely(diff_usecs < 0)) {
		/* time moved backwards */
		io_loops_timeouts_update(diff_usecs);
		ioloop->time_moved_callback(&tv_old, &ioloop_timeval);
		i_assert(ioloop == current_ioloop);
		/* the callback may have slept, so check the time again. */
		i_gettimeofday(&ioloop_timeval);
	} else {
		/* larger waits get a larger forward-jump detection
		   threshold; see the IOLOOP_TIME_MOVED_FORWARDS_MIN_USECS*
		   comments at the top of the file */
		int max_diff = diff_usecs < IOLOOP_TIME_MOVED_FORWARDS_MIN_USECS_LARGE ?
			IOLOOP_TIME_MOVED_FORWARDS_MIN_USECS :
			IOLOOP_TIME_MOVED_FORWARDS_MIN_USECS_LARGE;

		diff_usecs = timeval_diff_usecs(&ioloop->next_max_time,
						&ioloop_timeval);
		if (unlikely(-diff_usecs >= max_diff)) {
			io_loops_timeouts_update(-diff_usecs);
			/* time moved forward */
			ioloop->time_moved_callback(&ioloop->next_max_time,
						    &ioloop_timeval);
			i_assert(ioloop == current_ioloop);
		}
		ioloop_add_wait_time(ioloop);
	}

	ioloop_time = ioloop_timeval.tv_sec;
	tv_call = ioloop_timeval;

	while (ioloop->running &&
	       (item = priorityq_peek(ioloop->timeouts)) != NULL) {
		struct timeout *timeout = (struct timeout *)item;

		/* use tv_call to make sure we don't get to infinite loop in
		   case callbacks update ioloop_timeval. */
		if (timeout_get_wait_time(timeout, &tv, &tv_call, TRUE) > 0)
			break;

		if (timeout->one_shot) {
			/* remove timeout from queue */
			priorityq_remove(timeout->ioloop->timeouts, &timeout->item);
		} else {
			/* update timeout's next_run and reposition it in the queue */
			timeout_reset_timeval(timeout, &tv_call);
		}

		/* run the callback in its ioloop context, inside a named
		   data stack frame so t_pop() leaks can be attributed */
		if (timeout->ctx != NULL)
			io_loop_context_activate(timeout->ctx);
		t_id = t_push_named("ioloop timeout handler %p",
				    (void *)timeout->callback);
		timeout->callback(timeout->context);
		if (!t_pop(&t_id)) {
			i_panic("Leaked a t_pop() call in timeout handler %p",
				(void *)timeout->callback);
		}
		if (ioloop->cur_ctx != NULL)
			io_loop_context_deactivate(ioloop->cur_ctx);
		i_assert(ioloop == current_ioloop);
	}
}
709 | | |
/* Run expired timeouts inside a data stack frame, then at most once
   per second release unused data stack memory. */
void io_loop_handle_timeouts(struct ioloop *ioloop)
{
	T_BEGIN {
		io_loop_handle_timeouts_real(ioloop);
	} T_END;

	/* Free the unused memory in data stack once per second. This way if
	   the data stack has grown excessively large temporarily, it won't
	   permanently waste memory. And if the data stack grows back to the
	   same large size, re-allocating it once per second doesn't cause
	   performance problems. */
	if (data_stack_last_free_unused != ioloop_time) {
		if (data_stack_last_free_unused != 0)
			data_stack_free_unused();
		data_stack_last_free_unused = ioloop_time;
	}
}
727 | | |
/* Invoke a single io's callback: clear its pending state, activate its
   ioloop context, and run the callback inside a named data stack frame
   so t_pop() leaks can be attributed to it. */
void io_loop_call_io(struct io *io)
{
	struct ioloop *ioloop = io->ioloop;
	data_stack_frame_t t_id;

	if (io->pending) {
		i_assert(ioloop->io_pending_count > 0);
		ioloop->io_pending_count--;
		io->pending = FALSE;
	}

	if (io->ctx != NULL)
		io_loop_context_activate(io->ctx);
	t_id = t_push_named("ioloop handler %p",
			    (void *)io->callback);
	io->callback(io->context);
	if (!t_pop(&t_id)) {
		i_panic("Leaked a t_pop() call in I/O handler %p",
			(void *)io->callback);
	}
	if (ioloop->cur_ctx != NULL)
		io_loop_context_deactivate(ioloop->cur_ctx);
	i_assert(ioloop == current_ioloop);
}
752 | | |
753 | | void io_loop_run(struct ioloop *ioloop) |
754 | 0 | { |
755 | 0 | if (ioloop->handler_context == NULL) |
756 | 0 | io_loop_initialize_handler(ioloop); |
757 | |
|
758 | 0 | if (ioloop->cur_ctx != NULL) |
759 | 0 | io_loop_context_deactivate(ioloop->cur_ctx); |
760 | | |
761 | | /* recursive io_loop_run() isn't allowed for the same ioloop. |
762 | | it can break backends. */ |
763 | 0 | i_assert(!ioloop->iolooping); |
764 | 0 | ioloop->iolooping = TRUE; |
765 | |
|
766 | 0 | ioloop->running = TRUE; |
767 | 0 | while (ioloop->running) |
768 | 0 | io_loop_handler_run(ioloop); |
769 | 0 | ioloop->iolooping = FALSE; |
770 | 0 | } |
771 | | |
/* Run callbacks for all ios marked pending via io_set_pending().
   next_io_file is kept updated so that callbacks may remove ios during
   iteration; the outer loop restarts the scan from the list head until
   no pending ios remain. */
static void io_loop_call_pending(struct ioloop *ioloop)
{
	struct io_file *io;

	while (ioloop->io_pending_count > 0) {
		io = ioloop->io_files;
		do {
			ioloop->next_io_file = io->next;
			if (io->io.pending)
				io_loop_call_io(&io->io);
			/* stop early once everything pending was handled */
			if (ioloop->io_pending_count == 0)
				break;
			io = ioloop->next_io_file;
		} while (io != NULL);
	}
}
788 | | |
/* Run one iteration of the ioloop: activate newly added timeouts, wait
   in the platform backend, deliver pending io callbacks, and honor a
   delayed stop request. */
void io_loop_handler_run(struct ioloop *ioloop)
{
	i_assert(ioloop == current_ioloop);

	io_loop_timeouts_start_new(ioloop);
	/* remember when the wait began, for wait-time accounting */
	ioloop->wait_started = ioloop_timeval;
	io_loop_handler_run_internal(ioloop);
	io_loop_call_pending(ioloop);
	if (ioloop->stop_after_run_loop)
		io_loop_stop(ioloop);

	i_assert(ioloop == current_ioloop);
}
802 | | |
803 | | void io_loop_stop(struct ioloop *ioloop) |
804 | 0 | { |
805 | 0 | ioloop->running = FALSE; |
806 | 0 | ioloop->stop_after_run_loop = FALSE; |
807 | 0 | } |
808 | | |
809 | | void io_loop_stop_delayed(struct ioloop *ioloop) |
810 | 0 | { |
811 | 0 | ioloop->stop_after_run_loop = TRUE; |
812 | 0 | } |
813 | | |
814 | | void io_loop_set_running(struct ioloop *ioloop) |
815 | 0 | { |
816 | 0 | ioloop->running = TRUE; |
817 | 0 | } |
818 | | |
819 | | void io_loop_set_max_fd_count(struct ioloop *ioloop, unsigned int max_fds) |
820 | 0 | { |
821 | 0 | ioloop->max_fd_count = max_fds; |
822 | 0 | } |
823 | | |
824 | | bool io_loop_is_running(struct ioloop *ioloop) |
825 | 0 | { |
826 | 0 | return ioloop->running; |
827 | 0 | } |
828 | | |
829 | | void io_loop_time_refresh(void) |
830 | 0 | { |
831 | 0 | i_gettimeofday(&ioloop_timeval); |
832 | 0 | ioloop_time = ioloop_timeval.tv_sec; |
833 | 0 | } |
834 | | |
/* Allocate a new ioloop, refresh the global time, and push it on top
   of the ioloop stack as current_ioloop. */
struct ioloop *io_loop_create(void)
{
	struct ioloop *ioloop;

	/* check CORE_IO_LEAK once: it makes io/timeout leak reports panic
	   instead of warn */
	if (!panic_on_leak_set) {
		panic_on_leak_set = TRUE;
		panic_on_leak = getenv("CORE_IO_LEAK") != NULL;
	}

	/* initialize time */
	i_gettimeofday(&ioloop_timeval);
	ioloop_time = ioloop_timeval.tv_sec;

	ioloop = i_new(struct ioloop, 1);
	ioloop->timeouts = priorityq_init(timeout_cmp, 32);
	i_array_init(&ioloop->timeouts_new, 8);

	/* inherit the time-moved callback from the enclosing ioloop */
	ioloop->time_moved_callback = current_ioloop != NULL ?
		current_ioloop->time_moved_callback :
		io_loop_default_time_moved;

	ioloop->prev = current_ioloop;
	io_loop_set_current(ioloop);
	return ioloop;
}
860 | | |
861 | | void io_loop_destroy(struct ioloop **_ioloop) |
862 | 8.52k | { |
863 | 8.52k | struct ioloop *ioloop = *_ioloop; |
864 | 8.52k | struct timeout *to; |
865 | 8.52k | struct priorityq_item *item; |
866 | 8.52k | bool leaks = FALSE; |
867 | | |
868 | 8.52k | *_ioloop = NULL; |
869 | | |
870 | | /* ->prev won't work unless loops are destroyed in create order */ |
871 | 8.52k | i_assert(ioloop == current_ioloop); |
872 | 8.52k | if (array_is_created(&io_destroy_callbacks)) { |
873 | 8.52k | io_destroy_callback_t *callback; |
874 | 17.0k | array_foreach_elem(&io_destroy_callbacks, callback) T_BEGIN { |
875 | 17.0k | callback(current_ioloop); |
876 | 17.0k | } T_END; |
877 | 8.52k | } |
878 | | |
879 | 8.52k | io_loop_set_current(current_ioloop->prev); |
880 | | |
881 | 8.52k | if (ioloop->notify_handler_context != NULL) |
882 | 0 | io_loop_notify_handler_deinit(ioloop); |
883 | | |
884 | 8.52k | while (ioloop->io_files != NULL) { |
885 | 0 | struct io_file *io = ioloop->io_files; |
886 | 0 | struct io *_io = &io->io; |
887 | 0 | const char *error = t_strdup_printf( |
888 | 0 | "I/O leak: %p (%s:%u, fd %d)", |
889 | 0 | (void *)io->io.callback, |
890 | 0 | io->io.source_filename, |
891 | 0 | io->io.source_linenum, io->fd); |
892 | |
|
893 | 0 | if (panic_on_leak) |
894 | 0 | i_panic("%s", error); |
895 | 0 | else |
896 | 0 | i_warning("%s", error); |
897 | 0 | io_remove(&_io); |
898 | 0 | leaks = TRUE; |
899 | 0 | } |
900 | 8.52k | i_assert(ioloop->io_pending_count == 0); |
901 | | |
902 | 8.52k | array_foreach_elem(&ioloop->timeouts_new, to) { |
903 | 0 | const char *error = t_strdup_printf( |
904 | 0 | "Timeout leak: %p (%s:%u)", (void *)to->callback, |
905 | 0 | to->source_filename, |
906 | 0 | to->source_linenum); |
907 | |
|
908 | 0 | if (panic_on_leak) |
909 | 0 | i_panic("%s", error); |
910 | 0 | else |
911 | 0 | i_warning("%s", error); |
912 | 0 | timeout_free(to); |
913 | 0 | leaks = TRUE; |
914 | 0 | } |
915 | 8.52k | array_free(&ioloop->timeouts_new); |
916 | | |
917 | 8.52k | while ((item = priorityq_pop(ioloop->timeouts)) != NULL) { |
918 | 0 | struct timeout *to = (struct timeout *)item; |
919 | 0 | const char *error = t_strdup_printf( |
920 | 0 | "Timeout leak: %p (%s:%u)", (void *)to->callback, |
921 | 0 | to->source_filename, |
922 | 0 | to->source_linenum); |
923 | |
|
924 | 0 | if (panic_on_leak) |
925 | 0 | i_panic("%s", error); |
926 | 0 | else |
927 | 0 | i_warning("%s", error); |
928 | 0 | timeout_free(to); |
929 | 0 | leaks = TRUE; |
930 | 0 | } |
931 | 8.52k | priorityq_deinit(&ioloop->timeouts); |
932 | | |
933 | 8.52k | while (ioloop->wait_timers != NULL) { |
934 | 0 | struct io_wait_timer *timer = ioloop->wait_timers; |
935 | 0 | const char *error = t_strdup_printf( |
936 | 0 | "IO wait timer leak: %s:%u", |
937 | 0 | timer->source_filename, |
938 | 0 | timer->source_linenum); |
939 | |
|
940 | 0 | if (panic_on_leak) |
941 | 0 | i_panic("%s", error); |
942 | 0 | else |
943 | 0 | i_warning("%s", error); |
944 | 0 | io_wait_timer_remove(&timer); |
945 | 0 | leaks = TRUE; |
946 | 0 | } |
947 | | |
948 | 8.52k | if (leaks) { |
949 | 0 | const char *backtrace, *error; |
950 | 0 | if (backtrace_get(&backtrace, &error) == 0) |
951 | 0 | i_warning("Raw backtrace for leaks: %s", backtrace); |
952 | 0 | } |
953 | | |
954 | 8.52k | if (ioloop->handler_context != NULL) |
955 | 0 | io_loop_handler_deinit(ioloop); |
956 | 8.52k | if (ioloop->cur_ctx != NULL) |
957 | 0 | io_loop_context_unref(&ioloop->cur_ctx); |
958 | 8.52k | i_free(ioloop); |
959 | 8.52k | } |
960 | | |
/* Replace the callback that is invoked when the ioloop detects that time
   has moved backwards or unexpectedly far forwards. */
void io_loop_set_time_moved_callback(struct ioloop *ioloop,
				     io_loop_time_moved_callback_t *callback)
{
	ioloop->time_moved_callback = callback;
}
966 | | |
/* atexit handler: free the global ioloop-switch callback array. */
static void io_switch_callbacks_free(void)
{
	array_free(&io_switch_callbacks);
}
971 | | |
/* atexit handler: free the global ioloop-destroy callback array. */
static void io_destroy_callbacks_free(void)
{
	array_free(&io_destroy_callbacks);
}
976 | | |
977 | | void io_loop_set_current(struct ioloop *ioloop) |
978 | 17.0k | { |
979 | 17.0k | io_switch_callback_t *callback; |
980 | 17.0k | struct ioloop *prev_ioloop = current_ioloop; |
981 | | |
982 | 17.0k | if (ioloop == current_ioloop) |
983 | 0 | return; |
984 | | |
985 | 17.0k | current_ioloop = ioloop; |
986 | 17.0k | if (array_is_created(&io_switch_callbacks)) { |
987 | 34.1k | array_foreach_elem(&io_switch_callbacks, callback) T_BEGIN { |
988 | 34.1k | callback(prev_ioloop); |
989 | 34.1k | } T_END; |
990 | 17.0k | } |
991 | 17.0k | } |
992 | | |
993 | | struct ioloop *io_loop_get_root(void) |
994 | 0 | { |
995 | 0 | struct ioloop *ioloop = current_ioloop; |
996 | |
|
997 | 0 | while (ioloop->prev != NULL) |
998 | 0 | ioloop = ioloop->prev; |
999 | 0 | return ioloop; |
1000 | 0 | } |
1001 | | |
/* Register a callback to be called whenever the current ioloop changes.
   The callback array is created lazily on the first registration and
   freed at process exit. */
void io_loop_add_switch_callback(io_switch_callback_t *callback)
{
	if (!array_is_created(&io_switch_callbacks)) {
		i_array_init(&io_switch_callbacks, 4);
		lib_atexit_priority(io_switch_callbacks_free, LIB_ATEXIT_PRIORITY_LOW);
	}
	array_push_back(&io_switch_callbacks, &callback);
}
1010 | | |
/* Unregister a previously added switch callback. The callback must have
   been registered; otherwise this assert-crashes. */
void io_loop_remove_switch_callback(io_switch_callback_t *callback)
{
	unsigned int idx;

	if (!array_lsearch_ptr_idx(&io_switch_callbacks, callback, &idx))
		i_unreached();
	array_delete(&io_switch_callbacks, idx, 1);
}
1019 | | |
/* Register a callback to be called at the start of io_loop_destroy().
   The callback array is created lazily on the first registration and
   freed at process exit. */
void io_loop_add_destroy_callback(io_destroy_callback_t *callback)
{
	if (!array_is_created(&io_destroy_callbacks)) {
		i_array_init(&io_destroy_callbacks, 4);
		lib_atexit_priority(io_destroy_callbacks_free, LIB_ATEXIT_PRIORITY_LOW);
	}
	array_push_back(&io_destroy_callbacks, &callback);
}
1028 | | |
/* Unregister a previously added destroy callback. The callback must have
   been registered; otherwise this assert-crashes. */
void io_loop_remove_destroy_callback(io_destroy_callback_t *callback)
{
	unsigned int idx;

	if (!array_lsearch_ptr_idx(&io_destroy_callbacks, callback, &idx))
		i_unreached();
	array_delete(&io_destroy_callbacks, idx, 1);
}
1037 | | |
1038 | | struct ioloop_context *io_loop_context_new(struct ioloop *ioloop) |
1039 | 0 | { |
1040 | 0 | struct ioloop_context *ctx; |
1041 | |
|
1042 | 0 | ctx = i_new(struct ioloop_context, 1); |
1043 | 0 | ctx->refcount = 1; |
1044 | 0 | ctx->ioloop = ioloop; |
1045 | 0 | i_array_init(&ctx->callbacks, 4); |
1046 | 0 | return ctx; |
1047 | 0 | } |
1048 | | |
/* Add a reference to the context. */
void io_loop_context_ref(struct ioloop_context *ctx)
{
	i_assert(ctx->refcount > 0);

	ctx->refcount++;
}
1055 | | |
/* Drop a reference to the context and free it when the last reference is
   gone. Sets *_ctx to NULL. Must not be called on the ioloop's currently
   active context — cur_ctx holds its own reference. */
void io_loop_context_unref(struct ioloop_context **_ctx)
{
	struct ioloop_context *ctx = *_ctx;

	*_ctx = NULL;

	i_assert(ctx->refcount > 0);
	if (--ctx->refcount > 0)
		return;

	/* cur_ctx itself keeps a reference */
	i_assert(ctx->ioloop->cur_ctx != ctx);

	array_free(&ctx->callbacks);
	array_free(&ctx->global_event_stack);
	i_free(ctx);
}
1073 | | |
1074 | | #undef io_loop_context_add_callbacks |
1075 | | void io_loop_context_add_callbacks(struct ioloop_context *ctx, |
1076 | | io_callback_t *activate, |
1077 | | io_callback_t *deactivate, void *context) |
1078 | 0 | { |
1079 | 0 | struct ioloop_context_callback cb; |
1080 | |
|
1081 | 0 | i_zero(&cb); |
1082 | 0 | cb.activate = activate; |
1083 | 0 | cb.deactivate = deactivate; |
1084 | 0 | cb.context = context; |
1085 | |
|
1086 | 0 | array_push_back(&ctx->callbacks, &cb); |
1087 | 0 | } |
1088 | | |
1089 | | #undef io_loop_context_remove_callbacks |
1090 | | void io_loop_context_remove_callbacks(struct ioloop_context *ctx, |
1091 | | io_callback_t *activate, |
1092 | | io_callback_t *deactivate, void *context) |
1093 | 0 | { |
1094 | 0 | struct ioloop_context_callback *cb; |
1095 | |
|
1096 | 0 | array_foreach_modifiable(&ctx->callbacks, cb) { |
1097 | 0 | if (cb->context == context && |
1098 | 0 | cb->activate == activate && cb->deactivate == deactivate) { |
1099 | | /* simply mark it as deleted, since we could get |
1100 | | here from activate/deactivate loop */ |
1101 | 0 | cb->activate = NULL; |
1102 | 0 | cb->deactivate = NULL; |
1103 | 0 | cb->context = NULL; |
1104 | 0 | return; |
1105 | 0 | } |
1106 | 0 | } |
1107 | 0 | i_panic("io_loop_context_remove_callbacks() context not found"); |
1108 | 0 | } |
1109 | | |
1110 | | static void |
1111 | | io_loop_context_remove_deleted_callbacks(struct ioloop_context *ctx) |
1112 | 0 | { |
1113 | 0 | const struct ioloop_context_callback *cbs; |
1114 | 0 | unsigned int i, count; |
1115 | |
|
1116 | 0 | cbs = array_get(&ctx->callbacks, &count); |
1117 | 0 | for (i = 0; i < count; ) { |
1118 | 0 | if (cbs[i].activate != NULL) |
1119 | 0 | i++; |
1120 | 0 | else { |
1121 | 0 | array_delete(&ctx->callbacks, i, 1); |
1122 | 0 | cbs = array_get(&ctx->callbacks, &count); |
1123 | 0 | } |
1124 | 0 | } |
1125 | 0 | } |
1126 | | |
/* On context activation: remember the current global event as the root,
   then re-push the global events that were popped off when this context
   was last deactivated, restoring the original stacking order. */
static void io_loop_context_push_global_events(struct ioloop_context *ctx)
{
	struct event *const *events;
	unsigned int i, count;

	/* the global event active right now marks how far deactivation
	   should later rewind */
	ctx->root_global_event = event_get_global();

	if (!array_is_created(&ctx->global_event_stack))
		return;

	/* push the global events from stack in reverse order */
	events = array_get(&ctx->global_event_stack, &count);
	if (count == 0)
		return;

	/* Remember the oldest global event. We're going to pop until that
	   event when deactivating the context. */
	for (i = count; i > 0; i--)
		event_push_global(events[i-1]);
	array_clear(&ctx->global_event_stack);
}
1148 | | |
/* On context deactivation: pop all global events pushed since this context
   was activated, stashing them into global_event_stack so they can be
   re-pushed on the next activation. */
static void io_loop_context_pop_global_events(struct ioloop_context *ctx)
{
	struct event *event;

	/* ioloop context is always global, so we can't push one ioloop context
	   on top of another one. We'll need to rewind the global event stack
	   until we've reached the event that started this context. We'll push
	   these global events back when the ioloop context is activated
	   again. (We'll assert-crash if the root event is freed before these
	   global events have been popped.) */
	while ((event = event_get_global()) != ctx->root_global_event) {
		i_assert(event != NULL);
		if (!array_is_created(&ctx->global_event_stack))
			i_array_init(&ctx->global_event_stack, 4);
		array_push_back(&ctx->global_event_stack, &event);
		event_pop_global(event);
	}
	ctx->root_global_event = NULL;
}
1168 | | |
/* Make ctx the ioloop's current context: restore its stashed global
   events, take a reference (released at deactivation) and invoke all
   registered activate callbacks. The ioloop must not already have an
   active context. */
void io_loop_context_activate(struct ioloop_context *ctx)
{
	struct ioloop_context_callback *cb;

	i_assert(ctx->ioloop->cur_ctx == NULL);

	ctx->ioloop->cur_ctx = ctx;
	io_loop_context_push_global_events(ctx);
	io_loop_context_ref(ctx);
	array_foreach_modifiable(&ctx->callbacks, cb) {
		i_assert(!cb->activated);
		/* entries marked deleted have activate == NULL; they are
		   still flagged activated so deactivation stays symmetric */
		if (cb->activate != NULL) T_BEGIN {
			cb->activate(cb->context);
		} T_END;
		cb->activated = TRUE;
	}
}
1186 | | |
/* Deactivate the ioloop's current context: run deactivate callbacks for
   every entry that was activated, clear cur_ctx, stash the global events
   pushed during this activation, garbage-collect deleted callback entries
   and drop the reference taken at activation. */
void io_loop_context_deactivate(struct ioloop_context *ctx)
{
	struct ioloop_context_callback *cb;

	i_assert(ctx->ioloop->cur_ctx == ctx);

	array_foreach_modifiable(&ctx->callbacks, cb) {
		if (!cb->activated) {
			/* we just added this callback. don't deactivate it
			   before it gets first activated. */
		} else {
			if (cb->deactivate != NULL) T_BEGIN {
				cb->deactivate(cb->context);
			} T_END;
			cb->activated = FALSE;
		}
	}
	ctx->ioloop->cur_ctx = NULL;
	io_loop_context_pop_global_events(ctx);
	io_loop_context_remove_deleted_callbacks(ctx);
	io_loop_context_unref(&ctx);
}
1209 | | |
1210 | | void io_loop_context_switch(struct ioloop_context *ctx) |
1211 | 0 | { |
1212 | 0 | if (ctx->ioloop->cur_ctx != NULL) { |
1213 | 0 | if (ctx->ioloop->cur_ctx == ctx) |
1214 | 0 | return; |
1215 | 0 | io_loop_context_deactivate(ctx->ioloop->cur_ctx); |
1216 | | /* deactivation may remove the cur_ctx */ |
1217 | 0 | if (ctx->ioloop->cur_ctx != NULL) |
1218 | 0 | io_loop_context_unref(&ctx->ioloop->cur_ctx); |
1219 | 0 | } |
1220 | 0 | io_loop_context_activate(ctx); |
1221 | 0 | } |
1222 | | |
/* Returns the ioloop's currently active context, or NULL if none. */
struct ioloop_context *io_loop_get_current_context(struct ioloop *ioloop)
{
	return ioloop->cur_ctx;
}
1227 | | |
/* Move an io watch to another ioloop by re-adding it there with the same
   fd, condition and callback, then removing the old one. Returns the new
   io (or the old one unchanged if it already belongs to the target loop;
   NULL in, NULL out). Notify ios cannot be moved. */
struct io *io_loop_move_io_to(struct ioloop *ioloop, struct io **_io)
{
	struct io *old_io = *_io;
	struct io_file *old_io_file, *new_io_file;

	if (old_io == NULL)
		return NULL;

	i_assert((old_io->condition & IO_NOTIFY) == 0);

	if (old_io->ioloop == ioloop)
		return old_io;

	old_io_file = (struct io_file *)old_io;
	new_io_file = io_add_file(ioloop, old_io_file->fd,
				  old_io->condition, old_io->source_filename,
				  old_io->source_linenum,
				  old_io->callback, old_io->context);
	if (old_io_file->istream != NULL) {
		/* reference before io_remove() */
		new_io_file->istream = old_io_file->istream;
		i_stream_ref(new_io_file->istream);
	}
	/* carry over a pending state so the callback still fires */
	if (old_io->pending)
		io_set_pending(&new_io_file->io);
	io_remove(_io);
	if (new_io_file->istream != NULL) {
		/* update istream io after it was removed with io_remove() */
		i_stream_set_io(new_io_file->istream, &new_io_file->io);
	}
	return &new_io_file->io;
}
1260 | | |
/* Convenience wrapper: move an io watch to the current ioloop. */
struct io *io_loop_move_io(struct io **_io)
{
	return io_loop_move_io_to(current_ioloop, _io);
}
1265 | | |
1266 | | struct timeout *io_loop_move_timeout_to(struct ioloop *ioloop, |
1267 | | struct timeout **_timeout) |
1268 | 0 | { |
1269 | 0 | struct timeout *new_to, *old_to = *_timeout; |
1270 | |
|
1271 | 0 | if (old_to == NULL || old_to->ioloop == ioloop) |
1272 | 0 | return old_to; |
1273 | | |
1274 | 0 | new_to = timeout_copy(old_to, ioloop); |
1275 | 0 | timeout_remove(_timeout); |
1276 | 0 | return new_to; |
1277 | 0 | } |
1278 | | |
/* Convenience wrapper: move a timeout to the current ioloop. */
struct timeout *io_loop_move_timeout(struct timeout **_timeout)
{
	return io_loop_move_timeout_to(current_ioloop, _timeout);
}
1283 | | |
/* Returns TRUE if the ioloop has any file io watches registered. */
bool io_loop_have_ios(struct ioloop *ioloop)
{
	return ioloop->io_files != NULL;
}
1288 | | |
/* Returns TRUE if the ioloop has a timeout that should run right now
   (i.e. the computed wait time is zero). */
bool io_loop_have_immediate_timeouts(struct ioloop *ioloop)
{
	struct timeval tv;

	return io_loop_get_wait_time(ioloop, &tv) == 0;
}
1295 | | |
1296 | | bool io_loop_is_empty(struct ioloop *ioloop) |
1297 | 0 | { |
1298 | 0 | return ioloop->io_files == NULL && |
1299 | 0 | priorityq_count(ioloop->timeouts) == 0 && |
1300 | 0 | array_count(&ioloop->timeouts_new) == 0; |
1301 | 0 | } |
1302 | | |
/* Returns the total number of microseconds this ioloop has spent waiting
   (accumulated in ioloop_wait_usecs). */
uint64_t io_loop_get_wait_usecs(struct ioloop *ioloop)
{
	return ioloop->ioloop_wait_usecs;
}
1307 | | |
1308 | | enum io_condition io_loop_find_fd_conditions(struct ioloop *ioloop, int fd) |
1309 | 0 | { |
1310 | 0 | enum io_condition conditions = 0; |
1311 | 0 | struct io_file *io; |
1312 | |
|
1313 | 0 | i_assert(fd >= 0); |
1314 | | |
1315 | 0 | for (io = ioloop->io_files; io != NULL; io = io->next) { |
1316 | 0 | if (io->fd == fd) |
1317 | 0 | conditions |= io->io.condition; |
1318 | 0 | } |
1319 | 0 | return conditions; |
1320 | 0 | } |
1321 | | |
1322 | | #undef io_wait_timer_add_to |
1323 | | struct io_wait_timer * |
1324 | | io_wait_timer_add_to(struct ioloop *ioloop, const char *source_filename, |
1325 | | unsigned int source_linenum) |
1326 | 0 | { |
1327 | 0 | struct io_wait_timer *timer; |
1328 | |
|
1329 | 0 | timer = i_new(struct io_wait_timer, 1); |
1330 | 0 | timer->ioloop = ioloop; |
1331 | 0 | timer->source_filename = source_filename; |
1332 | 0 | timer->source_linenum = source_linenum; |
1333 | 0 | DLLIST_PREPEND(&ioloop->wait_timers, timer); |
1334 | 0 | return timer; |
1335 | 0 | } |
1336 | | |
#undef io_wait_timer_add
/* Convenience wrapper: create a wait timer on the current ioloop. */
struct io_wait_timer *
io_wait_timer_add(const char *source_filename, unsigned int source_linenum)
{
	return io_wait_timer_add_to(current_ioloop, source_filename,
				    source_linenum);
}
1344 | | |
1345 | | struct io_wait_timer *io_wait_timer_move_to(struct io_wait_timer **_timer, |
1346 | | struct ioloop *ioloop) |
1347 | 0 | { |
1348 | 0 | struct io_wait_timer *timer = *_timer; |
1349 | |
|
1350 | 0 | *_timer = NULL; |
1351 | 0 | DLLIST_REMOVE(&timer->ioloop->wait_timers, timer); |
1352 | 0 | DLLIST_PREPEND(&ioloop->wait_timers, timer); |
1353 | 0 | timer->ioloop = ioloop; |
1354 | 0 | return timer; |
1355 | 0 | } |
1356 | | |
/* Convenience wrapper: move a wait timer to the current ioloop. */
struct io_wait_timer *io_wait_timer_move(struct io_wait_timer **_timer)
{
	return io_wait_timer_move_to(_timer, current_ioloop);
}
1361 | | |
/* Unlink a wait timer from its ioloop and free it. Sets *_timer to NULL. */
void io_wait_timer_remove(struct io_wait_timer **_timer)
{
	struct io_wait_timer *timer = *_timer;

	*_timer = NULL;
	DLLIST_REMOVE(&timer->ioloop->wait_timers, timer);
	i_free(timer);
}
1370 | | |
/* Returns the microseconds accumulated by this wait timer. */
uint64_t io_wait_timer_get_usecs(struct io_wait_timer *timer)
{
	return timer->usecs;
}
1375 | | |
1376 | | struct event *io_loop_get_active_global_root(void) |
1377 | 0 | { |
1378 | 0 | if (current_ioloop == NULL) |
1379 | 0 | return NULL; |
1380 | 0 | if (current_ioloop->cur_ctx == NULL) |
1381 | 0 | return NULL; |
1382 | 0 | return current_ioloop->cur_ctx->root_global_event; |
1383 | 0 | } |