/src/samba/lib/tevent/tevent_timed.c
Line | Count | Source |
1 | | /* |
2 | | Unix SMB/CIFS implementation. |
3 | | |
4 | | common events code for timed events |
5 | | |
6 | | Copyright (C) Andrew Tridgell 2003-2006 |
7 | | Copyright (C) Stefan Metzmacher 2005-2009 |
8 | | |
9 | | ** NOTE! The following LGPL license applies to the tevent |
10 | | ** library. This does NOT imply that all of Samba is released |
11 | | ** under the LGPL |
12 | | |
13 | | This library is free software; you can redistribute it and/or |
14 | | modify it under the terms of the GNU Lesser General Public |
15 | | License as published by the Free Software Foundation; either |
16 | | version 3 of the License, or (at your option) any later version. |
17 | | |
18 | | This library is distributed in the hope that it will be useful, |
19 | | but WITHOUT ANY WARRANTY; without even the implied warranty of |
20 | | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
21 | | Lesser General Public License for more details. |
22 | | |
23 | | You should have received a copy of the GNU Lesser General Public |
24 | | License along with this library; if not, see <http://www.gnu.org/licenses/>. |
25 | | */ |
26 | | |
27 | | #include "replace.h" |
28 | | #include "system/time.h" |
29 | | #define TEVENT_DEPRECATED 1 |
30 | | #include "tevent.h" |
31 | | #include "tevent_internal.h" |
32 | | #include "tevent_util.h" |
33 | | |
/**
  three-way comparison of two timevals, seconds first then microseconds

  Returns -1 when *tv1 sorts before *tv2,
           1 when *tv1 sorts after *tv2,
           0 when they are equal.
*/
int tevent_timeval_compare(const struct timeval *tv1, const struct timeval *tv2)
{
	if (tv1->tv_sec != tv2->tv_sec) {
		return (tv1->tv_sec > tv2->tv_sec) ? 1 : -1;
	}
	if (tv1->tv_usec != tv2->tv_usec) {
		return (tv1->tv_usec > tv2->tv_usec) ? 1 : -1;
	}
	return 0;
}
48 | | |
/**
  return a timeval with both fields set to zero
*/
struct timeval tevent_timeval_zero(void)
{
	return (struct timeval) { .tv_sec = 0, .tv_usec = 0 };
}
59 | | |
/**
  return a timeval holding the wall-clock time of day right now
*/
struct timeval tevent_timeval_current(void)
{
	struct timeval now;

	gettimeofday(&now, NULL);

	return now;
}
69 | | |
/**
  build a timeval from explicit second and microsecond values
*/
struct timeval tevent_timeval_set(uint32_t secs, uint32_t usecs)
{
	struct timeval tv = {
		.tv_sec  = secs,
		.tv_usec = usecs,
	};

	return tv;
}
80 | | |
/**
  return the time remaining from *tv1 until *tv2 as a timeval
  (i.e. *tv2 - *tv1); if *tv1 is at or past *tv2, return zero
*/
struct timeval tevent_timeval_until(const struct timeval *tv1,
				    const struct timeval *tv2)
{
	struct timeval diff = { 0, 0 };

	/* tv1 already at or beyond tv2: nothing left to wait for */
	if (tv1->tv_sec > tv2->tv_sec ||
	    (tv1->tv_sec == tv2->tv_sec && tv1->tv_usec >= tv2->tv_usec)) {
		return diff;
	}

	diff.tv_sec = tv2->tv_sec - tv1->tv_sec;
	if (tv2->tv_usec >= tv1->tv_usec) {
		diff.tv_usec = tv2->tv_usec - tv1->tv_usec;
	} else {
		/* borrow one second for the microsecond subtraction */
		diff.tv_sec -= 1;
		diff.tv_usec = 1000000 - (tv1->tv_usec - tv2->tv_usec);
	}

	return diff;
}
102 | | |
/**
  return true if both fields of the timeval are zero
*/
bool tevent_timeval_is_zero(const struct timeval *tv)
{
	if (tv->tv_sec != 0) {
		return false;
	}
	return tv->tv_usec == 0;
}
110 | | |
/**
  return *tv advanced by the given seconds and microseconds,
  with the microsecond field normalized into [0, 1000000)
*/
struct timeval tevent_timeval_add(const struct timeval *tv, uint32_t secs,
				  uint32_t usecs)
{
	struct timeval sum = *tv;

	sum.tv_sec += secs;
	sum.tv_usec += usecs;
	/* fold any microsecond overflow into the seconds field */
	sum.tv_sec += sum.tv_usec / 1000000;
	sum.tv_usec %= 1000000;

	return sum;
}
122 | | |
/**
  return a timeval that lies the given offset in the future,
  relative to the current wall-clock time
*/
struct timeval tevent_timeval_current_ofs(uint32_t secs, uint32_t usecs)
{
	struct timeval tv;

	gettimeofday(&tv, NULL);
	tv.tv_sec += secs;
	tv.tv_usec += usecs;
	/* normalize microsecond overflow into seconds */
	tv.tv_sec += tv.tv_usec / 1000000;
	tv.tv_usec %= 1000000;

	return tv;
}
131 | | |
/*
  destroy a timed event

  talloc destructor: unlinks the timer from its event context's list
  and keeps the last_zero_timer cache consistent; returns -1 (refusing
  the free) while the handler is still running (te->busy)
*/
static int tevent_common_timed_destructor(struct tevent_timer *te)
{
	/* second destructor call on the same timer: report the double free */
	if (te->destroyed) {
		tevent_common_check_double_free(te, "tevent_timer double free");
		goto done;
	}
	te->destroyed = true;

	/* already detached from its context: nothing left to unlink */
	if (te->event_ctx == NULL) {
		return 0;
	}

	TEVENT_DEBUG(te->event_ctx, TEVENT_DEBUG_TRACE,
		     "Destroying timer event %p \"%s\"\n",
		     te, te->handler_name);

	/* keep the cached zero-timer pointer valid across the unlink */
	if (te->event_ctx->last_zero_timer == te) {
		te->event_ctx->last_zero_timer = DLIST_PREV(te);
	}

	tevent_trace_timer_callback(te->event_ctx, te, TEVENT_EVENT_TRACE_DETACH);
	DLIST_REMOVE(te->event_ctx->timer_events, te);

	te->event_ctx = NULL;
done:
	/* handler currently running: -1 tells talloc to abort the free */
	if (te->busy) {
		return -1;
	}
	te->wrapper = NULL;

	return 0;
}
167 | | |
/*
  insert a timer into ev->timer_events, keeping the list sorted by
  expiry time (earliest first); with optimize_zero, zero-timeval
  timers are appended after the cached ev->last_zero_timer instead
  of being searched for
*/
static void tevent_common_insert_timer(struct tevent_context *ev,
				       struct tevent_timer *te,
				       bool optimize_zero)
{
	struct tevent_timer *prev_te = NULL;

	/* inserting an already-destroyed timer is a caller bug */
	if (te->destroyed) {
		tevent_abort(ev, "tevent_timer use after free");
		return;
	}

	/* keep the list ordered */
	if (optimize_zero && tevent_timeval_is_zero(&te->next_event)) {
		/*
		 * Some callers use zero tevent_timer
		 * instead of tevent_immediate events.
		 *
		 * As these can happen very often,
		 * we remember the last zero timer
		 * in the list.
		 */
		prev_te = ev->last_zero_timer;
		ev->last_zero_timer = te;
	} else {
		struct tevent_timer *cur_te;

		/*
		 * we traverse the list from the tail
		 * because it's much more likely that
		 * timers are added at the end of the list
		 */
		for (cur_te = DLIST_TAIL(ev->timer_events);
		     cur_te != NULL;
		     cur_te = DLIST_PREV(cur_te))
		{
			int ret;

			/*
			 * if the new event comes before the current
			 * we continue searching
			 */
			ret = tevent_timeval_compare(&te->next_event,
						     &cur_te->next_event);
			if (ret < 0) {
				continue;
			}

			break;
		}

		/* NULL here means te sorts before every existing timer */
		prev_te = cur_te;
	}

	tevent_trace_timer_callback(te->event_ctx, te, TEVENT_EVENT_TRACE_ATTACH);
	DLIST_ADD_AFTER(ev->timer_events, te, prev_te);
}
224 | | |
/*
  add a timed event
  return NULL on failure (memory allocation error)

  ev            - event context the timer is attached to
  mem_ctx       - talloc parent for the new timer (ev is used when NULL)
  next_event    - absolute expiry time
  handler       - callback invoked when next_event is reached
  private_data  - opaque pointer handed back to the handler
  handler_name,
  location      - debug strings recorded with the event
  optimize_zero - when true, zero-timeval timers use the
                  ev->last_zero_timer fast path on insert
*/
static struct tevent_timer *tevent_common_add_timer_internal(
					struct tevent_context *ev,
					TALLOC_CTX *mem_ctx,
					struct timeval next_event,
					tevent_timer_handler_t handler,
					void *private_data,
					const char *handler_name,
					const char *location,
					bool optimize_zero)
{
	struct tevent_timer *te;

	te = talloc(mem_ctx?mem_ctx:ev, struct tevent_timer);
	if (te == NULL) return NULL;

	*te = (struct tevent_timer) {
		.event_ctx	= ev,
		.next_event	= next_event,
		.handler	= handler,
		.private_data	= private_data,
		.handler_name	= handler_name,
		.location	= location,
	};

	/* an empty list cannot have a cached zero timer */
	if (ev->timer_events == NULL) {
		ev->last_zero_timer = NULL;
	}

	tevent_common_insert_timer(ev, te, optimize_zero);

	/* unlink the timer from the list automatically when it is freed */
	talloc_set_destructor(te, tevent_common_timed_destructor);

	TEVENT_DEBUG(ev, TEVENT_DEBUG_TRACE,
		     "Added timed event \"%s\": %p\n",
		     handler_name, te);
	return te;
}
267 | | |
268 | | struct tevent_timer *tevent_common_add_timer(struct tevent_context *ev, |
269 | | TALLOC_CTX *mem_ctx, |
270 | | struct timeval next_event, |
271 | | tevent_timer_handler_t handler, |
272 | | void *private_data, |
273 | | const char *handler_name, |
274 | | const char *location) |
275 | 0 | { |
276 | | /* |
277 | | * do not use optimization, there are broken Samba |
278 | | * versions which use tevent_common_add_timer() |
279 | | * without using tevent_common_loop_timer_delay(), |
280 | | * it just uses DLIST_REMOVE(ev->timer_events, te) |
281 | | * and would leave ev->last_zero_timer behind. |
282 | | */ |
283 | 0 | return tevent_common_add_timer_internal(ev, mem_ctx, next_event, |
284 | 0 | handler, private_data, |
285 | 0 | handler_name, location, |
286 | 0 | false); |
287 | 0 | } |
288 | | |
289 | | struct tevent_timer *tevent_common_add_timer_v2(struct tevent_context *ev, |
290 | | TALLOC_CTX *mem_ctx, |
291 | | struct timeval next_event, |
292 | | tevent_timer_handler_t handler, |
293 | | void *private_data, |
294 | | const char *handler_name, |
295 | | const char *location) |
296 | 0 | { |
297 | | /* |
298 | | * Here we turn on last_zero_timer optimization |
299 | | */ |
300 | 0 | return tevent_common_add_timer_internal(ev, mem_ctx, next_event, |
301 | 0 | handler, private_data, |
302 | 0 | handler_name, location, |
303 | 0 | true); |
304 | 0 | } |
305 | | |
/*
  re-arm an existing timer with a new expiry time: unlink it from the
  ordered list and re-insert it at the position matching next_event
*/
void tevent_update_timer(struct tevent_timer *te, struct timeval next_event)
{
	struct tevent_context *ev = te->event_ctx;

	/* keep the cached zero-timer pointer valid across the unlink */
	if (ev->last_zero_timer == te) {
		te->event_ctx->last_zero_timer = DLIST_PREV(te);
	}
	tevent_trace_timer_callback(te->event_ctx, te, TEVENT_EVENT_TRACE_DETACH);
	DLIST_REMOVE(ev->timer_events, te);

	te->next_event = next_event;

	/*
	 * Not doing the zero_timer optimization. This is for new code
	 * that should know about immediates.
	 */
	tevent_common_insert_timer(ev, te, false);
}
324 | | |
/*
  run the handler of a timer event and free the event afterwards
  (timers are one-shot)

  te           - the timer to fire
  current_time - passed through to the handler; may be a zero timeval
                 for timers registered with a zero expiry, to avoid a
                 gettimeofday() call
  removed      - if non-NULL, set to true when te was unlinked and
                 freed, false when te was already detached

  returns 0
*/
int tevent_common_invoke_timer_handler(struct tevent_timer *te,
				       struct timeval current_time,
				       bool *removed)
{
	struct tevent_context *handler_ev = te->event_ctx;

	if (removed != NULL) {
		*removed = false;
	}

	/* already detached from its context: nothing to run */
	if (te->event_ctx == NULL) {
		return 0;
	}

	/*
	 * We need to remove the timer from the list before calling the
	 * handler because in a semi-async inner event loop called from the
	 * handler we don't want to come across this event again -- vl
	 */
	if (te->event_ctx->last_zero_timer == te) {
		/* keep the cached zero-timer pointer valid */
		te->event_ctx->last_zero_timer = DLIST_PREV(te);
	}
	DLIST_REMOVE(te->event_ctx->timer_events, te);

	TEVENT_DEBUG(te->event_ctx, TEVENT_DEBUG_TRACE,
		     "Running timer event %p \"%s\"\n",
		     te, te->handler_name);

	/*
	 * If the timed event was registered for a zero current_time,
	 * then we pass a zero timeval here too! To avoid the
	 * overhead of gettimeofday() calls.
	 *
	 * otherwise we pass the current time
	 */
	te->busy = true;	/* makes the destructor refuse a free while we run */
	if (te->wrapper != NULL) {
		/* wrapped timers run the handler under the wrapper's context */
		handler_ev = te->wrapper->wrap_ev;

		tevent_wrapper_push_use_internal(handler_ev, te->wrapper);
		te->wrapper->ops->before_timer_handler(
					te->wrapper->wrap_ev,
					te->wrapper->private_state,
					te->wrapper->main_ev,
					te,
					te->next_event,
					current_time,
					te->handler_name,
					te->location);
	}
	tevent_trace_timer_callback(te->event_ctx, te, TEVENT_EVENT_TRACE_BEFORE_HANDLER);
	te->handler(handler_ev, te, current_time, te->private_data);
	if (te->wrapper != NULL) {
		te->wrapper->ops->after_timer_handler(
					te->wrapper->wrap_ev,
					te->wrapper->private_state,
					te->wrapper->main_ev,
					te,
					te->next_event,
					current_time,
					te->handler_name,
					te->location);
		tevent_wrapper_pop_use_internal(handler_ev, te->wrapper);
	}
	te->busy = false;

	TEVENT_DEBUG(te->event_ctx, TEVENT_DEBUG_TRACE,
		     "Ending timer event %p \"%s\"\n",
		     te, te->handler_name);

	/* The callback was already called when freed from the handler. */
	if (!te->destroyed) {
		tevent_trace_timer_callback(te->event_ctx, te, TEVENT_EVENT_TRACE_DETACH);
	}

	/* detach, drop the destructor, and free the one-shot timer */
	te->wrapper = NULL;
	te->event_ctx = NULL;
	talloc_set_destructor(te, NULL);
	TALLOC_FREE(te);

	if (removed != NULL) {
		*removed = true;
	}

	return 0;
}
/*
  do a single event loop using the events defined in ev

  return the delay until the next timed event,
  or zero if a timed event was triggered
  (the list is kept sorted, so the head is the earliest timer)
*/
struct timeval tevent_common_loop_timer_delay(struct tevent_context *ev)
{
	struct timeval current_time = tevent_timeval_zero();
	struct tevent_timer *te = ev->timer_events;
	int ret;

	/* no pending timers: fall back to the context's default timeout */
	if (!te) {
		return ev->wait_timeout;
	}

	/*
	 * work out the right timeout for the next timed event
	 *
	 * avoid the syscall to gettimeofday() if the timed event should
	 * be triggered directly
	 *
	 * if there's a delay till the next timed event, we're done
	 * with just returning the delay
	 */
	if (!tevent_timeval_is_zero(&te->next_event)) {
		struct timeval delay;

		current_time = tevent_timeval_current();

		delay = tevent_timeval_until(&current_time, &te->next_event);
		if (!tevent_timeval_is_zero(&delay)) {
			/* the configured wait_timeout caps the delay */
			if (tevent_common_no_timeout(&ev->wait_timeout)) {
				return ev->wait_timeout;
			}
			return delay;
		}
	}

	/*
	 * ok, we have a timed event that we'll process ...
	 */
	ret = tevent_common_invoke_timer_handler(te, current_time, NULL);
	if (ret != 0) {
		tevent_abort(ev, "tevent_common_invoke_timer_handler() failed");
	}

	return tevent_timeval_zero();
}
460 | | |
461 | | void tevent_timer_set_tag(struct tevent_timer *te, uint64_t tag) |
462 | 0 | { |
463 | 0 | if (te == NULL) { |
464 | 0 | return; |
465 | 0 | } |
466 | | |
467 | 0 | te->tag = tag; |
468 | 0 | } |
469 | | |
470 | | uint64_t tevent_timer_get_tag(const struct tevent_timer *te) |
471 | 0 | { |
472 | 0 | if (te == NULL) { |
473 | 0 | return 0; |
474 | 0 | } |
475 | | |
476 | 0 | return te->tag; |
477 | 0 | } |