/src/dovecot/src/lib/lib-event.c
Line | Count | Source |
1 | | /* Copyright (c) 2017-2018 Dovecot authors, see the included COPYING file */ |
2 | | |
3 | | #include "lib.h" |
4 | | #include "lib-event-private.h" |
5 | | #include "event-filter.h" |
6 | | #include "array.h" |
7 | | #include "hash.h" |
8 | | #include "llist.h" |
9 | | #include "time-util.h" |
10 | | #include "str.h" |
11 | | #include "strescape.h" |
12 | | #include "ioloop-private.h" |
13 | | |
14 | | #include <ctype.h> |
15 | | |
16 | | HASH_TABLE_DEFINE_TYPE(category_set, void *, const struct event_category *); |
17 | | |
18 | | enum event_code { |
19 | | EVENT_CODE_ALWAYS_LOG_SOURCE = 'a', |
20 | | EVENT_CODE_CATEGORY = 'c', |
21 | | EVENT_CODE_TV_LAST_SENT = 'l', |
22 | | EVENT_CODE_SENDING_NAME = 'n', |
23 | | EVENT_CODE_SOURCE = 's', |
24 | | |
25 | | EVENT_CODE_FIELD_INTMAX = 'I', |
26 | | EVENT_CODE_FIELD_STR = 'S', |
27 | | EVENT_CODE_FIELD_TIMEVAL = 'T', |
28 | | EVENT_CODE_FIELD_IP = 'P', |
29 | | EVENT_CODE_FIELD_STRLIST = 'L', |
30 | | }; |
31 | | |
32 | | /* Internal event category state. |
33 | | |
34 | | Each (unique) event category maps to one internal category. (I.e., if |
35 | | two places attempt to register the same category, they will share the |
36 | | internal state.) |
37 | | |
38 | | This is required in order to support multiple registrations of the same |
39 | | category. Currently, the only situation in which this occurs is the |
40 | | stats process receiving categories from other processes and also using |
41 | | the same categories internally. |
42 | | |
43 | | During registration, we look up the internal state based on the new |
44 | | category's name. If found, we use it after sanity checking that the two |
45 | | are identical (i.e., they both have the same name and parent). If not |
46 | | found, we allocate a new internal state and use it. |
47 | | |
48 | | We stash a pointer to the internal state in struct event_category (the |
49 | | "internal" member). As a result, all category structs for the same |
50 | | category point to the same internal state. */ |
51 | | struct event_internal_category { |
52 | | /* More than one category can be represented by the internal state. |
53 | | To give consumers a unique but consistent category pointer, we |
54 | | return a pointer to this 'representative' category structure. |
55 | | Because we allocated it, we know that it will live exactly as |
56 | | long as we need it to. */ |
57 | | struct event_category representative; |
58 | | |
59 | | struct event_internal_category *parent; |
60 | | char *name; |
61 | | int refcount; |
62 | | }; |
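A minimal usage sketch of the registration flow described above (not part of lib-event.c; the category names are made up). Declaring the same category name in two places ends up sharing one struct event_internal_category:

	static struct event_category example_parent_category = {
		.name = "example",
	};
	static struct event_category example_child_category = {
		.name = "example-child",
		.parent = &example_parent_category,
	};

	/* The first event_add_category() call registers the category (and
	   its parent) and fills in the shared ->internal pointer. */
	static void example_attach(struct event *event)
	{
		event_add_category(event, &example_child_category);
	}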
63 | | |
64 | | struct event_reason { |
65 | | struct event *event; |
66 | | }; |
67 | | |
68 | | struct event_category_iterator { |
69 | | HASH_TABLE_TYPE(category_set) hash; |
70 | | struct hash_iterate_context *iter; |
71 | | }; |
72 | | |
73 | | extern struct event_passthrough event_passthrough_vfuncs; |
74 | | |
75 | | static struct event *events = NULL; |
76 | | static struct event *current_global_event = NULL; |
77 | | static struct event *event_last_passthrough = NULL; |
78 | | static ARRAY(event_callback_t *) event_handlers; |
79 | | static ARRAY(event_category_callback_t *) event_category_callbacks; |
80 | | static ARRAY(struct event_internal_category *) event_registered_categories_internal; |
81 | | static ARRAY(struct event_category *) event_registered_categories_representative; |
82 | | static ARRAY(struct event *) global_event_stack; |
83 | | static uint64_t event_id_counter = 0; |
84 | | |
85 | | static void get_self_rusage(struct rusage *ru_r) |
86 | 0 | { |
87 | 0 | if (getrusage(RUSAGE_SELF, ru_r) < 0) |
88 | 0 | i_fatal("getrusage() failed: %m"); |
89 | 0 | } |
90 | | |
91 | | static struct event * |
92 | | event_create_internal(struct event *parent, const char *source_filename, |
93 | | unsigned int source_linenum); |
94 | | static struct event_internal_category * |
95 | | event_category_find_internal(const char *name); |
96 | | |
97 | | static struct event *last_passthrough_event(void) |
98 | 0 | { |
99 | 0 | i_assert(event_last_passthrough != NULL); |
100 | 0 | return event_last_passthrough; |
101 | 0 | } |
102 | | |
103 | | static void event_copy_parent_defaults(struct event *event, |
104 | | const struct event *parent) |
105 | 0 | { |
106 | 0 | event->always_log_source = parent->always_log_source; |
107 | 0 | event->passthrough = parent->passthrough; |
108 | 0 | event->min_log_level = parent->min_log_level; |
109 | 0 | event->forced_debug = parent->forced_debug; |
110 | 0 | event->forced_never_debug = parent->forced_never_debug; |
111 | 0 | event->disable_callbacks = parent->disable_callbacks; |
112 | 0 | } |
113 | | |
114 | | static bool |
115 | | event_find_category(const struct event *event, |
116 | | const struct event_category *category); |
117 | | |
118 | | static void event_set_changed(struct event *event) |
119 | 45 | { |
120 | 45 | event->change_id++; |
121 | | /* It's unlikely that change_id will ever wrap, but let's be safe |
122 | | anyway. */ |
123 | 45 | if (event->change_id == 0 || |
124 | 45 | event->change_id == event->sent_to_stats_id) |
125 | 0 | event->change_id++; |
126 | 45 | } |
127 | | |
128 | | static bool |
129 | | event_call_callbacks(struct event *event, enum event_callback_type type, |
130 | | struct failure_context *ctx, const char *fmt, va_list args) |
131 | 1 | { |
132 | 1 | event_callback_t *callback; |
133 | | |
134 | 1 | if (event->disable_callbacks) |
135 | 0 | return TRUE; |
136 | 1 | if (!array_is_created(&event_handlers)) |
137 | 0 | return TRUE; |
138 | | |
139 | 1 | array_foreach_elem(&event_handlers, callback) { |
140 | 0 | bool ret; |
141 | |
142 | 0 | T_BEGIN { |
143 | 0 | ret = callback(event, type, ctx, fmt, args); |
144 | 0 | } T_END; |
145 | 0 | if (!ret) { |
146 | | /* event sending was stopped */ |
147 | 0 | return FALSE; |
148 | 0 | } |
149 | 0 | } |
150 | 1 | return TRUE; |
151 | 1 | } |
152 | | |
153 | | static void |
154 | | event_call_callbacks_noargs(struct event *event, |
155 | | enum event_callback_type type, ...) |
156 | 1 | { |
157 | 1 | va_list args; |
158 | | |
159 | | /* the args are empty and not used for anything, but there doesn't seem |
160 | | to be any nice and standard way of passing an initialized va_list |
161 | | as a parameter without va_start(). */ |
162 | 1 | va_start(args, type); |
163 | 1 | (void)event_call_callbacks(event, type, NULL, NULL, args); |
164 | 1 | va_end(args); |
165 | 1 | } |
166 | | |
167 | | void event_copy_categories(struct event *to, struct event *from) |
168 | 0 | { |
169 | 0 | unsigned int cat_count; |
170 | 0 | struct event_category *const *categories = |
171 | 0 | event_get_categories(from, &cat_count); |
172 | 0 | for (unsigned int i = 1; i <= cat_count; i++) |
173 | 0 | event_add_category(to, categories[cat_count-i]); |
174 | 0 | } |
175 | | |
176 | | void event_copy_fields(struct event *to, struct event *from) |
177 | 0 | { |
178 | 0 | const struct event_field *fld; |
179 | 0 | unsigned int count; |
180 | 0 | const char *const *values; |
181 | |
182 | 0 | if (!array_is_created(&from->fields)) |
183 | 0 | return; |
184 | 0 | array_foreach(&from->fields, fld) { |
185 | 0 | switch (fld->value_type) { |
186 | 0 | case EVENT_FIELD_VALUE_TYPE_STR: |
187 | 0 | event_add_str(to, fld->key, fld->value.str); |
188 | 0 | break; |
189 | 0 | case EVENT_FIELD_VALUE_TYPE_INTMAX: |
190 | 0 | event_add_int(to, fld->key, fld->value.intmax); |
191 | 0 | break; |
192 | 0 | case EVENT_FIELD_VALUE_TYPE_TIMEVAL: |
193 | 0 | event_add_timeval(to, fld->key, &fld->value.timeval); |
194 | 0 | break; |
195 | 0 | case EVENT_FIELD_VALUE_TYPE_IP: |
196 | 0 | event_add_ip(to, fld->key, &fld->value.ip); |
197 | 0 | break; |
198 | 0 | case EVENT_FIELD_VALUE_TYPE_STRLIST: |
199 | 0 | values = array_get(&fld->value.strlist, &count); |
200 | 0 | for (unsigned int i = 0; i < count; i++) |
201 | 0 | event_strlist_append(to, fld->key, values[i]); |
202 | 0 | break; |
203 | 0 | default: |
204 | 0 | break; |
205 | 0 | } |
206 | 0 | } |
207 | 0 | } |
208 | | |
209 | | bool event_has_all_categories(struct event *event, const struct event *other) |
210 | 0 | { |
211 | 0 | struct event_category **cat; |
212 | 0 | if (!array_is_created(&other->categories)) |
213 | 0 | return TRUE; |
214 | 0 | if (!array_is_created(&event->categories)) |
215 | 0 | return FALSE; |
216 | 0 | array_foreach_modifiable(&other->categories, cat) { |
217 | 0 | if (!event_find_category(event, *cat)) |
218 | 0 | return FALSE; |
219 | 0 | } |
220 | 0 | return TRUE; |
221 | 0 | } |
222 | | |
223 | | bool event_has_all_fields(struct event *event, const struct event *other) |
224 | 0 | { |
225 | 0 | struct event_field *fld; |
226 | 0 | if (!array_is_created(&other->fields)) |
227 | 0 | return TRUE; |
228 | 0 | array_foreach_modifiable(&other->fields, fld) { |
229 | 0 | if (event_find_field_nonrecursive(event, fld->key) == NULL) |
230 | 0 | return FALSE; |
231 | 0 | } |
232 | 0 | return TRUE; |
233 | 0 | } |
234 | | |
235 | | struct event *event_dup(const struct event *source) |
236 | 0 | { |
237 | 0 | struct event *ret = |
238 | 0 | event_create_internal(source->parent, source->source_filename, |
239 | 0 | source->source_linenum); |
240 | 0 | string_t *str = t_str_new(256); |
241 | 0 | const char *err; |
242 | 0 | event_export(source, str); |
243 | 0 | if (!event_import(ret, str_c(str), &err)) |
244 | 0 | i_panic("event_import(%s) failed: %s", str_c(str), err); |
245 | 0 | ret->tv_created_ioloop = source->tv_created_ioloop; |
246 | 0 | return ret; |
247 | 0 | } |
248 | | |
249 | | /* |
250 | | * Copy the source's categories and fields recursively. |
251 | | * |
252 | | * We recurse to the parent before copying this event's data because we may |
253 | | * be overriding a field. |
254 | | */ |
255 | | static void event_flatten_recurse(struct event *dst, struct event *src, |
256 | | struct event *limit) |
257 | 0 | { |
258 | 0 | if (src->parent != limit) |
259 | 0 | event_flatten_recurse(dst, src->parent, limit); |
260 | |
261 | 0 | event_copy_categories(dst, src); |
262 | 0 | event_copy_fields(dst, src); |
263 | 0 | } |
264 | | |
265 | | struct event *event_flatten(struct event *src) |
266 | 0 | { |
267 | 0 | struct event *dst; |
268 | | |
269 | | /* If we don't have a parent or a global event, |
270 | | we have nothing to flatten. */ |
271 | 0 | if (src->parent == NULL && current_global_event == NULL) |
272 | 0 | return event_ref(src); |
273 | | |
274 | | /* We have to flatten the event. */ |
275 | | |
276 | 0 | dst = event_create_internal(NULL, src->source_filename, |
277 | 0 | src->source_linenum); |
278 | 0 | dst = event_set_name(dst, src->sending_name); |
279 | |
280 | 0 | if (current_global_event != NULL) |
281 | 0 | event_flatten_recurse(dst, current_global_event, NULL); |
282 | 0 | event_flatten_recurse(dst, src, NULL); |
283 | |
284 | 0 | dst->tv_created_ioloop = src->tv_created_ioloop; |
285 | 0 | dst->tv_created = src->tv_created; |
286 | 0 | dst->tv_last_sent = src->tv_last_sent; |
287 | |
288 | 0 | return dst; |
289 | 0 | } |
290 | | |
291 | | static inline void replace_parent_ref(struct event *event, struct event *new) |
292 | 0 | { |
293 | 0 | if (event->parent == new) |
294 | 0 | return; /* no-op */ |
295 | | |
296 | 0 | if (new != NULL) |
297 | 0 | event_ref(new); |
298 | |
299 | 0 | event_unref(&event->parent); |
300 | |
301 | 0 | event->parent = new; |
302 | 0 | } |
303 | | |
304 | | /* |
305 | | * Minimize the event and its ancestry. |
306 | | * |
307 | | * In general, the chain of parents starting from this event can be divided |
308 | | * up into four consecutive ranges: |
309 | | * |
310 | | * 1. the event itself |
311 | | * 2. a range of events that should be flattened into the event itself |
312 | | * 3. a range of trivial (i.e., no categories or fields) events that should |
313 | | * be skipped |
314 | | * 4. the rest of the chain |
315 | | * |
316 | | * Except for the first range, the event itself, the remaining ranges can |
317 | | * have zero events. |
318 | | * |
319 | | * As the names of these ranges imply, we want to flatten certain parts of |
320 | | * the ancestry, skip other parts of the ancestry and leave the remainder |
321 | | * untouched. |
322 | | * |
323 | | * For example, suppose that we have an event (A) with ancestors forming the |
324 | | * following graph: |
325 | | * |
326 | | * A -> B -> C -> D -> E -> F |
327 | | * |
328 | | * Further, suppose that B, C, and F contain some categories or fields but |
329 | | * have not yet been sent to an external process that knows how to reference |
330 | | * previously encountered events, and D contains no fields or categories of |
331 | | * its own (but it inherits some from E and F). |
332 | | * |
333 | | * We can define the 4 ranges: |
334 | | * |
335 | | * A: the event |
336 | | * B-C: flattening |
337 | | * D: skipping |
338 | | * E-end: the rest |
339 | | * |
340 | | * The output would therefore be: |
341 | | * |
342 | | * G -> E -> F |
343 | | * |
344 | | * where G contains the fields and categories of A, B, and C (and trivially |
345 | | * D because D was empty). |
346 | | * |
347 | | * Note that even though F has not yet been sent out, we send it now because |
348 | | * it is part of the "rest" range. |
349 | | * |
350 | | * TODO: We could likely apply this function recursively on the "rest" |
351 | | * range, but further investigation is required to determine whether it is |
352 | | * worth it. |
353 | | */ |
354 | | struct event *event_minimize(struct event *event) |
355 | 0 | { |
356 | 0 | struct event *flatten_bound; |
357 | 0 | struct event *skip_bound; |
358 | 0 | struct event *new_event; |
359 | 0 | struct event *cur; |
360 | |
361 | 0 | if (event->parent == NULL) |
362 | 0 | return event_ref(event); |
363 | | |
364 | | /* find the bound for field/category flattening */ |
365 | 0 | flatten_bound = NULL; |
366 | 0 | for (cur = event->parent; cur != NULL; cur = cur->parent) { |
367 | 0 | if (cur->sent_to_stats_id == 0 && |
368 | 0 | timeval_cmp(&cur->tv_created_ioloop, |
369 | 0 | &event->tv_created_ioloop) == 0) |
370 | 0 | continue; |
371 | | |
372 | 0 | flatten_bound = cur; |
373 | 0 | break; |
374 | 0 | } |
375 | | |
376 | | /* continue to find the bound for empty event skipping */ |
377 | 0 | skip_bound = NULL; |
378 | 0 | for (; cur != NULL; cur = cur->parent) { |
379 | 0 | if (cur->sent_to_stats_id == 0 && |
380 | 0 | (!array_is_created(&cur->fields) || |
381 | 0 | array_is_empty(&cur->fields)) && |
382 | 0 | (!array_is_created(&cur->categories) || |
383 | 0 | array_is_empty(&cur->categories))) |
384 | 0 | continue; |
385 | | |
386 | 0 | skip_bound = cur; |
387 | 0 | break; |
388 | 0 | } |
389 | | |
390 | | /* fast path - no flattening and no skipping to do */ |
391 | 0 | if ((event->parent == flatten_bound) && |
392 | 0 | (event->parent == skip_bound)) |
393 | 0 | return event_ref(event); |
394 | | |
395 | 0 | new_event = event_dup(event); |
396 | | |
397 | | /* flatten */ |
398 | 0 | event_flatten_recurse(new_event, event, flatten_bound); |
399 | 0 | replace_parent_ref(new_event, flatten_bound); |
400 | | |
401 | | /* skip */ |
402 | 0 | replace_parent_ref(new_event, skip_bound); |
403 | |
404 | 0 | return new_event; |
405 | 0 | } |
406 | | |
407 | | static struct event * |
408 | | event_create_internal(struct event *parent, const char *source_filename, |
409 | | unsigned int source_linenum) |
410 | 1 | { |
411 | 1 | struct event *event; |
412 | 1 | pool_t pool = pool_alloconly_create(MEMPOOL_GROWING"event", 1024); |
413 | | |
414 | 1 | event = p_new(pool, struct event, 1); |
415 | 1 | event->refcount = 1; |
416 | 1 | event->id = ++event_id_counter; |
417 | 1 | event->pool = pool; |
418 | 1 | event->tv_created_ioloop = ioloop_timeval; |
419 | 1 | event->min_log_level = LOG_TYPE_INFO; |
420 | 1 | i_gettimeofday(&event->tv_created); |
421 | 1 | event->source_filename = p_strdup(pool, source_filename); |
422 | 1 | event->source_linenum = source_linenum; |
423 | 1 | event->change_id = 1; |
424 | 1 | if (parent != NULL) { |
425 | 0 | event->parent = parent; |
426 | 0 | event_ref(event->parent); |
427 | 0 | event_copy_parent_defaults(event, parent); |
428 | 0 | } |
429 | 1 | DLLIST_PREPEND(&events, event); |
430 | 1 | return event; |
431 | 1 | } |
432 | | |
433 | | #undef event_create |
434 | | struct event *event_create(struct event *parent, const char *source_filename, |
435 | | unsigned int source_linenum) |
436 | 1 | { |
437 | 1 | struct event *event; |
438 | | |
439 | 1 | event = event_create_internal(parent, source_filename, source_linenum); |
440 | 1 | (void)event_call_callbacks_noargs(event, EVENT_CALLBACK_TYPE_CREATE); |
441 | 1 | return event; |
442 | 1 | } |
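As a rough sketch of the create/unref lifecycle implemented above (illustrative only; the #undef above suggests the public event_create() is normally a macro that fills in the source location, so the three-argument function is called directly here):

	struct event *parent = event_create(NULL, __FILE__, __LINE__);
	struct event *child = event_create(parent, __FILE__, __LINE__);
	/* child picked up parent's defaults via event_copy_parent_defaults() */
	event_unref(&child);
	event_unref(&parent);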
443 | | |
444 | | #undef event_create_passthrough |
445 | | struct event_passthrough * |
446 | | event_create_passthrough(struct event *parent, const char *source_filename, |
447 | | unsigned int source_linenum) |
448 | 0 | { |
449 | 0 | if (!parent->passthrough) { |
450 | 0 | if (event_last_passthrough != NULL) { |
451 | | /* API is being used in a wrong or dangerous way */ |
452 | 0 | i_panic("Can't create multiple passthrough events - " |
453 | 0 | "finish the earlier with ->event()"); |
454 | 0 | } |
455 | 0 | struct event *event = |
456 | 0 | event_create(parent, source_filename, source_linenum); |
457 | 0 | event->passthrough = TRUE; |
458 | | /* This event only intends to extend the parent event. |
459 | | Use the parent's creation timestamp. */ |
460 | 0 | event->tv_created_ioloop = parent->tv_created_ioloop; |
461 | 0 | event->tv_created = parent->tv_created; |
462 | 0 | memcpy(&event->ru_last, &parent->ru_last, sizeof(parent->ru_last)); |
463 | 0 | event_last_passthrough = event; |
464 | 0 | } else { |
465 | 0 | event_last_passthrough = parent; |
466 | 0 | } |
467 | 0 | return &event_passthrough_vfuncs; |
468 | 0 | } |
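A hedged sketch of how the passthrough variant is intended to be chained (the add_str() and event() members of struct event_passthrough are declared elsewhere in the event API and are assumed here); the terminating ->event() call is what finishes the passthrough event again:

	static struct event *example_passthrough(struct event *parent)
	{
		return event_create_passthrough(parent, __FILE__, __LINE__)->
			add_str("user", "exampleuser")->event();
	}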
469 | | |
470 | | struct event *event_ref(struct event *event) |
471 | 0 | { |
472 | 0 | i_assert(event->refcount > 0); |
473 | | |
474 | 0 | event->refcount++; |
475 | 0 | return event; |
476 | 0 | } |
477 | | |
478 | | void event_unref(struct event **_event) |
479 | 0 | { |
480 | 0 | struct event *event = *_event; |
481 | |
482 | 0 | if (event == NULL) |
483 | 0 | return; |
484 | 0 | *_event = NULL; |
485 | |
486 | 0 | i_assert(event->refcount > 0); |
487 | 0 | if (--event->refcount > 0) |
488 | 0 | return; |
489 | 0 | i_assert(event != current_global_event); |
490 | | |
491 | 0 | event_call_callbacks_noargs(event, EVENT_CALLBACK_TYPE_FREE); |
492 | |
493 | 0 | if (event_last_passthrough == event) |
494 | 0 | event_last_passthrough = NULL; |
495 | 0 | if (event->log_prefix_from_system_pool) |
496 | 0 | i_free(event->log_prefix); |
497 | 0 | i_free(event->sending_name); |
498 | 0 | event_unref(&event->parent); |
499 | |
500 | 0 | DLLIST_REMOVE(&events, event); |
501 | 0 | pool_unref(&event->pool); |
502 | 0 | } |
503 | | |
504 | | struct event *events_get_head(void) |
505 | 0 | { |
506 | 0 | return events; |
507 | 0 | } |
508 | | |
509 | | struct event *event_push_global(struct event *event) |
510 | 0 | { |
511 | 0 | i_assert(event != NULL); |
512 | | |
513 | 0 | if (current_global_event != NULL) { |
514 | 0 | if (!array_is_created(&global_event_stack)) |
515 | 0 | i_array_init(&global_event_stack, 4); |
516 | 0 | array_push_back(&global_event_stack, ¤t_global_event); |
517 | 0 | } |
518 | 0 | current_global_event = event; |
519 | 0 | return event; |
520 | 0 | } |
521 | | |
522 | | struct event *event_pop_global(struct event *event) |
523 | 0 | { |
524 | 0 | i_assert(event != NULL); |
525 | 0 | i_assert(event == current_global_event); |
526 | | /* If the active context's root event is popped, we'll assert-crash |
527 | | later on when deactivating the context and the root event no longer |
528 | | exists. */ |
529 | 0 | i_assert(event != io_loop_get_active_global_root()); |
530 | | |
531 | 0 | if (!array_is_created(&global_event_stack) || |
532 | 0 | array_count(&global_event_stack) == 0) |
533 | 0 | current_global_event = NULL; |
534 | 0 | else { |
535 | 0 | unsigned int event_count; |
536 | 0 | struct event *const *events = |
537 | 0 | array_get(&global_event_stack, &event_count); |
538 | |
539 | 0 | i_assert(event_count > 0); |
540 | 0 | current_global_event = events[event_count-1]; |
541 | 0 | array_delete(&global_event_stack, event_count-1, 1); |
542 | 0 | } |
543 | 0 | return current_global_event; |
544 | 0 | } |
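The global stack above is strictly nested; a minimal sketch of the intended push/pop pattern (names illustrative):

	struct event *global = event_create(NULL, __FILE__, __LINE__);

	event_push_global(global);
	/* events created or sent here can see it via event_get_global() */
	event_pop_global(global);
	event_unref(&global);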
545 | | |
546 | | struct event *event_get_global(void) |
547 | 0 | { |
548 | 0 | return current_global_event; |
549 | 0 | } |
550 | | |
551 | | #undef event_reason_begin |
552 | | struct event_reason * |
553 | | event_reason_begin(const char *reason_code, const char *source_filename, |
554 | | unsigned int source_linenum) |
555 | 0 | { |
556 | 0 | struct event_reason *reason; |
557 | |
558 | 0 | reason = i_new(struct event_reason, 1); |
559 | 0 | reason->event = event_create(event_get_global(), |
560 | 0 | source_filename, source_linenum); |
561 | 0 | event_strlist_append(reason->event, EVENT_REASON_CODE, reason_code); |
562 | 0 | event_push_global(reason->event); |
563 | 0 | return reason; |
564 | 0 | } |
565 | | |
566 | | void event_reason_end(struct event_reason **_reason) |
567 | 0 | { |
568 | 0 | struct event_reason *reason = *_reason; |
569 | |
570 | 0 | if (reason == NULL) |
571 | 0 | return; |
572 | 0 | event_pop_global(reason->event); |
573 | | /* This event was created only for global use. It shouldn't be |
574 | | permanently stored anywhere. This assert could help catch bugs. */ |
575 | 0 | i_assert(reason->event->refcount == 1); |
576 | 0 | event_unref(&reason->event); |
577 | 0 | i_free(reason); |
578 | 0 | } |
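event_reason_begin()/event_reason_end() wrap the global push/pop into a scoped helper; a sketch of the intended usage (the reason code string is only an example, and the source arguments are normally supplied by a macro):

	struct event_reason *reason =
		event_reason_begin("example:fetch_body", __FILE__, __LINE__);
	/* work done here is attributed to the reason via the global event */
	event_reason_end(&reason);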
579 | | |
580 | | const char *event_reason_code(const char *module, const char *name) |
581 | 0 | { |
582 | 0 | return event_reason_code_prefix(module, "", name); |
583 | 0 | } |
584 | | |
585 | | static bool event_reason_code_module_validate(const char *module) |
586 | 0 | { |
587 | 0 | const char *p; |
588 | |
589 | 0 | for (p = module; *p != '\0'; p++) { |
590 | 0 | if (*p == ' ' || *p == '-' || *p == ':') |
591 | 0 | return FALSE; |
592 | 0 | if (i_isupper(*p)) |
593 | 0 | return FALSE; |
594 | 0 | } |
595 | 0 | return TRUE; |
596 | 0 | } |
597 | | |
598 | | const char *event_reason_code_prefix(const char *module, |
599 | | const char *name_prefix, const char *name) |
600 | 0 | { |
601 | 0 | const char *p; |
602 | |
603 | 0 | i_assert(module[0] != '\0'); |
604 | 0 | i_assert(name[0] != '\0'); |
605 | | |
606 | 0 | if (!event_reason_code_module_validate(module)) { |
607 | 0 | i_panic("event_reason_code_prefix(): " |
608 | 0 | "Invalid module '%s'", module); |
609 | 0 | } |
610 | 0 | if (!event_reason_code_module_validate(name_prefix)) { |
611 | 0 | i_panic("event_reason_code_prefix(): " |
612 | 0 | "Invalid name_prefix '%s'", name_prefix); |
613 | 0 | } |
614 | | |
615 | 0 | string_t *str = t_str_new(strlen(module) + 1 + |
616 | 0 | strlen(name_prefix) + strlen(name)); |
617 | 0 | str_append(str, module); |
618 | 0 | str_append_c(str, ':'); |
619 | 0 | str_append(str, name_prefix); |
620 | |
621 | 0 | for (p = name; *p != '\0'; p++) { |
622 | 0 | switch (*p) { |
623 | 0 | case ' ': |
624 | 0 | case '-': |
625 | 0 | str_append_c(str, '_'); |
626 | 0 | break; |
627 | 0 | case ':': |
628 | 0 | i_panic("event_reason_code_prefix(): " |
629 | 0 | "name has ':' (%s, %s%s)", |
630 | 0 | module, name_prefix, name); |
631 | 0 | default: |
632 | 0 | str_append_c(str, i_tolower(*p)); |
633 | 0 | break; |
634 | 0 | } |
635 | 0 | } |
636 | 0 | return str_c(str); |
637 | 0 | } |
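Following the transformation above (validate the module, lowercase the name, map spaces and dashes to underscores, join with ':'), an illustrative call looks like this:

	const char *code = event_reason_code("example", "Fetch Body");
	/* code is now "example:fetch_body" */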
638 | | |
639 | | static struct event * |
640 | | event_set_log_prefix(struct event *event, const char *prefix, bool append) |
641 | 0 | { |
642 | 0 | event->log_prefix_callback = NULL; |
643 | 0 | event->log_prefix_callback_context = NULL; |
644 | 0 | if (event->log_prefix == NULL) { |
645 | | /* allocate the first log prefix from the pool */ |
646 | 0 | event->log_prefix = p_strdup(event->pool, prefix); |
647 | 0 | } else { |
648 | | /* log prefix is being updated multiple times - |
649 | | switch to system pool so we don't keep leaking memory */ |
650 | 0 | if (event->log_prefix_from_system_pool) |
651 | 0 | i_free(event->log_prefix); |
652 | 0 | else |
653 | 0 | event->log_prefix_from_system_pool = TRUE; |
654 | 0 | event->log_prefix = i_strdup(prefix); |
655 | 0 | } |
656 | 0 | event->log_prefix_replace = !append; |
657 | 0 | return event; |
658 | 0 | } |
659 | | |
660 | | struct event * |
661 | | event_set_append_log_prefix(struct event *event, const char *prefix) |
662 | 0 | { |
663 | 0 | return event_set_log_prefix(event, prefix, TRUE); |
664 | 0 | } |
665 | | |
666 | | struct event *event_replace_log_prefix(struct event *event, const char *prefix) |
667 | 0 | { |
668 | 0 | return event_set_log_prefix(event, prefix, FALSE); |
669 | 0 | } |
670 | | |
671 | | struct event * |
672 | | event_drop_parent_log_prefixes(struct event *event, unsigned int count) |
673 | 0 | { |
674 | 0 | event->log_prefixes_dropped = count; |
675 | 0 | return event; |
676 | 0 | } |
677 | | |
678 | | #undef event_set_log_prefix_callback |
679 | | struct event * |
680 | | event_set_log_prefix_callback(struct event *event, |
681 | | bool replace, |
682 | | event_log_prefix_callback_t *callback, |
683 | | void *context) |
684 | 0 | { |
685 | 0 | if (event->log_prefix_from_system_pool) |
686 | 0 | i_free(event->log_prefix); |
687 | 0 | else |
688 | 0 | event->log_prefix = NULL; |
689 | 0 | event->log_prefix_replace = replace; |
690 | 0 | event->log_prefix_callback = callback; |
691 | 0 | event->log_prefix_callback_context = context; |
692 | 0 | return event; |
693 | 0 | } |
694 | | |
695 | | #undef event_set_log_message_callback |
696 | | struct event * |
697 | | event_set_log_message_callback(struct event *event, |
698 | | event_log_message_callback_t *callback, |
699 | | void *context) |
700 | 0 | { |
701 | 0 | event->log_message_callback = callback; |
702 | 0 | event->log_message_callback_context = context; |
703 | 0 | return event; |
704 | 0 | } |
705 | | |
706 | | void event_disable_callbacks(struct event *event) |
707 | 0 | { |
708 | 0 | event->disable_callbacks = TRUE; |
709 | 0 | } |
710 | | |
711 | | #undef event_unset_log_message_callback |
712 | | void event_unset_log_message_callback(struct event *event, |
713 | | event_log_message_callback_t *callback, |
714 | | void *context) |
715 | 0 | { |
716 | 0 | i_assert(event->log_message_callback == callback); |
717 | 0 | i_assert(event->log_message_callback_context == context); |
718 | | |
719 | 0 | event->log_message_callback = NULL; |
720 | 0 | event->log_message_callback_context = NULL; |
721 | 0 | } |
722 | | |
723 | | struct event * |
724 | | event_set_name(struct event *event, const char *name) |
725 | 9 | { |
726 | 9 | i_free(event->sending_name); |
727 | 9 | event->sending_name = i_strdup(name); |
728 | 9 | return event; |
729 | 9 | } |
730 | | |
731 | | struct event * |
732 | | event_set_source(struct event *event, const char *filename, |
733 | | unsigned int linenum, bool literal_fname) |
734 | 0 | { |
735 | 0 | if (strcmp(event->source_filename, filename) != 0) { |
736 | 0 | event->source_filename = literal_fname ? filename : |
737 | 0 | p_strdup(event->pool, filename); |
738 | 0 | } |
739 | 0 | event->source_linenum = linenum; |
740 | 0 | return event; |
741 | 0 | } |
742 | | |
743 | | struct event *event_set_always_log_source(struct event *event) |
744 | 0 | { |
745 | 0 | event->always_log_source = TRUE; |
746 | 0 | return event; |
747 | 0 | } |
748 | | |
749 | | struct event *event_set_min_log_level(struct event *event, enum log_type level) |
750 | 0 | { |
751 | 0 | event->min_log_level = level; |
752 | 0 | event_recalculate_debug_level(event); |
753 | 0 | return event; |
754 | 0 | } |
755 | | |
756 | | enum log_type event_get_min_log_level(const struct event *event) |
757 | 0 | { |
758 | 0 | return event->min_log_level; |
759 | 0 | } |
760 | | |
761 | | struct event *event_set_ptr(struct event *event, const char *key, void *value) |
762 | 0 | { |
763 | 0 | struct event_pointer *p; |
764 | |
765 | 0 | if (!array_is_created(&event->pointers)) |
766 | 0 | p_array_init(&event->pointers, event->pool, 4); |
767 | 0 | else { |
768 | | /* replace existing pointer if the key already exists */ |
769 | 0 | array_foreach_modifiable(&event->pointers, p) { |
770 | 0 | if (strcmp(p->key, key) == 0) { |
771 | 0 | p->value = value; |
772 | 0 | return event; |
773 | 0 | } |
774 | 0 | } |
775 | 0 | } |
776 | 0 | p = array_append_space(&event->pointers); |
777 | 0 | p->key = p_strdup(event->pool, key); |
778 | 0 | p->value = value; |
779 | 0 | return event; |
780 | 0 | } |
781 | | |
782 | | void *event_get_ptr(const struct event *event, const char *key) |
783 | 0 | { |
784 | 0 | const struct event_pointer *p; |
785 | |
786 | 0 | if (!array_is_created(&event->pointers)) |
787 | 0 | return NULL; |
788 | 0 | array_foreach(&event->pointers, p) { |
789 | 0 | if (strcmp(p->key, key) == 0) |
790 | 0 | return p->value; |
791 | 0 | } |
792 | 0 | return NULL; |
793 | 0 | } |
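The pointer helpers above implement a small key/pointer association list on the event; a sketch of a lazily created per-event context (the key name and context struct are hypothetical):

	struct example_ctx { int counter; };

	static struct example_ctx *example_ctx_get(struct event *event)
	{
		struct example_ctx *ctx = event_get_ptr(event, "example_ctx");

		if (ctx == NULL) {
			ctx = p_new(event_get_pool(event), struct example_ctx, 1);
			event_set_ptr(event, "example_ctx", ctx);
		}
		return ctx;
	}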
794 | | |
795 | | struct event_category *event_category_find_registered(const char *name) |
796 | 0 | { |
797 | 0 | struct event_category *cat; |
798 | |
799 | 0 | array_foreach_elem(&event_registered_categories_representative, cat) { |
800 | 0 | if (strcmp(cat->name, name) == 0) |
801 | 0 | return cat; |
802 | 0 | } |
803 | 0 | return NULL; |
804 | 0 | } |
805 | | |
806 | | static struct event_internal_category * |
807 | | event_category_find_internal(const char *name) |
808 | 0 | { |
809 | 0 | struct event_internal_category *internal; |
810 | |
811 | 0 | array_foreach_elem(&event_registered_categories_internal, internal) { |
812 | 0 | if (strcmp(internal->name, name) == 0) |
813 | 0 | return internal; |
814 | 0 | } |
815 | | |
816 | 0 | return NULL; |
817 | 0 | } |
818 | | |
819 | | struct event_category *const * |
820 | | event_get_registered_categories(unsigned int *count_r) |
821 | 0 | { |
822 | 0 | return array_get(&event_registered_categories_representative, count_r); |
823 | 0 | } |
824 | | |
825 | | static void |
826 | | event_category_add_to_array(struct event_internal_category *internal) |
827 | 0 | { |
828 | 0 | struct event_category *representative = &internal->representative; |
829 | |
830 | 0 | array_push_back(&event_registered_categories_internal, &internal); |
831 | 0 | array_push_back(&event_registered_categories_representative, |
832 | 0 | &representative); |
833 | 0 | } |
834 | | |
835 | | static struct event_category * |
836 | | event_category_register(struct event_category *category) |
837 | 0 | { |
838 | 0 | struct event_internal_category *internal = category->internal; |
839 | 0 | event_category_callback_t *callback; |
840 | 0 | bool allocated; |
841 | |
842 | 0 | if (internal != NULL) |
843 | 0 | return &internal->representative; /* case 2 - see below */ |
844 | | |
845 | | /* register parent categories first */ |
846 | 0 | if (category->parent != NULL) |
847 | 0 | (void) event_category_register(category->parent); |
848 | | |
849 | | /* There are four cases we need to handle: |
850 | | |
851 | | 1) a new category is registered |
852 | | 2) same category struct is re-registered - already handled above |
853 | | internal NULL check |
854 | | 3) different category struct is registered, but it is identical |
855 | | to the previously registered one |
856 | | 4) different category struct is registered, and it is different |
857 | | from the previously registered one - a programming error */ |
858 | 0 | internal = event_category_find_internal(category->name); |
859 | 0 | if (internal == NULL) { |
860 | | /* case 1: first time we saw this name - allocate new */ |
861 | 0 | internal = i_new(struct event_internal_category, 1); |
862 | 0 | if (category->parent != NULL) |
863 | 0 | internal->parent = category->parent->internal; |
864 | 0 | internal->name = i_strdup(category->name); |
865 | 0 | internal->refcount = 1; |
866 | 0 | internal->representative.name = internal->name; |
867 | 0 | internal->representative.parent = category->parent; |
868 | 0 | internal->representative.internal = internal; |
869 | |
870 | 0 | event_category_add_to_array(internal); |
871 | |
872 | 0 | allocated = TRUE; |
873 | 0 | } else { |
874 | | /* case 3 or 4: someone registered this name before - share */ |
875 | 0 | if ((category->parent != NULL) && |
876 | 0 | (internal->parent != category->parent->internal)) { |
877 | | /* case 4 */ |
878 | 0 | struct event_internal_category *other = |
879 | 0 | category->parent->internal; |
880 | |
881 | 0 | i_panic("event category parent mismatch detected: " |
882 | 0 | "category %p internal %p (%s), " |
883 | 0 | "internal parent %p (%s), public parent %p (%s)", |
884 | 0 | category, internal, internal->name, |
885 | 0 | internal->parent, internal->parent->name, |
886 | 0 | other, other->name); |
887 | 0 | } |
888 | | |
889 | 0 | internal->refcount++; |
890 | |
891 | 0 | allocated = FALSE; |
892 | 0 | } |
893 | | |
894 | 0 | category->internal = internal; |
895 | |
896 | 0 | if (!allocated) { |
897 | | /* not the first registration of this category */ |
898 | 0 | return &internal->representative; |
899 | 0 | } |
900 | | |
901 | 0 | array_foreach_elem(&event_category_callbacks, callback) T_BEGIN { |
902 | 0 | callback(&internal->representative); |
903 | 0 | } T_END; |
904 | | |
905 | 0 | return &internal->representative; |
906 | 0 | } |
907 | | |
908 | | static bool |
909 | | event_find_category(const struct event *event, |
910 | | const struct event_category *category) |
911 | 0 | { |
912 | 0 | struct event_internal_category *internal = category->internal; |
913 | | |
914 | | /* make sure we're always looking for a representative */ |
915 | 0 | i_assert(category == &internal->representative); |
916 | | |
917 | 0 | return array_lsearch_ptr(&event->categories, category) != NULL; |
918 | 0 | } |
919 | | |
920 | | struct event * |
921 | | event_add_categories(struct event *event, |
922 | | struct event_category *const *categories) |
923 | 0 | { |
924 | 0 | struct event_category *representative; |
925 | |
926 | 0 | if (!array_is_created(&event->categories)) |
927 | 0 | p_array_init(&event->categories, event->pool, 4); |
928 | |
929 | 0 | for (unsigned int i = 0; categories[i] != NULL; i++) { |
930 | 0 | representative = event_category_register(categories[i]); |
931 | 0 | if (!event_find_category(event, representative)) |
932 | 0 | array_push_back(&event->categories, &representative); |
933 | 0 | } |
934 | 0 | event_set_changed(event); |
935 | 0 | event_recalculate_debug_level(event); |
936 | 0 | return event; |
937 | 0 | } |
938 | | |
939 | | struct event * |
940 | | event_add_category(struct event *event, struct event_category *category) |
941 | 0 | { |
942 | 0 | struct event_category *const categories[] = { category, NULL }; |
943 | 0 | return event_add_categories(event, categories); |
944 | 0 | } |
945 | | |
946 | | struct event_field * |
947 | | event_find_field_nonrecursive(const struct event *event, const char *key) |
948 | 45 | { |
949 | 45 | struct event_field *field; |
950 | | |
951 | 45 | if (!array_is_created(&event->fields)) |
952 | 1 | return NULL; |
953 | | |
954 | 130 | array_foreach_modifiable(&event->fields, field) { |
955 | 130 | if (strcmp(field->key, key) == 0) |
956 | 40 | return field; |
957 | 130 | } |
958 | 4 | return NULL; |
959 | 44 | } |
960 | | |
961 | | const struct event_field * |
962 | | event_find_field_recursive(const struct event *event, const char *key) |
963 | 0 | { |
964 | 0 | const struct event_field *field; |
965 | |
966 | 0 | do { |
967 | 0 | if ((field = event_find_field_nonrecursive(event, key)) != NULL) |
968 | 0 | return field; |
969 | 0 | event = event->parent; |
970 | 0 | } while (event != NULL); |
971 | | |
972 | | /* check also the global event and its parents */ |
973 | 0 | event = event_get_global(); |
974 | 0 | while (event != NULL) { |
975 | 0 | if ((field = event_find_field_nonrecursive(event, key)) != NULL) |
976 | 0 | return field; |
977 | 0 | event = event->parent; |
978 | 0 | } |
979 | 0 | return NULL; |
980 | 0 | } |
981 | | |
982 | | static void |
983 | | event_get_recursive_strlist(const struct event *event, pool_t pool, |
984 | | const char *key, ARRAY_TYPE(const_string) *dest) |
985 | 0 | { |
986 | 0 | const struct event_field *field; |
987 | 0 | const char *str; |
988 | |
989 | 0 | if (event == NULL) |
990 | 0 | return; |
991 | | |
992 | 0 | field = event_find_field_nonrecursive(event, key); |
993 | 0 | if (field != NULL) { |
994 | 0 | if (field->value_type != EVENT_FIELD_VALUE_TYPE_STRLIST) { |
995 | | /* Value type unexpectedly changed. Stop recursing. */ |
996 | 0 | return; |
997 | 0 | } |
998 | 0 | array_foreach_elem(&field->value.strlist, str) { |
999 | 0 | if (array_lsearch(dest, &str, i_strcmp_p) == NULL) { |
1000 | 0 | if (pool != NULL) |
1001 | 0 | str = p_strdup(pool, str); |
1002 | 0 | array_push_back(dest, &str); |
1003 | 0 | } |
1004 | 0 | } |
1005 | 0 | } |
1006 | 0 | event_get_recursive_strlist(event->parent, pool, key, dest); |
1007 | 0 | } |
1008 | | |
1009 | | const char * |
1010 | | event_find_field_recursive_str(const struct event *event, const char *key) |
1011 | 0 | { |
1012 | 0 | const struct event_field *field; |
1013 | |
1014 | 0 | field = event_find_field_recursive(event, key); |
1015 | 0 | if (field == NULL) |
1016 | 0 | return NULL; |
1017 | | |
1018 | 0 | switch (field->value_type) { |
1019 | 0 | case EVENT_FIELD_VALUE_TYPE_STR: |
1020 | 0 | return field->value.str; |
1021 | 0 | case EVENT_FIELD_VALUE_TYPE_INTMAX: |
1022 | 0 | return t_strdup_printf("%jd", field->value.intmax); |
1023 | 0 | case EVENT_FIELD_VALUE_TYPE_TIMEVAL: |
1024 | 0 | return t_strdup_printf("%"PRIdTIME_T".%u", |
1025 | 0 | field->value.timeval.tv_sec, |
1026 | 0 | (unsigned int)field->value.timeval.tv_usec); |
1027 | 0 | case EVENT_FIELD_VALUE_TYPE_IP: |
1028 | 0 | return net_ip2addr(&field->value.ip); |
1029 | 0 | case EVENT_FIELD_VALUE_TYPE_STRLIST: { |
1030 | 0 | ARRAY_TYPE(const_string) list; |
1031 | 0 | t_array_init(&list, 8); |
1032 | | /* This is a bit different, because it needs to be merging |
1033 | | all of the parent events' and global events' lists |
1034 | | together. */ |
1035 | 0 | event_get_recursive_strlist(event, NULL, key, &list); |
1036 | 0 | event_get_recursive_strlist(event_get_global(), NULL, |
1037 | 0 | key, &list); |
1038 | 0 | return t_array_const_string_join(&list, ","); |
1039 | 0 | } |
1040 | 0 | } |
1041 | 0 | i_unreached(); |
1042 | 0 | } |
1043 | | |
1044 | | static struct event_field * |
1045 | | event_get_field(struct event *event, const char *key, bool clear) |
1046 | 45 | { |
1047 | 45 | struct event_field *field; |
1048 | | |
1049 | 45 | field = event_find_field_nonrecursive(event, key); |
1050 | 45 | if (field == NULL) { |
1051 | 5 | if (!array_is_created(&event->fields)) |
1052 | 5 | p_array_init(&event->fields, event->pool, 8); |
1053 | 5 | field = array_append_space(&event->fields); |
1054 | 5 | field->key = p_strdup(event->pool, key); |
1055 | 40 | } else if (clear) { |
1056 | 40 | i_zero(&field->value); |
1057 | 40 | } |
1058 | 45 | event_set_changed(event); |
1059 | 45 | return field; |
1060 | 45 | } |
1061 | | |
1062 | | struct event * |
1063 | | event_add_str(struct event *event, const char *key, const char *value) |
1064 | 9 | { |
1065 | 9 | struct event_field *field; |
1066 | | |
1067 | 9 | if (value == NULL) { |
1068 | | /* Silently ignoring is perhaps better than assert-crashing? |
1069 | | However, if the field already exists, this should be the |
1070 | | same as event_field_clear() */ |
1071 | 0 | if (event_find_field_recursive(event, key) == NULL) |
1072 | 0 | return event; |
1073 | 0 | value = ""; |
1074 | 0 | } |
1075 | | |
1076 | 9 | field = event_get_field(event, key, TRUE); |
1077 | 9 | field->value_type = EVENT_FIELD_VALUE_TYPE_STR; |
1078 | 9 | field->value.str = p_strdup(event->pool, value); |
1079 | 9 | return event; |
1080 | 9 | } |
1081 | | |
1082 | | struct event * |
1083 | | event_strlist_append(struct event *event, const char *key, const char *value) |
1084 | 0 | { |
1085 | 0 | struct event_field *field = event_get_field(event, key, FALSE); |
1086 | |
1087 | 0 | if (field->value_type != EVENT_FIELD_VALUE_TYPE_STRLIST || |
1088 | 0 | !array_is_created(&field->value.strlist)) { |
1089 | 0 | field->value_type = EVENT_FIELD_VALUE_TYPE_STRLIST; |
1090 | 0 | p_array_init(&field->value.strlist, event->pool, 1); |
1091 | 0 | } |
1092 | | |
1093 | | /* let's not add empty values there, though */ |
1094 | 0 | if (value == NULL) |
1095 | 0 | return event; |
1096 | | |
1097 | 0 | const char *str = p_strdup(event->pool, value); |
1098 | 0 | if (array_lsearch(&field->value.strlist, &str, i_strcmp_p) == NULL) |
1099 | 0 | array_push_back(&field->value.strlist, &str); |
1100 | 0 | return event; |
1101 | 0 | } |
1102 | | |
1103 | | struct event * |
1104 | | event_strlist_replace(struct event *event, const char *key, |
1105 | | const char *const *values, unsigned int count) |
1106 | 0 | { |
1107 | 0 | struct event_field *field = event_get_field(event, key, TRUE); |
1108 | 0 | field->value_type = EVENT_FIELD_VALUE_TYPE_STRLIST; |
1109 | |
1110 | 0 | for (unsigned int i = 0; i < count; i++) |
1111 | 0 | event_strlist_append(event, key, values[i]); |
1112 | 0 | return event; |
1113 | 0 | } |
1114 | | |
1115 | | struct event * |
1116 | | event_strlist_copy_recursive(struct event *dest, const struct event *src, |
1117 | | const char *key) |
1118 | 0 | { |
1119 | 0 | event_strlist_append(dest, key, NULL); |
1120 | 0 | struct event_field *field = event_get_field(dest, key, FALSE); |
1121 | 0 | i_assert(field != NULL); |
1122 | 0 | event_get_recursive_strlist(src, dest->pool, key, |
1123 | 0 | &field->value.strlist); |
1124 | 0 | return dest; |
1125 | 0 | } |
1126 | | |
1127 | | struct event * |
1128 | | event_add_int(struct event *event, const char *key, intmax_t num) |
1129 | 36 | { |
1130 | 36 | struct event_field *field; |
1131 | | |
1132 | 36 | field = event_get_field(event, key, TRUE); |
1133 | 36 | field->value_type = EVENT_FIELD_VALUE_TYPE_INTMAX; |
1134 | 36 | field->value.intmax = num; |
1135 | 36 | return event; |
1136 | 36 | } |
1137 | | |
1138 | | struct event * |
1139 | | event_add_int_nonzero(struct event *event, const char *key, intmax_t num) |
1140 | 0 | { |
1141 | 0 | if (num != 0) |
1142 | 0 | return event_add_int(event, key, num); |
1143 | 0 | return event; |
1144 | 0 | } |
1145 | | |
1146 | | struct event * |
1147 | | event_inc_int(struct event *event, const char *key, intmax_t num) |
1148 | 0 | { |
1149 | 0 | struct event_field *field; |
1150 | |
1151 | 0 | field = event_find_field_nonrecursive(event, key); |
1152 | 0 | if (field == NULL || field->value_type != EVENT_FIELD_VALUE_TYPE_INTMAX) |
1153 | 0 | return event_add_int(event, key, num); |
1154 | | |
1155 | 0 | field->value.intmax += num; |
1156 | 0 | event_set_changed(event); |
1157 | 0 | return event; |
1158 | 0 | } |
1159 | | |
1160 | | struct event * |
1161 | | event_add_timeval(struct event *event, const char *key, |
1162 | | const struct timeval *tv) |
1163 | 0 | { |
1164 | 0 | struct event_field *field; |
1165 | |
1166 | 0 | field = event_get_field(event, key, TRUE); |
1167 | 0 | field->value_type = EVENT_FIELD_VALUE_TYPE_TIMEVAL; |
1168 | 0 | field->value.timeval = *tv; |
1169 | 0 | return event; |
1170 | 0 | } |
1171 | | |
1172 | | struct event * |
1173 | | event_add_ip(struct event *event, const char *key, const struct ip_addr *ip) |
1174 | 0 | { |
1175 | 0 | struct event_field *field; |
1176 | |
1177 | 0 | if (ip->family == 0) { |
1178 | | /* ignore nonexistent IP (similar to |
1179 | | event_add_str(value=NULL)) */ |
1180 | 0 | if (event_find_field_recursive(event, key) != NULL) |
1181 | 0 | event_field_clear(event, key); |
1182 | 0 | return event; |
1183 | 0 | } |
1184 | | |
1185 | 0 | field = event_get_field(event, key, TRUE); |
1186 | 0 | field->value_type = EVENT_FIELD_VALUE_TYPE_IP; |
1187 | 0 | field->value.ip = *ip; |
1188 | 0 | return event; |
1189 | 0 | } |
1190 | | |
1191 | | struct event * |
1192 | | event_add_fields(struct event *event, |
1193 | | const struct event_add_field *fields) |
1194 | 0 | { |
1195 | 0 | for (unsigned int i = 0; fields[i].key != NULL; i++) { |
1196 | 0 | if (fields[i].value != NULL) |
1197 | 0 | event_add_str(event, fields[i].key, fields[i].value); |
1198 | 0 | else if (fields[i].value_timeval.tv_sec != 0) { |
1199 | 0 | event_add_timeval(event, fields[i].key, |
1200 | 0 | &fields[i].value_timeval); |
1201 | 0 | } else if (fields[i].value_ip.family != 0) { |
1202 | 0 | event_add_ip(event, fields[i].key, &fields[i].value_ip); |
1203 | 0 | } else { |
1204 | 0 | event_add_int(event, fields[i].key, |
1205 | 0 | fields[i].value_intmax); |
1206 | 0 | } |
1207 | 0 | } |
1208 | 0 | return event; |
1209 | 0 | } |
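A short sketch combining the field helpers above (field names are illustrative; struct event_add_field is initialized the same way the loop above reads it):

	static void example_add_fields(struct event *event)
	{
		event_add_str(event, "user", "exampleuser");
		event_add_int(event, "net_in_bytes", 1024);

		const struct event_add_field more[] = {
			{ .key = "service", .value = "imap" },
			{ .key = "net_out_bytes", .value_intmax = 200 },
			{ .key = NULL }
		};
		event_add_fields(event, more);
	}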
1210 | | |
1211 | | void event_field_clear(struct event *event, const char *key) |
1212 | 0 | { |
1213 | 0 | event_add_str(event, key, ""); |
1214 | 0 | } |
1215 | | |
1216 | | struct event *event_get_parent(const struct event *event) |
1217 | 0 | { |
1218 | 0 | return event->parent; |
1219 | 0 | } |
1220 | | |
1221 | | pool_t event_get_pool(const struct event *event) |
1222 | 0 | { |
1223 | 0 | return event->pool; |
1224 | 0 | } |
1225 | | |
1226 | | void event_get_create_time(const struct event *event, struct timeval *tv_r) |
1227 | 0 | { |
1228 | 0 | *tv_r = event->tv_created; |
1229 | 0 | } |
1230 | | |
1231 | | bool event_get_last_send_time(const struct event *event, struct timeval *tv_r) |
1232 | 0 | { |
1233 | 0 | *tv_r = event->tv_last_sent; |
1234 | 0 | return tv_r->tv_sec != 0; |
1235 | 0 | } |
1236 | | |
1237 | | void event_get_last_duration(const struct event *event, |
1238 | | uintmax_t *duration_usecs_r) |
1239 | 0 | { |
1240 | 0 | if (event->tv_last_sent.tv_sec == 0) { |
1241 | 0 | *duration_usecs_r = 0; |
1242 | 0 | return; |
1243 | 0 | } |
1244 | 0 | long long diff = timeval_diff_usecs(&event->tv_last_sent, |
1245 | 0 | &event->tv_created); |
1246 | 0 | i_assert(diff >= 0); |
1247 | 0 | *duration_usecs_r = diff; |
1248 | 0 | } |
1249 | | |
1250 | | const struct event_field * |
1251 | | event_get_fields(const struct event *event, unsigned int *count_r) |
1252 | 0 | { |
1253 | 0 | if (!array_is_created(&event->fields)) { |
1254 | 0 | *count_r = 0; |
1255 | 0 | return NULL; |
1256 | 0 | } |
1257 | 0 | return array_get(&event->fields, count_r); |
1258 | 0 | } |
1259 | | |
1260 | | struct event_category *const * |
1261 | | event_get_categories(const struct event *event, unsigned int *count_r) |
1262 | 0 | { |
1263 | 0 | if (!array_is_created(&event->categories)) { |
1264 | 0 | *count_r = 0; |
1265 | 0 | return NULL; |
1266 | 0 | } |
1267 | 0 | return array_get(&event->categories, count_r); |
1268 | 0 | } |
1269 | | |
1270 | | static void |
1271 | | insert_category(HASH_TABLE_TYPE(category_set) hash, |
1272 | | const struct event_category *const cat) |
1273 | 0 | { |
1274 | | /* insert this category (key == the unique internal pointer) */ |
1275 | 0 | hash_table_update(hash, cat->internal, cat); |
1276 | | |
1277 | | /* insert parent's categories */ |
1278 | 0 | if (cat->parent != NULL) |
1279 | 0 | insert_category(hash, cat->parent); |
1280 | 0 | } |
1281 | | |
1282 | | struct event_category_iterator * |
1283 | | event_categories_iterate_init(const struct event *event) |
1284 | 0 | { |
1285 | 0 | struct event_category_iterator *iter; |
1286 | 0 | struct event_category *const *cats; |
1287 | 0 | unsigned int count, i; |
1288 | |
1289 | 0 | cats = event_get_categories(event, &count); |
1290 | 0 | if (count == 0) |
1291 | 0 | return NULL; |
1292 | | |
1293 | 0 | iter = i_new(struct event_category_iterator, 1); |
1294 | |
1295 | 0 | hash_table_create_direct(&iter->hash, default_pool, |
1296 | 0 | 3 * count /* estimate */); |
1297 | | |
1298 | | /* Insert all the categories into the hash table */ |
1299 | 0 | for (i = 0; i < count; i++) |
1300 | 0 | insert_category(iter->hash, cats[i]); |
1301 | |
1302 | 0 | iter->iter = hash_table_iterate_init(iter->hash); |
1303 | |
1304 | 0 | return iter; |
1305 | 0 | } |
1306 | | |
1307 | | bool event_categories_iterate(struct event_category_iterator *iter, |
1308 | | const struct event_category **cat_r) |
1309 | 0 | { |
1310 | 0 | void *key ATTR_UNUSED; |
1311 | |
1312 | 0 | if (iter == NULL) { |
1313 | 0 | *cat_r = NULL; |
1314 | 0 | return FALSE; |
1315 | 0 | } |
1316 | 0 | return hash_table_iterate(iter->iter, iter->hash, &key, cat_r); |
1317 | 0 | } |
1318 | | |
1319 | | void event_categories_iterate_deinit(struct event_category_iterator **_iter) |
1320 | 0 | { |
1321 | 0 | struct event_category_iterator *iter = *_iter; |
1322 | |
1323 | 0 | if (iter == NULL) |
1324 | 0 | return; |
1325 | 0 | *_iter = NULL; |
1326 | |
1327 | 0 | hash_table_iterate_deinit(&iter->iter); |
1328 | 0 | hash_table_destroy(&iter->hash); |
1329 | 0 | i_free(iter); |
1330 | 0 | } |
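Putting the iterator above together: it visits each category attached to the event plus all of its parents exactly once (a sketch; i_debug() is the standard liblib logging call):

	static void example_list_categories(const struct event *event)
	{
		struct event_category_iterator *iter =
			event_categories_iterate_init(event);
		const struct event_category *cat;

		while (event_categories_iterate(iter, &cat))
			i_debug("category: %s", cat->name);
		event_categories_iterate_deinit(&iter);
	}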
1331 | | |
1332 | | void event_send(struct event *event, struct failure_context *ctx, |
1333 | | const char *fmt, ...) |
1334 | 0 | { |
1335 | 0 | va_list args; |
1336 | |
1337 | 0 | va_start(args, fmt); |
1338 | 0 | event_vsend(event, ctx, fmt, args); |
1339 | 0 | va_end(args); |
1340 | 0 | } |
1341 | | |
1342 | | void event_vsend(struct event *event, struct failure_context *ctx, |
1343 | | const char *fmt, va_list args) |
1344 | 0 | { |
1345 | 0 | i_gettimeofday(&event->tv_last_sent); |
1346 | | |
1347 | | /* Skip adding user_cpu_usecs if not enabled. */ |
1348 | 0 | if (event->ru_last.ru_utime.tv_sec != 0 || |
1349 | 0 | event->ru_last.ru_utime.tv_usec != 0) { |
1350 | 0 | struct rusage ru_current; |
1351 | 0 | get_self_rusage(&ru_current); |
1352 | 0 | long long udiff = timeval_diff_usecs(&ru_current.ru_utime, |
1353 | 0 | &event->ru_last.ru_utime); |
1354 | 0 | event_add_int(event, "user_cpu_usecs", udiff > 0 ? udiff : 0); |
1355 | 0 | } |
1356 | 0 | if (event_call_callbacks(event, EVENT_CALLBACK_TYPE_SEND, |
1357 | 0 | ctx, fmt, args)) { |
1358 | 0 | if (ctx->type != LOG_TYPE_DEBUG || |
1359 | 0 | event->sending_debug_log) |
1360 | 0 | i_log_typev(ctx, fmt, args); |
1361 | 0 | } |
1362 | 0 | event_send_abort(event); |
1363 | 0 | } |
1364 | | |
1365 | | void event_send_abort(struct event *event) |
1366 | 0 | { |
1367 | | /* if the event is sent again, it needs a new name */ |
1368 | 0 | i_free(event->sending_name); |
1369 | 0 | if (event->passthrough) |
1370 | 0 | event_unref(&event); |
1371 | 0 | } |
1372 | | |
1373 | | static void |
1374 | | event_export_field_value(string_t *dest, const struct event_field *field) |
1375 | 0 | { |
1376 | 0 | switch (field->value_type) { |
1377 | 0 | case EVENT_FIELD_VALUE_TYPE_STR: |
1378 | 0 | str_append_c(dest, EVENT_CODE_FIELD_STR); |
1379 | 0 | str_append_tabescaped(dest, field->key); |
1380 | 0 | str_append_c(dest, '\t'); |
1381 | 0 | str_append_tabescaped(dest, field->value.str); |
1382 | 0 | break; |
1383 | 0 | case EVENT_FIELD_VALUE_TYPE_INTMAX: |
1384 | 0 | str_append_c(dest, EVENT_CODE_FIELD_INTMAX); |
1385 | 0 | str_append_tabescaped(dest, field->key); |
1386 | 0 | str_printfa(dest, "\t%jd", field->value.intmax); |
1387 | 0 | break; |
1388 | 0 | case EVENT_FIELD_VALUE_TYPE_TIMEVAL: |
1389 | 0 | str_append_c(dest, EVENT_CODE_FIELD_TIMEVAL); |
1390 | 0 | str_append_tabescaped(dest, field->key); |
1391 | 0 | str_printfa(dest, "\t%"PRIdTIME_T"\t%u", |
1392 | 0 | field->value.timeval.tv_sec, |
1393 | 0 | (unsigned int)field->value.timeval.tv_usec); |
1394 | 0 | break; |
1395 | 0 | case EVENT_FIELD_VALUE_TYPE_IP: |
1396 | 0 | str_append_c(dest, EVENT_CODE_FIELD_IP); |
1397 | 0 | str_append_tabescaped(dest, field->key); |
1398 | 0 | str_printfa(dest, "\t%s", net_ip2addr(&field->value.ip)); |
1399 | 0 | break; |
1400 | 0 | case EVENT_FIELD_VALUE_TYPE_STRLIST: { |
1401 | 0 | unsigned int count; |
1402 | 0 | const char *const *strlist = |
1403 | 0 | array_get(&field->value.strlist, &count); |
1404 | 0 | str_append_c(dest, EVENT_CODE_FIELD_STRLIST); |
1405 | 0 | str_append_tabescaped(dest, field->key); |
1406 | 0 | str_printfa(dest, "\t%u", count); |
1407 | 0 | for (unsigned int i = 0; i < count; i++) { |
1408 | 0 | str_append_c(dest, '\t'); |
1409 | 0 | str_append_tabescaped(dest, strlist[i]); |
1410 | 0 | } |
1411 | 0 | } |
1412 | 0 | } |
1413 | 0 | } |
1414 | | |
1415 | | void event_export(const struct event *event, string_t *dest) |
1416 | 0 | { |
1417 | | /* required fields: */ |
1418 | 0 | str_printfa(dest, "%"PRIdTIME_T"\t%u", |
1419 | 0 | event->tv_created.tv_sec, |
1420 | 0 | (unsigned int)event->tv_created.tv_usec); |
1421 | | |
1422 | | /* optional fields: */ |
1423 | 0 | if (event->source_filename != NULL) { |
1424 | 0 | str_append_c(dest, '\t'); |
1425 | 0 | str_append_c(dest, EVENT_CODE_SOURCE); |
1426 | 0 | str_append_tabescaped(dest, event->source_filename); |
1427 | 0 | str_printfa(dest, "\t%u", event->source_linenum); |
1428 | 0 | } |
1429 | 0 | if (event->always_log_source) { |
1430 | 0 | str_append_c(dest, '\t'); |
1431 | 0 | str_append_c(dest, EVENT_CODE_ALWAYS_LOG_SOURCE); |
1432 | 0 | } |
1433 | 0 | if (event->tv_last_sent.tv_sec != 0) { |
1434 | 0 | str_printfa(dest, "\t%c%"PRIdTIME_T"\t%u", |
1435 | 0 | EVENT_CODE_TV_LAST_SENT, |
1436 | 0 | event->tv_last_sent.tv_sec, |
1437 | 0 | (unsigned int)event->tv_last_sent.tv_usec); |
1438 | 0 | } |
1439 | 0 | if (event->sending_name != NULL) { |
1440 | 0 | str_append_c(dest, '\t'); |
1441 | 0 | str_append_c(dest, EVENT_CODE_SENDING_NAME); |
1442 | 0 | str_append_tabescaped(dest, event->sending_name); |
1443 | 0 | } |
1444 | |
1445 | 0 | if (array_is_created(&event->categories)) { |
1446 | 0 | struct event_category *cat; |
1447 | 0 | array_foreach_elem(&event->categories, cat) { |
1448 | 0 | str_append_c(dest, '\t'); |
1449 | 0 | str_append_c(dest, EVENT_CODE_CATEGORY); |
1450 | 0 | str_append_tabescaped(dest, cat->name); |
1451 | 0 | } |
1452 | 0 | } |
1453 | |
1454 | 0 | if (array_is_created(&event->fields)) { |
1455 | 0 | const struct event_field *field; |
1456 | 0 | array_foreach(&event->fields, field) { |
1457 | 0 | str_append_c(dest, '\t'); |
1458 | 0 | event_export_field_value(dest, field); |
1459 | 0 | } |
1460 | 0 | } |
1461 | 0 | } |
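To illustrate the wire format produced by event_export() above, a named event with a source location, one category and one string field would serialize roughly as the following tab-separated record (all values are made up; \t marks the literal tabs):

	1631234567\t123456\tsexample.c\t42\tnexample_event\tcexample\tSuser\texampleuser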
1462 | | |
1463 | | bool event_import(struct event *event, const char *str, const char **error_r) |
1464 | 0 | { |
1465 | 0 | return event_import_unescaped(event, t_strsplit_tabescaped(str), |
1466 | 0 | error_r); |
1467 | 0 | } |
1468 | | |
1469 | | static bool event_import_tv(const char *arg_secs, const char *arg_usecs, |
1470 | | struct timeval *tv_r, const char **error_r) |
1471 | 0 | { |
1472 | 0 | unsigned int usecs; |
1473 | |
1474 | 0 | if (str_to_time(arg_secs, &tv_r->tv_sec) < 0) { |
1475 | 0 | *error_r = "Invalid timeval seconds parameter"; |
1476 | 0 | return FALSE; |
1477 | 0 | } |
1478 | | |
1479 | 0 | if (arg_usecs == NULL) { |
1480 | 0 | *error_r = "Timeval missing microseconds parameter"; |
1481 | 0 | return FALSE; |
1482 | 0 | } |
1483 | 0 | if (str_to_uint(arg_usecs, &usecs) < 0 || usecs >= 1000000) { |
1484 | 0 | *error_r = "Invalid timeval microseconds parameter"; |
1485 | 0 | return FALSE; |
1486 | 0 | } |
1487 | 0 | tv_r->tv_usec = usecs; |
1488 | 0 | return TRUE; |
1489 | 0 | } |
1490 | | |
1491 | | static bool |
1492 | | event_import_strlist(struct event *event, struct event_field *field, |
1493 | | const char *const **_args, const char **error_r) |
1494 | 0 | { |
1495 | 0 | const char *const *args = *_args; |
1496 | 0 | unsigned int count, i; |
1497 | |
1498 | 0 | field->value_type = EVENT_FIELD_VALUE_TYPE_STRLIST; |
1499 | 0 | if (str_to_uint(args[0], &count) < 0) { |
1500 | 0 | *error_r = t_strdup_printf("Field '%s' has invalid count: '%s'", |
1501 | 0 | field->key, args[0]); |
1502 | 0 | return FALSE; |
1503 | 0 | } |
1504 | 0 | p_array_init(&field->value.strlist, event->pool, count); |
1505 | 0 | for (i = 1; i <= count && args[i] != NULL; i++) { |
1506 | 0 | const char *str = p_strdup(event->pool, args[i]); |
1507 | 0 | array_push_back(&field->value.strlist, &str); |
1508 | 0 | } |
1509 | 0 | if (i < count) { |
1510 | 0 | *error_r = t_strdup_printf("Field '%s' has too few values", |
1511 | 0 | field->key); |
1512 | 0 | return FALSE; |
1513 | 0 | } |
1514 | 0 | *_args += count; |
1515 | 0 | return TRUE; |
1516 | 0 | } |
1517 | | |
1518 | | static bool |
1519 | | event_import_field(struct event *event, enum event_code code, const char *arg, |
1520 | | const char *const **_args, const char **error_r) |
1521 | 0 | { |
1522 | 0 | const char *const *args = *_args; |
1523 | 0 | const char *error; |
1524 | |
1525 | 0 | if (*arg == '\0') { |
1526 | 0 | *error_r = "Field name is missing"; |
1527 | 0 | return FALSE; |
1528 | 0 | } |
1529 | 0 | struct event_field *field = event_get_field(event, arg, TRUE); |
1530 | 0 | if (args[0] == NULL) { |
1531 | 0 | *error_r = "Field value is missing"; |
1532 | 0 | return FALSE; |
1533 | 0 | } |
1534 | 0 | switch (code) { |
1535 | 0 | case EVENT_CODE_FIELD_INTMAX: |
1536 | 0 | field->value_type = EVENT_FIELD_VALUE_TYPE_INTMAX; |
1537 | 0 | if (str_to_intmax(*args, &field->value.intmax) < 0) { |
1538 | 0 | *error_r = t_strdup_printf( |
1539 | 0 | "Invalid field value '%s' number for '%s'", |
1540 | 0 | *args, field->key); |
1541 | 0 | return FALSE; |
1542 | 0 | } |
1543 | 0 | break; |
1544 | 0 | case EVENT_CODE_FIELD_STR: |
1545 | 0 | if (field->value_type == EVENT_FIELD_VALUE_TYPE_STR && |
1546 | 0 | null_strcmp(field->value.str, *args) == 0) { |
1547 | | /* already identical value */ |
1548 | 0 | break; |
1549 | 0 | } |
1550 | 0 | field->value_type = EVENT_FIELD_VALUE_TYPE_STR; |
1551 | 0 | field->value.str = p_strdup(event->pool, *args); |
1552 | 0 | break; |
1553 | 0 | case EVENT_CODE_FIELD_TIMEVAL: |
1554 | 0 | field->value_type = EVENT_FIELD_VALUE_TYPE_TIMEVAL; |
1555 | 0 | if (!event_import_tv(args[0], args[1], |
1556 | 0 | &field->value.timeval, &error)) { |
1557 | 0 | *error_r = t_strdup_printf("Field '%s' value '%s': %s", |
1558 | 0 | field->key, args[1], error); |
1559 | 0 | return FALSE; |
1560 | 0 | } |
1561 | 0 | args++; |
1562 | 0 | break; |
1563 | 0 | case EVENT_CODE_FIELD_IP: |
1564 | 0 | field->value_type = EVENT_FIELD_VALUE_TYPE_IP; |
1565 | 0 | if (net_addr2ip(*args, &field->value.ip) < 0) { |
1566 | 0 | *error_r = t_strdup_printf( |
1567 | 0 | "Invalid field value '%s' IP for '%s'", |
1568 | 0 | *args, field->key); |
1569 | 0 | return FALSE; |
1570 | 0 | } |
1571 | 0 | break; |
1572 | 0 | case EVENT_CODE_FIELD_STRLIST: |
1573 | 0 | if (!event_import_strlist(event, field, &args, error_r)) |
1574 | 0 | return FALSE; |
1575 | 0 | break; |
1576 | 0 | default: |
1577 | 0 | i_unreached(); |
1578 | 0 | } |
1579 | 0 | *_args = args; |
1580 | 0 | return TRUE; |
1581 | 0 | } |
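A short sketch of how many value tokens each field code consumes, following the switch above (field names are illustrative):

/* 'I' intmax:   "Ibytes"      "4096"                     one value token
 * 'S' string:   "Suser"       "alice"                    one value token
 * 'T' timeval:  "Tlast_run"   "1700000000", "250000"     two value tokens
 * 'P' IP:       "Premote_ip"  "192.0.2.1"                one value token
 * 'L' strlist:  "Ltags"       "2", "red", "green"        1 + count value tokens
 */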
1582 | | |
1583 | | |
1584 | | static bool |
1585 | | event_import_arg(struct event *event, const char *const **_args, |
1586 | | const char **error_r) |
1587 | 0 | { |
1588 | 0 | const char *const *args = *_args; |
1589 | 0 | const char *error, *arg = *args; |
1590 | 0 | enum event_code code = arg[0]; |
  1591 |  | 
1592 | 0 | arg++; |
1593 | 0 | switch (code) { |
1594 | 0 | case EVENT_CODE_ALWAYS_LOG_SOURCE: |
1595 | 0 | event->always_log_source = TRUE; |
1596 | 0 | break; |
1597 | 0 | case EVENT_CODE_CATEGORY: { |
1598 | 0 | struct event_category *category = |
1599 | 0 | event_category_find_registered(arg); |
1600 | 0 | if (category == NULL) { |
1601 | 0 | *error_r = t_strdup_printf( |
1602 | 0 | "Unregistered category: '%s'", arg); |
1603 | 0 | return FALSE; |
1604 | 0 | } |
1605 | 0 | if (!array_is_created(&event->categories)) |
1606 | 0 | p_array_init(&event->categories, event->pool, 4); |
1607 | 0 | if (!event_find_category(event, category)) |
1608 | 0 | array_push_back(&event->categories, &category); |
1609 | 0 | break; |
1610 | 0 | } |
1611 | 0 | case EVENT_CODE_TV_LAST_SENT: |
1612 | 0 | if (!event_import_tv(arg, args[1], &event->tv_last_sent, |
1613 | 0 | &error)) { |
1614 | 0 | *error_r = t_strdup_printf( |
1615 | 0 | "Invalid tv_last_sent: %s", error); |
1616 | 0 | return FALSE; |
1617 | 0 | } |
1618 | 0 | args++; |
1619 | 0 | break; |
1620 | 0 | case EVENT_CODE_SENDING_NAME: |
1621 | 0 | i_free(event->sending_name); |
1622 | 0 | event->sending_name = i_strdup(arg); |
1623 | 0 | break; |
1624 | 0 | case EVENT_CODE_SOURCE: { |
1625 | 0 | unsigned int linenum; |
  1626 |  | 
1627 | 0 | if (args[1] == NULL) { |
1628 | 0 | *error_r = "Source line number missing"; |
1629 | 0 | return FALSE; |
1630 | 0 | } |
1631 | 0 | if (str_to_uint(args[1], &linenum) < 0) { |
1632 | 0 | *error_r = "Invalid Source line number"; |
1633 | 0 | return FALSE; |
1634 | 0 | } |
1635 | 0 | event_set_source(event, arg, linenum, FALSE); |
1636 | 0 | args++; |
1637 | 0 | break; |
1638 | 0 | } |
1639 | 0 | case EVENT_CODE_FIELD_INTMAX: |
1640 | 0 | case EVENT_CODE_FIELD_STR: |
1641 | 0 | case EVENT_CODE_FIELD_STRLIST: |
1642 | 0 | case EVENT_CODE_FIELD_TIMEVAL: |
1643 | 0 | case EVENT_CODE_FIELD_IP: { |
1644 | 0 | args++; |
1645 | 0 | if (!event_import_field(event, code, arg, &args, error_r)) |
1646 | 0 | return FALSE; |
1647 | 0 | break; |
1648 | 0 | } |
1649 | 0 | } |
1650 | 0 | *_args = args; |
1651 | 0 | return TRUE; |
1652 | 0 | } |
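The dispatch above keys off the first character of each token; a sketch of the single-purpose codes with invented values:

/* "a"                       -> always_log_source = TRUE (no extra tokens)
 * "cservice"                -> add the already-registered category "service"
 * "l1700000000", "250000"   -> tv_last_sent (consumes the following usecs token)
 * "nlogin_attempt"          -> sending_name = "login_attempt"
 * "smain.c", "42"           -> source location main.c line 42
 */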
1653 | | |
1654 | | bool event_import_unescaped(struct event *event, const char *const *args, |
1655 | | const char **error_r) |
1656 | 0 | { |
1657 | 0 | const char *error; |
1658 | | |
1659 | | /* Event's create callback has already added service:<name> category. |
1660 | | This imported event may be coming from another service process |
1661 | | though, so clear it out. */ |
1662 | 0 | if (array_is_created(&event->categories)) |
1663 | 0 | array_clear(&event->categories); |
1664 | | |
1665 | | /* required fields: */ |
1666 | 0 | if (args[0] == NULL) { |
1667 | 0 | *error_r = "Missing required fields"; |
1668 | 0 | return FALSE; |
1669 | 0 | } |
1670 | 0 | if (!event_import_tv(args[0], args[1], &event->tv_created, &error)) { |
1671 | 0 | *error_r = t_strdup_printf("Invalid tv_created: %s", error); |
1672 | 0 | return FALSE; |
1673 | 0 | } |
1674 | 0 | args += 2; |
1675 | | |
1676 | | /* optional fields: */ |
1677 | 0 | while (*args != NULL) { |
1678 | 0 | if (!event_import_arg(event, &args, error_r)) |
1679 | 0 | return FALSE; |
1680 | 0 | args++; |
1681 | 0 | } |
1682 | 0 | return TRUE; |
1683 | 0 | } |
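Putting it together, a hedged example of the unescaped argument vector this function accepts; the first two tokens are required, everything after them is optional, and all names and values below are invented (the `event` variable is assumed to exist in the caller).

const char *import_args[] = {
        "1700000000", "250000",         /* tv_created (required) */
        "smain.c", "42",                /* source location */
        "ntest_event",                  /* sending name */
        "Iattempts", "3",               /* intmax field "attempts" = 3 */
        NULL
};
const char *error;
if (!event_import_unescaped(event, import_args, &error))
        i_error("event import failed: %s", error);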
1684 | | |
1685 | | void event_register_callback(event_callback_t *callback) |
1686 | 0 | { |
1687 | 0 | array_push_back(&event_handlers, &callback); |
1688 | 0 | } |
1689 | | |
1690 | | void event_unregister_callback(event_callback_t *callback) |
1691 | 0 | { |
1692 | 0 | unsigned int idx; |
  1693 |  | 
1694 | 0 | if (!array_lsearch_ptr_idx(&event_handlers, callback, &idx)) |
1695 | 0 | i_unreached(); |
1696 | 0 | array_delete(&event_handlers, idx, 1); |
1697 | 0 | } |
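A hedged registration sketch; my_event_handler is hypothetical and must match the event_callback_t typedef from the lib-event headers (its signature is not shown in this excerpt):

/* during library or plugin initialization */
event_register_callback(my_event_handler);
/* ... and symmetrically at deinit; unregistering a handler that was never
 * registered trips the i_unreached() above. */
event_unregister_callback(my_event_handler);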
1698 | | |
1699 | | void event_category_register_callback(event_category_callback_t *callback) |
1700 | 1 | { |
1701 | 1 | array_push_back(&event_category_callbacks, &callback); |
1702 | 1 | } |
1703 | | |
1704 | | void event_category_unregister_callback(event_category_callback_t *callback) |
1705 | 0 | { |
1706 | 0 | unsigned int idx; |
  1707 |  | 
1708 | 0 | if (!array_lsearch_ptr_idx(&event_category_callbacks, callback, &idx)) |
1709 | 0 | i_unreached(); |
1710 | 0 | array_delete(&event_category_callbacks, idx, 1); |
1711 | 0 | } |
1712 | | |
1713 | | static struct event_passthrough * |
1714 | | event_passthrough_set_append_log_prefix(const char *prefix) |
1715 | 0 | { |
1716 | 0 | event_set_append_log_prefix(last_passthrough_event(), prefix); |
1717 | 0 | return &event_passthrough_vfuncs; |
1718 | 0 | } |
1719 | | |
1720 | | static struct event_passthrough * |
1721 | | event_passthrough_replace_log_prefix(const char *prefix) |
1722 | 0 | { |
1723 | 0 | event_replace_log_prefix(last_passthrough_event(), prefix); |
1724 | 0 | return &event_passthrough_vfuncs; |
1725 | 0 | } |
1726 | | |
1727 | | static struct event_passthrough * |
1728 | | event_passthrough_set_name(const char *name) |
1729 | 0 | { |
1730 | 0 | event_set_name(last_passthrough_event(), name); |
1731 | 0 | return &event_passthrough_vfuncs; |
1732 | 0 | } |
1733 | | |
1734 | | static struct event_passthrough * |
1735 | | event_passthrough_set_source(const char *filename, |
1736 | | unsigned int linenum, bool literal_fname) |
1737 | 0 | { |
1738 | 0 | event_set_source(last_passthrough_event(), filename, |
1739 | 0 | linenum, literal_fname); |
1740 | 0 | return &event_passthrough_vfuncs; |
1741 | 0 | } |
1742 | | |
1743 | | static struct event_passthrough * |
1744 | | event_passthrough_set_always_log_source(void) |
1745 | 0 | { |
1746 | 0 | event_set_always_log_source(last_passthrough_event()); |
1747 | 0 | return &event_passthrough_vfuncs; |
1748 | 0 | } |
1749 | | |
1750 | | static struct event_passthrough * |
1751 | | event_passthrough_add_categories(struct event_category *const *categories) |
1752 | 0 | { |
1753 | 0 | event_add_categories(last_passthrough_event(), categories); |
1754 | 0 | return &event_passthrough_vfuncs; |
1755 | 0 | } |
1756 | | |
1757 | | static struct event_passthrough * |
1758 | | event_passthrough_add_category(struct event_category *category) |
1759 | 0 | { |
1760 | 0 | event_add_category(last_passthrough_event(), category); |
1761 | 0 | return &event_passthrough_vfuncs; |
1762 | 0 | } |
1763 | | |
1764 | | static struct event_passthrough * |
1765 | | event_passthrough_add_fields(const struct event_add_field *fields) |
1766 | 0 | { |
1767 | 0 | event_add_fields(last_passthrough_event(), fields); |
1768 | 0 | return &event_passthrough_vfuncs; |
1769 | 0 | } |
1770 | | |
1771 | | static struct event_passthrough * |
1772 | | event_passthrough_add_str(const char *key, const char *value) |
1773 | 0 | { |
1774 | 0 | event_add_str(last_passthrough_event(), key, value); |
1775 | 0 | return &event_passthrough_vfuncs; |
1776 | 0 | } |
1777 | | |
1778 | | static struct event_passthrough * |
1779 | | event_passthrough_strlist_append(const char *key, const char *value) |
1780 | 0 | { |
1781 | 0 | event_strlist_append(last_passthrough_event(), key, value); |
1782 | 0 | return &event_passthrough_vfuncs; |
1783 | 0 | } |
1784 | | |
1785 | | static struct event_passthrough * |
1786 | | event_passthrough_strlist_replace(const char *key, const char *const *values, |
1787 | | unsigned int count) |
1788 | 0 | { |
1789 | 0 | event_strlist_replace(last_passthrough_event(), key, values, count); |
1790 | 0 | return &event_passthrough_vfuncs; |
1791 | 0 | } |
1792 | | |
1793 | | static struct event_passthrough * |
1794 | | event_passthrough_add_int(const char *key, intmax_t num) |
1795 | 0 | { |
1796 | 0 | event_add_int(last_passthrough_event(), key, num); |
1797 | 0 | return &event_passthrough_vfuncs; |
1798 | 0 | } |
1799 | | |
1800 | | static struct event_passthrough * |
1801 | | event_passthrough_add_int_nonzero(const char *key, intmax_t num) |
1802 | 0 | { |
1803 | 0 | event_add_int_nonzero(last_passthrough_event(), key, num); |
1804 | 0 | return &event_passthrough_vfuncs; |
1805 | 0 | } |
1806 | | |
1807 | | static struct event_passthrough * |
1808 | | event_passthrough_add_timeval(const char *key, const struct timeval *tv) |
1809 | 0 | { |
1810 | 0 | event_add_timeval(last_passthrough_event(), key, tv); |
1811 | 0 | return &event_passthrough_vfuncs; |
1812 | 0 | } |
1813 | | |
1814 | | static struct event_passthrough * |
1815 | | event_passthrough_add_ip(const char *key, const struct ip_addr *ip) |
1816 | 0 | { |
1817 | 0 | event_add_ip(last_passthrough_event(), key, ip); |
1818 | 0 | return &event_passthrough_vfuncs; |
1819 | 0 | } |
1820 | | |
1821 | | static struct event_passthrough * |
1822 | | event_passthrough_inc_int(const char *key, intmax_t num) |
1823 | 0 | { |
1824 | 0 | event_inc_int(last_passthrough_event(), key, num); |
1825 | 0 | return &event_passthrough_vfuncs; |
1826 | 0 | } |
1827 | | |
1828 | | static struct event_passthrough * |
1829 | | event_passthrough_clear_field(const char *key) |
1830 | 0 | { |
1831 | 0 | event_field_clear(last_passthrough_event(), key); |
1832 | 0 | return &event_passthrough_vfuncs; |
1833 | 0 | } |
1834 | | |
1835 | | static struct event *event_passthrough_event(void) |
1836 | 0 | { |
1837 | 0 | struct event *event = last_passthrough_event(); |
1838 | 0 | event_last_passthrough = NULL; |
1839 | 0 | return event; |
1840 | 0 | } |
1841 | | |
1842 | | struct event_passthrough event_passthrough_vfuncs = { |
1843 | | .append_log_prefix = event_passthrough_set_append_log_prefix, |
1844 | | .replace_log_prefix = event_passthrough_replace_log_prefix, |
1845 | | .set_name = event_passthrough_set_name, |
1846 | | .set_source = event_passthrough_set_source, |
1847 | | .set_always_log_source = event_passthrough_set_always_log_source, |
1848 | | .add_categories = event_passthrough_add_categories, |
1849 | | .add_category = event_passthrough_add_category, |
1850 | | .add_fields = event_passthrough_add_fields, |
1851 | | .add_str = event_passthrough_add_str, |
1852 | | .add_int = event_passthrough_add_int, |
1853 | | .add_int_nonzero = event_passthrough_add_int_nonzero, |
1854 | | .add_timeval = event_passthrough_add_timeval, |
1855 | | .add_ip = event_passthrough_add_ip, |
1856 | | .inc_int = event_passthrough_inc_int, |
1857 | | .strlist_append = event_passthrough_strlist_append, |
1858 | | .strlist_replace = event_passthrough_strlist_replace, |
1859 | | .clear_field = event_passthrough_clear_field, |
1860 | | .event = event_passthrough_event, |
1861 | | }; |
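A minimal usage sketch of the chain built on this vtable: event_create_passthrough() (part of the lib-event API, not shown in this excerpt) hands out these vfuncs, each setter returns the same vtable so calls can be chained, and ->event() returns the underlying struct event * while clearing event_last_passthrough. The parent event and the names below are illustrative.

struct event *e = event_create_passthrough(parent_event)->
        set_name("imap_command_finished")->
        add_str("cmd_name", "SELECT")->
        add_int("bytes_out", 4096)->
        event();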
1862 | | |
1863 | | void event_enable_user_cpu_usecs(struct event *event) |
1864 | 0 | { |
1865 | 0 | get_self_rusage(&event->ru_last); |
1866 | 0 | } |
1867 | | |
1868 | | void lib_event_init(void) |
1869 | 1 | { |
1870 | 1 | i_array_init(&event_handlers, 4); |
1871 | 1 | i_array_init(&event_category_callbacks, 4); |
1872 | 1 | i_array_init(&event_registered_categories_internal, 16); |
1873 | 1 | i_array_init(&event_registered_categories_representative, 16); |
1874 | 1 | } |
1875 | | |
1876 | | void lib_event_deinit(void) |
1877 | 0 | { |
1878 | 0 | struct event_internal_category *internal; |
  1879 |  | 
1880 | 0 | event_unset_global_debug_log_filter(); |
1881 | 0 | event_unset_global_debug_send_filter(); |
1882 | 0 | event_unset_global_core_log_filter(); |
1883 | 0 | for (struct event *event = events; event != NULL; event = event->next) { |
1884 | 0 | i_warning("Event %p leaked (parent=%p): %s:%u", |
1885 | 0 | event, event->parent, |
1886 | 0 | event->source_filename, event->source_linenum); |
1887 | 0 | } |
1888 | | /* categories cannot be unregistered, so just free them here */ |
1889 | 0 | array_foreach_elem(&event_registered_categories_internal, internal) { |
1890 | 0 | i_free(internal->name); |
1891 | 0 | i_free(internal); |
1892 | 0 | } |
1893 | 0 | array_free(&event_handlers); |
1894 | 0 | array_free(&event_category_callbacks); |
1895 | 0 | array_free(&event_registered_categories_internal); |
1896 | 0 | array_free(&event_registered_categories_representative); |
1897 | 0 | array_free(&global_event_stack); |
1898 | 0 | } |