/src/lldpd/src/daemon/event.c
/* -*- mode: c; c-file-style: "openbsd" -*- */
/*
 * Copyright (c) 2012 Vincent Bernat <bernat@luffy.cx>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "lldpd.h"
#include "trace.h"

#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include <time.h>
#include <fcntl.h>
#if defined(__clang__)
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wdocumentation"
#endif
#include <event2/event.h>
#include <event2/bufferevent.h>
#include <event2/buffer.h>
#if defined(__clang__)
# pragma clang diagnostic pop
#endif

#define EVENT_BUFFER 1024

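/* Relay libevent's own log messages to lldpd's logging subsystem;
 * registered with event_set_log_callback() in levent_init(). */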
static void
levent_log_cb(int severity, const char *msg)
{
	switch (severity) {
	case _EVENT_LOG_DEBUG:
		log_debug("libevent", "%s", msg);
		break;
	case _EVENT_LOG_MSG:
		log_info("libevent", "%s", msg);
		break;
	case _EVENT_LOG_WARN:
		log_warnx("libevent", "%s", msg);
		break;
	case _EVENT_LOG_ERR:
		log_warnx("libevent", "%s", msg);
		break;
	}
}

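/* A watched file descriptor wrapped in a libevent event. The same list
 * structure serves both SNMP file descriptors (cfg->g_snmp_fds) and
 * per-interface receive descriptors (hardware->h_recv). */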
struct lldpd_events {
	TAILQ_ENTRY(lldpd_events) next;
	struct event *ev;
};
TAILQ_HEAD(ev_l, lldpd_events);

#define levent_snmp_fds(cfg) ((struct ev_l *)(cfg)->g_snmp_fds)
#define levent_hardware_fds(hardware) ((struct ev_l *)(hardware)->h_recv)

#ifdef USE_SNMP
# include <net-snmp/net-snmp-config.h>
# include <net-snmp/net-snmp-includes.h>
# include <net-snmp/agent/net-snmp-agent-includes.h>
# include <net-snmp/agent/snmp_vars.h>

/* Compatibility with older versions of NetSNMP */
# ifndef HAVE_SNMP_SELECT_INFO2
#  define netsnmp_large_fd_set fd_set
#  define snmp_read2 snmp_read
#  define snmp_select_info2 snmp_select_info
#  define netsnmp_large_fd_set_init(...)
#  define netsnmp_large_fd_set_cleanup(...)
#  define NETSNMP_LARGE_FD_SET FD_SET
#  define NETSNMP_LARGE_FD_CLR FD_CLR
#  define NETSNMP_LARGE_FD_ZERO FD_ZERO
#  define NETSNMP_LARGE_FD_ISSET FD_ISSET
# else
#  include <net-snmp/library/large_fd_set.h>
# endif

static void levent_snmp_update(struct lldpd *);

/*
 * Callback function when we have something to read from SNMP.
 *
 * This function is called when a read event fires on one of the SNMP file
 * descriptors. We need to call snmp_read() on it.
 */
static void
levent_snmp_read(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	netsnmp_large_fd_set fdset;
	(void)what;
	netsnmp_large_fd_set_init(&fdset, FD_SETSIZE);
	NETSNMP_LARGE_FD_ZERO(&fdset);
	NETSNMP_LARGE_FD_SET(fd, &fdset);
	snmp_read2(&fdset);
	levent_snmp_update(cfg);
}

/*
 * Callback function for an SNMP timeout.
 *
 * An SNMP timeout has occurred. Call `snmp_timeout()` to handle it.
 */
static void
levent_snmp_timeout(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	(void)what;
	(void)fd;
	snmp_timeout();
	run_alarms();
	levent_snmp_update(cfg);
}

/*
 * Watch a new SNMP FD.
 *
 * @param cfg The lldpd configuration (the libevent base is cfg->g_base).
 * @param fd  The file descriptor we want to watch.
 *
 * The file descriptor is appended to the list of file descriptors we
 * want to watch.
 */
static void
levent_snmp_add_fd(struct lldpd *cfg, int fd)
{
	struct event_base *base = cfg->g_base;
	struct lldpd_events *snmpfd = calloc(1, sizeof(struct lldpd_events));
	if (!snmpfd) {
		log_warn("event", "unable to allocate memory for new SNMP event");
		return;
	}
	levent_make_socket_nonblocking(fd);
	if ((snmpfd->ev = event_new(base, fd, EV_READ | EV_PERSIST, levent_snmp_read,
		 cfg)) == NULL) {
		log_warnx("event", "unable to allocate a new SNMP event for FD %d", fd);
		free(snmpfd);
		return;
	}
	if (event_add(snmpfd->ev, NULL) == -1) {
		log_warnx("event", "unable to schedule new SNMP event for FD %d", fd);
		event_free(snmpfd->ev);
		free(snmpfd);
		return;
	}
	TAILQ_INSERT_TAIL(levent_snmp_fds(cfg), snmpfd, next);
}

/*
 * Update the SNMP event loop.
 *
 * New events are added and stale ones are removed. This function should be
 * called every time an SNMP event happens: when handling an SNMP packet, on
 * an SNMP timeout, or when sending an SNMP packet. It keeps libevent in
 * sync with NetSNMP.
 *
 * @param cfg The lldpd configuration (the libevent base is cfg->g_base).
 */
static void
levent_snmp_update(struct lldpd *cfg)
{
	int maxfd = 0;
	int block = 1;
	struct timeval timeout;
	static int howmany = 0;
	int added = 0, removed = 0, current = 0;
	struct lldpd_events *snmpfd, *snmpfd_next;

	/* snmp_select_info() can be tricky to understand. We set `block` to
	   1 to mean that we don't request a timeout. snmp_select_info()
	   will reset `block` to 0 if it wants us to set up a timeout. In
	   this timeout, `snmp_timeout()` should be invoked.

	   Each FD in `fdset` will need to be watched for reading. If one of
	   them becomes active, `snmp_read()` should be called on it.
	*/

	netsnmp_large_fd_set fdset;
	netsnmp_large_fd_set_init(&fdset, FD_SETSIZE);
	NETSNMP_LARGE_FD_ZERO(&fdset);
	snmp_select_info2(&maxfd, &fdset, &timeout, &block);

	/* We need to untrack any event whose FD is not in `fdset`
	   anymore */
	for (snmpfd = TAILQ_FIRST(levent_snmp_fds(cfg)); snmpfd; snmpfd = snmpfd_next) {
		snmpfd_next = TAILQ_NEXT(snmpfd, next);
		if (event_get_fd(snmpfd->ev) >= maxfd ||
		    (!NETSNMP_LARGE_FD_ISSET(event_get_fd(snmpfd->ev), &fdset))) {
			event_free(snmpfd->ev);
			TAILQ_REMOVE(levent_snmp_fds(cfg), snmpfd, next);
			free(snmpfd);
			removed++;
		} else {
			NETSNMP_LARGE_FD_CLR(event_get_fd(snmpfd->ev), &fdset);
			current++;
		}
	}

	/* Invariant: FDs remaining in `fdset` are not in our list of
	   watched FDs */
	for (int fd = 0; fd < maxfd; fd++) {
		if (NETSNMP_LARGE_FD_ISSET(fd, &fdset)) {
			levent_snmp_add_fd(cfg, fd);
			added++;
		}
	}
	current += added;
	if (howmany != current) {
		log_debug("event",
		    "added %d events, removed %d events, total of %d events", added,
		    removed, current);
		howmany = current;
	}

	/* If needed, handle timeout */
	if (evtimer_add(cfg->g_snmp_timeout, block ? NULL : &timeout) == -1)
		log_warnx("event", "unable to schedule timeout function for SNMP");

	netsnmp_large_fd_set_cleanup(&fdset);
}
#endif /* USE_SNMP */

struct lldpd_one_client {
	TAILQ_ENTRY(lldpd_one_client) next;
	struct lldpd *cfg;
	struct bufferevent *bev;
	int subscribed; /* Is this client subscribed to changes? */
};
TAILQ_HEAD(, lldpd_one_client) lldpd_clients;

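/* Remove a client from the list of connected clients and release the
 * associated bufferevent; since the bufferevent was created with
 * BEV_OPT_CLOSE_ON_FREE, this also closes the underlying socket. */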
static void
levent_ctl_free_client(struct lldpd_one_client *client)
{
	if (client && client->bev) bufferevent_free(client->bev);
	if (client) {
		TAILQ_REMOVE(&lldpd_clients, client, next);
		free(client);
	}
}

static void
levent_ctl_close_clients()
{
	struct lldpd_one_client *client, *client_next;
	for (client = TAILQ_FIRST(&lldpd_clients); client; client = client_next) {
		client_next = TAILQ_NEXT(client, next);
		levent_ctl_free_client(client);
	}
}

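/* Queue one message for a client. On the wire, a message is a fixed-size
 * header immediately followed by `len` bytes of payload:
 *
 *     +--------------------+-------------------+
 *     | struct hmsg_header | len bytes of data |
 *     +--------------------+-------------------+
 *
 * On a write error the client is destroyed, so callers must not touch
 * `client` after a negative return value. */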
static ssize_t
levent_ctl_send(struct lldpd_one_client *client, int type, void *data, size_t len)
{
	struct bufferevent *bev = client->bev;
	struct hmsg_header hdr = { .len = len, .type = type };
	bufferevent_disable(bev, EV_WRITE);
	if (bufferevent_write(bev, &hdr, sizeof(struct hmsg_header)) == -1 ||
	    (len > 0 && bufferevent_write(bev, data, len) == -1)) {
		log_warnx("event", "unable to create answer to client");
		levent_ctl_free_client(client);
		return -1;
	}
	bufferevent_enable(bev, EV_WRITE);
	return len;
}

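/* Broadcast a neighbor change to every client that subscribed to
 * notifications. The change is serialized once, lazily, when the first
 * subscribed client is encountered. */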
void
levent_ctl_notify(char *ifname, int state, struct lldpd_port *neighbor)
{
	struct lldpd_one_client *client, *client_next;
	struct lldpd_neighbor_change neigh = { .ifname = ifname,
		.state = state,
		.neighbor = neighbor };
	void *output = NULL;
	ssize_t output_len = 0;

	/* Don't use TAILQ_FOREACH, the client may be deleted in case of errors. */
	log_debug("control", "notify clients of neighbor changes");
	for (client = TAILQ_FIRST(&lldpd_clients); client; client = client_next) {
		client_next = TAILQ_NEXT(client, next);
		if (!client->subscribed) continue;

		if (output == NULL) {
			/* Ugly hack: we don't want to transmit a list of
			 * ports. We patch the port to avoid this. */
			TAILQ_ENTRY(lldpd_port) backup_p_entries;
			memcpy(&backup_p_entries, &neighbor->p_entries,
			    sizeof(backup_p_entries));
			memset(&neighbor->p_entries, 0, sizeof(backup_p_entries));
			output_len = lldpd_neighbor_change_serialize(&neigh, &output);
			memcpy(&neighbor->p_entries, &backup_p_entries,
			    sizeof(backup_p_entries));

			if (output_len <= 0) {
				log_warnx("event",
				    "unable to serialize changed neighbor");
				return;
			}
		}

		levent_ctl_send(client, NOTIFICATION, output, output_len);
	}

	free(output);
}

static ssize_t
levent_ctl_send_cb(void *out, int type, void *data, size_t len)
{
	struct lldpd_one_client *client = out;
	return levent_ctl_send(client, type, data, len);
}

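/* Read callback for a client connection. Messages may arrive in pieces, so
 * we wait until a full header, then a full payload, is buffered before
 * dispatching the request to client_handle_client(). Malformed or oversized
 * messages get the client disconnected. */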
static void
levent_ctl_recv(struct bufferevent *bev, void *ptr)
{
	struct lldpd_one_client *client = ptr;
	struct evbuffer *buffer = bufferevent_get_input(bev);
	size_t buffer_len = evbuffer_get_length(buffer);
	struct hmsg_header hdr;
	void *data = NULL;

	log_debug("control", "receive data on Unix socket");
	if (buffer_len < sizeof(struct hmsg_header)) return; /* Not enough data yet */
	if (evbuffer_copyout(buffer, &hdr, sizeof(struct hmsg_header)) !=
	    sizeof(struct hmsg_header)) {
		log_warnx("event", "not able to read header");
		return;
	}
	if (hdr.len > HMSG_MAX_SIZE) {
		log_warnx("event", "message received is too large");
		goto recv_error;
	}

	if (buffer_len < hdr.len + sizeof(struct hmsg_header))
		return; /* Not enough data yet */
	if (hdr.len > 0 && (data = malloc(hdr.len)) == NULL) {
		log_warnx("event", "not enough memory");
		goto recv_error;
	}
	evbuffer_drain(buffer, sizeof(struct hmsg_header));
	if (hdr.len > 0) evbuffer_remove(buffer, data, hdr.len);

	/* Currently, we should not receive notification acknowledgments. But
	 * if we receive one, we can discard it. */
	if (hdr.len == 0 && hdr.type == NOTIFICATION) return;
	if (client_handle_client(client->cfg, levent_ctl_send_cb, client, hdr.type,
		data, hdr.len, &client->subscribed) == -1)
		goto recv_error;
	free(data);
	return;

recv_error:
	free(data);
	levent_ctl_free_client(client);
}

static void
levent_ctl_event(struct bufferevent *bev, short events, void *ptr)
{
	struct lldpd_one_client *client = ptr;
	if (events & BEV_EVENT_ERROR) {
		log_warnx("event", "an error occurred with client: %s",
		    evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()));
		levent_ctl_free_client(client);
	} else if (events & BEV_EVENT_EOF) {
		log_debug("event", "client has been disconnected");
		levent_ctl_free_client(client);
	}
}

static void
levent_ctl_accept(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	struct lldpd_one_client *client = NULL;
	int s;
	(void)what;

	log_debug("control", "accept a new connection");
	if ((s = accept(fd, NULL, NULL)) == -1) {
		log_warn("event", "unable to accept connection from socket");
		return;
	}
	client = calloc(1, sizeof(struct lldpd_one_client));
	if (!client) {
		log_warnx("event", "unable to allocate memory for new client");
		close(s);
		goto accept_failed;
	}
	client->cfg = cfg;
	levent_make_socket_nonblocking(s);
	TAILQ_INSERT_TAIL(&lldpd_clients, client, next);
	if ((client->bev = bufferevent_socket_new(cfg->g_base, s,
		 BEV_OPT_CLOSE_ON_FREE)) == NULL) {
		log_warnx("event",
		    "unable to allocate a new buffer event for new client");
		close(s);
		goto accept_failed;
	}
	bufferevent_setcb(client->bev, levent_ctl_recv, NULL, levent_ctl_event, client);
	bufferevent_enable(client->bev, EV_READ | EV_WRITE);
	log_debug("event", "new client accepted");
	/* coverity[leaked_handle]
	   s has been saved by bufferevent_socket_new */
	return;
accept_failed:
	levent_ctl_free_client(client);
}

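/* Watch the socket shared with the privileged monitor process. The monitor
 * is not supposed to write to it, so any readability is bad news: EOF means
 * the monitor died, actual data means we are out of sync with it. Either
 * way, break out of the event loop to terminate. */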
static void
levent_priv(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	ssize_t n;
	int err;
	char one;
	(void)what;
	/* Check if we have some data available. We need to put the socket in
	 * non-blocking mode to be able to run the check without disruption. */
	levent_make_socket_nonblocking(fd);
	n = read(fd, &one, 1);
	err = errno;
	levent_make_socket_blocking(fd);

	switch (n) {
	case -1:
		if (err == EAGAIN || err == EWOULDBLOCK) /* No data, all good */
			return;
		log_warnx("event", "unable to poll monitor process, exit");
		break;
	case 0:
		log_warnx("event", "monitor process has terminated, exit");
		break;
	default:
		/* This is a bit unsafe as we are now out-of-sync with the
		 * monitor. It would be safer to request 0 bytes, but some
		 * OSes (illumos) take the shortcut that when asked for 0
		 * bytes, they may just return 0 bytes. */
		log_warnx("event",
		    "received unexpected data from monitor process, exit");
		break;
	}
	event_base_loopbreak(base);
}

static void
levent_dump(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	(void)fd;
	(void)what;
	log_debug("event", "dumping all events");
	event_base_dump_events(base, stderr);
}

static void
levent_stop(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	(void)fd;
	(void)what;
	event_base_loopbreak(base);
}

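/* Periodic main-loop tick: run lldpd_loop() and re-arm the timer. The delay
 * is the transmit interval, multiplied by 20 when interface change
 * notifications are available (cfg->g_iface_event is set), and clamped to
 * at least 30 seconds. */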
static void
levent_update_and_send(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	struct timeval tv;
	long interval_ms = cfg->g_config.c_tx_interval;

	(void)fd;
	(void)what;
	lldpd_loop(cfg);
	if (cfg->g_iface_event != NULL) interval_ms *= 20;
	if (interval_ms < 30000) interval_ms = 30000;
	tv.tv_sec = interval_ms / 1000;
	tv.tv_usec = (interval_ms % 1000) * 1000;
	event_add(cfg->g_main_loop, &tv);
}

void
levent_update_now(struct lldpd *cfg)
{
	if (cfg->g_main_loop) event_active(cfg->g_main_loop, EV_TIMEOUT, 1);
}

void
levent_send_now(struct lldpd *cfg)
{
	struct lldpd_hardware *hardware;
	TAILQ_FOREACH (hardware, &cfg->g_hardware, h_entries) {
		if (hardware->h_timer)
			event_active(hardware->h_timer, EV_TIMEOUT, 1);
		else
			log_warnx("event", "BUG: no timer present for interface %s",
			    hardware->h_ifname);
	}
}

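/* One-time setup of the event machinery: create the libevent base, hook up
 * SNMP when enabled, arm the main-loop timer, listen on the control Unix
 * socket, watch the monitor process and install signal handlers. Called
 * once from levent_loop(). */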
static void
levent_init(struct lldpd *cfg)
{
	/* Set up libevent */
	log_debug("event", "initialize libevent");
	event_set_log_callback(levent_log_cb);
	if (!(cfg->g_base = event_base_new()))
		fatalx("event", "unable to create a new libevent base");
	log_info("event", "libevent %s initialized with %s method", event_get_version(),
	    event_base_get_method(cfg->g_base));

	/* Set up SNMP */
#ifdef USE_SNMP
	if (cfg->g_snmp) {
		agent_init(cfg, cfg->g_snmp_agentx);
		cfg->g_snmp_timeout =
		    evtimer_new(cfg->g_base, levent_snmp_timeout, cfg);
		if (!cfg->g_snmp_timeout)
			fatalx("event", "unable to setup timeout function for SNMP");
		if ((cfg->g_snmp_fds = malloc(sizeof(struct ev_l))) == NULL)
			fatalx("event", "unable to allocate memory for SNMP events");
		TAILQ_INIT(levent_snmp_fds(cfg));
	}
#endif

	/* Set up the loop that will run every X seconds. */
	log_debug("event", "register loop timer");
	if (!(cfg->g_main_loop =
		event_new(cfg->g_base, -1, 0, levent_update_and_send, cfg)))
		fatalx("event", "unable to setup main timer");
	event_active(cfg->g_main_loop, EV_TIMEOUT, 1);

	/* Set up the Unix socket */
	struct event *ctl_event;
	log_debug("event", "register Unix socket");
	TAILQ_INIT(&lldpd_clients);
	levent_make_socket_nonblocking(cfg->g_ctl);
	if ((ctl_event = event_new(cfg->g_base, cfg->g_ctl, EV_READ | EV_PERSIST,
		 levent_ctl_accept, cfg)) == NULL)
		fatalx("event", "unable to setup control socket event");
	event_add(ctl_event, NULL);

	/* Watch the monitor process */
	struct event *monitor_event;
	log_debug("event", "monitor the monitor process");
	if ((monitor_event = event_new(cfg->g_base, priv_fd(PRIV_UNPRIVILEGED),
		 EV_READ | EV_PERSIST, levent_priv, cfg->g_base)) == NULL)
		fatalx("event", "unable to monitor monitor process");
	event_add(monitor_event, NULL);

	/* Signals */
	log_debug("event", "register signals");
	evsignal_add(evsignal_new(cfg->g_base, SIGUSR1, levent_dump, cfg->g_base),
	    NULL);
	evsignal_add(evsignal_new(cfg->g_base, SIGINT, levent_stop, cfg->g_base), NULL);
	evsignal_add(evsignal_new(cfg->g_base, SIGTERM, levent_stop, cfg->g_base),
	    NULL);
}

/* Initialize libevent and start the event loop */
void
levent_loop(struct lldpd *cfg)
{
	levent_init(cfg);
	lldpd_loop(cfg);
#ifdef USE_SNMP
	if (cfg->g_snmp) levent_snmp_update(cfg);
#endif

	/* libevent loop */
	do {
		TRACE(LLDPD_EVENT_LOOP());
		if (event_base_got_break(cfg->g_base) ||
		    event_base_got_exit(cfg->g_base))
			break;
	} while (event_base_loop(cfg->g_base, EVLOOP_ONCE) == 0);

	if (cfg->g_iface_timer_event != NULL) event_free(cfg->g_iface_timer_event);

#ifdef USE_SNMP
	if (cfg->g_snmp) agent_shutdown();
#endif /* USE_SNMP */

	levent_ctl_close_clients();
}

/* Release libevent resources */
void
levent_shutdown(struct lldpd *cfg)
{
	if (cfg->g_iface_event) event_free(cfg->g_iface_event);
	if (cfg->g_cleanup_timer) event_free(cfg->g_cleanup_timer);
	event_base_free(cfg->g_base);
}

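/* Read callback for a per-interface receive descriptor: hand the incoming
 * frame to lldpd_recv(), then reschedule the cleanup timer since a received
 * PDU may change the next TTL expiry. */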
static void
levent_hardware_recv(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd_hardware *hardware = arg;
	struct lldpd *cfg = hardware->h_cfg;
	(void)what;
	log_debug("event", "received something for %s", hardware->h_ifname);
	lldpd_recv(cfg, hardware, fd);
	levent_schedule_cleanup(cfg);
}

void
levent_hardware_init(struct lldpd_hardware *hardware)
{
	log_debug("event", "initialize events for %s", hardware->h_ifname);
	if ((hardware->h_recv = malloc(sizeof(struct ev_l))) == NULL) {
		log_warnx("event", "unable to allocate memory for %s",
		    hardware->h_ifname);
		return;
	}
	TAILQ_INIT(levent_hardware_fds(hardware));
}

void
levent_hardware_add_fd(struct lldpd_hardware *hardware, int fd)
{
	struct lldpd_events *hfd = NULL;
	if (!hardware->h_recv) return;

	hfd = calloc(1, sizeof(struct lldpd_events));
	if (!hfd) {
		log_warnx("event", "unable to allocate new event for %s",
		    hardware->h_ifname);
		return;
	}
	levent_make_socket_nonblocking(fd);
	if ((hfd->ev = event_new(hardware->h_cfg->g_base, fd, EV_READ | EV_PERSIST,
		 levent_hardware_recv, hardware)) == NULL) {
		log_warnx("event", "unable to allocate a new event for %s",
		    hardware->h_ifname);
		free(hfd);
		return;
	}
	if (event_add(hfd->ev, NULL) == -1) {
		log_warnx("event", "unable to schedule new event for %s",
		    hardware->h_ifname);
		event_free(hfd->ev);
		free(hfd);
		return;
	}
	TAILQ_INSERT_TAIL(levent_hardware_fds(hardware), hfd, next);
}

void
levent_hardware_release(struct lldpd_hardware *hardware)
{
	struct lldpd_events *ev, *ev_next;
	if (hardware->h_timer) {
		event_free(hardware->h_timer);
		hardware->h_timer = NULL;
	}
	if (!hardware->h_recv) return;

	log_debug("event", "release events for %s", hardware->h_ifname);
	for (ev = TAILQ_FIRST(levent_hardware_fds(hardware)); ev; ev = ev_next) {
		ev_next = TAILQ_NEXT(ev, next);
		/* We may close the same FD several times. This is harmless. */
		close(event_get_fd(ev->ev));
		event_free(ev->ev);
		TAILQ_REMOVE(levent_hardware_fds(hardware), ev, next);
		free(ev);
	}
	free(levent_hardware_fds(hardware));
}

static void
levent_iface_trigger(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	log_debug("event", "triggering update of all interfaces");
	lldpd_update_localports(cfg);
}

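/* An interface change notification came in. If a callback is registered in
 * cfg->g_iface_cb, let it consume the message; otherwise drain and discard
 * the data. In both cases, debounce the actual refresh: schedule a single
 * update of all interfaces one second from now, so a burst of notifications
 * triggers only one update. */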
static void
levent_iface_recv(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	char buffer[EVENT_BUFFER];
	int n;

	if (cfg->g_iface_cb == NULL) {
		/* Discard the message */
		while (1) {
			n = read(fd, buffer, sizeof(buffer));
			if (n == -1 && (errno == EWOULDBLOCK || errno == EAGAIN)) break;
			if (n == -1) {
				log_warn("event",
				    "unable to receive interface change notification message");
				return;
			}
			if (n == 0) {
				log_warnx("event",
				    "end of file reached while getting interface change notification message");
				return;
			}
		}
	} else {
		cfg->g_iface_cb(cfg);
	}

	/* Schedule local port update. We don't run it right away because we may
	 * receive a batch of events like this. */
	struct timeval one_sec = { 1, 0 };
	TRACE(LLDPD_INTERFACES_NOTIFICATION());
	log_debug("event",
	    "received notification change, schedule an update of all interfaces in one second");
	if (cfg->g_iface_timer_event == NULL) {
		if ((cfg->g_iface_timer_event = evtimer_new(cfg->g_base,
			 levent_iface_trigger, cfg)) == NULL) {
			log_warnx("event",
			    "unable to create a new event to trigger interface update");
			return;
		}
	}
	if (evtimer_add(cfg->g_iface_timer_event, &one_sec) == -1) {
		log_warnx("event", "unable to schedule interface updates");
		return;
	}
}

int
levent_iface_subscribe(struct lldpd *cfg, int socket)
{
	log_debug("event", "subscribe to interface changes from socket %d", socket);
	levent_make_socket_nonblocking(socket);
	cfg->g_iface_event = event_new(cfg->g_base, socket, EV_READ | EV_PERSIST,
	    levent_iface_recv, cfg);
	if (cfg->g_iface_event == NULL) {
		log_warnx("event",
		    "unable to allocate a new event for interface changes");
		return -1;
	}
	if (event_add(cfg->g_iface_event, NULL) == -1) {
		log_warnx("event", "unable to schedule new interface changes event");
		event_free(cfg->g_iface_event);
		cfg->g_iface_event = NULL;
		return -1;
	}
	return 0;
}

static void
levent_trigger_cleanup(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	lldpd_cleanup(cfg);
}

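/* (Re)arm the cleanup timer. The delay is the time until the next remote
 * port TTL expires, capped at c_ttl; an already-expired port forces an
 * immediate run. The timer is recreated on each call, so calling this often
 * simply recomputes the next deadline. */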
void
levent_schedule_cleanup(struct lldpd *cfg)
{
	log_debug("event", "schedule next cleanup");
	if (cfg->g_cleanup_timer != NULL) {
		event_free(cfg->g_cleanup_timer);
	}
	cfg->g_cleanup_timer = evtimer_new(cfg->g_base, levent_trigger_cleanup, cfg);
	if (cfg->g_cleanup_timer == NULL) {
		log_warnx("event", "unable to allocate a new event for cleanup tasks");
		return;
	}

	/* Compute the next TTL event */
	struct timeval tv = { cfg->g_config.c_ttl, 0 };
	time_t now = time(NULL);
	time_t next;
	struct lldpd_hardware *hardware;
	struct lldpd_port *port;
	TAILQ_FOREACH (hardware, &cfg->g_hardware, h_entries) {
		TAILQ_FOREACH (port, &hardware->h_rports, p_entries) {
			if (now >= port->p_lastupdate + port->p_ttl) {
				tv.tv_sec = 0;
				log_debug("event",
				    "immediate cleanup on port %s (%lld, %d, %lld)",
				    hardware->h_ifname, (long long)now, port->p_ttl,
				    (long long)port->p_lastupdate);
				break;
			}
			next = port->p_ttl - (now - port->p_lastupdate);
			if (next < tv.tv_sec) tv.tv_sec = next;
		}
	}

	log_debug("event", "next cleanup in %ld seconds", (long)tv.tv_sec);
	if (event_add(cfg->g_cleanup_timer, &tv) == -1) {
		log_warnx("event", "unable to schedule cleanup task");
		event_free(cfg->g_cleanup_timer);
		cfg->g_cleanup_timer = NULL;
		return;
	}
}

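/* Timer callback that transmits LLDP PDUs on one port, then re-arms the
 * per-port timer. With LLDP-MED, a non-zero h_tx_fast counter is
 * decremented on each transmission and, while still non-zero, selects the
 * fast transmit interval (c_tx_fast_interval, in seconds) instead of the
 * regular c_tx_interval (in milliseconds). */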
static void
levent_send_pdu(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd_hardware *hardware = arg;
	int tx_interval = hardware->h_cfg->g_config.c_tx_interval;

	log_debug("event", "trigger sending PDU for port %s", hardware->h_ifname);
	lldpd_send(hardware);

#ifdef ENABLE_LLDPMED
	if (hardware->h_tx_fast > 0) hardware->h_tx_fast--;

	if (hardware->h_tx_fast > 0)
		tx_interval = hardware->h_cfg->g_config.c_tx_fast_interval * 1000;
#endif

	struct timeval tv;
	tv.tv_sec = tx_interval / 1000;
	tv.tv_usec = (tx_interval % 1000) * 1000;
	if (event_add(hardware->h_timer, &tv) == -1) {
		log_warnx("event", "unable to re-register timer event for port %s",
		    hardware->h_ifname);
		event_free(hardware->h_timer);
		hardware->h_timer = NULL;
		return;
	}
}

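/* Create the per-port transmit timer if needed and fire it immediately;
 * levent_send_pdu() then keeps rescheduling itself at the configured
 * transmit interval. */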
void
levent_schedule_pdu(struct lldpd_hardware *hardware)
{
	log_debug("event", "schedule sending PDU on %s", hardware->h_ifname);
	if (hardware->h_timer == NULL) {
		hardware->h_timer =
		    evtimer_new(hardware->h_cfg->g_base, levent_send_pdu, hardware);
		if (hardware->h_timer == NULL) {
			log_warnx("event", "unable to schedule PDU sending for port %s",
			    hardware->h_ifname);
			return;
		}
	}

	struct timeval tv = { 0, 0 };
	if (event_add(hardware->h_timer, &tv) == -1) {
		log_warnx("event", "unable to register timer event for port %s",
		    hardware->h_ifname);
		event_free(hardware->h_timer);
		hardware->h_timer = NULL;
		return;
	}
}

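/* Toggle O_NONBLOCK on a descriptor with fcntl(). Both helpers are no-ops
 * when the flag is already in the requested state and return -1 on fcntl()
 * failure. */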
int
levent_make_socket_nonblocking(int fd)
{
	int flags;
	if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) {
		log_warn("event", "fcntl(%d, F_GETFL)", fd);
		return -1;
	}
	if (flags & O_NONBLOCK) return 0;
	if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) {
		log_warn("event", "fcntl(%d, F_SETFL)", fd);
		return -1;
	}
	return 0;
}

int
levent_make_socket_blocking(int fd)
{
	int flags;
	if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) {
		log_warn("event", "fcntl(%d, F_GETFL)", fd);
		return -1;
	}
	if (!(flags & O_NONBLOCK)) return 0;
	if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == -1) {
		log_warn("event", "fcntl(%d, F_SETFL)", fd);
		return -1;
	}
	return 0;
}

#ifdef HOST_OS_LINUX
/* Receive and log pending errors from a socket's error queue when an error
 * is suspected. */
void
levent_recv_error(int fd, const char *source)
{
	do {
		ssize_t n;
		char buf[1024] = {};
		struct msghdr msg = { .msg_control = buf,
			.msg_controllen = sizeof(buf) };
		if ((n = recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT)) <= 0) {
			return;
		}
		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
		if (cmsg == NULL)
			log_warnx("event", "received unknown error on %s", source);
		else
			log_warnx("event", "received error (level=%d/type=%d) on %s",
			    cmsg->cmsg_level, cmsg->cmsg_type, source);
	} while (1);
}
#endif