Coverage Report

Created: 2026-02-14 07:04

Legend: jump to the next uncovered line (L), next uncovered region (R), or next uncovered branch (B).
/src/lldpd/src/daemon/event.c
Line
Count
Source
1
/* -*- mode: c; c-file-style: "openbsd" -*- */
2
/*
3
 * Copyright (c) 2012 Vincent Bernat <bernat@luffy.cx>
4
 *
5
 * Permission to use, copy, modify, and/or distribute this software for any
6
 * purpose with or without fee is hereby granted, provided that the above
7
 * copyright notice and this permission notice appear in all copies.
8
 *
9
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16
 */
17
18
#include "lldpd.h"
19
#include "trace.h"
20
21
#include <unistd.h>
22
#include <signal.h>
23
#include <errno.h>
24
#include <time.h>
25
#include <fcntl.h>
26
#if defined(__clang__)
27
#  pragma clang diagnostic push
28
#  pragma clang diagnostic ignored "-Wdocumentation"
29
#endif
30
#include <event2/event.h>
31
#include <event2/bufferevent.h>
32
#include <event2/buffer.h>
33
#if defined(__clang__)
34
#  pragma clang diagnostic pop
35
#endif
36
37
#define EVENT_BUFFER 1024
38
39
static void
40
levent_log_cb(int severity, const char *msg)
41
0
{
42
0
  switch (severity) {
43
0
  case _EVENT_LOG_DEBUG:
44
0
    log_debug("libevent", "%s", msg);
45
0
    break;
46
0
  case _EVENT_LOG_MSG:
47
0
    log_info("libevent", "%s", msg);
48
0
    break;
49
0
  case _EVENT_LOG_WARN:
50
0
    log_warnx("libevent", "%s", msg);
51
0
    break;
52
0
  case _EVENT_LOG_ERR:
53
0
    log_warnx("libevent", "%s", msg);
54
0
    break;
55
0
  }
56
0
}
57
58
struct lldpd_events {
59
  TAILQ_ENTRY(lldpd_events) next;
60
  struct event *ev;
61
};
62
TAILQ_HEAD(ev_l, lldpd_events);
63
64
#define levent_snmp_fds(cfg) ((struct ev_l *)(cfg)->g_snmp_fds)
65
0
#define levent_hardware_fds(hardware) ((struct ev_l *)(hardware)->h_recv)
66
67
#ifdef USE_SNMP
68
#  include <net-snmp/net-snmp-config.h>
69
#  include <net-snmp/net-snmp-includes.h>
70
#  include <net-snmp/agent/net-snmp-agent-includes.h>
71
#  include <net-snmp/agent/snmp_vars.h>
72
73
/* Compatibility with older versions of NetSNMP */
74
#  ifndef HAVE_SNMP_SELECT_INFO2
75
#    define netsnmp_large_fd_set fd_set
76
#    define snmp_read2 snmp_read
77
#    define snmp_select_info2 snmp_select_info
78
#    define netsnmp_large_fd_set_init(...)
79
#    define netsnmp_large_fd_set_cleanup(...)
80
#    define NETSNMP_LARGE_FD_SET FD_SET
81
#    define NETSNMP_LARGE_FD_CLR FD_CLR
82
#    define NETSNMP_LARGE_FD_ZERO FD_ZERO
83
#    define NETSNMP_LARGE_FD_ISSET FD_ISSET
84
#  else
85
#    include <net-snmp/library/large_fd_set.h>
86
#  endif
87
88
static void levent_snmp_update(struct lldpd *);
89
90
/*
91
 * Callback function when we have something to read from SNMP.
92
 *
93
 * This function is called because we have a read event on one SNMP
94
 * file descriptor. When need to call snmp_read() on it.
95
 */
96
static void
97
levent_snmp_read(evutil_socket_t fd, short what, void *arg)
98
{
99
  struct lldpd *cfg = arg;
100
  netsnmp_large_fd_set fdset;
101
  (void)what;
102
  netsnmp_large_fd_set_init(&fdset, FD_SETSIZE);
103
  NETSNMP_LARGE_FD_ZERO(&fdset);
104
  NETSNMP_LARGE_FD_SET(fd, &fdset);
105
  snmp_read2(&fdset);
106
  netsnmp_large_fd_set_cleanup(&fdset);
107
  levent_snmp_update(cfg);
108
}
109
110
/*
111
 * Callback function for a SNMP timeout.
112
 *
113
 * A SNMP timeout has occurred. Call `snmp_timeout()` to handle it.
114
 */
115
static void
116
levent_snmp_timeout(evutil_socket_t fd, short what, void *arg)
117
{
118
  struct lldpd *cfg = arg;
119
  (void)what;
120
  (void)fd;
121
  snmp_timeout();
122
  run_alarms();
123
  levent_snmp_update(cfg);
124
}
125
126
/*
127
 * Watch a new SNMP FD.
128
 *
129
 * @param base The libevent base we are working on.
130
 * @param fd The file descriptor we want to watch.
131
 *
132
 * The file descriptor is appended to the list of file descriptors we
133
 * want to watch.
134
 */
135
static void
136
levent_snmp_add_fd(struct lldpd *cfg, int fd)
137
{
138
  struct event_base *base = cfg->g_base;
139
  struct lldpd_events *snmpfd = calloc(1, sizeof(struct lldpd_events));
140
  if (!snmpfd) {
141
    log_warn("event", "unable to allocate memory for new SNMP event");
142
    return;
143
  }
144
  levent_make_socket_nonblocking(fd);
145
  if ((snmpfd->ev = event_new(base, fd, EV_READ | EV_PERSIST, levent_snmp_read,
146
     cfg)) == NULL) {
147
    log_warnx("event", "unable to allocate a new SNMP event for FD %d", fd);
148
    free(snmpfd);
149
    return;
150
  }
151
  if (event_add(snmpfd->ev, NULL) == -1) {
152
    log_warnx("event", "unable to schedule new SNMP event for FD %d", fd);
153
    event_free(snmpfd->ev);
154
    free(snmpfd);
155
    return;
156
  }
157
  TAILQ_INSERT_TAIL(levent_snmp_fds(cfg), snmpfd, next);
158
}
159
160
/*
161
 * Update SNMP event loop.
162
 *
163
 * New events are added and some other are removed. This function
164
 * should be called every time a SNMP event happens: either when
165
 * handling a SNMP packet, a SNMP timeout or when sending a SNMP
166
 * packet. This function will keep libevent in sync with NetSNMP.
167
 *
168
 * @param base The libevent base we are working on.
169
 */
170
static void
171
levent_snmp_update(struct lldpd *cfg)
172
{
173
  int maxfd = 0;
174
  int block = 1;
175
  struct timeval timeout;
176
  static int howmany = 0;
177
  int added = 0, removed = 0, current = 0;
178
  struct lldpd_events *snmpfd, *snmpfd_next;
179
180
  /* snmp_select_info() can be tricky to understand. We set `block` to
181
     1 to means that we don't request a timeout. snmp_select_info()
182
     will reset `block` to 0 if it wants us to set up a timeout. In
183
     this timeout, `snmp_timeout()` should be invoked.
184
185
     Each FD in `fdset` will need to be watched for reading. If one of
186
     them become active, `snmp_read()` should be called on it.
187
  */
188
189
  netsnmp_large_fd_set fdset;
190
  netsnmp_large_fd_set_init(&fdset, FD_SETSIZE);
191
  NETSNMP_LARGE_FD_ZERO(&fdset);
192
  snmp_select_info2(&maxfd, &fdset, &timeout, &block);
193
194
  /* We need to untrack any event whose FD is not in `fdset`
195
     anymore */
196
  for (snmpfd = TAILQ_FIRST(levent_snmp_fds(cfg)); snmpfd; snmpfd = snmpfd_next) {
197
    snmpfd_next = TAILQ_NEXT(snmpfd, next);
198
    if (event_get_fd(snmpfd->ev) >= maxfd ||
199
        (!NETSNMP_LARGE_FD_ISSET(event_get_fd(snmpfd->ev), &fdset))) {
200
      event_free(snmpfd->ev);
201
      TAILQ_REMOVE(levent_snmp_fds(cfg), snmpfd, next);
202
      free(snmpfd);
203
      removed++;
204
    } else {
205
      NETSNMP_LARGE_FD_CLR(event_get_fd(snmpfd->ev), &fdset);
206
      current++;
207
    }
208
  }
209
210
  /* Invariant: FD in `fdset` are not in list of FD */
211
  for (int fd = 0; fd < maxfd; fd++) {
212
    if (NETSNMP_LARGE_FD_ISSET(fd, &fdset)) {
213
      levent_snmp_add_fd(cfg, fd);
214
      added++;
215
    }
216
  }
217
  current += added;
218
  if (howmany != current) {
219
    log_debug("event",
220
        "added %d events, removed %d events, total of %d events", added,
221
        removed, current);
222
    howmany = current;
223
  }
224
225
  /* If needed, handle timeout */
226
  if (evtimer_add(cfg->g_snmp_timeout, block ? NULL : &timeout) == -1)
227
    log_warnx("event", "unable to schedule timeout function for SNMP");
228
229
  netsnmp_large_fd_set_cleanup(&fdset);
230
}
231
#endif /* USE_SNMP */
232
233
struct lldpd_one_client {
234
  TAILQ_ENTRY(lldpd_one_client) next;
235
  struct lldpd *cfg;
236
  struct bufferevent *bev;
237
  int subscribed; /* Is this client subscribed to changes? */
238
};
239
TAILQ_HEAD(, lldpd_one_client) lldpd_clients;
240
241
static void
242
levent_ctl_free_client(struct lldpd_one_client *client)
243
0
{
244
0
  if (client && client->bev) bufferevent_free(client->bev);
245
0
  if (client) {
246
0
    TAILQ_REMOVE(&lldpd_clients, client, next);
247
0
    free(client);
248
0
  }
249
0
}
250
251
static void
252
levent_ctl_close_clients()
253
0
{
254
0
  struct lldpd_one_client *client, *client_next;
255
0
  for (client = TAILQ_FIRST(&lldpd_clients); client; client = client_next) {
256
0
    client_next = TAILQ_NEXT(client, next);
257
0
    levent_ctl_free_client(client);
258
0
  }
259
0
}
260
261
static ssize_t
262
levent_ctl_send(struct lldpd_one_client *client, int type, void *data, size_t len)
263
0
{
264
0
  struct bufferevent *bev = client->bev;
265
0
  struct hmsg_header hdr = { .len = len, .type = type };
266
0
  bufferevent_disable(bev, EV_WRITE);
267
0
  if (bufferevent_write(bev, &hdr, sizeof(struct hmsg_header)) == -1 ||
268
0
      (len > 0 && bufferevent_write(bev, data, len) == -1)) {
269
0
    log_warnx("event", "unable to create answer to client");
270
0
    levent_ctl_free_client(client);
271
0
    return -1;
272
0
  }
273
0
  bufferevent_enable(bev, EV_WRITE);
274
0
  return len;
275
0
}
276
277
void
278
levent_ctl_notify(char *ifname, int state, struct lldpd_port *neighbor)
279
0
{
280
0
  struct lldpd_one_client *client, *client_next;
281
0
  struct lldpd_neighbor_change neigh = { .ifname = ifname,
282
0
    .state = state,
283
0
    .neighbor = neighbor };
284
0
  void *output = NULL;
285
0
  ssize_t output_len = 0;
286
287
  /* Don't use TAILQ_FOREACH, the client may be deleted in case of errors. */
288
0
  log_debug("control", "notify clients of neighbor changes");
289
0
  for (client = TAILQ_FIRST(&lldpd_clients); client; client = client_next) {
290
0
    client_next = TAILQ_NEXT(client, next);
291
0
    if (!client->subscribed) continue;
292
293
0
    if (output == NULL) {
294
      /* Ugly hack: we don't want to transmit a list of
295
       * ports. We patch the port to avoid this. */
296
0
      TAILQ_ENTRY(lldpd_port) backup_p_entries;
297
0
      memcpy(&backup_p_entries, &neighbor->p_entries,
298
0
          sizeof(backup_p_entries));
299
0
      memset(&neighbor->p_entries, 0, sizeof(backup_p_entries));
300
0
      output_len = lldpd_neighbor_change_serialize(&neigh, &output);
301
0
      memcpy(&neighbor->p_entries, &backup_p_entries,
302
0
          sizeof(backup_p_entries));
303
304
0
      if (output_len <= 0) {
305
0
        log_warnx("event",
306
0
            "unable to serialize changed neighbor");
307
0
        return;
308
0
      }
309
0
    }
310
311
0
    levent_ctl_send(client, NOTIFICATION, output, output_len);
312
0
  }
313
314
0
  free(output);
315
0
}
316
317
static ssize_t
318
levent_ctl_send_cb(void *out, int type, void *data, size_t len)
319
0
{
320
0
  struct lldpd_one_client *client = out;
321
0
  return levent_ctl_send(client, type, data, len);
322
0
}
323
324
static void
325
levent_ctl_recv(struct bufferevent *bev, void *ptr)
326
0
{
327
0
  struct lldpd_one_client *client = ptr;
328
0
  struct evbuffer *buffer = bufferevent_get_input(bev);
329
0
  size_t buffer_len = evbuffer_get_length(buffer);
330
0
  struct hmsg_header hdr;
331
0
  void *data = NULL;
332
333
0
  log_debug("control", "receive data on Unix socket");
334
0
  if (buffer_len < sizeof(struct hmsg_header)) return; /* Not enough data yet */
335
0
  if (evbuffer_copyout(buffer, &hdr, sizeof(struct hmsg_header)) !=
336
0
      sizeof(struct hmsg_header)) {
337
0
    log_warnx("event", "not able to read header");
338
0
    return;
339
0
  }
340
0
  if (hdr.len > HMSG_MAX_SIZE) {
341
0
    log_warnx("event", "message received is too large");
342
0
    goto recv_error;
343
0
  }
344
345
0
  if (buffer_len < hdr.len + sizeof(struct hmsg_header))
346
0
    return; /* Not enough data yet */
347
0
  if (hdr.len > 0 && (data = malloc(hdr.len)) == NULL) {
348
0
    log_warnx("event", "not enough memory");
349
0
    goto recv_error;
350
0
  }
351
0
  evbuffer_drain(buffer, sizeof(struct hmsg_header));
352
0
  if (hdr.len > 0) evbuffer_remove(buffer, data, hdr.len);
353
354
  /* Currently, we should not receive notification acknowledgment. But if
355
   * we receive one, we can discard it. */
356
0
  if (hdr.len == 0 && hdr.type == NOTIFICATION) return;
357
0
  if (client_handle_client(client->cfg, levent_ctl_send_cb, client, hdr.type,
358
0
    data, hdr.len, &client->subscribed) == -1)
359
0
    goto recv_error;
360
0
  free(data);
361
0
  return;
362
363
0
recv_error:
364
0
  free(data);
365
0
  levent_ctl_free_client(client);
366
0
}
367
368
static void
369
levent_ctl_event(struct bufferevent *bev, short events, void *ptr)
370
0
{
371
0
  struct lldpd_one_client *client = ptr;
372
0
  if (events & BEV_EVENT_ERROR) {
373
0
    log_warnx("event", "an error occurred with client: %s",
374
0
        evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()));
375
0
    levent_ctl_free_client(client);
376
0
  } else if (events & BEV_EVENT_EOF) {
377
0
    log_debug("event", "client has been disconnected");
378
0
    levent_ctl_free_client(client);
379
0
  }
380
0
}
381
382
static void
383
levent_ctl_accept(evutil_socket_t fd, short what, void *arg)
384
0
{
385
0
  struct lldpd *cfg = arg;
386
0
  struct lldpd_one_client *client = NULL;
387
0
  int s;
388
0
  (void)what;
389
390
0
  log_debug("control", "accept a new connection");
391
0
  if ((s = accept(fd, NULL, NULL)) == -1) {
392
0
    log_warn("event", "unable to accept connection from socket");
393
0
    return;
394
0
  }
395
0
  client = calloc(1, sizeof(struct lldpd_one_client));
396
0
  if (!client) {
397
0
    log_warnx("event", "unable to allocate memory for new client");
398
0
    close(s);
399
0
    goto accept_failed;
400
0
  }
401
0
  client->cfg = cfg;
402
0
  levent_make_socket_nonblocking(s);
403
0
  TAILQ_INSERT_TAIL(&lldpd_clients, client, next);
404
0
  if ((client->bev = bufferevent_socket_new(cfg->g_base, s,
405
0
     BEV_OPT_CLOSE_ON_FREE)) == NULL) {
406
0
    log_warnx("event",
407
0
        "unable to allocate a new buffer event for new client");
408
0
    close(s);
409
0
    goto accept_failed;
410
0
  }
411
0
  bufferevent_setcb(client->bev, levent_ctl_recv, NULL, levent_ctl_event, client);
412
0
  bufferevent_enable(client->bev, EV_READ | EV_WRITE);
413
0
  log_debug("event", "new client accepted");
414
  /* coverity[leaked_handle]
415
     s has been saved by bufferevent_socket_new */
416
0
  return;
417
0
accept_failed:
418
0
  levent_ctl_free_client(client);
419
0
}
420
421
static void
422
levent_priv(evutil_socket_t fd, short what, void *arg)
423
0
{
424
0
  struct event_base *base = arg;
425
0
  ssize_t n;
426
0
  int err;
427
0
  char one;
428
0
  (void)what;
429
  /* Check if we have some data available. We need to pass the socket in
430
   * non-blocking mode to be able to run the check without disruption. */
431
0
  levent_make_socket_nonblocking(fd);
432
0
  n = read(fd, &one, 1);
433
0
  err = errno;
434
0
  levent_make_socket_blocking(fd);
435
436
0
  switch (n) {
437
0
  case -1:
438
0
    if (err == EAGAIN || err == EWOULDBLOCK) /* No data, all good */
439
0
      return;
440
0
    log_warnx("event", "unable to poll monitor process, exit");
441
0
    break;
442
0
  case 0:
443
0
    log_warnx("event", "monitor process has terminated, exit");
444
0
    break;
445
0
  default:
446
    /* This is a bit unsafe as we are now out-of-sync with the
447
     * monitor. It would be safer to request 0 byte, but some OS
448
     * (illumos) seem to take the shortcut that by asking 0 byte,
449
     * we can just return 0 byte. */
450
0
    log_warnx("event",
451
0
        "received unexpected data from monitor process, exit");
452
0
    break;
453
0
  }
454
0
  event_base_loopbreak(base);
455
0
}
456
457
static void
458
levent_dump(evutil_socket_t fd, short what, void *arg)
459
0
{
460
0
  struct event_base *base = arg;
461
0
  (void)fd;
462
0
  (void)what;
463
0
  log_debug("event", "dumping all events");
464
0
  event_base_dump_events(base, stderr);
465
0
}
466
static void
467
levent_stop(evutil_socket_t fd, short what, void *arg)
468
0
{
469
0
  struct event_base *base = arg;
470
0
  (void)fd;
471
0
  (void)what;
472
0
  event_base_loopbreak(base);
473
0
}
474
475
static void
476
levent_update_and_send(evutil_socket_t fd, short what, void *arg)
477
0
{
478
0
  struct lldpd *cfg = arg;
479
0
  struct timeval tv;
480
0
  long interval_ms = cfg->g_config.c_tx_interval;
481
482
0
  (void)fd;
483
0
  (void)what;
484
0
  lldpd_loop(cfg);
485
0
  if (cfg->g_iface_event != NULL) interval_ms *= 20;
486
0
  if (interval_ms < 30000) interval_ms = 30000;
487
0
  tv.tv_sec = interval_ms / 1000;
488
0
  tv.tv_usec = (interval_ms % 1000) * 1000;
489
0
  event_add(cfg->g_main_loop, &tv);
490
0
}
491
492
void
493
levent_update_now(struct lldpd *cfg)
494
0
{
495
0
  if (cfg->g_main_loop) event_active(cfg->g_main_loop, EV_TIMEOUT, 1);
496
0
}
497
498
void
499
levent_send_now(struct lldpd *cfg)
500
0
{
501
0
  struct lldpd_hardware *hardware;
502
0
  TAILQ_FOREACH (hardware, &cfg->g_hardware, h_entries) {
503
0
    if (hardware->h_timer)
504
0
      event_active(hardware->h_timer, EV_TIMEOUT, 1);
505
0
    else
506
0
      log_warnx("event", "BUG: no timer present for interface %s",
507
0
          hardware->h_ifname);
508
0
  }
509
0
}
510
511
static void
512
levent_init(struct lldpd *cfg)
513
0
{
514
  /* Set up libevent */
515
0
  log_debug("event", "initialize libevent");
516
0
  event_set_log_callback(levent_log_cb);
517
0
  if (!(cfg->g_base = event_base_new()))
518
0
    fatalx("event", "unable to create a new libevent base");
519
0
  log_info("event", "libevent %s initialized with %s method", event_get_version(),
520
0
      event_base_get_method(cfg->g_base));
521
522
  /* Set up SNMP */
523
#ifdef USE_SNMP
524
  if (cfg->g_snmp) {
525
    agent_init(cfg, cfg->g_snmp_agentx);
526
    cfg->g_snmp_timeout =
527
        evtimer_new(cfg->g_base, levent_snmp_timeout, cfg);
528
    if (!cfg->g_snmp_timeout)
529
      fatalx("event", "unable to setup timeout function for SNMP");
530
    if ((cfg->g_snmp_fds = malloc(sizeof(struct ev_l))) == NULL)
531
      fatalx("event", "unable to allocate memory for SNMP events");
532
    TAILQ_INIT(levent_snmp_fds(cfg));
533
  }
534
#endif
535
536
  /* Setup loop that will run every X seconds. */
537
0
  log_debug("event", "register loop timer");
538
0
  if (!(cfg->g_main_loop =
539
0
        event_new(cfg->g_base, -1, 0, levent_update_and_send, cfg)))
540
0
    fatalx("event", "unable to setup main timer");
541
0
  event_active(cfg->g_main_loop, EV_TIMEOUT, 1);
542
543
  /* Set up unix socket */
544
0
  struct event *ctl_event;
545
0
  log_debug("event", "register Unix socket");
546
0
  TAILQ_INIT(&lldpd_clients);
547
0
  levent_make_socket_nonblocking(cfg->g_ctl);
548
0
  if ((ctl_event = event_new(cfg->g_base, cfg->g_ctl, EV_READ | EV_PERSIST,
549
0
     levent_ctl_accept, cfg)) == NULL)
550
0
    fatalx("event", "unable to setup control socket event");
551
0
  event_add(ctl_event, NULL);
552
553
  /* Somehow monitor the monitor process */
554
0
  struct event *monitor_event;
555
0
  log_debug("event", "monitor the monitor process");
556
0
  if ((monitor_event = event_new(cfg->g_base, priv_fd(PRIV_UNPRIVILEGED),
557
0
     EV_READ | EV_PERSIST, levent_priv, cfg->g_base)) == NULL)
558
0
    fatalx("event", "unable to monitor monitor process");
559
0
  event_add(monitor_event, NULL);
560
561
  /* Signals */
562
0
  log_debug("event", "register signals");
563
0
  evsignal_add(evsignal_new(cfg->g_base, SIGUSR1, levent_dump, cfg->g_base),
564
0
      NULL);
565
0
  evsignal_add(evsignal_new(cfg->g_base, SIGINT, levent_stop, cfg->g_base), NULL);
566
0
  evsignal_add(evsignal_new(cfg->g_base, SIGTERM, levent_stop, cfg->g_base),
567
0
      NULL);
568
0
}
569
570
/* Initialize libevent and start the event loop */
571
void
572
levent_loop(struct lldpd *cfg)
573
0
{
574
0
  levent_init(cfg);
575
0
  lldpd_loop(cfg);
576
#ifdef USE_SNMP
577
  if (cfg->g_snmp) levent_snmp_update(cfg);
578
#endif
579
580
  /* libevent loop */
581
0
  do {
582
0
    TRACE(LLDPD_EVENT_LOOP());
583
0
    if (event_base_got_break(cfg->g_base) ||
584
0
        event_base_got_exit(cfg->g_base))
585
0
      break;
586
0
  } while (event_base_loop(cfg->g_base, EVLOOP_ONCE) == 0);
587
588
0
  if (cfg->g_iface_timer_event != NULL) event_free(cfg->g_iface_timer_event);
589
590
#ifdef USE_SNMP
591
  if (cfg->g_snmp) agent_shutdown();
592
#endif /* USE_SNMP */
593
594
0
  levent_ctl_close_clients();
595
0
}
596
597
/* Release libevent resources */
598
void
599
levent_shutdown(struct lldpd *cfg)
600
0
{
601
0
  if (cfg->g_iface_event) event_free(cfg->g_iface_event);
602
0
  if (cfg->g_cleanup_timer) event_free(cfg->g_cleanup_timer);
603
0
  event_base_free(cfg->g_base);
604
0
}
605
606
static void
607
levent_hardware_recv(evutil_socket_t fd, short what, void *arg)
608
0
{
609
0
  struct lldpd_hardware *hardware = arg;
610
0
  struct lldpd *cfg = hardware->h_cfg;
611
0
  (void)what;
612
0
  log_debug("event", "received something for %s", hardware->h_ifname);
613
0
  lldpd_recv(cfg, hardware, fd);
614
0
  levent_schedule_cleanup(cfg);
615
0
}
616
617
void
618
levent_hardware_init(struct lldpd_hardware *hardware)
619
0
{
620
0
  log_debug("event", "initialize events for %s", hardware->h_ifname);
621
0
  if ((hardware->h_recv = malloc(sizeof(struct ev_l))) == NULL) {
622
0
    log_warnx("event", "unable to allocate memory for %s",
623
0
        hardware->h_ifname);
624
0
    return;
625
0
  }
626
0
  TAILQ_INIT(levent_hardware_fds(hardware));
627
0
}
628
629
void
630
levent_hardware_add_fd(struct lldpd_hardware *hardware, int fd)
631
0
{
632
0
  struct lldpd_events *hfd = NULL;
633
0
  if (!hardware->h_recv) return;
634
635
0
  hfd = calloc(1, sizeof(struct lldpd_events));
636
0
  if (!hfd) {
637
0
    log_warnx("event", "unable to allocate new event for %s",
638
0
        hardware->h_ifname);
639
0
    return;
640
0
  }
641
0
  levent_make_socket_nonblocking(fd);
642
0
  if ((hfd->ev = event_new(hardware->h_cfg->g_base, fd, EV_READ | EV_PERSIST,
643
0
     levent_hardware_recv, hardware)) == NULL) {
644
0
    log_warnx("event", "unable to allocate a new event for %s",
645
0
        hardware->h_ifname);
646
0
    free(hfd);
647
0
    return;
648
0
  }
649
0
  if (event_add(hfd->ev, NULL) == -1) {
650
0
    log_warnx("event", "unable to schedule new event for %s",
651
0
        hardware->h_ifname);
652
0
    event_free(hfd->ev);
653
0
    free(hfd);
654
0
    return;
655
0
  }
656
0
  TAILQ_INSERT_TAIL(levent_hardware_fds(hardware), hfd, next);
657
0
}
658
659
void
660
levent_hardware_release(struct lldpd_hardware *hardware)
661
0
{
662
0
  struct lldpd_events *ev, *ev_next;
663
0
  if (hardware->h_timer) {
664
0
    event_free(hardware->h_timer);
665
0
    hardware->h_timer = NULL;
666
0
  }
667
0
  if (!hardware->h_recv) return;
668
669
0
  log_debug("event", "release events for %s", hardware->h_ifname);
670
0
  for (ev = TAILQ_FIRST(levent_hardware_fds(hardware)); ev; ev = ev_next) {
671
0
    ev_next = TAILQ_NEXT(ev, next);
672
    /* We may close several time the same FD. This is harmless. */
673
0
    close(event_get_fd(ev->ev));
674
0
    event_free(ev->ev);
675
0
    TAILQ_REMOVE(levent_hardware_fds(hardware), ev, next);
676
0
    free(ev);
677
0
  }
678
0
  free(levent_hardware_fds(hardware));
679
0
}
680
681
static void
682
levent_iface_trigger(evutil_socket_t fd, short what, void *arg)
683
0
{
684
0
  struct lldpd *cfg = arg;
685
0
  log_debug("event", "triggering update of all interfaces");
686
0
  lldpd_update_localports(cfg);
687
0
}
688
689
static void
690
levent_iface_recv(evutil_socket_t fd, short what, void *arg)
691
0
{
692
0
  struct lldpd *cfg = arg;
693
0
  char buffer[EVENT_BUFFER];
694
0
  int n;
695
696
0
  if (cfg->g_iface_cb == NULL) {
697
    /* Discard the message */
698
0
    while (1) {
699
0
      n = read(fd, buffer, sizeof(buffer));
700
0
      if (n == -1 && (errno == EWOULDBLOCK || errno == EAGAIN)) break;
701
0
      if (n == -1) {
702
0
        log_warn("event",
703
0
            "unable to receive interface change notification message");
704
0
        return;
705
0
      }
706
0
      if (n == 0) {
707
0
        log_warnx("event",
708
0
            "end of file reached while getting interface change notification message");
709
0
        return;
710
0
      }
711
0
    }
712
0
  } else {
713
0
    cfg->g_iface_cb(cfg);
714
0
  }
715
716
  /* Schedule local port update. We don't run it right away because we may
717
   * receive a batch of events like this. */
718
0
  struct timeval one_sec = { 1, 0 };
719
0
  TRACE(LLDPD_INTERFACES_NOTIFICATION());
720
0
  log_debug("event",
721
0
      "received notification change, schedule an update of all interfaces in one second");
722
0
  if (cfg->g_iface_timer_event == NULL) {
723
0
    if ((cfg->g_iface_timer_event = evtimer_new(cfg->g_base,
724
0
       levent_iface_trigger, cfg)) == NULL) {
725
0
      log_warnx("event",
726
0
          "unable to create a new event to trigger interface update");
727
0
      return;
728
0
    }
729
0
  }
730
0
  if (evtimer_add(cfg->g_iface_timer_event, &one_sec) == -1) {
731
0
    log_warnx("event", "unable to schedule interface updates");
732
0
    return;
733
0
  }
734
0
}
735
736
int
737
levent_iface_subscribe(struct lldpd *cfg, int socket)
738
0
{
739
0
  log_debug("event", "subscribe to interface changes from socket %d", socket);
740
0
  levent_make_socket_nonblocking(socket);
741
0
  cfg->g_iface_event = event_new(cfg->g_base, socket, EV_READ | EV_PERSIST,
742
0
      levent_iface_recv, cfg);
743
0
  if (cfg->g_iface_event == NULL) {
744
0
    log_warnx("event",
745
0
        "unable to allocate a new event for interface changes");
746
0
    return -1;
747
0
  }
748
0
  if (event_add(cfg->g_iface_event, NULL) == -1) {
749
0
    log_warnx("event", "unable to schedule new interface changes event");
750
0
    event_free(cfg->g_iface_event);
751
0
    cfg->g_iface_event = NULL;
752
0
    return -1;
753
0
  }
754
0
  return 0;
755
0
}
756
757
static void
758
levent_trigger_cleanup(evutil_socket_t fd, short what, void *arg)
759
0
{
760
0
  struct lldpd *cfg = arg;
761
0
  lldpd_cleanup(cfg);
762
0
}
763
764
void
765
levent_schedule_cleanup(struct lldpd *cfg)
766
0
{
767
0
  log_debug("event", "schedule next cleanup");
768
0
  if (cfg->g_cleanup_timer != NULL) {
769
0
    event_free(cfg->g_cleanup_timer);
770
0
  }
771
0
  cfg->g_cleanup_timer = evtimer_new(cfg->g_base, levent_trigger_cleanup, cfg);
772
0
  if (cfg->g_cleanup_timer == NULL) {
773
0
    log_warnx("event", "unable to allocate a new event for cleanup tasks");
774
0
    return;
775
0
  }
776
777
  /* Compute the next TTL event */
778
0
  struct timeval tv = { cfg->g_config.c_ttl, 0 };
779
0
  time_t now = time(NULL);
780
0
  time_t next;
781
0
  struct lldpd_hardware *hardware;
782
0
  struct lldpd_port *port;
783
0
  TAILQ_FOREACH (hardware, &cfg->g_hardware, h_entries) {
784
0
    TAILQ_FOREACH (port, &hardware->h_rports, p_entries) {
785
0
      if (now >= port->p_lastupdate + port->p_ttl) {
786
0
        tv.tv_sec = 0;
787
0
        log_debug("event",
788
0
            "immediate cleanup on port %s (%lld, %d, %lld)",
789
0
            hardware->h_ifname, (long long)now, port->p_ttl,
790
0
            (long long)port->p_lastupdate);
791
0
        break;
792
0
      }
793
0
      next = port->p_ttl - (now - port->p_lastupdate);
794
0
      if (next < tv.tv_sec) tv.tv_sec = next;
795
0
    }
796
0
  }
797
798
0
  log_debug("event", "next cleanup in %ld seconds", (long)tv.tv_sec);
799
0
  if (event_add(cfg->g_cleanup_timer, &tv) == -1) {
800
0
    log_warnx("event", "unable to schedule cleanup task");
801
0
    event_free(cfg->g_cleanup_timer);
802
0
    cfg->g_cleanup_timer = NULL;
803
0
    return;
804
0
  }
805
0
}
806
807
static void
808
levent_send_pdu(evutil_socket_t fd, short what, void *arg)
809
0
{
810
0
  struct lldpd_hardware *hardware = arg;
811
0
  int tx_interval = hardware->h_cfg->g_config.c_tx_interval;
812
813
0
  log_debug("event", "trigger sending PDU for port %s", hardware->h_ifname);
814
0
  lldpd_send(hardware);
815
816
0
  if (hardware->h_tx_fast > 0) hardware->h_tx_fast--;
817
818
0
  if (hardware->h_tx_fast > 0)
819
0
    tx_interval = hardware->h_cfg->g_config.c_tx_fast_interval * 1000;
820
821
0
  struct timeval tv;
822
0
  tv.tv_sec = tx_interval / 1000;
823
0
  tv.tv_usec = (tx_interval % 1000) * 1000;
824
0
  if (event_add(hardware->h_timer, &tv) == -1) {
825
0
    log_warnx("event", "unable to re-register timer event for port %s",
826
0
        hardware->h_ifname);
827
0
    event_free(hardware->h_timer);
828
0
    hardware->h_timer = NULL;
829
0
    return;
830
0
  }
831
0
}
832
833
void
834
levent_schedule_pdu(struct lldpd_hardware *hardware)
835
0
{
836
0
  log_debug("event", "schedule sending PDU on %s", hardware->h_ifname);
837
0
  if (hardware->h_timer == NULL) {
838
0
    hardware->h_timer =
839
0
        evtimer_new(hardware->h_cfg->g_base, levent_send_pdu, hardware);
840
0
    if (hardware->h_timer == NULL) {
841
0
      log_warnx("event", "unable to schedule PDU sending for port %s",
842
0
          hardware->h_ifname);
843
0
      return;
844
0
    }
845
0
  }
846
847
0
  struct timeval tv = { 0, 0 };
848
0
  if (event_add(hardware->h_timer, &tv) == -1) {
849
0
    log_warnx("event", "unable to register timer event for port %s",
850
0
        hardware->h_ifname);
851
0
    event_free(hardware->h_timer);
852
0
    hardware->h_timer = NULL;
853
0
    return;
854
0
  }
855
0
}
856
857
int
858
levent_make_socket_nonblocking(int fd)
859
0
{
860
0
  int flags;
861
0
  if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) {
862
0
    log_warn("event", "fcntl(%d, F_GETFL)", fd);
863
0
    return -1;
864
0
  }
865
0
  if (flags & O_NONBLOCK) return 0;
866
0
  if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) {
867
0
    log_warn("event", "fcntl(%d, F_SETFL)", fd);
868
0
    return -1;
869
0
  }
870
0
  return 0;
871
0
}
872
873
int
874
levent_make_socket_blocking(int fd)
875
0
{
876
0
  int flags;
877
0
  if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) {
878
0
    log_warn("event", "fcntl(%d, F_GETFL)", fd);
879
0
    return -1;
880
0
  }
881
0
  if (!(flags & O_NONBLOCK)) return 0;
882
0
  if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == -1) {
883
0
    log_warn("event", "fcntl(%d, F_SETFL)", fd);
884
0
    return -1;
885
0
  }
886
0
  return 0;
887
0
}
888
889
#ifdef HOST_OS_LINUX
890
/* Receive and log error from a socket when there is suspicion of an error. */
891
void
892
levent_recv_error(int fd, const char *source)
893
0
{
894
0
  do {
895
0
    ssize_t n;
896
0
    char buf[1024] = {};
897
0
    struct msghdr msg = { .msg_control = buf,
898
0
      .msg_controllen = sizeof(buf) };
899
0
    if ((n = recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT)) <= 0) {
900
0
      return;
901
0
    }
902
0
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
903
0
    if (cmsg == NULL)
904
0
      log_warnx("event", "received unknown error on %s", source);
905
0
    else
906
0
      log_warnx("event", "received error (level=%d/type=%d) on %s",
907
0
          cmsg->cmsg_level, cmsg->cmsg_type, source);
908
0
  } while (1);
909
0
}
910
#endif