Coverage Report

Created: 2025-04-24 06:18

/src/hostap/src/utils/eloop.c

/*
 * Event loop based on select() loop
 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */

#include "includes.h"
#include <assert.h>

#include "common.h"
#include "trace.h"
#include "list.h"
#include "eloop.h"

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
#error Do not define both poll and epoll
#endif

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_KQUEUE)
#error Do not define both poll and kqueue
#endif

#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL) && \
    !defined(CONFIG_ELOOP_KQUEUE)
#define CONFIG_ELOOP_SELECT
#endif

#ifdef CONFIG_ELOOP_POLL
#include <poll.h>
#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_EPOLL
#include <sys/epoll.h>
#endif /* CONFIG_ELOOP_EPOLL */

#ifdef CONFIG_ELOOP_KQUEUE
#include <sys/event.h>
#endif /* CONFIG_ELOOP_KQUEUE */
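

/*
 * One registered socket: the fd plus the handler and the two opaque context
 * pointers (eloop_data, user_data) that are passed back to it on dispatch.
 */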
41
42
struct eloop_sock {
43
  int sock;
44
  void *eloop_data;
45
  void *user_data;
46
  eloop_sock_handler handler;
47
  WPA_TRACE_REF(eloop);
48
  WPA_TRACE_REF(user);
49
  WPA_TRACE_INFO
50
};
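

/*
 * One pending timeout; entries are kept in eloop.timeout sorted by
 * increasing absolute expiry time (struct os_reltime).
 */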
struct eloop_timeout {
	struct dl_list list;
	struct os_reltime time;
	void *eloop_data;
	void *user_data;
	eloop_timeout_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_signal {
	int sig;
	void *user_data;
	eloop_signal_handler handler;
	int signaled;
};

struct eloop_sock_table {
	size_t count;
	struct eloop_sock *table;
	eloop_event_type type;
	int changed;
};

struct eloop_data {
	int max_sock;

	size_t count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	size_t max_pollfd_map; /* number of pollfds_map currently allocated */
	size_t max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;
	struct pollfd **pollfds_map;
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	int max_fd;
	struct eloop_sock *fd_table;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;
	size_t epoll_max_event_num;
	struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	int kqueuefd;
	size_t kqueue_nevents;
	struct kevent *kqueue_events;
#endif /* CONFIG_ELOOP_KQUEUE */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;

	struct dl_list timeout;

	size_t signal_count;
	struct eloop_signal *signals;
	int signaled;
	int pending_terminate;

	int terminate;
};
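

/*
 * All event loop state lives in a single static instance; the eloop API
 * operates on it implicitly, so the loop is process-global and not
 * thread-safe.
 */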
static struct eloop_data eloop;


#ifdef WPA_TRACE

static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}

static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
	size_t i;

	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_add_ref(&table->table[i], eloop,
				  table->table[i].eloop_data);
		wpa_trace_add_ref(&table->table[i], user,
				  table->table[i].user_data);
	}
}


static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
	size_t i;

	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_remove_ref(&table->table[i], eloop,
				     table->table[i].eloop_data);
		wpa_trace_remove_ref(&table->table[i], user,
				     table->table[i].user_data);
	}
}

#else /* WPA_TRACE */

#define eloop_trace_sock_add_ref(table) do { } while (0)
#define eloop_trace_sock_remove_ref(table) do { } while (0)

#endif /* WPA_TRACE */
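

/*
 * eloop_init() must be called once before any other eloop_*() function; for
 * the epoll and kqueue backends it also creates the kernel event descriptor.
 */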
int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed: %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef WPA_TRACE
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
	return 0;
}
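

/*
 * eloop_sock_queue() registers one fd with the kernel backend, mapping the
 * eloop_event_type to the corresponding event mask. The epoll variant uses
 * the default level-triggered mode (no EPOLLET).
 */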
#ifdef CONFIG_ELOOP_EPOLL
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct epoll_event ev;

	os_memset(&ev, 0, sizeof(ev));
	switch (type) {
	case EVENT_TYPE_READ:
		ev.events = EPOLLIN;
		break;
	case EVENT_TYPE_WRITE:
		ev.events = EPOLLOUT;
		break;
	/*
	 * Exceptions are always checked when using epoll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling.
	 */
	case EVENT_TYPE_EXCEPTION:
		ev.events = EPOLLERR | EPOLLHUP;
		break;
	}
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
#endif /* CONFIG_ELOOP_EPOLL */


#ifdef CONFIG_ELOOP_KQUEUE

static short event_type_kevent_filter(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return EVFILT_READ;
	case EVENT_TYPE_WRITE:
		return EVFILT_WRITE;
	default:
		return 0;
	}
}


static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct kevent ke;

	EV_SET(&ke, sock, event_type_kevent_filter(type), EV_ADD, 0, 0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) {
		wpa_printf(MSG_ERROR, "%s: kevent(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}

#endif /* CONFIG_ELOOP_KQUEUE */
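

/*
 * Append one fd to a table: grow the backend-specific arrays on demand,
 * record the entry, and (for epoll/kqueue) register the fd with the kernel.
 */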
static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
				     int sock, eloop_sock_handler handler,
				     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct epoll_event *temp_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent *temp_events;
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	struct eloop_sock *temp_table;
	size_t next;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	if ((size_t) new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		size_t nmax = eloop.count + 1 + 50;

		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (new_max_sock >= eloop.max_fd) {
		next = new_max_sock + 16;
		temp_table = os_realloc_array(eloop.fd_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.max_fd = next;
		eloop.fd_table = temp_table;
	}
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */

#ifdef CONFIG_ELOOP_EPOLL
	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	if (eloop.count + 1 > eloop.kqueue_nevents) {
		next = eloop.kqueue_nevents == 0 ? 8 : eloop.kqueue_nevents * 2;
		temp_events = os_malloc(next * sizeof(*temp_events));
		if (!temp_events) {
			wpa_printf(MSG_ERROR,
				   "%s: malloc for kqueue failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		os_free(eloop.kqueue_events);
		eloop.kqueue_events = temp_events;
		eloop.kqueue_nevents = next;
	}
#endif /* CONFIG_ELOOP_KQUEUE */

	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL) {
		eloop_trace_sock_add_ref(table);
		return -1;
	}

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);

#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (eloop_sock_queue(sock, table->type) < 0)
		return -1;
	os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	return 0;
}
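

/*
 * Remove one fd from a table: locate it, compact the array, and unregister
 * it from the kernel backend (EPOLL_CTL_DEL / EV_DELETE).
 */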
static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
					 int sock)
{
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent ke;
#endif /* CONFIG_ELOOP_KQUEUE */
	size_t i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	eloop_trace_sock_remove_ref(table);
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	EV_SET(&ke, sock, event_type_kevent_filter(table->type), EV_DELETE, 0,
	       0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_KQUEUE */
}


#ifdef CONFIG_ELOOP_POLL

static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < mx && fd >= 0)
		return pollfds_map[fd];
	return NULL;
}


static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
				    struct eloop_sock_table *writers,
				    struct eloop_sock_table *exceptions,
				    struct pollfd *pollfds,
				    struct pollfd **pollfds_map,
				    int max_pollfd_map)
{
	size_t i;
	int nxt = 0;
	int fd;
	struct pollfd *pfd;

	/* Clear pollfd lookup map. It will be re-populated below. */
	os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);

	if (readers && readers->table) {
		for (i = 0; i < readers->count; i++) {
			fd = readers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pollfds[nxt].fd = fd;
			pollfds[nxt].events = POLLIN;
			pollfds[nxt].revents = 0;
			pollfds_map[fd] = &(pollfds[nxt]);
			nxt++;
		}
	}

	if (writers && writers->table) {
		for (i = 0; i < writers->count; i++) {
			/*
			 * See if we already added this descriptor, update it
			 * if so.
			 */
			fd = writers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = 0;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
			pfd->events |= POLLOUT;
		}
	}

	/*
	 * Exceptions are always checked when using poll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling. Set the POLLIN bit in this case.
	 */
	if (exceptions && exceptions->table) {
		for (i = 0; i < exceptions->count; i++) {
			/*
			 * See if we already added this descriptor, just use it
			 * if so.
			 */
			fd = exceptions->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = POLLIN;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
		}
	}

	return nxt;
}


static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	size_t i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		if (table->changed)
			return 1;
	}

	return 0;
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}

#endif /* CONFIG_ELOOP_POLL */
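

/*
 * select() backend: the three fd_sets are rebuilt on every loop iteration
 * and any table entry whose fd is set gets dispatched. Handlers may
 * unregister sockets, so dispatching stops as soon as table->changed is set.
 */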
#ifdef CONFIG_ELOOP_SELECT

static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
				     fd_set *fds)
{
	size_t i;

	FD_ZERO(fds);

	if (table->table == NULL)
		return;

	for (i = 0; i < table->count; i++) {
		assert(table->table[i].sock >= 0);
		FD_SET(table->table[i].sock, fds);
	}
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
				      fd_set *fds)
{
	size_t i;

	if (table == NULL || table->table == NULL)
		return;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		if (FD_ISSET(table->table[i].sock, fds)) {
			table->table[i].handler(table->table[i].sock,
						table->table[i].eloop_data,
						table->table[i].user_data);
			if (table->changed)
				break;
		}
	}
}

#endif /* CONFIG_ELOOP_SELECT */


#ifdef CONFIG_ELOOP_EPOLL
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].data.fd];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}
#endif /* CONFIG_ELOOP_EPOLL */


#ifdef CONFIG_ELOOP_KQUEUE

static void eloop_sock_table_dispatch(struct kevent *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].ident];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}


static int eloop_sock_table_requeue(struct eloop_sock_table *table)
{
	size_t i;
	int r;

	r = 0;
	for (i = 0; i < table->count && table->table; i++) {
		if (eloop_sock_queue(table->table[i].sock, table->type) == -1)
			r = -1;
	}
	return r;
}

#endif /* CONFIG_ELOOP_KQUEUE */
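

/*
 * Re-register all sockets with a fresh kqueue; kqueue descriptors are not
 * inherited across fork(), so a child process must call this before reusing
 * the loop. This is a no-op for the other backends.
 */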
int eloop_sock_requeue(void)
{
	int r = 0;

#ifdef CONFIG_ELOOP_KQUEUE
	close(eloop.kqueuefd);
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}

	if (eloop_sock_table_requeue(&eloop.readers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.writers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.exceptions) < 0)
		r = -1;
#endif /* CONFIG_ELOOP_KQUEUE */

	return r;
}


static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	if (table) {
		size_t i;

		for (i = 0; i < table->count && table->table; i++) {
			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
				   "sock=%d eloop_data=%p user_data=%p "
				   "handler=%p",
				   table->table[i].sock,
				   table->table[i].eloop_data,
				   table->table[i].user_data,
				   table->table[i].handler);
			wpa_trace_dump_funcname("eloop unregistered socket "
						"handler",
						table->table[i].handler);
			wpa_trace_dump("eloop sock", &table->table[i]);
		}
		os_free(table->table);
	}
}


int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}


void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}


static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return &eloop.readers;
	case EVENT_TYPE_WRITE:
		return &eloop.writers;
	case EVENT_TYPE_EXCEPTION:
		return &eloop.exceptions;
	}

	return NULL;
}


int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}


void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}
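

/*
 * Register a one-shot relative timeout. The absolute expiry is computed from
 * os_get_reltime(), the entry is inserted so the list stays sorted, and an
 * arithmetic overflow is treated as an effectively infinite timeout that is
 * silently dropped.
 */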
int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec)
		goto overflow;
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	if (timeout->time.sec < now_sec)
		goto overflow;
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;

overflow:
	/*
	 * Integer overflow - assume the timeout is long enough to be
	 * effectively infinite, i.e., it would never trigger.
	 */
	wpa_printf(MSG_DEBUG,
		   "ELOOP: Too long timeout (secs=%u usecs=%u) to ever happen - ignore it",
		   secs, usecs);
	os_free(timeout);
	return 0;
}


static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}


int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}


int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}
	return removed;
}


int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data)
			return 1;
	}

	return 0;
}
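

/*
 * eloop_deplete_timeout() shortens and eloop_replenish_timeout() lengthens
 * an existing matching timeout to the requested duration. Both return 1 if
 * the timeout was rescheduled, 0 if it was left unchanged, and -1 if no
 * matching timeout exists.
 */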
int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&requested, &remaining)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}


int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&remaining, &requested)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}
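

/*
 * Signal handlers only set flags here; the registered user callbacks run
 * later from eloop_process_pending_signals() in the main loop, which keeps
 * the async-signal-unsafe work out of signal context.
 */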
#ifndef CONFIG_NATIVE_WINDOWS
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */


static void eloop_handle_signal(int sig)
{
	size_t i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}


static void eloop_process_pending_signals(void)
{
	size_t i;

	if (eloop.signaled == 0)
		return;
	eloop.signaled = 0;

	if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
		alarm(0);
#endif /* CONFIG_NATIVE_WINDOWS */
		eloop.pending_terminate = 0;
	}

	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].signaled) {
			eloop.signals[i].signaled = 0;
			eloop.signals[i].handler(eloop.signals[i].sig,
						 eloop.signals[i].user_data);
		}
	}
}


int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *tmp;

	tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
			       sizeof(struct eloop_signal));
	if (tmp == NULL)
		return -1;

	tmp[eloop.signal_count].sig = sig;
	tmp[eloop.signal_count].user_data = user_data;
	tmp[eloop.signal_count].handler = handler;
	tmp[eloop.signal_count].signaled = 0;
	eloop.signal_count++;
	eloop.signals = tmp;
	signal(sig, eloop_handle_signal);

	return 0;
}


int eloop_register_signal_terminate(eloop_signal_handler handler,
				    void *user_data)
{
	int ret = eloop_register_signal(SIGINT, handler, user_data);
	if (ret == 0)
		ret = eloop_register_signal(SIGTERM, handler, user_data);
	return ret;
}


int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}
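

/*
 * Main loop: each iteration computes the wait time from the nearest timeout,
 * blocks in the selected backend, processes pending signals, fires at most
 * one expired timeout, and then dispatches socket handlers - unless a
 * handler changed the tables, in which case the now-stale results are
 * skipped and the loop polls again.
 */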
void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
	int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct timespec ts;
#endif /* CONFIG_ELOOP_KQUEUE */
	int res;
	struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_SELECT */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;

		if (eloop.pending_terminate) {
			/*
			 * This may happen in some corner cases where a signal
			 * is received during a blocking operation. We need to
			 * process the pending signals and exit if requested to
			 * avoid hitting the SIGALRM limit if the blocking
			 * operation took more than two seconds.
			 */
			eloop_process_pending_signals();
			if (eloop.terminate)
				break;
		}

		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_KQUEUE
			ts.tv_sec = tv.sec;
			ts.tv_nsec = tv.usec * 1000L;
#endif /* CONFIG_ELOOP_KQUEUE */
		}

#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
					 eloop.count, timeout_ms);
		}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = kevent(eloop.kqueuefd, NULL, 0,
				     eloop.kqueue_events, eloop.kqueue_nevents,
				     timeout ? &ts : NULL);
		}
#endif /* CONFIG_ELOOP_KQUEUE */
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
				   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
				   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
				   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
				   "kqueue"
#endif /* CONFIG_ELOOP_KQUEUE */
				   , strerror(errno));
			goto out;
		}

		eloop.readers.changed = 0;
		eloop.writers.changed = 0;
		eloop.exceptions.changed = 0;

		eloop_process_pending_signals();


		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (!os_reltime_before(&now, &timeout->time)) {
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}

		}

		if (res <= 0)
			continue;

		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed) {
			/*
			 * Sockets may have been closed and reopened with the
			 * same FD in the signal or timeout handlers, so we
			 * must skip the previous results and check again
			 * whether any of the currently registered sockets have
			 * events.
			 */
			continue;
		}

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		eloop_sock_table_dispatch(eloop.kqueue_events, res);
#endif /* CONFIG_ELOOP_KQUEUE */
	}

	eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
	return;
}


void eloop_terminate(void)
{
	eloop.terminate = 1;
}


void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	os_free(eloop.fd_table);
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	os_free(eloop.kqueue_events);
	close(eloop.kqueuefd);
#endif /* CONFIG_ELOOP_KQUEUE */
}


int eloop_terminated(void)
{
	return eloop.terminate || eloop.pending_terminate;
}


void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * We could use epoll() here, but that would require four system
	 * calls: epoll_create1(), epoll_ctl() for ADD, epoll_wait(), and
	 * close() for the epoll fd. select() performs better for this
	 * one-shot wait.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_KQUEUE
	int kfd;
	struct kevent ke1, ke2;

	kfd = kqueue();
	if (kfd == -1)
		return;
	EV_SET(&ke1, sock, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0);
	kevent(kfd, &ke1, 1, &ke2, 1, NULL);
	close(kfd);
#endif /* CONFIG_ELOOP_KQUEUE */
}

#ifdef CONFIG_ELOOP_SELECT
#undef CONFIG_ELOOP_SELECT
#endif /* CONFIG_ELOOP_SELECT */