Coverage Report

Created: 2025-04-11 06:59

/src/opensips/io_wait.h
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * Copyright (C) 2014-2015 OpenSIPS Solutions
3
 * Copyright (C) 2005 iptelorg GmbH
4
 *
5
 * This file is part of opensips, a free SIP server.
6
 *
7
 * opensips is free software; you can redistribute it and/or modify
8
 * it under the terms of the GNU General Public License as published by
9
 * the Free Software Foundation; either version 2 of the License, or
10
 * (at your option) any later version
11
 *
12
 * opensips is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15
 * GNU General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU General Public License
18
 * along with this program; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA
20
 *
21
 * History:
22
 * --------
23
 *  2005-06-13  created by andrei
24
 *  2005-06-26  added kqueue (andrei)
25
 *  2005-07-01  added /dev/poll (andrei)
26
 *  2014-08-25  looping functions moved to io_wait_loop.h (bogdan)
27
 */
28
29
/*!
30
 * \file
31
 * \brief tcp io wait common stuff used by tcp_main.c & tcp_read.c
32
 * - \ref TCPiowait
33
 */
34
35
/*! \page TCPiowait TCP io wait common stuff used by tcp_main.c & tcp_read.c
36
 * All the functions are inline because of speed reasons and because they are
37
 * used only from 2 places.
38
 * You also have to define:
39
 *   -  int handle_io(struct fd_map* fm, int idx) (see below)
40
 *     (this could be trivially replaced by a callback pointer entry attached
41
 *      to the io_wait handler if more flexibility rather then performance
42
 *      is needed)
43
 *   -   fd_type - define to some enum of you choice and define also
44
 *                FD_TYPE_DEFINED (if you don't do it fd_type will be defined
45
 *                to int). 0 has a special not set/not init. meaning
46
 *                (a lot of sanity checks and the sigio_rt code are based on
47
 *                 this assumption)
48
 *   -  local_malloc (defaults to pkg_malloc)
49
 *   -  local_free   (defaults to pkg_free)
50
 *
51
 */
52
53
54
#ifndef _io_wait_h
55
#define _io_wait_h
56
57
#include <errno.h>
58
#include <string.h>
59
#ifdef HAVE_SIGIO_RT
60
#define __USE_GNU /* or else F_SETSIG won't be included */
61
#define _GNU_SOURCE /* define this as well */
62
#include <sys/types.h> /* recv */
63
#include <sys/socket.h> /* recv */
64
#include <signal.h> /* sigprocmask, sigwait a.s.o */
65
#endif
66
#ifdef HAVE_EPOLL
67
#include <sys/epoll.h>
68
#endif
69
#ifdef HAVE_KQUEUE
70
#include <sys/types.h> /* needed on freebsd */
71
#include <sys/event.h>
72
#include <sys/time.h>
73
#endif
74
#ifdef HAVE_DEVPOLL
75
#include <sys/devpoll.h>
76
#endif
77
#ifdef HAVE_SELECT
78
/* needed on openbsd for select*/
79
#include <sys/time.h>
80
#include <sys/types.h>
81
#include <unistd.h>
82
/* needed according to POSIX for select*/
83
#include <sys/select.h>
84
#endif
85
#include <sys/poll.h>
86
#include <fcntl.h>
87
88
#include "dprint.h"
89
90
#include "poll_types.h" /* poll_types*/
91
#include "pt.h" /* mypid() */
92
#include "error.h"
93
94
#ifdef __OS_linux
95
#include <features.h>     /* for GLIBC version testing */
96
#endif
97
#include "lib/dbg/backtrace.h"
98
99
#ifndef FD_TYPE_DEFINED
100
typedef int fd_type;
101
#define FD_TYPE_DEFINED
102
#endif
103
104
/*! \brief maps a fd to some other structure; used in almost all cases
105
 * except epoll and maybe kqueue or /dev/poll */
106
struct fd_map {
  int fd;               /* fd no; -1 / <=0 marks a free slot */
  fd_type type;         /* "data" type; 0 (F_NONE) means not set/not init */
  void* data;           /* pointer to the corresponding structure */
  int flags;            /* IO_WATCH_* bits: whether we should read, write
                         * or both; the top bits are reserved for
                         * internal (IO_WATCH_PRV_*) usage */
  int app_flags;        /* flags to be used by upper layer apps, not by
                         * the reactor */
  unsigned int timeout; /* absolute expiry (io_watch_add converts the
                         * relative value via get_ticks()); 0 = none */
};
117
118
119
#ifdef HAVE_KQUEUE
/* number of kevent changes buffered before kq_ev_change() must flush;
 * may be overridden at build time */
#ifndef KQ_CHANGES_ARRAY_SIZE
#define KQ_CHANGES_ARRAY_SIZE 128
#endif

/* NetBSD declares kevent's udata member as intptr_t instead of void*,
 * so the pointer passed to EV_SET() needs an explicit cast there.
 * NOTE: this used to live inside the KQ_CHANGES_ARRAY_SIZE guard, so
 * overriding that constant externally left KEV_UDATA_CAST undefined
 * and broke kq_ev_change(); it is now guarded independently. */
#ifndef KEV_UDATA_CAST
#ifdef __OS_netbsd
#define KEV_UDATA_CAST (intptr_t)
#else
#define KEV_UDATA_CAST
#endif
#endif

#endif
131
132
133
0
/* hint flag for io_watch_del(): the fd was (or will shortly be) closed,
 * so some explicit remove operations can be skipped (epoll/kqueue/sigio) */
#define IO_FD_CLOSING 16
134
135
/*! \brief handler structure - one instance per reactor; bundles the
 * poll-method specific state with the common fd bookkeeping arrays */
struct io_wait_handler{
  char *name;    /* reactor name, used only in log messages */
  int max_prio;  /* number of priority classes (size of prio_idx) */
#ifdef HAVE_EPOLL
  struct epoll_event* ep_array;
  int epfd; /* epoll ctrl fd */
#endif
#ifdef HAVE_SIGIO_RT
  sigset_t sset; /* signal mask for sigio & sigrtmin */
  int signo;     /* real time signal used */
#endif
#ifdef HAVE_KQUEUE
  struct kevent* kq_array;   /* used for the eventlist*/
  struct kevent* kq_changes; /* used for the changelist */
  size_t kq_nchanges;
  size_t kq_changes_size; /* size of the changes array */
  int kq_fd;
#endif
#ifdef HAVE_DEVPOLL
  int dpoll_fd;
  struct pollfd* dp_changes;
#endif
#ifdef HAVE_SELECT
  fd_set master_set;
  int max_fd_select; /* maximum select used fd */
#endif
  /* common stuff for POLL, SIGIO_RT and SELECT
   * since poll support is always compiled => this will always be compiled */
  int *prio_idx; /* size of max_prio - idxs in fd_array where prio changes*/
  struct fd_map* fd_hash;  /* indexed directly by fd (see get_fd_map) */
  struct pollfd* fd_array; /* kept sorted by priority, see prio_idx */
  int fd_no; /*  current index used in fd_array */
  int max_fd_no; /* maximum fd no, is also the size of fd_array,
                   fd_hash  and ep_array*/
  enum poll_types poll_method;
  int flags;
};

typedef struct io_wait_handler io_wait_h;
175
176
177
/*! \brief get the corresponding fd_map structure pointer;
 * the fd itself is the index into fd_hash (caller guarantees
 * 0 <= fd < max_fd_no) */
#define get_fd_map(h, fd)   (&(h)->fd_hash[(fd)])
179
180
/*! \brief remove a fd_map structure from the hash;
181
 * the pointer must be returned by get_fd_map or hash_fd_map
182
 */
183
/* Drops sock_flags from the map entry. If the fd is being closed
 * (c_flags contains IO_FD_CLOSING) or the entry's watched filter bits
 * exactly equal sock_flags (i.e. nothing would remain watched), the
 * whole entry is reset and `erase` is set to 1 (caller must also drop
 * it from fd_array); otherwise only the bits are cleared and `erase`
 * is set to 0. */
#define unhash_fd_map(pfm,c_flags,sock_flags,erase) \
  do{ \
    if ((c_flags & IO_FD_CLOSING) || (pfm->flags&IO_WATCH_PRV_FILTER)==sock_flags) { \
      (pfm)->type=0 /*F_NONE */; \
      (pfm)->fd=-1; \
      (pfm)->flags = 0; \
      (pfm)->data = NULL; \
      erase = 1; \
    } else { \
      (pfm)->flags &= ~sock_flags; \
      erase = 0; \
    } \
  }while(0)
196
197
/* Same as unhash_fd_map(), but additionally records its decision via
 * rla_log() (the in-memory log ring declared in io_watch_del()); only
 * usable in scopes where rla_log is defined. */
#define unhash_fd_map2(pfm,c_flags,sock_flags,erase)  \
  do{ \
    if ((c_flags & IO_FD_CLOSING) || (pfm->flags&IO_WATCH_PRV_FILTER)==sock_flags) { \
      rla_log("erase case detected with sflags=%x\n",sock_flags); \
      (pfm)->type=0 /*F_NONE */; \
      (pfm)->fd=-1; \
      (pfm)->flags = 0; \
      (pfm)->data = NULL; \
      erase = 1; \
    } else { \
      rla_log("not erasing flags=%x, flags=%x\n",(pfm)->flags,sock_flags); \
      (pfm)->flags &= ~sock_flags; \
      erase = 0; \
    } \
  }while(0)
212
213
214
/*! \brief add a fd_map structure to the fd hash */
215
static inline struct fd_map* hash_fd_map( io_wait_h* h,
216
            int fd,
217
            fd_type type,
218
            void* data,
219
            int flags,
220
            unsigned int timeout,
221
            int *already)
222
0
{
223
0
  if (h->fd_hash[fd].fd <= 0) {
224
0
    *already = 0;
225
0
  } else {
226
0
    *already = 1;
227
0
  }
228
229
0
  h->fd_hash[fd].fd=fd;
230
0
  h->fd_hash[fd].type=type;
231
0
  h->fd_hash[fd].data=data;
232
233
0
  h->fd_hash[fd].flags|=flags;
234
235
0
  h->fd_hash[fd].timeout = timeout;
236
237
0
  return &h->fd_hash[fd];
238
0
}
Unexecuted instantiation: net_tcp_proc.c:hash_fd_map
Unexecuted instantiation: net_tcp.c:hash_fd_map
Unexecuted instantiation: tcp_common.c:hash_fd_map
Unexecuted instantiation: net_udp.c:hash_fd_map
Unexecuted instantiation: async.c:hash_fd_map
Unexecuted instantiation: timer.c:hash_fd_map
Unexecuted instantiation: reactor.c:hash_fd_map
Unexecuted instantiation: io_wait.c:hash_fd_map
Unexecuted instantiation: cfg_reload.c:hash_fd_map
239
240
241
#ifdef HAVE_KQUEUE
242
/*
243
 * kqueue specific function: register a change
244
 * (adds a change to the kevent change array, and if full flushes it first)
245
 * returns: -1 on error, 0 on success
246
 */
247
static inline int kq_ev_change(io_wait_h* h, int fd, int filter, int flag,
                void* data)
{
  int n;
  struct timespec tspec;

  if (h->kq_nchanges>=h->kq_changes_size){
    /* changes array full ! */
    LM_WARN("[%s] kqueue changes array full trying to flush...\n",
      h->name);
    /* zero timeout => kevent() only submits the changelist, without
     * blocking for events (eventlist is NULL/0 anyway) */
    tspec.tv_sec=0;
    tspec.tv_nsec=0;
again:
    n=kevent(h->kq_fd, h->kq_changes, h->kq_nchanges, 0, 0, &tspec);
    if (n==-1){
      if (errno==EINTR) goto again;
      LM_ERR("[%s] kevent flush changes failed: %s [%d]\n",
        h->name, strerror(errno), errno);
      return -1;
    }
    h->kq_nchanges=0; /* changes array is empty */
  }
  /* queue the new change; it will be submitted by a later kevent() call */
  EV_SET(&h->kq_changes[h->kq_nchanges], fd, filter, flag, 0, 0,
      KEV_UDATA_CAST data);
  h->kq_nchanges++;
  return 0;
}
274
#endif
275
276
277
0
#define IO_WATCH_READ            (1<<0)
278
0
#define IO_WATCH_WRITE           (1<<1)
279
#define IO_WATCH_ERROR           (1<<2)
280
0
#define IO_WATCH_TIMEOUT         (1<<3)
281
/* 24 starting are reserved, do not attempt to use */
282
0
#define IO_WATCH_PRV_FILTER      ((1<<24)-1)
283
0
#define IO_WATCH_PRV_CHECKED     (1<<29)
284
0
#define IO_WATCH_PRV_TRIG_READ   (1<<30)
285
0
#define IO_WATCH_PRV_TRIG_WRITE  (1<<31)
286
287
/* debug helper: dumps (at L_DBG level) all fd_array entries with their
 * hashed flags, plus the per-priority start indexes; expects an
 * io_wait_h* named `h` in the calling scope */
#define fd_array_print \
  do { \
    int k;\
    LM_DBG("[%s] size=%d, fd array is",h->name,h->fd_no);\
    for(k=0;k<h->fd_no;k++) LM_GEN1(L_DBG," %d flags = %d",h->fd_array[k].fd,h->fd_hash[h->fd_array[k].fd].flags);\
    LM_GEN1(L_DBG,"\n"); \
    LM_DBG("[%s] size=%d, prio array is",h->name,h->max_prio);\
    for(k=0;k<h->max_prio;k++) LM_GEN1(L_DBG," %d",h->prio_idx[k]);\
    LM_GEN1(L_DBG,"\n"); \
  }while(0)
297
298
299
/* consistency audit between fd_array and fd_hash: (1) every fd_array
 * entry must point to a valid, watched fd_map; (2) every used fd_map
 * must have been seen in pass (1) and every unused one must be fully
 * zeroed; (3) the used-map count must equal fd_no. Expects `h`
 * (io_wait_h*) and an int `check_error` in the calling scope;
 * check_error is set to 1 on any mismatch, 0 otherwise. Temporarily
 * marks visited maps with IO_WATCH_PRV_CHECKED, then clears the mark. */
#define check_io_data() \
  do { \
    struct fd_map* _e;\
    int _t,k;\
    check_error = 0;\
    /* iterate the fd_array and check if fd_hash is properly set for each */ \
    for(k=0;k<h->fd_no;k++) {\
      _e = get_fd_map(h, h->fd_array[k].fd); \
      if (_e->type==0 || _e->fd<=0 || \
      (_e->flags&(IO_WATCH_READ|IO_WATCH_WRITE))==0 ) {\
        LM_BUG("fd_array idx %d (fd=%d) points to bogus map "\
          "(fd=%d,type=%d,flags=%x,data=%p)\n",k,h->fd_array[k].fd,\
          _e->fd, _e->type, _e->flags, _e->data);\
          check_error = 1;\
      }\
      _e->flags |= IO_WATCH_PRV_CHECKED;\
    }\
    /* iterate the fd_map and see if all records are checked */ \
    _t = 0; \
    for(k=0;k<h->max_fd_no;k++) {\
      _e = get_fd_map(h, k); \
      if (_e->type==0) { \
        /* fd not in used, everything should be on zero */ \
        if (_e->fd>0 || _e->data!=NULL || _e->flags!=0 ) {\
          LM_BUG("unused fd_map fd=%d has bogus data "\
          "(fd=%d,flags=%x,data=%p)\n",k,\
          _e->fd, _e->flags, _e->data);\
          check_error = 1;\
        }\
      } else {\
        /* fd in used, check if in checked */ \
        if (_e->fd<=0 || \
        (_e->flags&(IO_WATCH_READ|IO_WATCH_WRITE))==0 ) {\
        LM_BUG("used fd map fd=%d has bogus data "\
          "(fd=%d,type=%d,flags=%x,data=%p)\n",k,\
          _e->fd, _e->type, _e->flags, _e->data);\
          check_error = 1;\
        }\
        /* the map is valid */ \
        if ((_e->flags&IO_WATCH_PRV_CHECKED)==0) {\
          LM_BUG("used fd map fd=%d is not present in fd_array "\
            "(fd=%d,type=%d,flags=%x,data=%p)\n",k,\
            _e->fd, _e->type, _e->flags, _e->data);\
            check_error = 1;\
        }\
        _e->flags &= ~IO_WATCH_PRV_CHECKED;\
        _t++;\
      }\
    }\
    if (_t!=h->fd_no) { \
      LM_BUG("fd_map versus fd_array size mismatch: %d versus %d\n",\
        _t, h->fd_no);\
      check_error = 1;\
    }\
  } while(0)
354
355
356
/*! \brief generic io_watch_add function
357
 * \return 0 on success, -1 on error
358
 *
359
 * this version should be faster than pointers to poll_method specific
360
 * functions (it avoids functions calls, the overhead being only an extra
361
 *  switch())
362
*/
363
inline static int io_watch_add( io_wait_h* h, // lgtm [cpp/use-of-goto]
                int fd,
                fd_type type,
                void* data,
                int prio,
                unsigned int timeout,
                int flags)
{

  /* helper macros - they expand over the enclosing locals (n, ctl_flags)
   * and jump to this function's error label on failure */

  /* insert fd into fd_array at the start of its priority class and
   * shift the following priority start indexes by one */
#define fd_array_setup \
  do{ \
    n = h->prio_idx[prio]; \
    if (n<h->fd_no)\
      memmove( &h->fd_array[n+1], &h->fd_array[n],\
        (h->fd_no-n)*sizeof(*(h->fd_array)) ); \
    h->fd_array[n].fd=fd; \
    h->fd_array[n].events=0; \
    if (flags & IO_WATCH_READ) \
      h->fd_array[n].events|=POLLIN; /* useless for select */ \
    if (flags & IO_WATCH_WRITE) \
      h->fd_array[n].events|=POLLOUT; /* useless for select */ \
    h->fd_array[n].revents=0;     /* useless for select */ \
    for( n=prio ; n<h->max_prio ; n++) \
      h->prio_idx[n]++; \
    h->fd_no++; \
  }while(0)

  /* OR the given flag(s) into the fd's fcntl file status flags */
#define set_fd_flags(f) \
  do{ \
      ctl_flags=fcntl(fd, F_GETFL); \
      if (ctl_flags==-1){ \
        LM_ERR("[%s] fcntl: GETFL failed:" \
          " %s [%d]\n", h->name, strerror(errno), errno); \
        goto error; \
      } \
      if (fcntl(fd, F_SETFL, ctl_flags|(f))==-1){ \
        LM_ERR("[%s] fcntl: SETFL" \
          " failed: %s [%d]\n", h->name, strerror(errno), errno);\
        goto error; \
      } \
  }while(0)


  struct fd_map* e;
  int already=-1;
#ifdef HAVE_EPOLL
  struct epoll_event ep_event;
#endif
#ifdef HAVE_DEVPOLL
  struct pollfd pfd;
#endif
  int ctl_flags;
  int n;  //FIXME
  int check_error;
#if 0 //defined(HAVE_SIGIO_RT) || defined (HAVE_EPOLL) FIXME
  int n;
  int idx;
  int check_io;
  struct pollfd pf;

  check_io=0; /* set to 1 if we need to check for pre-existing queued
           io/data on the fd */
  idx=-1;
#endif
  e=0;

  if (fd==-1){
    LM_CRIT("fd is -1!\n");
    goto error0;
  }
  /* check if not too big */
  if (h->fd_no >= h->max_fd_no || fd >= h->max_fd_no) {
    LM_CRIT("[%s] maximum fd number exceeded: %d, %d/%d\n",
      h->name, fd, h->fd_no, h->max_fd_no);
    goto error0;
  }
  /* NOTE(review): prio_idx is documented as having max_prio entries, so
   * prio==max_prio passes this check yet makes fd_array_setup read
   * prio_idx[max_prio] - confirm whether valid prios are 0..max_prio-1
   * and this check should be ">=" */
  if (prio > h->max_prio) {
    LM_BUG("[%s] priority %d requested (max is %d)\n",
      h->name, prio, h->max_prio);
    goto error0;
  }
#if defined (HAVE_EPOLL)
  LM_DBG("[%s] io_watch_add op (%d on %d) (%p, %d, %d, %p,%d), fd_no=%d/%d\n",
      h->name,fd,h->epfd, h,fd,type,data,flags,h->fd_no,h->max_fd_no);
#else
  LM_DBG("[%s] io_watch_add op (%d) (%p, %d, %d, %p,%d), fd_no=%d/%d\n",
      h->name,fd, h,fd,type,data,flags,h->fd_no,h->max_fd_no);
#endif
  //fd_array_print;
  /*  hash sanity check */
  e=get_fd_map(h, fd);

  check_io_data();
  if (check_error) {
    LM_CRIT("[%s] check failed before fd add "
      "(fd=%d,type=%d,data=%p,flags=%x) already=%d\n",h->name,
      fd, type, data, flags, already);
  }

  /* all requested flags already watched => nothing to do, but the
   * attached data must match the existing entry */
  if (e->flags & flags){
    if (e->data != data) {
      LM_BUG("[%s] BUG trying to overwrite entry %d"
          " in the hash(%d, %d, %p,%d) with (%d, %d, %p,%d)\n",
          h->name,fd, e->fd, e->type, e->data,e->flags, fd, type, data,flags);
      goto error0;
    }
    LM_DBG("[%s] Socket %d is already being listened on for flags %d\n",
         h->name,fd,flags);
    return 0;
  }

  /* convert the relative timeout into an absolute tick value */
  if (timeout)
    timeout+=get_ticks();

  if ((e=hash_fd_map(h, fd, type, data,flags, timeout, &already))==0){
    LM_ERR("[%s] failed to hash the fd %d\n",h->name, fd);
    goto error0;
  }
  switch(h->poll_method){ /* faster then pointer to functions */
    case POLL_POLL:
      set_fd_flags(O_NONBLOCK);
      break;
#ifdef HAVE_SELECT
    case POLL_SELECT:
      FD_SET(fd, &h->master_set);
      if (h->max_fd_select<fd) h->max_fd_select=fd;
      break;
#endif
#ifdef HAVE_SIGIO_RT
    case POLL_SIGIO_RT:
      /* re-set O_ASYNC might be needed, if not done from
       * io_watch_del (or if somebody wants to add a fd which has
       * already O_ASYNC/F_SETSIG set on a dupplicate)
       */
      /* set async & signal */
      if (fcntl(fd, F_SETOWN, my_pid())==-1){
        LM_ERR("[%s] fcntl: SETOWN"
        " failed: %s [%d]\n",h->name, strerror(errno), errno);
        goto error;
      }
      if (fcntl(fd, F_SETSIG, h->signo)==-1){
        LM_ERR("[%s] fcntl: SETSIG"
          " failed: %s [%d]\n",h->name, strerror(errno), errno);
        goto error;
      }
      /* set both non-blocking and async */
      set_fd_flags(O_ASYNC| O_NONBLOCK);
#ifdef EXTRA_DEBUG
      LM_DBG("[%s] sigio_rt on f %d, signal %d to pid %d\n",
          h->name,fd,  h->signo, my_pid());
#endif
      /* empty socket receive buffer, if buffer is already full
       * no more space to put packets
       * => no more signals are ever generated
       * also when moving fds, the freshly moved fd might have
       *  already some bytes queued, we want to get them now
       *  and not later -- andrei */
      //idx=h->fd_no;  FIXME
      //check_io=1;
      break;
#endif
#ifdef HAVE_EPOLL
    case POLL_EPOLL:
      ep_event.data.ptr=e;
      ep_event.events=0;
      if (e->flags & IO_WATCH_READ)
        ep_event.events|=EPOLLIN;
      if (e->flags & IO_WATCH_WRITE)
        ep_event.events|=EPOLLOUT;
      /* fresh fd => ADD; fd already in the epoll set => MOD with the
       * merged event mask */
      if (!already) {
again1:
#if 0
/* This is currently broken, because when using EPOLLEXCLUSIVE, the OS will
 * send sequential events to the same process - thus our pseudo-dispatcher
 * will no longer work, since events on a pipe will be queued by a single
 * process. - razvanc
 */
#if (defined __OS_linux) && (__GLIBC__ >= 2) && (__GLIBC_MINOR__ >= 24)
        if (e->flags & IO_WATCH_READ)
          ep_event.events|=EPOLLEXCLUSIVE;
#endif
#endif
        n=epoll_ctl(h->epfd, EPOLL_CTL_ADD, fd, &ep_event);
        if (n==-1){
          if (errno==EAGAIN) goto again1;
          LM_ERR("[%s] epoll_ctl ADD failed: %s [%d]\n",
            h->name,strerror(errno), errno);
          goto error;
        }
      } else {
again11:
        n=epoll_ctl(h->epfd, EPOLL_CTL_MOD, fd, &ep_event);
        if (n==-1){
          if (errno==EAGAIN) goto again11;
          LM_ERR("[%s] epoll_ctl MOD failed: %s [%d]\n",
            h->name,strerror(errno), errno);
          goto error;
        }
      }
      break;
#endif
#ifdef HAVE_KQUEUE
    case POLL_KQUEUE:
      if (kq_ev_change(h, fd, EVFILT_READ, EV_ADD, e)==-1)
        goto error;
      break;
#endif
#ifdef HAVE_DEVPOLL
    case POLL_DEVPOLL:
      pfd.fd=fd;
      pfd.events=POLLIN;
      pfd.revents=0;
again_devpoll:
      if (write(h->dpoll_fd, &pfd, sizeof(pfd))==-1){
        if (errno==EAGAIN) goto again_devpoll;
        LM_ERR("[%s] /dev/poll write failed:"
          "%s [%d]\n",h->name, strerror(errno), errno);
        goto error;
      }
      break;
#endif

    default:
      LM_CRIT("[%s] no support for poll method "
        " %s (%d)\n",h->name, poll_method_str[h->poll_method],
        h->poll_method);
      goto error;
  }

  /* newly hashed fd => insert it into fd_array at its priority slot */
  if (!already) {
    fd_array_setup;
  }

#if 0 //defined(HAVE_SIGIO_RT) || defined (HAVE_EPOLL) FIXME !!!
  if (check_io){
    /* handle possible pre-existing events */
    pf.fd=fd;
    pf.events=POLLIN;
check_io_again:
    while( ((n=poll(&pf, 1, 0))>0) && (handle_io(e, idx,IO_WATCH_READ)>0));
    if (n==-1){
      if (errno==EINTR) goto check_io_again;
      LM_ERR("check_io poll: %s [%d]\n",
            strerror(errno), errno);
    }
  }
#endif
  //fd_array_print;
  check_io_data();
  if (check_error) {
    LM_CRIT("[%s] check failed after successful fd add "
      "(fd=%d,type=%d,data=%p,flags=%x) already=%d\n",h->name,
      fd, type, data, flags, already);
  }
  return 0;
error:
  /* roll back the hash entry; `already` just absorbs the erase output */
  if (e) unhash_fd_map(e,0,flags,already);
error0:
  check_io_data();
  if (check_error) {
    LM_CRIT("[%s] check failed after failed fd add "
      "(fd=%d,type=%d,data=%p,flags=%x) already=%d\n",h->name,
      fd, type, data, flags, already);
  }
  return -1;
#undef fd_array_setup
#undef set_fd_flags
}
Unexecuted instantiation: net_tcp_proc.c:io_watch_add
Unexecuted instantiation: net_tcp.c:io_watch_add
Unexecuted instantiation: tcp_common.c:io_watch_add
Unexecuted instantiation: net_udp.c:io_watch_add
Unexecuted instantiation: async.c:io_watch_add
Unexecuted instantiation: timer.c:io_watch_add
Unexecuted instantiation: reactor.c:io_watch_add
Unexecuted instantiation: io_wait.c:io_watch_add
Unexecuted instantiation: cfg_reload.c:io_watch_add
632
633
634
635
/*!
636
 * \brief
637
 * \param h handler
638
 * \param fd file descriptor
639
 * \param idx index in the fd_array if known, -1 if not
640
 *                    (if index==-1 fd_array will be searched for the
641
 *                     corresponding fd* entry -- slower but unavoidable in
642
 *                     some cases). index is not used (no fd_array) for epoll,
643
 *                     /dev/poll and kqueue
644
 * \param flags optimization flags, e.g. IO_FD_CLOSING, the fd was or will
645
 *                    shortly be closed, in some cases we can avoid extra
646
 *                    remove operations (e.g.: epoll, kqueue, sigio)
647
 * \return 0 if ok, -1 on error
648
 */
649
inline static int io_watch_del(io_wait_h* h, int fd, int idx,
          int flags,int sock_flags)
{
  /* keep fd_array in sync with the (already updated) fd_hash: when the
   * map entry was fully erased, remove the slot and shift the priority
   * indexes down; otherwise just rebuild the poll events from the
   * watch flags that remain in the map */
#define fix_fd_array \
  do{\
      if (idx==-1){ \
        /* fix idx if -1 and needed */ \
        for (idx=0; (idx<h->fd_no) && \
              (h->fd_array[idx].fd!=fd); idx++); \
      } \
      rla_log("fixing: final idx=%d out of %d, erase=%d\n",idx,idx<h->fd_no,erase); \
      if (idx<h->fd_no){ \
        if (erase) { \
          memmove(&h->fd_array[idx], &h->fd_array[idx+1], \
            (h->fd_no-(idx+1))*sizeof(*(h->fd_array))); \
          for( i=0 ; i<h->max_prio && h->prio_idx[i]<=idx ; i++ ); \
          for( ; i<h->max_prio ; i++ ) h->prio_idx[i]-- ; \
          h->fd_no--; \
        } else { \
          h->fd_array[idx].events = 0; \
          if (e->flags & IO_WATCH_READ) \
            h->fd_array[idx].events|=POLLIN; /* useless for select */ \
          /* FIX: rebuild POLLOUT from the remaining watch flags in the \
           * map (e->flags), not from the IO_FD_CLOSING-style control \
           * `flags` argument, mirroring the READ line above and the \
           * epoll MOD path below */ \
          if (e->flags & IO_WATCH_WRITE) \
            h->fd_array[idx].events|=POLLOUT; /* useless for select */ \
          h->fd_array[idx].revents = 0; \
        } \
      } \
  }while(0)

  struct fd_map* e;
#ifdef HAVE_EPOLL
  int n;
  struct epoll_event ep_event;
#endif
#ifdef HAVE_DEVPOLL
  struct pollfd pfd;
#endif
#ifdef HAVE_SIGIO_RT
  int fd_flags;
#endif
  int erase = 0;
  int check_error;
  int i;

  /* small in-memory ring of the last debug messages, dumped (rla_dump)
   * only when a consistency problem is detected */
  #define x_RL_NO  10
  #define x_RL_LEN 200
  static char rla[x_RL_NO][x_RL_LEN];
  int rla_idx=0;
  #define rla_log( _fmt, args...) \
    snprintf( rla[rla_idx++], x_RL_LEN, _fmt, ## args)
  #define rla_dump() \
    do { \
      int w; \
      for(w=0;w<rla_idx;w++) \
        LM_CRIT("[%d]-> [%s]",w,rla[w]); \
    } while(0)

  if ((fd<0) || (fd>=h->max_fd_no)){
    /* FIX: report the actual bound used by the check (max_fd_no),
     * not the current fill level (fd_no) */
    LM_CRIT("[%s] invalid fd %d, not in [0, %d)\n", h->name, fd,
      h->max_fd_no);
    goto error0;
  }
  LM_DBG("[%s] io_watch_del op on index %d %d (%p, %d, %d, 0x%x,0x%x) "
    "fd_no=%d called\n", h->name,idx,fd, h, fd, idx, flags,
    sock_flags,h->fd_no);
  rla_log("[%s] io_watch_del op on index %d %d (%p, %d, %d, 0x%x,0x%x) "
    "fd_no=%d called\n", h->name,idx,fd, h, fd, idx, flags,
    sock_flags,h->fd_no);
  //fd_array_print;

  e=get_fd_map(h, fd);
  /* more sanity checks */
  if (e==0){
    LM_CRIT("[%s] no corresponding hash entry for %d\n",h->name, fd);
    goto error0;
  }
  if (e->type==0 /*F_NONE*/){
    LM_ERR("[%s] trying to delete already erased"
        " entry %d in the hash(%d, %d, %p) )\n",
        h->name,fd, e->fd, e->type, e->data);
    goto error0;
  }

  /* a caller-provided idx is only a hint - fall back to a linear
   * search (idx=-1) if it is out of range or points to a different fd */
  if (idx != -1) {
    if (!(idx>=0 && idx<h->fd_no)) {
      LM_CRIT("[%s] FD index check failed, idx=%d, max=%d"
        " operating on %d\n",h->name, idx, h->fd_no, fd );
      log_backtrace();
      rla_dump();
      idx = -1;
    } else if (h->fd_array[idx].fd!=fd) {
      LM_CRIT("[%s] FD consistency check failed, idx=%d points to fd=%d,"
        " but operating on %d\n",h->name, idx, h->fd_array[idx].fd, fd );
      log_backtrace();
      rla_dump();
      idx = -1;
    }
  }

  if ((e->flags & sock_flags) == 0) {
    LM_ERR("BUG - [%s] trying to del fd %d with flags %d %d\n",
      h->name, fd, e->flags,sock_flags);
    goto error0;
  }

  /* drop sock_flags from the map; erase=1 if the entry was fully reset */
  unhash_fd_map2(e,flags,sock_flags,erase);

  switch(h->poll_method){
    case POLL_POLL:
      break;
#ifdef HAVE_SELECT
    case POLL_SELECT:
      FD_CLR(fd, &h->master_set);
      if (h->max_fd_select && (h->max_fd_select==fd))
        /* we don't know the prev. max, so we just decrement it */
        h->max_fd_select--;
      break;
#endif
#ifdef HAVE_SIGIO_RT
    case POLL_SIGIO_RT:
      /* the O_ASYNC flag must be reset all the time, the fd
       *  can be changed only if  O_ASYNC is reset (if not and
       *  the fd is a duplicate, you will get signals from the dup. fd
       *  and not from the original, even if the dup. fd was closed
       *  and the signals re-set on the original) -- andrei
       */
      /*if (!(flags & IO_FD_CLOSING)){*/
        /* reset ASYNC */
        fd_flags=fcntl(fd, F_GETFL);
        if (fd_flags==-1){
          LM_ERR("[%s] fcntl: GETFL failed:"
            " %s [%d]\n",h->name, strerror(errno), errno);
          goto error;
        }
        if (fcntl(fd, F_SETFL, fd_flags&(~O_ASYNC))==-1){
          LM_ERR("[%s] fcntl: SETFL"
            " failed: %s [%d]\n",h->name, strerror(errno), errno);
          goto error;
        }
      break;
#endif
#ifdef HAVE_EPOLL
    case POLL_EPOLL:
      /* epoll doesn't seem to automatically remove sockets,
       * if the socket is a dupplicate/moved and the original
       * is still open. The fd is removed from the epoll set
       * only when the original (and all the  copies?) is/are
       * closed. This is probably a bug in epoll. --andrei */
#ifdef EPOLL_NO_CLOSE_BUG
      if (!(flags & IO_FD_CLOSING)){
#endif
        if (erase) {
          n=epoll_ctl(h->epfd, EPOLL_CTL_DEL, fd, &ep_event);
          /*
           * in some cases (fds managed by external libraries),
           * the fd may have already been closed
           */
          if (n==-1 && errno != EBADF && errno != ENOENT) {
            LM_ERR("[%s] removing fd from epoll (%d from %d) "
              "list failed: %s [%d]\n",h->name, fd, h->epfd,
              strerror(errno), errno);
            goto error;
          }
        } else {
          /* still watched for something => re-arm with what remains */
          ep_event.data.ptr=e;
          ep_event.events=0;
          if (e->flags & IO_WATCH_READ)
            ep_event.events|=EPOLLIN;
          if (e->flags & IO_WATCH_WRITE)
            ep_event.events|=EPOLLOUT;
          n=epoll_ctl(h->epfd, EPOLL_CTL_MOD, fd, &ep_event);
          if (n==-1){
            LM_ERR("[%s] epoll_ctl failed: %s [%d]\n",
              h->name,strerror(errno), errno);
            goto error;
          }
        }
#ifdef EPOLL_NO_CLOSE_BUG
      }
#endif
      break;
#endif
#ifdef HAVE_KQUEUE
    case POLL_KQUEUE:
      if (!(flags & IO_FD_CLOSING)){
        if (kq_ev_change(h, fd, EVFILT_READ, EV_DELETE, 0)==-1)
          goto error;
      }
      break;
#endif
#ifdef HAVE_DEVPOLL
    case POLL_DEVPOLL:
        /* for /dev/poll the closed fds _must_ be removed
           (they are not removed automatically on close()) */
        pfd.fd=fd;
        pfd.events=POLLREMOVE;
        pfd.revents=0;
again_devpoll:
        if (write(h->dpoll_fd, &pfd, sizeof(pfd))==-1){
          if (errno==EINTR) goto again_devpoll;
          LM_ERR("[%s] removing fd from /dev/poll failed: "
            "%s [%d]\n",h->name, strerror(errno), errno);
          goto error;
        }
        break;
#endif
    default:
      LM_CRIT("[%s] no support for poll method %s (%d)\n",
        h->name,poll_method_str[h->poll_method], h->poll_method);
      goto error;
  }
  /* note: a stray trailing '\' after this statement (which spliced the
   * following blank line) was removed */
  rla_log("fixing fd array, idx=%d\n",idx);

  fix_fd_array;
  //fd_array_print;

  check_io_data();
  if (check_error) {
    LM_CRIT("[%s] check failed after successful fd del "
      "(fd=%d,flags=%d, sflags=%d) over map "
      "(fd=%d,type=%d,data=%p,flags=%d) erase=%d\n",h->name,
      fd, flags, sock_flags,
      e->fd, e->type, e->data, e->flags,
      erase);
    rla_dump();
  }

  return 0;
error:
  /*
   * although the DEL operation failed, both
   * "fd_hash" and "fd_array" must remain consistent
   */
  fix_fd_array;

  check_io_data();
  if (check_error) {
    LM_CRIT("[%s] check failed after failed fd del "
      "(fd=%d,flags=%d, sflags=%d) over map "
      "(fd=%d,type=%d,data=%p,flags=%d) erase=%d\n",h->name,
      fd, flags, sock_flags,
      e->fd, e->type, e->data, e->flags,
      erase);
    rla_dump();
  }
error0:

  return -1;
#undef fix_fd_array
}
Unexecuted instantiation: net_tcp_proc.c:io_watch_del
Unexecuted instantiation: net_tcp.c:io_watch_del
Unexecuted instantiation: tcp_common.c:io_watch_del
Unexecuted instantiation: net_udp.c:io_watch_del
Unexecuted instantiation: async.c:io_watch_del
Unexecuted instantiation: timer.c:io_watch_del
Unexecuted instantiation: reactor.c:io_watch_del
Unexecuted instantiation: io_wait.c:io_watch_del
Unexecuted instantiation: cfg_reload.c:io_watch_del
898
899
900
/* init */
901
902
903
/*! \brief initializes the static vars/arrays
904
 * \param h pointer to the io_wait_h that will be initialized
905
 * \param max_fd maximum allowed fd number
906
 * \param poll_method poll method (0 for automatic best fit)
907
 */
908
int init_io_wait(io_wait_h* h, char *name, int max_fd,
909
                enum poll_types poll_method, int max_prio);
910
911
/*! \brief destroys everything init_io_wait allocated */
912
void destroy_io_wait(io_wait_h* h);
913
914
int io_set_app_flag( io_wait_h *h , int type, int app_flag);
915
916
int io_check_app_flag( io_wait_h *h , int app_flag);
917
918
919
#endif