Coverage Report

Created: 2025-08-26 06:38

/src/opensips/io_wait_loop.h
Source listing (per-line execution counts omitted: every instrumented line in this file reports a count of 0):
/*
 * Copyright (C) 2014-2015 OpenSIPS Solutions
 * Copyright (C) 2005 iptelorg GmbH
 *
 * This file is part of opensips, a free SIP server.
 *
 * opensips is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version
 *
 * opensips is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA
 *
 * History:
 * --------
 *  2014-08-25  split from io_wait.h (bogdan)
 */

/*!
 * \file
 * \brief io wait looping and triggering functions
 */


#ifndef _io_wait_loop_h
#define _io_wait_loop_h

#include "io_wait.h"


#ifdef HANDLE_IO_INLINE
/*!\brief generic handle io routine
 * this must be defined in the including file
 * (faster than registering a callback pointer)
 *
 * \param fm pointer to a fd hash entry
 * \param idx index in the fd_array (or -1 if not known)
 * \return return: -1 on error
 *          0 on EAGAIN or when by some other way it is known that no more
 *            io events are queued on the fd (the receive buffer is empty).
 *            Useful to detect when there are no more io events queued for
 *            sigio_rt, epoll_et, kqueue.
 *         >0 on successful read from the fd (when there might be more io
 *            queued -- the receive buffer might still be non-empty)
 */
inline static int handle_io(struct fd_map* fm, int idx,int event_type);
#else
static int handle_io(struct fd_map* fm, int idx,int event_type) {
  return 0;
}
#endif
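
As the comment above states, when HANDLE_IO_INLINE is defined the including translation unit has to supply the handle_io() definition itself; the header only forward-declares it. A minimal sketch of such an including .c file, assuming the usual OpenSIPS types and constants from io_wait.h (the body shown is illustrative, not taken from any real caller):

/* editor's sketch of an including .c file -- illustrative only */
#define HANDLE_IO_INLINE
#include "io_wait_loop.h"

/* definition matching the forward declaration in io_wait_loop.h */
inline static int handle_io(struct fd_map* fm, int idx, int event_type)
{
  switch (event_type) {
  case IO_WATCH_READ:
    /* read from fm->fd until EAGAIN: return 0 once the receive buffer
     * is drained, >0 if more data may still be queued, -1 on error
     * (matching the contract documented above) */
    return 0;
  case IO_WATCH_WRITE:
  case IO_WATCH_TIMEOUT:
  default:
    return 0;
  }
}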



/*! \brief io_wait_loop_x style function
 * wait for io using poll()
 * \param h io_wait handle
 * \param t timeout in s
 * \param repeat if !=0 handle_io will be called until it returns <=0
 * \return number of IO events handled on success (can be 0), -1 on error
 */
inline static int io_wait_loop_poll(io_wait_h* h, int t, int repeat)
{
  int n, r;
  int ret;
  struct fd_map *e;
  unsigned int curr_time;

again:
    ret=n=poll(h->fd_array, h->fd_no, t*1000);
    if (n==-1){
      if (errno==EINTR) goto again; /* signal, ignore it */
      else{
        LM_ERR("[%s] poll: %s [%d]\n",h->name, strerror(errno), errno);
        goto error;
      }
    }

    curr_time = get_ticks();

    for (r=h->fd_no-1; (r>=0) ; r--){
      if (h->fd_array[r].revents & POLLOUT) {
        /* sanity checks */
        if ((h->fd_array[r].fd >= h->max_fd_no)||
            (h->fd_array[r].fd < 0)){
          LM_CRIT("[%s] bad fd %d (no in the 0 - %d range)\n",
            h->name, h->fd_array[r].fd, h->max_fd_no);
          /* try to continue anyway */
          h->fd_array[r].events=0; /* clear the events */
          continue;
        }
        handle_io(get_fd_map(h, h->fd_array[r].fd),r,IO_WATCH_WRITE);
      } else if (h->fd_array[r].revents & (POLLIN|POLLERR|POLLHUP)){
        /* sanity checks */
        if ((h->fd_array[r].fd >= h->max_fd_no)||
            (h->fd_array[r].fd < 0)){
          LM_CRIT("[%s] bad fd %d (no in the 0 - %d range)\n",
            h->name,h->fd_array[r].fd, h->max_fd_no);
          /* try to continue anyway */
          h->fd_array[r].events=0; /* clear the events */
          continue;
        }

        while((handle_io(get_fd_map(h, h->fd_array[r].fd), r,
        IO_WATCH_READ) > 0)
             && repeat);
      } else if ( (e=get_fd_map(h, h->fd_array[r].fd))!=NULL &&
      e->timeout!=0 && e->timeout<=curr_time ) {
        e->timeout = 0;
        handle_io( e, r, IO_WATCH_TIMEOUT);
      }
    }
error:
  return ret;
}
Unexecuted instantiation: net_tcp_proc.c:io_wait_loop_poll
Unexecuted instantiation: net_tcp.c:io_wait_loop_poll
Unexecuted instantiation: net_udp.c:io_wait_loop_poll
Unexecuted instantiation: timer.c:io_wait_loop_poll
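
Per the doc comment above, t is a timeout in seconds, a non-zero repeat keeps calling handle_io() on a ready descriptor until it returns <=0, and the return value is the number of handled IO events (0 on timeout) or -1 on error. A minimal sketch of a caller, assuming a hypothetical reactor_run() wrapper that is not part of OpenSIPS:

/* editor's sketch -- hypothetical caller, error handling trimmed */
static void reactor_run(io_wait_h *h)
{
  int n;

  for (;;) {
    /* block for at most 1 second; repeat==1 drains each ready fd
     * (handle_io() is re-invoked until it returns <=0) */
    n = io_wait_loop_poll(h, 1 /* timeout in s */, 1 /* repeat */);
    if (n < 0)
      LM_ERR("io_wait_loop_poll failed\n");
    /* n==0 simply means the timeout expired with no IO events */
  }
}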



#ifdef HAVE_SELECT
/*! \brief wait for io using select */
inline static int io_wait_loop_select(io_wait_h* h, int t, int repeat)
{
  fd_set sel_set;
  int n, ret;
  struct timeval timeout;
  int r;
  struct fd_map *e;
  unsigned int curr_time;

again:
    sel_set=h->master_set;
    timeout.tv_sec=t;
    timeout.tv_usec=0;
    ret=n=select(h->max_fd_select+1, &sel_set, 0, 0, &timeout);
    if (n<0){
      if (errno==EINTR) goto again; /* just a signal */
      LM_ERR("[%s] select: %s [%d]\n",h->name, strerror(errno), errno);
      n=0;
      /* continue */
    }

    curr_time = get_ticks();

    /* use poll fd array */
    for(r=h->fd_no-1; (r>=0) ; r--){
      if (FD_ISSET(h->fd_array[r].fd, &sel_set)){
        while( (handle_io( get_fd_map(h, h->fd_array[r].fd), r,
        IO_WATCH_READ)>0) && repeat );
      } else if ( (e=get_fd_map(h, h->fd_array[r].fd))!=NULL &&
      e->timeout!=0 && e->timeout<=curr_time ) {
        e->timeout = 0;
        handle_io( e, r, IO_WATCH_TIMEOUT);
      }
    };
  return ret;
}
Unexecuted instantiation: net_tcp_proc.c:io_wait_loop_select
Unexecuted instantiation: net_tcp.c:io_wait_loop_select
Unexecuted instantiation: net_udp.c:io_wait_loop_select
Unexecuted instantiation: timer.c:io_wait_loop_select
#endif



#ifdef HAVE_EPOLL
inline static int io_wait_loop_epoll(io_wait_h* h, int t, int repeat)
{
  int ret, n, r, i;
  struct fd_map *e;
  struct epoll_event ep_event;
  int fd;
  unsigned int curr_time;

again:
    ret=n=epoll_wait(h->epfd, h->ep_array, h->fd_no, t*1000);
    if (n==-1){
      if (errno == EINTR) {
        goto again; /* signal, ignore it */
      } else if (h->fd_no == 0) {
        sleep(t);
        return 0;
      } else {
        LM_ERR("[%s] epoll_wait(%d, %p, %d, %d): %s [%d]\n",
          h->name,h->epfd, h->ep_array, h->fd_no, t*1000,
          strerror(errno), errno);
        goto error;
      }
    }

    curr_time = get_ticks();

    for (r=0; r<n; r++) {
#if 0
      LM_NOTICE("[%s] triggering  fd %d, events %d, flags %d\n",
        h->name, ((struct fd_map*)h->ep_array[r].data.ptr)->fd,
        h->ep_array[r].events,
        ((struct fd_map*)h->ep_array[r].data.ptr)->flags);
#endif
      /* do some sanity check over the triggered fd */
      e = ((struct fd_map*)h->ep_array[r].data.ptr);
      if (e->type==0 || e->fd<=0 ||
      (e->flags&(IO_WATCH_READ|IO_WATCH_WRITE))==0 ) {
        fd = e - h->fd_hash;
        LM_ERR("[%s] unset/bogus map (idx=%d) triggered for %d by "
          "epoll (fd=%d,type=%d,flags=%x,data=%p) -> removing "
          "from epoll\n", h->name,
          fd, h->ep_array[r].events,
          e->fd, e->type, e->flags, e->data);
        /* as the triggering fd has no corresponding in fd_map, better
         * remove it from poll, to avoid un-managed reporting
         * on this fd */
        if (epoll_ctl(h->epfd, EPOLL_CTL_DEL, fd, &ep_event)<0) {
          LM_ERR("failed to remove from epoll %s(%d)\n",
            strerror(errno), errno);
        }
        close(fd);
        continue;
      }

      /* anything containing EPOLLIN (like HUP or ERR) goes as a READ */
      if (h->ep_array[r].events & EPOLLIN) {
        if ((e->flags&IO_WATCH_READ)==0) {
          LM_BUG("[%s] EPOLLIN triggered(%d) for non-read fd_map "
            "(fd=%d,type=%d,flags=%x,data=%p)\n",h->name,
            h->ep_array[r].events,
            e->fd, e->type, e->flags, e->data);
        }
        if (h->ep_array[r].events&EPOLLHUP) {
          LM_DBG("[%s] EPOLLHUP on IN ->"
            "connection closed by the remote peer!\n",h->name);
        }

        ((struct fd_map*)h->ep_array[r].data.ptr)->flags |=
          IO_WATCH_PRV_TRIG_READ;

      /* anything containing EPOLLOUT (like HUP or ERR) goes as a WRITE*/
      } else if (h->ep_array[r].events & EPOLLOUT){
        if ((e->flags&IO_WATCH_WRITE)==0) {
          LM_BUG("[%s] EPOLLOUT triggered(%d) for non-read fd_map "
            "(fd=%d,type=%d,flags=%x,data=%p)\n",h->name,
            h->ep_array[r].events,
            e->fd, e->type, e->flags, e->data);
        }
        if (h->ep_array[r].events&EPOLLHUP) {
          LM_DBG("[%s] EPOLLHUP on OUT ->"
            "connection closed by the remote peer!\n",h->name);
        }

        ((struct fd_map*)h->ep_array[r].data.ptr)->flags |=
          IO_WATCH_PRV_TRIG_WRITE;

      /* ERR or HUP without IN or OUT triggering ?? */
      } else if (h->ep_array[r].events & (EPOLLERR|EPOLLHUP) ) {
        LM_DBG("[%s] non-op event %x, using flags %x\n",h->name,
          h->ep_array[r].events,
          ((struct fd_map*)h->ep_array[r].data.ptr)->flags);

        /* as the epoll did not provide any info on IN/OUT
         * we look back the IO flags we set */
        if ( ((struct fd_map*)h->ep_array[r].data.ptr)->flags & IO_WATCH_WRITE )
          ((struct fd_map*)h->ep_array[r].data.ptr)->flags |=
            IO_WATCH_PRV_TRIG_WRITE;
        else
          ((struct fd_map*)h->ep_array[r].data.ptr)->flags |=
            IO_WATCH_PRV_TRIG_READ;

      } else {
        LM_ERR("[%s] unexpected event %x on %d/%d, data=%p\n",
          h->name,h->ep_array[r].events, r+1, n,
          h->ep_array[r].data.ptr);
      }
    }
    /* now do the actual running of IO handlers */
    for(r=h->fd_no-1; (r>=0) ; r--) {
      e = get_fd_map(h, h->fd_array[r].fd);
      /* test the sanity of the fd_map */
      if (e->flags & (IO_WATCH_PRV_TRIG_READ|IO_WATCH_PRV_TRIG_WRITE)) {
        /* the fd correlated to this fd_map was triggered by the
         * reactor, so let's check if the fd_map payload is still
         * valid */
        if (e->fd==-1 || e->type==F_NONE) {
          /* this is bogus!! */
          LM_BUG("[%s] FD %d with map (%d,%d,%p) is out of sync,"
            " removing it from reactor\n",
            h->name, h->fd_array[r].fd, e->fd, e->type, e->data);
          /* remove from epoll */
          epoll_ctl(h->epfd, EPOLL_CTL_DEL, h->fd_array[r].fd,
            &ep_event);
          close(h->fd_array[r].fd);
          /* remove from fd_array */
          memmove(&h->fd_array[r], &h->fd_array[r+1],
            (h->fd_no-(r+1))*sizeof(*(h->fd_array)));
          for( i=0 ; i<h->max_prio && h->prio_idx[i]<=r ; i++ );
          for( ; i<h->max_prio ; i++ ) h->prio_idx[i]-- ;
          h->fd_no--;
          /* no handler triggering for this FD */
          continue;
        }
      }
      if ( e->flags & IO_WATCH_PRV_TRIG_READ ) {
        e->flags &= ~IO_WATCH_PRV_TRIG_READ;
        while((handle_io( e, r, IO_WATCH_READ)>0) && repeat);
      } else if ( e->flags & IO_WATCH_PRV_TRIG_WRITE ){
        e->flags &= ~IO_WATCH_PRV_TRIG_WRITE;
        handle_io( e, r, IO_WATCH_WRITE);
      } else if ( e->timeout!=0 && e->timeout<=curr_time ) {
        e->timeout = 0;
        handle_io( e, r, IO_WATCH_TIMEOUT);
      }
    }

error:
  return ret;
}
Unexecuted instantiation: net_tcp_proc.c:io_wait_loop_epoll
Unexecuted instantiation: net_tcp.c:io_wait_loop_epoll
Unexecuted instantiation: net_udp.c:io_wait_loop_epoll
Unexecuted instantiation: timer.c:io_wait_loop_epoll
#endif
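
io_wait_loop_epoll() above dispatches in two phases: the first loop over ep_array only records what fired, OR-ing IO_WATCH_PRV_TRIG_READ / IO_WATCH_PRV_TRIG_WRITE into the corresponding fd_map (with HUP/ERR collapsed onto read or write), and only the second loop over fd_array actually runs handle_io(), which is also where out-of-sync entries get dropped. A self-contained sketch of the same mark-then-dispatch idea, using hypothetical names rather than the io_wait_h bookkeeping:

/* editor's sketch: mark-then-dispatch over epoll results (hypothetical
 * names, not OpenSIPS code); compile with: cc -o epoll_demo epoll_demo.c */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/epoll.h>

#define MAX_FDS    1024
#define TRIG_READ  (1 << 0)
#define TRIG_WRITE (1 << 1)

static unsigned char trig[MAX_FDS];  /* per-fd "private trigger" flags */

static void handle_read(int fd)  { printf("fd %d readable\n", fd); }
static void handle_write(int fd) { printf("fd %d writable\n", fd); }

int main(void)
{
  struct epoll_event ev, results[64];
  int epfd, pipefd[2], n, i, fd;

  epfd = epoll_create1(0);
  if (epfd == -1 || pipe(pipefd) == -1 || write(pipefd[1], "x", 1) == -1)
    return 1;                        /* make the read end readable */

  memset(&ev, 0, sizeof(ev));
  ev.events = EPOLLIN;
  ev.data.fd = pipefd[0];
  epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev);

  n = epoll_wait(epfd, results, 64, 1000);

  /* phase 1: only record what fired (HUP/ERR collapse onto READ) */
  for (i = 0; i < n; i++) {
    fd = results[i].data.fd;
    if (results[i].events & (EPOLLIN | EPOLLHUP | EPOLLERR))
      trig[fd] |= TRIG_READ;
    if (results[i].events & EPOLLOUT)
      trig[fd] |= TRIG_WRITE;
  }

  /* phase 2: walk our own bookkeeping and run the handlers; closing or
   * unregistering an fd here cannot invalidate results[] filled above */
  for (fd = 0; fd < MAX_FDS; fd++) {
    if (trig[fd] & TRIG_READ)  { trig[fd] &= ~TRIG_READ;  handle_read(fd); }
    if (trig[fd] & TRIG_WRITE) { trig[fd] &= ~TRIG_WRITE; handle_write(fd); }
  }

  close(pipefd[0]); close(pipefd[1]); close(epfd);
  return 0;
}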



#ifdef HAVE_KQUEUE
inline static int io_wait_loop_kqueue(io_wait_h* h, int t, int repeat)
{
  int ret, n, r;
  struct timespec tspec;
  struct fd_map *e;
  unsigned int curr_time;

  tspec.tv_sec=t;
  tspec.tv_nsec=0;
again:
    ret=n=kevent(h->kq_fd, h->kq_changes, h->kq_nchanges,  h->kq_array,
          h->fd_no, &tspec);
    if (n==-1){
      if (errno==EINTR) goto again; /* signal, ignore it */
      else{
        LM_ERR("[%s] kevent: %s [%d]\n", h->name,
          strerror(errno), errno);
        goto error;
      }
    }

    curr_time = get_ticks();

    h->kq_nchanges=0; /* reset changes array */
    for (r=0; r<n; r++){
#ifdef EXTRA_DEBUG
      LM_DBG("[%s] event %d/%d: fd=%d, udata=%lx, flags=0x%x\n",
        h->name, r, n, h->kq_array[r].ident,
        (long)h->kq_array[r].udata,
        h->kq_array[r].flags);
#endif
      if (h->kq_array[r].flags & EV_ERROR){
        /* error in changes: we ignore it, it can be caused by
           trying to remove an already closed fd: race between
           adding something to the changes array, close() and
           applying the changes */
        LM_INFO("[%s] kevent error on fd %u: %s [%ld]\n",
          h->name, (unsigned int)h->kq_array[r].ident,
          strerror(h->kq_array[r].data),
          (long)h->kq_array[r].data);
      }else /* READ/EOF */
        ((struct fd_map*)h->kq_array[r].udata)->flags |=
          IO_WATCH_PRV_TRIG_READ;
    }
    /* now do the actual running of IO handlers */
    for(r=h->fd_no-1; (r>=0) && n ; r--) {
      e = get_fd_map(h, h->fd_array[r].fd);
      if ( e->flags & IO_WATCH_PRV_TRIG_READ ) {
        e->flags &= ~IO_WATCH_PRV_TRIG_READ;
        while((handle_io( e, r, IO_WATCH_READ)>0) && repeat);
        n--;
      } else if ( e->timeout!=0 && e->timeout<=curr_time ) {
        e->timeout = 0;
        handle_io( e, r, IO_WATCH_TIMEOUT);
      }
    }

error:
  return ret;
}
#endif
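
In the kqueue variant above, a single kevent() call both submits the queued changes (h->kq_changes) and collects new events, after which kq_nchanges is reset; EV_ERROR results are only logged, since a queued change can race with a close(). A self-contained sketch of that combined submit-and-wait call using the plain kqueue API (BSD/macOS only; hypothetical demo, not OpenSIPS code):

/* editor's sketch: one kevent() call applies changes and waits for events */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

int main(void)
{
  int kq, pipefd[2], n;
  struct kevent change, result;
  struct timespec tspec = { .tv_sec = 1, .tv_nsec = 0 };

  kq = kqueue();
  if (kq == -1 || pipe(pipefd) == -1 || write(pipefd[1], "x", 1) == -1)
    return 1;

  /* register interest in read readiness; udata travels back with the event */
  EV_SET(&change, pipefd[0], EVFILT_READ, EV_ADD, 0, 0, NULL);

  /* one syscall: apply the 1 pending change, then wait up to 1s for an
   * event; afterwards the changes array counts as consumed
   * (cf. h->kq_nchanges=0 in io_wait_loop_kqueue) */
  n = kevent(kq, &change, 1, &result, 1, &tspec);

  if (n > 0 && (result.flags & EV_ERROR))
    fprintf(stderr, "kevent error on fd %d: %s\n",
      (int)result.ident, strerror((int)result.data));
  else if (n > 0)
    printf("fd %d is readable\n", (int)result.ident);

  close(pipefd[0]); close(pipefd[1]); close(kq);
  return 0;
}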



#ifdef HAVE_SIGIO_RT
/*! \brief sigio rt version has no repeat (it doesn't make sense)*/
inline static int io_wait_loop_sigio_rt(io_wait_h* h, int t)
{
  int n;
  int ret;
  struct timespec ts;
  siginfo_t siginfo;
  int sigio_band;
  int sigio_fd;
  struct fd_map* fm;

  ret=1; /* 1 event per call normally */
  ts.tv_sec=t;
  ts.tv_nsec=0;
  if (!sigismember(&h->sset, h->signo) || !sigismember(&h->sset, SIGIO)){
    LM_CRIT("[%s] the signal mask is not properly set!\n",h->name);
    goto error;
  }

again:
  n=sigtimedwait(&h->sset, &siginfo, &ts);
  if (n==-1){
    if (errno==EINTR) goto again; /* some other signal, ignore it */
    else if (errno==EAGAIN){ /* timeout */
      ret=0;
      goto end;
    }else{
      LM_ERR("[%s] sigtimed_wait %s [%d]\n",h->name,
        strerror(errno), errno);
      goto error;
    }
  }
  if (n!=SIGIO){
#ifdef SIGINFO64_WORKARROUND
    /* on linux siginfo.si_band is defined as long in userspace
     * and as int kernel => on 64 bits things will break!
     * (si_band will include si_fd, and si_fd will contain
     *  garbage)
     *  see /usr/src/linux/include/asm-generic/siginfo.h and
     *      /usr/include/bits/siginfo.h
     * -- andrei */
    if (sizeof(siginfo.si_band)>sizeof(int)){
      sigio_band=*((int*)&siginfo.si_band);
      sigio_fd=*(((int*)&siginfo.si_band)+1);
    }else
#endif
    {
      sigio_band=siginfo.si_band;
      sigio_fd=siginfo.si_fd;
    }
    if (siginfo.si_code==SI_SIGIO){
      /* old style, we don't know the event (linux 2.2.?) */
      LM_WARN("[%s] old style sigio interface\n",h->name);
      fm=get_fd_map(h, sigio_fd);
      /* we can have queued signals generated by fds not watched
       * any more, or by fds in transition, to a child => ignore them*/
      if (fm->type)
        handle_io(fm, -1,IO_WATCH_READ);
    }else{
#ifdef EXTRA_DEBUG
      LM_DBG("[%s] siginfo: signal=%d (%d),"
          " si_code=%d, si_band=0x%x,"
          " si_fd=%d\n",
          h->name,siginfo.si_signo, n, siginfo.si_code,
          (unsigned)sigio_band,
          sigio_fd);
#endif
      /* on some errors (e.g. when receiving TCP RST), sigio_band will
       * be set to 0x08 (undocumented, no corresp. POLL_xx), so better
       * catch all events --andrei */
      if (sigio_band/*&(POLL_IN|POLL_ERR|POLL_HUP)*/){
        fm=get_fd_map(h, sigio_fd);
        /* we can have queued signals generated by fds not watched
         * any more, or by fds in transition, to a child
         * => ignore them */
        if (fm->type)
          handle_io(fm, -1,IO_WATCH_READ);
        else
          LM_ERR("[%s] ignoring event"
            " %x on fd %d (fm->fd=%d, fm->data=%p)\n",
            h->name,sigio_band, sigio_fd, fm->fd, fm->data);
      }else{
        LM_ERR("[%s] unexpected event on fd %d: %x\n",h->name, sigio_fd, sigio_band);
      }
    }
  }else{
    /* signal queue overflow
     * TODO: increase signal queue size: 2.4x /proc/.., 2.6x -rlimits */
    LM_WARN("[%s] signal queue overflowed- falling back to poll\n",h->name);
    /* clear real-time signal queue
     * both SIG_IGN and SIG_DFL are needed , it doesn't work
     * only with SIG_DFL  */
    if (signal(h->signo, SIG_IGN)==SIG_ERR){
      LM_CRIT("[%s] couldn't reset signal to IGN\n",h->name);
    }

    if (signal(h->signo, SIG_DFL)==SIG_ERR){
      LM_CRIT("[%s] couldn't reset signal to DFL\n",h->name);
    }
    /* falling back to normal poll */
    ret=io_wait_loop_poll(h, -1, 1);
  }
end:
  return ret;
error:
  return -1;
}
Unexecuted instantiation: net_tcp_proc.c:io_wait_loop_sigio_rt
Unexecuted instantiation: net_tcp.c:io_wait_loop_sigio_rt
Unexecuted instantiation: net_udp.c:io_wait_loop_sigio_rt
Unexecuted instantiation: timer.c:io_wait_loop_sigio_rt
#endif
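
For context on the sigio_rt loop above: sigtimedwait() only delivers a usable si_fd/si_band pair for descriptors that were armed for asynchronous real-time signal notification, which is presumably handled by the fd registration code in io_wait.h. A hedged, Linux-specific sketch of that arming (arm_fd_for_sigio_rt() is my own illustrative helper, not an OpenSIPS function):

/* editor's sketch: arm an fd so a sigtimedwait()-based loop like
 * io_wait_loop_sigio_rt() receives siginfo.si_fd / si_band for it */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int arm_fd_for_sigio_rt(int fd, int signo)
{
  /* deliver the notification signal to this process */
  if (fcntl(fd, F_SETOWN, getpid()) == -1)
    return -1;
  /* use a real-time signal instead of plain SIGIO, so events queue up
   * and carry si_fd/si_band (cf. the SI_SIGIO "old style" branch above) */
  if (fcntl(fd, F_SETSIG, signo) == -1)
    return -1;
  /* enable asynchronous notification (and non-blocking IO) on the fd */
  if (fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC | O_NONBLOCK) == -1)
    return -1;
  return 0;
}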



#ifdef HAVE_DEVPOLL
inline static int io_wait_loop_devpoll(io_wait_h* h, int t, int repeat)
{
  int n, r;
  int ret;
  struct dvpoll dpoll;
  struct fd_map *e;
  unsigned int curr_time;

    dpoll.dp_timeout=t*1000;
    dpoll.dp_nfds=h->fd_no;
    dpoll.dp_fds=h->dp_changes;
again:
    ret=n=ioctl(h->dpoll_fd, DP_POLL, &dpoll);
    if (n==-1){
      if (errno==EINTR) goto again; /* signal, ignore it */
      else{
        LM_ERR("[%s] ioctl: %s [%d]\n",h->name, strerror(errno), errno);
        goto error;
      }
    }

    curr_time = get_ticks();

    for (r=0; r< n; r++){
      if (h->dp_changes[r].revents & (POLLNVAL|POLLERR)){
        LM_ERR("[%s] pollinval returned for fd %d, revents=%x\n",
          h->name,h->fd_array[r].fd, h->fd_array[r].revents);
      }
      /* POLLIN|POLLHUP just go through */
      (get_fd_map(h, h->dp_changes[r].fd))->flags |=
        IO_WATCH_PRV_TRIG_READ;
    }
    /* now do the actual running of IO handlers */
    for(r=h->fd_no-1; (r>=0) ; r--) {
      e = get_fd_map(h, h->fd_array[r].fd);
      if ( e->flags & IO_WATCH_PRV_TRIG_READ ) {
        e->flags &= ~IO_WATCH_PRV_TRIG_READ;
        while((handle_io( e, r, IO_WATCH_READ)>0) && repeat);
      } else if ( e->timeout!=0 && e->timeout<=curr_time ) {
        e->timeout = 0;
        handle_io( e, r, IO_WATCH_TIMEOUT);
      }
    }

error:
  return ret;
}
#endif


#endif