Line | Count | Source |
1 | | /* |
2 | | * Copyright (C) 2014-2015 OpenSIPS Solutions |
3 | | * Copyright (C) 2005 iptelorg GmbH |
4 | | * |
5 | | * This file is part of opensips, a free SIP server. |
6 | | * |
7 | | * opensips is free software; you can redistribute it and/or modify |
8 | | * it under the terms of the GNU General Public License as published by |
9 | | * the Free Software Foundation; either version 2 of the License, or |
10 | | * (at your option) any later version |
11 | | * |
12 | | * opensips is distributed in the hope that it will be useful, |
13 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
15 | | * GNU General Public License for more details. |
16 | | * |
17 | | * You should have received a copy of the GNU General Public License |
18 | | * along with this program; if not, write to the Free Software |
19 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
20 | | * |
21 | | * History: |
22 | | * -------- |
23 | | * 2005-06-13 created by andrei |
24 | | * 2005-06-26 added kqueue (andrei) |
25 | | * 2005-07-01 added /dev/poll (andrei) |
26 | | * 2014-08-25 looping functions moved to io_wait_loop.h (bogdan) |
27 | | */ |
28 | | |
29 | | /*! |
30 | | * \file |
31 | | * \brief tcp io wait common stuff used by tcp_main.c & tcp_read.c |
32 | | * - \ref TCPiowait |
33 | | */ |
34 | | |
35 | | /*! \page TCPiowait TCP io wait common stuff used by tcp_main.c & tcp_read.c |
36 | | * All the functions are inline for speed reasons and because they are |
37 | | * used from only two places. |
38 | | * You also have to define: |
39 | | * - int handle_io(struct fd_map* fm, int idx) (see below) |
40 | | * (this could be trivially replaced by a callback pointer entry attached |
41 | | * to the io_wait handler if more flexibility rather than performance |
42 | | * is needed) |
43 | | * - fd_type - define it to some enum of your choice and also define |
44 | | * FD_TYPE_DEFINED (if you don't, fd_type will be defined |
45 | | * as int). 0 has the special meaning "not set/not initialized" |
46 | | * (a lot of sanity checks and the sigio_rt code are based on |
47 | | * this assumption) |
48 | | * - local_malloc (defaults to pkg_malloc) |
49 | | * - local_free (defaults to pkg_free) |
50 | | * |
51 | | */ |
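To make the requirements above concrete, here is a minimal, hypothetical sketch of what a consumer translation unit might define before including this header. The enum, the handler body and the three-argument handle_io() form (matching the call in the disabled check_io block further down in this file) are illustrative assumptions, not taken from an actual OpenSIPS consumer:

```c
/* hypothetical consumer of io_wait.h -- all names below are illustrative */
enum my_fd_types { F_NONE = 0, F_LISTENER, F_CONN }; /* 0 = not set/not init. */

typedef enum my_fd_types fd_type;
#define FD_TYPE_DEFINED

#define local_malloc pkg_malloc /* these two are already the defaults */
#define local_free   pkg_free

#include "io_wait.h"

/* called for every ready descriptor by the looping helpers that were
 * moved to io_wait_loop.h (see the history note above) */
inline static int handle_io(struct fd_map* fm, int idx, int event_type)
{
	switch (fm->type) {
	case F_LISTENER:
		/* accept() on fm->fd ... */
		break;
	case F_CONN:
		/* read from fm->fd; fm->data points to the connection */
		break;
	default:
		break;
	}
	return 0;
}
```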
52 | | |
53 | | |
54 | | #ifndef _io_wait_h |
55 | | #define _io_wait_h |
56 | | |
57 | | #include <errno.h> |
58 | | #include <string.h> |
59 | | #ifdef HAVE_SIGIO_RT |
60 | | #define __USE_GNU /* or else F_SETSIG won't be included */ |
61 | | #define _GNU_SOURCE /* define this as well */ |
62 | | #include <sys/types.h> /* recv */ |
63 | | #include <sys/socket.h> /* recv */ |
64 | | #include <signal.h> /* sigprocmask, sigwait a.s.o */ |
65 | | #endif |
66 | | #ifdef HAVE_EPOLL |
67 | | #include <sys/epoll.h> |
68 | | #endif |
69 | | #ifdef HAVE_KQUEUE |
70 | | #include <sys/types.h> /* needed on freebsd */ |
71 | | #include <sys/event.h> |
72 | | #include <sys/time.h> |
73 | | #endif |
74 | | #ifdef HAVE_DEVPOLL |
75 | | #include <sys/devpoll.h> |
76 | | #endif |
77 | | #ifdef HAVE_SELECT |
78 | | /* needed on openbsd for select*/ |
79 | | #include <sys/time.h> |
80 | | #include <sys/types.h> |
81 | | #include <unistd.h> |
82 | | /* needed according to POSIX for select*/ |
83 | | #include <sys/select.h> |
84 | | #endif |
85 | | #include <sys/poll.h> |
86 | | #include <fcntl.h> |
87 | | |
88 | | #include "dprint.h" |
89 | | |
90 | | #include "poll_types.h" /* poll_types*/ |
91 | | #include "pt.h" /* my_pid() */ |
92 | | #include "error.h" |
93 | | |
94 | | #ifdef __OS_linux |
95 | | #include <features.h> /* for GLIBC version testing */ |
96 | | #endif |
97 | | #ifdef EXTRA_DEBUG |
98 | | #include "lib/dbg/backtrace.h" |
99 | | #endif |
100 | | |
101 | | #ifndef FD_TYPE_DEFINED |
102 | | typedef int fd_type; |
103 | | #define FD_TYPE_DEFINED |
104 | | #endif |
105 | | |
106 | | /*! \brief maps a fd to some other structure; used in almost all cases |
107 | | * except epoll and maybe kqueue or /dev/poll */ |
108 | | struct fd_map { |
109 | | int fd; /* fd no */ |
110 | | fd_type type; /* "data" type */ |
111 | | void* data; /* pointer to the corresponding structure */ |
112 | | int flags; /* so far used to indicate whether we should |
113 | | * read, write or both ; last 4 are reserved for |
114 | | * internal usage */ |
115 | | int app_flags; /* flags to be used by upper layer apps, not by |
116 | | * the reactor */ |
117 | | unsigned int timeout; |
118 | | }; |
119 | | |
120 | | |
121 | | #ifdef HAVE_KQUEUE |
122 | | #ifndef KQ_CHANGES_ARRAY_SIZE |
123 | | #define KQ_CHANGES_ARRAY_SIZE 128 |
124 | | |
125 | | #ifdef __OS_netbsd |
126 | | #define KEV_UDATA_CAST (intptr_t) |
127 | | #else |
128 | | #define KEV_UDATA_CAST |
129 | | #endif |
130 | | |
131 | | #endif |
132 | | #endif |
133 | | |
134 | | |
135 | 0 | #define IO_FD_CLOSING 16 |
136 | | |
137 | | /*! \brief handler structure */ |
138 | | struct io_wait_handler{ |
139 | | char *name; |
140 | | int max_prio; |
141 | | #ifdef HAVE_EPOLL |
142 | | struct epoll_event* ep_array; |
143 | | int epfd; /* epoll ctrl fd */ |
144 | | #endif |
145 | | #ifdef HAVE_SIGIO_RT |
146 | | sigset_t sset; /* signal mask for sigio & sigrtmin */ |
147 | | int signo; /* real time signal used */ |
148 | | #endif |
149 | | #ifdef HAVE_KQUEUE |
150 | | struct kevent* kq_array; /* used for the eventlist*/ |
151 | | struct kevent* kq_changes; /* used for the changelist */ |
152 | | size_t kq_nchanges; |
153 | | size_t kq_changes_size; /* size of the changes array */ |
154 | | int kq_fd; |
155 | | #endif |
156 | | #ifdef HAVE_DEVPOLL |
157 | | int dpoll_fd; |
158 | | struct pollfd* dp_changes; |
159 | | #endif |
160 | | #ifdef HAVE_SELECT |
161 | | fd_set master_set; |
162 | | int max_fd_select; /* maximum select used fd */ |
163 | | #endif |
164 | | /* common stuff for POLL, SIGIO_RT and SELECT |
165 | | * since poll support is always compiled => this will always be compiled */ |
166 | | int *prio_idx; /* array of size max_prio - idxs in fd_array where prio changes*/ |
167 | | struct fd_map* fd_hash; |
168 | | struct pollfd* fd_array; |
169 | | int fd_no; /* current index used in fd_array */ |
170 | | int max_fd_no; /* maximum fd no, is also the size of fd_array, |
171 | | fd_hash and ep_array*/ |
172 | | enum poll_types poll_method; |
173 | | int flags; |
174 | | }; |
175 | | |
176 | | typedef struct io_wait_handler io_wait_h; |
177 | | |
178 | | |
179 | | /*! \brief get the corresponding fd_map structure pointer */ |
180 | 0 | #define get_fd_map(h, fd) (&(h)->fd_hash[(fd)]) |
181 | | |
182 | | /*! \brief remove a fd_map structure from the hash; |
183 | | * the pointer must be returned by get_fd_map or hash_fd_map |
184 | | */ |
185 | | #define unhash_fd_map(pfm,c_flags,sock_flags,erase) \ |
186 | 0 | do{ \ |
187 | 0 | if ((c_flags & IO_FD_CLOSING) || (pfm->flags&IO_WATCH_PRV_FILTER)==sock_flags) { \ |
188 | 0 | (pfm)->type=0 /*F_NONE */; \ |
189 | 0 | (pfm)->fd=-1; \ |
190 | 0 | (pfm)->flags = 0; \ |
191 | 0 | (pfm)->data = NULL; \ |
192 | 0 | erase = 1; \ |
193 | 0 | } else { \ |
194 | 0 | (pfm)->flags &= ~sock_flags; \ |
195 | 0 | erase = 0; \ |
196 | 0 | } \ |
197 | 0 | }while(0) |
198 | | |
199 | | #define unhash_fd_map2(pfm,c_flags,sock_flags,erase) \ |
200 | 0 | do{ \ |
201 | 0 | if ((c_flags & IO_FD_CLOSING) || (pfm->flags&IO_WATCH_PRV_FILTER)==sock_flags) { \ |
202 | 0 | rla_log("erase case detected with sflags=%x\n",sock_flags); \ |
203 | 0 | (pfm)->type=0 /*F_NONE */; \ |
204 | 0 | (pfm)->fd=-1; \ |
205 | 0 | (pfm)->flags = 0; \ |
206 | 0 | (pfm)->data = NULL; \ |
207 | 0 | erase = 1; \ |
208 | 0 | } else { \ |
209 | 0 | rla_log("not erasing flags=%x, flags=%x\n",(pfm)->flags,sock_flags); \ |
210 | 0 | (pfm)->flags &= ~sock_flags; \ |
211 | 0 | erase = 0; \ |
212 | 0 | } \ |
213 | 0 | }while(0) |
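The two macros above implement a small contract that io_watch_del() below relies on: the entry is fetched by direct index, and unhash_fd_map() either drops only the requested watch flags or, when the fd is closing or no other watch flags remain, wipes the whole entry and reports that through its erase output argument. A hypothetical caller sketch, assuming the fd is valid (0 <= fd < h->max_fd_no):

```c
/* hypothetical helper: stop watching 'fd' for read events only;
 * returns 1 if the whole map entry was released, 0 otherwise */
static int stop_watch_read(io_wait_h *h, int fd)
{
	struct fd_map *e = get_fd_map(h, fd); /* direct index into h->fd_hash */
	int erase = 0;

	if (e->type == 0 /* F_NONE */)
		return 0; /* fd was not being watched at all */

	/* no IO_FD_CLOSING here: only the READ bit is cleared, unless READ
	 * was the last remaining watch flag, in which case the entry is wiped */
	unhash_fd_map(e, 0, IO_WATCH_READ, erase);

	/* when erase==1 the caller must also drop the fd from h->fd_array,
	 * exactly as io_watch_del() does via its fix_fd_array macro */
	return erase;
}
```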
214 | | |
215 | | |
216 | | /*! \brief add a fd_map structure to the fd hash */ |
217 | | static inline struct fd_map* hash_fd_map( io_wait_h* h, |
218 | | int fd, |
219 | | fd_type type, |
220 | | void* data, |
221 | | int flags, |
222 | | unsigned int timeout, |
223 | | int *already) |
224 | 0 | { |
225 | 0 | if (h->fd_hash[fd].fd <= 0) { |
226 | 0 | *already = 0; |
227 | 0 | } else { |
228 | 0 | *already = 1; |
229 | 0 | } |
230 | | |
231 | 0 | h->fd_hash[fd].fd=fd; |
232 | 0 | h->fd_hash[fd].type=type; |
233 | 0 | h->fd_hash[fd].data=data; |
234 | | |
235 | 0 | h->fd_hash[fd].flags|=flags; |
236 | | |
237 | 0 | h->fd_hash[fd].timeout = timeout; |
238 | | |
239 | 0 | return &h->fd_hash[fd]; |
240 | 0 | } |
Unexecuted instantiations of hash_fd_map: net_tcp_proc.c, net_tcp.c, tcp_common.c, net_udp.c, async.c, timer.c, reactor.c, io_wait.c, cfg_reload.c
241 | | |
242 | | |
243 | | #ifdef HAVE_KQUEUE |
244 | | /* |
245 | | * kqueue specific function: register a change |
246 | | * (adds a change to the kevent change array, and if full flushes it first) |
247 | | * returns: -1 on error, 0 on success |
248 | | */ |
249 | | static inline int kq_ev_change(io_wait_h* h, int fd, int filter, int flag, |
250 | | void* data) |
251 | | { |
252 | | int n; |
253 | | struct timespec tspec; |
254 | | |
255 | | if (h->kq_nchanges>=h->kq_changes_size){ |
256 | | /* changes array full ! */ |
257 | | LM_WARN("[%s] kqueue changes array full trying to flush...\n", |
258 | | h->name); |
259 | | tspec.tv_sec=0; |
260 | | tspec.tv_nsec=0; |
261 | | again: |
262 | | n=kevent(h->kq_fd, h->kq_changes, h->kq_nchanges, 0, 0, &tspec); |
263 | | if (n==-1){ |
264 | | if (errno==EINTR) goto again; |
265 | | LM_ERR("[%s] kevent flush changes failed: %s [%d]\n", |
266 | | h->name, strerror(errno), errno); |
267 | | return -1; |
268 | | } |
269 | | h->kq_nchanges=0; /* changes array is empty */ |
270 | | } |
271 | | EV_SET(&h->kq_changes[h->kq_nchanges], fd, filter, flag, 0, 0, |
272 | | KEV_UDATA_CAST data); |
273 | | h->kq_nchanges++; |
274 | | return 0; |
275 | | } |
276 | | #endif |
277 | | |
278 | | |
279 | 0 | #define IO_WATCH_READ (1<<0) |
280 | 0 | #define IO_WATCH_WRITE (1<<1) |
281 | | #define IO_WATCH_ERROR (1<<2) |
282 | 0 | #define IO_WATCH_TIMEOUT (1<<3) |
283 | | /* bits 24 and above are reserved for internal use, do not attempt to use them */ |
284 | 0 | #define IO_WATCH_PRV_FILTER ((1<<24)-1) |
285 | 0 | #define IO_WATCH_PRV_CHECKED (1<<29) |
286 | 0 | #define IO_WATCH_PRV_TRIG_READ (1<<30) |
287 | 0 | #define IO_WATCH_PRV_TRIG_WRITE (1<<31) |
288 | | |
289 | | #define fd_array_print \ |
290 | | do { \ |
291 | | int k;\ |
292 | | LM_DBG("[%s] size=%d, fd array is",h->name,h->fd_no);\ |
293 | | for(k=0;k<h->fd_no;k++) LM_GEN1(L_DBG," %d flags = %d",h->fd_array[k].fd,h->fd_hash[h->fd_array[k].fd].flags);\ |
294 | | LM_GEN1(L_DBG,"\n"); \ |
295 | | LM_DBG("[%s] size=%d, prio array is",h->name,h->max_prio);\ |
296 | | for(k=0;k<h->max_prio;k++) LM_GEN1(L_DBG," %d",h->prio_idx[k]);\ |
297 | | LM_GEN1(L_DBG,"\n"); \ |
298 | | }while(0) |
299 | | |
300 | | |
301 | | #define check_io_data() \ |
302 | 0 | do { \ |
303 | 0 | struct fd_map* _e;\ |
304 | 0 | int _t,k;\ |
305 | 0 | check_error = 0;\ |
306 | 0 | /* iterate the fd_array and check if fd_hash is properly set for each */ \ |
307 | 0 | for(k=0;k<h->fd_no;k++) {\ |
308 | 0 | _e = get_fd_map(h, h->fd_array[k].fd); \ |
309 | 0 | if (_e->type==0 || _e->fd<=0 || \ |
310 | 0 | (_e->flags&(IO_WATCH_READ|IO_WATCH_WRITE))==0 ) {\ |
311 | 0 | LM_BUG("fd_array idx %d (fd=%d) points to bogus map "\ |
312 | 0 | "(fd=%d,type=%d,flags=%x,data=%p)\n",k,h->fd_array[k].fd,\ |
313 | 0 | _e->fd, _e->type, _e->flags, _e->data);\ |
314 | 0 | check_error = 1;\ |
315 | 0 | }\ |
316 | 0 | _e->flags |= IO_WATCH_PRV_CHECKED;\ |
317 | 0 | }\ |
318 | 0 | /* iterate the fd_map and see if all records are checked */ \ |
319 | 0 | _t = 0; \ |
320 | 0 | for(k=0;k<h->max_fd_no;k++) {\ |
321 | 0 | _e = get_fd_map(h, k); \ |
322 | 0 | if (_e->type==0) { \ |
323 | 0 | /* fd not in use, everything should be zero */ \ |
324 | 0 | if (_e->fd>0 || _e->data!=NULL || _e->flags!=0 ) {\ |
325 | 0 | LM_BUG("unused fd_map fd=%d has bogus data "\ |
326 | 0 | "(fd=%d,flags=%x,data=%p)\n",k,\ |
327 | 0 | _e->fd, _e->flags, _e->data);\ |
328 | 0 | check_error = 1;\ |
329 | 0 | }\ |
330 | 0 | } else {\ |
331 | 0 | /* fd in use, check that it is valid and was checked */ \ |
332 | 0 | if (_e->fd<=0 || \ |
333 | 0 | (_e->flags&(IO_WATCH_READ|IO_WATCH_WRITE))==0 ) {\ |
334 | 0 | LM_BUG("used fd map fd=%d has bogus data "\ |
335 | 0 | "(fd=%d,type=%d,flags=%x,data=%p)\n",k,\ |
336 | 0 | _e->fd, _e->type, _e->flags, _e->data);\ |
337 | 0 | check_error = 1;\ |
338 | 0 | }\ |
339 | 0 | /* the map is valid */ \ |
340 | 0 | if ((_e->flags&IO_WATCH_PRV_CHECKED)==0) {\ |
341 | 0 | LM_BUG("used fd map fd=%d is not present in fd_array "\ |
342 | 0 | "(fd=%d,type=%d,flags=%x,data=%p)\n",k,\ |
343 | 0 | _e->fd, _e->type, _e->flags, _e->data);\ |
344 | 0 | check_error = 1;\ |
345 | 0 | }\ |
346 | 0 | _e->flags &= ~IO_WATCH_PRV_CHECKED;\ |
347 | 0 | _t++;\ |
348 | 0 | }\ |
349 | 0 | }\ |
350 | 0 | if (_t!=h->fd_no) { \ |
351 | 0 | LM_BUG("fd_map versus fd_array size mismatch: %d versus %d\n",\ |
352 | 0 | _t, h->fd_no);\ |
353 | 0 | check_error = 1;\ |
354 | 0 | }\ |
355 | 0 | } while(0) |
356 | | |
357 | | |
358 | | /*! \brief generic io_watch_add function |
359 | | * \return 0 on success, -1 on error |
360 | | * |
361 | | * this version should be faster than pointers to poll_method specific |
362 | | * functions (it avoids function calls, the overhead being only an extra |
363 | | * switch()) |
364 | | */ |
365 | | inline static int io_watch_add( io_wait_h* h, // lgtm [cpp/use-of-goto] |
366 | | int fd, |
367 | | fd_type type, |
368 | | void* data, |
369 | | int prio, |
370 | | unsigned int timeout, |
371 | | int flags) |
372 | 0 | { |
373 | | |
374 | | /* helper macros */ |
375 | 0 | #define fd_array_setup \ |
376 | 0 | do{ \ |
377 | 0 | n = h->prio_idx[prio]; \ |
378 | 0 | if (n<h->fd_no)\ |
379 | 0 | memmove( &h->fd_array[n+1], &h->fd_array[n],\ |
380 | 0 | (h->fd_no-n)*sizeof(*(h->fd_array)) ); \ |
381 | 0 | h->fd_array[n].fd=fd; \ |
382 | 0 | h->fd_array[n].events=0; \ |
383 | 0 | if (flags & IO_WATCH_READ) \ |
384 | 0 | h->fd_array[n].events|=POLLIN; /* useless for select */ \ |
385 | 0 | if (flags & IO_WATCH_WRITE) \ |
386 | 0 | h->fd_array[n].events|=POLLOUT; /* useless for select */ \ |
387 | 0 | h->fd_array[n].revents=0; /* useless for select */ \ |
388 | 0 | for( n=prio ; n<h->max_prio ; n++) \ |
389 | 0 | h->prio_idx[n]++; \ |
390 | 0 | h->fd_no++; \ |
391 | 0 | }while(0) |
392 | | |
393 | 0 | #define set_fd_flags(f) \ |
394 | 0 | do{ \ |
395 | 0 | ctl_flags=fcntl(fd, F_GETFL); \ |
396 | 0 | if (ctl_flags==-1){ \ |
397 | 0 | LM_ERR("[%s] fcntl: GETFL failed:" \ |
398 | 0 | " %s [%d]\n", h->name, strerror(errno), errno); \ |
399 | 0 | goto error; \ |
400 | 0 | } \ |
401 | 0 | if (fcntl(fd, F_SETFL, ctl_flags|(f))==-1){ \ |
402 | 0 | LM_ERR("[%s] fcntl: SETFL" \ |
403 | 0 | " failed: %s [%d]\n", h->name, strerror(errno), errno);\ |
404 | 0 | goto error; \ |
405 | 0 | } \ |
406 | 0 | }while(0) |
407 | | |
408 | | |
409 | 0 | struct fd_map* e; |
410 | 0 | int already=-1; |
411 | 0 | #ifdef HAVE_EPOLL |
412 | 0 | struct epoll_event ep_event; |
413 | 0 | #endif |
414 | | #ifdef HAVE_DEVPOLL |
415 | | struct pollfd pfd; |
416 | | #endif |
417 | 0 | int ctl_flags; |
418 | 0 | int n; //FIXME |
419 | 0 | int check_error; |
420 | | #if 0 //defined(HAVE_SIGIO_RT) || defined (HAVE_EPOLL) FIXME |
421 | | int n; |
422 | | int idx; |
423 | | int check_io; |
424 | | struct pollfd pf; |
425 | | |
426 | | check_io=0; /* set to 1 if we need to check for pre-existing queued |
427 | | io/data on the fd */ |
428 | | idx=-1; |
429 | | #endif |
430 | 0 | e=0; |
431 | | |
432 | 0 | if (fd==-1){ |
433 | 0 | LM_CRIT("fd is -1!\n"); |
434 | 0 | goto error0; |
435 | 0 | } |
436 | | /* check if not too big */ |
437 | 0 | if (h->fd_no >= h->max_fd_no || fd >= h->max_fd_no) { |
438 | 0 | LM_CRIT("[%s] maximum fd number exceeded: %d, %d/%d\n", |
439 | 0 | h->name, fd, h->fd_no, h->max_fd_no); |
440 | 0 | goto error0; |
441 | 0 | } |
442 | 0 | if (prio > h->max_prio) { |
443 | 0 | LM_BUG("[%s] priority %d requested (max is %d)\n", |
444 | 0 | h->name, prio, h->max_prio); |
445 | 0 | goto error0; |
446 | 0 | } |
447 | 0 | #if defined (HAVE_EPOLL) |
448 | 0 | LM_DBG("[%s] io_watch_add op (%d on %d) (%p, %d, %d, %p,%d), fd_no=%d/%d\n", |
449 | 0 | h->name,fd,h->epfd, h,fd,type,data,flags,h->fd_no,h->max_fd_no); |
450 | | #else |
451 | | LM_DBG("[%s] io_watch_add op (%d) (%p, %d, %d, %p,%d), fd_no=%d/%d\n", |
452 | | h->name,fd, h,fd,type,data,flags,h->fd_no,h->max_fd_no); |
453 | | #endif |
454 | | //fd_array_print; |
455 | | /* hash sanity check */ |
456 | 0 | e=get_fd_map(h, fd); |
457 | | |
458 | 0 | check_io_data(); |
459 | 0 | if (check_error) { |
460 | 0 | LM_CRIT("[%s] check failed before fd add " |
461 | 0 | "(fd=%d,type=%d,data=%p,flags=%x) already=%d\n",h->name, |
462 | 0 | fd, type, data, flags, already); |
463 | 0 | } |
464 | | |
465 | 0 | if (e->flags & flags){ |
466 | 0 | if (e->data != data) { |
467 | 0 | LM_BUG("[%s] BUG trying to overwrite entry %d" |
468 | 0 | " in the hash(%d, %d, %p,%d) with (%d, %d, %p,%d)\n", |
469 | 0 | h->name,fd, e->fd, e->type, e->data,e->flags, fd, type, data,flags); |
470 | 0 | goto error0; |
471 | 0 | } |
472 | 0 | LM_DBG("[%s] Socket %d is already being listened on for flags %d\n", |
473 | 0 | h->name,fd,flags); |
474 | 0 | return 0; |
475 | 0 | } |
476 | | |
477 | 0 | if (timeout) |
478 | 0 | timeout+=get_ticks(); |
479 | |
|
480 | 0 | if ((e=hash_fd_map(h, fd, type, data,flags, timeout, &already))==0){ |
481 | 0 | LM_ERR("[%s] failed to hash the fd %d\n",h->name, fd); |
482 | 0 | goto error0; |
483 | 0 | } |
484 | 0 | switch(h->poll_method){ /* faster than pointers to functions */ |
485 | 0 | case POLL_POLL: |
486 | 0 | set_fd_flags(O_NONBLOCK); |
487 | 0 | break; |
488 | 0 | #ifdef HAVE_SELECT |
489 | 0 | case POLL_SELECT: |
490 | 0 | FD_SET(fd, &h->master_set); |
491 | 0 | if (h->max_fd_select<fd) h->max_fd_select=fd; |
492 | 0 | break; |
493 | 0 | #endif |
494 | 0 | #ifdef HAVE_SIGIO_RT |
495 | 0 | case POLL_SIGIO_RT: |
496 | | /* re-setting O_ASYNC might be needed, if not done from |
497 | | * io_watch_del (or if somebody wants to add a fd which already |
498 | | * has O_ASYNC/F_SETSIG set on a duplicate) |
499 | | */ |
500 | | /* set async & signal */ |
501 | 0 | if (fcntl(fd, F_SETOWN, my_pid())==-1){ |
502 | 0 | LM_ERR("[%s] fcntl: SETOWN" |
503 | 0 | " failed: %s [%d]\n",h->name, strerror(errno), errno); |
504 | 0 | goto error; |
505 | 0 | } |
506 | 0 | if (fcntl(fd, F_SETSIG, h->signo)==-1){ |
507 | 0 | LM_ERR("[%s] fcntl: SETSIG" |
508 | 0 | " failed: %s [%d]\n",h->name, strerror(errno), errno); |
509 | 0 | goto error; |
510 | 0 | } |
511 | | /* set both non-blocking and async */ |
512 | 0 | set_fd_flags(O_ASYNC| O_NONBLOCK); |
513 | | #ifdef EXTRA_DEBUG |
514 | | LM_DBG("[%s] sigio_rt on f %d, signal %d to pid %d\n", |
515 | | h->name,fd, h->signo, my_pid()); |
516 | | #endif |
517 | | /* empty socket receive buffer, if buffer is already full |
518 | | * no more space to put packets |
519 | | * => no more signals are ever generated |
520 | | * also when moving fds, the freshly moved fd might have |
521 | | * already some bytes queued, we want to get them now |
522 | | * and not later -- andrei */ |
523 | | //idx=h->fd_no; FIXME |
524 | | //check_io=1; |
525 | 0 | break; |
526 | 0 | #endif |
527 | 0 | #ifdef HAVE_EPOLL |
528 | 0 | case POLL_EPOLL: |
529 | 0 | ep_event.data.ptr=e; |
530 | 0 | ep_event.events=0; |
531 | 0 | if (e->flags & IO_WATCH_READ) |
532 | 0 | ep_event.events|=EPOLLIN; |
533 | 0 | if (e->flags & IO_WATCH_WRITE) |
534 | 0 | ep_event.events|=EPOLLOUT; |
535 | 0 | if (!already) { |
536 | 0 | again1: |
537 | | #if 0 |
538 | | /* This is currently broken, because when using EPOLLEXCLUSIVE, the OS will |
539 | | * send sequential events to the same process - thus our pseudo-dispatcher |
540 | | * will no longer work, since events on a pipe will be queued by a single |
541 | | * process. - razvanc |
542 | | */ |
543 | | #if (defined __OS_linux) && (__GLIBC__ >= 2) && (__GLIBC_MINOR__ >= 24) |
544 | | if (e->flags & IO_WATCH_READ) |
545 | | ep_event.events|=EPOLLEXCLUSIVE; |
546 | | #endif |
547 | | #endif |
548 | 0 | n=epoll_ctl(h->epfd, EPOLL_CTL_ADD, fd, &ep_event); |
549 | 0 | if (n==-1){ |
550 | 0 | if (errno==EAGAIN) goto again1; |
551 | 0 | LM_ERR("[%s] epoll_ctl ADD failed: %s [%d]\n", |
552 | 0 | h->name,strerror(errno), errno); |
553 | 0 | goto error; |
554 | 0 | } |
555 | 0 | } else { |
556 | 0 | again11: |
557 | 0 | n=epoll_ctl(h->epfd, EPOLL_CTL_MOD, fd, &ep_event); |
558 | 0 | if (n==-1){ |
559 | 0 | if (errno==EAGAIN) goto again11; |
560 | 0 | LM_ERR("[%s] epoll_ctl MOD failed: %s [%d]\n", |
561 | 0 | h->name,strerror(errno), errno); |
562 | 0 | goto error; |
563 | 0 | } |
564 | 0 | } |
565 | 0 | break; |
566 | 0 | #endif |
567 | | #ifdef HAVE_KQUEUE |
568 | | case POLL_KQUEUE: |
569 | | if (kq_ev_change(h, fd, EVFILT_READ, EV_ADD, e)==-1) |
570 | | goto error; |
571 | | break; |
572 | | #endif |
573 | | #ifdef HAVE_DEVPOLL |
574 | | case POLL_DEVPOLL: |
575 | | pfd.fd=fd; |
576 | | pfd.events=POLLIN; |
577 | | pfd.revents=0; |
578 | | again_devpoll: |
579 | | if (write(h->dpoll_fd, &pfd, sizeof(pfd))==-1){ |
580 | | if (errno==EAGAIN) goto again_devpoll; |
581 | | LM_ERR("[%s] /dev/poll write failed:" |
582 | | "%s [%d]\n",h->name, strerror(errno), errno); |
583 | | goto error; |
584 | | } |
585 | | break; |
586 | | #endif |
587 | | |
588 | 0 | default: |
589 | 0 | LM_CRIT("[%s] no support for poll method " |
590 | 0 | " %s (%d)\n",h->name, poll_method_str[h->poll_method], |
591 | 0 | h->poll_method); |
592 | 0 | goto error; |
593 | 0 | } |
594 | | |
595 | 0 | if (!already) { |
596 | 0 | fd_array_setup; |
597 | 0 | } |
598 | | |
599 | | #if 0 //defined(HAVE_SIGIO_RT) || defined (HAVE_EPOLL) FIXME !!! |
600 | | if (check_io){ |
601 | | /* handle possible pre-existing events */ |
602 | | pf.fd=fd; |
603 | | pf.events=POLLIN; |
604 | | check_io_again: |
605 | | while( ((n=poll(&pf, 1, 0))>0) && (handle_io(e, idx,IO_WATCH_READ)>0)); |
606 | | if (n==-1){ |
607 | | if (errno==EINTR) goto check_io_again; |
608 | | LM_ERR("check_io poll: %s [%d]\n", |
609 | | strerror(errno), errno); |
610 | | } |
611 | | } |
612 | | #endif |
613 | | //fd_array_print; |
614 | 0 | check_io_data(); |
615 | 0 | if (check_error) { |
616 | 0 | LM_CRIT("[%s] check failed after successful fd add " |
617 | 0 | "(fd=%d,type=%d,data=%p,flags=%x) already=%d\n",h->name, |
618 | 0 | fd, type, data, flags, already); |
619 | 0 | } |
620 | 0 | return 0; |
621 | 0 | error: |
622 | 0 | if (e) unhash_fd_map(e,0,flags,already); |
623 | 0 | error0: |
624 | 0 | check_io_data(); |
625 | 0 | if (check_error) { |
626 | 0 | LM_CRIT("[%s] check failed after failed fd add " |
627 | 0 | "(fd=%d,type=%d,data=%p,flags=%x) already=%d\n",h->name, |
628 | 0 | fd, type, data, flags, already); |
629 | 0 | } |
630 | 0 | return -1; |
631 | 0 | #undef fd_array_setup |
632 | 0 | #undef set_fd_flags |
633 | 0 | } |
Unexecuted instantiations of io_watch_add: net_tcp_proc.c, net_tcp.c, tcp_common.c, net_udp.c, async.c, timer.c, reactor.c, io_wait.c, cfg_reload.c
634 | | |
635 | | |
636 | | |
637 | | /*! |
638 | | * \brief removes a fd from the set of watched descriptors (counterpart of io_watch_add) |
639 | | * \param h handler |
640 | | * \param fd file descriptor |
641 | | * \param idx index in the fd_array if known, -1 if not |
642 | | * (if index==-1 fd_array will be searched for the |
643 | | * corresponding fd* entry -- slower but unavoidable in |
644 | | * some cases). index is not used (no fd_array) for epoll, |
645 | | * /dev/poll and kqueue |
646 | | * \param flags optimization flags, e.g. IO_FD_CLOSING, the fd was or will |
647 | | * shortly be closed, in some cases we can avoid extra |
648 | | * remove operations (e.g.: epoll, kqueue, sigio) |
649 | | * \return 0 if ok, -1 on error |
650 | | */ |
651 | | inline static int io_watch_del(io_wait_h* h, int fd, int idx, |
652 | | int flags,int sock_flags) |
653 | 0 | { |
654 | 0 | #define fix_fd_array \ |
655 | 0 | do{\ |
656 | 0 | if (idx==-1){ \ |
657 | | /* fix idx if -1 and needed */ \ |
658 | 0 | for (idx=0; (idx<h->fd_no) && \ |
659 | 0 | (h->fd_array[idx].fd!=fd); idx++); \ |
660 | 0 | } \ |
661 | 0 | rla_log("fixing: final idx=%d out of %d, erase=%d\n",idx,idx<h->fd_no,erase); \ |
662 | 0 | if (idx<h->fd_no){ \ |
663 | 0 | if (erase) { \ |
664 | 0 | memmove(&h->fd_array[idx], &h->fd_array[idx+1], \ |
665 | 0 | (h->fd_no-(idx+1))*sizeof(*(h->fd_array))); \ |
666 | 0 | for( i=0 ; i<h->max_prio && h->prio_idx[i]<=idx ; i++ ); \ |
667 | 0 | for( ; i<h->max_prio ; i++ ) h->prio_idx[i]-- ; \ |
668 | 0 | h->fd_no--; \ |
669 | 0 | } else { \ |
670 | 0 | h->fd_array[idx].events = 0; \ |
671 | 0 | if (e->flags & IO_WATCH_READ) \ |
672 | 0 | h->fd_array[idx].events|=POLLIN; /* useless for select */ \ |
673 | 0 | if (e->flags & IO_WATCH_WRITE) \ |
674 | 0 | h->fd_array[idx].events|=POLLOUT; /* useless for select */ \ |
675 | 0 | h->fd_array[idx].revents = 0; \ |
676 | 0 | } \ |
677 | 0 | } \ |
678 | 0 | }while(0) |
679 | | |
680 | 0 | struct fd_map* e; |
681 | 0 | #ifdef HAVE_EPOLL |
682 | 0 | int n; |
683 | 0 | struct epoll_event ep_event; |
684 | 0 | #endif |
685 | | #ifdef HAVE_DEVPOLL |
686 | | struct pollfd pfd; |
687 | | #endif |
688 | 0 | #ifdef HAVE_SIGIO_RT |
689 | 0 | int fd_flags; |
690 | 0 | #endif |
691 | 0 | int erase = 0; |
692 | 0 | int check_error; |
693 | 0 | int i; |
694 | | |
695 | 0 | #define x_RL_NO 10 |
696 | 0 | #define x_RL_LEN 200 |
697 | 0 | static char rla[x_RL_NO][x_RL_LEN]; |
698 | 0 | int rla_idx=0; |
699 | 0 | #define rla_log( _fmt, args...) \ |
700 | 0 | snprintf( rla[rla_idx++], x_RL_LEN, _fmt, ## args) |
701 | 0 | #define rla_dump() \ |
702 | 0 | do { \ |
703 | 0 | int w; \ |
704 | 0 | for(w=0;w<rla_idx;w++) \ |
705 | 0 | LM_CRIT("[%d]-> [%s]\n",w,rla[w]); \ |
706 | 0 | } while(0) |
707 | | |
708 | 0 | if ((fd<0) || (fd>=h->max_fd_no)){ |
709 | 0 | LM_CRIT("[%s] invalid fd %d, not in [0, %d)\n", h->name, fd, h->max_fd_no); |
710 | 0 | goto error0; |
711 | 0 | } |
712 | 0 | LM_DBG("[%s] io_watch_del op on index %d %d (%p, %d, %d, 0x%x,0x%x) " |
713 | 0 | "fd_no=%d called\n", h->name,idx,fd, h, fd, idx, flags, |
714 | 0 | sock_flags,h->fd_no); |
715 | 0 | rla_log("[%s] io_watch_del op on index %d %d (%p, %d, %d, 0x%x,0x%x) " |
716 | 0 | "fd_no=%d called\n", h->name,idx,fd, h, fd, idx, flags, |
717 | 0 | sock_flags,h->fd_no); |
718 | | //fd_array_print; |
719 | | |
720 | 0 | e=get_fd_map(h, fd); |
721 | | /* more sanity checks */ |
722 | 0 | if (e==0){ |
723 | 0 | LM_CRIT("[%s] no corresponding hash entry for %d\n",h->name, fd); |
724 | 0 | goto error0; |
725 | 0 | } |
726 | 0 | if (e->type==0 /*F_NONE*/){ |
727 | 0 | LM_ERR("[%s] trying to delete already erased" |
728 | 0 | " entry %d in the hash(%d, %d, %p) )\n", |
729 | 0 | h->name,fd, e->fd, e->type, e->data); |
730 | 0 | goto error0; |
731 | 0 | } |
732 | | |
733 | 0 | if (idx != -1) { |
734 | 0 | if (!(idx>=0 && idx<h->fd_no)) { |
735 | 0 | LM_CRIT("[%s] FD index check failed, idx=%d, max=%d" |
736 | 0 | " operating on %d\n",h->name, idx, h->fd_no, fd ); |
737 | | #ifdef EXTRA_DEBUG |
738 | | log_backtrace(); |
739 | | #endif |
740 | 0 | rla_dump(); |
741 | 0 | idx = -1; |
742 | 0 | } else if (h->fd_array[idx].fd!=fd) { |
743 | 0 | LM_CRIT("[%s] FD consistency check failed, idx=%d points to fd=%d," |
744 | 0 | " but operating on %d\n",h->name, idx, h->fd_array[idx].fd, fd ); |
745 | | #ifdef EXTRA_DEBUG |
746 | | log_backtrace(); |
747 | | #endif |
748 | 0 | rla_dump(); |
749 | 0 | idx = -1; |
750 | 0 | } |
751 | 0 | } |
752 | | |
753 | 0 | if ((e->flags & sock_flags) == 0) { |
754 | 0 | LM_ERR("BUG - [%s] trying to del fd %d with flags %d %d\n", |
755 | 0 | h->name, fd, e->flags,sock_flags); |
756 | 0 | goto error0; |
757 | 0 | } |
758 | | |
759 | 0 | unhash_fd_map2(e,flags,sock_flags,erase); |
760 | | |
761 | 0 | switch(h->poll_method){ |
762 | 0 | case POLL_POLL: |
763 | 0 | break; |
764 | 0 | #ifdef HAVE_SELECT |
765 | 0 | case POLL_SELECT: |
766 | 0 | FD_CLR(fd, &h->master_set); |
767 | 0 | if (h->max_fd_select && (h->max_fd_select==fd)) |
768 | | /* we don't know the prev. max, so we just decrement it */ |
769 | 0 | h->max_fd_select--; |
770 | 0 | break; |
771 | 0 | #endif |
772 | 0 | #ifdef HAVE_SIGIO_RT |
773 | 0 | case POLL_SIGIO_RT: |
774 | | /* the O_ASYNC flag must be reset all the time, the fd |
775 | | * can be changed only if O_ASYNC is reset (if not and |
776 | | * the fd is a duplicate, you will get signals from the dup. fd |
777 | | * and not from the original, even if the dup. fd was closed |
778 | | * and the signals re-set on the original) -- andrei |
779 | | */ |
780 | | /*if (!(flags & IO_FD_CLOSING)){*/ |
781 | | /* reset ASYNC */ |
782 | 0 | fd_flags=fcntl(fd, F_GETFL); |
783 | 0 | if (fd_flags==-1){ |
784 | 0 | LM_ERR("[%s] fcntl: GETFL failed:" |
785 | 0 | " %s [%d]\n",h->name, strerror(errno), errno); |
786 | 0 | goto error; |
787 | 0 | } |
788 | 0 | if (fcntl(fd, F_SETFL, fd_flags&(~O_ASYNC))==-1){ |
789 | 0 | LM_ERR("[%s] fcntl: SETFL" |
790 | 0 | " failed: %s [%d]\n",h->name, strerror(errno), errno); |
791 | 0 | goto error; |
792 | 0 | } |
793 | 0 | break; |
794 | 0 | #endif |
795 | 0 | #ifdef HAVE_EPOLL |
796 | 0 | case POLL_EPOLL: |
797 | | /* epoll doesn't seem to automatically remove sockets, |
798 | | * if the socket is a duplicate/moved and the original |
799 | | * is still open. The fd is removed from the epoll set |
800 | | * only when the original (and all the copies?) is/are |
801 | | * closed. This is probably a bug in epoll. --andrei */ |
802 | | #ifdef EPOLL_NO_CLOSE_BUG |
803 | | if (!(flags & IO_FD_CLOSING)){ |
804 | | #endif |
805 | 0 | if (erase) { |
806 | 0 | n=epoll_ctl(h->epfd, EPOLL_CTL_DEL, fd, &ep_event); |
807 | | /* |
808 | | * in some cases (fds managed by external libraries), |
809 | | * the fd may have already been closed |
810 | | */ |
811 | 0 | if (n==-1 && errno != EBADF && errno != ENOENT) { |
812 | 0 | LM_ERR("[%s] removing fd from epoll (%d from %d) " |
813 | 0 | "list failed: %s [%d]\n",h->name, fd, h->epfd, |
814 | 0 | strerror(errno), errno); |
815 | 0 | goto error; |
816 | 0 | } |
817 | 0 | } else { |
818 | 0 | ep_event.data.ptr=e; |
819 | 0 | ep_event.events=0; |
820 | 0 | if (e->flags & IO_WATCH_READ) |
821 | 0 | ep_event.events|=EPOLLIN; |
822 | 0 | if (e->flags & IO_WATCH_WRITE) |
823 | 0 | ep_event.events|=EPOLLOUT; |
824 | 0 | n=epoll_ctl(h->epfd, EPOLL_CTL_MOD, fd, &ep_event); |
825 | 0 | if (n==-1){ |
826 | 0 | LM_ERR("[%s] epoll_ctl failed: %s [%d]\n", |
827 | 0 | h->name,strerror(errno), errno); |
828 | 0 | goto error; |
829 | 0 | } |
830 | 0 | } |
831 | | #ifdef EPOLL_NO_CLOSE_BUG |
832 | | } |
833 | | #endif |
834 | 0 | break; |
835 | 0 | #endif |
836 | | #ifdef HAVE_KQUEUE |
837 | | case POLL_KQUEUE: |
838 | | if (!(flags & IO_FD_CLOSING)){ |
839 | | if (kq_ev_change(h, fd, EVFILT_READ, EV_DELETE, 0)==-1) |
840 | | goto error; |
841 | | } |
842 | | break; |
843 | | #endif |
844 | | #ifdef HAVE_DEVPOLL |
845 | | case POLL_DEVPOLL: |
846 | | /* for /dev/poll the closed fds _must_ be removed |
847 | | (they are not removed automatically on close()) */ |
848 | | pfd.fd=fd; |
849 | | pfd.events=POLLREMOVE; |
850 | | pfd.revents=0; |
851 | | again_devpoll: |
852 | | if (write(h->dpoll_fd, &pfd, sizeof(pfd))==-1){ |
853 | | if (errno==EINTR) goto again_devpoll; |
854 | | LM_ERR("[%s] removing fd from /dev/poll failed: " |
855 | | "%s [%d]\n",h->name, strerror(errno), errno); |
856 | | goto error; |
857 | | } |
858 | | break; |
859 | | #endif |
860 | 0 | default: |
861 | 0 | LM_CRIT("[%s] no support for poll method %s (%d)\n", |
862 | 0 | h->name,poll_method_str[h->poll_method], h->poll_method); |
863 | 0 | goto error; |
864 | 0 | } |
865 | 0 | rla_log("fixing fd array, idx=%d\n",idx); \ |
866 | 0 | |
867 | 0 | fix_fd_array; |
868 | | //fd_array_print; |
869 | | |
870 | 0 | check_io_data(); |
871 | 0 | if (check_error) { |
872 | 0 | LM_CRIT("[%s] check failed after successful fd del " |
873 | 0 | "(fd=%d,flags=%d, sflags=%d) over map " |
874 | 0 | "(fd=%d,type=%d,data=%p,flags=%d) erase=%d\n",h->name, |
875 | 0 | fd, flags, sock_flags, |
876 | 0 | e->fd, e->type, e->data, e->flags, |
877 | 0 | erase); |
878 | 0 | rla_dump(); |
879 | 0 | } |
880 | | |
881 | 0 | return 0; |
882 | 0 | error: |
883 | | /* |
884 | | * although the DEL operation failed, both |
885 | | * "fd_hash" and "fd_array" must remain consistent |
886 | | */ |
887 | 0 | fix_fd_array; |
888 | | |
889 | 0 | check_io_data(); |
890 | 0 | if (check_error) { |
891 | 0 | LM_CRIT("[%s] check failed after failed fd del " |
892 | 0 | "(fd=%d,flags=%d, sflags=%d) over map " |
893 | 0 | "(fd=%d,type=%d,data=%p,flags=%d) erase=%d\n",h->name, |
894 | 0 | fd, flags, sock_flags, |
895 | 0 | e->fd, e->type, e->data, e->flags, |
896 | 0 | erase); |
897 | 0 | rla_dump(); |
898 | 0 | } |
899 | 0 | error0: |
900 | | |
901 | 0 | return -1; |
902 | 0 | #undef fix_fd_array |
903 | 0 | } |
Unexecuted instantiations of io_watch_del: net_tcp_proc.c, net_tcp.c, tcp_common.c, net_udp.c, async.c, timer.c, reactor.c, io_wait.c, cfg_reload.c
904 | | |
905 | | |
906 | | /* init */ |
907 | | |
908 | | |
909 | | /*! \brief initializes the static vars/arrays |
910 | | * \param h pointer to the io_wait_h that will be initialized |
911 | | * \param max_fd maximum allowed fd number |
912 | | * \param poll_method poll method (0 for automatic best fit) |
913 | | */ |
914 | | int init_io_wait(io_wait_h* h, char *name, int max_fd, |
915 | | enum poll_types poll_method, int max_prio); |
916 | | |
917 | | /*! \brief destroys everything init_io_wait allocated */ |
918 | | void destroy_io_wait(io_wait_h* h); |
919 | | |
920 | | int io_set_app_flag( io_wait_h *h , int type, int app_flag); |
921 | | |
922 | | int io_check_app_flag( io_wait_h *h , int app_flag); |
923 | | |
924 | | |
925 | | #endif |
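Putting the declarations above together, here is a hypothetical end-to-end usage sketch: create a handler, register a descriptor for reading, and later unregister it and destroy the handler. The handler name, the 2048-fd limit, the single priority level, the F_LISTENER type (from the consumer sketch near the top of this file) and the 0-on-success / negative-on-error return convention are illustrative assumptions; the event loop itself is provided by io_wait_loop.h.

```c
/* hypothetical usage of the API declared in this header */
static io_wait_h my_h;

static int reactor_setup(int listen_fd, void *listener_data)
{
	/* poll_method 0 => let init_io_wait() pick the best available method */
	if (init_io_wait(&my_h, "example", 2048 /* max_fd */, 0, 1 /* max_prio */) < 0)
		return -1;

	/* watch listen_fd for read events, priority 0, no timeout */
	if (io_watch_add(&my_h, listen_fd, F_LISTENER, listener_data,
			0 /* prio */, 0 /* timeout */, IO_WATCH_READ) < 0) {
		destroy_io_wait(&my_h);
		return -1;
	}
	return 0;
}

static void reactor_teardown(int listen_fd)
{
	/* the fd is about to be closed, so IO_FD_CLOSING lets some poll
	 * methods (epoll, kqueue, sigio) skip the explicit removal */
	io_watch_del(&my_h, listen_fd, -1 /* idx unknown */, IO_FD_CLOSING,
			IO_WATCH_READ);
	destroy_io_wait(&my_h);
}
```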