/src/samba/lib/tevent/tevent_poll.c
Line | Count | Source |
1 | | /* |
2 | | Unix SMB/CIFS implementation. |
3 | | main select loop and event handling |
4 | | Copyright (C) Andrew Tridgell 2003-2005 |
5 | | Copyright (C) Stefan Metzmacher 2005-2009 |
6 | | |
7 | | ** NOTE! The following LGPL license applies to the tevent |
8 | | ** library. This does NOT imply that all of Samba is released |
9 | | ** under the LGPL |
10 | | |
11 | | This library is free software; you can redistribute it and/or |
12 | | modify it under the terms of the GNU Lesser General Public |
13 | | License as published by the Free Software Foundation; either |
14 | | version 3 of the License, or (at your option) any later version. |
15 | | |
16 | | This library is distributed in the hope that it will be useful, |
17 | | but WITHOUT ANY WARRANTY; without even the implied warranty of |
18 | | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
19 | | Lesser General Public License for more details. |
20 | | |
21 | | You should have received a copy of the GNU Lesser General Public |
22 | | License along with this library; if not, see <http://www.gnu.org/licenses/>. |
23 | | */ |
24 | | |
25 | | #include "replace.h" |
26 | | #include "system/filesys.h" |
27 | | #include "system/select.h" |
28 | | #include "tevent.h" |
29 | | #include "tevent_util.h" |
30 | | #include "tevent_internal.h" |
31 | | |
/* per-tevent_context private state for the poll backend */
struct poll_event_context {
	/* a pointer back to the generic event_context */
	struct tevent_context *ev;

	/*
	 * one or more events were deleted or disabled;
	 * their 'fdes' slot is set to NULL and this flag tells
	 * poll_event_sync_arrays() to compact both arrays before
	 * the next poll() call
	 */
	bool deleted;

	/*
	 * These two arrays are maintained together.
	 *
	 * The following is always true:
	 * num_fds <= num_fdes
	 *
	 * new 'fresh' elements are added at the end
	 * of the 'fdes' array and picked up later
	 * to the 'fds' array in poll_event_sync_arrays()
	 * before the poll() syscall.
	 */
	struct pollfd *fds;
	size_t num_fds;
	struct tevent_fd **fdes;
	size_t num_fdes;

	/*
	 * use tevent_common_wakeup(ev) to wake the poll() thread
	 */
	bool use_mt_mode;
};
62 | | |
63 | | /* |
64 | | create a poll_event_context structure. |
65 | | */ |
66 | | static int poll_event_context_init(struct tevent_context *ev) |
67 | 0 | { |
68 | 0 | struct poll_event_context *poll_ev; |
69 | | |
70 | | /* |
71 | | * we might be called during tevent_re_initialise() |
72 | | * which means we need to free our old additional_data |
73 | | * in order to detach old fd events from the |
74 | | * poll_ev->fresh list |
75 | | */ |
76 | 0 | TALLOC_FREE(ev->additional_data); |
77 | |
|
78 | 0 | poll_ev = talloc_zero(ev, struct poll_event_context); |
79 | 0 | if (poll_ev == NULL) { |
80 | 0 | return -1; |
81 | 0 | } |
82 | 0 | poll_ev->ev = ev; |
83 | 0 | ev->additional_data = poll_ev; |
84 | 0 | return 0; |
85 | 0 | } |
86 | | |
87 | | static int poll_event_context_init_mt(struct tevent_context *ev) |
88 | 0 | { |
89 | 0 | struct poll_event_context *poll_ev; |
90 | 0 | int ret; |
91 | |
|
92 | 0 | ret = poll_event_context_init(ev); |
93 | 0 | if (ret == -1) { |
94 | 0 | return ret; |
95 | 0 | } |
96 | | |
97 | 0 | poll_ev = talloc_get_type_abort( |
98 | 0 | ev->additional_data, struct poll_event_context); |
99 | |
|
100 | 0 | ret = tevent_common_wakeup_init(ev); |
101 | 0 | if (ret != 0) { |
102 | 0 | return ret; |
103 | 0 | } |
104 | | |
105 | 0 | poll_ev->use_mt_mode = true; |
106 | |
|
107 | 0 | return 0; |
108 | 0 | } |
109 | | |
110 | | static void poll_event_wake_pollthread(struct poll_event_context *poll_ev) |
111 | 0 | { |
112 | 0 | if (!poll_ev->use_mt_mode) { |
113 | 0 | return; |
114 | 0 | } |
115 | 0 | tevent_common_wakeup(poll_ev->ev); |
116 | 0 | } |
117 | | |
118 | | /* |
119 | | destroy an fd_event |
120 | | */ |
121 | | static int poll_event_fd_destructor(struct tevent_fd *fde) |
122 | 0 | { |
123 | 0 | struct tevent_context *ev = fde->event_ctx; |
124 | 0 | struct poll_event_context *poll_ev; |
125 | 0 | uint64_t del_idx = fde->additional_flags; |
126 | |
|
127 | 0 | if (ev == NULL) { |
128 | 0 | goto done; |
129 | 0 | } |
130 | | |
131 | 0 | poll_ev = talloc_get_type_abort( |
132 | 0 | ev->additional_data, struct poll_event_context); |
133 | |
|
134 | 0 | if (del_idx == UINT64_MAX) { |
135 | 0 | goto done; |
136 | 0 | } |
137 | | |
138 | 0 | poll_ev->fdes[del_idx] = NULL; |
139 | 0 | poll_ev->deleted = true; |
140 | 0 | poll_event_wake_pollthread(poll_ev); |
141 | 0 | done: |
142 | 0 | return tevent_common_fd_destructor(fde); |
143 | 0 | } |
144 | | |
/*
  schedule an immediate event (poll_mt variant of the common helper)

  After queueing the immediate we wake the poll thread, since in
  multi-threaded mode another thread may be blocked in poll() and
  would otherwise not notice the new work until its timeout.
*/
static void poll_event_schedule_immediate(struct tevent_immediate *im,
					  struct tevent_context *ev,
					  tevent_immediate_handler_t handler,
					  void *private_data,
					  const char *handler_name,
					  const char *location)
{
	struct poll_event_context *poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	tevent_common_schedule_immediate(im, ev, handler, private_data,
					 handler_name, location);
	poll_event_wake_pollthread(poll_ev);
}
159 | | |
160 | | /* |
161 | | Private function called by "standard" backend fallback. |
162 | | Note this only allows fallback to "poll" backend, not "poll-mt". |
163 | | */ |
164 | | _PRIVATE_ bool tevent_poll_event_add_fd_internal(struct tevent_context *ev, |
165 | | struct tevent_fd *fde) |
166 | 0 | { |
167 | 0 | struct poll_event_context *poll_ev = talloc_get_type_abort( |
168 | 0 | ev->additional_data, struct poll_event_context); |
169 | 0 | uint64_t fde_idx = UINT64_MAX; |
170 | 0 | size_t num_fdes; |
171 | |
|
172 | 0 | fde->additional_flags = UINT64_MAX; |
173 | 0 | tevent_common_fd_mpx_reinit(fde); |
174 | 0 | talloc_set_destructor(fde, poll_event_fd_destructor); |
175 | |
|
176 | 0 | if (fde->flags == 0) { |
177 | | /* |
178 | | * Nothing more to do... |
179 | | */ |
180 | 0 | return true; |
181 | 0 | } |
182 | | |
183 | | /* |
184 | | * We need to add it to the end of the 'fdes' array. |
185 | | */ |
186 | 0 | num_fdes = poll_ev->num_fdes + 1; |
187 | 0 | if (num_fdes > talloc_array_length(poll_ev->fdes)) { |
188 | 0 | struct tevent_fd **tmp_fdes = NULL; |
189 | 0 | size_t array_length; |
190 | |
|
191 | 0 | array_length = (num_fdes + 15) & ~15; /* round up to 16 */ |
192 | |
|
193 | 0 | tmp_fdes = talloc_realloc(poll_ev, |
194 | 0 | poll_ev->fdes, |
195 | 0 | struct tevent_fd *, |
196 | 0 | array_length); |
197 | 0 | if (tmp_fdes == NULL) { |
198 | 0 | return false; |
199 | 0 | } |
200 | 0 | poll_ev->fdes = tmp_fdes; |
201 | 0 | } |
202 | | |
203 | 0 | fde_idx = poll_ev->num_fdes; |
204 | 0 | fde->additional_flags = fde_idx; |
205 | 0 | poll_ev->fdes[fde_idx] = fde; |
206 | 0 | poll_ev->num_fdes++; |
207 | |
|
208 | 0 | return true; |
209 | 0 | } |
210 | | |
211 | | /* |
212 | | add a fd based event |
213 | | return NULL on failure (memory allocation error) |
214 | | */ |
215 | | static struct tevent_fd *poll_event_add_fd(struct tevent_context *ev, |
216 | | TALLOC_CTX *mem_ctx, |
217 | | int fd, uint16_t flags, |
218 | | tevent_fd_handler_t handler, |
219 | | void *private_data, |
220 | | const char *handler_name, |
221 | | const char *location) |
222 | 0 | { |
223 | 0 | struct poll_event_context *poll_ev = talloc_get_type_abort( |
224 | 0 | ev->additional_data, struct poll_event_context); |
225 | 0 | struct tevent_fd *fde; |
226 | 0 | bool ok; |
227 | |
|
228 | 0 | if (fd < 0) { |
229 | 0 | return NULL; |
230 | 0 | } |
231 | | |
232 | 0 | fde = tevent_common_add_fd(ev, |
233 | 0 | mem_ctx, |
234 | 0 | fd, |
235 | 0 | flags, |
236 | 0 | handler, |
237 | 0 | private_data, |
238 | 0 | handler_name, |
239 | 0 | location); |
240 | 0 | if (fde == NULL) { |
241 | 0 | return NULL; |
242 | 0 | } |
243 | | |
244 | 0 | ok = tevent_poll_event_add_fd_internal(ev, fde); |
245 | 0 | if (!ok) { |
246 | 0 | TALLOC_FREE(fde); |
247 | 0 | return NULL; |
248 | 0 | } |
249 | 0 | poll_event_wake_pollthread(poll_ev); |
250 | | |
251 | | /* |
252 | | * poll_event_loop_poll will take care of the rest in |
253 | | * poll_event_setup_fresh |
254 | | */ |
255 | 0 | return fde; |
256 | 0 | } |
257 | | |
258 | | /* |
259 | | map from TEVENT_FD_* to POLLIN/POLLOUT |
260 | | */ |
261 | | static uint16_t poll_map_flags(uint16_t flags) |
262 | 0 | { |
263 | 0 | uint16_t pollflags = 0; |
264 | | |
265 | | /* |
266 | | * we do not need to specify POLLERR | POLLHUP |
267 | | * they are always reported. |
268 | | */ |
269 | |
|
270 | 0 | if (flags & TEVENT_FD_READ) { |
271 | 0 | pollflags |= POLLIN; |
272 | 0 | #ifdef POLLRDHUP |
273 | | /* |
274 | | * Note that at least on Linux |
275 | | * POLLRDHUP always returns |
276 | | * POLLIN in addition, so this |
277 | | * is not strictly needed, but |
278 | | * we want to make it explicit. |
279 | | */ |
280 | 0 | pollflags |= POLLRDHUP; |
281 | 0 | #endif |
282 | 0 | } |
283 | 0 | if (flags & TEVENT_FD_WRITE) { |
284 | 0 | pollflags |= POLLOUT; |
285 | 0 | } |
286 | 0 | if (flags & TEVENT_FD_ERROR) { |
287 | 0 | #ifdef POLLRDHUP |
288 | 0 | pollflags |= POLLRDHUP; |
289 | 0 | #endif |
290 | 0 | } |
291 | |
|
292 | 0 | return pollflags; |
293 | 0 | } |
294 | | |
295 | | /* |
296 | | set the fd event flags |
297 | | */ |
/*
  set the fd event flags

  The fde is in one of three states, encoded in additional_flags:
  - UINT64_MAX: disabled/fresh, not tracked in the arrays
  - idx < num_fds: live, already mirrored in the pollfd array
  - num_fds <= idx < num_fdes: fresh, not yet synced into 'fds'
*/
static void poll_event_set_fd_flags(struct tevent_fd *fde, uint16_t flags)
{
	struct tevent_context *ev = fde->event_ctx;
	struct poll_event_context *poll_ev;
	uint64_t idx = fde->additional_flags;

	if (ev == NULL) {
		/* already detached from its event context */
		return;
	}

	if (fde->flags == flags) {
		/* nothing changes */
		return;
	}

	poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	fde->flags = flags;

	if (idx == UINT64_MAX) {
		/*
		 * We move it between the fresh and disabled lists.
		 *
		 * NOTE(review): the return value of
		 * tevent_poll_event_add_fd_internal() (allocation
		 * failure) is ignored here — presumably deliberate,
		 * as this function cannot report errors; confirm.
		 */
		tevent_poll_event_add_fd_internal(ev, fde);
		poll_event_wake_pollthread(poll_ev);
		return;
	}

	if (fde->flags == 0) {
		/*
		 * We need to remove it from the array
		 * and move it to the disabled list.
		 */
		poll_ev->fdes[idx] = NULL;
		poll_ev->deleted = true;
		fde->additional_flags = UINT64_MAX;
		poll_event_wake_pollthread(poll_ev);
		return;
	}

	if (idx >= poll_ev->num_fds) {
		/*
		 * Not yet added to the
		 * poll_ev->fds array.
		 * poll_event_sync_arrays() will pick up the
		 * new flags when it copies the entry over.
		 */
		poll_event_wake_pollthread(poll_ev);
		return;
	}

	/* live entry: update the mirrored pollfd in place */
	poll_ev->fds[idx].events = poll_map_flags(flags);

	poll_event_wake_pollthread(poll_ev);
}
351 | | |
/*
  bring the 'fds' (pollfd) and 'fdes' (tevent_fd) arrays back in
  sync before calling poll():
  1. compact out slots NULLed by deletion/disable (if 'deleted'),
  2. append the 'fresh' fdes entries [num_fds, num_fdes) as pollfds,
  3. optionally shrink both arrays (16-element granularity).
  Returns false on allocation failure.
*/
static bool poll_event_sync_arrays(struct tevent_context *ev,
				   struct poll_event_context *poll_ev)
{
	size_t i;
	size_t array_length;

	if (poll_ev->deleted) {

		for (i=0; i < poll_ev->num_fds;) {
			struct tevent_fd *fde = poll_ev->fdes[i];
			size_t ci;

			if (fde != NULL) {
				i++;
				continue;
			}

			/*
			 * This fde was talloc_free()'ed. Delete it
			 * from the arrays by swapping in the last
			 * element (i is not advanced, so the moved
			 * element is inspected on the next pass).
			 */
			poll_ev->num_fds -= 1;
			ci = poll_ev->num_fds;
			if (ci > i) {
				poll_ev->fds[i] = poll_ev->fds[ci];
				poll_ev->fdes[i] = poll_ev->fdes[ci];
				if (poll_ev->fdes[i] != NULL) {
					/* keep the fde's stored index valid */
					poll_ev->fdes[i]->additional_flags = i;
				}
			}
			poll_ev->fds[ci] = (struct pollfd) { .fd = -1 };
			poll_ev->fdes[ci] = NULL;
		}
		poll_ev->deleted = false;
	}

	if (poll_ev->num_fds == poll_ev->num_fdes) {
		/* no fresh elements pending */
		return true;
	}

	/*
	 * Recheck the size of both arrays and make sure
	 * poll_fd->fds array has at least the size of the
	 * in use poll_ev->fdes array.
	 */
	if (poll_ev->num_fdes > talloc_array_length(poll_ev->fds)) {
		struct pollfd *tmp_fds = NULL;

		/*
		 * Make sure both allocated the same length.
		 */
		array_length = talloc_array_length(poll_ev->fdes);

		tmp_fds = talloc_realloc(poll_ev,
					 poll_ev->fds,
					 struct pollfd,
					 array_length);
		if (tmp_fds == NULL) {
			return false;
		}
		poll_ev->fds = tmp_fds;
	}

	/*
	 * Now setup the new elements.
	 */
	for (i = poll_ev->num_fds; i < poll_ev->num_fdes; i++) {
		struct tevent_fd *fde = poll_ev->fdes[i];
		struct pollfd *pfd = &poll_ev->fds[poll_ev->num_fds];

		if (fde == NULL) {
			/* this fresh slot was already deleted again */
			continue;
		}

		if (i > poll_ev->num_fds) {
			/* slide the entry down over skipped holes */
			poll_ev->fdes[poll_ev->num_fds] = fde;
			fde->additional_flags = poll_ev->num_fds;
			poll_ev->fdes[i] = NULL;
		}

		pfd->fd = fde->fd;
		pfd->events = poll_map_flags(fde->flags);
		pfd->revents = 0;

		poll_ev->num_fds += 1;
	}
	/* Both are in sync again */
	poll_ev->num_fdes = poll_ev->num_fds;

	/*
	 * Check if we should shrink the arrays
	 * But keep at least 16 elements.
	 */

	array_length = (poll_ev->num_fds + 15) & ~15; /* round up to 16 */
	array_length = MAX(array_length, 16);
	if (array_length < talloc_array_length(poll_ev->fdes)) {
		struct tevent_fd **tmp_fdes = NULL;
		struct pollfd *tmp_fds = NULL;

		tmp_fdes = talloc_realloc(poll_ev,
					  poll_ev->fdes,
					  struct tevent_fd *,
					  array_length);
		if (tmp_fdes == NULL) {
			return false;
		}
		poll_ev->fdes = tmp_fdes;

		tmp_fds = talloc_realloc(poll_ev,
					 poll_ev->fds,
					 struct pollfd,
					 array_length);
		if (tmp_fds == NULL) {
			return false;
		}
		poll_ev->fds = tmp_fds;
	}

	return true;
}
473 | | |
474 | | /* |
475 | | event loop handling using poll() |
476 | | */ |
/*
  event loop handling using poll()

  Waits up to *tvalp for fd activity, then dispatches at most ONE
  fd handler per call (the handled fde is demoted in the list for
  fairness). Returns 0 on success, -1 on error (EAGAIN when a
  zero wait-timeout expired with nothing ready).
*/
static int poll_event_loop_poll(struct tevent_context *ev,
				struct timeval *tvalp)
{
	struct poll_event_context *poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);
	int pollrtn;
	int timeout = tevent_common_timeout_msec(tvalp);
	int poll_errno;
	struct tevent_fd *fde = NULL;
	struct tevent_fd *next = NULL;
	unsigned i;
	bool ok;

	if (ev->signal_events && tevent_common_check_signal(ev)) {
		return 0;
	}

	/* fold fresh/deleted entries into the pollfd array */
	ok = poll_event_sync_arrays(ev, poll_ev);
	if (!ok) {
		return -1;
	}

	tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_BEFORE_WAIT);
	pollrtn = poll(poll_ev->fds, poll_ev->num_fds, timeout);
	/* save errno before the trace callback can clobber it */
	poll_errno = errno;
	tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_AFTER_WAIT);

	if (pollrtn == -1 && poll_errno == EINTR && ev->signal_events) {
		/* interrupted by a signal we handle ourselves */
		tevent_common_check_signal(ev);
		return 0;
	}

	if (pollrtn == 0) {
		/*
		 * tevent_context_set_wait_timeout(0) was used.
		 */
		if (tevent_common_no_timeout(tvalp)) {
			errno = EAGAIN;
			return -1;
		}

		/* we don't care about a possible delay here */
		tevent_common_loop_timer_delay(ev);
		return 0;
	}

	if (pollrtn <= 0) {
		/*
		 * No fd's ready
		 */
		return 0;
	}

	/* at least one file descriptor is ready - check
	   which ones and call the handler, being careful to allow
	   the handler to remove itself when called */

	for (fde = ev->fd_events; fde; fde = next) {
		uint64_t idx = fde->additional_flags;
		struct pollfd *pfd;
		uint16_t flags = 0;

		/* fetch 'next' now: the handler may free fde */
		next = fde->next;

		if (idx == UINT64_MAX) {
			/* disabled/fresh, not in the pollfd array */
			continue;
		}

		pfd = &poll_ev->fds[idx];

		if (pfd->revents & POLLNVAL) {
			/*
			 * the socket is dead! this should never
			 * happen as the socket should have first been
			 * made readable and that should have removed
			 * the event, so this must be a bug.
			 *
			 * We ignore it here to match the epoll
			 * behavior.
			 */
			tevent_debug(ev, TEVENT_DEBUG_ERROR,
				     "POLLNVAL on fde[%p] fd[%d] - disabling\n",
				     fde, pfd->fd);
			poll_ev->fdes[idx] = NULL;
			poll_ev->deleted = true;
			tevent_common_fd_disarm(fde);
			continue;
		}

#ifdef POLLRDHUP
#define __POLL_RETURN_ERROR_FLAGS (POLLHUP|POLLERR|POLLRDHUP)
#else
#define __POLL_RETURN_ERROR_FLAGS (POLLHUP|POLLERR)
#endif

		if (pfd->revents & __POLL_RETURN_ERROR_FLAGS) {
			/*
			 * If we only wait for TEVENT_FD_WRITE, we
			 * should not tell the event handler about it,
			 * and remove the writable flag, as we only
			 * report errors when waiting for read events
			 * or explicit for errors.
			 */
			if (!(fde->flags & (TEVENT_FD_READ|TEVENT_FD_ERROR)))
			{
				TEVENT_FD_NOT_WRITEABLE(fde);
				continue;
			}
			if (fde->flags & TEVENT_FD_ERROR) {
				flags |= TEVENT_FD_ERROR;
			}
			if (fde->flags & TEVENT_FD_READ) {
				flags |= TEVENT_FD_READ;
			}
		}
		if (pfd->revents & POLLIN) {
			flags |= TEVENT_FD_READ;
		}
		if (pfd->revents & POLLOUT) {
			flags |= TEVENT_FD_WRITE;
		}
		/*
		 * Note that fde->flags could be changed when using
		 * the poll_mt backend together with threads,
		 * that why we need to check pfd->revents and fde->flags
		 */
		flags &= fde->flags;
		if (flags != 0) {
			/* demote for fairness, dispatch ONE handler, stop */
			DLIST_DEMOTE(ev->fd_events, fde);
			return tevent_common_invoke_fd_handler(fde, flags, NULL);
		}
	}

	/* sweep for POLLNVAL entries whose fde was not reached above */
	for (i = 0; i < poll_ev->num_fds; i++) {
		if (poll_ev->fds[i].revents & POLLNVAL) {
			/*
			 * the socket is dead! this should never
			 * happen as the socket should have first been
			 * made readable and that should have removed
			 * the event, so this must be a bug or
			 * a race in the poll_mt usage.
			 */
			fde = poll_ev->fdes[i];
			tevent_debug(ev, TEVENT_DEBUG_WARNING,
				     "POLLNVAL on dangling fd[%d] fde[%p] - disabling\n",
				     poll_ev->fds[i].fd, fde);
			poll_ev->fdes[i] = NULL;
			poll_ev->deleted = true;
			if (fde != NULL) {
				tevent_common_fd_disarm(fde);
			}
		}
	}

	return 0;
}
633 | | |
634 | | /* |
635 | | do a single event loop using the events defined in ev |
636 | | */ |
/*
  do a single event loop using the events defined in ev

  Priority order: pending signals, thread-queued immediates,
  immediate events, due timers, then poll() for fd events.
*/
static int poll_event_loop_once(struct tevent_context *ev,
				const char *location)
{
	struct timeval tval;

	if (ev->signal_events &&
	    tevent_common_check_signal(ev)) {
		return 0;
	}

	/* move immediates queued by helper threads onto the main list */
	if (ev->threaded_contexts != NULL) {
		tevent_common_threaded_activate_immediate(ev);
	}

	if (ev->immediate_events &&
	    tevent_common_loop_immediate(ev)) {
		return 0;
	}

	/*
	 * a zero delay presumably means a timer was already due and
	 * was run inside tevent_common_loop_timer_delay() — confirm
	 * against its implementation
	 */
	tval = tevent_common_loop_timer_delay(ev);
	if (tevent_timeval_is_zero(&tval)) {
		return 0;
	}

	return poll_event_loop_poll(ev, &tval);
}
663 | | |
/* backend vtable for the single-threaded "poll" backend */
static const struct tevent_ops poll_event_ops = {
	.context_init = poll_event_context_init,
	.add_fd = poll_event_add_fd,
	.set_fd_close_fn = tevent_common_fd_set_close_fn,
	.get_fd_flags = tevent_common_fd_get_flags,
	.set_fd_flags = poll_event_set_fd_flags,
	.add_timer = tevent_common_add_timer_v2,
	.schedule_immediate = tevent_common_schedule_immediate,
	.add_signal = tevent_common_add_signal,
	.loop_once = poll_event_loop_once,
	.loop_wait = tevent_common_loop_wait,
};
676 | | |
/* register the "poll" backend with the tevent core */
_PRIVATE_ bool tevent_poll_init(void)
{
	return tevent_register_backend("poll", &poll_event_ops);
}
681 | | |
/*
 * backend vtable for the multi-threaded "poll_mt" backend:
 * differs from poll_event_ops in context_init (adds the wakeup
 * mechanism) and schedule_immediate (wakes the poll thread)
 */
static const struct tevent_ops poll_event_mt_ops = {
	.context_init = poll_event_context_init_mt,
	.add_fd = poll_event_add_fd,
	.set_fd_close_fn = tevent_common_fd_set_close_fn,
	.get_fd_flags = tevent_common_fd_get_flags,
	.set_fd_flags = poll_event_set_fd_flags,
	.add_timer = tevent_common_add_timer_v2,
	.schedule_immediate = poll_event_schedule_immediate,
	.add_signal = tevent_common_add_signal,
	.loop_once = poll_event_loop_once,
	.loop_wait = tevent_common_loop_wait,
};
694 | | |
/* register the "poll_mt" backend with the tevent core */
_PRIVATE_ bool tevent_poll_mt_init(void)
{
	return tevent_register_backend("poll_mt", &poll_event_mt_ops);
}