/src/openssl32/ssl/quic/quic_reactor.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright 2022-2023 The OpenSSL Project Authors. All Rights Reserved. |
3 | | * |
4 | | * Licensed under the Apache License 2.0 (the "License"). You may not use |
5 | | * this file except in compliance with the License. You can obtain a copy |
6 | | * in the file LICENSE in the source distribution or at |
7 | | * https://www.openssl.org/source/license.html |
8 | | */ |
9 | | #include "internal/quic_reactor.h" |
10 | | #include "internal/common.h" |
11 | | #include "internal/thread_arch.h" |
12 | | |
13 | | /* |
14 | | * Core I/O Reactor Framework |
15 | | * ========================== |
16 | | */ |
17 | | void ossl_quic_reactor_init(QUIC_REACTOR *rtor, |
18 | | void (*tick_cb)(QUIC_TICK_RESULT *res, void *arg, |
19 | | uint32_t flags), |
20 | | void *tick_cb_arg, |
21 | | OSSL_TIME initial_tick_deadline) |
22 | 11.1k | { |
23 | 11.1k | rtor->poll_r.type = BIO_POLL_DESCRIPTOR_TYPE_NONE; |
24 | 11.1k | rtor->poll_w.type = BIO_POLL_DESCRIPTOR_TYPE_NONE; |
25 | 11.1k | rtor->net_read_desired = 0; |
26 | 11.1k | rtor->net_write_desired = 0; |
27 | 11.1k | rtor->can_poll_r = 0; |
28 | 11.1k | rtor->can_poll_w = 0; |
29 | 11.1k | rtor->tick_deadline = initial_tick_deadline; |
30 | | |
31 | 11.1k | rtor->tick_cb = tick_cb; |
32 | 11.1k | rtor->tick_cb_arg = tick_cb_arg; |
33 | 11.1k | } |
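As a rough, non-authoritative illustration of how this initialiser is meant to be driven (this sketch is not part of the measured source), a hypothetical tick callback could fill in the QUIC_TICK_RESULT fields that the reactor copies back on every tick; example_tick_cb and example_setup are invented names, and in the real stack the callback is supplied by the QUIC channel code.

static void example_tick_cb(QUIC_TICK_RESULT *res, void *arg, uint32_t flags)
{
    /* Best-effort protocol processing would happen here; there is no error return. */
    res->net_read_desired  = 1;                    /* ask to be polled for readability */
    res->net_write_desired = 0;                    /* nothing queued for transmission  */
    res->tick_deadline     = ossl_time_infinite(); /* no timer event currently armed   */
}

static void example_setup(QUIC_REACTOR *rtor)
{
    ossl_quic_reactor_init(rtor, example_tick_cb, /*tick_cb_arg=*/NULL,
                           /*initial_tick_deadline=*/ossl_time_infinite());
}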
34 | | |
35 | | void ossl_quic_reactor_set_poll_r(QUIC_REACTOR *rtor, const BIO_POLL_DESCRIPTOR *r) |
36 | 22.4M | { |
37 | 22.4M | if (r == NULL) |
38 | 0 | rtor->poll_r.type = BIO_POLL_DESCRIPTOR_TYPE_NONE; |
39 | 22.4M | else |
40 | 22.4M | rtor->poll_r = *r; |
41 | | |
42 | 22.4M | rtor->can_poll_r |
43 | 22.4M | = ossl_quic_reactor_can_support_poll_descriptor(rtor, &rtor->poll_r); |
44 | 22.4M | } |
45 | | |
46 | | void ossl_quic_reactor_set_poll_w(QUIC_REACTOR *rtor, const BIO_POLL_DESCRIPTOR *w) |
47 | 22.4M | { |
48 | 22.4M | if (w == NULL) |
49 | 0 | rtor->poll_w.type = BIO_POLL_DESCRIPTOR_TYPE_NONE; |
50 | 22.4M | else |
51 | 22.4M | rtor->poll_w = *w; |
52 | | |
53 | 22.4M | rtor->can_poll_w |
54 | 22.4M | = ossl_quic_reactor_can_support_poll_descriptor(rtor, &rtor->poll_w); |
55 | 22.4M | } |
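A hedged sketch (not taken from this file) of how a caller might hand a socket to the reactor: only BIO_POLL_DESCRIPTOR_TYPE_SOCK_FD descriptors are currently pollable, so can_poll_r/can_poll_w end up set only for that type. The function name and the 'sock' argument are invented for illustration.

static void example_attach_socket(QUIC_REACTOR *rtor, int sock)
{
    /* 'sock' is assumed to be a connected UDP socket. */
    BIO_POLL_DESCRIPTOR d = {0};

    d.type     = BIO_POLL_DESCRIPTOR_TYPE_SOCK_FD;
    d.value.fd = sock;

    ossl_quic_reactor_set_poll_r(rtor, &d); /* the descriptor is copied, not retained by pointer */
    ossl_quic_reactor_set_poll_w(rtor, &d); /* same socket used for both directions              */

    if (!ossl_quic_reactor_can_poll_r(rtor)) {
        /* unsupported descriptor type: blocking waits will not be able to poll it */
    }
}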
56 | | |
57 | | const BIO_POLL_DESCRIPTOR *ossl_quic_reactor_get_poll_r(const QUIC_REACTOR *rtor) |
58 | 0 | { |
59 | 0 | return &rtor->poll_r; |
60 | 0 | } |
61 | | |
62 | | const BIO_POLL_DESCRIPTOR *ossl_quic_reactor_get_poll_w(const QUIC_REACTOR *rtor) |
63 | 0 | { |
64 | 0 | return &rtor->poll_w; |
65 | 0 | } |
66 | | |
67 | | int ossl_quic_reactor_can_support_poll_descriptor(const QUIC_REACTOR *rtor, |
68 | | const BIO_POLL_DESCRIPTOR *d) |
69 | 44.9M | { |
70 | 44.9M | return d->type == BIO_POLL_DESCRIPTOR_TYPE_SOCK_FD; |
71 | 44.9M | } |
72 | | |
73 | | int ossl_quic_reactor_can_poll_r(const QUIC_REACTOR *rtor) |
74 | 33.7M | { |
75 | 33.7M | return rtor->can_poll_r; |
76 | 33.7M | } |
77 | | |
78 | | int ossl_quic_reactor_can_poll_w(const QUIC_REACTOR *rtor) |
79 | 19.8M | { |
80 | 19.8M | return rtor->can_poll_w; |
81 | 19.8M | } |
82 | | |
83 | | int ossl_quic_reactor_net_read_desired(QUIC_REACTOR *rtor) |
84 | 0 | { |
85 | 0 | return rtor->net_read_desired; |
86 | 0 | } |
87 | | |
88 | | int ossl_quic_reactor_net_write_desired(QUIC_REACTOR *rtor) |
89 | 0 | { |
90 | 0 | return rtor->net_write_desired; |
91 | 0 | } |
92 | | |
93 | | OSSL_TIME ossl_quic_reactor_get_tick_deadline(QUIC_REACTOR *rtor) |
94 | 33.5M | { |
95 | 33.5M | return rtor->tick_deadline; |
96 | 33.5M | } |
97 | | |
98 | | int ossl_quic_reactor_tick(QUIC_REACTOR *rtor, uint32_t flags) |
99 | 17.7M | { |
100 | 17.7M | QUIC_TICK_RESULT res = {0}; |
101 | | |
102 | | /* |
103 | | * Note that the tick callback cannot fail; this is intentional. Arguably it |
104 | | * does not make that much sense for ticking to 'fail' (in the sense of an |
105 | | * explicit error indicated to the user) because ticking is by its nature |
106 | | * best effort. If something fatal happens with a connection we can report |
107 | | * it on the next actual application I/O call. |
108 | | */ |
109 | 17.7M | rtor->tick_cb(&res, rtor->tick_cb_arg, flags); |
110 | | |
111 | 17.7M | rtor->net_read_desired = res.net_read_desired; |
112 | 17.7M | rtor->net_write_desired = res.net_write_desired; |
113 | 17.7M | rtor->tick_deadline = res.tick_deadline; |
114 | 17.7M | return 1; |
115 | 17.7M | } |
116 | | |
117 | | /* |
118 | | * Blocking I/O Adaptation Layer |
119 | | * ============================= |
120 | | */ |
121 | | |
122 | | /* |
123 | | * Utility which can be used to poll on up to two FDs. This is designed to |
124 | | * support use of split FDs (e.g. with SSL_set_rfd and SSL_set_wfd where |
125 | | * different FDs are used for read and write). |
126 | | * |
127 | | * Generally use of poll(2) is preferred where available. Windows, however, |
128 | | * hasn't traditionally offered poll(2), only select(2). WSAPoll() was |
129 | | * introduced in Vista but has seemingly been buggy until relatively recent |
130 | | * versions of Windows 10. Moreover we support XP so this is not a suitable |
131 | | * target anyway. However, the traditional issues with select(2) turn out not to |
132 | | * be an issue on Windows; whereas traditional *NIX select(2) uses a bitmap of |
133 | | * FDs (and thus is limited in the magnitude of the FDs expressible), Windows |
134 | | * select(2) is very different. In Windows, socket handles are not allocated |
135 | | * contiguously from zero and thus this bitmap approach was infeasible. Thus in |
136 | | * adapting the Berkeley sockets API to Windows a different approach was taken |
137 | | * whereby the fd_set contains a fixed length array of socket handles and an |
138 | | * integer indicating how many entries are valid; thus Windows select() |
139 | | * ironically is actually much more like *NIX poll(2) than *NIX select(2). In |
140 | | * any case, this means that the relevant limit for Windows select() is the |
141 | | * number of FDs being polled, not the magnitude of those FDs. Since we only |
142 | | * poll for two FDs here, this limit does not concern us. |
143 | | * |
144 | | * Usage: rfd and wfd may be the same or different. Either or both may also be |
145 | | * -1. If rfd_want_read is 1, rfd is polled for readability, and if |
146 | | * wfd_want_write is 1, wfd is polled for writability. Note that since any |
147 | | * passed FD is always polled for error conditions, setting rfd_want_read=0 and |
148 | | * wfd_want_write=0 is not the same as passing -1 for both FDs. |
149 | | * |
150 | | * deadline is a timestamp to return at. If it is ossl_time_infinite(), the call |
151 | | * never times out. |
152 | | * |
153 | | * Returns 0 on error and 1 on success. Timeout expiry is considered a success |
154 | | * condition. We don't elaborate our return values here because the way we are |
155 | | * actually using this doesn't currently care. |
156 | | * |
157 | | * If mutex is non-NULL, it is assumed to be held for write and is unlocked for |
158 | | * the duration of the call. |
159 | | * |
160 | | * Precondition: mutex is NULL or is held for write (unchecked) |
161 | | * Postcondition: mutex is NULL or is held for write (unless |
162 | | * CRYPTO_THREAD_write_lock fails) |
163 | | */ |
164 | | static int poll_two_fds(int rfd, int rfd_want_read, |
165 | | int wfd, int wfd_want_write, |
166 | | OSSL_TIME deadline, |
167 | | CRYPTO_MUTEX *mutex) |
168 | 0 | { |
169 | | #if defined(OPENSSL_SYS_WINDOWS) || !defined(POLLIN) |
170 | | fd_set rfd_set, wfd_set, efd_set; |
171 | | OSSL_TIME now, timeout; |
172 | | struct timeval tv, *ptv; |
173 | | int maxfd, pres; |
174 | | |
175 | | # ifndef OPENSSL_SYS_WINDOWS |
176 | | /* |
177 | | * On Windows there is no relevant limit to the magnitude of a fd value (see |
178 | | * above). On *NIX the fd_set uses a bitmap and we must check the limit. |
179 | | */ |
180 | | if (rfd >= FD_SETSIZE || wfd >= FD_SETSIZE) |
181 | | return 0; |
182 | | # endif |
183 | | |
184 | | FD_ZERO(&rfd_set); |
185 | | FD_ZERO(&wfd_set); |
186 | | FD_ZERO(&efd_set); |
187 | | |
188 | | if (rfd != -1 && rfd_want_read) |
189 | | openssl_fdset(rfd, &rfd_set); |
190 | | if (wfd != -1 && wfd_want_write) |
191 | | openssl_fdset(wfd, &wfd_set); |
192 | | |
193 | | /* Always check for error conditions. */ |
194 | | if (rfd != -1) |
195 | | openssl_fdset(rfd, &efd_set); |
196 | | if (wfd != -1) |
197 | | openssl_fdset(wfd, &efd_set); |
198 | | |
199 | | maxfd = rfd; |
200 | | if (wfd > maxfd) |
201 | | maxfd = wfd; |
202 | | |
203 | | if (!ossl_assert(rfd != -1 || wfd != -1 |
204 | | || !ossl_time_is_infinite(deadline))) |
205 | | /* Do not block forever; should not happen. */ |
206 | | return 0; |
207 | | |
208 | | # if defined(OPENSSL_THREADS) |
209 | | if (mutex != NULL) |
210 | | ossl_crypto_mutex_unlock(mutex); |
211 | | # endif |
212 | | |
213 | | do { |
214 | | /* |
215 | | * select expects a timeout, not a deadline, so do the conversion. |
216 | | * Update for each call to ensure the correct value is used if we repeat |
217 | | * due to EINTR. |
218 | | */ |
219 | | if (ossl_time_is_infinite(deadline)) { |
220 | | ptv = NULL; |
221 | | } else { |
222 | | now = ossl_time_now(); |
223 | | /* |
224 | | * ossl_time_subtract saturates to zero so we don't need to check if |
225 | | * now > deadline. |
226 | | */ |
227 | | timeout = ossl_time_subtract(deadline, now); |
228 | | tv = ossl_time_to_timeval(timeout); |
229 | | ptv = &tv; |
230 | | } |
231 | | |
232 | | pres = select(maxfd + 1, &rfd_set, &wfd_set, &efd_set, ptv); |
233 | | } while (pres == -1 && get_last_socket_error_is_eintr()); |
234 | | |
235 | | # if defined(OPENSSL_THREADS) |
236 | | if (mutex != NULL) |
237 | | ossl_crypto_mutex_lock(mutex); |
238 | | # endif |
239 | | |
240 | | return pres < 0 ? 0 : 1; |
241 | | #else |
242 | 0 | int pres, timeout_ms; |
243 | 0 | OSSL_TIME now, timeout; |
244 | 0 | struct pollfd pfds[2] = {0}; |
245 | 0 | size_t npfd = 0; |
246 | |
247 | 0 | if (rfd == wfd) { |
248 | 0 | pfds[npfd].fd = rfd; |
249 | 0 | pfds[npfd].events = (rfd_want_read ? POLLIN : 0) |
250 | 0 | | (wfd_want_write ? POLLOUT : 0); |
251 | 0 | if (rfd >= 0 && pfds[npfd].events != 0) |
252 | 0 | ++npfd; |
253 | 0 | } else { |
254 | 0 | pfds[npfd].fd = rfd; |
255 | 0 | pfds[npfd].events = (rfd_want_read ? POLLIN : 0); |
256 | 0 | if (rfd >= 0 && pfds[npfd].events != 0) |
257 | 0 | ++npfd; |
258 | |
259 | 0 | pfds[npfd].fd = wfd; |
260 | 0 | pfds[npfd].events = (wfd_want_write ? POLLOUT : 0); |
261 | 0 | if (wfd >= 0 && pfds[npfd].events != 0) |
262 | 0 | ++npfd; |
263 | 0 | } |
264 | |
265 | 0 | if (!ossl_assert(npfd != 0 || !ossl_time_is_infinite(deadline))) |
266 | | /* Do not block forever; should not happen. */ |
267 | 0 | return 0; |
268 | | |
269 | 0 | # if defined(OPENSSL_THREADS) |
270 | 0 | if (mutex != NULL) |
271 | 0 | ossl_crypto_mutex_unlock(mutex); |
272 | 0 | # endif |
273 | |
274 | 0 | do { |
275 | 0 | if (ossl_time_is_infinite(deadline)) { |
276 | 0 | timeout_ms = -1; |
277 | 0 | } else { |
278 | 0 | now = ossl_time_now(); |
279 | 0 | timeout = ossl_time_subtract(deadline, now); |
280 | 0 | timeout_ms = ossl_time2ms(timeout); |
281 | 0 | } |
282 | |
283 | 0 | pres = poll(pfds, npfd, timeout_ms); |
284 | 0 | } while (pres == -1 && get_last_socket_error_is_eintr()); |
285 | |
286 | 0 | # if defined(OPENSSL_THREADS) |
287 | 0 | if (mutex != NULL) |
288 | 0 | ossl_crypto_mutex_lock(mutex); |
289 | 0 | # endif |
290 | |
291 | 0 | return pres < 0 ? 0 : 1; |
292 | 0 | #endif |
293 | 0 | } |
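To make the contract documented above concrete, here is a hedged usage sketch; since poll_two_fds() is file-static this is illustrative only, and the wrapper name and 'fd' argument are invented. The function takes an absolute deadline rather than a relative timeout, and deadline expiry still returns 1, so the caller must re-check its own wake-up condition afterwards.

static int example_wait_readable(int fd)
{
    /* Wait at most 50ms for 'fd' to become readable (or to report an error condition). */
    OSSL_TIME deadline = ossl_time_add(ossl_time_now(), ossl_ms2time(50));

    /* Returns 0 only on a non-timeout failure, e.g. fd >= FD_SETSIZE or a select/poll error. */
    return poll_two_fds(fd, /*rfd_want_read=*/1, fd, /*wfd_want_write=*/0,
                        deadline, /*mutex=*/NULL);
}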
294 | | |
295 | | static int poll_descriptor_to_fd(const BIO_POLL_DESCRIPTOR *d, int *fd) |
296 | 0 | { |
297 | 0 | if (d == NULL || d->type == BIO_POLL_DESCRIPTOR_TYPE_NONE) { |
298 | 0 | *fd = INVALID_SOCKET; |
299 | 0 | return 1; |
300 | 0 | } |
301 | | |
302 | 0 | if (d->type != BIO_POLL_DESCRIPTOR_TYPE_SOCK_FD |
303 | 0 | || d->value.fd == INVALID_SOCKET) |
304 | 0 | return 0; |
305 | | |
306 | 0 | *fd = d->value.fd; |
307 | 0 | return 1; |
308 | 0 | } |
309 | | |
310 | | /* |
311 | | * Poll up to two abstract poll descriptors. Currently we only support |
312 | | * poll descriptors which represent FDs. |
313 | | * |
314 | | * If mutex is non-NULL, it is assumed to be a lock currently held for write and is |
315 | | * unlocked for the duration of any wait. |
316 | | * |
317 | | * Precondition: mutex is NULL or is held for write (unchecked) |
318 | | * Postcondition: mutex is NULL or is held for write (unless |
319 | | * CRYPTO_THREAD_write_lock fails) |
320 | | */ |
321 | | static int poll_two_descriptors(const BIO_POLL_DESCRIPTOR *r, int r_want_read, |
322 | | const BIO_POLL_DESCRIPTOR *w, int w_want_write, |
323 | | OSSL_TIME deadline, |
324 | | CRYPTO_MUTEX *mutex) |
325 | 0 | { |
326 | 0 | int rfd, wfd; |
327 | |
328 | 0 | if (!poll_descriptor_to_fd(r, &rfd) |
329 | 0 | || !poll_descriptor_to_fd(w, &wfd)) |
330 | 0 | return 0; |
331 | | |
332 | 0 | return poll_two_fds(rfd, r_want_read, wfd, w_want_write, deadline, mutex); |
333 | 0 | } |
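This wrapper is driven with the reactor's own descriptors by ossl_quic_reactor_block_until_pred() below; the condensed sketch here (with an invented function name) only highlights how the read/write-desired flags produced by the last tick feed into the poll, mirroring that call.

static int example_wait_on_reactor(QUIC_REACTOR *rtor, CRYPTO_MUTEX *mutex)
{
    /* 'mutex' may be NULL, or a lock held for write which is released while waiting. */
    return poll_two_descriptors(ossl_quic_reactor_get_poll_r(rtor),
                                ossl_quic_reactor_net_read_desired(rtor),
                                ossl_quic_reactor_get_poll_w(rtor),
                                ossl_quic_reactor_net_write_desired(rtor),
                                ossl_quic_reactor_get_tick_deadline(rtor),
                                mutex);
}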
334 | | |
335 | | /* |
336 | | * Block until a predicate function evaluates to true. |
337 | | * |
338 | | * If mutex is non-NULL, it is assumed to be a lock currently held for write and is |
339 | | * unlocked for the duration of any wait. |
340 | | * |
341 | | * Precondition: Must hold channel write lock (unchecked) |
342 | | * Precondition: mutex is NULL or is held for write (unchecked) |
343 | | * Postcondition: mutex is NULL or is held for write (unless |
344 | | * CRYPTO_THREAD_write_lock fails) |
345 | | */ |
346 | | int ossl_quic_reactor_block_until_pred(QUIC_REACTOR *rtor, |
347 | | int (*pred)(void *arg), void *pred_arg, |
348 | | uint32_t flags, |
349 | | CRYPTO_MUTEX *mutex) |
350 | 0 | { |
351 | 0 | int res; |
352 | |
353 | 0 | for (;;) { |
354 | 0 | if ((flags & SKIP_FIRST_TICK) != 0) |
355 | 0 | flags &= ~SKIP_FIRST_TICK; |
356 | 0 | else |
357 | | /* best effort */ |
358 | 0 | ossl_quic_reactor_tick(rtor, 0); |
359 | |
360 | 0 | if ((res = pred(pred_arg)) != 0) |
361 | 0 | return res; |
362 | | |
363 | 0 | if (!poll_two_descriptors(ossl_quic_reactor_get_poll_r(rtor), |
364 | 0 | ossl_quic_reactor_net_read_desired(rtor), |
365 | 0 | ossl_quic_reactor_get_poll_w(rtor), |
366 | 0 | ossl_quic_reactor_net_write_desired(rtor), |
367 | 0 | ossl_quic_reactor_get_tick_deadline(rtor), |
368 | 0 | mutex)) |
369 | | /* |
370 | | * We don't actually care why the call succeeded (timeout, FD |
371 | | * readiness); we just call reactor_tick and start trying to do I/O |
372 | | * things again. If poll_two_fds returns 0, this is some other |
373 | | * non-timeout failure and we should stop here. |
374 | | * |
375 | | * TODO(QUIC FUTURE): In the future we could avoid unnecessary |
376 | | * syscalls by not retrying network I/O that isn't ready based |
377 | | * on the result of the poll call. However this might be difficult |
378 | | * because it requires we do the call to poll(2) or equivalent |
379 | | * syscall ourselves, whereas in the general case the application |
380 | | * does the polling and just calls SSL_handle_events(). |
381 | | * Implementing this optimisation in the future will probably |
382 | | * therefore require API changes. |
383 | | */ |
384 | 0 | return 0; |
385 | 0 | } |
386 | 0 | } |
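Finally, a hedged sketch of driving the blocking wait from outside this file; struct example_wait_ctx, example_pred and example_block are invented for illustration, and real callers pass predicates that inspect channel or stream state while holding the channel lock. Passing SKIP_FIRST_TICK instead of 0 for the flags suppresses the initial best-effort tick on the first loop iteration.

struct example_wait_ctx {
    int done;
};

static int example_pred(void *arg)
{
    struct example_wait_ctx *ctx = arg;

    return ctx->done; /* a nonzero return ends the wait */
}

static int example_block(QUIC_REACTOR *rtor, struct example_wait_ctx *ctx,
                         CRYPTO_MUTEX *mutex)
{
    /* Precondition (as above): 'mutex' is NULL or already held for write. */
    return ossl_quic_reactor_block_until_pred(rtor, example_pred, ctx,
                                              /*flags=*/0, mutex);
}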