/src/h2o/deps/quicly/lib/quicly.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2017 Fastly, Kazuho Oku |
3 | | * |
4 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
5 | | * of this software and associated documentation files (the "Software"), to |
6 | | * deal in the Software without restriction, including without limitation the |
7 | | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
8 | | * sell copies of the Software, and to permit persons to whom the Software is |
9 | | * furnished to do so, subject to the following conditions: |
10 | | * |
11 | | * The above copyright notice and this permission notice shall be included in |
12 | | * all copies or substantial portions of the Software. |
13 | | * |
14 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
17 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
18 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
19 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
20 | | * IN THE SOFTWARE. |
21 | | */ |
22 | | #include <assert.h> |
23 | | #include <inttypes.h> |
24 | | #include <arpa/inet.h> |
25 | | #include <sys/types.h> |
26 | | #include <netinet/in.h> |
27 | | #include <netinet/ip.h> |
28 | | #include <pthread.h> |
29 | | #include <stdarg.h> |
30 | | #include <stdio.h> |
31 | | #include <stdlib.h> |
32 | | #include <sys/socket.h> |
33 | | #include <sys/time.h> |
34 | | #include "khash.h" |
35 | | #include "quicly.h" |
36 | | #include "quicly/defaults.h" |
37 | | #include "quicly/sentmap.h" |
38 | | #include "quicly/pacer.h" |
39 | | #include "quicly/frame.h" |
40 | | #include "quicly/streambuf.h" |
41 | | #include "quicly/cc.h" |
42 | | #if QUICLY_USE_DTRACE |
43 | | #include "quicly-probes.h" |
44 | | #endif |
45 | | #include "quicly/retire_cid.h" |
46 | | |
47 | 0 | #define QUICLY_TLS_EXTENSION_TYPE_TRANSPORT_PARAMETERS_FINAL 0x39 |
48 | 0 | #define QUICLY_TLS_EXTENSION_TYPE_TRANSPORT_PARAMETERS_DRAFT 0xffa5 |
49 | | #define QUICLY_TRANSPORT_PARAMETER_ID_ORIGINAL_CONNECTION_ID 0 |
50 | | #define QUICLY_TRANSPORT_PARAMETER_ID_MAX_IDLE_TIMEOUT 1 |
51 | | #define QUICLY_TRANSPORT_PARAMETER_ID_STATELESS_RESET_TOKEN 2 |
52 | | #define QUICLY_TRANSPORT_PARAMETER_ID_MAX_UDP_PAYLOAD_SIZE 3 |
53 | | #define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_DATA 4 |
54 | | #define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL 5 |
55 | | #define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE 6 |
56 | | #define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_UNI 7 |
57 | | #define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_BIDI 8 |
58 | | #define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_UNI 9 |
59 | | #define QUICLY_TRANSPORT_PARAMETER_ID_ACK_DELAY_EXPONENT 10 |
60 | | #define QUICLY_TRANSPORT_PARAMETER_ID_MAX_ACK_DELAY 11 |
61 | | #define QUICLY_TRANSPORT_PARAMETER_ID_DISABLE_ACTIVE_MIGRATION 12 |
62 | | #define QUICLY_TRANSPORT_PARAMETER_ID_PREFERRED_ADDRESS 13 |
63 | | #define QUICLY_TRANSPORT_PARAMETER_ID_ACTIVE_CONNECTION_ID_LIMIT 14 |
64 | | #define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_SOURCE_CONNECTION_ID 15 |
65 | | #define QUICLY_TRANSPORT_PARAMETER_ID_RETRY_SOURCE_CONNECTION_ID 16 |
66 | | #define QUICLY_TRANSPORT_PARAMETER_ID_MAX_DATAGRAM_FRAME_SIZE 0x20 |
67 | | #define QUICLY_TRANSPORT_PARAMETER_ID_MIN_ACK_DELAY 0xff03de1a |
68 | | |
69 | | /** |
70 | | * maximum size of token that quicly accepts |
71 | | */ |
72 | 0 | #define QUICLY_MAX_TOKEN_LEN 512 |
73 | | /** |
74 | | * sends ACK bundled with PING, when number of gaps in the ack queue reaches or exceeds this threshold. This value should be much |
75 | | * smaller than QUICLY_MAX_RANGES. |
76 | | */ |
77 | 0 | #define QUICLY_NUM_ACK_BLOCKS_TO_INDUCE_ACKACK 8 |
78 | | |
79 | | KHASH_MAP_INIT_INT64(quicly_stream_t, quicly_stream_t *) |
80 | | |
81 | | #if QUICLY_USE_TRACER |
82 | 0 | #define QUICLY_TRACER(label, conn, ...) QUICLY_TRACER_##label(conn, __VA_ARGS__) |
83 | | #else |
84 | | #define QUICLY_TRACER(...) |
85 | | #endif |
86 | | |
87 | | #if QUICLY_USE_DTRACE |
88 | | #define QUICLY_PROBE(label, conn, ...) \ |
89 | | do { \ |
90 | | quicly_conn_t *_conn = (conn); \ |
91 | | if (PTLS_UNLIKELY(QUICLY_##label##_ENABLED())) \ |
92 | | QUICLY_##label(_conn, __VA_ARGS__); \ |
93 | | QUICLY_TRACER(label, _conn, __VA_ARGS__); \ |
94 | | } while (0) |
95 | | #define QUICLY_PROBE_ENABLED(label) QUICLY_##label##_ENABLED() |
96 | | #else |
97 | 0 | #define QUICLY_PROBE(label, conn, ...) QUICLY_TRACER(label, conn, __VA_ARGS__) |
98 | 0 | #define QUICLY_PROBE_ENABLED(label) 0 |
99 | | #endif |
100 | | #define QUICLY_PROBE_HEXDUMP(s, l) \ |
101 | | ({ \ |
102 | | size_t _l = (l); \ |
103 | | ptls_hexdump(alloca(_l * 2 + 1), (s), _l); \ |
104 | | }) |
105 | | #define QUICLY_PROBE_ESCAPE_UNSAFE_STRING(s, l) \ |
106 | | ({ \ |
107 | | size_t _l = (l); \ |
108 | | quicly_escape_unsafe_string(alloca(_l * 4 + 1), (s), _l); \ |
109 | | }) |
110 | | |
/**
 * A pair of crypto contexts that together provide packet protection for one direction.
 */
struct st_quicly_cipher_context_t {
    /**
     * AEAD used for protecting / unprotecting the packet payload
     */
    ptls_aead_context_t *aead;
    /**
     * cipher used for header protection
     */
    ptls_cipher_context_t *header_protection;
};
115 | | |
/**
 * Per-packet-number-space receive state, shared by the Initial, Handshake and Application
 * spaces (the latter two embed this as `super`).
 */
struct st_quicly_pn_space_t {
    /**
     * acks to be sent to remote peer
     */
    quicly_ranges_t ack_queue;
    /**
     * time at when the largest pn in the ack_queue has been received (or INT64_MAX if none)
     */
    int64_t largest_pn_received_at;
    /**
     * the lowest packet number that has not yet been received in this space (presumably also
     * the `expected` input for packet-number decoding — confirm in the receive path)
     */
    uint64_t next_expected_packet_number;
    /**
     * number of ACK-eliciting packets that have not been ACKed yet
     */
    uint32_t unacked_count;
    /**
     * ECN in the order of ECT(0), ECT(1), CE
     */
    uint64_t ecn_counts[3];
    /**
     * maximum number of ACK-eliciting packets to be queued before sending an ACK
     */
    uint32_t packet_tolerance;
    /**
     * boolean indicating if reorder should NOT trigger an immediate ack
     */
    uint8_t ignore_order;
};
146 | | |
/**
 * State of the Initial or Handshake packet-number space, extending the common space state
 * with the fixed per-epoch packet-protection contexts.
 */
struct st_quicly_handshake_space_t {
    struct st_quicly_pn_space_t super;
    /**
     * packet protection for each direction of this epoch
     */
    struct {
        struct st_quicly_cipher_context_t ingress;
        struct st_quicly_cipher_context_t egress;
    } cipher;
    /* size of the largest UDP payload received carrying a packet of this space — NOTE(review):
     * presumably consulted for amplification / MTU decisions; confirm at the use site (not
     * visible in this chunk) */
    uint16_t largest_ingress_udp_payload_size;
};
155 | | |
/**
 * State of the Application packet-number space, covering both 0-RTT and 1-RTT keys and the
 * key-update machinery.
 */
struct st_quicly_application_space_t {
    struct st_quicly_pn_space_t super;
    /**
     * packet protection contexts for both directions
     */
    struct {
        struct {
            struct {
                ptls_cipher_context_t *zero_rtt, *one_rtt;
            } header_protection;
            ptls_aead_context_t *aead[2]; /* 0-RTT uses aead[1], 1-RTT uses aead[key_phase] */
            uint8_t secret[PTLS_MAX_DIGEST_SIZE];
            /* key-phase counters — `prepared` / `decrypted` presumably track the highest phase
             * for which keys exist / have successfully decrypted a packet; confirm in the
             * key-update logic (not visible in this chunk) */
            struct {
                uint64_t prepared;
                uint64_t decrypted;
            } key_phase;
        } ingress;
        struct {
            struct st_quicly_cipher_context_t key;
            uint8_t secret[PTLS_MAX_DIGEST_SIZE];
            uint64_t key_phase;
            struct {
                /**
                 * PN at which key update was initiated. Set to UINT64_MAX once key update is acked.
                 */
                uint64_t last;
                /**
                 * PN at which key update should be initiated. Set to UINT64_MAX when key update cannot be initiated.
                 */
                uint64_t next;
            } key_update_pn;
        } egress;
    } cipher;
    /* boolean: whether 1-RTT packets can be sent (presumably set once 1-RTT egress keys are
     * installed — confirm where it is assigned) */
    int one_rtt_writable;
};
188 | | |
/**
 * State of one network path of a connection (see `paths[]` in st_quicly_conn_t).
 */
struct st_quicly_conn_path_t {
    /**
     * the address tuple identifying this path
     */
    struct {
        /**
         * remote address (must not be AF_UNSPEC)
         */
        quicly_address_t remote;
        /**
         * local address (may be AF_UNSPEC)
         */
        quicly_address_t local;
    } address;
    /**
     * DCID being used for the path indicated by the sequence number; or UINT64_MAX if yet to be assigned. Multile paths will share
     * the same value of zero if peer CID is zero-length.
     */
    uint64_t dcid;
    /**
     * Maximum number of packets being received by the connection when a packet was last received on this path. This value is used
     * to determine the least-recently-used path which will be recycled.
     */
    uint64_t packet_last_received;
    /**
     * `send_at` indicates when a PATH_CHALLENGE frame carrying `data` should be sent, or if the value is INT64_MAX the path is
     * validated
     */
    struct {
        int64_t send_at;
        uint64_t num_sent;
        uint8_t data[QUICLY_PATH_CHALLENGE_DATA_LEN];
    } path_challenge;
    /**
     * path response to be sent, if `send_` is set
     */
    struct {
        uint8_t send_;
        uint8_t data[QUICLY_PATH_CHALLENGE_DATA_LEN];
    } path_response;
    /**
     * if this path is the initial path (i.e., the one on which handshake is done)
     */
    uint8_t initial : 1;
    /**
     * if only probe packets have been received (and hence have been sent) on the path
     */
    uint8_t probe_only : 1;
    /**
     * number of packets being sent / received on the path
     */
    struct {
        uint64_t sent;
        uint64_t received;
    } num_packets;
};
242 | | |
/**
 * The connection object. `super` holds the public portion; the rest is private state grouped
 * into receive-side (`ingress`), send-side (`egress`), crypto, and miscellaneous sections.
 */
struct st_quicly_conn_t {
    struct _st_quicly_conn_public_t super;
    /**
     * `paths[0]` is the non-probing path that is guaranteed to exist, others are backups that may be NULL
     */
    struct st_quicly_conn_path_t *paths[QUICLY_LOCAL_ACTIVE_CONNECTION_ID_LIMIT];
    /**
     * the initial context
     */
    struct st_quicly_handshake_space_t *initial;
    /**
     * the handshake context
     */
    struct st_quicly_handshake_space_t *handshake;
    /**
     * 0-RTT and 1-RTT context
     */
    struct st_quicly_application_space_t *application;
    /**
     * hashtable of streams
     */
    khash_t(quicly_stream_t) * streams;
    /**
     * receive-side (ingress) connection-level state
     */
    struct {
        /**
         * connection-level flow control for incoming data; `bytes_consumed` is the amount consumed by the application,
         * `sender` manages transmission of MAX_DATA frames (presumed from the maxsender type — confirm in receive path)
         */
        struct {
            uint64_t bytes_consumed;
            quicly_maxsender_t sender;
        } max_data;
        /**
         * senders of MAX_STREAMS frames for unidirectional / bidirectional streams
         */
        struct {
            quicly_maxsender_t uni, bidi;
        } max_streams;
        /**
         * sequence tracking for ACK_FREQUENCY frames received from the peer (presumed from name — confirm in frame handler)
         */
        struct {
            uint64_t next_sequence;
        } ack_frequency;
    } ingress;
    /**
     * send-side (egress) connection-level state
     */
    struct {
        /**
         * loss recovery
         */
        quicly_loss_t loss;
        /**
         * next or the currently encoding packet number
         */
        uint64_t packet_number;
        /**
         * next PN to be skipped
         */
        uint64_t next_pn_to_skip;
        /**
         * maximum size of UDP payloads being sent
         */
        uint16_t max_udp_payload_size;
        /**
         * valid if state is CLOSING
         */
        struct {
            uint64_t error_code;
            uint64_t frame_type; /* UINT64_MAX if application close */
            const char *reason_phrase;
            unsigned long num_packets_received;
        } connection_close;
        /**
         * connection-level flow control for outgoing data: amount permitted by the peer, and amount sent
         */
        struct {
            uint64_t permitted;
            uint64_t sent;
        } max_data;
        /**
         * number of streams the peer allows us to open (uni / bidi), with STREAMS_BLOCKED send state
         */
        struct {
            struct st_quicly_max_streams_t {
                uint64_t count;
                quicly_maxsender_t blocked_sender;
            } uni, bidi;
        } max_streams;
        /**
         * state for NEW_TOKEN frames being sent — generation counter, largest acked generation, and in-flight count
         * (presumed from names; confirm in the NEW_TOKEN send / ack handlers)
         */
        struct {
            uint64_t generation;
            uint64_t max_acked;
            uint32_t num_inflight;
        } new_token;
        /**
         * when the next ACK_FREQUENCY frame should be sent (see ack_frequency_set_next_update_at), and its sequence number
         */
        struct {
            int64_t update_at;
            uint64_t sequence;
        } ack_frequency;
        /**
         * time when the last ack-eliciting (retransmittable) packet was sent
         */
        int64_t last_retransmittable_sent_at;
        /**
         * when to send an ACK, connection close frames or to destroy the connection
         */
        int64_t send_ack_at;
        /**
         * when a PATH_CHALLENGE or PATH_RESPONSE frame is to be sent on any path
         */
        int64_t send_probe_at;
        /**
         * congestion control
         */
        quicly_cc_t cc;
        /**
         * Next PN to be used when the path is initialized or promoted. As loss recovery / CC is reset upon path promotion, ACKs for
         * for packets with PN below this property are ignored.
         */
        uint64_t pn_path_start;
        /**
         * pacer
         */
        quicly_pacer_t *pacer;
        /**
         * ECN
         */
        struct {
            enum en_quicly_ecn_state { QUICLY_ECN_OFF, QUICLY_ECN_ON, QUICLY_ECN_PROBING } state;
            uint64_t counts[QUICLY_NUM_EPOCHS][3];
        } ecn;
        /**
         * things to be sent at the stream-level, that are not governed by the stream scheduler
         */
        struct {
            /**
             * list of blocked streams (sorted in ascending order of stream_ids)
             */
            struct {
                quicly_linklist_t uni;
                quicly_linklist_t bidi;
            } blocked;
            /**
             * list of streams with pending control data (e.g., RESET_STREAM)
             */
            quicly_linklist_t control;
        } pending_streams;
        /**
         * send state for DATA_BLOCKED frame that corresponds to the current value of `conn->egress.max_data.permitted`
         */
        quicly_sender_state_t data_blocked;
        /**
         * bit vector indicating if there's any pending crypto data (the insignificant 4 bits), or other non-stream data
         */
        uint8_t pending_flows;
/* The flags below indicate if the respective frames have to be sent or not. There are no false positives. */
#define QUICLY_PENDING_FLOW_NEW_TOKEN_BIT (1 << 4)
#define QUICLY_PENDING_FLOW_HANDSHAKE_DONE_BIT (1 << 5)
/* Indicates that MAX_STREAMS, MAX_DATA, DATA_BLOCKED, STREAMS_BLOCKED, NEW_CONNECTION_ID _might_ have to be sent. There could be
 * false positives; logic for sending each of these frames have the capability of detecting such false positives. The purpose of
 * this bit is to consolidate information as an optimization. */
#define QUICLY_PENDING_FLOW_OTHERS_BIT (1 << 6)
        /**
         * if a congestion-window jumpstart should be attempted on the next send opportunity (presumed from name — confirm in
         * the send path)
         */
        uint8_t try_jumpstart : 1;
        /**
         * pending RETIRE_CONNECTION_ID frames to be sent
         */
        quicly_retire_cid_set_t retire_cid;
        /**
         * payload of DATAGRAM frames to be sent
         */
        struct {
            ptls_iovec_t payloads[10];
            size_t count;
        } datagram_frame_payloads;
        /**
         * delivery rate estimator
         */
        quicly_ratemeter_t ratemeter;
    } egress;
    /**
     * crypto data
     */
    struct {
        ptls_t *tls;
        ptls_handshake_properties_t handshake_properties;
        /* raw extensions and backing buffer used for exchanging QUIC transport parameters via TLS */
        struct {
            ptls_raw_extension_t ext[3];
            ptls_buffer_t buf;
        } transport_params;
        /* set while an asynchronous TLS operation is in flight (presumed from name — confirm at set/clear sites) */
        unsigned async_in_progress : 1;
    } crypto;
    /**
     * token (if the token is a Retry token can be determined by consulting the length of retry_scid)
     */
    ptls_iovec_t token;
    /**
     * len=UINT8_MAX if Retry was not used, use client_received_retry() to check
     */
    quicly_cid_t retry_scid;
    /**
     * idle-timeout state
     */
    struct {
        /**
         * The moment when the idle timeout fires (including the additional 3 PTO). The value is set to INT64_MAX while the
         * handshake is in progress.
         */
        int64_t at;
        /**
         * boolean indicating if the idle timer should be rearmed upon the next send
         */
        uint8_t should_rearm_on_send : 1;
    } idle_timeout;
    /**
     * records the time when this connection was created
     */
    int64_t created_at;
    /**
     * structure to hold various data used internally
     */
    struct {
        /**
         * This value holds current time that remains constant while quicly functions that deal with time are running. Only
         * available when the lock is held using `lock_now`.
         */
        int64_t now;
        /**
         * number of nested `lock_now` holds; `now` is cleared when this drops back to zero (see unlock_now)
         */
        uint8_t lock_count;
        struct {
            /**
             * This cache is used to concatenate acked ranges of streams before processing them, reducing the frequency of function
             * calls to `quicly_sendstate_t` and to the application-level send window management callbacks. This approach works,
             * because in most cases acks will contain contiguous ranges of a single stream.
             */
            struct {
                /**
                 * set to INT64_MIN when the cache is invalid
                 */
                quicly_stream_id_t stream_id;
                quicly_sendstate_sent_t args;
            } active_acked_cache;
        } on_ack_stream;
    } stash;
};
499 | | |
500 | | #if QUICLY_USE_TRACER |
501 | | #include "quicly-tracer.h" |
502 | | #endif |
503 | | |
/**
 * Cursor and context used while iterating over the frames of a decrypted packet payload.
 */
struct st_quicly_handle_payload_state_t {
    const uint8_t *src, *const end; /* current read position and end of the payload */
    size_t epoch;                   /* epoch (packet-number space) in which the packet was received */
    size_t path_index;              /* index of the path on which the packet arrived (presumably into conn->paths — confirm) */
    uint64_t frame_type;            /* type of the frame currently being processed */
};
510 | | |
511 | | static void crypto_stream_receive(quicly_stream_t *stream, size_t off, const void *src, size_t len); |
512 | | |
513 | | static const quicly_stream_callbacks_t crypto_stream_callbacks = {quicly_streambuf_destroy, quicly_streambuf_egress_shift, |
514 | | quicly_streambuf_egress_emit, NULL, crypto_stream_receive}; |
515 | | |
516 | | static int update_traffic_key_cb(ptls_update_traffic_key_t *self, ptls_t *tls, int is_enc, size_t epoch, const void *secret); |
517 | | static quicly_error_t initiate_close(quicly_conn_t *conn, quicly_error_t err, uint64_t frame_type, const char *reason_phrase); |
518 | | static quicly_error_t handle_close(quicly_conn_t *conn, quicly_error_t err, uint64_t frame_type, ptls_iovec_t reason_phrase); |
519 | | static quicly_error_t discard_sentmap_by_epoch(quicly_conn_t *conn, unsigned ack_epochs); |
520 | | |
/* sentinel stored in place of a CID plaintext that could not be (or was not) decrypted; see quicly_decode_packet */
quicly_cid_plaintext_t quicly_cid_plaintext_invalid = {.node_id = UINT64_MAX, .thread_id = 0xffffff};

/* transport parameter values assumed for the peer until its actual values are received */
static const quicly_transport_parameters_t default_transport_params = {.max_udp_payload_size = QUICLY_DEFAULT_MAX_UDP_PAYLOAD_SIZE,
                                                                       .ack_delay_exponent = QUICLY_DEFAULT_ACK_DELAY_EXPONENT,
                                                                       .max_ack_delay = QUICLY_DEFAULT_MAX_ACK_DELAY,
                                                                       .min_ack_delay_usec = UINT64_MAX,
                                                                       .active_connection_id_limit =
                                                                           QUICLY_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT};
529 | | |
530 | | const quicly_salt_t *quicly_get_salt(uint32_t protocol_version) |
531 | 0 | { |
532 | 0 | static const quicly_salt_t |
533 | 0 | v1 = {.initial = {0x38, 0x76, 0x2c, 0xf7, 0xf5, 0x59, 0x34, 0xb3, 0x4d, 0x17, |
534 | 0 | 0x9a, 0xe6, 0xa4, 0xc8, 0x0c, 0xad, 0xcc, 0xbb, 0x7f, 0x0a}, |
535 | 0 | .retry = {.key = {0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a, 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e}, |
536 | 0 | .iv = {0x46, 0x15, 0x99, 0xd3, 0x5d, 0x63, 0x2b, 0xf2, 0x23, 0x98, 0x25, 0xbb}}}, |
537 | 0 | draft29 = {.initial = {0xaf, 0xbf, 0xec, 0x28, 0x99, 0x93, 0xd2, 0x4c, 0x9e, 0x97, |
538 | 0 | 0x86, 0xf1, 0x9c, 0x61, 0x11, 0xe0, 0x43, 0x90, 0xa8, 0x99}, |
539 | 0 | .retry = {.key = {0xcc, 0xce, 0x18, 0x7e, 0xd0, 0x9a, 0x09, 0xd0, 0x57, 0x28, 0x15, 0x5a, 0x6c, 0xb9, 0x6b, |
540 | 0 | 0xe1}, |
541 | 0 | .iv = {0xe5, 0x49, 0x30, 0xf9, 0x7f, 0x21, 0x36, 0xf0, 0x53, 0x0a, 0x8c, 0x1c}}}, |
542 | 0 | draft27 = { |
543 | 0 | .initial = {0xc3, 0xee, 0xf7, 0x12, 0xc7, 0x2e, 0xbb, 0x5a, 0x11, 0xa7, |
544 | 0 | 0xd2, 0x43, 0x2b, 0xb4, 0x63, 0x65, 0xbe, 0xf9, 0xf5, 0x02}, |
545 | 0 | .retry = {.key = {0x4d, 0x32, 0xec, 0xdb, 0x2a, 0x21, 0x33, 0xc8, 0x41, 0xe4, 0x04, 0x3d, 0xf2, 0x7d, 0x44, 0x30}, |
546 | 0 | .iv = {0x4d, 0x16, 0x11, 0xd0, 0x55, 0x13, 0xa5, 0x52, 0xc5, 0x87, 0xd5, 0x75}}}; |
547 | |
|
548 | 0 | switch (protocol_version) { |
549 | 0 | case QUICLY_PROTOCOL_VERSION_1: |
550 | 0 | return &v1; |
551 | 0 | case QUICLY_PROTOCOL_VERSION_DRAFT29: |
552 | 0 | return &draft29; |
553 | 0 | case QUICLY_PROTOCOL_VERSION_DRAFT27: |
554 | 0 | return &draft27; |
555 | 0 | break; |
556 | 0 | default: |
557 | 0 | return NULL; |
558 | 0 | } |
559 | 0 | } |
560 | | |
561 | | static void lock_now(quicly_conn_t *conn, int is_reentrant) |
562 | 0 | { |
563 | 0 | if (conn->stash.now == 0) { |
564 | 0 | assert(conn->stash.lock_count == 0); |
565 | 0 | conn->stash.now = conn->super.ctx->now->cb(conn->super.ctx->now); |
566 | 0 | } else { |
567 | 0 | assert(is_reentrant && "caller must be reentrant"); |
568 | 0 | assert(conn->stash.lock_count != 0); |
569 | 0 | } |
570 | | |
571 | 0 | ++conn->stash.lock_count; |
572 | 0 | } |
573 | | |
574 | | static void unlock_now(quicly_conn_t *conn) |
575 | 0 | { |
576 | 0 | assert(conn->stash.now != 0); |
577 | | |
578 | 0 | if (--conn->stash.lock_count == 0) |
579 | 0 | conn->stash.now = 0; |
580 | 0 | } |
581 | | |
582 | | static void set_address(quicly_address_t *addr, struct sockaddr *sa) |
583 | 0 | { |
584 | 0 | if (sa == NULL) { |
585 | 0 | addr->sa.sa_family = AF_UNSPEC; |
586 | 0 | return; |
587 | 0 | } |
588 | | |
589 | 0 | switch (sa->sa_family) { |
590 | 0 | case AF_UNSPEC: |
591 | 0 | addr->sa.sa_family = AF_UNSPEC; |
592 | 0 | break; |
593 | 0 | case AF_INET: |
594 | 0 | addr->sin = *(struct sockaddr_in *)sa; |
595 | 0 | break; |
596 | 0 | case AF_INET6: |
597 | 0 | addr->sin6 = *(struct sockaddr_in6 *)sa; |
598 | 0 | break; |
599 | 0 | default: |
600 | 0 | memset(addr, 0xff, sizeof(*addr)); |
601 | 0 | assert(!"unexpected address type"); |
602 | 0 | break; |
603 | 0 | } |
604 | 0 | } |
605 | | |
606 | | static ptls_cipher_suite_t *get_aes128gcmsha256(quicly_context_t *ctx) |
607 | 0 | { |
608 | 0 | ptls_cipher_suite_t **cs; |
609 | |
|
610 | 0 | for (cs = ctx->tls->cipher_suites;; ++cs) { |
611 | 0 | assert(cs != NULL); |
612 | 0 | if ((*cs)->id == PTLS_CIPHER_SUITE_AES_128_GCM_SHA256) |
613 | 0 | break; |
614 | 0 | } |
615 | 0 | return *cs; |
616 | 0 | } |
617 | | |
618 | | static inline uint8_t get_epoch(uint8_t first_byte) |
619 | 0 | { |
620 | 0 | if (!QUICLY_PACKET_IS_LONG_HEADER(first_byte)) |
621 | 0 | return QUICLY_EPOCH_1RTT; |
622 | | |
623 | 0 | switch (first_byte & QUICLY_PACKET_TYPE_BITMASK) { |
624 | 0 | case QUICLY_PACKET_TYPE_INITIAL: |
625 | 0 | return QUICLY_EPOCH_INITIAL; |
626 | 0 | case QUICLY_PACKET_TYPE_HANDSHAKE: |
627 | 0 | return QUICLY_EPOCH_HANDSHAKE; |
628 | 0 | case QUICLY_PACKET_TYPE_0RTT: |
629 | 0 | return QUICLY_EPOCH_0RTT; |
630 | 0 | default: |
631 | 0 | assert(!"FIXME"); |
632 | 0 | } |
633 | 0 | } |
634 | | |
635 | | static ptls_aead_context_t *create_retry_aead(quicly_context_t *ctx, uint32_t protocol_version, int is_enc) |
636 | 0 | { |
637 | 0 | const quicly_salt_t *salt = quicly_get_salt(protocol_version); |
638 | 0 | assert(salt != NULL); |
639 | | |
640 | 0 | ptls_cipher_suite_t *algo = get_aes128gcmsha256(ctx); |
641 | 0 | ptls_aead_context_t *aead = ptls_aead_new_direct(algo->aead, is_enc, salt->retry.key, salt->retry.iv); |
642 | 0 | assert(aead != NULL); |
643 | 0 | return aead; |
644 | 0 | } |
645 | | |
646 | | static void dispose_cipher(struct st_quicly_cipher_context_t *ctx) |
647 | 0 | { |
648 | 0 | ptls_aead_free(ctx->aead); |
649 | 0 | ptls_cipher_free(ctx->header_protection); |
650 | 0 | } |
651 | | |
652 | | static void clear_datagram_frame_payloads(quicly_conn_t *conn) |
653 | 0 | { |
654 | 0 | for (size_t i = 0; i != conn->egress.datagram_frame_payloads.count; ++i) { |
655 | 0 | free(conn->egress.datagram_frame_payloads.payloads[i].base); |
656 | 0 | conn->egress.datagram_frame_payloads.payloads[i] = ptls_iovec_init(NULL, 0); |
657 | 0 | } |
658 | 0 | conn->egress.datagram_frame_payloads.count = 0; |
659 | 0 | } |
660 | | |
661 | | static int is_retry(quicly_conn_t *conn) |
662 | 0 | { |
663 | 0 | return conn->retry_scid.len != UINT8_MAX; |
664 | 0 | } |
665 | | |
666 | | static int needs_cid_auth(quicly_conn_t *conn) |
667 | 0 | { |
668 | 0 | switch (conn->super.version) { |
669 | 0 | case QUICLY_PROTOCOL_VERSION_1: |
670 | 0 | case QUICLY_PROTOCOL_VERSION_DRAFT29: |
671 | 0 | return 1; |
672 | 0 | default: |
673 | 0 | return 0; |
674 | 0 | } |
675 | 0 | } |
676 | | |
/**
 * Returns the duration for which sentmap entries should be retained, derived from the
 * loss-recovery state and the peer's max_ack_delay transport parameter.
 */
static int64_t get_sentmap_expiration_time(quicly_conn_t *conn)
{
    return quicly_loss_get_sentmap_expiration_time(&conn->egress.loss, conn->super.remote.transport_params.max_ack_delay);
}
681 | | |
/**
 * converts ECN bits to index in the order of ACK-ECN field (i.e., ECT(0) -> 0, ECT(1) -> 1, CE -> 2)
 */
static size_t get_ecn_index_from_bits(uint8_t bits)
{
    assert(1 <= bits && bits <= 3);

    /* explicit mapping; equivalent to the bit trick `(18 >> bits) & 3` */
    switch (bits) {
    case 2: /* ECT(0) */
        return 0;
    case 1: /* ECT(1) */
        return 1;
    default: /* 3: CE */
        return 2;
    }
}
690 | | |
/**
 * Records the outcome of ECN validation for the connection — the new state must be either ON
 * (usable) or OFF (failed) — bumping the corresponding statistics counter and emitting
 * probe/log events. Presumably called when ECN probing concludes (confirm at call sites).
 */
static void update_ecn_state(quicly_conn_t *conn, enum en_quicly_ecn_state new_state)
{
    assert(new_state == QUICLY_ECN_ON || new_state == QUICLY_ECN_OFF);

    conn->egress.ecn.state = new_state;
    if (new_state == QUICLY_ECN_ON) {
        ++conn->super.stats.num_paths.ecn_validated;
    } else {
        ++conn->super.stats.num_paths.ecn_failed;
    }

    QUICLY_PROBE(ECN_VALIDATION, conn, conn->stash.now, (int)new_state);
    QUICLY_LOG_CONN(ecn_validation, conn, { PTLS_LOG_ELEMENT_SIGNED(state, (int)new_state); });
}
705 | | |
706 | | static void ack_frequency_set_next_update_at(quicly_conn_t *conn) |
707 | 0 | { |
708 | 0 | if (conn->super.remote.transport_params.min_ack_delay_usec != UINT64_MAX) |
709 | 0 | conn->egress.ack_frequency.update_at = conn->stash.now + get_sentmap_expiration_time(conn); |
710 | 0 | } |
711 | | |
/**
 * Parses one QUIC packet starting at `datagram + *off`, filling `packet` and advancing
 * `*off` past the packet. Returns the number of octets consumed, or SIZE_MAX on malformed
 * input. No decryption is performed here; `packet->decrypted.pn` is initialized to
 * UINT64_MAX.
 */
size_t quicly_decode_packet(quicly_context_t *ctx, quicly_decoded_packet_t *packet, const uint8_t *datagram, size_t datagram_size,
                            size_t *off)
{
    const uint8_t *src = datagram, *src_end = datagram + datagram_size;

    assert(*off <= datagram_size);

    packet->octets = ptls_iovec_init(src + *off, datagram_size - *off);
    if (packet->octets.len < 2)
        goto Error;
    /* datagram_size is recorded only for the first packet of a datagram */
    packet->datagram_size = *off == 0 ? datagram_size : 0;
    packet->token = ptls_iovec_init(NULL, 0);
    packet->decrypted.pn = UINT64_MAX;
    packet->ecn = 0; /* non-ECT */

    /* move the cursor to the second byte */
    src += *off + 1;

    if (QUICLY_PACKET_IS_LONG_HEADER(packet->octets.base[0])) {
        /* long header */
        uint64_t rest_length;
        if (src_end - src < 5)
            goto Error;
        packet->version = quicly_decode32(&src);
        packet->cid.dest.encrypted.len = *src++;
        if (src_end - src < packet->cid.dest.encrypted.len + 1)
            goto Error;
        packet->cid.dest.encrypted.base = (uint8_t *)src;
        src += packet->cid.dest.encrypted.len;
        packet->cid.src.len = *src++;
        if (src_end - src < packet->cid.src.len)
            goto Error;
        packet->cid.src.base = (uint8_t *)src;
        src += packet->cid.src.len;
        switch (packet->octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) {
        case QUICLY_PACKET_TYPE_INITIAL:
        case QUICLY_PACKET_TYPE_0RTT:
            /* Initial / 0-RTT: the DCID may be client-generated and hence undecryptable; fall
             * back to the invalid sentinel rather than rejecting the packet */
            if (ctx->cid_encryptor == NULL || packet->cid.dest.encrypted.len == 0 ||
                ctx->cid_encryptor->decrypt_cid(ctx->cid_encryptor, &packet->cid.dest.plaintext, packet->cid.dest.encrypted.base,
                                                packet->cid.dest.encrypted.len) == SIZE_MAX)
                packet->cid.dest.plaintext = quicly_cid_plaintext_invalid;
            packet->cid.dest.might_be_client_generated = 1;
            break;
        default:
            /* other long-header packets must carry a DCID we can decrypt, when a cid_encryptor
             * is in use */
            if (ctx->cid_encryptor != NULL) {
                if (packet->cid.dest.encrypted.len == 0)
                    goto Error;
                if (ctx->cid_encryptor->decrypt_cid(ctx->cid_encryptor, &packet->cid.dest.plaintext,
                                                    packet->cid.dest.encrypted.base, packet->cid.dest.encrypted.len) == SIZE_MAX)
                    goto Error;
            } else {
                packet->cid.dest.plaintext = quicly_cid_plaintext_invalid;
            }
            packet->cid.dest.might_be_client_generated = 0;
            break;
        }
        switch (packet->version) {
        case QUICLY_PROTOCOL_VERSION_1:
        case QUICLY_PROTOCOL_VERSION_DRAFT29:
        case QUICLY_PROTOCOL_VERSION_DRAFT27:
            /* these are the recognized versions, and they share the same packet header format */
            if ((packet->octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) == QUICLY_PACKET_TYPE_RETRY) {
                /* retry */
                if (src_end - src <= PTLS_AESGCM_TAG_SIZE)
                    goto Error;
                packet->token = ptls_iovec_init(src, src_end - src - PTLS_AESGCM_TAG_SIZE);
                src += packet->token.len;
                packet->encrypted_off = src - packet->octets.base;
            } else {
                /* coalescible long header packet */
                if ((packet->octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) == QUICLY_PACKET_TYPE_INITIAL) {
                    /* initial has a token */
                    uint64_t token_len;
                    if ((token_len = quicly_decodev(&src, src_end)) == UINT64_MAX)
                        goto Error;
                    if (src_end - src < token_len)
                        goto Error;
                    packet->token = ptls_iovec_init(src, token_len);
                    src += token_len;
                }
                if ((rest_length = quicly_decodev(&src, src_end)) == UINT64_MAX)
                    goto Error;
                if (rest_length < 1)
                    goto Error;
                if (src_end - src < rest_length)
                    goto Error;
                packet->encrypted_off = src - packet->octets.base;
                /* trim octets.len to this packet's boundary, so that coalesced packets that
                 * follow can be parsed on the next call */
                packet->octets.len = packet->encrypted_off + rest_length;
            }
            break;
        default:
            /* VN packet or packets of unknown version cannot be parsed. `encrypted_off` is set to the first byte after SCID. */
            packet->encrypted_off = src - packet->octets.base;
        }
        packet->_is_stateless_reset_cached = QUICLY__DECODED_PACKET_CACHED_NOT_STATELESS_RESET;
    } else {
        /* short header */
        if (ctx->cid_encryptor != NULL) {
            if (src_end - src < QUICLY_MAX_CID_LEN_V1)
                goto Error;
            size_t local_cidl = ctx->cid_encryptor->decrypt_cid(ctx->cid_encryptor, &packet->cid.dest.plaintext, src, 0);
            if (local_cidl == SIZE_MAX)
                goto Error;
            packet->cid.dest.encrypted = ptls_iovec_init(src, local_cidl);
            src += local_cidl;
        } else {
            packet->cid.dest.encrypted = ptls_iovec_init(NULL, 0);
            packet->cid.dest.plaintext = quicly_cid_plaintext_invalid;
        }
        packet->cid.dest.might_be_client_generated = 0;
        packet->cid.src = ptls_iovec_init(NULL, 0);
        packet->version = 0;
        packet->encrypted_off = src - packet->octets.base;
        packet->_is_stateless_reset_cached = QUICLY__DECODED_PACKET_CACHED_MAYBE_STATELESS_RESET;
    }

    *off += packet->octets.len;
    return packet->octets.len;

Error:
    return SIZE_MAX;
}
834 | | |
uint64_t quicly_determine_packet_number(uint32_t truncated, size_t num_bits, uint64_t expected)
{
    /* Reconstructs the full packet number from a truncated wire encoding of `num_bits` bits, choosing the value closest to
     * `expected` (see RFC 9000, Section 17.1 and Appendix A). */
    uint64_t window = (uint64_t)1 << num_bits;
    uint64_t half = window / 2;
    uint64_t pn = (expected & ~(window - 1)) | truncated;

    if (pn + half <= expected) {
        /* candidate is more than half a window below the expectation; wrap up */
        pn += window;
    } else if (pn > expected + half && pn >= window) {
        /* candidate is more than half a window above the expectation; wrap down (unless that would go negative) */
        pn -= window;
    }
    return pn;
}
845 | | |
static void assert_consistency(quicly_conn_t *conn, int timer_must_be_in_future)
{
    /* Sanity-checks the relationship between the connection state and the egress timers; effective only in builds where
     * `assert` is compiled in. */
    if (conn->super.state >= QUICLY_STATE_CLOSING) {
        /* once closing, the only timer checked is `send_ack_at` (used there for scheduling CONNECTION_CLOSE retransmission) */
        assert(!timer_must_be_in_future || conn->stash.now < conn->egress.send_ack_at);
        return;
    }

    if (conn->egress.loss.sentmap.bytes_in_flight != 0 || conn->super.remote.address_validation.send_probe) {
        /* something is in flight (or a probe is owed): the loss alarm must be armed */
        assert(conn->egress.loss.alarm_at != INT64_MAX);
    } else {
        /* nothing in flight: no time-threshold loss detection should be pending */
        assert(conn->egress.loss.loss_time == INT64_MAX);
    }
    /* Allow timers not in the future when the remote peer is not yet validated, since we may not be able to send packets even when
     * timers fire. */
    if (timer_must_be_in_future && conn->super.remote.address_validation.validated)
        assert(conn->stash.now < conn->egress.loss.alarm_at);
}
863 | | |
864 | | static quicly_error_t on_invalid_ack(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent) |
865 | 0 | { |
866 | 0 | if (acked) |
867 | 0 | return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION; |
868 | 0 | return 0; |
869 | 0 | } |
870 | | |
static uint64_t calc_next_pn_to_skip(ptls_context_t *tlsctx, uint64_t next_pn, uint32_t cwnd, uint64_t mtu)
{
    /* Returns the next packet number to deliberately skip (a defense against opportunistic ACK attacks). Random values are
     * drawn in batches of 8 and cached per-thread to amortize the cost of `random_bytes`. */
    static __thread struct {
        uint32_t values[8];
        size_t off; /* number of unconsumed entries remaining in `values`; 0 triggers a refill */
    } cached_rand;

    if (cached_rand.off == 0) {
        tlsctx->random_bytes(cached_rand.values, sizeof(cached_rand.values));
        cached_rand.off = PTLS_ELEMENTSOF(cached_rand.values);
    }

    /* on average, skip one PN per every min(256 packets, 8 * CWND) */
    uint32_t packet_cwnd = cwnd / mtu;
    if (packet_cwnd < 32)
        packet_cwnd = 32;
    uint64_t skip_after = cached_rand.values[--cached_rand.off] % (16 * packet_cwnd);
    return next_pn + 1 + skip_after;
}
890 | | |
891 | | static void init_max_streams(struct st_quicly_max_streams_t *m) |
892 | 0 | { |
893 | 0 | m->count = 0; |
894 | 0 | quicly_maxsender_init(&m->blocked_sender, -1); |
895 | 0 | } |
896 | | |
897 | | static quicly_error_t update_max_streams(struct st_quicly_max_streams_t *m, uint64_t count) |
898 | 0 | { |
899 | 0 | if (count > (uint64_t)1 << 60) |
900 | 0 | return QUICLY_TRANSPORT_ERROR_STREAM_LIMIT; |
901 | | |
902 | 0 | if (m->count < count) { |
903 | 0 | m->count = count; |
904 | 0 | if (m->blocked_sender.max_acked < count) |
905 | 0 | m->blocked_sender.max_acked = count; |
906 | 0 | } |
907 | |
|
908 | 0 | return 0; |
909 | 0 | } |
910 | | |
911 | | int quicly_connection_is_ready(quicly_conn_t *conn) |
912 | 0 | { |
913 | 0 | return conn->application != NULL; |
914 | 0 | } |
915 | | |
916 | | static int stream_is_destroyable(quicly_stream_t *stream) |
917 | 0 | { |
918 | 0 | if (!quicly_recvstate_transfer_complete(&stream->recvstate)) |
919 | 0 | return 0; |
920 | 0 | if (!quicly_sendstate_transfer_complete(&stream->sendstate)) |
921 | 0 | return 0; |
922 | 0 | switch (stream->_send_aux.reset_stream.sender_state) { |
923 | 0 | case QUICLY_SENDER_STATE_NONE: |
924 | 0 | case QUICLY_SENDER_STATE_ACKED: |
925 | 0 | break; |
926 | 0 | default: |
927 | 0 | return 0; |
928 | 0 | } |
929 | 0 | return 1; |
930 | 0 | } |
931 | | |
932 | | static void sched_stream_control(quicly_stream_t *stream) |
933 | 0 | { |
934 | 0 | assert(stream->stream_id >= 0); |
935 | | |
936 | 0 | if (!quicly_linklist_is_linked(&stream->_send_aux.pending_link.control)) |
937 | 0 | quicly_linklist_insert(stream->conn->egress.pending_streams.control.prev, &stream->_send_aux.pending_link.control); |
938 | 0 | } |
939 | | |
static void resched_stream_data(quicly_stream_t *stream)
{
    /* Synchronizes the stream's "has data to send" state with either the pending-flows bitmask (crypto streams) or the
     * connection-level stream scheduler (application streams). */
    if (stream->stream_id < 0) {
        /* crypto streams: ids -1..-4 map to epochs 0..3, tracked as individual bits of `pending_flows` */
        assert(-4 <= stream->stream_id);
        uint8_t mask = 1 << -(1 + stream->stream_id);
        /* the crypto-stream bits must not collide with the non-stream pending-flow flags */
        assert((mask & (QUICLY_PENDING_FLOW_NEW_TOKEN_BIT | QUICLY_PENDING_FLOW_HANDSHAKE_DONE_BIT |
                        QUICLY_PENDING_FLOW_OTHERS_BIT)) == 0);
        if (stream->sendstate.pending.num_ranges != 0) {
            stream->conn->egress.pending_flows |= mask;
        } else {
            stream->conn->egress.pending_flows &= ~mask;
        }
        return;
    }

    /* do nothing if blocked */
    if (stream->streams_blocked)
        return;

    quicly_stream_scheduler_t *scheduler = stream->conn->super.ctx->stream_scheduler;
    scheduler->update_state(scheduler, stream);
}
962 | | |
963 | | static int should_send_max_data(quicly_conn_t *conn) |
964 | 0 | { |
965 | 0 | return quicly_maxsender_should_send_max(&conn->ingress.max_data.sender, conn->ingress.max_data.bytes_consumed, |
966 | 0 | (uint32_t)conn->super.ctx->transport_params.max_data, 512); |
967 | 0 | } |
968 | | |
969 | | static int should_send_max_stream_data(quicly_stream_t *stream) |
970 | 0 | { |
971 | 0 | if (stream->recvstate.eos != UINT64_MAX) |
972 | 0 | return 0; |
973 | 0 | return quicly_maxsender_should_send_max(&stream->_send_aux.max_stream_data_sender, stream->recvstate.data_off, |
974 | 0 | stream->_recv_aux.window, 512); |
975 | 0 | } |
976 | | |
977 | | int quicly_stream_sync_sendbuf(quicly_stream_t *stream, int activate) |
978 | 22.2k | { |
979 | 22.2k | int ret; |
980 | | |
981 | 22.2k | if (activate) { |
982 | 22.2k | if ((ret = quicly_sendstate_activate(&stream->sendstate)) != 0) |
983 | 0 | return ret; |
984 | 22.2k | } |
985 | | |
986 | 22.2k | resched_stream_data(stream); |
987 | 22.2k | return 0; |
988 | 22.2k | } |
989 | | |
990 | | void quicly_stream_sync_recvbuf(quicly_stream_t *stream, size_t shift_amount) |
991 | | { |
992 | | stream->recvstate.data_off += shift_amount; |
993 | | if (stream->stream_id >= 0) { |
994 | | if (should_send_max_stream_data(stream)) |
995 | | sched_stream_control(stream); |
996 | | } |
997 | | } |
998 | | |
999 | | /** |
1000 | | * calculate how many CIDs we provide to the remote peer |
1001 | | */ |
1002 | | static size_t local_cid_size(const quicly_conn_t *conn) |
1003 | 0 | { |
1004 | 0 | PTLS_BUILD_ASSERT(QUICLY_LOCAL_ACTIVE_CONNECTION_ID_LIMIT < SIZE_MAX / sizeof(uint64_t)); |
1005 | | |
1006 | | /* if we don't have an encryptor, the only CID we issue is the one we send during handshake */ |
1007 | 0 | if (conn->super.ctx->cid_encryptor == NULL) |
1008 | 0 | return 1; |
1009 | | |
1010 | 0 | uint64_t capacity = conn->super.remote.transport_params.active_connection_id_limit; |
1011 | 0 | if (capacity > QUICLY_LOCAL_ACTIVE_CONNECTION_ID_LIMIT) |
1012 | 0 | capacity = QUICLY_LOCAL_ACTIVE_CONNECTION_ID_LIMIT; |
1013 | 0 | return capacity; |
1014 | 0 | } |
1015 | | |
1016 | | /** |
1017 | | * set up an internal record to send RETIRE_CONNECTION_ID frame later |
1018 | | */ |
1019 | | static void schedule_retire_connection_id_frame(quicly_conn_t *conn, uint64_t sequence) |
1020 | 0 | { |
1021 | 0 | quicly_retire_cid_push(&conn->egress.retire_cid, sequence); |
1022 | 0 | conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT; |
1023 | 0 | } |
1024 | | |
1025 | | static void retire_connection_id(quicly_conn_t *conn, uint64_t sequence) |
1026 | 0 | { |
1027 | | /* Reset path CIDs that are being retired. To maximize the chance of having enough number of CIDs to run all paths when new CIDs |
1028 | | * are provided through multiple NCID frames possibly scattered over multiple packets, CIDs are reassigned to the paths lazily. |
1029 | | */ |
1030 | 0 | for (size_t i = 0; i < PTLS_ELEMENTSOF(conn->paths); ++i) { |
1031 | 0 | struct st_quicly_conn_path_t *path = conn->paths[i]; |
1032 | 0 | if (path != NULL && path->dcid == sequence) |
1033 | 0 | path->dcid = UINT64_MAX; |
1034 | 0 | } |
1035 | |
|
1036 | 0 | quicly_remote_cid_unregister(&conn->super.remote.cid_set, sequence); |
1037 | 0 | schedule_retire_connection_id_frame(conn, sequence); |
1038 | 0 | } |
1039 | | |
1040 | | static int write_crypto_data(quicly_conn_t *conn, ptls_buffer_t *tlsbuf, size_t epoch_offsets[5]) |
1041 | 0 | { |
1042 | 0 | size_t epoch; |
1043 | 0 | int ret; |
1044 | |
|
1045 | 0 | if (tlsbuf->off == 0) |
1046 | 0 | return 0; |
1047 | | |
1048 | 0 | for (epoch = 0; epoch < 4; ++epoch) { |
1049 | 0 | size_t len = epoch_offsets[epoch + 1] - epoch_offsets[epoch]; |
1050 | 0 | if (len == 0) |
1051 | 0 | continue; |
1052 | 0 | quicly_stream_t *stream = quicly_get_stream(conn, -(quicly_stream_id_t)(1 + epoch)); |
1053 | 0 | assert(stream != NULL); |
1054 | 0 | if ((ret = quicly_streambuf_egress_write(stream, tlsbuf->base + epoch_offsets[epoch], len)) != 0) |
1055 | 0 | return ret; |
1056 | 0 | } |
1057 | | |
1058 | 0 | return 0; |
1059 | 0 | } |
1060 | | |
1061 | | /** |
1062 | | * compresses a quicly error code into an int, converting QUIC transport error codes into negative ints |
1063 | | */ |
1064 | | static int compress_handshake_result(quicly_error_t quicly_err) |
1065 | 0 | { |
1066 | 0 | if (QUICLY_ERROR_IS_QUIC_TRANSPORT(quicly_err)) { |
1067 | 0 | assert(QUICLY_ERROR_GET_ERROR_CODE(quicly_err) <= INT32_MAX); |
1068 | 0 | return (int)-QUICLY_ERROR_GET_ERROR_CODE(quicly_err); |
1069 | 0 | } else { |
1070 | 0 | assert(0 <= quicly_err && quicly_err < INT_MAX); |
1071 | 0 | return (int)quicly_err; |
1072 | 0 | } |
1073 | 0 | } |
1074 | | |
1075 | | static quicly_error_t expand_handshake_result(int compressed_err) |
1076 | 0 | { |
1077 | 0 | if (compressed_err < 0) { |
1078 | 0 | return QUICLY_ERROR_FROM_TRANSPORT_ERROR_CODE(-compressed_err); |
1079 | 0 | } else { |
1080 | 0 | return compressed_err; |
1081 | 0 | } |
1082 | 0 | } |
1083 | | |
static void crypto_handshake(quicly_conn_t *conn, size_t in_epoch, ptls_iovec_t input)
{
    /* Feeds `input` (crypto stream data received at epoch `in_epoch`) into the TLS stack, handles the outcome, and writes any
     * generated handshake bytes back onto the crypto streams. Initiates connection closure on TLS failure. */
    ptls_buffer_t output;
    size_t epoch_offsets[5] = {0};

    assert(!conn->crypto.async_in_progress);

    ptls_buffer_init(&output, "", 0);

    quicly_error_t handshake_result = expand_handshake_result(ptls_handle_message(
        conn->crypto.tls, &output, epoch_offsets, in_epoch, input.base, input.len, &conn->crypto.handshake_properties));
    QUICLY_PROBE(CRYPTO_HANDSHAKE, conn, conn->stash.now, handshake_result);
    QUICLY_LOG_CONN(crypto_handshake, conn, { PTLS_LOG_ELEMENT_SIGNED(ret, handshake_result); });
    switch (handshake_result) {
    case 0:
    case PTLS_ERROR_IN_PROGRESS:
        /* success, or more handshake messages are expected */
        break;
    case PTLS_ERROR_ASYNC_OPERATION:
        /* an async TLS operation is running; the handshake resumes later via `quicly_resume_handshake` */
        assert(conn->super.ctx->async_handshake != NULL &&
               "async handshake is used but the quicly_context_t::async_handshake is NULL");
        conn->crypto.async_in_progress = 1;
        conn->super.ctx->async_handshake->cb(conn->super.ctx->async_handshake, conn->crypto.tls);
        break;
    default:
        /* fatal: QUIC transport errors and self-generated TLS alerts propagate as-is, anything else becomes INTERNAL_ERROR */
        initiate_close(conn,
                       QUICLY_ERROR_IS_QUIC_TRANSPORT(handshake_result) ||
                               PTLS_ERROR_GET_CLASS(handshake_result) == PTLS_ERROR_CLASS_SELF_ALERT
                           ? handshake_result
                           : QUICLY_TRANSPORT_ERROR_INTERNAL,
                       QUICLY_FRAME_TYPE_CRYPTO, NULL);
        goto Exit;
    }
    /* drop 0-RTT write key if 0-RTT is rejected by remote peer */
    if (conn->application != NULL && !conn->application->one_rtt_writable && conn->application->cipher.egress.key.aead != NULL) {
        assert(quicly_is_client(conn));
        if (conn->crypto.handshake_properties.client.early_data_acceptance == PTLS_EARLY_DATA_REJECTED) {
            dispose_cipher(&conn->application->cipher.egress.key);
            conn->application->cipher.egress.key = (struct st_quicly_cipher_context_t){NULL};
            /* retire all packets with ack_epoch == 3; they are all 0-RTT packets */
            quicly_error_t ret;
            if ((ret = discard_sentmap_by_epoch(conn, 1u << QUICLY_EPOCH_1RTT)) != 0) {
                initiate_close(conn, ret, QUICLY_FRAME_TYPE_CRYPTO, NULL);
                goto Exit;
            }
        }
    }

    write_crypto_data(conn, &output, epoch_offsets);

Exit:
    ptls_buffer_dispose(&output);
}
1136 | | |
1137 | | void crypto_stream_receive(quicly_stream_t *stream, size_t off, const void *src, size_t len) |
1138 | 0 | { |
1139 | 0 | quicly_conn_t *conn = stream->conn; |
1140 | 0 | ptls_iovec_t input; |
1141 | | |
1142 | | /* store input */ |
1143 | 0 | if (quicly_streambuf_ingress_receive(stream, off, src, len) != 0) |
1144 | 0 | return; |
1145 | | |
1146 | | /* While the server generates the handshake signature asynchronously, clients would not send additional messages. They cannot |
1147 | | * generate Finished. They would not send Certificate / CertificateVerify before authenticating the server identity. */ |
1148 | 0 | if (conn->crypto.async_in_progress) { |
1149 | 0 | initiate_close(conn, PTLS_ALERT_UNEXPECTED_MESSAGE, QUICLY_FRAME_TYPE_CRYPTO, NULL); |
1150 | 0 | return; |
1151 | 0 | } |
1152 | | |
1153 | | /* feed the input into TLS, send result */ |
1154 | 0 | if ((input = quicly_streambuf_ingress_get(stream)).len != 0) { |
1155 | 0 | size_t in_epoch = -(1 + stream->stream_id); |
1156 | 0 | crypto_handshake(conn, in_epoch, input); |
1157 | 0 | quicly_streambuf_ingress_shift(stream, input.len); |
1158 | 0 | } |
1159 | 0 | } |
1160 | | |
1161 | | quicly_conn_t *quicly_resume_handshake(ptls_t *tls) |
1162 | 0 | { |
1163 | 0 | quicly_conn_t *conn; |
1164 | |
|
1165 | 0 | if ((conn = *ptls_get_data_ptr(tls)) == NULL) { |
1166 | | /* QUIC connection has been closed while TLS async operation was inflight. */ |
1167 | 0 | ptls_free(tls); |
1168 | 0 | return NULL; |
1169 | 0 | } |
1170 | | |
1171 | 0 | assert(conn->crypto.async_in_progress); |
1172 | 0 | conn->crypto.async_in_progress = 0; |
1173 | |
|
1174 | 0 | if (conn->super.state >= QUICLY_STATE_CLOSING) |
1175 | 0 | return conn; |
1176 | | |
1177 | 0 | crypto_handshake(conn, 0, ptls_iovec_init(NULL, 0)); |
1178 | 0 | return conn; |
1179 | 0 | } |
1180 | | |
static void init_stream_properties(quicly_stream_t *stream, uint32_t initial_max_stream_data_local,
                                   uint64_t initial_max_stream_data_remote)
{
    /* Initializes the send / receive state machines and flow-control machinery of a freshly opened stream. */
    int is_client = quicly_is_client(stream->conn);

    /* instantiate only the direction(s) this endpoint actually uses for the given stream id; the other side starts closed */
    if (quicly_stream_has_send_side(is_client, stream->stream_id)) {
        quicly_sendstate_init(&stream->sendstate);
    } else {
        quicly_sendstate_init_closed(&stream->sendstate);
    }
    if (quicly_stream_has_receive_side(is_client, stream->stream_id)) {
        quicly_recvstate_init(&stream->recvstate);
    } else {
        quicly_recvstate_init_closed(&stream->recvstate);
    }
    stream->streams_blocked = 0;

    stream->_send_aux.max_stream_data = initial_max_stream_data_remote;
    stream->_send_aux.stop_sending.sender_state = QUICLY_SENDER_STATE_NONE;
    stream->_send_aux.stop_sending.error_code = 0;
    stream->_send_aux.reset_stream.sender_state = QUICLY_SENDER_STATE_NONE;
    stream->_send_aux.reset_stream.error_code = 0;
    quicly_maxsender_init(&stream->_send_aux.max_stream_data_sender, initial_max_stream_data_local);
    stream->_send_aux.blocked = QUICLY_SENDER_STATE_NONE;
    quicly_linklist_init(&stream->_send_aux.pending_link.control);
    quicly_linklist_init(&stream->_send_aux.pending_link.default_scheduler);

    /* the receive window we advertise equals the locally configured initial max_stream_data */
    stream->_recv_aux.window = initial_max_stream_data_local;

    /* Set the number of max ranges to be capable of handling following case:
     * * every one of the two packets being sent are lost
     * * average size of a STREAM frame found in a packet is >= ~512 bytes, or small STREAM frame is sent for every other stream
     *   being opened (e.g., sending QPACK encoder/decoder stream frame for each HTTP/3 request)
     * See also: the doc-comment on `_recv_aux.max_ranges`.
     */
    uint32_t fragments_minmax = (uint32_t)(stream->conn->super.ctx->transport_params.max_streams_uni +
                                           stream->conn->super.ctx->transport_params.max_streams_bidi);
    if (fragments_minmax < 63)
        fragments_minmax = 63;
    if ((stream->_recv_aux.max_ranges = initial_max_stream_data_local / 1024) < fragments_minmax)
        stream->_recv_aux.max_ranges = fragments_minmax;
}
1223 | | |
static void dispose_stream_properties(quicly_stream_t *stream)
{
    /* Tears down the per-stream send / receive state and unlinks the stream from any pending lists; inverse of
     * `init_stream_properties`. */
    quicly_sendstate_dispose(&stream->sendstate);
    quicly_recvstate_dispose(&stream->recvstate);
    quicly_maxsender_dispose(&stream->_send_aux.max_stream_data_sender);
    quicly_linklist_unlink(&stream->_send_aux.pending_link.control);
    quicly_linklist_unlink(&stream->_send_aux.pending_link.default_scheduler);
}
1232 | | |
1233 | | static quicly_stream_t *open_stream(quicly_conn_t *conn, uint64_t stream_id, uint32_t initial_max_stream_data_local, |
1234 | | uint64_t initial_max_stream_data_remote) |
1235 | 0 | { |
1236 | 0 | quicly_stream_t *stream; |
1237 | |
|
1238 | 0 | if ((stream = malloc(sizeof(*stream))) == NULL) |
1239 | 0 | return NULL; |
1240 | 0 | stream->conn = conn; |
1241 | 0 | stream->stream_id = stream_id; |
1242 | 0 | stream->callbacks = NULL; |
1243 | 0 | stream->data = NULL; |
1244 | |
|
1245 | 0 | int r; |
1246 | 0 | khiter_t iter = kh_put(quicly_stream_t, conn->streams, stream_id, &r); |
1247 | 0 | assert(iter != kh_end(conn->streams)); |
1248 | 0 | kh_val(conn->streams, iter) = stream; |
1249 | |
|
1250 | 0 | init_stream_properties(stream, initial_max_stream_data_local, initial_max_stream_data_remote); |
1251 | |
|
1252 | 0 | return stream; |
1253 | 0 | } |
1254 | | |
1255 | | static struct st_quicly_conn_streamgroup_state_t *get_streamgroup_state(quicly_conn_t *conn, quicly_stream_id_t stream_id) |
1256 | 0 | { |
1257 | 0 | if (quicly_is_client(conn) == quicly_stream_is_client_initiated(stream_id)) { |
1258 | 0 | return quicly_stream_is_unidirectional(stream_id) ? &conn->super.local.uni : &conn->super.local.bidi; |
1259 | 0 | } else { |
1260 | 0 | return quicly_stream_is_unidirectional(stream_id) ? &conn->super.remote.uni : &conn->super.remote.bidi; |
1261 | 0 | } |
1262 | 0 | } |
1263 | | |
static int should_send_max_streams(quicly_conn_t *conn, int uni)
{
    /* Determines if a MAX_STREAMS frame of the given directionality should be sent to the peer. */
    uint64_t concurrency;
    quicly_maxsender_t *maxsender;
    struct st_quicly_conn_streamgroup_state_t *group;

/* binds the three locals above to either the uni- or bidirectional variant */
#define INIT_VARS(type)                                                                                                            \
    do {                                                                                                                           \
        concurrency = conn->super.ctx->transport_params.max_streams_##type;                                                        \
        maxsender = &conn->ingress.max_streams.type;                                                                               \
        group = &conn->super.remote.type;                                                                                          \
    } while (0)
    if (uni) {
        INIT_VARS(uni);
    } else {
        INIT_VARS(bidi);
    }
#undef INIT_VARS

    /* never announce an update if we allow no concurrency in this direction */
    if (concurrency == 0)
        return 0;

    /* `next_stream_id / 4` is the count of streams the peer has opened in this group so far (ids advance in steps of 4) */
    if (!quicly_maxsender_should_send_max(maxsender, group->next_stream_id / 4, group->num_streams, 768))
        return 0;

    return 1;
}
1291 | | |
static void destroy_stream(quicly_stream_t *stream, quicly_error_t err)
{
    /* Unregisters and frees a stream, notifying the application through the `on_destroy` callback. `err` conveys the reason
     * (0 for normal closure). */
    quicly_conn_t *conn = stream->conn;

    QUICLY_PROBE(STREAM_ON_DESTROY, conn, conn->stash.now, stream, err);
    QUICLY_LOG_CONN(stream_on_destroy, conn, {
        PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
        PTLS_LOG_ELEMENT_SIGNED(err, err);
    });

    if (stream->callbacks != NULL)
        stream->callbacks->on_destroy(stream, err);

    /* remove from the connection's stream table */
    khiter_t iter = kh_get(quicly_stream_t, conn->streams, stream->stream_id);
    assert(iter != kh_end(conn->streams));
    kh_del(quicly_stream_t, conn->streams, iter);

    if (stream->stream_id < 0) {
        /* crypto stream: clear the corresponding pending-flow bit */
        size_t epoch = -(1 + stream->stream_id);
        stream->conn->egress.pending_flows &= ~(uint8_t)(1 << epoch);
    } else {
        /* application stream: decrement the per-group open-stream counter */
        struct st_quicly_conn_streamgroup_state_t *group = get_streamgroup_state(conn, stream->stream_id);
        --group->num_streams;
    }

    dispose_stream_properties(stream);

    /* closing a stream may newly allow a MAX_STREAMS update to be announced */
    if (conn->application != NULL && should_send_max_streams(conn, quicly_stream_is_unidirectional(stream->stream_id)))
        conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;

    free(stream);
}
1324 | | |
static void destroy_all_streams(quicly_conn_t *conn, quicly_error_t err, int including_crypto_streams)
{
    /* Destroys every stream of the connection; crypto streams (negative ids) are destroyed only when
     * `including_crypto_streams` is set. */
    quicly_stream_t *stream;
    kh_foreach_value(conn->streams, stream, {
        /* TODO do we need to send reset signals to open streams? */
        if (including_crypto_streams || stream->stream_id >= 0)
            destroy_stream(stream, err);
    });
    assert(quicly_num_streams(conn) == 0);
}
1335 | | |
1336 | | int64_t quicly_foreach_stream(quicly_conn_t *conn, void *thunk, int64_t (*cb)(void *thunk, quicly_stream_t *stream)) |
1337 | 0 | { |
1338 | 0 | quicly_stream_t *stream; |
1339 | 0 | kh_foreach_value(conn->streams, stream, { |
1340 | 0 | if (stream->stream_id >= 0) { |
1341 | 0 | int64_t ret = cb(thunk, stream); |
1342 | 0 | if (ret != 0) |
1343 | 0 | return ret; |
1344 | 0 | } |
1345 | 0 | }); |
1346 | 0 | return 0; |
1347 | 0 | } |
1348 | | |
1349 | | quicly_stream_t *quicly_get_stream(quicly_conn_t *conn, quicly_stream_id_t stream_id) |
1350 | 0 | { |
1351 | 0 | khiter_t iter = kh_get(quicly_stream_t, conn->streams, stream_id); |
1352 | 0 | if (iter != kh_end(conn->streams)) |
1353 | 0 | return kh_val(conn->streams, iter); |
1354 | 0 | return NULL; |
1355 | 0 | } |
1356 | | |
1357 | | ptls_t *quicly_get_tls(quicly_conn_t *conn) |
1358 | 570 | { |
1359 | 570 | return conn->crypto.tls; |
1360 | 570 | } |
1361 | | |
1362 | | uint32_t quicly_num_streams_by_group(quicly_conn_t *conn, int uni, int locally_initiated) |
1363 | 6.68k | { |
1364 | 6.68k | int server_initiated = quicly_is_client(conn) != locally_initiated; |
1365 | 6.68k | struct st_quicly_conn_streamgroup_state_t *state = get_streamgroup_state(conn, uni * 2 + server_initiated); |
1366 | 6.68k | return state->num_streams; |
1367 | 6.68k | } |
1368 | | |
1369 | | struct sockaddr *quicly_get_sockname(quicly_conn_t *conn) |
1370 | 0 | { |
1371 | 0 | return &conn->paths[0]->address.local.sa; |
1372 | 0 | } |
1373 | | |
1374 | | struct sockaddr *quicly_get_peername(quicly_conn_t *conn) |
1375 | 570 | { |
1376 | 570 | return &conn->paths[0]->address.remote.sa; |
1377 | 570 | } |
1378 | | |
quicly_error_t quicly_get_stats(quicly_conn_t *conn, quicly_stats_t *stats)
{
    /* Fills `stats` with a snapshot of the connection statistics; always returns 0. */
    /* copy the pre-built stats fields */
    memcpy(stats, &conn->super.stats, sizeof(conn->super.stats));

    /* set or generate the non-pre-built stats fields here */
    stats->rtt = conn->egress.loss.rtt;
    stats->loss_thresholds = conn->egress.loss.thresholds;
    stats->cc = conn->egress.cc;
    /* convert `exit_slow_start_at` to time spent since the connection was created */
    if (stats->cc.exit_slow_start_at != INT64_MAX) {
        assert(stats->cc.exit_slow_start_at >= conn->created_at);
        stats->cc.exit_slow_start_at -= conn->created_at;
    }
    quicly_ratemeter_report(&conn->egress.ratemeter, &stats->delivery_rate);
    stats->num_sentmap_packets_largest = conn->egress.loss.sentmap.num_packets_largest;
    /* NOTE(review): this assignment looks redundant if `handshake_confirmed_msec` is part of `conn->super.stats` already copied
     * by the memcpy above -- confirm against the definition of quicly_stats_t */
    stats->handshake_confirmed_msec = conn->super.stats.handshake_confirmed_msec;

    return 0;
}
1399 | | |
1400 | | quicly_error_t quicly_get_delivery_rate(quicly_conn_t *conn, quicly_rate_t *delivery_rate) |
1401 | 0 | { |
1402 | 0 | quicly_ratemeter_report(&conn->egress.ratemeter, delivery_rate); |
1403 | 0 | return 0; |
1404 | 0 | } |
1405 | | |
1406 | | quicly_stream_id_t quicly_get_ingress_max_streams(quicly_conn_t *conn, int uni) |
1407 | 0 | { |
1408 | 0 | quicly_maxsender_t *maxsender = uni ? &conn->ingress.max_streams.uni : &conn->ingress.max_streams.bidi; |
1409 | 0 | return maxsender->max_committed; |
1410 | 0 | } |
1411 | | |
1412 | | void quicly_get_max_data(quicly_conn_t *conn, uint64_t *send_permitted, uint64_t *sent, uint64_t *consumed) |
1413 | 8.08k | { |
1414 | 8.08k | if (send_permitted != NULL) |
1415 | 0 | *send_permitted = conn->egress.max_data.permitted; |
1416 | 8.08k | if (sent != NULL) |
1417 | 8.08k | *sent = conn->egress.max_data.sent; |
1418 | 8.08k | if (consumed != NULL) |
1419 | 0 | *consumed = conn->ingress.max_data.bytes_consumed; |
1420 | 8.08k | } |
1421 | | |
static void update_idle_timeout(quicly_conn_t *conn, int is_in_receive)
{
    /* (Re)arms the idle timeout. When invoked from the send path (`is_in_receive` == 0), the timer is rearmed only if the
     * previous rearm happened on receive (`should_rearm_on_send` set), matching the rearm rules of RFC 9000 Section 10.1. */
    if (!is_in_receive && !conn->idle_timeout.should_rearm_on_send)
        return;

    /* calculate the minimum of the two max_idle_timeout */
    int64_t idle_msec = INT64_MAX;
    /* the peer's value is honored only once the initial/handshake spaces are gone -- presumably because the peer's transport
     * parameters are only then considered final; confirm against the handshake logic */
    if (conn->initial == NULL && conn->handshake == NULL && conn->super.remote.transport_params.max_idle_timeout != 0)
        idle_msec = conn->super.remote.transport_params.max_idle_timeout;
    if (conn->super.ctx->transport_params.max_idle_timeout != 0 && conn->super.ctx->transport_params.max_idle_timeout < idle_msec)
        idle_msec = conn->super.ctx->transport_params.max_idle_timeout;

    /* neither side enforces an idle timeout */
    if (idle_msec == INT64_MAX)
        return;

    /* the effective timeout is never less than 3 PTOs */
    uint32_t three_pto = 3 * quicly_rtt_get_pto(&conn->egress.loss.rtt, conn->super.ctx->transport_params.max_ack_delay,
                                                conn->egress.loss.conf->min_pto);
    conn->idle_timeout.at = conn->stash.now + (idle_msec > three_pto ? idle_msec : three_pto);
    conn->idle_timeout.should_rearm_on_send = is_in_receive;
}
1442 | | |
1443 | | static int scheduler_can_send(quicly_conn_t *conn) |
1444 | 0 | { |
1445 | | /* invoke the scheduler only when we are able to send stream data; skipping STATE_ACCEPTING is important as the application |
1446 | | * would not have setup data pointer. */ |
1447 | 0 | switch (conn->super.state) { |
1448 | 0 | case QUICLY_STATE_FIRSTFLIGHT: |
1449 | 0 | case QUICLY_STATE_CONNECTED: |
1450 | 0 | break; |
1451 | 0 | default: |
1452 | 0 | return 0; |
1453 | 0 | } |
1454 | | |
1455 | | /* scheduler would never have data to send, until application keys become available */ |
1456 | 0 | if (conn->application == NULL || conn->application->cipher.egress.key.aead == NULL) |
1457 | 0 | return 0; |
1458 | | |
1459 | 0 | int conn_is_saturated = !(conn->egress.max_data.sent < conn->egress.max_data.permitted); |
1460 | 0 | return conn->super.ctx->stream_scheduler->can_send(conn->super.ctx->stream_scheduler, conn, conn_is_saturated); |
1461 | 0 | } |
1462 | | |
1463 | | static void update_send_alarm(quicly_conn_t *conn, int can_send_stream_data, int is_after_send) |
1464 | 0 | { |
1465 | 0 | int has_outstanding = conn->egress.loss.sentmap.bytes_in_flight != 0 || conn->super.remote.address_validation.send_probe, |
1466 | 0 | handshake_is_in_progress = conn->initial != NULL || conn->handshake != NULL; |
1467 | 0 | quicly_loss_update_alarm(&conn->egress.loss, conn->stash.now, conn->egress.last_retransmittable_sent_at, has_outstanding, |
1468 | 0 | can_send_stream_data, handshake_is_in_progress, conn->egress.max_data.sent, is_after_send); |
1469 | 0 | } |
1470 | | |
static void update_ratemeter(quicly_conn_t *conn, int is_cc_limited)
{
    /* Switches the delivery-rate estimator between cc-limited and application-limited modes, emitting probes / logs on each
     * transition; a no-op if the mode is unchanged. */
    if (quicly_ratemeter_is_cc_limited(&conn->egress.ratemeter) != is_cc_limited) {
        if (is_cc_limited) {
            quicly_ratemeter_enter_cc_limited(&conn->egress.ratemeter, conn->egress.packet_number);
            QUICLY_PROBE(ENTER_CC_LIMITED, conn, conn->stash.now, conn->egress.packet_number);
            QUICLY_LOG_CONN(enter_cc_limited, conn, { PTLS_LOG_ELEMENT_UNSIGNED(pn, conn->egress.packet_number); });
        } else {
            quicly_ratemeter_exit_cc_limited(&conn->egress.ratemeter, conn->egress.packet_number);
            QUICLY_PROBE(EXIT_CC_LIMITED, conn, conn->stash.now, conn->egress.packet_number);
            QUICLY_LOG_CONN(exit_cc_limited, conn, { PTLS_LOG_ELEMENT_UNSIGNED(pn, conn->egress.packet_number); });
        }
    }
}
1485 | | |
1486 | | /** |
 * Updates the send alarm and adjusts the delivery rate estimator. This function is called from the receive path. From the send
 * path, `update_send_alarm` is called directly.
1489 | | */ |
1490 | | static void setup_next_send(quicly_conn_t *conn) |
1491 | 0 | { |
1492 | 0 | int can_send_stream_data = scheduler_can_send(conn); |
1493 | |
|
1494 | 0 | update_send_alarm(conn, can_send_stream_data, 0); |
1495 | | |
1496 | | /* When the flow becomes application-limited due to receiving some information, stop collecting delivery rate samples. */ |
1497 | 0 | if (!can_send_stream_data) |
1498 | 0 | update_ratemeter(conn, 0); |
1499 | 0 | } |
1500 | | |
1501 | | static int create_handshake_flow(quicly_conn_t *conn, size_t epoch) |
1502 | 0 | { |
1503 | 0 | quicly_stream_t *stream; |
1504 | 0 | int ret; |
1505 | |
|
1506 | 0 | if ((stream = open_stream(conn, -(quicly_stream_id_t)(1 + epoch), 65536, 65536)) == NULL) |
1507 | 0 | return PTLS_ERROR_NO_MEMORY; |
1508 | 0 | if ((ret = quicly_streambuf_create(stream, sizeof(quicly_streambuf_t))) != 0) { |
1509 | 0 | destroy_stream(stream, ret); |
1510 | 0 | return ret; |
1511 | 0 | } |
1512 | 0 | stream->callbacks = &crypto_stream_callbacks; |
1513 | |
|
1514 | 0 | return 0; |
1515 | 0 | } |
1516 | | |
1517 | | static void destroy_handshake_flow(quicly_conn_t *conn, size_t epoch) |
1518 | 0 | { |
1519 | 0 | quicly_stream_t *stream = quicly_get_stream(conn, -(quicly_stream_id_t)(1 + epoch)); |
1520 | 0 | if (stream != NULL) |
1521 | 0 | destroy_stream(stream, 0); |
1522 | 0 | } |
1523 | | |
1524 | | static struct st_quicly_pn_space_t *alloc_pn_space(size_t sz, uint32_t packet_tolerance) |
1525 | 0 | { |
1526 | 0 | struct st_quicly_pn_space_t *space; |
1527 | |
|
1528 | 0 | if ((space = malloc(sz)) == NULL) |
1529 | 0 | return NULL; |
1530 | | |
1531 | 0 | quicly_ranges_init(&space->ack_queue); |
1532 | 0 | space->largest_pn_received_at = INT64_MAX; |
1533 | 0 | space->next_expected_packet_number = 0; |
1534 | 0 | space->unacked_count = 0; |
1535 | 0 | for (size_t i = 0; i < PTLS_ELEMENTSOF(space->ecn_counts); ++i) |
1536 | 0 | space->ecn_counts[i] = 0; |
1537 | 0 | space->packet_tolerance = packet_tolerance; |
1538 | 0 | space->ignore_order = 0; |
1539 | 0 | if (sz != sizeof(*space)) |
1540 | 0 | memset((uint8_t *)space + sizeof(*space), 0, sz - sizeof(*space)); |
1541 | |
|
1542 | 0 | return space; |
1543 | 0 | } |
1544 | | |
/* Frees a packet-number space allocated by `alloc_pn_space`, releasing its ACK queue first. */
static void do_free_pn_space(struct st_quicly_pn_space_t *space)
{
    quicly_ranges_clear(&space->ack_queue);
    free(space);
}
1550 | | |
1551 | | static quicly_error_t record_pn(quicly_ranges_t *ranges, uint64_t pn, int *is_out_of_order) |
1552 | 0 | { |
1553 | 0 | quicly_error_t ret; |
1554 | |
|
1555 | 0 | *is_out_of_order = 0; |
1556 | |
|
1557 | 0 | if (ranges->num_ranges != 0) { |
1558 | | /* fast path that is taken when we receive a packet in-order */ |
1559 | 0 | if (ranges->ranges[ranges->num_ranges - 1].end == pn) { |
1560 | 0 | ranges->ranges[ranges->num_ranges - 1].end = pn + 1; |
1561 | 0 | return 0; |
1562 | 0 | } |
1563 | 0 | *is_out_of_order = 1; |
1564 | 0 | } |
1565 | | |
1566 | | /* slow path; we add, then remove the oldest ranges when the number of ranges exceed the maximum */ |
1567 | 0 | if ((ret = quicly_ranges_add(ranges, pn, pn + 1)) != 0) |
1568 | 0 | return ret; |
1569 | 0 | if (ranges->num_ranges > QUICLY_MAX_ACK_BLOCKS) |
1570 | 0 | quicly_ranges_drop_by_range_indices(ranges, ranges->num_ranges - QUICLY_MAX_ACK_BLOCKS, ranges->num_ranges); |
1571 | |
|
1572 | 0 | return 0; |
1573 | 0 | } |
1574 | | |
/**
 * Records receipt of packet number `pn` carrying ECN bits `ecn`, updating the ACK queue and
 * ECN counters, and schedules transmission of an ACK by adjusting `*send_ack_at`. An ACK is
 * requested immediately for out-of-order ack-eliciting packets (unless ordering is ignored),
 * for CE-marked packets, or once the unacked count reaches the packet tolerance; otherwise a
 * delayed-ACK timer is armed. `*received_out_of_order` is a statistics counter.
 */
static quicly_error_t record_receipt(struct st_quicly_pn_space_t *space, uint64_t pn, uint8_t ecn, int is_ack_only, int64_t now,
                                     int64_t *send_ack_at, uint64_t *received_out_of_order)
{
    int ack_now, is_out_of_order;
    quicly_error_t ret;

    if ((ret = record_pn(&space->ack_queue, pn, &is_out_of_order)) != 0)
        goto Exit;
    if (is_out_of_order)
        *received_out_of_order += 1;

    /* reordering or a Congestion-Experienced mark triggers an immediate ACK, but never in response to ACK-only packets */
    ack_now = !is_ack_only && ((is_out_of_order && !space->ignore_order) || ecn == IPTOS_ECN_CE);

    /* update largest_pn_received_at (TODO implement deduplication at an earlier moment?) */
    if (space->ack_queue.ranges[space->ack_queue.num_ranges - 1].end == pn + 1)
        space->largest_pn_received_at = now;

    /* increment ecn counters */
    if (ecn != 0)
        space->ecn_counts[get_ecn_index_from_bits(ecn)] += 1;

    /* if the received packet is ack-eliciting, update / schedule transmission of ACK */
    if (!is_ack_only) {
        space->unacked_count++;
        if (space->unacked_count >= space->packet_tolerance)
            ack_now = 1;
    }

    if (ack_now) {
        *send_ack_at = now;
    } else if (*send_ack_at == INT64_MAX && space->unacked_count != 0) {
        /* arm the delayed-ACK timer only if it is not running already */
        *send_ack_at = now + QUICLY_DELAYED_ACK_TIMEOUT;
    }

    ret = 0;
Exit:
    return ret;
}
1613 | | |
/**
 * Disposes the Initial or Handshake packet-number space pointed to by `*space` (if any),
 * releasing its ingress / egress cipher contexts, then resets `*space` to NULL.
 */
static void free_handshake_space(struct st_quicly_handshake_space_t **space)
{
    if (*space != NULL) {
        if ((*space)->cipher.ingress.aead != NULL)
            dispose_cipher(&(*space)->cipher.ingress);
        if ((*space)->cipher.egress.aead != NULL)
            dispose_cipher(&(*space)->cipher.egress);
        do_free_pn_space(&(*space)->super);
        *space = NULL;
    }
}
1625 | | |
1626 | | static int setup_cipher(quicly_conn_t *conn, size_t epoch, int is_enc, ptls_cipher_context_t **hp_ctx, |
1627 | | ptls_aead_context_t **aead_ctx, ptls_aead_algorithm_t *aead, ptls_hash_algorithm_t *hash, |
1628 | | const void *secret) |
1629 | 0 | { |
1630 | | /* quicly_accept builds cipher before instantiating a connection. In such case, we use the default crypto engine */ |
1631 | 0 | quicly_crypto_engine_t *engine = conn != NULL ? conn->super.ctx->crypto_engine : &quicly_default_crypto_engine; |
1632 | |
|
1633 | 0 | return engine->setup_cipher(engine, conn, epoch, is_enc, hp_ctx, aead_ctx, aead, hash, secret); |
1634 | 0 | } |
1635 | | |
1636 | | static int setup_handshake_space_and_flow(quicly_conn_t *conn, size_t epoch) |
1637 | 0 | { |
1638 | 0 | struct st_quicly_handshake_space_t **space = epoch == QUICLY_EPOCH_INITIAL ? &conn->initial : &conn->handshake; |
1639 | 0 | if ((*space = (void *)alloc_pn_space(sizeof(struct st_quicly_handshake_space_t), 1)) == NULL) |
1640 | 0 | return PTLS_ERROR_NO_MEMORY; |
1641 | 0 | return create_handshake_flow(conn, epoch); |
1642 | 0 | } |
1643 | | |
/**
 * Disposes the application (0-RTT / 1-RTT) packet-number space pointed to by `*space` (if
 * any), freeing every instantiated ingress / egress cipher it holds and wiping the egress
 * secret, then resets `*space` to NULL.
 */
static void free_application_space(struct st_quicly_application_space_t **space)
{
    if (*space != NULL) {
/* dispose an ingress cipher member, but only if it has been instantiated */
#define DISPOSE_INGRESS(label, func)                                                                                               \
    if ((*space)->cipher.ingress.label != NULL)                                                                                    \
    func((*space)->cipher.ingress.label)
        DISPOSE_INGRESS(header_protection.zero_rtt, ptls_cipher_free);
        DISPOSE_INGRESS(header_protection.one_rtt, ptls_cipher_free);
        DISPOSE_INGRESS(aead[0], ptls_aead_free);
        DISPOSE_INGRESS(aead[1], ptls_aead_free);
#undef DISPOSE_INGRESS
        if ((*space)->cipher.egress.key.aead != NULL)
            dispose_cipher(&(*space)->cipher.egress.key);
        /* wipe the egress traffic secret before releasing the memory */
        ptls_clear_memory((*space)->cipher.egress.secret, sizeof((*space)->cipher.egress.secret));
        do_free_pn_space(&(*space)->super);
        *space = NULL;
    }
}
1662 | | |
/**
 * Allocates the application (1-RTT) packet-number space and creates the epoch-3 crypto flow.
 * Returns PTLS_ERROR_NO_MEMORY on allocation failure.
 */
static int setup_application_space(quicly_conn_t *conn)
{
    if ((conn->application =
             (void *)alloc_pn_space(sizeof(struct st_quicly_application_space_t), QUICLY_DEFAULT_PACKET_TOLERANCE)) == NULL)
        return PTLS_ERROR_NO_MEMORY;

    /* prohibit key-update until receiving an ACK for an 1-RTT packet */
    conn->application->cipher.egress.key_update_pn.last = 0;
    conn->application->cipher.egress.key_update_pn.next = UINT64_MAX;

    return create_handshake_flow(conn, QUICLY_EPOCH_1RTT);
}
1675 | | |
/**
 * Discards all state bound to the Initial or Handshake epoch: inflight packets are dropped
 * from the sentmap, the crypto flow is destroyed, and the cipher contexts are released. When
 * the Handshake epoch is discarded, the handshake-confirmation latency statistic is recorded.
 */
static quicly_error_t discard_handshake_context(quicly_conn_t *conn, size_t epoch)
{
    quicly_error_t ret;

    assert(epoch == QUICLY_EPOCH_INITIAL || epoch == QUICLY_EPOCH_HANDSHAKE);

    if ((ret = discard_sentmap_by_epoch(conn, 1u << epoch)) != 0)
        return ret;
    destroy_handshake_flow(conn, epoch);
    if (epoch == QUICLY_EPOCH_HANDSHAKE) {
        assert(conn->stash.now != 0);
        conn->super.stats.handshake_confirmed_msec = conn->stash.now - conn->created_at;
    }
    free_handshake_space(epoch == QUICLY_EPOCH_INITIAL ? &conn->initial : &conn->handshake);

    return 0;
}
1693 | | |
1694 | | static quicly_error_t apply_remote_transport_params(quicly_conn_t *conn) |
1695 | 0 | { |
1696 | 0 | quicly_error_t ret; |
1697 | |
|
1698 | 0 | conn->egress.max_data.permitted = conn->super.remote.transport_params.max_data; |
1699 | 0 | if ((ret = update_max_streams(&conn->egress.max_streams.uni, conn->super.remote.transport_params.max_streams_uni)) != 0) |
1700 | 0 | return ret; |
1701 | 0 | if ((ret = update_max_streams(&conn->egress.max_streams.bidi, conn->super.remote.transport_params.max_streams_bidi)) != 0) |
1702 | 0 | return ret; |
1703 | | |
1704 | 0 | return 0; |
1705 | 0 | } |
1706 | | |
/**
 * Derives the next-generation 1-RTT AEAD key for one direction using the "quic ku" HKDF
 * label. On success `*aead` is replaced with the new context and `secret` is updated in-place
 * to the new traffic secret; on failure both are left intact. The temporary secret is always
 * wiped before returning.
 */
static int update_1rtt_key(quicly_conn_t *conn, ptls_cipher_suite_t *cipher, int is_enc, ptls_aead_context_t **aead,
                           uint8_t *secret)
{
    uint8_t new_secret[PTLS_MAX_DIGEST_SIZE];
    ptls_aead_context_t *new_aead = NULL;
    int ret;

    /* generate next AEAD key */
    if ((ret = ptls_hkdf_expand_label(cipher->hash, new_secret, cipher->hash->digest_size,
                                      ptls_iovec_init(secret, cipher->hash->digest_size), "quic ku", ptls_iovec_init(NULL, 0),
                                      NULL)) != 0)
        goto Exit;
    if ((ret = setup_cipher(conn, QUICLY_EPOCH_1RTT, is_enc, NULL, &new_aead, cipher->aead, cipher->hash, new_secret)) != 0)
        goto Exit;

    /* success! update AEAD and secret */
    if (*aead != NULL)
        ptls_aead_free(*aead);
    *aead = new_aead;
    new_aead = NULL; /* ownership transferred; do not free in the cleanup path */
    memcpy(secret, new_secret, cipher->hash->digest_size);

    ret = 0;
Exit:
    if (new_aead != NULL)
        ptls_aead_free(new_aead);
    ptls_clear_memory(new_secret, cipher->hash->digest_size);
    return ret;
}
1736 | | |
/**
 * Installs the next-generation 1-RTT egress key and increments the egress key phase. Further
 * key updates are then blocked (key_update_pn.next = UINT64_MAX) until a packet of the new
 * phase is acknowledged.
 */
static int update_1rtt_egress_key(quicly_conn_t *conn)
{
    struct st_quicly_application_space_t *space = conn->application;
    ptls_cipher_suite_t *cipher = ptls_get_cipher(conn->crypto.tls);
    int ret;

    /* generate next AEAD key, and increment key phase if it succeeds */
    if ((ret = update_1rtt_key(conn, cipher, 1, &space->cipher.egress.key.aead, space->cipher.egress.secret)) != 0)
        return ret;
    ++space->cipher.egress.key_phase;

    /* signal that we are waiting for an ACK */
    space->cipher.egress.key_update_pn.last = conn->egress.packet_number;
    space->cipher.egress.key_update_pn.next = UINT64_MAX;

    QUICLY_PROBE(CRYPTO_SEND_KEY_UPDATE, conn, conn->stash.now, space->cipher.egress.key_phase,
                 QUICLY_PROBE_HEXDUMP(space->cipher.egress.secret, cipher->hash->digest_size));
    QUICLY_LOG_CONN(crypto_send_key_update, conn, {
        PTLS_LOG_ELEMENT_UNSIGNED(phase, space->cipher.egress.key_phase);
        PTLS_LOG_APPDATA_ELEMENT_HEXDUMP(secret, space->cipher.egress.secret, cipher->hash->digest_size);
    });

    return 0;
}
1761 | | |
/**
 * Called when a packet of key phase `newly_decrypted_key_phase` has been decrypted for the
 * first time. Records the new ingress phase, and also updates our egress key if the peer's
 * phase has overtaken ours.
 */
static int received_key_update(quicly_conn_t *conn, uint64_t newly_decrypted_key_phase)
{
    struct st_quicly_application_space_t *space = conn->application;

    assert(space->cipher.ingress.key_phase.decrypted < newly_decrypted_key_phase);
    assert(newly_decrypted_key_phase <= space->cipher.ingress.key_phase.prepared);

    space->cipher.ingress.key_phase.decrypted = newly_decrypted_key_phase;

    QUICLY_PROBE(CRYPTO_RECEIVE_KEY_UPDATE, conn, conn->stash.now, space->cipher.ingress.key_phase.decrypted,
                 QUICLY_PROBE_HEXDUMP(space->cipher.ingress.secret, ptls_get_cipher(conn->crypto.tls)->hash->digest_size));
    QUICLY_LOG_CONN(crypto_receive_key_update, conn, {
        PTLS_LOG_ELEMENT_UNSIGNED(phase, space->cipher.ingress.key_phase.decrypted);
        PTLS_LOG_APPDATA_ELEMENT_HEXDUMP(secret, space->cipher.ingress.secret,
                                         ptls_get_cipher(conn->crypto.tls)->hash->digest_size);
    });

    /* follow the peer: catch our egress key phase up to the newly decrypted ingress phase */
    if (space->cipher.egress.key_phase < space->cipher.ingress.key_phase.decrypted) {
        return update_1rtt_egress_key(conn);
    } else {
        return 0;
    }
}
1785 | | |
1786 | | static void calc_resume_sendrate(quicly_conn_t *conn, uint64_t *rate, uint32_t *rtt) |
1787 | 0 | { |
1788 | 0 | quicly_rate_t reported; |
1789 | |
|
1790 | 0 | quicly_ratemeter_report(&conn->egress.ratemeter, &reported); |
1791 | |
|
1792 | 0 | if (reported.smoothed != 0 || reported.latest != 0) { |
1793 | 0 | *rate = reported.smoothed > reported.latest ? reported.smoothed : reported.latest; |
1794 | 0 | *rtt = conn->egress.loss.rtt.minimum; |
1795 | 0 | } else { |
1796 | 0 | *rate = 0; |
1797 | 0 | *rtt = 0; |
1798 | 0 | } |
1799 | 0 | } |
1800 | | |
1801 | | static inline void update_open_count(quicly_context_t *ctx, ssize_t delta) |
1802 | 0 | { |
1803 | 0 | if (ctx->update_open_count != NULL) |
1804 | 0 | ctx->update_open_count->cb(ctx->update_open_count, delta); |
1805 | 0 | } |
1806 | | |
#define LONGEST_ADDRESS_STR "[0000:1111:2222:3333:4444:5555:6666:7777]:12345"
/**
 * Converts `sa` to its textual "address:port" form (IPv6 addresses are bracketed). `buf` must
 * be at least sizeof(LONGEST_ADDRESS_STR) bytes long; only AF_INET and AF_INET6 are
 * supported.
 */
static void stringify_address(char *buf, struct sockaddr *sa)
{
    char *p = buf;
    uint16_t port = 0;

    switch (sa->sa_family) {
    case AF_INET:
        inet_ntop(AF_INET, &((struct sockaddr_in *)sa)->sin_addr, p, sizeof(LONGEST_ADDRESS_STR));
        p += strlen(p);
        port = ntohs(((struct sockaddr_in *)sa)->sin_port);
        break;
    case AF_INET6:
        *p++ = '[';
        inet_ntop(AF_INET6, &((struct sockaddr_in6 *)sa)->sin6_addr, p, sizeof(LONGEST_ADDRESS_STR));
        p += strlen(p); /* bugfix: advance past the address, so that ']' does not overwrite its first character */
        *p++ = ']';
        port = ntohs(((struct sockaddr_in6 *)sa)->sin6_port); /* bugfix: read the port through sockaddr_in6 */
        break;
    default:
        assert(!"unexpected address family"); /* bugfix: assert("...") was a non-NULL literal and could never fire */
        break;
    }

    *p++ = ':';
    sprintf(p, "%" PRIu16, port);
}
1834 | | |
/**
 * Initializes `conn->paths[path_index]` for the given 4-tuple. Slot 0 is the default
 * (handshake) path and needs no validation; any other slot starts in the probing state with a
 * freshly generated PATH_CHALLENGE payload scheduled for immediate transmission. Returns
 * PTLS_ERROR_NO_MEMORY on allocation failure.
 */
static int new_path(quicly_conn_t *conn, size_t path_index, struct sockaddr *remote_addr, struct sockaddr *local_addr)
{
    struct st_quicly_conn_path_t *path;

    assert(conn->paths[path_index] == NULL);

    if ((path = malloc(sizeof(*conn->paths[path_index]))) == NULL)
        return PTLS_ERROR_NO_MEMORY;

    if (path_index == 0) {
        /* default path used for handshake; no path challenge is scheduled */
        *path = (struct st_quicly_conn_path_t){
            .dcid = 0,
            .path_challenge.send_at = INT64_MAX,
            .initial = 1,
            .probe_only = 0,
        };
    } else {
        /* alternate path: DCID not yet assigned (UINT64_MAX), PATH_CHALLENGE due immediately */
        *path = (struct st_quicly_conn_path_t){
            .dcid = UINT64_MAX,
            .path_challenge.send_at = 0,
            .probe_only = 1,
        };
        conn->super.ctx->tls->random_bytes(path->path_challenge.data, sizeof(path->path_challenge.data));
        conn->super.stats.num_paths.created += 1;
    }
    set_address(&path->address.remote, remote_addr);
    set_address(&path->address.local, local_addr);

    conn->paths[path_index] = path;

    /* stringify the remote address (and emit the trace record) only when tracing is active */
    PTLS_LOG_DEFINE_POINT(quicly, new_path, new_path_logpoint);
    if (QUICLY_PROBE_ENABLED(NEW_PATH) ||
        (ptls_log_point_maybe_active(&new_path_logpoint) & ptls_log_conn_maybe_active(ptls_get_log_state(conn->crypto.tls),
                                                                                      (const char *(*)(void *))ptls_get_server_name,
                                                                                      conn->crypto.tls)) != 0) {
        char remote[sizeof(LONGEST_ADDRESS_STR)];
        stringify_address(remote, &path->address.remote.sa);
        QUICLY_PROBE(NEW_PATH, conn, conn->stash.now, path_index, remote);
        QUICLY_LOG_CONN(new_path, conn, {
            PTLS_LOG_ELEMENT_UNSIGNED(path_index, path_index);
            PTLS_LOG_ELEMENT_SAFESTR(remote, remote);
        });
    }

    return 0;
}
1882 | | |
/**
 * Frees a path object, retiring its Destination CID first — unless the DCID was never
 * assigned (UINT64_MAX) or the peer uses zero-length CIDs.
 */
static void do_delete_path(quicly_conn_t *conn, struct st_quicly_conn_path_t *path)
{
    if (path->dcid != UINT64_MAX && conn->super.remote.cid_set.cids[0].cid.len != 0)
        retire_connection_id(conn, path->dcid);
    free(path);
}
1889 | | |
/**
 * Detaches the path at `path_index` from the connection and frees it, counting it as a
 * validation failure if a PATH_CHALLENGE was still outstanding.
 */
static void delete_path(quicly_conn_t *conn, size_t path_index)
{
    QUICLY_PROBE(DELETE_PATH, conn, conn->stash.now, path_index);
    QUICLY_LOG_CONN(delete_path, conn, { PTLS_LOG_ELEMENT_UNSIGNED(path_index, path_index); });

    struct st_quicly_conn_path_t *path = conn->paths[path_index];
    conn->paths[path_index] = NULL;
    /* send_at != INT64_MAX means the path was still awaiting a PATH_RESPONSE */
    if (path->path_challenge.send_at != INT64_MAX)
        conn->super.stats.num_paths.validation_failed += 1;

    do_delete_path(conn, path);
}
1902 | | |
/**
 * paths[0] (the default path) is freed and the path specified by `path_index` is promoted.
 * Promotion also marks all inflight packets as lost, reinitializes congestion control, the
 * RTT estimate and the rate meter, then rearms the send timer.
 */
static quicly_error_t promote_path(quicly_conn_t *conn, size_t path_index)
{
    QUICLY_PROBE(PROMOTE_PATH, conn, conn->stash.now, path_index);
    QUICLY_LOG_CONN(promote_path, conn, { PTLS_LOG_ELEMENT_UNSIGNED(path_index, path_index); });

    { /* mark all packets as lost, as it is unlikely that packets sent on the old path would be acknowledged */
        quicly_sentmap_iter_t iter;
        quicly_error_t ret;
        if ((ret = quicly_loss_init_sentmap_iter(&conn->egress.loss, &iter, conn->stash.now,
                                                 conn->super.remote.transport_params.max_ack_delay, 0)) != 0)
            return ret;
        const quicly_sent_packet_t *sent;
        while ((sent = quicly_sentmap_get(&iter))->packet_number != UINT64_MAX) {
            if ((ret = quicly_sentmap_update(&conn->egress.loss.sentmap, &iter, QUICLY_SENTMAP_EVENT_PTO)) != 0)
                return ret;
        }
    }

    /* reset CC (FIXME flush sentmap and reset loss recovery) */
    conn->egress.cc.type->cc_init->cb(
        conn->egress.cc.type->cc_init, &conn->egress.cc,
        quicly_cc_calc_initial_cwnd(conn->super.ctx->initcwnd_packets, conn->egress.max_udp_payload_size), conn->stash.now);

    /* set jumpstart target */
    calc_resume_sendrate(conn, &conn->super.stats.jumpstart.prev_rate, &conn->super.stats.jumpstart.prev_rtt);

    /* reset RTT estimate, adopting SRTT of the original path as initial RTT (TODO calculate RTT based on path challenge RT) */
    quicly_rtt_init(&conn->egress.loss.rtt, &conn->super.ctx->loss,
                    conn->egress.loss.rtt.smoothed < conn->super.ctx->loss.default_initial_rtt
                        ? conn->egress.loss.rtt.smoothed
                        : conn->super.ctx->loss.default_initial_rtt);

    /* reset ratemeter */
    quicly_ratemeter_init(&conn->egress.ratemeter);

    /* remember PN when the path was promoted */
    conn->egress.pn_path_start = conn->egress.packet_number;

    /* update path mapping: the promoted path becomes slot 0, the old default path is freed */
    struct st_quicly_conn_path_t *path = conn->paths[0];
    conn->paths[0] = conn->paths[path_index];
    conn->paths[path_index] = NULL;
    conn->super.stats.num_paths.promoted += 1;

    do_delete_path(conn, path);

    /* rearm the loss timer, now that the RTT estimate has been changed */
    setup_next_send(conn);

    return 0;
}
1957 | | |
/**
 * Finds a slot for a path to the given 4-tuple, initializes it, and schedules transmission of
 * a PATH_CHALLENGE. Slot selection prefers an empty slot, otherwise evicts the
 * least-recently-used path that has completed validation; QUICLY_ERROR_PACKET_IGNORED is
 * returned when every slot holds a path that is still being probed.
 */
static int open_path(quicly_conn_t *conn, size_t *path_index, struct sockaddr *remote_addr, struct sockaddr *local_addr)
{
    int ret;

    /* choose a slot that is unused or the least-recently-used one that has completed validation */
    *path_index = SIZE_MAX;
    for (size_t i = 1; i < PTLS_ELEMENTSOF(conn->paths); ++i) {
        struct st_quicly_conn_path_t *p = conn->paths[i];
        if (p == NULL) {
            *path_index = i;
            break;
        }
        /* paths still awaiting validation are not eligible for eviction */
        if (p->path_challenge.send_at != INT64_MAX)
            continue;
        if (*path_index == SIZE_MAX || p->packet_last_received < conn->paths[*path_index]->packet_last_received)
            *path_index = i;
    }
    if (*path_index == SIZE_MAX)
        return QUICLY_ERROR_PACKET_IGNORED;

    /* free existing path info */
    if (conn->paths[*path_index] != NULL)
        delete_path(conn, *path_index);

    /* initialize new path info */
    if ((ret = new_path(conn, *path_index, remote_addr, local_addr)) != 0)
        return ret;

    /* schedule emission of PATH_CHALLENGE */
    conn->egress.send_probe_at = 0;

    return 0;
}
1991 | | |
1992 | | static void recalc_send_probe_at(quicly_conn_t *conn) |
1993 | 0 | { |
1994 | 0 | conn->egress.send_probe_at = INT64_MAX; |
1995 | |
|
1996 | 0 | for (size_t i = 0; i < PTLS_ELEMENTSOF(conn->paths); ++i) { |
1997 | 0 | if (conn->paths[i] == NULL) |
1998 | 0 | continue; |
1999 | 0 | if (conn->egress.send_probe_at > conn->paths[i]->path_challenge.send_at) |
2000 | 0 | conn->egress.send_probe_at = conn->paths[i]->path_challenge.send_at; |
2001 | 0 | if (conn->paths[i]->path_response.send_) { |
2002 | 0 | conn->egress.send_probe_at = 0; |
2003 | 0 | break; |
2004 | 0 | } |
2005 | 0 | } |
2006 | 0 | } |
2007 | | |
/**
 * Frees every resource owned by the connection — streams, packet-number spaces, path objects,
 * the embedded TLS object (deferred when an async handshake operation is inflight) and the
 * connection structure itself — after emitting the final FREE / CONN_STATS trace records.
 */
void quicly_free(quicly_conn_t *conn)
{
    lock_now(conn, 0);

    QUICLY_PROBE(FREE, conn, conn->stash.now);
    QUICLY_LOG_CONN(free, conn, {});

    if (QUICLY_PROBE_ENABLED(CONN_STATS)) {
        quicly_stats_t stats;
        quicly_get_stats(conn, &stats);
        QUICLY_PROBE(CONN_STATS, conn, conn->stash.now, &stats, sizeof(stats));
        // TODO: emit stats with QUICLY_LOG_CONN()
    }

    destroy_all_streams(conn, 0, 1);
    update_open_count(conn->super.ctx, -1);
    clear_datagram_frame_payloads(conn);

    quicly_maxsender_dispose(&conn->ingress.max_data.sender);
    quicly_maxsender_dispose(&conn->ingress.max_streams.uni);
    quicly_maxsender_dispose(&conn->ingress.max_streams.bidi);
    quicly_loss_dispose(&conn->egress.loss);

    kh_destroy(quicly_stream_t, conn->streams);

    /* destroy_all_streams must have left every pending-stream list unlinked */
    assert(!quicly_linklist_is_linked(&conn->egress.pending_streams.blocked.uni));
    assert(!quicly_linklist_is_linked(&conn->egress.pending_streams.blocked.bidi));
    assert(!quicly_linklist_is_linked(&conn->egress.pending_streams.control));
    assert(!quicly_linklist_is_linked(&conn->super._default_scheduler.active));
    assert(!quicly_linklist_is_linked(&conn->super._default_scheduler.blocked));

    free_handshake_space(&conn->initial);
    free_handshake_space(&conn->handshake);
    free_application_space(&conn->application);

    ptls_buffer_dispose(&conn->crypto.transport_params.buf);

    for (size_t i = 0; i < PTLS_ELEMENTSOF(conn->paths); ++i) {
        if (conn->paths[i] != NULL)
            delete_path(conn, i);
    }

    /* `crypto.tls` is disposed late, because logging relies on `ptls_skip_tracing` */
    if (conn->crypto.async_in_progress) {
        /* When async signature generation is inflight, `ptls_free` will be called from `quicly_resume_handshake` later on. */
        *ptls_get_data_ptr(conn->crypto.tls) = NULL;
    } else {
        ptls_free(conn->crypto.tls);
    }

    unlock_now(conn);

    if (conn->egress.pacer != NULL)
        free(conn->egress.pacer);
    free(conn->token.base);
    free(conn);
}
2065 | | |
/* Expands the Initial master secret into a directional traffic secret using `label`
 * ("client in" or "server in"). */
static int calc_initial_key(ptls_cipher_suite_t *cs, uint8_t *traffic_secret, const void *master_secret, const char *label)
{
    return ptls_hkdf_expand_label(cs->hash, traffic_secret, cs->hash->digest_size,
                                  ptls_iovec_init(master_secret, cs->hash->digest_size), label, ptls_iovec_init(NULL, 0), NULL);
}
2071 | | |
/**
 * Calculates the Initial-packet traffic secrets: HKDF-extract over the version-specific salt
 * and the Destination CID, followed by expansion using the "client in" / "server in" labels
 * (which label maps to which direction depends on `is_client`). Either of `ingress` /
 * `egress` may be NULL to skip that direction. The intermediate master secret is wiped before
 * returning.
 */
int quicly_calc_initial_keys(ptls_cipher_suite_t *cs, uint8_t *ingress, uint8_t *egress, ptls_iovec_t cid, int is_client,
                             ptls_iovec_t salt)
{
    static const char *labels[2] = {"client in", "server in"};
    uint8_t master_secret[PTLS_MAX_DIGEST_SIZE];
    int ret;

    /* extract master secret */
    if ((ret = ptls_hkdf_extract(cs->hash, master_secret, salt, cid)) != 0)
        goto Exit;

    /* calc secrets */
    if (ingress != NULL && (ret = calc_initial_key(cs, ingress, master_secret, labels[is_client])) != 0)
        goto Exit;
    if (egress != NULL && (ret = calc_initial_key(cs, egress, master_secret, labels[!is_client])) != 0)
        goto Exit;

Exit:
    ptls_clear_memory(master_secret, sizeof(master_secret));
    return ret;
}
2093 | | |
/**
 * Builds the Initial-packet cipher contexts (header protection and AEAD) for either or both
 * directions; `ingress` / `egress` may be NULL to skip a direction. The derived secrets are
 * wiped before returning.
 * @param conn maybe NULL when called by quicly_accept
 */
static int setup_initial_encryption(ptls_cipher_suite_t *cs, struct st_quicly_cipher_context_t *ingress,
                                    struct st_quicly_cipher_context_t *egress, ptls_iovec_t cid, int is_client, ptls_iovec_t salt,
                                    quicly_conn_t *conn)
{
    struct {
        uint8_t ingress[PTLS_MAX_DIGEST_SIZE];
        uint8_t egress[PTLS_MAX_DIGEST_SIZE];
    } secrets;
    int ret;

    /* derive the directional traffic secrets */
    if ((ret = quicly_calc_initial_keys(cs, ingress != NULL ? secrets.ingress : NULL, egress != NULL ? secrets.egress : NULL, cid,
                                        is_client, salt)) != 0)
        goto Exit;

    /* instantiate the cipher contexts for each requested direction */
    if (ingress != NULL && (ret = setup_cipher(conn, QUICLY_EPOCH_INITIAL, 0, &ingress->header_protection, &ingress->aead, cs->aead,
                                               cs->hash, secrets.ingress)) != 0)
        goto Exit;
    if (egress != NULL && (ret = setup_cipher(conn, QUICLY_EPOCH_INITIAL, 1, &egress->header_protection, &egress->aead, cs->aead,
                                              cs->hash, secrets.egress)) != 0)
        goto Exit;

Exit:
    ptls_clear_memory(&secrets, sizeof(secrets));
    return ret;
}
2122 | | |
/**
 * Disposes and re-creates the Initial cipher contexts, keyed by the current remote CID.
 * Returns `err_code_if_unknown_version` when no salt is defined for the connection's current
 * version.
 */
static quicly_error_t reinstall_initial_encryption(quicly_conn_t *conn, quicly_error_t err_code_if_unknown_version)
{
    const quicly_salt_t *salt;

    /* get salt */
    if ((salt = quicly_get_salt(conn->super.version)) == NULL)
        return err_code_if_unknown_version;

    /* dispose existing context */
    dispose_cipher(&conn->initial->cipher.ingress);
    dispose_cipher(&conn->initial->cipher.egress);

    /* setup encryption context (is_client is hard-coded to 1 here) */
    return setup_initial_encryption(
        get_aes128gcmsha256(conn->super.ctx), &conn->initial->cipher.ingress, &conn->initial->cipher.egress,
        ptls_iovec_init(conn->super.remote.cid_set.cids[0].cid.cid, conn->super.remote.cid_set.cids[0].cid.len), 1,
        ptls_iovec_init(salt->initial, sizeof(salt->initial)), NULL);
}
2141 | | |
/**
 * Applies a received STREAM (stream_id >= 0) or CRYPTO (stream_id < 0) frame: enforces
 * stream- and connection-level flow control, updates the receive state, and delivers newly
 * available bytes to the application through the `on_receive` callback. Returns 0, a
 * transport error, or QUICLY_ERROR_IS_CLOSING when the callback moved the connection to
 * closing state.
 */
static quicly_error_t apply_stream_frame(quicly_stream_t *stream, quicly_stream_frame_t *frame)
{
    quicly_error_t ret;

    QUICLY_PROBE(STREAM_RECEIVE, stream->conn, stream->conn->stash.now, stream, frame->offset, frame->data.len);
    QUICLY_LOG_CONN(stream_receive, stream->conn, {
        PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
        PTLS_LOG_ELEMENT_UNSIGNED(off, frame->offset);
        PTLS_LOG_ELEMENT_UNSIGNED(len, frame->data.len);
    });

    /* nothing to do if the transfer has already completed */
    if (quicly_recvstate_transfer_complete(&stream->recvstate))
        return 0;

    /* flow control */
    if (stream->stream_id >= 0) {
        /* STREAMs */
        uint64_t max_stream_data = frame->offset + frame->data.len;
        if ((int64_t)stream->_recv_aux.window < (int64_t)max_stream_data - (int64_t)stream->recvstate.data_off)
            return QUICLY_TRANSPORT_ERROR_FLOW_CONTROL;
        if (stream->recvstate.received.ranges[stream->recvstate.received.num_ranges - 1].end < max_stream_data) {
            /* account only the bytes extending beyond the highest offset seen so far */
            uint64_t newly_received =
                max_stream_data - stream->recvstate.received.ranges[stream->recvstate.received.num_ranges - 1].end;
            if (stream->conn->ingress.max_data.bytes_consumed + newly_received >
                stream->conn->ingress.max_data.sender.max_committed)
                return QUICLY_TRANSPORT_ERROR_FLOW_CONTROL;
            stream->conn->ingress.max_data.bytes_consumed += newly_received;
            /* FIXME send MAX_DATA if necessary */
        }
    } else {
        /* CRYPTO streams; maybe add different limit for 1-RTT CRYPTO? */
        if (frame->offset + frame->data.len > stream->conn->super.ctx->max_crypto_bytes)
            return QUICLY_TRANSPORT_ERROR_CRYPTO_BUFFER_EXCEEDED;
    }

    /* update recvbuf */
    size_t apply_len = frame->data.len;
    if ((ret = quicly_recvstate_update(&stream->recvstate, frame->offset, &apply_len, frame->is_fin,
                                       stream->_recv_aux.max_ranges)) != 0)
        return ret;

    if (apply_len != 0 || quicly_recvstate_transfer_complete(&stream->recvstate)) {
        /* deliver the trailing `apply_len` bytes (the portion retained by quicly_recvstate_update) */
        uint64_t buf_offset = frame->offset + frame->data.len - apply_len - stream->recvstate.data_off;
        const void *apply_src = frame->data.base + frame->data.len - apply_len;
        QUICLY_PROBE(STREAM_ON_RECEIVE, stream->conn, stream->conn->stash.now, stream, (size_t)buf_offset, apply_src, apply_len);
        QUICLY_LOG_CONN(stream_on_receive, stream->conn, {
            PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
            PTLS_LOG_ELEMENT_UNSIGNED(off, buf_offset);
            PTLS_LOG_APPDATA_ELEMENT_HEXDUMP(src, apply_src, apply_len);
        });
        stream->callbacks->on_receive(stream, (size_t)buf_offset, apply_src, apply_len);
        if (stream->conn->super.state >= QUICLY_STATE_CLOSING)
            return QUICLY_ERROR_IS_CLOSING;
    }

    if (should_send_max_stream_data(stream))
        sched_stream_control(stream);

    if (stream_is_destroyable(stream))
        destroy_stream(stream, 0);

    return 0;
}
2205 | | |
/**
 * Encodes the given transport parameters into `buf` as a sequence of (id, length-prefixed value) tuples.
 * `original_dcid`, `initial_scid`, `retry_scid`, and `stateless_reset_token` are emitted only when non-NULL; numeric
 * parameters are omitted when equal to the protocol default (zero / the QUICLY_DEFAULT_* value), as the receiver then
 * assumes the default. When `expand_by` is non-zero, a padding ("greasing") TP of that many bytes is appended.
 * Returns 0 on success, or a non-zero ptls error code on buffer failure.
 */
int quicly_encode_transport_parameter_list(ptls_buffer_t *buf, const quicly_transport_parameters_t *params,
                                           const quicly_cid_t *original_dcid, const quicly_cid_t *initial_scid,
                                           const quicly_cid_t *retry_scid, const void *stateless_reset_token, size_t expand_by)
{
    int ret;

/* Emits one transport parameter: the id as a QUIC varint, followed by the value produced by `block` wrapped in a
 * varint length prefix. NOTE(review): the ptls_buffer_push_* helpers follow the picotls convention of setting `ret`
 * and jumping to the `Exit` label on failure — that is why `ret` is declared and `Exit:` exists below with no visible
 * error assignments; confirm against picotls headers. */
#define PUSH_TP(buf, id, block)                                                                                                    \
    do {                                                                                                                           \
        ptls_buffer_push_quicint((buf), (id));                                                                                     \
        ptls_buffer_push_block((buf), -1, block);                                                                                  \
    } while (0)

    /* max_udp_payload_size is always sent; the parameters that follow are skipped when set to their defaults */
    PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_MAX_UDP_PAYLOAD_SIZE,
            { ptls_buffer_push_quicint(buf, params->max_udp_payload_size); });
    if (params->max_stream_data.bidi_local != 0)
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL,
                { ptls_buffer_push_quicint(buf, params->max_stream_data.bidi_local); });
    if (params->max_stream_data.bidi_remote != 0)
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE,
                { ptls_buffer_push_quicint(buf, params->max_stream_data.bidi_remote); });
    if (params->max_stream_data.uni != 0)
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_UNI,
                { ptls_buffer_push_quicint(buf, params->max_stream_data.uni); });
    if (params->max_data != 0)
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_DATA, { ptls_buffer_push_quicint(buf, params->max_data); });
    if (params->max_idle_timeout != 0)
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_MAX_IDLE_TIMEOUT, { ptls_buffer_push_quicint(buf, params->max_idle_timeout); });
    /* the three CID-authentication parameters and the stateless reset token are supplied by the caller only when the
     * role / handshake state requires them to be sent */
    if (original_dcid != NULL)
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_ORIGINAL_CONNECTION_ID,
                { ptls_buffer_pushv(buf, original_dcid->cid, original_dcid->len); });
    if (initial_scid != NULL)
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_SOURCE_CONNECTION_ID,
                { ptls_buffer_pushv(buf, initial_scid->cid, initial_scid->len); });
    if (retry_scid != NULL)
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_RETRY_SOURCE_CONNECTION_ID,
                { ptls_buffer_pushv(buf, retry_scid->cid, retry_scid->len); });
    if (stateless_reset_token != NULL)
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_STATELESS_RESET_TOKEN,
                { ptls_buffer_pushv(buf, stateless_reset_token, QUICLY_STATELESS_RESET_TOKEN_LEN); });
    if (params->max_streams_bidi != 0)
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_BIDI,
                { ptls_buffer_push_quicint(buf, params->max_streams_bidi); });
    if (params->max_streams_uni != 0)
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_UNI,
                { ptls_buffer_push_quicint(buf, params->max_streams_uni); });
    /* compile-time constants: emitted only when they differ from the spec-mandated defaults */
    if (QUICLY_LOCAL_ACK_DELAY_EXPONENT != QUICLY_DEFAULT_ACK_DELAY_EXPONENT)
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_ACK_DELAY_EXPONENT,
                { ptls_buffer_push_quicint(buf, QUICLY_LOCAL_ACK_DELAY_EXPONENT); });
    if (QUICLY_LOCAL_MAX_ACK_DELAY != QUICLY_DEFAULT_MAX_ACK_DELAY)
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_MAX_ACK_DELAY, { ptls_buffer_push_quicint(buf, QUICLY_LOCAL_MAX_ACK_DELAY); });
    if (params->min_ack_delay_usec != UINT64_MAX) {
        /* TODO consider the value we should advertise. */
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_MIN_ACK_DELAY,
                { ptls_buffer_push_quicint(buf, QUICLY_LOCAL_MAX_ACK_DELAY * 1000 /* in microseconds */); });
    }
    if (params->disable_active_migration)
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_DISABLE_ACTIVE_MIGRATION, {}); /* zero-length value; presence is the signal */
    if (QUICLY_LOCAL_ACTIVE_CONNECTION_ID_LIMIT != QUICLY_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT)
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_ACTIVE_CONNECTION_ID_LIMIT,
                { ptls_buffer_push_quicint(buf, QUICLY_LOCAL_ACTIVE_CONNECTION_ID_LIMIT); });
    if (params->max_datagram_frame_size != 0)
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_MAX_DATAGRAM_FRAME_SIZE,
                { ptls_buffer_push_quicint(buf, params->max_datagram_frame_size); });
    /* if requested, add a greasing TP of 1 MTU size so that CH spans across multiple packets */
    if (expand_by != 0) {
        PUSH_TP(buf, 31 * 100 + 27, { /* 3127: a reserved (greasing) TP id of the form 31*N+27 */
            if ((ret = ptls_buffer_reserve(buf, expand_by)) != 0)
                goto Exit;
            memset(buf->base + buf->off, 0, expand_by);
            buf->off += expand_by;
        });
    }

#undef PUSH_TP

    ret = 0;
Exit:
    return ret;
}
2285 | | |
/**
 * sentinel used for indicating that the corresponding TP should be ignored; callers compare the *address* of this
 * object against the CID output pointers, never its contents
 */
static const quicly_cid_t _tp_cid_ignore;
/* NOTE: the cast drops `const` only so that the sentinel's address can be passed through non-const `quicly_cid_t *`
 * parameters; the decoder checks `dest != &tp_cid_ignore` before writing, so the object itself is never modified
 * (writing through it would be undefined behavior) */
#define tp_cid_ignore (*(quicly_cid_t *)&_tp_cid_ignore)
2291 | | |
/**
 * Decodes the transport parameter list found in `src`..`end`, filling `params` (pre-set to defaults) and the CID /
 * stateless-reset-token outputs. Each CID output pointer is one of: NULL (receiving that TP is an error),
 * `&tp_cid_ignore` (the TP is skipped without being stored), or a real destination (the TP is required; its absence is
 * detected via the UINT8_MAX length sentinel and rejected). Duplicate TPs, malformed varints, and out-of-range values
 * are rejected with QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER. Returns 0 on success.
 */
quicly_error_t quicly_decode_transport_parameter_list(quicly_transport_parameters_t *params, quicly_cid_t *original_dcid,
                                                      quicly_cid_t *initial_scid, quicly_cid_t *retry_scid,
                                                      void *stateless_reset_token, const uint8_t *src, const uint8_t *end)
{
/* When non-negative, tp_index contains the literal position within the list of transport parameters recognized by this function.
 * That index is being used to find duplicates using a 64-bit bitmap (found_bits). When the transport parameter is being processed,
 * tp_index is set to -1. The mapping from TP to bit is therefore defined by the textual order of the DECODE_TP invocations
 * below. */
#define DECODE_TP(_id, block)                                                                                                      \
    do {                                                                                                                           \
        if (tp_index >= 0) {                                                                                                       \
            if (id == (_id)) {                                                                                                     \
                if ((found_bits & ((uint64_t)1 << tp_index)) != 0) {                                                               \
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;                                                              \
                    goto Exit;                                                                                                     \
                }                                                                                                                  \
                found_bits |= (uint64_t)1 << tp_index;                                                                             \
                {block} tp_index = -1;                                                                                             \
            } else {                                                                                                               \
                ++tp_index;                                                                                                        \
            }                                                                                                                      \
        }                                                                                                                          \
    } while (0)
/* Decodes a connection-ID-valued TP into `dest`, honoring the NULL / tp_cid_ignore conventions described above. */
#define DECODE_CID_TP(_id, dest)                                                                                                   \
    DECODE_TP(_id, {                                                                                                               \
        size_t cidl = end - src;                                                                                                   \
        if (cidl > QUICLY_MAX_CID_LEN_V1) {                                                                                        \
            ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;                                                                      \
            goto Exit;                                                                                                             \
        }                                                                                                                          \
        if (dest == NULL) {                                                                                                        \
            ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;                                                                      \
            goto Exit;                                                                                                             \
        } else if (dest != &tp_cid_ignore) {                                                                                       \
            quicly_set_cid(dest, ptls_iovec_init(src, cidl));                                                                      \
        }                                                                                                                          \
        src = end;                                                                                                                 \
    });

    uint64_t found_bits = 0;
    quicly_error_t ret;

    /* set parameters to their default values */
    *params = default_transport_params;

    /* Set optional parameters to UINT8_MAX. It is used to as a sentinel for detecting missing TPs. */
    if (original_dcid != NULL && original_dcid != &tp_cid_ignore)
        original_dcid->len = UINT8_MAX;
    if (initial_scid != NULL && initial_scid != &tp_cid_ignore)
        initial_scid->len = UINT8_MAX;
    if (retry_scid != NULL && retry_scid != &tp_cid_ignore)
        retry_scid->len = UINT8_MAX;

    /* decode the parameters block */
    while (src != end) {
        uint64_t id;
        if ((id = quicly_decodev(&src, end)) == UINT64_MAX) {
            ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
            goto Exit;
        }
        int tp_index = 0;
        /* `end` is temporarily narrowed by ptls_decode_open_block to the extent of this one TP's value */
        ptls_decode_open_block(src, end, -1, {
            DECODE_CID_TP(QUICLY_TRANSPORT_PARAMETER_ID_ORIGINAL_CONNECTION_ID, original_dcid);
            DECODE_CID_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_SOURCE_CONNECTION_ID, initial_scid);
            DECODE_CID_TP(QUICLY_TRANSPORT_PARAMETER_ID_RETRY_SOURCE_CONNECTION_ID, retry_scid);
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_MAX_UDP_PAYLOAD_SIZE, {
                uint64_t v;
                if ((v = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                    goto Exit;
                }
                /* values below 1200 are invalid (RFC 9000); values above 65527 fit no UDP datagram, clamp to 16 bits */
                if (v < 1200) {
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                    goto Exit;
                }
                if (v > UINT16_MAX)
                    v = UINT16_MAX;
                params->max_udp_payload_size = (uint16_t)v;
            });
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL, {
                if ((params->max_stream_data.bidi_local = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                    goto Exit;
                }
            });
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE, {
                if ((params->max_stream_data.bidi_remote = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                    goto Exit;
                }
            });
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_UNI, {
                if ((params->max_stream_data.uni = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                    goto Exit;
                }
            });
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_DATA, {
                if ((params->max_data = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                    goto Exit;
                }
            });
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_STATELESS_RESET_TOKEN, {
                /* the token is fixed-length; also an error if the caller did not expect one (server receiving it) */
                if (!(stateless_reset_token != NULL && end - src == QUICLY_STATELESS_RESET_TOKEN_LEN)) {
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                    goto Exit;
                }
                memcpy(stateless_reset_token, src, QUICLY_STATELESS_RESET_TOKEN_LEN);
                src = end;
            });
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_MAX_IDLE_TIMEOUT, {
                if ((params->max_idle_timeout = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                    goto Exit;
                }
            });
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_BIDI, {
                if ((params->max_streams_bidi = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                    goto Exit;
                }
            });
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_UNI, {
                if ((params->max_streams_uni = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                    goto Exit;
                }
            });
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_ACK_DELAY_EXPONENT, {
                uint64_t v;
                if ((v = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                    goto Exit;
                }
                /* "values above 20 are invalid" (RFC 9000) */
                if (v > 20) {
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                    goto Exit;
                }
                params->ack_delay_exponent = (uint8_t)v;
            });
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_MAX_ACK_DELAY, {
                uint64_t v;
                if ((v = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                    goto Exit;
                }
                if (v >= 16384) { /* "values of 2^14 or greater are invalid" */
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                    goto Exit;
                }
                params->max_ack_delay = (uint16_t)v;
            });
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_MIN_ACK_DELAY, {
                if ((params->min_ack_delay_usec = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                    goto Exit;
                }
                if (params->min_ack_delay_usec >= 16777216) { /* "values of 2^24 or greater are invalid" */
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                    goto Exit;
                }
            });
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_ACTIVE_CONNECTION_ID_LIMIT, {
                uint64_t v;
                if ((v = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                    goto Exit;
                }
                if (v < QUICLY_MIN_ACTIVE_CONNECTION_ID_LIMIT) {
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                    goto Exit;
                }
                params->active_connection_id_limit = v;
            });
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_DISABLE_ACTIVE_MIGRATION, { params->disable_active_migration = 1; });
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_MAX_DATAGRAM_FRAME_SIZE, {
                uint64_t v;
                if ((v = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                    goto Exit;
                }
                if (v > UINT16_MAX)
                    v = UINT16_MAX;
                params->max_datagram_frame_size = (uint16_t)v;
            });
            /* skip unknown extension (tp_index is still >= 0 iff no DECODE_TP above consumed the value) */
            if (tp_index >= 0)
                src = end;
        });
    }

    /* check consistency between the transport parameters */
    if (params->min_ack_delay_usec != UINT64_MAX) {
        if (params->min_ack_delay_usec > params->max_ack_delay * 1000) {
            ret = QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
            goto Exit;
        }
    }

    /* check the absence of CIDs (len still holding the UINT8_MAX sentinel means the required TP was never received) */
    if ((original_dcid != NULL && original_dcid->len == UINT8_MAX) || (initial_scid != NULL && initial_scid->len == UINT8_MAX) ||
        (retry_scid != NULL && retry_scid->len == UINT8_MAX)) {
        ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
        goto Exit;
    }

    ret = 0;
Exit:
    /* ptls_decode_* helpers report truncation as a TLS decode_error alert; remap it to the QUIC-level error */
    if (ret == PTLS_ALERT_DECODE_ERROR)
        ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
    return ret;

#undef DECODE_TP
#undef DECODE_CID_TP
}
2507 | | |
2508 | | static uint16_t get_transport_parameters_extension_id(uint32_t quic_version) |
2509 | 0 | { |
2510 | 0 | switch (quic_version) { |
2511 | 0 | case QUICLY_PROTOCOL_VERSION_DRAFT27: |
2512 | 0 | case QUICLY_PROTOCOL_VERSION_DRAFT29: |
2513 | 0 | return QUICLY_TLS_EXTENSION_TYPE_TRANSPORT_PARAMETERS_DRAFT; |
2514 | 0 | default: |
2515 | 0 | return QUICLY_TLS_EXTENSION_TYPE_TRANSPORT_PARAMETERS_FINAL; |
2516 | 0 | } |
2517 | 0 | } |
2518 | | |
2519 | | static int collect_transport_parameters(ptls_t *tls, struct st_ptls_handshake_properties_t *properties, uint16_t type) |
2520 | 0 | { |
2521 | 0 | quicly_conn_t *conn = (void *)((char *)properties - offsetof(quicly_conn_t, crypto.handshake_properties)); |
2522 | 0 | return type == get_transport_parameters_extension_id(conn->super.version); |
2523 | 0 | } |
2524 | | |
2525 | | static quicly_conn_t *create_connection(quicly_context_t *ctx, uint32_t protocol_version, const char *server_name, |
2526 | | struct sockaddr *remote_addr, struct sockaddr *local_addr, ptls_iovec_t *remote_cid, |
2527 | | const quicly_cid_plaintext_t *local_cid, ptls_handshake_properties_t *handshake_properties, |
2528 | | void *appdata, uint32_t initcwnd) |
2529 | 0 | { |
2530 | 0 | ptls_log_conn_state_t log_state_override; |
2531 | 0 | ptls_t *tls; |
2532 | 0 | quicly_conn_t *conn; |
2533 | 0 | quicly_pacer_t *pacer = NULL; |
2534 | | |
2535 | | /* consistency checks */ |
2536 | 0 | assert(remote_addr != NULL && remote_addr->sa_family != AF_UNSPEC); |
2537 | 0 | if (ctx->transport_params.max_datagram_frame_size != 0) |
2538 | 0 | assert(ctx->receive_datagram_frame != NULL); |
2539 | | |
2540 | | /* build log state */ |
2541 | 0 | ptls_log_init_conn_state(&log_state_override, ctx->tls->random_bytes); |
2542 | 0 | switch (remote_addr->sa_family) { |
2543 | 0 | case AF_INET: |
2544 | 0 | ptls_build_v4_mapped_v6_address(&log_state_override.address, &((struct sockaddr_in *)remote_addr)->sin_addr); |
2545 | 0 | break; |
2546 | 0 | case AF_INET6: |
2547 | 0 | log_state_override.address = ((struct sockaddr_in6 *)remote_addr)->sin6_addr; |
2548 | 0 | break; |
2549 | 0 | default: |
2550 | 0 | break; |
2551 | 0 | } |
2552 | | |
2553 | | /* create TLS context */ |
2554 | 0 | ptls_log_conn_state_override = &log_state_override; |
2555 | 0 | tls = ptls_new(ctx->tls, server_name == NULL); |
2556 | 0 | ptls_log_conn_state_override = NULL; |
2557 | 0 | if (tls == NULL) |
2558 | 0 | return NULL; |
2559 | 0 | if (server_name != NULL && ptls_set_server_name(tls, server_name, strlen(server_name)) != 0) { |
2560 | 0 | ptls_free(tls); |
2561 | 0 | return NULL; |
2562 | 0 | } |
2563 | | |
2564 | | /* allocate memory and start creating QUIC context */ |
2565 | 0 | if ((conn = malloc(sizeof(*conn))) == NULL) { |
2566 | 0 | ptls_free(tls); |
2567 | 0 | return NULL; |
2568 | 0 | } |
2569 | 0 | if (ctx->use_pacing && (pacer = malloc(sizeof(*pacer))) == NULL) { |
2570 | 0 | ptls_free(tls); |
2571 | 0 | free(conn); |
2572 | 0 | return NULL; |
2573 | 0 | } |
2574 | 0 | memset(conn, 0, sizeof(*conn)); |
2575 | 0 | conn->super.ctx = ctx; |
2576 | 0 | conn->super.data = appdata; |
2577 | 0 | lock_now(conn, 0); |
2578 | 0 | conn->created_at = conn->stash.now; |
2579 | 0 | conn->super.stats.handshake_confirmed_msec = UINT64_MAX; |
2580 | 0 | conn->crypto.tls = tls; |
2581 | 0 | if (new_path(conn, 0, remote_addr, local_addr) != 0) { |
2582 | 0 | unlock_now(conn); |
2583 | 0 | if (pacer != NULL) |
2584 | 0 | free(pacer); |
2585 | 0 | ptls_free(tls); |
2586 | 0 | free(conn); |
2587 | 0 | return NULL; |
2588 | 0 | } |
2589 | 0 | quicly_local_cid_init_set(&conn->super.local.cid_set, ctx->cid_encryptor, local_cid); |
2590 | 0 | conn->super.local.long_header_src_cid = conn->super.local.cid_set.cids[0].cid; |
2591 | 0 | quicly_remote_cid_init_set(&conn->super.remote.cid_set, remote_cid, ctx->tls->random_bytes); |
2592 | 0 | assert(conn->paths[0]->dcid == 0 && conn->super.remote.cid_set.cids[0].sequence == 0 && |
2593 | 0 | conn->super.remote.cid_set.cids[0].state == QUICLY_REMOTE_CID_IN_USE && "paths[0].dcid uses cids[0]"); |
2594 | 0 | conn->super.state = QUICLY_STATE_FIRSTFLIGHT; |
2595 | 0 | if (server_name != NULL) { |
2596 | 0 | conn->super.local.bidi.next_stream_id = 0; |
2597 | 0 | conn->super.local.uni.next_stream_id = 2; |
2598 | 0 | conn->super.remote.bidi.next_stream_id = 1; |
2599 | 0 | conn->super.remote.uni.next_stream_id = 3; |
2600 | 0 | } else { |
2601 | 0 | conn->super.local.bidi.next_stream_id = 1; |
2602 | 0 | conn->super.local.uni.next_stream_id = 3; |
2603 | 0 | conn->super.remote.bidi.next_stream_id = 0; |
2604 | 0 | conn->super.remote.uni.next_stream_id = 2; |
2605 | 0 | } |
2606 | 0 | conn->super.remote.transport_params = default_transport_params; |
2607 | 0 | conn->super.version = protocol_version; |
2608 | 0 | quicly_linklist_init(&conn->super._default_scheduler.active); |
2609 | 0 | quicly_linklist_init(&conn->super._default_scheduler.blocked); |
2610 | 0 | conn->streams = kh_init(quicly_stream_t); |
2611 | 0 | quicly_maxsender_init(&conn->ingress.max_data.sender, conn->super.ctx->transport_params.max_data); |
2612 | 0 | quicly_maxsender_init(&conn->ingress.max_streams.uni, conn->super.ctx->transport_params.max_streams_uni); |
2613 | 0 | quicly_maxsender_init(&conn->ingress.max_streams.bidi, conn->super.ctx->transport_params.max_streams_bidi); |
2614 | 0 | quicly_loss_init(&conn->egress.loss, &conn->super.ctx->loss, |
2615 | 0 | conn->super.ctx->loss.default_initial_rtt /* FIXME remember initial_rtt in session ticket */, |
2616 | 0 | &conn->super.remote.transport_params.max_ack_delay, &conn->super.remote.transport_params.ack_delay_exponent); |
2617 | 0 | conn->egress.next_pn_to_skip = |
2618 | 0 | calc_next_pn_to_skip(conn->super.ctx->tls, 0, initcwnd, conn->super.ctx->initial_egress_max_udp_payload_size); |
2619 | 0 | conn->egress.max_udp_payload_size = conn->super.ctx->initial_egress_max_udp_payload_size; |
2620 | 0 | init_max_streams(&conn->egress.max_streams.uni); |
2621 | 0 | init_max_streams(&conn->egress.max_streams.bidi); |
2622 | 0 | conn->egress.ack_frequency.update_at = INT64_MAX; |
2623 | 0 | conn->egress.send_ack_at = INT64_MAX; |
2624 | 0 | conn->egress.send_probe_at = INT64_MAX; |
2625 | 0 | conn->super.ctx->init_cc->cb(conn->super.ctx->init_cc, &conn->egress.cc, initcwnd, conn->stash.now); |
2626 | 0 | if (pacer != NULL) { |
2627 | 0 | conn->egress.pacer = pacer; |
2628 | 0 | quicly_pacer_reset(conn->egress.pacer); |
2629 | 0 | } |
2630 | 0 | conn->egress.ecn.state = conn->super.ctx->enable_ecn ? QUICLY_ECN_PROBING : QUICLY_ECN_OFF; |
2631 | 0 | quicly_retire_cid_init(&conn->egress.retire_cid); |
2632 | 0 | quicly_linklist_init(&conn->egress.pending_streams.blocked.uni); |
2633 | 0 | quicly_linklist_init(&conn->egress.pending_streams.blocked.bidi); |
2634 | 0 | quicly_linklist_init(&conn->egress.pending_streams.control); |
2635 | 0 | quicly_ratemeter_init(&conn->egress.ratemeter); |
2636 | 0 | conn->egress.try_jumpstart = 1; |
2637 | 0 | if (handshake_properties != NULL) { |
2638 | 0 | assert(handshake_properties->additional_extensions == NULL); |
2639 | 0 | assert(handshake_properties->collect_extension == NULL); |
2640 | 0 | assert(handshake_properties->collected_extensions == NULL); |
2641 | 0 | conn->crypto.handshake_properties = *handshake_properties; |
2642 | 0 | } else { |
2643 | 0 | conn->crypto.handshake_properties = (ptls_handshake_properties_t){{{{NULL}}}}; |
2644 | 0 | } |
2645 | 0 | conn->crypto.handshake_properties.collect_extension = collect_transport_parameters; |
2646 | 0 | conn->retry_scid.len = UINT8_MAX; |
2647 | 0 | conn->idle_timeout.at = INT64_MAX; |
2648 | 0 | conn->idle_timeout.should_rearm_on_send = 1; |
2649 | 0 | conn->stash.on_ack_stream.active_acked_cache.stream_id = INT64_MIN; |
2650 | |
|
2651 | 0 | *ptls_get_data_ptr(tls) = conn; |
2652 | |
|
2653 | 0 | update_open_count(conn->super.ctx, 1); |
2654 | |
|
2655 | 0 | return conn; |
2656 | 0 | } |
2657 | | |
/**
 * picotls `collected_extensions` callback (client side): decodes and validates the transport parameters received from
 * the server in EncryptedExtensions, authenticates the CIDs carried within them, re-validates 0-RTT limits when early
 * data was accepted, and finally installs the parameters as `conn->super.remote.transport_params`.
 */
static int client_collected_extensions(ptls_t *tls, ptls_handshake_properties_t *properties, ptls_raw_extension_t *slots)
{
    /* recover the connection embedding `properties` */
    quicly_conn_t *conn = (void *)((char *)properties - offsetof(quicly_conn_t, crypto.handshake_properties));
    quicly_error_t ret;

    assert(properties->client.early_data_acceptance != PTLS_EARLY_DATA_ACCEPTANCE_UNKNOWN);

    /* the TP extension is mandatory; slots[0].type == UINT16_MAX means it was absent */
    if (slots[0].type == UINT16_MAX) {
        ret = PTLS_ALERT_MISSING_EXTENSION;
        goto Exit;
    }
    assert(slots[0].type == get_transport_parameters_extension_id(conn->super.version));
    assert(slots[1].type == UINT16_MAX);

    const uint8_t *src = slots[0].data.base, *end = src + slots[0].data.len;
    quicly_transport_parameters_t params;
    quicly_cid_t original_dcid, initial_scid, retry_scid = {};

    /* obtain pointer to initial CID of the peer. It is guaranteed to exist in the first slot, as TP is received before any frame
     * that updates the CID set. */
    quicly_remote_cid_t *remote_cid = &conn->super.remote.cid_set.cids[0];
    assert(remote_cid->sequence == 0);

    /* decode; which CID TPs are required / ignored depends on whether this version authenticates CIDs and whether a
     * Retry was received */
    if ((ret = quicly_decode_transport_parameter_list(&params, needs_cid_auth(conn) || is_retry(conn) ? &original_dcid : NULL,
                                                      needs_cid_auth(conn) ? &initial_scid : &tp_cid_ignore,
                                                      needs_cid_auth(conn) ? is_retry(conn) ? &retry_scid : NULL : &tp_cid_ignore,
                                                      remote_cid->stateless_reset_token, src, end)) != 0)
        goto Exit;

    /* validate CIDs: each echoed CID must match what was actually seen on the wire */
    if (needs_cid_auth(conn) || is_retry(conn)) {
        if (!quicly_cid_is_equal(&conn->super.original_dcid, ptls_iovec_init(original_dcid.cid, original_dcid.len))) {
            ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
            goto Exit;
        }
    }
    if (needs_cid_auth(conn)) {
        if (!quicly_cid_is_equal(&remote_cid->cid, ptls_iovec_init(initial_scid.cid, initial_scid.len))) {
            ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
            goto Exit;
        }
        if (is_retry(conn)) {
            if (!quicly_cid_is_equal(&conn->retry_scid, ptls_iovec_init(retry_scid.cid, retry_scid.len))) {
                ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
                goto Exit;
            }
        }
    }

    /* when 0-RTT was accepted, the server must not have shrunk any of the limits that early data was sent under */
    if (properties->client.early_data_acceptance == PTLS_EARLY_DATA_ACCEPTED) {
#define ZERORTT_VALIDATE(x)                                                                                                        \
    if (params.x < conn->super.remote.transport_params.x) {                                                                        \
        ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;                                                                          \
        goto Exit;                                                                                                                 \
    }
        ZERORTT_VALIDATE(max_data);
        ZERORTT_VALIDATE(max_stream_data.bidi_local);
        ZERORTT_VALIDATE(max_stream_data.bidi_remote);
        ZERORTT_VALIDATE(max_stream_data.uni);
        ZERORTT_VALIDATE(max_streams_bidi);
        ZERORTT_VALIDATE(max_streams_uni);
#undef ZERORTT_VALIDATE
    }

    /* store the results */
    conn->super.remote.transport_params = params;
    ack_frequency_set_next_update_at(conn);

Exit:
    return compress_handshake_result(ret);
}
2730 | | |
/**
 * Initiates a client connection: creates the connection object, installs Initial encryption keys, encodes the local
 * transport parameters as a TLS extension, and runs the handshake far enough to generate the first flight
 * (ClientHello). `address_token`, when non-empty, is an address validation token from a prior connection (copied).
 * `resumed_transport_params`, when non-NULL, enables 0-RTT by applying the remembered server limits. On success 0 is
 * returned and `*_conn` set; on failure the partially constructed connection is freed and an error code returned.
 */
quicly_error_t quicly_connect(quicly_conn_t **_conn, quicly_context_t *ctx, const char *server_name, struct sockaddr *dest_addr,
                              struct sockaddr *src_addr, const quicly_cid_plaintext_t *new_cid, ptls_iovec_t address_token,
                              ptls_handshake_properties_t *handshake_properties,
                              const quicly_transport_parameters_t *resumed_transport_params, void *appdata)
{
    const quicly_salt_t *salt;
    quicly_conn_t *conn = NULL;
    const quicly_cid_t *server_cid;
    ptls_buffer_t buf;
    size_t epoch_offsets[5] = {0};
    size_t max_early_data_size = 0;
    quicly_error_t ret;

    /* determine the Initial-secret salt for the requested version */
    if ((salt = quicly_get_salt(ctx->initial_version)) == NULL) {
        if ((ctx->initial_version & 0x0f0f0f0f) == 0x0a0a0a0a) {
            /* greasing version, use our own greasing salt */
            static const quicly_salt_t grease_salt = {.initial = {0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad,
                                                                  0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef}};
            salt = &grease_salt;
        } else {
            ret = QUICLY_ERROR_INVALID_INITIAL_VERSION;
            goto Exit;
        }
    }

    if ((conn = create_connection(
             ctx, ctx->initial_version, server_name, dest_addr, src_addr, NULL, new_cid, handshake_properties, appdata,
             quicly_cc_calc_initial_cwnd(ctx->initcwnd_packets, ctx->transport_params.max_udp_payload_size))) == NULL) {
        ret = PTLS_ERROR_NO_MEMORY;
        goto Exit;
    }
    /* as the initiator, the client treats the server's address as validated from the start */
    conn->super.remote.address_validation.validated = 1;
    conn->super.remote.address_validation.send_probe = 1;
    if (address_token.len != 0) {
        /* take a private copy of the caller-supplied address validation token */
        if ((conn->token.base = malloc(address_token.len)) == NULL) {
            ret = PTLS_ERROR_NO_MEMORY;
            goto Exit;
        }
        memcpy(conn->token.base, address_token.base, address_token.len);
        conn->token.len = address_token.len;
    }
    /* remember the DCID used for the first Initial; the server echoes it in its original_destination_connection_id TP */
    server_cid = quicly_get_remote_cid(conn);
    conn->super.original_dcid = *server_cid;

    QUICLY_PROBE(CONNECT, conn, conn->stash.now, conn->super.version);
    QUICLY_LOG_CONN(connect, conn, { PTLS_LOG_ELEMENT_UNSIGNED(version, conn->super.version); });

    /* install Initial packet protection keys derived from the DCID and the version-specific salt */
    if ((ret = setup_handshake_space_and_flow(conn, QUICLY_EPOCH_INITIAL)) != 0)
        goto Exit;
    if ((ret = setup_initial_encryption(get_aes128gcmsha256(ctx), &conn->initial->cipher.ingress, &conn->initial->cipher.egress,
                                        ptls_iovec_init(server_cid->cid, server_cid->len), 1,
                                        ptls_iovec_init(salt->initial, sizeof(salt->initial)), conn)) != 0)
        goto Exit;

    /* handshake (we always encode authentication CIDs, as we do not (yet) regenerate ClientHello when receiving Retry) */
    ptls_buffer_init(&conn->crypto.transport_params.buf, "", 0);
    if ((ret = quicly_encode_transport_parameter_list(
             &conn->crypto.transport_params.buf, &conn->super.ctx->transport_params, NULL, &conn->super.local.cid_set.cids[0].cid,
             NULL, NULL, conn->super.ctx->expand_client_hello ? conn->super.ctx->initial_egress_max_udp_payload_size : 0)) != 0)
        goto Exit;
    /* advertise the TPs under both the final and the draft codepoints; collect_transport_parameters accepts whichever
     * matches the negotiated version */
    conn->crypto.transport_params.ext[0] =
        (ptls_raw_extension_t){QUICLY_TLS_EXTENSION_TYPE_TRANSPORT_PARAMETERS_FINAL,
                               {conn->crypto.transport_params.buf.base, conn->crypto.transport_params.buf.off}};
    conn->crypto.transport_params.ext[1] =
        (ptls_raw_extension_t){QUICLY_TLS_EXTENSION_TYPE_TRANSPORT_PARAMETERS_DRAFT,
                               {conn->crypto.transport_params.buf.base, conn->crypto.transport_params.buf.off}};
    conn->crypto.transport_params.ext[2] = (ptls_raw_extension_t){UINT16_MAX}; /* list terminator */
    conn->crypto.handshake_properties.additional_extensions = conn->crypto.transport_params.ext;
    conn->crypto.handshake_properties.collected_extensions = client_collected_extensions;

    /* run the TLS machine to produce the ClientHello; PTLS_ERROR_IN_PROGRESS is the expected outcome */
    ptls_buffer_init(&buf, "", 0);
    if (resumed_transport_params != NULL)
        conn->crypto.handshake_properties.client.max_early_data_size = &max_early_data_size;
    ret = expand_handshake_result(
        ptls_handle_message(conn->crypto.tls, &buf, epoch_offsets, 0, NULL, 0, &conn->crypto.handshake_properties));
    conn->crypto.handshake_properties.client.max_early_data_size = NULL;
    if (ret != PTLS_ERROR_IN_PROGRESS) {
        assert(ret > 0); /* no QUIC errors */
        goto Exit;
    }
    write_crypto_data(conn, &buf, epoch_offsets);
    ptls_buffer_dispose(&buf);

    if (max_early_data_size != 0) {
        /* when attempting 0-RTT, apply the remembered transport parameters */
#define APPLY(n) conn->super.remote.transport_params.n = resumed_transport_params->n
        APPLY(active_connection_id_limit);
        APPLY(max_data);
        APPLY(max_stream_data.bidi_local);
        APPLY(max_stream_data.bidi_remote);
        APPLY(max_stream_data.uni);
        APPLY(max_streams_bidi);
        APPLY(max_streams_uni);
#undef APPLY
        if ((ret = apply_remote_transport_params(conn)) != 0)
            goto Exit;
    }

    *_conn = conn;
    ret = 0;

Exit:
    /* the connection is returned by create_connection with its now-lock held; release it here in all cases */
    if (conn != NULL)
        unlock_now(conn);
    if (ret != 0) {
        if (conn != NULL)
            quicly_free(conn);
    }
    return ret;
}
2841 | | |
/* picotls `collected_extensions` callback used on the server side. Invoked once the ClientHello extensions have been parsed;
 * decodes the client's QUIC transport_parameters extension, validates the authenticated connection IDs, adjusts the egress UDP
 * payload size, and registers the server's own transport_parameters extension so that it is sent in EncryptedExtensions. Returns
 * a ptls-compatible error code (see compress_handshake_result). */
static int server_collected_extensions(ptls_t *tls, ptls_handshake_properties_t *properties, ptls_raw_extension_t *slots)
{
    /* recover the connection object from the address of the embedded handshake_properties member */
    quicly_conn_t *conn = (void *)((char *)properties - offsetof(quicly_conn_t, crypto.handshake_properties));
    quicly_cid_t initial_scid;
    quicly_error_t ret;

    /* the transport_parameters extension is mandatory in QUIC; slot type UINT16_MAX marks "not present" */
    if (slots[0].type == UINT16_MAX) {
        ret = PTLS_ALERT_MISSING_EXTENSION;
        goto Exit;
    }
    assert(slots[0].type == get_transport_parameters_extension_id(conn->super.version));
    assert(slots[1].type == UINT16_MAX);

    { /* decode transport_parameters extension */
        const uint8_t *src = slots[0].data.base, *end = src + slots[0].data.len;
        /* when CIDs are authenticated, capture initial_source_connection_id and reject original/retry SCID params (NULL);
         * otherwise decode them into a throw-away slot (tp_cid_ignore) */
        if ((ret = quicly_decode_transport_parameter_list(&conn->super.remote.transport_params,
                                                          needs_cid_auth(conn) ? NULL : &tp_cid_ignore,
                                                          needs_cid_auth(conn) ? &initial_scid : &tp_cid_ignore,
                                                          needs_cid_auth(conn) ? NULL : &tp_cid_ignore, NULL, src, end)) != 0)
            goto Exit;
        /* initial_source_connection_id must match the SCID carried by the first Initial packet we received */
        if (needs_cid_auth(conn) &&
            !quicly_cid_is_equal(&conn->super.remote.cid_set.cids[0].cid, ptls_iovec_init(initial_scid.cid, initial_scid.len))) {
            ret = QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
            goto Exit;
        }
    }

    /* setup ack frequency */
    ack_frequency_set_next_update_at(conn);

    /* update UDP max payload size to:
     * max(current, min(max_the_remote_sent, remote.tp.max_udp_payload_size, local.tp.max_udp_payload_size)) */
    assert(conn->initial != NULL);
    if (conn->egress.max_udp_payload_size < conn->initial->largest_ingress_udp_payload_size) {
        uint16_t size = conn->initial->largest_ingress_udp_payload_size;
        if (size > conn->super.remote.transport_params.max_udp_payload_size)
            size = conn->super.remote.transport_params.max_udp_payload_size;
        if (size > conn->super.ctx->transport_params.max_udp_payload_size)
            size = conn->super.ctx->transport_params.max_udp_payload_size;
        conn->egress.max_udp_payload_size = size;
    }

    /* set transport_parameters extension to be sent in EE */
    assert(properties->additional_extensions == NULL);
    ptls_buffer_init(&conn->crypto.transport_params.buf, "", 0);
    assert(conn->super.local.cid_set.cids[0].sequence == 0 && "make sure that local_cid is in expected state before sending SRT");
    if ((ret = quicly_encode_transport_parameter_list(
             &conn->crypto.transport_params.buf, &conn->super.ctx->transport_params,
             needs_cid_auth(conn) || is_retry(conn) ? &conn->super.original_dcid : NULL,
             needs_cid_auth(conn) ? &conn->super.local.cid_set.cids[0].cid : NULL,
             needs_cid_auth(conn) && is_retry(conn) ? &conn->retry_scid : NULL,
             conn->super.ctx->cid_encryptor != NULL ? conn->super.local.cid_set.cids[0].stateless_reset_token : NULL, 0)) != 0)
        goto Exit;
    /* publish the encoded extension (terminated by an UINT16_MAX sentinel slot) to picotls */
    properties->additional_extensions = conn->crypto.transport_params.ext;
    conn->crypto.transport_params.ext[0] =
        (ptls_raw_extension_t){get_transport_parameters_extension_id(conn->super.version),
                               {conn->crypto.transport_params.buf.base, conn->crypto.transport_params.buf.off}};
    conn->crypto.transport_params.ext[1] = (ptls_raw_extension_t){UINT16_MAX};
    conn->crypto.handshake_properties.additional_extensions = conn->crypto.transport_params.ext;

    ret = 0;

Exit:
    /* map the quicly error space onto the int that picotls expects */
    return compress_handshake_result(ret);
}
2907 | | |
2908 | | static size_t aead_decrypt_core(ptls_aead_context_t *aead, uint64_t pn, quicly_decoded_packet_t *packet, size_t aead_off) |
2909 | 0 | { |
2910 | 0 | return ptls_aead_decrypt(aead, packet->octets.base + aead_off, packet->octets.base + aead_off, packet->octets.len - aead_off, |
2911 | 0 | pn, packet->octets.base, aead_off); |
2912 | 0 | } |
2913 | | |
2914 | | static int aead_decrypt_fixed_key(void *ctx, uint64_t pn, quicly_decoded_packet_t *packet, size_t aead_off, size_t *ptlen) |
2915 | 0 | { |
2916 | 0 | ptls_aead_context_t *aead = ctx; |
2917 | |
|
2918 | 0 | if ((*ptlen = aead_decrypt_core(aead, pn, packet, aead_off)) == SIZE_MAX) |
2919 | 0 | return QUICLY_ERROR_PACKET_IGNORED; |
2920 | 0 | return 0; |
2921 | 0 | } |
2922 | | |
/* AEAD-decrypt callback for 1-RTT packets. Handles the QUIC key-update mechanism: the key-phase bit of the first octet selects
 * one of two AEAD slots; missing keys are derived lazily, and a decryption failure with a plausibly-updated phase triggers one
 * retry with the next-generation key. On success stores plaintext length in `*ptlen` and returns 0. */
static int aead_decrypt_1rtt(void *ctx, uint64_t pn, quicly_decoded_packet_t *packet, size_t aead_off, size_t *ptlen)
{
    quicly_conn_t *conn = ctx;
    struct st_quicly_application_space_t *space = conn->application;
    /* slot index is the key-phase bit of the (already deprotected) first octet */
    size_t aead_index = (packet->octets.base[0] & QUICLY_KEY_PHASE_BIT) != 0;
    int ret;

    /* prepare key, when not available (yet) */
    if (space->cipher.ingress.aead[aead_index] == NULL) {
        Retry_1RTT: {
        /* Replace the AEAD key at the alternative slot (note: decryption key slots are shared by 0-RTT and 1-RTT), at the same time
         * dropping 0-RTT header protection key. */
        if (conn->application->cipher.ingress.header_protection.zero_rtt != NULL) {
            ptls_cipher_free(conn->application->cipher.ingress.header_protection.zero_rtt);
            conn->application->cipher.ingress.header_protection.zero_rtt = NULL;
        }
        ptls_cipher_suite_t *cipher = ptls_get_cipher(conn->crypto.tls);
        /* derive the next-generation read key into the selected slot, ratcheting the stored secret */
        if ((ret = update_1rtt_key(conn, cipher, 0, &space->cipher.ingress.aead[aead_index], space->cipher.ingress.secret)) != 0)
            return ret;
        ++space->cipher.ingress.key_phase.prepared;
        QUICLY_PROBE(CRYPTO_RECEIVE_KEY_UPDATE_PREPARE, conn, conn->stash.now, space->cipher.ingress.key_phase.prepared,
                     QUICLY_PROBE_HEXDUMP(space->cipher.ingress.secret, cipher->hash->digest_size));
        QUICLY_LOG_CONN(crypto_receive_key_update_prepare, conn, {
            PTLS_LOG_ELEMENT_UNSIGNED(phase, space->cipher.ingress.key_phase.prepared);
            PTLS_LOG_APPDATA_ELEMENT_HEXDUMP(secret, space->cipher.ingress.secret, cipher->hash->digest_size);
        });
        }
    }

    /* decrypt */
    ptls_aead_context_t *aead = space->cipher.ingress.aead[aead_index];
    if ((*ptlen = aead_decrypt_core(aead, pn, packet, aead_off)) == SIZE_MAX) {
        /* retry with a new key, if possible: only when no update is already pending and the packet's phase bit points at the
         * slot that the *next* phase would use */
        if (space->cipher.ingress.key_phase.decrypted == space->cipher.ingress.key_phase.prepared &&
            space->cipher.ingress.key_phase.decrypted % 2 != aead_index) {
            /* reapply AEAD to revert payload to the encrypted form. This assumes that the cipher used in AEAD is CTR. */
            aead_decrypt_core(aead, pn, packet, aead_off);
            goto Retry_1RTT;
        }
        /* otherwise return failure */
        return QUICLY_ERROR_PACKET_IGNORED;
    }

    /* update the confirmed key phase and also the egress key phase, if necessary */
    if (space->cipher.ingress.key_phase.prepared != space->cipher.ingress.key_phase.decrypted &&
        space->cipher.ingress.key_phase.prepared % 2 == aead_index) {
        if ((ret = received_key_update(conn, space->cipher.ingress.key_phase.prepared)) != 0)
            return ret;
    }

    return 0;
}
2975 | | |
/* Removes header protection and AEAD-decrypts a packet in-place. On success, sets `*pn` to the reconstructed full packet number,
 * advances `*next_expected_pn`, and points `*payload` at the decrypted frames. On failure returns QUICLY_ERROR_PACKET_IGNORED
 * (with `*pn` set to UINT64_MAX when even the header could not be deprotected). */
static quicly_error_t do_decrypt_packet(ptls_cipher_context_t *header_protection,
                                        int (*aead_cb)(void *, uint64_t, quicly_decoded_packet_t *, size_t, size_t *),
                                        void *aead_ctx, uint64_t *next_expected_pn, quicly_decoded_packet_t *packet, uint64_t *pn,
                                        ptls_iovec_t *payload)
{
    size_t encrypted_len = packet->octets.len - packet->encrypted_off;
    uint8_t hpmask[5] = {0};
    uint32_t pnbits = 0;
    size_t pnlen, ptlen, i;

    /* decipher the header protection, as well as obtaining pnbits, pnlen */
    if (encrypted_len < header_protection->algo->iv_size + QUICLY_MAX_PN_SIZE) {
        *pn = UINT64_MAX;
        return QUICLY_ERROR_PACKET_IGNORED;
    }
    /* the HP sample starts QUICLY_MAX_PN_SIZE bytes past the start of the (potentially shorter) packet number field */
    ptls_cipher_init(header_protection, packet->octets.base + packet->encrypted_off + QUICLY_MAX_PN_SIZE);
    ptls_cipher_encrypt(header_protection, hpmask, hpmask, sizeof(hpmask));
    /* unmask the protected bits of the first octet (4 bits for long header, 5 for short) */
    packet->octets.base[0] ^= hpmask[0] & (QUICLY_PACKET_IS_LONG_HEADER(packet->octets.base[0]) ? 0xf : 0x1f);
    /* low 2 bits of the first octet encode (pn length - 1) */
    pnlen = (packet->octets.base[0] & 0x3) + 1;
    for (i = 0; i != pnlen; ++i) {
        packet->octets.base[packet->encrypted_off + i] ^= hpmask[i + 1];
        pnbits = (pnbits << 8) | packet->octets.base[packet->encrypted_off + i];
    }

    size_t aead_off = packet->encrypted_off + pnlen;
    /* expand the truncated packet number to the full 62-bit value closest to what we expect next */
    *pn = quicly_determine_packet_number(pnbits, pnlen * 8, *next_expected_pn);

    /* AEAD decryption */
    int ret;
    if ((ret = (*aead_cb)(aead_ctx, *pn, packet, aead_off, &ptlen)) != 0) {
        return ret;
    }
    /* only advance next_expected_pn after authentication succeeds */
    if (*next_expected_pn <= *pn)
        *next_expected_pn = *pn + 1;

    *payload = ptls_iovec_init(packet->octets.base + aead_off, ptlen);
    return 0;
}
3014 | | |
3015 | | static quicly_error_t decrypt_packet(ptls_cipher_context_t *header_protection, |
3016 | | int (*aead_cb)(void *, uint64_t, quicly_decoded_packet_t *, size_t, size_t *), void *aead_ctx, |
3017 | | uint64_t *next_expected_pn, quicly_decoded_packet_t *packet, uint64_t *pn, |
3018 | | ptls_iovec_t *payload) |
3019 | 0 | { |
3020 | 0 | quicly_error_t ret; |
3021 | | |
3022 | | /* decrypt ourselves, or use the pre-decrypted input */ |
3023 | 0 | if (packet->decrypted.pn == UINT64_MAX) { |
3024 | 0 | if ((ret = do_decrypt_packet(header_protection, aead_cb, aead_ctx, next_expected_pn, packet, pn, payload)) != 0) |
3025 | 0 | return ret; |
3026 | 0 | } else { |
3027 | 0 | *payload = ptls_iovec_init(packet->octets.base + packet->encrypted_off, packet->octets.len - packet->encrypted_off); |
3028 | 0 | *pn = packet->decrypted.pn; |
3029 | 0 | if (aead_cb == aead_decrypt_1rtt) { |
3030 | 0 | quicly_conn_t *conn = aead_ctx; |
3031 | 0 | if (conn->application->cipher.ingress.key_phase.decrypted < packet->decrypted.key_phase) { |
3032 | 0 | if ((ret = received_key_update(conn, packet->decrypted.key_phase)) != 0) |
3033 | 0 | return ret; |
3034 | 0 | } |
3035 | 0 | } |
3036 | 0 | if (*next_expected_pn < *pn) |
3037 | 0 | *next_expected_pn = *pn + 1; |
3038 | 0 | } |
3039 | | |
3040 | | /* check reserved bits after AEAD decryption */ |
3041 | 0 | if ((packet->octets.base[0] & (QUICLY_PACKET_IS_LONG_HEADER(packet->octets.base[0]) ? QUICLY_LONG_HEADER_RESERVED_BITS |
3042 | 0 | : QUICLY_SHORT_HEADER_RESERVED_BITS)) != |
3043 | 0 | 0) { |
3044 | 0 | return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION; |
3045 | 0 | } |
3046 | 0 | if (payload->len == 0) { |
3047 | 0 | return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION; |
3048 | 0 | } |
3049 | | |
3050 | 0 | return 0; |
3051 | 0 | } |
3052 | | |
3053 | | static quicly_error_t do_on_ack_ack(quicly_conn_t *conn, const quicly_sent_packet_t *packet, uint64_t start, uint64_t start_length, |
3054 | | struct st_quicly_sent_ack_additional_t *additional, size_t additional_capacity) |
3055 | 0 | { |
3056 | | /* find the pn space */ |
3057 | 0 | struct st_quicly_pn_space_t *space; |
3058 | 0 | switch (packet->ack_epoch) { |
3059 | 0 | case QUICLY_EPOCH_INITIAL: |
3060 | 0 | space = &conn->initial->super; |
3061 | 0 | break; |
3062 | 0 | case QUICLY_EPOCH_HANDSHAKE: |
3063 | 0 | space = &conn->handshake->super; |
3064 | 0 | break; |
3065 | 0 | case QUICLY_EPOCH_1RTT: |
3066 | 0 | space = &conn->application->super; |
3067 | 0 | break; |
3068 | 0 | default: |
3069 | 0 | assert(!"FIXME"); |
3070 | 0 | return QUICLY_TRANSPORT_ERROR_INTERNAL; |
3071 | 0 | } |
3072 | | |
3073 | | /* subtract given ACK ranges */ |
3074 | 0 | int ret; |
3075 | 0 | uint64_t end = start + start_length; |
3076 | 0 | if ((ret = quicly_ranges_subtract(&space->ack_queue, start, end)) != 0) |
3077 | 0 | return ret; |
3078 | 0 | for (size_t i = 0; i < additional_capacity && additional[i].gap != 0; ++i) { |
3079 | 0 | start = end + additional[i].gap; |
3080 | 0 | end = start + additional[i].length; |
3081 | 0 | if ((ret = quicly_ranges_subtract(&space->ack_queue, start, end)) != 0) |
3082 | 0 | return ret; |
3083 | 0 | } |
3084 | | |
3085 | | /* make adjustments */ |
3086 | 0 | if (space->ack_queue.num_ranges == 0) { |
3087 | 0 | space->largest_pn_received_at = INT64_MAX; |
3088 | 0 | space->unacked_count = 0; |
3089 | 0 | } else if (space->ack_queue.num_ranges > QUICLY_MAX_ACK_BLOCKS) { |
3090 | 0 | quicly_ranges_drop_by_range_indices(&space->ack_queue, space->ack_queue.num_ranges - QUICLY_MAX_ACK_BLOCKS, |
3091 | 0 | space->ack_queue.num_ranges); |
3092 | 0 | } |
3093 | |
|
3094 | 0 | return 0; |
3095 | 0 | } |
3096 | | |
3097 | | static quicly_error_t on_ack_ack_ranges64(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent) |
3098 | 0 | { |
3099 | 0 | quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap)); |
3100 | | |
3101 | | /* TODO log */ |
3102 | |
|
3103 | 0 | return acked ? do_on_ack_ack(conn, packet, sent->data.ack.start, sent->data.ack.ranges64.start_length, |
3104 | 0 | sent->data.ack.ranges64.additional, PTLS_ELEMENTSOF(sent->data.ack.ranges64.additional)) |
3105 | 0 | : 0; |
3106 | 0 | } |
3107 | | |
3108 | | static quicly_error_t on_ack_ack_ranges8(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent) |
3109 | 0 | { |
3110 | 0 | quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap)); |
3111 | | |
3112 | | /* TODO log */ |
3113 | |
|
3114 | 0 | return acked ? do_on_ack_ack(conn, packet, sent->data.ack.start, sent->data.ack.ranges8.start_length, |
3115 | 0 | sent->data.ack.ranges8.additional, PTLS_ELEMENTSOF(sent->data.ack.ranges8.additional)) |
3116 | 0 | : 0; |
3117 | 0 | } |
3118 | | |
3119 | | static quicly_error_t on_ack_stream_ack_one(quicly_conn_t *conn, quicly_stream_id_t stream_id, quicly_sendstate_sent_t *sent) |
3120 | 0 | { |
3121 | 0 | quicly_stream_t *stream; |
3122 | |
|
3123 | 0 | if ((stream = quicly_get_stream(conn, stream_id)) == NULL) |
3124 | 0 | return 0; |
3125 | | |
3126 | 0 | size_t bytes_to_shift; |
3127 | 0 | int ret; |
3128 | 0 | if ((ret = quicly_sendstate_acked(&stream->sendstate, sent, &bytes_to_shift)) != 0) |
3129 | 0 | return ret; |
3130 | 0 | if (bytes_to_shift != 0) { |
3131 | 0 | QUICLY_PROBE(STREAM_ON_SEND_SHIFT, stream->conn, stream->conn->stash.now, stream, bytes_to_shift); |
3132 | 0 | stream->callbacks->on_send_shift(stream, bytes_to_shift); |
3133 | 0 | QUICLY_LOG_CONN(stream_on_send_shift, stream->conn, { |
3134 | 0 | PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id); |
3135 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(delta, bytes_to_shift); |
3136 | 0 | }); |
3137 | 0 | } |
3138 | 0 | if (stream_is_destroyable(stream)) { |
3139 | 0 | destroy_stream(stream, 0); |
3140 | 0 | } else if (stream->_send_aux.reset_stream.sender_state == QUICLY_SENDER_STATE_NONE) { |
3141 | 0 | resched_stream_data(stream); |
3142 | 0 | } |
3143 | |
|
3144 | 0 | return 0; |
3145 | 0 | } |
3146 | | |
3147 | | static quicly_error_t on_ack_stream_ack_cached(quicly_conn_t *conn) |
3148 | 0 | { |
3149 | 0 | if (conn->stash.on_ack_stream.active_acked_cache.stream_id == INT64_MIN) |
3150 | 0 | return 0; |
3151 | 0 | quicly_error_t ret = on_ack_stream_ack_one(conn, conn->stash.on_ack_stream.active_acked_cache.stream_id, |
3152 | 0 | &conn->stash.on_ack_stream.active_acked_cache.args); |
3153 | 0 | conn->stash.on_ack_stream.active_acked_cache.stream_id = INT64_MIN; |
3154 | 0 | return ret; |
3155 | 0 | } |
3156 | | |
/* Sentmap callback for STREAM frames. On ack, either coalesces the acked range into a per-connection cache (fast path for
 * consecutive ranges of the same stream) or submits it to the stream's send state; on loss, marks the range for retransmission. */
static quicly_error_t on_ack_stream(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent)
{
    quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap));
    quicly_error_t ret;

    if (acked) {

        QUICLY_PROBE(STREAM_ACKED, conn, conn->stash.now, sent->data.stream.stream_id, sent->data.stream.args.start,
                     sent->data.stream.args.end - sent->data.stream.args.start);
        QUICLY_LOG_CONN(stream_acked, conn, {
            PTLS_LOG_ELEMENT_SIGNED(stream_id, sent->data.stream.stream_id);
            PTLS_LOG_ELEMENT_UNSIGNED(off, sent->data.stream.args.start);
            PTLS_LOG_ELEMENT_UNSIGNED(len, sent->data.stream.args.end - sent->data.stream.args.start);
        });

        /* caching is used only while more frames of the same packet remain in flight (frames_in_flight) */
        if (packet->frames_in_flight && conn->stash.on_ack_stream.active_acked_cache.stream_id == sent->data.stream.stream_id &&
            conn->stash.on_ack_stream.active_acked_cache.args.end == sent->data.stream.args.start) {
            /* Fast path: append the newly supplied range to the existing cached range. */
            conn->stash.on_ack_stream.active_acked_cache.args.end = sent->data.stream.args.end;
        } else {
            /* Slow path: submit the cached range, and if possible, cache the newly supplied range. Else submit the newly supplied
             * range directly. */
            if ((ret = on_ack_stream_ack_cached(conn)) != 0)
                return ret;
            if (packet->frames_in_flight) {
                conn->stash.on_ack_stream.active_acked_cache.stream_id = sent->data.stream.stream_id;
                conn->stash.on_ack_stream.active_acked_cache.args = sent->data.stream.args;
            } else {
                if ((ret = on_ack_stream_ack_one(conn, sent->data.stream.stream_id, &sent->data.stream.args)) != 0)
                    return ret;
            }
        }

    } else {

        QUICLY_PROBE(STREAM_LOST, conn, conn->stash.now, sent->data.stream.stream_id, sent->data.stream.args.start,
                     sent->data.stream.args.end - sent->data.stream.args.start);
        QUICLY_LOG_CONN(stream_lost, conn, {
            PTLS_LOG_ELEMENT_SIGNED(stream_id, sent->data.stream.stream_id);
            PTLS_LOG_ELEMENT_UNSIGNED(off, sent->data.stream.args.start);
            PTLS_LOG_ELEMENT_UNSIGNED(len, sent->data.stream.args.end - sent->data.stream.args.start);
        });

        quicly_stream_t *stream;
        /* the stream may already have been destroyed */
        if ((stream = quicly_get_stream(conn, sent->data.stream.stream_id)) == NULL)
            return 0;
        /* FIXME handle rto error */
        if ((ret = quicly_sendstate_lost(&stream->sendstate, &sent->data.stream.args)) != 0)
            return ret;
        if (stream->_send_aux.reset_stream.sender_state == QUICLY_SENDER_STATE_NONE)
            resched_stream_data(stream);
    }

    return 0;
}
3212 | | |
3213 | | static quicly_error_t on_ack_max_stream_data(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, |
3214 | | quicly_sent_t *sent) |
3215 | 0 | { |
3216 | 0 | quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap)); |
3217 | 0 | quicly_stream_t *stream; |
3218 | |
|
3219 | 0 | if ((stream = quicly_get_stream(conn, sent->data.stream.stream_id)) != NULL) { |
3220 | 0 | if (acked) { |
3221 | 0 | quicly_maxsender_acked(&stream->_send_aux.max_stream_data_sender, &sent->data.max_stream_data.args); |
3222 | 0 | } else { |
3223 | 0 | quicly_maxsender_lost(&stream->_send_aux.max_stream_data_sender, &sent->data.max_stream_data.args); |
3224 | 0 | if (should_send_max_stream_data(stream)) |
3225 | 0 | sched_stream_control(stream); |
3226 | 0 | } |
3227 | 0 | } |
3228 | |
|
3229 | 0 | return 0; |
3230 | 0 | } |
3231 | | |
3232 | | static quicly_error_t on_ack_max_data(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent) |
3233 | 0 | { |
3234 | 0 | quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap)); |
3235 | |
|
3236 | 0 | if (acked) { |
3237 | 0 | quicly_maxsender_acked(&conn->ingress.max_data.sender, &sent->data.max_data.args); |
3238 | 0 | } else { |
3239 | 0 | quicly_maxsender_lost(&conn->ingress.max_data.sender, &sent->data.max_data.args); |
3240 | 0 | } |
3241 | |
|
3242 | 0 | return 0; |
3243 | 0 | } |
3244 | | |
3245 | | static quicly_error_t on_ack_max_streams(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent) |
3246 | 0 | { |
3247 | 0 | quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap)); |
3248 | 0 | quicly_maxsender_t *maxsender = sent->data.max_streams.uni ? &conn->ingress.max_streams.uni : &conn->ingress.max_streams.bidi; |
3249 | 0 | assert(maxsender != NULL); /* we would only receive an ACK if we have sent the frame */ |
3250 | | |
3251 | 0 | if (acked) { |
3252 | 0 | quicly_maxsender_acked(maxsender, &sent->data.max_streams.args); |
3253 | 0 | } else { |
3254 | 0 | quicly_maxsender_lost(maxsender, &sent->data.max_streams.args); |
3255 | 0 | } |
3256 | |
|
3257 | 0 | return 0; |
3258 | 0 | } |
3259 | | |
3260 | | static void on_ack_stream_state_sender(quicly_sender_state_t *sender_state, int acked) |
3261 | 0 | { |
3262 | 0 | *sender_state = acked ? QUICLY_SENDER_STATE_ACKED : QUICLY_SENDER_STATE_SEND; |
3263 | 0 | } |
3264 | | |
3265 | | static quicly_error_t on_ack_reset_stream(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent) |
3266 | 0 | { |
3267 | 0 | quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap)); |
3268 | 0 | quicly_stream_t *stream; |
3269 | |
|
3270 | 0 | if ((stream = quicly_get_stream(conn, sent->data.stream_state_sender.stream_id)) != NULL) { |
3271 | 0 | on_ack_stream_state_sender(&stream->_send_aux.reset_stream.sender_state, acked); |
3272 | 0 | if (stream_is_destroyable(stream)) |
3273 | 0 | destroy_stream(stream, 0); |
3274 | 0 | } |
3275 | |
|
3276 | 0 | return 0; |
3277 | 0 | } |
3278 | | |
3279 | | static quicly_error_t on_ack_stop_sending(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent) |
3280 | 0 | { |
3281 | 0 | quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap)); |
3282 | 0 | quicly_stream_t *stream; |
3283 | |
|
3284 | 0 | if ((stream = quicly_get_stream(conn, sent->data.stream_state_sender.stream_id)) != NULL) { |
3285 | 0 | on_ack_stream_state_sender(&stream->_send_aux.stop_sending.sender_state, acked); |
3286 | 0 | if (stream->_send_aux.stop_sending.sender_state != QUICLY_SENDER_STATE_ACKED) |
3287 | 0 | sched_stream_control(stream); |
3288 | 0 | } |
3289 | |
|
3290 | 0 | return 0; |
3291 | 0 | } |
3292 | | |
3293 | | static quicly_error_t on_ack_streams_blocked(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, |
3294 | | quicly_sent_t *sent) |
3295 | 0 | { |
3296 | 0 | quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap)); |
3297 | 0 | struct st_quicly_max_streams_t *m = |
3298 | 0 | sent->data.streams_blocked.uni ? &conn->egress.max_streams.uni : &conn->egress.max_streams.bidi; |
3299 | |
|
3300 | 0 | if (acked) { |
3301 | 0 | quicly_maxsender_acked(&m->blocked_sender, &sent->data.streams_blocked.args); |
3302 | 0 | } else { |
3303 | 0 | quicly_maxsender_lost(&m->blocked_sender, &sent->data.streams_blocked.args); |
3304 | 0 | } |
3305 | |
|
3306 | 0 | return 0; |
3307 | 0 | } |
3308 | | |
3309 | | static quicly_error_t on_ack_handshake_done(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, |
3310 | | quicly_sent_t *sent) |
3311 | 0 | { |
3312 | 0 | quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap)); |
3313 | | |
3314 | | /* When lost, reschedule for transmission. When acked, suppress retransmission if scheduled. */ |
3315 | 0 | if (acked) { |
3316 | 0 | conn->egress.pending_flows &= ~QUICLY_PENDING_FLOW_HANDSHAKE_DONE_BIT; |
3317 | 0 | } else { |
3318 | 0 | conn->egress.pending_flows |= QUICLY_PENDING_FLOW_HANDSHAKE_DONE_BIT; |
3319 | 0 | } |
3320 | 0 | return 0; |
3321 | 0 | } |
3322 | | |
3323 | | static quicly_error_t on_ack_data_blocked(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent) |
3324 | 0 | { |
3325 | 0 | quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap)); |
3326 | |
|
3327 | 0 | if (conn->egress.max_data.permitted == sent->data.data_blocked.offset) { |
3328 | 0 | if (acked) { |
3329 | 0 | conn->egress.data_blocked = QUICLY_SENDER_STATE_ACKED; |
3330 | 0 | } else if (packet->frames_in_flight && conn->egress.data_blocked == QUICLY_SENDER_STATE_UNACKED) { |
3331 | 0 | conn->egress.data_blocked = QUICLY_SENDER_STATE_SEND; |
3332 | 0 | conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT; |
3333 | 0 | } |
3334 | 0 | } |
3335 | |
|
3336 | 0 | return 0; |
3337 | 0 | } |
3338 | | |
3339 | | static quicly_error_t on_ack_stream_data_blocked_frame(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, |
3340 | | quicly_sent_t *sent) |
3341 | 0 | { |
3342 | 0 | quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap)); |
3343 | 0 | quicly_stream_t *stream; |
3344 | |
|
3345 | 0 | if ((stream = quicly_get_stream(conn, sent->data.stream_data_blocked.stream_id)) == NULL) |
3346 | 0 | return 0; |
3347 | | |
3348 | 0 | if (stream->_send_aux.max_stream_data == sent->data.stream_data_blocked.offset) { |
3349 | 0 | if (acked) { |
3350 | 0 | stream->_send_aux.blocked = QUICLY_SENDER_STATE_ACKED; |
3351 | 0 | } else if (packet->frames_in_flight && stream->_send_aux.blocked == QUICLY_SENDER_STATE_UNACKED) { |
3352 | 0 | stream->_send_aux.blocked = QUICLY_SENDER_STATE_SEND; |
3353 | 0 | sched_stream_control(stream); |
3354 | 0 | } |
3355 | 0 | } |
3356 | |
|
3357 | 0 | return 0; |
3358 | 0 | } |
3359 | | |
3360 | | static quicly_error_t on_ack_new_token(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent) |
3361 | 0 | { |
3362 | 0 | quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap)); |
3363 | |
|
3364 | 0 | if (sent->data.new_token.is_inflight) { |
3365 | 0 | --conn->egress.new_token.num_inflight; |
3366 | 0 | sent->data.new_token.is_inflight = 0; |
3367 | 0 | } |
3368 | 0 | if (acked) { |
3369 | 0 | QUICLY_PROBE(NEW_TOKEN_ACKED, conn, conn->stash.now, sent->data.new_token.generation); |
3370 | 0 | QUICLY_LOG_CONN(new_token_acked, conn, { PTLS_LOG_ELEMENT_UNSIGNED(generation, sent->data.new_token.generation); }); |
3371 | 0 | if (conn->egress.new_token.max_acked < sent->data.new_token.generation) |
3372 | 0 | conn->egress.new_token.max_acked = sent->data.new_token.generation; |
3373 | 0 | } |
3374 | | |
3375 | 0 | if (conn->egress.new_token.num_inflight == 0 && conn->egress.new_token.max_acked < conn->egress.new_token.generation) |
3376 | 0 | conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT; |
3377 | |
|
3378 | 0 | return 0; |
3379 | 0 | } |
3380 | | |
3381 | | static quicly_error_t on_ack_new_connection_id(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, |
3382 | | quicly_sent_t *sent) |
3383 | 0 | { |
3384 | 0 | quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap)); |
3385 | 0 | uint64_t sequence = sent->data.new_connection_id.sequence; |
3386 | |
|
3387 | 0 | if (acked) { |
3388 | 0 | quicly_local_cid_on_acked(&conn->super.local.cid_set, sequence); |
3389 | 0 | } else { |
3390 | 0 | if (quicly_local_cid_on_lost(&conn->super.local.cid_set, sequence)) |
3391 | 0 | conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT; |
3392 | 0 | } |
3393 | |
|
3394 | 0 | return 0; |
3395 | 0 | } |
3396 | | |
3397 | | static quicly_error_t on_ack_retire_connection_id(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, |
3398 | | quicly_sent_t *sent) |
3399 | 0 | { |
3400 | 0 | quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap)); |
3401 | 0 | uint64_t sequence = sent->data.retire_connection_id.sequence; |
3402 | |
|
3403 | 0 | if (!acked) |
3404 | 0 | schedule_retire_connection_id_frame(conn, sequence); |
3405 | |
|
3406 | 0 | return 0; |
3407 | 0 | } |
3408 | | |
3409 | | static uint32_t calc_pacer_send_rate(quicly_conn_t *conn) |
3410 | 0 | { |
3411 | | /* The multiplier uses a hard-coded value of 2x in both the slow start and the congestion avoidance phases. This differs from |
3412 | | * Linux, which uses 1.25x for the latter. The rationale behind this choice is that 1.25x is not sufficiently aggressive |
3413 | | * immediately after a loss event. Following a loss event, the congestion window (CWND) is halved (i.e., beta), but the RTT |
3414 | | * remains high for one RTT and SRTT can remain high even loger, since it is a moving average adjusted with each ACK received. |
3415 | | * Consequently, if the multiplier is set to 1.25x, the calculated send rate could drop to as low as 1.25 * 1/2 = 0.625. By |
3416 | | * using a 2x multiplier, the send rate is guaranteed to become no less than that immediately before the loss event, which would |
3417 | | * have been the link throughput. */ |
3418 | 0 | return quicly_pacer_calc_send_rate(quicly_cc_in_jumpstart(&conn->egress.cc) ? 1 : 2, conn->egress.cc.cwnd, |
3419 | 0 | conn->egress.loss.rtt.smoothed); |
3420 | 0 | } |
3421 | | |
3422 | | static int should_send_datagram_frame(quicly_conn_t *conn) |
3423 | 0 | { |
3424 | 0 | if (conn->egress.datagram_frame_payloads.count == 0) |
3425 | 0 | return 0; |
3426 | 0 | if (conn->application == NULL) |
3427 | 0 | return 0; |
3428 | 0 | if (conn->application->cipher.egress.key.aead == NULL) |
3429 | 0 | return 0; |
3430 | 0 | return 1; |
3431 | 0 | } |
3432 | | |
3433 | | static inline uint64_t calc_amplification_limit_allowance(quicly_conn_t *conn) |
3434 | 0 | { |
3435 | 0 | if (conn->super.remote.address_validation.validated) |
3436 | 0 | return UINT64_MAX; |
3437 | 0 | uint64_t budget = conn->super.stats.num_bytes.received * conn->super.ctx->pre_validation_amplification_limit; |
3438 | 0 | if (budget <= conn->super.stats.num_bytes.sent) |
3439 | 0 | return 0; |
3440 | 0 | return budget - conn->super.stats.num_bytes.sent; |
3441 | 0 | } |
3442 | | |
3443 | | /* Helper function to compute send window based on: |
3444 | | * * state of peer validation, |
3445 | | * * current cwnd, |
3446 | | * * minimum send requirements in |min_bytes_to_send|, and |
3447 | | * * if sending is to be restricted to the minimum, indicated in |restrict_sending| |
3448 | | */ |
3449 | | static size_t calc_send_window(quicly_conn_t *conn, size_t min_bytes_to_send, uint64_t amp_window, uint64_t pacer_window, |
3450 | | int restrict_sending) |
3451 | 0 | { |
3452 | 0 | uint64_t window = 0; |
3453 | 0 | if (restrict_sending) { |
3454 | | /* Send min_bytes_to_send on PTO */ |
3455 | 0 | window = min_bytes_to_send; |
3456 | 0 | } else { |
3457 | | /* Limit to cwnd */ |
3458 | 0 | if (conn->egress.cc.cwnd > conn->egress.loss.sentmap.bytes_in_flight) { |
3459 | 0 | window = conn->egress.cc.cwnd - conn->egress.loss.sentmap.bytes_in_flight; |
3460 | 0 | if (window > pacer_window) |
3461 | 0 | window = pacer_window; |
3462 | 0 | } |
3463 | | /* Allow at least one packet on time-threshold loss detection */ |
3464 | 0 | window = window > min_bytes_to_send ? window : min_bytes_to_send; |
3465 | 0 | } |
3466 | | /* Cap the window by the amount allowed by address validation */ |
3467 | 0 | if (amp_window < window) |
3468 | 0 | window = amp_window; |
3469 | |
|
3470 | 0 | return window; |
3471 | 0 | } |
3472 | | |
3473 | | /** |
3474 | | * Checks if the server is waiting for ClientFinished. When that is the case, the loss timer is deactivated, to avoid repeatedly |
3475 | | * sending 1-RTT packets while the client spends time verifying the certificate chain at the same time buffering 1-RTT packets. |
3476 | | */ |
3477 | | static int is_point5rtt_with_no_handshake_data_to_send(quicly_conn_t *conn) |
3478 | 0 | { |
3479 | | /* bail out unless this is a server-side connection waiting for ClientFinished */ |
3480 | 0 | if (!(conn->handshake != NULL && conn->application != NULL && !quicly_is_client(conn))) |
3481 | 0 | return 0; |
3482 | 0 | quicly_stream_t *stream = quicly_get_stream(conn, (quicly_stream_id_t)-1 - QUICLY_EPOCH_HANDSHAKE); |
3483 | 0 | assert(stream != NULL); |
3484 | 0 | return stream->sendstate.pending.num_ranges == 0 && stream->sendstate.acked.ranges[0].end == stream->sendstate.size_inflight; |
3485 | 0 | } |
3486 | | |
3487 | | static int64_t pacer_can_send_at(quicly_conn_t *conn) |
3488 | 0 | { |
3489 | 0 | if (conn->egress.pacer == NULL) |
3490 | 0 | return 0; |
3491 | | |
3492 | 0 | uint32_t bytes_per_msec = calc_pacer_send_rate(conn); |
3493 | 0 | return quicly_pacer_can_send_at(conn->egress.pacer, bytes_per_msec, conn->egress.max_udp_payload_size); |
3494 | 0 | } |
3495 | | |
/**
 * Returns the absolute time (per the clock used by the connection) at which `quicly_send` should next be invoked; a value no
 * greater than "now" indicates that something can be sent immediately. When nothing is actionable, the idle timeout is returned.
 */
int64_t quicly_get_first_timeout(quicly_conn_t *conn)
{
    /* once in the closing / draining states, the only event left is (re)transmission of the closing packet */
    if (conn->super.state >= QUICLY_STATE_CLOSING)
        return conn->egress.send_ack_at;

    /* DATAGRAM frames are unreliable and are sent immediately, bypassing CC / pacer */
    if (should_send_datagram_frame(conn))
        return 0;

    uint64_t amp_window = calc_amplification_limit_allowance(conn);
    int64_t at = conn->idle_timeout.at, pacer_at = pacer_can_send_at(conn);

    /* reduce at to the moment pacer provides credit, if we are not CC-limited and there's something to be sent over CC */
    if (pacer_at < at && calc_send_window(conn, 0, amp_window, UINT64_MAX, 0) > 0) {
        if (conn->egress.pending_flows != 0) {
            /* crypto streams (as indicated by lower 4 bits) can be sent whenever CWND is available; other flows need application
             * packet number space */
            if ((conn->application != NULL && conn->application->cipher.egress.key.header_protection != NULL) ||
                (conn->egress.pending_flows & 0xf) != 0)
                at = pacer_at;
        }
        /* stream control frames or scheduler-visible stream data also warrant waking up at the pacer-provided moment */
        if (pacer_at < at && (quicly_linklist_is_linked(&conn->egress.pending_streams.control) || scheduler_can_send(conn)))
            at = pacer_at;
    }

    /* if something can be sent, return the earliest timeout. Otherwise return the idle timeout. */
    if (amp_window > 0) {
        /* loss timer is suppressed while waiting for ClientFinished; see is_point5rtt_with_no_handshake_data_to_send */
        if (conn->egress.loss.alarm_at < at && !is_point5rtt_with_no_handshake_data_to_send(conn))
            at = conn->egress.loss.alarm_at;
        if (conn->egress.send_ack_at < at)
            at = conn->egress.send_ack_at;
    }
    /* probes are sent even when the amplification window is zero */
    if (at > conn->egress.send_probe_at)
        at = conn->egress.send_probe_at;

    return at;
}
3532 | | |
3533 | | uint64_t quicly_get_next_expected_packet_number(quicly_conn_t *conn) |
3534 | 0 | { |
3535 | 0 | if (!conn->application) |
3536 | 0 | return UINT64_MAX; |
3537 | | |
3538 | 0 | return conn->application->super.next_expected_packet_number; |
3539 | 0 | } |
3540 | | |
3541 | | static int setup_path_dcid(quicly_conn_t *conn, size_t path_index) |
3542 | 0 | { |
3543 | 0 | struct st_quicly_conn_path_t *path = conn->paths[path_index]; |
3544 | 0 | quicly_remote_cid_set_t *set = &conn->super.remote.cid_set; |
3545 | 0 | size_t found = SIZE_MAX; |
3546 | |
|
3547 | 0 | assert(path->dcid == UINT64_MAX); |
3548 | | |
3549 | 0 | if (set->cids[0].cid.len == 0) { |
3550 | | /* if peer CID is zero-length, we can send packets to whatever address without the fear of corelation */ |
3551 | 0 | found = 0; |
3552 | 0 | } else { |
3553 | | /* find the unused entry with a smallest sequence number */ |
3554 | 0 | for (size_t i = 0; i < PTLS_ELEMENTSOF(set->cids); ++i) { |
3555 | 0 | if (set->cids[i].state == QUICLY_REMOTE_CID_AVAILABLE && |
3556 | 0 | (found == SIZE_MAX || set->cids[i].sequence < set->cids[found].sequence)) |
3557 | 0 | found = i; |
3558 | 0 | } |
3559 | 0 | if (found == SIZE_MAX) |
3560 | 0 | return 0; |
3561 | 0 | } |
3562 | | |
3563 | | /* associate */ |
3564 | 0 | set->cids[found].state = QUICLY_REMOTE_CID_IN_USE; |
3565 | 0 | path->dcid = set->cids[found].sequence; |
3566 | |
|
3567 | 0 | return 1; |
3568 | 0 | } |
3569 | | |
3570 | | static quicly_cid_t *get_dcid(quicly_conn_t *conn, size_t path_index) |
3571 | 0 | { |
3572 | 0 | struct st_quicly_conn_path_t *path = conn->paths[path_index]; |
3573 | |
|
3574 | 0 | assert(path->dcid != UINT64_MAX); |
3575 | | |
3576 | | /* lookup DCID and return */ |
3577 | 0 | for (size_t i = 0; i < PTLS_ELEMENTSOF(conn->super.remote.cid_set.cids); ++i) { |
3578 | 0 | if (conn->super.remote.cid_set.cids[i].sequence == path->dcid) |
3579 | 0 | return &conn->super.remote.cid_set.cids[i].cid; |
3580 | 0 | } |
3581 | 0 | assert(!"CID lookup failure"); |
3582 | 0 | return NULL; |
3583 | 0 | } |
3584 | | |
/**
 * data structure that is used during one call through quicly_send()
 */
struct st_quicly_send_context_t {
    /**
     * current encryption context; `first_byte` is used as the leading octet of packets newly opened with this context
     */
    struct {
        struct st_quicly_cipher_context_t *cipher;
        uint8_t first_byte;
    } current;
    /**
     * packet under construction
     */
    struct {
        struct st_quicly_cipher_context_t *cipher;
        /**
         * points to the first byte of the target QUIC packet. It will not point to packet->octets.base[0] when the datagram
         * contains multiple QUIC packet.
         */
        uint8_t *first_byte_at;
        /**
         * if the target QUIC packet contains an ack-eliciting frame
         */
        uint8_t ack_eliciting : 1;
        /**
         * if the target datagram should be padded to full size
         */
        uint8_t full_size : 1;
    } target;
    /**
     * output buffer into which list of datagrams is written
     */
    struct iovec *datagrams;
    /**
     * max number of datagrams that can be stored in |datagrams|
     */
    size_t max_datagrams;
    /**
     * number of datagrams currently stored in |datagrams|
     */
    size_t num_datagrams;
    /**
     * buffer in which packets are built
     */
    struct {
        /**
         * starting position of the current (or next) datagram
         */
        uint8_t *datagram;
        /**
         * end position of the payload buffer
         */
        uint8_t *end;
    } payload_buf;
    /**
     * Currently available window for sending (in bytes); the value becomes negative when the sender uses more space than permitted.
     * That happens because the sender operates at packet-level rather than byte-level.
     */
    ssize_t send_window;
    /**
     * location where next frame should be written
     */
    uint8_t *dst;
    /**
     * end of the payload area, beyond which frames cannot be written
     */
    uint8_t *dst_end;
    /**
     * address at which payload starts
     */
    uint8_t *dst_payload_from;
    /**
     * first packet number to be used within the lifetime of this send context
     */
    uint64_t first_packet_number;
    /**
     * index of `conn->paths[]` to which we are sending
     */
    size_t path_index;
    /**
     * DCID to be used for the path
     */
    quicly_cid_t *dcid;
    /**
     * if `conn->egress.send_probe_at` should be recalculated
     */
    unsigned recalc_send_probe_at : 1;
};
3674 | | |
/**
 * Finalizes the QUIC packet under construction: appends the minimum padding, encodes length / packet number / key-phase,
 * encrypts the payload, updates congestion control and the sentmap, and — unless another packet is being coalesced into the same
 * datagram — closes out the current UDP datagram and records it in `s->datagrams`.
 *
 * @param coalesced  non-zero when another QUIC packet will be appended to the same UDP datagram
 * @return 0 on success, or an error code (e.g. PTLS_ERROR_NO_MEMORY)
 */
static quicly_error_t commit_send_packet(quicly_conn_t *conn, quicly_send_context_t *s, int coalesced)
{
    size_t datagram_size, packet_bytes_in_flight;

    assert(s->target.cipher->aead != NULL);

    assert(s->dst != s->dst_payload_from);

    /* pad so that the pn + payload would be at least 4 bytes */
    while (s->dst - s->dst_payload_from < QUICLY_MAX_PN_SIZE - QUICLY_SEND_PN_SIZE)
        *s->dst++ = QUICLY_FRAME_TYPE_PADDING;

    /* when the datagram is to be full-sized (see do_allocate_frame: needed for GSO), fill the rest with PADDING */
    if (!coalesced && s->target.full_size) {
        assert(s->num_datagrams == 0 || s->datagrams[s->num_datagrams - 1].iov_len == conn->egress.max_udp_payload_size);
        const size_t max_size = conn->egress.max_udp_payload_size - QUICLY_AEAD_TAG_SIZE;
        assert(s->dst - s->payload_buf.datagram <= max_size);
        memset(s->dst, QUICLY_FRAME_TYPE_PADDING, s->payload_buf.datagram + max_size - s->dst);
        s->dst = s->payload_buf.datagram + max_size;
    }

    /* encode packet size, packet number, key-phase */
    if (QUICLY_PACKET_IS_LONG_HEADER(*s->target.first_byte_at)) {
        uint16_t length = s->dst - s->dst_payload_from + s->target.cipher->aead->algo->tag_size + QUICLY_SEND_PN_SIZE;
        /* length is always 2 bytes, see _do_prepare_packet */
        length |= 0x4000;
        quicly_encode16(s->dst_payload_from - QUICLY_SEND_PN_SIZE - 2, length);
        /* per-packet-type stats */
        switch (*s->target.first_byte_at & QUICLY_PACKET_TYPE_BITMASK) {
        case QUICLY_PACKET_TYPE_INITIAL:
            conn->super.stats.num_packets.initial_sent++;
            break;
        case QUICLY_PACKET_TYPE_0RTT:
            conn->super.stats.num_packets.zero_rtt_sent++;
            break;
        case QUICLY_PACKET_TYPE_HANDSHAKE:
            conn->super.stats.num_packets.handshake_sent++;
            break;
        }
    } else {
        /* short header (1-RTT): rotate the egress key if the packet number reached the planned key-update point, then reflect
         * the current key phase in the first byte */
        if (conn->egress.packet_number >= conn->application->cipher.egress.key_update_pn.next) {
            int ret;
            if ((ret = update_1rtt_egress_key(conn)) != 0)
                return ret;
        }
        if ((conn->application->cipher.egress.key_phase & 1) != 0)
            *s->target.first_byte_at |= QUICLY_KEY_PHASE_BIT;
    }
    quicly_encode16(s->dst_payload_from - QUICLY_SEND_PN_SIZE, (uint16_t)conn->egress.packet_number);

    /* encrypt the packet */
    s->dst += s->target.cipher->aead->algo->tag_size;
    datagram_size = s->dst - s->payload_buf.datagram;
    assert(datagram_size <= conn->egress.max_udp_payload_size);

    conn->super.ctx->crypto_engine->encrypt_packet(
        conn->super.ctx->crypto_engine, conn, s->target.cipher->header_protection, s->target.cipher->aead,
        ptls_iovec_init(s->payload_buf.datagram, datagram_size), s->target.first_byte_at - s->payload_buf.datagram,
        s->dst_payload_from - s->payload_buf.datagram, conn->egress.packet_number, coalesced);

    /* update CC, commit sentmap */
    int on_promoted_path = s->path_index == 0 && !conn->paths[0]->initial;
    if (s->target.ack_eliciting) {
        /* only ack-eliciting packets count against bytes-in-flight / the send window */
        packet_bytes_in_flight = s->dst - s->target.first_byte_at;
        s->send_window -= packet_bytes_in_flight;
    } else {
        packet_bytes_in_flight = 0;
    }
    if (quicly_sentmap_is_open(&conn->egress.loss.sentmap)) {
        int cc_limited = conn->egress.loss.sentmap.bytes_in_flight + packet_bytes_in_flight >=
                         conn->egress.cc.cwnd / 2; /* for the rationale behind this formula, see handle_ack_frame */
        quicly_sentmap_commit(&conn->egress.loss.sentmap, (uint16_t)packet_bytes_in_flight, cc_limited, on_promoted_path);
    }

    if (packet_bytes_in_flight != 0) {
        assert(s->path_index == 0 && "CC governs path 0 and data is sent only on that path");
        conn->egress.cc.type->cc_on_sent(&conn->egress.cc, &conn->egress.loss, (uint32_t)packet_bytes_in_flight, conn->stash.now);
        if (conn->egress.pacer != NULL)
            quicly_pacer_consume_window(conn->egress.pacer, packet_bytes_in_flight);
    }

    QUICLY_PROBE(PACKET_SENT, conn, conn->stash.now, conn->egress.packet_number, s->dst - s->target.first_byte_at,
                 get_epoch(*s->target.first_byte_at), !s->target.ack_eliciting);
    QUICLY_LOG_CONN(packet_sent, conn, {
        PTLS_LOG_ELEMENT_UNSIGNED(pn, conn->egress.packet_number);
        PTLS_LOG_ELEMENT_UNSIGNED(len, s->dst - s->target.first_byte_at);
        PTLS_LOG_ELEMENT_UNSIGNED(packet_type, get_epoch(*s->target.first_byte_at));
        PTLS_LOG_ELEMENT_BOOL(ack_only, !s->target.ack_eliciting);
    });

    ++conn->egress.packet_number;
    ++conn->super.stats.num_packets.sent;
    ++conn->paths[s->path_index]->num_packets.sent;
    if (on_promoted_path)
        ++conn->super.stats.num_packets.sent_promoted_paths;

    /* unless coalescing, record the finished UDP datagram and reset the target state for the next packet */
    if (!coalesced) {
        conn->super.stats.num_bytes.sent += datagram_size;
        s->datagrams[s->num_datagrams++] = (struct iovec){.iov_base = s->payload_buf.datagram, .iov_len = datagram_size};
        s->payload_buf.datagram += datagram_size;
        s->target.cipher = NULL;
        s->target.first_byte_at = NULL;
    }

    /* insert PN gap if necessary, registering the PN to the ack queue so that we'd close the connection in the event of receiving
     * an ACK for that gap. */
    if (conn->egress.packet_number >= conn->egress.next_pn_to_skip && !QUICLY_PACKET_IS_LONG_HEADER(s->current.first_byte) &&
        conn->super.state < QUICLY_STATE_CLOSING) {
        quicly_error_t ret;
        if ((ret = quicly_sentmap_prepare(&conn->egress.loss.sentmap, conn->egress.packet_number, conn->stash.now,
                                          QUICLY_EPOCH_1RTT)) != 0)
            return ret;
        if (quicly_sentmap_allocate(&conn->egress.loss.sentmap, on_invalid_ack) == NULL)
            return PTLS_ERROR_NO_MEMORY;
        quicly_sentmap_commit(&conn->egress.loss.sentmap, 0, 0, 0);
        ++conn->egress.packet_number;
        conn->egress.next_pn_to_skip = calc_next_pn_to_skip(conn->super.ctx->tls, conn->egress.packet_number, conn->egress.cc.cwnd,
                                                            conn->egress.max_udp_payload_size);
    }

    return 0;
}
3795 | | |
3796 | | static inline uint8_t *emit_cid(uint8_t *dst, const quicly_cid_t *cid) |
3797 | 0 | { |
3798 | 0 | if (cid->len != 0) { |
3799 | 0 | memcpy(dst, cid->cid, cid->len); |
3800 | 0 | dst += cid->len; |
3801 | 0 | } |
3802 | 0 | return dst; |
3803 | 0 | } |
3804 | | |
/**
 * governs how do_allocate_frame treats the frame about to be written
 */
enum allocate_frame_type {
    /** the frame does not elicit an ACK (e.g., ACK, PADDING) */
    ALLOCATE_FRAME_TYPE_NON_ACK_ELICITING,
    /** ack-eliciting frame; allocation fails with QUICLY_ERROR_SENDBUF_FULL once the send window is exhausted */
    ALLOCATE_FRAME_TYPE_ACK_ELICITING,
    /** ack-eliciting frame that is exempt from the send-window check */
    ALLOCATE_FRAME_TYPE_ACK_ELICITING_NO_CC,
};
3810 | | |
/**
 * Ensures that at least `min_space` bytes are available in the packet under construction, committing the current packet and
 * opening a new one (either coalesced into the same UDP datagram or starting a new one) when necessary. When a new packet is
 * opened, emits its header, registers it with the sentmap, and opportunistically bundles an ACK_FREQUENCY frame.
 *
 * @param min_space   minimum number of payload bytes the caller is about to write
 * @param frame_type  see `enum allocate_frame_type`; ACK_ELICITING is subject to the send-window check
 * @return 0 on success, QUICLY_ERROR_SENDBUF_FULL when no more packets can be built, or another error code
 */
static quicly_error_t do_allocate_frame(quicly_conn_t *conn, quicly_send_context_t *s, size_t min_space,
                                        enum allocate_frame_type frame_type)
{
    int coalescible;
    quicly_error_t ret;

    assert((s->current.first_byte & QUICLY_QUIC_BIT) != 0);

    /* allocate and setup the new packet if necessary */
    if (s->dst_end - s->dst < min_space || s->target.first_byte_at == NULL) {
        /* not enough space (or no packet is open yet): a new packet is needed, and it cannot be coalesced */
        coalescible = 0;
    } else if (((*s->target.first_byte_at ^ s->current.first_byte) & QUICLY_PACKET_TYPE_BITMASK) != 0) {
        /* packet type is changing; a long-header packet under construction may be coalesced with the next one */
        coalescible = QUICLY_PACKET_IS_LONG_HEADER(*s->target.first_byte_at);
    } else if (s->dst_end - s->dst < min_space) {
        coalescible = 0;
    } else {
        /* use the existing packet */
        goto TargetReady;
    }

    /* commit at the same time determining if we will coalesce the packets */
    if (s->target.first_byte_at != NULL) {
        if (coalescible) {
            /* coalescing is only possible when the next packet's header plus `min_space` fits in the remaining buffer */
            size_t overhead = 1 /* type */ + s->dcid->len + QUICLY_SEND_PN_SIZE + s->current.cipher->aead->algo->tag_size;
            if (QUICLY_PACKET_IS_LONG_HEADER(s->current.first_byte))
                overhead += 4 /* version */ + 1 /* cidl */ + s->dcid->len + conn->super.local.long_header_src_cid.len +
                            (s->current.first_byte == QUICLY_PACKET_TYPE_INITIAL) /* token_length == 0 */ + 2 /* length */;
            size_t packet_min_space = QUICLY_MAX_PN_SIZE - QUICLY_SEND_PN_SIZE;
            if (packet_min_space < min_space)
                packet_min_space = min_space;
            if (overhead + packet_min_space > s->dst_end - s->dst)
                coalescible = 0;
        }
        /* Close the packet under construction. Datagrams being returned by `quicly_send` are padded to full-size (except for the
         * last one datagram) so that they can be sent at once using GSO. */
        if (!coalescible)
            s->target.full_size = 1;
        if ((ret = commit_send_packet(conn, s, coalescible)) != 0)
            return ret;
    } else {
        coalescible = 0;
    }

    /* allocate packet */
    if (coalescible) {
        s->dst_end += s->target.cipher->aead->algo->tag_size; /* restore the AEAD tag size (tag size can differ bet. epochs) */
        s->target.cipher = s->current.cipher;
    } else {
        /* start a new datagram, bailing out if the output slots, the send window, or the payload buffer are exhausted */
        if (s->num_datagrams >= s->max_datagrams)
            return QUICLY_ERROR_SENDBUF_FULL;
        /* note: send_window (ssize_t) can become negative; see doc-comment */
        if (frame_type == ALLOCATE_FRAME_TYPE_ACK_ELICITING && s->send_window <= 0)
            return QUICLY_ERROR_SENDBUF_FULL;
        if (s->payload_buf.end - s->payload_buf.datagram < conn->egress.max_udp_payload_size)
            return QUICLY_ERROR_SENDBUF_FULL;
        s->target.cipher = s->current.cipher;
        s->target.full_size = 0;
        s->dst = s->payload_buf.datagram;
        s->dst_end = s->dst + conn->egress.max_udp_payload_size;
    }
    s->target.ack_eliciting = 0;

    QUICLY_PROBE(PACKET_PREPARE, conn, conn->stash.now, s->current.first_byte, QUICLY_PROBE_HEXDUMP(s->dcid->cid, s->dcid->len));
    QUICLY_LOG_CONN(packet_prepare, conn, {
        PTLS_LOG_ELEMENT_UNSIGNED(first_octet, s->current.first_byte);
        PTLS_LOG_ELEMENT_HEXDUMP(dcid, s->dcid->cid, s->dcid->len);
    });

    /* emit header */
    s->target.first_byte_at = s->dst;
    *s->dst++ = s->current.first_byte | 0x1 /* pnlen == 2 */;
    if (QUICLY_PACKET_IS_LONG_HEADER(s->current.first_byte)) {
        s->dst = quicly_encode32(s->dst, conn->super.version);
        *s->dst++ = s->dcid->len;
        s->dst = emit_cid(s->dst, s->dcid);
        *s->dst++ = conn->super.local.long_header_src_cid.len;
        s->dst = emit_cid(s->dst, &conn->super.local.long_header_src_cid);
        /* token */
        if (s->current.first_byte == QUICLY_PACKET_TYPE_INITIAL) {
            s->dst = quicly_encodev(s->dst, conn->token.len);
            if (conn->token.len != 0) {
                assert(s->dst_end - s->dst > conn->token.len);
                memcpy(s->dst, conn->token.base, conn->token.len);
                s->dst += conn->token.len;
            }
        }
        /* payload length is filled in later (see commit_send_packet) */
        *s->dst++ = 0;
        *s->dst++ = 0;
    } else {
        s->dst = emit_cid(s->dst, s->dcid);
    }
    s->dst += QUICLY_SEND_PN_SIZE; /* space for PN bits, filled in at commit time */
    s->dst_payload_from = s->dst;
    assert(s->target.cipher->aead != NULL);
    s->dst_end -= s->target.cipher->aead->algo->tag_size;
    assert(s->dst_end - s->dst >= QUICLY_MAX_PN_SIZE - QUICLY_SEND_PN_SIZE);

    if (conn->super.state < QUICLY_STATE_CLOSING) {
        /* register to sentmap */
        uint8_t ack_epoch = get_epoch(s->current.first_byte);
        if (ack_epoch == QUICLY_EPOCH_0RTT)
            ack_epoch = QUICLY_EPOCH_1RTT;
        if ((ret = quicly_sentmap_prepare(&conn->egress.loss.sentmap, conn->egress.packet_number, conn->stash.now, ack_epoch)) != 0)
            return ret;
        /* adjust ack-frequency */
        if (conn->stash.now >= conn->egress.ack_frequency.update_at) {
            assert(conn->super.remote.transport_params.min_ack_delay_usec != UINT64_MAX);
            if (conn->egress.cc.num_loss_episodes >= QUICLY_FIRST_ACK_FREQUENCY_LOSS_EPISODE && conn->initial == NULL &&
                conn->handshake == NULL) {
                uint32_t fraction_of_cwnd = (uint32_t)((uint64_t)conn->egress.cc.cwnd * conn->super.ctx->ack_frequency / 1024);
                if (fraction_of_cwnd >= conn->egress.max_udp_payload_size * 3) {
                    uint32_t packet_tolerance = fraction_of_cwnd / conn->egress.max_udp_payload_size;
                    if (packet_tolerance > QUICLY_MAX_PACKET_TOLERANCE)
                        packet_tolerance = QUICLY_MAX_PACKET_TOLERANCE;
                    s->dst = quicly_encode_ack_frequency_frame(s->dst, conn->egress.ack_frequency.sequence++, packet_tolerance,
                                                               conn->super.remote.transport_params.max_ack_delay * 1000, 0);
                    ++conn->super.stats.num_frames_sent.ack_frequency;
                }
            }
            ack_frequency_set_next_update_at(conn);
        }
    }

TargetReady:
    if (frame_type != ALLOCATE_FRAME_TYPE_NON_ACK_ELICITING) {
        s->target.ack_eliciting = 1;
        conn->egress.last_retransmittable_sent_at = conn->stash.now;
    }
    return 0;
}
3942 | | |
3943 | | static quicly_error_t allocate_ack_eliciting_frame(quicly_conn_t *conn, quicly_send_context_t *s, size_t min_space, |
3944 | | quicly_sent_t **sent, quicly_sent_acked_cb acked) |
3945 | 0 | { |
3946 | 0 | quicly_error_t ret; |
3947 | |
|
3948 | 0 | if ((ret = do_allocate_frame(conn, s, min_space, ALLOCATE_FRAME_TYPE_ACK_ELICITING)) != 0) |
3949 | 0 | return ret; |
3950 | 0 | if ((*sent = quicly_sentmap_allocate(&conn->egress.loss.sentmap, acked)) == NULL) |
3951 | 0 | return PTLS_ERROR_NO_MEMORY; |
3952 | | |
3953 | 0 | return ret; |
3954 | 0 | } |
3955 | | |
/**
 * Builds and sends an ACK frame for the given packet number space, then records the acknowledged ranges in the sentmap so that
 * they can be removed from the ack queue once the ACK frame itself is acknowledged. No-op when there is nothing to acknowledge.
 */
static quicly_error_t send_ack(quicly_conn_t *conn, struct st_quicly_pn_space_t *space, quicly_send_context_t *s)
{
    uint64_t ack_delay;
    quicly_error_t ret;

    if (space->ack_queue.num_ranges == 0)
        return 0;

    /* calc ack_delay */
    if (space->largest_pn_received_at < conn->stash.now) {
        /* We underreport ack_delay up to 1 milliseconds assuming that QUICLY_LOCAL_ACK_DELAY_EXPONENT is 10. It's considered a
         * non-issue because our time measurement is at millisecond granularity anyways. */
        ack_delay = ((conn->stash.now - space->largest_pn_received_at) * 1000) >> QUICLY_LOCAL_ACK_DELAY_EXPONENT;
    } else {
        ack_delay = 0;
    }

Emit: /* emit an ACK frame */
    if ((ret = do_allocate_frame(conn, s, QUICLY_ACK_FRAME_CAPACITY, ALLOCATE_FRAME_TYPE_NON_ACK_ELICITING)) != 0)
        return ret;
    uint8_t *dst = s->dst;
    dst = quicly_encode_ack_frame(dst, s->dst_end, &space->ack_queue, space->ecn_counts, ack_delay);

    /* when there's no space, retry with a new MTU-sized packet */
    if (dst == NULL) {
        /* [rare case] A coalesced packet might not have enough space to hold only an ACK. If so, pad it, as that's easier than
         * rolling back. */
        if (s->dst == s->dst_payload_from) {
            assert(s->target.first_byte_at != s->payload_buf.datagram);
            *s->dst++ = QUICLY_FRAME_TYPE_PADDING;
        }
        s->target.full_size = 1;
        if ((ret = commit_send_packet(conn, s, 0)) != 0)
            return ret;
        goto Emit;
    }

    ++conn->super.stats.num_frames_sent.ack;
    QUICLY_PROBE(ACK_SEND, conn, conn->stash.now, space->ack_queue.ranges[space->ack_queue.num_ranges - 1].end - 1, ack_delay);
    QUICLY_LOG_CONN(ack_send, conn, {
        PTLS_LOG_ELEMENT_UNSIGNED(largest_acked, space->ack_queue.ranges[space->ack_queue.num_ranges - 1].end - 1);
        PTLS_LOG_ELEMENT_UNSIGNED(ack_delay, ack_delay);
    });

    /* when there are no less than QUICLY_NUM_ACK_BLOCKS_TO_INDUCE_ACKACK (8) gaps, bundle PING once every 4 packets being sent */
    if (space->ack_queue.num_ranges >= QUICLY_NUM_ACK_BLOCKS_TO_INDUCE_ACKACK && conn->egress.packet_number % 4 == 0 &&
        dst < s->dst_end) {
        *dst++ = QUICLY_FRAME_TYPE_PING;
        ++conn->super.stats.num_frames_sent.ping;
        QUICLY_PROBE(PING_SEND, conn, conn->stash.now);
        QUICLY_LOG_CONN(ping_send, conn, {});
    }

    s->dst = dst;

    { /* save what's inflight: pack the acked ranges into sentmap entries, using the compact (8-bit) representation when gaps and
       * lengths fit into one byte each, and the 64-bit representation otherwise */
        size_t range_index = 0;
        while (range_index < space->ack_queue.num_ranges) {
            quicly_sent_t *sent;
            struct st_quicly_sent_ack_additional_t *additional, *additional_end;
            /* allocate */
            if ((sent = quicly_sentmap_allocate(&conn->egress.loss.sentmap, on_ack_ack_ranges8)) == NULL)
                return PTLS_ERROR_NO_MEMORY;
            /* store the first range, as well as preparing references to the additional slots */
            sent->data.ack.start = space->ack_queue.ranges[range_index].start;
            uint64_t length = space->ack_queue.ranges[range_index].end - space->ack_queue.ranges[range_index].start;
            if (length <= UINT8_MAX) {
                sent->data.ack.ranges8.start_length = length;
                additional = sent->data.ack.ranges8.additional;
                additional_end = additional + PTLS_ELEMENTSOF(sent->data.ack.ranges8.additional);
            } else {
                sent->acked = on_ack_ack_ranges64;
                sent->data.ack.ranges64.start_length = length;
                additional = sent->data.ack.ranges64.additional;
                additional_end = additional + PTLS_ELEMENTSOF(sent->data.ack.ranges64.additional);
            }
            /* store additional ranges, if possible */
            for (++range_index; range_index < space->ack_queue.num_ranges && additional < additional_end;
                 ++range_index, ++additional) {
                uint64_t gap = space->ack_queue.ranges[range_index].start - space->ack_queue.ranges[range_index - 1].end;
                uint64_t length = space->ack_queue.ranges[range_index].end - space->ack_queue.ranges[range_index].start;
                if (gap > UINT8_MAX || length > UINT8_MAX)
                    break;
                additional->gap = gap;
                additional->length = length;
            }
            /* additional list is zero-terminated, if not full */
            if (additional < additional_end)
                additional->gap = 0;
        }
    }

    space->unacked_count = 0;

    return ret;
}
4052 | | |
4053 | | static quicly_error_t prepare_stream_state_sender(quicly_stream_t *stream, quicly_sender_state_t *sender, quicly_send_context_t *s, |
4054 | | size_t min_space, quicly_sent_acked_cb ack_cb) |
4055 | 0 | { |
4056 | 0 | quicly_sent_t *sent; |
4057 | 0 | quicly_error_t ret; |
4058 | |
|
4059 | 0 | if ((ret = allocate_ack_eliciting_frame(stream->conn, s, min_space, &sent, ack_cb)) != 0) |
4060 | 0 | return ret; |
4061 | 0 | sent->data.stream_state_sender.stream_id = stream->stream_id; |
4062 | 0 | *sender = QUICLY_SENDER_STATE_UNACKED; |
4063 | |
|
4064 | 0 | return 0; |
4065 | 0 | } |
4066 | | |
/**
 * Emits the control frames pending on the given stream: STOP_SENDING, MAX_STREAM_DATA, RESET_STREAM, STREAM_DATA_BLOCKED. Each
 * frame is sent only when its respective sender state indicates that transmission is due.
 */
static quicly_error_t send_control_frames_of_stream(quicly_stream_t *stream, quicly_send_context_t *s)
{
    quicly_error_t ret;

    /* send STOP_SENDING if necessary */
    if (stream->_send_aux.stop_sending.sender_state == QUICLY_SENDER_STATE_SEND) {
        /* FIXME also send an empty STREAM frame */
        if ((ret = prepare_stream_state_sender(stream, &stream->_send_aux.stop_sending.sender_state, s,
                                               QUICLY_STOP_SENDING_FRAME_CAPACITY, on_ack_stop_sending)) != 0)
            return ret;
        s->dst = quicly_encode_stop_sending_frame(s->dst, stream->stream_id, stream->_send_aux.stop_sending.error_code);
        ++stream->conn->super.stats.num_frames_sent.stop_sending;
        QUICLY_PROBE(STOP_SENDING_SEND, stream->conn, stream->conn->stash.now, stream->stream_id,
                     stream->_send_aux.stop_sending.error_code);
        QUICLY_LOG_CONN(stop_sending_send, stream->conn, {
            PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
            PTLS_LOG_ELEMENT_UNSIGNED(error_code, stream->_send_aux.stop_sending.error_code);
        });
    }

    /* send MAX_STREAM_DATA if necessary */
    if (should_send_max_stream_data(stream)) {
        /* new limit is the amount received in-order plus the receive window */
        uint64_t new_value = stream->recvstate.data_off + stream->_recv_aux.window;
        quicly_sent_t *sent;
        /* prepare */
        if ((ret = allocate_ack_eliciting_frame(stream->conn, s, QUICLY_MAX_STREAM_DATA_FRAME_CAPACITY, &sent,
                                                on_ack_max_stream_data)) != 0)
            return ret;
        /* send */
        s->dst = quicly_encode_max_stream_data_frame(s->dst, stream->stream_id, new_value);
        /* register ack */
        sent->data.max_stream_data.stream_id = stream->stream_id;
        quicly_maxsender_record(&stream->_send_aux.max_stream_data_sender, new_value, &sent->data.max_stream_data.args);
        /* update stats */
        ++stream->conn->super.stats.num_frames_sent.max_stream_data;
        QUICLY_PROBE(MAX_STREAM_DATA_SEND, stream->conn, stream->conn->stash.now, stream, new_value);
        QUICLY_LOG_CONN(max_stream_data_send, stream->conn, {
            PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
            PTLS_LOG_ELEMENT_UNSIGNED(maximum, new_value);
        });
    }

    /* send RESET_STREAM if necessary */
    if (stream->_send_aux.reset_stream.sender_state == QUICLY_SENDER_STATE_SEND) {
        if ((ret = prepare_stream_state_sender(stream, &stream->_send_aux.reset_stream.sender_state, s, QUICLY_RST_FRAME_CAPACITY,
                                               on_ack_reset_stream)) != 0)
            return ret;
        s->dst = quicly_encode_reset_stream_frame(s->dst, stream->stream_id, stream->_send_aux.reset_stream.error_code,
                                                  stream->sendstate.size_inflight);
        ++stream->conn->super.stats.num_frames_sent.reset_stream;
        QUICLY_PROBE(RESET_STREAM_SEND, stream->conn, stream->conn->stash.now, stream->stream_id,
                     stream->_send_aux.reset_stream.error_code, stream->sendstate.size_inflight);
        QUICLY_LOG_CONN(reset_stream_send, stream->conn, {
            PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
            PTLS_LOG_ELEMENT_UNSIGNED(error_code, stream->_send_aux.reset_stream.error_code);
            PTLS_LOG_ELEMENT_UNSIGNED(final_size, stream->sendstate.size_inflight);
        });
    }

    /* send STREAM_DATA_BLOCKED if necessary */
    if (stream->_send_aux.blocked == QUICLY_SENDER_STATE_SEND) {
        quicly_sent_t *sent;
        if ((ret = allocate_ack_eliciting_frame(stream->conn, s, QUICLY_STREAM_DATA_BLOCKED_FRAME_CAPACITY, &sent,
                                                on_ack_stream_data_blocked_frame)) != 0)
            return ret;
        /* report the offset at which we are blocked */
        uint64_t offset = stream->_send_aux.max_stream_data;
        sent->data.stream_data_blocked.stream_id = stream->stream_id;
        sent->data.stream_data_blocked.offset = offset;
        s->dst = quicly_encode_stream_data_blocked_frame(s->dst, stream->stream_id, offset);
        stream->_send_aux.blocked = QUICLY_SENDER_STATE_UNACKED;
        ++stream->conn->super.stats.num_frames_sent.stream_data_blocked;
        QUICLY_PROBE(STREAM_DATA_BLOCKED_SEND, stream->conn, stream->conn->stash.now, stream->stream_id, offset);
        QUICLY_LOG_CONN(stream_data_blocked_send, stream->conn, {
            PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
            PTLS_LOG_ELEMENT_UNSIGNED(maximum, offset);
        });
    }

    return 0;
}
4147 | | |
4148 | | static quicly_error_t send_stream_control_frames(quicly_conn_t *conn, quicly_send_context_t *s) |
4149 | 0 | { |
4150 | 0 | quicly_error_t ret = 0; |
4151 | |
|
4152 | 0 | while (s->num_datagrams != s->max_datagrams && quicly_linklist_is_linked(&conn->egress.pending_streams.control)) { |
4153 | 0 | quicly_stream_t *stream = |
4154 | 0 | (void *)((char *)conn->egress.pending_streams.control.next - offsetof(quicly_stream_t, _send_aux.pending_link.control)); |
4155 | 0 | if ((ret = send_control_frames_of_stream(stream, s)) != 0) |
4156 | 0 | goto Exit; |
4157 | 0 | quicly_linklist_unlink(&stream->_send_aux.pending_link.control); |
4158 | 0 | } |
4159 | | |
4160 | 0 | Exit: |
4161 | 0 | return ret; |
4162 | 0 | } |
4163 | | |
4164 | | int quicly_is_blocked(quicly_conn_t *conn) |
4165 | | { |
4166 | | if (conn->egress.max_data.sent < conn->egress.max_data.permitted) |
4167 | | return 0; |
4168 | | |
4169 | | /* schedule the transmission of DATA_BLOCKED frame, if it's new information */ |
4170 | | if (conn->egress.data_blocked == QUICLY_SENDER_STATE_NONE) { |
4171 | | conn->egress.data_blocked = QUICLY_SENDER_STATE_SEND; |
4172 | | conn->egress.pending_flows = QUICLY_PENDING_FLOW_OTHERS_BIT; |
4173 | | } |
4174 | | |
4175 | | return 1; |
4176 | | } |
4177 | | |
4178 | | int quicly_stream_can_send(quicly_stream_t *stream, int at_stream_level) |
4179 | 0 | { |
4180 | | /* return if there is nothing to be sent */ |
4181 | 0 | if (stream->sendstate.pending.num_ranges == 0) |
4182 | 0 | return 0; |
4183 | | |
4184 | | /* return if flow is capped neither by MAX_STREAM_DATA nor (in case we are hitting connection-level flow control) by the number |
4185 | | * of bytes we've already sent */ |
4186 | 0 | uint64_t blocked_at = at_stream_level ? stream->_send_aux.max_stream_data : stream->sendstate.size_inflight; |
4187 | 0 | if (stream->sendstate.pending.ranges[0].start < blocked_at) |
4188 | 0 | return 1; |
4189 | | /* we can always send EOS, if that is the only thing to be sent */ |
4190 | 0 | if (stream->sendstate.pending.ranges[0].start >= stream->sendstate.final_size) { |
4191 | 0 | assert(stream->sendstate.pending.ranges[0].start == stream->sendstate.final_size); |
4192 | 0 | return 1; |
4193 | 0 | } |
4194 | | |
4195 | | /* if known to be blocked at stream-level, schedule the emission of STREAM_DATA_BLOCKED frame */ |
4196 | 0 | if (at_stream_level && stream->_send_aux.blocked == QUICLY_SENDER_STATE_NONE) { |
4197 | 0 | stream->_send_aux.blocked = QUICLY_SENDER_STATE_SEND; |
4198 | 0 | sched_stream_control(stream); |
4199 | 0 | } |
4200 | |
|
4201 | 0 | return 0; |
4202 | 0 | } |
4203 | | |
4204 | | int quicly_can_send_data(quicly_conn_t *conn, quicly_send_context_t *s) |
4205 | | { |
4206 | | return s->num_datagrams < s->max_datagrams; |
4207 | | } |
4208 | | |
/**
 * If necessary, changes the frame representation from one without length field to one that has if necessary. Or, as an alternative,
 * prepends PADDING frames. Upon return, `dst` points to the end of the frame being built. `*len`, `*wrote_all`, `*frame_type_at`
 * are also updated reflecting their values post-adjustment.
 */
static inline void adjust_stream_frame_layout(uint8_t **dst, uint8_t *const dst_end, size_t *len, int *wrote_all,
                                              uint8_t **frame_at)
{
    /* `space_left` is the number of packet bytes that would remain unused after the payload; `len_of_len` is the number of bytes
     * needed to encode `*len` as a varint */
    size_t space_left = (dst_end - *dst) - *len, len_of_len = quicly_encodev_capacity(*len);

    if (**frame_at == QUICLY_FRAME_TYPE_CRYPTO) {
        /* CRYPTO frame: adjust payload length to make space for the length field, if necessary. */
        if (space_left < len_of_len) {
            /* shrink the payload so that the length field fits; the payload is therefore no longer complete */
            *len = dst_end - *dst - len_of_len;
            *wrote_all = 0;
        }
    } else {
        /* STREAM frame: insert length if space can be left for more frames. Otherwise, retain STREAM frame header omitting the
         * length field, prepending PADDING if necessary. */
        if (space_left <= len_of_len) {
            if (space_left != 0) {
                /* shift the whole frame towards the packet end, filling the gap before it with PADDING bytes */
                memmove(*frame_at + space_left, *frame_at, *dst + *len - *frame_at);
                memset(*frame_at, QUICLY_FRAME_TYPE_PADDING, space_left);
                *dst += space_left;
                *frame_at += space_left;
            }
            *dst += *len;
            return;
        }
        /* room remains for additional frames; switch to the header form carrying an explicit length */
        **frame_at |= QUICLY_FRAME_TYPE_STREAM_BIT_LEN;
    }

    /* insert length before payload of `*len` bytes */
    memmove(*dst + len_of_len, *dst, *len);
    *dst = quicly_encodev(*dst, *len);
    *dst += *len;
}
4246 | | |
/**
 * Emits one CRYPTO frame (stream_id < 0) or STREAM frame for the given stream, writing as much pending payload as fits within the
 * packet and the applicable flow-control limits, then updates the send state and registers the range in the sentmap.
 */
quicly_error_t quicly_send_stream(quicly_stream_t *stream, quicly_send_context_t *s)
{
    /* offset of the first pending byte; this is where the frame starts */
    uint64_t off = stream->sendstate.pending.ranges[0].start;
    quicly_sent_t *sent;
    uint8_t *dst; /* this pointer points to the current write position within the frame being built, while `s->dst` points to the
                   * beginning of the frame. */
    size_t len;
    int wrote_all, is_fin;
    quicly_error_t ret;

    /* write frame type, stream_id and offset, calculate capacity (and store that in `len`) */
    if (stream->stream_id < 0) {
        /* negative stream IDs designate the internal crypto streams; emit a CRYPTO frame */
        if ((ret = allocate_ack_eliciting_frame(stream->conn, s,
                                                1 + quicly_encodev_capacity(off) + 2 /* type + offset + len + 1-byte payload */,
                                                &sent, on_ack_stream)) != 0)
            return ret;
        dst = s->dst;
        *dst++ = QUICLY_FRAME_TYPE_CRYPTO;
        dst = quicly_encodev(dst, off);
        len = s->dst_end - dst;
    } else {
        /* build the STREAM frame header into a scratch buffer first; its final length depends on the varint encodings */
        uint8_t header[18], *hp = header + 1;
        hp = quicly_encodev(hp, stream->stream_id);
        if (off != 0) {
            header[0] = QUICLY_FRAME_TYPE_STREAM_BASE | QUICLY_FRAME_TYPE_STREAM_BIT_OFF;
            hp = quicly_encodev(hp, off);
        } else {
            /* offset zero is implied when the OFF bit is absent */
            header[0] = QUICLY_FRAME_TYPE_STREAM_BASE;
        }
        if (off == stream->sendstate.final_size) {
            assert(!quicly_sendstate_is_open(&stream->sendstate));
            /* special case for emitting FIN only */
            header[0] |= QUICLY_FRAME_TYPE_STREAM_BIT_FIN;
            if ((ret = allocate_ack_eliciting_frame(stream->conn, s, hp - header, &sent, on_ack_stream)) != 0)
                return ret;
            if (hp - header != s->dst_end - s->dst) {
                /* packet is not exactly filled; emit an explicit zero length so more frames can follow */
                header[0] |= QUICLY_FRAME_TYPE_STREAM_BIT_LEN;
                *hp++ = 0; /* empty length */
            }
            memcpy(s->dst, header, hp - header);
            s->dst += hp - header;
            len = 0;
            wrote_all = 1;
            is_fin = 1;
            goto UpdateState;
        }
        if ((ret = allocate_ack_eliciting_frame(stream->conn, s, hp - header + 1, &sent, on_ack_stream)) != 0)
            return ret;
        dst = s->dst;
        memcpy(dst, header, hp - header);
        dst += hp - header;
        len = s->dst_end - dst;
        /* cap by max_stream_data */
        if (off + len > stream->_send_aux.max_stream_data)
            len = stream->_send_aux.max_stream_data - off;
        /* cap by max_data */
        if (off + len > stream->sendstate.size_inflight) {
            /* only bytes beyond size_inflight count as new against the connection-level flow-control budget */
            uint64_t new_bytes = off + len - stream->sendstate.size_inflight;
            if (new_bytes > stream->conn->egress.max_data.permitted - stream->conn->egress.max_data.sent) {
                size_t max_stream_data =
                    stream->sendstate.size_inflight + stream->conn->egress.max_data.permitted - stream->conn->egress.max_data.sent;
                len = max_stream_data - off;
            }
        }
    }
    { /* cap len to the current range */
        uint64_t range_capacity = stream->sendstate.pending.ranges[0].end - off;
        if (off + range_capacity > stream->sendstate.final_size) {
            assert(!quicly_sendstate_is_open(&stream->sendstate));
            /* the last unit of the range represents the FIN; exclude it from the payload length */
            assert(range_capacity > 1); /* see the special case above */
            range_capacity -= 1;
        }
        if (len > range_capacity)
            len = range_capacity;
    }

    /* Write payload, adjusting len to actual size. Note that `on_send_emit` might fail (e.g., when underlying pread(2) fails), in
     * which case the application will either close the connection immediately or reset the stream. If that happens, we return
     * immediately without updating state. */
    assert(len != 0);
    /* emit offset is relative to the end of the contiguously-acked prefix -- presumably the start of data the application still
     * retains; TODO(review) confirm against the on_send_emit contract */
    size_t emit_off = (size_t)(off - stream->sendstate.acked.ranges[0].end);
    QUICLY_PROBE(STREAM_ON_SEND_EMIT, stream->conn, stream->conn->stash.now, stream, emit_off, len);
    QUICLY_LOG_CONN(stream_on_send_emit, stream->conn, {
        PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
        PTLS_LOG_ELEMENT_UNSIGNED(off, off);
        PTLS_LOG_ELEMENT_UNSIGNED(capacity, len);
    });
    stream->callbacks->on_send_emit(stream, emit_off, dst, &len, &wrote_all);
    if (stream->conn->super.state >= QUICLY_STATE_CLOSING) {
        return QUICLY_ERROR_IS_CLOSING;
    } else if (stream->_send_aux.reset_stream.sender_state != QUICLY_SENDER_STATE_NONE) {
        /* the callback reset the stream; abandon the frame being built without touching send state */
        return 0;
    }
    assert(len != 0);

    /* finalize the frame layout now that the actual payload length is known (may insert a length field or PADDING) */
    adjust_stream_frame_layout(&dst, s->dst_end, &len, &wrote_all, &s->dst);

    /* determine if the frame incorporates FIN */
    if (off + len == stream->sendstate.final_size) {
        assert(!quicly_sendstate_is_open(&stream->sendstate));
        assert(s->dst != NULL);
        is_fin = 1;
        *s->dst |= QUICLY_FRAME_TYPE_STREAM_BIT_FIN;
    } else {
        is_fin = 0;
    }

    /* update s->dst now that frame construction is complete */
    s->dst = dst;

UpdateState:
    if (stream->stream_id < 0) {
        ++stream->conn->super.stats.num_frames_sent.crypto;
    } else {
        ++stream->conn->super.stats.num_frames_sent.stream;
    }
    stream->conn->super.stats.num_bytes.stream_data_sent += len;
    /* bytes below size_inflight have been sent before; count the overlap as resent */
    if (off < stream->sendstate.size_inflight)
        stream->conn->super.stats.num_bytes.stream_data_resent +=
            (stream->sendstate.size_inflight < off + len ? stream->sendstate.size_inflight : off + len) - off;
    QUICLY_PROBE(STREAM_SEND, stream->conn, stream->conn->stash.now, stream, off, len, is_fin);
    QUICLY_LOG_CONN(stream_send, stream->conn, {
        PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
        PTLS_LOG_ELEMENT_UNSIGNED(off, off);
        PTLS_LOG_ELEMENT_UNSIGNED(len, len);
        PTLS_LOG_ELEMENT_BOOL(is_fin, is_fin);
    });

    QUICLY_PROBE(QUICTRACE_SEND_STREAM, stream->conn, stream->conn->stash.now, stream, off, len, is_fin);
    /* update sendstate (and also MAX_DATA counter) */
    if (stream->sendstate.size_inflight < off + len) {
        /* only application streams consume connection-level flow-control credit */
        if (stream->stream_id >= 0)
            stream->conn->egress.max_data.sent += off + len - stream->sendstate.size_inflight;
        stream->sendstate.size_inflight = off + len;
    }
    /* remove the emitted range from pending; FIN occupies one extra unit past the payload */
    if ((ret = quicly_ranges_subtract(&stream->sendstate.pending, off, off + len + is_fin)) != 0)
        return ret;
    if (wrote_all) {
        /* the application has no more data to provide at the moment; clear the remainder of the pending range */
        if ((ret = quicly_ranges_subtract(&stream->sendstate.pending, stream->sendstate.size_inflight, UINT64_MAX)) != 0)
            return ret;
    }

    /* setup sentmap */
    sent->data.stream.stream_id = stream->stream_id;
    sent->data.stream.args.start = off;
    sent->data.stream.args.end = off + len + is_fin;

    return 0;
}
4396 | | |
4397 | | static inline quicly_error_t init_acks_iter(quicly_conn_t *conn, quicly_sentmap_iter_t *iter) |
4398 | 0 | { |
4399 | 0 | return quicly_loss_init_sentmap_iter(&conn->egress.loss, iter, conn->stash.now, |
4400 | 0 | conn->super.remote.transport_params.max_ack_delay, |
4401 | 0 | conn->super.state >= QUICLY_STATE_CLOSING); |
4402 | 0 | } |
4403 | | |
4404 | | quicly_error_t discard_sentmap_by_epoch(quicly_conn_t *conn, unsigned ack_epochs) |
4405 | 0 | { |
4406 | 0 | quicly_sentmap_iter_t iter; |
4407 | 0 | const quicly_sent_packet_t *sent; |
4408 | 0 | quicly_error_t ret; |
4409 | |
|
4410 | 0 | if ((ret = init_acks_iter(conn, &iter)) != 0) |
4411 | 0 | return ret; |
4412 | | |
4413 | 0 | while ((sent = quicly_sentmap_get(&iter))->packet_number != UINT64_MAX) { |
4414 | 0 | if ((ack_epochs & (1u << sent->ack_epoch)) != 0) { |
4415 | 0 | if ((ret = quicly_sentmap_update(&conn->egress.loss.sentmap, &iter, QUICLY_SENTMAP_EVENT_EXPIRED)) != 0) |
4416 | 0 | return ret; |
4417 | 0 | } else { |
4418 | 0 | quicly_sentmap_skip(&iter); |
4419 | 0 | } |
4420 | 0 | } |
4421 | | |
4422 | 0 | return ret; |
4423 | 0 | } |
4424 | | |
4425 | | /** |
4426 | | * Mark frames of given epoch as pending, until `*bytes_to_mark` becomes zero. |
4427 | | */ |
4428 | | static quicly_error_t mark_frames_on_pto(quicly_conn_t *conn, uint8_t ack_epoch, size_t *bytes_to_mark) |
4429 | 0 | { |
4430 | 0 | quicly_sentmap_iter_t iter; |
4431 | 0 | const quicly_sent_packet_t *sent; |
4432 | 0 | quicly_error_t ret; |
4433 | |
|
4434 | 0 | if ((ret = init_acks_iter(conn, &iter)) != 0) |
4435 | 0 | return ret; |
4436 | | |
4437 | 0 | while ((sent = quicly_sentmap_get(&iter))->packet_number != UINT64_MAX) { |
4438 | 0 | if (sent->ack_epoch == ack_epoch && sent->frames_in_flight) { |
4439 | 0 | *bytes_to_mark = *bytes_to_mark > sent->cc_bytes_in_flight ? *bytes_to_mark - sent->cc_bytes_in_flight : 0; |
4440 | 0 | if ((ret = quicly_sentmap_update(&conn->egress.loss.sentmap, &iter, QUICLY_SENTMAP_EVENT_PTO)) != 0) |
4441 | 0 | return ret; |
4442 | 0 | assert(!sent->frames_in_flight); |
4443 | 0 | if (*bytes_to_mark == 0) |
4444 | 0 | break; |
4445 | 0 | } else { |
4446 | 0 | quicly_sentmap_skip(&iter); |
4447 | 0 | } |
4448 | 0 | } |
4449 | | |
4450 | 0 | return 0; |
4451 | 0 | } |
4452 | | |
4453 | | static void notify_congestion_to_cc(quicly_conn_t *conn, uint16_t lost_bytes, uint64_t lost_pn) |
4454 | 0 | { |
4455 | 0 | if (conn->egress.pn_path_start <= lost_pn) { |
4456 | 0 | conn->egress.cc.type->cc_on_lost(&conn->egress.cc, &conn->egress.loss, lost_bytes, lost_pn, conn->egress.packet_number, |
4457 | 0 | conn->stash.now, conn->egress.max_udp_payload_size); |
4458 | 0 | QUICLY_PROBE(CC_CONGESTION, conn, conn->stash.now, lost_pn + 1, conn->egress.loss.sentmap.bytes_in_flight, |
4459 | 0 | conn->egress.cc.cwnd); |
4460 | 0 | QUICLY_LOG_CONN(cc_congestion, conn, { |
4461 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(max_lost_pn, lost_pn + 1); |
4462 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(flight, conn->egress.loss.sentmap.bytes_in_flight); |
4463 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(cwnd, conn->egress.cc.cwnd); |
4464 | 0 | }); |
4465 | 0 | } |
4466 | 0 | } |
4467 | | |
/* Callback invoked by the loss-recovery module when a packet is deemed lost; updates loss statistics, emits probes/logs, and
 * notifies the congestion controller. */
static void on_loss_detected(quicly_loss_t *loss, const quicly_sent_packet_t *lost_packet, int is_time_threshold)
{
    /* container_of: recover the connection from the embedded loss struct */
    quicly_conn_t *conn = (void *)((char *)loss - offsetof(quicly_conn_t, egress.loss));

    /* only packets that counted towards bytes-in-flight should be reported here */
    assert(lost_packet->cc_bytes_in_flight != 0);

    ++conn->super.stats.num_packets.lost;
    if (is_time_threshold)
        ++conn->super.stats.num_packets.lost_time_threshold;
    conn->super.stats.num_bytes.lost += lost_packet->cc_bytes_in_flight;
    QUICLY_PROBE(PACKET_LOST, conn, conn->stash.now, lost_packet->packet_number, lost_packet->ack_epoch);
    QUICLY_LOG_CONN(packet_lost, conn, {
        PTLS_LOG_ELEMENT_UNSIGNED(pn, lost_packet->packet_number);
        PTLS_LOG_ELEMENT_UNSIGNED(packet_type, lost_packet->ack_epoch);
    });
    notify_congestion_to_cc(conn, lost_packet->cc_bytes_in_flight, lost_packet->packet_number);
    QUICLY_PROBE(QUICTRACE_CC_LOST, conn, conn->stash.now, &conn->egress.loss.rtt, conn->egress.cc.cwnd,
                 conn->egress.loss.sentmap.bytes_in_flight);
}
4487 | | |
4488 | | static quicly_error_t send_max_streams(quicly_conn_t *conn, int uni, quicly_send_context_t *s) |
4489 | 0 | { |
4490 | 0 | if (!should_send_max_streams(conn, uni)) |
4491 | 0 | return 0; |
4492 | | |
4493 | 0 | quicly_maxsender_t *maxsender = uni ? &conn->ingress.max_streams.uni : &conn->ingress.max_streams.bidi; |
4494 | 0 | struct st_quicly_conn_streamgroup_state_t *group = uni ? &conn->super.remote.uni : &conn->super.remote.bidi; |
4495 | 0 | quicly_error_t ret; |
4496 | |
|
4497 | 0 | uint64_t new_count = |
4498 | 0 | group->next_stream_id / 4 + |
4499 | 0 | (uni ? conn->super.ctx->transport_params.max_streams_uni : conn->super.ctx->transport_params.max_streams_bidi) - |
4500 | 0 | group->num_streams; |
4501 | |
|
4502 | 0 | quicly_sent_t *sent; |
4503 | 0 | if ((ret = allocate_ack_eliciting_frame(conn, s, QUICLY_MAX_STREAMS_FRAME_CAPACITY, &sent, on_ack_max_streams)) != 0) |
4504 | 0 | return ret; |
4505 | 0 | s->dst = quicly_encode_max_streams_frame(s->dst, uni, new_count); |
4506 | 0 | sent->data.max_streams.uni = uni; |
4507 | 0 | quicly_maxsender_record(maxsender, new_count, &sent->data.max_streams.args); |
4508 | |
|
4509 | 0 | if (uni) { |
4510 | 0 | ++conn->super.stats.num_frames_sent.max_streams_uni; |
4511 | 0 | } else { |
4512 | 0 | ++conn->super.stats.num_frames_sent.max_streams_bidi; |
4513 | 0 | } |
4514 | 0 | QUICLY_PROBE(MAX_STREAMS_SEND, conn, conn->stash.now, new_count, uni); |
4515 | 0 | QUICLY_LOG_CONN(max_streams_send, conn, { |
4516 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(maximum, new_count); |
4517 | 0 | PTLS_LOG_ELEMENT_BOOL(is_unidirectional, uni); |
4518 | 0 | }); |
4519 | | |
4520 | 0 | return 0; |
4521 | 0 | } |
4522 | | |
4523 | | static quicly_error_t send_streams_blocked(quicly_conn_t *conn, int uni, quicly_send_context_t *s) |
4524 | 0 | { |
4525 | 0 | quicly_linklist_t *blocked_list = uni ? &conn->egress.pending_streams.blocked.uni : &conn->egress.pending_streams.blocked.bidi; |
4526 | 0 | quicly_error_t ret; |
4527 | |
|
4528 | 0 | if (!quicly_linklist_is_linked(blocked_list)) |
4529 | 0 | return 0; |
4530 | | |
4531 | 0 | struct st_quicly_max_streams_t *max_streams = uni ? &conn->egress.max_streams.uni : &conn->egress.max_streams.bidi; |
4532 | 0 | quicly_stream_t *oldest_blocked_stream = |
4533 | 0 | (void *)((char *)blocked_list->next - offsetof(quicly_stream_t, _send_aux.pending_link.control)); |
4534 | 0 | assert(max_streams->count == oldest_blocked_stream->stream_id / 4); |
4535 | | |
4536 | 0 | if (!quicly_maxsender_should_send_blocked(&max_streams->blocked_sender, max_streams->count)) |
4537 | 0 | return 0; |
4538 | | |
4539 | 0 | quicly_sent_t *sent; |
4540 | 0 | if ((ret = allocate_ack_eliciting_frame(conn, s, QUICLY_STREAMS_BLOCKED_FRAME_CAPACITY, &sent, on_ack_streams_blocked)) != 0) |
4541 | 0 | return ret; |
4542 | 0 | s->dst = quicly_encode_streams_blocked_frame(s->dst, uni, max_streams->count); |
4543 | 0 | sent->data.streams_blocked.uni = uni; |
4544 | 0 | quicly_maxsender_record(&max_streams->blocked_sender, max_streams->count, &sent->data.streams_blocked.args); |
4545 | |
|
4546 | 0 | ++conn->super.stats.num_frames_sent.streams_blocked; |
4547 | 0 | QUICLY_PROBE(STREAMS_BLOCKED_SEND, conn, conn->stash.now, max_streams->count, uni); |
4548 | 0 | QUICLY_LOG_CONN(streams_blocked_send, conn, { |
4549 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(maximum, max_streams->count); |
4550 | 0 | PTLS_LOG_ELEMENT_BOOL(is_unidirectional, uni); |
4551 | 0 | }); |
4552 | | |
4553 | 0 | return 0; |
4554 | 0 | } |
4555 | | |
4556 | | static void open_blocked_streams(quicly_conn_t *conn, int uni) |
4557 | 0 | { |
4558 | 0 | uint64_t count; |
4559 | 0 | quicly_linklist_t *anchor; |
4560 | |
|
4561 | 0 | if (uni) { |
4562 | 0 | count = conn->egress.max_streams.uni.count; |
4563 | 0 | anchor = &conn->egress.pending_streams.blocked.uni; |
4564 | 0 | } else { |
4565 | 0 | count = conn->egress.max_streams.bidi.count; |
4566 | 0 | anchor = &conn->egress.pending_streams.blocked.bidi; |
4567 | 0 | } |
4568 | |
|
4569 | 0 | while (quicly_linklist_is_linked(anchor)) { |
4570 | 0 | quicly_stream_t *stream = (void *)((char *)anchor->next - offsetof(quicly_stream_t, _send_aux.pending_link.control)); |
4571 | 0 | if (stream->stream_id / 4 >= count) |
4572 | 0 | break; |
4573 | 0 | assert(stream->streams_blocked); |
4574 | 0 | quicly_linklist_unlink(&stream->_send_aux.pending_link.control); |
4575 | 0 | stream->streams_blocked = 0; |
4576 | 0 | stream->_send_aux.max_stream_data = quicly_stream_is_unidirectional(stream->stream_id) |
4577 | 0 | ? conn->super.remote.transport_params.max_stream_data.uni |
4578 | 0 | : conn->super.remote.transport_params.max_stream_data.bidi_remote; |
4579 | | /* TODO retain separate flags for stream states so that we do not always need to sched for both control and data */ |
4580 | 0 | sched_stream_control(stream); |
4581 | 0 | resched_stream_data(stream); |
4582 | 0 | } |
4583 | 0 | } |
4584 | | |
4585 | | static quicly_error_t send_handshake_done(quicly_conn_t *conn, quicly_send_context_t *s) |
4586 | 0 | { |
4587 | 0 | quicly_sent_t *sent; |
4588 | 0 | quicly_error_t ret; |
4589 | |
|
4590 | 0 | if ((ret = allocate_ack_eliciting_frame(conn, s, 1, &sent, on_ack_handshake_done)) != 0) |
4591 | 0 | goto Exit; |
4592 | 0 | *s->dst++ = QUICLY_FRAME_TYPE_HANDSHAKE_DONE; |
4593 | 0 | conn->egress.pending_flows &= ~QUICLY_PENDING_FLOW_HANDSHAKE_DONE_BIT; |
4594 | 0 | ++conn->super.stats.num_frames_sent.handshake_done; |
4595 | 0 | QUICLY_PROBE(HANDSHAKE_DONE_SEND, conn, conn->stash.now); |
4596 | 0 | QUICLY_LOG_CONN(handshake_done_send, conn, {}); |
4597 | | |
4598 | 0 | ret = 0; |
4599 | 0 | Exit: |
4600 | 0 | return ret; |
4601 | 0 | } |
4602 | | |
4603 | | static quicly_error_t send_data_blocked(quicly_conn_t *conn, quicly_send_context_t *s) |
4604 | 0 | { |
4605 | 0 | quicly_sent_t *sent; |
4606 | 0 | quicly_error_t ret; |
4607 | |
|
4608 | 0 | uint64_t offset = conn->egress.max_data.permitted; |
4609 | 0 | if ((ret = allocate_ack_eliciting_frame(conn, s, QUICLY_DATA_BLOCKED_FRAME_CAPACITY, &sent, on_ack_data_blocked)) != 0) |
4610 | 0 | goto Exit; |
4611 | 0 | sent->data.data_blocked.offset = offset; |
4612 | 0 | s->dst = quicly_encode_data_blocked_frame(s->dst, offset); |
4613 | 0 | conn->egress.data_blocked = QUICLY_SENDER_STATE_UNACKED; |
4614 | |
|
4615 | 0 | ++conn->super.stats.num_frames_sent.data_blocked; |
4616 | 0 | QUICLY_PROBE(DATA_BLOCKED_SEND, conn, conn->stash.now, offset); |
4617 | 0 | QUICLY_LOG_CONN(data_blocked_send, conn, { PTLS_LOG_ELEMENT_UNSIGNED(off, offset); }); |
4618 | | |
4619 | 0 | ret = 0; |
4620 | 0 | Exit: |
4621 | 0 | return ret; |
4622 | 0 | } |
4623 | | |
4624 | | #define QUICLY_RESUMPTION_ENTRY_TYPE_CAREFUL_RESUME 0 |
4625 | | |
4626 | | /** |
4627 | | * derives size of the new CWND given previous delivery rate and min RTTs of the previous and the new session |
4628 | | */ |
4629 | | static uint32_t derive_jumpstart_cwnd(quicly_context_t *ctx, uint32_t new_rtt, uint64_t prev_rate, uint32_t prev_rtt) |
4630 | 0 | { |
4631 | | /* convert previous rate to CWND size */ |
4632 | 0 | double cwnd = (double)prev_rate * prev_rtt / 1000; |
4633 | | |
4634 | | /* if new RTT is smaller, reduce new CWND so that the rate does not become greater than the previous session */ |
4635 | 0 | if (new_rtt < prev_rtt) |
4636 | 0 | cwnd = cwnd * new_rtt / prev_rtt; |
4637 | | |
4638 | | /* cap to the configured value */ |
4639 | 0 | size_t jumpstart_cwnd = |
4640 | 0 | quicly_cc_calc_initial_cwnd(ctx->max_jumpstart_cwnd_packets, ctx->transport_params.max_udp_payload_size); |
4641 | 0 | if (cwnd > jumpstart_cwnd) |
4642 | 0 | cwnd = jumpstart_cwnd; |
4643 | |
|
4644 | 0 | return (uint32_t)cwnd; |
4645 | 0 | } |
4646 | | |
4647 | | static int decode_resumption_info(const uint8_t *src, size_t len, uint64_t *rate, uint32_t *min_rtt) |
4648 | 0 | { |
4649 | 0 | const uint8_t *end = src + len; |
4650 | 0 | int ret = 0; |
4651 | |
|
4652 | 0 | *rate = 0; |
4653 | |
|
4654 | 0 | while (src < end) { |
4655 | 0 | uint64_t id; |
4656 | 0 | if ((id = ptls_decode_quicint(&src, end)) == UINT64_MAX) { |
4657 | 0 | ret = PTLS_ALERT_DECODE_ERROR; |
4658 | 0 | goto Exit; |
4659 | 0 | } |
4660 | 0 | ptls_decode_open_block(src, end, -1, { |
4661 | 0 | switch (id) { |
4662 | 0 | case QUICLY_RESUMPTION_ENTRY_TYPE_CAREFUL_RESUME: { |
4663 | 0 | if ((*rate = ptls_decode_quicint(&src, end)) == UINT64_MAX) { |
4664 | 0 | ret = PTLS_ALERT_DECODE_ERROR; |
4665 | 0 | goto Exit; |
4666 | 0 | } |
4667 | 0 | uint64_t v; |
4668 | 0 | if ((v = ptls_decode_quicint(&src, end)) > UINT32_MAX) { |
4669 | 0 | ret = PTLS_ALERT_DECODE_ERROR; |
4670 | 0 | goto Exit; |
4671 | 0 | } |
4672 | 0 | *min_rtt = (uint32_t)v; |
4673 | 0 | } break; |
4674 | 0 | default: |
4675 | | /* ignore unknown types */ |
4676 | 0 | src = end; |
4677 | 0 | break; |
4678 | 0 | } |
4679 | 0 | }); |
4680 | 0 | } |
4681 | | |
4682 | 0 | Exit: |
4683 | 0 | return ret; |
4684 | 0 | } |
4685 | | |
/* Serializes resumption information (currently only the Careful Resume entry) into `dst`, returning the number of bytes written.
 * Each entry is a varint type followed by a length-prefixed block, so unknown types can be skipped by the decoder. */
static size_t encode_resumption_info(quicly_conn_t *conn, uint8_t *dst, size_t capacity)
{
    ptls_buffer_t buf;
    int ret; /* NOTE(review): `ret` and the `Exit` label below are consumed by the `ptls_buffer_push_block` machinery, which jumps
              * to `Exit` on failure -- confirm against the picotls macro definitions */

    ptls_buffer_init(&buf, dst, capacity);

#define PUSH_ENTRY(id, block)                                                                                                      \
    do {                                                                                                                           \
        ptls_buffer_push_quicint(&buf, (id));                                                                                      \
        ptls_buffer_push_block(&buf, -1, block);                                                                                   \
    } while (0)

    /* emit delivery rate for Careful Resume */
    if (conn->super.stats.token_sent.rate != 0 && conn->super.stats.token_sent.rtt != 0) {
        PUSH_ENTRY(QUICLY_RESUMPTION_ENTRY_TYPE_CAREFUL_RESUME, {
            ptls_buffer_push_quicint(&buf, conn->super.stats.token_sent.rate);
            ptls_buffer_push_quicint(&buf, conn->super.stats.token_sent.rtt);
        });
    }

#undef PUSH_ENTRY

Exit:
    /* everything must have fit within `capacity`; the buffer must not have switched to a heap allocation */
    assert(!buf.is_allocated);
    return buf.off;
}
4713 | | |
/* Builds and emits a NEW_TOKEN frame carrying an encrypted address token of type RESUMPTION, which the client can present in a
 * future connection (enables address validation and Careful Resume). */
static quicly_error_t send_resumption_token(quicly_conn_t *conn, quicly_send_context_t *s)
{
    /* fill conn->super.stats.token_sent the information we are sending now */
    calc_resume_sendrate(conn, &conn->super.stats.token_sent.rate, &conn->super.stats.token_sent.rtt);

    quicly_address_token_plaintext_t token;
    ptls_buffer_t tokenbuf;
    uint8_t tokenbuf_small[128]; /* stack storage for `tokenbuf`; heap is used only if the encrypted token outgrows this */
    quicly_sent_t *sent;
    quicly_error_t ret;

    ptls_buffer_init(&tokenbuf, tokenbuf_small, sizeof(tokenbuf_small));

    /* build token */
    token =
        (quicly_address_token_plaintext_t){QUICLY_ADDRESS_TOKEN_TYPE_RESUMPTION, conn->super.ctx->now->cb(conn->super.ctx->now)};
    token.remote = conn->paths[0]->address.remote;
    token.resumption.len = encode_resumption_info(conn, token.resumption.bytes, sizeof(token.resumption.bytes));

    /* encrypt */
    if ((ret = conn->super.ctx->generate_resumption_token->cb(conn->super.ctx->generate_resumption_token, conn, &tokenbuf,
                                                              &token)) != 0)
        goto Exit;
    assert(tokenbuf.off < QUICLY_MIN_CLIENT_INITIAL_SIZE / 2 && "this is a ballpark figure, but tokens ought to be small");

    /* emit frame */
    if ((ret = allocate_ack_eliciting_frame(conn, s, quicly_new_token_frame_capacity(ptls_iovec_init(tokenbuf.base, tokenbuf.off)),
                                            &sent, on_ack_new_token)) != 0)
        goto Exit;
    /* track the token in flight so that loss / acknowledgment can be correlated to the generation counter */
    ++conn->egress.new_token.num_inflight;
    sent->data.new_token.is_inflight = 1;
    sent->data.new_token.generation = conn->egress.new_token.generation;
    s->dst = quicly_encode_new_token_frame(s->dst, ptls_iovec_init(tokenbuf.base, tokenbuf.off));
    conn->egress.pending_flows &= ~QUICLY_PENDING_FLOW_NEW_TOKEN_BIT;

    ++conn->super.stats.num_frames_sent.new_token;
    QUICLY_PROBE(NEW_TOKEN_SEND, conn, conn->stash.now, tokenbuf.base, tokenbuf.off, sent->data.new_token.generation);
    QUICLY_LOG_CONN(new_token_send, conn, {
        PTLS_LOG_ELEMENT_HEXDUMP(token, tokenbuf.base, tokenbuf.off);
        PTLS_LOG_ELEMENT_UNSIGNED(generation, sent->data.new_token.generation);
    });
    ret = 0;
Exit:
    /* goto-cleanup: the token buffer is disposed on every path */
    ptls_buffer_dispose(&tokenbuf);
    return ret;
}
4760 | | |
4761 | | size_t quicly_send_version_negotiation(quicly_context_t *ctx, ptls_iovec_t dest_cid, ptls_iovec_t src_cid, const uint32_t *versions, |
4762 | | void *payload) |
4763 | 0 | { |
4764 | 0 | uint8_t *dst = payload; |
4765 | | |
4766 | | /* type_flags */ |
4767 | 0 | ctx->tls->random_bytes(dst, 1); |
4768 | 0 | *dst |= QUICLY_LONG_HEADER_BIT; |
4769 | 0 | ++dst; |
4770 | | /* version */ |
4771 | 0 | dst = quicly_encode32(dst, 0); |
4772 | | /* connection-id */ |
4773 | 0 | *dst++ = dest_cid.len; |
4774 | 0 | if (dest_cid.len != 0) { |
4775 | 0 | memcpy(dst, dest_cid.base, dest_cid.len); |
4776 | 0 | dst += dest_cid.len; |
4777 | 0 | } |
4778 | 0 | *dst++ = src_cid.len; |
4779 | 0 | if (src_cid.len != 0) { |
4780 | 0 | memcpy(dst, src_cid.base, src_cid.len); |
4781 | 0 | dst += src_cid.len; |
4782 | 0 | } |
4783 | | /* supported_versions */ |
4784 | 0 | for (const uint32_t *v = versions; *v != 0; ++v) |
4785 | 0 | dst = quicly_encode32(dst, *v); |
4786 | | /* add a greasing version. This also covers the case where an empty list is specified by the caller to indicate rejection. */ |
4787 | 0 | uint32_t grease_version = 0; |
4788 | 0 | if (src_cid.len >= sizeof(grease_version)) |
4789 | 0 | memcpy(&grease_version, src_cid.base, sizeof(grease_version)); |
4790 | 0 | grease_version = (grease_version & 0xf0f0f0f0) | 0x0a0a0a0a; |
4791 | 0 | dst = quicly_encode32(dst, grease_version); |
4792 | |
|
4793 | 0 | return dst - (uint8_t *)payload; |
4794 | 0 | } |
4795 | | |
4796 | | quicly_error_t quicly_retry_calc_cidpair_hash(ptls_hash_algorithm_t *sha256, ptls_iovec_t client_cid, ptls_iovec_t server_cid, |
4797 | | uint64_t *value) |
4798 | 0 | { |
4799 | 0 | uint8_t digest[PTLS_SHA256_DIGEST_SIZE], buf[(QUICLY_MAX_CID_LEN_V1 + 1) * 2], *p = buf; |
4800 | 0 | int ret; |
4801 | |
|
4802 | 0 | *p++ = (uint8_t)client_cid.len; |
4803 | 0 | memcpy(p, client_cid.base, client_cid.len); |
4804 | 0 | p += client_cid.len; |
4805 | 0 | *p++ = (uint8_t)server_cid.len; |
4806 | 0 | memcpy(p, server_cid.base, server_cid.len); |
4807 | 0 | p += server_cid.len; |
4808 | |
|
4809 | 0 | if ((ret = ptls_calc_hash(sha256, digest, buf, p - buf)) != 0) |
4810 | 0 | return ret; |
4811 | 0 | p = digest; |
4812 | 0 | *value = quicly_decode64((void *)&p); |
4813 | |
|
4814 | 0 | return 0; |
4815 | 0 | } |
4816 | | |
/**
 * Synthesizes a Retry packet into `datagram` (backed by a buffer of QUICLY_MIN_CLIENT_INITIAL_SIZE bytes) and returns the
 * number of bytes written, or SIZE_MAX on error. The packet carries an address-validation token encrypted with
 * `token_encrypt_ctx` (optionally preceded by the caller-supplied `token_prefix`), and is authenticated using the
 * version-specific Retry AEAD, which is cached in `*retry_aead_cache` when a cache slot is provided.
 */
size_t quicly_send_retry(quicly_context_t *ctx, ptls_aead_context_t *token_encrypt_ctx, uint32_t protocol_version,
                         struct sockaddr *dest_addr, ptls_iovec_t dest_cid, struct sockaddr *src_addr, ptls_iovec_t src_cid,
                         ptls_iovec_t odcid, ptls_iovec_t token_prefix, ptls_iovec_t appdata,
                         ptls_aead_context_t **retry_aead_cache, uint8_t *datagram)
{
    quicly_address_token_plaintext_t token;
    ptls_buffer_t buf;
    quicly_error_t ret;

    /* the CID chosen by the server must differ from the original DCID */
    assert(!(src_cid.len == odcid.len && memcmp(src_cid.base, odcid.base, src_cid.len) == 0));

    /* build token as plaintext */
    token = (quicly_address_token_plaintext_t){QUICLY_ADDRESS_TOKEN_TYPE_RETRY, ctx->now->cb(ctx->now)};
    set_address(&token.remote, dest_addr);
    set_address(&token.local, src_addr);
    /* record the CIDs involved so they can be validated when the client returns the token */
    quicly_set_cid(&token.retry.original_dcid, odcid);
    quicly_set_cid(&token.retry.client_cid, dest_cid);
    quicly_set_cid(&token.retry.server_cid, src_cid);
    if (appdata.len != 0) {
        assert(appdata.len <= sizeof(token.appdata.bytes));
        memcpy(token.appdata.bytes, appdata.base, appdata.len);
        token.appdata.len = appdata.len;
    }

    /* start building the packet; `datagram` serves as the fixed-size backing store of the buffer */
    ptls_buffer_init(&buf, datagram, QUICLY_MIN_CLIENT_INITIAL_SIZE);

    /* first generate a pseudo packet: an ODCID-prefixed image of the Retry packet that is later fed to the AEAD as AAD */
    ptls_buffer_push_block(&buf, 1, { ptls_buffer_pushv(&buf, odcid.base, odcid.len); });
    ctx->tls->random_bytes(buf.base + buf.off, 1);
    buf.base[buf.off] = QUICLY_PACKET_TYPE_RETRY | (buf.base[buf.off] & 0x0f); /* low 4 bits of the first byte are random */
    ++buf.off;
    ptls_buffer_push32(&buf, protocol_version);
    ptls_buffer_push_block(&buf, 1, { ptls_buffer_pushv(&buf, dest_cid.base, dest_cid.len); });
    ptls_buffer_push_block(&buf, 1, { ptls_buffer_pushv(&buf, src_cid.base, src_cid.len); });
    /* optional plaintext prefix placed in front of the encrypted token */
    if (token_prefix.len != 0) {
        assert(token_prefix.len <= buf.capacity - buf.off);
        memcpy(buf.base + buf.off, token_prefix.base, token_prefix.len);
        buf.off += token_prefix.len;
    }
    if ((ret = quicly_encrypt_address_token(ctx->tls->random_bytes, token_encrypt_ctx, &buf, buf.off - token_prefix.len, &token)) !=
        0)
        goto Exit;

    /* append AEAD tag; the reserve call must not spill into a heap allocation, as `datagram` is the output */
    ret = ptls_buffer_reserve(&buf, PTLS_AESGCM_TAG_SIZE);
    assert(ret == 0);
    assert(!buf.is_allocated && "retry packet is too large");
    {
        /* reuse the cached Retry AEAD when available, otherwise instantiate one (and store it if a cache slot was given) */
        ptls_aead_context_t *aead =
            retry_aead_cache != NULL && *retry_aead_cache != NULL ? *retry_aead_cache : create_retry_aead(ctx, protocol_version, 1);
        ptls_aead_encrypt(aead, buf.base + buf.off, "", 0, 0, buf.base, buf.off);
        if (retry_aead_cache != NULL) {
            *retry_aead_cache = aead;
        } else {
            ptls_aead_free(aead);
        }
    }
    buf.off += PTLS_AESGCM_TAG_SIZE;

    /* convert the image to a Retry packet, by stripping the ODCID field */
    memmove(buf.base, buf.base + odcid.len + 1, buf.off - (odcid.len + 1));
    buf.off -= odcid.len + 1;

    ret = 0;

Exit:
    return ret == 0 ? buf.off : SIZE_MAX;
}
4887 | | |
4888 | | static struct st_quicly_pn_space_t *setup_send_space(quicly_conn_t *conn, size_t epoch, quicly_send_context_t *s) |
4889 | 0 | { |
4890 | 0 | struct st_quicly_pn_space_t *space = NULL; |
4891 | |
|
4892 | 0 | switch (epoch) { |
4893 | 0 | case QUICLY_EPOCH_INITIAL: |
4894 | 0 | if (conn->initial == NULL || (s->current.cipher = &conn->initial->cipher.egress)->aead == NULL) |
4895 | 0 | return NULL; |
4896 | 0 | s->current.first_byte = QUICLY_PACKET_TYPE_INITIAL; |
4897 | 0 | space = &conn->initial->super; |
4898 | 0 | break; |
4899 | 0 | case QUICLY_EPOCH_HANDSHAKE: |
4900 | 0 | if (conn->handshake == NULL || (s->current.cipher = &conn->handshake->cipher.egress)->aead == NULL) |
4901 | 0 | return NULL; |
4902 | 0 | s->current.first_byte = QUICLY_PACKET_TYPE_HANDSHAKE; |
4903 | 0 | space = &conn->handshake->super; |
4904 | 0 | break; |
4905 | 0 | case QUICLY_EPOCH_0RTT: |
4906 | 0 | case QUICLY_EPOCH_1RTT: |
4907 | 0 | if (conn->application == NULL || conn->application->cipher.egress.key.header_protection == NULL) |
4908 | 0 | return NULL; |
4909 | 0 | if ((epoch == QUICLY_EPOCH_0RTT) == conn->application->one_rtt_writable) |
4910 | 0 | return NULL; |
4911 | 0 | s->current.cipher = &conn->application->cipher.egress.key; |
4912 | 0 | s->current.first_byte = epoch == QUICLY_EPOCH_0RTT ? QUICLY_PACKET_TYPE_0RTT : QUICLY_QUIC_BIT; |
4913 | 0 | space = &conn->application->super; |
4914 | 0 | break; |
4915 | 0 | default: |
4916 | 0 | assert(!"logic flaw"); |
4917 | 0 | break; |
4918 | 0 | } |
4919 | | |
4920 | 0 | return space; |
4921 | 0 | } |
4922 | | |
4923 | | static quicly_error_t send_handshake_flow(quicly_conn_t *conn, size_t epoch, quicly_send_context_t *s, int ack_only, int send_probe) |
4924 | 0 | { |
4925 | 0 | struct st_quicly_pn_space_t *space; |
4926 | 0 | quicly_error_t ret = 0; |
4927 | | |
4928 | | /* setup send epoch, or return if it's impossible to send in this epoch */ |
4929 | 0 | if ((space = setup_send_space(conn, epoch, s)) == NULL) |
4930 | 0 | return 0; |
4931 | | |
4932 | | /* send ACK */ |
4933 | 0 | if (space != NULL && (space->unacked_count != 0 || send_probe)) |
4934 | 0 | if ((ret = send_ack(conn, space, s)) != 0) |
4935 | 0 | goto Exit; |
4936 | | |
4937 | 0 | if (!ack_only) { |
4938 | | /* send data */ |
4939 | 0 | while ((conn->egress.pending_flows & (uint8_t)(1 << epoch)) != 0) { |
4940 | 0 | quicly_stream_t *stream = quicly_get_stream(conn, -(quicly_stream_id_t)(1 + epoch)); |
4941 | 0 | assert(stream != NULL); |
4942 | 0 | if ((ret = quicly_send_stream(stream, s)) != 0) |
4943 | 0 | goto Exit; |
4944 | 0 | resched_stream_data(stream); |
4945 | 0 | send_probe = 0; |
4946 | 0 | } |
4947 | | |
4948 | | /* send probe if requested */ |
4949 | 0 | if (send_probe) { |
4950 | 0 | if ((ret = do_allocate_frame(conn, s, 1, ALLOCATE_FRAME_TYPE_ACK_ELICITING)) != 0) |
4951 | 0 | goto Exit; |
4952 | 0 | *s->dst++ = QUICLY_FRAME_TYPE_PING; |
4953 | 0 | conn->egress.last_retransmittable_sent_at = conn->stash.now; |
4954 | 0 | ++conn->super.stats.num_frames_sent.ping; |
4955 | 0 | QUICLY_PROBE(PING_SEND, conn, conn->stash.now); |
4956 | 0 | QUICLY_LOG_CONN(ping_send, conn, {}); |
4957 | 0 | } |
4958 | 0 | } |
4959 | | |
4960 | 0 | Exit: |
4961 | 0 | return ret; |
4962 | 0 | } |
4963 | | |
4964 | | static quicly_error_t send_connection_close(quicly_conn_t *conn, size_t epoch, quicly_send_context_t *s) |
4965 | 0 | { |
4966 | 0 | uint64_t error_code, offending_frame_type; |
4967 | 0 | const char *reason_phrase; |
4968 | 0 | quicly_error_t ret; |
4969 | | |
4970 | | /* setup send epoch, or return if it's impossible to send in this epoch */ |
4971 | 0 | if (setup_send_space(conn, epoch, s) == NULL) |
4972 | 0 | return 0; |
4973 | | |
4974 | | /* determine the payload, masking the application error when sending the frame using an unauthenticated epoch */ |
4975 | 0 | error_code = conn->egress.connection_close.error_code; |
4976 | 0 | offending_frame_type = conn->egress.connection_close.frame_type; |
4977 | 0 | reason_phrase = conn->egress.connection_close.reason_phrase; |
4978 | 0 | if (offending_frame_type == UINT64_MAX) { |
4979 | 0 | switch (get_epoch(s->current.first_byte)) { |
4980 | 0 | case QUICLY_EPOCH_INITIAL: |
4981 | 0 | case QUICLY_EPOCH_HANDSHAKE: |
4982 | 0 | error_code = QUICLY_TRANSPORT_ERROR_APPLICATION; |
4983 | 0 | offending_frame_type = QUICLY_FRAME_TYPE_PADDING; |
4984 | 0 | reason_phrase = ""; |
4985 | 0 | break; |
4986 | 0 | } |
4987 | 0 | } |
4988 | | |
4989 | | /* write frame */ |
4990 | 0 | if ((ret = do_allocate_frame(conn, s, quicly_close_frame_capacity(error_code, offending_frame_type, reason_phrase), |
4991 | 0 | ALLOCATE_FRAME_TYPE_NON_ACK_ELICITING)) != 0) |
4992 | 0 | return ret; |
4993 | 0 | s->dst = quicly_encode_close_frame(s->dst, error_code, offending_frame_type, reason_phrase); |
4994 | | |
4995 | | /* update counter, probe */ |
4996 | 0 | if (offending_frame_type != UINT64_MAX) { |
4997 | 0 | ++conn->super.stats.num_frames_sent.transport_close; |
4998 | 0 | QUICLY_PROBE(TRANSPORT_CLOSE_SEND, conn, conn->stash.now, error_code, offending_frame_type, reason_phrase); |
4999 | 0 | QUICLY_LOG_CONN(transport_close_send, conn, { |
5000 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(error_code, error_code); |
5001 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(frame_type, offending_frame_type); |
5002 | 0 | PTLS_LOG_ELEMENT_UNSAFESTR(reason_phrase, reason_phrase, strlen(reason_phrase)); |
5003 | 0 | }); |
5004 | 0 | } else { |
5005 | 0 | ++conn->super.stats.num_frames_sent.application_close; |
5006 | 0 | QUICLY_PROBE(APPLICATION_CLOSE_SEND, conn, conn->stash.now, error_code, reason_phrase); |
5007 | 0 | QUICLY_LOG_CONN(application_close_send, conn, { |
5008 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(error_code, error_code); |
5009 | 0 | PTLS_LOG_ELEMENT_UNSAFESTR(reason_phrase, reason_phrase, strlen(reason_phrase)); |
5010 | 0 | }); |
5011 | 0 | } |
5012 | | |
5013 | 0 | return 0; |
5014 | 0 | } |
5015 | | |
/**
 * Sends a NEW_CONNECTION_ID frame advertising `new_cid`, registering the CID's sequence number in the sent-map entry so
 * that loss / acknowledgement can be handled by `on_ack_new_connection_id`.
 */
static quicly_error_t send_new_connection_id(quicly_conn_t *conn, quicly_send_context_t *s, struct st_quicly_local_cid_t *new_cid)
{
    quicly_sent_t *sent;
    uint64_t retire_prior_to = 0; /* TODO */
    quicly_error_t ret;

    if ((ret = allocate_ack_eliciting_frame(
             conn, s, quicly_new_connection_id_frame_capacity(new_cid->sequence, retire_prior_to, new_cid->cid.len), &sent,
             on_ack_new_connection_id)) != 0)
        return ret;
    /* remember which CID this sent-map entry refers to */
    sent->data.new_connection_id.sequence = new_cid->sequence;

    s->dst = quicly_encode_new_connection_id_frame(s->dst, new_cid->sequence, retire_prior_to, new_cid->cid.cid, new_cid->cid.len,
                                                   new_cid->stateless_reset_token);

    ++conn->super.stats.num_frames_sent.new_connection_id;
    QUICLY_PROBE(NEW_CONNECTION_ID_SEND, conn, conn->stash.now, new_cid->sequence, retire_prior_to,
                 QUICLY_PROBE_HEXDUMP(new_cid->cid.cid, new_cid->cid.len),
                 QUICLY_PROBE_HEXDUMP(new_cid->stateless_reset_token, QUICLY_STATELESS_RESET_TOKEN_LEN));
    QUICLY_LOG_CONN(new_connection_id_send, conn, {
        PTLS_LOG_ELEMENT_UNSIGNED(sequence, new_cid->sequence);
        PTLS_LOG_ELEMENT_UNSIGNED(retire_prior_to, retire_prior_to);
        PTLS_LOG_ELEMENT_HEXDUMP(cid, new_cid->cid.cid, new_cid->cid.len);
        PTLS_LOG_ELEMENT_HEXDUMP(stateless_reset_token, new_cid->stateless_reset_token, QUICLY_STATELESS_RESET_TOKEN_LEN);
    });

    return 0;
}
5044 | | |
/**
 * Sends a RETIRE_CONNECTION_ID frame for the given sequence number, registering the sequence in the sent-map entry so
 * that loss / acknowledgement can be handled by `on_ack_retire_connection_id`.
 */
static quicly_error_t send_retire_connection_id(quicly_conn_t *conn, quicly_send_context_t *s, uint64_t sequence)
{
    quicly_sent_t *sent;
    quicly_error_t ret;

    if ((ret = allocate_ack_eliciting_frame(conn, s, quicly_retire_connection_id_frame_capacity(sequence), &sent,
                                            on_ack_retire_connection_id)) != 0)
        return ret;
    /* remember which sequence this sent-map entry refers to */
    sent->data.retire_connection_id.sequence = sequence;

    s->dst = quicly_encode_retire_connection_id_frame(s->dst, sequence);

    ++conn->super.stats.num_frames_sent.retire_connection_id;
    QUICLY_PROBE(RETIRE_CONNECTION_ID_SEND, conn, conn->stash.now, sequence);
    QUICLY_LOG_CONN(retire_connection_id_send, conn, { PTLS_LOG_ELEMENT_UNSIGNED(sequence, sequence); });

    return 0;
}
5063 | | |
5064 | | static quicly_error_t send_path_challenge(quicly_conn_t *conn, quicly_send_context_t *s, int is_response, const uint8_t *data) |
5065 | 0 | { |
5066 | 0 | quicly_error_t ret; |
5067 | |
|
5068 | 0 | if ((ret = do_allocate_frame(conn, s, QUICLY_PATH_CHALLENGE_FRAME_CAPACITY, ALLOCATE_FRAME_TYPE_NON_ACK_ELICITING)) != 0) |
5069 | 0 | return ret; |
5070 | | |
5071 | 0 | s->dst = quicly_encode_path_challenge_frame(s->dst, is_response, data); |
5072 | 0 | s->target.full_size = 1; /* ensure that the path can transfer full-size packets */ |
5073 | |
|
5074 | 0 | if (!is_response) { |
5075 | 0 | ++conn->super.stats.num_frames_sent.path_challenge; |
5076 | 0 | QUICLY_PROBE(PATH_CHALLENGE_SEND, conn, conn->stash.now, data, QUICLY_PATH_CHALLENGE_DATA_LEN); |
5077 | 0 | QUICLY_LOG_CONN(path_challenge_send, conn, { PTLS_LOG_ELEMENT_HEXDUMP(data, data, QUICLY_PATH_CHALLENGE_DATA_LEN); }); |
5078 | 0 | } else { |
5079 | 0 | ++conn->super.stats.num_frames_sent.path_response; |
5080 | 0 | QUICLY_PROBE(PATH_RESPONSE_SEND, conn, conn->stash.now, data, QUICLY_PATH_CHALLENGE_DATA_LEN); |
5081 | 0 | QUICLY_LOG_CONN(path_response_send, conn, { PTLS_LOG_ELEMENT_HEXDUMP(data, data, QUICLY_PATH_CHALLENGE_DATA_LEN); }); |
5082 | 0 | } |
5083 | | |
5084 | 0 | return 0; |
5085 | 0 | } |
5086 | | |
/**
 * Callback registered with picotls; invoked each time a new TLS traffic secret becomes available. Installs the derived
 * QUIC packet-protection keys (header protection + AEAD) into the slot corresponding to (epoch, direction), and — once
 * the 1-RTT egress key is in place — performs the state transitions tied to it (unblocking streams, sending the first
 * resumption token, scheduling NEW_CONNECTION_IDs).
 */
static int update_traffic_key_cb(ptls_update_traffic_key_t *self, ptls_t *tls, int is_enc, size_t epoch, const void *secret)
{
    quicly_conn_t *conn = *ptls_get_data_ptr(tls);
    ptls_context_t *tlsctx = ptls_get_context(tls);
    ptls_cipher_suite_t *cipher = ptls_get_cipher(tls);
    ptls_cipher_context_t **hp_slot;
    ptls_aead_context_t **aead_slot;
    int ret;
    /* key-log labels indexed by [is-this-the-server's-secret][epoch]; NULL marks combinations not expected here */
    static const char *log_labels[2][4] = {
        {NULL, "CLIENT_EARLY_TRAFFIC_SECRET", "CLIENT_HANDSHAKE_TRAFFIC_SECRET", "CLIENT_TRAFFIC_SECRET_0"},
        {NULL, NULL, "SERVER_HANDSHAKE_TRAFFIC_SECRET", "SERVER_TRAFFIC_SECRET_0"}};
    const char *log_label = log_labels[ptls_is_server(tls) == is_enc][epoch];

    QUICLY_PROBE(CRYPTO_UPDATE_SECRET, conn, conn->stash.now, is_enc, epoch, log_label,
                 QUICLY_PROBE_HEXDUMP(secret, cipher->hash->digest_size));
    QUICLY_LOG_CONN(crypto_update_secret, conn, {
        PTLS_LOG_ELEMENT_BOOL(is_enc, is_enc);
        PTLS_LOG_ELEMENT_UNSIGNED(epoch, epoch);
        PTLS_LOG_ELEMENT_SAFESTR(label, log_label);
        PTLS_LOG_APPDATA_ELEMENT_HEXDUMP(secret, secret, cipher->hash->digest_size);
    });

    /* emit the secret through the user-provided key-log callback (e.g., SSLKEYLOGFILE-style logging) */
    if (tlsctx->log_event != NULL) {
        char hexbuf[PTLS_MAX_DIGEST_SIZE * 2 + 1];
        ptls_hexdump(hexbuf, secret, cipher->hash->digest_size);
        tlsctx->log_event->cb(tlsctx->log_event, tls, log_label, "%s", hexbuf);
    }

/* helper that points hp_slot / aead_slot at the two members of a cipher-context struct */
#define SELECT_CIPHER_CONTEXT(p)                                                                                                   \
    do {                                                                                                                           \
        hp_slot = &(p)->header_protection;                                                                                         \
        aead_slot = &(p)->aead;                                                                                                    \
    } while (0)

    switch (epoch) {
    case QUICLY_EPOCH_0RTT:
        assert(is_enc == quicly_is_client(conn)); /* 0-RTT is encrypted only by the client */
        if (conn->application == NULL && (ret = setup_application_space(conn)) != 0)
            return ret;
        if (is_enc) {
            SELECT_CIPHER_CONTEXT(&conn->application->cipher.egress.key);
        } else {
            /* ingress 0-RTT key goes into the dedicated zero-rtt slots (aead[1]) */
            hp_slot = &conn->application->cipher.ingress.header_protection.zero_rtt;
            aead_slot = &conn->application->cipher.ingress.aead[1];
        }
        break;
    case QUICLY_EPOCH_HANDSHAKE:
        if (conn->handshake == NULL && (ret = setup_handshake_space_and_flow(conn, QUICLY_EPOCH_HANDSHAKE)) != 0)
            return ret;
        SELECT_CIPHER_CONTEXT(is_enc ? &conn->handshake->cipher.egress : &conn->handshake->cipher.ingress);
        break;
    case QUICLY_EPOCH_1RTT: {
        /* before installing the 1-RTT egress key, apply the peer's transport parameters */
        if (is_enc)
            if ((ret = compress_handshake_result(apply_remote_transport_params(conn))) != 0)
                return ret;
        if (conn->application == NULL && (ret = setup_application_space(conn)) != 0)
            return ret;
        uint8_t *secret_store;
        if (is_enc) {
            /* a pre-existing egress key (the 0-RTT key) is disposed of and replaced by the 1-RTT key */
            if (conn->application->cipher.egress.key.aead != NULL)
                dispose_cipher(&conn->application->cipher.egress.key);
            SELECT_CIPHER_CONTEXT(&conn->application->cipher.egress.key);
            secret_store = conn->application->cipher.egress.secret;
        } else {
            hp_slot = &conn->application->cipher.ingress.header_protection.one_rtt;
            aead_slot = &conn->application->cipher.ingress.aead[0];
            secret_store = conn->application->cipher.ingress.secret;
        }
        /* retain a copy of the 1-RTT secret (presumably for later key updates — confirm against consumers of `secret`) */
        memcpy(secret_store, secret, cipher->hash->digest_size);
    } break;
    default:
        assert(!"logic flaw");
        break;
    }

#undef SELECT_CIPHER_CONTEXT

    /* derive and install the header-protection and AEAD contexts into the selected slots */
    if ((ret = setup_cipher(conn, epoch, is_enc, hp_slot, aead_slot, cipher->aead, cipher->hash, secret)) != 0)
        return ret;

    if (epoch == QUICLY_EPOCH_1RTT && is_enc) {
        /* update states now that we have 1-RTT write key */
        conn->application->one_rtt_writable = 1;
        open_blocked_streams(conn, 1);
        open_blocked_streams(conn, 0);
        /* if some streams are still blocked by the peer's concurrency limits, schedule STREAMS_BLOCKED frames */
        if (quicly_linklist_is_linked(&conn->egress.pending_streams.blocked.bidi) ||
            quicly_linklist_is_linked(&conn->egress.pending_streams.blocked.uni))
            conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;
        /* send the first resumption token using the 0.5 RTT window */
        if (!quicly_is_client(conn) && conn->super.ctx->generate_resumption_token != NULL) {
            quicly_error_t ret64 = quicly_send_resumption_token(conn);
            assert(ret64 == 0);
        }

        /* schedule NEW_CONNECTION_IDs */
        size_t size = local_cid_size(conn);
        if (quicly_local_cid_set_size(&conn->super.local.cid_set, size))
            conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;
    }

    return 0;
}
5189 | | |
/**
 * Sends the connection-level control frames other than ACK and CONNECTION_CLOSE: MAX_STREAMS, MAX_DATA, DATA_BLOCKED,
 * STREAMS_BLOCKED, NEW_CONNECTION_ID, RETIRE_CONNECTION_ID. Returns 0 on success (including when there was nothing to
 * send), or the first error encountered.
 */
static quicly_error_t send_other_control_frames(quicly_conn_t *conn, quicly_send_context_t *s)
{
    quicly_error_t ret;

    /* MAX_STREAMS */
    if ((ret = send_max_streams(conn, 1, s)) != 0)
        return ret;
    if ((ret = send_max_streams(conn, 0, s)) != 0)
        return ret;

    /* MAX_DATA */
    if (should_send_max_data(conn)) {
        quicly_sent_t *sent;
        if ((ret = allocate_ack_eliciting_frame(conn, s, QUICLY_MAX_DATA_FRAME_CAPACITY, &sent, on_ack_max_data)) != 0)
            return ret;
        /* new limit = bytes consumed by the application + the configured window */
        uint64_t new_value = conn->ingress.max_data.bytes_consumed + conn->super.ctx->transport_params.max_data;
        s->dst = quicly_encode_max_data_frame(s->dst, new_value);
        quicly_maxsender_record(&conn->ingress.max_data.sender, new_value, &sent->data.max_data.args);
        ++conn->super.stats.num_frames_sent.max_data;
        QUICLY_PROBE(MAX_DATA_SEND, conn, conn->stash.now, new_value);
        QUICLY_LOG_CONN(max_data_send, conn, { PTLS_LOG_ELEMENT_UNSIGNED(maximum, new_value); });
    }

    /* DATA_BLOCKED */
    if (conn->egress.data_blocked == QUICLY_SENDER_STATE_SEND && (ret = send_data_blocked(conn, s)) != 0)
        return ret;

    /* STREAMS_BLOCKED */
    if ((ret = send_streams_blocked(conn, 1, s)) != 0)
        return ret;
    if ((ret = send_streams_blocked(conn, 0, s)) != 0)
        return ret;

    { /* NEW_CONNECTION_ID */
        size_t i, size = quicly_local_cid_get_size(&conn->super.local.cid_set);
        for (i = 0; i < size; i++) {
            /* PENDING CIDs are located at the front */
            struct st_quicly_local_cid_t *c = &conn->super.local.cid_set.cids[i];
            if (c->state != QUICLY_LOCAL_CID_STATE_PENDING)
                break;
            if ((ret = send_new_connection_id(conn, s, c)) != 0)
                break;
        }
        /* the first `i` entries were sent; mark them as such even when the loop exited on error */
        quicly_local_cid_on_sent(&conn->super.local.cid_set, i);
        if (ret != 0)
            return ret;
    }

    { /* RETIRE_CONNECTION_ID */
        size_t i, size = quicly_retire_cid_get_num_pending(&conn->egress.retire_cid);
        for (i = 0; i < size; i++) {
            uint64_t sequence = conn->egress.retire_cid.sequences[i];
            if ((ret = send_retire_connection_id(conn, s, sequence)) != 0)
                break;
        }
        /* drop the first `i` sequences that were sent, even when the loop exited on error */
        quicly_retire_cid_shift(&conn->egress.retire_cid, i);
        if (ret != 0)
            return ret;
    }

    return 0;
}
5252 | | |
5253 | | static quicly_error_t do_send(quicly_conn_t *conn, quicly_send_context_t *s) |
5254 | 0 | { |
5255 | 0 | int restrict_sending = 0, ack_only = 0; |
5256 | 0 | size_t min_packets_to_send = 0, orig_bytes_inflight = 0; |
5257 | 0 | quicly_error_t ret = 0; |
5258 | | |
5259 | | /* handle timeouts */ |
5260 | 0 | if (conn->idle_timeout.at <= conn->stash.now) { |
5261 | 0 | QUICLY_PROBE(IDLE_TIMEOUT, conn, conn->stash.now); |
5262 | 0 | QUICLY_LOG_CONN(idle_timeout, conn, {}); |
5263 | 0 | goto CloseNow; |
5264 | 0 | } |
5265 | | /* handle handshake timeouts */ |
5266 | 0 | if ((conn->initial != NULL || conn->handshake != NULL) && |
5267 | 0 | conn->created_at + (uint64_t)conn->super.ctx->handshake_timeout_rtt_multiplier * conn->egress.loss.rtt.smoothed <= |
5268 | 0 | conn->stash.now) { |
5269 | 0 | QUICLY_PROBE(HANDSHAKE_TIMEOUT, conn, conn->stash.now, conn->stash.now - conn->created_at, conn->egress.loss.rtt.smoothed); |
5270 | 0 | QUICLY_LOG_CONN(handshake_timeout, conn, { |
5271 | 0 | PTLS_LOG_ELEMENT_SIGNED(elapsed, conn->stash.now - conn->created_at); |
5272 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(rtt_smoothed, conn->egress.loss.rtt.smoothed); |
5273 | 0 | }); |
5274 | 0 | conn->super.stats.num_handshake_timeouts++; |
5275 | 0 | goto CloseNow; |
5276 | 0 | } |
5277 | 0 | uint64_t initial_handshake_sent = conn->super.stats.num_packets.initial_sent + conn->super.stats.num_packets.handshake_sent; |
5278 | 0 | if (initial_handshake_sent > conn->super.ctx->max_initial_handshake_packets) { |
5279 | 0 | QUICLY_PROBE(INITIAL_HANDSHAKE_PACKET_EXCEED, conn, conn->stash.now, initial_handshake_sent); |
5280 | 0 | QUICLY_LOG_CONN(initial_handshake_packet_exceed, conn, { PTLS_LOG_ELEMENT_UNSIGNED(num_packets, initial_handshake_sent); }); |
5281 | 0 | conn->super.stats.num_initial_handshake_exceeded++; |
5282 | 0 | goto CloseNow; |
5283 | 0 | } |
5284 | 0 | if (conn->egress.loss.alarm_at <= conn->stash.now) { |
5285 | 0 | if ((ret = quicly_loss_on_alarm(&conn->egress.loss, conn->stash.now, conn->super.remote.transport_params.max_ack_delay, |
5286 | 0 | conn->initial == NULL && conn->handshake == NULL, &min_packets_to_send, &restrict_sending, |
5287 | 0 | on_loss_detected)) != 0) |
5288 | 0 | goto Exit; |
5289 | 0 | assert(min_packets_to_send > 0); |
5290 | 0 | assert(min_packets_to_send <= s->max_datagrams); |
5291 | | |
5292 | 0 | if (restrict_sending) { |
5293 | | /* PTO: when handshake is in progress, send from the very first unacknowledged byte so as to maximize the chance of |
5294 | | * making progress. When handshake is complete, transmit new data if any, else retransmit the oldest unacknowledged data |
5295 | | * that is considered inflight. */ |
5296 | 0 | QUICLY_PROBE(PTO, conn, conn->stash.now, conn->egress.loss.sentmap.bytes_in_flight, conn->egress.cc.cwnd, |
5297 | 0 | conn->egress.loss.pto_count); |
5298 | 0 | QUICLY_LOG_CONN(pto, conn, { |
5299 | 0 | PTLS_LOG_ELEMENT_SIGNED(inflight, conn->egress.loss.sentmap.bytes_in_flight); |
5300 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(cwnd, conn->egress.cc.cwnd); |
5301 | 0 | PTLS_LOG_ELEMENT_SIGNED(pto_count, conn->egress.loss.pto_count); |
5302 | 0 | }); |
5303 | 0 | ++conn->super.stats.num_ptos; |
5304 | 0 | size_t bytes_to_mark = min_packets_to_send * conn->egress.max_udp_payload_size; |
5305 | 0 | if (conn->initial != NULL && (ret = mark_frames_on_pto(conn, QUICLY_EPOCH_INITIAL, &bytes_to_mark)) != 0) |
5306 | 0 | goto Exit; |
5307 | 0 | if (bytes_to_mark != 0 && conn->handshake != NULL && |
5308 | 0 | (ret = mark_frames_on_pto(conn, QUICLY_EPOCH_HANDSHAKE, &bytes_to_mark)) != 0) |
5309 | 0 | goto Exit; |
5310 | | /* Mark already sent 1-RTT data for PTO only if there's no new data, i.e., when scheduler_can_send() return false. */ |
5311 | 0 | if (bytes_to_mark != 0 && !scheduler_can_send(conn) && |
5312 | 0 | (ret = mark_frames_on_pto(conn, QUICLY_EPOCH_1RTT, &bytes_to_mark)) != 0) |
5313 | 0 | goto Exit; |
5314 | 0 | } |
5315 | 0 | } |
5316 | | |
5317 | | /* disable ECN if zero packets where acked in the first 3 PTO of the connection during which all sent packets are ECT(0) */ |
5318 | 0 | if (conn->egress.ecn.state == QUICLY_ECN_PROBING && conn->created_at + conn->egress.loss.rtt.smoothed * 3 < conn->stash.now) { |
5319 | 0 | update_ecn_state(conn, QUICLY_ECN_OFF); |
5320 | | /* TODO reset CC? */ |
5321 | 0 | } |
5322 | |
|
5323 | 0 | { /* calculate send window */ |
5324 | 0 | uint64_t pacer_window = SIZE_MAX; |
5325 | 0 | if (conn->egress.pacer != NULL) { |
5326 | 0 | uint32_t bytes_per_msec = calc_pacer_send_rate(conn); |
5327 | 0 | pacer_window = |
5328 | 0 | quicly_pacer_get_window(conn->egress.pacer, conn->stash.now, bytes_per_msec, conn->egress.max_udp_payload_size); |
5329 | 0 | } |
5330 | 0 | s->send_window = calc_send_window(conn, min_packets_to_send * conn->egress.max_udp_payload_size, |
5331 | 0 | calc_amplification_limit_allowance(conn), pacer_window, restrict_sending); |
5332 | 0 | } |
5333 | |
|
5334 | 0 | orig_bytes_inflight = conn->egress.loss.sentmap.bytes_in_flight; |
5335 | |
|
5336 | 0 | if (s->send_window == 0) |
5337 | 0 | ack_only = 1; |
5338 | |
|
5339 | 0 | s->dcid = get_dcid(conn, s->path_index); |
5340 | | |
5341 | | /* send handshake flows; when PTO fires... |
5342 | | * * quicly running as a client sends either a Handshake probe (or data) if the handshake keys are available, or else an |
5343 | | * Initial probe (or data). |
5344 | | * * quicly running as a server sends both Initial and Handshake probes (or data) if the corresponding keys are available. */ |
5345 | 0 | if (s->path_index == 0) { |
5346 | 0 | if ((ret = send_handshake_flow(conn, QUICLY_EPOCH_INITIAL, s, ack_only, |
5347 | 0 | min_packets_to_send != 0 && (!quicly_is_client(conn) || conn->handshake == NULL))) != 0) |
5348 | 0 | goto Exit; |
5349 | 0 | if ((ret = send_handshake_flow(conn, QUICLY_EPOCH_HANDSHAKE, s, ack_only, min_packets_to_send != 0)) != 0) |
5350 | 0 | goto Exit; |
5351 | 0 | } |
5352 | | |
5353 | | /* setup 0-RTT or 1-RTT send context (as the availability of the two epochs are mutually exclusive, we can try 1-RTT first as an |
5354 | | * optimization), then send application data if that succeeds */ |
5355 | 0 | if (setup_send_space(conn, QUICLY_EPOCH_1RTT, s) != NULL || setup_send_space(conn, QUICLY_EPOCH_0RTT, s) != NULL) { |
5356 | 0 | { /* path_challenge / response */ |
5357 | 0 | struct st_quicly_conn_path_t *path = conn->paths[s->path_index]; |
5358 | 0 | assert(path != NULL); |
5359 | 0 | if (path->path_challenge.send_at <= conn->stash.now) { |
5360 | | /* emit path challenge frame, doing exponential back off using PTO(initial_rtt) */ |
5361 | 0 | if ((ret = send_path_challenge(conn, s, 0, path->path_challenge.data)) != 0) |
5362 | 0 | goto Exit; |
5363 | 0 | path->path_challenge.num_sent += 1; |
5364 | 0 | path->path_challenge.send_at = |
5365 | 0 | conn->stash.now + ((3 * conn->super.ctx->loss.default_initial_rtt) << (path->path_challenge.num_sent - 1)); |
5366 | 0 | s->recalc_send_probe_at = 1; |
5367 | 0 | } |
5368 | 0 | if (path->path_response.send_) { |
5369 | 0 | if ((ret = send_path_challenge(conn, s, 1, path->path_response.data)) != 0) |
5370 | 0 | goto Exit; |
5371 | 0 | path->path_response.send_ = 0; |
5372 | 0 | s->recalc_send_probe_at = 1; |
5373 | 0 | } |
5374 | 0 | } |
5375 | | /* non probing frames are sent only on path zero */ |
5376 | 0 | if (s->path_index == 0) { |
5377 | | /* acks */ |
5378 | 0 | if (conn->application->one_rtt_writable && conn->egress.send_ack_at <= conn->stash.now && |
5379 | 0 | conn->application->super.unacked_count != 0) { |
5380 | 0 | if ((ret = send_ack(conn, &conn->application->super, s)) != 0) |
5381 | 0 | goto Exit; |
5382 | 0 | } |
5383 | | /* DATAGRAM frame. Notes regarding current implementation: |
5384 | | * * Not limited by CC, nor the bytes counted by CC. |
5385 | | * * When given payload is too large and does not fit into a QUIC packet, a packet containing only PADDING frames is |
5386 | | * sent. This is because we do not have a way to retract the generation of a QUIC packet. |
5387 | | * * Does not notify the application that the frame was dropped internally. */ |
5388 | 0 | if (should_send_datagram_frame(conn)) { |
5389 | 0 | for (size_t i = 0; i != conn->egress.datagram_frame_payloads.count; ++i) { |
5390 | 0 | ptls_iovec_t *payload = conn->egress.datagram_frame_payloads.payloads + i; |
5391 | 0 | size_t required_space = quicly_datagram_frame_capacity(*payload); |
5392 | 0 | if ((ret = do_allocate_frame(conn, s, required_space, ALLOCATE_FRAME_TYPE_ACK_ELICITING_NO_CC)) != 0) |
5393 | 0 | goto Exit; |
5394 | 0 | if (s->dst_end - s->dst >= required_space) { |
5395 | 0 | s->dst = quicly_encode_datagram_frame(s->dst, *payload); |
5396 | 0 | QUICLY_PROBE(DATAGRAM_SEND, conn, conn->stash.now, payload->base, payload->len); |
5397 | 0 | QUICLY_LOG_CONN(datagram_send, conn, |
5398 | 0 | { PTLS_LOG_APPDATA_ELEMENT_HEXDUMP(payload, payload->base, payload->len); }); |
5399 | 0 | } else { |
5400 | | /* FIXME: At the moment, we add a padding because we do not have a way to reclaim allocated space, and |
5401 | | * because it is forbidden to send an empty QUIC packet. */ |
5402 | 0 | *s->dst++ = QUICLY_FRAME_TYPE_PADDING; |
5403 | 0 | } |
5404 | 0 | } |
5405 | 0 | } |
5406 | 0 | if (!ack_only) { |
5407 | | /* PTO or loss detection timeout, always send PING. This is the easiest thing to do in terms of timer control. */ |
5408 | 0 | if (min_packets_to_send != 0) { |
5409 | 0 | if ((ret = do_allocate_frame(conn, s, 1, ALLOCATE_FRAME_TYPE_ACK_ELICITING)) != 0) |
5410 | 0 | goto Exit; |
5411 | 0 | *s->dst++ = QUICLY_FRAME_TYPE_PING; |
5412 | 0 | ++conn->super.stats.num_frames_sent.ping; |
5413 | 0 | QUICLY_PROBE(PING_SEND, conn, conn->stash.now); |
5414 | 0 | QUICLY_LOG_CONN(ping_send, conn, {}); |
5415 | 0 | } |
5416 | | /* take actions only permitted for short header packets */ |
5417 | 0 | if (conn->application->one_rtt_writable) { |
5418 | | /* send HANDSHAKE_DONE */ |
5419 | 0 | if ((conn->egress.pending_flows & QUICLY_PENDING_FLOW_HANDSHAKE_DONE_BIT) != 0 && |
5420 | 0 | (ret = send_handshake_done(conn, s)) != 0) |
5421 | 0 | goto Exit; |
5422 | | /* post-handshake messages */ |
5423 | 0 | if ((conn->egress.pending_flows & (uint8_t)(1 << QUICLY_EPOCH_1RTT)) != 0) { |
5424 | 0 | quicly_stream_t *stream = quicly_get_stream(conn, -(1 + QUICLY_EPOCH_1RTT)); |
5425 | 0 | assert(stream != NULL); |
5426 | 0 | if ((ret = quicly_send_stream(stream, s)) != 0) |
5427 | 0 | goto Exit; |
5428 | 0 | resched_stream_data(stream); |
5429 | 0 | } |
5430 | | /* send other connection-level control frames, and iff we succeed in sending all of them, clear OTHERS_BIT to |
5431 | | * disable `quicly_send` being called right again to send more control frames */ |
5432 | 0 | if ((ret = send_other_control_frames(conn, s)) != 0) |
5433 | 0 | goto Exit; |
5434 | 0 | conn->egress.pending_flows &= ~QUICLY_PENDING_FLOW_OTHERS_BIT; |
5435 | | /* send NEW_TOKEN */ |
5436 | 0 | if ((conn->egress.pending_flows & QUICLY_PENDING_FLOW_NEW_TOKEN_BIT) != 0 && |
5437 | 0 | (ret = send_resumption_token(conn, s)) != 0) |
5438 | 0 | goto Exit; |
5439 | 0 | } |
5440 | | /* send stream-level control frames */ |
5441 | 0 | if ((ret = send_stream_control_frames(conn, s)) != 0) |
5442 | 0 | goto Exit; |
5443 | | /* send STREAM frames */ |
5444 | 0 | if ((ret = conn->super.ctx->stream_scheduler->do_send(conn->super.ctx->stream_scheduler, conn, s)) != 0) |
5445 | 0 | goto Exit; |
5446 | | /* once more, send control frames related to streams, as the state might have changed */ |
5447 | 0 | if ((ret = send_stream_control_frames(conn, s)) != 0) |
5448 | 0 | goto Exit; |
5449 | 0 | if ((conn->egress.pending_flows & QUICLY_PENDING_FLOW_OTHERS_BIT) != 0) { |
5450 | 0 | if ((ret = send_other_control_frames(conn, s)) != 0) |
5451 | 0 | goto Exit; |
5452 | 0 | conn->egress.pending_flows &= ~QUICLY_PENDING_FLOW_OTHERS_BIT; |
5453 | 0 | } |
5454 | 0 | } |
5455 | | /* stream operations might have requested emission of NEW_TOKEN at the tail; if so, try to bundle it */ |
5456 | 0 | if ((conn->egress.pending_flows & QUICLY_PENDING_FLOW_NEW_TOKEN_BIT) != 0) { |
5457 | 0 | assert(conn->application->one_rtt_writable); |
5458 | 0 | if ((ret = send_resumption_token(conn, s)) != 0) |
5459 | 0 | goto Exit; |
5460 | 0 | } |
5461 | 0 | } |
5462 | 0 | } |
5463 | | |
5464 | 0 | Exit: |
5465 | 0 | if (ret == QUICLY_ERROR_SENDBUF_FULL) { |
5466 | 0 | ret = 0; |
5467 | | /* when the buffer becomes full for the first time, try to use jumpstart; acting after the buffer becomes full does not |
5468 | | * delay switch to jump start, assuming that the buffer provided by the caller of quicly_send is no greater than the burst |
5469 | | * size of the pacer (10 packets) */ |
5470 | 0 | if (conn->egress.try_jumpstart && conn->egress.loss.rtt.minimum != UINT32_MAX) { |
5471 | 0 | conn->egress.try_jumpstart = 0; |
5472 | 0 | conn->super.stats.jumpstart.new_rtt = 0; |
5473 | 0 | conn->super.stats.jumpstart.cwnd = 0; |
5474 | 0 | if (conn->super.ctx->use_pacing && conn->egress.cc.type->cc_jumpstart != NULL && |
5475 | 0 | (conn->super.ctx->default_jumpstart_cwnd_packets != 0 || conn->super.ctx->max_jumpstart_cwnd_packets != 0) && |
5476 | 0 | conn->egress.cc.num_loss_episodes == 0) { |
5477 | 0 | conn->super.stats.jumpstart.new_rtt = conn->egress.loss.rtt.minimum; |
5478 | 0 | if (conn->super.ctx->max_jumpstart_cwnd_packets != 0 && conn->super.stats.jumpstart.prev_rate != 0 && |
5479 | 0 | conn->super.stats.jumpstart.prev_rtt != 0) { |
5480 | | /* Careful Resume */ |
5481 | 0 | conn->super.stats.jumpstart.cwnd = |
5482 | 0 | derive_jumpstart_cwnd(conn->super.ctx, conn->super.stats.jumpstart.new_rtt, |
5483 | 0 | conn->super.stats.jumpstart.prev_rate, conn->super.stats.jumpstart.prev_rtt); |
5484 | 0 | } else if (conn->super.ctx->default_jumpstart_cwnd_packets != 0) { |
5485 | | /* jumpstart without previous information */ |
5486 | 0 | conn->super.stats.jumpstart.cwnd = quicly_cc_calc_initial_cwnd( |
5487 | 0 | conn->super.ctx->default_jumpstart_cwnd_packets, conn->super.ctx->transport_params.max_udp_payload_size); |
5488 | 0 | } |
5489 | | /* Jumpstart only if the amount that can be sent in 1 RTT would be higher than without. Comparison target is CWND + |
5490 | | * inflight, as that is the amount that can be sent at most. Note the flow rate can become smaller due to packets |
5491 | | * paced across the entire RTT during jumpstart. */ |
5492 | 0 | if (conn->super.stats.jumpstart.cwnd <= conn->egress.cc.cwnd + orig_bytes_inflight) |
5493 | 0 | conn->super.stats.jumpstart.cwnd = 0; |
5494 | 0 | } |
5495 | 0 | if (conn->super.stats.jumpstart.cwnd > 0) |
5496 | 0 | conn->egress.cc.type->cc_jumpstart(&conn->egress.cc, conn->super.stats.jumpstart.cwnd, conn->egress.packet_number); |
5497 | 0 | } |
5498 | 0 | } |
5499 | 0 | if (ret == 0 && s->target.first_byte_at != NULL) { |
5500 | | /* last packet can be small-sized, unless it is the first flight sent from the client */ |
5501 | 0 | if ((s->payload_buf.datagram[0] & QUICLY_PACKET_TYPE_BITMASK) == QUICLY_PACKET_TYPE_INITIAL && |
5502 | 0 | (quicly_is_client(conn) || !ack_only)) |
5503 | 0 | s->target.full_size = 1; |
5504 | 0 | commit_send_packet(conn, s, 0); |
5505 | 0 | } |
5506 | 0 | if (ret == 0) { |
5507 | | /* update timers, cc and delivery rate estimator states */ |
5508 | 0 | if (conn->application == NULL || conn->application->super.unacked_count == 0) |
5509 | 0 | conn->egress.send_ack_at = INT64_MAX; /* we have sent ACKs for every epoch (or before address validation) */ |
5510 | 0 | int can_send_stream_data = scheduler_can_send(conn); |
5511 | 0 | update_send_alarm(conn, can_send_stream_data, s->path_index == 0); |
5512 | 0 | update_ratemeter(conn, can_send_stream_data && conn->super.remote.address_validation.validated && |
5513 | 0 | (s->num_datagrams == s->max_datagrams || |
5514 | 0 | conn->egress.loss.sentmap.bytes_in_flight >= conn->egress.cc.cwnd || |
5515 | 0 | pacer_can_send_at(conn) > conn->stash.now)); |
5516 | 0 | if (s->num_datagrams != 0) |
5517 | 0 | update_idle_timeout(conn, 0); |
5518 | 0 | } |
5519 | 0 | return ret; |
5520 | | |
5521 | 0 | CloseNow: |
5522 | 0 | conn->super.state = QUICLY_STATE_DRAINING; |
5523 | 0 | destroy_all_streams(conn, 0, 0); |
5524 | 0 | return QUICLY_ERROR_FREE_CONNECTION; |
5525 | 0 | } |
5526 | | |
5527 | | void quicly_send_datagram_frames(quicly_conn_t *conn, ptls_iovec_t *datagrams, size_t num_datagrams) |
5528 | 0 | { |
5529 | 0 | for (size_t i = 0; i != num_datagrams; ++i) { |
5530 | 0 | if (conn->egress.datagram_frame_payloads.count == PTLS_ELEMENTSOF(conn->egress.datagram_frame_payloads.payloads)) |
5531 | 0 | break; |
5532 | 0 | void *copied; |
5533 | 0 | if ((copied = malloc(datagrams[i].len)) == NULL) |
5534 | 0 | break; |
5535 | 0 | memcpy(copied, datagrams[i].base, datagrams[i].len); |
5536 | 0 | conn->egress.datagram_frame_payloads.payloads[conn->egress.datagram_frame_payloads.count++] = |
5537 | 0 | ptls_iovec_init(copied, datagrams[i].len); |
5538 | 0 | } |
5539 | 0 | } |
5540 | | |
5541 | | int quicly_set_cc(quicly_conn_t *conn, quicly_cc_type_t *cc) |
5542 | 0 | { |
5543 | 0 | return cc->cc_switch(&conn->egress.cc); |
5544 | 0 | } |
5545 | | |
/**
 * Builds the UDP datagrams to be sent for the connection into `buf`, filling `datagrams` / `num_datagrams` with the result.
 *
 * Handles, in order: early exit when no timer has fired, the closing/draining states (emission of CONNECTION_CLOSE and the
 * 3-PTO teardown timer), probe packets on backup paths, and finally regular packets on the active path (index 0). On return,
 * `*dest` / `*src` carry the addresses of the path that datagrams were generated for (only when `*num_datagrams != 0`).
 * May return QUICLY_ERROR_FREE_CONNECTION, upon which the caller is expected to free the connection.
 */
quicly_error_t quicly_send(quicly_conn_t *conn, quicly_address_t *dest, quicly_address_t *src, struct iovec *datagrams,
                           size_t *num_datagrams, void *buf, size_t bufsize)
{
    quicly_send_context_t s = {.current = {.first_byte = -1},
                               .datagrams = datagrams,
                               .max_datagrams = *num_datagrams,
                               .payload_buf = {.datagram = buf, .end = (uint8_t *)buf + bufsize},
                               .first_packet_number = conn->egress.packet_number};
    quicly_error_t ret;

    lock_now(conn, 0);

    /* bail out if there's nothing is scheduled to be sent */
    if (conn->stash.now < quicly_get_first_timeout(conn)) {
        ret = 0;
        goto Exit;
    }

    /* determine DCID of active path; doing so is guaranteed to succeed as the protocol guarantees that there will always be at
     * least one non-retired CID available */
    if (conn->paths[0]->dcid == UINT64_MAX) {
        int success = setup_path_dcid(conn, 0);
        assert(success);
    }

    /* emit the "send" probe / log event only when either tracing facility is active */
    PTLS_LOG_DEFINE_POINT(quicly, send, send_logpoint);
    if (QUICLY_PROBE_ENABLED(SEND) ||
        (ptls_log_point_maybe_active(&send_logpoint) & ptls_log_conn_maybe_active(ptls_get_log_state(conn->crypto.tls),
                                                                                  (const char *(*)(void *))ptls_get_server_name,
                                                                                  conn->crypto.tls)) != 0) {
        const quicly_cid_t *dcid = get_dcid(conn, 0);
        QUICLY_PROBE(SEND, conn, conn->stash.now, conn->super.state, QUICLY_PROBE_HEXDUMP(dcid->cid, dcid->len));
        QUICLY_LOG_CONN(send, conn, {
            PTLS_LOG_ELEMENT_SIGNED(state, conn->super.state);
            PTLS_LOG_ELEMENT_HEXDUMP(dcid, dcid->cid, dcid->len);
        });
    }

    if (conn->super.state >= QUICLY_STATE_CLOSING) {
        quicly_sentmap_iter_t iter;
        if ((ret = init_acks_iter(conn, &iter)) != 0)
            goto Exit;
        /* check if the connection can be closed now (after 3 pto) */
        if (conn->super.state == QUICLY_STATE_DRAINING ||
            conn->super.stats.num_frames_sent.transport_close + conn->super.stats.num_frames_sent.application_close != 0) {
            /* an empty sentmap (sentinel packet number) indicates that the teardown timer has expired */
            if (quicly_sentmap_get(&iter)->packet_number == UINT64_MAX) {
                assert(quicly_num_streams(conn) == 0);
                ret = QUICLY_ERROR_FREE_CONNECTION;
                goto Exit;
            }
        }
        if (conn->super.state == QUICLY_STATE_CLOSING && conn->egress.send_ack_at <= conn->stash.now) {
            /* destroy all streams; doing so is delayed until the emission of CONNECTION_CLOSE frame to allow quicly_close to be
             * called from a stream handler */
            destroy_all_streams(conn, 0, 0);
            /* send CONNECTION_CLOSE in all possible epochs */
            s.dcid = get_dcid(conn, 0);
            for (size_t epoch = 0; epoch < QUICLY_NUM_EPOCHS; ++epoch) {
                if ((ret = send_connection_close(conn, epoch, &s)) != 0)
                    goto Exit;
            }
            if ((ret = commit_send_packet(conn, &s, 0)) != 0)
                goto Exit;
        }
        /* wait at least 1ms */
        if ((conn->egress.send_ack_at = quicly_sentmap_get(&iter)->sent_at + get_sentmap_expiration_time(conn)) <= conn->stash.now)
            conn->egress.send_ack_at = conn->stash.now + 1;
        ret = 0;
        goto Exit;
    }

    /* try emitting one probe packet on one of the backup paths, or ... (note: API of `quicly_send` allows us to send packets on no
     * more than one path at a time) */
    if (conn->egress.send_probe_at <= conn->stash.now) {
        for (s.path_index = 1; s.path_index < PTLS_ELEMENTSOF(conn->paths); ++s.path_index) {
            /* skip paths that do not exist or have nothing (challenge / response) due for transmission */
            if (conn->paths[s.path_index] == NULL || !(conn->stash.now >= conn->paths[s.path_index]->path_challenge.send_at ||
                                                       conn->paths[s.path_index]->path_response.send_))
                continue;
            /* give up on a path once the probe limit is exceeded */
            if (conn->paths[s.path_index]->path_challenge.num_sent > conn->super.ctx->max_probe_packets) {
                delete_path(conn, s.path_index);
                s.recalc_send_probe_at = 1;
                continue;
            }
            /* determine DCID to be used, if not yet been done; upon failure, this path (being secondary) is discarded */
            if (conn->paths[s.path_index]->dcid == UINT64_MAX && !setup_path_dcid(conn, s.path_index)) {
                delete_path(conn, s.path_index);
                s.recalc_send_probe_at = 1;
                conn->super.stats.num_paths.closed_no_dcid += 1;
                continue;
            }
            if ((ret = do_send(conn, &s)) != 0)
                goto Exit;
            assert(conn->stash.now < conn->paths[s.path_index]->path_challenge.send_at);
            if (s.num_datagrams != 0)
                break;
        }
    }
    /* otherwise, emit non-probing packets */
    if (s.num_datagrams == 0) {
        s.path_index = 0;
        if ((ret = do_send(conn, &s)) != 0)
            goto Exit;
    } else {
        ret = 0;
    }

    assert_consistency(conn, s.path_index == 0);

Exit:
    /* datagram-frame payloads are queued for the active path only; drop them once it has had its chance to send */
    if (s.path_index == 0)
        clear_datagram_frame_payloads(conn);
    if (s.recalc_send_probe_at)
        recalc_send_probe_at(conn);
    if (s.num_datagrams != 0) {
        *dest = conn->paths[s.path_index]->address.remote;
        *src = conn->paths[s.path_index]->address.local;
    }
    *num_datagrams = s.num_datagrams;
    unlock_now(conn);
    return ret;
}
5667 | | |
5668 | | uint8_t quicly_send_get_ecn_bits(quicly_conn_t *conn) |
5669 | 0 | { |
5670 | 0 | return conn->egress.ecn.state == QUICLY_ECN_OFF ? 0 : 2; /* NON-ECT or ECT(0) */ |
5671 | 0 | } |
5672 | | |
/**
 * Builds, into `datagram`, a stateless Initial packet that carries a CONNECTION_CLOSE frame with error code INVALID_TOKEN, to
 * be sent in response to a client Initial carrying an unusable token. Returns the size of the generated datagram, or SIZE_MAX
 * on failure (unknown protocol version, or failure to set up the Initial encryption context).
 */
size_t quicly_send_close_invalid_token(quicly_context_t *ctx, uint32_t protocol_version, ptls_iovec_t dest_cid,
                                       ptls_iovec_t src_cid, const char *err_desc, void *datagram)
{
    struct st_quicly_cipher_context_t egress = {};
    const quicly_salt_t *salt;

    /* setup keys */
    if ((salt = quicly_get_salt(protocol_version)) == NULL)
        return SIZE_MAX;
    if (setup_initial_encryption(get_aes128gcmsha256(ctx), NULL, &egress, src_cid, 0,
                                 ptls_iovec_init(salt->initial, sizeof(salt->initial)), NULL) != 0)
        return SIZE_MAX;

    uint8_t *dst = datagram, *length_at;

    /* build packet; the header is written by hand as there is no connection object to drive the regular send path */
    PTLS_BUILD_ASSERT(QUICLY_SEND_PN_SIZE == 2);
    *dst++ = QUICLY_PACKET_TYPE_INITIAL | 0x1 /* 2-byte PN */;
    dst = quicly_encode32(dst, protocol_version);
    *dst++ = dest_cid.len;
    memcpy(dst, dest_cid.base, dest_cid.len);
    dst += dest_cid.len;
    *dst++ = src_cid.len;
    memcpy(dst, src_cid.base, src_cid.len);
    dst += src_cid.len;
    *dst++ = 0; /* token_length = 0 */
    length_at = dst++; /* length_at to be filled in later as 1-byte varint */
    *dst++ = 0; /* PN = 0 */
    *dst++ = 0; /* ditto */
    uint8_t *payload_from = dst;
    dst = quicly_encode_close_frame(dst, QUICLY_ERROR_GET_ERROR_CODE(QUICLY_TRANSPORT_ERROR_INVALID_TOKEN),
                                    QUICLY_FRAME_TYPE_PADDING, err_desc);

    /* determine the size of the packet, make adjustments */
    dst += egress.aead->algo->tag_size;
    assert(dst - (uint8_t *)datagram <= QUICLY_MIN_CLIENT_INITIAL_SIZE);
    assert(dst - length_at - 1 < 64); /* length must fit in a 1-byte varint (i.e. < 2^6) */
    *length_at = dst - length_at - 1;
    size_t datagram_len = dst - (uint8_t *)datagram;

    /* encrypt packet */
    quicly_default_crypto_engine.encrypt_packet(&quicly_default_crypto_engine, NULL, egress.header_protection, egress.aead,
                                                ptls_iovec_init(datagram, datagram_len), 0, payload_from - (uint8_t *)datagram, 0,
                                                0);

    dispose_cipher(&egress);
    return datagram_len;
}
5721 | | |
5722 | | size_t quicly_send_stateless_reset(quicly_context_t *ctx, const void *src_cid, void *payload) |
5723 | 0 | { |
5724 | 0 | uint8_t *base = payload; |
5725 | | |
5726 | | /* build stateless reset packet */ |
5727 | 0 | ctx->tls->random_bytes(base, QUICLY_STATELESS_RESET_PACKET_MIN_LEN - QUICLY_STATELESS_RESET_TOKEN_LEN); |
5728 | 0 | base[0] = (base[0] & ~QUICLY_LONG_HEADER_BIT) | QUICLY_QUIC_BIT; |
5729 | 0 | if (!ctx->cid_encryptor->generate_stateless_reset_token( |
5730 | 0 | ctx->cid_encryptor, base + QUICLY_STATELESS_RESET_PACKET_MIN_LEN - QUICLY_STATELESS_RESET_TOKEN_LEN, src_cid)) |
5731 | 0 | return SIZE_MAX; |
5732 | | |
5733 | 0 | return QUICLY_STATELESS_RESET_PACKET_MIN_LEN; |
5734 | 0 | } |
5735 | | |
5736 | | quicly_error_t quicly_send_resumption_token(quicly_conn_t *conn) |
5737 | | { |
5738 | | assert(!quicly_is_client(conn)); |
5739 | | |
5740 | | if (conn->super.state <= QUICLY_STATE_CONNECTED) { |
5741 | | ++conn->egress.new_token.generation; |
5742 | | conn->egress.pending_flows |= QUICLY_PENDING_FLOW_NEW_TOKEN_BIT; |
5743 | | } |
5744 | | return 0; |
5745 | | } |
5746 | | |
/* Sentmap callback registered by `enter_close` for the synthetic final packet; by the time it is registered the connection no
 * longer accepts frames, hence the entry can never be acked (asserted below). Returns 0 so that loss / eviction events are
 * ignored. */
static quicly_error_t on_end_closing(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent)
{
    /* we stop accepting frames by the time this ack callback is being registered */
    assert(!acked);
    return 0;
}
5753 | | |
/**
 * Transitions the connection into the closing (when locally initiated) or draining state, discarding all inflight state and
 * arming `send_ack_at` as the timer that drives emission of CONNECTION_CLOSE / final teardown. A placeholder sentmap entry
 * (with `on_end_closing` as its callback) is registered so that the sentmap-based expiration logic in `quicly_send` keeps
 * working while closing.
 *
 * @param local_is_initiating  non-zero when the local endpoint initiates the close (CONNECTION_CLOSE will be sent ASAP)
 * @param wait_draining        when the remote peer initiated: non-zero to wait the full drain period before teardown
 */
static quicly_error_t enter_close(quicly_conn_t *conn, int local_is_initiating, int wait_draining)
{
    quicly_error_t ret;

    assert(conn->super.state < QUICLY_STATE_CLOSING);

    /* release all inflight info, register a close timeout */
    if ((ret = discard_sentmap_by_epoch(conn, ~0u)) != 0)
        return ret;
    if ((ret = quicly_sentmap_prepare(&conn->egress.loss.sentmap, conn->egress.packet_number, conn->stash.now,
                                      QUICLY_EPOCH_INITIAL)) != 0)
        return ret;
    if (quicly_sentmap_allocate(&conn->egress.loss.sentmap, on_end_closing) == NULL)
        return PTLS_ERROR_NO_MEMORY;
    quicly_sentmap_commit(&conn->egress.loss.sentmap, 0, 0, 0);
    ++conn->egress.packet_number;

    if (local_is_initiating) {
        /* send CONNECTION_CLOSE at the next opportunity */
        conn->super.state = QUICLY_STATE_CLOSING;
        conn->egress.send_ack_at = 0;
    } else {
        /* remote initiated; either wait out the drain period or tear down immediately */
        conn->super.state = QUICLY_STATE_DRAINING;
        conn->egress.send_ack_at = wait_draining ? conn->stash.now + get_sentmap_expiration_time(conn) : 0;
    }

    setup_next_send(conn);

    return 0;
}
5783 | | |
/**
 * Records the CONNECTION_CLOSE payload (error code, frame type, reason phrase) to be sent and enters the closing state.
 * No-op when the connection is already closing or draining.
 *
 * `err` is translated into a wire-level error code by class: 0 maps to NO_ERROR (transport class, frame type forced to
 * PADDING), QUIC transport / application errors pass their code through (application errors clear the frame type by setting
 * it to UINT64_MAX, which distinguishes the two CONNECTION_CLOSE frame variants), picotls self-alerts become
 * crypto-handshake transport errors, and anything else collapses to INTERNAL_ERROR. The order of the checks matters, as the
 * error classes are tested from most to least specific.
 *
 * NOTE(review): `reason_phrase` is stored by pointer, not copied — presumably callers pass string literals or storage that
 * outlives the closing handshake; confirm against callers.
 */
quicly_error_t initiate_close(quicly_conn_t *conn, quicly_error_t err, uint64_t frame_type, const char *reason_phrase)
{
    uint64_t quic_error_code;

    if (conn->super.state >= QUICLY_STATE_CLOSING)
        return 0;

    if (reason_phrase == NULL)
        reason_phrase = "";

    /* convert error code to QUIC error codes */
    if (err == 0) {
        quic_error_code = 0;
        frame_type = QUICLY_FRAME_TYPE_PADDING;
    } else if (QUICLY_ERROR_IS_QUIC_TRANSPORT(err)) {
        quic_error_code = QUICLY_ERROR_GET_ERROR_CODE(err);
    } else if (QUICLY_ERROR_IS_QUIC_APPLICATION(err)) {
        quic_error_code = QUICLY_ERROR_GET_ERROR_CODE(err);
        frame_type = UINT64_MAX;
    } else if (PTLS_ERROR_GET_CLASS(err) == PTLS_ERROR_CLASS_SELF_ALERT) {
        quic_error_code = QUICLY_ERROR_GET_ERROR_CODE(QUICLY_TRANSPORT_ERROR_CRYPTO(PTLS_ERROR_TO_ALERT(err)));
    } else {
        quic_error_code = QUICLY_ERROR_GET_ERROR_CODE(QUICLY_TRANSPORT_ERROR_INTERNAL);
    }

    conn->egress.connection_close.error_code = quic_error_code;
    conn->egress.connection_close.frame_type = frame_type;
    conn->egress.connection_close.reason_phrase = reason_phrase;
    return enter_close(conn, 1, 0);
}
5814 | | |
5815 | | quicly_error_t quicly_close(quicly_conn_t *conn, quicly_error_t err, const char *reason_phrase) |
5816 | | { |
5817 | | quicly_error_t ret; |
5818 | | |
5819 | | assert(err == 0 || QUICLY_ERROR_IS_QUIC_APPLICATION(err) || QUICLY_ERROR_IS_CONCEALED(err)); |
5820 | | |
5821 | | lock_now(conn, 1); |
5822 | | ret = initiate_close(conn, err, QUICLY_FRAME_TYPE_PADDING /* used when err == 0 */, reason_phrase); |
5823 | | unlock_now(conn); |
5824 | | |
5825 | | return ret; |
5826 | | } |
5827 | | |
/**
 * Looks up the stream designated by `stream_id`, lazily opening it (and every not-yet-opened remote-initiated stream with a
 * smaller id in the same group) when the id refers to a remote-initiated stream within the advertised stream limits.
 *
 * On success returns 0 with `*stream` set to the stream object, or to NULL when the id refers to a locally-initiated stream
 * that does not exist (such ids cannot be opened by the peer). Returns STREAM_LIMIT when the id exceeds what was advertised,
 * or the error reported by the allocation / `stream_open` callback.
 */
quicly_error_t quicly_get_or_open_stream(quicly_conn_t *conn, uint64_t stream_id, quicly_stream_t **stream)
{
    quicly_error_t ret = 0;

    if ((*stream = quicly_get_stream(conn, stream_id)) != NULL)
        goto Exit;

    if (quicly_stream_is_client_initiated(stream_id) != quicly_is_client(conn)) {
        /* check if stream id is within the bounds */
        if (stream_id / 4 >= quicly_get_ingress_max_streams(conn, quicly_stream_is_unidirectional(stream_id))) {
            ret = QUICLY_TRANSPORT_ERROR_STREAM_LIMIT;
            goto Exit;
        }
        /* open new streams upto given id */
        struct st_quicly_conn_streamgroup_state_t *group = get_streamgroup_state(conn, stream_id);
        if (group->next_stream_id <= stream_id) {
            uint64_t max_stream_data_local, max_stream_data_remote;
            /* select the initial flow-control windows; a peer-opened uni stream is never written to, hence remote = 0 */
            if (quicly_stream_is_unidirectional(stream_id)) {
                max_stream_data_local = conn->super.ctx->transport_params.max_stream_data.uni;
                max_stream_data_remote = 0;
            } else {
                max_stream_data_local = conn->super.ctx->transport_params.max_stream_data.bidi_remote;
                max_stream_data_remote = conn->super.remote.transport_params.max_stream_data.bidi_local;
            }
            do {
                /* NOTE(review): the local window is narrowed to uint32_t here — presumably matching `open_stream`'s parameter
                 * type; confirm values above 4GB cannot be configured */
                if ((*stream = open_stream(conn, group->next_stream_id, (uint32_t)max_stream_data_local, max_stream_data_remote)) ==
                    NULL) {
                    ret = PTLS_ERROR_NO_MEMORY;
                    goto Exit;
                }
                QUICLY_PROBE(STREAM_ON_OPEN, conn, conn->stash.now, *stream);
                QUICLY_LOG_CONN(stream_on_open, conn, { PTLS_LOG_ELEMENT_SIGNED(stream_id, (*stream)->stream_id); });
                if ((ret = conn->super.ctx->stream_open->cb(conn->super.ctx->stream_open, *stream)) != 0) {
                    *stream = NULL;
                    goto Exit;
                }
                ++group->num_streams;
                group->next_stream_id += 4;
            } while (stream_id != (*stream)->stream_id);
        }
    }

Exit:
    return ret;
}
5873 | | |
5874 | | static quicly_error_t handle_crypto_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
5875 | 0 | { |
5876 | 0 | quicly_stream_frame_t frame; |
5877 | 0 | quicly_stream_t *stream; |
5878 | 0 | quicly_error_t ret; |
5879 | |
|
5880 | 0 | if ((ret = quicly_decode_crypto_frame(&state->src, state->end, &frame)) != 0) |
5881 | 0 | return ret; |
5882 | 0 | stream = quicly_get_stream(conn, -(quicly_stream_id_t)(1 + state->epoch)); |
5883 | 0 | assert(stream != NULL); |
5884 | 0 | return apply_stream_frame(stream, &frame); |
5885 | 0 | } |
5886 | | |
5887 | | static quicly_error_t handle_stream_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
5888 | 0 | { |
5889 | 0 | quicly_stream_frame_t frame; |
5890 | 0 | quicly_stream_t *stream; |
5891 | 0 | quicly_error_t ret; |
5892 | |
|
5893 | 0 | if ((ret = quicly_decode_stream_frame(state->frame_type, &state->src, state->end, &frame)) != 0) |
5894 | 0 | return ret; |
5895 | 0 | QUICLY_PROBE(QUICTRACE_RECV_STREAM, conn, conn->stash.now, frame.stream_id, frame.offset, frame.data.len, (int)frame.is_fin); |
5896 | 0 | if ((ret = quicly_get_or_open_stream(conn, frame.stream_id, &stream)) != 0 || stream == NULL) |
5897 | 0 | return ret; |
5898 | 0 | return apply_stream_frame(stream, &frame); |
5899 | 0 | } |
5900 | | |
/**
 * Handles a received RESET_STREAM frame: resolves (lazily opening) the designated stream, and — unless the receive side has
 * already completed — resets its receive state, credits the connection-level flow-control window with the bytes that will
 * never arrive, invokes the application's `on_receive_reset` callback, and destroys the stream when it has become
 * destroyable. Returns IS_CLOSING when the callback closed the connection.
 */
static quicly_error_t handle_reset_stream_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
{
    quicly_reset_stream_frame_t frame;
    quicly_stream_t *stream;
    quicly_error_t ret;

    if ((ret = quicly_decode_reset_stream_frame(&state->src, state->end, &frame)) != 0)
        return ret;
    QUICLY_PROBE(RESET_STREAM_RECEIVE, conn, conn->stash.now, frame.stream_id, frame.app_error_code, frame.final_size);
    QUICLY_LOG_CONN(reset_stream_receive, conn, {
        PTLS_LOG_ELEMENT_SIGNED(stream_id, (quicly_stream_id_t)frame.stream_id);
        PTLS_LOG_ELEMENT_UNSIGNED(app_error_code, frame.app_error_code);
        PTLS_LOG_ELEMENT_UNSIGNED(final_size, frame.final_size);
    });

    /* a NULL stream (non-existent locally-initiated stream) causes the frame to be ignored */
    if ((ret = quicly_get_or_open_stream(conn, frame.stream_id, &stream)) != 0 || stream == NULL)
        return ret;

    if (!quicly_recvstate_transfer_complete(&stream->recvstate)) {
        uint64_t bytes_missing;
        if ((ret = quicly_recvstate_reset(&stream->recvstate, frame.final_size, &bytes_missing)) != 0)
            return ret;
        /* bytes that will never be delivered still count against connection-level flow control; release them */
        stream->conn->ingress.max_data.bytes_consumed += bytes_missing;
        quicly_error_t err = QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE(frame.app_error_code);
        QUICLY_PROBE(STREAM_ON_RECEIVE_RESET, stream->conn, stream->conn->stash.now, stream, err);
        QUICLY_LOG_CONN(stream_on_receive_reset, stream->conn, {
            PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
            PTLS_LOG_ELEMENT_SIGNED(err, err);
        });
        stream->callbacks->on_receive_reset(stream, err);
        /* the callback may have initiated connection closure; bail out before touching the stream again */
        if (stream->conn->super.state >= QUICLY_STATE_CLOSING)
            return QUICLY_ERROR_IS_CLOSING;
        if (stream_is_destroyable(stream))
            destroy_stream(stream, 0);
    }

    return 0;
}
5939 | | |
5940 | | static quicly_error_t handle_ack_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
5941 | 0 | { |
5942 | 0 | quicly_ack_frame_t frame; |
5943 | 0 | quicly_sentmap_iter_t iter; |
5944 | 0 | struct { |
5945 | 0 | uint64_t pn; |
5946 | 0 | int64_t sent_at; |
5947 | 0 | } largest_newly_acked = {UINT64_MAX, INT64_MAX}; |
5948 | 0 | size_t bytes_acked = 0; |
5949 | 0 | int includes_ack_eliciting = 0, includes_late_ack = 0; |
5950 | 0 | quicly_error_t ret; |
5951 | | |
5952 | | /* The flow is considered CC-limited if the packet was sent while `inflight >= 1/2 * CNWD` or acked under the same condition. |
5953 | | * 1/2 of CWND is adopted for fairness with RFC 7661, and also provides correct increase; i.e., if an idle flow goes into |
5954 | | * CC-limited state for X round-trips then becomes idle again, all packets sent during that X round-trips will be considered as |
5955 | | * CC-limited. */ |
5956 | 0 | int cc_limited = !conn->super.ctx->respect_app_limited || conn->egress.loss.sentmap.bytes_in_flight >= conn->egress.cc.cwnd / 2; |
5957 | |
|
5958 | 0 | if ((ret = quicly_decode_ack_frame(&state->src, state->end, &frame, state->frame_type == QUICLY_FRAME_TYPE_ACK_ECN)) != 0) |
5959 | 0 | return ret; |
5960 | | |
5961 | 0 | uint64_t pn_acked = frame.smallest_acknowledged; |
5962 | |
|
5963 | 0 | switch (state->epoch) { |
5964 | 0 | case QUICLY_EPOCH_0RTT: |
5965 | 0 | return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION; |
5966 | 0 | case QUICLY_EPOCH_HANDSHAKE: |
5967 | 0 | conn->super.remote.address_validation.send_probe = 0; |
5968 | 0 | break; |
5969 | 0 | default: |
5970 | 0 | break; |
5971 | 0 | } |
5972 | | |
5973 | 0 | if ((ret = init_acks_iter(conn, &iter)) != 0) |
5974 | 0 | return ret; |
5975 | | |
5976 | | /* TODO log PNs being ACKed too late */ |
5977 | | |
5978 | 0 | size_t gap_index = frame.num_gaps; |
5979 | 0 | while (1) { |
5980 | 0 | assert(frame.ack_block_lengths[gap_index] != 0); |
5981 | | /* Ack blocks are organized in the ACK frame and consequently in the ack_block_lengths array from the largest acked down. |
5982 | | * Processing acks in packet number order requires processing the ack blocks in reverse order. */ |
5983 | 0 | uint64_t pn_block_max = pn_acked + frame.ack_block_lengths[gap_index] - 1; |
5984 | 0 | QUICLY_PROBE(ACK_BLOCK_RECEIVED, conn, conn->stash.now, pn_acked, pn_block_max); |
5985 | 0 | QUICLY_LOG_CONN(ack_block_received, conn, { |
5986 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(ack_block_begin, pn_acked); |
5987 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(ack_block_end, pn_block_max); |
5988 | 0 | }); |
5989 | 0 | while (quicly_sentmap_get(&iter)->packet_number < pn_acked) |
5990 | 0 | quicly_sentmap_skip(&iter); |
5991 | 0 | do { |
5992 | 0 | const quicly_sent_packet_t *sent = quicly_sentmap_get(&iter); |
5993 | 0 | uint64_t pn_sent = sent->packet_number; |
5994 | 0 | assert(pn_acked <= pn_sent); |
5995 | 0 | if (pn_acked < pn_sent) { |
5996 | | /* set pn_acked to pn_sent; or past the end of the ack block, for use with the next ack block */ |
5997 | 0 | if (pn_sent <= pn_block_max) { |
5998 | 0 | pn_acked = pn_sent; |
5999 | 0 | } else { |
6000 | 0 | pn_acked = pn_block_max + 1; |
6001 | 0 | break; |
6002 | 0 | } |
6003 | 0 | } |
6004 | | /* process newly acked packet */ |
6005 | 0 | if (state->epoch != sent->ack_epoch) |
6006 | 0 | return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION; |
6007 | 0 | int is_late_ack = 0; |
6008 | 0 | if (sent->ack_eliciting) { |
6009 | 0 | includes_ack_eliciting = 1; |
6010 | 0 | if (sent->cc_bytes_in_flight == 0) { |
6011 | 0 | is_late_ack = 1; |
6012 | 0 | includes_late_ack = 1; |
6013 | 0 | ++conn->super.stats.num_packets.late_acked; |
6014 | 0 | } |
6015 | 0 | } |
6016 | 0 | ++conn->super.stats.num_packets.ack_received; |
6017 | 0 | if (sent->promoted_path) |
6018 | 0 | ++conn->super.stats.num_packets.ack_received_promoted_paths; |
6019 | 0 | if (conn->egress.pn_path_start <= pn_acked) { |
6020 | 0 | largest_newly_acked.pn = pn_acked; |
6021 | 0 | largest_newly_acked.sent_at = sent->sent_at; |
6022 | 0 | } |
6023 | 0 | QUICLY_PROBE(PACKET_ACKED, conn, conn->stash.now, pn_acked, is_late_ack); |
6024 | 0 | QUICLY_LOG_CONN(packet_acked, conn, { |
6025 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(pn, pn_acked); |
6026 | 0 | PTLS_LOG_ELEMENT_BOOL(is_late_ack, is_late_ack); |
6027 | 0 | }); |
6028 | 0 | if (sent->cc_bytes_in_flight != 0) { |
6029 | 0 | if (conn->egress.pn_path_start <= pn_acked) { |
6030 | 0 | bytes_acked += sent->cc_bytes_in_flight; |
6031 | 0 | if (sent->cc_limited) |
6032 | 0 | cc_limited = 1; |
6033 | 0 | } |
6034 | 0 | conn->super.stats.num_bytes.ack_received += sent->cc_bytes_in_flight; |
6035 | 0 | } |
6036 | 0 | if ((ret = quicly_sentmap_update(&conn->egress.loss.sentmap, &iter, QUICLY_SENTMAP_EVENT_ACKED)) != 0) |
6037 | 0 | return ret; |
6038 | 0 | if (state->epoch == QUICLY_EPOCH_1RTT) { |
6039 | 0 | struct st_quicly_application_space_t *space = conn->application; |
6040 | 0 | if (space->cipher.egress.key_update_pn.last <= pn_acked) { |
6041 | 0 | space->cipher.egress.key_update_pn.last = UINT64_MAX; |
6042 | 0 | space->cipher.egress.key_update_pn.next = conn->egress.packet_number + conn->super.ctx->max_packets_per_key; |
6043 | 0 | QUICLY_PROBE(CRYPTO_SEND_KEY_UPDATE_CONFIRMED, conn, conn->stash.now, space->cipher.egress.key_update_pn.next); |
6044 | 0 | QUICLY_LOG_CONN(crypto_send_key_update_confirmed, conn, |
6045 | 0 | { PTLS_LOG_ELEMENT_UNSIGNED(next_pn, space->cipher.egress.key_update_pn.next); }); |
6046 | 0 | } |
6047 | 0 | } |
6048 | 0 | ++pn_acked; |
6049 | 0 | } while (pn_acked <= pn_block_max); |
6050 | 0 | assert(pn_acked == pn_block_max + 1); |
6051 | 0 | if (gap_index-- == 0) |
6052 | 0 | break; |
6053 | 0 | pn_acked += frame.gaps[gap_index]; |
6054 | 0 | } |
6055 | | |
6056 | 0 | if ((ret = on_ack_stream_ack_cached(conn)) != 0) |
6057 | 0 | return ret; |
6058 | | |
6059 | 0 | QUICLY_PROBE(ACK_DELAY_RECEIVED, conn, conn->stash.now, frame.ack_delay); |
6060 | 0 | QUICLY_LOG_CONN(ack_delay_received, conn, { PTLS_LOG_ELEMENT_UNSIGNED(ack_delay, frame.ack_delay); }); |
6061 | | |
6062 | 0 | if (largest_newly_acked.pn != UINT64_MAX) |
6063 | 0 | quicly_ratemeter_on_ack(&conn->egress.ratemeter, conn->stash.now, conn->super.stats.num_bytes.ack_received, |
6064 | 0 | largest_newly_acked.pn); |
6065 | | |
6066 | | /* Update loss detection engine on ack. The function uses ack_delay only when the largest_newly_acked is also the largest acked |
6067 | | * so far. So, it does not matter if the ack_delay being passed in does not apply to the largest_newly_acked. */ |
6068 | 0 | quicly_loss_on_ack_received(&conn->egress.loss, largest_newly_acked.pn, state->epoch, conn->stash.now, |
6069 | 0 | largest_newly_acked.sent_at, frame.ack_delay, |
6070 | 0 | includes_ack_eliciting ? includes_late_ack ? QUICLY_LOSS_ACK_RECEIVED_KIND_ACK_ELICITING_LATE_ACK |
6071 | 0 | : QUICLY_LOSS_ACK_RECEIVED_KIND_ACK_ELICITING |
6072 | 0 | : QUICLY_LOSS_ACK_RECEIVED_KIND_NON_ACK_ELICITING); |
6073 | | |
6074 | | /* OnPacketAcked and OnPacketAckedCC */ |
6075 | 0 | if (bytes_acked > 0) { |
6076 | 0 | conn->egress.cc.type->cc_on_acked(&conn->egress.cc, &conn->egress.loss, (uint32_t)bytes_acked, frame.largest_acknowledged, |
6077 | 0 | (uint32_t)(conn->egress.loss.sentmap.bytes_in_flight + bytes_acked), cc_limited, |
6078 | 0 | conn->egress.packet_number, conn->stash.now, conn->egress.max_udp_payload_size); |
6079 | 0 | QUICLY_PROBE(QUICTRACE_CC_ACK, conn, conn->stash.now, &conn->egress.loss.rtt, conn->egress.cc.cwnd, |
6080 | 0 | conn->egress.loss.sentmap.bytes_in_flight); |
6081 | 0 | } |
6082 | |
|
6083 | 0 | QUICLY_PROBE(CC_ACK_RECEIVED, conn, conn->stash.now, frame.largest_acknowledged, bytes_acked, conn->egress.cc.cwnd, |
6084 | 0 | conn->egress.loss.sentmap.bytes_in_flight); |
6085 | 0 | QUICLY_LOG_CONN(cc_ack_received, conn, { |
6086 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(largest_acked, frame.largest_acknowledged); |
6087 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(bytes_acked, bytes_acked); |
6088 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(cwnd, conn->egress.cc.cwnd); |
6089 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(inflight, conn->egress.loss.sentmap.bytes_in_flight); |
6090 | 0 | }); |
6091 | | |
6092 | | /* loss-detection */ |
6093 | 0 | if ((ret = quicly_loss_detect_loss(&conn->egress.loss, conn->stash.now, conn->super.remote.transport_params.max_ack_delay, |
6094 | 0 | conn->initial == NULL && conn->handshake == NULL, on_loss_detected)) != 0) |
6095 | 0 | return ret; |
6096 | | |
6097 | | /* ECN */ |
6098 | 0 | if (conn->egress.ecn.state != QUICLY_ECN_OFF && largest_newly_acked.pn != UINT64_MAX) { |
6099 | | /* if things look suspicious (ECT(1) count becoming non-zero), turn ECN off */ |
6100 | 0 | if (frame.ecn_counts[1] != 0) |
6101 | 0 | update_ecn_state(conn, QUICLY_ECN_OFF); |
6102 | | /* TODO: maybe compare num_packets.acked vs. sum(ecn_counts) to see if any packet has been received as NON-ECT? */ |
6103 | | |
6104 | | /* ECN validation succeeds if at least one packet is acked using one of the expected marks during the probing period */ |
6105 | 0 | if (conn->egress.ecn.state == QUICLY_ECN_PROBING && frame.ecn_counts[0] + frame.ecn_counts[2] > 0) |
6106 | 0 | update_ecn_state(conn, QUICLY_ECN_ON); |
6107 | | |
6108 | | /* check if congestion should be reported */ |
6109 | 0 | int report_congestion = |
6110 | 0 | conn->egress.ecn.state != QUICLY_ECN_OFF && frame.ecn_counts[2] > conn->egress.ecn.counts[state->epoch][2]; |
6111 | | |
6112 | | /* update counters */ |
6113 | 0 | for (size_t i = 0; i < PTLS_ELEMENTSOF(frame.ecn_counts); ++i) { |
6114 | 0 | if (frame.ecn_counts[i] > conn->egress.ecn.counts[state->epoch][i]) { |
6115 | 0 | conn->super.stats.num_packets.acked_ecn_counts[i] += frame.ecn_counts[i] - conn->egress.ecn.counts[state->epoch][i]; |
6116 | 0 | conn->egress.ecn.counts[state->epoch][i] = frame.ecn_counts[i]; |
6117 | 0 | } |
6118 | 0 | } |
6119 | | |
6120 | | /* report congestion */ |
6121 | 0 | if (report_congestion) { |
6122 | 0 | QUICLY_PROBE(ECN_CONGESTION, conn, conn->stash.now, conn->super.stats.num_packets.acked_ecn_counts[2]); |
6123 | 0 | QUICLY_LOG_CONN(ecn_congestion, conn, |
6124 | 0 | { PTLS_LOG_ELEMENT_UNSIGNED(ce_count, conn->super.stats.num_packets.acked_ecn_counts[2]); }); |
6125 | 0 | notify_congestion_to_cc(conn, 0, largest_newly_acked.pn); |
6126 | 0 | } |
6127 | 0 | } |
6128 | | |
6129 | 0 | setup_next_send(conn); |
6130 | |
|
6131 | 0 | return 0; |
6132 | 0 | } |
6133 | | |
6134 | | static quicly_error_t handle_max_stream_data_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
6135 | 0 | { |
6136 | 0 | quicly_max_stream_data_frame_t frame; |
6137 | 0 | quicly_stream_t *stream; |
6138 | 0 | quicly_error_t ret; |
6139 | |
|
6140 | 0 | if ((ret = quicly_decode_max_stream_data_frame(&state->src, state->end, &frame)) != 0) |
6141 | 0 | return ret; |
6142 | | |
6143 | 0 | QUICLY_PROBE(MAX_STREAM_DATA_RECEIVE, conn, conn->stash.now, frame.stream_id, frame.max_stream_data); |
6144 | 0 | QUICLY_LOG_CONN(max_stream_data_receive, conn, { |
6145 | 0 | PTLS_LOG_ELEMENT_SIGNED(stream_id, (quicly_stream_id_t)frame.stream_id); |
6146 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(max_stream_data, frame.max_stream_data); |
6147 | 0 | }); |
6148 | | |
6149 | 0 | if (!quicly_stream_has_send_side(quicly_is_client(conn), frame.stream_id)) |
6150 | 0 | return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING; |
6151 | | |
6152 | 0 | if ((stream = quicly_get_stream(conn, frame.stream_id)) == NULL) |
6153 | 0 | return 0; |
6154 | | |
6155 | 0 | if (frame.max_stream_data <= stream->_send_aux.max_stream_data) |
6156 | 0 | return 0; |
6157 | 0 | stream->_send_aux.max_stream_data = frame.max_stream_data; |
6158 | 0 | stream->_send_aux.blocked = QUICLY_SENDER_STATE_NONE; |
6159 | |
|
6160 | 0 | if (stream->_send_aux.reset_stream.sender_state == QUICLY_SENDER_STATE_NONE) |
6161 | 0 | resched_stream_data(stream); |
6162 | |
|
6163 | 0 | return 0; |
6164 | 0 | } |
6165 | | |
6166 | | static quicly_error_t handle_data_blocked_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
6167 | 0 | { |
6168 | 0 | quicly_data_blocked_frame_t frame; |
6169 | 0 | quicly_error_t ret; |
6170 | |
|
6171 | 0 | if ((ret = quicly_decode_data_blocked_frame(&state->src, state->end, &frame)) != 0) |
6172 | 0 | return ret; |
6173 | | |
6174 | 0 | QUICLY_PROBE(DATA_BLOCKED_RECEIVE, conn, conn->stash.now, frame.offset); |
6175 | 0 | QUICLY_LOG_CONN(data_blocked_receive, conn, { PTLS_LOG_ELEMENT_UNSIGNED(off, frame.offset); }); |
6176 | | |
6177 | 0 | quicly_maxsender_request_transmit(&conn->ingress.max_data.sender); |
6178 | 0 | if (should_send_max_data(conn)) |
6179 | 0 | conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT; |
6180 | |
|
6181 | 0 | return 0; |
6182 | 0 | } |
6183 | | |
6184 | | static quicly_error_t handle_stream_data_blocked_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
6185 | 0 | { |
6186 | 0 | quicly_stream_data_blocked_frame_t frame; |
6187 | 0 | quicly_stream_t *stream; |
6188 | 0 | quicly_error_t ret; |
6189 | |
|
6190 | 0 | if ((ret = quicly_decode_stream_data_blocked_frame(&state->src, state->end, &frame)) != 0) |
6191 | 0 | return ret; |
6192 | | |
6193 | 0 | QUICLY_PROBE(STREAM_DATA_BLOCKED_RECEIVE, conn, conn->stash.now, frame.stream_id, frame.offset); |
6194 | 0 | QUICLY_LOG_CONN(stream_data_blocked_receive, conn, { |
6195 | 0 | PTLS_LOG_ELEMENT_SIGNED(stream_id, frame.stream_id); |
6196 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(maximum, frame.offset); |
6197 | 0 | }); |
6198 | | |
6199 | 0 | if (!quicly_stream_has_receive_side(quicly_is_client(conn), frame.stream_id)) |
6200 | 0 | return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING; |
6201 | | |
6202 | 0 | if ((stream = quicly_get_stream(conn, frame.stream_id)) != NULL) { |
6203 | 0 | quicly_maxsender_request_transmit(&stream->_send_aux.max_stream_data_sender); |
6204 | 0 | if (should_send_max_stream_data(stream)) |
6205 | 0 | sched_stream_control(stream); |
6206 | 0 | } |
6207 | |
|
6208 | 0 | return 0; |
6209 | 0 | } |
6210 | | |
6211 | | static quicly_error_t handle_streams_blocked_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
6212 | 0 | { |
6213 | 0 | quicly_streams_blocked_frame_t frame; |
6214 | 0 | int uni = state->frame_type == QUICLY_FRAME_TYPE_STREAMS_BLOCKED_UNI; |
6215 | 0 | quicly_error_t ret; |
6216 | |
|
6217 | 0 | if ((ret = quicly_decode_streams_blocked_frame(&state->src, state->end, &frame)) != 0) |
6218 | 0 | return ret; |
6219 | | |
6220 | 0 | QUICLY_PROBE(STREAMS_BLOCKED_RECEIVE, conn, conn->stash.now, frame.count, uni); |
6221 | 0 | QUICLY_LOG_CONN(streams_blocked_receive, conn, { |
6222 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(maximum, frame.count); |
6223 | 0 | PTLS_LOG_ELEMENT_BOOL(is_unidirectional, uni); |
6224 | 0 | }); |
6225 | | |
6226 | 0 | if (should_send_max_streams(conn, uni)) { |
6227 | 0 | quicly_maxsender_t *maxsender = uni ? &conn->ingress.max_streams.uni : &conn->ingress.max_streams.bidi; |
6228 | 0 | quicly_maxsender_request_transmit(maxsender); |
6229 | 0 | conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT; |
6230 | 0 | } |
6231 | |
|
6232 | 0 | return 0; |
6233 | 0 | } |
6234 | | |
6235 | | static quicly_error_t handle_max_streams_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state, int uni) |
6236 | 0 | { |
6237 | 0 | quicly_max_streams_frame_t frame; |
6238 | 0 | quicly_error_t ret; |
6239 | |
|
6240 | 0 | if ((ret = quicly_decode_max_streams_frame(&state->src, state->end, &frame)) != 0) |
6241 | 0 | return ret; |
6242 | | |
6243 | 0 | QUICLY_PROBE(MAX_STREAMS_RECEIVE, conn, conn->stash.now, frame.count, uni); |
6244 | 0 | QUICLY_LOG_CONN(max_streams_receive, conn, { |
6245 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(maximum, frame.count); |
6246 | 0 | PTLS_LOG_ELEMENT_BOOL(is_unidirectional, uni); |
6247 | 0 | }); |
6248 | | |
6249 | 0 | if ((ret = update_max_streams(uni ? &conn->egress.max_streams.uni : &conn->egress.max_streams.bidi, frame.count)) != 0) |
6250 | 0 | return ret; |
6251 | | |
6252 | 0 | open_blocked_streams(conn, uni); |
6253 | |
|
6254 | 0 | return 0; |
6255 | 0 | } |
6256 | | |
6257 | | static quicly_error_t handle_max_streams_bidi_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
6258 | 0 | { |
6259 | 0 | return handle_max_streams_frame(conn, state, 0); |
6260 | 0 | } |
6261 | | |
6262 | | static quicly_error_t handle_max_streams_uni_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
6263 | 0 | { |
6264 | 0 | return handle_max_streams_frame(conn, state, 1); |
6265 | 0 | } |
6266 | | |
6267 | | static quicly_error_t handle_path_challenge_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
6268 | 0 | { |
6269 | 0 | quicly_path_challenge_frame_t frame; |
6270 | 0 | quicly_error_t ret; |
6271 | |
|
6272 | 0 | if ((ret = quicly_decode_path_challenge_frame(&state->src, state->end, &frame)) != 0) |
6273 | 0 | return ret; |
6274 | | |
6275 | 0 | QUICLY_PROBE(PATH_CHALLENGE_RECEIVE, conn, conn->stash.now, frame.data, QUICLY_PATH_CHALLENGE_DATA_LEN); |
6276 | 0 | QUICLY_LOG_CONN(path_challenge_receive, conn, { PTLS_LOG_ELEMENT_HEXDUMP(data, frame.data, QUICLY_PATH_CHALLENGE_DATA_LEN); }); |
6277 | | |
6278 | | /* schedule the emission of PATH_RESPONSE frame */ |
6279 | 0 | struct st_quicly_conn_path_t *path = conn->paths[state->path_index]; |
6280 | 0 | memcpy(path->path_response.data, frame.data, QUICLY_PATH_CHALLENGE_DATA_LEN); |
6281 | 0 | path->path_response.send_ = 1; |
6282 | 0 | conn->egress.send_probe_at = 0; |
6283 | |
|
6284 | 0 | return 0; |
6285 | 0 | } |
6286 | | |
6287 | | static quicly_error_t handle_path_response_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
6288 | 0 | { |
6289 | 0 | quicly_path_challenge_frame_t frame; |
6290 | 0 | quicly_error_t ret; |
6291 | |
|
6292 | 0 | if ((ret = quicly_decode_path_challenge_frame(&state->src, state->end, &frame)) != 0) |
6293 | 0 | return ret; |
6294 | | |
6295 | 0 | QUICLY_PROBE(PATH_RESPONSE_RECEIVE, conn, conn->stash.now, frame.data, QUICLY_PATH_CHALLENGE_DATA_LEN); |
6296 | 0 | QUICLY_LOG_CONN(path_response_receive, conn, { PTLS_LOG_ELEMENT_HEXDUMP(data, frame.data, QUICLY_PATH_CHALLENGE_DATA_LEN); }); |
6297 | | |
6298 | 0 | struct st_quicly_conn_path_t *path = conn->paths[state->path_index]; |
6299 | |
|
6300 | 0 | if (ptls_mem_equal(path->path_challenge.data, frame.data, QUICLY_PATH_CHALLENGE_DATA_LEN)) { |
6301 | | /* Path validation succeeded, stop sending PATH_CHALLENGEs. Active path might become changed in `quicly_receive`. */ |
6302 | 0 | path->path_challenge.send_at = INT64_MAX; |
6303 | 0 | recalc_send_probe_at(conn); |
6304 | 0 | conn->super.stats.num_paths.validated += 1; |
6305 | 0 | } |
6306 | |
|
6307 | 0 | return 0; |
6308 | 0 | } |
6309 | | |
6310 | | static quicly_error_t handle_new_token_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
6311 | 0 | { |
6312 | 0 | quicly_new_token_frame_t frame; |
6313 | 0 | quicly_error_t ret; |
6314 | |
|
6315 | 0 | if (!quicly_is_client(conn)) |
6316 | 0 | return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION; |
6317 | 0 | if ((ret = quicly_decode_new_token_frame(&state->src, state->end, &frame)) != 0) |
6318 | 0 | return ret; |
6319 | 0 | QUICLY_PROBE(NEW_TOKEN_RECEIVE, conn, conn->stash.now, frame.token.base, frame.token.len); |
6320 | 0 | QUICLY_LOG_CONN(new_token_receive, conn, { PTLS_LOG_ELEMENT_HEXDUMP(token, frame.token.base, frame.token.len); }); |
6321 | 0 | if (conn->super.ctx->save_resumption_token == NULL) |
6322 | 0 | return 0; |
6323 | 0 | return conn->super.ctx->save_resumption_token->cb(conn->super.ctx->save_resumption_token, conn, frame.token); |
6324 | 0 | } |
6325 | | |
/* Handles STOP_SENDING: opens the referenced stream if permitted, and — if its send side is still open — resets it and
 * notifies the application via the `on_send_stop` callback. The callback may close the connection, which is detected
 * afterwards and reported as QUICLY_ERROR_IS_CLOSING. */
static quicly_error_t handle_stop_sending_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
{
    quicly_stop_sending_frame_t frame;
    quicly_stream_t *stream;
    quicly_error_t ret;

    if ((ret = quicly_decode_stop_sending_frame(&state->src, state->end, &frame)) != 0)
        return ret;
    QUICLY_PROBE(STOP_SENDING_RECEIVE, conn, conn->stash.now, frame.stream_id, frame.app_error_code);
    QUICLY_LOG_CONN(stop_sending_receive, conn, {
        PTLS_LOG_ELEMENT_UNSIGNED(stream_id, (quicly_stream_id_t)frame.stream_id);
        PTLS_LOG_ELEMENT_UNSIGNED(error_code, frame.app_error_code);
    });

    /* the frame may create the stream; `stream == NULL` with `ret == 0` means the frame refers to a stream that no
     * longer exists (already closed), which is not an error */
    if ((ret = quicly_get_or_open_stream(conn, frame.stream_id, &stream)) != 0 || stream == NULL)
        return ret;

    if (quicly_sendstate_is_open(&stream->sendstate)) {
        /* reset the stream, then notify the application */
        quicly_error_t err = QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE(frame.app_error_code);
        quicly_reset_stream(stream, err);
        QUICLY_PROBE(STREAM_ON_SEND_STOP, stream->conn, stream->conn->stash.now, stream, err);
        QUICLY_LOG_CONN(stream_on_send_stop, stream->conn, {
            PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
            PTLS_LOG_ELEMENT_SIGNED(err, err);
        });
        stream->callbacks->on_send_stop(stream, err);
        /* the application callback may have initiated connection closure; propagate that state to the caller */
        if (stream->conn->super.state >= QUICLY_STATE_CLOSING)
            return QUICLY_ERROR_IS_CLOSING;
    }

    return 0;
}
6359 | | |
6360 | | static quicly_error_t handle_max_data_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
6361 | 0 | { |
6362 | 0 | quicly_max_data_frame_t frame; |
6363 | 0 | quicly_error_t ret; |
6364 | |
|
6365 | 0 | if ((ret = quicly_decode_max_data_frame(&state->src, state->end, &frame)) != 0) |
6366 | 0 | return ret; |
6367 | | |
6368 | 0 | QUICLY_PROBE(MAX_DATA_RECEIVE, conn, conn->stash.now, frame.max_data); |
6369 | 0 | QUICLY_LOG_CONN(max_data_receive, conn, { PTLS_LOG_ELEMENT_UNSIGNED(maximum, frame.max_data); }); |
6370 | | |
6371 | 0 | if (frame.max_data <= conn->egress.max_data.permitted) |
6372 | 0 | return 0; |
6373 | 0 | conn->egress.max_data.permitted = frame.max_data; |
6374 | 0 | conn->egress.data_blocked = QUICLY_SENDER_STATE_NONE; /* DATA_BLOCKED has not been sent for the new limit */ |
6375 | |
|
6376 | 0 | return 0; |
6377 | 0 | } |
6378 | | |
6379 | | static quicly_error_t negotiate_using_version(quicly_conn_t *conn, uint32_t version) |
6380 | 0 | { |
6381 | 0 | quicly_error_t ret; |
6382 | | |
6383 | | /* set selected version, update transport parameters extension ID */ |
6384 | 0 | conn->super.version = version; |
6385 | 0 | QUICLY_PROBE(VERSION_SWITCH, conn, conn->stash.now, version); |
6386 | 0 | QUICLY_LOG_CONN(version_switch, conn, { PTLS_LOG_ELEMENT_UNSIGNED(new_version, version); }); |
6387 | | |
6388 | | /* replace initial keys */ |
6389 | 0 | if ((ret = reinstall_initial_encryption(conn, PTLS_ERROR_LIBRARY)) != 0) |
6390 | 0 | return ret; |
6391 | | |
6392 | | /* reschedule all the packets that have been sent for immediate resend */ |
6393 | 0 | if ((ret = discard_sentmap_by_epoch(conn, ~0u)) != 0) |
6394 | 0 | return ret; |
6395 | | |
6396 | 0 | return 0; |
6397 | 0 | } |
6398 | | |
6399 | | static quicly_error_t handle_version_negotiation_packet(quicly_conn_t *conn, quicly_decoded_packet_t *packet) |
6400 | 0 | { |
6401 | 0 | const uint8_t *src = packet->octets.base + packet->encrypted_off, *end = packet->octets.base + packet->octets.len; |
6402 | 0 | uint32_t selected_version = 0; |
6403 | |
|
6404 | 0 | if (src == end || (end - src) % 4 != 0) |
6405 | 0 | return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION; |
6406 | | |
6407 | | /* select in the precedence of V1 -> draft29 -> draft27 -> fail */ |
6408 | 0 | while (src != end) { |
6409 | 0 | uint32_t supported_version = quicly_decode32(&src); |
6410 | 0 | switch (supported_version) { |
6411 | 0 | case QUICLY_PROTOCOL_VERSION_1: |
6412 | 0 | selected_version = QUICLY_PROTOCOL_VERSION_1; |
6413 | 0 | break; |
6414 | 0 | case QUICLY_PROTOCOL_VERSION_DRAFT29: |
6415 | 0 | if (selected_version == 0 || selected_version == QUICLY_PROTOCOL_VERSION_DRAFT27) |
6416 | 0 | selected_version = QUICLY_PROTOCOL_VERSION_DRAFT29; |
6417 | 0 | break; |
6418 | 0 | case QUICLY_PROTOCOL_VERSION_DRAFT27: |
6419 | 0 | if (selected_version == 0) |
6420 | 0 | selected_version = QUICLY_PROTOCOL_VERSION_DRAFT27; |
6421 | 0 | break; |
6422 | 0 | } |
6423 | 0 | } |
6424 | 0 | if (selected_version == 0) |
6425 | 0 | return handle_close(conn, QUICLY_ERROR_NO_COMPATIBLE_VERSION, UINT64_MAX, ptls_iovec_init("", 0)); |
6426 | | |
6427 | 0 | return negotiate_using_version(conn, selected_version); |
6428 | 0 | } |
6429 | | |
static int compare_socket_address(struct sockaddr *x, struct sockaddr *y)
{
    /* Totally orders two socket addresses; returns negative / zero / positive. AF_UNSPEC never compares equal to
     * anything (including another AF_UNSPEC) so that unspecified addresses never match. */
    if (x->sa_family != y->sa_family)
        return x->sa_family < y->sa_family ? -1 : 1;

    switch (x->sa_family) {
    case AF_INET: {
        struct sockaddr_in *xin = (void *)x, *yin = (void *)y;
        uint32_t xaddr = ntohl(xin->sin_addr.s_addr), yaddr = ntohl(yin->sin_addr.s_addr);
        if (xaddr != yaddr)
            return xaddr < yaddr ? -1 : 1;
        uint16_t xport = ntohs(xin->sin_port), yport = ntohs(yin->sin_port);
        if (xport != yport)
            return xport < yport ? -1 : 1;
    } break;
    case AF_INET6: {
        struct sockaddr_in6 *xin6 = (void *)x, *yin6 = (void *)y;
        int r = memcmp(xin6->sin6_addr.s6_addr, yin6->sin6_addr.s6_addr, sizeof(xin6->sin6_addr.s6_addr));
        if (r != 0)
            return r;
        uint16_t xport = ntohs(xin6->sin6_port), yport = ntohs(yin6->sin6_port);
        if (xport != yport)
            return xport < yport ? -1 : 1;
        if (xin6->sin6_scope_id != yin6->sin6_scope_id)
            return xin6->sin6_scope_id < yin6->sin6_scope_id ? -1 : 1;
    } break;
    case AF_UNSPEC:
        return 1; /* unspecified addresses intentionally never match */
    default:
        assert(!"unknown sa_family");
        break;
    }

    return 0;
}
6458 | | |
6459 | | static int is_stateless_reset(quicly_conn_t *conn, quicly_decoded_packet_t *decoded) |
6460 | 0 | { |
6461 | 0 | switch (decoded->_is_stateless_reset_cached) { |
6462 | 0 | case QUICLY__DECODED_PACKET_CACHED_IS_STATELESS_RESET: |
6463 | 0 | return 1; |
6464 | 0 | case QUICLY__DECODED_PACKET_CACHED_NOT_STATELESS_RESET: |
6465 | 0 | return 0; |
6466 | 0 | default: |
6467 | 0 | break; |
6468 | 0 | } |
6469 | | |
6470 | 0 | if (decoded->octets.len < QUICLY_STATELESS_RESET_PACKET_MIN_LEN) |
6471 | 0 | return 0; |
6472 | | |
6473 | 0 | for (size_t i = 0; i < PTLS_ELEMENTSOF(conn->super.remote.cid_set.cids); ++i) { |
6474 | 0 | if (conn->super.remote.cid_set.cids[0].state == QUICLY_REMOTE_CID_UNAVAILABLE) |
6475 | 0 | continue; |
6476 | 0 | if (memcmp(decoded->octets.base + decoded->octets.len - QUICLY_STATELESS_RESET_TOKEN_LEN, |
6477 | 0 | conn->super.remote.cid_set.cids[i].stateless_reset_token, QUICLY_STATELESS_RESET_TOKEN_LEN) == 0) |
6478 | 0 | return 1; |
6479 | 0 | } |
6480 | | |
6481 | 0 | return 0; |
6482 | 0 | } |
6483 | | |
/* Returns non-zero if the packet `decoded`, received on the (dest_addr, src_addr) address pair, belongs to `conn`.
 * Matching is attempted in this order: (a) for long-header packets after address validation, the original /
 * retry DCID; (b) when a CID encryptor is in use, the (master_id, thread_id, node_id) tuple embedded in the DCID, or a
 * stateless-reset token match; (c) otherwise, the remote address. The stateless-reset verdict is cached on the packet. */
int quicly_is_destination(quicly_conn_t *conn, struct sockaddr *dest_addr, struct sockaddr *src_addr,
                          quicly_decoded_packet_t *decoded)
{
    if (QUICLY_PACKET_IS_LONG_HEADER(decoded->octets.base[0])) {
        /* long header: validate address, then consult the CID */
        if (compare_socket_address(&conn->paths[0]->address.remote.sa, src_addr) != 0)
            return 0;
        /* local address is checked only when one has been recorded (AF_UNSPEC means unknown) */
        if (conn->paths[0]->address.local.sa.sa_family != AF_UNSPEC &&
            compare_socket_address(&conn->paths[0]->address.local.sa, dest_addr) != 0)
            return 0;
        /* server may see the CID generated by the client for Initial and 0-RTT packets */
        if (!quicly_is_client(conn) && decoded->cid.dest.might_be_client_generated) {
            const quicly_cid_t *odcid = is_retry(conn) ? &conn->retry_scid : &conn->super.original_dcid;
            if (quicly_cid_is_equal(odcid, decoded->cid.dest.encrypted))
                goto Found;
        }
    }

    if (conn->super.ctx->cid_encryptor != NULL) {
        /* Note on multiple CIDs
         * Multiple CIDs issued by this host are always based on the same 3-tuple (master_id, thread_id, node_id)
         * and the only difference is path_id. Therefore comparing the 3-tuple is enough to cover all CIDs issued by
         * this host.
         */
        if (conn->super.local.cid_set.plaintext.master_id == decoded->cid.dest.plaintext.master_id &&
            conn->super.local.cid_set.plaintext.thread_id == decoded->cid.dest.plaintext.thread_id &&
            conn->super.local.cid_set.plaintext.node_id == decoded->cid.dest.plaintext.node_id)
            goto Found;
        if (is_stateless_reset(conn, decoded))
            goto Found_StatelessReset;
    } else {
        /* no CID encryptor: fall back to matching by address */
        if (compare_socket_address(&conn->paths[0]->address.remote.sa, src_addr) == 0)
            goto Found;
        /* NOTE(review): this conditional appears dead — both it and the fall-through below return 0, so the local-
         * address comparison has no observable effect here; confirm intent before touching it */
        if (conn->paths[0]->address.local.sa.sa_family != AF_UNSPEC &&
            compare_socket_address(&conn->paths[0]->address.local.sa, dest_addr) != 0)
            return 0;
    }

    /* not found */
    return 0;

Found:
    /* a genuine packet for this connection — cache the "not a stateless reset" verdict */
    decoded->_is_stateless_reset_cached = QUICLY__DECODED_PACKET_CACHED_NOT_STATELESS_RESET;
    return 1;

Found_StatelessReset:
    decoded->_is_stateless_reset_cached = QUICLY__DECODED_PACKET_CACHED_IS_STATELESS_RESET;
    return 1;
}
6533 | | |
6534 | | quicly_error_t handle_close(quicly_conn_t *conn, quicly_error_t err, uint64_t frame_type, ptls_iovec_t reason_phrase) |
6535 | 0 | { |
6536 | 0 | quicly_error_t ret; |
6537 | |
|
6538 | 0 | if (conn->super.state >= QUICLY_STATE_CLOSING) |
6539 | 0 | return 0; |
6540 | | |
6541 | | /* switch to closing state, notify the app (at this moment the streams are accessible), then destroy the streams */ |
6542 | 0 | if ((ret = enter_close(conn, 0, |
6543 | 0 | !(err == QUICLY_ERROR_RECEIVED_STATELESS_RESET || err == QUICLY_ERROR_NO_COMPATIBLE_VERSION))) != 0) |
6544 | 0 | return ret; |
6545 | 0 | if (conn->super.ctx->closed_by_remote != NULL) |
6546 | 0 | conn->super.ctx->closed_by_remote->cb(conn->super.ctx->closed_by_remote, conn, err, frame_type, |
6547 | 0 | (const char *)reason_phrase.base, reason_phrase.len); |
6548 | 0 | destroy_all_streams(conn, err, 0); |
6549 | |
|
6550 | 0 | return 0; |
6551 | 0 | } |
6552 | | |
6553 | | static quicly_error_t handle_transport_close_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
6554 | 0 | { |
6555 | 0 | quicly_transport_close_frame_t frame; |
6556 | 0 | quicly_error_t ret; |
6557 | |
|
6558 | 0 | if ((ret = quicly_decode_transport_close_frame(&state->src, state->end, &frame)) != 0) |
6559 | 0 | return ret; |
6560 | | |
6561 | 0 | QUICLY_PROBE(TRANSPORT_CLOSE_RECEIVE, conn, conn->stash.now, frame.error_code, frame.frame_type, |
6562 | 0 | QUICLY_PROBE_ESCAPE_UNSAFE_STRING(frame.reason_phrase.base, frame.reason_phrase.len)); |
6563 | 0 | QUICLY_LOG_CONN(transport_close_receive, conn, { |
6564 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(error_code, frame.error_code); |
6565 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(frame_type, frame.frame_type); |
6566 | 0 | PTLS_LOG_ELEMENT_UNSAFESTR(reason_phrase, (const char *)frame.reason_phrase.base, frame.reason_phrase.len); |
6567 | 0 | }); |
6568 | 0 | return handle_close(conn, QUICLY_ERROR_FROM_TRANSPORT_ERROR_CODE(frame.error_code), frame.frame_type, frame.reason_phrase); |
6569 | 0 | } |
6570 | | |
6571 | | static quicly_error_t handle_application_close_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
6572 | 0 | { |
6573 | 0 | quicly_application_close_frame_t frame; |
6574 | 0 | quicly_error_t ret; |
6575 | |
|
6576 | 0 | if ((ret = quicly_decode_application_close_frame(&state->src, state->end, &frame)) != 0) |
6577 | 0 | return ret; |
6578 | | |
6579 | 0 | QUICLY_PROBE(APPLICATION_CLOSE_RECEIVE, conn, conn->stash.now, frame.error_code, |
6580 | 0 | QUICLY_PROBE_ESCAPE_UNSAFE_STRING(frame.reason_phrase.base, frame.reason_phrase.len)); |
6581 | 0 | QUICLY_LOG_CONN(application_close_receive, conn, { |
6582 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(error_code, frame.error_code); |
6583 | 0 | PTLS_LOG_ELEMENT_UNSAFESTR(reason_phrase, (const char *)frame.reason_phrase.base, frame.reason_phrase.len); |
6584 | 0 | }); |
6585 | 0 | return handle_close(conn, QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE(frame.error_code), UINT64_MAX, frame.reason_phrase); |
6586 | 0 | } |
6587 | | |
6588 | | static quicly_error_t handle_padding_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
6589 | 0 | { |
6590 | 0 | return 0; |
6591 | 0 | } |
6592 | | |
6593 | | static quicly_error_t handle_ping_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
6594 | 0 | { |
6595 | 0 | QUICLY_PROBE(PING_RECEIVE, conn, conn->stash.now); |
6596 | 0 | QUICLY_LOG_CONN(ping_receive, conn, {}); |
6597 | | |
6598 | 0 | return 0; |
6599 | 0 | } |
6600 | | |
6601 | | static quicly_error_t handle_new_connection_id_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
6602 | 0 | { |
6603 | 0 | quicly_new_connection_id_frame_t frame; |
6604 | 0 | quicly_error_t ret; |
6605 | | |
6606 | | /* TODO: return error when using zero-length CID */ |
6607 | |
|
6608 | 0 | if ((ret = quicly_decode_new_connection_id_frame(&state->src, state->end, &frame)) != 0) |
6609 | 0 | return ret; |
6610 | | |
6611 | 0 | QUICLY_PROBE(NEW_CONNECTION_ID_RECEIVE, conn, conn->stash.now, frame.sequence, frame.retire_prior_to, |
6612 | 0 | QUICLY_PROBE_HEXDUMP(frame.cid.base, frame.cid.len), |
6613 | 0 | QUICLY_PROBE_HEXDUMP(frame.stateless_reset_token, QUICLY_STATELESS_RESET_TOKEN_LEN)); |
6614 | 0 | QUICLY_LOG_CONN(new_connection_id_receive, conn, { |
6615 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(sequence, frame.sequence); |
6616 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(retire_prior_to, frame.retire_prior_to); |
6617 | 0 | PTLS_LOG_ELEMENT_HEXDUMP(cid, frame.cid.base, frame.cid.len); |
6618 | 0 | PTLS_LOG_ELEMENT_HEXDUMP(stateless_reset_token, frame.stateless_reset_token, QUICLY_STATELESS_RESET_TOKEN_LEN); |
6619 | 0 | }); |
6620 | | |
6621 | 0 | uint64_t unregistered_seqs[QUICLY_LOCAL_ACTIVE_CONNECTION_ID_LIMIT]; |
6622 | 0 | size_t num_unregistered_seqs; |
6623 | 0 | if ((ret = quicly_remote_cid_register(&conn->super.remote.cid_set, frame.sequence, frame.cid.base, frame.cid.len, |
6624 | 0 | frame.stateless_reset_token, frame.retire_prior_to, unregistered_seqs, |
6625 | 0 | &num_unregistered_seqs)) != 0) |
6626 | 0 | return ret; |
6627 | | |
6628 | 0 | for (size_t i = 0; i < num_unregistered_seqs; i++) |
6629 | 0 | retire_connection_id(conn, unregistered_seqs[i]); |
6630 | |
|
6631 | 0 | return 0; |
6632 | 0 | } |
6633 | | |
6634 | | static quicly_error_t handle_retire_connection_id_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
6635 | 0 | { |
6636 | 0 | int has_pending; |
6637 | 0 | quicly_retire_connection_id_frame_t frame; |
6638 | 0 | quicly_error_t ret; |
6639 | |
|
6640 | 0 | if ((ret = quicly_decode_retire_connection_id_frame(&state->src, state->end, &frame)) != 0) |
6641 | 0 | return ret; |
6642 | | |
6643 | 0 | QUICLY_PROBE(RETIRE_CONNECTION_ID_RECEIVE, conn, conn->stash.now, frame.sequence); |
6644 | 0 | QUICLY_LOG_CONN(retire_connection_id_receive, conn, { PTLS_LOG_ELEMENT_UNSIGNED(sequence, frame.sequence); }); |
6645 | | |
6646 | 0 | if (frame.sequence >= conn->super.local.cid_set.plaintext.path_id) { |
6647 | | /* Receipt of a RETIRE_CONNECTION_ID frame containing a sequence number greater than any previously sent to the remote peer |
6648 | | * MUST be treated as a connection error of type PROTOCOL_VIOLATION. (19.16) */ |
6649 | 0 | return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION; |
6650 | 0 | } |
6651 | | |
6652 | 0 | if ((ret = quicly_local_cid_retire(&conn->super.local.cid_set, frame.sequence, &has_pending)) != 0) |
6653 | 0 | return ret; |
6654 | 0 | if (has_pending) |
6655 | 0 | conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT; |
6656 | |
|
6657 | 0 | return 0; |
6658 | 0 | } |
6659 | | |
6660 | | static quicly_error_t handle_handshake_done_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
6661 | 0 | { |
6662 | 0 | quicly_error_t ret; |
6663 | |
|
6664 | 0 | QUICLY_PROBE(HANDSHAKE_DONE_RECEIVE, conn, conn->stash.now); |
6665 | 0 | QUICLY_LOG_CONN(handshake_done_receive, conn, {}); |
6666 | | |
6667 | 0 | if (!quicly_is_client(conn)) |
6668 | 0 | return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION; |
6669 | | |
6670 | 0 | assert(conn->initial == NULL); |
6671 | 0 | if (conn->handshake == NULL) |
6672 | 0 | return 0; |
6673 | | |
6674 | 0 | conn->super.remote.address_validation.send_probe = 0; |
6675 | 0 | if ((ret = discard_handshake_context(conn, QUICLY_EPOCH_HANDSHAKE)) != 0) |
6676 | 0 | return ret; |
6677 | 0 | setup_next_send(conn); |
6678 | 0 | return 0; |
6679 | 0 | } |
6680 | | |
6681 | | static quicly_error_t handle_datagram_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
6682 | 0 | { |
6683 | 0 | quicly_datagram_frame_t frame; |
6684 | 0 | quicly_error_t ret; |
6685 | | |
6686 | | /* check if we advertised support for DATAGRAM frames on this connection */ |
6687 | 0 | if (conn->super.ctx->transport_params.max_datagram_frame_size == 0) |
6688 | 0 | return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING; |
6689 | | |
6690 | | /* decode the frame */ |
6691 | 0 | if ((ret = quicly_decode_datagram_frame(state->frame_type, &state->src, state->end, &frame)) != 0) |
6692 | 0 | return ret; |
6693 | 0 | QUICLY_PROBE(DATAGRAM_RECEIVE, conn, conn->stash.now, frame.payload.base, frame.payload.len); |
6694 | 0 | QUICLY_LOG_CONN(datagram_receive, conn, { PTLS_LOG_ELEMENT_UNSIGNED(payload_len, frame.payload.len); }); |
6695 | | |
6696 | | /* handle the frame. Applications might call quicly_close or other functions that modify the connection state. */ |
6697 | 0 | conn->super.ctx->receive_datagram_frame->cb(conn->super.ctx->receive_datagram_frame, conn, frame.payload); |
6698 | |
|
6699 | 0 | return 0; |
6700 | 0 | } |
6701 | | |
6702 | | static quicly_error_t handle_ack_frequency_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state) |
6703 | 0 | { |
6704 | 0 | quicly_ack_frequency_frame_t frame; |
6705 | 0 | quicly_error_t ret; |
6706 | | |
6707 | | /* recognize the frame only when the support has been advertised */ |
6708 | 0 | if (conn->super.ctx->transport_params.min_ack_delay_usec == UINT64_MAX) |
6709 | 0 | return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING; |
6710 | | |
6711 | 0 | if ((ret = quicly_decode_ack_frequency_frame(&state->src, state->end, &frame)) != 0) |
6712 | 0 | return ret; |
6713 | | |
6714 | 0 | QUICLY_PROBE(ACK_FREQUENCY_RECEIVE, conn, conn->stash.now, frame.sequence, frame.packet_tolerance, frame.max_ack_delay, |
6715 | 0 | (int)frame.ignore_order, (int)frame.ignore_ce); |
6716 | 0 | QUICLY_LOG_CONN(ack_frequency_receive, conn, { |
6717 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(sequence, frame.sequence); |
6718 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(packet_tolerance, frame.packet_tolerance); |
6719 | 0 | PTLS_LOG_ELEMENT_UNSIGNED(max_ack_delay, frame.max_ack_delay); |
6720 | 0 | PTLS_LOG_ELEMENT_SIGNED(ignore_order, (int)frame.ignore_order); |
6721 | 0 | PTLS_LOG_ELEMENT_SIGNED(ignore_ce, (int)frame.ignore_ce); |
6722 | 0 | }); |
6723 | | |
6724 | | /* Reject Request Max Ack Delay below our TP.min_ack_delay (which is at the moment equal to LOCAL_MAX_ACK_DELAY). */ |
6725 | 0 | if (frame.max_ack_delay < QUICLY_LOCAL_MAX_ACK_DELAY * 1000) |
6726 | 0 | return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION; |
6727 | | |
6728 | 0 | if (frame.sequence >= conn->ingress.ack_frequency.next_sequence) { |
6729 | 0 | conn->ingress.ack_frequency.next_sequence = frame.sequence + 1; |
6730 | 0 | conn->application->super.packet_tolerance = |
6731 | 0 | (uint32_t)(frame.packet_tolerance < QUICLY_MAX_PACKET_TOLERANCE ? frame.packet_tolerance : QUICLY_MAX_PACKET_TOLERANCE); |
6732 | 0 | conn->application->super.ignore_order = frame.ignore_order; |
6733 | 0 | } |
6734 | |
|
6735 | 0 | return 0; |
6736 | 0 | } |
6737 | | |
/**
 * Iterates over all frames contained in the decrypted payload of one packet, dispatching each to its handler.
 *
 * On error, `*offending_frame_type` is set to the type of the frame being processed (or PADDING when the frame type itself could
 * not be decoded). `*is_ack_only` / `*is_probe_only` report whether the packet contained only non-ack-eliciting / only probing
 * frames, which callers use for ack scheduling and path-migration decisions respectively.
 */
static quicly_error_t handle_payload(quicly_conn_t *conn, size_t epoch, size_t path_index, const uint8_t *_src, size_t _len,
                                     uint64_t *offending_frame_type, int *is_ack_only, int *is_probe_only)
{
    /* clang-format off */

    /* `frame_handlers` is an array of frame handlers and the properties of the frames, indexed by the ID of the frame.
     * Rows that repeat a handler (e.g., `ack` twice, `stream` eight times) are intentional: consecutive frame-type codepoints
     * (ACK/ACK_ECN, the eight STREAM variants, the two STREAMS_BLOCKED variants) share one handler. */
    static const struct st_quicly_frame_handler_t {
        quicly_error_t (*cb)(quicly_conn_t *, struct st_quicly_handle_payload_state_t *); /* callback function that handles the
                                                                                           * frame */
        uint8_t permitted_epochs; /* the epochs the frame can appear, calculated as bitwise-or of `1 << epoch` */
        uint8_t ack_eliciting;    /* boolean indicating if the frame is ack-eliciting */
        uint8_t probing;          /* boolean indicating if the frame is a "probing frame" */
        size_t counter_offset;    /* offset of corresponding `conn->super.stats.num_frames_received.type` within quicly_conn_t */
    } frame_handlers[] = {
#define FRAME(n, i, z, h, o, ae, p)                                                                                                \
    {                                                                                                                              \
        handle_##n##_frame,                                                                                                        \
        (i << QUICLY_EPOCH_INITIAL) | (z << QUICLY_EPOCH_0RTT) | (h << QUICLY_EPOCH_HANDSHAKE) | (o << QUICLY_EPOCH_1RTT),         \
        ae,                                                                                                                        \
        p,                                                                                                                         \
        offsetof(quicly_conn_t, super.stats.num_frames_received.n)                                                                 \
    }
        /*   +----------------------+-------------------+---------------+---------+
         *   |                      |  permitted epochs |               |         |
         *   |  frame               +----+----+----+----+ ack-eliciting | probing |
         *   |                      | IN | 0R | HS | 1R |               |         |
         *   +----------------------+----+----+----+----+---------------+---------+ */
        FRAME( padding              ,  1 ,  1 ,  1 ,  1 ,             0 ,       1 ), /* 0 */
        FRAME( ping                 ,  1 ,  1 ,  1 ,  1 ,             1 ,       0 ),
        FRAME( ack                  ,  1 ,  0 ,  1 ,  1 ,             0 ,       0 ),
        FRAME( ack                  ,  1 ,  0 ,  1 ,  1 ,             0 ,       0 ), /* ACK_ECN shares the ACK handler */
        FRAME( reset_stream         ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
        FRAME( stop_sending         ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
        FRAME( crypto               ,  1 ,  0 ,  1 ,  1 ,             1 ,       0 ),
        FRAME( new_token            ,  0 ,  0 ,  0 ,  1 ,             1 ,       0 ),
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ), /* 8; the eight STREAM type bits variants */
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
        FRAME( max_data             ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ), /* 16 */
        FRAME( max_stream_data      ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
        FRAME( max_streams_bidi     ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
        FRAME( max_streams_uni      ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
        FRAME( data_blocked         ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
        FRAME( stream_data_blocked  ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
        FRAME( streams_blocked      ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
        FRAME( streams_blocked      ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ), /* bidi and uni variants */
        FRAME( new_connection_id    ,  0 ,  1 ,  0 ,  1 ,             1 ,       1 ), /* 24 */
        FRAME( retire_connection_id ,  0 ,  0 ,  0 ,  1 ,             1 ,       0 ),
        FRAME( path_challenge       ,  0 ,  1 ,  0 ,  1 ,             1 ,       1 ),
        FRAME( path_response        ,  0 ,  0 ,  0 ,  1 ,             1 ,       1 ),
        FRAME( transport_close      ,  1 ,  1 ,  1 ,  1 ,             0 ,       0 ),
        FRAME( application_close    ,  0 ,  1 ,  0 ,  1 ,             0 ,       0 ),
        FRAME( handshake_done       ,  0 ,  0 ,  0 ,  1 ,             1 ,       0 ),
        /*   +----------------------+----+----+----+----+---------------+---------+ */
#undef FRAME
    };
    /* extension frames (type >= 64) looked up by binary-compatible entries sorted by frame type */
    static const struct {
        uint64_t type;
        struct st_quicly_frame_handler_t _;
    } ex_frame_handlers[] = {
#define FRAME(uc, lc, i, z, h, o, ae, p)                                                                                           \
    {                                                                                                                              \
        QUICLY_FRAME_TYPE_##uc,                                                                                                    \
        {                                                                                                                          \
            handle_##lc##_frame,                                                                                                   \
            (i << QUICLY_EPOCH_INITIAL) | (z << QUICLY_EPOCH_0RTT) | (h << QUICLY_EPOCH_HANDSHAKE) | (o << QUICLY_EPOCH_1RTT),     \
            ae,                                                                                                                    \
            p,                                                                                                                     \
            offsetof(quicly_conn_t, super.stats.num_frames_received.lc)                                                            \
        },                                                                                                                         \
    }
        /*   +----------------------------------+-------------------+---------------+---------+
         *   |  frame                           |  permitted epochs |               |         |
         *   |------------------+---------------+----+----+----+----+ ack-eliciting | probing |
         *   | upper-case       | lower-case    | IN | 0R | HS | 1R |               |         |
         *   +------------------+---------------+----+----+----+----+---------------+---------+ */
        FRAME( DATAGRAM_NOLEN   , datagram      ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
        FRAME( DATAGRAM_WITHLEN , datagram      ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
        FRAME( ACK_FREQUENCY    , ack_frequency ,  0 ,  0 ,  0 ,  1 ,             1 ,       0 ),
        /*   +------------------+---------------+-------------------+---------------+---------+ */
#undef FRAME
        {UINT64_MAX}, /* sentinel terminating the sorted list; the linear search below relies on it to stop */
    };
    /* clang-format on */

    struct st_quicly_handle_payload_state_t state = {.epoch = epoch, .path_index = path_index, .src = _src, .end = _src + _len};
    size_t num_frames_ack_eliciting = 0, num_frames_non_probing = 0;
    quicly_error_t ret;

    /* note: the caller guarantees a non-empty payload, hence the do-while with the first byte read unconditionally */
    do {
        /* determine the frame type; fast path is available for frame types below 64 */
        const struct st_quicly_frame_handler_t *frame_handler;
        state.frame_type = *state.src++;
        if (state.frame_type < PTLS_ELEMENTSOF(frame_handlers)) {
            frame_handler = frame_handlers + state.frame_type;
        } else {
            /* slow path; rewind and decode the frame type as a full varint */
            --state.src;
            if ((state.frame_type = quicly_decodev(&state.src, state.end)) == UINT64_MAX) {
                state.frame_type =
                    QUICLY_FRAME_TYPE_PADDING; /* we cannot signal the offending frame type when failing to decode the frame type */
                ret = QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
                break;
            }
            size_t i;
            for (i = 0; ex_frame_handlers[i].type < state.frame_type; ++i)
                ;
            if (ex_frame_handlers[i].type != state.frame_type) {
                ret = QUICLY_TRANSPORT_ERROR_FRAME_ENCODING; /* not found */
                break;
            }
            frame_handler = &ex_frame_handlers[i]._;
        }
        /* check if frame is allowed, then process */
        if ((frame_handler->permitted_epochs & (1 << epoch)) == 0) {
            ret = QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
            break;
        }
        /* bump the per-frame-type receive counter located at the recorded offset within `conn` */
        ++*(uint64_t *)((uint8_t *)conn + frame_handler->counter_offset);
        if (frame_handler->ack_eliciting)
            ++num_frames_ack_eliciting;
        if (!frame_handler->probing)
            ++num_frames_non_probing;
        if ((ret = frame_handler->cb(conn, &state)) != 0)
            break;
    } while (state.src != state.end);

    *is_ack_only = num_frames_ack_eliciting == 0;
    *is_probe_only = num_frames_non_probing == 0;
    if (ret != 0)
        *offending_frame_type = state.frame_type;
    return ret;
}
6876 | | |
6877 | | static quicly_error_t handle_stateless_reset(quicly_conn_t *conn) |
6878 | 0 | { |
6879 | 0 | QUICLY_PROBE(STATELESS_RESET_RECEIVE, conn, conn->stash.now); |
6880 | 0 | QUICLY_LOG_CONN(stateless_reset_receive, conn, {}); |
6881 | 0 | return handle_close(conn, QUICLY_ERROR_RECEIVED_STATELESS_RESET, UINT64_MAX, ptls_iovec_init("", 0)); |
6882 | 0 | } |
6883 | | |
6884 | | static int validate_retry_tag(quicly_decoded_packet_t *packet, quicly_cid_t *odcid, ptls_aead_context_t *retry_aead) |
6885 | 0 | { |
6886 | 0 | size_t pseudo_packet_len = 1 + odcid->len + packet->encrypted_off; |
6887 | 0 | uint8_t pseudo_packet[pseudo_packet_len]; |
6888 | 0 | pseudo_packet[0] = odcid->len; |
6889 | 0 | memcpy(pseudo_packet + 1, odcid->cid, odcid->len); |
6890 | 0 | memcpy(pseudo_packet + 1 + odcid->len, packet->octets.base, packet->encrypted_off); |
6891 | 0 | return ptls_aead_decrypt(retry_aead, packet->octets.base + packet->encrypted_off, packet->octets.base + packet->encrypted_off, |
6892 | 0 | PTLS_AESGCM_TAG_SIZE, 0, pseudo_packet, pseudo_packet_len) == 0; |
6893 | 0 | } |
6894 | | |
/**
 * Server-side entry point that accepts a new connection in response to a client Initial packet.
 *
 * Validates the packet (type, version, minimum datagram size, DCID length), derives the Initial secrets, decrypts the packet,
 * creates the connection object, applies any address-token state (Retry ODCID or resumption info), and processes the payload.
 *
 * On success `*conn` points to the new connection. On failure `*conn` may still be non-NULL with a CONNECTION_CLOSE scheduled
 * (the error is then consumed and 0 is returned), or NULL with an error code such as QUICLY_ERROR_PACKET_IGNORED.
 */
quicly_error_t quicly_accept(quicly_conn_t **conn, quicly_context_t *ctx, struct sockaddr *dest_addr, struct sockaddr *src_addr,
                             quicly_decoded_packet_t *packet, quicly_address_token_plaintext_t *address_token,
                             const quicly_cid_plaintext_t *new_cid, ptls_handshake_properties_t *handshake_properties,
                             void *appdata)
{
    const quicly_salt_t *salt;
    struct {
        struct st_quicly_cipher_context_t ingress, egress;
        int alive; /* set while this struct owns the cipher contexts; ownership moves to the connection below */
    } cipher = {};
    ptls_iovec_t payload;
    uint64_t next_expected_pn, pn, offending_frame_type = QUICLY_FRAME_TYPE_PADDING;
    int is_ack_only, is_probe_only;
    quicly_error_t ret;

    *conn = NULL;

    /* process initials only */
    if ((packet->octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) != QUICLY_PACKET_TYPE_INITIAL) {
        ret = QUICLY_ERROR_PACKET_IGNORED;
        goto Exit;
    }
    /* unknown versions have no salt and cannot be accepted */
    if ((salt = quicly_get_salt(packet->version)) == NULL) {
        ret = QUICLY_ERROR_PACKET_IGNORED;
        goto Exit;
    }
    /* enforce the minimum client Initial datagram size (anti-amplification requirement) */
    if (packet->datagram_size < QUICLY_MIN_CLIENT_INITIAL_SIZE) {
        ret = QUICLY_ERROR_PACKET_IGNORED;
        goto Exit;
    }
    /* client-chosen DCID must be at least 8 bytes */
    if (packet->cid.dest.encrypted.len < 8) {
        ret = QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
        goto Exit;
    }
    /* derive Initial keys from the client DCID and version-specific salt */
    if ((ret = setup_initial_encryption(get_aes128gcmsha256(ctx), &cipher.ingress, &cipher.egress, packet->cid.dest.encrypted, 0,
                                        ptls_iovec_init(salt->initial, sizeof(salt->initial)), NULL)) != 0)
        goto Exit;
    cipher.alive = 1;
    next_expected_pn = 0; /* is this correct? do we need to take care of underflow? */
    if ((ret = decrypt_packet(cipher.ingress.header_protection, aead_decrypt_fixed_key, cipher.ingress.aead, &next_expected_pn,
                              packet, &pn, &payload)) != 0) {
        ret = QUICLY_ERROR_DECRYPTION_FAILED;
        goto Exit;
    }

    /* create connection */
    if ((*conn = create_connection(
             ctx, packet->version, NULL, src_addr, dest_addr, &packet->cid.src, new_cid, handshake_properties, appdata,
             quicly_cc_calc_initial_cwnd(ctx->initcwnd_packets, ctx->transport_params.max_udp_payload_size))) == NULL) {
        ret = PTLS_ERROR_NO_MEMORY;
        goto Exit;
    }
    (*conn)->super.state = QUICLY_STATE_ACCEPTING;
    quicly_set_cid(&(*conn)->super.original_dcid, packet->cid.dest.encrypted);
    if (address_token != NULL) {
        /* an address token validates the remote address unless the token reports a mismatch */
        (*conn)->super.remote.address_validation.validated = !address_token->address_mismatch;
        switch (address_token->type) {
        case QUICLY_ADDRESS_TOKEN_TYPE_RETRY:
            if (!address_token->address_mismatch) {
                /* recover the CID pair as seen before the Retry round-trip */
                (*conn)->retry_scid = (*conn)->super.original_dcid;
                (*conn)->super.original_dcid = address_token->retry.original_dcid;
            }
            break;
        case QUICLY_ADDRESS_TOKEN_TYPE_RESUMPTION:
            /* restore rate/RTT hints for jumpstart; zero them out when the stored info cannot be decoded */
            if (decode_resumption_info(address_token->resumption.bytes, address_token->resumption.len,
                                       &(*conn)->super.stats.jumpstart.prev_rate, &(*conn)->super.stats.jumpstart.prev_rtt) != 0) {
                (*conn)->super.stats.jumpstart.prev_rate = 0;
                (*conn)->super.stats.jumpstart.prev_rtt = 0;
            }
            break;
        default:
            /* We might not get here as tokens are integrity-protected, but as this is information supplied via network, potentially
             * from broken quicly instances, we drop anything unexpected rather than calling abort(). */
            break;
        }
    }
    if ((ret = setup_handshake_space_and_flow(*conn, QUICLY_EPOCH_INITIAL)) != 0)
        goto Exit;
    (*conn)->initial->super.next_expected_packet_number = next_expected_pn;
    /* hand the cipher contexts over to the connection; from here on the connection owns (and will dispose of) them */
    (*conn)->initial->cipher.ingress = cipher.ingress;
    (*conn)->initial->cipher.egress = cipher.egress;
    cipher.alive = 0;
    (*conn)->crypto.handshake_properties.collected_extensions = server_collected_extensions;
    (*conn)->initial->largest_ingress_udp_payload_size = packet->datagram_size;

    QUICLY_PROBE(ACCEPT, *conn, (*conn)->stash.now,
                 QUICLY_PROBE_HEXDUMP(packet->cid.dest.encrypted.base, packet->cid.dest.encrypted.len), address_token);
    QUICLY_LOG_CONN(accept, *conn, {
        PTLS_LOG_ELEMENT_HEXDUMP(dcid, packet->cid.dest.encrypted.base, packet->cid.dest.encrypted.len);
        PTLS_LOG_ELEMENT_PTR(address_token, address_token);
    });
    QUICLY_PROBE(PACKET_RECEIVED, *conn, (*conn)->stash.now, pn, payload.base, payload.len, get_epoch(packet->octets.base[0]));
    QUICLY_LOG_CONN(packet_received, *conn, {
        PTLS_LOG_ELEMENT_UNSIGNED(pn, pn);
        PTLS_LOG_APPDATA_ELEMENT_HEXDUMP(decrypted, payload.base, payload.len);
        PTLS_LOG_ELEMENT_UNSIGNED(packet_type, get_epoch(packet->octets.base[0]));
    });

    /* handle the input; we ignore is_ack_only, we consult if there's any output from TLS in response to CH anyways */
    (*conn)->super.stats.num_packets.received += 1;
    (*conn)->super.stats.num_packets.initial_received += 1;
    if (packet->ecn != 0)
        (*conn)->super.stats.num_packets.received_ecn_counts[get_ecn_index_from_bits(packet->ecn)] += 1;
    (*conn)->super.stats.num_bytes.received += packet->datagram_size;
    if ((ret = handle_payload(*conn, QUICLY_EPOCH_INITIAL, 0, payload.base, payload.len, &offending_frame_type, &is_ack_only,
                              &is_probe_only)) != 0)
        goto Exit;
    if ((ret = record_receipt(&(*conn)->initial->super, pn, packet->ecn, 0, (*conn)->stash.now, &(*conn)->egress.send_ack_at,
                              &(*conn)->super.stats.num_packets.received_out_of_order)) != 0)
        goto Exit;

Exit:
    if (*conn != NULL) {
        if (ret == 0) {
            /* if CONNECTION_CLOSE was found and the state advanced to DRAINING, we need to retain that state */
            if ((*conn)->super.state < QUICLY_STATE_CONNECTED)
                (*conn)->super.state = QUICLY_STATE_CONNECTED;
        } else {
            /* convert the error into a scheduled CONNECTION_CLOSE on the freshly created connection */
            initiate_close(*conn, ret, offending_frame_type, "");
            ret = 0;
        }
        unlock_now(*conn);
    }
    /* if the cipher contexts were never handed to the connection, dispose of them here */
    if (cipher.alive) {
        dispose_cipher(&cipher.ingress);
        dispose_cipher(&cipher.egress);
    }
    return ret;
}
7024 | | |
/**
 * Processes one decoded QUIC packet received on an existing connection.
 *
 * Steps: tuple validation, stateless-reset detection, path selection, per-packet-type key/epoch selection, decryption, statistics
 * updates, payload dispatch (via `handle_payload`), ack bookkeeping, and epoch-specific state transitions (Initial/Handshake key
 * discarding, HANDSHAKE_DONE scheduling, path promotion). Transport-level errors are converted into a scheduled CONNECTION_CLOSE
 * and 0 is returned; only "ignorable" conditions (packet ignored, decryption failure, OOM, state exhaustion) propagate as errors.
 */
quicly_error_t quicly_receive(quicly_conn_t *conn, struct sockaddr *dest_addr, struct sockaddr *src_addr,
                              quicly_decoded_packet_t *packet)
{
    ptls_cipher_context_t *header_protection;
    struct {
        int (*cb)(void *, uint64_t, quicly_decoded_packet_t *, size_t, size_t *); /* AEAD open callback … */
        void *ctx;                                                               /* … and its context, chosen per packet type */
    } aead;
    struct st_quicly_pn_space_t **space;
    size_t epoch, path_index;
    ptls_iovec_t payload;
    uint64_t pn, offending_frame_type = QUICLY_FRAME_TYPE_PADDING;
    int is_ack_only, is_probe_only;
    quicly_error_t ret;

    assert(src_addr->sa_family == AF_INET || src_addr->sa_family == AF_INET6);

    lock_now(conn, 0);

    QUICLY_PROBE(RECEIVE, conn, conn->stash.now,
                 QUICLY_PROBE_HEXDUMP(packet->cid.dest.encrypted.base, packet->cid.dest.encrypted.len), packet->octets.base,
                 packet->octets.len);
    QUICLY_LOG_CONN(receive, conn, {
        PTLS_LOG_ELEMENT_HEXDUMP(dcid, packet->cid.dest.encrypted.base, packet->cid.dest.encrypted.len);
        PTLS_LOG_ELEMENT_HEXDUMP(bytes, packet->octets.base, packet->octets.len);
    });

    /* drop packets with invalid server tuple (note: when running as a server, `dest_addr` may not be available depending on the
     * socket option being used */
    if (quicly_is_client(conn)) {
        if (compare_socket_address(src_addr, &conn->paths[0]->address.remote.sa) != 0) {
            ret = QUICLY_ERROR_PACKET_IGNORED;
            goto Exit;
        }
    } else if (dest_addr != NULL && dest_addr->sa_family != AF_UNSPEC) {
        assert(conn->paths[0]->address.local.sa.sa_family != AF_UNSPEC);
        if (compare_socket_address(dest_addr, &conn->paths[0]->address.local.sa) != 0) {
            ret = QUICLY_ERROR_PACKET_IGNORED;
            goto Exit;
        }
    }

    if (is_stateless_reset(conn, packet)) {
        ret = handle_stateless_reset(conn);
        goto Exit;
    }

    /* Determine the incoming path. path_index may be set to PTLS_ELEMENTSOF(conn->paths), which indicates that a new path needs to
     * be created once packet decryption succeeds. */
    for (path_index = 0; path_index < PTLS_ELEMENTSOF(conn->paths); ++path_index)
        if (conn->paths[path_index] != NULL && compare_socket_address(src_addr, &conn->paths[path_index]->address.remote.sa) == 0)
            break;
    /* servers accept non-primary paths only for short-header packets after the remote address has been validated */
    if (path_index != 0 && !quicly_is_client(conn) &&
        (QUICLY_PACKET_IS_LONG_HEADER(packet->octets.base[0]) || !conn->super.remote.address_validation.validated)) {
        ret = QUICLY_ERROR_PACKET_IGNORED;
        goto Exit;
    }
    /* refuse to open yet another path once too many path validations have failed */
    if (path_index == PTLS_ELEMENTSOF(conn->paths) &&
        conn->super.stats.num_paths.validation_failed >= conn->super.ctx->max_path_validation_failures) {
        ret = QUICLY_ERROR_PACKET_IGNORED;
        goto Exit;
    }

    /* add unconditionally, as packet->datagram_size is set only for the first packet within the UDP datagram */
    conn->super.stats.num_bytes.received += packet->datagram_size;

    switch (conn->super.state) {
    case QUICLY_STATE_CLOSING:
        ++conn->egress.connection_close.num_packets_received;
        /* respond with a CONNECTION_CLOSE frame using exponential back-off (i.e., when the receive count is a power of two) */
        if (__builtin_popcountl(conn->egress.connection_close.num_packets_received) == 1)
            conn->egress.send_ack_at = 0;
        ret = 0;
        goto Exit;
    case QUICLY_STATE_DRAINING:
        /* draining endpoints send nothing in response */
        ret = 0;
        goto Exit;
    default:
        break;
    }

    if (QUICLY_PACKET_IS_LONG_HEADER(packet->octets.base[0])) {
        if (conn->super.state == QUICLY_STATE_FIRSTFLIGHT) {
            /* version 0 indicates a Version Negotiation packet; only meaningful before anything has been received */
            if (packet->version == 0) {
                ret = handle_version_negotiation_packet(conn, packet);
                goto Exit;
            }
        }
        if (packet->version != conn->super.version) {
            ret = QUICLY_ERROR_PACKET_IGNORED;
            goto Exit;
        }
        switch (packet->octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) {
        case QUICLY_PACKET_TYPE_RETRY: {
            assert(packet->encrypted_off + PTLS_AESGCM_TAG_SIZE == packet->octets.len);
            /* handle only if the connection is the client */
            if (!quicly_is_client(conn)) {
                ret = QUICLY_ERROR_PACKET_IGNORED;
                goto Exit;
            }
            /* server CID has to change */
            if (quicly_cid_is_equal(&conn->super.remote.cid_set.cids[0].cid, packet->cid.src)) {
                ret = QUICLY_ERROR_PACKET_IGNORED;
                goto Exit;
            }
            /* do not accept a second Retry */
            if (is_retry(conn)) {
                ret = QUICLY_ERROR_PACKET_IGNORED;
                goto Exit;
            }
            /* verify the Retry Integrity Tag before acting on the packet */
            ptls_aead_context_t *retry_aead = create_retry_aead(conn->super.ctx, conn->super.version, 0);
            int retry_ok = validate_retry_tag(packet, &conn->super.remote.cid_set.cids[0].cid, retry_aead);
            ptls_aead_free(retry_aead);
            if (!retry_ok) {
                ret = QUICLY_ERROR_PACKET_IGNORED;
                goto Exit;
            }
            /* check size of the Retry packet */
            if (packet->token.len > QUICLY_MAX_TOKEN_LEN) {
                ret = QUICLY_ERROR_PACKET_IGNORED; /* TODO this is a immediate fatal error, chose a better error code */
                goto Exit;
            }
            /* store token and ODCID */
            free(conn->token.base);
            if ((conn->token.base = malloc(packet->token.len)) == NULL) {
                ret = PTLS_ERROR_NO_MEMORY;
                goto Exit;
            }
            memcpy(conn->token.base, packet->token.base, packet->token.len);
            conn->token.len = packet->token.len;
            /* update DCID */
            quicly_set_cid(&conn->super.remote.cid_set.cids[0].cid, packet->cid.src);
            conn->retry_scid = conn->super.remote.cid_set.cids[0].cid;
            /* replace initial keys, or drop the keys if this is a response packet to a greased version */
            if ((ret = reinstall_initial_encryption(conn, QUICLY_ERROR_PACKET_IGNORED)) != 0)
                goto Exit;
            /* schedule retransmit */
            ret = discard_sentmap_by_epoch(conn, ~0u);
            goto Exit;
        } break;
        case QUICLY_PACKET_TYPE_INITIAL:
            if (conn->initial == NULL || (header_protection = conn->initial->cipher.ingress.header_protection) == NULL) {
                ret = QUICLY_ERROR_PACKET_IGNORED;
                goto Exit;
            }
            if (quicly_is_client(conn)) {
                /* client: update cid if this is the first Initial packet that's being received */
                if (conn->super.state == QUICLY_STATE_FIRSTFLIGHT)
                    quicly_set_cid(&conn->super.remote.cid_set.cids[0].cid, packet->cid.src);
            } else {
                /* server: ignore packets that are too small */
                if (packet->datagram_size < QUICLY_MIN_CLIENT_INITIAL_SIZE) {
                    ret = QUICLY_ERROR_PACKET_IGNORED;
                    goto Exit;
                }
            }
            aead.cb = aead_decrypt_fixed_key;
            aead.ctx = conn->initial->cipher.ingress.aead;
            space = (void *)&conn->initial;
            epoch = QUICLY_EPOCH_INITIAL;
            break;
        case QUICLY_PACKET_TYPE_HANDSHAKE:
            if (conn->handshake == NULL || (header_protection = conn->handshake->cipher.ingress.header_protection) == NULL) {
                ret = QUICLY_ERROR_PACKET_IGNORED;
                goto Exit;
            }
            aead.cb = aead_decrypt_fixed_key;
            aead.ctx = conn->handshake->cipher.ingress.aead;
            space = (void *)&conn->handshake;
            epoch = QUICLY_EPOCH_HANDSHAKE;
            break;
        case QUICLY_PACKET_TYPE_0RTT:
            /* only servers receive 0-RTT packets */
            if (quicly_is_client(conn)) {
                ret = QUICLY_ERROR_PACKET_IGNORED;
                goto Exit;
            }
            if (conn->application == NULL ||
                (header_protection = conn->application->cipher.ingress.header_protection.zero_rtt) == NULL) {
                ret = QUICLY_ERROR_PACKET_IGNORED;
                goto Exit;
            }
            aead.cb = aead_decrypt_fixed_key;
            aead.ctx = conn->application->cipher.ingress.aead[1];
            space = (void *)&conn->application;
            epoch = QUICLY_EPOCH_0RTT;
            break;
        default:
            ret = QUICLY_ERROR_PACKET_IGNORED;
            goto Exit;
        }
    } else {
        /* short header packet */
        if (conn->application == NULL ||
            (header_protection = conn->application->cipher.ingress.header_protection.one_rtt) == NULL) {
            ret = QUICLY_ERROR_PACKET_IGNORED;
            goto Exit;
        }
        aead.cb = aead_decrypt_1rtt; /* 1-RTT uses a conn-aware callback (handles key update) */
        aead.ctx = conn;
        space = (void *)&conn->application;
        epoch = QUICLY_EPOCH_1RTT;
    }

    /* decrypt */
    if ((ret = decrypt_packet(header_protection, aead.cb, aead.ctx, &(*space)->next_expected_packet_number, packet, &pn,
                              &payload)) != 0) {
        ++conn->super.stats.num_packets.decryption_failed;
        QUICLY_PROBE(PACKET_DECRYPTION_FAILED, conn, conn->stash.now, pn);
        goto Exit;
    }

    QUICLY_PROBE(PACKET_RECEIVED, conn, conn->stash.now, pn, payload.base, payload.len, get_epoch(packet->octets.base[0]));
    QUICLY_LOG_CONN(packet_received, conn, {
        PTLS_LOG_ELEMENT_UNSIGNED(pn, pn);
        PTLS_LOG_ELEMENT_UNSIGNED(decrypted_len, payload.len);
        PTLS_LOG_ELEMENT_UNSIGNED(packet_type, get_epoch(packet->octets.base[0]));
    });

    /* open a new path if necessary, now that decryption succeeded */
    if (path_index == PTLS_ELEMENTSOF(conn->paths) && (ret = open_path(conn, &path_index, src_addr, dest_addr)) != 0)
        goto Exit;

    /* update states */
    if (conn->super.state == QUICLY_STATE_FIRSTFLIGHT)
        conn->super.state = QUICLY_STATE_CONNECTED;
    conn->super.stats.num_packets.received += 1;
    conn->paths[path_index]->packet_last_received = conn->super.stats.num_packets.received;
    conn->paths[path_index]->num_packets.received += 1;
    if (QUICLY_PACKET_IS_LONG_HEADER(packet->octets.base[0])) {
        switch (packet->octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) {
        case QUICLY_PACKET_TYPE_INITIAL:
            conn->super.stats.num_packets.initial_received += 1;
            break;
        case QUICLY_PACKET_TYPE_0RTT:
            conn->super.stats.num_packets.zero_rtt_received += 1;
            break;
        case QUICLY_PACKET_TYPE_HANDSHAKE:
            conn->super.stats.num_packets.handshake_received += 1;
            break;
        }
    }
    if (packet->ecn != 0)
        conn->super.stats.num_packets.received_ecn_counts[get_ecn_index_from_bits(packet->ecn)] += 1;

    /* state updates, that are triggered by the receipt of a packet */
    switch (epoch) {
    case QUICLY_EPOCH_INITIAL:
        /* update max_ingress_udp_payload_size if necessary */
        if (conn->initial->largest_ingress_udp_payload_size < packet->datagram_size)
            conn->initial->largest_ingress_udp_payload_size = packet->datagram_size;
        break;
    case QUICLY_EPOCH_HANDSHAKE:
        /* Discard Initial space before processing the payload of the Handshake packet to avoid the chance of an ACK frame included
         * in the Handshake packet setting a loss timer for the Initial packet. */
        if (conn->initial != NULL) {
            if ((ret = discard_handshake_context(conn, QUICLY_EPOCH_INITIAL)) != 0)
                goto Exit;
            setup_next_send(conn);
            conn->super.remote.address_validation.validated = 1;
        }
        break;
    default:
        break;
    }

    /* handle the payload */
    if ((ret = handle_payload(conn, epoch, path_index, payload.base, payload.len, &offending_frame_type, &is_ack_only,
                              &is_probe_only)) != 0)
        goto Exit;
    /* a non-probing packet on a probe-only path elicits migration to that path */
    if (!is_probe_only && conn->paths[path_index]->probe_only) {
        assert(path_index != 0);
        conn->paths[path_index]->probe_only = 0;
        ++conn->super.stats.num_paths.migration_elicited;
        QUICLY_PROBE(ELICIT_PATH_MIGRATION, conn, conn->stash.now, path_index);
        QUICLY_LOG_CONN(elicit_path_migration, conn, { PTLS_LOG_ELEMENT_UNSIGNED(path_index, path_index); });
    }
    /* record receipt for acking, unless the packet-number space was dropped or the connection is closing */
    if (*space != NULL && conn->super.state < QUICLY_STATE_CLOSING) {
        if ((ret = record_receipt(*space, pn, packet->ecn, is_ack_only, conn->stash.now, &conn->egress.send_ack_at,
                                  &conn->super.stats.num_packets.received_out_of_order)) != 0)
            goto Exit;
    }

    /* state updates post payload processing */
    switch (epoch) {
    case QUICLY_EPOCH_INITIAL:
        assert(conn->initial != NULL);
        /* client drops Initial keys once Handshake egress keys become available */
        if (quicly_is_client(conn) && conn->handshake != NULL && conn->handshake->cipher.egress.aead != NULL) {
            if ((ret = discard_handshake_context(conn, QUICLY_EPOCH_INITIAL)) != 0)
                goto Exit;
            setup_next_send(conn);
        }
        break;
    case QUICLY_EPOCH_HANDSHAKE:
        if (quicly_is_client(conn)) {
            /* Running as a client.
             * Respect "disable_migration" TP sent by the remote peer at the end of the TLS handshake. */
            if (conn->paths[0]->address.local.sa.sa_family == AF_UNSPEC && dest_addr != NULL && dest_addr->sa_family != AF_UNSPEC &&
                ptls_handshake_is_complete(conn->crypto.tls) && conn->super.remote.transport_params.disable_active_migration)
                set_address(&conn->paths[0]->address.local, dest_addr);
        } else {
            /* Running as a server.
             * If handshake was just completed, drop handshake context, schedule the first emission of HANDSHAKE_DONE frame. */
            if (ptls_handshake_is_complete(conn->crypto.tls)) {
                if ((ret = discard_handshake_context(conn, QUICLY_EPOCH_HANDSHAKE)) != 0)
                    goto Exit;
                assert(conn->handshake == NULL);
                conn->egress.pending_flows |= QUICLY_PENDING_FLOW_HANDSHAKE_DONE_BIT;
                setup_next_send(conn);
            }
        }
        break;
    case QUICLY_EPOCH_1RTT:
        if (!is_ack_only && should_send_max_data(conn))
            conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;
        /* switch active path to current path, if current path is validated and not probe-only */
        if (path_index != 0 && conn->paths[path_index]->path_challenge.send_at == INT64_MAX &&
            !conn->paths[path_index]->probe_only) {
            if ((ret = promote_path(conn, path_index)) != 0)
                goto Exit;
            recalc_send_probe_at(conn);
        }
        break;
    default:
        break;
    }

    update_idle_timeout(conn, 1);

Exit:
    switch (ret) {
    case 0:
        /* Avoid time in the past being emitted by quicly_get_first_timeout. We hit the condition below when retransmission is
         * suspended by the 3x limit (in which case we have loss.alarm_at set but return INT64_MAX from quicly_get_first_timeout
         * until we receive something from the client).
         */
        if (conn->egress.loss.alarm_at < conn->stash.now)
            conn->egress.loss.alarm_at = conn->stash.now;
        assert_consistency(conn, 0);
        break;
    case PTLS_ERROR_NO_MEMORY:
    case QUICLY_ERROR_STATE_EXHAUSTION:
    case QUICLY_ERROR_PACKET_IGNORED:
        /* these are reported to the caller as-is, without closing the connection */
        break;
    default: /* close connection */
        initiate_close(conn, ret, offending_frame_type, "");
        ret = 0;
        break;
    }
    unlock_now(conn);
    return ret;
}
7376 | | |
7377 | | quicly_error_t quicly_open_stream(quicly_conn_t *conn, quicly_stream_t **_stream, int uni) |
7378 | 0 | { |
7379 | 0 | quicly_stream_t *stream; |
7380 | 0 | struct st_quicly_conn_streamgroup_state_t *group; |
7381 | 0 | uint64_t *max_stream_count; |
7382 | 0 | uint32_t max_stream_data_local; |
7383 | 0 | uint64_t max_stream_data_remote; |
7384 | 0 | quicly_error_t ret; |
7385 | | |
7386 | | /* determine the states */ |
7387 | 0 | if (uni) { |
7388 | 0 | group = &conn->super.local.uni; |
7389 | 0 | max_stream_count = &conn->egress.max_streams.uni.count; |
7390 | 0 | max_stream_data_local = 0; |
7391 | 0 | max_stream_data_remote = conn->super.remote.transport_params.max_stream_data.uni; |
7392 | 0 | } else { |
7393 | 0 | group = &conn->super.local.bidi; |
7394 | 0 | max_stream_count = &conn->egress.max_streams.bidi.count; |
7395 | 0 | max_stream_data_local = (uint32_t)conn->super.ctx->transport_params.max_stream_data.bidi_local; |
7396 | 0 | max_stream_data_remote = conn->super.remote.transport_params.max_stream_data.bidi_remote; |
7397 | 0 | } |
7398 | | |
7399 | | /* open */ |
7400 | 0 | if ((stream = open_stream(conn, group->next_stream_id, max_stream_data_local, max_stream_data_remote)) == NULL) |
7401 | 0 | return PTLS_ERROR_NO_MEMORY; |
7402 | 0 | ++group->num_streams; |
7403 | 0 | group->next_stream_id += 4; |
7404 | | |
7405 | | /* adjust blocked */ |
7406 | 0 | if (stream->stream_id / 4 >= *max_stream_count) { |
7407 | 0 | stream->streams_blocked = 1; |
7408 | 0 | quicly_linklist_insert((uni ? &conn->egress.pending_streams.blocked.uni : &conn->egress.pending_streams.blocked.bidi)->prev, |
7409 | 0 | &stream->_send_aux.pending_link.control); |
7410 | | /* schedule the emission of STREAMS_BLOCKED if application write key is available (otherwise the scheduling is done when |
7411 | | * the key becomes available) */ |
7412 | 0 | if (stream->conn->application != NULL && stream->conn->application->cipher.egress.key.aead != NULL) |
7413 | 0 | conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT; |
7414 | 0 | } |
7415 | | |
7416 | | /* application-layer initialization */ |
7417 | 0 | QUICLY_PROBE(STREAM_ON_OPEN, conn, conn->stash.now, stream); |
7418 | 0 | QUICLY_LOG_CONN(stream_on_open, conn, {}); |
7419 | | |
7420 | 0 | if ((ret = conn->super.ctx->stream_open->cb(conn->super.ctx->stream_open, stream)) != 0) |
7421 | 0 | return ret; |
7422 | | |
7423 | 0 | *_stream = stream; |
7424 | 0 | return 0; |
7425 | 0 | } |
7426 | | |
/**
 * Abruptly terminates the send side of `stream`, scheduling transmission of a RESET_STREAM
 * frame that carries the application error code extracted from `err`.
 * Preconditions (asserted): the caller owns the send side, `err` is a QUIC application error,
 * no RESET_STREAM has been armed yet, and the transfer has not already completed.
 */
void quicly_reset_stream(quicly_stream_t *stream, quicly_error_t err)
{
    assert(quicly_stream_has_send_side(quicly_is_client(stream->conn), stream->stream_id));
    assert(QUICLY_ERROR_IS_QUIC_APPLICATION(err));
    assert(stream->_send_aux.reset_stream.sender_state == QUICLY_SENDER_STATE_NONE);
    assert(!quicly_sendstate_transfer_complete(&stream->sendstate));

    /* dispose sendbuf state */
    quicly_sendstate_reset(&stream->sendstate);

    /* setup RESET_STREAM */
    stream->_send_aux.reset_stream.sender_state = QUICLY_SENDER_STATE_SEND;
    stream->_send_aux.reset_stream.error_code = QUICLY_ERROR_GET_ERROR_CODE(err);

    /* schedule for delivery */
    sched_stream_control(stream);
    resched_stream_data(stream);
}
7445 | | |
7446 | | void quicly_request_stop(quicly_stream_t *stream, quicly_error_t err) |
7447 | | { |
7448 | | assert(quicly_stream_has_receive_side(quicly_is_client(stream->conn), stream->stream_id)); |
7449 | | assert(QUICLY_ERROR_IS_QUIC_APPLICATION(err)); |
7450 | | |
7451 | | /* send STOP_SENDING if the incoming side of the stream is still open */ |
7452 | | if (stream->recvstate.eos == UINT64_MAX && stream->_send_aux.stop_sending.sender_state == QUICLY_SENDER_STATE_NONE) { |
7453 | | stream->_send_aux.stop_sending.sender_state = QUICLY_SENDER_STATE_SEND; |
7454 | | stream->_send_aux.stop_sending.error_code = QUICLY_ERROR_GET_ERROR_CODE(err); |
7455 | | sched_stream_control(stream); |
7456 | | } |
7457 | | } |
7458 | | |
/**
 * Returns the size of the concrete sockaddr structure corresponding to `sa->sa_family`
 * (AF_INET or AF_INET6). Any other family is a programming error and trips an assert;
 * zero is returned in that case when asserts are disabled.
 */
socklen_t quicly_get_socklen(struct sockaddr *sa)
{
    if (sa->sa_family == AF_INET)
        return sizeof(struct sockaddr_in);
    if (sa->sa_family == AF_INET6)
        return sizeof(struct sockaddr_in6);
    assert(!"unexpected socket type");
    return 0;
}
7471 | | |
/**
 * Copies `len` bytes from `bytes` into `buf` as a NUL-terminated string, replacing every
 * byte that is not printable ASCII — as well as `"`, `'`, and `\` — with a `\xNN` escape.
 * `buf` must be large enough for the worst case (4 * len + 1 bytes). Returns `buf`.
 */
char *quicly_escape_unsafe_string(char *buf, const void *bytes, size_t len)
{
    const char *in = bytes;
    char *out = buf;
    size_t i;

    for (i = 0; i != len; ++i) {
        char ch = in[i];
        /* pass printable ASCII through verbatim, except quote characters and backslash */
        int passthru = (0x20 <= ch && ch <= 0x7e) && ch != '"' && ch != '\'' && ch != '\\';
        if (passthru) {
            *out++ = ch;
        } else {
            out[0] = '\\';
            out[1] = 'x';
            quicly_byte_to_hex(out + 2, (uint8_t)ch);
            out += 4;
        }
    }
    *out = '\0';

    return buf;
}
7491 | | |
/**
 * Renders `len` bytes as a newly malloc'ed, NUL-terminated hex dump; caller frees.
 * When `indent` is SIZE_MAX the output is a plain hex string (2 chars per byte).
 * Otherwise each 16-byte line is: `indent` spaces, a 4-hex-digit byte offset, a space,
 * 16 hex byte columns (each preceded by ' ' or, at column 8, '-'), two spaces, and the
 * printable-ASCII rendering of the same bytes. Returns NULL on allocation failure.
 */
char *quicly_hexdump(const uint8_t *bytes, size_t len, size_t indent)
{
    /* bufsize: per-line cost is indent + 5 (offset + space) + 3*16 (hex columns) + 2 + 16 (ASCII) + 1 (newline),
     * times the number of 16-byte lines, plus the terminating NUL */
    size_t i, line, row, bufsize = indent == SIZE_MAX ? len * 2 + 1 : (indent + 5 + 3 * 16 + 2 + 16 + 1) * ((len + 15) / 16) + 1;
    char *buf, *p;

    if ((buf = malloc(bufsize)) == NULL)
        return NULL;
    p = buf;
    if (indent == SIZE_MAX) {
        /* compact form: just the hex digits */
        for (i = 0; i != len; ++i) {
            quicly_byte_to_hex(p, bytes[i]);
            p += 2;
        }
    } else {
        for (line = 0; line * 16 < len; ++line) {
            for (i = 0; i < indent; ++i)
                *p++ = ' ';
            /* emit the byte offset (line * 16) as two hex octets: high byte then low byte */
            quicly_byte_to_hex(p, (line >> 4) & 0xff);
            p += 2;
            quicly_byte_to_hex(p, (line << 4) & 0xff);
            p += 2;
            *p++ = ' ';
            /* hex columns; past-the-end positions are padded with spaces so the ASCII column aligns */
            for (row = 0; row < 16; ++row) {
                *p++ = row == 8 ? '-' : ' ';
                if (line * 16 + row < len) {
                    quicly_byte_to_hex(p, bytes[line * 16 + row]);
                    p += 2;
                } else {
                    *p++ = ' ';
                    *p++ = ' ';
                }
            }
            *p++ = ' ';
            *p++ = ' ';
            /* ASCII column: printable bytes verbatim, everything else as '.' */
            for (row = 0; row < 16; ++row) {
                if (line * 16 + row < len) {
                    int ch = bytes[line * 16 + row];
                    *p++ = 0x20 <= ch && ch < 0x7f ? ch : '.';
                } else {
                    *p++ = ' ';
                }
            }
            *p++ = '\n';
        }
    }
    *p++ = '\0';

    assert(p - buf <= bufsize);

    return buf;
}
7543 | | |
7544 | | void quicly_amend_ptls_context(ptls_context_t *ptls) |
7545 | 0 | { |
7546 | 0 | static ptls_update_traffic_key_t update_traffic_key = {update_traffic_key_cb}; |
7547 | |
|
7548 | 0 | ptls->omit_end_of_early_data = 1; |
7549 | 0 | ptls->update_traffic_key = &update_traffic_key; |
7550 | | |
7551 | | /* if TLS 1.3 config permits use of early data, convert the value to 0xffffffff in accordance with QUIC-TLS */ |
7552 | 0 | if (ptls->max_early_data_size != 0) |
7553 | 0 | ptls->max_early_data_size = UINT32_MAX; |
7554 | 0 | } |
7555 | | |
/**
 * Serializes `plaintext` into `buf` and encrypts it in place with `aead`, producing an
 * address token of the form: type (1 byte) || random IV || AEAD(serialized fields) || tag.
 * Bytes from `start_off` up to the start of the ciphertext are covered as AAD.
 * Returns 0 on success or a ptls error code. Note that the ptls_buffer_push* helpers are
 * macros that jump to `Exit` on failure, which is why `Exit` is reachable from the
 * serialization section as well.
 */
quicly_error_t quicly_encrypt_address_token(void (*random_bytes)(void *, size_t), ptls_aead_context_t *aead, ptls_buffer_t *buf,
                                            size_t start_off, const quicly_address_token_plaintext_t *plaintext)
{
    quicly_error_t ret;

    /* type and IV */
    if ((ret = ptls_buffer_reserve(buf, 1 + aead->algo->iv_size)) != 0)
        goto Exit;
    buf->base[buf->off++] = plaintext->type;
    random_bytes(buf->base + buf->off, aead->algo->iv_size);
    buf->off += aead->algo->iv_size;

    size_t enc_start = buf->off; /* offset at which the to-be-encrypted payload begins */

    /* data: issue timestamp, then the remote address as a length-prefixed block, then the port */
    ptls_buffer_push64(buf, plaintext->issued_at);
    {
        uint16_t port;
        ptls_buffer_push_block(buf, 1, {
            switch (plaintext->remote.sa.sa_family) {
            case AF_INET:
                /* 4-byte v4 address */
                ptls_buffer_pushv(buf, &plaintext->remote.sin.sin_addr.s_addr, 4);
                port = ntohs(plaintext->remote.sin.sin_port);
                break;
            case AF_INET6:
                /* 16-byte v6 address followed by the 32-bit scope id */
                ptls_buffer_pushv(buf, &plaintext->remote.sin6.sin6_addr, 16);
                ptls_buffer_push32(buf, plaintext->remote.sin6.sin6_scope_id);
                port = ntohs(plaintext->remote.sin6.sin6_port);
                break;
            default:
                assert(!"unsupported address type");
                break;
            }
        });
        ptls_buffer_push16(buf, port);
    }
    /* type-specific payload */
    switch (plaintext->type) {
    case QUICLY_ADDRESS_TOKEN_TYPE_RETRY:
        /* three length-prefixed CIDs: original DCID, client CID, server CID */
        ptls_buffer_push_block(buf, 1,
                               { ptls_buffer_pushv(buf, plaintext->retry.original_dcid.cid, plaintext->retry.original_dcid.len); });
        ptls_buffer_push_block(buf, 1,
                               { ptls_buffer_pushv(buf, plaintext->retry.client_cid.cid, plaintext->retry.client_cid.len); });
        ptls_buffer_push_block(buf, 1,
                               { ptls_buffer_pushv(buf, plaintext->retry.server_cid.cid, plaintext->retry.server_cid.len); });
        break;
    case QUICLY_ADDRESS_TOKEN_TYPE_RESUMPTION:
        ptls_buffer_push_block(buf, 1, { ptls_buffer_pushv(buf, plaintext->resumption.bytes, plaintext->resumption.len); });
        break;
    default:
        assert(!"unexpected token type");
        abort();
    }
    /* trailing application data, also length-prefixed */
    ptls_buffer_push_block(buf, 1, { ptls_buffer_pushv(buf, plaintext->appdata.bytes, plaintext->appdata.len); });

    /* encrypt, supplying full IV */
    if ((ret = ptls_buffer_reserve(buf, aead->algo->tag_size)) != 0)
        goto Exit;
    ptls_aead_set_iv(aead, buf->base + enc_start - aead->algo->iv_size);
    ptls_aead_encrypt(aead, buf->base + enc_start, buf->base + enc_start, buf->off - enc_start, 0, buf->base + start_off,
                      enc_start - start_off);
    buf->off += aead->algo->tag_size;

Exit:
    return ret;
}
7621 | | |
/**
 * Decrypts and parses an address token previously built by `quicly_encrypt_address_token`,
 * filling `*plaintext`. `prefix_len` bytes at the head of `_token` are treated as AAD only.
 * On failure returns a ptls alert / QUIC error code and points `*err_desc` at a static
 * description string; decode failures on a RETRY-type token are promoted to
 * QUICLY_TRANSPORT_ERROR_INVALID_TOKEN so the caller emits INVALID_TOKEN_ERROR.
 */
quicly_error_t quicly_decrypt_address_token(ptls_aead_context_t *aead, quicly_address_token_plaintext_t *plaintext,
                                            const void *_token, size_t len, size_t prefix_len, const char **err_desc)
{
    const uint8_t *const token = _token;
    uint8_t ptbuf[QUICLY_MIN_CLIENT_INITIAL_SIZE]; /* stack buffer bounding the plaintext size */
    size_t ptlen;

    *err_desc = NULL;

    /* check if we can get type and decrypt; reject tokens whose plaintext would not fit ptbuf */
    if (len < prefix_len + 1 + aead->algo->iv_size + aead->algo->tag_size) {
        *err_desc = "token too small";
        return PTLS_ALERT_DECODE_ERROR;
    }
    if (prefix_len + 1 + aead->algo->iv_size + sizeof(ptbuf) + aead->algo->tag_size < len) {
        *err_desc = "token too large";
        return PTLS_ALERT_DECODE_ERROR;
    }

    /* check type (recorded before decryption so the Exit path can promote errors for retry tokens) */
    switch (token[prefix_len]) {
    case QUICLY_ADDRESS_TOKEN_TYPE_RETRY:
        plaintext->type = QUICLY_ADDRESS_TOKEN_TYPE_RETRY;
        break;
    case QUICLY_ADDRESS_TOKEN_TYPE_RESUMPTION:
        plaintext->type = QUICLY_ADDRESS_TOKEN_TYPE_RESUMPTION;
        break;
    default:
        *err_desc = "unknown token type";
        return PTLS_ALERT_DECODE_ERROR;
    }

    /* `goto Exit` can only happen below this line, and that is guaranteed by declaring `ret` here */
    quicly_error_t ret;

    /* decrypt: IV immediately follows the type byte; type + IV serve as AAD together with the prefix */
    ptls_aead_set_iv(aead, token + prefix_len + 1);
    if ((ptlen = ptls_aead_decrypt(aead, ptbuf, token + prefix_len + 1 + aead->algo->iv_size,
                                   len - (prefix_len + 1 + aead->algo->iv_size), 0, token, prefix_len + 1 + aead->algo->iv_size)) ==
        SIZE_MAX) {
        ret = PTLS_ALERT_DECRYPT_ERROR;
        *err_desc = "token decryption failure";
        goto Exit;
    }

    /* parse: issue timestamp first */
    const uint8_t *src = ptbuf, *end = src + ptlen;
    if ((ret = ptls_decode64(&plaintext->issued_at, &src, end)) != 0)
        goto Exit;
    {
        in_port_t *portaddr;
        /* the address block's length discriminates v4 (4 bytes) from v6 (16 + 4-byte scope id) */
        ptls_decode_open_block(src, end, 1, {
            switch (end - src) {
            case 4: /* ipv4 */
                plaintext->remote.sin.sin_family = AF_INET;
                memcpy(&plaintext->remote.sin.sin_addr.s_addr, src, 4);
                portaddr = &plaintext->remote.sin.sin_port;
                break;
            case 20: /* ipv6 */
                plaintext->remote.sin6 = (struct sockaddr_in6){.sin6_family = AF_INET6};
                memcpy(&plaintext->remote.sin6.sin6_addr, src, 16);
                if ((ret = ptls_decode32(&plaintext->remote.sin6.sin6_scope_id, &src, end)) != 0)
                    goto Exit;
                portaddr = &plaintext->remote.sin6.sin6_port;
                break;
            default:
                ret = PTLS_ALERT_DECODE_ERROR;
                goto Exit;
            }
            src = end;
        });
        uint16_t port;
        if ((ret = ptls_decode16(&port, &src, end)) != 0)
            goto Exit;
        *portaddr = htons(port);
    }
    /* type-specific payload */
    switch (plaintext->type) {
    case QUICLY_ADDRESS_TOKEN_TYPE_RETRY:
/* decodes one length-prefixed CID into plaintext->retry.<field>, bounds-checked against the cid array */
#define DECODE_CID(field)                                                                                                          \
    do {                                                                                                                           \
        ptls_decode_open_block(src, end, 1, {                                                                                      \
            if (end - src > sizeof(plaintext->retry.field.cid)) {                                                                  \
                ret = PTLS_ALERT_DECODE_ERROR;                                                                                     \
                goto Exit;                                                                                                         \
            }                                                                                                                      \
            quicly_set_cid(&plaintext->retry.field, ptls_iovec_init(src, end - src));                                              \
            src = end;                                                                                                             \
        });                                                                                                                        \
    } while (0)
        DECODE_CID(original_dcid);
        DECODE_CID(client_cid);
        DECODE_CID(server_cid);
#undef DECODE_CID
        break;
    case QUICLY_ADDRESS_TOKEN_TYPE_RESUMPTION:
        ptls_decode_open_block(src, end, 1, {
            /* the 1-byte length prefix caps the block at 255 bytes, which the destination can hold */
            PTLS_BUILD_ASSERT(sizeof(plaintext->resumption.bytes) >= 256);
            plaintext->resumption.len = end - src;
            memcpy(plaintext->resumption.bytes, src, plaintext->resumption.len);
            src = end;
        });
        break;
    default:
        assert(!"unexpected token type");
        abort();
    }
    /* trailing application data */
    ptls_decode_block(src, end, 1, {
        PTLS_BUILD_ASSERT(sizeof(plaintext->appdata.bytes) >= 256);
        plaintext->appdata.len = end - src;
        memcpy(plaintext->appdata.bytes, src, plaintext->appdata.len);
        src = end;
    });
    ret = 0;

Exit:
    if (ret != 0) {
        if (*err_desc == NULL)
            *err_desc = "token decode error";
        /* promote the error to one that triggers the emission of INVALID_TOKEN_ERROR, if the token looked like a retry */
        if (plaintext->type == QUICLY_ADDRESS_TOKEN_TYPE_RETRY)
            ret = QUICLY_TRANSPORT_ERROR_INVALID_TOKEN;
    }
    return ret;
}
7746 | | |
/**
 * Serializes the subset of local transport parameters that must be validated when a
 * session ticket is later used for 0-RTT, into `auth_data` as a quicint-keyed,
 * length-prefixed list wrapped in an outer block. Returns 0 on success or a ptls error
 * code (the ptls_buffer_push* helpers are macros that jump to `Exit` on failure).
 */
int quicly_build_session_ticket_auth_data(ptls_buffer_t *auth_data, const quicly_context_t *ctx)
{
    int ret;

/* pushes one transport parameter: quicint id followed by a variable-length value block */
#define PUSH_TP(id, block)                                                                                                         \
    do {                                                                                                                           \
        ptls_buffer_push_quicint(auth_data, id);                                                                                   \
        ptls_buffer_push_block(auth_data, -1, block);                                                                              \
    } while (0)

    ptls_buffer_push_block(auth_data, -1, {
        PUSH_TP(QUICLY_TRANSPORT_PARAMETER_ID_ACTIVE_CONNECTION_ID_LIMIT,
                { ptls_buffer_push_quicint(auth_data, ctx->transport_params.active_connection_id_limit); });
        PUSH_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_DATA,
                { ptls_buffer_push_quicint(auth_data, ctx->transport_params.max_data); });
        PUSH_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL,
                { ptls_buffer_push_quicint(auth_data, ctx->transport_params.max_stream_data.bidi_local); });
        PUSH_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE,
                { ptls_buffer_push_quicint(auth_data, ctx->transport_params.max_stream_data.bidi_remote); });
        PUSH_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_UNI,
                { ptls_buffer_push_quicint(auth_data, ctx->transport_params.max_stream_data.uni); });
        PUSH_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_BIDI,
                { ptls_buffer_push_quicint(auth_data, ctx->transport_params.max_streams_bidi); });
        PUSH_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_UNI,
                { ptls_buffer_push_quicint(auth_data, ctx->transport_params.max_streams_uni); });
    });

#undef PUSH_TP

    ret = 0;
Exit:
    return ret;
}
7780 | | |
/* No-op implementations of the per-stream callbacks, for applications that do not need a
 * particular hook. `quicly_stream_noop_callbacks` at the bottom bundles all six into a
 * complete, ready-to-use callback table. */

void quicly_stream_noop_on_destroy(quicly_stream_t *stream, quicly_error_t err)
{
}

void quicly_stream_noop_on_send_shift(quicly_stream_t *stream, size_t delta)
{
}

void quicly_stream_noop_on_send_emit(quicly_stream_t *stream, size_t off, void *dst, size_t *len, int *wrote_all)
{
}

void quicly_stream_noop_on_send_stop(quicly_stream_t *stream, quicly_error_t err)
{
}

void quicly_stream_noop_on_receive(quicly_stream_t *stream, size_t off, const void *src, size_t len)
{
}

void quicly_stream_noop_on_receive_reset(quicly_stream_t *stream, quicly_error_t err)
{
}

const quicly_stream_callbacks_t quicly_stream_noop_callbacks = {
    quicly_stream_noop_on_destroy, quicly_stream_noop_on_send_shift, quicly_stream_noop_on_send_emit,
    quicly_stream_noop_on_send_stop, quicly_stream_noop_on_receive, quicly_stream_noop_on_receive_reset};
7808 | | |
/**
 * Formats a debug message and emits it through the DEBUG_MESSAGE probe and the ptls log.
 * The activity of both sinks is checked first so that the cost of rendering the message
 * is skipped entirely when neither is enabled.
 */
void quicly__debug_printf(quicly_conn_t *conn, const char *function, int line, const char *fmt, ...)
{
    PTLS_LOG_DEFINE_POINT(quicly, debug_message, debug_message_logpoint);
    /* `&` intersects the log-point activity mask with the per-connection activity mask */
    if (QUICLY_PROBE_ENABLED(DEBUG_MESSAGE) ||
        (ptls_log_point_maybe_active(&debug_message_logpoint) &
         ptls_log_conn_maybe_active(ptls_get_log_state(conn->crypto.tls), (const char *(*)(void *))ptls_get_server_name,
                                    conn->crypto.tls)) != 0) {
        char buf[1024];
        va_list args;

        /* render into a bounded buffer; vsnprintf guarantees NUL-termination (messages may be truncated) */
        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);

        QUICLY_PROBE(DEBUG_MESSAGE, conn, function, line, buf);
        QUICLY_LOG_CONN(debug_message, conn, {
            PTLS_LOG_ELEMENT_UNSAFESTR(function, function, strlen(function));
            PTLS_LOG_ELEMENT_SIGNED(line, line);
            PTLS_LOG_ELEMENT_UNSAFESTR(message, buf, strlen(buf));
        });
    }
}
7831 | | |
/* QUIC versions supported by this implementation, listed in order of preference and
 * terminated by a zero entry */
const uint32_t quicly_supported_versions[] = {QUICLY_PROTOCOL_VERSION_1, QUICLY_PROTOCOL_VERSION_DRAFT29,
                                              QUICLY_PROTOCOL_VERSION_DRAFT27, 0};