Coverage Report

Created: 2026-02-09 06:30

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/h2o/deps/quicly/lib/quicly.c
Line
Count
Source
1
/*
2
 * Copyright (c) 2017 Fastly, Kazuho Oku
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a copy
5
 * of this software and associated documentation files (the "Software"), to
6
 * deal in the Software without restriction, including without limitation the
7
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
8
 * sell copies of the Software, and to permit persons to whom the Software is
9
 * furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice shall be included in
12
 * all copies or substantial portions of the Software.
13
 *
14
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20
 * IN THE SOFTWARE.
21
 */
22
#include <assert.h>
23
#include <inttypes.h>
24
#include <arpa/inet.h>
25
#include <sys/types.h>
26
#include <netinet/in.h>
27
#include <netinet/ip.h>
28
#include <pthread.h>
29
#include <stdarg.h>
30
#include <stdio.h>
31
#include <stdlib.h>
32
#include <sys/socket.h>
33
#include <sys/time.h>
34
#include "khash.h"
35
#include "quicly.h"
36
#include "quicly/defaults.h"
37
#include "quicly/sentmap.h"
38
#include "quicly/pacer.h"
39
#include "quicly/frame.h"
40
#include "quicly/streambuf.h"
41
#include "quicly/cc.h"
42
#if QUICLY_USE_DTRACE
43
#include "quicly-probes.h"
44
#endif
45
46
0
#define QUICLY_TLS_EXTENSION_TYPE_TRANSPORT_PARAMETERS_FINAL 0x39
47
0
#define QUICLY_TLS_EXTENSION_TYPE_TRANSPORT_PARAMETERS_DRAFT 0xffa5
48
#define QUICLY_TRANSPORT_PARAMETER_ID_ORIGINAL_CONNECTION_ID 0
49
#define QUICLY_TRANSPORT_PARAMETER_ID_MAX_IDLE_TIMEOUT 1
50
#define QUICLY_TRANSPORT_PARAMETER_ID_STATELESS_RESET_TOKEN 2
51
#define QUICLY_TRANSPORT_PARAMETER_ID_MAX_UDP_PAYLOAD_SIZE 3
52
#define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_DATA 4
53
#define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL 5
54
#define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE 6
55
#define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_UNI 7
56
#define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_BIDI 8
57
#define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_UNI 9
58
#define QUICLY_TRANSPORT_PARAMETER_ID_ACK_DELAY_EXPONENT 10
59
#define QUICLY_TRANSPORT_PARAMETER_ID_MAX_ACK_DELAY 11
60
#define QUICLY_TRANSPORT_PARAMETER_ID_DISABLE_ACTIVE_MIGRATION 12
61
#define QUICLY_TRANSPORT_PARAMETER_ID_PREFERRED_ADDRESS 13
62
#define QUICLY_TRANSPORT_PARAMETER_ID_ACTIVE_CONNECTION_ID_LIMIT 14
63
#define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_SOURCE_CONNECTION_ID 15
64
#define QUICLY_TRANSPORT_PARAMETER_ID_RETRY_SOURCE_CONNECTION_ID 16
65
#define QUICLY_TRANSPORT_PARAMETER_ID_MAX_DATAGRAM_FRAME_SIZE 0x20
66
#define QUICLY_TRANSPORT_PARAMETER_ID_MIN_ACK_DELAY 0xff04de1b
67
68
/**
69
 * maximum size of token that quicly accepts
70
 */
71
0
#define QUICLY_MAX_TOKEN_LEN 512
72
/**
73
 * Sends ACK bundled with PING, when number of gaps in the ack queue reaches or exceeds this threshold. This value should be much
74
 * smaller than QUICLY_MAX_RANGES.
75
 */
76
0
#define QUICLY_NUM_ACK_BLOCKS_TO_INDUCE_ACKACK 8
77
/**
78
 * maximum number of undecryptable packets to buffer
79
 */
80
0
#define QUICLY_MAX_DELAYED_PACKETS 10
81
82
KHASH_MAP_INIT_INT64(quicly_stream_t, quicly_stream_t *)
83
84
#if QUICLY_USE_TRACER
85
0
#define QUICLY_TRACER(label, conn, ...) QUICLY_TRACER_##label(conn, __VA_ARGS__)
86
#else
87
#define QUICLY_TRACER(...)
88
#endif
89
90
#if QUICLY_USE_DTRACE
91
#define QUICLY_PROBE(label, conn, ...)                                                                                             \
92
    do {                                                                                                                           \
93
        quicly_conn_t *_conn = (conn);                                                                                             \
94
        if (PTLS_UNLIKELY(QUICLY_##label##_ENABLED()))                                                                             \
95
            QUICLY_##label(_conn, __VA_ARGS__);                                                                                    \
96
        QUICLY_TRACER(label, _conn, __VA_ARGS__);                                                                                  \
97
    } while (0)
98
#define QUICLY_PROBE_ENABLED(label) QUICLY_##label##_ENABLED()
99
#else
100
0
#define QUICLY_PROBE(label, conn, ...) QUICLY_TRACER(label, conn, __VA_ARGS__)
101
0
#define QUICLY_PROBE_ENABLED(label) 0
102
#endif
103
#define QUICLY_PROBE_HEXDUMP(s, l)                                                                                                 \
104
    ({                                                                                                                             \
105
        size_t _l = (l);                                                                                                           \
106
        ptls_hexdump(alloca(_l * 2 + 1), (s), _l);                                                                                 \
107
    })
108
#define QUICLY_PROBE_ESCAPE_UNSAFE_STRING(s, l)                                                                                    \
109
    ({                                                                                                                             \
110
        size_t _l = (l);                                                                                                           \
111
        quicly_escape_unsafe_string(alloca(_l * 4 + 1), (s), _l);                                                                  \
112
    })
113
114
struct st_quicly_cipher_context_t {
115
    ptls_aead_context_t *aead;
116
    ptls_cipher_context_t *header_protection;
117
};
118
119
struct st_quicly_pn_space_t {
120
    /**
121
     * acks to be sent to remote peer
122
     */
123
    quicly_ranges_t ack_queue;
124
    /**
125
     * time at when the largest pn in the ack_queue has been received (or INT64_MAX if none)
126
     */
127
    int64_t largest_pn_received_at;
128
    /**
129
     *
130
     */
131
    uint64_t next_expected_packet_number;
132
    /**
133
     * number of ACK-eliciting packets that have not been ACKed yet
134
     */
135
    uint32_t unacked_count;
136
    /**
137
     * The previously received packet's ecn value
138
     */
139
    uint8_t prior_ecn : 2;
140
    /**
141
     * ECN in the order of ECT(0), ECT(1), CE
142
     */
143
    uint64_t ecn_counts[3];
144
    /**
145
     * maximum number of ACK-eliciting packets to be queued before sending an ACK
146
     */
147
    uint32_t packet_tolerance;
148
    /**
149
     * Maximum packet reordering before eliciting an immediate ACK. Zero disables immediate ACKS on out of order packets.
150
     */
151
    uint32_t reordering_threshold;
152
    /**
153
     * max(acked packet number, unacked ack-eliciting packet number).
154
     */
155
    uint64_t largest_acked_unacked;
156
    /**
157
     * smallest missing packet number within the packet reordering window.
158
     */
159
    uint64_t smallest_unreported_missing;
160
};
161
162
struct st_quicly_handshake_space_t {
163
    struct st_quicly_pn_space_t super;
164
    struct {
165
        struct st_quicly_cipher_context_t ingress;
166
        struct st_quicly_cipher_context_t egress;
167
    } cipher;
168
    uint16_t largest_ingress_udp_payload_size;
169
};
170
171
struct st_quicly_application_space_t {
172
    struct st_quicly_pn_space_t super;
173
    struct {
174
        struct {
175
            struct {
176
                ptls_cipher_context_t *zero_rtt, *one_rtt;
177
            } header_protection;
178
            ptls_aead_context_t *aead[2]; /* 0-RTT uses aead[1], 1-RTT uses aead[key_phase] */
179
            uint8_t secret[PTLS_MAX_DIGEST_SIZE];
180
            struct {
181
                uint64_t prepared;
182
                uint64_t decrypted;
183
            } key_phase;
184
        } ingress;
185
        struct {
186
            struct st_quicly_cipher_context_t key;
187
            uint8_t secret[PTLS_MAX_DIGEST_SIZE];
188
            uint64_t key_phase;
189
            struct {
190
                /**
191
                 * PN at which key update was initiated. Set to UINT64_MAX once key update is acked.
192
                 */
193
                uint64_t last;
194
                /**
195
                 * PN at which key update should be initiated. Set to UINT64_MAX when key update cannot be initiated.
196
                 */
197
                uint64_t next;
198
            } key_update_pn;
199
        } egress;
200
    } cipher;
201
    int one_rtt_writable;
202
};
203
204
struct st_quicly_conn_path_t {
205
    struct {
206
        /**
207
         * remote address (must not be AF_UNSPEC)
208
         */
209
        quicly_address_t remote;
210
        /**
211
         * local address (may be AF_UNSPEC)
212
         */
213
        quicly_address_t local;
214
    } address;
215
    /**
216
     * DCID being used for the path indicated by the sequence number; or UINT64_MAX if yet to be assigned. Multile paths will share
217
     * the same value of zero if peer CID is zero-length.
218
     */
219
    uint64_t dcid;
220
    /**
221
     * Maximum number of packets being received by the connection when a packet was last received on this path. This value is used
222
     * to determine the least-recently-used path which will be recycled.
223
     */
224
    uint64_t packet_last_received;
225
    /**
226
     * `send_at` indicates when a PATH_CHALLENGE frame carrying `data` should be sent, or if the value is INT64_MAX the path is
227
     * validated
228
     */
229
    struct {
230
        int64_t send_at;
231
        uint64_t num_sent;
232
        uint8_t data[QUICLY_PATH_CHALLENGE_DATA_LEN];
233
    } path_challenge;
234
    /**
235
     * path response to be sent, if `send_` is set
236
     */
237
    struct {
238
        uint8_t send_;
239
        uint8_t data[QUICLY_PATH_CHALLENGE_DATA_LEN];
240
    } path_response;
241
    /**
242
     * if this path is the initial path (i.e., the one on which handshake is done)
243
     */
244
    uint8_t initial : 1;
245
    /**
246
     * if only probe packets have been received (and hence have been sent) on the path
247
     */
248
    uint8_t probe_only : 1;
249
    /**
250
     * number of packets being sent / received on the path
251
     */
252
    struct {
253
        uint64_t sent;
254
        uint64_t received;
255
    } num_packets;
256
};
257
258
struct st_quicly_delayed_packet_t {
259
    struct st_quicly_delayed_packet_t *next;
260
    int64_t at;
261
    quicly_decoded_packet_t packet;
262
    uint8_t bytes[1];
263
};
264
265
struct st_quicly_conn_t {
266
    struct _st_quicly_conn_public_t super;
267
    /**
268
     * `paths[0]` is the non-probing path that is guaranteed to exist, others are backups that may be NULL
269
     */
270
    struct st_quicly_conn_path_t *paths[QUICLY_LOCAL_ACTIVE_CONNECTION_ID_LIMIT];
271
    /**
272
     * the initial context
273
     */
274
    struct st_quicly_handshake_space_t *initial;
275
    /**
276
     * the handshake context
277
     */
278
    struct st_quicly_handshake_space_t *handshake;
279
    /**
280
     * 0-RTT and 1-RTT context
281
     */
282
    struct st_quicly_application_space_t *application;
283
    /**
284
     * hashtable of streams
285
     */
286
    khash_t(quicly_stream_t) * streams;
287
    /**
288
     *
289
     */
290
    struct {
291
        /**
292
         *
293
         */
294
        struct {
295
            uint64_t bytes_consumed;
296
            quicly_maxsender_t sender;
297
        } max_data;
298
        /**
299
         *
300
         */
301
        struct {
302
            quicly_maxsender_t uni, bidi;
303
        } max_streams;
304
        /**
305
         *
306
         */
307
        struct {
308
            uint64_t next_sequence;
309
        } ack_frequency;
310
    } ingress;
311
    /**
312
     *
313
     */
314
    struct {
315
        /**
316
         * loss recovery
317
         */
318
        quicly_loss_t loss;
319
        /**
320
         * next or the currently encoding packet number
321
         */
322
        uint64_t packet_number;
323
        /**
324
         * next PN to be skipped
325
         */
326
        uint64_t next_pn_to_skip;
327
        /**
328
         *
329
         */
330
        uint16_t max_udp_payload_size;
331
        /**
332
         * valid if state is CLOSING
333
         */
334
        struct {
335
            uint64_t error_code;
336
            uint64_t frame_type; /* UINT64_MAX if application close */
337
            const char *reason_phrase;
338
            unsigned long num_packets_received;
339
        } connection_close;
340
        /**
341
         *
342
         */
343
        struct {
344
            uint64_t permitted;
345
            uint64_t sent;
346
        } max_data;
347
        /**
348
         *
349
         */
350
        struct {
351
            struct st_quicly_max_streams_t {
352
                uint64_t count;
353
                quicly_maxsender_t blocked_sender;
354
            } uni, bidi;
355
        } max_streams;
356
        /**
357
         *
358
         */
359
        struct {
360
            uint64_t generation;
361
            uint64_t max_acked;
362
            uint32_t num_inflight;
363
        } new_token;
364
        /**
365
         *
366
         */
367
        struct {
368
            int64_t update_at;
369
            uint64_t sequence;
370
        } ack_frequency;
371
        /**
372
         *
373
         */
374
        int64_t last_retransmittable_sent_at;
375
        /**
376
         * when to send an ACK, connection close frames or to destroy the connection
377
         */
378
        int64_t send_ack_at;
379
        /**
380
         * when a PATH_CHALLENGE or PATH_RESPONSE frame is to be sent on any path
381
         */
382
        int64_t send_probe_at;
383
        /**
384
         * congestion control
385
         */
386
        quicly_cc_t cc;
387
        /**
388
         * Next PN to be used when the path is initialized or promoted. As loss recovery / CC is reset upon path promotion, ACKs for
389
         * packets with PN below this property are ignored.
390
         */
391
        uint64_t pn_path_start;
392
        /**
393
         * pacer
394
         */
395
        quicly_pacer_t *pacer;
396
        /**
397
         * ECN
398
         */
399
        struct {
400
            enum en_quicly_ecn_state { QUICLY_ECN_OFF, QUICLY_ECN_ON, QUICLY_ECN_PROBING } state;
401
            uint64_t counts[QUICLY_NUM_EPOCHS][3];
402
        } ecn;
403
        /**
404
         * things to be sent at the stream-level, that are not governed by the stream scheduler
405
         */
406
        struct {
407
            /**
408
             * list of blocked streams (sorted in ascending order of stream_ids)
409
             */
410
            struct {
411
                quicly_linklist_t uni;
412
                quicly_linklist_t bidi;
413
            } blocked;
414
            /**
415
             * list of streams with pending control data (e.g., RESET_STREAM)
416
             */
417
            quicly_linklist_t control;
418
        } pending_streams;
419
        /**
420
         * send state for DATA_BLOCKED frame that corresponds to the current value of `conn->egress.max_data.permitted`
421
         */
422
        quicly_sender_state_t data_blocked;
423
        /**
424
         * bit vector indicating if there's any pending crypto data (the insignificant 4 bits), or other non-stream data
425
         */
426
        uint8_t pending_flows;
427
/* The flags below indicate if the respective frames have to be sent or not. There are no false positives. */
428
0
#define QUICLY_PENDING_FLOW_NEW_TOKEN_BIT (1 << 4)
429
0
#define QUICLY_PENDING_FLOW_HANDSHAKE_DONE_BIT (1 << 5)
430
/* Indicates that MAX_STREAMS, MAX_DATA, DATA_BLOCKED, STREAMS_BLOCKED, NEW_CONNECTION_ID _might_ have to be sent. There could be
431
 * false positives; logic for sending each of these frames have the capability of detecting such false positives. The purpose of
432
 * this bit is to consolidate information as an optimization. */
433
0
#define QUICLY_PENDING_FLOW_OTHERS_BIT (1 << 6)
434
        /**
435
         *
436
         */
437
        uint8_t try_jumpstart : 1;
438
        /**
439
         * payload of DATAGRAM frames to be sent
440
         */
441
        struct {
442
            ptls_iovec_t payloads[10];
443
            size_t count;
444
        } datagram_frame_payloads;
445
        /**
446
         * delivery rate estimator
447
         */
448
        quicly_ratemeter_t ratemeter;
449
    } egress;
450
    /**
451
     * crypto data
452
     */
453
    struct {
454
        ptls_t *tls;
455
        ptls_handshake_properties_t handshake_properties;
456
        struct {
457
            ptls_raw_extension_t ext[2];
458
            ptls_buffer_t buf;
459
        } transport_params;
460
        unsigned async_in_progress : 1;
461
    } crypto;
462
    /**
463
     * token (if the token is a Retry token can be determined by consulting the length of retry_scid)
464
     */
465
    ptls_iovec_t token;
466
    /**
467
     * len=UINT8_MAX if Retry was not used, use client_received_retry() to check
468
     */
469
    quicly_cid_t retry_scid;
470
    /**
471
     *
472
     */
473
    struct {
474
        /**
475
         * The moment when the idle timeout fires (including the additional 3 PTO). The value is set to INT64_MAX while the
476
         * handshake is in progress.
477
         */
478
        int64_t at;
479
        /**
480
         * idle timeout
481
         */
482
        uint8_t should_rearm_on_send : 1;
483
    } idle_timeout;
484
    /**
485
     * records the time when this connection was created
486
     */
487
    int64_t created_at;
488
    /**
489
     *
490
     */
491
    struct {
492
        union {
493
            struct {
494
                struct st_quicly_delayed_packet_linklist_t {
495
                    struct st_quicly_delayed_packet_t *head, **tail;
496
                } zero_rtt, handshake, one_rtt;
497
            };
498
            struct st_quicly_delayed_packet_linklist_t as_array[3];
499
        };
500
        size_t num_packets;
501
        unsigned slots_newly_processible;
502
    } delayed_packets;
503
    /**
504
     * structure to hold various data used internally
505
     */
506
    struct {
507
        /**
508
         * This value holds current time that remains constant while quicly functions that deal with time are running. Only
509
         * available when the lock is held using `lock_now`.
510
         */
511
        int64_t now;
512
        /**
513
         *
514
         */
515
        uint8_t lock_count;
516
        struct {
517
            /**
518
             * This cache is used to concatenate acked ranges of streams before processing them, reducing the frequency of function
519
             * calls to `quicly_sendstate_t` and to the application-level send window management callbacks. This approach works,
520
             * because in most cases acks will contain contiguous ranges of a single stream.
521
             */
522
            struct {
523
                /**
524
                 * set to INT64_MIN when the cache is invalid
525
                 */
526
                quicly_stream_id_t stream_id;
527
                quicly_sendstate_sent_t args;
528
            } active_acked_cache;
529
        } on_ack_stream;
530
    } stash;
531
};
532
533
#if QUICLY_USE_TRACER
534
#include "quicly-tracer.h"
535
#endif
536
537
struct st_quicly_handle_payload_state_t {
538
    const uint8_t *src, *const end;
539
    size_t epoch;
540
    size_t path_index;
541
    uint64_t frame_type;
542
};
543
544
static void crypto_stream_receive(quicly_stream_t *stream, size_t off, const void *src, size_t len);
545
546
static const quicly_stream_callbacks_t crypto_stream_callbacks = {quicly_streambuf_destroy, quicly_streambuf_egress_shift,
547
                                                                  quicly_streambuf_egress_emit, NULL, crypto_stream_receive};
548
549
static int update_traffic_key_cb(ptls_update_traffic_key_t *self, ptls_t *tls, int is_enc, size_t epoch, const void *secret);
550
static quicly_error_t initiate_close(quicly_conn_t *conn, quicly_error_t err, uint64_t frame_type, const char *reason_phrase);
551
static quicly_error_t handle_close(quicly_conn_t *conn, quicly_error_t err, uint64_t frame_type, ptls_iovec_t reason_phrase);
552
static quicly_error_t discard_sentmap_by_epoch(quicly_conn_t *conn, unsigned ack_epochs);
553
554
quicly_cid_plaintext_t quicly_cid_plaintext_invalid = {.node_id = UINT64_MAX, .thread_id = 0xffffff};
555
556
static const quicly_transport_parameters_t default_transport_params = {.max_udp_payload_size = QUICLY_DEFAULT_MAX_UDP_PAYLOAD_SIZE,
557
                                                                       .ack_delay_exponent = QUICLY_DEFAULT_ACK_DELAY_EXPONENT,
558
                                                                       .max_ack_delay = QUICLY_DEFAULT_MAX_ACK_DELAY,
559
                                                                       .min_ack_delay_usec = UINT64_MAX,
560
                                                                       .active_connection_id_limit =
561
                                                                           QUICLY_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT};
562
563
const quicly_salt_t *quicly_get_salt(uint32_t protocol_version)
564
0
{
565
0
    static const quicly_salt_t
566
0
        v1 = {.initial = {0x38, 0x76, 0x2c, 0xf7, 0xf5, 0x59, 0x34, 0xb3, 0x4d, 0x17,
567
0
                          0x9a, 0xe6, 0xa4, 0xc8, 0x0c, 0xad, 0xcc, 0xbb, 0x7f, 0x0a},
568
0
              .retry = {.key = {0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a, 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e},
569
0
                        .iv = {0x46, 0x15, 0x99, 0xd3, 0x5d, 0x63, 0x2b, 0xf2, 0x23, 0x98, 0x25, 0xbb}}},
570
0
        draft29 = {.initial = {0xaf, 0xbf, 0xec, 0x28, 0x99, 0x93, 0xd2, 0x4c, 0x9e, 0x97,
571
0
                               0x86, 0xf1, 0x9c, 0x61, 0x11, 0xe0, 0x43, 0x90, 0xa8, 0x99},
572
0
                   .retry = {.key = {0xcc, 0xce, 0x18, 0x7e, 0xd0, 0x9a, 0x09, 0xd0, 0x57, 0x28, 0x15, 0x5a, 0x6c, 0xb9, 0x6b,
573
0
                                     0xe1},
574
0
                             .iv = {0xe5, 0x49, 0x30, 0xf9, 0x7f, 0x21, 0x36, 0xf0, 0x53, 0x0a, 0x8c, 0x1c}}},
575
0
        draft27 = {
576
0
            .initial = {0xc3, 0xee, 0xf7, 0x12, 0xc7, 0x2e, 0xbb, 0x5a, 0x11, 0xa7,
577
0
                        0xd2, 0x43, 0x2b, 0xb4, 0x63, 0x65, 0xbe, 0xf9, 0xf5, 0x02},
578
0
            .retry = {.key = {0x4d, 0x32, 0xec, 0xdb, 0x2a, 0x21, 0x33, 0xc8, 0x41, 0xe4, 0x04, 0x3d, 0xf2, 0x7d, 0x44, 0x30},
579
0
                      .iv = {0x4d, 0x16, 0x11, 0xd0, 0x55, 0x13, 0xa5, 0x52, 0xc5, 0x87, 0xd5, 0x75}}};
580
581
0
    switch (protocol_version) {
582
0
    case QUICLY_PROTOCOL_VERSION_1:
583
0
        return &v1;
584
0
    case QUICLY_PROTOCOL_VERSION_DRAFT29:
585
0
        return &draft29;
586
0
    case QUICLY_PROTOCOL_VERSION_DRAFT27:
587
0
        return &draft27;
588
0
        break;
589
0
    default:
590
0
        return NULL;
591
0
    }
592
0
}
593
594
static int enable_with_ratio255(uint8_t ratio, void (*random_bytes)(void *, size_t))
595
0
{
596
0
    if (ratio == 0)
597
0
        return 0;
598
0
    if (ratio == 255)
599
0
        return 1;
600
601
    /* approximate using 255*257=256*256-1 */
602
0
    uint16_t r;
603
0
    random_bytes(&r, sizeof(r));
604
0
    return r < ratio * 257u;
605
0
}
606
607
static void lock_now(quicly_conn_t *conn, int is_reentrant)
608
0
{
609
0
    if (conn->stash.now == 0) {
610
0
        assert(conn->stash.lock_count == 0);
611
0
        conn->stash.now = conn->super.ctx->now->cb(conn->super.ctx->now);
612
0
    } else {
613
0
        assert(is_reentrant && "caller must be reentrant");
614
0
        assert(conn->stash.lock_count != 0);
615
0
    }
616
617
0
    ++conn->stash.lock_count;
618
0
}
619
620
static void unlock_now(quicly_conn_t *conn)
621
0
{
622
0
    assert(conn->stash.now != 0);
623
624
0
    if (--conn->stash.lock_count == 0)
625
0
        conn->stash.now = 0;
626
0
}
627
628
static void set_address(quicly_address_t *addr, struct sockaddr *sa)
629
0
{
630
0
    if (sa == NULL) {
631
0
        addr->sa.sa_family = AF_UNSPEC;
632
0
        return;
633
0
    }
634
635
0
    switch (sa->sa_family) {
636
0
    case AF_UNSPEC:
637
0
        addr->sa.sa_family = AF_UNSPEC;
638
0
        break;
639
0
    case AF_INET:
640
0
        addr->sin = *(struct sockaddr_in *)sa;
641
0
        break;
642
0
    case AF_INET6:
643
0
        addr->sin6 = *(struct sockaddr_in6 *)sa;
644
0
        break;
645
0
    default:
646
0
        memset(addr, 0xff, sizeof(*addr));
647
0
        assert(!"unexpected address type");
648
0
        break;
649
0
    }
650
0
}
651
652
static ptls_cipher_suite_t *get_aes128gcmsha256(quicly_context_t *ctx)
653
0
{
654
0
    ptls_cipher_suite_t **cs;
655
656
0
    for (cs = ctx->tls->cipher_suites;; ++cs) {
657
0
        assert(cs != NULL);
658
0
        if ((*cs)->id == PTLS_CIPHER_SUITE_AES_128_GCM_SHA256)
659
0
            break;
660
0
    }
661
0
    return *cs;
662
0
}
663
664
static inline uint8_t get_epoch(uint8_t first_byte)
665
0
{
666
0
    if (!QUICLY_PACKET_IS_LONG_HEADER(first_byte))
667
0
        return QUICLY_EPOCH_1RTT;
668
669
0
    switch (first_byte & QUICLY_PACKET_TYPE_BITMASK) {
670
0
    case QUICLY_PACKET_TYPE_INITIAL:
671
0
        return QUICLY_EPOCH_INITIAL;
672
0
    case QUICLY_PACKET_TYPE_HANDSHAKE:
673
0
        return QUICLY_EPOCH_HANDSHAKE;
674
0
    case QUICLY_PACKET_TYPE_0RTT:
675
0
        return QUICLY_EPOCH_0RTT;
676
0
    default:
677
0
        assert(!"FIXME");
678
0
    }
679
0
}
680
681
static ptls_aead_context_t *create_retry_aead(quicly_context_t *ctx, uint32_t protocol_version, int is_enc)
682
0
{
683
0
    const quicly_salt_t *salt = quicly_get_salt(protocol_version);
684
0
    assert(salt != NULL);
685
686
0
    ptls_cipher_suite_t *algo = get_aes128gcmsha256(ctx);
687
0
    ptls_aead_context_t *aead = ptls_aead_new_direct(algo->aead, is_enc, salt->retry.key, salt->retry.iv);
688
0
    assert(aead != NULL);
689
0
    return aead;
690
0
}
691
692
static void dispose_cipher(struct st_quicly_cipher_context_t *ctx)
693
0
{
694
0
    ptls_aead_free(ctx->aead);
695
0
    ptls_cipher_free(ctx->header_protection);
696
0
}
697
698
static void clear_datagram_frame_payloads(quicly_conn_t *conn)
699
0
{
700
0
    for (size_t i = 0; i != conn->egress.datagram_frame_payloads.count; ++i) {
701
0
        free(conn->egress.datagram_frame_payloads.payloads[i].base);
702
0
        conn->egress.datagram_frame_payloads.payloads[i] = ptls_iovec_init(NULL, 0);
703
0
    }
704
0
    conn->egress.datagram_frame_payloads.count = 0;
705
0
}
706
707
/**
708
 * changes the raw bytes being referred to by `packet` to `octets`
709
 */
710
static void adjust_pointers_of_decoded_packet(quicly_decoded_packet_t *packet, uint8_t *octets)
711
0
{
712
0
    uint8_t *orig = packet->octets.base;
713
0
    uintptr_t diff = (uintptr_t)octets - (uintptr_t)packet->octets.base;
714
715
0
#define ADJUST(memb, nullable)                                                                                                     \
716
0
    do {                                                                                                                           \
717
0
        if (!(nullable && packet->memb == NULL)) {                                                                                 \
718
0
            assert(orig <= packet->memb && packet->memb <= orig + packet->octets.len);                                             \
719
0
            packet->memb = (void *)((uintptr_t)packet->memb + diff);                                                               \
720
0
        }                                                                                                                          \
721
0
    } while (0)
722
0
    ADJUST(octets.base, 0);
723
0
    ADJUST(cid.dest.encrypted.base, 1);
724
0
    ADJUST(cid.src.base, 1);
725
0
    ADJUST(token.base, 1);
726
0
#undef ADJUST
727
0
}
728
729
static int is_retry(quicly_conn_t *conn)
730
0
{
731
0
    return conn->retry_scid.len != UINT8_MAX;
732
0
}
733
734
static int needs_cid_auth(quicly_conn_t *conn)
735
0
{
736
0
    switch (conn->super.version) {
737
0
    case QUICLY_PROTOCOL_VERSION_1:
738
0
    case QUICLY_PROTOCOL_VERSION_DRAFT29:
739
0
        return 1;
740
0
    default:
741
0
        return 0;
742
0
    }
743
0
}
744
745
static int64_t get_sentmap_expiration_time(quicly_conn_t *conn)
746
0
{
747
0
    return quicly_loss_get_sentmap_expiration_time(&conn->egress.loss, conn->super.remote.transport_params.max_ack_delay);
748
0
}
749
750
/**
751
 * converts ECN bits to index in the order of ACK-ECN field (i.e., ECT(0) -> 0, ECT(1) -> 1, CE -> 2)
752
 */
753
static size_t get_ecn_index_from_bits(uint8_t bits)
754
0
{
755
0
    assert(1 <= bits && bits <= 3);
756
0
    return (18 >> bits) & 3;
757
0
}
758
759
static void update_ecn_state(quicly_conn_t *conn, enum en_quicly_ecn_state new_state)
760
0
{
761
0
    assert(new_state == QUICLY_ECN_ON || new_state == QUICLY_ECN_OFF);
762
763
0
    conn->egress.ecn.state = new_state;
764
0
    if (new_state == QUICLY_ECN_ON) {
765
0
        ++conn->super.stats.num_paths.ecn_validated;
766
0
    } else {
767
0
        ++conn->super.stats.num_paths.ecn_failed;
768
0
    }
769
770
0
    QUICLY_PROBE(ECN_VALIDATION, conn, conn->stash.now, (int)new_state);
771
0
    QUICLY_LOG_CONN(ecn_validation, conn, { PTLS_LOG_ELEMENT_SIGNED(state, (int)new_state); });
772
0
}
773
774
static void ack_frequency_set_next_update_at(quicly_conn_t *conn)
775
0
{
776
0
    if (conn->super.remote.transport_params.min_ack_delay_usec != UINT64_MAX)
777
0
        conn->egress.ack_frequency.update_at = conn->stash.now + get_sentmap_expiration_time(conn);
778
0
}
779
780
size_t quicly_decode_packet(quicly_context_t *ctx, quicly_decoded_packet_t *packet, const uint8_t *datagram, size_t datagram_size,
781
                            size_t *off)
782
0
{
783
0
    const uint8_t *src = datagram, *src_end = datagram + datagram_size;
784
785
0
    assert(*off <= datagram_size);
786
787
0
    packet->octets = ptls_iovec_init(src + *off, datagram_size - *off);
788
0
    if (packet->octets.len < 2)
789
0
        goto Error;
790
0
    packet->datagram_size = *off == 0 ? datagram_size : 0;
791
0
    packet->token = ptls_iovec_init(NULL, 0);
792
0
    packet->decrypted.pn = UINT64_MAX;
793
0
    packet->ecn = 0; /* non-ECT */
794
795
    /* move the cursor to the second byte */
796
0
    src += *off + 1;
797
798
0
    if (QUICLY_PACKET_IS_LONG_HEADER(packet->octets.base[0])) {
799
        /* long header */
800
0
        uint64_t rest_length;
801
0
        if (src_end - src < 5)
802
0
            goto Error;
803
0
        packet->version = quicly_decode32(&src);
804
0
        packet->cid.dest.encrypted.len = *src++;
805
0
        if (src_end - src < packet->cid.dest.encrypted.len + 1)
806
0
            goto Error;
807
0
        packet->cid.dest.encrypted.base = (uint8_t *)src;
808
0
        src += packet->cid.dest.encrypted.len;
809
0
        packet->cid.src.len = *src++;
810
0
        if (src_end - src < packet->cid.src.len)
811
0
            goto Error;
812
0
        packet->cid.src.base = (uint8_t *)src;
813
0
        src += packet->cid.src.len;
814
0
        switch (packet->octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) {
815
0
        case QUICLY_PACKET_TYPE_INITIAL:
816
0
        case QUICLY_PACKET_TYPE_0RTT:
817
0
            if (ctx->cid_encryptor == NULL || packet->cid.dest.encrypted.len == 0 ||
818
0
                ctx->cid_encryptor->decrypt_cid(ctx->cid_encryptor, &packet->cid.dest.plaintext, packet->cid.dest.encrypted.base,
819
0
                                                packet->cid.dest.encrypted.len) == SIZE_MAX)
820
0
                packet->cid.dest.plaintext = quicly_cid_plaintext_invalid;
821
0
            packet->cid.dest.might_be_client_generated = 1;
822
0
            break;
823
0
        default:
824
0
            if (ctx->cid_encryptor != NULL) {
825
0
                if (packet->cid.dest.encrypted.len == 0)
826
0
                    goto Error;
827
0
                if (ctx->cid_encryptor->decrypt_cid(ctx->cid_encryptor, &packet->cid.dest.plaintext,
828
0
                                                    packet->cid.dest.encrypted.base, packet->cid.dest.encrypted.len) == SIZE_MAX)
829
0
                    goto Error;
830
0
            } else {
831
0
                packet->cid.dest.plaintext = quicly_cid_plaintext_invalid;
832
0
            }
833
0
            packet->cid.dest.might_be_client_generated = 0;
834
0
            break;
835
0
        }
836
0
        switch (packet->version) {
837
0
        case QUICLY_PROTOCOL_VERSION_1:
838
0
        case QUICLY_PROTOCOL_VERSION_DRAFT29:
839
0
        case QUICLY_PROTOCOL_VERSION_DRAFT27:
840
            /* these are the recognized versions, and they share the same packet header format */
841
0
            if ((packet->octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) == QUICLY_PACKET_TYPE_RETRY) {
842
                /* retry */
843
0
                if (src_end - src <= PTLS_AESGCM_TAG_SIZE)
844
0
                    goto Error;
845
0
                packet->token = ptls_iovec_init(src, src_end - src - PTLS_AESGCM_TAG_SIZE);
846
0
                src += packet->token.len;
847
0
                packet->encrypted_off = src - packet->octets.base;
848
0
            } else {
849
                /* coalescible long header packet */
850
0
                if ((packet->octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) == QUICLY_PACKET_TYPE_INITIAL) {
851
                    /* initial has a token */
852
0
                    uint64_t token_len;
853
0
                    if ((token_len = quicly_decodev(&src, src_end)) == UINT64_MAX)
854
0
                        goto Error;
855
0
                    if (src_end - src < token_len)
856
0
                        goto Error;
857
0
                    packet->token = ptls_iovec_init(src, token_len);
858
0
                    src += token_len;
859
0
                }
860
0
                if ((rest_length = quicly_decodev(&src, src_end)) == UINT64_MAX)
861
0
                    goto Error;
862
0
                if (rest_length < 1)
863
0
                    goto Error;
864
0
                if (src_end - src < rest_length)
865
0
                    goto Error;
866
0
                packet->encrypted_off = src - packet->octets.base;
867
0
                packet->octets.len = packet->encrypted_off + rest_length;
868
0
            }
869
0
            break;
870
0
        default:
871
            /* VN packet or packets of unknown version cannot be parsed. `encrypted_off` is set to the first byte after SCID. */
872
0
            packet->encrypted_off = src - packet->octets.base;
873
0
        }
874
0
        packet->_is_stateless_reset_cached = QUICLY__DECODED_PACKET_CACHED_NOT_STATELESS_RESET;
875
0
    } else {
876
        /* short header */
877
0
        if (ctx->cid_encryptor != NULL) {
878
0
            if (src_end - src < QUICLY_MAX_CID_LEN_V1)
879
0
                goto Error;
880
0
            size_t local_cidl = ctx->cid_encryptor->decrypt_cid(ctx->cid_encryptor, &packet->cid.dest.plaintext, src, 0);
881
0
            if (local_cidl == SIZE_MAX)
882
0
                goto Error;
883
0
            packet->cid.dest.encrypted = ptls_iovec_init(src, local_cidl);
884
0
            src += local_cidl;
885
0
        } else {
886
0
            packet->cid.dest.encrypted = ptls_iovec_init(NULL, 0);
887
0
            packet->cid.dest.plaintext = quicly_cid_plaintext_invalid;
888
0
        }
889
0
        packet->cid.dest.might_be_client_generated = 0;
890
0
        packet->cid.src = ptls_iovec_init(NULL, 0);
891
0
        packet->version = 0;
892
0
        packet->encrypted_off = src - packet->octets.base;
893
0
        packet->_is_stateless_reset_cached = QUICLY__DECODED_PACKET_CACHED_MAYBE_STATELESS_RESET;
894
0
    }
895
896
0
    *off += packet->octets.len;
897
0
    return packet->octets.len;
898
899
0
Error:
900
0
    return SIZE_MAX;
901
0
}
902
903
uint64_t quicly_determine_packet_number(uint32_t truncated, size_t num_bits, uint64_t expected)
904
0
{
905
0
    uint64_t win = (uint64_t)1 << num_bits, candidate = (expected & ~(win - 1)) | truncated;
906
907
0
    if (candidate + win / 2 <= expected)
908
0
        return candidate + win;
909
0
    if (candidate > expected + win / 2 && candidate >= win)
910
0
        return candidate - win;
911
0
    return candidate;
912
0
}
913
914
static void assert_consistency(quicly_conn_t *conn, int timer_must_be_in_future)
915
0
{
916
0
    if (conn->super.state >= QUICLY_STATE_CLOSING) {
917
0
        assert(!timer_must_be_in_future || conn->stash.now < conn->egress.send_ack_at);
918
0
        return;
919
0
    }
920
921
0
    if (conn->egress.loss.sentmap.bytes_in_flight != 0 || conn->super.remote.address_validation.send_probe) {
922
0
        assert(conn->egress.loss.alarm_at != INT64_MAX);
923
0
    } else {
924
0
        assert(conn->egress.loss.loss_time == INT64_MAX);
925
0
    }
926
    /* Allow timers not in the future when the remote peer is not yet validated, since we may not be able to send packets even when
927
     * timers fire. */
928
0
    if (timer_must_be_in_future && conn->super.remote.address_validation.validated)
929
0
        assert(conn->stash.now < conn->egress.loss.alarm_at);
930
0
}
931
932
static quicly_error_t on_invalid_ack(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent)
933
0
{
934
0
    if (acked)
935
0
        return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
936
0
    return 0;
937
0
}
938
939
static uint64_t calc_next_pn_to_skip(ptls_context_t *tlsctx, uint64_t next_pn, uint32_t cwnd, uint64_t mtu)
940
0
{
941
0
    static __thread struct {
942
0
        uint32_t values[8];
943
0
        size_t off;
944
0
    } cached_rand;
945
946
0
    if (cached_rand.off == 0) {
947
0
        tlsctx->random_bytes(cached_rand.values, sizeof(cached_rand.values));
948
0
        cached_rand.off = PTLS_ELEMENTSOF(cached_rand.values);
949
0
    }
950
951
    /* on average, skip one PN per every min(256 packets, 8 * CWND) */
952
0
    uint32_t packet_cwnd = cwnd / mtu;
953
0
    if (packet_cwnd < 32)
954
0
        packet_cwnd = 32;
955
0
    uint64_t skip_after = cached_rand.values[--cached_rand.off] % (16 * packet_cwnd);
956
0
    return next_pn + 1 + skip_after;
957
0
}
958
959
static void init_max_streams(struct st_quicly_max_streams_t *m)
960
0
{
961
0
    m->count = 0;
962
0
    quicly_maxsender_init(&m->blocked_sender, -1);
963
0
}
964
965
static quicly_error_t update_max_streams(struct st_quicly_max_streams_t *m, uint64_t count)
966
0
{
967
0
    if (count > (uint64_t)1 << 60)
968
0
        return QUICLY_TRANSPORT_ERROR_STREAM_LIMIT;
969
970
0
    if (m->count < count) {
971
0
        m->count = count;
972
0
        if (m->blocked_sender.max_acked < count)
973
0
            m->blocked_sender.max_acked = count;
974
0
    }
975
976
0
    return 0;
977
0
}
978
979
int quicly_connection_is_ready(quicly_conn_t *conn)
980
0
{
981
0
    return conn->application != NULL;
982
0
}
983
984
static int stream_is_destroyable(quicly_stream_t *stream)
985
0
{
986
0
    if (!quicly_recvstate_transfer_complete(&stream->recvstate))
987
0
        return 0;
988
0
    if (!quicly_sendstate_transfer_complete(&stream->sendstate))
989
0
        return 0;
990
0
    switch (stream->_send_aux.reset_stream.sender_state) {
991
0
    case QUICLY_SENDER_STATE_NONE:
992
0
    case QUICLY_SENDER_STATE_ACKED:
993
0
        break;
994
0
    default:
995
0
        return 0;
996
0
    }
997
0
    return 1;
998
0
}
999
1000
static void sched_stream_control(quicly_stream_t *stream)
1001
0
{
1002
0
    assert(stream->stream_id >= 0);
1003
1004
0
    if (!quicly_linklist_is_linked(&stream->_send_aux.pending_link.control))
1005
0
        quicly_linklist_insert(stream->conn->egress.pending_streams.control.prev, &stream->_send_aux.pending_link.control);
1006
0
}
1007
1008
static void resched_stream_data(quicly_stream_t *stream)
1009
0
{
1010
0
    if (stream->stream_id < 0) {
1011
0
        assert(-4 <= stream->stream_id);
1012
0
        uint8_t mask = 1 << -(1 + stream->stream_id);
1013
0
        assert((mask & (QUICLY_PENDING_FLOW_NEW_TOKEN_BIT | QUICLY_PENDING_FLOW_HANDSHAKE_DONE_BIT |
1014
0
                        QUICLY_PENDING_FLOW_OTHERS_BIT)) == 0);
1015
0
        if (stream->sendstate.pending.num_ranges != 0) {
1016
0
            stream->conn->egress.pending_flows |= mask;
1017
0
        } else {
1018
0
            stream->conn->egress.pending_flows &= ~mask;
1019
0
        }
1020
0
        return;
1021
0
    }
1022
1023
    /* do nothing if blocked */
1024
0
    if (stream->streams_blocked)
1025
0
        return;
1026
1027
0
    quicly_stream_scheduler_t *scheduler = stream->conn->super.ctx->stream_scheduler;
1028
0
    scheduler->update_state(scheduler, stream);
1029
0
}
1030
1031
static int should_send_max_data(quicly_conn_t *conn)
1032
0
{
1033
0
    return quicly_maxsender_should_send_max(&conn->ingress.max_data.sender, conn->ingress.max_data.bytes_consumed,
1034
0
                                            (uint32_t)conn->super.ctx->transport_params.max_data, 512);
1035
0
}
1036
1037
static int should_send_max_stream_data(quicly_stream_t *stream)
1038
0
{
1039
0
    if (stream->recvstate.eos != UINT64_MAX)
1040
0
        return 0;
1041
0
    return quicly_maxsender_should_send_max(&stream->_send_aux.max_stream_data_sender, stream->recvstate.data_off,
1042
0
                                            stream->_recv_aux.window, 512);
1043
0
}
1044
1045
int quicly_stream_sync_sendbuf(quicly_stream_t *stream, int activate)
1046
22.3k
{
1047
22.3k
    int ret;
1048
1049
22.3k
    if (activate) {
1050
22.3k
        if ((ret = quicly_sendstate_activate(&stream->sendstate)) != 0)
1051
0
            return ret;
1052
22.3k
    }
1053
1054
22.3k
    resched_stream_data(stream);
1055
22.3k
    return 0;
1056
22.3k
}
1057
1058
void quicly_stream_sync_recvbuf(quicly_stream_t *stream, size_t shift_amount)
1059
{
1060
    stream->recvstate.data_off += shift_amount;
1061
    if (stream->stream_id >= 0) {
1062
        if (should_send_max_stream_data(stream))
1063
            sched_stream_control(stream);
1064
    }
1065
}
1066
1067
/**
1068
 * calculate how many CIDs we provide to the remote peer
1069
 */
1070
static size_t local_cid_size(const quicly_conn_t *conn)
1071
0
{
1072
0
    PTLS_BUILD_ASSERT(QUICLY_LOCAL_ACTIVE_CONNECTION_ID_LIMIT < SIZE_MAX / sizeof(uint64_t));
1073
1074
    /* if we don't have an encryptor, the only CID we issue is the one we send during handshake */
1075
0
    if (conn->super.ctx->cid_encryptor == NULL)
1076
0
        return 1;
1077
1078
0
    uint64_t capacity = conn->super.remote.transport_params.active_connection_id_limit;
1079
0
    if (capacity > QUICLY_LOCAL_ACTIVE_CONNECTION_ID_LIMIT)
1080
0
        capacity = QUICLY_LOCAL_ACTIVE_CONNECTION_ID_LIMIT;
1081
0
    return capacity;
1082
0
}
1083
1084
/**
1085
 * Resets CIDs associated to paths if they are being retired. To maximize the chance of having enough number of CIDs to run all
1086
 * paths when new CIDs are provided through multiple NCID frames possibly scattered over multiple packets, CIDs are reassigned to
1087
 * the paths lazily.
1088
 */
1089
static void dissociate_cid(quicly_conn_t *conn, uint64_t sequence)
1090
0
{
1091
0
    for (size_t i = 0; i < PTLS_ELEMENTSOF(conn->paths); ++i) {
1092
0
        struct st_quicly_conn_path_t *path = conn->paths[i];
1093
0
        if (path != NULL && path->dcid == sequence)
1094
0
            path->dcid = UINT64_MAX;
1095
0
    }
1096
0
}
1097
1098
static int write_crypto_data(quicly_conn_t *conn, ptls_buffer_t *tlsbuf, size_t epoch_offsets[5])
1099
0
{
1100
0
    size_t epoch;
1101
0
    int ret;
1102
1103
0
    if (tlsbuf->off == 0)
1104
0
        return 0;
1105
1106
0
    for (epoch = 0; epoch < 4; ++epoch) {
1107
0
        size_t len = epoch_offsets[epoch + 1] - epoch_offsets[epoch];
1108
0
        if (len == 0)
1109
0
            continue;
1110
0
        quicly_stream_t *stream = quicly_get_stream(conn, -(quicly_stream_id_t)(1 + epoch));
1111
0
        assert(stream != NULL);
1112
0
        if ((ret = quicly_streambuf_egress_write(stream, tlsbuf->base + epoch_offsets[epoch], len)) != 0)
1113
0
            return ret;
1114
0
    }
1115
1116
0
    return 0;
1117
0
}
1118
1119
/**
1120
 * compresses a quicly error code into an int, converting QUIC transport error codes into negative ints
1121
 */
1122
static int compress_handshake_result(quicly_error_t quicly_err)
1123
0
{
1124
0
    if (QUICLY_ERROR_IS_QUIC_TRANSPORT(quicly_err)) {
1125
0
        assert(QUICLY_ERROR_GET_ERROR_CODE(quicly_err) <= INT32_MAX);
1126
0
        return (int)-QUICLY_ERROR_GET_ERROR_CODE(quicly_err);
1127
0
    } else {
1128
0
        assert(0 <= quicly_err && quicly_err < INT_MAX);
1129
0
        return (int)quicly_err;
1130
0
    }
1131
0
}
1132
1133
static quicly_error_t expand_handshake_result(int compressed_err)
1134
0
{
1135
0
    if (compressed_err < 0) {
1136
0
        return QUICLY_ERROR_FROM_TRANSPORT_ERROR_CODE(-compressed_err);
1137
0
    } else {
1138
0
        return compressed_err;
1139
0
    }
1140
0
}
1141
1142
static void crypto_handshake(quicly_conn_t *conn, size_t in_epoch, ptls_iovec_t input)
1143
0
{
1144
0
    ptls_buffer_t output;
1145
0
    size_t epoch_offsets[5] = {0};
1146
1147
0
    assert(!conn->crypto.async_in_progress);
1148
1149
0
    ptls_buffer_init(&output, "", 0);
1150
1151
0
    quicly_error_t handshake_result = expand_handshake_result(ptls_handle_message(
1152
0
        conn->crypto.tls, &output, epoch_offsets, in_epoch, input.base, input.len, &conn->crypto.handshake_properties));
1153
0
    QUICLY_PROBE(CRYPTO_HANDSHAKE, conn, conn->stash.now, handshake_result);
1154
0
    QUICLY_LOG_CONN(crypto_handshake, conn, { PTLS_LOG_ELEMENT_SIGNED(ret, handshake_result); });
1155
0
    switch (handshake_result) {
1156
0
    case 0:
1157
0
    case PTLS_ERROR_IN_PROGRESS:
1158
0
        break;
1159
0
    case PTLS_ERROR_ASYNC_OPERATION:
1160
0
        assert(conn->super.ctx->async_handshake != NULL &&
1161
0
               "async handshake is used but the quicly_context_t::async_handshake is NULL");
1162
0
        conn->crypto.async_in_progress = 1;
1163
0
        conn->super.ctx->async_handshake->cb(conn->super.ctx->async_handshake, conn->crypto.tls);
1164
0
        break;
1165
0
    default:
1166
0
        initiate_close(conn,
1167
0
                       QUICLY_ERROR_IS_QUIC_TRANSPORT(handshake_result) ||
1168
0
                               PTLS_ERROR_GET_CLASS(handshake_result) == PTLS_ERROR_CLASS_SELF_ALERT
1169
0
                           ? handshake_result
1170
0
                           : QUICLY_TRANSPORT_ERROR_INTERNAL,
1171
0
                       QUICLY_FRAME_TYPE_CRYPTO, NULL);
1172
0
        goto Exit;
1173
0
    }
1174
    /* drop 0-RTT write key if 0-RTT is rejected by remote peer */
1175
0
    if (conn->application != NULL && !conn->application->one_rtt_writable && conn->application->cipher.egress.key.aead != NULL) {
1176
0
        assert(quicly_is_client(conn));
1177
0
        if (conn->crypto.handshake_properties.client.early_data_acceptance == PTLS_EARLY_DATA_REJECTED) {
1178
0
            dispose_cipher(&conn->application->cipher.egress.key);
1179
0
            conn->application->cipher.egress.key = (struct st_quicly_cipher_context_t){NULL};
1180
            /* retire all packets with ack_epoch == 3; they are all 0-RTT packets */
1181
0
            quicly_error_t ret;
1182
0
            if ((ret = discard_sentmap_by_epoch(conn, 1u << QUICLY_EPOCH_1RTT)) != 0) {
1183
0
                initiate_close(conn, ret, QUICLY_FRAME_TYPE_CRYPTO, NULL);
1184
0
                goto Exit;
1185
0
            }
1186
0
        }
1187
0
    }
1188
1189
0
    write_crypto_data(conn, &output, epoch_offsets);
1190
1191
0
Exit:
1192
0
    ptls_buffer_dispose(&output);
1193
0
}
1194
1195
void crypto_stream_receive(quicly_stream_t *stream, size_t off, const void *src, size_t len)
1196
0
{
1197
0
    quicly_conn_t *conn = stream->conn;
1198
0
    ptls_iovec_t input;
1199
1200
    /* store input */
1201
0
    if (quicly_streambuf_ingress_receive(stream, off, src, len) != 0)
1202
0
        return;
1203
1204
    /* While the server generates the handshake signature asynchronously, clients would not send additional messages. They cannot
1205
     * generate Finished. They would not send Certificate / CertificateVerify before authenticating the server identity. */
1206
0
    if (conn->crypto.async_in_progress) {
1207
0
        initiate_close(conn, PTLS_ALERT_UNEXPECTED_MESSAGE, QUICLY_FRAME_TYPE_CRYPTO, NULL);
1208
0
        return;
1209
0
    }
1210
1211
    /* feed the input into TLS, send result */
1212
0
    if ((input = quicly_streambuf_ingress_get(stream)).len != 0) {
1213
0
        size_t in_epoch = -(1 + stream->stream_id);
1214
0
        crypto_handshake(conn, in_epoch, input);
1215
0
        quicly_streambuf_ingress_shift(stream, input.len);
1216
0
    }
1217
0
}
1218
1219
quicly_conn_t *quicly_resume_handshake(ptls_t *tls)
1220
0
{
1221
0
    quicly_conn_t *conn;
1222
1223
0
    if ((conn = *ptls_get_data_ptr(tls)) == NULL) {
1224
        /* QUIC connection has been closed while TLS async operation was inflight. */
1225
0
        ptls_free(tls);
1226
0
        return NULL;
1227
0
    }
1228
1229
0
    assert(conn->crypto.async_in_progress);
1230
0
    conn->crypto.async_in_progress = 0;
1231
1232
0
    if (conn->super.state >= QUICLY_STATE_CLOSING)
1233
0
        return conn;
1234
1235
0
    crypto_handshake(conn, 0, ptls_iovec_init(NULL, 0));
1236
0
    return conn;
1237
0
}
1238
1239
static void init_stream_properties(quicly_stream_t *stream, uint32_t initial_max_stream_data_local,
1240
                                   uint64_t initial_max_stream_data_remote)
1241
0
{
1242
0
    int is_client = quicly_is_client(stream->conn);
1243
1244
0
    if (quicly_stream_has_send_side(is_client, stream->stream_id)) {
1245
0
        quicly_sendstate_init(&stream->sendstate);
1246
0
    } else {
1247
0
        quicly_sendstate_init_closed(&stream->sendstate);
1248
0
    }
1249
0
    if (quicly_stream_has_receive_side(is_client, stream->stream_id)) {
1250
0
        quicly_recvstate_init(&stream->recvstate);
1251
0
    } else {
1252
0
        quicly_recvstate_init_closed(&stream->recvstate);
1253
0
    }
1254
0
    stream->streams_blocked = 0;
1255
1256
0
    stream->_send_aux.max_stream_data = initial_max_stream_data_remote;
1257
0
    stream->_send_aux.stop_sending.sender_state = QUICLY_SENDER_STATE_NONE;
1258
0
    stream->_send_aux.stop_sending.error_code = 0;
1259
0
    stream->_send_aux.reset_stream.sender_state = QUICLY_SENDER_STATE_NONE;
1260
0
    stream->_send_aux.reset_stream.error_code = 0;
1261
0
    quicly_maxsender_init(&stream->_send_aux.max_stream_data_sender, initial_max_stream_data_local);
1262
0
    stream->_send_aux.blocked = QUICLY_SENDER_STATE_NONE;
1263
0
    quicly_linklist_init(&stream->_send_aux.pending_link.control);
1264
0
    quicly_linklist_init(&stream->_send_aux.pending_link.default_scheduler);
1265
1266
0
    stream->_recv_aux.window = initial_max_stream_data_local;
1267
1268
    /* Set the number of max ranges to be capable of handling following case:
1269
     * * every one of the two packets being sent are lost
1270
     * * average size of a STREAM frame found in a packet is >= ~512 bytes, or small STREAM frame is sent for every other stream
1271
     *   being opened (e.g., sending QPACK encoder/decoder stream frame for each HTTP/3 request)
1272
     * See also: the doc-comment on `_recv_aux.max_ranges`.
1273
     */
1274
0
    uint32_t fragments_minmax = (uint32_t)(stream->conn->super.ctx->transport_params.max_streams_uni +
1275
0
                                           stream->conn->super.ctx->transport_params.max_streams_bidi);
1276
0
    if (fragments_minmax < 63)
1277
0
        fragments_minmax = 63;
1278
0
    if ((stream->_recv_aux.max_ranges = initial_max_stream_data_local / 1024) < fragments_minmax)
1279
0
        stream->_recv_aux.max_ranges = fragments_minmax;
1280
0
}
1281
1282
static void dispose_stream_properties(quicly_stream_t *stream)
1283
0
{
1284
0
    quicly_sendstate_dispose(&stream->sendstate);
1285
0
    quicly_recvstate_dispose(&stream->recvstate);
1286
0
    quicly_maxsender_dispose(&stream->_send_aux.max_stream_data_sender);
1287
0
    quicly_linklist_unlink(&stream->_send_aux.pending_link.control);
1288
0
    quicly_linklist_unlink(&stream->_send_aux.pending_link.default_scheduler);
1289
0
}
1290
1291
static quicly_stream_t *open_stream(quicly_conn_t *conn, uint64_t stream_id, uint32_t initial_max_stream_data_local,
1292
                                    uint64_t initial_max_stream_data_remote)
1293
0
{
1294
0
    quicly_stream_t *stream;
1295
1296
0
    if ((stream = malloc(sizeof(*stream))) == NULL)
1297
0
        return NULL;
1298
0
    stream->conn = conn;
1299
0
    stream->stream_id = stream_id;
1300
0
    stream->callbacks = NULL;
1301
0
    stream->data = NULL;
1302
1303
0
    int r;
1304
0
    khiter_t iter = kh_put(quicly_stream_t, conn->streams, stream_id, &r);
1305
0
    assert(iter != kh_end(conn->streams));
1306
0
    kh_val(conn->streams, iter) = stream;
1307
1308
0
    init_stream_properties(stream, initial_max_stream_data_local, initial_max_stream_data_remote);
1309
1310
0
    return stream;
1311
0
}
1312
1313
static struct st_quicly_conn_streamgroup_state_t *get_streamgroup_state(quicly_conn_t *conn, quicly_stream_id_t stream_id)
1314
0
{
1315
0
    if (quicly_is_client(conn) == quicly_stream_is_client_initiated(stream_id)) {
1316
0
        return quicly_stream_is_unidirectional(stream_id) ? &conn->super.local.uni : &conn->super.local.bidi;
1317
0
    } else {
1318
0
        return quicly_stream_is_unidirectional(stream_id) ? &conn->super.remote.uni : &conn->super.remote.bidi;
1319
0
    }
1320
0
}
1321
1322
static int should_send_max_streams(quicly_conn_t *conn, int uni)
1323
0
{
1324
0
    uint64_t concurrency;
1325
0
    quicly_maxsender_t *maxsender;
1326
0
    struct st_quicly_conn_streamgroup_state_t *group;
1327
1328
0
#define INIT_VARS(type)                                                                                                            \
1329
0
    do {                                                                                                                           \
1330
0
        concurrency = conn->super.ctx->transport_params.max_streams_##type;                                                        \
1331
0
        maxsender = &conn->ingress.max_streams.type;                                                                               \
1332
0
        group = &conn->super.remote.type;                                                                                          \
1333
0
    } while (0)
1334
0
    if (uni) {
1335
0
        INIT_VARS(uni);
1336
0
    } else {
1337
0
        INIT_VARS(bidi);
1338
0
    }
1339
0
#undef INIT_VARS
1340
1341
0
    if (concurrency == 0)
1342
0
        return 0;
1343
1344
0
    if (!quicly_maxsender_should_send_max(maxsender, group->next_stream_id / 4, group->num_streams, 768))
1345
0
        return 0;
1346
1347
0
    return 1;
1348
0
}
1349
1350
static void destroy_stream(quicly_stream_t *stream, quicly_error_t err)
1351
0
{
1352
0
    quicly_conn_t *conn = stream->conn;
1353
1354
0
    QUICLY_PROBE(STREAM_ON_DESTROY, conn, conn->stash.now, stream, err);
1355
0
    QUICLY_LOG_CONN(stream_on_destroy, conn, {
1356
0
        PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
1357
0
        PTLS_LOG_ELEMENT_SIGNED(err, err);
1358
0
    });
1359
1360
0
    if (stream->callbacks != NULL)
1361
0
        stream->callbacks->on_destroy(stream, err);
1362
1363
0
    khiter_t iter = kh_get(quicly_stream_t, conn->streams, stream->stream_id);
1364
0
    assert(iter != kh_end(conn->streams));
1365
0
    kh_del(quicly_stream_t, conn->streams, iter);
1366
1367
0
    if (stream->stream_id < 0) {
1368
0
        size_t epoch = -(1 + stream->stream_id);
1369
0
        stream->conn->egress.pending_flows &= ~(uint8_t)(1 << epoch);
1370
0
    } else {
1371
0
        struct st_quicly_conn_streamgroup_state_t *group = get_streamgroup_state(conn, stream->stream_id);
1372
0
        --group->num_streams;
1373
0
    }
1374
1375
0
    dispose_stream_properties(stream);
1376
1377
0
    if (conn->application != NULL && should_send_max_streams(conn, quicly_stream_is_unidirectional(stream->stream_id)))
1378
0
        conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;
1379
1380
0
    free(stream);
1381
0
}
1382
1383
static void destroy_all_streams(quicly_conn_t *conn, quicly_error_t err, int including_crypto_streams)
1384
0
{
1385
0
    quicly_stream_t *stream;
1386
0
    kh_foreach_value(conn->streams, stream, {
1387
        /* TODO do we need to send reset signals to open streams? */
1388
0
        if (including_crypto_streams || stream->stream_id >= 0)
1389
0
            destroy_stream(stream, err);
1390
0
    });
1391
0
    assert(quicly_num_streams(conn) == 0);
1392
0
}
1393
1394
int64_t quicly_foreach_stream(quicly_conn_t *conn, void *thunk, int64_t (*cb)(void *thunk, quicly_stream_t *stream))
1395
0
{
1396
0
    quicly_stream_t *stream;
1397
0
    kh_foreach_value(conn->streams, stream, {
1398
0
        if (stream->stream_id >= 0) {
1399
0
            int64_t ret = cb(thunk, stream);
1400
0
            if (ret != 0)
1401
0
                return ret;
1402
0
        }
1403
0
    });
1404
0
    return 0;
1405
0
}
1406
1407
quicly_stream_t *quicly_get_stream(quicly_conn_t *conn, quicly_stream_id_t stream_id)
1408
0
{
1409
0
    khiter_t iter = kh_get(quicly_stream_t, conn->streams, stream_id);
1410
0
    if (iter != kh_end(conn->streams))
1411
0
        return kh_val(conn->streams, iter);
1412
0
    return NULL;
1413
0
}
1414
1415
ptls_t *quicly_get_tls(quicly_conn_t *conn)
1416
604
{
1417
604
    return conn->crypto.tls;
1418
604
}
1419
1420
uint32_t quicly_num_streams_by_group(quicly_conn_t *conn, int uni, int locally_initiated)
1421
6.68k
{
1422
6.68k
    int server_initiated = quicly_is_client(conn) != locally_initiated;
1423
6.68k
    struct st_quicly_conn_streamgroup_state_t *state = get_streamgroup_state(conn, uni * 2 + server_initiated);
1424
6.68k
    return state->num_streams;
1425
6.68k
}
1426
1427
struct sockaddr *quicly_get_sockname(quicly_conn_t *conn)
1428
0
{
1429
0
    return &conn->paths[0]->address.local.sa;
1430
0
}
1431
1432
struct sockaddr *quicly_get_peername(quicly_conn_t *conn)
1433
604
{
1434
604
    return &conn->paths[0]->address.remote.sa;
1435
604
}
1436
1437
quicly_error_t quicly_get_stats(quicly_conn_t *conn, quicly_stats_t *stats)
1438
{
1439
    /* copy the pre-built stats fields */
1440
    memcpy(stats, &conn->super.stats, sizeof(conn->super.stats));
1441
1442
    /* set or generate the non-pre-built stats fields here */
1443
    stats->rtt = conn->egress.loss.rtt;
1444
    stats->loss_thresholds = conn->egress.loss.thresholds;
1445
    stats->cc = conn->egress.cc;
1446
    /* convert `exit_slow_start_at` to time spent since the connection was created */
1447
    if (stats->cc.exit_slow_start_at != INT64_MAX) {
1448
        assert(stats->cc.exit_slow_start_at >= conn->created_at);
1449
        stats->cc.exit_slow_start_at -= conn->created_at;
1450
    }
1451
    quicly_ratemeter_report(&conn->egress.ratemeter, &stats->delivery_rate);
1452
    stats->num_sentmap_packets_largest = conn->egress.loss.sentmap.num_packets_largest;
1453
1454
    return 0;
1455
}
1456
1457
quicly_error_t quicly_get_delivery_rate(quicly_conn_t *conn, quicly_rate_t *delivery_rate)
1458
0
{
1459
0
    quicly_ratemeter_report(&conn->egress.ratemeter, delivery_rate);
1460
0
    return 0;
1461
0
}
1462
1463
quicly_stream_id_t quicly_get_ingress_max_streams(quicly_conn_t *conn, int uni)
1464
0
{
1465
0
    quicly_maxsender_t *maxsender = uni ? &conn->ingress.max_streams.uni : &conn->ingress.max_streams.bidi;
1466
0
    return maxsender->max_committed;
1467
0
}
1468
1469
void quicly_get_max_data(quicly_conn_t *conn, uint64_t *send_permitted, uint64_t *sent, uint64_t *consumed)
1470
8.11k
{
1471
8.11k
    if (send_permitted != NULL)
1472
0
        *send_permitted = conn->egress.max_data.permitted;
1473
8.11k
    if (sent != NULL)
1474
8.11k
        *sent = conn->egress.max_data.sent;
1475
8.11k
    if (consumed != NULL)
1476
0
        *consumed = conn->ingress.max_data.bytes_consumed;
1477
8.11k
}
1478
1479
static void update_idle_timeout(quicly_conn_t *conn, int is_in_receive)
1480
0
{
1481
0
    if (!is_in_receive && !conn->idle_timeout.should_rearm_on_send)
1482
0
        return;
1483
1484
    /* calculate the minimum of the two max_idle_timeout */
1485
0
    int64_t idle_msec = INT64_MAX;
1486
0
    if (conn->initial == NULL && conn->handshake == NULL && conn->super.remote.transport_params.max_idle_timeout != 0)
1487
0
        idle_msec = conn->super.remote.transport_params.max_idle_timeout;
1488
0
    if (conn->super.ctx->transport_params.max_idle_timeout != 0 && conn->super.ctx->transport_params.max_idle_timeout < idle_msec)
1489
0
        idle_msec = conn->super.ctx->transport_params.max_idle_timeout;
1490
1491
0
    if (idle_msec == INT64_MAX)
1492
0
        return;
1493
1494
0
    uint32_t three_pto = 3 * quicly_rtt_get_pto(&conn->egress.loss.rtt, conn->super.remote.transport_params.max_ack_delay,
1495
0
                                                conn->egress.loss.conf->min_pto);
1496
0
    conn->idle_timeout.at = conn->stash.now + (idle_msec > three_pto ? idle_msec : three_pto);
1497
0
    conn->idle_timeout.should_rearm_on_send = is_in_receive;
1498
0
}
1499
1500
static int scheduler_can_send(quicly_conn_t *conn)
1501
0
{
1502
    /* invoke the scheduler only when we are able to send stream data; skipping STATE_ACCEPTING is important as the application
1503
     * would not have setup data pointer. */
1504
0
    switch (conn->super.state) {
1505
0
    case QUICLY_STATE_FIRSTFLIGHT:
1506
0
    case QUICLY_STATE_CONNECTED:
1507
0
        break;
1508
0
    default:
1509
0
        return 0;
1510
0
    }
1511
1512
    /* scheduler would never have data to send, until application keys become available */
1513
0
    if (conn->application == NULL || conn->application->cipher.egress.key.aead == NULL)
1514
0
        return 0;
1515
1516
0
    int conn_is_saturated = !(conn->egress.max_data.sent < conn->egress.max_data.permitted);
1517
0
    return conn->super.ctx->stream_scheduler->can_send(conn->super.ctx->stream_scheduler, conn, conn_is_saturated);
1518
0
}
1519
1520
static void update_send_alarm(quicly_conn_t *conn, int can_send_stream_data, int is_after_send)
1521
0
{
1522
0
    int has_outstanding = conn->egress.loss.sentmap.bytes_in_flight != 0 || conn->super.remote.address_validation.send_probe,
1523
0
        handshake_is_in_progress = conn->initial != NULL || conn->handshake != NULL;
1524
0
    quicly_loss_update_alarm(&conn->egress.loss, conn->stash.now, conn->egress.last_retransmittable_sent_at, has_outstanding,
1525
0
                             can_send_stream_data, handshake_is_in_progress, conn->egress.max_data.sent, is_after_send);
1526
0
}
1527
1528
static void update_ratemeter(quicly_conn_t *conn, int is_cc_limited)
1529
0
{
1530
0
    if (quicly_ratemeter_is_cc_limited(&conn->egress.ratemeter) != is_cc_limited) {
1531
0
        if (is_cc_limited) {
1532
0
            quicly_ratemeter_enter_cc_limited(&conn->egress.ratemeter, conn->egress.packet_number);
1533
0
            QUICLY_PROBE(ENTER_CC_LIMITED, conn, conn->stash.now, conn->egress.packet_number);
1534
0
            QUICLY_LOG_CONN(enter_cc_limited, conn, { PTLS_LOG_ELEMENT_UNSIGNED(pn, conn->egress.packet_number); });
1535
0
        } else {
1536
0
            quicly_ratemeter_exit_cc_limited(&conn->egress.ratemeter, conn->egress.packet_number);
1537
0
            QUICLY_PROBE(EXIT_CC_LIMITED, conn, conn->stash.now, conn->egress.packet_number);
1538
0
            QUICLY_LOG_CONN(exit_cc_limited, conn, { PTLS_LOG_ELEMENT_UNSIGNED(pn, conn->egress.packet_number); });
1539
0
        }
1540
0
    }
1541
0
}
1542
1543
/**
1544
 * Updates the send alarm and adjusts the delivery rate estimator. This function is called from the receive path. From the sendp
1545
 * path, `update_send_alarm` is called directly.
1546
 */
1547
static void setup_next_send(quicly_conn_t *conn)
1548
0
{
1549
0
    int can_send_stream_data = scheduler_can_send(conn);
1550
1551
0
    update_send_alarm(conn, can_send_stream_data, 0);
1552
1553
    /* When the flow becomes application-limited due to receiving some information, stop collecting delivery rate samples. */
1554
0
    if (!can_send_stream_data)
1555
0
        update_ratemeter(conn, 0);
1556
0
}
1557
1558
static int create_handshake_flow(quicly_conn_t *conn, size_t epoch)
1559
0
{
1560
0
    quicly_stream_t *stream;
1561
0
    int ret;
1562
1563
0
    if ((stream = open_stream(conn, -(quicly_stream_id_t)(1 + epoch), 65536, 65536)) == NULL)
1564
0
        return PTLS_ERROR_NO_MEMORY;
1565
0
    if ((ret = quicly_streambuf_create(stream, sizeof(quicly_streambuf_t))) != 0) {
1566
0
        destroy_stream(stream, ret);
1567
0
        return ret;
1568
0
    }
1569
0
    stream->callbacks = &crypto_stream_callbacks;
1570
1571
0
    return 0;
1572
0
}
1573
1574
static void destroy_handshake_flow(quicly_conn_t *conn, size_t epoch)
1575
0
{
1576
0
    quicly_stream_t *stream = quicly_get_stream(conn, -(quicly_stream_id_t)(1 + epoch));
1577
0
    if (stream != NULL)
1578
0
        destroy_stream(stream, 0);
1579
0
}
1580
1581
static struct st_quicly_pn_space_t *alloc_pn_space(size_t sz, uint32_t packet_tolerance)
1582
0
{
1583
0
    struct st_quicly_pn_space_t *space;
1584
1585
0
    if ((space = malloc(sz)) == NULL)
1586
0
        return NULL;
1587
1588
0
    quicly_ranges_init(&space->ack_queue);
1589
0
    space->largest_pn_received_at = INT64_MAX;
1590
0
    space->next_expected_packet_number = 0;
1591
0
    space->unacked_count = 0;
1592
0
    space->prior_ecn = 0;
1593
0
    for (size_t i = 0; i < PTLS_ELEMENTSOF(space->ecn_counts); ++i)
1594
0
        space->ecn_counts[i] = 0;
1595
0
    space->packet_tolerance = packet_tolerance;
1596
0
    space->reordering_threshold = 1;
1597
0
    space->largest_acked_unacked = 0;
1598
0
    space->smallest_unreported_missing = 0;
1599
0
    if (sz != sizeof(*space))
1600
0
        memset((uint8_t *)space + sizeof(*space), 0, sz - sizeof(*space));
1601
1602
0
    return space;
1603
0
}
1604
1605
static void do_free_pn_space(struct st_quicly_pn_space_t *space)
1606
0
{
1607
0
    quicly_ranges_clear(&space->ack_queue);
1608
0
    free(space);
1609
0
}
1610
1611
static void update_smallest_unreported_missing_on_send_ack(quicly_ranges_t *ranges, uint64_t *largest_acked_unacked,
1612
                                                           uint64_t *smallest_unreported_missing, uint32_t reordering_threshold)
1613
0
{
1614
0
    assert(ranges->num_ranges != 0 && "on_send_ack is never called until the first packet is received");
1615
1616
0
    uint64_t largest_acked = ranges->ranges[ranges->num_ranges - 1].end - 1;
1617
0
    if (largest_acked <= *largest_acked_unacked)
1618
0
        return;
1619
0
    *largest_acked_unacked = largest_acked;
1620
1621
0
    if (reordering_threshold <= 1) {
1622
        /* For these cases simply set the smallest_unreported missing to the next expected PN. When reordering_threshold is 0,
1623
         * smallest_unreported_missing isn't used, but it's convenient to keep its state consistent if the threshold changes. */
1624
0
        *smallest_unreported_missing = largest_acked + 1;
1625
0
    } else {
1626
0
        uint64_t largest_pn_outside_reorder_window = largest_acked - (uint64_t)reordering_threshold;
1627
0
        if (largest_pn_outside_reorder_window >= *smallest_unreported_missing)
1628
0
            *smallest_unreported_missing = quicly_ranges_next_missing(ranges, largest_pn_outside_reorder_window + 1, NULL);
1629
0
    }
1630
0
}
1631
1632
static int change_outside_reorder_window(quicly_ranges_t *ranges, uint64_t largest_acked_unacked,
1633
                                         uint64_t *smallest_unreported_missing, uint64_t received_pn, uint32_t reordering_threshold)
1634
0
{
1635
    /* as this function is called after `record_pn`, `received_pn` will be registered */
1636
0
    assert(ranges->num_ranges != 0);
1637
0
    if (reordering_threshold == 0) {
1638
        /* We don't use this when the reordering_threshold is 0, but by
1639
         * maintaining it, we avoid having to do extra work if the
1640
         * reordering_threshold changes. */
1641
0
        *smallest_unreported_missing = largest_acked_unacked + 1;
1642
0
        return 0;
1643
0
    }
1644
1645
0
    uint64_t prev_smallest_unreported_missing = *smallest_unreported_missing;
1646
0
    size_t slots_traversed_for_next_missing = 0;
1647
1648
0
    if (received_pn == prev_smallest_unreported_missing) {
1649
0
        if (received_pn == largest_acked_unacked) {
1650
            // fast path. We received the packets in order.
1651
0
            *smallest_unreported_missing = largest_acked_unacked + 1;
1652
0
        } else {
1653
0
            *smallest_unreported_missing = quicly_ranges_next_missing(ranges, received_pn + 1, &slots_traversed_for_next_missing);
1654
0
        }
1655
0
    }
1656
1657
0
    if (largest_acked_unacked < reordering_threshold)
1658
0
        return 0;
1659
1660
0
    uint64_t largest_pn_outside_reorder_window = largest_acked_unacked - (uint64_t)reordering_threshold;
1661
1662
0
    if (*smallest_unreported_missing <= largest_pn_outside_reorder_window)
1663
0
        *smallest_unreported_missing =
1664
0
            quicly_ranges_next_missing(ranges, largest_pn_outside_reorder_window + 1, &slots_traversed_for_next_missing);
1665
1666
0
    return (prev_smallest_unreported_missing <= largest_pn_outside_reorder_window) ||
1667
0
           received_pn <= largest_pn_outside_reorder_window ||
1668
           // Send an ack if the next smallest unreported missing is past 1/4 of
1669
           // our max ranges to make sure all ack ranges get reported to the
1670
           // peer.
1671
0
           slots_traversed_for_next_missing > QUICLY_MAX_ACK_BLOCKS / 4;
1672
0
}
1673
1674
static quicly_error_t record_pn(quicly_ranges_t *ranges, uint64_t pn, int *is_out_of_order)
1675
0
{
1676
0
    quicly_error_t ret;
1677
1678
0
    *is_out_of_order = 0;
1679
1680
0
    if (ranges->num_ranges != 0) {
1681
        /* fast path that is taken when we receive a packet in-order */
1682
0
        if (ranges->ranges[ranges->num_ranges - 1].end == pn) {
1683
0
            ranges->ranges[ranges->num_ranges - 1].end = pn + 1;
1684
0
            return 0;
1685
0
        }
1686
0
        *is_out_of_order = 1;
1687
0
    }
1688
1689
    /* slow path; we add, then remove the oldest ranges when the number of ranges exceed the maximum */
1690
0
    if ((ret = quicly_ranges_add(ranges, pn, pn + 1)) != 0)
1691
0
        return ret;
1692
0
    if (ranges->num_ranges > QUICLY_MAX_ACK_BLOCKS)
1693
0
        quicly_ranges_drop_by_range_indices(ranges, ranges->num_ranges - QUICLY_MAX_ACK_BLOCKS, ranges->num_ranges);
1694
1695
0
    return 0;
1696
0
}
1697
1698
static quicly_error_t record_receipt(struct st_quicly_pn_space_t *space, uint64_t pn, uint8_t ecn, int is_ack_only,
1699
                                     int64_t received_at, int64_t *send_ack_at, uint64_t *received_out_of_order)
1700
0
{
1701
0
    int ack_now, is_out_of_order;
1702
0
    quicly_error_t ret;
1703
1704
0
    if ((ret = record_pn(&space->ack_queue, pn, &is_out_of_order)) != 0)
1705
0
        goto Exit;
1706
0
    if (is_out_of_order)
1707
0
        *received_out_of_order += 1;
1708
0
    if (!is_ack_only && space->largest_acked_unacked < pn)
1709
0
        space->largest_acked_unacked = pn;
1710
1711
0
    if (space->reordering_threshold == 1) {
1712
        // Keep previous code paths when using RFC 9000 reordering_threshold.
1713
0
        ack_now = !is_ack_only && (is_out_of_order || ecn == IPTOS_ECN_CE);
1714
0
        space->smallest_unreported_missing = space->largest_acked_unacked + 1;
1715
0
    } else {
1716
0
        ack_now = change_outside_reorder_window(&space->ack_queue, space->largest_acked_unacked,
1717
0
                                                &space->smallest_unreported_missing, pn, space->reordering_threshold);
1718
        /* Only ack a change outside the reordering window if the packet is
1719
         * ack-eliciting.
1720
         *
1721
         * Note that we must still call `change_outside_reorder_window` to maintain
1722
         * the correct `smallest_unreported_missing` value. */
1723
0
        ack_now = !is_ack_only && ack_now;
1724
        // https://datatracker.ietf.org/doc/html/draft-ietf-quic-ack-frequency-11#section-6.4-1
1725
0
        ack_now = ack_now || (ecn == IPTOS_ECN_CE && space->prior_ecn != IPTOS_ECN_CE);
1726
0
    }
1727
1728
    /* update largest_pn_received_at (TODO implement deduplication at an earlier moment?) */
1729
0
    if (space->ack_queue.ranges[space->ack_queue.num_ranges - 1].end == pn + 1)
1730
0
        space->largest_pn_received_at = received_at;
1731
1732
    /* increment ecn counters */
1733
0
    if (ecn != 0)
1734
0
        space->ecn_counts[get_ecn_index_from_bits(ecn)] += 1;
1735
1736
    /* if the received packet is ack-eliciting, update / schedule transmission of ACK */
1737
0
    if (!is_ack_only) {
1738
0
        space->unacked_count++;
1739
0
        if (space->unacked_count >= space->packet_tolerance)
1740
0
            ack_now = 1;
1741
0
    }
1742
1743
0
    if (ack_now) {
1744
0
        *send_ack_at = received_at;
1745
0
    } else if (*send_ack_at == INT64_MAX && space->unacked_count != 0) {
1746
0
        *send_ack_at = received_at + QUICLY_DELAYED_ACK_TIMEOUT;
1747
0
    }
1748
1749
0
    space->prior_ecn = ecn;
1750
0
    ret = 0;
1751
0
Exit:
1752
0
    return ret;
1753
0
}
1754
1755
static void free_handshake_space(struct st_quicly_handshake_space_t **space)
1756
0
{
1757
0
    if (*space != NULL) {
1758
0
        if ((*space)->cipher.ingress.aead != NULL)
1759
0
            dispose_cipher(&(*space)->cipher.ingress);
1760
0
        if ((*space)->cipher.egress.aead != NULL)
1761
0
            dispose_cipher(&(*space)->cipher.egress);
1762
0
        do_free_pn_space(&(*space)->super);
1763
0
        *space = NULL;
1764
0
    }
1765
0
}
1766
1767
static int setup_cipher(quicly_conn_t *conn, size_t epoch, int is_enc, ptls_cipher_context_t **hp_ctx,
1768
                        ptls_aead_context_t **aead_ctx, ptls_aead_algorithm_t *aead, ptls_hash_algorithm_t *hash,
1769
                        const void *secret)
1770
0
{
1771
    /* quicly_accept builds cipher before instantiating a connection. In such case, we use the default crypto engine */
1772
0
    quicly_crypto_engine_t *engine = conn != NULL ? conn->super.ctx->crypto_engine : &quicly_default_crypto_engine;
1773
1774
0
    return engine->setup_cipher(engine, conn, epoch, is_enc, hp_ctx, aead_ctx, aead, hash, secret);
1775
0
}
1776
1777
static int setup_handshake_space_and_flow(quicly_conn_t *conn, size_t epoch)
1778
0
{
1779
0
    struct st_quicly_handshake_space_t **space = epoch == QUICLY_EPOCH_INITIAL ? &conn->initial : &conn->handshake;
1780
0
    if ((*space = (void *)alloc_pn_space(sizeof(struct st_quicly_handshake_space_t), 1)) == NULL)
1781
0
        return PTLS_ERROR_NO_MEMORY;
1782
0
    return create_handshake_flow(conn, epoch);
1783
0
}
1784
1785
static void free_application_space(struct st_quicly_application_space_t **space)
1786
0
{
1787
0
    if (*space != NULL) {
1788
0
#define DISPOSE_INGRESS(label, func)                                                                                               \
1789
0
    if ((*space)->cipher.ingress.label != NULL)                                                                                    \
1790
0
    func((*space)->cipher.ingress.label)
1791
0
        DISPOSE_INGRESS(header_protection.zero_rtt, ptls_cipher_free);
1792
0
        DISPOSE_INGRESS(header_protection.one_rtt, ptls_cipher_free);
1793
0
        DISPOSE_INGRESS(aead[0], ptls_aead_free);
1794
0
        DISPOSE_INGRESS(aead[1], ptls_aead_free);
1795
0
#undef DISPOSE_INGRESS
1796
0
        if ((*space)->cipher.egress.key.aead != NULL)
1797
0
            dispose_cipher(&(*space)->cipher.egress.key);
1798
0
        ptls_clear_memory((*space)->cipher.egress.secret, sizeof((*space)->cipher.egress.secret));
1799
0
        do_free_pn_space(&(*space)->super);
1800
0
        *space = NULL;
1801
0
    }
1802
0
}
1803
1804
static int setup_application_space(quicly_conn_t *conn)
1805
0
{
1806
0
    if ((conn->application =
1807
0
             (void *)alloc_pn_space(sizeof(struct st_quicly_application_space_t), QUICLY_DEFAULT_PACKET_TOLERANCE)) == NULL)
1808
0
        return PTLS_ERROR_NO_MEMORY;
1809
1810
    /* prohibit key-update until receiving an ACK for an 1-RTT packet */
1811
0
    conn->application->cipher.egress.key_update_pn.last = 0;
1812
0
    conn->application->cipher.egress.key_update_pn.next = UINT64_MAX;
1813
1814
0
    return create_handshake_flow(conn, QUICLY_EPOCH_1RTT);
1815
0
}
1816
1817
static quicly_error_t discard_handshake_context(quicly_conn_t *conn, size_t epoch)
1818
0
{
1819
0
    quicly_error_t ret;
1820
1821
0
    assert(epoch == QUICLY_EPOCH_INITIAL || epoch == QUICLY_EPOCH_HANDSHAKE);
1822
1823
0
    if ((ret = discard_sentmap_by_epoch(conn, 1u << epoch)) != 0)
1824
0
        return ret;
1825
0
    destroy_handshake_flow(conn, epoch);
1826
0
    if (epoch == QUICLY_EPOCH_HANDSHAKE) {
1827
0
        assert(conn->stash.now != 0);
1828
0
        conn->super.stats.handshake_confirmed_msec = conn->stash.now - conn->created_at;
1829
0
    }
1830
0
    free_handshake_space(epoch == QUICLY_EPOCH_INITIAL ? &conn->initial : &conn->handshake);
1831
1832
0
    return 0;
1833
0
}
1834
1835
static quicly_error_t apply_remote_transport_params(quicly_conn_t *conn)
1836
0
{
1837
0
    quicly_error_t ret;
1838
1839
0
    conn->egress.max_data.permitted = conn->super.remote.transport_params.max_data;
1840
0
    if ((ret = update_max_streams(&conn->egress.max_streams.uni, conn->super.remote.transport_params.max_streams_uni)) != 0)
1841
0
        return ret;
1842
0
    if ((ret = update_max_streams(&conn->egress.max_streams.bidi, conn->super.remote.transport_params.max_streams_bidi)) != 0)
1843
0
        return ret;
1844
1845
0
    return 0;
1846
0
}
1847
1848
static int update_1rtt_key(quicly_conn_t *conn, ptls_cipher_suite_t *cipher, int is_enc, ptls_aead_context_t **aead,
1849
                           uint8_t *secret)
1850
0
{
1851
0
    uint8_t new_secret[PTLS_MAX_DIGEST_SIZE];
1852
0
    ptls_aead_context_t *new_aead = NULL;
1853
0
    int ret;
1854
1855
    /* generate next AEAD key */
1856
0
    if ((ret = ptls_hkdf_expand_label(cipher->hash, new_secret, cipher->hash->digest_size,
1857
0
                                      ptls_iovec_init(secret, cipher->hash->digest_size), "quic ku", ptls_iovec_init(NULL, 0),
1858
0
                                      NULL)) != 0)
1859
0
        goto Exit;
1860
0
    if ((ret = setup_cipher(conn, QUICLY_EPOCH_1RTT, is_enc, NULL, &new_aead, cipher->aead, cipher->hash, new_secret)) != 0)
1861
0
        goto Exit;
1862
1863
    /* success! update AEAD and secret */
1864
0
    if (*aead != NULL)
1865
0
        ptls_aead_free(*aead);
1866
0
    *aead = new_aead;
1867
0
    new_aead = NULL;
1868
0
    memcpy(secret, new_secret, cipher->hash->digest_size);
1869
1870
0
    ret = 0;
1871
0
Exit:
1872
0
    if (new_aead != NULL)
1873
0
        ptls_aead_free(new_aead);
1874
0
    ptls_clear_memory(new_secret, cipher->hash->digest_size);
1875
0
    return ret;
1876
0
}
1877
1878
static int update_1rtt_egress_key(quicly_conn_t *conn)
1879
0
{
1880
0
    struct st_quicly_application_space_t *space = conn->application;
1881
0
    ptls_cipher_suite_t *cipher = ptls_get_cipher(conn->crypto.tls);
1882
0
    int ret;
1883
1884
    /* generate next AEAD key, and increment key phase if it succeeds */
1885
0
    if ((ret = update_1rtt_key(conn, cipher, 1, &space->cipher.egress.key.aead, space->cipher.egress.secret)) != 0)
1886
0
        return ret;
1887
0
    ++space->cipher.egress.key_phase;
1888
1889
    /* signal that we are waiting for an ACK */
1890
0
    space->cipher.egress.key_update_pn.last = conn->egress.packet_number;
1891
0
    space->cipher.egress.key_update_pn.next = UINT64_MAX;
1892
1893
0
    QUICLY_PROBE(CRYPTO_SEND_KEY_UPDATE, conn, conn->stash.now, space->cipher.egress.key_phase,
1894
0
                 QUICLY_PROBE_HEXDUMP(space->cipher.egress.secret, cipher->hash->digest_size));
1895
0
    QUICLY_LOG_CONN(crypto_send_key_update, conn, {
1896
0
        PTLS_LOG_ELEMENT_UNSIGNED(phase, space->cipher.egress.key_phase);
1897
0
        PTLS_LOG_APPDATA_ELEMENT_HEXDUMP(secret, space->cipher.egress.secret, cipher->hash->digest_size);
1898
0
    });
1899
1900
0
    return 0;
1901
0
}
1902
1903
static int received_key_update(quicly_conn_t *conn, uint64_t newly_decrypted_key_phase)
1904
0
{
1905
0
    struct st_quicly_application_space_t *space = conn->application;
1906
1907
0
    assert(space->cipher.ingress.key_phase.decrypted < newly_decrypted_key_phase);
1908
0
    assert(newly_decrypted_key_phase <= space->cipher.ingress.key_phase.prepared);
1909
1910
0
    space->cipher.ingress.key_phase.decrypted = newly_decrypted_key_phase;
1911
1912
0
    QUICLY_PROBE(CRYPTO_RECEIVE_KEY_UPDATE, conn, conn->stash.now, space->cipher.ingress.key_phase.decrypted,
1913
0
                 QUICLY_PROBE_HEXDUMP(space->cipher.ingress.secret, ptls_get_cipher(conn->crypto.tls)->hash->digest_size));
1914
0
    QUICLY_LOG_CONN(crypto_receive_key_update, conn, {
1915
0
        PTLS_LOG_ELEMENT_UNSIGNED(phase, space->cipher.ingress.key_phase.decrypted);
1916
0
        PTLS_LOG_APPDATA_ELEMENT_HEXDUMP(secret, space->cipher.ingress.secret,
1917
0
                                         ptls_get_cipher(conn->crypto.tls)->hash->digest_size);
1918
0
    });
1919
1920
0
    if (space->cipher.egress.key_phase < space->cipher.ingress.key_phase.decrypted) {
1921
0
        return update_1rtt_egress_key(conn);
1922
0
    } else {
1923
0
        return 0;
1924
0
    }
1925
0
}
1926
1927
static void calc_resume_sendrate(quicly_conn_t *conn, uint64_t *rate, uint32_t *rtt)
1928
0
{
1929
0
    quicly_rate_t reported;
1930
1931
0
    quicly_ratemeter_report(&conn->egress.ratemeter, &reported);
1932
1933
0
    if (reported.smoothed != 0 || reported.latest != 0) {
1934
0
        *rate = reported.smoothed > reported.latest ? reported.smoothed : reported.latest;
1935
0
        *rtt = conn->egress.loss.rtt.minimum;
1936
0
    } else {
1937
0
        *rate = 0;
1938
0
        *rtt = 0;
1939
0
    }
1940
0
}
1941
1942
static inline void update_open_count(quicly_context_t *ctx, ssize_t delta)
1943
0
{
1944
0
    if (ctx->update_open_count != NULL)
1945
0
        ctx->update_open_count->cb(ctx->update_open_count, delta);
1946
0
}
1947
1948
0
#define LONGEST_ADDRESS_STR "[0000:1111:2222:3333:4444:5555:6666:7777]:12345"
1949
static void stringify_address(char *buf, struct sockaddr *sa)
1950
0
{
1951
0
    char *p = buf;
1952
0
    uint16_t port = 0;
1953
1954
0
    p = buf;
1955
0
    switch (sa->sa_family) {
1956
0
    case AF_INET:
1957
0
        inet_ntop(AF_INET, &((struct sockaddr_in *)sa)->sin_addr, p, sizeof(LONGEST_ADDRESS_STR));
1958
0
        p += strlen(p);
1959
0
        port = ntohs(((struct sockaddr_in *)sa)->sin_port);
1960
0
        break;
1961
0
    case AF_INET6:
1962
0
        *p++ = '[';
1963
0
        inet_ntop(AF_INET6, &((struct sockaddr_in6 *)sa)->sin6_addr, p, sizeof(LONGEST_ADDRESS_STR));
1964
0
        *p++ = ']';
1965
0
        port = ntohs(((struct sockaddr_in *)sa)->sin_port);
1966
0
        break;
1967
0
    default:
1968
0
        assert("unexpected address family");
1969
0
        break;
1970
0
    }
1971
1972
0
    *p++ = ':';
1973
0
    sprintf(p, "%" PRIu16, port);
1974
0
}
1975
1976
static int new_path(quicly_conn_t *conn, size_t path_index, struct sockaddr *remote_addr, struct sockaddr *local_addr)
1977
0
{
1978
0
    struct st_quicly_conn_path_t *path;
1979
1980
0
    assert(conn->paths[path_index] == NULL);
1981
1982
0
    if ((path = malloc(sizeof(*conn->paths[path_index]))) == NULL)
1983
0
        return PTLS_ERROR_NO_MEMORY;
1984
1985
0
    if (path_index == 0) {
1986
        /* default path used for handshake */
1987
0
        *path = (struct st_quicly_conn_path_t){
1988
0
            .dcid = 0,
1989
0
            .path_challenge.send_at = INT64_MAX,
1990
0
            .initial = 1,
1991
0
            .probe_only = 0,
1992
0
        };
1993
0
    } else {
1994
0
        *path = (struct st_quicly_conn_path_t){
1995
0
            .dcid = UINT64_MAX,
1996
0
            .path_challenge.send_at = 0,
1997
0
            .probe_only = 1,
1998
0
        };
1999
0
        conn->super.ctx->tls->random_bytes(path->path_challenge.data, sizeof(path->path_challenge.data));
2000
0
        conn->super.stats.num_paths.created += 1;
2001
0
    }
2002
0
    set_address(&path->address.remote, remote_addr);
2003
0
    set_address(&path->address.local, local_addr);
2004
2005
0
    conn->paths[path_index] = path;
2006
2007
0
    PTLS_LOG_DEFINE_POINT(quicly, new_path, new_path_logpoint);
2008
0
    if (QUICLY_PROBE_ENABLED(NEW_PATH) ||
2009
0
        (ptls_log_point_maybe_active(&new_path_logpoint) & ptls_log_conn_maybe_active(ptls_get_log_state(conn->crypto.tls),
2010
0
                                                                                      (const char *(*)(void *))ptls_get_server_name,
2011
0
                                                                                      conn->crypto.tls)) != 0) {
2012
0
        char remote[sizeof(LONGEST_ADDRESS_STR)];
2013
0
        stringify_address(remote, &path->address.remote.sa);
2014
0
        QUICLY_PROBE(NEW_PATH, conn, conn->stash.now, path_index, remote);
2015
0
        QUICLY_LOG_CONN(new_path, conn, {
2016
0
            PTLS_LOG_ELEMENT_UNSIGNED(path_index, path_index);
2017
0
            PTLS_LOG_ELEMENT_SAFESTR(remote, remote);
2018
0
        });
2019
0
    }
2020
2021
0
    return 0;
2022
0
}
2023
2024
static int do_delete_path(quicly_conn_t *conn, struct st_quicly_conn_path_t *path)
2025
0
{
2026
0
    int ret = 0;
2027
2028
0
    if (path->dcid != UINT64_MAX && conn->super.remote.cid_set.cids[0].cid.len != 0) {
2029
0
        uint64_t cid = path->dcid;
2030
0
        dissociate_cid(conn, cid);
2031
0
        ret = quicly_remote_cid_unregister(&conn->super.remote.cid_set, cid);
2032
0
        assert(conn->super.remote.cid_set.retired.count != 0);
2033
0
        conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;
2034
0
    }
2035
2036
0
    free(path);
2037
2038
0
    return ret;
2039
0
}
2040
2041
static int delete_path(quicly_conn_t *conn, size_t path_index)
2042
0
{
2043
0
    QUICLY_PROBE(DELETE_PATH, conn, conn->stash.now, path_index);
2044
0
    QUICLY_LOG_CONN(delete_path, conn, { PTLS_LOG_ELEMENT_UNSIGNED(path_index, path_index); });
2045
2046
0
    struct st_quicly_conn_path_t *path = conn->paths[path_index];
2047
0
    conn->paths[path_index] = NULL;
2048
0
    if (path->path_challenge.send_at != INT64_MAX)
2049
0
        conn->super.stats.num_paths.validation_failed += 1;
2050
2051
0
    return do_delete_path(conn, path);
2052
0
}
2053
2054
/**
2055
 * paths[0] (the default path) is freed and the path specified by `path_index` is promoted
2056
 */
2057
static quicly_error_t promote_path(quicly_conn_t *conn, size_t path_index)
2058
0
{
2059
0
    quicly_error_t ret;
2060
2061
0
    QUICLY_PROBE(PROMOTE_PATH, conn, conn->stash.now, path_index);
2062
0
    QUICLY_LOG_CONN(promote_path, conn, { PTLS_LOG_ELEMENT_UNSIGNED(path_index, path_index); });
2063
2064
0
    { /* mark all packets as lost, as it is unlikely that packets sent on the old path would be acknowledged */
2065
0
        quicly_sentmap_iter_t iter;
2066
0
        if ((ret = quicly_loss_init_sentmap_iter(&conn->egress.loss, &iter, conn->stash.now,
2067
0
                                                 conn->super.remote.transport_params.max_ack_delay, 0)) != 0)
2068
0
            return ret;
2069
0
        const quicly_sent_packet_t *sent;
2070
0
        while ((sent = quicly_sentmap_get(&iter))->packet_number != UINT64_MAX) {
2071
0
            if ((ret = quicly_sentmap_update(&conn->egress.loss.sentmap, &iter, QUICLY_SENTMAP_EVENT_PTO)) != 0)
2072
0
                return ret;
2073
0
        }
2074
0
    }
2075
2076
    /* reset CC (FIXME flush sentmap and reset loss recovery) */
2077
0
    conn->egress.cc.type->cc_init->cb(
2078
0
        conn->egress.cc.type->cc_init, &conn->egress.cc,
2079
0
        quicly_cc_calc_initial_cwnd(conn->super.ctx->initcwnd_packets, conn->egress.max_udp_payload_size), conn->stash.now);
2080
0
    if (conn->super.stats.num_rapid_start != 0 && conn->egress.cc.type->enable_rapid_start != NULL)
2081
0
        conn->egress.cc.type->enable_rapid_start(&conn->egress.cc, conn->stash.now);
2082
2083
    /* set jumpstart target */
2084
0
    calc_resume_sendrate(conn, &conn->super.stats.jumpstart.prev_rate, &conn->super.stats.jumpstart.prev_rtt);
2085
2086
    /* reset RTT estimate, adopting SRTT of the original path as initial RTT (TODO calculate RTT based on path challenge RT) */
2087
0
    quicly_rtt_init(&conn->egress.loss.rtt, &conn->super.ctx->loss,
2088
0
                    conn->egress.loss.rtt.smoothed < conn->super.ctx->loss.default_initial_rtt
2089
0
                        ? conn->egress.loss.rtt.smoothed
2090
0
                        : conn->super.ctx->loss.default_initial_rtt);
2091
2092
    /* reset ratemeter */
2093
0
    quicly_ratemeter_init(&conn->egress.ratemeter);
2094
2095
    /* remember PN when the path was promoted */
2096
0
    conn->egress.pn_path_start = conn->egress.packet_number;
2097
2098
    /* update path mapping */
2099
0
    struct st_quicly_conn_path_t *path = conn->paths[0];
2100
0
    conn->paths[0] = conn->paths[path_index];
2101
0
    conn->paths[path_index] = NULL;
2102
0
    conn->super.stats.num_paths.promoted += 1;
2103
2104
0
    ret = do_delete_path(conn, path);
2105
2106
    /* rearm the loss timer, now that the RTT estimate has been changed */
2107
0
    setup_next_send(conn);
2108
2109
0
    return ret;
2110
0
}
2111
2112
static int open_path(quicly_conn_t *conn, size_t *path_index, struct sockaddr *remote_addr, struct sockaddr *local_addr)
2113
0
{
2114
0
    int ret;
2115
2116
    /* choose a slot that in unused or the least-recently-used one that has completed validation */
2117
0
    *path_index = SIZE_MAX;
2118
0
    for (size_t i = 1; i < PTLS_ELEMENTSOF(conn->paths); ++i) {
2119
0
        struct st_quicly_conn_path_t *p = conn->paths[i];
2120
0
        if (p == NULL) {
2121
0
            *path_index = i;
2122
0
            break;
2123
0
        }
2124
0
        if (p->path_challenge.send_at != INT64_MAX)
2125
0
            continue;
2126
0
        if (*path_index == SIZE_MAX || p->packet_last_received < conn->paths[*path_index]->packet_last_received)
2127
0
            *path_index = i;
2128
0
    }
2129
0
    if (*path_index == SIZE_MAX)
2130
0
        return QUICLY_ERROR_PACKET_IGNORED;
2131
2132
    /* free existing path info */
2133
0
    if (conn->paths[*path_index] != NULL && (ret = delete_path(conn, *path_index)) != 0)
2134
0
        return ret;
2135
2136
    /* initialize new path info */
2137
0
    if ((ret = new_path(conn, *path_index, remote_addr, local_addr)) != 0)
2138
0
        return ret;
2139
2140
    /* schedule emission of PATH_CHALLENGE */
2141
0
    conn->egress.send_probe_at = 0;
2142
2143
0
    return 0;
2144
0
}
2145
2146
static void recalc_send_probe_at(quicly_conn_t *conn)
2147
0
{
2148
0
    conn->egress.send_probe_at = INT64_MAX;
2149
2150
0
    for (size_t i = 0; i < PTLS_ELEMENTSOF(conn->paths); ++i) {
2151
0
        if (conn->paths[i] == NULL)
2152
0
            continue;
2153
0
        if (conn->egress.send_probe_at > conn->paths[i]->path_challenge.send_at)
2154
0
            conn->egress.send_probe_at = conn->paths[i]->path_challenge.send_at;
2155
0
        if (conn->paths[i]->path_response.send_) {
2156
0
            conn->egress.send_probe_at = 0;
2157
0
            break;
2158
0
        }
2159
0
    }
2160
0
}
2161
2162
void quicly_free(quicly_conn_t *conn)
2163
{
2164
    lock_now(conn, 0);
2165
2166
    QUICLY_PROBE(FREE, conn, conn->stash.now);
2167
    QUICLY_LOG_CONN(free, conn, {});
2168
2169
    PTLS_LOG_DEFINE_POINT(quicly, conn_stats, conn_stats_logpoint);
2170
    if (QUICLY_PROBE_ENABLED(CONN_STATS) ||
2171
        (ptls_log_point_maybe_active(&conn_stats_logpoint) &
2172
         ptls_log_conn_maybe_active(ptls_get_log_state(conn->crypto.tls), (const char *(*)(void *))ptls_get_server_name,
2173
                                    conn->crypto.tls)) != 0) {
2174
        quicly_stats_t stats;
2175
        if (quicly_get_stats(conn, &stats) == 0) {
2176
            QUICLY_PROBE(CONN_STATS, conn, conn->stash.now, &stats, sizeof(stats));
2177
#define EMIT_FIELD(fld, lit) PTLS_LOG__DO_ELEMENT_UNSIGNED(lit, stats.fld);
2178
            QUICLY_LOG_CONN(conn_stats, conn, { QUICLY_STATS_FOREACH(EMIT_FIELD); });
2179
#undef EMIT_FIELD
2180
        }
2181
    }
2182
2183
    destroy_all_streams(conn, 0, 1);
2184
    update_open_count(conn->super.ctx, -1);
2185
    clear_datagram_frame_payloads(conn);
2186
2187
    for (size_t i = 0; i != PTLS_ELEMENTSOF(conn->delayed_packets.as_array); ++i) {
2188
        while (conn->delayed_packets.as_array[i].head != NULL) {
2189
            struct st_quicly_delayed_packet_t *delayed = conn->delayed_packets.as_array[i].head;
2190
            conn->delayed_packets.as_array[i].head = delayed->next;
2191
            free(delayed);
2192
        }
2193
    }
2194
2195
    quicly_maxsender_dispose(&conn->ingress.max_data.sender);
2196
    quicly_maxsender_dispose(&conn->ingress.max_streams.uni);
2197
    quicly_maxsender_dispose(&conn->ingress.max_streams.bidi);
2198
    quicly_loss_dispose(&conn->egress.loss);
2199
2200
    kh_destroy(quicly_stream_t, conn->streams);
2201
2202
    assert(!quicly_linklist_is_linked(&conn->egress.pending_streams.blocked.uni));
2203
    assert(!quicly_linklist_is_linked(&conn->egress.pending_streams.blocked.bidi));
2204
    assert(!quicly_linklist_is_linked(&conn->egress.pending_streams.control));
2205
    assert(!quicly_linklist_is_linked(&conn->super._default_scheduler.active));
2206
    assert(!quicly_linklist_is_linked(&conn->super._default_scheduler.blocked));
2207
2208
    free_handshake_space(&conn->initial);
2209
    free_handshake_space(&conn->handshake);
2210
    free_application_space(&conn->application);
2211
2212
    ptls_buffer_dispose(&conn->crypto.transport_params.buf);
2213
2214
    for (size_t i = 0; i < PTLS_ELEMENTSOF(conn->paths); ++i) {
2215
        if (conn->paths[i] != NULL)
2216
            delete_path(conn, i);
2217
    }
2218
2219
    /* `crytpo.tls` is disposed late, because logging relies on `ptls_skip_tracing` */
2220
    if (conn->crypto.async_in_progress) {
2221
        /* When async signature generation is inflight, `ptls_free` will be called from `quicly_resume_handshake` laterwards. */
2222
        *ptls_get_data_ptr(conn->crypto.tls) = NULL;
2223
    } else {
2224
        ptls_free(conn->crypto.tls);
2225
    }
2226
2227
    unlock_now(conn);
2228
2229
    if (conn->egress.pacer != NULL)
2230
        free(conn->egress.pacer);
2231
    free(conn->token.base);
2232
    free(conn);
2233
}
2234
2235
static int calc_initial_key(ptls_cipher_suite_t *cs, uint8_t *traffic_secret, const void *master_secret, const char *label)
2236
0
{
2237
0
    return ptls_hkdf_expand_label(cs->hash, traffic_secret, cs->hash->digest_size,
2238
0
                                  ptls_iovec_init(master_secret, cs->hash->digest_size), label, ptls_iovec_init(NULL, 0), NULL);
2239
0
}
2240
2241
int quicly_calc_initial_keys(ptls_cipher_suite_t *cs, uint8_t *ingress, uint8_t *egress, ptls_iovec_t cid, int is_client,
2242
                             ptls_iovec_t salt)
2243
0
{
2244
0
    static const char *labels[2] = {"client in", "server in"};
2245
0
    uint8_t master_secret[PTLS_MAX_DIGEST_SIZE];
2246
0
    int ret;
2247
2248
    /* extract master secret */
2249
0
    if ((ret = ptls_hkdf_extract(cs->hash, master_secret, salt, cid)) != 0)
2250
0
        goto Exit;
2251
2252
    /* calc secrets */
2253
0
    if (ingress != NULL && (ret = calc_initial_key(cs, ingress, master_secret, labels[is_client])) != 0)
2254
0
        goto Exit;
2255
0
    if (egress != NULL && (ret = calc_initial_key(cs, egress, master_secret, labels[!is_client])) != 0)
2256
0
        goto Exit;
2257
2258
0
Exit:
2259
0
    ptls_clear_memory(master_secret, sizeof(master_secret));
2260
0
    return ret;
2261
0
}
2262
2263
/**
2264
 * @param conn maybe NULL when called by quicly_accept
2265
 */
2266
static int setup_initial_encryption(ptls_cipher_suite_t *cs, struct st_quicly_cipher_context_t *ingress,
2267
                                    struct st_quicly_cipher_context_t *egress, ptls_iovec_t cid, int is_client, ptls_iovec_t salt,
2268
                                    quicly_conn_t *conn)
2269
0
{
2270
0
    struct {
2271
0
        uint8_t ingress[PTLS_MAX_DIGEST_SIZE];
2272
0
        uint8_t egress[PTLS_MAX_DIGEST_SIZE];
2273
0
    } secrets;
2274
0
    int ret;
2275
2276
0
    if ((ret = quicly_calc_initial_keys(cs, ingress != NULL ? secrets.ingress : NULL, egress != NULL ? secrets.egress : NULL, cid,
2277
0
                                        is_client, salt)) != 0)
2278
0
        goto Exit;
2279
2280
0
    if (ingress != NULL && (ret = setup_cipher(conn, QUICLY_EPOCH_INITIAL, 0, &ingress->header_protection, &ingress->aead, cs->aead,
2281
0
                                               cs->hash, secrets.ingress)) != 0)
2282
0
        goto Exit;
2283
0
    if (egress != NULL && (ret = setup_cipher(conn, QUICLY_EPOCH_INITIAL, 1, &egress->header_protection, &egress->aead, cs->aead,
2284
0
                                              cs->hash, secrets.egress)) != 0)
2285
0
        goto Exit;
2286
2287
0
Exit:
2288
0
    ptls_clear_memory(&secrets, sizeof(secrets));
2289
0
    return ret;
2290
0
}
2291
2292
static quicly_error_t reinstall_initial_encryption(quicly_conn_t *conn, quicly_error_t err_code_if_unknown_version)
2293
0
{
2294
0
    const quicly_salt_t *salt;
2295
2296
    /* get salt */
2297
0
    if ((salt = quicly_get_salt(conn->super.version)) == NULL)
2298
0
        return err_code_if_unknown_version;
2299
2300
    /* dispose existing context */
2301
0
    dispose_cipher(&conn->initial->cipher.ingress);
2302
0
    dispose_cipher(&conn->initial->cipher.egress);
2303
2304
    /* setup encryption context */
2305
0
    return setup_initial_encryption(
2306
0
        get_aes128gcmsha256(conn->super.ctx), &conn->initial->cipher.ingress, &conn->initial->cipher.egress,
2307
0
        ptls_iovec_init(conn->super.remote.cid_set.cids[0].cid.cid, conn->super.remote.cid_set.cids[0].cid.len), 1,
2308
0
        ptls_iovec_init(salt->initial, sizeof(salt->initial)), NULL);
2309
0
}
2310
2311
static quicly_error_t apply_stream_frame(quicly_stream_t *stream, quicly_stream_frame_t *frame)
2312
0
{
2313
0
    quicly_error_t ret;
2314
2315
0
    QUICLY_PROBE(STREAM_RECEIVE, stream->conn, stream->conn->stash.now, stream, frame->offset, frame->data.base, frame->data.len,
2316
0
                 (int)frame->is_fin);
2317
0
    QUICLY_LOG_CONN(stream_receive, stream->conn, {
2318
0
        PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
2319
0
        PTLS_LOG_ELEMENT_UNSIGNED(off, frame->offset);
2320
0
        PTLS_LOG_APPDATA_ELEMENT_HEXDUMP(data, frame->data.base, frame->data.len);
2321
0
        PTLS_LOG_ELEMENT_BOOL(is_fin, frame->is_fin);
2322
0
    });
2323
2324
0
    if (quicly_recvstate_transfer_complete(&stream->recvstate))
2325
0
        return 0;
2326
2327
    /* flow control */
2328
0
    if (stream->stream_id >= 0) {
2329
        /* STREAMs */
2330
0
        uint64_t max_stream_data = frame->offset + frame->data.len;
2331
0
        if ((int64_t)stream->_recv_aux.window < (int64_t)max_stream_data - (int64_t)stream->recvstate.data_off)
2332
0
            return QUICLY_TRANSPORT_ERROR_FLOW_CONTROL;
2333
0
        if (stream->recvstate.received.ranges[stream->recvstate.received.num_ranges - 1].end < max_stream_data) {
2334
0
            uint64_t newly_received =
2335
0
                max_stream_data - stream->recvstate.received.ranges[stream->recvstate.received.num_ranges - 1].end;
2336
0
            if (stream->conn->ingress.max_data.bytes_consumed + newly_received >
2337
0
                stream->conn->ingress.max_data.sender.max_committed)
2338
0
                return QUICLY_TRANSPORT_ERROR_FLOW_CONTROL;
2339
0
            stream->conn->ingress.max_data.bytes_consumed += newly_received;
2340
            /* FIXME send MAX_DATA if necessary */
2341
0
        }
2342
0
    } else {
2343
        /* CRYPTO streams; maybe add different limit for 1-RTT CRYPTO? */
2344
0
        if (frame->offset + frame->data.len > stream->conn->super.ctx->max_crypto_bytes)
2345
0
            return QUICLY_TRANSPORT_ERROR_CRYPTO_BUFFER_EXCEEDED;
2346
0
    }
2347
2348
    /* update recvbuf */
2349
0
    size_t apply_len = frame->data.len;
2350
0
    if ((ret = quicly_recvstate_update(&stream->recvstate, frame->offset, &apply_len, frame->is_fin,
2351
0
                                       stream->_recv_aux.max_ranges)) != 0)
2352
0
        return ret;
2353
2354
0
    if (apply_len != 0 || quicly_recvstate_transfer_complete(&stream->recvstate)) {
2355
0
        uint64_t buf_offset = frame->offset + frame->data.len - apply_len - stream->recvstate.data_off;
2356
0
        size_t apply_off = frame->data.len - apply_len;
2357
0
        QUICLY_PROBE(STREAM_ON_RECEIVE, stream->conn, stream->conn->stash.now, stream, (size_t)buf_offset, apply_off, apply_len);
2358
0
        QUICLY_LOG_CONN(stream_on_receive, stream->conn, {
2359
0
            PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
2360
0
            PTLS_LOG_ELEMENT_UNSIGNED(buf_off, buf_offset);
2361
0
            PTLS_LOG_ELEMENT_UNSIGNED(apply_off, apply_off);
2362
0
            PTLS_LOG_ELEMENT_UNSIGNED(apply_len, apply_len);
2363
0
        });
2364
0
        stream->callbacks->on_receive(stream, (size_t)buf_offset, frame->data.base + apply_off, apply_len);
2365
0
        if (stream->conn->super.state >= QUICLY_STATE_CLOSING)
2366
0
            return QUICLY_ERROR_IS_CLOSING;
2367
0
    }
2368
2369
0
    if (should_send_max_stream_data(stream))
2370
0
        sched_stream_control(stream);
2371
2372
0
    if (stream_is_destroyable(stream))
2373
0
        destroy_stream(stream, 0);
2374
2375
0
    return 0;
2376
0
}
2377
2378
int quicly_encode_transport_parameter_list(ptls_buffer_t *buf, const quicly_transport_parameters_t *params,
2379
                                           const quicly_cid_t *original_dcid, const quicly_cid_t *initial_scid,
2380
                                           const quicly_cid_t *retry_scid, const void *stateless_reset_token, size_t expand_by)
2381
0
{
2382
0
    int ret;
2383
2384
0
#define PUSH_TP(buf, id, block)                                                                                                    \
2385
0
    do {                                                                                                                           \
2386
0
        ptls_buffer_push_quicint((buf), (id));                                                                                     \
2387
0
        ptls_buffer_push_block((buf), -1, block);                                                                                  \
2388
0
    } while (0)
2389
2390
0
    PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_MAX_UDP_PAYLOAD_SIZE,
2391
0
            { ptls_buffer_push_quicint(buf, params->max_udp_payload_size); });
2392
0
    if (params->max_stream_data.bidi_local != 0)
2393
0
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL,
2394
0
                { ptls_buffer_push_quicint(buf, params->max_stream_data.bidi_local); });
2395
0
    if (params->max_stream_data.bidi_remote != 0)
2396
0
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE,
2397
0
                { ptls_buffer_push_quicint(buf, params->max_stream_data.bidi_remote); });
2398
0
    if (params->max_stream_data.uni != 0)
2399
0
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_UNI,
2400
0
                { ptls_buffer_push_quicint(buf, params->max_stream_data.uni); });
2401
0
    if (params->max_data != 0)
2402
0
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_DATA, { ptls_buffer_push_quicint(buf, params->max_data); });
2403
0
    if (params->max_idle_timeout != 0)
2404
0
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_MAX_IDLE_TIMEOUT, { ptls_buffer_push_quicint(buf, params->max_idle_timeout); });
2405
0
    if (original_dcid != NULL)
2406
0
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_ORIGINAL_CONNECTION_ID,
2407
0
                { ptls_buffer_pushv(buf, original_dcid->cid, original_dcid->len); });
2408
0
    if (initial_scid != NULL)
2409
0
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_SOURCE_CONNECTION_ID,
2410
0
                { ptls_buffer_pushv(buf, initial_scid->cid, initial_scid->len); });
2411
0
    if (retry_scid != NULL)
2412
0
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_RETRY_SOURCE_CONNECTION_ID,
2413
0
                { ptls_buffer_pushv(buf, retry_scid->cid, retry_scid->len); });
2414
0
    if (stateless_reset_token != NULL)
2415
0
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_STATELESS_RESET_TOKEN,
2416
0
                { ptls_buffer_pushv(buf, stateless_reset_token, QUICLY_STATELESS_RESET_TOKEN_LEN); });
2417
0
    if (params->max_streams_bidi != 0)
2418
0
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_BIDI,
2419
0
                { ptls_buffer_push_quicint(buf, params->max_streams_bidi); });
2420
0
    if (params->max_streams_uni != 0)
2421
0
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_UNI,
2422
0
                { ptls_buffer_push_quicint(buf, params->max_streams_uni); });
2423
0
    if (QUICLY_LOCAL_ACK_DELAY_EXPONENT != QUICLY_DEFAULT_ACK_DELAY_EXPONENT)
2424
0
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_ACK_DELAY_EXPONENT,
2425
0
                { ptls_buffer_push_quicint(buf, QUICLY_LOCAL_ACK_DELAY_EXPONENT); });
2426
0
    if (QUICLY_LOCAL_MAX_ACK_DELAY != QUICLY_DEFAULT_MAX_ACK_DELAY)
2427
0
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_MAX_ACK_DELAY, { ptls_buffer_push_quicint(buf, QUICLY_LOCAL_MAX_ACK_DELAY); });
2428
0
    if (params->min_ack_delay_usec != UINT64_MAX) {
2429
        /* TODO consider the value we should advertise. */
2430
0
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_MIN_ACK_DELAY,
2431
0
                { ptls_buffer_push_quicint(buf, QUICLY_LOCAL_MAX_ACK_DELAY * 1000 /* in microseconds */); });
2432
0
    }
2433
0
    if (params->disable_active_migration)
2434
0
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_DISABLE_ACTIVE_MIGRATION, {});
2435
0
    if (QUICLY_LOCAL_ACTIVE_CONNECTION_ID_LIMIT != QUICLY_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT)
2436
0
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_ACTIVE_CONNECTION_ID_LIMIT,
2437
0
                { ptls_buffer_push_quicint(buf, QUICLY_LOCAL_ACTIVE_CONNECTION_ID_LIMIT); });
2438
0
    if (params->max_datagram_frame_size != 0)
2439
0
        PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_MAX_DATAGRAM_FRAME_SIZE,
2440
0
                { ptls_buffer_push_quicint(buf, params->max_datagram_frame_size); });
2441
    /* if requested, add a greasing TP of 1 MTU size so that CH spans across multiple packets */
2442
0
    if (expand_by != 0) {
2443
0
        PUSH_TP(buf, 31 * 100 + 27, {
2444
0
            if ((ret = ptls_buffer_reserve(buf, expand_by)) != 0)
2445
0
                goto Exit;
2446
0
            memset(buf->base + buf->off, 0, expand_by);
2447
0
            buf->off += expand_by;
2448
0
        });
2449
0
    }
2450
2451
0
#undef PUSH_TP
2452
2453
0
    ret = 0;
2454
0
Exit:
2455
0
    return ret;
2456
0
}
2457
2458
/**
2459
 * sentinel used for indicating that the corresponding TP should be ignored
2460
 */
2461
static const quicly_cid_t _tp_cid_ignore;
2462
0
#define tp_cid_ignore (*(quicly_cid_t *)&_tp_cid_ignore)
2463
2464
quicly_error_t quicly_decode_transport_parameter_list(quicly_transport_parameters_t *params, quicly_cid_t *original_dcid,
2465
                                                      quicly_cid_t *initial_scid, quicly_cid_t *retry_scid,
2466
                                                      void *stateless_reset_token, const uint8_t *src, const uint8_t *end)
2467
0
{
2468
/* When non-negative, tp_index contains the literal position within the list of transport parameters recognized by this function.
2469
 * That index is being used to find duplicates using a 64-bit bitmap (found_bits). When the transport parameter is being processed,
2470
 * tp_index is set to -1. */
2471
0
#define DECODE_TP(_id, block)                                                                                                      \
2472
0
    do {                                                                                                                           \
2473
0
        if (tp_index >= 0) {                                                                                                       \
2474
0
            if (id == (_id)) {                                                                                                     \
2475
0
                if ((found_bits & ((uint64_t)1 << tp_index)) != 0) {                                                               \
2476
0
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;                                                              \
2477
0
                    goto Exit;                                                                                                     \
2478
0
                }                                                                                                                  \
2479
0
                found_bits |= (uint64_t)1 << tp_index;                                                                             \
2480
0
                {block} tp_index = -1;                                                                                             \
2481
0
            } else {                                                                                                               \
2482
0
                ++tp_index;                                                                                                        \
2483
0
            }                                                                                                                      \
2484
0
        }                                                                                                                          \
2485
0
    } while (0)
2486
0
#define DECODE_CID_TP(_id, dest)                                                                                                   \
2487
0
    DECODE_TP(_id, {                                                                                                               \
2488
0
        size_t cidl = end - src;                                                                                                   \
2489
0
        if (cidl > QUICLY_MAX_CID_LEN_V1) {                                                                                        \
2490
0
            ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;                                                                      \
2491
0
            goto Exit;                                                                                                             \
2492
0
        }                                                                                                                          \
2493
0
        if (dest == NULL) {                                                                                                        \
2494
0
            ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;                                                                      \
2495
0
            goto Exit;                                                                                                             \
2496
0
        } else if (dest != &tp_cid_ignore) {                                                                                       \
2497
0
            quicly_set_cid(dest, ptls_iovec_init(src, cidl));                                                                      \
2498
0
        }                                                                                                                          \
2499
0
        src = end;                                                                                                                 \
2500
0
    });
2501
2502
0
    uint64_t found_bits = 0;
2503
0
    quicly_error_t ret;
2504
2505
    /* set parameters to their default values */
2506
0
    *params = default_transport_params;
2507
2508
    /* Set optional parameters to UINT8_MAX. It is used to as a sentinel for detecting missing TPs. */
2509
0
    if (original_dcid != NULL && original_dcid != &tp_cid_ignore)
2510
0
        original_dcid->len = UINT8_MAX;
2511
0
    if (initial_scid != NULL && initial_scid != &tp_cid_ignore)
2512
0
        initial_scid->len = UINT8_MAX;
2513
0
    if (retry_scid != NULL && retry_scid != &tp_cid_ignore)
2514
0
        retry_scid->len = UINT8_MAX;
2515
2516
    /* decode the parameters block */
2517
0
    while (src != end) {
2518
0
        uint64_t id;
2519
0
        if ((id = quicly_decodev(&src, end)) == UINT64_MAX) {
2520
0
            ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2521
0
            goto Exit;
2522
0
        }
2523
0
        int tp_index = 0;
2524
0
        ptls_decode_open_block(src, end, -1, {
2525
0
            DECODE_CID_TP(QUICLY_TRANSPORT_PARAMETER_ID_ORIGINAL_CONNECTION_ID, original_dcid);
2526
0
            DECODE_CID_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_SOURCE_CONNECTION_ID, initial_scid);
2527
0
            DECODE_CID_TP(QUICLY_TRANSPORT_PARAMETER_ID_RETRY_SOURCE_CONNECTION_ID, retry_scid);
2528
0
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_MAX_UDP_PAYLOAD_SIZE, {
2529
0
                uint64_t v;
2530
0
                if ((v = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
2531
0
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2532
0
                    goto Exit;
2533
0
                }
2534
0
                if (v < 1200) {
2535
0
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2536
0
                    goto Exit;
2537
0
                }
2538
0
                if (v > UINT16_MAX)
2539
0
                    v = UINT16_MAX;
2540
0
                params->max_udp_payload_size = (uint16_t)v;
2541
0
            });
2542
0
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL, {
2543
0
                if ((params->max_stream_data.bidi_local = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
2544
0
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2545
0
                    goto Exit;
2546
0
                }
2547
0
            });
2548
0
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE, {
2549
0
                if ((params->max_stream_data.bidi_remote = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
2550
0
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2551
0
                    goto Exit;
2552
0
                }
2553
0
            });
2554
0
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_UNI, {
2555
0
                if ((params->max_stream_data.uni = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
2556
0
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2557
0
                    goto Exit;
2558
0
                }
2559
0
            });
2560
0
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_DATA, {
2561
0
                if ((params->max_data = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
2562
0
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2563
0
                    goto Exit;
2564
0
                }
2565
0
            });
2566
0
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_STATELESS_RESET_TOKEN, {
2567
0
                if (!(stateless_reset_token != NULL && end - src == QUICLY_STATELESS_RESET_TOKEN_LEN)) {
2568
0
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2569
0
                    goto Exit;
2570
0
                }
2571
0
                memcpy(stateless_reset_token, src, QUICLY_STATELESS_RESET_TOKEN_LEN);
2572
0
                src = end;
2573
0
            });
2574
0
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_MAX_IDLE_TIMEOUT, {
2575
0
                if ((params->max_idle_timeout = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
2576
0
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2577
0
                    goto Exit;
2578
0
                }
2579
0
            });
2580
0
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_BIDI, {
2581
0
                if ((params->max_streams_bidi = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
2582
0
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2583
0
                    goto Exit;
2584
0
                }
2585
0
            });
2586
0
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_UNI, {
2587
0
                if ((params->max_streams_uni = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
2588
0
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2589
0
                    goto Exit;
2590
0
                }
2591
0
            });
2592
0
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_ACK_DELAY_EXPONENT, {
2593
0
                uint64_t v;
2594
0
                if ((v = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
2595
0
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2596
0
                    goto Exit;
2597
0
                }
2598
0
                if (v > 20) {
2599
0
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2600
0
                    goto Exit;
2601
0
                }
2602
0
                params->ack_delay_exponent = (uint8_t)v;
2603
0
            });
2604
0
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_MAX_ACK_DELAY, {
2605
0
                uint64_t v;
2606
0
                if ((v = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
2607
0
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2608
0
                    goto Exit;
2609
0
                }
2610
0
                if (v >= 16384) { /* "values of 2^14 or greater are invalid" */
2611
0
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2612
0
                    goto Exit;
2613
0
                }
2614
0
                params->max_ack_delay = (uint16_t)v;
2615
0
            });
2616
0
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_MIN_ACK_DELAY, {
2617
0
                if ((params->min_ack_delay_usec = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
2618
0
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2619
0
                    goto Exit;
2620
0
                }
2621
0
            });
2622
0
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_ACTIVE_CONNECTION_ID_LIMIT, {
2623
0
                uint64_t v;
2624
0
                if ((v = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
2625
0
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2626
0
                    goto Exit;
2627
0
                }
2628
0
                if (v < QUICLY_MIN_ACTIVE_CONNECTION_ID_LIMIT) {
2629
0
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2630
0
                    goto Exit;
2631
0
                }
2632
0
                params->active_connection_id_limit = v;
2633
0
            });
2634
0
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_DISABLE_ACTIVE_MIGRATION, { params->disable_active_migration = 1; });
2635
0
            DECODE_TP(QUICLY_TRANSPORT_PARAMETER_ID_MAX_DATAGRAM_FRAME_SIZE, {
2636
0
                uint64_t v;
2637
0
                if ((v = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
2638
0
                    ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2639
0
                    goto Exit;
2640
0
                }
2641
0
                if (v > UINT16_MAX)
2642
0
                    v = UINT16_MAX;
2643
0
                params->max_datagram_frame_size = (uint16_t)v;
2644
0
            });
2645
            /* skip unknown extension */
2646
0
            if (tp_index >= 0)
2647
0
                src = end;
2648
0
        });
2649
0
    }
2650
2651
    /* check consistency between the transport parameters */
2652
0
    if (params->min_ack_delay_usec != UINT64_MAX) {
2653
0
        if (params->min_ack_delay_usec > params->max_ack_delay * 1000) {
2654
0
            ret = QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
2655
0
            goto Exit;
2656
0
        }
2657
0
    }
2658
2659
    /* check the absence of CIDs */
2660
0
    if ((original_dcid != NULL && original_dcid->len == UINT8_MAX) || (initial_scid != NULL && initial_scid->len == UINT8_MAX) ||
2661
0
        (retry_scid != NULL && retry_scid->len == UINT8_MAX)) {
2662
0
        ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2663
0
        goto Exit;
2664
0
    }
2665
2666
0
    ret = 0;
2667
0
Exit:
2668
0
    if (ret == PTLS_ALERT_DECODE_ERROR)
2669
0
        ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2670
0
    return ret;
2671
2672
0
#undef DECODE_TP
2673
0
#undef DECODE_CID_TP
2674
0
}
2675
2676
static uint16_t get_transport_parameters_extension_id(uint32_t quic_version)
2677
0
{
2678
0
    switch (quic_version) {
2679
0
    case QUICLY_PROTOCOL_VERSION_DRAFT27:
2680
0
    case QUICLY_PROTOCOL_VERSION_DRAFT29:
2681
0
        return QUICLY_TLS_EXTENSION_TYPE_TRANSPORT_PARAMETERS_DRAFT;
2682
0
    default:
2683
0
        return QUICLY_TLS_EXTENSION_TYPE_TRANSPORT_PARAMETERS_FINAL;
2684
0
    }
2685
0
}
2686
2687
static int collect_transport_parameters(ptls_t *tls, struct st_ptls_handshake_properties_t *properties, uint16_t type)
2688
0
{
2689
0
    quicly_conn_t *conn = (void *)((char *)properties - offsetof(quicly_conn_t, crypto.handshake_properties));
2690
0
    return type == get_transport_parameters_extension_id(conn->super.version);
2691
0
}
2692
2693
static quicly_conn_t *create_connection(quicly_context_t *ctx, uint32_t protocol_version, const char *server_name,
2694
                                        struct sockaddr *remote_addr, struct sockaddr *local_addr, ptls_iovec_t *remote_cid,
2695
                                        const quicly_cid_plaintext_t *local_cid, ptls_handshake_properties_t *handshake_properties,
2696
                                        void *appdata, uint32_t initcwnd)
2697
0
{
2698
0
    ptls_log_conn_state_t log_state_override;
2699
0
    ptls_t *tls;
2700
0
    quicly_conn_t *conn;
2701
0
    quicly_pacer_t *pacer = NULL;
2702
2703
    /* consistency checks */
2704
0
    assert(remote_addr != NULL && remote_addr->sa_family != AF_UNSPEC);
2705
0
    if (ctx->transport_params.max_datagram_frame_size != 0)
2706
0
        assert(ctx->receive_datagram_frame != NULL);
2707
2708
    /* build log state */
2709
0
    ptls_log_init_conn_state(&log_state_override, ctx->tls->random_bytes);
2710
0
    switch (remote_addr->sa_family) {
2711
0
    case AF_INET:
2712
0
        ptls_build_v4_mapped_v6_address(&log_state_override.address, &((struct sockaddr_in *)remote_addr)->sin_addr);
2713
0
        break;
2714
0
    case AF_INET6:
2715
0
        log_state_override.address = ((struct sockaddr_in6 *)remote_addr)->sin6_addr;
2716
0
        break;
2717
0
    default:
2718
0
        break;
2719
0
    }
2720
2721
    /* create TLS context */
2722
0
    ptls_log_conn_state_override = &log_state_override;
2723
0
    tls = ptls_new(ctx->tls, server_name == NULL);
2724
0
    ptls_log_conn_state_override = NULL;
2725
0
    if (tls == NULL)
2726
0
        return NULL;
2727
0
    if (server_name != NULL && ptls_set_server_name(tls, server_name, strlen(server_name)) != 0) {
2728
0
        ptls_free(tls);
2729
0
        return NULL;
2730
0
    }
2731
2732
    /* allocate memory and start creating QUIC context */
2733
0
    if ((conn = malloc(sizeof(*conn))) == NULL) {
2734
0
        ptls_free(tls);
2735
0
        return NULL;
2736
0
    }
2737
0
    if (enable_with_ratio255(ctx->enable_ratio.pacing, ctx->tls->random_bytes) && (pacer = malloc(sizeof(*pacer))) == NULL) {
2738
0
        ptls_free(tls);
2739
0
        free(conn);
2740
0
        return NULL;
2741
0
    }
2742
0
    memset(conn, 0, sizeof(*conn));
2743
0
    conn->super.ctx = ctx;
2744
0
    conn->super.data = appdata;
2745
0
    lock_now(conn, 0);
2746
0
    conn->created_at = conn->stash.now;
2747
0
    conn->super.stats.handshake_confirmed_msec = UINT64_MAX;
2748
0
    conn->super.stats.num_paced = pacer != NULL;
2749
0
    conn->super.stats.num_respected_app_limited =
2750
0
        enable_with_ratio255(conn->super.ctx->enable_ratio.respect_app_limited, ctx->tls->random_bytes);
2751
0
    conn->crypto.tls = tls;
2752
0
    if (new_path(conn, 0, remote_addr, local_addr) != 0) {
2753
0
        unlock_now(conn);
2754
0
        if (pacer != NULL)
2755
0
            free(pacer);
2756
0
        ptls_free(tls);
2757
0
        free(conn);
2758
0
        return NULL;
2759
0
    }
2760
0
    quicly_local_cid_init_set(&conn->super.local.cid_set, ctx->cid_encryptor, local_cid);
2761
0
    conn->super.local.long_header_src_cid = conn->super.local.cid_set.cids[0].cid;
2762
0
    quicly_remote_cid_init_set(&conn->super.remote.cid_set, remote_cid, ctx->tls->random_bytes);
2763
0
    assert(conn->paths[0]->dcid == 0 && conn->super.remote.cid_set.cids[0].sequence == 0 &&
2764
0
           conn->super.remote.cid_set.cids[0].state == QUICLY_REMOTE_CID_IN_USE && "paths[0].dcid uses cids[0]");
2765
0
    conn->super.state = QUICLY_STATE_FIRSTFLIGHT;
2766
0
    if (server_name != NULL) {
2767
0
        conn->super.local.bidi.next_stream_id = 0;
2768
0
        conn->super.local.uni.next_stream_id = 2;
2769
0
        conn->super.remote.bidi.next_stream_id = 1;
2770
0
        conn->super.remote.uni.next_stream_id = 3;
2771
0
    } else {
2772
0
        conn->super.local.bidi.next_stream_id = 1;
2773
0
        conn->super.local.uni.next_stream_id = 3;
2774
0
        conn->super.remote.bidi.next_stream_id = 0;
2775
0
        conn->super.remote.uni.next_stream_id = 2;
2776
0
    }
2777
0
    conn->super.remote.transport_params = default_transport_params;
2778
0
    conn->super.version = protocol_version;
2779
0
    quicly_linklist_init(&conn->super._default_scheduler.active);
2780
0
    quicly_linklist_init(&conn->super._default_scheduler.blocked);
2781
0
    conn->streams = kh_init(quicly_stream_t);
2782
0
    quicly_maxsender_init(&conn->ingress.max_data.sender, conn->super.ctx->transport_params.max_data);
2783
0
    quicly_maxsender_init(&conn->ingress.max_streams.uni, conn->super.ctx->transport_params.max_streams_uni);
2784
0
    quicly_maxsender_init(&conn->ingress.max_streams.bidi, conn->super.ctx->transport_params.max_streams_bidi);
2785
0
    quicly_loss_init(&conn->egress.loss, &conn->super.ctx->loss,
2786
0
                     conn->super.ctx->loss.default_initial_rtt /* FIXME remember initial_rtt in session ticket */,
2787
0
                     &conn->super.remote.transport_params.max_ack_delay, &conn->super.remote.transport_params.ack_delay_exponent);
2788
0
    conn->egress.max_udp_payload_size = conn->super.ctx->initial_egress_max_udp_payload_size;
2789
0
    init_max_streams(&conn->egress.max_streams.uni);
2790
0
    init_max_streams(&conn->egress.max_streams.bidi);
2791
0
    conn->egress.ack_frequency.update_at = INT64_MAX;
2792
0
    conn->egress.send_ack_at = INT64_MAX;
2793
0
    conn->egress.send_probe_at = INT64_MAX;
2794
0
    conn->super.ctx->init_cc->cb(conn->super.ctx->init_cc, &conn->egress.cc, initcwnd, conn->stash.now);
2795
0
    if (conn->egress.cc.type->enable_rapid_start != NULL &&
2796
0
        enable_with_ratio255(conn->super.ctx->enable_ratio.rapid_start, conn->super.ctx->tls->random_bytes)) {
2797
0
        conn->egress.cc.type->enable_rapid_start(&conn->egress.cc, conn->stash.now);
2798
0
        conn->super.stats.num_rapid_start = 1;
2799
0
    }
2800
0
    if (pacer != NULL) {
2801
0
        conn->egress.pacer = pacer;
2802
0
        quicly_pacer_reset(conn->egress.pacer);
2803
0
    }
2804
0
    conn->egress.ecn.state = enable_with_ratio255(conn->super.ctx->enable_ratio.ecn, conn->super.ctx->tls->random_bytes)
2805
0
                                 ? QUICLY_ECN_PROBING
2806
0
                                 : QUICLY_ECN_OFF;
2807
0
    quicly_linklist_init(&conn->egress.pending_streams.blocked.uni);
2808
0
    quicly_linklist_init(&conn->egress.pending_streams.blocked.bidi);
2809
0
    quicly_linklist_init(&conn->egress.pending_streams.control);
2810
0
    quicly_ratemeter_init(&conn->egress.ratemeter);
2811
0
    conn->egress.try_jumpstart = 1;
2812
0
    if (handshake_properties != NULL) {
2813
0
        assert(handshake_properties->additional_extensions == NULL);
2814
0
        assert(handshake_properties->collect_extension == NULL);
2815
0
        assert(handshake_properties->collected_extensions == NULL);
2816
0
        conn->crypto.handshake_properties = *handshake_properties;
2817
0
    } else {
2818
0
        conn->crypto.handshake_properties = (ptls_handshake_properties_t){{{{NULL}}}};
2819
0
    }
2820
0
    conn->crypto.handshake_properties.collect_extension = collect_transport_parameters;
2821
0
    conn->retry_scid.len = UINT8_MAX;
2822
0
    conn->idle_timeout.at = INT64_MAX;
2823
0
    conn->idle_timeout.should_rearm_on_send = 1;
2824
0
    for (size_t i = 0; i != PTLS_ELEMENTSOF(conn->delayed_packets.as_array); ++i)
2825
0
        conn->delayed_packets.as_array[i].tail = &conn->delayed_packets.as_array[i].head;
2826
0
    conn->stash.on_ack_stream.active_acked_cache.stream_id = INT64_MIN;
2827
2828
0
    *ptls_get_data_ptr(tls) = conn;
2829
2830
0
    update_open_count(conn->super.ctx, 1);
2831
2832
0
    return conn;
2833
0
}
2834
2835
static int client_collected_extensions(ptls_t *tls, ptls_handshake_properties_t *properties, ptls_raw_extension_t *slots)
2836
0
{
2837
0
    quicly_conn_t *conn = (void *)((char *)properties - offsetof(quicly_conn_t, crypto.handshake_properties));
2838
0
    quicly_error_t ret;
2839
2840
0
    assert(properties->client.early_data_acceptance != PTLS_EARLY_DATA_ACCEPTANCE_UNKNOWN);
2841
2842
0
    if (slots[0].type == UINT16_MAX) {
2843
0
        ret = PTLS_ALERT_MISSING_EXTENSION;
2844
0
        goto Exit;
2845
0
    }
2846
0
    assert(slots[0].type == get_transport_parameters_extension_id(conn->super.version));
2847
0
    assert(slots[1].type == UINT16_MAX);
2848
2849
0
    const uint8_t *src = slots[0].data.base, *end = src + slots[0].data.len;
2850
0
    quicly_transport_parameters_t params;
2851
0
    quicly_cid_t original_dcid, initial_scid, retry_scid = {};
2852
2853
    /* obtain pointer to initial CID of the peer. It is guaranteed to exist in the first slot, as TP is received before any frame
2854
     * that updates the CID set. */
2855
0
    quicly_remote_cid_t *remote_cid = &conn->super.remote.cid_set.cids[0];
2856
0
    assert(remote_cid->sequence == 0);
2857
2858
    /* decode */
2859
0
    if ((ret = quicly_decode_transport_parameter_list(&params, needs_cid_auth(conn) || is_retry(conn) ? &original_dcid : NULL,
2860
0
                                                      needs_cid_auth(conn) ? &initial_scid : &tp_cid_ignore,
2861
0
                                                      needs_cid_auth(conn) ? is_retry(conn) ? &retry_scid : NULL : &tp_cid_ignore,
2862
0
                                                      remote_cid->stateless_reset_token, src, end)) != 0)
2863
0
        goto Exit;
2864
2865
    /* validate CIDs */
2866
0
    if (needs_cid_auth(conn) || is_retry(conn)) {
2867
0
        if (!quicly_cid_is_equal(&conn->super.original_dcid, ptls_iovec_init(original_dcid.cid, original_dcid.len))) {
2868
0
            ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2869
0
            goto Exit;
2870
0
        }
2871
0
    }
2872
0
    if (needs_cid_auth(conn)) {
2873
0
        if (!quicly_cid_is_equal(&remote_cid->cid, ptls_iovec_init(initial_scid.cid, initial_scid.len))) {
2874
0
            ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2875
0
            goto Exit;
2876
0
        }
2877
0
        if (is_retry(conn)) {
2878
0
            if (!quicly_cid_is_equal(&conn->retry_scid, ptls_iovec_init(retry_scid.cid, retry_scid.len))) {
2879
0
                ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
2880
0
                goto Exit;
2881
0
            }
2882
0
        }
2883
0
    }
2884
2885
0
    if (properties->client.early_data_acceptance == PTLS_EARLY_DATA_ACCEPTED) {
2886
0
#define ZERORTT_VALIDATE(x)                                                                                                        \
2887
0
    if (params.x < conn->super.remote.transport_params.x) {                                                                        \
2888
0
        ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;                                                                          \
2889
0
        goto Exit;                                                                                                                 \
2890
0
    }
2891
0
        ZERORTT_VALIDATE(max_data);
2892
0
        ZERORTT_VALIDATE(max_stream_data.bidi_local);
2893
0
        ZERORTT_VALIDATE(max_stream_data.bidi_remote);
2894
0
        ZERORTT_VALIDATE(max_stream_data.uni);
2895
0
        ZERORTT_VALIDATE(max_streams_bidi);
2896
0
        ZERORTT_VALIDATE(max_streams_uni);
2897
0
#undef ZERORTT_VALIDATE
2898
0
    }
2899
2900
    /* store the results */
2901
0
    conn->super.remote.transport_params = params;
2902
0
    ack_frequency_set_next_update_at(conn);
2903
2904
0
Exit:
2905
0
    return compress_handshake_result(ret);
2906
0
}
2907
2908
quicly_error_t quicly_connect(quicly_conn_t **_conn, quicly_context_t *ctx, const char *server_name, struct sockaddr *dest_addr,
2909
                              struct sockaddr *src_addr, const quicly_cid_plaintext_t *new_cid, ptls_iovec_t address_token,
2910
                              ptls_handshake_properties_t *handshake_properties,
2911
                              const quicly_transport_parameters_t *resumed_transport_params, void *appdata)
2912
0
{
2913
0
    const quicly_salt_t *salt;
2914
0
    quicly_conn_t *conn = NULL;
2915
0
    const quicly_cid_t *server_cid;
2916
0
    ptls_buffer_t buf;
2917
0
    size_t epoch_offsets[5] = {0};
2918
0
    size_t max_early_data_size = 0;
2919
0
    quicly_error_t ret;
2920
2921
0
    if ((salt = quicly_get_salt(ctx->initial_version)) == NULL) {
2922
0
        if ((ctx->initial_version & 0x0f0f0f0f) == 0x0a0a0a0a) {
2923
            /* greasing version, use our own greasing salt */
2924
0
            static const quicly_salt_t grease_salt = {.initial = {0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad,
2925
0
                                                                  0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef}};
2926
0
            salt = &grease_salt;
2927
0
        } else {
2928
0
            ret = QUICLY_ERROR_INVALID_INITIAL_VERSION;
2929
0
            goto Exit;
2930
0
        }
2931
0
    }
2932
2933
0
    if ((conn = create_connection(
2934
0
             ctx, ctx->initial_version, server_name, dest_addr, src_addr, NULL, new_cid, handshake_properties, appdata,
2935
0
             quicly_cc_calc_initial_cwnd(ctx->initcwnd_packets, ctx->transport_params.max_udp_payload_size))) == NULL) {
2936
0
        ret = PTLS_ERROR_NO_MEMORY;
2937
0
        goto Exit;
2938
0
    }
2939
0
    conn->super.remote.address_validation.validated = 1;
2940
0
    conn->super.remote.address_validation.send_probe = 1;
2941
0
    if (address_token.len != 0) {
2942
0
        if ((conn->token.base = malloc(address_token.len)) == NULL) {
2943
0
            ret = PTLS_ERROR_NO_MEMORY;
2944
0
            goto Exit;
2945
0
        }
2946
0
        memcpy(conn->token.base, address_token.base, address_token.len);
2947
0
        conn->token.len = address_token.len;
2948
0
    }
2949
0
    server_cid = quicly_get_remote_cid(conn);
2950
0
    conn->super.original_dcid = *server_cid;
2951
2952
0
    QUICLY_PROBE(CONNECT, conn, conn->stash.now, conn->super.version);
2953
0
    QUICLY_LOG_CONN(connect, conn, { PTLS_LOG_ELEMENT_UNSIGNED(version, conn->super.version); });
2954
2955
0
    if ((ret = setup_handshake_space_and_flow(conn, QUICLY_EPOCH_INITIAL)) != 0)
2956
0
        goto Exit;
2957
0
    if ((ret = setup_initial_encryption(get_aes128gcmsha256(ctx), &conn->initial->cipher.ingress, &conn->initial->cipher.egress,
2958
0
                                        ptls_iovec_init(server_cid->cid, server_cid->len), 1,
2959
0
                                        ptls_iovec_init(salt->initial, sizeof(salt->initial)), conn)) != 0)
2960
0
        goto Exit;
2961
2962
    /* handshake (we always encode authentication CIDs, as we do not (yet) regenerate ClientHello when receiving Retry) */
2963
0
    ptls_buffer_init(&conn->crypto.transport_params.buf, "", 0);
2964
0
    if ((ret = quicly_encode_transport_parameter_list(
2965
0
             &conn->crypto.transport_params.buf, &conn->super.ctx->transport_params, NULL, &conn->super.local.cid_set.cids[0].cid,
2966
0
             NULL, NULL, conn->super.ctx->expand_client_hello ? conn->super.ctx->initial_egress_max_udp_payload_size : 0)) != 0)
2967
0
        goto Exit;
2968
0
    conn->crypto.transport_params.ext[0] =
2969
0
        (ptls_raw_extension_t){get_transport_parameters_extension_id(conn->super.version),
2970
0
                               {conn->crypto.transport_params.buf.base, conn->crypto.transport_params.buf.off}};
2971
0
    conn->crypto.transport_params.ext[1] = (ptls_raw_extension_t){UINT16_MAX};
2972
0
    conn->crypto.handshake_properties.additional_extensions = conn->crypto.transport_params.ext;
2973
0
    conn->crypto.handshake_properties.collected_extensions = client_collected_extensions;
2974
2975
0
    ptls_buffer_init(&buf, "", 0);
2976
0
    if (resumed_transport_params != NULL)
2977
0
        conn->crypto.handshake_properties.client.max_early_data_size = &max_early_data_size;
2978
0
    ret = expand_handshake_result(
2979
0
        ptls_handle_message(conn->crypto.tls, &buf, epoch_offsets, 0, NULL, 0, &conn->crypto.handshake_properties));
2980
0
    conn->crypto.handshake_properties.client.max_early_data_size = NULL;
2981
0
    if (ret != PTLS_ERROR_IN_PROGRESS) {
2982
0
        assert(ret > 0); /* no QUIC errors */
2983
0
        goto Exit;
2984
0
    }
2985
0
    write_crypto_data(conn, &buf, epoch_offsets);
2986
0
    ptls_buffer_dispose(&buf);
2987
2988
0
    if (max_early_data_size != 0) {
2989
        /* when attempting 0-RTT, apply the remembered transport parameters */
2990
0
#define APPLY(n) conn->super.remote.transport_params.n = resumed_transport_params->n
2991
0
        APPLY(active_connection_id_limit);
2992
0
        APPLY(max_data);
2993
0
        APPLY(max_stream_data.bidi_local);
2994
0
        APPLY(max_stream_data.bidi_remote);
2995
0
        APPLY(max_stream_data.uni);
2996
0
        APPLY(max_streams_bidi);
2997
0
        APPLY(max_streams_uni);
2998
0
#undef APPLY
2999
0
        if ((ret = apply_remote_transport_params(conn)) != 0)
3000
0
            goto Exit;
3001
0
    }
3002
3003
0
    *_conn = conn;
3004
0
    ret = 0;
3005
3006
0
Exit:
3007
0
    if (conn != NULL)
3008
0
        unlock_now(conn);
3009
0
    if (ret != 0) {
3010
0
        if (conn != NULL)
3011
0
            quicly_free(conn);
3012
0
    }
3013
0
    return ret;
3014
0
}
3015
3016
static int server_collected_extensions(ptls_t *tls, ptls_handshake_properties_t *properties, ptls_raw_extension_t *slots)
3017
0
{
3018
0
    quicly_conn_t *conn = (void *)((char *)properties - offsetof(quicly_conn_t, crypto.handshake_properties));
3019
0
    quicly_cid_t initial_scid;
3020
0
    quicly_error_t ret;
3021
3022
0
    if (slots[0].type == UINT16_MAX) {
3023
0
        ret = PTLS_ALERT_MISSING_EXTENSION;
3024
0
        goto Exit;
3025
0
    }
3026
0
    assert(slots[0].type == get_transport_parameters_extension_id(conn->super.version));
3027
0
    assert(slots[1].type == UINT16_MAX);
3028
3029
0
    { /* decode transport_parameters extension */
3030
0
        const uint8_t *src = slots[0].data.base, *end = src + slots[0].data.len;
3031
0
        if ((ret = quicly_decode_transport_parameter_list(&conn->super.remote.transport_params,
3032
0
                                                          needs_cid_auth(conn) ? NULL : &tp_cid_ignore,
3033
0
                                                          needs_cid_auth(conn) ? &initial_scid : &tp_cid_ignore,
3034
0
                                                          needs_cid_auth(conn) ? NULL : &tp_cid_ignore, NULL, src, end)) != 0)
3035
0
            goto Exit;
3036
0
        if (needs_cid_auth(conn) &&
3037
0
            !quicly_cid_is_equal(&conn->super.remote.cid_set.cids[0].cid, ptls_iovec_init(initial_scid.cid, initial_scid.len))) {
3038
0
            ret = QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
3039
0
            goto Exit;
3040
0
        }
3041
0
    }
3042
3043
    /* setup ack frequency */
3044
0
    ack_frequency_set_next_update_at(conn);
3045
3046
    /* update UDP max payload size, as described in the doc-comment of
3047
    `quicly_context_t::initial_egress_max_udp_payload_size` */
3048
0
    assert(conn->initial != NULL);
3049
0
    if (conn->egress.max_udp_payload_size < conn->initial->largest_ingress_udp_payload_size) {
3050
0
        uint16_t size = conn->initial->largest_ingress_udp_payload_size;
3051
0
        if (size > conn->super.ctx->transport_params.max_udp_payload_size)
3052
0
            size = conn->super.ctx->transport_params.max_udp_payload_size;
3053
0
        conn->egress.max_udp_payload_size = size;
3054
0
    }
3055
0
    if (conn->egress.max_udp_payload_size > conn->super.remote.transport_params.max_udp_payload_size)
3056
0
        conn->egress.max_udp_payload_size = conn->super.remote.transport_params.max_udp_payload_size;
3057
3058
    /* set transport_parameters extension to be sent in EE */
3059
0
    assert(properties->additional_extensions == NULL);
3060
0
    ptls_buffer_init(&conn->crypto.transport_params.buf, "", 0);
3061
0
    assert(conn->super.local.cid_set.cids[0].sequence == 0 && "make sure that local_cid is in expected state before sending SRT");
3062
0
    if ((ret = quicly_encode_transport_parameter_list(
3063
0
             &conn->crypto.transport_params.buf, &conn->super.ctx->transport_params,
3064
0
             needs_cid_auth(conn) || is_retry(conn) ? &conn->super.original_dcid : NULL,
3065
0
             needs_cid_auth(conn) ? &conn->super.local.cid_set.cids[0].cid : NULL,
3066
0
             needs_cid_auth(conn) && is_retry(conn) ? &conn->retry_scid : NULL,
3067
0
             conn->super.ctx->cid_encryptor != NULL ? conn->super.local.cid_set.cids[0].stateless_reset_token : NULL, 0)) != 0)
3068
0
        goto Exit;
3069
0
    properties->additional_extensions = conn->crypto.transport_params.ext;
3070
0
    conn->crypto.transport_params.ext[0] =
3071
0
        (ptls_raw_extension_t){get_transport_parameters_extension_id(conn->super.version),
3072
0
                               {conn->crypto.transport_params.buf.base, conn->crypto.transport_params.buf.off}};
3073
0
    conn->crypto.transport_params.ext[1] = (ptls_raw_extension_t){UINT16_MAX};
3074
0
    conn->crypto.handshake_properties.additional_extensions = conn->crypto.transport_params.ext;
3075
3076
0
    ret = 0;
3077
3078
0
Exit:
3079
0
    return compress_handshake_result(ret);
3080
0
}
3081
3082
static size_t aead_decrypt_core(ptls_aead_context_t *aead, uint64_t pn, quicly_decoded_packet_t *packet, size_t aead_off)
3083
0
{
3084
0
    return ptls_aead_decrypt(aead, packet->octets.base + aead_off, packet->octets.base + aead_off, packet->octets.len - aead_off,
3085
0
                             pn, packet->octets.base, aead_off);
3086
0
}
3087
3088
static int aead_decrypt_fixed_key(void *ctx, uint64_t pn, quicly_decoded_packet_t *packet, size_t aead_off, size_t *ptlen)
3089
0
{
3090
0
    ptls_aead_context_t *aead = ctx;
3091
3092
0
    if ((*ptlen = aead_decrypt_core(aead, pn, packet, aead_off)) == SIZE_MAX)
3093
0
        return QUICLY_ERROR_PACKET_IGNORED;
3094
0
    return 0;
3095
0
}
3096
3097
static int aead_decrypt_1rtt(void *ctx, uint64_t pn, quicly_decoded_packet_t *packet, size_t aead_off, size_t *ptlen)
3098
0
{
3099
0
    quicly_conn_t *conn = ctx;
3100
0
    struct st_quicly_application_space_t *space = conn->application;
3101
0
    size_t aead_index = (packet->octets.base[0] & QUICLY_KEY_PHASE_BIT) != 0;
3102
0
    int ret;
3103
3104
    /* prepare key, when not available (yet) */
3105
0
    if (space->cipher.ingress.aead[aead_index] == NULL) {
3106
0
    Retry_1RTT: {
3107
        /* Replace the AEAD key at the alternative slot (note: decryption key slots are shared by 0-RTT and 1-RTT), at the same time
3108
         * dropping 0-RTT header protection key. */
3109
0
        if (conn->application->cipher.ingress.header_protection.zero_rtt != NULL) {
3110
0
            ptls_cipher_free(conn->application->cipher.ingress.header_protection.zero_rtt);
3111
0
            conn->application->cipher.ingress.header_protection.zero_rtt = NULL;
3112
0
        }
3113
0
        ptls_cipher_suite_t *cipher = ptls_get_cipher(conn->crypto.tls);
3114
0
        if ((ret = update_1rtt_key(conn, cipher, 0, &space->cipher.ingress.aead[aead_index], space->cipher.ingress.secret)) != 0)
3115
0
            return ret;
3116
0
        ++space->cipher.ingress.key_phase.prepared;
3117
0
        QUICLY_PROBE(CRYPTO_RECEIVE_KEY_UPDATE_PREPARE, conn, conn->stash.now, space->cipher.ingress.key_phase.prepared,
3118
0
                     QUICLY_PROBE_HEXDUMP(space->cipher.ingress.secret, cipher->hash->digest_size));
3119
0
        QUICLY_LOG_CONN(crypto_receive_key_update_prepare, conn, {
3120
0
            PTLS_LOG_ELEMENT_UNSIGNED(phase, space->cipher.ingress.key_phase.prepared);
3121
0
            PTLS_LOG_APPDATA_ELEMENT_HEXDUMP(secret, space->cipher.ingress.secret, cipher->hash->digest_size);
3122
0
        });
3123
0
    }
3124
0
    }
3125
3126
    /* decrypt */
3127
0
    ptls_aead_context_t *aead = space->cipher.ingress.aead[aead_index];
3128
0
    if ((*ptlen = aead_decrypt_core(aead, pn, packet, aead_off)) == SIZE_MAX) {
3129
        /* retry with a new key, if possible */
3130
0
        if (space->cipher.ingress.key_phase.decrypted == space->cipher.ingress.key_phase.prepared &&
3131
0
            space->cipher.ingress.key_phase.decrypted % 2 != aead_index) {
3132
            /* reapply AEAD to revert payload to the encrypted form. This assumes that the cipher used in AEAD is CTR. */
3133
0
            aead_decrypt_core(aead, pn, packet, aead_off);
3134
0
            goto Retry_1RTT;
3135
0
        }
3136
        /* otherwise return failure */
3137
0
        return QUICLY_ERROR_PACKET_IGNORED;
3138
0
    }
3139
3140
    /* update the confirmed key phase and also the egress key phase, if necessary */
3141
0
    if (space->cipher.ingress.key_phase.prepared != space->cipher.ingress.key_phase.decrypted &&
3142
0
        space->cipher.ingress.key_phase.prepared % 2 == aead_index) {
3143
0
        if ((ret = received_key_update(conn, space->cipher.ingress.key_phase.prepared)) != 0)
3144
0
            return ret;
3145
0
    }
3146
3147
0
    return 0;
3148
0
}
3149
3150
static quicly_error_t do_decrypt_packet(ptls_cipher_context_t *header_protection,
3151
                                        int (*aead_cb)(void *, uint64_t, quicly_decoded_packet_t *, size_t, size_t *),
3152
                                        void *aead_ctx, uint64_t *next_expected_pn, quicly_decoded_packet_t *packet, uint64_t *pn,
3153
                                        ptls_iovec_t *payload)
3154
0
{
3155
0
    size_t encrypted_len = packet->octets.len - packet->encrypted_off;
3156
0
    uint8_t hpmask[5] = {0};
3157
0
    uint32_t pnbits = 0;
3158
0
    size_t pnlen, ptlen, i;
3159
3160
    /* decipher the header protection, as well as obtaining pnbits, pnlen */
3161
0
    if (encrypted_len < header_protection->algo->iv_size + QUICLY_MAX_PN_SIZE) {
3162
0
        *pn = UINT64_MAX;
3163
0
        return QUICLY_ERROR_PACKET_IGNORED;
3164
0
    }
3165
0
    ptls_cipher_init(header_protection, packet->octets.base + packet->encrypted_off + QUICLY_MAX_PN_SIZE);
3166
0
    ptls_cipher_encrypt(header_protection, hpmask, hpmask, sizeof(hpmask));
3167
0
    packet->octets.base[0] ^= hpmask[0] & (QUICLY_PACKET_IS_LONG_HEADER(packet->octets.base[0]) ? 0xf : 0x1f);
3168
0
    pnlen = (packet->octets.base[0] & 0x3) + 1;
3169
0
    for (i = 0; i != pnlen; ++i) {
3170
0
        packet->octets.base[packet->encrypted_off + i] ^= hpmask[i + 1];
3171
0
        pnbits = (pnbits << 8) | packet->octets.base[packet->encrypted_off + i];
3172
0
    }
3173
3174
0
    size_t aead_off = packet->encrypted_off + pnlen;
3175
0
    *pn = quicly_determine_packet_number(pnbits, pnlen * 8, *next_expected_pn);
3176
3177
    /* AEAD decryption */
3178
0
    int ret;
3179
0
    if ((ret = (*aead_cb)(aead_ctx, *pn, packet, aead_off, &ptlen)) != 0) {
3180
0
        return ret;
3181
0
    }
3182
0
    if (*next_expected_pn <= *pn)
3183
0
        *next_expected_pn = *pn + 1;
3184
3185
0
    *payload = ptls_iovec_init(packet->octets.base + aead_off, ptlen);
3186
0
    return 0;
3187
0
}
3188
3189
static quicly_error_t decrypt_packet(ptls_cipher_context_t *header_protection,
3190
                                     int (*aead_cb)(void *, uint64_t, quicly_decoded_packet_t *, size_t, size_t *), void *aead_ctx,
3191
                                     uint64_t *next_expected_pn, quicly_decoded_packet_t *packet, uint64_t *pn,
3192
                                     ptls_iovec_t *payload)
3193
0
{
3194
0
    quicly_error_t ret;
3195
3196
    /* decrypt ourselves, or use the pre-decrypted input */
3197
0
    if (packet->decrypted.pn == UINT64_MAX) {
3198
0
        if ((ret = do_decrypt_packet(header_protection, aead_cb, aead_ctx, next_expected_pn, packet, pn, payload)) != 0)
3199
0
            return ret;
3200
0
    } else {
3201
0
        *payload = ptls_iovec_init(packet->octets.base + packet->encrypted_off, packet->octets.len - packet->encrypted_off);
3202
0
        *pn = packet->decrypted.pn;
3203
0
        if (aead_cb == aead_decrypt_1rtt) {
3204
0
            quicly_conn_t *conn = aead_ctx;
3205
0
            if (conn->application->cipher.ingress.key_phase.decrypted < packet->decrypted.key_phase) {
3206
0
                if ((ret = received_key_update(conn, packet->decrypted.key_phase)) != 0)
3207
0
                    return ret;
3208
0
            }
3209
0
        }
3210
0
        if (*next_expected_pn < *pn)
3211
0
            *next_expected_pn = *pn + 1;
3212
0
    }
3213
3214
    /* check reserved bits after AEAD decryption */
3215
0
    if ((packet->octets.base[0] & (QUICLY_PACKET_IS_LONG_HEADER(packet->octets.base[0]) ? QUICLY_LONG_HEADER_RESERVED_BITS
3216
0
                                                                                        : QUICLY_SHORT_HEADER_RESERVED_BITS)) !=
3217
0
        0) {
3218
0
        return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
3219
0
    }
3220
0
    if (payload->len == 0) {
3221
0
        return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
3222
0
    }
3223
3224
0
    return 0;
3225
0
}
3226
3227
static quicly_error_t do_on_ack_ack(quicly_conn_t *conn, const quicly_sent_packet_t *packet, uint64_t start, uint64_t start_length,
3228
                                    struct st_quicly_sent_ack_additional_t *additional, size_t additional_capacity)
3229
0
{
3230
    /* find the pn space */
3231
0
    struct st_quicly_pn_space_t *space;
3232
0
    switch (packet->ack_epoch) {
3233
0
    case QUICLY_EPOCH_INITIAL:
3234
0
        space = &conn->initial->super;
3235
0
        break;
3236
0
    case QUICLY_EPOCH_HANDSHAKE:
3237
0
        space = &conn->handshake->super;
3238
0
        break;
3239
0
    case QUICLY_EPOCH_1RTT:
3240
0
        space = &conn->application->super;
3241
0
        break;
3242
0
    default:
3243
0
        assert(!"FIXME");
3244
0
        return QUICLY_TRANSPORT_ERROR_INTERNAL;
3245
0
    }
3246
3247
    /* subtract given ACK ranges */
3248
0
    int ret;
3249
0
    uint64_t end = start + start_length;
3250
0
    if ((ret = quicly_ranges_subtract(&space->ack_queue, start, end)) != 0)
3251
0
        return ret;
3252
0
    for (size_t i = 0; i < additional_capacity && additional[i].gap != 0; ++i) {
3253
0
        start = end + additional[i].gap;
3254
0
        end = start + additional[i].length;
3255
0
        if ((ret = quicly_ranges_subtract(&space->ack_queue, start, end)) != 0)
3256
0
            return ret;
3257
0
    }
3258
3259
    /* make adjustments */
3260
0
    if (space->ack_queue.num_ranges == 0) {
3261
0
        space->largest_pn_received_at = INT64_MAX;
3262
0
        space->unacked_count = 0;
3263
0
    } else if (space->ack_queue.num_ranges > QUICLY_MAX_ACK_BLOCKS) {
3264
0
        quicly_ranges_drop_by_range_indices(&space->ack_queue, space->ack_queue.num_ranges - QUICLY_MAX_ACK_BLOCKS,
3265
0
                                            space->ack_queue.num_ranges);
3266
0
    }
3267
3268
0
    return 0;
3269
0
}
3270
3271
static quicly_error_t on_ack_ack_ranges64(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent)
3272
0
{
3273
0
    quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap));
3274
3275
    /* TODO log */
3276
3277
0
    return acked ? do_on_ack_ack(conn, packet, sent->data.ack.start, sent->data.ack.ranges64.start_length,
3278
0
                                 sent->data.ack.ranges64.additional, PTLS_ELEMENTSOF(sent->data.ack.ranges64.additional))
3279
0
                 : 0;
3280
0
}
3281
3282
static quicly_error_t on_ack_ack_ranges8(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent)
3283
0
{
3284
0
    quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap));
3285
3286
    /* TODO log */
3287
3288
0
    return acked ? do_on_ack_ack(conn, packet, sent->data.ack.start, sent->data.ack.ranges8.start_length,
3289
0
                                 sent->data.ack.ranges8.additional, PTLS_ELEMENTSOF(sent->data.ack.ranges8.additional))
3290
0
                 : 0;
3291
0
}
3292
3293
static quicly_error_t on_ack_stream_ack_one(quicly_conn_t *conn, quicly_stream_id_t stream_id, quicly_sendstate_sent_t *sent)
3294
0
{
3295
0
    quicly_stream_t *stream;
3296
3297
0
    if ((stream = quicly_get_stream(conn, stream_id)) == NULL)
3298
0
        return 0;
3299
3300
0
    size_t bytes_to_shift;
3301
0
    int ret;
3302
0
    if ((ret = quicly_sendstate_acked(&stream->sendstate, sent, &bytes_to_shift)) != 0)
3303
0
        return ret;
3304
0
    if (bytes_to_shift != 0) {
3305
0
        QUICLY_PROBE(STREAM_ON_SEND_SHIFT, stream->conn, stream->conn->stash.now, stream, bytes_to_shift);
3306
0
        stream->callbacks->on_send_shift(stream, bytes_to_shift);
3307
0
        QUICLY_LOG_CONN(stream_on_send_shift, stream->conn, {
3308
0
            PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
3309
0
            PTLS_LOG_ELEMENT_UNSIGNED(delta, bytes_to_shift);
3310
0
        });
3311
0
    }
3312
0
    if (stream_is_destroyable(stream)) {
3313
0
        destroy_stream(stream, 0);
3314
0
    } else if (stream->_send_aux.reset_stream.sender_state == QUICLY_SENDER_STATE_NONE) {
3315
0
        resched_stream_data(stream);
3316
0
    }
3317
3318
0
    return 0;
3319
0
}
3320
3321
static quicly_error_t on_ack_stream_ack_cached(quicly_conn_t *conn)
3322
0
{
3323
0
    if (conn->stash.on_ack_stream.active_acked_cache.stream_id == INT64_MIN)
3324
0
        return 0;
3325
0
    quicly_error_t ret = on_ack_stream_ack_one(conn, conn->stash.on_ack_stream.active_acked_cache.stream_id,
3326
0
                                               &conn->stash.on_ack_stream.active_acked_cache.args);
3327
0
    conn->stash.on_ack_stream.active_acked_cache.stream_id = INT64_MIN;
3328
0
    return ret;
3329
0
}
3330
3331
static quicly_error_t on_ack_stream(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent)
3332
0
{
3333
0
    quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap));
3334
0
    quicly_error_t ret;
3335
3336
0
    if (acked) {
3337
3338
0
        QUICLY_PROBE(STREAM_ACKED, conn, conn->stash.now, sent->data.stream.stream_id, sent->data.stream.args.start,
3339
0
                     sent->data.stream.args.end - sent->data.stream.args.start);
3340
0
        QUICLY_LOG_CONN(stream_acked, conn, {
3341
0
            PTLS_LOG_ELEMENT_SIGNED(stream_id, sent->data.stream.stream_id);
3342
0
            PTLS_LOG_ELEMENT_UNSIGNED(off, sent->data.stream.args.start);
3343
0
            PTLS_LOG_ELEMENT_UNSIGNED(len, sent->data.stream.args.end - sent->data.stream.args.start);
3344
0
        });
3345
3346
0
        if (packet->frames_in_flight && conn->stash.on_ack_stream.active_acked_cache.stream_id == sent->data.stream.stream_id &&
3347
0
            conn->stash.on_ack_stream.active_acked_cache.args.end == sent->data.stream.args.start) {
3348
            /* Fast path: append the newly supplied range to the existing cached range. */
3349
0
            conn->stash.on_ack_stream.active_acked_cache.args.end = sent->data.stream.args.end;
3350
0
        } else {
3351
            /* Slow path: submit the cached range, and if possible, cache the newly supplied range. Else submit the newly supplied
3352
             * range directly. */
3353
0
            if ((ret = on_ack_stream_ack_cached(conn)) != 0)
3354
0
                return ret;
3355
0
            if (packet->frames_in_flight) {
3356
0
                conn->stash.on_ack_stream.active_acked_cache.stream_id = sent->data.stream.stream_id;
3357
0
                conn->stash.on_ack_stream.active_acked_cache.args = sent->data.stream.args;
3358
0
            } else {
3359
0
                if ((ret = on_ack_stream_ack_one(conn, sent->data.stream.stream_id, &sent->data.stream.args)) != 0)
3360
0
                    return ret;
3361
0
            }
3362
0
        }
3363
3364
0
    } else {
3365
3366
0
        QUICLY_PROBE(STREAM_LOST, conn, conn->stash.now, sent->data.stream.stream_id, sent->data.stream.args.start,
3367
0
                     sent->data.stream.args.end - sent->data.stream.args.start);
3368
0
        QUICLY_LOG_CONN(stream_lost, conn, {
3369
0
            PTLS_LOG_ELEMENT_SIGNED(stream_id, sent->data.stream.stream_id);
3370
0
            PTLS_LOG_ELEMENT_UNSIGNED(off, sent->data.stream.args.start);
3371
0
            PTLS_LOG_ELEMENT_UNSIGNED(len, sent->data.stream.args.end - sent->data.stream.args.start);
3372
0
        });
3373
3374
0
        quicly_stream_t *stream;
3375
0
        if ((stream = quicly_get_stream(conn, sent->data.stream.stream_id)) == NULL)
3376
0
            return 0;
3377
        /* FIXME handle rto error */
3378
0
        if ((ret = quicly_sendstate_lost(&stream->sendstate, &sent->data.stream.args)) != 0)
3379
0
            return ret;
3380
0
        if (stream->_send_aux.reset_stream.sender_state == QUICLY_SENDER_STATE_NONE)
3381
0
            resched_stream_data(stream);
3382
0
    }
3383
3384
0
    return 0;
3385
0
}
3386
3387
static quicly_error_t on_ack_max_stream_data(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked,
3388
                                             quicly_sent_t *sent)
3389
0
{
3390
0
    quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap));
3391
0
    quicly_stream_t *stream;
3392
3393
0
    if ((stream = quicly_get_stream(conn, sent->data.stream.stream_id)) != NULL) {
3394
0
        if (acked) {
3395
0
            quicly_maxsender_acked(&stream->_send_aux.max_stream_data_sender, &sent->data.max_stream_data.args);
3396
0
        } else {
3397
0
            quicly_maxsender_lost(&stream->_send_aux.max_stream_data_sender, &sent->data.max_stream_data.args);
3398
0
            if (should_send_max_stream_data(stream))
3399
0
                sched_stream_control(stream);
3400
0
        }
3401
0
    }
3402
3403
0
    return 0;
3404
0
}
3405
3406
static quicly_error_t on_ack_max_data(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent)
3407
0
{
3408
0
    quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap));
3409
3410
0
    if (acked) {
3411
0
        quicly_maxsender_acked(&conn->ingress.max_data.sender, &sent->data.max_data.args);
3412
0
    } else {
3413
0
        quicly_maxsender_lost(&conn->ingress.max_data.sender, &sent->data.max_data.args);
3414
0
    }
3415
3416
0
    return 0;
3417
0
}
3418
3419
static quicly_error_t on_ack_max_streams(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent)
3420
0
{
3421
0
    quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap));
3422
0
    quicly_maxsender_t *maxsender = sent->data.max_streams.uni ? &conn->ingress.max_streams.uni : &conn->ingress.max_streams.bidi;
3423
0
    assert(maxsender != NULL); /* we would only receive an ACK if we have sent the frame */
3424
3425
0
    if (acked) {
3426
0
        quicly_maxsender_acked(maxsender, &sent->data.max_streams.args);
3427
0
    } else {
3428
0
        quicly_maxsender_lost(maxsender, &sent->data.max_streams.args);
3429
0
    }
3430
3431
0
    return 0;
3432
0
}
3433
3434
static void on_ack_stream_state_sender(quicly_sender_state_t *sender_state, int acked)
3435
0
{
3436
0
    *sender_state = acked ? QUICLY_SENDER_STATE_ACKED : QUICLY_SENDER_STATE_SEND;
3437
0
}
3438
3439
static quicly_error_t on_ack_reset_stream(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent)
3440
0
{
3441
0
    quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap));
3442
0
    quicly_stream_t *stream;
3443
3444
0
    if ((stream = quicly_get_stream(conn, sent->data.stream_state_sender.stream_id)) != NULL) {
3445
0
        on_ack_stream_state_sender(&stream->_send_aux.reset_stream.sender_state, acked);
3446
0
        if (stream_is_destroyable(stream))
3447
0
            destroy_stream(stream, 0);
3448
0
    }
3449
3450
0
    return 0;
3451
0
}
3452
3453
static quicly_error_t on_ack_stop_sending(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent)
3454
0
{
3455
0
    quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap));
3456
0
    quicly_stream_t *stream;
3457
3458
0
    if ((stream = quicly_get_stream(conn, sent->data.stream_state_sender.stream_id)) != NULL) {
3459
0
        on_ack_stream_state_sender(&stream->_send_aux.stop_sending.sender_state, acked);
3460
0
        if (stream->_send_aux.stop_sending.sender_state != QUICLY_SENDER_STATE_ACKED)
3461
0
            sched_stream_control(stream);
3462
0
    }
3463
3464
0
    return 0;
3465
0
}
3466
3467
static quicly_error_t on_ack_streams_blocked(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked,
3468
                                             quicly_sent_t *sent)
3469
0
{
3470
0
    quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap));
3471
0
    struct st_quicly_max_streams_t *m =
3472
0
        sent->data.streams_blocked.uni ? &conn->egress.max_streams.uni : &conn->egress.max_streams.bidi;
3473
3474
0
    if (acked) {
3475
0
        quicly_maxsender_acked(&m->blocked_sender, &sent->data.streams_blocked.args);
3476
0
    } else {
3477
0
        quicly_maxsender_lost(&m->blocked_sender, &sent->data.streams_blocked.args);
3478
0
    }
3479
3480
0
    return 0;
3481
0
}
3482
3483
static quicly_error_t on_ack_handshake_done(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked,
3484
                                            quicly_sent_t *sent)
3485
0
{
3486
0
    quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap));
3487
3488
    /* When lost, reschedule for transmission. When acked, suppress retransmission if scheduled. */
3489
0
    if (acked) {
3490
0
        conn->egress.pending_flows &= ~QUICLY_PENDING_FLOW_HANDSHAKE_DONE_BIT;
3491
0
    } else {
3492
0
        conn->egress.pending_flows |= QUICLY_PENDING_FLOW_HANDSHAKE_DONE_BIT;
3493
0
    }
3494
0
    return 0;
3495
0
}
3496
3497
static quicly_error_t on_ack_data_blocked(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent)
3498
0
{
3499
0
    quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap));
3500
3501
0
    if (conn->egress.max_data.permitted == sent->data.data_blocked.offset) {
3502
0
        if (acked) {
3503
0
            conn->egress.data_blocked = QUICLY_SENDER_STATE_ACKED;
3504
0
        } else if (packet->frames_in_flight && conn->egress.data_blocked == QUICLY_SENDER_STATE_UNACKED) {
3505
0
            conn->egress.data_blocked = QUICLY_SENDER_STATE_SEND;
3506
0
            conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;
3507
0
        }
3508
0
    }
3509
3510
0
    return 0;
3511
0
}
3512
3513
static quicly_error_t on_ack_stream_data_blocked_frame(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked,
3514
                                                       quicly_sent_t *sent)
3515
0
{
3516
0
    quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap));
3517
0
    quicly_stream_t *stream;
3518
3519
0
    if ((stream = quicly_get_stream(conn, sent->data.stream_data_blocked.stream_id)) == NULL)
3520
0
        return 0;
3521
3522
0
    if (stream->_send_aux.max_stream_data == sent->data.stream_data_blocked.offset) {
3523
0
        if (acked) {
3524
0
            stream->_send_aux.blocked = QUICLY_SENDER_STATE_ACKED;
3525
0
        } else if (packet->frames_in_flight && stream->_send_aux.blocked == QUICLY_SENDER_STATE_UNACKED) {
3526
0
            stream->_send_aux.blocked = QUICLY_SENDER_STATE_SEND;
3527
0
            sched_stream_control(stream);
3528
0
        }
3529
0
    }
3530
3531
0
    return 0;
3532
0
}
3533
3534
static quicly_error_t on_ack_new_token(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent)
3535
0
{
3536
0
    quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap));
3537
3538
0
    if (sent->data.new_token.is_inflight) {
3539
0
        --conn->egress.new_token.num_inflight;
3540
0
        sent->data.new_token.is_inflight = 0;
3541
0
    }
3542
0
    if (acked) {
3543
0
        QUICLY_PROBE(NEW_TOKEN_ACKED, conn, conn->stash.now, sent->data.new_token.generation);
3544
0
        QUICLY_LOG_CONN(new_token_acked, conn, { PTLS_LOG_ELEMENT_UNSIGNED(generation, sent->data.new_token.generation); });
3545
0
        if (conn->egress.new_token.max_acked < sent->data.new_token.generation)
3546
0
            conn->egress.new_token.max_acked = sent->data.new_token.generation;
3547
0
    }
3548
3549
0
    if (conn->egress.new_token.num_inflight == 0 && conn->egress.new_token.max_acked < conn->egress.new_token.generation)
3550
0
        conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;
3551
3552
0
    return 0;
3553
0
}
3554
3555
static quicly_error_t on_ack_new_connection_id(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked,
3556
                                               quicly_sent_t *sent)
3557
0
{
3558
0
    quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap));
3559
0
    uint64_t sequence = sent->data.new_connection_id.sequence;
3560
3561
0
    if (acked) {
3562
0
        quicly_local_cid_on_acked(&conn->super.local.cid_set, sequence);
3563
0
    } else {
3564
0
        if (quicly_local_cid_on_lost(&conn->super.local.cid_set, sequence))
3565
0
            conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;
3566
0
    }
3567
3568
0
    return 0;
3569
0
}
3570
3571
static quicly_error_t on_ack_retire_connection_id(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked,
3572
                                                  quicly_sent_t *sent)
3573
0
{
3574
0
    quicly_conn_t *conn = (quicly_conn_t *)((char *)map - offsetof(quicly_conn_t, egress.loss.sentmap));
3575
0
    uint64_t sequence = sent->data.retire_connection_id.sequence;
3576
0
    int ret;
3577
3578
0
    if (!acked) {
3579
0
        if ((ret = quicly_remote_cid_push_retired(&conn->super.remote.cid_set, sequence)) != 0)
3580
0
            return ret;
3581
0
        conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;
3582
0
    }
3583
3584
0
    return 0;
3585
0
}
3586
3587
static uint32_t calc_pacer_send_rate(quicly_conn_t *conn)
3588
0
{
3589
0
    uint32_t multiplier;
3590
3591
0
    if (conn->egress.cc.num_loss_episodes == 0) {
3592
0
        if (quicly_cc_in_jumpstart(&conn->egress.cc)) {
3593
0
            multiplier = 1;
3594
0
        } else {
3595
0
            multiplier = quicly_cc_rapid_start_use_3x(&conn->egress.cc.rapid_start, &conn->egress.loss.rtt) ? 3 : 2;
3596
0
        }
3597
0
    } else {
3598
        /* We use of 2x during congestion avoidance, which is different from Linux using 1.25x. The rationale behind this choice is
3599
         * that 1.25x is not sufficiently aggressive immediately after a loss event. Following a loss event, the congestion window
3600
         * (CWND) is halved (i.e., beta), but the RTT remains high for one RTT and SRTT can remain high even loger, since it is a
3601
         * moving average adjusted with each ACK received. Consequently, if the multiplier is set to 1.25x, the calculated send rate
3602
         * could drop to as low as 1.25 * 1/2 = 0.625. By using a 2x multiplier, the send rate is guaranteed to become no less than
3603
         * that immediately before the loss event, which would have been the link throughput. */
3604
0
        multiplier = 2;
3605
0
    }
3606
3607
0
    return quicly_pacer_calc_send_rate(multiplier, conn->egress.cc.cwnd, conn->egress.loss.rtt.smoothed);
3608
0
}
3609
3610
static int should_send_datagram_frame(quicly_conn_t *conn)
3611
0
{
3612
0
    if (conn->egress.datagram_frame_payloads.count == 0)
3613
0
        return 0;
3614
0
    if (conn->application == NULL)
3615
0
        return 0;
3616
0
    if (conn->application->cipher.egress.key.aead == NULL)
3617
0
        return 0;
3618
0
    return 1;
3619
0
}
3620
3621
static inline uint64_t calc_amplification_limit_allowance(quicly_conn_t *conn)
3622
0
{
3623
0
    if (conn->super.remote.address_validation.validated)
3624
0
        return UINT64_MAX;
3625
0
    uint64_t budget = conn->super.stats.num_bytes.received * conn->super.ctx->pre_validation_amplification_limit;
3626
0
    if (budget <= conn->super.stats.num_bytes.sent)
3627
0
        return 0;
3628
0
    return budget - conn->super.stats.num_bytes.sent;
3629
0
}
3630
3631
/* Helper function to compute send window based on:
3632
 * * state of peer validation,
3633
 * * current cwnd,
3634
 * * minimum send requirements in |min_bytes_to_send|, and
3635
 * * if sending is to be restricted to the minimum, indicated in |restrict_sending|
3636
 */
3637
static size_t calc_send_window(quicly_conn_t *conn, size_t min_bytes_to_send, uint64_t amp_window, uint64_t pacer_window,
3638
                               int restrict_sending)
3639
0
{
3640
0
    uint64_t window = 0;
3641
0
    if (restrict_sending) {
3642
        /* Send min_bytes_to_send on PTO */
3643
0
        window = min_bytes_to_send;
3644
0
    } else {
3645
        /* Limit to cwnd */
3646
0
        if (conn->egress.cc.cwnd > conn->egress.loss.sentmap.bytes_in_flight) {
3647
0
            window = conn->egress.cc.cwnd - conn->egress.loss.sentmap.bytes_in_flight;
3648
0
            if (window > pacer_window)
3649
0
                window = pacer_window;
3650
0
        }
3651
        /* Allow at least one packet on time-threshold loss detection */
3652
0
        window = window > min_bytes_to_send ? window : min_bytes_to_send;
3653
0
    }
3654
    /* Cap the window by the amount allowed by address validation */
3655
0
    if (amp_window < window)
3656
0
        window = amp_window;
3657
3658
0
    return window;
3659
0
}
3660
3661
/**
3662
 * Checks if the server is waiting for ClientFinished. When that is the case, the loss timer is deactivated, to avoid repeatedly
3663
 * sending 1-RTT packets while the client spends time verifying the certificate chain at the same time buffering 1-RTT packets.
3664
 */
3665
static int is_point5rtt_with_no_handshake_data_to_send(quicly_conn_t *conn)
3666
0
{
3667
    /* bail out unless this is a server-side connection waiting for ClientFinished */
3668
0
    if (!(conn->handshake != NULL && conn->application != NULL && !quicly_is_client(conn)))
3669
0
        return 0;
3670
0
    quicly_stream_t *stream = quicly_get_stream(conn, (quicly_stream_id_t)-1 - QUICLY_EPOCH_HANDSHAKE);
3671
0
    assert(stream != NULL);
3672
0
    return stream->sendstate.pending.num_ranges == 0 && stream->sendstate.acked.ranges[0].end == stream->sendstate.size_inflight;
3673
0
}
3674
3675
static int64_t pacer_can_send_at(quicly_conn_t *conn)
3676
0
{
3677
0
    if (conn->egress.pacer == NULL)
3678
0
        return 0;
3679
3680
0
    uint32_t bytes_per_msec = calc_pacer_send_rate(conn);
3681
0
    return quicly_pacer_can_send_at(conn->egress.pacer, bytes_per_msec, conn->egress.max_udp_payload_size);
3682
0
}
3683
3684
int64_t quicly_get_first_timeout(quicly_conn_t *conn)
3685
0
{
3686
0
    if (conn->super.state >= QUICLY_STATE_CLOSING)
3687
0
        return conn->egress.send_ack_at;
3688
3689
0
    if (should_send_datagram_frame(conn))
3690
0
        return 0;
3691
3692
0
    uint64_t amp_window = calc_amplification_limit_allowance(conn);
3693
0
    int64_t at = conn->idle_timeout.at, pacer_at = pacer_can_send_at(conn);
3694
3695
    /* reduce at to the moment pacer provides credit, if we are not CC-limited and there's something to be sent over CC */
3696
0
    if (pacer_at < at && calc_send_window(conn, 0, amp_window, UINT64_MAX, 0) > 0) {
3697
0
        if (conn->egress.pending_flows != 0) {
3698
            /* crypto streams (as indicated by lower 4 bits) can be sent whenever CWND is available; other flows need application
3699
             * packet number space */
3700
0
            if ((conn->application != NULL && conn->application->cipher.egress.key.header_protection != NULL) ||
3701
0
                (conn->egress.pending_flows & 0xf) != 0)
3702
0
                at = pacer_at;
3703
0
        }
3704
0
        if (pacer_at < at && (quicly_linklist_is_linked(&conn->egress.pending_streams.control) || scheduler_can_send(conn)))
3705
0
            at = pacer_at;
3706
0
    }
3707
3708
    /* if something can be sent, return the earliest timeout. Otherwise return the idle timeout. */
3709
0
    if (amp_window > 0) {
3710
0
        if (conn->egress.loss.alarm_at < at && !is_point5rtt_with_no_handshake_data_to_send(conn))
3711
0
            at = conn->egress.loss.alarm_at;
3712
0
        if (conn->egress.send_ack_at < at)
3713
0
            at = conn->egress.send_ack_at;
3714
0
    }
3715
0
    if (at > conn->egress.send_probe_at)
3716
0
        at = conn->egress.send_probe_at;
3717
3718
0
    return at;
3719
0
}
3720
3721
uint64_t quicly_get_next_expected_packet_number(quicly_conn_t *conn)
3722
0
{
3723
0
    if (!conn->application)
3724
0
        return UINT64_MAX;
3725
3726
0
    return conn->application->super.next_expected_packet_number;
3727
0
}
3728
3729
static int setup_path_dcid(quicly_conn_t *conn, size_t path_index)
3730
0
{
3731
0
    struct st_quicly_conn_path_t *path = conn->paths[path_index];
3732
0
    quicly_remote_cid_set_t *set = &conn->super.remote.cid_set;
3733
0
    size_t found = SIZE_MAX;
3734
3735
0
    assert(path->dcid == UINT64_MAX);
3736
3737
0
    if (set->cids[0].cid.len == 0) {
3738
        /* if peer CID is zero-length, we can send packets to whatever address without the fear of corelation */
3739
0
        found = 0;
3740
0
    } else {
3741
        /* find the unused entry with a smallest sequence number */
3742
0
        for (size_t i = 0; i < PTLS_ELEMENTSOF(set->cids); ++i) {
3743
0
            if (set->cids[i].state == QUICLY_REMOTE_CID_AVAILABLE &&
3744
0
                (found == SIZE_MAX || set->cids[i].sequence < set->cids[found].sequence))
3745
0
                found = i;
3746
0
        }
3747
0
        if (found == SIZE_MAX)
3748
0
            return 0;
3749
0
    }
3750
3751
    /* associate */
3752
0
    set->cids[found].state = QUICLY_REMOTE_CID_IN_USE;
3753
0
    path->dcid = set->cids[found].sequence;
3754
3755
0
    return 1;
3756
0
}
3757
3758
static quicly_cid_t *get_dcid(quicly_conn_t *conn, size_t path_index)
3759
0
{
3760
0
    struct st_quicly_conn_path_t *path = conn->paths[path_index];
3761
3762
0
    assert(path->dcid != UINT64_MAX);
3763
3764
    /* lookup DCID and return */
3765
0
    for (size_t i = 0; i < PTLS_ELEMENTSOF(conn->super.remote.cid_set.cids); ++i) {
3766
0
        if (conn->super.remote.cid_set.cids[i].sequence == path->dcid)
3767
0
            return &conn->super.remote.cid_set.cids[i].cid;
3768
0
    }
3769
0
    assert(!"CID lookup failure");
3770
0
    return NULL;
3771
0
}
3772
3773
/**
3774
 * data structure that is used during one call through quicly_send()
3775
 */
3776
struct st_quicly_send_context_t {
3777
    /**
3778
     * current encryption context
3779
     */
3780
    struct {
3781
        struct st_quicly_cipher_context_t *cipher;
3782
        uint8_t first_byte;
3783
    } current;
3784
    /**
3785
     * packet under construction
3786
     */
3787
    struct {
3788
        struct st_quicly_cipher_context_t *cipher;
3789
        /**
3790
         * points to the first byte of the target QUIC packet. It will not point to packet->octets.base[0] when the datagram
3791
         * contains multiple QUIC packet.
3792
         */
3793
        uint8_t *first_byte_at;
3794
        /**
3795
         * if the target QUIC packet contains an ack-eliciting frame
3796
         */
3797
        uint8_t ack_eliciting : 1;
3798
        /**
3799
         * if the target datagram should be padded to full size
3800
         */
3801
        uint8_t full_size : 1;
3802
    } target;
3803
    /**
3804
     * output buffer into which list of datagrams is written
3805
     */
3806
    struct iovec *datagrams;
3807
    /**
3808
     * max number of datagrams that can be stored in |packets|
3809
     */
3810
    size_t max_datagrams;
3811
    /**
3812
     * number of datagrams currently stored in |packets|
3813
     */
3814
    size_t num_datagrams;
3815
    /**
3816
     * buffer in which packets are built
3817
     */
3818
    struct {
3819
        /**
3820
         * starting position of the current (or next) datagram
3821
         */
3822
        uint8_t *datagram;
3823
        /**
3824
         * end position of the payload buffer
3825
         */
3826
        uint8_t *end;
3827
    } payload_buf;
3828
    /**
3829
     * Currently available window for sending (in bytes); the value becomes negative when the sender uses more space than permitted.
3830
     * That happens because the sender operates at packet-level rather than byte-level.
3831
     */
3832
    ssize_t send_window;
3833
    /**
3834
     * location where next frame should be written
3835
     */
3836
    uint8_t *dst;
3837
    /**
3838
     * end of the payload area, beyond which frames cannot be written
3839
     */
3840
    uint8_t *dst_end;
3841
    /**
3842
     * address at which payload starts
3843
     */
3844
    uint8_t *dst_payload_from;
3845
    /**
3846
     * index of `conn->paths[]` to which we are sending
3847
     */
3848
    size_t path_index;
3849
    /**
3850
     * DCID to be used for the path
3851
     */
3852
    quicly_cid_t *dcid;
3853
    /**
3854
     * if `conn->egress.send_probe_at` should be recalculated
3855
     */
3856
    unsigned recalc_send_probe_at : 1;
3857
};
3858
3859
static quicly_error_t commit_send_packet(quicly_conn_t *conn, quicly_send_context_t *s, int coalesced)
3860
0
{
3861
0
    size_t datagram_size, packet_bytes_in_flight;
3862
3863
0
    assert(s->target.cipher->aead != NULL);
3864
3865
0
    assert(s->dst != s->dst_payload_from);
3866
3867
    /* pad so that the pn + payload would be at least 4 bytes */
3868
0
    while (s->dst - s->dst_payload_from < QUICLY_MAX_PN_SIZE - QUICLY_SEND_PN_SIZE)
3869
0
        *s->dst++ = QUICLY_FRAME_TYPE_PADDING;
3870
3871
0
    if (!coalesced && s->target.full_size) {
3872
0
        assert(s->num_datagrams == 0 || s->datagrams[s->num_datagrams - 1].iov_len == conn->egress.max_udp_payload_size);
3873
0
        const size_t max_size = conn->egress.max_udp_payload_size - QUICLY_AEAD_TAG_SIZE;
3874
0
        assert(s->dst - s->payload_buf.datagram <= max_size);
3875
0
        memset(s->dst, QUICLY_FRAME_TYPE_PADDING, s->payload_buf.datagram + max_size - s->dst);
3876
0
        s->dst = s->payload_buf.datagram + max_size;
3877
0
    }
3878
3879
    /* encode packet size, packet number, key-phase */
3880
0
    if (QUICLY_PACKET_IS_LONG_HEADER(*s->target.first_byte_at)) {
3881
0
        uint16_t length = s->dst - s->dst_payload_from + s->target.cipher->aead->algo->tag_size + QUICLY_SEND_PN_SIZE;
3882
        /* length is always 2 bytes, see _do_prepare_packet */
3883
0
        length |= 0x4000;
3884
0
        quicly_encode16(s->dst_payload_from - QUICLY_SEND_PN_SIZE - 2, length);
3885
0
        switch (*s->target.first_byte_at & QUICLY_PACKET_TYPE_BITMASK) {
3886
0
        case QUICLY_PACKET_TYPE_INITIAL:
3887
0
            conn->super.stats.num_packets.initial_sent++;
3888
0
            break;
3889
0
        case QUICLY_PACKET_TYPE_0RTT:
3890
0
            conn->super.stats.num_packets.zero_rtt_sent++;
3891
0
            break;
3892
0
        case QUICLY_PACKET_TYPE_HANDSHAKE:
3893
0
            conn->super.stats.num_packets.handshake_sent++;
3894
0
            break;
3895
0
        }
3896
0
    } else {
3897
0
        if (conn->egress.packet_number >= conn->application->cipher.egress.key_update_pn.next) {
3898
0
            int ret;
3899
0
            if ((ret = update_1rtt_egress_key(conn)) != 0)
3900
0
                return ret;
3901
0
        }
3902
0
        if ((conn->application->cipher.egress.key_phase & 1) != 0)
3903
0
            *s->target.first_byte_at |= QUICLY_KEY_PHASE_BIT;
3904
0
    }
3905
0
    quicly_encode16(s->dst_payload_from - QUICLY_SEND_PN_SIZE, (uint16_t)conn->egress.packet_number);
3906
3907
    /* encrypt the packet */
3908
0
    s->dst += s->target.cipher->aead->algo->tag_size;
3909
0
    datagram_size = s->dst - s->payload_buf.datagram;
3910
0
    assert(datagram_size <= conn->egress.max_udp_payload_size);
3911
3912
0
    conn->super.ctx->crypto_engine->encrypt_packet(
3913
0
        conn->super.ctx->crypto_engine, conn, s->target.cipher->header_protection, s->target.cipher->aead,
3914
0
        ptls_iovec_init(s->payload_buf.datagram, datagram_size), s->target.first_byte_at - s->payload_buf.datagram,
3915
0
        s->dst_payload_from - s->payload_buf.datagram, conn->egress.packet_number, coalesced);
3916
3917
    /* update CC, commit sentmap */
3918
0
    int on_promoted_path = s->path_index == 0 && !conn->paths[0]->initial;
3919
0
    if (s->target.ack_eliciting) {
3920
0
        packet_bytes_in_flight = s->dst - s->target.first_byte_at;
3921
0
        s->send_window -= packet_bytes_in_flight;
3922
0
    } else {
3923
0
        packet_bytes_in_flight = 0;
3924
0
    }
3925
0
    if (quicly_sentmap_is_open(&conn->egress.loss.sentmap)) {
3926
0
        int cc_limited = conn->egress.loss.sentmap.bytes_in_flight + packet_bytes_in_flight >=
3927
0
                         conn->egress.cc.cwnd / 2; /* for the rationale behind this formula, see handle_ack_frame */
3928
0
        quicly_sentmap_commit(&conn->egress.loss.sentmap, (uint16_t)packet_bytes_in_flight, cc_limited, on_promoted_path);
3929
0
    }
3930
3931
0
    if (packet_bytes_in_flight != 0) {
3932
0
        assert(s->path_index == 0 && "CC governs path 0 and data is sent only on that path");
3933
0
        conn->egress.cc.type->cc_on_sent(&conn->egress.cc, &conn->egress.loss, (uint32_t)packet_bytes_in_flight, conn->stash.now);
3934
0
        if (conn->egress.pacer != NULL)
3935
0
            quicly_pacer_consume_window(conn->egress.pacer, packet_bytes_in_flight);
3936
0
    }
3937
3938
0
    QUICLY_PROBE(PACKET_SENT, conn, conn->stash.now, conn->egress.packet_number, s->dst - s->target.first_byte_at,
3939
0
                 get_epoch(*s->target.first_byte_at), !s->target.ack_eliciting);
3940
0
    QUICLY_LOG_CONN(packet_sent, conn, {
3941
0
        PTLS_LOG_ELEMENT_UNSIGNED(pn, conn->egress.packet_number);
3942
0
        PTLS_LOG_ELEMENT_UNSIGNED(len, s->dst - s->target.first_byte_at);
3943
0
        PTLS_LOG_ELEMENT_UNSIGNED(packet_type, get_epoch(*s->target.first_byte_at));
3944
0
        PTLS_LOG_ELEMENT_BOOL(ack_only, !s->target.ack_eliciting);
3945
0
    });
3946
3947
0
    ++conn->egress.packet_number;
3948
0
    ++conn->super.stats.num_packets.sent;
3949
0
    ++conn->paths[s->path_index]->num_packets.sent;
3950
0
    if (on_promoted_path)
3951
0
        ++conn->super.stats.num_packets.sent_promoted_paths;
3952
3953
0
    if (!coalesced) {
3954
0
        conn->super.stats.num_bytes.sent += datagram_size;
3955
0
        s->datagrams[s->num_datagrams++] = (struct iovec){.iov_base = s->payload_buf.datagram, .iov_len = datagram_size};
3956
0
        s->payload_buf.datagram += datagram_size;
3957
0
        s->target.cipher = NULL;
3958
0
        s->target.first_byte_at = NULL;
3959
0
    }
3960
3961
    /* insert PN gap if necessary, registering the PN to the ack queue so that we'd close the connection in the event of receiving
3962
     * an ACK for that gap. */
3963
0
    if (conn->egress.packet_number >= conn->egress.next_pn_to_skip && !QUICLY_PACKET_IS_LONG_HEADER(s->current.first_byte) &&
3964
0
        conn->super.state < QUICLY_STATE_CLOSING) {
3965
0
        quicly_error_t ret;
3966
0
        if ((ret = quicly_sentmap_prepare(&conn->egress.loss.sentmap, conn->egress.packet_number, conn->stash.now,
3967
0
                                          QUICLY_EPOCH_1RTT)) != 0)
3968
0
            return ret;
3969
0
        if (quicly_sentmap_allocate(&conn->egress.loss.sentmap, on_invalid_ack) == NULL)
3970
0
            return PTLS_ERROR_NO_MEMORY;
3971
0
        quicly_sentmap_commit(&conn->egress.loss.sentmap, 0, 0, 0);
3972
0
        ++conn->egress.packet_number;
3973
0
        conn->egress.next_pn_to_skip = calc_next_pn_to_skip(conn->super.ctx->tls, conn->egress.packet_number, conn->egress.cc.cwnd,
3974
0
                                                            conn->egress.max_udp_payload_size);
3975
0
    }
3976
3977
0
    return 0;
3978
0
}
3979
3980
static inline uint8_t *emit_cid(uint8_t *dst, const quicly_cid_t *cid)
3981
0
{
3982
0
    if (cid->len != 0) {
3983
0
        memcpy(dst, cid->cid, cid->len);
3984
0
        dst += cid->len;
3985
0
    }
3986
0
    return dst;
3987
0
}
3988
3989
enum allocate_frame_type {
3990
    ALLOCATE_FRAME_TYPE_NON_ACK_ELICITING,
3991
    ALLOCATE_FRAME_TYPE_ACK_ELICITING,
3992
    ALLOCATE_FRAME_TYPE_ACK_ELICITING_NO_CC,
3993
};
3994
3995
static quicly_error_t do_allocate_frame(quicly_conn_t *conn, quicly_send_context_t *s, size_t min_space,
3996
                                        enum allocate_frame_type frame_type)
3997
0
{
3998
0
    int coalescible;
3999
0
    quicly_error_t ret;
4000
4001
0
    assert((s->current.first_byte & QUICLY_QUIC_BIT) != 0);
4002
4003
    /* allocate and setup the new packet if necessary */
4004
0
    if (s->dst_end - s->dst < min_space || s->target.first_byte_at == NULL) {
4005
0
        coalescible = 0;
4006
0
    } else if (((*s->target.first_byte_at ^ s->current.first_byte) & QUICLY_PACKET_TYPE_BITMASK) != 0) {
4007
0
        coalescible = QUICLY_PACKET_IS_LONG_HEADER(*s->target.first_byte_at);
4008
0
    } else if (s->dst_end - s->dst < min_space) {
4009
0
        coalescible = 0;
4010
0
    } else {
4011
        /* use the existing packet */
4012
0
        goto TargetReady;
4013
0
    }
4014
4015
    /* commit at the same time determining if we will coalesce the packets */
4016
0
    if (s->target.first_byte_at != NULL) {
4017
0
        if (coalescible) {
4018
0
            size_t overhead = 1 /* type */ + s->dcid->len + QUICLY_SEND_PN_SIZE + s->current.cipher->aead->algo->tag_size;
4019
0
            if (QUICLY_PACKET_IS_LONG_HEADER(s->current.first_byte))
4020
0
                overhead += 4 /* version */ + 1 /* cidl */ + s->dcid->len + conn->super.local.long_header_src_cid.len +
4021
0
                            (s->current.first_byte == QUICLY_PACKET_TYPE_INITIAL) /* token_length == 0 */ + 2 /* length */;
4022
0
            size_t packet_min_space = QUICLY_MAX_PN_SIZE - QUICLY_SEND_PN_SIZE;
4023
0
            if (packet_min_space < min_space)
4024
0
                packet_min_space = min_space;
4025
0
            if (overhead + packet_min_space > s->dst_end - s->dst)
4026
0
                coalescible = 0;
4027
0
        }
4028
        /* Close the packet under construction. Datagrams being returned by `quicly_send` are padded to full-size (except for the
4029
         * last one datagram) so that they can be sent at once using GSO. */
4030
0
        if (!coalescible)
4031
0
            s->target.full_size = 1;
4032
0
        if ((ret = commit_send_packet(conn, s, coalescible)) != 0)
4033
0
            return ret;
4034
0
    } else {
4035
0
        coalescible = 0;
4036
0
    }
4037
4038
    /* allocate packet */
4039
0
    if (coalescible) {
4040
0
        s->dst_end += s->target.cipher->aead->algo->tag_size; /* restore the AEAD tag size (tag size can differ bet. epochs) */
4041
0
        s->target.cipher = s->current.cipher;
4042
0
    } else {
4043
0
        if (s->num_datagrams >= s->max_datagrams)
4044
0
            return QUICLY_ERROR_SENDBUF_FULL;
4045
        /* note: send_window (ssize_t) can become negative; see doc-comment */
4046
0
        if (frame_type == ALLOCATE_FRAME_TYPE_ACK_ELICITING && s->send_window <= 0)
4047
0
            return QUICLY_ERROR_SENDBUF_FULL;
4048
0
        if (s->payload_buf.end - s->payload_buf.datagram < conn->egress.max_udp_payload_size)
4049
0
            return QUICLY_ERROR_SENDBUF_FULL;
4050
0
        s->target.cipher = s->current.cipher;
4051
0
        s->target.full_size = 0;
4052
0
        s->dst = s->payload_buf.datagram;
4053
0
        s->dst_end = s->dst + conn->egress.max_udp_payload_size;
4054
0
    }
4055
0
    s->target.ack_eliciting = 0;
4056
4057
0
    QUICLY_PROBE(PACKET_PREPARE, conn, conn->stash.now, s->current.first_byte, QUICLY_PROBE_HEXDUMP(s->dcid->cid, s->dcid->len));
4058
0
    QUICLY_LOG_CONN(packet_prepare, conn, {
4059
0
        PTLS_LOG_ELEMENT_UNSIGNED(first_octet, s->current.first_byte);
4060
0
        PTLS_LOG_ELEMENT_HEXDUMP(dcid, s->dcid->cid, s->dcid->len);
4061
0
    });
4062
4063
    /* emit header */
4064
0
    s->target.first_byte_at = s->dst;
4065
0
    *s->dst++ = s->current.first_byte | 0x1 /* pnlen == 2 */;
4066
0
    if (QUICLY_PACKET_IS_LONG_HEADER(s->current.first_byte)) {
4067
0
        s->dst = quicly_encode32(s->dst, conn->super.version);
4068
0
        *s->dst++ = s->dcid->len;
4069
0
        s->dst = emit_cid(s->dst, s->dcid);
4070
0
        *s->dst++ = conn->super.local.long_header_src_cid.len;
4071
0
        s->dst = emit_cid(s->dst, &conn->super.local.long_header_src_cid);
4072
        /* token */
4073
0
        if (s->current.first_byte == QUICLY_PACKET_TYPE_INITIAL) {
4074
0
            s->dst = quicly_encodev(s->dst, conn->token.len);
4075
0
            if (conn->token.len != 0) {
4076
0
                assert(s->dst_end - s->dst > conn->token.len);
4077
0
                memcpy(s->dst, conn->token.base, conn->token.len);
4078
0
                s->dst += conn->token.len;
4079
0
            }
4080
0
        }
4081
        /* payload length is filled laterwards (see commit_send_packet) */
4082
0
        *s->dst++ = 0;
4083
0
        *s->dst++ = 0;
4084
0
    } else {
4085
0
        s->dst = emit_cid(s->dst, s->dcid);
4086
0
    }
4087
0
    s->dst += QUICLY_SEND_PN_SIZE; /* space for PN bits, filled in at commit time */
4088
0
    s->dst_payload_from = s->dst;
4089
0
    assert(s->target.cipher->aead != NULL);
4090
0
    s->dst_end -= s->target.cipher->aead->algo->tag_size;
4091
0
    assert(s->dst_end - s->dst >= QUICLY_MAX_PN_SIZE - QUICLY_SEND_PN_SIZE);
4092
4093
0
    if (conn->super.state < QUICLY_STATE_CLOSING) {
4094
        /* register to sentmap */
4095
0
        uint8_t ack_epoch = get_epoch(s->current.first_byte);
4096
0
        if (ack_epoch == QUICLY_EPOCH_0RTT)
4097
0
            ack_epoch = QUICLY_EPOCH_1RTT;
4098
0
        if ((ret = quicly_sentmap_prepare(&conn->egress.loss.sentmap, conn->egress.packet_number, conn->stash.now, ack_epoch)) != 0)
4099
0
            return ret;
4100
        /* adjust ack-frequency */
4101
0
        if (frame_type == ALLOCATE_FRAME_TYPE_ACK_ELICITING && conn->stash.now >= conn->egress.ack_frequency.update_at &&
4102
0
            s->dst_end - s->dst >= QUICLY_ACK_FREQUENCY_FRAME_CAPACITY + min_space) {
4103
0
            assert(conn->super.remote.transport_params.min_ack_delay_usec != UINT64_MAX);
4104
0
            if (conn->egress.cc.num_loss_episodes >= QUICLY_FIRST_ACK_FREQUENCY_LOSS_EPISODE && conn->initial == NULL &&
4105
0
                conn->handshake == NULL) {
4106
0
                uint32_t fraction_of_cwnd = (uint32_t)((uint64_t)conn->egress.cc.cwnd * conn->super.ctx->ack_frequency / 1024);
4107
0
                if (fraction_of_cwnd >= conn->egress.max_udp_payload_size * 3) {
4108
0
                    uint32_t packet_tolerance = fraction_of_cwnd / conn->egress.max_udp_payload_size;
4109
0
                    if (packet_tolerance > QUICLY_MAX_PACKET_TOLERANCE)
4110
0
                        packet_tolerance = QUICLY_MAX_PACKET_TOLERANCE;
4111
                    /* TODO: Discuss (and possibly test) the strategy for choosing max_ack_delay; note the chosen value should be
4112
                     * passed to quicly_loss_detect_loss too. */
4113
0
                    uint64_t max_ack_delay = conn->super.remote.transport_params.max_ack_delay * 1000;
4114
0
                    uint64_t reordering_threshold =
4115
0
                        conn->egress.loss.thresholds.use_packet_based ? QUICLY_LOSS_DEFAULT_PACKET_THRESHOLD : 0;
4116
                    /* TODO: Adjust the max_ack_delay we use for loss recovery to be consistent with this value */
4117
0
                    s->dst = quicly_encode_ack_frequency_frame(s->dst, conn->egress.ack_frequency.sequence++, packet_tolerance,
4118
0
                                                               max_ack_delay, reordering_threshold);
4119
0
                    ++conn->super.stats.num_frames_sent.ack_frequency;
4120
0
                }
4121
0
            }
4122
0
            ack_frequency_set_next_update_at(conn);
4123
0
        }
4124
0
    }
4125
4126
0
TargetReady:
4127
0
    if (frame_type != ALLOCATE_FRAME_TYPE_NON_ACK_ELICITING) {
4128
0
        s->target.ack_eliciting = 1;
4129
0
        conn->egress.last_retransmittable_sent_at = conn->stash.now;
4130
0
    }
4131
0
    return 0;
4132
0
}
4133
4134
static quicly_error_t allocate_ack_eliciting_frame(quicly_conn_t *conn, quicly_send_context_t *s, size_t min_space,
4135
                                                   quicly_sent_t **sent, quicly_sent_acked_cb acked)
4136
0
{
4137
0
    quicly_error_t ret;
4138
4139
0
    if ((ret = do_allocate_frame(conn, s, min_space, ALLOCATE_FRAME_TYPE_ACK_ELICITING)) != 0)
4140
0
        return ret;
4141
0
    if ((*sent = quicly_sentmap_allocate(&conn->egress.loss.sentmap, acked)) == NULL)
4142
0
        return PTLS_ERROR_NO_MEMORY;
4143
4144
0
    return ret;
4145
0
}
4146
4147
static quicly_error_t send_ack(quicly_conn_t *conn, struct st_quicly_pn_space_t *space, quicly_send_context_t *s)
4148
0
{
4149
0
    uint64_t ack_delay;
4150
0
    quicly_error_t ret;
4151
4152
0
    if (space->ack_queue.num_ranges == 0)
4153
0
        return 0;
4154
4155
    /* calc ack_delay */
4156
0
    if (space->largest_pn_received_at < conn->stash.now) {
4157
        /* We underreport ack_delay up to 1 milliseconds assuming that QUICLY_LOCAL_ACK_DELAY_EXPONENT is 10. It's considered a
4158
         * non-issue because our time measurement is at millisecond granularity anyways. */
4159
0
        ack_delay = ((conn->stash.now - space->largest_pn_received_at) * 1000) >> QUICLY_LOCAL_ACK_DELAY_EXPONENT;
4160
0
    } else {
4161
0
        ack_delay = 0;
4162
0
    }
4163
4164
0
Emit: /* emit an ACK frame */
4165
0
    if ((ret = do_allocate_frame(conn, s, QUICLY_ACK_FRAME_CAPACITY, ALLOCATE_FRAME_TYPE_NON_ACK_ELICITING)) != 0)
4166
0
        return ret;
4167
0
    uint8_t *dst = s->dst;
4168
0
    dst = quicly_encode_ack_frame(dst, s->dst_end, &space->ack_queue, space->ecn_counts, ack_delay);
4169
4170
    /* when there's no space, retry with a new MTU-sized packet */
4171
0
    if (dst == NULL) {
4172
        /* [rare case] A coalesced packet might not have enough space to hold only an ACK. If so, pad it, as that's easier than
4173
         * rolling back. */
4174
0
        if (s->dst == s->dst_payload_from) {
4175
0
            assert(s->target.first_byte_at != s->payload_buf.datagram);
4176
0
            *s->dst++ = QUICLY_FRAME_TYPE_PADDING;
4177
0
        }
4178
0
        s->target.full_size = 1;
4179
0
        if ((ret = commit_send_packet(conn, s, 0)) != 0)
4180
0
            return ret;
4181
0
        goto Emit;
4182
0
    }
4183
4184
0
    ++conn->super.stats.num_frames_sent.ack;
4185
0
    QUICLY_PROBE(ACK_SEND, conn, conn->stash.now, space->ack_queue.ranges[space->ack_queue.num_ranges - 1].end - 1, ack_delay);
4186
0
    QUICLY_LOG_CONN(ack_send, conn, {
4187
0
        PTLS_LOG_ELEMENT_UNSIGNED(largest_acked, space->ack_queue.ranges[space->ack_queue.num_ranges - 1].end - 1);
4188
0
        PTLS_LOG_ELEMENT_UNSIGNED(ack_delay, ack_delay);
4189
0
    });
4190
4191
    /* when there are no less than QUICLY_NUM_ACK_BLOCKS_TO_INDUCE_ACKACK (8) gaps, bundle PING once every 4 packets being sent */
4192
0
    if (space->ack_queue.num_ranges >= QUICLY_NUM_ACK_BLOCKS_TO_INDUCE_ACKACK && conn->egress.packet_number % 4 == 0 &&
4193
0
        dst < s->dst_end) {
4194
0
        *dst++ = QUICLY_FRAME_TYPE_PING;
4195
0
        ++conn->super.stats.num_frames_sent.ping;
4196
0
        QUICLY_PROBE(PING_SEND, conn, conn->stash.now);
4197
0
        QUICLY_LOG_CONN(ping_send, conn, {});
4198
0
    }
4199
4200
0
    s->dst = dst;
4201
4202
0
    { /* save what's inflight */
4203
0
        size_t range_index = 0;
4204
0
        while (range_index < space->ack_queue.num_ranges) {
4205
0
            quicly_sent_t *sent;
4206
0
            struct st_quicly_sent_ack_additional_t *additional, *additional_end;
4207
            /* allocate */
4208
0
            if ((sent = quicly_sentmap_allocate(&conn->egress.loss.sentmap, on_ack_ack_ranges8)) == NULL)
4209
0
                return PTLS_ERROR_NO_MEMORY;
4210
            /* store the first range, as well as preparing references to the additional slots */
4211
0
            sent->data.ack.start = space->ack_queue.ranges[range_index].start;
4212
0
            uint64_t length = space->ack_queue.ranges[range_index].end - space->ack_queue.ranges[range_index].start;
4213
0
            if (length <= UINT8_MAX) {
4214
0
                sent->data.ack.ranges8.start_length = length;
4215
0
                additional = sent->data.ack.ranges8.additional;
4216
0
                additional_end = additional + PTLS_ELEMENTSOF(sent->data.ack.ranges8.additional);
4217
0
            } else {
4218
0
                sent->acked = on_ack_ack_ranges64;
4219
0
                sent->data.ack.ranges64.start_length = length;
4220
0
                additional = sent->data.ack.ranges64.additional;
4221
0
                additional_end = additional + PTLS_ELEMENTSOF(sent->data.ack.ranges64.additional);
4222
0
            }
4223
            /* store additional ranges, if possible */
4224
0
            for (++range_index; range_index < space->ack_queue.num_ranges && additional < additional_end;
4225
0
                 ++range_index, ++additional) {
4226
0
                uint64_t gap = space->ack_queue.ranges[range_index].start - space->ack_queue.ranges[range_index - 1].end;
4227
0
                uint64_t length = space->ack_queue.ranges[range_index].end - space->ack_queue.ranges[range_index].start;
4228
0
                if (gap > UINT8_MAX || length > UINT8_MAX)
4229
0
                    break;
4230
0
                additional->gap = gap;
4231
0
                additional->length = length;
4232
0
            }
4233
            /* additional list is zero-terminated, if not full */
4234
0
            if (additional < additional_end)
4235
0
                additional->gap = 0;
4236
0
        }
4237
0
    }
4238
4239
0
    space->unacked_count = 0;
4240
0
    update_smallest_unreported_missing_on_send_ack(&space->ack_queue, &space->largest_acked_unacked,
4241
0
                                                   &space->smallest_unreported_missing, space->reordering_threshold);
4242
0
    return ret;
4243
0
}
4244
4245
static quicly_error_t prepare_stream_state_sender(quicly_stream_t *stream, quicly_sender_state_t *sender, quicly_send_context_t *s,
4246
                                                  size_t min_space, quicly_sent_acked_cb ack_cb)
4247
0
{
4248
0
    quicly_sent_t *sent;
4249
0
    quicly_error_t ret;
4250
4251
0
    if ((ret = allocate_ack_eliciting_frame(stream->conn, s, min_space, &sent, ack_cb)) != 0)
4252
0
        return ret;
4253
0
    sent->data.stream_state_sender.stream_id = stream->stream_id;
4254
0
    *sender = QUICLY_SENDER_STATE_UNACKED;
4255
4256
0
    return 0;
4257
0
}
4258
4259
static quicly_error_t send_control_frames_of_stream(quicly_stream_t *stream, quicly_send_context_t *s)
4260
0
{
4261
0
    quicly_error_t ret;
4262
4263
    /* send STOP_SENDING if necessary */
4264
0
    if (stream->_send_aux.stop_sending.sender_state == QUICLY_SENDER_STATE_SEND) {
4265
        /* FIXME also send an empty STREAM frame */
4266
0
        if ((ret = prepare_stream_state_sender(stream, &stream->_send_aux.stop_sending.sender_state, s,
4267
0
                                               QUICLY_STOP_SENDING_FRAME_CAPACITY, on_ack_stop_sending)) != 0)
4268
0
            return ret;
4269
0
        s->dst = quicly_encode_stop_sending_frame(s->dst, stream->stream_id, stream->_send_aux.stop_sending.error_code);
4270
0
        ++stream->conn->super.stats.num_frames_sent.stop_sending;
4271
0
        QUICLY_PROBE(STOP_SENDING_SEND, stream->conn, stream->conn->stash.now, stream->stream_id,
4272
0
                     stream->_send_aux.stop_sending.error_code);
4273
0
        QUICLY_LOG_CONN(stop_sending_send, stream->conn, {
4274
0
            PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
4275
0
            PTLS_LOG_ELEMENT_UNSIGNED(error_code, stream->_send_aux.stop_sending.error_code);
4276
0
        });
4277
0
    }
4278
4279
    /* send MAX_STREAM_DATA if necessary */
4280
0
    if (should_send_max_stream_data(stream)) {
4281
0
        uint64_t new_value = stream->recvstate.data_off + stream->_recv_aux.window;
4282
0
        quicly_sent_t *sent;
4283
        /* prepare */
4284
0
        if ((ret = allocate_ack_eliciting_frame(stream->conn, s, QUICLY_MAX_STREAM_DATA_FRAME_CAPACITY, &sent,
4285
0
                                                on_ack_max_stream_data)) != 0)
4286
0
            return ret;
4287
        /* send */
4288
0
        s->dst = quicly_encode_max_stream_data_frame(s->dst, stream->stream_id, new_value);
4289
        /* register ack */
4290
0
        sent->data.max_stream_data.stream_id = stream->stream_id;
4291
0
        quicly_maxsender_record(&stream->_send_aux.max_stream_data_sender, new_value, &sent->data.max_stream_data.args);
4292
        /* update stats */
4293
0
        ++stream->conn->super.stats.num_frames_sent.max_stream_data;
4294
0
        QUICLY_PROBE(MAX_STREAM_DATA_SEND, stream->conn, stream->conn->stash.now, stream, new_value);
4295
0
        QUICLY_LOG_CONN(max_stream_data_send, stream->conn, {
4296
0
            PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
4297
0
            PTLS_LOG_ELEMENT_UNSIGNED(maximum, new_value);
4298
0
        });
4299
0
    }
4300
4301
    /* send RESET_STREAM if necessary */
4302
0
    if (stream->_send_aux.reset_stream.sender_state == QUICLY_SENDER_STATE_SEND) {
4303
0
        if ((ret = prepare_stream_state_sender(stream, &stream->_send_aux.reset_stream.sender_state, s, QUICLY_RST_FRAME_CAPACITY,
4304
0
                                               on_ack_reset_stream)) != 0)
4305
0
            return ret;
4306
0
        s->dst = quicly_encode_reset_stream_frame(s->dst, stream->stream_id, stream->_send_aux.reset_stream.error_code,
4307
0
                                                  stream->sendstate.size_inflight);
4308
0
        ++stream->conn->super.stats.num_frames_sent.reset_stream;
4309
0
        QUICLY_PROBE(RESET_STREAM_SEND, stream->conn, stream->conn->stash.now, stream->stream_id,
4310
0
                     stream->_send_aux.reset_stream.error_code, stream->sendstate.size_inflight);
4311
0
        QUICLY_LOG_CONN(reset_stream_send, stream->conn, {
4312
0
            PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
4313
0
            PTLS_LOG_ELEMENT_UNSIGNED(error_code, stream->_send_aux.reset_stream.error_code);
4314
0
            PTLS_LOG_ELEMENT_UNSIGNED(final_size, stream->sendstate.size_inflight);
4315
0
        });
4316
0
    }
4317
4318
    /* send STREAM_DATA_BLOCKED if necessary */
4319
0
    if (stream->_send_aux.blocked == QUICLY_SENDER_STATE_SEND) {
4320
0
        quicly_sent_t *sent;
4321
0
        if ((ret = allocate_ack_eliciting_frame(stream->conn, s, QUICLY_STREAM_DATA_BLOCKED_FRAME_CAPACITY, &sent,
4322
0
                                                on_ack_stream_data_blocked_frame)) != 0)
4323
0
            return ret;
4324
0
        uint64_t offset = stream->_send_aux.max_stream_data;
4325
0
        sent->data.stream_data_blocked.stream_id = stream->stream_id;
4326
0
        sent->data.stream_data_blocked.offset = offset;
4327
0
        s->dst = quicly_encode_stream_data_blocked_frame(s->dst, stream->stream_id, offset);
4328
0
        stream->_send_aux.blocked = QUICLY_SENDER_STATE_UNACKED;
4329
0
        ++stream->conn->super.stats.num_frames_sent.stream_data_blocked;
4330
0
        QUICLY_PROBE(STREAM_DATA_BLOCKED_SEND, stream->conn, stream->conn->stash.now, stream->stream_id, offset);
4331
0
        QUICLY_LOG_CONN(stream_data_blocked_send, stream->conn, {
4332
0
            PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
4333
0
            PTLS_LOG_ELEMENT_UNSIGNED(maximum, offset);
4334
0
        });
4335
0
    }
4336
4337
0
    return 0;
4338
0
}
4339
4340
static quicly_error_t send_stream_control_frames(quicly_conn_t *conn, quicly_send_context_t *s)
4341
0
{
4342
0
    quicly_error_t ret = 0;
4343
4344
0
    while (s->num_datagrams != s->max_datagrams && quicly_linklist_is_linked(&conn->egress.pending_streams.control)) {
4345
0
        quicly_stream_t *stream =
4346
0
            (void *)((char *)conn->egress.pending_streams.control.next - offsetof(quicly_stream_t, _send_aux.pending_link.control));
4347
0
        if ((ret = send_control_frames_of_stream(stream, s)) != 0)
4348
0
            goto Exit;
4349
0
        quicly_linklist_unlink(&stream->_send_aux.pending_link.control);
4350
0
    }
4351
4352
0
Exit:
4353
0
    return ret;
4354
0
}
4355
4356
int quicly_is_blocked(quicly_conn_t *conn)
4357
{
4358
    if (conn->egress.max_data.sent < conn->egress.max_data.permitted)
4359
        return 0;
4360
4361
    /* schedule the transmission of DATA_BLOCKED frame, if it's new information */
4362
    if (conn->egress.data_blocked == QUICLY_SENDER_STATE_NONE) {
4363
        conn->egress.data_blocked = QUICLY_SENDER_STATE_SEND;
4364
        conn->egress.pending_flows = QUICLY_PENDING_FLOW_OTHERS_BIT;
4365
    }
4366
4367
    return 1;
4368
}
4369
4370
int quicly_stream_can_send(quicly_stream_t *stream, int at_stream_level)
4371
0
{
4372
    /* return if there is nothing to be sent */
4373
0
    if (stream->sendstate.pending.num_ranges == 0)
4374
0
        return 0;
4375
4376
    /* return if flow is capped neither by MAX_STREAM_DATA nor (in case we are hitting connection-level flow control) by the number
4377
     * of bytes we've already sent */
4378
0
    uint64_t blocked_at = at_stream_level ? stream->_send_aux.max_stream_data : stream->sendstate.size_inflight;
4379
0
    if (stream->sendstate.pending.ranges[0].start < blocked_at)
4380
0
        return 1;
4381
    /* we can always send EOS, if that is the only thing to be sent */
4382
0
    if (stream->sendstate.pending.ranges[0].start >= stream->sendstate.final_size) {
4383
0
        assert(stream->sendstate.pending.ranges[0].start == stream->sendstate.final_size);
4384
0
        return 1;
4385
0
    }
4386
4387
    /* if known to be blocked at stream-level, schedule the emission of STREAM_DATA_BLOCKED frame */
4388
0
    if (at_stream_level && stream->_send_aux.blocked == QUICLY_SENDER_STATE_NONE) {
4389
0
        stream->_send_aux.blocked = QUICLY_SENDER_STATE_SEND;
4390
0
        sched_stream_control(stream);
4391
0
    }
4392
4393
0
    return 0;
4394
0
}
4395
4396
int quicly_can_send_data(quicly_conn_t *conn, quicly_send_context_t *s)
4397
{
4398
    return s->num_datagrams < s->max_datagrams;
4399
}
4400
4401
/**
4402
 * If necessary, changes the frame representation from one without length field to one that has if necessary. Or, as an alternative,
4403
 * prepends PADDING frames. Upon return, `dst` points to the end of the frame being built. `*len`, `*wrote_all`, `*frame_type_at`
4404
 * are also updated reflecting their values post-adjustment.
4405
 */
4406
static inline void adjust_stream_frame_layout(uint8_t **dst, uint8_t *const dst_end, size_t *len, int *wrote_all,
4407
                                              uint8_t **frame_at)
4408
0
{
4409
0
    size_t space_left = (dst_end - *dst) - *len, len_of_len = quicly_encodev_capacity(*len);
4410
4411
0
    if (**frame_at == QUICLY_FRAME_TYPE_CRYPTO) {
4412
        /* CRYPTO frame: adjust payload length to make space for the length field, if necessary. */
4413
0
        if (space_left < len_of_len) {
4414
0
            *len = dst_end - *dst - len_of_len;
4415
0
            *wrote_all = 0;
4416
0
        }
4417
0
    } else {
4418
        /* STREAM frame: insert length if space can be left for more frames. Otherwise, retain STREAM frame header omitting the
4419
         * length field, prepending PADDING if necessary. */
4420
0
        if (space_left <= len_of_len) {
4421
0
            if (space_left != 0) {
4422
0
                memmove(*frame_at + space_left, *frame_at, *dst + *len - *frame_at);
4423
0
                memset(*frame_at, QUICLY_FRAME_TYPE_PADDING, space_left);
4424
0
                *dst += space_left;
4425
0
                *frame_at += space_left;
4426
0
            }
4427
0
            *dst += *len;
4428
0
            return;
4429
0
        }
4430
0
        **frame_at |= QUICLY_FRAME_TYPE_STREAM_BIT_LEN;
4431
0
    }
4432
4433
    /* insert length before payload of `*len` bytes */
4434
0
    memmove(*dst + len_of_len, *dst, *len);
4435
0
    *dst = quicly_encodev(*dst, *len);
4436
0
    *dst += *len;
4437
0
}
4438
4439
quicly_error_t quicly_send_stream(quicly_stream_t *stream, quicly_send_context_t *s)
4440
{
4441
    uint64_t off = stream->sendstate.pending.ranges[0].start;
4442
    quicly_sent_t *sent;
4443
    uint8_t *dst; /* this pointer points to the current write position within the frame being built, while `s->dst` points to the
4444
                   * beginning of the frame. */
4445
    size_t len;
4446
    int wrote_all, is_fin;
4447
    quicly_error_t ret;
4448
4449
    /* write frame type, stream_id and offset, calculate capacity (and store that in `len`) */
4450
    if (stream->stream_id < 0) {
4451
        if ((ret = allocate_ack_eliciting_frame(stream->conn, s,
4452
                                                1 + quicly_encodev_capacity(off) + 2 /* type + offset + len + 1-byte payload */,
4453
                                                &sent, on_ack_stream)) != 0)
4454
            return ret;
4455
        dst = s->dst;
4456
        *dst++ = QUICLY_FRAME_TYPE_CRYPTO;
4457
        dst = quicly_encodev(dst, off);
4458
        len = s->dst_end - dst;
4459
    } else {
4460
        uint8_t header[18], *hp = header + 1;
4461
        hp = quicly_encodev(hp, stream->stream_id);
4462
        if (off != 0) {
4463
            header[0] = QUICLY_FRAME_TYPE_STREAM_BASE | QUICLY_FRAME_TYPE_STREAM_BIT_OFF;
4464
            hp = quicly_encodev(hp, off);
4465
        } else {
4466
            header[0] = QUICLY_FRAME_TYPE_STREAM_BASE;
4467
        }
4468
        if (off == stream->sendstate.final_size) {
4469
            assert(!quicly_sendstate_is_open(&stream->sendstate));
4470
            /* special case for emitting FIN only */
4471
            header[0] |= QUICLY_FRAME_TYPE_STREAM_BIT_FIN;
4472
            if ((ret = allocate_ack_eliciting_frame(stream->conn, s, hp - header, &sent, on_ack_stream)) != 0)
4473
                return ret;
4474
            if (hp - header != s->dst_end - s->dst) {
4475
                header[0] |= QUICLY_FRAME_TYPE_STREAM_BIT_LEN;
4476
                *hp++ = 0; /* empty length */
4477
            }
4478
            memcpy(s->dst, header, hp - header);
4479
            s->dst += hp - header;
4480
            len = 0;
4481
            wrote_all = 1;
4482
            is_fin = 1;
4483
            goto UpdateState;
4484
        }
4485
        if ((ret = allocate_ack_eliciting_frame(stream->conn, s, hp - header + 1, &sent, on_ack_stream)) != 0)
4486
            return ret;
4487
        dst = s->dst;
4488
        memcpy(dst, header, hp - header);
4489
        dst += hp - header;
4490
        len = s->dst_end - dst;
4491
        /* cap by max_stream_data */
4492
        if (off + len > stream->_send_aux.max_stream_data)
4493
            len = stream->_send_aux.max_stream_data - off;
4494
        /* cap by max_data */
4495
        if (off + len > stream->sendstate.size_inflight) {
4496
            uint64_t new_bytes = off + len - stream->sendstate.size_inflight;
4497
            if (new_bytes > stream->conn->egress.max_data.permitted - stream->conn->egress.max_data.sent) {
4498
                size_t max_stream_data =
4499
                    stream->sendstate.size_inflight + stream->conn->egress.max_data.permitted - stream->conn->egress.max_data.sent;
4500
                len = max_stream_data - off;
4501
            }
4502
        }
4503
    }
4504
    { /* cap len to the current range */
4505
        uint64_t range_capacity = stream->sendstate.pending.ranges[0].end - off;
4506
        if (off + range_capacity > stream->sendstate.final_size) {
4507
            assert(!quicly_sendstate_is_open(&stream->sendstate));
4508
            assert(range_capacity > 1); /* see the special case above */
4509
            range_capacity -= 1;
4510
        }
4511
        if (len > range_capacity)
4512
            len = range_capacity;
4513
    }
4514
4515
    /* Write payload, adjusting len to actual size. Note that `on_send_emit` might fail (e.g., when underlying pread(2) fails), in
4516
     * which case the application will either close the connection immediately or reset the stream. If that happens, we return
4517
     * immediately without updating state. */
4518
    assert(len != 0);
4519
    size_t emit_off = (size_t)(off - stream->sendstate.acked.ranges[0].end);
4520
    QUICLY_PROBE(STREAM_ON_SEND_EMIT, stream->conn, stream->conn->stash.now, stream, emit_off, len);
4521
    QUICLY_LOG_CONN(stream_on_send_emit, stream->conn, {
4522
        PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
4523
        PTLS_LOG_ELEMENT_UNSIGNED(off, off);
4524
        PTLS_LOG_ELEMENT_UNSIGNED(capacity, len);
4525
    });
4526
    stream->callbacks->on_send_emit(stream, emit_off, dst, &len, &wrote_all);
4527
    if (stream->conn->super.state >= QUICLY_STATE_CLOSING) {
4528
        return QUICLY_ERROR_IS_CLOSING;
4529
    } else if (stream->_send_aux.reset_stream.sender_state != QUICLY_SENDER_STATE_NONE) {
4530
        return 0;
4531
    }
4532
    assert(len != 0);
4533
4534
    adjust_stream_frame_layout(&dst, s->dst_end, &len, &wrote_all, &s->dst);
4535
4536
    /* determine if the frame incorporates FIN */
4537
    if (off + len == stream->sendstate.final_size) {
4538
        assert(!quicly_sendstate_is_open(&stream->sendstate));
4539
        assert(s->dst != NULL);
4540
        is_fin = 1;
4541
        *s->dst |= QUICLY_FRAME_TYPE_STREAM_BIT_FIN;
4542
    } else {
4543
        is_fin = 0;
4544
    }
4545
4546
    /* update s->dst now that frame construction is complete */
4547
    s->dst = dst;
4548
4549
UpdateState:
4550
    if (stream->stream_id < 0) {
4551
        ++stream->conn->super.stats.num_frames_sent.crypto;
4552
    } else {
4553
        ++stream->conn->super.stats.num_frames_sent.stream;
4554
    }
4555
    stream->conn->super.stats.num_bytes.stream_data_sent += len;
4556
    if (off < stream->sendstate.size_inflight)
4557
        stream->conn->super.stats.num_bytes.stream_data_resent +=
4558
            (stream->sendstate.size_inflight < off + len ? stream->sendstate.size_inflight : off + len) - off;
4559
    QUICLY_PROBE(STREAM_SEND, stream->conn, stream->conn->stash.now, stream, off, s->dst - len, len, is_fin, wrote_all);
4560
    QUICLY_LOG_CONN(stream_send, stream->conn, {
4561
        PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
4562
        PTLS_LOG_ELEMENT_UNSIGNED(off, off);
4563
        PTLS_LOG_APPDATA_ELEMENT_HEXDUMP(data, s->dst - len, len);
4564
        PTLS_LOG_ELEMENT_BOOL(is_fin, is_fin);
4565
        PTLS_LOG_ELEMENT_BOOL(wrote_all, wrote_all);
4566
    });
4567
4568
    QUICLY_PROBE(QUICTRACE_SEND_STREAM, stream->conn, stream->conn->stash.now, stream, off, len, is_fin);
4569
    /* update sendstate (and also MAX_DATA counter) */
4570
    if (stream->sendstate.size_inflight < off + len) {
4571
        if (stream->stream_id >= 0)
4572
            stream->conn->egress.max_data.sent += off + len - stream->sendstate.size_inflight;
4573
        stream->sendstate.size_inflight = off + len;
4574
    }
4575
    if ((ret = quicly_ranges_subtract(&stream->sendstate.pending, off, off + len + is_fin)) != 0)
4576
        return ret;
4577
    if (wrote_all) {
4578
        if ((ret = quicly_ranges_subtract(&stream->sendstate.pending, stream->sendstate.size_inflight, UINT64_MAX)) != 0)
4579
            return ret;
4580
    }
4581
4582
    /* setup sentmap */
4583
    sent->data.stream.stream_id = stream->stream_id;
4584
    sent->data.stream.args.start = off;
4585
    sent->data.stream.args.end = off + len + is_fin;
4586
4587
    return 0;
4588
}
4589
4590
static inline quicly_error_t init_acks_iter(quicly_conn_t *conn, quicly_sentmap_iter_t *iter)
4591
0
{
4592
0
    return quicly_loss_init_sentmap_iter(&conn->egress.loss, iter, conn->stash.now,
4593
0
                                         conn->super.remote.transport_params.max_ack_delay,
4594
0
                                         conn->super.state >= QUICLY_STATE_CLOSING);
4595
0
}
4596
4597
quicly_error_t discard_sentmap_by_epoch(quicly_conn_t *conn, unsigned ack_epochs)
4598
0
{
4599
0
    quicly_sentmap_iter_t iter;
4600
0
    const quicly_sent_packet_t *sent;
4601
0
    quicly_error_t ret;
4602
4603
0
    if ((ret = init_acks_iter(conn, &iter)) != 0)
4604
0
        return ret;
4605
4606
0
    while ((sent = quicly_sentmap_get(&iter))->packet_number != UINT64_MAX) {
4607
0
        if ((ack_epochs & (1u << sent->ack_epoch)) != 0) {
4608
0
            if ((ret = quicly_sentmap_update(&conn->egress.loss.sentmap, &iter, QUICLY_SENTMAP_EVENT_EXPIRED)) != 0)
4609
0
                return ret;
4610
0
        } else {
4611
0
            quicly_sentmap_skip(&iter);
4612
0
        }
4613
0
    }
4614
4615
0
    return ret;
4616
0
}
4617
4618
/**
4619
 * Mark frames of given epoch as pending, until `*bytes_to_mark` becomes zero.
4620
 */
4621
static quicly_error_t mark_frames_on_pto(quicly_conn_t *conn, uint8_t ack_epoch, size_t *bytes_to_mark)
4622
0
{
4623
0
    quicly_sentmap_iter_t iter;
4624
0
    const quicly_sent_packet_t *sent;
4625
0
    quicly_error_t ret;
4626
4627
0
    if ((ret = init_acks_iter(conn, &iter)) != 0)
4628
0
        return ret;
4629
4630
0
    while ((sent = quicly_sentmap_get(&iter))->packet_number != UINT64_MAX) {
4631
0
        if (sent->ack_epoch == ack_epoch && sent->frames_in_flight) {
4632
0
            *bytes_to_mark = *bytes_to_mark > sent->cc_bytes_in_flight ? *bytes_to_mark - sent->cc_bytes_in_flight : 0;
4633
0
            if ((ret = quicly_sentmap_update(&conn->egress.loss.sentmap, &iter, QUICLY_SENTMAP_EVENT_PTO)) != 0)
4634
0
                return ret;
4635
0
            assert(!sent->frames_in_flight);
4636
0
            if (*bytes_to_mark == 0)
4637
0
                break;
4638
0
        } else {
4639
0
            quicly_sentmap_skip(&iter);
4640
0
        }
4641
0
    }
4642
4643
0
    return 0;
4644
0
}
4645
4646
static void notify_congestion_to_cc(quicly_conn_t *conn, uint16_t lost_bytes, uint64_t lost_pn)
4647
0
{
4648
0
    if (conn->egress.pn_path_start <= lost_pn) {
4649
0
        conn->egress.cc.type->cc_on_lost(&conn->egress.cc, &conn->egress.loss, lost_bytes, lost_pn, conn->egress.packet_number,
4650
0
                                         conn->stash.now, conn->egress.max_udp_payload_size);
4651
0
        QUICLY_PROBE(CC_CONGESTION, conn, conn->stash.now, lost_pn + 1, conn->egress.loss.sentmap.bytes_in_flight,
4652
0
                     conn->egress.cc.cwnd);
4653
0
        QUICLY_LOG_CONN(cc_congestion, conn, {
4654
0
            PTLS_LOG_ELEMENT_UNSIGNED(max_lost_pn, lost_pn + 1);
4655
0
            PTLS_LOG_ELEMENT_UNSIGNED(flight, conn->egress.loss.sentmap.bytes_in_flight);
4656
0
            PTLS_LOG_ELEMENT_UNSIGNED(cwnd, conn->egress.cc.cwnd);
4657
0
        });
4658
0
    }
4659
0
}
4660
4661
static void on_loss_detected(quicly_loss_t *loss, const quicly_sent_packet_t *lost_packet, int is_time_threshold)
4662
0
{
4663
0
    quicly_conn_t *conn = (void *)((char *)loss - offsetof(quicly_conn_t, egress.loss));
4664
4665
0
    assert(lost_packet->cc_bytes_in_flight != 0);
4666
4667
0
    ++conn->super.stats.num_packets.lost;
4668
0
    if (is_time_threshold)
4669
0
        ++conn->super.stats.num_packets.lost_time_threshold;
4670
0
    conn->super.stats.num_bytes.lost += lost_packet->cc_bytes_in_flight;
4671
0
    QUICLY_PROBE(PACKET_LOST, conn, conn->stash.now, lost_packet->packet_number, lost_packet->ack_epoch);
4672
0
    QUICLY_LOG_CONN(packet_lost, conn, {
4673
0
        PTLS_LOG_ELEMENT_UNSIGNED(pn, lost_packet->packet_number);
4674
0
        PTLS_LOG_ELEMENT_UNSIGNED(packet_type, lost_packet->ack_epoch);
4675
0
    });
4676
0
    notify_congestion_to_cc(conn, lost_packet->cc_bytes_in_flight, lost_packet->packet_number);
4677
0
    QUICLY_PROBE(QUICTRACE_CC_LOST, conn, conn->stash.now, &conn->egress.loss.rtt, conn->egress.cc.cwnd,
4678
0
                 conn->egress.loss.sentmap.bytes_in_flight);
4679
0
}
4680
4681
static quicly_error_t send_max_streams(quicly_conn_t *conn, int uni, quicly_send_context_t *s)
4682
0
{
4683
0
    if (!should_send_max_streams(conn, uni))
4684
0
        return 0;
4685
4686
0
    quicly_maxsender_t *maxsender = uni ? &conn->ingress.max_streams.uni : &conn->ingress.max_streams.bidi;
4687
0
    struct st_quicly_conn_streamgroup_state_t *group = uni ? &conn->super.remote.uni : &conn->super.remote.bidi;
4688
0
    quicly_error_t ret;
4689
4690
0
    uint64_t new_count =
4691
0
        group->next_stream_id / 4 +
4692
0
        (uni ? conn->super.ctx->transport_params.max_streams_uni : conn->super.ctx->transport_params.max_streams_bidi) -
4693
0
        group->num_streams;
4694
4695
0
    quicly_sent_t *sent;
4696
0
    if ((ret = allocate_ack_eliciting_frame(conn, s, QUICLY_MAX_STREAMS_FRAME_CAPACITY, &sent, on_ack_max_streams)) != 0)
4697
0
        return ret;
4698
0
    s->dst = quicly_encode_max_streams_frame(s->dst, uni, new_count);
4699
0
    sent->data.max_streams.uni = uni;
4700
0
    quicly_maxsender_record(maxsender, new_count, &sent->data.max_streams.args);
4701
4702
0
    if (uni) {
4703
0
        ++conn->super.stats.num_frames_sent.max_streams_uni;
4704
0
    } else {
4705
0
        ++conn->super.stats.num_frames_sent.max_streams_bidi;
4706
0
    }
4707
0
    QUICLY_PROBE(MAX_STREAMS_SEND, conn, conn->stash.now, new_count, uni);
4708
0
    QUICLY_LOG_CONN(max_streams_send, conn, {
4709
0
        PTLS_LOG_ELEMENT_UNSIGNED(maximum, new_count);
4710
0
        PTLS_LOG_ELEMENT_BOOL(is_unidirectional, uni);
4711
0
    });
4712
4713
0
    return 0;
4714
0
}
4715
4716
static quicly_error_t send_streams_blocked(quicly_conn_t *conn, int uni, quicly_send_context_t *s)
4717
0
{
4718
0
    quicly_linklist_t *blocked_list = uni ? &conn->egress.pending_streams.blocked.uni : &conn->egress.pending_streams.blocked.bidi;
4719
0
    quicly_error_t ret;
4720
4721
0
    if (!quicly_linklist_is_linked(blocked_list))
4722
0
        return 0;
4723
4724
0
    struct st_quicly_max_streams_t *max_streams = uni ? &conn->egress.max_streams.uni : &conn->egress.max_streams.bidi;
4725
0
    quicly_stream_t *oldest_blocked_stream =
4726
0
        (void *)((char *)blocked_list->next - offsetof(quicly_stream_t, _send_aux.pending_link.control));
4727
0
    assert(max_streams->count == oldest_blocked_stream->stream_id / 4);
4728
4729
0
    if (!quicly_maxsender_should_send_blocked(&max_streams->blocked_sender, max_streams->count))
4730
0
        return 0;
4731
4732
0
    quicly_sent_t *sent;
4733
0
    if ((ret = allocate_ack_eliciting_frame(conn, s, QUICLY_STREAMS_BLOCKED_FRAME_CAPACITY, &sent, on_ack_streams_blocked)) != 0)
4734
0
        return ret;
4735
0
    s->dst = quicly_encode_streams_blocked_frame(s->dst, uni, max_streams->count);
4736
0
    sent->data.streams_blocked.uni = uni;
4737
0
    quicly_maxsender_record(&max_streams->blocked_sender, max_streams->count, &sent->data.streams_blocked.args);
4738
4739
0
    ++conn->super.stats.num_frames_sent.streams_blocked;
4740
0
    QUICLY_PROBE(STREAMS_BLOCKED_SEND, conn, conn->stash.now, max_streams->count, uni);
4741
0
    QUICLY_LOG_CONN(streams_blocked_send, conn, {
4742
0
        PTLS_LOG_ELEMENT_UNSIGNED(maximum, max_streams->count);
4743
0
        PTLS_LOG_ELEMENT_BOOL(is_unidirectional, uni);
4744
0
    });
4745
4746
0
    return 0;
4747
0
}
4748
4749
static void open_blocked_streams(quicly_conn_t *conn, int uni)
4750
0
{
4751
0
    uint64_t count;
4752
0
    quicly_linklist_t *anchor;
4753
4754
0
    if (uni) {
4755
0
        count = conn->egress.max_streams.uni.count;
4756
0
        anchor = &conn->egress.pending_streams.blocked.uni;
4757
0
    } else {
4758
0
        count = conn->egress.max_streams.bidi.count;
4759
0
        anchor = &conn->egress.pending_streams.blocked.bidi;
4760
0
    }
4761
4762
0
    while (quicly_linklist_is_linked(anchor)) {
4763
0
        quicly_stream_t *stream = (void *)((char *)anchor->next - offsetof(quicly_stream_t, _send_aux.pending_link.control));
4764
0
        if (stream->stream_id / 4 >= count)
4765
0
            break;
4766
0
        assert(stream->streams_blocked);
4767
0
        quicly_linklist_unlink(&stream->_send_aux.pending_link.control);
4768
0
        stream->streams_blocked = 0;
4769
0
        stream->_send_aux.max_stream_data = quicly_stream_is_unidirectional(stream->stream_id)
4770
0
                                                ? conn->super.remote.transport_params.max_stream_data.uni
4771
0
                                                : conn->super.remote.transport_params.max_stream_data.bidi_remote;
4772
        /* TODO retain separate flags for stream states so that we do not always need to sched for both control and data */
4773
0
        sched_stream_control(stream);
4774
0
        resched_stream_data(stream);
4775
0
    }
4776
0
}
4777
4778
static quicly_error_t send_handshake_done(quicly_conn_t *conn, quicly_send_context_t *s)
4779
0
{
4780
0
    quicly_sent_t *sent;
4781
0
    quicly_error_t ret;
4782
4783
0
    if ((ret = allocate_ack_eliciting_frame(conn, s, 1, &sent, on_ack_handshake_done)) != 0)
4784
0
        goto Exit;
4785
0
    *s->dst++ = QUICLY_FRAME_TYPE_HANDSHAKE_DONE;
4786
0
    conn->egress.pending_flows &= ~QUICLY_PENDING_FLOW_HANDSHAKE_DONE_BIT;
4787
0
    ++conn->super.stats.num_frames_sent.handshake_done;
4788
0
    QUICLY_PROBE(HANDSHAKE_DONE_SEND, conn, conn->stash.now);
4789
0
    QUICLY_LOG_CONN(handshake_done_send, conn, {});
4790
4791
0
    ret = 0;
4792
0
Exit:
4793
0
    return ret;
4794
0
}
4795
4796
static quicly_error_t send_data_blocked(quicly_conn_t *conn, quicly_send_context_t *s)
4797
0
{
4798
0
    quicly_sent_t *sent;
4799
0
    quicly_error_t ret;
4800
4801
0
    uint64_t offset = conn->egress.max_data.permitted;
4802
0
    if ((ret = allocate_ack_eliciting_frame(conn, s, QUICLY_DATA_BLOCKED_FRAME_CAPACITY, &sent, on_ack_data_blocked)) != 0)
4803
0
        goto Exit;
4804
0
    sent->data.data_blocked.offset = offset;
4805
0
    s->dst = quicly_encode_data_blocked_frame(s->dst, offset);
4806
0
    conn->egress.data_blocked = QUICLY_SENDER_STATE_UNACKED;
4807
4808
0
    ++conn->super.stats.num_frames_sent.data_blocked;
4809
0
    QUICLY_PROBE(DATA_BLOCKED_SEND, conn, conn->stash.now, offset);
4810
0
    QUICLY_LOG_CONN(data_blocked_send, conn, { PTLS_LOG_ELEMENT_UNSIGNED(off, offset); });
4811
4812
0
    ret = 0;
4813
0
Exit:
4814
0
    return ret;
4815
0
}
4816
4817
#define QUICLY_RESUMPTION_ENTRY_TYPE_CAREFUL_RESUME 0
4818
4819
/**
4820
 * derives size of the new CWND given previous delivery rate and min RTTs of the previous and the new session
4821
 */
4822
static uint32_t derive_jumpstart_cwnd(quicly_context_t *ctx, uint32_t new_rtt, uint64_t prev_rate, uint32_t prev_rtt)
4823
0
{
4824
    /* convert previous rate to CWND size */
4825
0
    double cwnd = (double)prev_rate * prev_rtt / 1000;
4826
4827
    /* if new RTT is smaller, reduce new CWND so that the rate does not become greater than the previous session */
4828
0
    if (new_rtt < prev_rtt)
4829
0
        cwnd = cwnd * new_rtt / prev_rtt;
4830
4831
    /* cap to the configured value */
4832
0
    size_t jumpstart_cwnd =
4833
0
        quicly_cc_calc_initial_cwnd(ctx->max_jumpstart_cwnd_packets, ctx->transport_params.max_udp_payload_size);
4834
0
    if (cwnd > jumpstart_cwnd)
4835
0
        cwnd = jumpstart_cwnd;
4836
4837
0
    return (uint32_t)cwnd;
4838
0
}
4839
4840
static int decode_resumption_info(const uint8_t *src, size_t len, uint64_t *rate, uint32_t *min_rtt)
4841
0
{
4842
0
    const uint8_t *end = src + len;
4843
0
    int ret = 0;
4844
4845
0
    *rate = 0;
4846
4847
0
    while (src < end) {
4848
0
        uint64_t id;
4849
0
        if ((id = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
4850
0
            ret = PTLS_ALERT_DECODE_ERROR;
4851
0
            goto Exit;
4852
0
        }
4853
0
        ptls_decode_open_block(src, end, -1, {
4854
0
            switch (id) {
4855
0
            case QUICLY_RESUMPTION_ENTRY_TYPE_CAREFUL_RESUME: {
4856
0
                if ((*rate = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
4857
0
                    ret = PTLS_ALERT_DECODE_ERROR;
4858
0
                    goto Exit;
4859
0
                }
4860
0
                uint64_t v;
4861
0
                if ((v = ptls_decode_quicint(&src, end)) > UINT32_MAX) {
4862
0
                    ret = PTLS_ALERT_DECODE_ERROR;
4863
0
                    goto Exit;
4864
0
                }
4865
0
                *min_rtt = (uint32_t)v;
4866
0
            } break;
4867
0
            default:
4868
                /* ignore unknown types */
4869
0
                src = end;
4870
0
                break;
4871
0
            }
4872
0
        });
4873
0
    }
4874
4875
0
Exit:
4876
0
    return ret;
4877
0
}
4878
4879
static size_t encode_resumption_info(quicly_conn_t *conn, uint8_t *dst, size_t capacity)
4880
0
{
4881
0
    ptls_buffer_t buf;
4882
0
    int ret;
4883
4884
0
    ptls_buffer_init(&buf, dst, capacity);
4885
4886
0
#define PUSH_ENTRY(id, block)                                                                                                      \
4887
0
    do {                                                                                                                           \
4888
0
        ptls_buffer_push_quicint(&buf, (id));                                                                                      \
4889
0
        ptls_buffer_push_block(&buf, -1, block);                                                                                   \
4890
0
    } while (0)
4891
4892
    /* emit delivery rate for Careful Resume */
4893
0
    if (conn->super.stats.token_sent.rate != 0 && conn->super.stats.token_sent.rtt != 0) {
4894
0
        PUSH_ENTRY(QUICLY_RESUMPTION_ENTRY_TYPE_CAREFUL_RESUME, {
4895
0
            ptls_buffer_push_quicint(&buf, conn->super.stats.token_sent.rate);
4896
0
            ptls_buffer_push_quicint(&buf, conn->super.stats.token_sent.rtt);
4897
0
        });
4898
0
    }
4899
4900
0
#undef PUSH_ENTRY
4901
4902
0
Exit:
4903
0
    assert(!buf.is_allocated);
4904
0
    return buf.off;
4905
0
}
4906
4907
static quicly_error_t send_resumption_token(quicly_conn_t *conn, quicly_send_context_t *s)
4908
0
{
4909
    /* fill conn->super.stats.token_sent the information we are sending now */
4910
0
    calc_resume_sendrate(conn, &conn->super.stats.token_sent.rate, &conn->super.stats.token_sent.rtt);
4911
4912
0
    quicly_address_token_plaintext_t token;
4913
0
    ptls_buffer_t tokenbuf;
4914
0
    uint8_t tokenbuf_small[128];
4915
0
    quicly_sent_t *sent;
4916
0
    quicly_error_t ret;
4917
4918
0
    ptls_buffer_init(&tokenbuf, tokenbuf_small, sizeof(tokenbuf_small));
4919
4920
    /* build token */
4921
0
    token =
4922
0
        (quicly_address_token_plaintext_t){QUICLY_ADDRESS_TOKEN_TYPE_RESUMPTION, conn->super.ctx->now->cb(conn->super.ctx->now)};
4923
0
    token.remote = conn->paths[0]->address.remote;
4924
0
    token.resumption.len = encode_resumption_info(conn, token.resumption.bytes, sizeof(token.resumption.bytes));
4925
4926
    /* encrypt */
4927
0
    if ((ret = conn->super.ctx->generate_resumption_token->cb(conn->super.ctx->generate_resumption_token, conn, &tokenbuf,
4928
0
                                                              &token)) != 0)
4929
0
        goto Exit;
4930
0
    assert(tokenbuf.off < QUICLY_MIN_CLIENT_INITIAL_SIZE / 2 && "this is a ballpark figure, but tokens ought to be small");
4931
4932
    /* emit frame */
4933
0
    if ((ret = allocate_ack_eliciting_frame(conn, s, quicly_new_token_frame_capacity(ptls_iovec_init(tokenbuf.base, tokenbuf.off)),
4934
0
                                            &sent, on_ack_new_token)) != 0)
4935
0
        goto Exit;
4936
0
    ++conn->egress.new_token.num_inflight;
4937
0
    sent->data.new_token.is_inflight = 1;
4938
0
    sent->data.new_token.generation = conn->egress.new_token.generation;
4939
0
    s->dst = quicly_encode_new_token_frame(s->dst, ptls_iovec_init(tokenbuf.base, tokenbuf.off));
4940
0
    conn->egress.pending_flows &= ~QUICLY_PENDING_FLOW_NEW_TOKEN_BIT;
4941
4942
0
    ++conn->super.stats.num_frames_sent.new_token;
4943
0
    QUICLY_PROBE(NEW_TOKEN_SEND, conn, conn->stash.now, tokenbuf.base, tokenbuf.off, sent->data.new_token.generation);
4944
0
    QUICLY_LOG_CONN(new_token_send, conn, {
4945
0
        PTLS_LOG_ELEMENT_HEXDUMP(token, tokenbuf.base, tokenbuf.off);
4946
0
        PTLS_LOG_ELEMENT_UNSIGNED(generation, sent->data.new_token.generation);
4947
0
    });
4948
0
    ret = 0;
4949
0
Exit:
4950
0
    ptls_buffer_dispose(&tokenbuf);
4951
0
    return ret;
4952
0
}
4953
4954
size_t quicly_send_version_negotiation(quicly_context_t *ctx, ptls_iovec_t dest_cid, ptls_iovec_t src_cid, const uint32_t *versions,
4955
                                       void *payload)
4956
0
{
4957
0
    uint8_t *dst = payload;
4958
4959
    /* type_flags */
4960
0
    ctx->tls->random_bytes(dst, 1);
4961
0
    *dst |= QUICLY_LONG_HEADER_BIT;
4962
0
    ++dst;
4963
    /* version */
4964
0
    dst = quicly_encode32(dst, 0);
4965
    /* connection-id */
4966
0
    *dst++ = dest_cid.len;
4967
0
    if (dest_cid.len != 0) {
4968
0
        memcpy(dst, dest_cid.base, dest_cid.len);
4969
0
        dst += dest_cid.len;
4970
0
    }
4971
0
    *dst++ = src_cid.len;
4972
0
    if (src_cid.len != 0) {
4973
0
        memcpy(dst, src_cid.base, src_cid.len);
4974
0
        dst += src_cid.len;
4975
0
    }
4976
    /* supported_versions */
4977
0
    for (const uint32_t *v = versions; *v != 0; ++v)
4978
0
        dst = quicly_encode32(dst, *v);
4979
    /* add a greasing version. This also covers the case where an empty list is specified by the caller to indicate rejection. */
4980
0
    uint32_t grease_version = 0;
4981
0
    if (src_cid.len >= sizeof(grease_version))
4982
0
        memcpy(&grease_version, src_cid.base, sizeof(grease_version));
4983
0
    grease_version = (grease_version & 0xf0f0f0f0) | 0x0a0a0a0a;
4984
0
    dst = quicly_encode32(dst, grease_version);
4985
4986
0
    return dst - (uint8_t *)payload;
4987
0
}
4988
4989
quicly_error_t quicly_retry_calc_cidpair_hash(ptls_hash_algorithm_t *sha256, ptls_iovec_t client_cid, ptls_iovec_t server_cid,
4990
                                              uint64_t *value)
4991
0
{
4992
0
    uint8_t digest[PTLS_SHA256_DIGEST_SIZE], buf[(QUICLY_MAX_CID_LEN_V1 + 1) * 2], *p = buf;
4993
0
    int ret;
4994
4995
0
    *p++ = (uint8_t)client_cid.len;
4996
0
    memcpy(p, client_cid.base, client_cid.len);
4997
0
    p += client_cid.len;
4998
0
    *p++ = (uint8_t)server_cid.len;
4999
0
    memcpy(p, server_cid.base, server_cid.len);
5000
0
    p += server_cid.len;
5001
5002
0
    if ((ret = ptls_calc_hash(sha256, digest, buf, p - buf)) != 0)
5003
0
        return ret;
5004
0
    p = digest;
5005
0
    *value = quicly_decode64((void *)&p);
5006
5007
0
    return 0;
5008
0
}
5009
5010
size_t quicly_send_retry(quicly_context_t *ctx, ptls_aead_context_t *token_encrypt_ctx, uint32_t protocol_version,
5011
                         struct sockaddr *dest_addr, ptls_iovec_t dest_cid, struct sockaddr *src_addr, ptls_iovec_t src_cid,
5012
                         ptls_iovec_t odcid, ptls_iovec_t token_prefix, ptls_iovec_t appdata,
5013
                         ptls_aead_context_t **retry_aead_cache, uint8_t *datagram)
5014
0
{
5015
0
    quicly_address_token_plaintext_t token;
5016
0
    ptls_buffer_t buf;
5017
0
    quicly_error_t ret;
5018
5019
0
    assert(!(src_cid.len == odcid.len && memcmp(src_cid.base, odcid.base, src_cid.len) == 0));
5020
5021
    /* build token as plaintext */
5022
0
    token = (quicly_address_token_plaintext_t){QUICLY_ADDRESS_TOKEN_TYPE_RETRY, ctx->now->cb(ctx->now)};
5023
0
    set_address(&token.remote, dest_addr);
5024
0
    set_address(&token.local, src_addr);
5025
5026
0
    quicly_set_cid(&token.retry.original_dcid, odcid);
5027
0
    quicly_set_cid(&token.retry.client_cid, dest_cid);
5028
0
    quicly_set_cid(&token.retry.server_cid, src_cid);
5029
0
    if (appdata.len != 0) {
5030
0
        assert(appdata.len <= sizeof(token.appdata.bytes));
5031
0
        memcpy(token.appdata.bytes, appdata.base, appdata.len);
5032
0
        token.appdata.len = appdata.len;
5033
0
    }
5034
5035
    /* start building the packet */
5036
0
    ptls_buffer_init(&buf, datagram, QUICLY_MIN_CLIENT_INITIAL_SIZE);
5037
5038
    /* first generate a pseudo packet */
5039
0
    ptls_buffer_push_block(&buf, 1, { ptls_buffer_pushv(&buf, odcid.base, odcid.len); });
5040
0
    ctx->tls->random_bytes(buf.base + buf.off, 1);
5041
0
    buf.base[buf.off] = QUICLY_PACKET_TYPE_RETRY | (buf.base[buf.off] & 0x0f);
5042
0
    ++buf.off;
5043
0
    ptls_buffer_push32(&buf, protocol_version);
5044
0
    ptls_buffer_push_block(&buf, 1, { ptls_buffer_pushv(&buf, dest_cid.base, dest_cid.len); });
5045
0
    ptls_buffer_push_block(&buf, 1, { ptls_buffer_pushv(&buf, src_cid.base, src_cid.len); });
5046
0
    if (token_prefix.len != 0) {
5047
0
        assert(token_prefix.len <= buf.capacity - buf.off);
5048
0
        memcpy(buf.base + buf.off, token_prefix.base, token_prefix.len);
5049
0
        buf.off += token_prefix.len;
5050
0
    }
5051
0
    if ((ret = quicly_encrypt_address_token(ctx->tls->random_bytes, token_encrypt_ctx, &buf, buf.off - token_prefix.len, &token)) !=
5052
0
        0)
5053
0
        goto Exit;
5054
5055
    /* append AEAD tag */
5056
0
    ret = ptls_buffer_reserve(&buf, PTLS_AESGCM_TAG_SIZE);
5057
0
    assert(ret == 0);
5058
0
    assert(!buf.is_allocated && "retry packet is too large");
5059
0
    {
5060
0
        ptls_aead_context_t *aead =
5061
0
            retry_aead_cache != NULL && *retry_aead_cache != NULL ? *retry_aead_cache : create_retry_aead(ctx, protocol_version, 1);
5062
0
        ptls_aead_encrypt(aead, buf.base + buf.off, "", 0, 0, buf.base, buf.off);
5063
0
        if (retry_aead_cache != NULL) {
5064
0
            *retry_aead_cache = aead;
5065
0
        } else {
5066
0
            ptls_aead_free(aead);
5067
0
        }
5068
0
    }
5069
0
    buf.off += PTLS_AESGCM_TAG_SIZE;
5070
5071
    /* convert the image to a Retry packet, by stripping the ODCID field */
5072
0
    memmove(buf.base, buf.base + odcid.len + 1, buf.off - (odcid.len + 1));
5073
0
    buf.off -= odcid.len + 1;
5074
5075
0
    ret = 0;
5076
5077
0
Exit:
5078
0
    return ret == 0 ? buf.off : SIZE_MAX;
5079
0
}
5080
5081
static struct st_quicly_pn_space_t *setup_send_space(quicly_conn_t *conn, size_t epoch, quicly_send_context_t *s)
5082
0
{
5083
0
    struct st_quicly_pn_space_t *space = NULL;
5084
5085
0
    switch (epoch) {
5086
0
    case QUICLY_EPOCH_INITIAL:
5087
0
        if (conn->initial == NULL || (s->current.cipher = &conn->initial->cipher.egress)->aead == NULL)
5088
0
            return NULL;
5089
0
        s->current.first_byte = QUICLY_PACKET_TYPE_INITIAL;
5090
0
        space = &conn->initial->super;
5091
0
        break;
5092
0
    case QUICLY_EPOCH_HANDSHAKE:
5093
0
        if (conn->handshake == NULL || (s->current.cipher = &conn->handshake->cipher.egress)->aead == NULL)
5094
0
            return NULL;
5095
0
        s->current.first_byte = QUICLY_PACKET_TYPE_HANDSHAKE;
5096
0
        space = &conn->handshake->super;
5097
0
        break;
5098
0
    case QUICLY_EPOCH_0RTT:
5099
0
    case QUICLY_EPOCH_1RTT:
5100
0
        if (conn->application == NULL || conn->application->cipher.egress.key.header_protection == NULL)
5101
0
            return NULL;
5102
0
        if ((epoch == QUICLY_EPOCH_0RTT) == conn->application->one_rtt_writable)
5103
0
            return NULL;
5104
0
        s->current.cipher = &conn->application->cipher.egress.key;
5105
0
        s->current.first_byte = epoch == QUICLY_EPOCH_0RTT ? QUICLY_PACKET_TYPE_0RTT : QUICLY_QUIC_BIT;
5106
0
        space = &conn->application->super;
5107
0
        break;
5108
0
    default:
5109
0
        assert(!"logic flaw");
5110
0
        break;
5111
0
    }
5112
5113
0
    return space;
5114
0
}
5115
5116
static quicly_error_t send_handshake_flow(quicly_conn_t *conn, size_t epoch, quicly_send_context_t *s, int ack_only, int send_probe)
5117
0
{
5118
0
    struct st_quicly_pn_space_t *space;
5119
0
    quicly_error_t ret = 0;
5120
5121
    /* setup send epoch, or return if it's impossible to send in this epoch */
5122
0
    if ((space = setup_send_space(conn, epoch, s)) == NULL)
5123
0
        return 0;
5124
5125
    /* send ACK */
5126
0
    if (space != NULL && (space->unacked_count != 0 || send_probe))
5127
0
        if ((ret = send_ack(conn, space, s)) != 0)
5128
0
            goto Exit;
5129
5130
0
    if (!ack_only) {
5131
        /* send data */
5132
0
        while ((conn->egress.pending_flows & (uint8_t)(1 << epoch)) != 0) {
5133
0
            quicly_stream_t *stream = quicly_get_stream(conn, -(quicly_stream_id_t)(1 + epoch));
5134
0
            assert(stream != NULL);
5135
0
            if ((ret = quicly_send_stream(stream, s)) != 0)
5136
0
                goto Exit;
5137
0
            resched_stream_data(stream);
5138
0
            send_probe = 0;
5139
0
        }
5140
5141
        /* send probe if requested */
5142
0
        if (send_probe) {
5143
0
            if ((ret = do_allocate_frame(conn, s, 1, ALLOCATE_FRAME_TYPE_ACK_ELICITING)) != 0)
5144
0
                goto Exit;
5145
0
            *s->dst++ = QUICLY_FRAME_TYPE_PING;
5146
0
            conn->egress.last_retransmittable_sent_at = conn->stash.now;
5147
0
            ++conn->super.stats.num_frames_sent.ping;
5148
0
            QUICLY_PROBE(PING_SEND, conn, conn->stash.now);
5149
0
            QUICLY_LOG_CONN(ping_send, conn, {});
5150
0
        }
5151
0
    }
5152
5153
0
Exit:
5154
0
    return ret;
5155
0
}
5156
5157
static quicly_error_t send_connection_close(quicly_conn_t *conn, size_t epoch, quicly_send_context_t *s)
5158
0
{
5159
0
    uint64_t error_code, offending_frame_type;
5160
0
    const char *reason_phrase;
5161
0
    quicly_error_t ret;
5162
5163
    /* setup send epoch, or return if it's impossible to send in this epoch */
5164
0
    if (setup_send_space(conn, epoch, s) == NULL)
5165
0
        return 0;
5166
5167
    /* determine the payload, masking the application error when sending the frame using an unauthenticated epoch */
5168
0
    error_code = conn->egress.connection_close.error_code;
5169
0
    offending_frame_type = conn->egress.connection_close.frame_type;
5170
0
    reason_phrase = conn->egress.connection_close.reason_phrase;
5171
0
    if (offending_frame_type == UINT64_MAX) {
5172
0
        switch (get_epoch(s->current.first_byte)) {
5173
0
        case QUICLY_EPOCH_INITIAL:
5174
0
        case QUICLY_EPOCH_HANDSHAKE:
5175
0
            error_code = QUICLY_ERROR_GET_ERROR_CODE(QUICLY_TRANSPORT_ERROR_APPLICATION);
5176
0
            offending_frame_type = QUICLY_FRAME_TYPE_PADDING;
5177
0
            reason_phrase = "";
5178
0
            break;
5179
0
        }
5180
0
    }
5181
5182
    /* write frame */
5183
0
    if ((ret = do_allocate_frame(conn, s, quicly_close_frame_capacity(error_code, offending_frame_type, reason_phrase),
5184
0
                                 ALLOCATE_FRAME_TYPE_NON_ACK_ELICITING)) != 0)
5185
0
        return ret;
5186
0
    s->dst = quicly_encode_close_frame(s->dst, error_code, offending_frame_type, reason_phrase);
5187
5188
    /* update counter, probe */
5189
0
    if (offending_frame_type != UINT64_MAX) {
5190
0
        ++conn->super.stats.num_frames_sent.transport_close;
5191
0
        QUICLY_PROBE(TRANSPORT_CLOSE_SEND, conn, conn->stash.now, error_code, offending_frame_type, reason_phrase);
5192
0
        QUICLY_LOG_CONN(transport_close_send, conn, {
5193
0
            PTLS_LOG_ELEMENT_UNSIGNED(error_code, error_code);
5194
0
            PTLS_LOG_ELEMENT_UNSIGNED(frame_type, offending_frame_type);
5195
0
            PTLS_LOG_ELEMENT_UNSAFESTR(reason_phrase, reason_phrase, strlen(reason_phrase));
5196
0
        });
5197
0
    } else {
5198
0
        ++conn->super.stats.num_frames_sent.application_close;
5199
0
        QUICLY_PROBE(APPLICATION_CLOSE_SEND, conn, conn->stash.now, error_code, reason_phrase);
5200
0
        QUICLY_LOG_CONN(application_close_send, conn, {
5201
0
            PTLS_LOG_ELEMENT_UNSIGNED(error_code, error_code);
5202
0
            PTLS_LOG_ELEMENT_UNSAFESTR(reason_phrase, reason_phrase, strlen(reason_phrase));
5203
0
        });
5204
0
    }
5205
5206
0
    return 0;
5207
0
}
5208
5209
static quicly_error_t send_new_connection_id(quicly_conn_t *conn, quicly_send_context_t *s, struct st_quicly_local_cid_t *new_cid)
5210
0
{
5211
0
    quicly_sent_t *sent;
5212
0
    uint64_t retire_prior_to = 0; /* TODO */
5213
0
    quicly_error_t ret;
5214
5215
0
    if ((ret = allocate_ack_eliciting_frame(
5216
0
             conn, s, quicly_new_connection_id_frame_capacity(new_cid->sequence, retire_prior_to, new_cid->cid.len), &sent,
5217
0
             on_ack_new_connection_id)) != 0)
5218
0
        return ret;
5219
0
    sent->data.new_connection_id.sequence = new_cid->sequence;
5220
5221
0
    s->dst = quicly_encode_new_connection_id_frame(s->dst, new_cid->sequence, retire_prior_to, new_cid->cid.cid, new_cid->cid.len,
5222
0
                                                   new_cid->stateless_reset_token);
5223
5224
0
    ++conn->super.stats.num_frames_sent.new_connection_id;
5225
0
    QUICLY_PROBE(NEW_CONNECTION_ID_SEND, conn, conn->stash.now, new_cid->sequence, retire_prior_to,
5226
0
                 QUICLY_PROBE_HEXDUMP(new_cid->cid.cid, new_cid->cid.len),
5227
0
                 QUICLY_PROBE_HEXDUMP(new_cid->stateless_reset_token, QUICLY_STATELESS_RESET_TOKEN_LEN));
5228
0
    QUICLY_LOG_CONN(new_connection_id_send, conn, {
5229
0
        PTLS_LOG_ELEMENT_UNSIGNED(sequence, new_cid->sequence);
5230
0
        PTLS_LOG_ELEMENT_UNSIGNED(retire_prior_to, retire_prior_to);
5231
0
        PTLS_LOG_ELEMENT_HEXDUMP(cid, new_cid->cid.cid, new_cid->cid.len);
5232
0
        PTLS_LOG_ELEMENT_HEXDUMP(stateless_reset_token, new_cid->stateless_reset_token, QUICLY_STATELESS_RESET_TOKEN_LEN);
5233
0
    });
5234
5235
0
    return 0;
5236
0
}
5237
5238
static quicly_error_t send_retire_connection_id(quicly_conn_t *conn, quicly_send_context_t *s, uint64_t sequence)
5239
0
{
5240
0
    quicly_sent_t *sent;
5241
0
    quicly_error_t ret;
5242
5243
0
    if ((ret = allocate_ack_eliciting_frame(conn, s, quicly_retire_connection_id_frame_capacity(sequence), &sent,
5244
0
                                            on_ack_retire_connection_id)) != 0)
5245
0
        return ret;
5246
0
    sent->data.retire_connection_id.sequence = sequence;
5247
5248
0
    s->dst = quicly_encode_retire_connection_id_frame(s->dst, sequence);
5249
5250
0
    ++conn->super.stats.num_frames_sent.retire_connection_id;
5251
0
    QUICLY_PROBE(RETIRE_CONNECTION_ID_SEND, conn, conn->stash.now, sequence);
5252
0
    QUICLY_LOG_CONN(retire_connection_id_send, conn, { PTLS_LOG_ELEMENT_UNSIGNED(sequence, sequence); });
5253
5254
0
    return 0;
5255
0
}
5256
5257
static quicly_error_t send_path_challenge(quicly_conn_t *conn, quicly_send_context_t *s, int is_response, const uint8_t *data)
5258
0
{
5259
0
    quicly_error_t ret;
5260
5261
0
    if ((ret = do_allocate_frame(conn, s, QUICLY_PATH_CHALLENGE_FRAME_CAPACITY, ALLOCATE_FRAME_TYPE_NON_ACK_ELICITING)) != 0)
5262
0
        return ret;
5263
5264
0
    s->dst = quicly_encode_path_challenge_frame(s->dst, is_response, data);
5265
0
    s->target.full_size = 1; /* ensure that the path can transfer full-size packets */
5266
5267
0
    if (!is_response) {
5268
0
        ++conn->super.stats.num_frames_sent.path_challenge;
5269
0
        QUICLY_PROBE(PATH_CHALLENGE_SEND, conn, conn->stash.now, data, QUICLY_PATH_CHALLENGE_DATA_LEN);
5270
0
        QUICLY_LOG_CONN(path_challenge_send, conn, { PTLS_LOG_ELEMENT_HEXDUMP(data, data, QUICLY_PATH_CHALLENGE_DATA_LEN); });
5271
0
    } else {
5272
0
        ++conn->super.stats.num_frames_sent.path_response;
5273
0
        QUICLY_PROBE(PATH_RESPONSE_SEND, conn, conn->stash.now, data, QUICLY_PATH_CHALLENGE_DATA_LEN);
5274
0
        QUICLY_LOG_CONN(path_response_send, conn, { PTLS_LOG_ELEMENT_HEXDUMP(data, data, QUICLY_PATH_CHALLENGE_DATA_LEN); });
5275
0
    }
5276
5277
0
    return 0;
5278
0
}
5279
5280
static int update_traffic_key_cb(ptls_update_traffic_key_t *self, ptls_t *tls, int is_enc, size_t epoch, const void *secret)
5281
0
{
5282
0
    quicly_conn_t *conn = *ptls_get_data_ptr(tls);
5283
0
    ptls_context_t *tlsctx = ptls_get_context(tls);
5284
0
    ptls_cipher_suite_t *cipher = ptls_get_cipher(tls);
5285
0
    ptls_cipher_context_t **hp_slot;
5286
0
    ptls_aead_context_t **aead_slot;
5287
0
    int ret;
5288
0
    static const char *log_labels[2][4] = {
5289
0
        {NULL, "CLIENT_EARLY_TRAFFIC_SECRET", "CLIENT_HANDSHAKE_TRAFFIC_SECRET", "CLIENT_TRAFFIC_SECRET_0"},
5290
0
        {NULL, NULL, "SERVER_HANDSHAKE_TRAFFIC_SECRET", "SERVER_TRAFFIC_SECRET_0"}};
5291
0
    const char *log_label = log_labels[ptls_is_server(tls) == is_enc][epoch];
5292
5293
0
    QUICLY_PROBE(CRYPTO_UPDATE_SECRET, conn, conn->stash.now, is_enc, epoch, log_label,
5294
0
                 QUICLY_PROBE_HEXDUMP(secret, cipher->hash->digest_size));
5295
0
    QUICLY_LOG_CONN(crypto_update_secret, conn, {
5296
0
        PTLS_LOG_ELEMENT_BOOL(is_enc, is_enc);
5297
0
        PTLS_LOG_ELEMENT_UNSIGNED(epoch, epoch);
5298
0
        PTLS_LOG_ELEMENT_SAFESTR(label, log_label);
5299
0
        PTLS_LOG_APPDATA_ELEMENT_HEXDUMP(secret, secret, cipher->hash->digest_size);
5300
0
    });
5301
5302
0
    if (tlsctx->log_event != NULL) {
5303
0
        char hexbuf[PTLS_MAX_DIGEST_SIZE * 2 + 1];
5304
0
        ptls_hexdump(hexbuf, secret, cipher->hash->digest_size);
5305
0
        tlsctx->log_event->cb(tlsctx->log_event, tls, log_label, "%s", hexbuf);
5306
0
    }
5307
5308
0
#define SELECT_CIPHER_CONTEXT(p)                                                                                                   \
5309
0
    do {                                                                                                                           \
5310
0
        hp_slot = &(p)->header_protection;                                                                                         \
5311
0
        aead_slot = &(p)->aead;                                                                                                    \
5312
0
    } while (0)
5313
5314
0
    switch (epoch) {
5315
0
    case QUICLY_EPOCH_0RTT:
5316
0
        assert(is_enc == quicly_is_client(conn));
5317
0
        if (conn->application == NULL && (ret = setup_application_space(conn)) != 0)
5318
0
            return ret;
5319
0
        if (is_enc) {
5320
0
            SELECT_CIPHER_CONTEXT(&conn->application->cipher.egress.key);
5321
0
        } else {
5322
0
            hp_slot = &conn->application->cipher.ingress.header_protection.zero_rtt;
5323
0
            aead_slot = &conn->application->cipher.ingress.aead[1];
5324
0
            conn->delayed_packets.slots_newly_processible |= 1
5325
0
                                                             << (&conn->delayed_packets.zero_rtt - conn->delayed_packets.as_array);
5326
0
        }
5327
0
        break;
5328
0
    case QUICLY_EPOCH_HANDSHAKE:
5329
0
        if (conn->handshake == NULL && (ret = setup_handshake_space_and_flow(conn, QUICLY_EPOCH_HANDSHAKE)) != 0)
5330
0
            return ret;
5331
0
        SELECT_CIPHER_CONTEXT(is_enc ? &conn->handshake->cipher.egress : &conn->handshake->cipher.ingress);
5332
0
        if (!is_enc)
5333
0
            conn->delayed_packets.slots_newly_processible |= 1
5334
0
                                                             << (&conn->delayed_packets.handshake - conn->delayed_packets.as_array);
5335
0
        break;
5336
0
    case QUICLY_EPOCH_1RTT: {
5337
0
        if (is_enc)
5338
0
            if ((ret = compress_handshake_result(apply_remote_transport_params(conn))) != 0)
5339
0
                return ret;
5340
0
        if (conn->application == NULL && (ret = setup_application_space(conn)) != 0)
5341
0
            return ret;
5342
0
        uint8_t *secret_store;
5343
0
        if (is_enc) {
5344
0
            if (conn->application->cipher.egress.key.aead != NULL)
5345
0
                dispose_cipher(&conn->application->cipher.egress.key);
5346
0
            SELECT_CIPHER_CONTEXT(&conn->application->cipher.egress.key);
5347
0
            secret_store = conn->application->cipher.egress.secret;
5348
0
        } else {
5349
0
            hp_slot = &conn->application->cipher.ingress.header_protection.one_rtt;
5350
0
            aead_slot = &conn->application->cipher.ingress.aead[0];
5351
0
            secret_store = conn->application->cipher.ingress.secret;
5352
0
            conn->delayed_packets.slots_newly_processible |= 1 << (&conn->delayed_packets.one_rtt - conn->delayed_packets.as_array);
5353
0
        }
5354
0
        memcpy(secret_store, secret, cipher->hash->digest_size);
5355
0
    } break;
5356
0
    default:
5357
0
        assert(!"logic flaw");
5358
0
        break;
5359
0
    }
5360
5361
0
#undef SELECT_CIPHER_CONTEXT
5362
5363
0
    if ((ret = setup_cipher(conn, epoch, is_enc, hp_slot, aead_slot, cipher->aead, cipher->hash, secret)) != 0)
5364
0
        return ret;
5365
5366
0
    if (epoch == QUICLY_EPOCH_1RTT && is_enc) {
5367
        /* update states now that we have 1-RTT write key */
5368
0
        conn->application->one_rtt_writable = 1;
5369
0
        open_blocked_streams(conn, 1);
5370
0
        open_blocked_streams(conn, 0);
5371
0
        if (quicly_linklist_is_linked(&conn->egress.pending_streams.blocked.bidi) ||
5372
0
            quicly_linklist_is_linked(&conn->egress.pending_streams.blocked.uni))
5373
0
            conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;
5374
        /* send the first resumption token using the 0.5 RTT window */
5375
0
        if (!quicly_is_client(conn) && conn->super.ctx->generate_resumption_token != NULL) {
5376
0
            quicly_error_t ret64 = quicly_send_resumption_token(conn);
5377
0
            assert(ret64 == 0);
5378
0
        }
5379
5380
        /* schedule NEW_CONNECTION_IDs */
5381
0
        size_t size = local_cid_size(conn);
5382
0
        if (quicly_local_cid_set_size(&conn->super.local.cid_set, size))
5383
0
            conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;
5384
0
    }
5385
5386
0
    return 0;
5387
0
}
5388
5389
static quicly_error_t send_other_control_frames(quicly_conn_t *conn, quicly_send_context_t *s)
5390
0
{
5391
0
    quicly_error_t ret;
5392
5393
    /* MAX_STREAMS */
5394
0
    if ((ret = send_max_streams(conn, 1, s)) != 0)
5395
0
        return ret;
5396
0
    if ((ret = send_max_streams(conn, 0, s)) != 0)
5397
0
        return ret;
5398
5399
    /* MAX_DATA */
5400
0
    if (should_send_max_data(conn)) {
5401
0
        quicly_sent_t *sent;
5402
0
        if ((ret = allocate_ack_eliciting_frame(conn, s, QUICLY_MAX_DATA_FRAME_CAPACITY, &sent, on_ack_max_data)) != 0)
5403
0
            return ret;
5404
0
        uint64_t new_value = conn->ingress.max_data.bytes_consumed + conn->super.ctx->transport_params.max_data;
5405
0
        s->dst = quicly_encode_max_data_frame(s->dst, new_value);
5406
0
        quicly_maxsender_record(&conn->ingress.max_data.sender, new_value, &sent->data.max_data.args);
5407
0
        ++conn->super.stats.num_frames_sent.max_data;
5408
0
        QUICLY_PROBE(MAX_DATA_SEND, conn, conn->stash.now, new_value);
5409
0
        QUICLY_LOG_CONN(max_data_send, conn, { PTLS_LOG_ELEMENT_UNSIGNED(maximum, new_value); });
5410
0
    }
5411
5412
    /* DATA_BLOCKED */
5413
0
    if (conn->egress.data_blocked == QUICLY_SENDER_STATE_SEND && (ret = send_data_blocked(conn, s)) != 0)
5414
0
        return ret;
5415
5416
    /* STREAMS_BLOCKED */
5417
0
    if ((ret = send_streams_blocked(conn, 1, s)) != 0)
5418
0
        return ret;
5419
0
    if ((ret = send_streams_blocked(conn, 0, s)) != 0)
5420
0
        return ret;
5421
5422
0
    { /* NEW_CONNECTION_ID */
5423
0
        size_t i, size = quicly_local_cid_get_size(&conn->super.local.cid_set);
5424
0
        for (i = 0; i < size; i++) {
5425
            /* PENDING CIDs are located at the front */
5426
0
            struct st_quicly_local_cid_t *c = &conn->super.local.cid_set.cids[i];
5427
0
            if (c->state != QUICLY_LOCAL_CID_STATE_PENDING)
5428
0
                break;
5429
0
            if ((ret = send_new_connection_id(conn, s, c)) != 0)
5430
0
                break;
5431
0
        }
5432
0
        quicly_local_cid_on_sent(&conn->super.local.cid_set, i);
5433
0
        if (ret != 0)
5434
0
            return ret;
5435
0
    }
5436
5437
0
    { /* RETIRE_CONNECTION_ID */
5438
0
        size_t i;
5439
0
        for (i = 0; i < conn->super.remote.cid_set.retired.count; ++i) {
5440
0
            uint64_t sequence = conn->super.remote.cid_set.retired.cids[i];
5441
0
            if ((ret = send_retire_connection_id(conn, s, sequence)) != 0)
5442
0
                break;
5443
0
        }
5444
0
        quicly_remote_cid_shift_retired(&conn->super.remote.cid_set, i);
5445
0
        if (ret != 0)
5446
0
            return ret;
5447
0
    }
5448
5449
0
    return 0;
5450
0
}
5451
5452
static quicly_error_t do_send(quicly_conn_t *conn, quicly_send_context_t *s)
5453
0
{
5454
0
    int restrict_sending = 0, ack_only = 0;
5455
0
    size_t min_packets_to_send = 0, orig_bytes_inflight = 0;
5456
0
    quicly_error_t ret = 0;
5457
5458
    /* handle timeouts */
5459
0
    if (conn->idle_timeout.at <= conn->stash.now) {
5460
0
        QUICLY_PROBE(IDLE_TIMEOUT, conn, conn->stash.now);
5461
0
        QUICLY_LOG_CONN(idle_timeout, conn, {});
5462
0
        goto CloseNow;
5463
0
    }
5464
    /* handle handshake timeouts */
5465
0
    if ((conn->initial != NULL || conn->handshake != NULL) &&
5466
0
        conn->created_at + (uint64_t)conn->super.ctx->handshake_timeout_rtt_multiplier * conn->egress.loss.rtt.smoothed <=
5467
0
            conn->stash.now) {
5468
0
        QUICLY_PROBE(HANDSHAKE_TIMEOUT, conn, conn->stash.now, conn->stash.now - conn->created_at, conn->egress.loss.rtt.smoothed);
5469
0
        QUICLY_LOG_CONN(handshake_timeout, conn, {
5470
0
            PTLS_LOG_ELEMENT_SIGNED(elapsed, conn->stash.now - conn->created_at);
5471
0
            PTLS_LOG_ELEMENT_UNSIGNED(rtt_smoothed, conn->egress.loss.rtt.smoothed);
5472
0
        });
5473
0
        conn->super.stats.num_handshake_timeouts++;
5474
0
        goto CloseNow;
5475
0
    }
5476
0
    uint64_t initial_handshake_sent = conn->super.stats.num_packets.initial_sent + conn->super.stats.num_packets.handshake_sent;
5477
0
    if (initial_handshake_sent > conn->super.ctx->max_initial_handshake_packets) {
5478
0
        QUICLY_PROBE(INITIAL_HANDSHAKE_PACKET_EXCEED, conn, conn->stash.now, initial_handshake_sent);
5479
0
        QUICLY_LOG_CONN(initial_handshake_packet_exceed, conn, { PTLS_LOG_ELEMENT_UNSIGNED(num_packets, initial_handshake_sent); });
5480
0
        conn->super.stats.num_initial_handshake_exceeded++;
5481
0
        goto CloseNow;
5482
0
    }
5483
0
    if (conn->egress.loss.alarm_at <= conn->stash.now) {
5484
0
        if ((ret = quicly_loss_on_alarm(&conn->egress.loss, conn->stash.now, conn->super.remote.transport_params.max_ack_delay,
5485
0
                                        conn->initial == NULL && conn->handshake == NULL, &min_packets_to_send, &restrict_sending,
5486
0
                                        on_loss_detected)) != 0)
5487
0
            goto Exit;
5488
0
        assert(min_packets_to_send > 0);
5489
0
        assert(min_packets_to_send <= s->max_datagrams);
5490
5491
0
        if (restrict_sending) {
5492
            /* PTO: when handshake is in progress, send from the very first unacknowledged byte so as to maximize the chance of
5493
             * making progress. When handshake is complete, transmit new data if any, else retransmit the oldest unacknowledged data
5494
             * that is considered inflight. */
5495
0
            QUICLY_PROBE(PTO, conn, conn->stash.now, conn->egress.loss.sentmap.bytes_in_flight, conn->egress.cc.cwnd,
5496
0
                         conn->egress.loss.pto_count);
5497
0
            QUICLY_LOG_CONN(pto, conn, {
5498
0
                PTLS_LOG_ELEMENT_SIGNED(inflight, conn->egress.loss.sentmap.bytes_in_flight);
5499
0
                PTLS_LOG_ELEMENT_UNSIGNED(cwnd, conn->egress.cc.cwnd);
5500
0
                PTLS_LOG_ELEMENT_SIGNED(pto_count, conn->egress.loss.pto_count);
5501
0
            });
5502
0
            ++conn->super.stats.num_ptos;
5503
0
            size_t bytes_to_mark = min_packets_to_send * conn->egress.max_udp_payload_size;
5504
0
            if (conn->initial != NULL && (ret = mark_frames_on_pto(conn, QUICLY_EPOCH_INITIAL, &bytes_to_mark)) != 0)
5505
0
                goto Exit;
5506
0
            if (bytes_to_mark != 0 && conn->handshake != NULL &&
5507
0
                (ret = mark_frames_on_pto(conn, QUICLY_EPOCH_HANDSHAKE, &bytes_to_mark)) != 0)
5508
0
                goto Exit;
5509
            /* Mark already sent 1-RTT data for PTO only if there's no new data, i.e., when scheduler_can_send() return false. */
5510
0
            if (bytes_to_mark != 0 && !scheduler_can_send(conn) &&
5511
0
                (ret = mark_frames_on_pto(conn, QUICLY_EPOCH_1RTT, &bytes_to_mark)) != 0)
5512
0
                goto Exit;
5513
0
        }
5514
0
    }
5515
5516
    /* disable ECN if zero packets where acked in the first 3 PTO of the connection during which all sent packets are ECT(0) */
5517
0
    if (conn->egress.ecn.state == QUICLY_ECN_PROBING && conn->created_at + conn->egress.loss.rtt.smoothed * 3 < conn->stash.now) {
5518
0
        update_ecn_state(conn, QUICLY_ECN_OFF);
5519
        /* TODO reset CC? */
5520
0
    }
5521
5522
0
    { /* calculate send window */
5523
0
        uint64_t pacer_window = SIZE_MAX;
5524
0
        if (conn->egress.pacer != NULL) {
5525
0
            uint32_t bytes_per_msec = calc_pacer_send_rate(conn);
5526
0
            pacer_window =
5527
0
                quicly_pacer_get_window(conn->egress.pacer, conn->stash.now, bytes_per_msec, conn->egress.max_udp_payload_size);
5528
0
        }
5529
0
        s->send_window = calc_send_window(conn, min_packets_to_send * conn->egress.max_udp_payload_size,
5530
0
                                          calc_amplification_limit_allowance(conn), pacer_window, restrict_sending);
5531
0
    }
5532
5533
0
    orig_bytes_inflight = conn->egress.loss.sentmap.bytes_in_flight;
5534
5535
0
    if (s->send_window == 0)
5536
0
        ack_only = 1;
5537
5538
0
    s->dcid = get_dcid(conn, s->path_index);
5539
5540
    /* send handshake flows; when PTO fires...
5541
     *  * quicly running as a client sends either a Handshake probe (or data) if the handshake keys are available, or else an
5542
     *    Initial probe (or data).
5543
     *  * quicly running as a server sends both Initial and Handshake probes (or data) if the corresponding keys are available. */
5544
0
    if (s->path_index == 0) {
5545
0
        if ((ret = send_handshake_flow(conn, QUICLY_EPOCH_INITIAL, s, ack_only,
5546
0
                                       min_packets_to_send != 0 && (!quicly_is_client(conn) || conn->handshake == NULL))) != 0)
5547
0
            goto Exit;
5548
0
        if ((ret = send_handshake_flow(conn, QUICLY_EPOCH_HANDSHAKE, s, ack_only, min_packets_to_send != 0)) != 0)
5549
0
            goto Exit;
5550
0
    }
5551
5552
    /* setup 0-RTT or 1-RTT send context (as the availability of the two epochs are mutually exclusive, we can try 1-RTT first as an
5553
     * optimization), then send application data if that succeeds */
5554
0
    if (setup_send_space(conn, QUICLY_EPOCH_1RTT, s) != NULL || setup_send_space(conn, QUICLY_EPOCH_0RTT, s) != NULL) {
5555
0
        { /* path_challenge / response */
5556
0
            struct st_quicly_conn_path_t *path = conn->paths[s->path_index];
5557
0
            assert(path != NULL);
5558
0
            if (path->path_challenge.send_at <= conn->stash.now) {
5559
                /* emit path challenge frame, doing exponential back off using PTO(initial_rtt) */
5560
0
                if ((ret = send_path_challenge(conn, s, 0, path->path_challenge.data)) != 0)
5561
0
                    goto Exit;
5562
0
                path->path_challenge.num_sent += 1;
5563
0
                path->path_challenge.send_at =
5564
0
                    conn->stash.now + ((3 * conn->super.ctx->loss.default_initial_rtt) << (path->path_challenge.num_sent - 1));
5565
0
                s->recalc_send_probe_at = 1;
5566
0
            }
5567
0
            if (path->path_response.send_) {
5568
0
                if ((ret = send_path_challenge(conn, s, 1, path->path_response.data)) != 0)
5569
0
                    goto Exit;
5570
0
                path->path_response.send_ = 0;
5571
0
                s->recalc_send_probe_at = 1;
5572
0
            }
5573
0
        }
5574
        /* non probing frames are sent only on path zero */
5575
0
        if (s->path_index == 0) {
5576
            /* acks */
5577
0
            if (conn->application->one_rtt_writable && conn->egress.send_ack_at <= conn->stash.now &&
5578
0
                conn->application->super.unacked_count != 0) {
5579
0
                if ((ret = send_ack(conn, &conn->application->super, s)) != 0)
5580
0
                    goto Exit;
5581
0
            }
5582
            /* DATAGRAM frame. Notes regarding current implementation:
5583
             * * Not limited by CC, nor the bytes counted by CC.
5584
             * * When given payload is too large and does not fit into a QUIC packet, a packet containing only PADDING frames is
5585
             *   sent. This is because we do not have a way to retract the generation of a QUIC packet.
5586
             * * Does not notify the application that the frame was dropped internally. */
5587
0
            if (should_send_datagram_frame(conn)) {
5588
0
                for (size_t i = 0; i != conn->egress.datagram_frame_payloads.count; ++i) {
5589
0
                    ptls_iovec_t *payload = conn->egress.datagram_frame_payloads.payloads + i;
5590
0
                    size_t required_space = quicly_datagram_frame_capacity(*payload);
5591
0
                    if ((ret = do_allocate_frame(conn, s, required_space, ALLOCATE_FRAME_TYPE_ACK_ELICITING_NO_CC)) != 0)
5592
0
                        goto Exit;
5593
0
                    if (s->dst_end - s->dst >= required_space) {
5594
0
                        s->dst = quicly_encode_datagram_frame(s->dst, *payload);
5595
0
                        QUICLY_PROBE(DATAGRAM_SEND, conn, conn->stash.now, payload->base, payload->len);
5596
0
                        QUICLY_LOG_CONN(datagram_send, conn,
5597
0
                                        { PTLS_LOG_APPDATA_ELEMENT_HEXDUMP(payload, payload->base, payload->len); });
5598
0
                    } else {
5599
                        /* FIXME: At the moment, we add a padding because we do not have a way to reclaim allocated space, and
5600
                         * because it is forbidden to send an empty QUIC packet. */
5601
0
                        *s->dst++ = QUICLY_FRAME_TYPE_PADDING;
5602
0
                    }
5603
0
                }
5604
0
            }
5605
0
            if (!ack_only) {
5606
                /* PTO or loss detection timeout, always send PING. This is the easiest thing to do in terms of timer control. */
5607
0
                if (min_packets_to_send != 0) {
5608
0
                    if ((ret = do_allocate_frame(conn, s, 1, ALLOCATE_FRAME_TYPE_ACK_ELICITING)) != 0)
5609
0
                        goto Exit;
5610
0
                    if (get_epoch(s->current.first_byte) == QUICLY_EPOCH_1RTT &&
5611
0
                        conn->super.remote.transport_params.min_ack_delay_usec != UINT64_MAX) {
5612
0
                        *s->dst++ = QUICLY_FRAME_TYPE_IMMEDIATE_ACK;
5613
0
                        ++conn->super.stats.num_frames_sent.immediate_ack;
5614
0
                        QUICLY_PROBE(IMMEDIATE_ACK_SEND, conn, conn->stash.now);
5615
0
                        QUICLY_LOG_CONN(immediate_ack_send, conn, {});
5616
0
                    } else {
5617
0
                        *s->dst++ = QUICLY_FRAME_TYPE_PING;
5618
0
                        ++conn->super.stats.num_frames_sent.ping;
5619
0
                        QUICLY_PROBE(PING_SEND, conn, conn->stash.now);
5620
0
                        QUICLY_LOG_CONN(ping_send, conn, {});
5621
0
                    }
5622
0
                }
5623
                /* take actions only permitted for short header packets */
5624
0
                if (conn->application->one_rtt_writable) {
5625
                    /* send HANDSHAKE_DONE */
5626
0
                    if ((conn->egress.pending_flows & QUICLY_PENDING_FLOW_HANDSHAKE_DONE_BIT) != 0 &&
5627
0
                        (ret = send_handshake_done(conn, s)) != 0)
5628
0
                        goto Exit;
5629
                    /* post-handshake messages */
5630
0
                    if ((conn->egress.pending_flows & (uint8_t)(1 << QUICLY_EPOCH_1RTT)) != 0) {
5631
0
                        quicly_stream_t *stream = quicly_get_stream(conn, -(1 + QUICLY_EPOCH_1RTT));
5632
0
                        assert(stream != NULL);
5633
0
                        if ((ret = quicly_send_stream(stream, s)) != 0)
5634
0
                            goto Exit;
5635
0
                        resched_stream_data(stream);
5636
0
                    }
5637
                    /* send other connection-level control frames, and iff we succeed in sending all of them, clear OTHERS_BIT to
5638
                     * disable `quicly_send` being called right again to send more control frames */
5639
0
                    if ((ret = send_other_control_frames(conn, s)) != 0)
5640
0
                        goto Exit;
5641
0
                    conn->egress.pending_flows &= ~QUICLY_PENDING_FLOW_OTHERS_BIT;
5642
                    /* send NEW_TOKEN */
5643
0
                    if ((conn->egress.pending_flows & QUICLY_PENDING_FLOW_NEW_TOKEN_BIT) != 0 &&
5644
0
                        (ret = send_resumption_token(conn, s)) != 0)
5645
0
                        goto Exit;
5646
0
                }
5647
                /* send stream-level control frames */
5648
0
                if ((ret = send_stream_control_frames(conn, s)) != 0)
5649
0
                    goto Exit;
5650
                /* send STREAM frames */
5651
0
                if ((ret = conn->super.ctx->stream_scheduler->do_send(conn->super.ctx->stream_scheduler, conn, s)) != 0)
5652
0
                    goto Exit;
5653
                /* once more, send control frames related to streams, as the state might have changed */
5654
0
                if ((ret = send_stream_control_frames(conn, s)) != 0)
5655
0
                    goto Exit;
5656
0
                if ((conn->egress.pending_flows & QUICLY_PENDING_FLOW_OTHERS_BIT) != 0) {
5657
0
                    if ((ret = send_other_control_frames(conn, s)) != 0)
5658
0
                        goto Exit;
5659
0
                    conn->egress.pending_flows &= ~QUICLY_PENDING_FLOW_OTHERS_BIT;
5660
0
                }
5661
0
            }
5662
            /* stream operations might have requested emission of NEW_TOKEN at the tail; if so, try to bundle it */
5663
0
            if ((conn->egress.pending_flows & QUICLY_PENDING_FLOW_NEW_TOKEN_BIT) != 0) {
5664
0
                assert(conn->application->one_rtt_writable);
5665
0
                if ((ret = send_resumption_token(conn, s)) != 0)
5666
0
                    goto Exit;
5667
0
            }
5668
0
        }
5669
0
    }
5670
5671
0
Exit:
5672
0
    if (ret == QUICLY_ERROR_SENDBUF_FULL) {
5673
0
        ret = 0;
5674
        /* when the buffer becomes full for the first time, try to use jumpstart; acting after the buffer becomes full does not
5675
         * delay switch to jump start, assuming that the buffer provided by the caller of quicly_send is no greater than the burst
5676
         * size of the pacer (10 packets) */
5677
0
        if (conn->egress.try_jumpstart && conn->egress.loss.rtt.minimum != UINT32_MAX) {
5678
0
            conn->egress.try_jumpstart = 0;
5679
0
            conn->super.stats.jumpstart.new_rtt = 0;
5680
0
            conn->super.stats.jumpstart.cwnd = 0;
5681
0
            if (conn->egress.pacer != NULL && conn->egress.cc.type->cc_jumpstart != NULL &&
5682
0
                (conn->super.ctx->default_jumpstart_cwnd_packets != 0 || conn->super.ctx->max_jumpstart_cwnd_packets != 0) &&
5683
0
                conn->egress.cc.num_loss_episodes == 0) {
5684
0
                conn->super.stats.jumpstart.new_rtt = conn->egress.loss.rtt.minimum;
5685
0
                if (conn->super.ctx->max_jumpstart_cwnd_packets != 0 && conn->super.stats.jumpstart.prev_rate != 0 &&
5686
0
                    conn->super.stats.jumpstart.prev_rtt != 0) {
5687
                    /* Careful Resume */
5688
0
                    conn->super.stats.jumpstart.cwnd =
5689
0
                        derive_jumpstart_cwnd(conn->super.ctx, conn->super.stats.jumpstart.new_rtt,
5690
0
                                              conn->super.stats.jumpstart.prev_rate, conn->super.stats.jumpstart.prev_rtt);
5691
0
                } else if (conn->super.ctx->default_jumpstart_cwnd_packets != 0) {
5692
                    /* jumpstart without previous information */
5693
0
                    conn->super.stats.jumpstart.cwnd = quicly_cc_calc_initial_cwnd(
5694
0
                        conn->super.ctx->default_jumpstart_cwnd_packets, conn->super.ctx->transport_params.max_udp_payload_size);
5695
0
                }
5696
                /* Jumpstart only if the amount that can be sent in 1 RTT would be higher than without. Comparison target is CWND +
5697
                 * inflight, as that is the amount that can be sent at most. Note the flow rate can become smaller due to packets
5698
                 * paced across the entire RTT during jumpstart. */
5699
0
                if (conn->super.stats.jumpstart.cwnd <= conn->egress.cc.cwnd + orig_bytes_inflight)
5700
0
                    conn->super.stats.jumpstart.cwnd = 0;
5701
0
            }
5702
            /* disable jumpstart probablistically based on the specified ratios; disablement is observable from the probes as
5703
             * `jumpstart.cwnd == 0` */
5704
0
            if (conn->super.stats.jumpstart.cwnd > 0) {
5705
0
                conn->super.stats.num_jumpstart_applicable = 1;
5706
0
                uint8_t ratio = conn->super.stats.jumpstart.prev_rate != 0 ? conn->super.ctx->enable_ratio.jumpstart.resume
5707
0
                                                                           : conn->super.ctx->enable_ratio.jumpstart.non_resume;
5708
0
                if (!enable_with_ratio255(ratio, conn->super.ctx->tls->random_bytes))
5709
0
                    conn->super.stats.jumpstart.cwnd = 0;
5710
0
                QUICLY_PROBE(ENTER_JUMPSTART, conn, conn->stash.now, conn->egress.packet_number,
5711
0
                             conn->super.stats.jumpstart.new_rtt, conn->egress.cc.cwnd, conn->super.stats.jumpstart.cwnd);
5712
0
                QUICLY_LOG_CONN(enter_jumpstart, conn, {
5713
0
                    PTLS_LOG_ELEMENT_UNSIGNED(pn, conn->egress.packet_number);
5714
0
                    PTLS_LOG_ELEMENT_UNSIGNED(rtt, conn->super.stats.jumpstart.new_rtt);
5715
0
                    PTLS_LOG_ELEMENT_UNSIGNED(cwnd, conn->egress.cc.cwnd);
5716
0
                    PTLS_LOG_ELEMENT_UNSIGNED(jumpstart_cwnd, conn->super.stats.jumpstart.cwnd);
5717
0
                });
5718
0
            }
5719
0
            if (conn->super.stats.jumpstart.cwnd > 0)
5720
0
                conn->egress.cc.type->cc_jumpstart(&conn->egress.cc, conn->super.stats.jumpstart.cwnd, conn->egress.packet_number);
5721
0
        }
5722
0
    }
5723
0
    if (ret == 0 && s->target.first_byte_at != NULL) {
5724
        /* last packet can be small-sized, unless it is the first flight sent from the client */
5725
0
        if ((s->payload_buf.datagram[0] & QUICLY_PACKET_TYPE_BITMASK) == QUICLY_PACKET_TYPE_INITIAL &&
5726
0
            (quicly_is_client(conn) || !ack_only))
5727
0
            s->target.full_size = 1;
5728
0
        commit_send_packet(conn, s, 0);
5729
0
    }
5730
0
    if (ret == 0) {
5731
        /* update timers, cc and delivery rate estimator states */
5732
0
        if (conn->application == NULL || conn->application->super.unacked_count == 0)
5733
0
            conn->egress.send_ack_at = INT64_MAX; /* we have sent ACKs for every epoch (or before address validation) */
5734
0
        int can_send_stream_data = scheduler_can_send(conn);
5735
0
        update_send_alarm(conn, can_send_stream_data, s->path_index == 0);
5736
0
        update_ratemeter(conn, can_send_stream_data && conn->super.remote.address_validation.validated &&
5737
0
                                   (s->num_datagrams == s->max_datagrams ||
5738
0
                                    conn->egress.loss.sentmap.bytes_in_flight >= conn->egress.cc.cwnd ||
5739
0
                                    pacer_can_send_at(conn) > conn->stash.now));
5740
0
        if (s->num_datagrams != 0)
5741
0
            update_idle_timeout(conn, 0);
5742
0
    }
5743
0
    return ret;
5744
5745
0
CloseNow:
5746
0
    conn->super.state = QUICLY_STATE_DRAINING;
5747
0
    destroy_all_streams(conn, 0, 0);
5748
0
    return QUICLY_ERROR_FREE_CONNECTION;
5749
0
}
5750
5751
void quicly_send_datagram_frames(quicly_conn_t *conn, ptls_iovec_t *datagrams, size_t num_datagrams)
5752
0
{
5753
0
    for (size_t i = 0; i != num_datagrams; ++i) {
5754
0
        if (conn->egress.datagram_frame_payloads.count == PTLS_ELEMENTSOF(conn->egress.datagram_frame_payloads.payloads))
5755
0
            break;
5756
0
        void *copied;
5757
0
        if ((copied = malloc(datagrams[i].len)) == NULL)
5758
0
            break;
5759
0
        memcpy(copied, datagrams[i].base, datagrams[i].len);
5760
0
        conn->egress.datagram_frame_payloads.payloads[conn->egress.datagram_frame_payloads.count++] =
5761
0
            ptls_iovec_init(copied, datagrams[i].len);
5762
0
    }
5763
0
}
5764
5765
int quicly_set_cc(quicly_conn_t *conn, quicly_cc_type_t *cc)
5766
0
{
5767
0
    return cc->cc_switch(&conn->egress.cc);
5768
0
}
5769
5770
static quicly_error_t do_send_closed(quicly_conn_t *conn, quicly_send_context_t *s)
5771
0
{
5772
0
    assert(s->path_index == 0);
5773
5774
0
    quicly_sentmap_iter_t iter;
5775
0
    quicly_error_t ret;
5776
5777
0
    if ((ret = init_acks_iter(conn, &iter)) != 0)
5778
0
        goto Exit;
5779
5780
    /* check if the connection can be closed now (after 3 pto) */
5781
0
    if (conn->super.state == QUICLY_STATE_DRAINING ||
5782
0
        conn->super.stats.num_frames_sent.transport_close + conn->super.stats.num_frames_sent.application_close != 0) {
5783
0
        if (quicly_sentmap_get(&iter)->packet_number == UINT64_MAX) {
5784
0
            assert(quicly_num_streams(conn) == 0);
5785
0
            ret = QUICLY_ERROR_FREE_CONNECTION;
5786
0
            goto Exit;
5787
0
        }
5788
0
    }
5789
5790
0
    if (conn->super.state == QUICLY_STATE_CLOSING && conn->egress.send_ack_at <= conn->stash.now) {
5791
        /* destroy all streams; doing so is delayed until the emission of CONNECTION_CLOSE frame to allow quicly_close to be called
5792
         * from a stream handler */
5793
0
        destroy_all_streams(conn, 0, 0);
5794
        /* send CONNECTION_CLOSE in all possible epochs */
5795
0
        s->dcid = get_dcid(conn, 0);
5796
0
        for (size_t epoch = 0; epoch < QUICLY_NUM_EPOCHS; ++epoch) {
5797
0
            if ((ret = send_connection_close(conn, epoch, s)) != 0)
5798
0
                goto Exit;
5799
0
        }
5800
0
        if ((ret = commit_send_packet(conn, s, 0)) != 0)
5801
0
            goto Exit;
5802
0
    }
5803
5804
    /* wait at least 1ms */
5805
0
    if ((conn->egress.send_ack_at = quicly_sentmap_get(&iter)->sent_at + get_sentmap_expiration_time(conn)) <= conn->stash.now)
5806
0
        conn->egress.send_ack_at = conn->stash.now + 1;
5807
5808
0
    ret = 0;
5809
5810
0
Exit:
5811
0
    return ret;
5812
0
}
5813
5814
quicly_error_t quicly_send(quicly_conn_t *conn, quicly_address_t *dest, quicly_address_t *src, struct iovec *datagrams,
5815
                           size_t *num_datagrams, void *buf, size_t bufsize)
5816
{
5817
    quicly_send_context_t s = {.current = {.first_byte = -1},
5818
                               .datagrams = datagrams,
5819
                               .max_datagrams = *num_datagrams,
5820
                               .payload_buf = {.datagram = buf, .end = (uint8_t *)buf + bufsize}};
5821
    quicly_error_t ret;
5822
5823
    lock_now(conn, 0);
5824
5825
    /* bail out if there's nothing scheduled to be sent */
5826
    if (conn->stash.now < quicly_get_first_timeout(conn)) {
5827
        ret = 0;
5828
        goto Exit;
5829
    }
5830
5831
    /* determine DCID of active path; doing so is guaranteed to succeed as the protocol guarantees that there will always be at
5832
     * least one non-retired CID available */
5833
    if (conn->paths[0]->dcid == UINT64_MAX) {
5834
        int success = setup_path_dcid(conn, 0);
5835
        assert(success);
5836
    }
5837
5838
    PTLS_LOG_DEFINE_POINT(quicly, send, send_logpoint);
5839
    if (QUICLY_PROBE_ENABLED(SEND) ||
5840
        (ptls_log_point_maybe_active(&send_logpoint) & ptls_log_conn_maybe_active(ptls_get_log_state(conn->crypto.tls),
5841
                                                                                  (const char *(*)(void *))ptls_get_server_name,
5842
                                                                                  conn->crypto.tls)) != 0) {
5843
        const quicly_cid_t *dcid = get_dcid(conn, 0);
5844
        QUICLY_PROBE(SEND, conn, conn->stash.now, conn->super.state, QUICLY_PROBE_HEXDUMP(dcid->cid, dcid->len));
5845
        QUICLY_LOG_CONN(send, conn, {
5846
            PTLS_LOG_ELEMENT_SIGNED(state, conn->super.state);
5847
            PTLS_LOG_ELEMENT_HEXDUMP(dcid, dcid->cid, dcid->len);
5848
        });
5849
    }
5850
5851
    if (conn->super.state >= QUICLY_STATE_CLOSING) {
5852
        ret = do_send_closed(conn, &s);
5853
        goto Exit;
5854
    }
5855
5856
    /* try emitting one probe packet on one of the backup paths, or ... (note: API of `quicly_send` allows us to send packets on no
5857
     * more than one path at a time) */
5858
    if (conn->egress.send_probe_at <= conn->stash.now) {
5859
        for (s.path_index = 1; s.path_index < PTLS_ELEMENTSOF(conn->paths); ++s.path_index) {
5860
            if (conn->paths[s.path_index] == NULL || !(conn->stash.now >= conn->paths[s.path_index]->path_challenge.send_at ||
5861
                                                       conn->paths[s.path_index]->path_response.send_))
5862
                continue;
5863
            if (conn->paths[s.path_index]->path_challenge.num_sent > conn->super.ctx->max_probe_packets) {
5864
                if ((ret = delete_path(conn, s.path_index)) != 0) {
5865
                    initiate_close(conn, ret, QUICLY_FRAME_TYPE_PADDING, NULL);
5866
                    assert(conn->super.state >= QUICLY_STATE_CLOSING);
5867
                    s.path_index = 0;
5868
                    ret = do_send_closed(conn, &s);
5869
                    goto Exit;
5870
                }
5871
                s.recalc_send_probe_at = 1;
5872
                continue;
5873
            }
5874
            /* determine DCID to be used, if not yet been done; upon failure, this path (being secondary) is discarded */
5875
            if (conn->paths[s.path_index]->dcid == UINT64_MAX && !setup_path_dcid(conn, s.path_index)) {
5876
                ret = delete_path(conn, s.path_index);
5877
                assert(ret == 0 && "path->dcid is UINT64_MAX and therefore does not trigger an error");
5878
                s.recalc_send_probe_at = 1;
5879
                conn->super.stats.num_paths.closed_no_dcid += 1;
5880
                continue;
5881
            }
5882
            if ((ret = do_send(conn, &s)) != 0)
5883
                goto Exit;
5884
            assert(conn->stash.now < conn->paths[s.path_index]->path_challenge.send_at);
5885
            if (s.num_datagrams != 0)
5886
                break;
5887
        }
5888
    }
5889
    /* otherwise, emit non-probing packets */
5890
    if (s.num_datagrams == 0) {
5891
        s.path_index = 0;
5892
        if ((ret = do_send(conn, &s)) != 0)
5893
            goto Exit;
5894
    } else {
5895
        ret = 0;
5896
    }
5897
5898
    assert_consistency(conn, s.path_index == 0);
5899
5900
Exit:
5901
    if (s.path_index == 0)
5902
        clear_datagram_frame_payloads(conn);
5903
    if (s.recalc_send_probe_at)
5904
        recalc_send_probe_at(conn);
5905
    if (s.num_datagrams != 0) {
5906
        *dest = conn->paths[s.path_index]->address.remote;
5907
        *src = conn->paths[s.path_index]->address.local;
5908
    }
5909
    *num_datagrams = s.num_datagrams;
5910
    unlock_now(conn);
5911
    return ret;
5912
}
5913
5914
uint8_t quicly_send_get_ecn_bits(quicly_conn_t *conn)
5915
0
{
5916
0
    return conn->egress.ecn.state == QUICLY_ECN_OFF ? 0 : 2; /* NON-ECT or ECT(0) */
5917
0
}
5918
5919
size_t quicly_send_close_invalid_token(quicly_context_t *ctx, uint32_t protocol_version, ptls_iovec_t dest_cid,
5920
                                       ptls_iovec_t src_cid, const char *err_desc, void *datagram)
5921
0
{
5922
0
    struct st_quicly_cipher_context_t egress = {};
5923
0
    const quicly_salt_t *salt;
5924
5925
    /* setup keys */
5926
0
    if ((salt = quicly_get_salt(protocol_version)) == NULL)
5927
0
        return SIZE_MAX;
5928
0
    if (setup_initial_encryption(get_aes128gcmsha256(ctx), NULL, &egress, src_cid, 0,
5929
0
                                 ptls_iovec_init(salt->initial, sizeof(salt->initial)), NULL) != 0)
5930
0
        return SIZE_MAX;
5931
5932
0
    uint8_t *dst = datagram, *length_at;
5933
5934
    /* build packet */
5935
0
    PTLS_BUILD_ASSERT(QUICLY_SEND_PN_SIZE == 2);
5936
0
    *dst++ = QUICLY_PACKET_TYPE_INITIAL | 0x1 /* 2-byte PN */;
5937
0
    dst = quicly_encode32(dst, protocol_version);
5938
0
    *dst++ = dest_cid.len;
5939
0
    memcpy(dst, dest_cid.base, dest_cid.len);
5940
0
    dst += dest_cid.len;
5941
0
    *dst++ = src_cid.len;
5942
0
    memcpy(dst, src_cid.base, src_cid.len);
5943
0
    dst += src_cid.len;
5944
0
    *dst++ = 0;        /* token_length = 0 */
5945
0
    length_at = dst++; /* length_at to be filled in later as 1-byte varint */
5946
0
    *dst++ = 0;        /* PN = 0 */
5947
0
    *dst++ = 0;        /* ditto */
5948
0
    uint8_t *payload_from = dst;
5949
0
    dst = quicly_encode_close_frame(dst, QUICLY_ERROR_GET_ERROR_CODE(QUICLY_TRANSPORT_ERROR_INVALID_TOKEN),
5950
0
                                    QUICLY_FRAME_TYPE_PADDING, err_desc);
5951
5952
    /* determine the size of the packet, make adjustments */
5953
0
    dst += egress.aead->algo->tag_size;
5954
0
    assert(dst - (uint8_t *)datagram <= QUICLY_MIN_CLIENT_INITIAL_SIZE);
5955
0
    assert(dst - length_at - 1 < 64);
5956
0
    *length_at = dst - length_at - 1;
5957
0
    size_t datagram_len = dst - (uint8_t *)datagram;
5958
5959
    /* encrypt packet */
5960
0
    quicly_default_crypto_engine.encrypt_packet(&quicly_default_crypto_engine, NULL, egress.header_protection, egress.aead,
5961
0
                                                ptls_iovec_init(datagram, datagram_len), 0, payload_from - (uint8_t *)datagram, 0,
5962
0
                                                0);
5963
5964
0
    dispose_cipher(&egress);
5965
0
    return datagram_len;
5966
0
}
5967
5968
size_t quicly_send_stateless_reset(quicly_context_t *ctx, const void *src_cid, void *payload)
5969
0
{
5970
0
    uint8_t *base = payload;
5971
5972
    /* build stateless reset packet */
5973
0
    ctx->tls->random_bytes(base, QUICLY_STATELESS_RESET_PACKET_MIN_LEN - QUICLY_STATELESS_RESET_TOKEN_LEN);
5974
0
    base[0] = (base[0] & ~QUICLY_LONG_HEADER_BIT) | QUICLY_QUIC_BIT;
5975
0
    if (!ctx->cid_encryptor->generate_stateless_reset_token(
5976
0
            ctx->cid_encryptor, base + QUICLY_STATELESS_RESET_PACKET_MIN_LEN - QUICLY_STATELESS_RESET_TOKEN_LEN, src_cid))
5977
0
        return SIZE_MAX;
5978
5979
0
    return QUICLY_STATELESS_RESET_PACKET_MIN_LEN;
5980
0
}
5981
5982
quicly_error_t quicly_send_resumption_token(quicly_conn_t *conn)
5983
{
5984
    assert(!quicly_is_client(conn));
5985
5986
    if (conn->super.state <= QUICLY_STATE_CONNECTED) {
5987
        ++conn->egress.new_token.generation;
5988
        conn->egress.pending_flows |= QUICLY_PENDING_FLOW_NEW_TOKEN_BIT;
5989
    }
5990
    return 0;
5991
}
5992
5993
static quicly_error_t on_end_closing(quicly_sentmap_t *map, const quicly_sent_packet_t *packet, int acked, quicly_sent_t *sent)
5994
0
{
5995
    /* we stop accepting frames by the time this ack callback is being registered */
5996
0
    assert(!acked);
5997
0
    return 0;
5998
0
}
5999
6000
static quicly_error_t enter_close(quicly_conn_t *conn, int local_is_initiating, int wait_draining)
6001
0
{
6002
0
    quicly_error_t ret;
6003
6004
0
    assert(conn->super.state < QUICLY_STATE_CLOSING);
6005
6006
    /* release all inflight info, register a close timeout */
6007
0
    if ((ret = discard_sentmap_by_epoch(conn, ~0u)) != 0)
6008
0
        return ret;
6009
0
    if ((ret = quicly_sentmap_prepare(&conn->egress.loss.sentmap, conn->egress.packet_number, conn->stash.now,
6010
0
                                      QUICLY_EPOCH_INITIAL)) != 0)
6011
0
        return ret;
6012
0
    if (quicly_sentmap_allocate(&conn->egress.loss.sentmap, on_end_closing) == NULL)
6013
0
        return PTLS_ERROR_NO_MEMORY;
6014
0
    quicly_sentmap_commit(&conn->egress.loss.sentmap, 0, 0, 0);
6015
0
    ++conn->egress.packet_number;
6016
6017
0
    if (local_is_initiating) {
6018
0
        conn->super.state = QUICLY_STATE_CLOSING;
6019
0
        conn->egress.send_ack_at = 0;
6020
0
    } else {
6021
0
        conn->super.state = QUICLY_STATE_DRAINING;
6022
0
        conn->egress.send_ack_at = wait_draining ? conn->stash.now + get_sentmap_expiration_time(conn) : 0;
6023
0
    }
6024
6025
0
    setup_next_send(conn);
6026
6027
0
    return 0;
6028
0
}
6029
6030
quicly_error_t initiate_close(quicly_conn_t *conn, quicly_error_t err, uint64_t frame_type, const char *reason_phrase)
6031
0
{
6032
0
    uint64_t quic_error_code;
6033
6034
0
    if (conn->super.state >= QUICLY_STATE_CLOSING)
6035
0
        return 0;
6036
6037
    /* convert error code to QUIC error codes */
6038
0
    if (err == 0) {
6039
0
        quic_error_code = 0;
6040
0
        frame_type = QUICLY_FRAME_TYPE_PADDING;
6041
0
    } else if (err == QUICLY_ERROR_STATE_EXHAUSTION) {
6042
        /* State exhaution is an error induced by the peer, but as there is no specific error code, the generic error code
6043
         * (PROTOCOL_VIOLATION) is used. The exact cause is communicated using the reason phrase field because it is sometimes
6044
         * difficult for the peer to understand the problem without; e.g., when an ACK triggering the loss of a RETIRE_CONNECTION_ID
6045
         * frame leading to the overflow of `quicly_conn_t::egress.retire_cid`. */
6046
0
        quic_error_code = QUICLY_ERROR_GET_ERROR_CODE(QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION);
6047
0
        if (reason_phrase == NULL)
6048
0
            reason_phrase = "state exhaustion";
6049
0
    } else if (QUICLY_ERROR_IS_QUIC_TRANSPORT(err)) {
6050
0
        quic_error_code = QUICLY_ERROR_GET_ERROR_CODE(err);
6051
0
    } else if (QUICLY_ERROR_IS_QUIC_APPLICATION(err)) {
6052
0
        quic_error_code = QUICLY_ERROR_GET_ERROR_CODE(err);
6053
0
        frame_type = UINT64_MAX;
6054
0
    } else if (PTLS_ERROR_GET_CLASS(err) == PTLS_ERROR_CLASS_SELF_ALERT) {
6055
0
        quic_error_code = QUICLY_ERROR_GET_ERROR_CODE(QUICLY_TRANSPORT_ERROR_CRYPTO(PTLS_ERROR_TO_ALERT(err)));
6056
0
    } else {
6057
0
        quic_error_code = QUICLY_ERROR_GET_ERROR_CODE(QUICLY_TRANSPORT_ERROR_INTERNAL);
6058
0
    }
6059
6060
0
    if (reason_phrase == NULL)
6061
0
        reason_phrase = "";
6062
6063
0
    conn->egress.connection_close.error_code = quic_error_code;
6064
0
    conn->egress.connection_close.frame_type = frame_type;
6065
0
    conn->egress.connection_close.reason_phrase = reason_phrase;
6066
0
    return enter_close(conn, 1, 0);
6067
0
}
6068
6069
quicly_error_t quicly_close(quicly_conn_t *conn, quicly_error_t err, const char *reason_phrase)
6070
{
6071
    quicly_error_t ret;
6072
6073
    assert(err == 0 || QUICLY_ERROR_IS_QUIC_APPLICATION(err) || QUICLY_ERROR_IS_CONCEALED(err));
6074
6075
    lock_now(conn, 1);
6076
    ret = initiate_close(conn, err, QUICLY_FRAME_TYPE_PADDING /* used when err == 0 */, reason_phrase);
6077
    unlock_now(conn);
6078
6079
    return ret;
6080
}
6081
6082
quicly_error_t quicly_get_or_open_stream(quicly_conn_t *conn, uint64_t stream_id, quicly_stream_t **stream)
6083
0
{
6084
0
    quicly_error_t ret = 0;
6085
6086
0
    if ((*stream = quicly_get_stream(conn, stream_id)) != NULL)
6087
0
        goto Exit;
6088
6089
0
    if (quicly_stream_is_client_initiated(stream_id) != quicly_is_client(conn)) {
6090
        /* check if stream id is within the bounds */
6091
0
        if (stream_id / 4 >= quicly_get_ingress_max_streams(conn, quicly_stream_is_unidirectional(stream_id))) {
6092
0
            ret = QUICLY_TRANSPORT_ERROR_STREAM_LIMIT;
6093
0
            goto Exit;
6094
0
        }
6095
        /* open new streams upto given id */
6096
0
        struct st_quicly_conn_streamgroup_state_t *group = get_streamgroup_state(conn, stream_id);
6097
0
        if (group->next_stream_id <= stream_id) {
6098
0
            uint64_t max_stream_data_local, max_stream_data_remote;
6099
0
            if (quicly_stream_is_unidirectional(stream_id)) {
6100
0
                max_stream_data_local = conn->super.ctx->transport_params.max_stream_data.uni;
6101
0
                max_stream_data_remote = 0;
6102
0
            } else {
6103
0
                max_stream_data_local = conn->super.ctx->transport_params.max_stream_data.bidi_remote;
6104
0
                max_stream_data_remote = conn->super.remote.transport_params.max_stream_data.bidi_local;
6105
0
            }
6106
0
            do {
6107
0
                if ((*stream = open_stream(conn, group->next_stream_id, (uint32_t)max_stream_data_local, max_stream_data_remote)) ==
6108
0
                    NULL) {
6109
0
                    ret = PTLS_ERROR_NO_MEMORY;
6110
0
                    goto Exit;
6111
0
                }
6112
0
                QUICLY_PROBE(STREAM_ON_OPEN, conn, conn->stash.now, *stream);
6113
0
                QUICLY_LOG_CONN(stream_on_open, conn, { PTLS_LOG_ELEMENT_SIGNED(stream_id, (*stream)->stream_id); });
6114
0
                if ((ret = conn->super.ctx->stream_open->cb(conn->super.ctx->stream_open, *stream)) != 0) {
6115
0
                    *stream = NULL;
6116
0
                    goto Exit;
6117
0
                }
6118
0
                ++group->num_streams;
6119
0
                group->next_stream_id += 4;
6120
0
            } while (stream_id != (*stream)->stream_id);
6121
0
        }
6122
0
    }
6123
6124
0
Exit:
6125
0
    return ret;
6126
0
}
6127
6128
static quicly_error_t handle_crypto_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6129
0
{
6130
0
    quicly_stream_frame_t frame;
6131
0
    quicly_stream_t *stream;
6132
0
    quicly_error_t ret;
6133
6134
0
    if ((ret = quicly_decode_crypto_frame(&state->src, state->end, &frame)) != 0)
6135
0
        return ret;
6136
0
    stream = quicly_get_stream(conn, -(quicly_stream_id_t)(1 + state->epoch));
6137
0
    assert(stream != NULL);
6138
0
    return apply_stream_frame(stream, &frame);
6139
0
}
6140
6141
static quicly_error_t handle_stream_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6142
0
{
6143
0
    quicly_stream_frame_t frame;
6144
0
    quicly_stream_t *stream;
6145
0
    quicly_error_t ret;
6146
6147
0
    if ((ret = quicly_decode_stream_frame(state->frame_type, &state->src, state->end, &frame)) != 0)
6148
0
        return ret;
6149
0
    QUICLY_PROBE(QUICTRACE_RECV_STREAM, conn, conn->stash.now, frame.stream_id, frame.offset, frame.data.len, (int)frame.is_fin);
6150
0
    if ((ret = quicly_get_or_open_stream(conn, frame.stream_id, &stream)) != 0 || stream == NULL)
6151
0
        return ret;
6152
0
    return apply_stream_frame(stream, &frame);
6153
0
}
6154
6155
static quicly_error_t handle_reset_stream_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6156
0
{
6157
0
    quicly_reset_stream_frame_t frame;
6158
0
    quicly_stream_t *stream;
6159
0
    quicly_error_t ret;
6160
6161
0
    if ((ret = quicly_decode_reset_stream_frame(&state->src, state->end, &frame)) != 0)
6162
0
        return ret;
6163
0
    QUICLY_PROBE(RESET_STREAM_RECEIVE, conn, conn->stash.now, frame.stream_id, frame.app_error_code, frame.final_size);
6164
0
    QUICLY_LOG_CONN(reset_stream_receive, conn, {
6165
0
        PTLS_LOG_ELEMENT_SIGNED(stream_id, (quicly_stream_id_t)frame.stream_id);
6166
0
        PTLS_LOG_ELEMENT_UNSIGNED(app_error_code, frame.app_error_code);
6167
0
        PTLS_LOG_ELEMENT_UNSIGNED(final_size, frame.final_size);
6168
0
    });
6169
6170
0
    if ((ret = quicly_get_or_open_stream(conn, frame.stream_id, &stream)) != 0 || stream == NULL)
6171
0
        return ret;
6172
6173
0
    if (frame.final_size > stream->recvstate.data_off + stream->_recv_aux.window)
6174
0
        return QUICLY_TRANSPORT_ERROR_FLOW_CONTROL;
6175
6176
0
    if (!quicly_recvstate_transfer_complete(&stream->recvstate)) {
6177
0
        uint64_t bytes_missing;
6178
0
        if ((ret = quicly_recvstate_reset(&stream->recvstate, frame.final_size, &bytes_missing)) != 0)
6179
0
            return ret;
6180
0
        stream->conn->ingress.max_data.bytes_consumed += bytes_missing;
6181
0
        quicly_error_t err = QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE(frame.app_error_code);
6182
0
        QUICLY_PROBE(STREAM_ON_RECEIVE_RESET, stream->conn, stream->conn->stash.now, stream, err);
6183
0
        QUICLY_LOG_CONN(stream_on_receive_reset, stream->conn, {
6184
0
            PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
6185
0
            PTLS_LOG_ELEMENT_SIGNED(err, err);
6186
0
        });
6187
0
        stream->callbacks->on_receive_reset(stream, err);
6188
0
        if (stream->conn->super.state >= QUICLY_STATE_CLOSING)
6189
0
            return QUICLY_ERROR_IS_CLOSING;
6190
0
        if (stream_is_destroyable(stream))
6191
0
            destroy_stream(stream, 0);
6192
0
    }
6193
6194
0
    return 0;
6195
0
}
6196
6197
static quicly_error_t handle_ack_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6198
0
{
6199
0
    quicly_ack_frame_t frame;
6200
0
    quicly_sentmap_iter_t iter;
6201
0
    struct {
6202
0
        uint64_t pn;
6203
0
        int64_t sent_at;
6204
0
    } largest_newly_acked = {UINT64_MAX, INT64_MAX};
6205
0
    size_t bytes_acked = 0;
6206
0
    int includes_ack_eliciting = 0, includes_late_ack = 0;
6207
0
    quicly_error_t ret;
6208
6209
    /* The flow is considered CC-limited if the packet was sent while `inflight >= 1/2 * CNWD` or acked under the same condition.
6210
     * 1/2 of CWND is adopted for fairness with RFC 7661, and also provides correct increase; i.e., if an idle flow goes into
6211
     * CC-limited state for X round-trips then becomes idle again, all packets sent during that X round-trips will be considered as
6212
     * CC-limited. */
6213
0
    int cc_limited =
6214
0
        conn->super.stats.num_respected_app_limited == 0 || conn->egress.loss.sentmap.bytes_in_flight >= conn->egress.cc.cwnd / 2;
6215
6216
0
    if ((ret = quicly_decode_ack_frame(&state->src, state->end, &frame, state->frame_type == QUICLY_FRAME_TYPE_ACK_ECN)) != 0)
6217
0
        return ret;
6218
6219
    /* early bail out if the peer is acking a PN that would have never been sent */
6220
0
    if (frame.largest_acknowledged > conn->egress.packet_number)
6221
0
        return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
6222
6223
0
    uint64_t pn_acked = frame.smallest_acknowledged;
6224
6225
0
    switch (state->epoch) {
6226
0
    case QUICLY_EPOCH_0RTT:
6227
0
        return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
6228
0
    case QUICLY_EPOCH_HANDSHAKE:
6229
0
        conn->super.remote.address_validation.send_probe = 0;
6230
0
        break;
6231
0
    default:
6232
0
        break;
6233
0
    }
6234
6235
0
    if ((ret = init_acks_iter(conn, &iter)) != 0)
6236
0
        return ret;
6237
6238
    /* TODO log PNs being ACKed too late */
6239
6240
0
    size_t gap_index = frame.num_gaps;
6241
0
    while (1) {
6242
0
        assert(frame.ack_block_lengths[gap_index] != 0);
6243
        /* Ack blocks are organized in the ACK frame and consequently in the ack_block_lengths array from the largest acked down.
6244
         * Processing acks in packet number order requires processing the ack blocks in reverse order. */
6245
0
        uint64_t pn_block_max = pn_acked + frame.ack_block_lengths[gap_index] - 1;
6246
0
        QUICLY_PROBE(ACK_BLOCK_RECEIVED, conn, conn->stash.now, pn_acked, pn_block_max);
6247
0
        QUICLY_LOG_CONN(ack_block_received, conn, {
6248
0
            PTLS_LOG_ELEMENT_UNSIGNED(ack_block_begin, pn_acked);
6249
0
            PTLS_LOG_ELEMENT_UNSIGNED(ack_block_end, pn_block_max);
6250
0
        });
6251
0
        while (quicly_sentmap_get(&iter)->packet_number < pn_acked)
6252
0
            quicly_sentmap_skip(&iter);
6253
0
        do {
6254
0
            const quicly_sent_packet_t *sent = quicly_sentmap_get(&iter);
6255
0
            uint64_t pn_sent = sent->packet_number;
6256
0
            assert(pn_acked <= pn_sent);
6257
0
            if (pn_acked < pn_sent) {
6258
                /* set pn_acked to pn_sent; or past the end of the ack block, for use with the next ack block */
6259
0
                if (pn_sent <= pn_block_max) {
6260
0
                    pn_acked = pn_sent;
6261
0
                } else {
6262
0
                    pn_acked = pn_block_max + 1;
6263
0
                    break;
6264
0
                }
6265
0
            }
6266
            /* process newly acked packet */
6267
0
            if (state->epoch != sent->ack_epoch)
6268
0
                return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
6269
0
            int is_late_ack = 0;
6270
0
            if (sent->ack_eliciting) {
6271
0
                includes_ack_eliciting = 1;
6272
0
                if (sent->cc_bytes_in_flight == 0) {
6273
0
                    is_late_ack = 1;
6274
0
                    includes_late_ack = 1;
6275
0
                    ++conn->super.stats.num_packets.late_acked;
6276
0
                }
6277
0
            }
6278
0
            ++conn->super.stats.num_packets.ack_received;
6279
0
            if (sent->promoted_path)
6280
0
                ++conn->super.stats.num_packets.ack_received_promoted_paths;
6281
0
            if (conn->egress.pn_path_start <= pn_acked) {
6282
0
                largest_newly_acked.pn = pn_acked;
6283
0
                largest_newly_acked.sent_at = sent->sent_at;
6284
0
            }
6285
0
            QUICLY_PROBE(PACKET_ACKED, conn, conn->stash.now, pn_acked, is_late_ack);
6286
0
            QUICLY_LOG_CONN(packet_acked, conn, {
6287
0
                PTLS_LOG_ELEMENT_UNSIGNED(pn, pn_acked);
6288
0
                PTLS_LOG_ELEMENT_BOOL(is_late_ack, is_late_ack);
6289
0
            });
6290
0
            if (sent->cc_bytes_in_flight != 0) {
6291
0
                if (conn->egress.pn_path_start <= pn_acked) {
6292
0
                    bytes_acked += sent->cc_bytes_in_flight;
6293
0
                    if (sent->cc_limited)
6294
0
                        cc_limited = 1;
6295
0
                }
6296
0
                conn->super.stats.num_bytes.ack_received += sent->cc_bytes_in_flight;
6297
0
            }
6298
0
            if ((ret = quicly_sentmap_update(&conn->egress.loss.sentmap, &iter, QUICLY_SENTMAP_EVENT_ACKED)) != 0)
6299
0
                return ret;
6300
0
            if (state->epoch == QUICLY_EPOCH_1RTT) {
6301
0
                struct st_quicly_application_space_t *space = conn->application;
6302
0
                if (space->cipher.egress.key_update_pn.last <= pn_acked) {
6303
0
                    space->cipher.egress.key_update_pn.last = UINT64_MAX;
6304
0
                    space->cipher.egress.key_update_pn.next = conn->egress.packet_number + conn->super.ctx->max_packets_per_key;
6305
0
                    QUICLY_PROBE(CRYPTO_SEND_KEY_UPDATE_CONFIRMED, conn, conn->stash.now, space->cipher.egress.key_update_pn.next);
6306
0
                    QUICLY_LOG_CONN(crypto_send_key_update_confirmed, conn,
6307
0
                                    { PTLS_LOG_ELEMENT_UNSIGNED(next_pn, space->cipher.egress.key_update_pn.next); });
6308
0
                }
6309
0
            }
6310
0
            ++pn_acked;
6311
0
        } while (pn_acked <= pn_block_max);
6312
0
        assert(pn_acked == pn_block_max + 1);
6313
0
        if (gap_index-- == 0)
6314
0
            break;
6315
0
        pn_acked += frame.gaps[gap_index];
6316
0
    }
6317
6318
0
    if ((ret = on_ack_stream_ack_cached(conn)) != 0)
6319
0
        return ret;
6320
6321
0
    QUICLY_PROBE(ACK_DELAY_RECEIVED, conn, conn->stash.now, frame.ack_delay);
6322
0
    QUICLY_LOG_CONN(ack_delay_received, conn, { PTLS_LOG_ELEMENT_UNSIGNED(ack_delay, frame.ack_delay); });
6323
6324
0
    if (largest_newly_acked.pn != UINT64_MAX)
6325
0
        quicly_ratemeter_on_ack(&conn->egress.ratemeter, conn->stash.now, conn->super.stats.num_bytes.ack_received,
6326
0
                                largest_newly_acked.pn);
6327
6328
    /* Update loss detection engine on ack. The function uses ack_delay only when the largest_newly_acked is also the largest acked
6329
     * so far. So, it does not matter if the ack_delay being passed in does not apply to the largest_newly_acked. */
6330
0
    quicly_loss_on_ack_received(&conn->egress.loss, largest_newly_acked.pn, state->epoch, conn->stash.now,
6331
0
                                largest_newly_acked.sent_at, frame.ack_delay,
6332
0
                                includes_ack_eliciting ? includes_late_ack ? QUICLY_LOSS_ACK_RECEIVED_KIND_ACK_ELICITING_LATE_ACK
6333
0
                                                                           : QUICLY_LOSS_ACK_RECEIVED_KIND_ACK_ELICITING
6334
0
                                                       : QUICLY_LOSS_ACK_RECEIVED_KIND_NON_ACK_ELICITING);
6335
6336
    /* OnPacketAcked and OnPacketAckedCC */
6337
0
    if (bytes_acked > 0) {
6338
0
        conn->egress.cc.type->cc_on_acked(&conn->egress.cc, &conn->egress.loss, (uint32_t)bytes_acked, frame.largest_acknowledged,
6339
0
                                          (uint32_t)(conn->egress.loss.sentmap.bytes_in_flight + bytes_acked), cc_limited,
6340
0
                                          conn->egress.packet_number, conn->stash.now, conn->egress.max_udp_payload_size);
6341
0
        QUICLY_PROBE(QUICTRACE_CC_ACK, conn, conn->stash.now, &conn->egress.loss.rtt, conn->egress.cc.cwnd,
6342
0
                     conn->egress.loss.sentmap.bytes_in_flight);
6343
0
    }
6344
6345
0
    QUICLY_PROBE(CC_ACK_RECEIVED, conn, conn->stash.now, frame.largest_acknowledged, bytes_acked, conn->egress.cc.cwnd,
6346
0
                 conn->egress.loss.sentmap.bytes_in_flight);
6347
0
    QUICLY_LOG_CONN(cc_ack_received, conn, {
6348
0
        PTLS_LOG_ELEMENT_UNSIGNED(largest_acked, frame.largest_acknowledged);
6349
0
        PTLS_LOG_ELEMENT_UNSIGNED(bytes_acked, bytes_acked);
6350
0
        PTLS_LOG_ELEMENT_UNSIGNED(cwnd, conn->egress.cc.cwnd);
6351
0
        PTLS_LOG_ELEMENT_UNSIGNED(inflight, conn->egress.loss.sentmap.bytes_in_flight);
6352
0
    });
6353
6354
    /* loss-detection  */
6355
0
    if ((ret = quicly_loss_detect_loss(&conn->egress.loss, conn->stash.now, conn->super.remote.transport_params.max_ack_delay,
6356
0
                                       conn->initial == NULL && conn->handshake == NULL, on_loss_detected)) != 0)
6357
0
        return ret;
6358
6359
    /* ECN */
6360
0
    if (conn->egress.ecn.state != QUICLY_ECN_OFF && largest_newly_acked.pn != UINT64_MAX) {
6361
        /* if things look suspicious (ECT(1) count becoming non-zero), turn ECN off */
6362
0
        if (frame.ecn_counts[1] != 0)
6363
0
            update_ecn_state(conn, QUICLY_ECN_OFF);
6364
        /* TODO: maybe compare num_packets.acked vs. sum(ecn_counts) to see if any packet has been received as NON-ECT? */
6365
6366
        /* ECN validation succeeds if at least one packet is acked using one of the expected marks during the probing period */
6367
0
        if (conn->egress.ecn.state == QUICLY_ECN_PROBING && frame.ecn_counts[0] + frame.ecn_counts[2] > 0)
6368
0
            update_ecn_state(conn, QUICLY_ECN_ON);
6369
6370
        /* check if congestion should be reported */
6371
0
        int report_congestion =
6372
0
            conn->egress.ecn.state != QUICLY_ECN_OFF && frame.ecn_counts[2] > conn->egress.ecn.counts[state->epoch][2];
6373
6374
        /* update counters */
6375
0
        for (size_t i = 0; i < PTLS_ELEMENTSOF(frame.ecn_counts); ++i) {
6376
0
            if (frame.ecn_counts[i] > conn->egress.ecn.counts[state->epoch][i]) {
6377
0
                conn->super.stats.num_packets.acked_ecn_counts[i] += frame.ecn_counts[i] - conn->egress.ecn.counts[state->epoch][i];
6378
0
                conn->egress.ecn.counts[state->epoch][i] = frame.ecn_counts[i];
6379
0
            }
6380
0
        }
6381
6382
        /* report congestion */
6383
0
        if (report_congestion) {
6384
0
            QUICLY_PROBE(ECN_CONGESTION, conn, conn->stash.now, conn->super.stats.num_packets.acked_ecn_counts[2]);
6385
0
            QUICLY_LOG_CONN(ecn_congestion, conn,
6386
0
                            { PTLS_LOG_ELEMENT_UNSIGNED(ce_count, conn->super.stats.num_packets.acked_ecn_counts[2]); });
6387
0
            notify_congestion_to_cc(conn, 0, largest_newly_acked.pn);
6388
0
        }
6389
0
    }
6390
6391
0
    setup_next_send(conn);
6392
6393
0
    return 0;
6394
0
}
6395
6396
static quicly_error_t handle_max_stream_data_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6397
0
{
6398
0
    quicly_max_stream_data_frame_t frame;
6399
0
    quicly_stream_t *stream;
6400
0
    quicly_error_t ret;
6401
6402
0
    if ((ret = quicly_decode_max_stream_data_frame(&state->src, state->end, &frame)) != 0)
6403
0
        return ret;
6404
6405
0
    QUICLY_PROBE(MAX_STREAM_DATA_RECEIVE, conn, conn->stash.now, frame.stream_id, frame.max_stream_data);
6406
0
    QUICLY_LOG_CONN(max_stream_data_receive, conn, {
6407
0
        PTLS_LOG_ELEMENT_SIGNED(stream_id, (quicly_stream_id_t)frame.stream_id);
6408
0
        PTLS_LOG_ELEMENT_UNSIGNED(max_stream_data, frame.max_stream_data);
6409
0
    });
6410
6411
0
    if (!quicly_stream_has_send_side(quicly_is_client(conn), frame.stream_id))
6412
0
        return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
6413
6414
0
    if ((stream = quicly_get_stream(conn, frame.stream_id)) == NULL)
6415
0
        return 0;
6416
6417
0
    if (frame.max_stream_data <= stream->_send_aux.max_stream_data)
6418
0
        return 0;
6419
0
    stream->_send_aux.max_stream_data = frame.max_stream_data;
6420
0
    stream->_send_aux.blocked = QUICLY_SENDER_STATE_NONE;
6421
6422
0
    if (stream->_send_aux.reset_stream.sender_state == QUICLY_SENDER_STATE_NONE)
6423
0
        resched_stream_data(stream);
6424
6425
0
    return 0;
6426
0
}
6427
6428
static quicly_error_t handle_data_blocked_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6429
0
{
6430
0
    quicly_data_blocked_frame_t frame;
6431
0
    quicly_error_t ret;
6432
6433
0
    if ((ret = quicly_decode_data_blocked_frame(&state->src, state->end, &frame)) != 0)
6434
0
        return ret;
6435
6436
0
    QUICLY_PROBE(DATA_BLOCKED_RECEIVE, conn, conn->stash.now, frame.offset);
6437
0
    QUICLY_LOG_CONN(data_blocked_receive, conn, { PTLS_LOG_ELEMENT_UNSIGNED(off, frame.offset); });
6438
6439
0
    quicly_maxsender_request_transmit(&conn->ingress.max_data.sender);
6440
0
    if (should_send_max_data(conn))
6441
0
        conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;
6442
6443
0
    return 0;
6444
0
}
6445
6446
static quicly_error_t handle_stream_data_blocked_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6447
0
{
6448
0
    quicly_stream_data_blocked_frame_t frame;
6449
0
    quicly_stream_t *stream;
6450
0
    quicly_error_t ret;
6451
6452
0
    if ((ret = quicly_decode_stream_data_blocked_frame(&state->src, state->end, &frame)) != 0)
6453
0
        return ret;
6454
6455
0
    QUICLY_PROBE(STREAM_DATA_BLOCKED_RECEIVE, conn, conn->stash.now, frame.stream_id, frame.offset);
6456
0
    QUICLY_LOG_CONN(stream_data_blocked_receive, conn, {
6457
0
        PTLS_LOG_ELEMENT_SIGNED(stream_id, frame.stream_id);
6458
0
        PTLS_LOG_ELEMENT_UNSIGNED(maximum, frame.offset);
6459
0
    });
6460
6461
0
    if (!quicly_stream_has_receive_side(quicly_is_client(conn), frame.stream_id))
6462
0
        return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
6463
6464
0
    if ((stream = quicly_get_stream(conn, frame.stream_id)) != NULL) {
6465
0
        quicly_maxsender_request_transmit(&stream->_send_aux.max_stream_data_sender);
6466
0
        if (should_send_max_stream_data(stream))
6467
0
            sched_stream_control(stream);
6468
0
    }
6469
6470
0
    return 0;
6471
0
}
6472
6473
static quicly_error_t handle_streams_blocked_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6474
0
{
6475
0
    quicly_streams_blocked_frame_t frame;
6476
0
    int uni = state->frame_type == QUICLY_FRAME_TYPE_STREAMS_BLOCKED_UNI;
6477
0
    quicly_error_t ret;
6478
6479
0
    if ((ret = quicly_decode_streams_blocked_frame(&state->src, state->end, &frame)) != 0)
6480
0
        return ret;
6481
6482
0
    QUICLY_PROBE(STREAMS_BLOCKED_RECEIVE, conn, conn->stash.now, frame.count, uni);
6483
0
    QUICLY_LOG_CONN(streams_blocked_receive, conn, {
6484
0
        PTLS_LOG_ELEMENT_UNSIGNED(maximum, frame.count);
6485
0
        PTLS_LOG_ELEMENT_BOOL(is_unidirectional, uni);
6486
0
    });
6487
6488
0
    if (should_send_max_streams(conn, uni)) {
6489
0
        quicly_maxsender_t *maxsender = uni ? &conn->ingress.max_streams.uni : &conn->ingress.max_streams.bidi;
6490
0
        quicly_maxsender_request_transmit(maxsender);
6491
0
        conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;
6492
0
    }
6493
6494
0
    return 0;
6495
0
}
6496
6497
static quicly_error_t handle_max_streams_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state, int uni)
6498
0
{
6499
0
    quicly_max_streams_frame_t frame;
6500
0
    quicly_error_t ret;
6501
6502
0
    if ((ret = quicly_decode_max_streams_frame(&state->src, state->end, &frame)) != 0)
6503
0
        return ret;
6504
6505
0
    QUICLY_PROBE(MAX_STREAMS_RECEIVE, conn, conn->stash.now, frame.count, uni);
6506
0
    QUICLY_LOG_CONN(max_streams_receive, conn, {
6507
0
        PTLS_LOG_ELEMENT_UNSIGNED(maximum, frame.count);
6508
0
        PTLS_LOG_ELEMENT_BOOL(is_unidirectional, uni);
6509
0
    });
6510
6511
0
    if ((ret = update_max_streams(uni ? &conn->egress.max_streams.uni : &conn->egress.max_streams.bidi, frame.count)) != 0)
6512
0
        return ret;
6513
6514
0
    open_blocked_streams(conn, uni);
6515
6516
0
    return 0;
6517
0
}
6518
6519
static quicly_error_t handle_max_streams_bidi_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6520
0
{
6521
0
    return handle_max_streams_frame(conn, state, 0);
6522
0
}
6523
6524
static quicly_error_t handle_max_streams_uni_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6525
0
{
6526
0
    return handle_max_streams_frame(conn, state, 1);
6527
0
}
6528
6529
static quicly_error_t handle_path_challenge_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6530
0
{
6531
0
    quicly_path_challenge_frame_t frame;
6532
0
    quicly_error_t ret;
6533
6534
0
    if ((ret = quicly_decode_path_challenge_frame(&state->src, state->end, &frame)) != 0)
6535
0
        return ret;
6536
6537
0
    QUICLY_PROBE(PATH_CHALLENGE_RECEIVE, conn, conn->stash.now, frame.data, QUICLY_PATH_CHALLENGE_DATA_LEN);
6538
0
    QUICLY_LOG_CONN(path_challenge_receive, conn, { PTLS_LOG_ELEMENT_HEXDUMP(data, frame.data, QUICLY_PATH_CHALLENGE_DATA_LEN); });
6539
6540
    /* schedule the emission of PATH_RESPONSE frame */
6541
0
    struct st_quicly_conn_path_t *path = conn->paths[state->path_index];
6542
0
    memcpy(path->path_response.data, frame.data, QUICLY_PATH_CHALLENGE_DATA_LEN);
6543
0
    path->path_response.send_ = 1;
6544
0
    conn->egress.send_probe_at = 0;
6545
6546
0
    return 0;
6547
0
}
6548
6549
static quicly_error_t handle_path_response_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6550
0
{
6551
0
    quicly_path_challenge_frame_t frame;
6552
0
    quicly_error_t ret;
6553
6554
0
    if ((ret = quicly_decode_path_challenge_frame(&state->src, state->end, &frame)) != 0)
6555
0
        return ret;
6556
6557
0
    QUICLY_PROBE(PATH_RESPONSE_RECEIVE, conn, conn->stash.now, frame.data, QUICLY_PATH_CHALLENGE_DATA_LEN);
6558
0
    QUICLY_LOG_CONN(path_response_receive, conn, { PTLS_LOG_ELEMENT_HEXDUMP(data, frame.data, QUICLY_PATH_CHALLENGE_DATA_LEN); });
6559
6560
0
    struct st_quicly_conn_path_t *path = conn->paths[state->path_index];
6561
6562
0
    if (ptls_mem_equal(path->path_challenge.data, frame.data, QUICLY_PATH_CHALLENGE_DATA_LEN)) {
6563
        /* Path validation succeeded, stop sending PATH_CHALLENGEs. Active path might become changed in `quicly_receive`. */
6564
0
        path->path_challenge.send_at = INT64_MAX;
6565
0
        recalc_send_probe_at(conn);
6566
0
        conn->super.stats.num_paths.validated += 1;
6567
0
    }
6568
6569
0
    return 0;
6570
0
}
6571
6572
static quicly_error_t handle_new_token_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6573
0
{
6574
0
    quicly_new_token_frame_t frame;
6575
0
    quicly_error_t ret;
6576
6577
0
    if (!quicly_is_client(conn))
6578
0
        return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
6579
0
    if ((ret = quicly_decode_new_token_frame(&state->src, state->end, &frame)) != 0)
6580
0
        return ret;
6581
0
    QUICLY_PROBE(NEW_TOKEN_RECEIVE, conn, conn->stash.now, frame.token.base, frame.token.len);
6582
0
    QUICLY_LOG_CONN(new_token_receive, conn, { PTLS_LOG_ELEMENT_HEXDUMP(token, frame.token.base, frame.token.len); });
6583
0
    if (conn->super.ctx->save_resumption_token == NULL)
6584
0
        return 0;
6585
0
    return conn->super.ctx->save_resumption_token->cb(conn->super.ctx->save_resumption_token, conn, frame.token);
6586
0
}
6587
6588
static quicly_error_t handle_stop_sending_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6589
0
{
6590
0
    quicly_stop_sending_frame_t frame;
6591
0
    quicly_stream_t *stream;
6592
0
    quicly_error_t ret;
6593
6594
0
    if ((ret = quicly_decode_stop_sending_frame(&state->src, state->end, &frame)) != 0)
6595
0
        return ret;
6596
0
    QUICLY_PROBE(STOP_SENDING_RECEIVE, conn, conn->stash.now, frame.stream_id, frame.app_error_code);
6597
0
    QUICLY_LOG_CONN(stop_sending_receive, conn, {
6598
0
        PTLS_LOG_ELEMENT_UNSIGNED(stream_id, (quicly_stream_id_t)frame.stream_id);
6599
0
        PTLS_LOG_ELEMENT_UNSIGNED(error_code, frame.app_error_code);
6600
0
    });
6601
6602
0
    if ((ret = quicly_get_or_open_stream(conn, frame.stream_id, &stream)) != 0 || stream == NULL)
6603
0
        return ret;
6604
6605
0
    if (quicly_sendstate_is_open(&stream->sendstate)) {
6606
        /* reset the stream, then notify the application */
6607
0
        quicly_error_t err = QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE(frame.app_error_code);
6608
0
        quicly_reset_stream(stream, err);
6609
0
        QUICLY_PROBE(STREAM_ON_SEND_STOP, stream->conn, stream->conn->stash.now, stream, err);
6610
0
        QUICLY_LOG_CONN(stream_on_send_stop, stream->conn, {
6611
0
            PTLS_LOG_ELEMENT_SIGNED(stream_id, stream->stream_id);
6612
0
            PTLS_LOG_ELEMENT_SIGNED(err, err);
6613
0
        });
6614
0
        stream->callbacks->on_send_stop(stream, err);
6615
0
        if (stream->conn->super.state >= QUICLY_STATE_CLOSING)
6616
0
            return QUICLY_ERROR_IS_CLOSING;
6617
0
    }
6618
6619
0
    return 0;
6620
0
}
6621
6622
static quicly_error_t handle_max_data_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6623
0
{
6624
0
    quicly_max_data_frame_t frame;
6625
0
    quicly_error_t ret;
6626
6627
0
    if ((ret = quicly_decode_max_data_frame(&state->src, state->end, &frame)) != 0)
6628
0
        return ret;
6629
6630
0
    QUICLY_PROBE(MAX_DATA_RECEIVE, conn, conn->stash.now, frame.max_data);
6631
0
    QUICLY_LOG_CONN(max_data_receive, conn, { PTLS_LOG_ELEMENT_UNSIGNED(maximum, frame.max_data); });
6632
6633
0
    if (frame.max_data <= conn->egress.max_data.permitted)
6634
0
        return 0;
6635
0
    conn->egress.max_data.permitted = frame.max_data;
6636
0
    conn->egress.data_blocked = QUICLY_SENDER_STATE_NONE; /* DATA_BLOCKED has not been sent for the new limit */
6637
6638
0
    return 0;
6639
0
}
6640
6641
static quicly_error_t negotiate_using_version(quicly_conn_t *conn, uint32_t version)
6642
0
{
6643
0
    quicly_error_t ret;
6644
6645
    /* set selected version, update transport parameters extension ID */
6646
0
    conn->super.version = version;
6647
0
    QUICLY_PROBE(VERSION_SWITCH, conn, conn->stash.now, version);
6648
0
    QUICLY_LOG_CONN(version_switch, conn, { PTLS_LOG_ELEMENT_UNSIGNED(new_version, version); });
6649
6650
    /* replace initial keys */
6651
0
    if ((ret = reinstall_initial_encryption(conn, PTLS_ERROR_LIBRARY)) != 0)
6652
0
        return ret;
6653
6654
    /* reschedule all the packets that have been sent for immediate resend */
6655
0
    if ((ret = discard_sentmap_by_epoch(conn, ~0u)) != 0)
6656
0
        return ret;
6657
6658
0
    return 0;
6659
0
}
6660
6661
static quicly_error_t handle_version_negotiation_packet(quicly_conn_t *conn, quicly_decoded_packet_t *packet)
6662
0
{
6663
0
    const uint8_t *src = packet->octets.base + packet->encrypted_off, *end = packet->octets.base + packet->octets.len;
6664
0
    uint32_t selected_version = 0;
6665
6666
0
    if (src == end || (end - src) % 4 != 0)
6667
0
        return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
6668
6669
    /* select in the precedence of V1 -> draft29 -> draft27 -> fail */
6670
0
    while (src != end) {
6671
0
        uint32_t supported_version = quicly_decode32(&src);
6672
0
        switch (supported_version) {
6673
0
        case QUICLY_PROTOCOL_VERSION_1:
6674
0
            selected_version = QUICLY_PROTOCOL_VERSION_1;
6675
0
            break;
6676
0
        case QUICLY_PROTOCOL_VERSION_DRAFT29:
6677
0
            if (selected_version == 0 || selected_version == QUICLY_PROTOCOL_VERSION_DRAFT27)
6678
0
                selected_version = QUICLY_PROTOCOL_VERSION_DRAFT29;
6679
0
            break;
6680
0
        case QUICLY_PROTOCOL_VERSION_DRAFT27:
6681
0
            if (selected_version == 0)
6682
0
                selected_version = QUICLY_PROTOCOL_VERSION_DRAFT27;
6683
0
            break;
6684
0
        }
6685
0
    }
6686
0
    if (selected_version == 0)
6687
0
        return handle_close(conn, QUICLY_ERROR_NO_COMPATIBLE_VERSION, UINT64_MAX, ptls_iovec_init("", 0));
6688
6689
0
    return negotiate_using_version(conn, selected_version);
6690
0
}
6691
6692
static int compare_socket_address(struct sockaddr *x, struct sockaddr *y)
6693
0
{
6694
0
#define CMP(a, b)                                                                                                                  \
6695
0
    if (a != b)                                                                                                                    \
6696
0
    return a < b ? -1 : 1
6697
6698
0
    CMP(x->sa_family, y->sa_family);
6699
6700
0
    if (x->sa_family == AF_INET) {
6701
0
        struct sockaddr_in *xin = (void *)x, *yin = (void *)y;
6702
0
        CMP(ntohl(xin->sin_addr.s_addr), ntohl(yin->sin_addr.s_addr));
6703
0
        CMP(ntohs(xin->sin_port), ntohs(yin->sin_port));
6704
0
    } else if (x->sa_family == AF_INET6) {
6705
0
        struct sockaddr_in6 *xin6 = (void *)x, *yin6 = (void *)y;
6706
0
        int r = memcmp(xin6->sin6_addr.s6_addr, yin6->sin6_addr.s6_addr, sizeof(xin6->sin6_addr.s6_addr));
6707
0
        if (r != 0)
6708
0
            return r;
6709
0
        CMP(ntohs(xin6->sin6_port), ntohs(yin6->sin6_port));
6710
0
        CMP(xin6->sin6_scope_id, yin6->sin6_scope_id);
6711
0
    } else if (x->sa_family == AF_UNSPEC) {
6712
0
        return 1;
6713
0
    } else {
6714
0
        assert(!"unknown sa_family");
6715
0
    }
6716
6717
0
#undef CMP
6718
0
    return 0;
6719
0
}
6720
6721
static int is_stateless_reset(quicly_conn_t *conn, quicly_decoded_packet_t *decoded)
6722
0
{
6723
0
    switch (decoded->_is_stateless_reset_cached) {
6724
0
    case QUICLY__DECODED_PACKET_CACHED_IS_STATELESS_RESET:
6725
0
        return 1;
6726
0
    case QUICLY__DECODED_PACKET_CACHED_NOT_STATELESS_RESET:
6727
0
        return 0;
6728
0
    default:
6729
0
        break;
6730
0
    }
6731
6732
0
    if (decoded->octets.len < QUICLY_STATELESS_RESET_PACKET_MIN_LEN)
6733
0
        return 0;
6734
6735
0
    for (size_t i = 0; i < PTLS_ELEMENTSOF(conn->super.remote.cid_set.cids); ++i) {
6736
0
        if (conn->super.remote.cid_set.cids[0].state == QUICLY_REMOTE_CID_UNAVAILABLE)
6737
0
            continue;
6738
0
        if (memcmp(decoded->octets.base + decoded->octets.len - QUICLY_STATELESS_RESET_TOKEN_LEN,
6739
0
                   conn->super.remote.cid_set.cids[i].stateless_reset_token, QUICLY_STATELESS_RESET_TOKEN_LEN) == 0)
6740
0
            return 1;
6741
0
    }
6742
6743
0
    return 0;
6744
0
}
6745
6746
int quicly_is_destination(quicly_conn_t *conn, struct sockaddr *dest_addr, struct sockaddr *src_addr,
6747
                          quicly_decoded_packet_t *decoded)
6748
0
{
6749
0
    if (QUICLY_PACKET_IS_LONG_HEADER(decoded->octets.base[0])) {
6750
        /* long header: validate address, then consult the CID */
6751
0
        if (compare_socket_address(&conn->paths[0]->address.remote.sa, src_addr) != 0)
6752
0
            return 0;
6753
0
        if (conn->paths[0]->address.local.sa.sa_family != AF_UNSPEC &&
6754
0
            compare_socket_address(&conn->paths[0]->address.local.sa, dest_addr) != 0)
6755
0
            return 0;
6756
        /* server may see the CID generated by the client for Initial and 0-RTT packets */
6757
0
        if (!quicly_is_client(conn) && decoded->cid.dest.might_be_client_generated) {
6758
0
            const quicly_cid_t *odcid = is_retry(conn) ? &conn->retry_scid : &conn->super.original_dcid;
6759
0
            if (quicly_cid_is_equal(odcid, decoded->cid.dest.encrypted))
6760
0
                goto Found;
6761
0
        }
6762
0
    }
6763
6764
0
    if (conn->super.ctx->cid_encryptor != NULL) {
6765
        /* Note on multiple CIDs
6766
         * Multiple CIDs issued by this host are always based on the same 3-tuple (master_id, thread_id, node_id)
6767
         * and the only difference is path_id. Therefore comparing the 3-tuple is enough to cover all CIDs issued by
6768
         * this host.
6769
         */
6770
0
        if (conn->super.local.cid_set.plaintext.master_id == decoded->cid.dest.plaintext.master_id &&
6771
0
            conn->super.local.cid_set.plaintext.thread_id == decoded->cid.dest.plaintext.thread_id &&
6772
0
            conn->super.local.cid_set.plaintext.node_id == decoded->cid.dest.plaintext.node_id)
6773
0
            goto Found;
6774
0
        if (is_stateless_reset(conn, decoded))
6775
0
            goto Found_StatelessReset;
6776
0
    } else {
6777
0
        if (compare_socket_address(&conn->paths[0]->address.remote.sa, src_addr) == 0)
6778
0
            goto Found;
6779
0
        if (conn->paths[0]->address.local.sa.sa_family != AF_UNSPEC &&
6780
0
            compare_socket_address(&conn->paths[0]->address.local.sa, dest_addr) != 0)
6781
0
            return 0;
6782
0
    }
6783
6784
    /* not found */
6785
0
    return 0;
6786
6787
0
Found:
6788
0
    decoded->_is_stateless_reset_cached = QUICLY__DECODED_PACKET_CACHED_NOT_STATELESS_RESET;
6789
0
    return 1;
6790
6791
0
Found_StatelessReset:
6792
0
    decoded->_is_stateless_reset_cached = QUICLY__DECODED_PACKET_CACHED_IS_STATELESS_RESET;
6793
0
    return 1;
6794
0
}
6795
6796
quicly_error_t handle_close(quicly_conn_t *conn, quicly_error_t err, uint64_t frame_type, ptls_iovec_t reason_phrase)
6797
0
{
6798
0
    quicly_error_t ret;
6799
6800
0
    if (conn->super.state >= QUICLY_STATE_CLOSING)
6801
0
        return 0;
6802
6803
    /* switch to closing state, notify the app (at this moment the streams are accessible), then destroy the streams */
6804
0
    if ((ret = enter_close(conn, 0,
6805
0
                           !(err == QUICLY_ERROR_RECEIVED_STATELESS_RESET || err == QUICLY_ERROR_NO_COMPATIBLE_VERSION))) != 0)
6806
0
        return ret;
6807
0
    if (conn->super.ctx->closed_by_remote != NULL)
6808
0
        conn->super.ctx->closed_by_remote->cb(conn->super.ctx->closed_by_remote, conn, err, frame_type,
6809
0
                                              (const char *)reason_phrase.base, reason_phrase.len);
6810
0
    destroy_all_streams(conn, err, 0);
6811
6812
0
    return QUICLY_ERROR_IS_CLOSING;
6813
0
}
6814
6815
static quicly_error_t handle_transport_close_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6816
0
{
6817
0
    quicly_transport_close_frame_t frame;
6818
0
    quicly_error_t ret;
6819
6820
0
    if ((ret = quicly_decode_transport_close_frame(&state->src, state->end, &frame)) != 0)
6821
0
        return ret;
6822
6823
0
    QUICLY_PROBE(TRANSPORT_CLOSE_RECEIVE, conn, conn->stash.now, frame.error_code, frame.frame_type,
6824
0
                 QUICLY_PROBE_ESCAPE_UNSAFE_STRING(frame.reason_phrase.base, frame.reason_phrase.len));
6825
0
    QUICLY_LOG_CONN(transport_close_receive, conn, {
6826
0
        PTLS_LOG_ELEMENT_UNSIGNED(error_code, frame.error_code);
6827
0
        PTLS_LOG_ELEMENT_UNSIGNED(frame_type, frame.frame_type);
6828
0
        PTLS_LOG_ELEMENT_UNSAFESTR(reason_phrase, (const char *)frame.reason_phrase.base, frame.reason_phrase.len);
6829
0
    });
6830
0
    return handle_close(conn, QUICLY_ERROR_FROM_TRANSPORT_ERROR_CODE(frame.error_code), frame.frame_type, frame.reason_phrase);
6831
0
}
6832
6833
static quicly_error_t handle_application_close_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6834
0
{
6835
0
    quicly_application_close_frame_t frame;
6836
0
    quicly_error_t ret;
6837
6838
0
    if ((ret = quicly_decode_application_close_frame(&state->src, state->end, &frame)) != 0)
6839
0
        return ret;
6840
6841
0
    QUICLY_PROBE(APPLICATION_CLOSE_RECEIVE, conn, conn->stash.now, frame.error_code,
6842
0
                 QUICLY_PROBE_ESCAPE_UNSAFE_STRING(frame.reason_phrase.base, frame.reason_phrase.len));
6843
0
    QUICLY_LOG_CONN(application_close_receive, conn, {
6844
0
        PTLS_LOG_ELEMENT_UNSIGNED(error_code, frame.error_code);
6845
0
        PTLS_LOG_ELEMENT_UNSAFESTR(reason_phrase, (const char *)frame.reason_phrase.base, frame.reason_phrase.len);
6846
0
    });
6847
0
    return handle_close(conn, QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE(frame.error_code), UINT64_MAX, frame.reason_phrase);
6848
0
}
6849
6850
static quicly_error_t handle_padding_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6851
0
{
6852
0
    return 0;
6853
0
}
6854
6855
static quicly_error_t handle_ping_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6856
0
{
6857
0
    QUICLY_PROBE(PING_RECEIVE, conn, conn->stash.now);
6858
0
    QUICLY_LOG_CONN(ping_receive, conn, {});
6859
6860
0
    return 0;
6861
0
}
6862
6863
static quicly_error_t handle_new_connection_id_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6864
0
{
6865
0
    quicly_new_connection_id_frame_t frame;
6866
0
    quicly_error_t ret;
6867
6868
    /* TODO: return error when using zero-length CID */
6869
6870
0
    if ((ret = quicly_decode_new_connection_id_frame(&state->src, state->end, &frame)) != 0)
6871
0
        return ret;
6872
6873
0
    QUICLY_PROBE(NEW_CONNECTION_ID_RECEIVE, conn, conn->stash.now, frame.sequence, frame.retire_prior_to,
6874
0
                 QUICLY_PROBE_HEXDUMP(frame.cid.base, frame.cid.len),
6875
0
                 QUICLY_PROBE_HEXDUMP(frame.stateless_reset_token, QUICLY_STATELESS_RESET_TOKEN_LEN));
6876
0
    QUICLY_LOG_CONN(new_connection_id_receive, conn, {
6877
0
        PTLS_LOG_ELEMENT_UNSIGNED(sequence, frame.sequence);
6878
0
        PTLS_LOG_ELEMENT_UNSIGNED(retire_prior_to, frame.retire_prior_to);
6879
0
        PTLS_LOG_ELEMENT_HEXDUMP(cid, frame.cid.base, frame.cid.len);
6880
0
        PTLS_LOG_ELEMENT_HEXDUMP(stateless_reset_token, frame.stateless_reset_token, QUICLY_STATELESS_RESET_TOKEN_LEN);
6881
0
    });
6882
6883
0
    size_t orig_num_retired = conn->super.remote.cid_set.retired.count;
6884
0
    if ((ret = quicly_remote_cid_register(&conn->super.remote.cid_set, frame.sequence, frame.cid.base, frame.cid.len,
6885
0
                                          frame.stateless_reset_token, frame.retire_prior_to)) != 0)
6886
0
        return ret;
6887
0
    if (orig_num_retired != conn->super.remote.cid_set.retired.count) {
6888
0
        for (size_t i = orig_num_retired; i < conn->super.remote.cid_set.retired.count; ++i)
6889
0
            dissociate_cid(conn, conn->super.remote.cid_set.retired.cids[i]);
6890
0
        conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;
6891
0
    }
6892
6893
0
    return 0;
6894
0
}
6895
6896
static quicly_error_t handle_retire_connection_id_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6897
0
{
6898
0
    int has_pending;
6899
0
    quicly_retire_connection_id_frame_t frame;
6900
0
    quicly_error_t ret;
6901
6902
0
    if ((ret = quicly_decode_retire_connection_id_frame(&state->src, state->end, &frame)) != 0)
6903
0
        return ret;
6904
6905
0
    QUICLY_PROBE(RETIRE_CONNECTION_ID_RECEIVE, conn, conn->stash.now, frame.sequence);
6906
0
    QUICLY_LOG_CONN(retire_connection_id_receive, conn, { PTLS_LOG_ELEMENT_UNSIGNED(sequence, frame.sequence); });
6907
6908
0
    if (frame.sequence >= conn->super.local.cid_set.plaintext.path_id) {
6909
        /* Receipt of a RETIRE_CONNECTION_ID frame containing a sequence number greater than any previously sent to the remote peer
6910
         * MUST be treated as a connection error of type PROTOCOL_VIOLATION. (19.16) */
6911
0
        return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
6912
0
    }
6913
6914
0
    if ((ret = quicly_local_cid_retire(&conn->super.local.cid_set, frame.sequence, &has_pending)) != 0)
6915
0
        return ret;
6916
0
    if (has_pending)
6917
0
        conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;
6918
6919
0
    return 0;
6920
0
}
6921
6922
static quicly_error_t handle_handshake_done_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6923
0
{
6924
0
    quicly_error_t ret;
6925
6926
0
    QUICLY_PROBE(HANDSHAKE_DONE_RECEIVE, conn, conn->stash.now);
6927
0
    QUICLY_LOG_CONN(handshake_done_receive, conn, {});
6928
6929
0
    if (!quicly_is_client(conn))
6930
0
        return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
6931
6932
0
    assert(conn->initial == NULL);
6933
0
    if (conn->handshake == NULL)
6934
0
        return 0;
6935
6936
0
    conn->super.remote.address_validation.send_probe = 0;
6937
0
    if ((ret = discard_handshake_context(conn, QUICLY_EPOCH_HANDSHAKE)) != 0)
6938
0
        return ret;
6939
0
    setup_next_send(conn);
6940
0
    return 0;
6941
0
}
6942
6943
static quicly_error_t handle_datagram_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6944
0
{
6945
0
    quicly_datagram_frame_t frame;
6946
0
    quicly_error_t ret;
6947
6948
    /* check if we advertised support for DATAGRAM frames on this connection */
6949
0
    if (conn->super.ctx->transport_params.max_datagram_frame_size == 0)
6950
0
        return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
6951
6952
    /* decode the frame */
6953
0
    if ((ret = quicly_decode_datagram_frame(state->frame_type, &state->src, state->end, &frame)) != 0)
6954
0
        return ret;
6955
0
    QUICLY_PROBE(DATAGRAM_RECEIVE, conn, conn->stash.now, frame.payload.base, frame.payload.len);
6956
0
    QUICLY_LOG_CONN(datagram_receive, conn, { PTLS_LOG_ELEMENT_UNSIGNED(payload_len, frame.payload.len); });
6957
6958
    /* handle the frame. Applications might call quicly_close or other functions that modify the connection state. */
6959
0
    conn->super.ctx->receive_datagram_frame->cb(conn->super.ctx->receive_datagram_frame, conn, frame.payload);
6960
6961
0
    return 0;
6962
0
}
6963
6964
static quicly_error_t handle_ack_frequency_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
6965
0
{
6966
0
    quicly_ack_frequency_frame_t frame;
6967
0
    quicly_error_t ret;
6968
6969
    /* recognize the frame only when the support has been advertised */
6970
0
    if (conn->super.ctx->transport_params.min_ack_delay_usec == UINT64_MAX)
6971
0
        return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
6972
6973
0
    if ((ret = quicly_decode_ack_frequency_frame(&state->src, state->end, &frame)) != 0)
6974
0
        return ret;
6975
6976
0
    QUICLY_PROBE(ACK_FREQUENCY_RECEIVE, conn, conn->stash.now, frame.sequence, frame.packet_tolerance, frame.max_ack_delay,
6977
0
                 frame.reordering_threshold);
6978
0
    QUICLY_LOG_CONN(ack_frequency_receive, conn, {
6979
0
        PTLS_LOG_ELEMENT_UNSIGNED(sequence, frame.sequence);
6980
0
        PTLS_LOG_ELEMENT_UNSIGNED(packet_tolerance, frame.packet_tolerance);
6981
0
        PTLS_LOG_ELEMENT_UNSIGNED(max_ack_delay, frame.max_ack_delay);
6982
0
        PTLS_LOG_ELEMENT_UNSIGNED(reordering_threshold, frame.reordering_threshold);
6983
0
    });
6984
6985
    /* Reject Request Max Ack Delay below our TP.min_ack_delay (which is at the moment equal to LOCAL_MAX_ACK_DELAY). */
6986
0
    if (frame.max_ack_delay < QUICLY_LOCAL_MAX_ACK_DELAY * 1000 || frame.max_ack_delay >= (1 << 14) * 1000)
6987
0
        return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
6988
6989
    // TODO: use received frame.max_ack_delay. We currently use a constant (25 ms) and
6990
    // ignore the value set by our transport parameter (see max_ack_delay field comment).
6991
6992
0
    if (frame.sequence >= conn->ingress.ack_frequency.next_sequence) {
6993
0
        conn->ingress.ack_frequency.next_sequence = frame.sequence + 1;
6994
0
        conn->application->super.packet_tolerance =
6995
0
            (uint32_t)(frame.packet_tolerance < QUICLY_MAX_PACKET_TOLERANCE ? frame.packet_tolerance : QUICLY_MAX_PACKET_TOLERANCE);
6996
0
        conn->application->super.reordering_threshold = frame.reordering_threshold;
6997
0
    }
6998
6999
0
    return 0;
7000
0
}
7001
7002
static quicly_error_t handle_immediate_ack_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
7003
0
{
7004
    /* recognize the frame only when the support has been advertised */
7005
0
    if (conn->super.ctx->transport_params.min_ack_delay_usec == UINT64_MAX)
7006
0
        return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
7007
0
    conn->egress.send_ack_at = conn->stash.now;
7008
0
    return 0;
7009
0
}
7010
7011
static quicly_error_t handle_payload(quicly_conn_t *conn, size_t epoch, size_t path_index, const uint8_t *_src, size_t _len,
7012
                                     uint64_t *offending_frame_type, int *is_ack_only, int *is_probe_only)
7013
0
{
7014
    /* clang-format off */
7015
7016
    /* `frame_handlers` is an array of frame handlers and the properties of the frames, indexed by the ID of the frame. */
7017
0
    static const struct st_quicly_frame_handler_t {
7018
0
        quicly_error_t (*cb)(quicly_conn_t *, struct st_quicly_handle_payload_state_t *); /* callback function that handles the
7019
                                                                                           * frame */
7020
0
        uint8_t permitted_epochs;  /* the epochs the frame can appear, calculated as bitwise-or of `1 << epoch` */
7021
0
        uint8_t ack_eliciting;     /* boolean indicating if the frame is ack-eliciting */
7022
0
        uint8_t probing;           /* boolean indicating if the frame is a "probing frame" */
7023
0
        size_t counter_offset;     /* offset of corresponding `conn->super.stats.num_frames_received.type` within quicly_conn_t */
7024
0
    } frame_handlers[] = {
7025
0
#define FRAME(n, i, z, h, o, ae, p)                                                                                                \
7026
0
    {                                                                                                                              \
7027
0
        handle_##n##_frame,                                                                                                        \
7028
0
        (i << QUICLY_EPOCH_INITIAL) | (z << QUICLY_EPOCH_0RTT) | (h << QUICLY_EPOCH_HANDSHAKE) | (o << QUICLY_EPOCH_1RTT),         \
7029
0
        ae,                                                                                                                        \
7030
0
        p,                                                                                                                         \
7031
0
        offsetof(quicly_conn_t, super.stats.num_frames_received.n)                                                                 \
7032
0
    }
7033
        /*   +----------------------+-------------------+---------------+---------+
7034
         *   |                      |  permitted epochs |               |         |
7035
         *   |        frame         +----+----+----+----+ ack-eliciting | probing |
7036
         *   |                      | IN | 0R | HS | 1R |               |         |
7037
         *   +----------------------+----+----+----+----+---------------+---------+ */
7038
0
        FRAME( padding              ,  1 ,  1 ,  1 ,  1 ,             0 ,       1 ), /* 0 */
7039
0
        FRAME( ping                 ,  1 ,  1 ,  1 ,  1 ,             1 ,       0 ),
7040
0
        FRAME( ack                  ,  1 ,  0 ,  1 ,  1 ,             0 ,       0 ),
7041
0
        FRAME( ack                  ,  1 ,  0 ,  1 ,  1 ,             0 ,       0 ),
7042
0
        FRAME( reset_stream         ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
7043
0
        FRAME( stop_sending         ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
7044
0
        FRAME( crypto               ,  1 ,  0 ,  1 ,  1 ,             1 ,       0 ),
7045
0
        FRAME( new_token            ,  0 ,  0 ,  0 ,  1 ,             1 ,       0 ),
7046
0
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ), /* 8 */
7047
0
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
7048
0
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
7049
0
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
7050
0
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
7051
0
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
7052
0
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
7053
0
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
7054
0
        FRAME( max_data             ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ), /* 16 */
7055
0
        FRAME( max_stream_data      ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
7056
0
        FRAME( max_streams_bidi     ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
7057
0
        FRAME( max_streams_uni      ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
7058
0
        FRAME( data_blocked         ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
7059
0
        FRAME( stream_data_blocked  ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
7060
0
        FRAME( streams_blocked      ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
7061
0
        FRAME( streams_blocked      ,  0 ,  1 ,  0 ,  1 ,             1 ,       0 ),
7062
0
        FRAME( new_connection_id    ,  0 ,  1 ,  0 ,  1 ,             1 ,       1 ), /* 24 */
7063
0
        FRAME( retire_connection_id ,  0 ,  0 ,  0 ,  1 ,             1 ,       0 ),
7064
0
        FRAME( path_challenge       ,  0 ,  1 ,  0 ,  1 ,             1 ,       1 ),
7065
0
        FRAME( path_response        ,  0 ,  0 ,  0 ,  1 ,             1 ,       1 ),
7066
0
        FRAME( transport_close      ,  1 ,  1 ,  1 ,  1 ,             0 ,       0 ),
7067
0
        FRAME( application_close    ,  0 ,  1 ,  0 ,  1 ,             0 ,       0 ),
7068
0
        FRAME( handshake_done       ,  0,   0 ,  0 ,  1 ,             1 ,       0 ),
7069
0
        FRAME( immediate_ack        ,  0,   0 ,  0 ,  1 ,             1 ,       0 ),
7070
        /*   +----------------------+----+----+----+----+---------------+---------+ */
7071
0
#undef FRAME
7072
0
    };
7073
0
    static const struct {
7074
0
        uint64_t type;
7075
0
        struct st_quicly_frame_handler_t _;
7076
0
    } ex_frame_handlers[] = {
7077
0
#define FRAME(uc, lc, i, z, h, o, ae, p)                                                                                           \
7078
0
    {                                                                                                                              \
7079
0
        QUICLY_FRAME_TYPE_##uc,                                                                                                    \
7080
0
        {                                                                                                                          \
7081
0
            handle_##lc##_frame,                                                                                                   \
7082
0
            (i << QUICLY_EPOCH_INITIAL) | (z << QUICLY_EPOCH_0RTT) | (h << QUICLY_EPOCH_HANDSHAKE) | (o << QUICLY_EPOCH_1RTT),     \
7083
0
            ae,                                                                                                                    \
7084
0
            p,                                                                                                                     \
7085
0
            offsetof(quicly_conn_t, super.stats.num_frames_received.lc)                                                            \
7086
0
        },                                                                                                                         \
7087
0
    }
7088
        /*   +----------------------------------+-------------------+---------------+---------+
7089
         *   |               frame              |  permitted epochs |               |         |
7090
         *   |------------------+---------------+----+----+----+----+ ack-eliciting | probing |
7091
         *   |    upper-case    |  lower-case   | IN | 0R | HS | 1R |               |         |
7092
         *   +------------------+---------------+----+----+----+----+---------------+---------+ */
7093
0
        FRAME( DATAGRAM_NOLEN   , datagram      ,  0 ,  1,   0,   1 ,             1 ,       0 ),
7094
0
        FRAME( DATAGRAM_WITHLEN , datagram      ,  0 ,  1,   0,   1 ,             1 ,       0 ),
7095
0
        FRAME( ACK_FREQUENCY    , ack_frequency ,  0 ,  0 ,  0 ,  1 ,             1 ,       0 ),
7096
        /*   +------------------+---------------+-------------------+---------------+---------+ */
7097
0
#undef FRAME
7098
0
        {UINT64_MAX},
7099
0
    };
7100
    /* clang-format on */
7101
7102
0
    struct st_quicly_handle_payload_state_t state = {.epoch = epoch, .path_index = path_index, .src = _src, .end = _src + _len};
7103
0
    size_t num_frames_ack_eliciting = 0, num_frames_non_probing = 0;
7104
0
    quicly_error_t ret;
7105
7106
0
    do {
7107
        /* determine the frame type; fast path is available for frame types below 64 */
7108
0
        const struct st_quicly_frame_handler_t *frame_handler;
7109
0
        state.frame_type = *state.src++;
7110
0
        if (state.frame_type < PTLS_ELEMENTSOF(frame_handlers)) {
7111
0
            frame_handler = frame_handlers + state.frame_type;
7112
0
        } else {
7113
            /* slow path */
7114
0
            --state.src;
7115
0
            if ((state.frame_type = quicly_decodev(&state.src, state.end)) == UINT64_MAX) {
7116
0
                state.frame_type =
7117
0
                    QUICLY_FRAME_TYPE_PADDING; /* we cannot signal the offending frame type when failing to decode the frame type */
7118
0
                ret = QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
7119
0
                break;
7120
0
            }
7121
0
            size_t i;
7122
0
            for (i = 0; ex_frame_handlers[i].type < state.frame_type; ++i)
7123
0
                ;
7124
0
            if (ex_frame_handlers[i].type != state.frame_type) {
7125
0
                ret = QUICLY_TRANSPORT_ERROR_FRAME_ENCODING; /* not found */
7126
0
                break;
7127
0
            }
7128
0
            frame_handler = &ex_frame_handlers[i]._;
7129
0
        }
7130
        /* check if frame is allowed, then process */
7131
0
        if ((frame_handler->permitted_epochs & (1 << epoch)) == 0) {
7132
0
            ret = QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
7133
0
            break;
7134
0
        }
7135
0
        ++*(uint64_t *)((uint8_t *)conn + frame_handler->counter_offset);
7136
0
        if (frame_handler->ack_eliciting)
7137
0
            ++num_frames_ack_eliciting;
7138
0
        if (!frame_handler->probing)
7139
0
            ++num_frames_non_probing;
7140
0
        if ((ret = frame_handler->cb(conn, &state)) != 0)
7141
0
            break;
7142
0
    } while (state.src != state.end);
7143
7144
0
    *is_ack_only = num_frames_ack_eliciting == 0;
7145
0
    *is_probe_only = num_frames_non_probing == 0;
7146
0
    if (ret != 0)
7147
0
        *offending_frame_type = state.frame_type;
7148
0
    return ret;
7149
0
}
7150
7151
static quicly_error_t handle_stateless_reset(quicly_conn_t *conn)
7152
0
{
7153
0
    QUICLY_PROBE(STATELESS_RESET_RECEIVE, conn, conn->stash.now);
7154
0
    QUICLY_LOG_CONN(stateless_reset_receive, conn, {});
7155
0
    return handle_close(conn, QUICLY_ERROR_RECEIVED_STATELESS_RESET, UINT64_MAX, ptls_iovec_init("", 0));
7156
0
}
7157
7158
static int validate_retry_tag(quicly_decoded_packet_t *packet, quicly_cid_t *odcid, ptls_aead_context_t *retry_aead)
7159
0
{
7160
0
    size_t pseudo_packet_len = 1 + odcid->len + packet->encrypted_off;
7161
0
    uint8_t pseudo_packet[pseudo_packet_len];
7162
0
    pseudo_packet[0] = odcid->len;
7163
0
    memcpy(pseudo_packet + 1, odcid->cid, odcid->len);
7164
0
    memcpy(pseudo_packet + 1 + odcid->len, packet->octets.base, packet->encrypted_off);
7165
0
    return ptls_aead_decrypt(retry_aead, packet->octets.base + packet->encrypted_off, packet->octets.base + packet->encrypted_off,
7166
0
                             PTLS_AESGCM_TAG_SIZE, 0, pseudo_packet, pseudo_packet_len) == 0;
7167
0
}
7168
7169
quicly_error_t quicly_accept(quicly_conn_t **conn, quicly_context_t *ctx, struct sockaddr *dest_addr, struct sockaddr *src_addr,
7170
                             quicly_decoded_packet_t *packet, quicly_address_token_plaintext_t *address_token,
7171
                             const quicly_cid_plaintext_t *new_cid, ptls_handshake_properties_t *handshake_properties,
7172
                             void *appdata)
7173
0
{
7174
0
    const quicly_salt_t *salt;
7175
0
    struct {
7176
0
        struct st_quicly_cipher_context_t ingress, egress;
7177
0
        int alive;
7178
0
    } cipher = {};
7179
0
    ptls_iovec_t payload;
7180
0
    uint64_t next_expected_pn, pn, offending_frame_type = QUICLY_FRAME_TYPE_PADDING;
7181
0
    int is_ack_only, is_probe_only;
7182
0
    quicly_error_t ret;
7183
7184
0
    *conn = NULL;
7185
7186
    /* process initials only */
7187
0
    if ((packet->octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) != QUICLY_PACKET_TYPE_INITIAL) {
7188
0
        ret = QUICLY_ERROR_PACKET_IGNORED;
7189
0
        goto Exit;
7190
0
    }
7191
0
    if ((salt = quicly_get_salt(packet->version)) == NULL) {
7192
0
        ret = QUICLY_ERROR_PACKET_IGNORED;
7193
0
        goto Exit;
7194
0
    }
7195
0
    if (packet->datagram_size < QUICLY_MIN_CLIENT_INITIAL_SIZE) {
7196
0
        ret = QUICLY_ERROR_PACKET_IGNORED;
7197
0
        goto Exit;
7198
0
    }
7199
0
    if (packet->cid.dest.encrypted.len < 8) {
7200
0
        ret = QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
7201
0
        goto Exit;
7202
0
    }
7203
0
    if ((ret = setup_initial_encryption(get_aes128gcmsha256(ctx), &cipher.ingress, &cipher.egress, packet->cid.dest.encrypted, 0,
7204
0
                                        ptls_iovec_init(salt->initial, sizeof(salt->initial)), NULL)) != 0)
7205
0
        goto Exit;
7206
0
    cipher.alive = 1;
7207
0
    next_expected_pn = 0; /* is this correct? do we need to take care of underflow? */
7208
0
    if ((ret = decrypt_packet(cipher.ingress.header_protection, aead_decrypt_fixed_key, cipher.ingress.aead, &next_expected_pn,
7209
0
                              packet, &pn, &payload)) != 0) {
7210
0
        ret = QUICLY_ERROR_DECRYPTION_FAILED;
7211
0
        goto Exit;
7212
0
    }
7213
7214
    /* create connection */
7215
0
    if ((*conn = create_connection(
7216
0
             ctx, packet->version, NULL, src_addr, dest_addr, &packet->cid.src, new_cid, handshake_properties, appdata,
7217
0
             quicly_cc_calc_initial_cwnd(ctx->initcwnd_packets, ctx->transport_params.max_udp_payload_size))) == NULL) {
7218
0
        ret = PTLS_ERROR_NO_MEMORY;
7219
0
        goto Exit;
7220
0
    }
7221
0
    (*conn)->super.state = QUICLY_STATE_ACCEPTING;
7222
0
    quicly_set_cid(&(*conn)->super.original_dcid, packet->cid.dest.encrypted);
7223
0
    if (address_token != NULL) {
7224
0
        (*conn)->super.remote.address_validation.validated = !address_token->address_mismatch;
7225
0
        switch (address_token->type) {
7226
0
        case QUICLY_ADDRESS_TOKEN_TYPE_RETRY:
7227
0
            if (!address_token->address_mismatch) {
7228
0
                (*conn)->retry_scid = (*conn)->super.original_dcid;
7229
0
                (*conn)->super.original_dcid = address_token->retry.original_dcid;
7230
0
            }
7231
0
            break;
7232
0
        case QUICLY_ADDRESS_TOKEN_TYPE_RESUMPTION:
7233
0
            if (decode_resumption_info(address_token->resumption.bytes, address_token->resumption.len,
7234
0
                                       &(*conn)->super.stats.jumpstart.prev_rate, &(*conn)->super.stats.jumpstart.prev_rtt) != 0) {
7235
0
                (*conn)->super.stats.jumpstart.prev_rate = 0;
7236
0
                (*conn)->super.stats.jumpstart.prev_rtt = 0;
7237
0
            }
7238
0
            break;
7239
0
        default:
7240
            /* We might not get here as tokens are integrity-protected, but as this is information supplied via network, potentially
7241
             * from broken quicly instances, we drop anything unexpected rather than calling abort(). */
7242
0
            break;
7243
0
        }
7244
0
    }
7245
0
    if ((ret = setup_handshake_space_and_flow(*conn, QUICLY_EPOCH_INITIAL)) != 0)
7246
0
        goto Exit;
7247
0
    (*conn)->initial->super.next_expected_packet_number = next_expected_pn;
7248
0
    (*conn)->initial->cipher.ingress = cipher.ingress;
7249
0
    (*conn)->initial->cipher.egress = cipher.egress;
7250
0
    cipher.alive = 0;
7251
0
    (*conn)->crypto.handshake_properties.collected_extensions = server_collected_extensions;
7252
0
    (*conn)->initial->largest_ingress_udp_payload_size = packet->datagram_size;
7253
7254
0
    QUICLY_PROBE(ACCEPT, *conn, (*conn)->stash.now,
7255
0
                 QUICLY_PROBE_HEXDUMP(packet->cid.dest.encrypted.base, packet->cid.dest.encrypted.len), address_token);
7256
0
    QUICLY_LOG_CONN(accept, *conn, {
7257
0
        PTLS_LOG_ELEMENT_HEXDUMP(dcid, packet->cid.dest.encrypted.base, packet->cid.dest.encrypted.len);
7258
0
        if (address_token != NULL) {
7259
0
            PTLS_LOG_ELEMENT_UNSIGNED(type, address_token->type);
7260
0
            PTLS_LOG_ELEMENT_UNSIGNED(issued_at, address_token->issued_at);
7261
0
            PTLS_LOG_ELEMENT_BOOL(address_mismatch, address_token->address_mismatch);
7262
0
            switch (address_token->type) {
7263
0
            case QUICLY_ADDRESS_TOKEN_TYPE_RETRY:
7264
0
                PTLS_LOG_ELEMENT_HEXDUMP(original_dcid, address_token->retry.original_dcid.cid,
7265
0
                                         address_token->retry.original_dcid.len);
7266
0
                PTLS_LOG_ELEMENT_HEXDUMP(client_cid, address_token->retry.client_cid.cid, address_token->retry.client_cid.len);
7267
0
                PTLS_LOG_ELEMENT_HEXDUMP(server_cid, address_token->retry.server_cid.cid, address_token->retry.server_cid.len);
7268
0
                break;
7269
0
            case QUICLY_ADDRESS_TOKEN_TYPE_RESUMPTION:
7270
0
                PTLS_LOG_ELEMENT_UNSIGNED(rate, (*conn)->super.stats.jumpstart.prev_rate);
7271
0
                PTLS_LOG_ELEMENT_UNSIGNED(rtt, (*conn)->super.stats.jumpstart.prev_rtt);
7272
0
                break;
7273
0
            }
7274
0
        }
7275
0
    });
7276
0
    QUICLY_PROBE(PACKET_RECEIVED, *conn, (*conn)->stash.now, pn, payload.base, payload.len, get_epoch(packet->octets.base[0]));
7277
0
    QUICLY_LOG_CONN(packet_received, *conn, {
7278
0
        PTLS_LOG_ELEMENT_UNSIGNED(pn, pn);
7279
0
        PTLS_LOG_APPDATA_ELEMENT_HEXDUMP(decrypted, payload.base, payload.len);
7280
0
        PTLS_LOG_ELEMENT_UNSIGNED(packet_type, get_epoch(packet->octets.base[0]));
7281
0
    });
7282
7283
    /* handle the input; we ignore is_ack_only, we consult if there's any output from TLS in response to CH anyways */
7284
0
    (*conn)->super.stats.num_packets.received += 1;
7285
0
    (*conn)->super.stats.num_packets.initial_received += 1;
7286
0
    if (packet->ecn != 0)
7287
0
        (*conn)->super.stats.num_packets.received_ecn_counts[get_ecn_index_from_bits(packet->ecn)] += 1;
7288
0
    (*conn)->super.stats.num_bytes.received += packet->datagram_size;
7289
0
    if ((ret = handle_payload(*conn, QUICLY_EPOCH_INITIAL, 0, payload.base, payload.len, &offending_frame_type, &is_ack_only,
7290
0
                              &is_probe_only)) != 0)
7291
0
        goto Exit;
7292
0
    if ((ret = record_receipt(&(*conn)->initial->super, pn, packet->ecn, 0, (*conn)->stash.now, &(*conn)->egress.send_ack_at,
7293
0
                              &(*conn)->super.stats.num_packets.received_out_of_order)) != 0)
7294
0
        goto Exit;
7295
7296
0
Exit:
7297
0
    if (*conn != NULL) {
7298
0
        if (ret == 0) {
7299
            /* if CONNECTION_CLOSE was found and the state advanced to DRAINING, we need to retain that state */
7300
0
            if ((*conn)->super.state < QUICLY_STATE_CONNECTED)
7301
0
                (*conn)->super.state = QUICLY_STATE_CONNECTED;
7302
0
        } else {
7303
0
            initiate_close(*conn, ret, offending_frame_type, "");
7304
0
            ret = 0;
7305
0
        }
7306
0
        unlock_now(*conn);
7307
0
    }
7308
0
    if (cipher.alive) {
7309
0
        dispose_cipher(&cipher.ingress);
7310
0
        dispose_cipher(&cipher.egress);
7311
0
    }
7312
0
    return ret;
7313
0
}
7314
7315
/**
7316
 * @param receive_delay  set to -1 when received for the first time, but if buffered for replay, contains how long the packet has
7317
 *                       been delayed
7318
 */
7319
static quicly_error_t do_receive(quicly_conn_t *conn, struct sockaddr *dest_addr, struct sockaddr *src_addr,
7320
                                 quicly_decoded_packet_t *packet, int64_t receive_delay, int *might_be_reorder)
7321
0
{
7322
0
    ptls_cipher_context_t *header_protection;
7323
0
    struct {
7324
0
        int (*cb)(void *, uint64_t, quicly_decoded_packet_t *, size_t, size_t *);
7325
0
        void *ctx;
7326
0
    } aead;
7327
0
    struct st_quicly_pn_space_t **space;
7328
0
    size_t epoch, path_index;
7329
0
    ptls_iovec_t payload;
7330
0
    uint64_t pn, offending_frame_type = QUICLY_FRAME_TYPE_PADDING;
7331
0
    int is_ack_only, is_probe_only;
7332
0
    quicly_error_t ret;
7333
7334
0
    assert(src_addr->sa_family == AF_INET || src_addr->sa_family == AF_INET6);
7335
7336
0
    *might_be_reorder = 0;
7337
7338
0
    QUICLY_PROBE(RECEIVE, conn, conn->stash.now,
7339
0
                 QUICLY_PROBE_HEXDUMP(packet->cid.dest.encrypted.base, packet->cid.dest.encrypted.len), packet->octets.base,
7340
0
                 packet->octets.len, receive_delay);
7341
0
    QUICLY_LOG_CONN(receive, conn, {
7342
0
        PTLS_LOG_ELEMENT_HEXDUMP(dcid, packet->cid.dest.encrypted.base, packet->cid.dest.encrypted.len);
7343
0
        PTLS_LOG_ELEMENT_HEXDUMP(bytes, packet->octets.base, packet->octets.len);
7344
0
        PTLS_LOG_ELEMENT_SIGNED(receive_delay, receive_delay);
7345
0
    });
7346
7347
    /* drop packets with invalid server tuple (note: when running as a server, `dest_addr` may not be available depending on the
7348
     * socket option being used */
7349
0
    if (quicly_is_client(conn)) {
7350
0
        if (compare_socket_address(src_addr, &conn->paths[0]->address.remote.sa) != 0) {
7351
0
            ret = QUICLY_ERROR_PACKET_IGNORED;
7352
0
            goto Exit;
7353
0
        }
7354
0
    } else if (dest_addr != NULL && dest_addr->sa_family != AF_UNSPEC) {
7355
0
        assert(conn->paths[0]->address.local.sa.sa_family != AF_UNSPEC);
7356
0
        if (compare_socket_address(dest_addr, &conn->paths[0]->address.local.sa) != 0) {
7357
0
            ret = QUICLY_ERROR_PACKET_IGNORED;
7358
0
            goto Exit;
7359
0
        }
7360
0
    }
7361
7362
0
    if (is_stateless_reset(conn, packet)) {
7363
0
        ret = handle_stateless_reset(conn);
7364
0
        goto Exit;
7365
0
    }
7366
7367
    /* Determine the incoming path. path_index may be set to PTLS_ELEMENTSOF(conn->paths), which indicates that a new path needs to
7368
     * be created once packet decryption succeeds. */
7369
0
    for (path_index = 0; path_index < PTLS_ELEMENTSOF(conn->paths); ++path_index)
7370
0
        if (conn->paths[path_index] != NULL && compare_socket_address(src_addr, &conn->paths[path_index]->address.remote.sa) == 0)
7371
0
            break;
7372
0
    if (path_index != 0 && !quicly_is_client(conn) &&
7373
0
        (QUICLY_PACKET_IS_LONG_HEADER(packet->octets.base[0]) || !conn->super.remote.address_validation.validated)) {
7374
0
        ret = QUICLY_ERROR_PACKET_IGNORED;
7375
0
        goto Exit;
7376
0
    }
7377
0
    if (path_index == PTLS_ELEMENTSOF(conn->paths) &&
7378
0
        conn->super.stats.num_paths.validation_failed >= conn->super.ctx->max_path_validation_failures) {
7379
0
        ret = QUICLY_ERROR_PACKET_IGNORED;
7380
0
        goto Exit;
7381
0
    }
7382
7383
    /* add unconditionally, as packet->datagram_size is set only for the first packet within the UDP datagram */
7384
0
    conn->super.stats.num_bytes.received += packet->datagram_size;
7385
7386
0
    switch (conn->super.state) {
7387
0
    case QUICLY_STATE_CLOSING:
7388
0
        ++conn->egress.connection_close.num_packets_received;
7389
        /* respond with a CONNECTION_CLOSE frame using exponential back-off */
7390
0
        if (__builtin_popcountl(conn->egress.connection_close.num_packets_received) == 1)
7391
0
            conn->egress.send_ack_at = 0;
7392
0
        ret = 0;
7393
0
        goto Exit;
7394
0
    case QUICLY_STATE_DRAINING:
7395
0
        ret = 0;
7396
0
        goto Exit;
7397
0
    default:
7398
0
        break;
7399
0
    }
7400
7401
0
    if (QUICLY_PACKET_IS_LONG_HEADER(packet->octets.base[0])) {
7402
0
        if (conn->super.state == QUICLY_STATE_FIRSTFLIGHT) {
7403
0
            if (packet->version == 0) {
7404
0
                ret = handle_version_negotiation_packet(conn, packet);
7405
0
                goto Exit;
7406
0
            }
7407
0
        }
7408
0
        if (packet->version != conn->super.version) {
7409
0
            ret = QUICLY_ERROR_PACKET_IGNORED;
7410
0
            goto Exit;
7411
0
        }
7412
0
        switch (packet->octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) {
7413
0
        case QUICLY_PACKET_TYPE_RETRY: {
7414
0
            assert(packet->encrypted_off + PTLS_AESGCM_TAG_SIZE == packet->octets.len);
7415
            /* handle only if the connection is the client */
7416
0
            if (!quicly_is_client(conn)) {
7417
0
                ret = QUICLY_ERROR_PACKET_IGNORED;
7418
0
                goto Exit;
7419
0
            }
7420
            /* server CID has to change */
7421
0
            if (quicly_cid_is_equal(&conn->super.remote.cid_set.cids[0].cid, packet->cid.src)) {
7422
0
                ret = QUICLY_ERROR_PACKET_IGNORED;
7423
0
                goto Exit;
7424
0
            }
7425
            /* do not accept a second Retry */
7426
0
            if (is_retry(conn)) {
7427
0
                ret = QUICLY_ERROR_PACKET_IGNORED;
7428
0
                goto Exit;
7429
0
            }
7430
0
            ptls_aead_context_t *retry_aead = create_retry_aead(conn->super.ctx, conn->super.version, 0);
7431
0
            int retry_ok = validate_retry_tag(packet, &conn->super.remote.cid_set.cids[0].cid, retry_aead);
7432
0
            ptls_aead_free(retry_aead);
7433
0
            if (!retry_ok) {
7434
0
                ret = QUICLY_ERROR_PACKET_IGNORED;
7435
0
                goto Exit;
7436
0
            }
7437
            /* check size of the Retry packet */
7438
0
            if (packet->token.len > QUICLY_MAX_TOKEN_LEN) {
7439
0
                ret = QUICLY_ERROR_PACKET_IGNORED; /* TODO this is a immediate fatal error, chose a better error code */
7440
0
                goto Exit;
7441
0
            }
7442
            /* store token and ODCID */
7443
0
            free(conn->token.base);
7444
0
            if ((conn->token.base = malloc(packet->token.len)) == NULL) {
7445
0
                ret = PTLS_ERROR_NO_MEMORY;
7446
0
                goto Exit;
7447
0
            }
7448
0
            memcpy(conn->token.base, packet->token.base, packet->token.len);
7449
0
            conn->token.len = packet->token.len;
7450
            /* update DCID */
7451
0
            quicly_set_cid(&conn->super.remote.cid_set.cids[0].cid, packet->cid.src);
7452
0
            conn->retry_scid = conn->super.remote.cid_set.cids[0].cid;
7453
            /* replace initial keys, or drop the keys if this is a response packet to a greased version */
7454
0
            if ((ret = reinstall_initial_encryption(conn, QUICLY_ERROR_PACKET_IGNORED)) != 0)
7455
0
                goto Exit;
7456
            /* schedule retransmit */
7457
0
            ret = discard_sentmap_by_epoch(conn, ~0u);
7458
0
            goto Exit;
7459
0
        } break;
7460
0
        case QUICLY_PACKET_TYPE_INITIAL:
7461
0
            if (conn->initial == NULL || (header_protection = conn->initial->cipher.ingress.header_protection) == NULL) {
7462
0
                ret = QUICLY_ERROR_PACKET_IGNORED;
7463
0
                goto Exit;
7464
0
            }
7465
0
            if (quicly_is_client(conn)) {
7466
                /* client: update cid if this is the first Initial packet that's being received */
7467
0
                if (conn->super.state == QUICLY_STATE_FIRSTFLIGHT)
7468
0
                    quicly_set_cid(&conn->super.remote.cid_set.cids[0].cid, packet->cid.src);
7469
0
            } else {
7470
                /* server: ignore packets that are too small */
7471
0
                if (packet->datagram_size < QUICLY_MIN_CLIENT_INITIAL_SIZE) {
7472
0
                    ret = QUICLY_ERROR_PACKET_IGNORED;
7473
0
                    goto Exit;
7474
0
                }
7475
0
            }
7476
0
            aead.cb = aead_decrypt_fixed_key;
7477
0
            aead.ctx = conn->initial->cipher.ingress.aead;
7478
0
            space = (void *)&conn->initial;
7479
0
            epoch = QUICLY_EPOCH_INITIAL;
7480
0
            break;
7481
0
        case QUICLY_PACKET_TYPE_HANDSHAKE:
7482
0
            if (conn->handshake == NULL || (header_protection = conn->handshake->cipher.ingress.header_protection) == NULL) {
7483
0
                if (!(conn->application != NULL && conn->application->cipher.ingress.header_protection.one_rtt != NULL))
7484
0
                    *might_be_reorder = 1;
7485
0
                ret = QUICLY_ERROR_PACKET_IGNORED;
7486
0
                goto Exit;
7487
0
            }
7488
0
            aead.cb = aead_decrypt_fixed_key;
7489
0
            aead.ctx = conn->handshake->cipher.ingress.aead;
7490
0
            space = (void *)&conn->handshake;
7491
0
            epoch = QUICLY_EPOCH_HANDSHAKE;
7492
0
            break;
7493
0
        case QUICLY_PACKET_TYPE_0RTT:
7494
0
            if (quicly_is_client(conn)) {
7495
0
                ret = QUICLY_ERROR_PACKET_IGNORED;
7496
0
                goto Exit;
7497
0
            }
7498
0
            if (conn->application == NULL ||
7499
0
                (header_protection = conn->application->cipher.ingress.header_protection.zero_rtt) == NULL) {
7500
0
                if (!(conn->application != NULL && conn->application->cipher.ingress.header_protection.one_rtt != NULL))
7501
0
                    *might_be_reorder = 1;
7502
0
                ret = QUICLY_ERROR_PACKET_IGNORED;
7503
0
                goto Exit;
7504
0
            }
7505
0
            aead.cb = aead_decrypt_fixed_key;
7506
0
            aead.ctx = conn->application->cipher.ingress.aead[1];
7507
0
            space = (void *)&conn->application;
7508
0
            epoch = QUICLY_EPOCH_0RTT;
7509
0
            break;
7510
0
        default:
7511
0
            ret = QUICLY_ERROR_PACKET_IGNORED;
7512
0
            goto Exit;
7513
0
        }
7514
0
    } else {
7515
        /* short header packet */
7516
0
        if (conn->application == NULL ||
7517
0
            (header_protection = conn->application->cipher.ingress.header_protection.one_rtt) == NULL) {
7518
0
            *might_be_reorder = 1;
7519
0
            ret = QUICLY_ERROR_PACKET_IGNORED;
7520
0
            goto Exit;
7521
0
        }
7522
0
        aead.cb = aead_decrypt_1rtt;
7523
0
        aead.ctx = conn;
7524
0
        space = (void *)&conn->application;
7525
0
        epoch = QUICLY_EPOCH_1RTT;
7526
0
    }
7527
7528
    /* decrypt */
7529
0
    if ((ret = decrypt_packet(header_protection, aead.cb, aead.ctx, &(*space)->next_expected_packet_number, packet, &pn,
7530
0
                              &payload)) != 0) {
7531
0
        ++conn->super.stats.num_packets.decryption_failed;
7532
0
        QUICLY_PROBE(PACKET_DECRYPTION_FAILED, conn, conn->stash.now, pn);
7533
0
        goto Exit;
7534
0
    }
7535
7536
0
    QUICLY_PROBE(PACKET_RECEIVED, conn, conn->stash.now, pn, payload.base, payload.len, get_epoch(packet->octets.base[0]));
7537
0
    QUICLY_LOG_CONN(packet_received, conn, {
7538
0
        PTLS_LOG_ELEMENT_UNSIGNED(pn, pn);
7539
0
        PTLS_LOG_ELEMENT_UNSIGNED(decrypted_len, payload.len);
7540
0
        PTLS_LOG_ELEMENT_UNSIGNED(packet_type, get_epoch(packet->octets.base[0]));
7541
0
    });
7542
7543
    /* open a new path if necessary, now that decryption succeeded */
7544
0
    if (path_index == PTLS_ELEMENTSOF(conn->paths) && (ret = open_path(conn, &path_index, src_addr, dest_addr)) != 0)
7545
0
        goto Exit;
7546
7547
    /* update states */
7548
0
    if (conn->super.state == QUICLY_STATE_FIRSTFLIGHT)
7549
0
        conn->super.state = QUICLY_STATE_CONNECTED;
7550
0
    conn->super.stats.num_packets.received += 1;
7551
0
    conn->paths[path_index]->packet_last_received = conn->super.stats.num_packets.received;
7552
0
    conn->paths[path_index]->num_packets.received += 1;
7553
0
    if (QUICLY_PACKET_IS_LONG_HEADER(packet->octets.base[0])) {
7554
0
        switch (packet->octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) {
7555
0
        case QUICLY_PACKET_TYPE_INITIAL:
7556
0
            conn->super.stats.num_packets.initial_received += 1;
7557
0
            break;
7558
0
        case QUICLY_PACKET_TYPE_0RTT:
7559
0
            conn->super.stats.num_packets.zero_rtt_received += 1;
7560
0
            break;
7561
0
        case QUICLY_PACKET_TYPE_HANDSHAKE:
7562
0
            conn->super.stats.num_packets.handshake_received += 1;
7563
0
            break;
7564
0
        }
7565
0
    }
7566
0
    if (packet->ecn != 0)
7567
0
        conn->super.stats.num_packets.received_ecn_counts[get_ecn_index_from_bits(packet->ecn)] += 1;
7568
7569
    /* state updates, that are triggered by the receipt of a packet */
7570
0
    switch (epoch) {
7571
0
    case QUICLY_EPOCH_INITIAL:
7572
        /* update max_ingress_udp_payload_size if necessary */
7573
0
        if (conn->initial->largest_ingress_udp_payload_size < packet->datagram_size)
7574
0
            conn->initial->largest_ingress_udp_payload_size = packet->datagram_size;
7575
0
        break;
7576
0
    case QUICLY_EPOCH_HANDSHAKE:
7577
        /* Discard Initial space before processing the payload of the Handshake packet to avoid the chance of an ACK frame included
7578
         * in the Handshake packet setting a loss timer for the Initial packet. */
7579
0
        if (conn->initial != NULL) {
7580
0
            if ((ret = discard_handshake_context(conn, QUICLY_EPOCH_INITIAL)) != 0)
7581
0
                goto Exit;
7582
0
            setup_next_send(conn);
7583
0
            conn->super.remote.address_validation.validated = 1;
7584
0
        }
7585
0
        break;
7586
0
    default:
7587
0
        break;
7588
0
    }
7589
7590
    /* handle the payload */
7591
0
    if ((ret = handle_payload(conn, epoch, path_index, payload.base, payload.len, &offending_frame_type, &is_ack_only,
7592
0
                              &is_probe_only)) != 0)
7593
0
        goto Exit;
7594
0
    if (!is_probe_only && conn->paths[path_index]->probe_only) {
7595
0
        assert(path_index != 0);
7596
0
        conn->paths[path_index]->probe_only = 0;
7597
0
        ++conn->super.stats.num_paths.migration_elicited;
7598
0
        QUICLY_PROBE(ELICIT_PATH_MIGRATION, conn, conn->stash.now, path_index);
7599
0
        QUICLY_LOG_CONN(elicit_path_migration, conn, { PTLS_LOG_ELEMENT_UNSIGNED(path_index, path_index); });
7600
0
    }
7601
0
    if (*space != NULL && conn->super.state < QUICLY_STATE_CLOSING) {
7602
0
        if ((ret = record_receipt(*space, pn, packet->ecn, is_ack_only, conn->stash.now - (receive_delay >= 0 ? receive_delay : 0),
7603
0
                                  &conn->egress.send_ack_at, &conn->super.stats.num_packets.received_out_of_order)) != 0)
7604
0
            goto Exit;
7605
0
    }
7606
7607
    /* state updates post payload processing */
7608
0
    switch (epoch) {
7609
0
    case QUICLY_EPOCH_INITIAL:
7610
0
        assert(conn->initial != NULL);
7611
0
        if (quicly_is_client(conn) && conn->handshake != NULL && conn->handshake->cipher.egress.aead != NULL) {
7612
0
            if ((ret = discard_handshake_context(conn, QUICLY_EPOCH_INITIAL)) != 0)
7613
0
                goto Exit;
7614
0
            setup_next_send(conn);
7615
0
        }
7616
0
        break;
7617
0
    case QUICLY_EPOCH_HANDSHAKE:
7618
0
        if (quicly_is_client(conn)) {
7619
            /* Running as a client.
7620
             * Respect "disable_migration" TP sent by the remote peer at the end of the TLS handshake. */
7621
0
            if (conn->paths[0]->address.local.sa.sa_family == AF_UNSPEC && dest_addr != NULL && dest_addr->sa_family != AF_UNSPEC &&
7622
0
                ptls_handshake_is_complete(conn->crypto.tls) && conn->super.remote.transport_params.disable_active_migration)
7623
0
                set_address(&conn->paths[0]->address.local, dest_addr);
7624
0
        } else {
7625
            /* Running as a server.
7626
             * If handshake was just completed, drop handshake context, schedule the first emission of HANDSHAKE_DONE frame. */
7627
0
            if (ptls_handshake_is_complete(conn->crypto.tls)) {
7628
0
                if ((ret = discard_handshake_context(conn, QUICLY_EPOCH_HANDSHAKE)) != 0)
7629
0
                    goto Exit;
7630
0
                assert(conn->handshake == NULL);
7631
0
                conn->egress.pending_flows |= QUICLY_PENDING_FLOW_HANDSHAKE_DONE_BIT;
7632
0
                setup_next_send(conn);
7633
0
            }
7634
0
        }
7635
0
        break;
7636
0
    case QUICLY_EPOCH_1RTT:
7637
0
        if (!is_ack_only && should_send_max_data(conn))
7638
0
            conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;
7639
        /* switch active path to current path, if current path is validated and not probe-only */
7640
0
        if (path_index != 0 && conn->paths[path_index]->path_challenge.send_at == INT64_MAX &&
7641
0
            !conn->paths[path_index]->probe_only) {
7642
0
            if ((ret = promote_path(conn, path_index)) != 0)
7643
0
                goto Exit;
7644
0
            recalc_send_probe_at(conn);
7645
0
        }
7646
0
        break;
7647
0
    default:
7648
0
        break;
7649
0
    }
7650
7651
0
    update_idle_timeout(conn, 1);
7652
7653
0
Exit:
7654
0
    switch (ret) {
7655
0
    case 0:
7656
        /* Avoid time in the past being emitted by quicly_get_first_timeout. We hit the condition below when retransmission is
7657
         * suspended by the 3x limit (in which case we have loss.alarm_at set but return INT64_MAX from quicly_get_first_timeout
7658
         * until we receive something from the client).
7659
         */
7660
0
        if (conn->egress.loss.alarm_at < conn->stash.now)
7661
0
            conn->egress.loss.alarm_at = conn->stash.now;
7662
0
        assert_consistency(conn, 0);
7663
0
        break;
7664
0
    case PTLS_ERROR_NO_MEMORY:
7665
0
    case QUICLY_ERROR_STATE_EXHAUSTION:
7666
0
    case QUICLY_ERROR_PACKET_IGNORED:
7667
0
        break;
7668
0
    default: /* close connection */
7669
0
        initiate_close(conn, ret, offending_frame_type, "");
7670
0
        ret = 0;
7671
0
        break;
7672
0
    }
7673
0
    return ret;
7674
0
}
7675
7676
quicly_error_t quicly_receive(quicly_conn_t *conn, struct sockaddr *dest_addr, struct sockaddr *src_addr,
7677
                              quicly_decoded_packet_t *packet)
7678
0
{
7679
0
    lock_now(conn, 0);
7680
7681
0
    int might_be_reorder;
7682
0
    quicly_error_t ret = do_receive(conn, dest_addr, src_addr, packet, -1, &might_be_reorder);
7683
7684
0
    if (might_be_reorder) {
7685
7686
0
        if (conn->delayed_packets.num_packets < QUICLY_MAX_DELAYED_PACKETS &&
7687
0
            compare_socket_address(&conn->paths[0]->address.remote.sa, src_addr) == 0) {
7688
            /* instantiate the delayed packet */
7689
0
            struct st_quicly_delayed_packet_t *delayed;
7690
0
            if ((delayed = malloc(offsetof(struct st_quicly_delayed_packet_t, bytes) + packet->octets.len)) == NULL) {
7691
0
                ret = PTLS_ERROR_NO_MEMORY;
7692
0
                goto Exit;
7693
0
            }
7694
0
            delayed->next = NULL;
7695
0
            delayed->at = conn->stash.now;
7696
0
            delayed->packet = *packet;
7697
0
            memcpy(delayed->bytes, packet->octets.base, packet->octets.len);
7698
0
            adjust_pointers_of_decoded_packet(&delayed->packet, delayed->bytes);
7699
            /* attach */
7700
0
            size_t slot;
7701
0
            if ((delayed->packet.octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) == QUICLY_PACKET_TYPE_0RTT) {
7702
0
                slot = &conn->delayed_packets.zero_rtt - conn->delayed_packets.as_array;
7703
0
            } else if ((delayed->packet.octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) == QUICLY_PACKET_TYPE_HANDSHAKE) {
7704
0
                slot = &conn->delayed_packets.handshake - conn->delayed_packets.as_array;
7705
0
            } else {
7706
0
                assert(!QUICLY_PACKET_IS_LONG_HEADER(delayed->packet.octets.base[0]));
7707
0
                slot = &conn->delayed_packets.one_rtt - conn->delayed_packets.as_array;
7708
0
            }
7709
0
            *conn->delayed_packets.as_array[slot].tail = delayed;
7710
0
            conn->delayed_packets.as_array[slot].tail = &delayed->next;
7711
0
            ++conn->delayed_packets.num_packets;
7712
0
            if (conn->super.stats.num_packets.max_delayed < conn->delayed_packets.num_packets)
7713
0
                conn->super.stats.num_packets.max_delayed = conn->delayed_packets.num_packets;
7714
0
        }
7715
7716
0
    } else if (ret == 0) { /* if state has advanced, process delayed slots that have become processible */
7717
7718
0
        for (size_t slot = 0; conn->delayed_packets.slots_newly_processible != 0; ++slot) {
7719
0
            if ((conn->delayed_packets.slots_newly_processible & (1 << slot)) == 0)
7720
0
                continue;
7721
0
            conn->delayed_packets.slots_newly_processible ^= 1 << slot;
7722
7723
            /* processes each delayed packet */
7724
0
            struct st_quicly_delayed_packet_t *delayed;
7725
0
            while ((delayed = conn->delayed_packets.as_array[slot].head) != NULL) {
7726
                /* detach */
7727
0
                if ((conn->delayed_packets.as_array[slot].head = delayed->next) == NULL)
7728
0
                    conn->delayed_packets.as_array[slot].tail = &conn->delayed_packets.as_array[slot].head;
7729
0
                --conn->delayed_packets.num_packets;
7730
                /* process the packet and free */
7731
0
                int might_be_reorder;
7732
0
                ret = do_receive(conn, NULL, &conn->paths[0]->address.remote.sa, &delayed->packet, conn->stash.now - delayed->at,
7733
0
                                 &might_be_reorder);
7734
0
                free(delayed);
7735
0
                switch (ret) {
7736
0
                case 0:
7737
0
                    conn->super.stats.num_packets.delayed_used += 1;
7738
0
                    break;
7739
0
                case QUICLY_ERROR_PACKET_IGNORED:
7740
0
                case QUICLY_ERROR_DECRYPTION_FAILED:
7741
0
                    break;
7742
0
                default: /* bail out if a fatal error has been raised */
7743
0
                    goto Exit;
7744
0
                }
7745
0
            }
7746
0
        }
7747
0
    }
7748
7749
0
Exit:
7750
0
    unlock_now(conn);
7751
0
    return ret;
7752
0
}
7753
7754
quicly_error_t quicly_open_stream(quicly_conn_t *conn, quicly_stream_t **_stream, int uni)
7755
0
{
7756
0
    quicly_stream_t *stream;
7757
0
    struct st_quicly_conn_streamgroup_state_t *group;
7758
0
    uint64_t *max_stream_count;
7759
0
    uint32_t max_stream_data_local;
7760
0
    uint64_t max_stream_data_remote;
7761
0
    quicly_error_t ret;
7762
7763
    /* determine the states */
7764
0
    if (uni) {
7765
0
        group = &conn->super.local.uni;
7766
0
        max_stream_count = &conn->egress.max_streams.uni.count;
7767
0
        max_stream_data_local = 0;
7768
0
        max_stream_data_remote = conn->super.remote.transport_params.max_stream_data.uni;
7769
0
    } else {
7770
0
        group = &conn->super.local.bidi;
7771
0
        max_stream_count = &conn->egress.max_streams.bidi.count;
7772
0
        max_stream_data_local = (uint32_t)conn->super.ctx->transport_params.max_stream_data.bidi_local;
7773
0
        max_stream_data_remote = conn->super.remote.transport_params.max_stream_data.bidi_remote;
7774
0
    }
7775
7776
    /* open */
7777
0
    if ((stream = open_stream(conn, group->next_stream_id, max_stream_data_local, max_stream_data_remote)) == NULL)
7778
0
        return PTLS_ERROR_NO_MEMORY;
7779
0
    ++group->num_streams;
7780
0
    group->next_stream_id += 4;
7781
7782
    /* adjust blocked */
7783
0
    if (stream->stream_id / 4 >= *max_stream_count) {
7784
0
        stream->streams_blocked = 1;
7785
0
        quicly_linklist_insert((uni ? &conn->egress.pending_streams.blocked.uni : &conn->egress.pending_streams.blocked.bidi)->prev,
7786
0
                               &stream->_send_aux.pending_link.control);
7787
        /* schedule the emission of STREAMS_BLOCKED if application write key is available (otherwise the scheduling is done when
7788
         * the key becomes available) */
7789
0
        if (stream->conn->application != NULL && stream->conn->application->cipher.egress.key.aead != NULL)
7790
0
            conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;
7791
0
    }
7792
7793
    /* application-layer initialization */
7794
0
    QUICLY_PROBE(STREAM_ON_OPEN, conn, conn->stash.now, stream);
7795
0
    QUICLY_LOG_CONN(stream_on_open, conn, {});
7796
7797
0
    if ((ret = conn->super.ctx->stream_open->cb(conn->super.ctx->stream_open, stream)) != 0)
7798
0
        return ret;
7799
7800
0
    *_stream = stream;
7801
0
    return 0;
7802
0
}
7803
7804
void quicly_reset_stream(quicly_stream_t *stream, quicly_error_t err)
7805
{
7806
    assert(quicly_stream_has_send_side(quicly_is_client(stream->conn), stream->stream_id));
7807
    assert(QUICLY_ERROR_IS_QUIC_APPLICATION(err));
7808
    assert(stream->_send_aux.reset_stream.sender_state == QUICLY_SENDER_STATE_NONE);
7809
    assert(!quicly_sendstate_transfer_complete(&stream->sendstate));
7810
7811
    /* dispose sendbuf state */
7812
    quicly_sendstate_reset(&stream->sendstate);
7813
7814
    /* setup RESET_STREAM */
7815
    stream->_send_aux.reset_stream.sender_state = QUICLY_SENDER_STATE_SEND;
7816
    stream->_send_aux.reset_stream.error_code = QUICLY_ERROR_GET_ERROR_CODE(err);
7817
7818
    /* schedule for delivery */
7819
    sched_stream_control(stream);
7820
    resched_stream_data(stream);
7821
}
7822
7823
void quicly_request_stop(quicly_stream_t *stream, quicly_error_t err)
7824
{
7825
    assert(quicly_stream_has_receive_side(quicly_is_client(stream->conn), stream->stream_id));
7826
    assert(QUICLY_ERROR_IS_QUIC_APPLICATION(err));
7827
7828
    /* send STOP_SENDING if the incoming side of the stream is still open */
7829
    if (stream->recvstate.eos == UINT64_MAX && stream->_send_aux.stop_sending.sender_state == QUICLY_SENDER_STATE_NONE) {
7830
        stream->_send_aux.stop_sending.sender_state = QUICLY_SENDER_STATE_SEND;
7831
        stream->_send_aux.stop_sending.error_code = QUICLY_ERROR_GET_ERROR_CODE(err);
7832
        sched_stream_control(stream);
7833
    }
7834
}
7835
7836
socklen_t quicly_get_socklen(struct sockaddr *sa)
7837
604
{
7838
604
    switch (sa->sa_family) {
7839
604
    case AF_INET:
7840
604
        return sizeof(struct sockaddr_in);
7841
0
    case AF_INET6:
7842
0
        return sizeof(struct sockaddr_in6);
7843
0
    default:
7844
0
        assert(!"unexpected socket type");
7845
0
        return 0;
7846
604
    }
7847
604
}
7848
7849
char *quicly_escape_unsafe_string(char *buf, const void *bytes, size_t len)
7850
0
{
7851
0
    char *dst = buf;
7852
0
    const char *src = bytes, *end = src + len;
7853
7854
0
    for (; src != end; ++src) {
7855
0
        if ((0x20 <= *src && *src <= 0x7e) && !(*src == '"' || *src == '\'' || *src == '\\')) {
7856
0
            *dst++ = *src;
7857
0
        } else {
7858
0
            *dst++ = '\\';
7859
0
            *dst++ = 'x';
7860
0
            quicly_byte_to_hex(dst, (uint8_t)*src);
7861
0
            dst += 2;
7862
0
        }
7863
0
    }
7864
0
    *dst = '\0';
7865
7866
0
    return buf;
7867
0
}
7868
7869
char *quicly_hexdump(const uint8_t *bytes, size_t len, size_t indent)
7870
0
{
7871
0
    size_t i, line, row, bufsize = indent == SIZE_MAX ? len * 2 + 1 : (indent + 5 + 3 * 16 + 2 + 16 + 1) * ((len + 15) / 16) + 1;
7872
0
    char *buf, *p;
7873
7874
0
    if ((buf = malloc(bufsize)) == NULL)
7875
0
        return NULL;
7876
0
    p = buf;
7877
0
    if (indent == SIZE_MAX) {
7878
0
        for (i = 0; i != len; ++i) {
7879
0
            quicly_byte_to_hex(p, bytes[i]);
7880
0
            p += 2;
7881
0
        }
7882
0
    } else {
7883
0
        for (line = 0; line * 16 < len; ++line) {
7884
0
            for (i = 0; i < indent; ++i)
7885
0
                *p++ = ' ';
7886
0
            quicly_byte_to_hex(p, (line >> 4) & 0xff);
7887
0
            p += 2;
7888
0
            quicly_byte_to_hex(p, (line << 4) & 0xff);
7889
0
            p += 2;
7890
0
            *p++ = ' ';
7891
0
            for (row = 0; row < 16; ++row) {
7892
0
                *p++ = row == 8 ? '-' : ' ';
7893
0
                if (line * 16 + row < len) {
7894
0
                    quicly_byte_to_hex(p, bytes[line * 16 + row]);
7895
0
                    p += 2;
7896
0
                } else {
7897
0
                    *p++ = ' ';
7898
0
                    *p++ = ' ';
7899
0
                }
7900
0
            }
7901
0
            *p++ = ' ';
7902
0
            *p++ = ' ';
7903
0
            for (row = 0; row < 16; ++row) {
7904
0
                if (line * 16 + row < len) {
7905
0
                    int ch = bytes[line * 16 + row];
7906
0
                    *p++ = 0x20 <= ch && ch < 0x7f ? ch : '.';
7907
0
                } else {
7908
0
                    *p++ = ' ';
7909
0
                }
7910
0
            }
7911
0
            *p++ = '\n';
7912
0
        }
7913
0
    }
7914
0
    *p++ = '\0';
7915
7916
0
    assert(p - buf <= bufsize);
7917
7918
0
    return buf;
7919
0
}
7920
7921
void quicly_amend_ptls_context(ptls_context_t *ptls)
7922
0
{
7923
0
    static ptls_update_traffic_key_t update_traffic_key = {update_traffic_key_cb};
7924
7925
0
    ptls->omit_end_of_early_data = 1;
7926
0
    ptls->update_traffic_key = &update_traffic_key;
7927
7928
    /* if TLS 1.3 config permits use of early data, convert the value to 0xffffffff in accordance with QUIC-TLS */
7929
0
    if (ptls->max_early_data_size != 0)
7930
0
        ptls->max_early_data_size = UINT32_MAX;
7931
0
}
7932
7933
quicly_error_t quicly_encrypt_address_token(void (*random_bytes)(void *, size_t), ptls_aead_context_t *aead, ptls_buffer_t *buf,
7934
                                            size_t start_off, const quicly_address_token_plaintext_t *plaintext)
7935
0
{
7936
0
    quicly_error_t ret;
7937
7938
    /* type and IV */
7939
0
    if ((ret = ptls_buffer_reserve(buf, 1 + aead->algo->iv_size)) != 0)
7940
0
        goto Exit;
7941
0
    buf->base[buf->off++] = plaintext->type;
7942
0
    random_bytes(buf->base + buf->off, aead->algo->iv_size);
7943
0
    buf->off += aead->algo->iv_size;
7944
7945
0
    size_t enc_start = buf->off;
7946
7947
    /* data */
7948
0
    ptls_buffer_push64(buf, plaintext->issued_at);
7949
0
    {
7950
0
        uint16_t port;
7951
0
        ptls_buffer_push_block(buf, 1, {
7952
0
            switch (plaintext->remote.sa.sa_family) {
7953
0
            case AF_INET:
7954
0
                ptls_buffer_pushv(buf, &plaintext->remote.sin.sin_addr.s_addr, 4);
7955
0
                port = ntohs(plaintext->remote.sin.sin_port);
7956
0
                break;
7957
0
            case AF_INET6:
7958
0
                ptls_buffer_pushv(buf, &plaintext->remote.sin6.sin6_addr, 16);
7959
0
                ptls_buffer_push32(buf, plaintext->remote.sin6.sin6_scope_id);
7960
0
                port = ntohs(plaintext->remote.sin6.sin6_port);
7961
0
                break;
7962
0
            default:
7963
0
                assert(!"unsupported address type");
7964
0
                break;
7965
0
            }
7966
0
        });
7967
0
        ptls_buffer_push16(buf, port);
7968
0
    }
7969
0
    switch (plaintext->type) {
7970
0
    case QUICLY_ADDRESS_TOKEN_TYPE_RETRY:
7971
0
        ptls_buffer_push_block(buf, 1,
7972
0
                               { ptls_buffer_pushv(buf, plaintext->retry.original_dcid.cid, plaintext->retry.original_dcid.len); });
7973
0
        ptls_buffer_push_block(buf, 1,
7974
0
                               { ptls_buffer_pushv(buf, plaintext->retry.client_cid.cid, plaintext->retry.client_cid.len); });
7975
0
        ptls_buffer_push_block(buf, 1,
7976
0
                               { ptls_buffer_pushv(buf, plaintext->retry.server_cid.cid, plaintext->retry.server_cid.len); });
7977
0
        break;
7978
0
    case QUICLY_ADDRESS_TOKEN_TYPE_RESUMPTION:
7979
0
        ptls_buffer_push_block(buf, 1, { ptls_buffer_pushv(buf, plaintext->resumption.bytes, plaintext->resumption.len); });
7980
0
        break;
7981
0
    default:
7982
0
        assert(!"unexpected token type");
7983
0
        abort();
7984
0
    }
7985
0
    ptls_buffer_push_block(buf, 1, { ptls_buffer_pushv(buf, plaintext->appdata.bytes, plaintext->appdata.len); });
7986
7987
    /* encrypt, supplying full IV */
7988
0
    if ((ret = ptls_buffer_reserve(buf, aead->algo->tag_size)) != 0)
7989
0
        goto Exit;
7990
0
    ptls_aead_set_iv(aead, buf->base + enc_start - aead->algo->iv_size);
7991
0
    ptls_aead_encrypt(aead, buf->base + enc_start, buf->base + enc_start, buf->off - enc_start, 0, buf->base + start_off,
7992
0
                      enc_start - start_off);
7993
0
    buf->off += aead->algo->tag_size;
7994
7995
0
Exit:
7996
0
    return ret;
7997
0
}
7998
7999
quicly_error_t quicly_decrypt_address_token(ptls_aead_context_t *aead, quicly_address_token_plaintext_t *plaintext,
8000
                                            const void *_token, size_t len, size_t prefix_len, const char **err_desc)
8001
0
{
8002
0
    const uint8_t *const token = _token;
8003
0
    uint8_t ptbuf[QUICLY_MIN_CLIENT_INITIAL_SIZE];
8004
0
    size_t ptlen;
8005
8006
0
    *err_desc = NULL;
8007
8008
    /* check if we can get type and decrypt */
8009
0
    if (len < prefix_len + 1 + aead->algo->iv_size + aead->algo->tag_size) {
8010
0
        *err_desc = "token too small";
8011
0
        return PTLS_ALERT_DECODE_ERROR;
8012
0
    }
8013
0
    if (prefix_len + 1 + aead->algo->iv_size + sizeof(ptbuf) + aead->algo->tag_size < len) {
8014
0
        *err_desc = "token too large";
8015
0
        return PTLS_ALERT_DECODE_ERROR;
8016
0
    }
8017
8018
    /* check type */
8019
0
    switch (token[prefix_len]) {
8020
0
    case QUICLY_ADDRESS_TOKEN_TYPE_RETRY:
8021
0
        plaintext->type = QUICLY_ADDRESS_TOKEN_TYPE_RETRY;
8022
0
        break;
8023
0
    case QUICLY_ADDRESS_TOKEN_TYPE_RESUMPTION:
8024
0
        plaintext->type = QUICLY_ADDRESS_TOKEN_TYPE_RESUMPTION;
8025
0
        break;
8026
0
    default:
8027
0
        *err_desc = "unknown token type";
8028
0
        return PTLS_ALERT_DECODE_ERROR;
8029
0
    }
8030
8031
    /* `goto Exit` can only happen below this line, and that is guaranteed by declaring `ret` here */
8032
0
    quicly_error_t ret;
8033
8034
    /* decrypt */
8035
0
    ptls_aead_set_iv(aead, token + prefix_len + 1);
8036
0
    if ((ptlen = ptls_aead_decrypt(aead, ptbuf, token + prefix_len + 1 + aead->algo->iv_size,
8037
0
                                   len - (prefix_len + 1 + aead->algo->iv_size), 0, token, prefix_len + 1 + aead->algo->iv_size)) ==
8038
0
        SIZE_MAX) {
8039
0
        ret = PTLS_ALERT_DECRYPT_ERROR;
8040
0
        *err_desc = "token decryption failure";
8041
0
        goto Exit;
8042
0
    }
8043
8044
    /* parse */
8045
0
    const uint8_t *src = ptbuf, *end = src + ptlen;
8046
0
    if ((ret = ptls_decode64(&plaintext->issued_at, &src, end)) != 0)
8047
0
        goto Exit;
8048
0
    {
8049
0
        in_port_t *portaddr;
8050
0
        ptls_decode_open_block(src, end, 1, {
8051
0
            switch (end - src) {
8052
0
            case 4: /* ipv4 */
8053
0
                plaintext->remote.sin.sin_family = AF_INET;
8054
0
                memcpy(&plaintext->remote.sin.sin_addr.s_addr, src, 4);
8055
0
                portaddr = &plaintext->remote.sin.sin_port;
8056
0
                break;
8057
0
            case 20: /* ipv6 */
8058
0
                plaintext->remote.sin6 = (struct sockaddr_in6){.sin6_family = AF_INET6};
8059
0
                memcpy(&plaintext->remote.sin6.sin6_addr, src, 16);
8060
0
                if ((ret = ptls_decode32(&plaintext->remote.sin6.sin6_scope_id, &src, end)) != 0)
8061
0
                    goto Exit;
8062
0
                portaddr = &plaintext->remote.sin6.sin6_port;
8063
0
                break;
8064
0
            default:
8065
0
                ret = PTLS_ALERT_DECODE_ERROR;
8066
0
                goto Exit;
8067
0
            }
8068
0
            src = end;
8069
0
        });
8070
0
        uint16_t port;
8071
0
        if ((ret = ptls_decode16(&port, &src, end)) != 0)
8072
0
            goto Exit;
8073
0
        *portaddr = htons(port);
8074
0
    }
8075
0
    switch (plaintext->type) {
8076
0
    case QUICLY_ADDRESS_TOKEN_TYPE_RETRY:
8077
0
#define DECODE_CID(field)                                                                                                          \
8078
0
    do {                                                                                                                           \
8079
0
        ptls_decode_open_block(src, end, 1, {                                                                                      \
8080
0
            if (end - src > sizeof(plaintext->retry.field.cid)) {                                                                  \
8081
0
                ret = PTLS_ALERT_DECODE_ERROR;                                                                                     \
8082
0
                goto Exit;                                                                                                         \
8083
0
            }                                                                                                                      \
8084
0
            quicly_set_cid(&plaintext->retry.field, ptls_iovec_init(src, end - src));                                              \
8085
0
            src = end;                                                                                                             \
8086
0
        });                                                                                                                        \
8087
0
    } while (0)
8088
0
        DECODE_CID(original_dcid);
8089
0
        DECODE_CID(client_cid);
8090
0
        DECODE_CID(server_cid);
8091
0
#undef DECODE_CID
8092
0
        break;
8093
0
    case QUICLY_ADDRESS_TOKEN_TYPE_RESUMPTION:
8094
0
        ptls_decode_open_block(src, end, 1, {
8095
0
            PTLS_BUILD_ASSERT(sizeof(plaintext->resumption.bytes) >= 256);
8096
0
            plaintext->resumption.len = end - src;
8097
0
            memcpy(plaintext->resumption.bytes, src, plaintext->resumption.len);
8098
0
            src = end;
8099
0
        });
8100
0
        break;
8101
0
    default:
8102
0
        assert(!"unexpected token type");
8103
0
        abort();
8104
0
    }
8105
0
    ptls_decode_block(src, end, 1, {
8106
0
        PTLS_BUILD_ASSERT(sizeof(plaintext->appdata.bytes) >= 256);
8107
0
        plaintext->appdata.len = end - src;
8108
0
        memcpy(plaintext->appdata.bytes, src, plaintext->appdata.len);
8109
0
        src = end;
8110
0
    });
8111
0
    ret = 0;
8112
8113
0
Exit:
8114
0
    if (ret != 0) {
8115
0
        if (*err_desc == NULL)
8116
0
            *err_desc = "token decode error";
8117
        /* promote the error to one that triggers the emission of INVALID_TOKEN_ERROR, if the token looked like a retry */
8118
0
        if (plaintext->type == QUICLY_ADDRESS_TOKEN_TYPE_RETRY)
8119
0
            ret = QUICLY_TRANSPORT_ERROR_INVALID_TOKEN;
8120
0
    }
8121
0
    return ret;
8122
0
}
8123
8124
int quicly_build_session_ticket_auth_data(ptls_buffer_t *auth_data, const quicly_context_t *ctx)
8125
0
{
8126
0
    int ret;
8127
8128
0
#define PUSH_TP(id, block)                                                                                                         \
8129
0
    do {                                                                                                                           \
8130
0
        ptls_buffer_push_quicint(auth_data, id);                                                                                   \
8131
0
        ptls_buffer_push_block(auth_data, -1, block);                                                                              \
8132
0
    } while (0)
8133
8134
0
    ptls_buffer_push_block(auth_data, -1, {
8135
0
        PUSH_TP(QUICLY_TRANSPORT_PARAMETER_ID_ACTIVE_CONNECTION_ID_LIMIT,
8136
0
                { ptls_buffer_push_quicint(auth_data, ctx->transport_params.active_connection_id_limit); });
8137
0
        PUSH_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_DATA,
8138
0
                { ptls_buffer_push_quicint(auth_data, ctx->transport_params.max_data); });
8139
0
        PUSH_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL,
8140
0
                { ptls_buffer_push_quicint(auth_data, ctx->transport_params.max_stream_data.bidi_local); });
8141
0
        PUSH_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE,
8142
0
                { ptls_buffer_push_quicint(auth_data, ctx->transport_params.max_stream_data.bidi_remote); });
8143
0
        PUSH_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_UNI,
8144
0
                { ptls_buffer_push_quicint(auth_data, ctx->transport_params.max_stream_data.uni); });
8145
0
        PUSH_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_BIDI,
8146
0
                { ptls_buffer_push_quicint(auth_data, ctx->transport_params.max_streams_bidi); });
8147
0
        PUSH_TP(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_UNI,
8148
0
                { ptls_buffer_push_quicint(auth_data, ctx->transport_params.max_streams_uni); });
8149
0
    });
8150
8151
0
#undef PUSH_TP
8152
8153
0
    ret = 0;
8154
0
Exit:
8155
0
    return ret;
8156
0
}
8157
8158
void quicly_stream_noop_on_destroy(quicly_stream_t *stream, quicly_error_t err)
8159
0
{
8160
0
}
8161
8162
void quicly_stream_noop_on_send_shift(quicly_stream_t *stream, size_t delta)
8163
0
{
8164
0
}
8165
8166
void quicly_stream_noop_on_send_emit(quicly_stream_t *stream, size_t off, void *dst, size_t *len, int *wrote_all)
8167
0
{
8168
0
}
8169
8170
void quicly_stream_noop_on_send_stop(quicly_stream_t *stream, quicly_error_t err)
8171
0
{
8172
0
}
8173
8174
void quicly_stream_noop_on_receive(quicly_stream_t *stream, size_t off, const void *src, size_t len)
8175
0
{
8176
0
}
8177
8178
void quicly_stream_noop_on_receive_reset(quicly_stream_t *stream, quicly_error_t err)
8179
0
{
8180
0
}
8181
8182
const quicly_stream_callbacks_t quicly_stream_noop_callbacks = {
8183
    quicly_stream_noop_on_destroy,   quicly_stream_noop_on_send_shift, quicly_stream_noop_on_send_emit,
8184
    quicly_stream_noop_on_send_stop, quicly_stream_noop_on_receive,    quicly_stream_noop_on_receive_reset};
8185
8186
void quicly__debug_printf(quicly_conn_t *conn, const char *function, int line, const char *fmt, ...)
8187
0
{
8188
0
    PTLS_LOG_DEFINE_POINT(quicly, debug_message, debug_message_logpoint);
8189
0
    if (QUICLY_PROBE_ENABLED(DEBUG_MESSAGE) ||
8190
0
        (ptls_log_point_maybe_active(&debug_message_logpoint) &
8191
0
         ptls_log_conn_maybe_active(ptls_get_log_state(conn->crypto.tls), (const char *(*)(void *))ptls_get_server_name,
8192
0
                                    conn->crypto.tls)) != 0) {
8193
0
        char buf[1024];
8194
0
        va_list args;
8195
8196
0
        va_start(args, fmt);
8197
0
        vsnprintf(buf, sizeof(buf), fmt, args);
8198
0
        va_end(args);
8199
8200
0
        QUICLY_PROBE(DEBUG_MESSAGE, conn, function, line, buf);
8201
0
        QUICLY_LOG_CONN(debug_message, conn, {
8202
0
            PTLS_LOG_ELEMENT_UNSAFESTR(function, function, strlen(function));
8203
0
            PTLS_LOG_ELEMENT_SIGNED(line, line);
8204
0
            PTLS_LOG_ELEMENT_UNSAFESTR(message, buf, strlen(buf));
8205
0
        });
8206
0
    }
8207
0
}
8208
8209
const uint32_t quicly_supported_versions[] = {QUICLY_PROTOCOL_VERSION_1, QUICLY_PROTOCOL_VERSION_DRAFT29,
8210
                                              QUICLY_PROTOCOL_VERSION_DRAFT27, 0};