Coverage Report

Created: 2026-04-12 07:08

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/openssl/ssl/quic/quic_txp.c
Line
Count
Source
1
/*
2
 * Copyright 2022-2026 The OpenSSL Project Authors. All Rights Reserved.
3
 *
4
 * Licensed under the Apache License 2.0 (the "License").  You may not use
5
 * this file except in compliance with the License.  You can obtain a copy
6
 * in the file LICENSE in the source distribution or at
7
 * https://www.openssl.org/source/license.html
8
 */
9
10
#include "internal/quic_txp.h"
11
#include "internal/quic_fifd.h"
12
#include "internal/quic_stream_map.h"
13
#include "internal/quic_error.h"
14
#include "internal/common.h"
15
#include <openssl/err.h>
16
17
0
#define MIN_CRYPTO_HDR_SIZE 3
18
19
0
#define MIN_FRAME_SIZE_HANDSHAKE_DONE 1
20
0
#define MIN_FRAME_SIZE_MAX_DATA 2
21
0
#define MIN_FRAME_SIZE_ACK 5
22
0
#define MIN_FRAME_SIZE_CRYPTO (MIN_CRYPTO_HDR_SIZE + 1)
23
0
#define MIN_FRAME_SIZE_STREAM 3 /* minimum useful size (for non-FIN) */
24
0
#define MIN_FRAME_SIZE_MAX_STREAMS_BIDI 2
25
0
#define MIN_FRAME_SIZE_MAX_STREAMS_UNI 2
26
27
/*
28
 * Packet Archetypes
29
 * =================
30
 */
31
32
/* Generate normal packets containing most frame types, subject to EL. */
33
0
#define TX_PACKETISER_ARCHETYPE_NORMAL 0
34
35
/*
36
 * A probe packet is different in that:
37
 *   - It bypasses CC, but *is* counted as in flight for purposes of CC;
38
 *   - It must be ACK-eliciting.
39
 */
40
0
#define TX_PACKETISER_ARCHETYPE_PROBE 1
41
42
/*
43
 * An ACK-only packet is different in that:
44
 *   - It bypasses CC, and is considered a 'non-inflight' packet;
45
 *   - It may not contain anything other than an ACK frame, not even padding.
46
 */
47
0
#define TX_PACKETISER_ARCHETYPE_ACK_ONLY 2
48
49
0
#define TX_PACKETISER_ARCHETYPE_NUM 3
50
51
struct ossl_quic_tx_packetiser_st {
52
    OSSL_QUIC_TX_PACKETISER_ARGS args;
53
54
    /*
55
     * Opaque initial token blob provided by caller. TXP frees using the
56
     * callback when it is no longer needed.
57
     */
58
    const unsigned char *initial_token;
59
    size_t initial_token_len;
60
    ossl_quic_initial_token_free_fn *initial_token_free_cb;
61
    void *initial_token_free_cb_arg;
62
63
    /* Subcomponents of the TXP that we own. */
64
    QUIC_FIFD fifd; /* QUIC Frame-in-Flight Dispatcher */
65
66
    /* Internal state. */
67
    uint64_t next_pn[QUIC_PN_SPACE_NUM]; /* Next PN to use in given PN space. */
68
    OSSL_TIME last_tx_time; /* Last time a packet was generated, or 0. */
69
70
    size_t unvalidated_credit; /* Limit of data we can send until validated */
71
72
    /* Internal state - frame (re)generation flags. */
73
    unsigned int want_handshake_done : 1;
74
    unsigned int want_max_data : 1;
75
    unsigned int want_max_streams_bidi : 1;
76
    unsigned int want_max_streams_uni : 1;
77
78
    /* Internal state - frame (re)generation flags - per PN space. */
79
    unsigned int want_ack : QUIC_PN_SPACE_NUM;
80
    unsigned int force_ack_eliciting : QUIC_PN_SPACE_NUM;
81
82
    /*
83
     * Internal state - connection close terminal state.
84
     * Once this is set, it is not unset unlike other want_ flags - we keep
85
     * sending it in every packet.
86
     */
87
    unsigned int want_conn_close : 1;
88
89
    /* Has the handshake been completed? */
90
    unsigned int handshake_complete : 1;
91
92
    OSSL_QUIC_FRAME_CONN_CLOSE conn_close_frame;
93
94
    /*
95
     * Counts of the number of bytes received and sent while in the closing
96
     * state.
97
     */
98
    uint64_t closing_bytes_recv;
99
    uint64_t closing_bytes_xmit;
100
101
    /* Internal state - packet assembly. */
102
    struct txp_el {
103
        unsigned char *scratch; /* scratch buffer for packet assembly */
104
        size_t scratch_len; /* number of bytes allocated for scratch */
105
        OSSL_QTX_IOVEC *iovec; /* scratch iovec array for use with QTX */
106
        size_t alloc_iovec; /* size of iovec array */
107
    } el[QUIC_ENC_LEVEL_NUM];
108
109
    /* Message callback related arguments */
110
    ossl_msg_cb msg_callback;
111
    void *msg_callback_arg;
112
    SSL *msg_callback_ssl;
113
114
    /* Callbacks. */
115
    void (*ack_tx_cb)(const OSSL_QUIC_FRAME_ACK *ack,
116
        uint32_t pn_space,
117
        void *arg);
118
    void *ack_tx_cb_arg;
119
};
120
121
/*
122
 * The TX helper records state used while generating frames into packets. It
123
 * enables serialization into the packet to be done "transactionally" where
124
 * serialization of a frame can be rolled back if it fails midway (e.g. if it
125
 * does not fit).
126
 */
127
struct tx_helper {
128
    OSSL_QUIC_TX_PACKETISER *txp;
129
    /*
130
     * The Maximum Packet Payload Length in bytes. This is the amount of
131
     * space we have to generate frames into.
132
     */
133
    size_t max_ppl;
134
    /*
135
     * Number of bytes we have generated so far.
136
     */
137
    size_t bytes_appended;
138
    /*
139
     * Number of scratch bytes in txp->scratch we have used so far. Some iovecs
140
     * will reference this scratch buffer. When we need to use more of it (e.g.
141
     * when we need to put frame headers somewhere), we append to the scratch
142
     * buffer, resizing if necessary, and increase this accordingly.
143
     */
144
    size_t scratch_bytes;
145
    /*
146
     * Bytes reserved in the MaxPPL budget. We keep this number of bytes spare
147
     * until reserve_allowed is set to 1. Currently this is always at most 1, as
148
     * a PING frame takes up one byte and this mechanism is only used to ensure
149
     * we can encode a PING frame if we have been asked to ensure a packet is
150
 * ACK-eliciting and we are unsure if we are going to add any other
151
     * ACK-eliciting frames before we reach our MaxPPL budget.
152
     */
153
    size_t reserve;
154
    /*
155
     * Number of iovecs we have currently appended. This is the number of
156
     * entries valid in txp->iovec.
157
     */
158
    size_t num_iovec;
159
    /* The EL this TX helper is being used for. */
160
    uint32_t enc_level;
161
    /*
162
     * Whether we are allowed to make use of the reserve bytes in our MaxPPL
163
     * budget. This is used to ensure we have room to append a PING frame later
164
     * if we need to. Once we know we will not need to append a PING frame, this
165
     * is set to 1.
166
     */
167
    unsigned int reserve_allowed : 1;
168
    /*
169
     * Set to 1 if we have appended a STREAM frame with an implicit length. If
170
     * this happens we should never append another frame after that frame as it
171
     * cannot be validly encoded. This is just a safety check.
172
     */
173
    unsigned int done_implicit : 1;
174
    struct {
175
        /*
176
         * The fields in this structure are valid if active is set, which means
177
         * that a serialization transaction is currently in progress.
178
         */
179
        unsigned char *data;
180
        WPACKET wpkt;
181
        unsigned int active : 1;
182
    } txn;
183
};
184
185
static void tx_helper_rollback(struct tx_helper *h);
186
static int txp_el_ensure_iovec(struct txp_el *el, size_t num);
187
188
/* Initialises the TX helper. */
189
static int tx_helper_init(struct tx_helper *h, OSSL_QUIC_TX_PACKETISER *txp,
190
    uint32_t enc_level, size_t max_ppl, size_t reserve)
191
0
{
192
0
    if (reserve > max_ppl)
193
0
        return 0;
194
195
0
    h->txp = txp;
196
0
    h->enc_level = enc_level;
197
0
    h->max_ppl = max_ppl;
198
0
    h->reserve = reserve;
199
0
    h->num_iovec = 0;
200
0
    h->bytes_appended = 0;
201
0
    h->scratch_bytes = 0;
202
0
    h->reserve_allowed = 0;
203
0
    h->done_implicit = 0;
204
0
    h->txn.data = NULL;
205
0
    h->txn.active = 0;
206
207
0
    if (max_ppl > h->txp->el[enc_level].scratch_len) {
208
0
        unsigned char *scratch;
209
210
0
        scratch = OPENSSL_realloc(h->txp->el[enc_level].scratch, max_ppl);
211
0
        if (scratch == NULL)
212
0
            return 0;
213
214
0
        h->txp->el[enc_level].scratch = scratch;
215
0
        h->txp->el[enc_level].scratch_len = max_ppl;
216
0
    }
217
218
0
    return 1;
219
0
}
220
221
static void tx_helper_cleanup(struct tx_helper *h)
222
0
{
223
0
    if (h->txn.active)
224
0
        tx_helper_rollback(h);
225
226
0
    h->txp = NULL;
227
0
}
228
229
static void tx_helper_unrestrict(struct tx_helper *h)
230
0
{
231
0
    h->reserve_allowed = 1;
232
0
}
233
234
/*
235
 * Append an extent of memory to the iovec list. The memory must remain
236
 * allocated until we finish generating the packet and call the QTX.
237
 *
238
 * In general, the buffers passed to this function will be from one of two
239
 * ranges:
240
 *
241
 *   - Application data contained in stream buffers managed elsewhere
242
 *     in the QUIC stack; or
243
 *
244
 *   - Control frame data appended into txp->scratch using tx_helper_begin and
245
 *     tx_helper_commit.
246
 *
247
 */
248
static int tx_helper_append_iovec(struct tx_helper *h,
249
    const unsigned char *buf,
250
    size_t buf_len)
251
0
{
252
0
    struct txp_el *el = &h->txp->el[h->enc_level];
253
254
0
    if (buf_len == 0)
255
0
        return 1;
256
257
0
    if (!ossl_assert(!h->done_implicit))
258
0
        return 0;
259
260
0
    if (!txp_el_ensure_iovec(el, h->num_iovec + 1))
261
0
        return 0;
262
263
0
    el->iovec[h->num_iovec].buf = buf;
264
0
    el->iovec[h->num_iovec].buf_len = buf_len;
265
266
0
    ++h->num_iovec;
267
0
    h->bytes_appended += buf_len;
268
0
    return 1;
269
0
}
270
271
/*
272
 * How many more bytes of space do we have left in our plaintext packet payload?
273
 */
274
static size_t tx_helper_get_space_left(struct tx_helper *h)
275
0
{
276
0
    return h->max_ppl
277
0
        - (h->reserve_allowed ? 0 : h->reserve) - h->bytes_appended;
278
0
}
279
280
/*
281
 * Begin a control frame serialization transaction. This allows the
282
 * serialization of the control frame to be backed out if it turns out it won't
283
 * fit. Write the control frame to the returned WPACKET. Ensure you always
284
 * call tx_helper_rollback or tx_helper_commit (or tx_helper_cleanup). Returns
285
 * NULL on failure.
286
 */
287
static WPACKET *tx_helper_begin(struct tx_helper *h)
288
0
{
289
0
    size_t space_left, len;
290
0
    unsigned char *data;
291
0
    struct txp_el *el = &h->txp->el[h->enc_level];
292
293
0
    if (!ossl_assert(!h->txn.active))
294
0
        return NULL;
295
296
0
    if (!ossl_assert(!h->done_implicit))
297
0
        return NULL;
298
299
0
    data = (unsigned char *)el->scratch + h->scratch_bytes;
300
0
    len = el->scratch_len - h->scratch_bytes;
301
302
0
    space_left = tx_helper_get_space_left(h);
303
0
    if (!ossl_assert(space_left <= len))
304
0
        return NULL;
305
306
0
    if (!WPACKET_init_static_len(&h->txn.wpkt, data, len, 0))
307
0
        return NULL;
308
309
0
    if (!WPACKET_set_max_size(&h->txn.wpkt, space_left)) {
310
0
        WPACKET_cleanup(&h->txn.wpkt);
311
0
        return NULL;
312
0
    }
313
314
0
    h->txn.data = data;
315
0
    h->txn.active = 1;
316
0
    return &h->txn.wpkt;
317
0
}
318
319
static void tx_helper_end(struct tx_helper *h, int success)
320
0
{
321
0
    if (success)
322
0
        WPACKET_finish(&h->txn.wpkt);
323
0
    else
324
0
        WPACKET_cleanup(&h->txn.wpkt);
325
326
0
    h->txn.active = 0;
327
0
    h->txn.data = NULL;
328
0
}
329
330
/* Abort a control frame serialization transaction. */
331
static void tx_helper_rollback(struct tx_helper *h)
332
0
{
333
0
    if (!h->txn.active)
334
0
        return;
335
336
0
    tx_helper_end(h, 0);
337
0
}
338
339
/* Commit a control frame. */
340
static int tx_helper_commit(struct tx_helper *h)
341
0
{
342
0
    size_t l = 0;
343
344
0
    if (!h->txn.active)
345
0
        return 0;
346
347
0
    if (!WPACKET_get_total_written(&h->txn.wpkt, &l)) {
348
0
        tx_helper_end(h, 0);
349
0
        return 0;
350
0
    }
351
352
0
    if (!tx_helper_append_iovec(h, h->txn.data, l)) {
353
0
        tx_helper_end(h, 0);
354
0
        return 0;
355
0
    }
356
357
0
    if (h->txp->msg_callback != NULL && l > 0) {
358
0
        uint64_t ftype;
359
0
        int ctype = SSL3_RT_QUIC_FRAME_FULL;
360
0
        PACKET pkt;
361
362
0
        if (!PACKET_buf_init(&pkt, h->txn.data, l)
363
0
            || !ossl_quic_wire_peek_frame_header(&pkt, &ftype, NULL)) {
364
0
            tx_helper_end(h, 0);
365
0
            return 0;
366
0
        }
367
368
0
        if (ftype == OSSL_QUIC_FRAME_TYPE_PADDING)
369
0
            ctype = SSL3_RT_QUIC_FRAME_PADDING;
370
0
        else if (OSSL_QUIC_FRAME_TYPE_IS_STREAM(ftype)
371
0
            || ftype == OSSL_QUIC_FRAME_TYPE_CRYPTO)
372
0
            ctype = SSL3_RT_QUIC_FRAME_HEADER;
373
374
0
        h->txp->msg_callback(1, OSSL_QUIC1_VERSION, ctype, h->txn.data, l,
375
0
            h->txp->msg_callback_ssl,
376
0
            h->txp->msg_callback_arg);
377
0
    }
378
379
0
    h->scratch_bytes += l;
380
0
    tx_helper_end(h, 1);
381
0
    return 1;
382
0
}
383
384
struct archetype_data {
385
    unsigned int allow_ack : 1;
386
    unsigned int allow_ping : 1;
387
    unsigned int allow_crypto : 1;
388
    unsigned int allow_handshake_done : 1;
389
    unsigned int allow_path_challenge : 1;
390
    unsigned int allow_path_response : 1;
391
    unsigned int allow_new_conn_id : 1;
392
    unsigned int allow_retire_conn_id : 1;
393
    unsigned int allow_stream_rel : 1;
394
    unsigned int allow_conn_fc : 1;
395
    unsigned int allow_conn_close : 1;
396
    unsigned int allow_cfq_other : 1;
397
    unsigned int allow_new_token : 1;
398
    unsigned int allow_force_ack_eliciting : 1;
399
    unsigned int allow_padding : 1;
400
    unsigned int require_ack_eliciting : 1;
401
    unsigned int bypass_cc : 1;
402
};
403
404
struct txp_pkt_geom {
405
    size_t cmpl, cmppl, hwm, pkt_overhead;
406
    uint32_t archetype;
407
    struct archetype_data adata;
408
};
409
410
struct txp_pkt {
411
    struct tx_helper h;
412
    int h_valid;
413
    QUIC_TXPIM_PKT *tpkt;
414
    QUIC_STREAM *stream_head;
415
    QUIC_PKT_HDR phdr;
416
    struct txp_pkt_geom geom;
417
    int force_pad;
418
};
419
420
static QUIC_SSTREAM *get_sstream_by_id(uint64_t stream_id, uint32_t pn_space,
421
    void *arg);
422
static void on_regen_notify(uint64_t frame_type, uint64_t stream_id,
423
    QUIC_TXPIM_PKT *pkt, void *arg);
424
static void on_confirm_notify(uint64_t frame_type, uint64_t stream_id,
425
    QUIC_TXPIM_PKT *pkt, void *arg);
426
static void on_sstream_updated(uint64_t stream_id, void *arg);
427
static int sstream_is_pending(QUIC_SSTREAM *sstream);
428
static int txp_should_try_staging(OSSL_QUIC_TX_PACKETISER *txp,
429
    uint32_t enc_level,
430
    uint32_t archetype,
431
    uint64_t cc_limit,
432
    uint32_t *conn_close_enc_level);
433
static size_t txp_determine_pn_len(OSSL_QUIC_TX_PACKETISER *txp);
434
static int txp_determine_ppl_from_pl(OSSL_QUIC_TX_PACKETISER *txp,
435
    size_t pl,
436
    uint32_t enc_level,
437
    size_t hdr_len,
438
    size_t *r);
439
static size_t txp_get_mdpl(OSSL_QUIC_TX_PACKETISER *txp);
440
static int txp_generate_for_el(OSSL_QUIC_TX_PACKETISER *txp,
441
    struct txp_pkt *pkt,
442
    int chosen_for_conn_close);
443
static int txp_pkt_init(struct txp_pkt *pkt, OSSL_QUIC_TX_PACKETISER *txp,
444
    uint32_t enc_level, uint32_t archetype,
445
    size_t running_total);
446
static void txp_pkt_cleanup(struct txp_pkt *pkt, OSSL_QUIC_TX_PACKETISER *txp);
447
static int txp_pkt_postgen_update_pkt_overhead(struct txp_pkt *pkt,
448
    OSSL_QUIC_TX_PACKETISER *txp);
449
static int txp_pkt_append_padding(struct txp_pkt *pkt,
450
    OSSL_QUIC_TX_PACKETISER *txp, size_t num_bytes);
451
static int txp_pkt_commit(OSSL_QUIC_TX_PACKETISER *txp, struct txp_pkt *pkt,
452
    uint32_t archetype, int *txpim_pkt_reffed);
453
static uint32_t txp_determine_archetype(OSSL_QUIC_TX_PACKETISER *txp,
454
    uint64_t cc_limit);
455
456
/**
457
 * Sets the validated state of a QUIC TX packetiser.
458
 *
459
 * This function marks the provided QUIC TX packetiser as having its credit
460
 * fully validated by setting its `unvalidated_credit` field to `SIZE_MAX`.
461
 *
462
 * @param txp A pointer to the OSSL_QUIC_TX_PACKETISER structure to update.
463
 */
464
void ossl_quic_tx_packetiser_set_validated(OSSL_QUIC_TX_PACKETISER *txp)
465
0
{
466
0
    txp->unvalidated_credit = SIZE_MAX;
467
0
    return;
468
0
}
469
470
/**
471
 * Adds unvalidated credit to a QUIC TX packetiser.
472
 *
473
 * This function increases the unvalidated credit of the provided QUIC TX
474
 * packetiser. If the current unvalidated credit is not `SIZE_MAX`, the
475
 * function adds three times the specified `credit` value, ensuring it does
476
 * not exceed the maximum allowable value (`SIZE_MAX - 1`). If the addition
477
 * would cause an overflow, the unvalidated credit is capped at
478
 * `SIZE_MAX - 1`. If the current unvalidated credit is already `SIZE_MAX`,
479
 * the function does nothing.
480
 *
481
 * @param txp    A pointer to the OSSL_QUIC_TX_PACKETISER structure to update.
482
 * @param credit The amount of credit to add, multiplied by 3.
483
 */
484
void ossl_quic_tx_packetiser_add_unvalidated_credit(OSSL_QUIC_TX_PACKETISER *txp,
485
    size_t credit)
486
0
{
487
0
    if (txp->unvalidated_credit != SIZE_MAX) {
488
0
        if ((SIZE_MAX - txp->unvalidated_credit) > (credit * 3))
489
0
            txp->unvalidated_credit += credit * 3;
490
0
        else
491
0
            txp->unvalidated_credit = SIZE_MAX - 1;
492
0
    }
493
494
0
    return;
495
0
}
496
497
/**
498
 * Consumes unvalidated credit from a QUIC TX packetiser.
499
 *
500
 * This function decreases the unvalidated credit of the specified
501
 * QUIC TX packetiser by the given `credit` value. If the unvalidated credit
502
 * is set to `SIZE_MAX`, the function does nothing, as `SIZE_MAX` represents
503
 * an unlimited credit state.
504
 *
505
 * @param txp    A pointer to the OSSL_QUIC_TX_PACKETISER structure to update.
506
 * @param credit The amount of credit to consume.
507
 */
508
void ossl_quic_tx_packetiser_consume_unvalidated_credit(OSSL_QUIC_TX_PACKETISER *txp,
509
    size_t credit)
510
0
{
511
0
    if (txp->unvalidated_credit != SIZE_MAX) {
512
0
        if (txp->unvalidated_credit < credit)
513
0
            txp->unvalidated_credit = 0;
514
0
        else
515
0
            txp->unvalidated_credit -= credit;
516
0
    }
517
0
}
518
519
/**
520
 * Checks if the QUIC TX packetiser has sufficient unvalidated credit.
521
 *
522
 * This function determines whether the unvalidated credit of the specified
523
 * QUIC TX packetiser exceeds the required credit value (`req_credit`).
524
 * If the unvalidated credit is greater than `req_credit`, the function
525
 * returns 1 (true); otherwise, it returns 0 (false).
526
 *
527
 * @param txp        A pointer to the OSSL_QUIC_TX_PACKETISER structure to check.
528
 * @param req_credit The required credit value to compare against.
529
 *
530
 * @return 1 if the unvalidated credit exceeds `req_credit`, 0 otherwise.
531
 */
532
int ossl_quic_tx_packetiser_check_unvalidated_credit(OSSL_QUIC_TX_PACKETISER *txp,
533
    size_t req_credit)
534
0
{
535
0
    return (txp->unvalidated_credit > req_credit);
536
0
}
537
538
OSSL_QUIC_TX_PACKETISER *ossl_quic_tx_packetiser_new(const OSSL_QUIC_TX_PACKETISER_ARGS *args)
539
0
{
540
0
    OSSL_QUIC_TX_PACKETISER *txp;
541
542
0
    if (args == NULL
543
0
        || args->qtx == NULL
544
0
        || args->txpim == NULL
545
0
        || args->cfq == NULL
546
0
        || args->ackm == NULL
547
0
        || args->qsm == NULL
548
0
        || args->conn_txfc == NULL
549
0
        || args->conn_rxfc == NULL
550
0
        || args->max_streams_bidi_rxfc == NULL
551
0
        || args->max_streams_uni_rxfc == NULL
552
0
        || args->protocol_version == 0) {
553
0
        ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_NULL_PARAMETER);
554
0
        return NULL;
555
0
    }
556
557
0
    txp = OPENSSL_zalloc(sizeof(*txp));
558
0
    if (txp == NULL)
559
0
        return NULL;
560
561
0
    txp->args = *args;
562
0
    txp->last_tx_time = ossl_time_zero();
563
564
0
    if (!ossl_quic_fifd_init(&txp->fifd,
565
0
            txp->args.cfq, txp->args.ackm, txp->args.txpim,
566
0
            get_sstream_by_id, txp,
567
0
            on_regen_notify, txp,
568
0
            on_confirm_notify, txp,
569
0
            on_sstream_updated, txp,
570
0
            args->get_qlog_cb,
571
0
            args->get_qlog_cb_arg)) {
572
0
        OPENSSL_free(txp);
573
0
        return NULL;
574
0
    }
575
576
0
    return txp;
577
0
}
578
579
void ossl_quic_tx_packetiser_free(OSSL_QUIC_TX_PACKETISER *txp)
580
0
{
581
0
    uint32_t enc_level;
582
583
0
    if (txp == NULL)
584
0
        return;
585
586
0
    ossl_quic_tx_packetiser_set_initial_token(txp, NULL, 0, NULL, NULL);
587
0
    ossl_quic_fifd_cleanup(&txp->fifd);
588
0
    OPENSSL_free(txp->conn_close_frame.reason);
589
590
0
    for (enc_level = QUIC_ENC_LEVEL_INITIAL;
591
0
        enc_level < QUIC_ENC_LEVEL_NUM;
592
0
        ++enc_level) {
593
0
        OPENSSL_free(txp->el[enc_level].iovec);
594
0
        OPENSSL_free(txp->el[enc_level].scratch);
595
0
    }
596
597
0
    OPENSSL_free(txp);
598
0
}
599
600
/*
601
 * Determine if an Initial packet token length is reasonable based on the
602
 * current MDPL, returning 1 if it is OK.
603
 *
604
 * The real PMTU to the peer could differ from our (pessimistic) understanding
605
 * of the PMTU, therefore it is possible we could receive an Initial token from
606
 * a server in a Retry packet which is bigger than the MDPL. In this case it is
607
 * impossible for us ever to make forward progress and we need to error out
608
 * and fail the connection attempt.
609
 *
610
 * The specific boundary condition is complex: for example, after the size of
611
 * the Initial token, there are the Initial packet header overheads and then
612
 * encryption/AEAD tag overheads. After that, the minimum room for frame data in
613
 * order to guarantee forward progress must be guaranteed. For example, a crypto
614
 * stream needs to always be able to serialize at least one byte in a CRYPTO
615
 * frame in order to make forward progress. Because the offset field of a CRYPTO
616
 * frame uses a variable-length integer, the number of bytes needed to ensure
617
 * this also varies.
618
 *
619
 * Rather than trying to get this boundary condition check actually right,
620
 * require a reasonable amount of slack to avoid pathological behaviours. (After
621
 * all, transmitting a CRYPTO stream one byte at a time is probably not
622
 * desirable anyway.)
623
 *
624
 * We choose 160 bytes as the required margin, which is double the rough
625
 * estimation of the minimum we would require to guarantee forward progress
626
 * under worst case packet overheads.
627
 */
628
0
#define TXP_REQUIRED_TOKEN_MARGIN 160
629
630
static int txp_check_token_len(size_t token_len, size_t mdpl)
631
0
{
632
0
    if (token_len == 0)
633
0
        return 1;
634
635
0
    if (token_len >= mdpl)
636
0
        return 0;
637
638
0
    if (TXP_REQUIRED_TOKEN_MARGIN >= mdpl)
639
        /* (should not be possible because MDPL must be at least 1200) */
640
0
        return 0;
641
642
0
    if (token_len > mdpl - TXP_REQUIRED_TOKEN_MARGIN)
643
0
        return 0;
644
645
0
    return 1;
646
0
}
647
648
int ossl_quic_tx_packetiser_set_initial_token(OSSL_QUIC_TX_PACKETISER *txp,
649
    const unsigned char *token,
650
    size_t token_len,
651
    ossl_quic_initial_token_free_fn *free_cb,
652
    void *free_cb_arg)
653
0
{
654
0
    if (!txp_check_token_len(token_len, txp_get_mdpl(txp)))
655
0
        return 0;
656
657
0
    if (txp->initial_token != NULL && txp->initial_token_free_cb != NULL)
658
0
        txp->initial_token_free_cb(txp->initial_token, txp->initial_token_len,
659
0
            txp->initial_token_free_cb_arg);
660
661
0
    txp->initial_token = token;
662
0
    txp->initial_token_len = token_len;
663
0
    txp->initial_token_free_cb = free_cb;
664
0
    txp->initial_token_free_cb_arg = free_cb_arg;
665
0
    return 1;
666
0
}
667
668
int ossl_quic_tx_packetiser_set_protocol_version(OSSL_QUIC_TX_PACKETISER *txp,
669
    uint32_t protocol_version)
670
0
{
671
0
    txp->args.protocol_version = protocol_version;
672
0
    return 1;
673
0
}
674
675
int ossl_quic_tx_packetiser_set_cur_dcid(OSSL_QUIC_TX_PACKETISER *txp,
676
    const QUIC_CONN_ID *dcid)
677
0
{
678
0
    if (dcid == NULL) {
679
0
        ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_NULL_PARAMETER);
680
0
        return 0;
681
0
    }
682
683
0
    txp->args.cur_dcid = *dcid;
684
0
    return 1;
685
0
}
686
687
int ossl_quic_tx_packetiser_set_cur_scid(OSSL_QUIC_TX_PACKETISER *txp,
688
    const QUIC_CONN_ID *scid)
689
0
{
690
0
    if (scid == NULL) {
691
0
        ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_NULL_PARAMETER);
692
0
        return 0;
693
0
    }
694
695
0
    txp->args.cur_scid = *scid;
696
0
    return 1;
697
0
}
698
699
/* Change the destination L4 address the TXP uses to send datagrams. */
700
int ossl_quic_tx_packetiser_set_peer(OSSL_QUIC_TX_PACKETISER *txp,
701
    const BIO_ADDR *peer)
702
0
{
703
0
    if (peer == NULL) {
704
0
        BIO_ADDR_clear(&txp->args.peer);
705
0
        return 1;
706
0
    }
707
708
0
    return BIO_ADDR_copy(&txp->args.peer, peer);
709
0
}
710
711
int ossl_quic_tx_packetiser_set_ack_delay_exponent(OSSL_QUIC_TX_PACKETISER *txp,
712
    uint32_t exp)
713
0
{
714
0
    if (exp > QUIC_MAX_ACK_DELAY_EXP) {
715
0
        ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_INVALID_ARGUMENT);
716
0
        return 0;
717
0
    }
718
719
0
    txp->args.ack_delay_exponent = exp;
720
0
    return 1;
721
0
}
722
723
void ossl_quic_tx_packetiser_set_ack_tx_cb(OSSL_QUIC_TX_PACKETISER *txp,
724
    void (*cb)(const OSSL_QUIC_FRAME_ACK *ack,
725
        uint32_t pn_space,
726
        void *arg),
727
    void *cb_arg)
728
0
{
729
0
    txp->ack_tx_cb = cb;
730
0
    txp->ack_tx_cb_arg = cb_arg;
731
0
}
732
733
void ossl_quic_tx_packetiser_set_qlog_cb(OSSL_QUIC_TX_PACKETISER *txp,
734
    QLOG *(*get_qlog_cb)(void *arg),
735
    void *get_qlog_cb_arg)
736
0
{
737
0
    ossl_quic_fifd_set_qlog_cb(&txp->fifd, get_qlog_cb, get_qlog_cb_arg);
738
0
}
739
740
int ossl_quic_tx_packetiser_discard_enc_level(OSSL_QUIC_TX_PACKETISER *txp,
741
    uint32_t enc_level)
742
0
{
743
0
    if (enc_level >= QUIC_ENC_LEVEL_NUM) {
744
0
        ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_INVALID_ARGUMENT);
745
0
        return 0;
746
0
    }
747
748
0
    if (enc_level != QUIC_ENC_LEVEL_0RTT)
749
0
        txp->args.crypto[ossl_quic_enc_level_to_pn_space(enc_level)] = NULL;
750
751
0
    return 1;
752
0
}
753
754
void ossl_quic_tx_packetiser_notify_handshake_complete(OSSL_QUIC_TX_PACKETISER *txp)
755
0
{
756
0
    txp->handshake_complete = 1;
757
0
}
758
759
void ossl_quic_tx_packetiser_schedule_handshake_done(OSSL_QUIC_TX_PACKETISER *txp)
760
0
{
761
0
    txp->want_handshake_done = 1;
762
0
}
763
764
void ossl_quic_tx_packetiser_schedule_ack_eliciting(OSSL_QUIC_TX_PACKETISER *txp,
765
    uint32_t pn_space)
766
0
{
767
0
    txp->force_ack_eliciting |= (1UL << pn_space);
768
0
}
769
770
void ossl_quic_tx_packetiser_schedule_ack(OSSL_QUIC_TX_PACKETISER *txp,
771
    uint32_t pn_space)
772
0
{
773
0
    txp->want_ack |= (1UL << pn_space);
774
0
}
775
776
0
#define TXP_ERR_INTERNAL 0 /* Internal (e.g. alloc) error */
777
0
#define TXP_ERR_SUCCESS 1 /* Success */
778
0
#define TXP_ERR_SPACE 2 /* Not enough room for another packet */
779
#define TXP_ERR_INPUT 3 /* Invalid/malformed input */
780
781
/*
782
 * Generates a datagram by polling the various ELs to determine if they want to
783
 * generate any frames, and generating a datagram which coalesces packets for
784
 * any ELs which do.
785
 */
786
int ossl_quic_tx_packetiser_generate(OSSL_QUIC_TX_PACKETISER *txp,
                                     QUIC_TXP_STATUS *status)
{
    /*
     * Called to generate one or more datagrams, each containing one or more
     * packets.
     *
     * There are some tricky things to note here:
     *
     *   - The TXP is only concerned with generating encrypted packets;
     *     other packets use a different path.
     *
     *   - Any datagram containing an Initial packet must have a payload length
     *     (DPL) of at least 1200 bytes. This padding need not necessarily be
     *     found in the Initial packet.
     *
     *     - It is desirable to be able to coalesce an Initial packet
     *       with a Handshake packet. Since, before generating the Handshake
     *       packet, we do not know how long it will be, we cannot know the
     *       correct amount of padding to ensure a DPL of at least 1200 bytes.
     *       Thus this padding must added to the Handshake packet (or whatever
     *       packet is the last in the datagram).
     *
     *     - However, at the time that we generate the Initial packet,
     *       we do not actually know for sure that we will be followed
     *       in the datagram by another packet. For example, suppose we have
     *       some queued data (e.g. crypto stream data for the HANDSHAKE EL)
     *       it looks like we will want to send on the HANDSHAKE EL.
     *       We could assume padding will be placed in the Handshake packet
     *       subsequently and avoid adding any padding to the Initial packet
     *       (which would leave no room for the Handshake packet in the
     *       datagram).
     *
     *       However, this is not actually a safe assumption. Suppose that we
     *       are using a link with a MDPL of 1200 bytes, the minimum allowed by
     *       QUIC. Suppose that the Initial packet consumes 1195 bytes in total.
     *       Since it is not possible to fit a Handshake packet in just 5 bytes,
     *       upon trying to add a Handshake packet after generating the Initial
     *       packet, we will discover we have no room to fit it! This is not a
     *       problem in itself as another datagram can be sent subsequently, but
     *       it is a problem because we were counting to use that packet to hold
     *       the essential padding. But if we have already finished encrypting
     *       the Initial packet, we cannot go and add padding to it anymore.
     *       This leaves us stuck.
     *
     * Because of this, we have to plan multiple packets simultaneously, such
     * that we can start generating a Handshake (or 0-RTT or 1-RTT, or so on)
     * packet while still having the option to go back and add padding to the
     * Initial packet if it turns out to be needed.
     *
     * Trying to predict ahead of time (e.g. during Initial packet generation)
     * whether we will successfully generate a subsequent packet is fraught with
     * error as it relies on a large number of variables:
     *
     *   - Do we have room to fit a packet header? (Consider that due to
     *     variable-length integer encoding this is highly variable and can even
     *     depend on payload length due to a variable-length Length field.)
     *
     *   - Can we fit even a single one of the frames we want to put in this
     *     packet in the packet? (Each frame type has a bespoke encoding. While
     *     our encodings of some frame types are adaptive based on the available
     *     room - e.g. STREAM frames - ultimately all frame types have some
     *     absolute minimum number of bytes to be successfully encoded. For
     *     example, if after an Initial packet there is enough room to encode
     *     only one byte of frame data, it is quite likely we can't send any of
     *     the frames we wanted to send.) While this is not strictly a problem
     *     because we could just fill the packet with padding frames, this is a
     *     pointless packet and is wasteful.
     *
     * Thus we adopt a multi-phase architecture:
     *
     *   1. Archetype Selection: Determine desired packet archetype.
     *
     *   2. Packet Staging: Generation of packet information and packet payload
     *      data (frame data) into staging areas.
     *
     *   3. Packet Adjustment: Adjustment of staged packets, adding padding to
     *      the staged packets if needed.
     *
     *   4. Commit: The packets are sent to the QTX and recorded as having been
     *      sent to the FIFM.
     *
     */
    int res = 0, rc;
    uint32_t archetype, enc_level;
    uint32_t conn_close_enc_level = QUIC_ENC_LEVEL_NUM;
    struct txp_pkt pkt[QUIC_ENC_LEVEL_NUM];
    size_t pkts_done = 0;
    uint64_t cc_limit = txp->args.cc_method->get_tx_allowance(txp->args.cc_data);
    int need_padding = 0, txpim_pkt_reffed;

    /* Zero the status output so early exits report nothing sent. */
    memset(status, 0, sizeof(*status));

    /* Mark all staging slots invalid so cleanup below is always safe. */
    for (enc_level = QUIC_ENC_LEVEL_INITIAL;
         enc_level < QUIC_ENC_LEVEL_NUM;
         ++enc_level)
        pkt[enc_level].h_valid = 0;

    /*
     * Should not be needed, but a sanity check in case anyone else has been
     * using the QTX.
     */
    ossl_qtx_finish_dgram(txp->args.qtx);

    /* 1. Archetype Selection */
    archetype = txp_determine_archetype(txp, cc_limit);

    /* 2. Packet Staging */
    for (enc_level = QUIC_ENC_LEVEL_INITIAL;
         enc_level < QUIC_ENC_LEVEL_NUM;
         ++enc_level) {
        /* Bytes already consumed in this datagram by lower-EL packets. */
        size_t running_total = (enc_level > QUIC_ENC_LEVEL_INITIAL)
            ? pkt[enc_level - 1].geom.hwm
            : 0;

        pkt[enc_level].geom.hwm = running_total;

        if (!txp_should_try_staging(txp, enc_level, archetype, cc_limit,
                                    &conn_close_enc_level))
            continue;

        if (!txp_pkt_init(&pkt[enc_level], txp, enc_level, archetype,
                          running_total))
            /*
             * If this fails this is not a fatal error - it means the geometry
             * planning determined there was not enough space for another
             * packet. So just proceed with what we've already planned for.
             */
            break;

        rc = txp_generate_for_el(txp, &pkt[enc_level],
                                 conn_close_enc_level == enc_level);
        if (rc != TXP_ERR_SUCCESS)
            goto out;

        if (pkt[enc_level].force_pad)
            /*
             * txp_generate_for_el emitted a frame which forces packet padding.
             */
            need_padding = 1;

        /* Advance the high-water mark past this packet's header and payload. */
        pkt[enc_level].geom.hwm = running_total
            + pkt[enc_level].h.bytes_appended
            + pkt[enc_level].geom.pkt_overhead;
    }

    /* 3. Packet Adjustment */
    if (pkt[QUIC_ENC_LEVEL_INITIAL].h_valid
        && pkt[QUIC_ENC_LEVEL_INITIAL].h.bytes_appended > 0)
        /*
         * We have an Initial packet in this datagram, so we need to make sure
         * the total size of the datagram is adequate.
         */
        need_padding = 1;

    if (need_padding) {
        size_t total_dgram_size = 0;
        const size_t min_dpl = QUIC_MIN_INITIAL_DGRAM_LEN;
        uint32_t pad_el = QUIC_ENC_LEVEL_NUM;

        /*
         * Pick the lowest staged EL which is allowed to carry padding, and
         * total up the datagram size while we are at it.
         */
        for (enc_level = QUIC_ENC_LEVEL_INITIAL;
             enc_level < QUIC_ENC_LEVEL_NUM;
             ++enc_level)
            if (pkt[enc_level].h_valid && pkt[enc_level].h.bytes_appended > 0) {
                if (pad_el == QUIC_ENC_LEVEL_NUM
                    /*
                     * We might not be able to add padding, for example if we
                     * are using the ACK_ONLY archetype.
                     */
                    && pkt[enc_level].geom.adata.allow_padding
                    && !pkt[enc_level].h.done_implicit)
                    pad_el = enc_level;

                txp_pkt_postgen_update_pkt_overhead(&pkt[enc_level], txp);
                total_dgram_size += pkt[enc_level].geom.pkt_overhead
                    + pkt[enc_level].h.bytes_appended;
            }

        if (pad_el != QUIC_ENC_LEVEL_NUM && total_dgram_size < min_dpl) {
            size_t deficit = min_dpl - total_dgram_size;

            if (!txp_pkt_append_padding(&pkt[pad_el], txp, deficit))
                goto out;

            total_dgram_size += deficit;

            /*
             * Padding frames make a packet ineligible for being a non-inflight
             * packet.
             */
            pkt[pad_el].tpkt->ackm_pkt.is_inflight = 1;
        }

        /*
         * If we have failed to make a datagram of adequate size, for example
         * because we have a padding requirement but are using the ACK_ONLY
         * archetype (because we are CC limited), which precludes us from
         * sending padding, give up on generating the datagram - there is
         * nothing we can do.
         */
        if (total_dgram_size < min_dpl) {
            res = 1;
            goto out;
        }
    }

    /* 4. Commit */
    for (enc_level = QUIC_ENC_LEVEL_INITIAL;
         enc_level < QUIC_ENC_LEVEL_NUM;
         ++enc_level) {

        if (!pkt[enc_level].h_valid)
            /* Did not attempt to generate a packet for this EL. */
            continue;

        if (pkt[enc_level].h.bytes_appended == 0)
            /* Nothing was generated for this EL, so skip. */
            continue;

        /*
         * NOTE(review): on this failure path res is set to TXP_ERR_SPACE (2)
         * rather than the 0/1 success flag used elsewhere in this function;
         * callers treating the return as boolean will see it as success.
         * Confirm this is intentional before changing.
         */
        if (!ossl_quic_tx_packetiser_check_unvalidated_credit(txp,
                pkt[enc_level].h.bytes_appended)) {
            res = TXP_ERR_SPACE;
            goto out;
        }
        ossl_quic_tx_packetiser_consume_unvalidated_credit(txp, pkt[enc_level].h.bytes_appended);

        rc = txp_pkt_commit(txp, &pkt[enc_level], archetype,
                            &txpim_pkt_reffed);
        if (rc) {
            status->sent_ack_eliciting
                = status->sent_ack_eliciting
                || pkt[enc_level].tpkt->ackm_pkt.is_ack_eliciting;

            if (enc_level == QUIC_ENC_LEVEL_HANDSHAKE)
                status->sent_handshake
                    = (pkt[enc_level].h_valid
                       && pkt[enc_level].h.bytes_appended > 0);
        }

        /* On a successful commit the TXPIM takes ownership of the packet. */
        if (txpim_pkt_reffed)
            pkt[enc_level].tpkt = NULL; /* don't free */

        if (!rc)
            goto out;

        ++pkts_done;
    }

    /* Flush & Cleanup */
    res = 1;
out:
    ossl_qtx_finish_dgram(txp->args.qtx);

    /* Release any staged packets not handed off to the TXPIM above. */
    for (enc_level = QUIC_ENC_LEVEL_INITIAL;
         enc_level < QUIC_ENC_LEVEL_NUM;
         ++enc_level)
        txp_pkt_cleanup(&pkt[enc_level], txp);

    status->sent_pkt = pkts_done;

    return res;
}
1048
1049
/*
 * Per-EL, per-archetype policy table: which frame types a packet generated
 * for a given (encryption level, archetype) pair may contain, whether it
 * must be ACK-eliciting, and whether it bypasses congestion control.
 * Indexed as archetypes[enc_level][archetype].
 */
static const struct archetype_data archetypes[QUIC_ENC_LEVEL_NUM][TX_PACKETISER_ARCHETYPE_NUM] = {
    /* EL 0(INITIAL) */
    {
        /* EL 0(INITIAL) - Archetype 0(NORMAL) */
        {
            /*allow_ack                       =*/ 1,
            /*allow_ping                      =*/ 1,
            /*allow_crypto                    =*/ 1,
            /*allow_handshake_done            =*/ 0,
            /*allow_path_challenge            =*/ 0,
            /*allow_path_response             =*/ 0,
            /*allow_new_conn_id               =*/ 0,
            /*allow_retire_conn_id            =*/ 0,
            /*allow_stream_rel                =*/ 0,
            /*allow_conn_fc                   =*/ 0,
            /*allow_conn_close                =*/ 1,
            /*allow_cfq_other                 =*/ 0,
            /*allow_new_token                 =*/ 0,
            /*allow_force_ack_eliciting       =*/ 1,
            /*allow_padding                   =*/ 1,
            /*require_ack_eliciting           =*/ 0,
            /*bypass_cc                       =*/ 0,
        },
        /* EL 0(INITIAL) - Archetype 1(PROBE) */
        {
            /*allow_ack                       =*/ 1,
            /*allow_ping                      =*/ 1,
            /*allow_crypto                    =*/ 1,
            /*allow_handshake_done            =*/ 0,
            /*allow_path_challenge            =*/ 0,
            /*allow_path_response             =*/ 0,
            /*allow_new_conn_id               =*/ 0,
            /*allow_retire_conn_id            =*/ 0,
            /*allow_stream_rel                =*/ 0,
            /*allow_conn_fc                   =*/ 0,
            /*allow_conn_close                =*/ 1,
            /*allow_cfq_other                 =*/ 0,
            /*allow_new_token                 =*/ 0,
            /*allow_force_ack_eliciting       =*/ 1,
            /*allow_padding                   =*/ 1,
            /*require_ack_eliciting           =*/ 1,
            /*bypass_cc                       =*/ 1,
        },
        /* EL 0(INITIAL) - Archetype 2(ACK_ONLY) */
        {
            /*allow_ack                       =*/ 1,
            /*allow_ping                      =*/ 0,
            /*allow_crypto                    =*/ 0,
            /*allow_handshake_done            =*/ 0,
            /*allow_path_challenge            =*/ 0,
            /*allow_path_response             =*/ 0,
            /*allow_new_conn_id               =*/ 0,
            /*allow_retire_conn_id            =*/ 0,
            /*allow_stream_rel                =*/ 0,
            /*allow_conn_fc                   =*/ 0,
            /*allow_conn_close                =*/ 0,
            /*allow_cfq_other                 =*/ 0,
            /*allow_new_token                 =*/ 0,
            /*allow_force_ack_eliciting       =*/ 1,
            /*allow_padding                   =*/ 0,
            /*require_ack_eliciting           =*/ 0,
            /*bypass_cc                       =*/ 1,
        },
    },
    /* EL 1(0RTT) */
    {
        /* EL 1(0RTT) - Archetype 0(NORMAL) */
        {
            /*allow_ack                       =*/ 0,
            /*allow_ping                      =*/ 1,
            /*allow_crypto                    =*/ 0,
            /*allow_handshake_done            =*/ 0,
            /*allow_path_challenge            =*/ 0,
            /*allow_path_response             =*/ 0,
            /*allow_new_conn_id               =*/ 1,
            /*allow_retire_conn_id            =*/ 1,
            /*allow_stream_rel                =*/ 1,
            /*allow_conn_fc                   =*/ 1,
            /*allow_conn_close                =*/ 1,
            /*allow_cfq_other                 =*/ 0,
            /*allow_new_token                 =*/ 0,
            /*allow_force_ack_eliciting       =*/ 0,
            /*allow_padding                   =*/ 1,
            /*require_ack_eliciting           =*/ 0,
            /*bypass_cc                       =*/ 0,
        },
        /* EL 1(0RTT) - Archetype 1(PROBE) */
        {
            /*allow_ack                       =*/ 0,
            /*allow_ping                      =*/ 1,
            /*allow_crypto                    =*/ 0,
            /*allow_handshake_done            =*/ 0,
            /*allow_path_challenge            =*/ 0,
            /*allow_path_response             =*/ 0,
            /*allow_new_conn_id               =*/ 1,
            /*allow_retire_conn_id            =*/ 1,
            /*allow_stream_rel                =*/ 1,
            /*allow_conn_fc                   =*/ 1,
            /*allow_conn_close                =*/ 1,
            /*allow_cfq_other                 =*/ 0,
            /*allow_new_token                 =*/ 0,
            /*allow_force_ack_eliciting       =*/ 0,
            /*allow_padding                   =*/ 1,
            /*require_ack_eliciting           =*/ 1,
            /*bypass_cc                       =*/ 1,
        },
        /* EL 1(0RTT) - Archetype 2(ACK_ONLY) */
        {
            /*allow_ack                       =*/ 0,
            /*allow_ping                      =*/ 0,
            /*allow_crypto                    =*/ 0,
            /*allow_handshake_done            =*/ 0,
            /*allow_path_challenge            =*/ 0,
            /*allow_path_response             =*/ 0,
            /*allow_new_conn_id               =*/ 0,
            /*allow_retire_conn_id            =*/ 0,
            /*allow_stream_rel                =*/ 0,
            /*allow_conn_fc                   =*/ 0,
            /*allow_conn_close                =*/ 0,
            /*allow_cfq_other                 =*/ 0,
            /*allow_new_token                 =*/ 0,
            /*allow_force_ack_eliciting       =*/ 0,
            /*allow_padding                   =*/ 0,
            /*require_ack_eliciting           =*/ 0,
            /*bypass_cc                       =*/ 1,
        },
    },
    /* EL (HANDSHAKE) */
    {
        /* EL 2(HANDSHAKE) - Archetype 0(NORMAL) */
        {
            /*allow_ack                       =*/ 1,
            /*allow_ping                      =*/ 1,
            /*allow_crypto                    =*/ 1,
            /*allow_handshake_done            =*/ 0,
            /*allow_path_challenge            =*/ 0,
            /*allow_path_response             =*/ 0,
            /*allow_new_conn_id               =*/ 0,
            /*allow_retire_conn_id            =*/ 0,
            /*allow_stream_rel                =*/ 0,
            /*allow_conn_fc                   =*/ 0,
            /*allow_conn_close                =*/ 1,
            /*allow_cfq_other                 =*/ 0,
            /*allow_new_token                 =*/ 0,
            /*allow_force_ack_eliciting       =*/ 1,
            /*allow_padding                   =*/ 1,
            /*require_ack_eliciting           =*/ 0,
            /*bypass_cc                       =*/ 0,
        },
        /* EL 2(HANDSHAKE) - Archetype 1(PROBE) */
        {
            /*allow_ack                       =*/ 1,
            /*allow_ping                      =*/ 1,
            /*allow_crypto                    =*/ 1,
            /*allow_handshake_done            =*/ 0,
            /*allow_path_challenge            =*/ 0,
            /*allow_path_response             =*/ 0,
            /*allow_new_conn_id               =*/ 0,
            /*allow_retire_conn_id            =*/ 0,
            /*allow_stream_rel                =*/ 0,
            /*allow_conn_fc                   =*/ 0,
            /*allow_conn_close                =*/ 1,
            /*allow_cfq_other                 =*/ 0,
            /*allow_new_token                 =*/ 0,
            /*allow_force_ack_eliciting       =*/ 1,
            /*allow_padding                   =*/ 1,
            /*require_ack_eliciting           =*/ 1,
            /*bypass_cc                       =*/ 1,
        },
        /* EL 2(HANDSHAKE) - Archetype 2(ACK_ONLY) */
        {
            /*allow_ack                       =*/ 1,
            /*allow_ping                      =*/ 0,
            /*allow_crypto                    =*/ 0,
            /*allow_handshake_done            =*/ 0,
            /*allow_path_challenge            =*/ 0,
            /*allow_path_response             =*/ 0,
            /*allow_new_conn_id               =*/ 0,
            /*allow_retire_conn_id            =*/ 0,
            /*allow_stream_rel                =*/ 0,
            /*allow_conn_fc                   =*/ 0,
            /*allow_conn_close                =*/ 0,
            /*allow_cfq_other                 =*/ 0,
            /*allow_new_token                 =*/ 0,
            /*allow_force_ack_eliciting       =*/ 1,
            /*allow_padding                   =*/ 0,
            /*require_ack_eliciting           =*/ 0,
            /*bypass_cc                       =*/ 1,
        },
    },
    /* EL 3(1RTT) */
    {
        /* EL 3(1RTT) - Archetype 0(NORMAL) */
        {
            /*allow_ack                       =*/ 1,
            /*allow_ping                      =*/ 1,
            /*allow_crypto                    =*/ 1,
            /*allow_handshake_done            =*/ 1,
            /*allow_path_challenge            =*/ 0,
            /*allow_path_response             =*/ 1,
            /*allow_new_conn_id               =*/ 1,
            /*allow_retire_conn_id            =*/ 1,
            /*allow_stream_rel                =*/ 1,
            /*allow_conn_fc                   =*/ 1,
            /*allow_conn_close                =*/ 1,
            /*allow_cfq_other                 =*/ 1,
            /*allow_new_token                 =*/ 1,
            /*allow_force_ack_eliciting       =*/ 1,
            /*allow_padding                   =*/ 1,
            /*require_ack_eliciting           =*/ 0,
            /*bypass_cc                       =*/ 0,
        },
        /* EL 3(1RTT) - Archetype 1(PROBE) */
        {
            /*allow_ack                       =*/ 1,
            /*allow_ping                      =*/ 1,
            /*allow_crypto                    =*/ 1,
            /*allow_handshake_done            =*/ 1,
            /*allow_path_challenge            =*/ 0,
            /*allow_path_response             =*/ 1,
            /*allow_new_conn_id               =*/ 1,
            /*allow_retire_conn_id            =*/ 1,
            /*allow_stream_rel                =*/ 1,
            /*allow_conn_fc                   =*/ 1,
            /*allow_conn_close                =*/ 1,
            /*allow_cfq_other                 =*/ 1,
            /*allow_new_token                 =*/ 1,
            /*allow_force_ack_eliciting       =*/ 1,
            /*allow_padding                   =*/ 1,
            /*require_ack_eliciting           =*/ 1,
            /*bypass_cc                       =*/ 1,
        },
        /* EL 3(1RTT) - Archetype 2(ACK_ONLY) */
        {
            /*allow_ack                       =*/ 1,
            /*allow_ping                      =*/ 0,
            /*allow_crypto                    =*/ 0,
            /*allow_handshake_done            =*/ 0,
            /*allow_path_challenge            =*/ 0,
            /*allow_path_response             =*/ 0,
            /*allow_new_conn_id               =*/ 0,
            /*allow_retire_conn_id            =*/ 0,
            /*allow_stream_rel                =*/ 0,
            /*allow_conn_fc                   =*/ 0,
            /*allow_conn_close                =*/ 0,
            /*allow_cfq_other                 =*/ 0,
            /*allow_new_token                 =*/ 0,
            /*allow_force_ack_eliciting       =*/ 1,
            /*allow_padding                   =*/ 0,
            /*require_ack_eliciting           =*/ 0,
            /*bypass_cc                       =*/ 1,
        }
    }
};
1302
1303
/*
 * Copies the policy entry for (enc_level, archetype) out of the archetypes
 * table into *a. Returns 1 on success, 0 if either index is out of range.
 */
static int txp_get_archetype_data(uint32_t enc_level,
                                  uint32_t archetype,
                                  struct archetype_data *a)
{
    if (enc_level < QUIC_ENC_LEVEL_NUM
        && archetype < TX_PACKETISER_ARCHETYPE_NUM) {
        /* Copying by value is fine; the struct is only a handful of bits. */
        *a = archetypes[enc_level][archetype];
        return 1;
    }

    return 0;
}
1315
1316
/*
 * Plans the geometry for a prospective packet at the given EL and archetype:
 * fills *phdr with a provisional packet header and *geom with the size
 * budget (cmpl, cmppl, pkt_overhead), given running_total bytes already
 * consumed in the datagram by previously coalesced packets.
 * Returns 1 on success, 0 on failure.
 */
static int txp_determine_geometry(OSSL_QUIC_TX_PACKETISER *txp,
                                  uint32_t archetype,
                                  uint32_t enc_level,
                                  size_t running_total,
                                  QUIC_PKT_HDR *phdr,
                                  struct txp_pkt_geom *geom)
{
    size_t mdpl, cmpl, hdr_len;

    /* Get information about packet archetype. */
    if (!txp_get_archetype_data(enc_level, archetype, &geom->adata))
        return 0;

    /* Assemble packet header. */
    phdr->type = ossl_quic_enc_level_to_pkt_type(enc_level);
    phdr->spin_bit = 0;
    phdr->pn_len = (unsigned int)txp_determine_pn_len(txp);
    phdr->partial = 0;
    phdr->fixed = 1;
    phdr->reserved = 0;
    phdr->version = txp->args.protocol_version;
    phdr->dst_conn_id = txp->args.cur_dcid;
    phdr->src_conn_id = txp->args.cur_scid;

    /*
     * We need to know the length of the payload to get an accurate header
     * length for non-1RTT packets, because the Length field found in
     * Initial/Handshake/0-RTT packets uses a variable-length encoding. However,
     * we don't have a good idea of the length of our payload, because the
     * length of the payload depends on the room in the datagram after fitting
     * the header, which depends on the size of the header.
     *
     * In general, it does not matter if a packet is slightly shorter (because
     * e.g. we predicted use of a 2-byte length field, but ended up only needing
     * a 1-byte length field). However this does matter for Initial packets
     * which must be at least 1200 bytes, which is also the assumed default MTU;
     * therefore in many cases Initial packets will be padded to 1200 bytes,
     * which means if we overestimated the header size, we will be short by a
     * few bytes and the server will ignore the packet for being too short. In
     * this case, however, such packets always *will* be padded to meet 1200
     * bytes, which requires a 2-byte length field, so we don't actually need to
     * worry about this. Thus we estimate the header length assuming a 2-byte
     * length field here, which should in practice work well in all cases.
     */
    phdr->len = OSSL_QUIC_VLINT_2B_MAX - phdr->pn_len;

    /* Only Initial packets carry a token (RFC 9000 long header format). */
    if (enc_level == QUIC_ENC_LEVEL_INITIAL) {
        phdr->token = txp->initial_token;
        phdr->token_len = txp->initial_token_len;
    } else {
        phdr->token = NULL;
        phdr->token_len = 0;
    }

    hdr_len = ossl_quic_wire_get_encoded_pkt_hdr_len(phdr->dst_conn_id.id_len,
                                                     phdr);
    if (hdr_len == 0)
        return 0;

    /* MDPL: Maximum datagram payload length. */
    mdpl = txp_get_mdpl(txp);

    /*
     * CMPL: Maximum encoded packet size we can put into this datagram given any
     * previous packets coalesced into it.
     */
    if (running_total > mdpl)
        /* Should not be possible, but if it happens: */
        cmpl = 0;
    else
        cmpl = mdpl - running_total;

    /* CMPPL: Maximum amount we can put into the current packet payload */
    if (!txp_determine_ppl_from_pl(txp, cmpl, enc_level, hdr_len, &geom->cmppl))
        return 0;

    geom->cmpl = cmpl;
    geom->pkt_overhead = cmpl - geom->cmppl;
    geom->archetype = archetype;
    return 1;
}
1397
1398
static uint32_t txp_determine_archetype(OSSL_QUIC_TX_PACKETISER *txp,
1399
    uint64_t cc_limit)
1400
0
{
1401
0
    OSSL_ACKM_PROBE_INFO *probe_info
1402
0
        = ossl_ackm_get0_probe_request(txp->args.ackm);
1403
0
    uint32_t pn_space;
1404
1405
    /*
1406
     * If ACKM has requested probe generation (e.g. due to PTO), we generate a
1407
     * Probe-archetype packet. Actually, we determine archetype on a
1408
     * per-datagram basis, so if any EL wants a probe, do a pass in which
1409
     * we try and generate a probe (if needed) for all ELs.
1410
     */
1411
0
    if (probe_info->anti_deadlock_initial > 0
1412
0
        || probe_info->anti_deadlock_handshake > 0)
1413
0
        return TX_PACKETISER_ARCHETYPE_PROBE;
1414
1415
0
    for (pn_space = QUIC_PN_SPACE_INITIAL;
1416
0
        pn_space < QUIC_PN_SPACE_NUM;
1417
0
        ++pn_space)
1418
0
        if (probe_info->pto[pn_space] > 0)
1419
0
            return TX_PACKETISER_ARCHETYPE_PROBE;
1420
1421
    /*
1422
     * If we are out of CC budget, we cannot send a normal packet,
1423
     * but we can do an ACK-only packet (potentially, if we
1424
     * want to send an ACK).
1425
     */
1426
0
    if (cc_limit == 0)
1427
0
        return TX_PACKETISER_ARCHETYPE_ACK_ONLY;
1428
1429
    /* All other packets. */
1430
0
    return TX_PACKETISER_ARCHETYPE_NORMAL;
1431
0
}
1432
1433
/*
 * Determine whether it is worth attempting packet generation for the given
 * EL/archetype combination, i.e. whether any frame source currently has
 * something to send that the archetype permits.
 *
 * Returns 1 if staging should be attempted, 0 otherwise (including when the
 * EL is not provisioned or CC forbids sending). As a side effect, may lower
 * *conn_close_enc_level to this EL (see the long comment below for the
 * CONNECTION_CLOSE EL-selection policy).
 */
static int txp_should_try_staging(OSSL_QUIC_TX_PACKETISER *txp,
                                  uint32_t enc_level,
                                  uint32_t archetype,
                                  uint64_t cc_limit,
                                  uint32_t *conn_close_enc_level)
{
    struct archetype_data a;
    uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level);
    QUIC_CFQ_ITEM *cfq_item;

    /* Cannot send on an EL for which we have no TX keys. */
    if (!ossl_qtx_is_enc_level_provisioned(txp->args.qtx, enc_level))
        return 0;

    if (!txp_get_archetype_data(enc_level, archetype, &a))
        return 0;

    if (!a.bypass_cc && cc_limit == 0)
        /* CC not allowing us to send. */
        return 0;

    /*
     * We can produce CONNECTION_CLOSE frames on any EL in principle, which
     * means we need to choose which EL we would prefer to use. After a
     * connection is fully established we have only one provisioned EL and this
     * is a non-issue. Where multiple ELs are provisioned, it is possible the
     * peer does not have the keys for the EL yet, which suggests in general it
     * is preferable to use the lowest EL which is still provisioned.
     *
     * However (RFC 9000 s. 10.2.3 & 12.5) we are also required to not send
     * application CONNECTION_CLOSE frames in non-1-RTT ELs, so as to not
     * potentially leak application data on a connection which has yet to be
     * authenticated. Thus when we have an application CONNECTION_CLOSE frame
     * queued and need to send it on a non-1-RTT EL, we have to convert it
     * into a transport CONNECTION_CLOSE frame which contains no application
     * data. Since this loses information, it suggests we should use the 1-RTT
     * EL to avoid this if possible, even if a lower EL is also available.
     *
     * At the same time, just because we have the 1-RTT EL provisioned locally
     * does not necessarily mean the peer does, for example if a handshake
     * CRYPTO frame has been lost. It is fairly important that CONNECTION_CLOSE
     * is signalled in a way we know our peer can decrypt, as we stop processing
     * connection retransmission logic for real after connection close and
     * simply 'blindly' retransmit the same CONNECTION_CLOSE frame.
     *
     * This is not a major concern for clients, since if a client has a 1-RTT EL
     * provisioned the server is guaranteed to also have a 1-RTT EL provisioned.
     *
     * TODO(QUIC FUTURE): Revisit this when when have reached a decision on how
     * best to implement this
     */
    if (*conn_close_enc_level > enc_level
        && *conn_close_enc_level != QUIC_ENC_LEVEL_1RTT)
        *conn_close_enc_level = enc_level;

    /* Do we need to send a PTO probe? */
    if (a.allow_force_ack_eliciting) {
        OSSL_ACKM_PROBE_INFO *probe_info
            = ossl_ackm_get0_probe_request(txp->args.ackm);

        if ((enc_level == QUIC_ENC_LEVEL_INITIAL
                && probe_info->anti_deadlock_initial > 0)
            || (enc_level == QUIC_ENC_LEVEL_HANDSHAKE
                && probe_info->anti_deadlock_handshake > 0)
            || probe_info->pto[pn_space] > 0)
            return 1;
    }

    /* Does the crypto stream for this EL want to produce anything? */
    if (a.allow_crypto && sstream_is_pending(txp->args.crypto[pn_space]))
        return 1;

    /* Does the ACKM for this PN space want to produce anything? */
    if (a.allow_ack && (ossl_ackm_is_ack_desired(txp->args.ackm, pn_space) || (txp->want_ack & (1UL << pn_space)) != 0))
        return 1;

    /* Do we need to force emission of an ACK-eliciting packet? */
    if (a.allow_force_ack_eliciting
        && (txp->force_ack_eliciting & (1UL << pn_space)) != 0)
        return 1;

    /* Does the connection-level RXFC want to produce a frame? */
    if (a.allow_conn_fc && (txp->want_max_data || ossl_quic_rxfc_has_cwm_changed(txp->args.conn_rxfc, 0)))
        return 1;

    /* Do we want to produce a MAX_STREAMS frame? */
    if (a.allow_conn_fc
        && (txp->want_max_streams_bidi
            || ossl_quic_rxfc_has_cwm_changed(txp->args.max_streams_bidi_rxfc,
                                              0)
            || txp->want_max_streams_uni
            || ossl_quic_rxfc_has_cwm_changed(txp->args.max_streams_uni_rxfc,
                                              0)))
        return 1;

    /* Do we want to produce a HANDSHAKE_DONE frame? */
    if (a.allow_handshake_done && txp->want_handshake_done)
        return 1;

    /* Do we want to produce a CONNECTION_CLOSE frame? */
    if (a.allow_conn_close && txp->want_conn_close && *conn_close_enc_level == enc_level)
        /*
         * This is a bit of a special case since CONNECTION_CLOSE can appear in
         * most packet types, and when we decide we want to send it this status
         * isn't tied to a specific EL. So if we want to send it, we send it
         * only on the lowest non-dropped EL.
         */
        return 1;

    /* Does the CFQ have any frames queued for this PN space? */
    if (enc_level != QUIC_ENC_LEVEL_0RTT)
        for (cfq_item = ossl_quic_cfq_get_priority_head(txp->args.cfq, pn_space);
             cfq_item != NULL;
             cfq_item = ossl_quic_cfq_item_get_priority_next(cfq_item, pn_space)) {
            uint64_t frame_type = ossl_quic_cfq_item_get_frame_type(cfq_item);

            /* Only count frames the archetype actually permits. */
            switch (frame_type) {
            case OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID:
                if (a.allow_new_conn_id)
                    return 1;
                break;
            case OSSL_QUIC_FRAME_TYPE_RETIRE_CONN_ID:
                if (a.allow_retire_conn_id)
                    return 1;
                break;
            case OSSL_QUIC_FRAME_TYPE_NEW_TOKEN:
                if (a.allow_new_token)
                    return 1;
                break;
            case OSSL_QUIC_FRAME_TYPE_PATH_RESPONSE:
                if (a.allow_path_response)
                    return 1;
                break;
            default:
                if (a.allow_cfq_other)
                    return 1;
                break;
            }
        }

    if (a.allow_stream_rel && txp->handshake_complete) {
        QUIC_STREAM_ITER it;

        /* If there are any active streams, 0/1-RTT wants to produce a packet.
         * Whether a stream is on the active list is required to be precise
         * (i.e., a stream is never on the active list if we cannot produce a
         * frame for it), and all stream-related frames are governed by
         * a.allow_stream_rel (i.e., if we can send one type of stream-related
         * frame, we can send any of them), so we don't need to inspect
         * individual streams on the active list, just confirm that the active
         * list is non-empty.
         */
        ossl_quic_stream_iter_init(&it, txp->args.qsm, 0);
        if (it.stream != NULL)
            return 1;
    }

    return 0;
}
1591
1592
/*
 * Report whether the given send stream currently has at least one chunk of
 * data (or a FIN) available for transmission. Non-destructive: only peeks at
 * the head of the stream.
 */
static int sstream_is_pending(QUIC_SSTREAM *sstream)
{
    OSSL_QUIC_FRAME_STREAM frame_hdr;
    OSSL_QTX_IOVEC iovs[2];
    size_t iov_count = OSSL_NELEM(iovs);

    return ossl_quic_sstream_get_stream_frame(sstream, 0, &frame_hdr,
                                              iovs, &iov_count);
}
1600
1601
/* Determine how many bytes we should use for the encoded PN. */
1602
static size_t txp_determine_pn_len(OSSL_QUIC_TX_PACKETISER *txp)
1603
0
{
1604
0
    return 4; /* TODO(QUIC FUTURE) */
1605
0
}
1606
1607
/*
 * Determine plaintext packet payload length from payload length.
 *
 * Given pl, the total encoded packet budget, subtracts the packet header
 * length (hdr_len) and asks the QTX to convert the remaining ciphertext
 * budget into a plaintext payload budget (accounting for AEAD overhead).
 * On success writes the result to *r and returns 1; returns 0 if pl cannot
 * even hold the header or the QTX calculation fails.
 */
static int txp_determine_ppl_from_pl(OSSL_QUIC_TX_PACKETISER *txp,
                                     size_t pl,
                                     uint32_t enc_level,
                                     size_t hdr_len,
                                     size_t *r)
{
    if (pl < hdr_len)
        return 0;

    pl -= hdr_len;

    if (!ossl_qtx_calculate_plaintext_payload_len(txp->args.qtx, enc_level,
                                                  pl, &pl))
        return 0;

    *r = pl;
    return 1;
}
1626
1627
/* Thin wrapper: maximum datagram payload length as reported by the QTX. */
static size_t txp_get_mdpl(OSSL_QUIC_TX_PACKETISER *txp)
{
    return ossl_qtx_get_mdpl(txp->args.qtx);
}
1631
1632
static QUIC_SSTREAM *get_sstream_by_id(uint64_t stream_id, uint32_t pn_space,
1633
    void *arg)
1634
0
{
1635
0
    OSSL_QUIC_TX_PACKETISER *txp = arg;
1636
0
    QUIC_STREAM *s;
1637
1638
0
    if (stream_id == UINT64_MAX)
1639
0
        return txp->args.crypto[pn_space];
1640
1641
0
    s = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id);
1642
0
    if (s == NULL)
1643
0
        return NULL;
1644
1645
0
    return s->sstream;
1646
0
}
1647
1648
/*
 * FIFD callback: a frame in a lost packet must be regenerated. Sets the
 * relevant want_* latch on the TXP (or on the affected stream) so the frame
 * is re-emitted on a future generation pass. Stream-scoped cases silently
 * ignore streams which no longer exist in the map.
 */
static void on_regen_notify(uint64_t frame_type, uint64_t stream_id,
                            QUIC_TXPIM_PKT *pkt, void *arg)
{
    OSSL_QUIC_TX_PACKETISER *txp = arg;

    switch (frame_type) {
    case OSSL_QUIC_FRAME_TYPE_HANDSHAKE_DONE:
        txp->want_handshake_done = 1;
        break;
    case OSSL_QUIC_FRAME_TYPE_MAX_DATA:
        txp->want_max_data = 1;
        break;
    case OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_BIDI:
        txp->want_max_streams_bidi = 1;
        break;
    case OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_UNI:
        txp->want_max_streams_uni = 1;
        break;
    case OSSL_QUIC_FRAME_TYPE_ACK_WITH_ECN:
        /* Re-request an ACK in the PN space the lost packet belonged to. */
        txp->want_ack |= (1UL << pkt->ackm_pkt.pkt_space);
        break;
    case OSSL_QUIC_FRAME_TYPE_MAX_STREAM_DATA: {
        QUIC_STREAM *s
            = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id);

        if (s == NULL)
            return;

        s->want_max_stream_data = 1;
        ossl_quic_stream_map_update_state(txp->args.qsm, s);
    } break;
    case OSSL_QUIC_FRAME_TYPE_STOP_SENDING: {
        QUIC_STREAM *s
            = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id);

        if (s == NULL)
            return;

        ossl_quic_stream_map_schedule_stop_sending(txp->args.qsm, s);
    } break;
    case OSSL_QUIC_FRAME_TYPE_RESET_STREAM: {
        QUIC_STREAM *s
            = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id);

        if (s == NULL)
            return;

        s->want_reset_stream = 1;
        ossl_quic_stream_map_update_state(txp->args.qsm, s);
    } break;
    default:
        /* Only the frame types registered for regeneration should arrive. */
        assert(0);
        break;
    }
}
1703
1704
/*
 * Decide whether the packet under construction must carry a PING frame to
 * guarantee it is ACK-eliciting: either the archetype mandates ACK-eliciting
 * packets, or a forced ACK-eliciting packet was requested for this PN space.
 */
static int txp_need_ping(OSSL_QUIC_TX_PACKETISER *txp,
                         uint32_t pn_space,
                         const struct archetype_data *adata)
{
    int forced;

    if (!adata->allow_ping)
        return 0;

    forced = (txp->force_ack_eliciting & (1UL << pn_space)) != 0;
    return adata->require_ack_eliciting || forced;
}
1712
1713
/*
 * Initialise per-packet state for one EL within the current datagram.
 * Computes the packet geometry (given bytes already coalesced into the
 * datagram) and sets up the TX helper, reserving one byte for a PING frame
 * if this packet must end up ACK-eliciting. Returns 1 on success.
 */
static int txp_pkt_init(struct txp_pkt *pkt, OSSL_QUIC_TX_PACKETISER *txp,
                        uint32_t enc_level, uint32_t archetype,
                        size_t running_total)
{
    uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level);

    if (!txp_determine_geometry(txp, archetype, enc_level,
                                running_total, &pkt->phdr, &pkt->geom))
        return 0;

    /*
     * Initialise TX helper. If we must be ACK eliciting, reserve 1 byte for
     * PING.
     */
    if (!tx_helper_init(&pkt->h, txp, enc_level,
                        pkt->geom.cmppl,
                        txp_need_ping(txp, pn_space, &pkt->geom.adata) ? 1 : 0))
        return 0;

    /* Remaining fields start empty; filled in during generation/commit. */
    pkt->h_valid = 1;
    pkt->tpkt = NULL;
    pkt->stream_head = NULL;
    pkt->force_pad = 0;
    return 1;
}
1738
1739
/*
 * Release resources held by a per-packet state structure. Safe to call on a
 * pkt whose helper was never initialised (h_valid == 0) and idempotent with
 * respect to the TXPIM packet, which is released back to the TXPIM if still
 * attached.
 */
static void txp_pkt_cleanup(struct txp_pkt *pkt, OSSL_QUIC_TX_PACKETISER *txp)
{
    if (!pkt->h_valid)
        return;

    tx_helper_cleanup(&pkt->h);
    pkt->h_valid = 0;

    if (pkt->tpkt != NULL) {
        ossl_quic_txpim_pkt_release(txp->args.txpim, pkt->tpkt);
        pkt->tpkt = NULL;
    }
}
1752
1753
/*
 * Recompute pkt->geom.pkt_overhead (and pkt->phdr.len) now that the exact
 * number of payload bytes appended is known. Returns 1 on success, 0 on a
 * QTX calculation failure. Deliberately a no-op for the INITIAL EL (see
 * inline comment).
 */
static int txp_pkt_postgen_update_pkt_overhead(struct txp_pkt *pkt,
                                               OSSL_QUIC_TX_PACKETISER *txp)
{
    /*
     * After we have staged and generated our packets, but before we commit
     * them, it is possible for the estimated packet overhead (packet header +
     * AEAD tag size) to shrink slightly because we generated a short packet
     * whose which can be represented in fewer bytes as a variable-length
     * integer than we were (pessimistically) budgeting for. We need to account
     * for this to ensure that we get our padding calculation exactly right.
     *
     * Update pkt_overhead to be accurate now that we know how much data is
     * going in a packet.
     */
    size_t hdr_len, ciphertext_len;

    if (pkt->h.enc_level == QUIC_ENC_LEVEL_INITIAL)
        /*
         * Don't update overheads for the INITIAL EL - we have not finished
         * appending padding to it and would potentially miscalculate the
         * correct padding if we now update the pkt_overhead field to switch to
         * e.g. a 1-byte length field in the packet header. Since we are padding
         * to QUIC_MIN_INITIAL_DGRAM_LEN which requires a 2-byte length field,
         * this is guaranteed to be moot anyway. See comment in
         * txp_determine_geometry for more information.
         */
        return 1;

    if (!ossl_qtx_calculate_ciphertext_payload_len(txp->args.qtx, pkt->h.enc_level,
                                                   pkt->h.bytes_appended,
                                                   &ciphertext_len))
        return 0;

    pkt->phdr.len = ciphertext_len;

    hdr_len = ossl_quic_wire_get_encoded_pkt_hdr_len(pkt->phdr.dst_conn_id.id_len,
                                                     &pkt->phdr);

    /* overhead = header bytes + AEAD expansion (ciphertext - plaintext) */
    pkt->geom.pkt_overhead = hdr_len + ciphertext_len - pkt->h.bytes_appended;
    return 1;
}
1794
1795
/*
 * FIFD callback: a frame we sent has been acknowledged. Updates the relevant
 * stream state (STOP_SENDING acked, RESET_STREAM acked). Streams which have
 * since been removed from the map are silently ignored.
 */
static void on_confirm_notify(uint64_t frame_type, uint64_t stream_id,
                              QUIC_TXPIM_PKT *pkt, void *arg)
{
    OSSL_QUIC_TX_PACKETISER *txp = arg;

    switch (frame_type) {
    case OSSL_QUIC_FRAME_TYPE_STOP_SENDING: {
        QUIC_STREAM *s
            = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id);

        if (s == NULL)
            return;

        s->acked_stop_sending = 1;
        ossl_quic_stream_map_update_state(txp->args.qsm, s);
    } break;
    case OSSL_QUIC_FRAME_TYPE_RESET_STREAM: {
        QUIC_STREAM *s
            = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id);

        if (s == NULL)
            return;

        /*
         * We must already be in RESET_SENT or RESET_RECVD if we are
         * here, so we don't need to check state here.
         */
        ossl_quic_stream_map_notify_reset_stream_acked(txp->args.qsm, s);
        ossl_quic_stream_map_update_state(txp->args.qsm, s);
    } break;
    default:
        /* Only frame types registered for confirmation should arrive. */
        assert(0);
        break;
    }
}
1830
1831
/*
 * Append num_bytes of PADDING frames to the packet under construction.
 * A no-op for num_bytes == 0. Updates the TXPIM accounting (byte count and
 * in-flight status). Returns 1 on success, 0 on failure (helper rolled back
 * where applicable).
 */
static int txp_pkt_append_padding(struct txp_pkt *pkt,
                                  OSSL_QUIC_TX_PACKETISER *txp, size_t num_bytes)
{
    WPACKET *wpkt;

    if (num_bytes == 0)
        return 1;

    /* Caller must have a live helper and an attached TXPIM packet. */
    if (!ossl_assert(pkt->h_valid))
        return 0;

    if (!ossl_assert(pkt->tpkt != NULL))
        return 0;

    wpkt = tx_helper_begin(&pkt->h);
    if (wpkt == NULL)
        return 0;

    if (!ossl_quic_wire_encode_padding(wpkt, num_bytes)) {
        tx_helper_rollback(&pkt->h);
        return 0;
    }

    if (!tx_helper_commit(&pkt->h))
        return 0;

    pkt->tpkt->ackm_pkt.num_bytes += num_bytes;
    /* Cannot be non-inflight if we have a PADDING frame */
    pkt->tpkt->ackm_pkt.is_inflight = 1;
    return 1;
}
1862
1863
static void on_sstream_updated(uint64_t stream_id, void *arg)
1864
0
{
1865
0
    OSSL_QUIC_TX_PACKETISER *txp = arg;
1866
0
    QUIC_STREAM *s;
1867
1868
0
    s = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id);
1869
0
    if (s == NULL)
1870
0
        return;
1871
1872
0
    ossl_quic_stream_map_update_state(txp->args.qsm, s);
1873
0
}
1874
1875
/*
1876
 * Returns 1 if we can send that many bytes in closing state, 0 otherwise.
1877
 * Also maintains the bytes sent state if it returns a success.
1878
 */
1879
static int try_commit_conn_close(OSSL_QUIC_TX_PACKETISER *txp, size_t n)
1880
0
{
1881
0
    int res;
1882
1883
    /* We can always send the first connection close frame */
1884
0
    if (txp->closing_bytes_recv == 0)
1885
0
        return 1;
1886
1887
    /*
1888
     * RFC 9000 s. 10.2.1 Closing Connection State:
1889
     *      To avoid being used for an amplification attack, such
1890
     *      endpoints MUST limit the cumulative size of packets it sends
1891
     *      to three times the cumulative size of the packets that are
1892
     *      received and attributed to the connection.
1893
     * and:
1894
     *      An endpoint in the closing state MUST either discard packets
1895
     *      received from an unvalidated address or limit the cumulative
1896
     *      size of packets it sends to an unvalidated address to three
1897
     *      times the size of packets it receives from that address.
1898
     */
1899
0
    res = txp->closing_bytes_xmit + n <= txp->closing_bytes_recv * 3;
1900
1901
    /*
1902
     * Attribute the bytes to the connection, if we are allowed to send them
1903
     * and this isn't the first closing frame.
1904
     */
1905
0
    if (res && txp->closing_bytes_recv != 0)
1906
0
        txp->closing_bytes_xmit += n;
1907
0
    return res;
1908
0
}
1909
1910
/*
 * Record n bytes received while in the closing state; feeds the 3x
 * anti-amplification budget checked by try_commit_conn_close().
 */
void ossl_quic_tx_packetiser_record_received_closing_bytes(
    OSSL_QUIC_TX_PACKETISER *txp, size_t n)
{
    txp->closing_bytes_recv += n;
}
1915
1916
/*
 * Generate the frames which must precede any token/handshake payload in the
 * packet: a regenerated ACK frame (if desired and permitted) and a
 * CONNECTION_CLOSE frame (if this EL was chosen to carry it).
 *
 * chosen_for_conn_close: nonzero if this EL is the selected EL for any
 *     pending CONNECTION_CLOSE.
 * *can_be_non_inflight: cleared if a CONNECTION_CLOSE is emitted, since such
 *     a packet must be counted (the packet can no longer be non-inflight).
 *
 * Returns 1 on success (including "nothing emitted"), 0 on fatal error.
 * Failed encodes are rolled back and are not errors.
 */
static int txp_generate_pre_token(OSSL_QUIC_TX_PACKETISER *txp,
                                  struct txp_pkt *pkt,
                                  int chosen_for_conn_close,
                                  int *can_be_non_inflight)
{
    const uint32_t enc_level = pkt->h.enc_level;
    const uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level);
    const struct archetype_data *a = &pkt->geom.adata;
    QUIC_TXPIM_PKT *tpkt = pkt->tpkt;
    struct tx_helper *h = &pkt->h;
    const OSSL_QUIC_FRAME_ACK *ack;
    OSSL_QUIC_FRAME_ACK ack2;

    tpkt->ackm_pkt.largest_acked = QUIC_PN_INVALID;

    /* ACK Frames (Regenerate) */
    if (a->allow_ack
        && tx_helper_get_space_left(h) >= MIN_FRAME_SIZE_ACK
        && (((txp->want_ack & (1UL << pn_space)) != 0)
            || ossl_ackm_is_ack_desired(txp->args.ackm, pn_space))
        && (ack = ossl_ackm_get_ack_frame(txp->args.ackm, pn_space)) != NULL) {
        WPACKET *wpkt = tx_helper_begin(h);

        if (wpkt == NULL)
            return 0;

        /* We do not currently support ECN */
        ack2 = *ack;
        ack2.ecn_present = 0;

        if (ossl_quic_wire_encode_frame_ack(wpkt,
                                            txp->args.ack_delay_exponent,
                                            &ack2)) {
            if (!tx_helper_commit(h))
                return 0;

            tpkt->had_ack_frame = 1;

            /* Record the largest PN acknowledged for ACKM bookkeeping. */
            if (ack->num_ack_ranges > 0)
                tpkt->ackm_pkt.largest_acked = ack->ack_ranges[0].end;

            if (txp->ack_tx_cb != NULL)
                txp->ack_tx_cb(&ack2, pn_space, txp->ack_tx_cb_arg);
        } else {
            tx_helper_rollback(h);
        }
    }

    /* CONNECTION_CLOSE Frames (Regenerate) */
    if (a->allow_conn_close && txp->want_conn_close && chosen_for_conn_close) {
        WPACKET *wpkt = tx_helper_begin(h);
        OSSL_QUIC_FRAME_CONN_CLOSE f, *pf = &txp->conn_close_frame;
        size_t l;

        if (wpkt == NULL)
            return 0;

        /*
         * Application CONNECTION_CLOSE frames may only be sent in the
         * Application PN space, as otherwise they may be sent before a
         * connection is authenticated and leak application data. Therefore, if
         * we need to send a CONNECTION_CLOSE frame in another PN space and were
         * given an application CONNECTION_CLOSE frame, convert it into a
         * transport CONNECTION_CLOSE frame which contains no application
         * data. Since this loses information, it suggests we should use the 1-RTT
         * EL to avoid this if possible, even if a lower EL is also available.
         *
         * RFC 9000 s. 10.2.3: "A CONNECTION_CLOSE of type 0x1d MUST be replaced
         * by a CONNECTION_CLOSE of type 0x1c when sending the frame in Initial
         * or Handshake packets. Otherwise, information about the application
         * state might be revealed. Endpoints MUST clear the value of the Reason
         * Phrase field and SHOULD use the APPLICATION_ERROR code when
         * converting to a CONNECTION_CLOSE of type 0x1c."
         */
        if (pn_space != QUIC_PN_SPACE_APP && pf->is_app) {
            pf = &f;
            pf->is_app = 0;
            pf->frame_type = 0;
            pf->error_code = OSSL_QUIC_ERR_APPLICATION_ERROR;
            pf->reason = NULL;
            pf->reason_len = 0;
        }

        if (ossl_quic_wire_encode_frame_conn_close(wpkt, pf)
            && WPACKET_get_total_written(wpkt, &l)
            && try_commit_conn_close(txp, l)) {
            if (!tx_helper_commit(h))
                return 0;

            tpkt->had_conn_close = 1;
            *can_be_non_inflight = 0;
        } else {
            tx_helper_rollback(h);
        }
    }

    return 1;
}
2013
2014
/*
 * Evaluate one candidate Length-field encoding for a CRYPTO/STREAM frame.
 *
 * Given the remaining budget (space_left), the desired payload size
 * (orig_len), the header size excluding the length field (base_hdr_len), the
 * size of this length-field encoding (lenbytes) and the largest value it can
 * represent (maxn), compute the resulting header size (*hdr_len) and the
 * largest payload that fits (*payload_len).
 *
 * Returns 1 if the combination is usable (nonzero payload, or a zero-length
 * frame whose header fits), 0 otherwise.
 */
static int try_len(size_t space_left, size_t orig_len,
                   size_t base_hdr_len, size_t lenbytes,
                   uint64_t maxn, size_t *hdr_len, size_t *payload_len)
{
    size_t payload, avail;
    size_t cap = (maxn > SIZE_MAX) ? SIZE_MAX : (size_t)maxn;

    *hdr_len = base_hdr_len + lenbytes;
    avail = (space_left >= *hdr_len) ? space_left - *hdr_len : 0;

    /* A zero-length frame only needs its header to fit. */
    if (orig_len == 0) {
        *payload_len = 0;
        return space_left >= *hdr_len;
    }

    /* Clamp to what the length encoding can express, then to the budget. */
    payload = (orig_len > cap) ? cap : orig_len;
    if (payload > avail)
        payload = avail;

    *payload_len = payload;
    return payload > 0;
}
2037
2038
static int determine_len(size_t space_left, size_t orig_len,
2039
    size_t base_hdr_len,
2040
    uint64_t *hlen, uint64_t *len)
2041
0
{
2042
0
    int ok = 0;
2043
0
    size_t chosen_payload_len = 0;
2044
0
    size_t chosen_hdr_len = 0;
2045
0
    size_t payload_len[4], hdr_len[4];
2046
0
    int i, valid[4] = { 0 };
2047
2048
0
    valid[0] = try_len(space_left, orig_len, base_hdr_len,
2049
0
        1, OSSL_QUIC_VLINT_1B_MAX,
2050
0
        &hdr_len[0], &payload_len[0]);
2051
0
    valid[1] = try_len(space_left, orig_len, base_hdr_len,
2052
0
        2, OSSL_QUIC_VLINT_2B_MAX,
2053
0
        &hdr_len[1], &payload_len[1]);
2054
0
    valid[2] = try_len(space_left, orig_len, base_hdr_len,
2055
0
        4, OSSL_QUIC_VLINT_4B_MAX,
2056
0
        &hdr_len[2], &payload_len[2]);
2057
0
    valid[3] = try_len(space_left, orig_len, base_hdr_len,
2058
0
        8, OSSL_QUIC_VLINT_8B_MAX,
2059
0
        &hdr_len[3], &payload_len[3]);
2060
2061
0
    for (i = OSSL_NELEM(valid) - 1; i >= 0; --i)
2062
0
        if (valid[i] && payload_len[i] >= chosen_payload_len) {
2063
0
            chosen_payload_len = payload_len[i];
2064
0
            chosen_hdr_len = hdr_len[i];
2065
0
            ok = 1;
2066
0
        }
2067
2068
0
    *hlen = chosen_hdr_len;
2069
0
    *len = chosen_payload_len;
2070
0
    return ok;
2071
0
}
2072
2073
/*
 * Given a CRYPTO frame header with accurate chdr->len and a budget
 * (space_left), try to find the optimal value of chdr->len to fill as much of
 * the budget as possible. This is slightly hairy because larger values of
 * chdr->len cause larger encoded sizes of the length field of the frame, which
 * in turn mean less space available for payload data. We check all possible
 * encodings and choose the optimal encoding.
 *
 * On success, writes the chosen header length to *hlen and payload length to
 * *len; returns 0 if chdr->len does not fit in size_t or the header cannot
 * be sized.
 */
static int determine_crypto_len(struct tx_helper *h,
                                OSSL_QUIC_FRAME_CRYPTO *chdr,
                                size_t space_left,
                                uint64_t *hlen,
                                uint64_t *len)
{
    size_t orig_len;
    size_t base_hdr_len; /* CRYPTO header length without length field */

    if (chdr->len > SIZE_MAX)
        return 0;

    orig_len = (size_t)chdr->len;

    /* Measure the header with len == 0 to exclude the real length field. */
    chdr->len = 0;
    base_hdr_len = ossl_quic_wire_get_encoded_frame_len_crypto_hdr(chdr);
    chdr->len = orig_len;
    if (base_hdr_len == 0)
        return 0;

    /* A len of 0 still encodes as a 1-byte vlint; strip it off. */
    --base_hdr_len;

    return determine_len(space_left, orig_len, base_hdr_len, hlen, len);
}
2105
2106
/*
 * STREAM-frame analogue of determine_crypto_len(): find the optimal
 * Length-field encoding for a STREAM frame given the remaining budget.
 * Unlike CRYPTO frames, a STREAM frame may omit its length field entirely
 * (implicit length), in which case there is no length byte to strip.
 */
static int determine_stream_len(struct tx_helper *h,
                                OSSL_QUIC_FRAME_STREAM *shdr,
                                size_t space_left,
                                uint64_t *hlen,
                                uint64_t *len)
{
    size_t orig_len;
    size_t base_hdr_len; /* STREAM header length without length field */

    if (shdr->len > SIZE_MAX)
        return 0;

    orig_len = (size_t)shdr->len;

    /* Measure the header with len == 0 to exclude the real length field. */
    shdr->len = 0;
    base_hdr_len = ossl_quic_wire_get_encoded_frame_len_stream_hdr(shdr);
    shdr->len = orig_len;
    if (base_hdr_len == 0)
        return 0;

    /* Only an explicit-length frame carries the 1-byte vlint for len == 0. */
    if (shdr->has_explicit_len)
        --base_hdr_len;

    return determine_len(space_left, orig_len, base_hdr_len, hlen, len);
}
2131
2132
/*
 * Generate as many CRYPTO frames as will fit in the packet from the EL's
 * crypto send stream.
 *
 * Returns 0 only on fatal error (allocation failure); returns 1 both when
 * frames were generated and when there was nothing to do / no room
 * ("can't fit" is not an error). Sets *have_ack_eliciting when at least one
 * CRYPTO frame is emitted and lifts the helper's PING reservation.
 */
static int txp_generate_crypto_frames(OSSL_QUIC_TX_PACKETISER *txp,
                                      struct txp_pkt *pkt,
                                      int *have_ack_eliciting)
{
    const uint32_t enc_level = pkt->h.enc_level;
    const uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level);
    QUIC_TXPIM_PKT *tpkt = pkt->tpkt;
    struct tx_helper *h = &pkt->h;
    size_t num_stream_iovec;
    OSSL_QUIC_FRAME_STREAM shdr = { 0 };
    OSSL_QUIC_FRAME_CRYPTO chdr = { 0 };
    OSSL_QTX_IOVEC iov[2];
    uint64_t hdr_bytes;
    WPACKET *wpkt;
    QUIC_TXPIM_CHUNK chunk = { 0 };
    size_t i, space_left;

    /* i is the skip count passed to the send stream chunk lookup. */
    for (i = 0;; ++i) {
        space_left = tx_helper_get_space_left(h);

        if (space_left < MIN_FRAME_SIZE_CRYPTO)
            return 1; /* no point trying */

        /* Do we have any CRYPTO data waiting? */
        num_stream_iovec = OSSL_NELEM(iov);
        if (!ossl_quic_sstream_get_stream_frame(txp->args.crypto[pn_space],
                                                i, &shdr, iov,
                                                &num_stream_iovec))
            return 1; /* nothing to do */

        /* Convert STREAM frame header to CRYPTO frame header */
        chdr.offset = shdr.offset;
        chdr.len = shdr.len;

        if (chdr.len == 0)
            return 1; /* nothing to do */

        /* Find best fit (header length, payload length) combination. */
        if (!determine_crypto_len(h, &chdr, space_left, &hdr_bytes,
                                  &chdr.len))
            return 1; /* can't fit anything */

        /*
         * Truncate IOVs to match our chosen length.
         *
         * The length cannot be more than SIZE_MAX because this length comes
         * from our send stream buffer.
         */
        ossl_quic_sstream_adjust_iov((size_t)chdr.len, iov, num_stream_iovec);

        /*
         * Ensure we have enough iovecs allocated (1 for the header, up to 2 for
         * the stream data.)
         */
        if (!txp_el_ensure_iovec(&txp->el[enc_level], h->num_iovec + 3))
            return 0; /* alloc error */

        /* Encode the header. */
        wpkt = tx_helper_begin(h);
        if (wpkt == NULL)
            return 0; /* alloc error */

        if (!ossl_quic_wire_encode_frame_crypto_hdr(wpkt, &chdr)) {
            tx_helper_rollback(h);
            return 1; /* can't fit */
        }

        if (!tx_helper_commit(h))
            return 0; /* alloc error */

        /*
         * Add payload iovecs to the helper (infallible).
         *
         * NOTE(review): this inner loop reuses the outer chunk counter 'i',
         * so after it runs the outer loop resumes from num_stream_iovec + 1
         * rather than from the next chunk index. Verify against the skip
         * semantics of ossl_quic_sstream_get_stream_frame — the sibling
         * function txp_generate_stream_frames uses a separate 'j' here.
         */
        for (i = 0; i < num_stream_iovec; ++i)
            tx_helper_append_iovec(h, iov[i].buf, iov[i].buf_len);

        *have_ack_eliciting = 1;
        tx_helper_unrestrict(h); /* no longer need PING */

        /* Log chunk to TXPIM. */
        chunk.stream_id = UINT64_MAX; /* crypto stream */
        chunk.start = chdr.offset;
        chunk.end = chdr.offset + chdr.len - 1;
        chunk.has_fin = 0; /* Crypto stream never ends */
        if (!ossl_quic_txpim_pkt_append_chunk(tpkt, &chunk))
            return 0; /* alloc error */
    }
}
2218
2219
/* A planned stream chunk, as produced by txp_plan_stream_chunk(). */
struct chunk_info {
    OSSL_QUIC_FRAME_STREAM shdr;    /* frame header; len clamped to TXFC */
    uint64_t orig_len;              /* chunk length before TXFC clamping */
    OSSL_QTX_IOVEC iov[2];          /* payload data spans from the sstream */
    size_t num_stream_iovec;        /* number of valid entries in iov */
    int valid;                      /* nonzero if this chunk is usable */
};
2226
2227
/*
 * Fetch the skip'th pending chunk from a send stream and clamp it to the
 * stream-level TXFC credit (with 'consumed' credit already spoken for by
 * earlier planning in this packet). Results are written to *chunk; if nothing
 * remains sendable, chunk->valid is left/set to 0.
 *
 * Returns 1 on success (including "no usable chunk"), 0 only on an internal
 * consistency failure (a zero-length non-FIN chunk from the sstream).
 */
static int txp_plan_stream_chunk(OSSL_QUIC_TX_PACKETISER *txp,
                                 struct tx_helper *h,
                                 QUIC_SSTREAM *sstream,
                                 QUIC_TXFC *stream_txfc,
                                 size_t skip,
                                 struct chunk_info *chunk,
                                 uint64_t consumed)
{
    uint64_t fc_credit, fc_swm, fc_limit;

    chunk->num_stream_iovec = OSSL_NELEM(chunk->iov);
    chunk->valid = ossl_quic_sstream_get_stream_frame(sstream, skip,
                                                      &chunk->shdr,
                                                      chunk->iov,
                                                      &chunk->num_stream_iovec);
    if (!chunk->valid)
        return 1;

    if (!ossl_assert(chunk->shdr.len > 0 || chunk->shdr.is_fin))
        /* Should only have 0-length chunk if FIN */
        return 0;

    chunk->orig_len = chunk->shdr.len;

    /* Clamp according to connection and stream-level TXFC. */
    fc_credit = ossl_quic_txfc_get_credit(stream_txfc, consumed);
    fc_swm = ossl_quic_txfc_get_swm(stream_txfc);
    fc_limit = fc_swm + fc_credit;

    if (chunk->shdr.len > 0 && chunk->shdr.offset + chunk->shdr.len > fc_limit) {
        /* Truncate to the flow-control limit; a truncated chunk loses FIN. */
        chunk->shdr.len = (fc_limit <= chunk->shdr.offset)
            ? 0
            : fc_limit - chunk->shdr.offset;
        chunk->shdr.is_fin = 0;
    }

    if (chunk->shdr.len == 0 && !chunk->shdr.is_fin) {
        /*
         * Nothing to do due to TXFC. Since SSTREAM returns chunks in ascending
         * order of offset we don't need to check any later chunks, so stop
         * iterating here.
         */
        chunk->valid = 0;
        return 1;
    }

    return 1;
}
2275
2276
/*
2277
 * Returns 0 on fatal error (e.g. allocation failure), 1 on success.
2278
 * *packet_full is set to 1 if there is no longer enough room for another STREAM
2279
 * frame.
2280
 */
2281
static int txp_generate_stream_frames(OSSL_QUIC_TX_PACKETISER *txp,
    struct txp_pkt *pkt,
    uint64_t id,
    QUIC_SSTREAM *sstream,
    QUIC_TXFC *stream_txfc,
    QUIC_STREAM *next_stream,
    int *have_ack_eliciting,
    int *packet_full,
    uint64_t *new_credit_consumed,
    uint64_t conn_consumed)
{
    int rc = 0;
    /*
     * Double-buffered chunk state: chunks[i % 2] is the chunk being encoded,
     * chunks[(i + 1) % 2] is the lookahead chunk used to decide whether an
     * implicit-length STREAM frame is safe to emit.
     */
    struct chunk_info chunks[2] = { 0 };
    const uint32_t enc_level = pkt->h.enc_level;
    QUIC_TXPIM_PKT *tpkt = pkt->tpkt;
    struct tx_helper *h = &pkt->h;
    OSSL_QUIC_FRAME_STREAM *shdr;
    WPACKET *wpkt;
    QUIC_TXPIM_CHUNK chunk;
    size_t i, j, space_left;
    int can_fill_payload, use_explicit_len;
    int could_have_following_chunk;
    uint64_t orig_len;
    uint64_t hdr_len_implicit, payload_len_implicit;
    uint64_t hdr_len_explicit, payload_len_explicit;
    uint64_t fc_swm, fc_new_hwm;

    /*
     * Track the highest stream offset we will have sent after this call so
     * the newly-consumed TXFC credit can be reported via
     * *new_credit_consumed.
     */
    fc_swm = ossl_quic_txfc_get_swm(stream_txfc);
    fc_new_hwm = fc_swm;

    /*
     * Load the first two chunks if any offered by the send stream. We retrieve
     * the next chunk in advance so we can determine if we need to send any more
     * chunks from the same stream after this one, which is needed when
     * determining when we can use an implicit length in a STREAM frame.
     */
    for (i = 0; i < 2; ++i) {
        if (!txp_plan_stream_chunk(txp, h, sstream, stream_txfc, i, &chunks[i],
                conn_consumed))
            goto err;

        if (i == 0 && !chunks[i].valid) {
            /* No chunks, nothing to do. */
            rc = 1;
            goto err;
        }
        chunks[i].shdr.stream_id = id;
    }

    for (i = 0;; ++i) {
        space_left = tx_helper_get_space_left(h);

        if (!chunks[i % 2].valid) {
            /* Out of chunks; we're done. */
            rc = 1;
            goto err;
        }

        if (space_left < MIN_FRAME_SIZE_STREAM) {
            *packet_full = 1;
            rc = 1;
            goto err;
        }

        if (!ossl_assert(!h->done_implicit))
            /*
             * Logic below should have ensured we didn't append an
             * implicit-length unless we filled the packet or didn't have
             * another stream to handle, so this should not be possible.
             */
            goto err;

        shdr = &chunks[i % 2].shdr;
        orig_len = chunks[i % 2].orig_len;
        if (i > 0)
            /* Load next chunk for lookahead. */
            if (!txp_plan_stream_chunk(txp, h, sstream, stream_txfc, i + 1,
                    &chunks[(i + 1) % 2], conn_consumed))
                goto err;

        /*
         * Find best fit (header length, payload length) combination for if we
         * use an implicit length.
         */
        shdr->has_explicit_len = 0;
        hdr_len_implicit = payload_len_implicit = 0;
        if (!determine_stream_len(h, shdr, space_left,
                &hdr_len_implicit, &payload_len_implicit)) {
            *packet_full = 1;
            rc = 1;
            goto err; /* can't fit anything */
        }

        /*
         * If there is a next stream, we don't use the implicit length so we can
         * add more STREAM frames after this one, unless there is enough data
         * for this STREAM frame to fill the packet.
         */
        can_fill_payload = (hdr_len_implicit + payload_len_implicit
            >= space_left);

        /*
         * Is there is a stream after this one, or another chunk pending
         * transmission in this stream?
         */
        could_have_following_chunk
            = (next_stream != NULL || chunks[(i + 1) % 2].valid);

        /* Choose between explicit or implicit length representations. */
        use_explicit_len = !((can_fill_payload || !could_have_following_chunk)
            && !pkt->force_pad);

        if (use_explicit_len) {
            /*
             * Find best fit (header length, payload length) combination for if
             * we use an explicit length.
             */
            shdr->has_explicit_len = 1;
            hdr_len_explicit = payload_len_explicit = 0;
            if (!determine_stream_len(h, shdr, space_left,
                    &hdr_len_explicit, &payload_len_explicit)) {
                *packet_full = 1;
                rc = 1;
                goto err; /* can't fit anything */
            }

            shdr->len = payload_len_explicit;
        } else {
            /* Implicit length frame must be the last frame in the packet. */
            *packet_full = 1;
            shdr->has_explicit_len = 0;
            shdr->len = payload_len_implicit;
        }

        /* If this is a FIN, don't keep filling the packet with more FINs. */
        if (shdr->is_fin)
            chunks[(i + 1) % 2].valid = 0;

        /*
         * We are now committed to our length (shdr->len can't change).
         * If we truncated the chunk, clear the FIN bit.
         */
        if (shdr->len < orig_len)
            shdr->is_fin = 0;

        /* Truncate IOVs to match our chosen length. */
        ossl_quic_sstream_adjust_iov((size_t)shdr->len, chunks[i % 2].iov,
            chunks[i % 2].num_stream_iovec);

        /*
         * Ensure we have enough iovecs allocated (1 for the header, up to 2 for
         * the stream data.)
         */
        if (!txp_el_ensure_iovec(&txp->el[enc_level], h->num_iovec + 3))
            goto err; /* alloc error */

        /* Encode the header. */
        wpkt = tx_helper_begin(h);
        if (wpkt == NULL)
            goto err; /* alloc error */

        if (!ossl_assert(ossl_quic_wire_encode_frame_stream_hdr(wpkt, shdr))) {
            /* (Should not be possible.) */
            tx_helper_rollback(h);
            *packet_full = 1;
            rc = 1;
            goto err; /* can't fit */
        }

        if (!tx_helper_commit(h))
            goto err; /* alloc error */

        /* Add payload iovecs to the helper (infallible). */
        for (j = 0; j < chunks[i % 2].num_stream_iovec; ++j)
            tx_helper_append_iovec(h, chunks[i % 2].iov[j].buf,
                chunks[i % 2].iov[j].buf_len);

        *have_ack_eliciting = 1;
        tx_helper_unrestrict(h); /* no longer need PING */
        if (!shdr->has_explicit_len)
            h->done_implicit = 1;

        /* Log new TXFC credit which was consumed. */
        if (shdr->len > 0 && shdr->offset + shdr->len > fc_new_hwm)
            fc_new_hwm = shdr->offset + shdr->len;

        /* Log chunk to TXPIM. */
        chunk.stream_id = shdr->stream_id;
        chunk.start = shdr->offset;
        chunk.end = shdr->offset + shdr->len - 1;
        chunk.has_fin = shdr->is_fin;
        chunk.has_stop_sending = 0;
        chunk.has_reset_stream = 0;
        if (!ossl_quic_txpim_pkt_append_chunk(tpkt, &chunk))
            goto err; /* alloc error */

        if (shdr->len < orig_len) {
            /*
             * If we did not serialize all of this chunk we definitely do not
             * want to try the next chunk
             */
            rc = 1;
            goto err;
        }
    }

    /*
     * 'err' is also the success exit path; rc distinguishes outcome.
     * The credit report is made unconditionally, as frames already committed
     * above consumed credit even if we failed part way through.
     */
err:
    *new_credit_consumed = fc_new_hwm - fc_swm;
    return rc;
}
2490
2491
/*
 * Push a stream onto the front of the temporary singly-linked stream list
 * rooted at *tmp_head (linked via stream->txp_next).
 */
static void txp_enlink_tmp(QUIC_STREAM **tmp_head, QUIC_STREAM *stream)
{
    QUIC_STREAM *old_head = *tmp_head;

    stream->txp_next = old_head;
    *tmp_head = stream;
}
2496
2497
/*
 * Generate all stream-related frames (STOP_SENDING, RESET_STREAM,
 * MAX_STREAM_DATA and STREAM) for every active stream offered by the QSM.
 *
 * Each stream visited is pushed onto *tmp_head so the caller can later walk
 * the set of streams touched by this packet. A frame that fails to fit causes
 * a rollback and stops generation for this packet (break); only allocation
 * or assertion failures return 0.
 *
 * Returns 1 on success (including "packet filled up"), 0 on fatal error.
 */
static int txp_generate_stream_related(OSSL_QUIC_TX_PACKETISER *txp,
    struct txp_pkt *pkt,
    int *have_ack_eliciting,
    QUIC_STREAM **tmp_head)
{
    QUIC_STREAM_ITER it;
    WPACKET *wpkt;
    uint64_t cwm;
    QUIC_STREAM *stream, *snext;
    struct tx_helper *h = &pkt->h;
    /* Connection-level TXFC credit earmarked so far within this packet. */
    uint64_t conn_consumed = 0;

    for (ossl_quic_stream_iter_init(&it, txp->args.qsm, 1);
        it.stream != NULL;) {

        /*
         * Advance the iterator before processing so snext (used for the
         * implicit-length decision in txp_generate_stream_frames) is the
         * stream which follows this one.
         */
        stream = it.stream;
        ossl_quic_stream_iter_next(&it);
        snext = it.stream;

        /* Reset per-packet transmission bookkeeping for this stream. */
        stream->txp_sent_fc = 0;
        stream->txp_sent_stop_sending = 0;
        stream->txp_sent_reset_stream = 0;
        stream->txp_blocked = 0;
        stream->txp_txfc_new_credit_consumed = 0;

        /* Stream Abort Frames (STOP_SENDING, RESET_STREAM) */
        if (stream->want_stop_sending) {
            OSSL_QUIC_FRAME_STOP_SENDING f;

            wpkt = tx_helper_begin(h);
            if (wpkt == NULL)
                return 0; /* alloc error */

            f.stream_id = stream->id;
            f.app_error_code = stream->stop_sending_aec;
            if (!ossl_quic_wire_encode_frame_stop_sending(wpkt, &f)) {
                tx_helper_rollback(h); /* can't fit */
                txp_enlink_tmp(tmp_head, stream);
                break;
            }

            if (!tx_helper_commit(h))
                return 0; /* alloc error */

            *have_ack_eliciting = 1;
            tx_helper_unrestrict(h); /* no longer need PING */
            stream->txp_sent_stop_sending = 1;
        }

        if (stream->want_reset_stream) {
            OSSL_QUIC_FRAME_RESET_STREAM f;

            if (!ossl_assert(stream->send_state == QUIC_SSTREAM_STATE_RESET_SENT))
                return 0;

            wpkt = tx_helper_begin(h);
            if (wpkt == NULL)
                return 0; /* alloc error */

            f.stream_id = stream->id;
            f.app_error_code = stream->reset_stream_aec;
            if (!ossl_quic_stream_send_get_final_size(stream, &f.final_size))
                return 0; /* should not be possible */

            if (!ossl_quic_wire_encode_frame_reset_stream(wpkt, &f)) {
                tx_helper_rollback(h); /* can't fit */
                txp_enlink_tmp(tmp_head, stream);
                break;
            }

            if (!tx_helper_commit(h))
                return 0; /* alloc error */

            *have_ack_eliciting = 1;
            tx_helper_unrestrict(h); /* no longer need PING */
            stream->txp_sent_reset_stream = 1;

            /*
             * The final size of the stream as indicated by RESET_STREAM is used
             * to ensure a consistent view of flow control state by both
             * parties; if we happen to send a RESET_STREAM that consumes more
             * flow control credit, make sure we account for that.
             */
            if (!ossl_assert(f.final_size <= ossl_quic_txfc_get_swm(&stream->txfc)))
                return 0;

            stream->txp_txfc_new_credit_consumed
                = f.final_size - ossl_quic_txfc_get_swm(&stream->txfc);
        }

        /*
         * Stream Flow Control Frames (MAX_STREAM_DATA)
         *
         * RFC 9000 s. 13.3: "An endpoint SHOULD stop sending MAX_STREAM_DATA
         * frames when the receiving part of the stream enters a "Size Known" or
         * "Reset Recvd" state." -- In practice, RECV is the only state
         * in which it makes sense to generate more MAX_STREAM_DATA frames.
         */
        if (stream->recv_state == QUIC_RSTREAM_STATE_RECV
            && (stream->want_max_stream_data
                || ossl_quic_rxfc_has_cwm_changed(&stream->rxfc, 0))) {

            wpkt = tx_helper_begin(h);
            if (wpkt == NULL)
                return 0; /* alloc error */

            cwm = ossl_quic_rxfc_get_cwm(&stream->rxfc);

            if (!ossl_quic_wire_encode_frame_max_stream_data(wpkt, stream->id,
                    cwm)) {
                tx_helper_rollback(h); /* can't fit */
                txp_enlink_tmp(tmp_head, stream);
                break;
            }

            if (!tx_helper_commit(h))
                return 0; /* alloc error */

            *have_ack_eliciting = 1;
            tx_helper_unrestrict(h); /* no longer need PING */
            stream->txp_sent_fc = 1;
        }

        /*
         * Stream Data Frames (STREAM)
         *
         * RFC 9000 s. 3.3: A sender MUST NOT send a STREAM [...] frame for a
         * stream in the "Reset Sent" state [or any terminal state]. We don't
         * send any more STREAM frames if we are sending, have sent, or are
         * planning to send, RESET_STREAM. The other terminal state is Data
         * Recvd, but txp_generate_stream_frames() is guaranteed to generate
         * nothing in this case.
         */
        if (ossl_quic_stream_has_send_buffer(stream)
            && !ossl_quic_stream_send_is_reset(stream)) {
            int packet_full = 0;

            if (!ossl_assert(!stream->want_reset_stream))
                return 0;

            if (!txp_generate_stream_frames(txp, pkt,
                    stream->id, stream->sstream,
                    &stream->txfc,
                    snext,
                    have_ack_eliciting,
                    &packet_full,
                    &stream->txp_txfc_new_credit_consumed,
                    conn_consumed)) {
                /* Fatal error (allocation, etc.) */
                txp_enlink_tmp(tmp_head, stream);
                return 0;
            }
            /* Account this stream's credit against the connection budget. */
            conn_consumed += stream->txp_txfc_new_credit_consumed;

            if (packet_full) {
                txp_enlink_tmp(tmp_head, stream);
                break;
            }
        }

        txp_enlink_tmp(tmp_head, stream);
    }

    return 1;
}
2662
2663
/*
 * Generate the payload of a packet for a single encryption level, serializing
 * frames into pkt in descending order of priority:
 *
 *   HANDSHAKE_DONE, MAX_DATA, MAX_STREAMS_BIDI/UNI, GCR-queued frames
 *   (NEW_CONN_ID, RETIRE_CONN_ID, NEW_TOKEN, PATH_RESPONSE, ...),
 *   pre-token frames (ACK/CONNECTION_CLOSE/PATH_CHALLENGE/PATH_RESPONSE),
 *   CRYPTO, stream-related frames, and finally PING if a probe is needed.
 *
 * chosen_for_conn_close indicates this packet was elected to carry a pending
 * CONNECTION_CLOSE. Fills in pkt->tpkt and its ACKM bookkeeping data.
 *
 * Returns TXP_ERR_SUCCESS, or TXP_ERR_INTERNAL on fatal (e.g. allocation)
 * error, in which case the TXPIM packet is released.
 */
static int txp_generate_for_el(OSSL_QUIC_TX_PACKETISER *txp,
    struct txp_pkt *pkt,
    int chosen_for_conn_close)
{
    int rc = TXP_ERR_SUCCESS;
    const uint32_t enc_level = pkt->h.enc_level;
    const uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level);
    int have_ack_eliciting = 0, done_pre_token = 0;
    const struct archetype_data a = pkt->geom.adata;
    /*
     * Cleared if we encode any non-ACK-eliciting frame type which rules out the
     * packet being a non-inflight frame. This means any non-ACK ACK-eliciting
     * frame, even PADDING frames. ACK eliciting frames always cause a packet to
     * become ineligible for non-inflight treatment so it is not necessary to
     * clear this in cases where have_ack_eliciting is set, as it is ignored in
     * that case.
     */
    int can_be_non_inflight = 1;
    QUIC_CFQ_ITEM *cfq_item;
    QUIC_TXPIM_PKT *tpkt = NULL;
    struct tx_helper *h = &pkt->h;

    /* Maximum PN reached? */
    if (!ossl_quic_pn_valid(txp->next_pn[pn_space]))
        goto fatal_err;

    if (!ossl_assert(pkt->tpkt == NULL))
        goto fatal_err;

    if ((pkt->tpkt = tpkt = ossl_quic_txpim_pkt_alloc(txp->args.txpim)) == NULL)
        goto fatal_err;

    /*
     * Frame Serialization
     * ===================
     *
     * We now serialize frames into the packet in descending order of priority.
     */

    /* HANDSHAKE_DONE (Regenerate) */
    if (a.allow_handshake_done && txp->want_handshake_done
        && tx_helper_get_space_left(h) >= MIN_FRAME_SIZE_HANDSHAKE_DONE) {
        WPACKET *wpkt = tx_helper_begin(h);

        if (wpkt == NULL)
            goto fatal_err;

        if (ossl_quic_wire_encode_frame_handshake_done(wpkt)) {
            tpkt->had_handshake_done_frame = 1;
            have_ack_eliciting = 1;

            if (!tx_helper_commit(h))
                goto fatal_err;

            tx_helper_unrestrict(h); /* no longer need PING */
        } else {
            /* Encoding failure means "can't fit"; drop the attempt. */
            tx_helper_rollback(h);
        }
    }

    /* MAX_DATA (Regenerate) */
    if (a.allow_conn_fc
        && (txp->want_max_data
            || ossl_quic_rxfc_has_cwm_changed(txp->args.conn_rxfc, 0))
        && tx_helper_get_space_left(h) >= MIN_FRAME_SIZE_MAX_DATA) {
        WPACKET *wpkt = tx_helper_begin(h);
        uint64_t cwm = ossl_quic_rxfc_get_cwm(txp->args.conn_rxfc);

        if (wpkt == NULL)
            goto fatal_err;

        if (ossl_quic_wire_encode_frame_max_data(wpkt, cwm)) {
            tpkt->had_max_data_frame = 1;
            have_ack_eliciting = 1;

            if (!tx_helper_commit(h))
                goto fatal_err;

            tx_helper_unrestrict(h); /* no longer need PING */
        } else {
            tx_helper_rollback(h);
        }
    }

    /* MAX_STREAMS_BIDI (Regenerate) */
    if (a.allow_conn_fc
        && (txp->want_max_streams_bidi
            || ossl_quic_rxfc_has_cwm_changed(txp->args.max_streams_bidi_rxfc, 0))
        && tx_helper_get_space_left(h) >= MIN_FRAME_SIZE_MAX_STREAMS_BIDI) {
        WPACKET *wpkt = tx_helper_begin(h);
        uint64_t max_streams
            = ossl_quic_rxfc_get_cwm(txp->args.max_streams_bidi_rxfc);

        if (wpkt == NULL)
            goto fatal_err;

        if (ossl_quic_wire_encode_frame_max_streams(wpkt, /*is_uni=*/0,
                max_streams)) {
            tpkt->had_max_streams_bidi_frame = 1;
            have_ack_eliciting = 1;

            if (!tx_helper_commit(h))
                goto fatal_err;

            tx_helper_unrestrict(h); /* no longer need PING */
        } else {
            tx_helper_rollback(h);
        }
    }

    /* MAX_STREAMS_UNI (Regenerate) */
    if (a.allow_conn_fc
        && (txp->want_max_streams_uni
            || ossl_quic_rxfc_has_cwm_changed(txp->args.max_streams_uni_rxfc, 0))
        && tx_helper_get_space_left(h) >= MIN_FRAME_SIZE_MAX_STREAMS_UNI) {
        WPACKET *wpkt = tx_helper_begin(h);
        uint64_t max_streams
            = ossl_quic_rxfc_get_cwm(txp->args.max_streams_uni_rxfc);

        if (wpkt == NULL)
            goto fatal_err;

        if (ossl_quic_wire_encode_frame_max_streams(wpkt, /*is_uni=*/1,
                max_streams)) {
            tpkt->had_max_streams_uni_frame = 1;
            have_ack_eliciting = 1;

            if (!tx_helper_commit(h))
                goto fatal_err;

            tx_helper_unrestrict(h); /* no longer need PING */
        } else {
            tx_helper_rollback(h);
        }
    }

    /* GCR Frames */
    for (cfq_item = ossl_quic_cfq_get_priority_head(txp->args.cfq, pn_space);
        cfq_item != NULL;
        cfq_item = ossl_quic_cfq_item_get_priority_next(cfq_item, pn_space)) {
        uint64_t frame_type = ossl_quic_cfq_item_get_frame_type(cfq_item);
        const unsigned char *encoded = ossl_quic_cfq_item_get_encoded(cfq_item);
        size_t encoded_len = ossl_quic_cfq_item_get_encoded_len(cfq_item);

        /* Filter by archetype permissions for this frame type. */
        switch (frame_type) {
        case OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID:
            if (!a.allow_new_conn_id)
                continue;
            break;
        case OSSL_QUIC_FRAME_TYPE_RETIRE_CONN_ID:
            if (!a.allow_retire_conn_id)
                continue;
            break;
        case OSSL_QUIC_FRAME_TYPE_NEW_TOKEN:
            if (!a.allow_new_token)
                continue;

            /*
             * NEW_TOKEN frames are handled via GCR, but some
             * Regenerate-strategy frames should come before them (namely
             * ACK, CONNECTION_CLOSE, PATH_CHALLENGE and PATH_RESPONSE). If
             * we find a NEW_TOKEN frame, do these now. If there are no
             * NEW_TOKEN frames in the GCR queue we will handle these below.
             */
            if (!done_pre_token)
                if (txp_generate_pre_token(txp, pkt,
                        chosen_for_conn_close,
                        &can_be_non_inflight))
                    done_pre_token = 1;

            break;
        case OSSL_QUIC_FRAME_TYPE_PATH_RESPONSE:
            if (!a.allow_path_response)
                continue;

            /*
             * RFC 9000 s. 8.2.2: An endpoint MUST expand datagrams that
             * contain a PATH_RESPONSE frame to at least the smallest
             * allowed maximum datagram size of 1200 bytes.
             */
            pkt->force_pad = 1;
            break;
        default:
            if (!a.allow_cfq_other)
                continue;
            break;
        }

        /*
         * If the frame is too big, don't try to schedule any more GCR frames in
         * this packet rather than sending subsequent ones out of order.
         */
        if (encoded_len > tx_helper_get_space_left(h))
            break;

        if (!tx_helper_append_iovec(h, encoded, encoded_len))
            goto fatal_err;

        ossl_quic_txpim_pkt_add_cfq_item(tpkt, cfq_item);

        if (ossl_quic_frame_type_is_ack_eliciting(frame_type)) {
            have_ack_eliciting = 1;
            tx_helper_unrestrict(h); /* no longer need PING */
        }
    }

    /*
     * If we didn't generate ACK, CONNECTION_CLOSE, PATH_CHALLENGE or
     * PATH_RESPONSE (as desired) before, do so now.
     */
    if (!done_pre_token)
        if (txp_generate_pre_token(txp, pkt,
                chosen_for_conn_close,
                &can_be_non_inflight))
            done_pre_token = 1;

    /* CRYPTO Frames */
    if (a.allow_crypto)
        if (!txp_generate_crypto_frames(txp, pkt, &have_ack_eliciting))
            goto fatal_err;

    /* Stream-specific frames */
    if (a.allow_stream_rel && txp->handshake_complete)
        if (!txp_generate_stream_related(txp, pkt,
                &have_ack_eliciting,
                &pkt->stream_head))
            goto fatal_err;

    /* PING */
    tx_helper_unrestrict(h);

    if (!have_ack_eliciting && txp_need_ping(txp, pn_space, &a)) {
        WPACKET *wpkt;

        /* The PING reserve guarantees room was kept for this frame. */
        assert(h->reserve > 0);
        wpkt = tx_helper_begin(h);
        if (wpkt == NULL)
            goto fatal_err;

        if (!ossl_quic_wire_encode_frame_ping(wpkt)
            || !tx_helper_commit(h))
            /*
             * We treat a request to be ACK-eliciting as a requirement, so this
             * is an error.
             */
            goto fatal_err;

        have_ack_eliciting = 1;
    }

    /* PADDING is added by ossl_quic_tx_packetiser_generate(). */

    /*
     * ACKM Data
     * =========
     */
    if (have_ack_eliciting)
        can_be_non_inflight = 0;

    /* ACKM Data */
    tpkt->ackm_pkt.num_bytes = h->bytes_appended + pkt->geom.pkt_overhead;
    tpkt->ackm_pkt.pkt_num = txp->next_pn[pn_space];
    /* largest_acked is set in txp_generate_pre_token */
    tpkt->ackm_pkt.pkt_space = pn_space;
    tpkt->ackm_pkt.is_inflight = !can_be_non_inflight;
    tpkt->ackm_pkt.is_ack_eliciting = have_ack_eliciting;
    tpkt->ackm_pkt.is_pto_probe = 0;
    tpkt->ackm_pkt.is_mtu_probe = 0;
    tpkt->ackm_pkt.time = txp->args.now(txp->args.now_arg);
    tpkt->pkt_type = pkt->phdr.type;

    /* Done. */
    return rc;

fatal_err:
    /*
     * Handler for fatal errors, i.e. errors causing us to abort the entire
     * packet rather than just one frame. Examples of such errors include
     * allocation errors.
     */
    if (tpkt != NULL) {
        ossl_quic_txpim_pkt_release(txp->args.txpim, tpkt);
        pkt->tpkt = NULL;
    }
    return TXP_ERR_INTERNAL;
}
2949
2950
/*
2951
 * Commits and queues a packet for transmission. There is no backing out after
2952
 * this.
2953
 *
2954
 * This:
2955
 *
2956
 *   - Sends the packet to the QTX for encryption and transmission;
2957
 *
2958
 *   - Records the packet as having been transmitted in FIFM. ACKM is informed,
2959
 *     etc. and the TXPIM record is filed.
2960
 *
2961
 *   - Informs various subsystems of frames that were sent and clears frame
2962
 *     wanted flags so that we do not generate the same frames again.
2963
 *
2964
 * Assumptions:
2965
 *
2966
 *   - pkt is a txp_pkt for the correct EL;
2967
 *
2968
 *   - pkt->tpkt is valid;
2969
 *
2970
 *   - pkt->tpkt->ackm_pkt has been fully filled in;
2971
 *
2972
 *   - Stream chunk records have been appended to pkt->tpkt for STREAM and
2973
 *     CRYPTO frames, but not for RESET_STREAM or STOP_SENDING frames;
2974
 *
2975
 *   - The chosen stream list for the packet can be fully walked from
2976
 *     pkt->stream_head using stream->txp_next;
2977
 *
2978
 *   - pkt->has_ack_eliciting is set correctly.
2979
 *
2980
 */
2981
static int txp_pkt_commit(OSSL_QUIC_TX_PACKETISER *txp,
    struct txp_pkt *pkt,
    uint32_t archetype,
    int *txpim_pkt_reffed)
{
    int rc = 1;
    uint32_t enc_level = pkt->h.enc_level;
    uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level);
    QUIC_TXPIM_PKT *tpkt = pkt->tpkt;
    QUIC_STREAM *stream;
    OSSL_QTX_PKT txpkt;
    struct archetype_data a;

    /* Tells the caller whether FIFD/TXPIM took a reference to tpkt. */
    *txpim_pkt_reffed = 0;

    /* Cannot send a packet with an empty payload. */
    if (pkt->h.bytes_appended == 0)
        return 0;

    if (!txp_get_archetype_data(enc_level, archetype, &a))
        return 0;

    /* Packet Information for QTX */
    txpkt.hdr = &pkt->phdr;
    txpkt.iovec = txp->el[enc_level].iovec;
    txpkt.num_iovec = pkt->h.num_iovec;
    txpkt.local = NULL;
    /* AF_UNSPEC peer address means "no explicit destination". */
    txpkt.peer = BIO_ADDR_family(&txp->args.peer) == AF_UNSPEC
        ? NULL
        : &txp->args.peer;
    txpkt.pn = txp->next_pn[pn_space];
    txpkt.flags = OSSL_QTX_PKT_FLAG_COALESCE; /* always try to coalesce */

    /* Generate TXPIM chunks representing STOP_SENDING and RESET_STREAM frames. */
    for (stream = pkt->stream_head; stream != NULL; stream = stream->txp_next)
        if (stream->txp_sent_stop_sending || stream->txp_sent_reset_stream) {
            /* Log STOP_SENDING/RESET_STREAM chunk to TXPIM. */
            QUIC_TXPIM_CHUNK chunk;

            /* start > end marks a chunk carrying no stream payload. */
            chunk.stream_id = stream->id;
            chunk.start = UINT64_MAX;
            chunk.end = 0;
            chunk.has_fin = 0;
            chunk.has_stop_sending = stream->txp_sent_stop_sending;
            chunk.has_reset_stream = stream->txp_sent_reset_stream;
            if (!ossl_quic_txpim_pkt_append_chunk(tpkt, &chunk))
                return 0; /* alloc error */
        }

    /* Dispatch to FIFD. */
    if (!ossl_quic_fifd_pkt_commit(&txp->fifd, tpkt))
        return 0;

    /*
     * Transmission and Post-Packet Generation Bookkeeping
     * ===================================================
     *
     * No backing out anymore - at this point the ACKM has recorded the packet
     * as having been sent, so we need to increment our next PN counter, or
     * the ACKM will complain when we try to record a duplicate packet with
     * the same PN later. At this point actually sending the packet may still
     * fail. In this unlikely event it will simply be handled as though it
     * were a lost packet.
     */
    ++txp->next_pn[pn_space];
    *txpim_pkt_reffed = 1;

    /* Send the packet. */
    if (!ossl_qtx_write_pkt(txp->args.qtx, &txpkt))
        return 0;

    /*
     * Record FC and stream abort frames as sent; deactivate streams which no
     * longer have anything to do.
     */
    for (stream = pkt->stream_head; stream != NULL; stream = stream->txp_next) {
        if (stream->txp_sent_fc) {
            stream->want_max_stream_data = 0;
            ossl_quic_rxfc_has_cwm_changed(&stream->rxfc, 1);
        }

        if (stream->txp_sent_stop_sending)
            stream->want_stop_sending = 0;

        if (stream->txp_sent_reset_stream)
            stream->want_reset_stream = 0;

        if (stream->txp_txfc_new_credit_consumed > 0) {
            if (!ossl_assert(ossl_quic_txfc_consume_credit(&stream->txfc,
                    stream->txp_txfc_new_credit_consumed)))
                /*
                 * Should not be possible, but we should continue with our
                 * bookkeeping as we have already committed the packet to the
                 * FIFD. Just change the value we return.
                 */
                rc = 0;

            stream->txp_txfc_new_credit_consumed = 0;
        }

        /*
         * If we no longer need to generate any flow control (MAX_STREAM_DATA),
         * STOP_SENDING or RESET_STREAM frames, nor any STREAM frames (because
         * the stream is drained of data or TXFC-blocked), we can mark the
         * stream as inactive.
         */
        ossl_quic_stream_map_update_state(txp->args.qsm, stream);

        if (ossl_quic_stream_has_send_buffer(stream)
            && !ossl_quic_sstream_has_pending(stream->sstream)
            && ossl_quic_sstream_get_final_size(stream->sstream, NULL))
            /*
             * Transition to DATA_SENT if stream has a final size and we have
             * sent all data.
             */
            ossl_quic_stream_map_notify_all_data_sent(txp->args.qsm, stream);
    }

    /* We have now sent the packet, so update state accordingly. */
    if (tpkt->ackm_pkt.is_ack_eliciting)
        txp->force_ack_eliciting &= ~(1UL << pn_space);

    if (tpkt->had_handshake_done_frame)
        txp->want_handshake_done = 0;

    if (tpkt->had_max_data_frame) {
        txp->want_max_data = 0;
        ossl_quic_rxfc_has_cwm_changed(txp->args.conn_rxfc, 1);
    }

    if (tpkt->had_max_streams_bidi_frame) {
        txp->want_max_streams_bidi = 0;
        ossl_quic_rxfc_has_cwm_changed(txp->args.max_streams_bidi_rxfc, 1);
    }

    if (tpkt->had_max_streams_uni_frame) {
        txp->want_max_streams_uni = 0;
        ossl_quic_rxfc_has_cwm_changed(txp->args.max_streams_uni_rxfc, 1);
    }

    if (tpkt->had_ack_frame)
        txp->want_ack &= ~(1UL << pn_space);

    if (tpkt->had_conn_close)
        txp->want_conn_close = 0;

    /*
     * Decrement probe request counts if we have sent a packet that meets
     * the requirement of a probe, namely being ACK-eliciting.
     */
    if (tpkt->ackm_pkt.is_ack_eliciting) {
        OSSL_ACKM_PROBE_INFO *probe_info
            = ossl_ackm_get0_probe_request(txp->args.ackm);

        if (enc_level == QUIC_ENC_LEVEL_INITIAL
            && probe_info->anti_deadlock_initial > 0)
            --probe_info->anti_deadlock_initial;

        if (enc_level == QUIC_ENC_LEVEL_HANDSHAKE
            && probe_info->anti_deadlock_handshake > 0)
            --probe_info->anti_deadlock_handshake;

        if (a.allow_force_ack_eliciting /* (i.e., not for 0-RTT) */
            && probe_info->pto[pn_space] > 0)
            --probe_info->pto[pn_space];
    }

    return rc;
}
3150
3151
/* Ensure the iovec array is at least num elements long. */
3152
static int txp_el_ensure_iovec(struct txp_el *el, size_t num)
3153
0
{
3154
0
    OSSL_QTX_IOVEC *iovec;
3155
3156
0
    if (el->alloc_iovec >= num)
3157
0
        return 1;
3158
3159
0
    num = el->alloc_iovec != 0 ? el->alloc_iovec * 2 : 8;
3160
3161
0
    iovec = OPENSSL_realloc_array(el->iovec, num, sizeof(OSSL_QTX_IOVEC));
3162
0
    if (iovec == NULL)
3163
0
        return 0;
3164
3165
0
    el->iovec = iovec;
3166
0
    el->alloc_iovec = num;
3167
0
    return 1;
3168
0
}
3169
3170
int ossl_quic_tx_packetiser_schedule_conn_close(OSSL_QUIC_TX_PACKETISER *txp,
3171
    const OSSL_QUIC_FRAME_CONN_CLOSE *f)
3172
0
{
3173
0
    char *reason = NULL;
3174
0
    size_t reason_len = f->reason_len;
3175
0
    size_t max_reason_len = txp_get_mdpl(txp) / 2;
3176
3177
0
    if (txp->want_conn_close)
3178
0
        return 0;
3179
3180
    /*
3181
     * Arbitrarily limit the length of the reason length string to half of the
3182
     * MDPL.
3183
     */
3184
0
    if (reason_len > max_reason_len)
3185
0
        reason_len = max_reason_len;
3186
3187
0
    if (reason_len > 0) {
3188
0
        reason = OPENSSL_memdup(f->reason, reason_len);
3189
0
        if (reason == NULL)
3190
0
            return 0;
3191
0
    }
3192
3193
0
    txp->conn_close_frame = *f;
3194
0
    txp->conn_close_frame.reason = reason;
3195
0
    txp->conn_close_frame.reason_len = reason_len;
3196
0
    txp->want_conn_close = 1;
3197
0
    return 1;
3198
0
}
3199
3200
void ossl_quic_tx_packetiser_set_msg_callback(OSSL_QUIC_TX_PACKETISER *txp,
3201
    ossl_msg_cb msg_callback,
3202
    SSL *msg_callback_ssl)
3203
0
{
3204
0
    txp->msg_callback = msg_callback;
3205
0
    txp->msg_callback_ssl = msg_callback_ssl;
3206
0
}
3207
3208
/* Set the opaque argument passed through to the message callback. */
void ossl_quic_tx_packetiser_set_msg_callback_arg(OSSL_QUIC_TX_PACKETISER *txp,
    void *msg_callback_arg)
{
    txp->msg_callback_arg = msg_callback_arg;
}
3213
3214
/*
 * Report the next packet number that will be used in the given PN space.
 * Returns UINT64_MAX if pn_space is out of range.
 */
QUIC_PN ossl_quic_tx_packetiser_get_next_pn(OSSL_QUIC_TX_PACKETISER *txp,
    uint32_t pn_space)
{
    return pn_space < QUIC_PN_SPACE_NUM
        ? txp->next_pn[pn_space]
        : UINT64_MAX;
}
3222
3223
OSSL_TIME ossl_quic_tx_packetiser_get_deadline(OSSL_QUIC_TX_PACKETISER *txp)
{
    /*
     * TXP-specific deadline computations which rely on TXP innards. This is in
     * turn relied on by the QUIC_CHANNEL code to determine the channel event
     * handling deadline.
     */
    OSSL_TIME earliest = ossl_time_infinite();
    uint32_t el;

    /*
     * ACK generation is not CC-gated - packets containing only ACKs are
     * allowed to bypass CC. We want to generate ACK frames even if we are
     * currently restricted by CC so the peer knows we have received data. The
     * generate call will take care of selecting the correct packet archetype.
     */
    for (el = QUIC_ENC_LEVEL_INITIAL; el < QUIC_ENC_LEVEL_NUM; ++el) {
        uint32_t pn_space;
        OSSL_TIME ack_deadline;

        if (!ossl_qtx_is_enc_level_provisioned(txp->args.qtx, el))
            continue;

        pn_space = ossl_quic_enc_level_to_pn_space(el);
        ack_deadline = ossl_ackm_get_ack_deadline(txp->args.ackm, pn_space);
        earliest = ossl_time_min(earliest, ack_deadline);
    }

    /* When will CC let us send more? */
    if (txp->args.cc_method->get_tx_allowance(txp->args.cc_data) == 0) {
        OSSL_TIME cc_wakeup
            = txp->args.cc_method->get_wakeup_deadline(txp->args.cc_data);

        earliest = ossl_time_min(earliest, cc_wakeup);
    }

    return earliest;
}