Coverage Report

Created: 2025-11-29 06:45

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/h2o/lib/http3/server.c
Line
Count
Source
1
/*
2
 * Copyright (c) 2018 Fastly, Kazuho Oku
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a copy
5
 * of this software and associated documentation files (the "Software"), to
6
 * deal in the Software without restriction, including without limitation the
7
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
8
 * sell copies of the Software, and to permit persons to whom the Software is
9
 * furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice shall be included in
12
 * all copies or substantial portions of the Software.
13
 *
14
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20
 * IN THE SOFTWARE.
21
 */
22
#include <sys/socket.h>
23
#include "khash.h"
24
#include "h2o/absprio.h"
25
#include "h2o/http3_common.h"
26
#include "h2o/http3_server.h"
27
#include "h2o/http3_internal.h"
28
#include "./../probes_.h"
29
30
/**
31
 * the scheduler
32
 */
33
struct st_h2o_http3_req_scheduler_t {
34
    struct {
35
        struct {
36
            h2o_linklist_t high;
37
            h2o_linklist_t low;
38
        } urgencies[H2O_ABSPRIO_NUM_URGENCY_LEVELS];
39
        size_t smallest_urgency;
40
    } active;
41
    h2o_linklist_t conn_blocked;
42
};
43
44
/**
45
 *
46
 */
47
struct st_h2o_http3_req_scheduler_node_t {
48
    h2o_linklist_t link;
49
    h2o_absprio_t priority;
50
    uint64_t call_cnt;
51
};
52
53
/**
54
 * callback used to compare precedence of the entries within the same urgency level (e.g., by comparing stream IDs)
55
 */
56
typedef int (*h2o_http3_req_scheduler_compare_cb)(struct st_h2o_http3_req_scheduler_t *sched,
57
                                                  const struct st_h2o_http3_req_scheduler_node_t *x,
58
                                                  const struct st_h2o_http3_req_scheduler_node_t *y);
59
60
/**
61
 * Once the size of the request body being received exceeds this limit, streaming mode will be used (if possible), and the
62
 * concurrency of such requests would be limited to one per connection. This is set to 1 to avoid blocking requests that send
63
 * small payloads without a FIN as well as to have parity with http2.
64
 */
65
0
#define H2O_HTTP3_REQUEST_BODY_MIN_BYTES_TO_BLOCK 1
66
67
enum h2o_http3_server_stream_state {
68
    /**
69
     * receiving headers
70
     */
71
    H2O_HTTP3_SERVER_STREAM_STATE_RECV_HEADERS,
72
    /**
73
     * receiving request body (runs concurrently)
74
     */
75
    H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BEFORE_BLOCK,
76
    /**
77
     * blocked, waiting to be unblocked one by one (either in streaming mode or in non-streaming mode)
78
     */
79
    H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BLOCKED,
80
    /**
81
     * in non-streaming mode, receiving body
82
     */
83
    H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_UNBLOCKED,
84
    /**
85
     * in non-streaming mode, waiting for the request to be processed
86
     */
87
    H2O_HTTP3_SERVER_STREAM_STATE_REQ_PENDING,
88
    /**
89
     * request has been processed, waiting for the response headers
90
     */
91
    H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS,
92
    /**
93
     * sending body (the generator MAY have closed, but the transmission to the client is still ongoing)
94
     */
95
    H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY,
96
    /**
97
     * all data has been sent and ACKed, waiting for the transport stream to close (req might be disposed when entering this state)
98
     */
99
    H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT
100
};
101
102
struct st_h2o_http3_server_stream_t;
103
KHASH_MAP_INIT_INT64(stream, struct st_h2o_http3_server_stream_t *)
104
105
struct st_h2o_http3_server_conn_t {
106
    h2o_conn_t super;
107
    h2o_http3_conn_t h3;
108
    ptls_handshake_properties_t handshake_properties;
109
    /**
110
     * link-list of pending requests using st_h2o_http3_server_stream_t::link
111
     */
112
    struct {
113
        /**
114
         * holds streams in RECV_BODY_BLOCKED state. They are promoted one by one to the POST_BLOCK State.
115
         */
116
        h2o_linklist_t recv_body_blocked;
117
        /**
118
         * holds streams that are in request streaming mode.
119
         */
120
        h2o_linklist_t req_streaming;
121
        /**
122
         * holds streams in REQ_PENDING state or RECV_BODY_POST_BLOCK state (that is using streaming; i.e., write_req.cb != NULL).
123
         */
124
        h2o_linklist_t pending;
125
    } delayed_streams;
126
    /**
127
     * responses blocked by SETTINGS frame yet to arrive (e.g., CONNECT-UDP requests waiting for SETTINGS to see if
128
 * datagram-flow-id can be sent). There is no separate state for streams linked here, because these streams are technically
129
     * indifferent from those that are currently queued by the filters after `h2o_send` is called.
130
     */
131
    h2o_linklist_t streams_resp_settings_blocked;
132
    /**
133
     * next application-level timeout
134
     */
135
    h2o_timer_t timeout;
136
    /**
137
     * counter (the order MUST match that of h2o_http3_server_stream_state; it is accessed by index via the use of counters[])
138
     */
139
    union {
140
        struct {
141
            uint32_t recv_headers;
142
            uint32_t recv_body_before_block;
143
            uint32_t recv_body_blocked;
144
            uint32_t recv_body_unblocked;
145
            uint32_t req_pending;
146
            uint32_t send_headers;
147
            uint32_t send_body;
148
            uint32_t close_wait;
149
        };
150
        uint32_t counters[1];
151
    } num_streams;
152
    /**
153
     * Number of streams that is request streaming. The state can be in either one of SEND_HEADERS, SEND_BODY, CLOSE_WAIT.
154
     */
155
    uint32_t num_streams_req_streaming;
156
    /**
157
     * number of streams in tunneling mode
158
     */
159
    uint32_t num_streams_tunnelling;
160
    /**
161
     * scheduler
162
     */
163
    struct {
164
        /**
165
         * States for request streams.
166
         */
167
        struct st_h2o_http3_req_scheduler_t reqs;
168
        /**
169
         * States for unidirectional streams. Each element is a bit vector where slot for each stream is defined as: 1 << stream_id.
170
         */
171
        struct {
172
            uint16_t active;
173
            uint16_t conn_blocked;
174
        } uni;
175
    } scheduler;
176
    /**
177
     * stream map used for datagram flows
178
 * TODO: Get rid of this structure once we drop support for masque draft-03; RFC 9297 uses quarter stream ID instead of
179
     * dynamically mapping streams with flow IDs.
180
     */
181
    khash_t(stream) * datagram_flows;
182
    /**
183
     * the earliest moment (in terms of max_data.sent) when the next resumption token can be sent
184
     */
185
    uint64_t skip_jumpstart_token_until;
186
    /**
187
     * timeout entry used for graceful shutdown
188
     */
189
    h2o_timer_t _graceful_shutdown_timeout;
190
};
191
192
/**
193
 * sendvec, with additional field that contains the starting offset of the content
194
 */
195
struct st_h2o_http3_server_sendvec_t {
196
    h2o_sendvec_t vec;
197
    /**
198
     * Starting offset of the content carried by the vector, or UINT64_MAX if it is not carrying body
199
     */
200
    uint64_t entity_offset;
201
};
202
203
struct st_h2o_http3_server_stream_t {
204
    quicly_stream_t *quic;
205
    struct {
206
        h2o_buffer_t *buf;
207
        quicly_error_t (*handle_input)(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src, const uint8_t *src_end,
208
                                       int in_generator, const char **err_desc);
209
        uint64_t bytes_left_in_data_frame;
210
    } recvbuf;
211
    struct {
212
        H2O_VECTOR(struct st_h2o_http3_server_sendvec_t) vecs;
213
        size_t off_within_first_vec;
214
        size_t min_index_to_addref;
215
        uint64_t final_size, final_body_size;
216
        uint8_t data_frame_header_buf[9];
217
    } sendbuf;
218
    enum h2o_http3_server_stream_state state;
219
    h2o_linklist_t link;
220
    h2o_linklist_t link_resp_settings_blocked;
221
    h2o_ostream_t ostr_final;
222
    struct st_h2o_http3_req_scheduler_node_t scheduler;
223
    /**
224
     * if read is blocked
225
     */
226
    uint8_t read_blocked : 1;
227
    /**
228
     * if h2o_proceed_response has been invoked, or if the invocation has been requested
229
     */
230
    uint8_t proceed_requested : 1;
231
    /**
232
     * this flag is set by on_send_emit, triggers the invocation h2o_proceed_response in scheduler_do_send, used by do_send to
233
     * take different actions based on if it has been called while scheduler_do_send is running.
234
     */
235
    uint8_t proceed_while_sending : 1;
236
    /**
237
     * if a PRIORITY_UPDATE frame has been received
238
     */
239
    uint8_t received_priority_update : 1;
240
    /**
241
     * used in CLOSE_WAIT state to determine if h2o_dispose_request has been called
242
     */
243
    uint8_t req_disposed : 1;
244
    /**
245
     * indicates if the request is in streaming mode
246
     */
247
    uint8_t req_streaming : 1;
248
    /**
249
     * buffer to hold the request body (or a chunk of, if in streaming mode), or CONNECT payload
250
     */
251
    h2o_buffer_t *req_body;
252
    /**
253
     * flow ID used by masque over H3_DATAGRAMS
254
     */
255
    uint64_t datagram_flow_id;
256
    /**
257
     * the request. Placed at the end, as it holds the pool.
258
     */
259
    h2o_req_t req;
260
};
261
262
static int foreach_request(h2o_conn_t *_conn, int (*cb)(h2o_req_t *req, void *cbdata), void *cbdata);
263
static void initiate_graceful_shutdown(h2o_conn_t *_conn);
264
static void close_idle_connection(h2o_conn_t *_conn);
265
static void on_stream_destroy(quicly_stream_t *qs, quicly_error_t err);
266
static quicly_error_t handle_input_post_trailers(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src,
267
                                                 const uint8_t *src_end, int in_generator, const char **err_desc);
268
static quicly_error_t handle_input_expect_data(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src,
269
                                               const uint8_t *src_end, int in_generator, const char **err_desc);
270
271
static const h2o_sendvec_callbacks_t self_allocated_vec_callbacks = {h2o_sendvec_read_raw, NULL},
272
                                     immutable_vec_callbacks = {h2o_sendvec_read_raw, NULL};
273
274
static int sendvec_size_is_for_recycle(size_t size)
275
1.71k
{
276
1.71k
    if (h2o_socket_ssl_buffer_allocator.conf->memsize / 2 <= size && size <= h2o_socket_ssl_buffer_allocator.conf->memsize)
277
0
        return 1;
278
1.71k
    return 0;
279
1.71k
}
280
281
static void dispose_sendvec(struct st_h2o_http3_server_sendvec_t *vec)
282
4.42k
{
283
4.42k
    if (vec->vec.callbacks == &self_allocated_vec_callbacks) {
284
858
        if (sendvec_size_is_for_recycle(vec->vec.len)) {
285
0
            h2o_mem_free_recycle(&h2o_socket_ssl_buffer_allocator, vec->vec.raw);
286
858
        } else {
287
858
            free(vec->vec.raw);
288
858
        }
289
858
    }
290
4.42k
}
291
292
static void req_scheduler_init(struct st_h2o_http3_req_scheduler_t *sched)
293
6.29k
{
294
6.29k
    size_t i;
295
296
56.6k
    for (i = 0; i < H2O_ABSPRIO_NUM_URGENCY_LEVELS; ++i) {
297
50.3k
        h2o_linklist_init_anchor(&sched->active.urgencies[i].high);
298
50.3k
        h2o_linklist_init_anchor(&sched->active.urgencies[i].low);
299
50.3k
    }
300
6.29k
    sched->active.smallest_urgency = i;
301
6.29k
    h2o_linklist_init_anchor(&sched->conn_blocked);
302
6.29k
}
303
304
static void req_scheduler_activate(struct st_h2o_http3_req_scheduler_t *sched, struct st_h2o_http3_req_scheduler_node_t *node,
305
                                   h2o_http3_req_scheduler_compare_cb comp)
306
1.57k
{
307
    /* unlink if necessary */
308
1.57k
    if (h2o_linklist_is_linked(&node->link))
309
6
        h2o_linklist_unlink(&node->link);
310
311
1.57k
    if (!node->priority.incremental || node->call_cnt == 0) {
312
        /* non-incremental streams and the first emission of incremental streams go in strict order */
313
1.56k
        h2o_linklist_t *anchor = &sched->active.urgencies[node->priority.urgency].high, *pos;
314
1.56k
        for (pos = anchor->prev; pos != anchor; pos = pos->prev) {
315
0
            struct st_h2o_http3_req_scheduler_node_t *node_at_pos =
316
0
                H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_req_scheduler_node_t, link, pos);
317
0
            if (comp(sched, node_at_pos, node) < 0)
318
0
                break;
319
0
        }
320
1.56k
        h2o_linklist_insert(pos->next, &node->link);
321
1.56k
    } else {
322
        /* once sent, incremental streams go into a lower list */
323
6
        h2o_linklist_insert(&sched->active.urgencies[node->priority.urgency].low, &node->link);
324
6
    }
325
326
    /* book keeping */
327
1.57k
    if (node->priority.urgency < sched->active.smallest_urgency)
328
1.56k
        sched->active.smallest_urgency = node->priority.urgency;
329
1.57k
}
330
331
static void req_scheduler_update_smallest_urgency_post_removal(struct st_h2o_http3_req_scheduler_t *sched, size_t changed)
332
8.54k
{
333
8.54k
    if (sched->active.smallest_urgency < changed)
334
0
        return;
335
336
    /* search from the location that *might* have changed */
337
8.54k
    sched->active.smallest_urgency = changed;
338
42.5k
    while (h2o_linklist_is_empty(&sched->active.urgencies[sched->active.smallest_urgency].high) &&
339
42.5k
           h2o_linklist_is_empty(&sched->active.urgencies[sched->active.smallest_urgency].low)) {
340
42.5k
        ++sched->active.smallest_urgency;
341
42.5k
        if (sched->active.smallest_urgency >= H2O_ABSPRIO_NUM_URGENCY_LEVELS)
342
8.54k
            break;
343
42.5k
    }
344
8.54k
}
345
346
static void req_scheduler_deactivate(struct st_h2o_http3_req_scheduler_t *sched, struct st_h2o_http3_req_scheduler_node_t *node)
347
8.54k
{
348
8.54k
    if (h2o_linklist_is_linked(&node->link))
349
1.56k
        h2o_linklist_unlink(&node->link);
350
351
8.54k
    req_scheduler_update_smallest_urgency_post_removal(sched, node->priority.urgency);
352
8.54k
}
353
354
static void req_scheduler_setup_for_next(struct st_h2o_http3_req_scheduler_t *sched, struct st_h2o_http3_req_scheduler_node_t *node,
355
                                         h2o_http3_req_scheduler_compare_cb comp)
356
429
{
357
429
    assert(h2o_linklist_is_linked(&node->link));
358
359
    /* reschedule to achieve round-robin behavior */
360
429
    if (node->priority.incremental)
361
6
        req_scheduler_activate(sched, node, comp);
362
429
}
363
364
static void req_scheduler_conn_blocked(struct st_h2o_http3_req_scheduler_t *sched, struct st_h2o_http3_req_scheduler_node_t *node)
365
0
{
366
0
    if (h2o_linklist_is_linked(&node->link))
367
0
        h2o_linklist_unlink(&node->link);
368
369
0
    h2o_linklist_insert(&sched->conn_blocked, &node->link);
370
371
0
    req_scheduler_update_smallest_urgency_post_removal(sched, node->priority.urgency);
372
0
}
373
374
static void req_scheduler_unblock_conn_blocked(struct st_h2o_http3_req_scheduler_t *sched, h2o_http3_req_scheduler_compare_cb comp)
375
0
{
376
0
    while (!h2o_linklist_is_empty(&sched->conn_blocked)) {
377
0
        struct st_h2o_http3_req_scheduler_node_t *node =
378
0
            H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_req_scheduler_node_t, link, sched->conn_blocked.next);
379
0
        req_scheduler_activate(sched, node, comp);
380
0
    }
381
0
}
382
383
static int req_scheduler_compare_stream_id(struct st_h2o_http3_req_scheduler_t *sched,
384
                                           const struct st_h2o_http3_req_scheduler_node_t *x,
385
                                           const struct st_h2o_http3_req_scheduler_node_t *y)
386
0
{
387
0
    struct st_h2o_http3_server_stream_t *sx = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, scheduler, x),
388
0
                                        *sy = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, scheduler, y);
389
0
    if (sx->quic->stream_id < sy->quic->stream_id) {
390
0
        return -1;
391
0
    } else if (sx->quic->stream_id > sy->quic->stream_id) {
392
0
        return 1;
393
0
    } else {
394
0
        return 0;
395
0
    }
396
0
}
397
398
static struct st_h2o_http3_server_conn_t *get_conn(struct st_h2o_http3_server_stream_t *stream)
399
84.3k
{
400
84.3k
    return (void *)stream->req.conn;
401
84.3k
}
402
403
static uint32_t *get_state_counter(struct st_h2o_http3_server_conn_t *conn, enum h2o_http3_server_stream_state state)
404
26.1k
{
405
26.1k
    return conn->num_streams.counters + (size_t)state;
406
26.1k
}
407
408
static void handle_priority_change(struct st_h2o_http3_server_stream_t *stream, const char *value, size_t len, h2o_absprio_t base)
409
0
{
410
0
    int reactivate = 0;
411
412
0
    if (h2o_linklist_is_linked(&stream->scheduler.link)) {
413
0
        req_scheduler_deactivate(&get_conn(stream)->scheduler.reqs, &stream->scheduler);
414
0
        reactivate = 1;
415
0
    }
416
417
    /* update priority, using provided value as the base */
418
0
    stream->scheduler.priority = base;
419
0
    h2o_absprio_parse_priority(value, len, &stream->scheduler.priority);
420
421
0
    if (reactivate)
422
0
        req_scheduler_activate(&get_conn(stream)->scheduler.reqs, &stream->scheduler, req_scheduler_compare_stream_id);
423
0
}
424
425
static void tunnel_on_udp_read(h2o_req_t *_req, h2o_iovec_t *datagrams, size_t num_datagrams)
426
0
{
427
0
    struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, req, _req);
428
0
    h2o_http3_send_h3_datagrams(&get_conn(stream)->h3, stream->datagram_flow_id, datagrams, num_datagrams);
429
0
}
430
431
static void request_run_delayed(struct st_h2o_http3_server_conn_t *conn)
432
776
{
433
776
    if (!h2o_timer_is_linked(&conn->timeout))
434
776
        h2o_timer_link(conn->super.ctx->loop, 0, &conn->timeout);
435
776
}
436
437
static void check_run_blocked(struct st_h2o_http3_server_conn_t *conn)
438
0
{
439
0
    if (conn->num_streams.recv_body_unblocked + conn->num_streams_req_streaming <
440
0
            conn->super.ctx->globalconf->http3.max_concurrent_streaming_requests_per_connection &&
441
0
        !h2o_linklist_is_empty(&conn->delayed_streams.recv_body_blocked))
442
0
        request_run_delayed(conn);
443
0
}
444
445
static void pre_dispose_request(struct st_h2o_http3_server_stream_t *stream)
446
6.29k
{
447
6.29k
    struct st_h2o_http3_server_conn_t *conn = get_conn(stream);
448
6.29k
    size_t i;
449
450
    /* release vectors */
451
8.57k
    for (i = 0; i != stream->sendbuf.vecs.size; ++i)
452
2.28k
        dispose_sendvec(stream->sendbuf.vecs.entries + i);
453
454
    /* dispose request body buffer */
455
6.29k
    if (stream->req_body != NULL)
456
456
        h2o_buffer_dispose(&stream->req_body);
457
458
    /* clean up request streaming */
459
6.29k
    if (stream->req_streaming && !stream->req.is_tunnel_req) {
460
0
        assert(conn->num_streams_req_streaming != 0);
461
0
        stream->req_streaming = 0;
462
0
        --conn->num_streams_req_streaming;
463
0
        check_run_blocked(conn);
464
0
    }
465
466
    /* remove stream from datagram flow list */
467
6.29k
    if (stream->datagram_flow_id != UINT64_MAX) {
468
671
        khiter_t iter = kh_get(stream, conn->datagram_flows, stream->datagram_flow_id);
469
        /* it's possible the tunnel wasn't established yet */
470
671
        if (iter != kh_end(conn->datagram_flows))
471
0
            kh_del(stream, conn->datagram_flows, iter);
472
671
    }
473
474
6.29k
    if (stream->req.is_tunnel_req)
475
224
        --get_conn(stream)->num_streams_tunnelling;
476
6.29k
}
477
478
static void set_state(struct st_h2o_http3_server_stream_t *stream, enum h2o_http3_server_stream_state state, int in_generator)
479
6.76k
{
480
6.76k
    struct st_h2o_http3_server_conn_t *conn = get_conn(stream);
481
6.76k
    enum h2o_http3_server_stream_state old_state = stream->state;
482
483
6.76k
    H2O_PROBE_CONN(H3S_STREAM_SET_STATE, &conn->super, stream->quic->stream_id, (unsigned)state);
484
485
6.76k
    --*get_state_counter(conn, old_state);
486
6.76k
    stream->state = state;
487
6.76k
    ++*get_state_counter(conn, stream->state);
488
489
6.76k
    switch (state) {
490
0
    case H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BLOCKED:
491
0
        assert(conn->delayed_streams.recv_body_blocked.prev == &stream->link || !"stream is not registered to the recv_body list?");
492
0
        break;
493
1.81k
    case H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT: {
494
1.81k
        if (h2o_linklist_is_linked(&stream->link))
495
0
            h2o_linklist_unlink(&stream->link);
496
1.81k
        pre_dispose_request(stream);
497
1.81k
        if (!in_generator) {
498
1.81k
            h2o_dispose_request(&stream->req);
499
1.81k
            stream->req_disposed = 1;
500
1.81k
        }
501
1.81k
        static const quicly_stream_callbacks_t close_wait_callbacks = {on_stream_destroy,
502
1.81k
                                                                       quicly_stream_noop_on_send_shift,
503
1.81k
                                                                       quicly_stream_noop_on_send_emit,
504
1.81k
                                                                       quicly_stream_noop_on_send_stop,
505
1.81k
                                                                       quicly_stream_noop_on_receive,
506
1.81k
                                                                       quicly_stream_noop_on_receive_reset};
507
1.81k
        stream->quic->callbacks = &close_wait_callbacks;
508
1.81k
    } break;
509
4.94k
    default:
510
4.94k
        break;
511
6.76k
    }
512
6.76k
}
513
514
/**
515
 * Shutdowns a stream. Note that a request stream should not be shut down until receiving some QUIC frame that refers to that
516
 * stream, but we might have created stream state due to receiving a PRIORITY_UPDATE frame prior to that (see
517
 * handle_priority_update_frame).
518
 */
519
static void shutdown_stream(struct st_h2o_http3_server_stream_t *stream, quicly_error_t stop_sending_code,
520
                            quicly_error_t reset_code, int in_generator, int reset_only_if_open)
521
1.01k
{
522
1.01k
    assert(stream->state < H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT);
523
1.01k
    if (quicly_stream_has_receive_side(0, stream->quic->stream_id)) {
524
        /* send STOP_SENDING unless RESET_STREAM was received; we send STOP_SENDING even if all data up to EOS have been received,
525
         * as it is allowed and might be beneficial in case ACKs are lost */
526
1.01k
        if (!(quicly_recvstate_transfer_complete(&stream->quic->recvstate) && stream->quic->recvstate.eos == UINT64_MAX))
527
1.01k
            quicly_request_stop(stream->quic, stop_sending_code);
528
1.01k
        if (h2o_linklist_is_linked(&stream->link))
529
0
            h2o_linklist_unlink(&stream->link);
530
1.01k
    }
531
1.01k
    if (reset_only_if_open && quicly_stream_has_send_side(0, stream->quic->stream_id) &&
532
0
        !quicly_sendstate_is_open(&stream->quic->sendstate)) {
533
0
        if (stream->state < H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY)
534
0
            set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY, in_generator);
535
1.01k
    } else {
536
1.01k
        if (quicly_stream_has_send_side(0, stream->quic->stream_id) &&
537
1.01k
            !quicly_sendstate_transfer_complete(&stream->quic->sendstate))
538
1.01k
            quicly_reset_stream(stream->quic, reset_code);
539
1.01k
        set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT, in_generator);
540
1.01k
    }
541
1.01k
}
542
543
static socklen_t get_sockname(h2o_conn_t *_conn, struct sockaddr *sa)
544
0
{
545
0
    struct st_h2o_http3_server_conn_t *conn = (void *)_conn;
546
0
    struct sockaddr *src = quicly_get_sockname(conn->h3.super.quic);
547
0
    socklen_t len = src->sa_family == AF_UNSPEC ? sizeof(struct sockaddr) : quicly_get_socklen(src);
548
0
    memcpy(sa, src, len);
549
0
    return len;
550
0
}
551
552
static socklen_t get_peername(h2o_conn_t *_conn, struct sockaddr *sa)
553
490
{
554
490
    struct st_h2o_http3_server_conn_t *conn = (void *)_conn;
555
490
    struct sockaddr *src = quicly_get_peername(conn->h3.super.quic);
556
490
    socklen_t len = quicly_get_socklen(src);
557
490
    memcpy(sa, src, len);
558
490
    return len;
559
490
}
560
561
static ptls_t *get_ptls(h2o_conn_t *_conn)
562
490
{
563
490
    struct st_h2o_http3_server_conn_t *conn = (void *)_conn;
564
490
    return quicly_get_tls(conn->h3.super.quic);
565
490
}
566
567
static const char *get_ssl_server_name(h2o_conn_t *conn)
568
0
{
569
0
    ptls_t *ptls = get_ptls(conn);
570
0
    return ptls_get_server_name(ptls);
571
0
}
572
573
static ptls_log_conn_state_t *log_state(h2o_conn_t *conn)
574
0
{
575
0
    ptls_t *ptls = get_ptls(conn);
576
0
    return ptls_get_log_state(ptls);
577
0
}
578
579
static uint64_t get_req_id(h2o_req_t *req)
580
0
{
581
0
    struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, req, req);
582
0
    return stream->quic->stream_id;
583
0
}
584
585
static uint32_t num_reqs_inflight(h2o_conn_t *_conn)
586
0
{
587
0
    struct st_h2o_http3_server_conn_t *conn = (void *)_conn;
588
0
    return quicly_num_streams_by_group(conn->h3.super.quic, 0, 0);
589
0
}
590
591
static quicly_tracer_t *get_tracer(h2o_conn_t *_conn)
592
0
{
593
0
    struct st_h2o_http3_server_conn_t *conn = (void *)_conn;
594
0
    return quicly_get_tracer(conn->h3.super.quic);
595
0
}
596
597
static h2o_iovec_t log_extensible_priorities(h2o_req_t *_req)
598
0
{
599
0
    struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, req, _req);
600
0
    char *buf = h2o_mem_alloc_pool(&stream->req.pool, char, sizeof("u=" H2O_UINT8_LONGEST_STR ",i=?1"));
601
0
    int len =
602
0
        sprintf(buf, "u=%" PRIu8 "%s", stream->scheduler.priority.urgency, stream->scheduler.priority.incremental ? ",i=?1" : "");
603
0
    return h2o_iovec_init(buf, len);
604
0
}
605
606
static h2o_iovec_t log_cc_name(h2o_req_t *req)
607
0
{
608
0
    struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn;
609
0
    quicly_stats_t stats;
610
611
0
    if (quicly_get_stats(conn->h3.super.quic, &stats) == 0)
612
0
        return h2o_iovec_init(stats.cc.type->name, strlen(stats.cc.type->name));
613
0
    return h2o_iovec_init(NULL, 0);
614
0
}
615
616
static h2o_iovec_t log_delivery_rate(h2o_req_t *req)
617
0
{
618
0
    struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn;
619
0
    quicly_rate_t rate;
620
621
0
    if (quicly_get_delivery_rate(conn->h3.super.quic, &rate) == 0 && rate.latest != 0) {
622
0
        char *buf = h2o_mem_alloc_pool(&req->pool, char, sizeof(H2O_UINT64_LONGEST_STR));
623
0
        size_t len = sprintf(buf, "%" PRIu64, rate.latest);
624
0
        return h2o_iovec_init(buf, len);
625
0
    }
626
627
0
    return h2o_iovec_init(NULL, 0);
628
0
}
629
630
static h2o_iovec_t log_tls_protocol_version(h2o_req_t *_req)
631
0
{
632
0
    return h2o_iovec_init(H2O_STRLIT("TLSv1.3"));
633
0
}
634
635
static h2o_iovec_t log_session_reused(h2o_req_t *req)
636
0
{
637
0
    struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn;
638
0
    ptls_t *tls = quicly_get_tls(conn->h3.super.quic);
639
0
    return ptls_is_psk_handshake(tls) ? h2o_iovec_init(H2O_STRLIT("1")) : h2o_iovec_init(H2O_STRLIT("0"));
640
0
}
641
642
static h2o_iovec_t log_cipher(h2o_req_t *req)
643
0
{
644
0
    struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn;
645
0
    ptls_t *tls = quicly_get_tls(conn->h3.super.quic);
646
0
    ptls_cipher_suite_t *cipher = ptls_get_cipher(tls);
647
0
    return cipher != NULL ? h2o_iovec_init(cipher->name, strlen(cipher->name)) : h2o_iovec_init(NULL, 0);
648
0
}
649
650
static h2o_iovec_t log_cipher_bits(h2o_req_t *req)
651
0
{
652
0
    struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn;
653
0
    ptls_t *tls = quicly_get_tls(conn->h3.super.quic);
654
0
    ptls_cipher_suite_t *cipher = ptls_get_cipher(tls);
655
0
    if (cipher == NULL)
656
0
        return h2o_iovec_init(NULL, 0);
657
658
0
    char *buf = h2o_mem_alloc_pool(&req->pool, char, sizeof(H2O_UINT16_LONGEST_STR));
659
0
    return h2o_iovec_init(buf, sprintf(buf, "%" PRIu16, (uint16_t)(cipher->aead->key_size * 8)));
660
0
}
661
662
static h2o_iovec_t log_session_id(h2o_req_t *_req)
663
0
{
664
    /* FIXME */
665
0
    return h2o_iovec_init(NULL, 0);
666
0
}
667
668
static h2o_iovec_t log_negotiated_protocol(h2o_req_t *req)
669
0
{
670
0
    struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn;
671
0
    ptls_t *tls = quicly_get_tls(conn->h3.super.quic);
672
0
    const char *proto = ptls_get_negotiated_protocol(tls);
673
0
    return proto != NULL ? h2o_iovec_init(proto, strlen(proto)) : h2o_iovec_init(NULL, 0);
674
0
}
675
676
static h2o_iovec_t log_ech_config_id(h2o_req_t *req)
677
0
{
678
0
    struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn;
679
0
    ptls_t *tls = quicly_get_tls(conn->h3.super.quic);
680
0
    uint8_t config_id;
681
682
0
    if (ptls_is_ech_handshake(tls, &config_id, NULL, NULL)) {
683
0
        char *s = h2o_mem_alloc_pool(&req->pool, char, sizeof(H2O_UINT8_LONGEST_STR));
684
0
        size_t len = sprintf(s, "%" PRIu8, config_id);
685
0
        return h2o_iovec_init(s, len);
686
0
    } else {
687
0
        return h2o_iovec_init(NULL, 0);
688
0
    }
689
0
}
690
691
static h2o_iovec_t log_ech_kem(h2o_req_t *req)
692
0
{
693
0
    struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn;
694
0
    ptls_t *tls = quicly_get_tls(conn->h3.super.quic);
695
0
    ptls_hpke_kem_t *kem;
696
697
0
    if (ptls_is_ech_handshake(tls, NULL, &kem, NULL)) {
698
0
        return h2o_iovec_init(kem->keyex->name, strlen(kem->keyex->name));
699
0
    } else {
700
0
        return h2o_iovec_init(NULL, 0);
701
0
    }
702
0
}
703
704
static h2o_iovec_t log_ech_cipher(h2o_req_t *req)
705
0
{
706
0
    struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn;
707
0
    ptls_t *tls = quicly_get_tls(conn->h3.super.quic);
708
0
    ptls_hpke_cipher_suite_t *cipher;
709
710
0
    if (ptls_is_ech_handshake(tls, NULL, NULL, &cipher)) {
711
0
        return h2o_iovec_init(cipher->name, strlen(cipher->name));
712
0
    } else {
713
0
        return h2o_iovec_init(NULL, 0);
714
0
    }
715
0
}
716
717
/**
 * Access-log accessor: renders the AEAD key size (in bits) of the ECH cipher-suite as a decimal string allocated from the request
 * pool, or an empty vector when ECH was not used.
 */
static h2o_iovec_t log_ech_cipher_bits(h2o_req_t *req)
{
    struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn;
    ptls_hpke_cipher_suite_t *cipher;

    if (!ptls_is_ech_handshake(quicly_get_tls(conn->h3.super.quic), NULL, NULL, &cipher))
        return h2o_iovec_init(NULL, 0);

    uint16_t bits = (uint16_t)(cipher->aead->key_size * 8);
    char *s = h2o_mem_alloc_pool(&req->pool, char, sizeof(H2O_UINT16_LONGEST_STR));
    return h2o_iovec_init(s, sprintf(s, "%" PRIu16, bits));
}
732
733
/**
 * Access-log accessor: renders the QUIC stream id of the request as a decimal string allocated from the request pool.
 */
static h2o_iovec_t log_stream_id(h2o_req_t *_req)
{
    struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, req, _req);
    char *buf = h2o_mem_alloc_pool(&stream->req.pool, char, sizeof(H2O_UINT64_LONGEST_STR));
    int len = sprintf(buf, "%" PRIu64, stream->quic->stream_id);
    return h2o_iovec_init(buf, len);
}
739
740
/**
 * Access-log accessor: serializes quicly connection statistics as a comma-separated `name=value` list allocated from the request
 * pool, or "-" if the stats could not be obtained. The output buffer is grown (by retrying from `Redo`) until all fields fit; the
 * resulting size is remembered in a thread-local so subsequent invocations usually succeed on the first try.
 */
static h2o_iovec_t log_quic_stats(h2o_req_t *req)
{
/* Appends one `name=value,` pair. `snprintf` returns the would-be length, therefore `len` may exceed `bufsize` on truncation; in
 * that case the buffer size is grown 1.5x and serialization restarts from `Redo`. The previous (too small) pool allocation is
 * simply abandoned — it is reclaimed when the request pool is disposed. */
#define PUSH_FIELD(field, name)                                                                                                    \
    do {                                                                                                                           \
        len += snprintf(buf + len, bufsize - len, name "=%" PRIu64 ",", (uint64_t)stats.field);                                    \
        if (len + 1 > bufsize) {                                                                                                   \
            bufsize = bufsize * 3 / 2;                                                                                             \
            goto Redo;                                                                                                             \
        }                                                                                                                          \
    } while (0);

    struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn;
    quicly_stats_t stats;

    if (quicly_get_stats(conn->h3.super.quic, &stats) != 0)
        return h2o_iovec_init(H2O_STRLIT("-"));

    char *buf;
    size_t len;
    static __thread size_t bufsize = 100; /* this value grows by 1.5x to find an adequate value, and is remembered for future
                                           * invocations */
Redo:
    buf = h2o_mem_alloc_pool(&req->pool, char, bufsize);
    len = 0;

    QUICLY_STATS_FOREACH(PUSH_FIELD);

    /* `len - 1` drops the trailing comma emitted by the last PUSH_FIELD */
    return h2o_iovec_init(buf, len - 1);

#undef PUSH_FIELD
}
771
772
/**
 * Access-log accessor: renders the negotiated QUIC protocol version as a decimal string allocated from the request pool.
 */
static h2o_iovec_t log_quic_version(h2o_req_t *_req)
{
    struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, req, _req);
    char *buf = h2o_mem_alloc_pool(&stream->req.pool, char, sizeof(H2O_UINT32_LONGEST_STR));
    int len = sprintf(buf, "%" PRIu32, quicly_get_protocol_version(stream->quic->conn));
    return h2o_iovec_init(buf, len);
}
778
779
/**
 * quicly `on_destroy` callback for request streams. Releases every per-stream resource (scheduler entry, link-list membership,
 * request object, receive buffer, the stream object itself), then marks the connection IDLE if this was the last request stream.
 * `err` is unused; the reason for destruction has already been handled by the time this callback fires.
 */
void on_stream_destroy(quicly_stream_t *qs, quicly_error_t err)
{
    struct st_h2o_http3_server_stream_t *stream = qs->data;
    struct st_h2o_http3_server_conn_t *conn = get_conn(stream);

    /* There is no need to call `update_conn_state` upon stream destruction, as all the streams transition to CLOSE_WAIT before
     * being destroyed (and it is hard to call `update_conn_state` here, because the number returned by
     * `quicly_num_streams_by_group` is decremented only after returning from this function. */
    --*get_state_counter(conn, stream->state);

    req_scheduler_deactivate(&conn->scheduler.reqs, &stream->scheduler);

    /* unlink from any delayed-streams / settings-blocked list the stream may still be on */
    if (h2o_linklist_is_linked(&stream->link))
        h2o_linklist_unlink(&stream->link);
    if (h2o_linklist_is_linked(&stream->link_resp_settings_blocked))
        h2o_linklist_unlink(&stream->link_resp_settings_blocked);
    /* streams not yet in CLOSE_WAIT have not gone through the normal teardown path; do it now */
    if (stream->state != H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT)
        pre_dispose_request(stream);
    if (!stream->req_disposed)
        h2o_dispose_request(&stream->req);
    /* in case the stream is destroyed before the buffer is fully consumed */
    h2o_buffer_dispose(&stream->recvbuf.buf);

    free(stream);

    uint32_t num_req_streams_incl_self = quicly_num_streams_by_group(conn->h3.super.quic, 0, 0);
    assert(num_req_streams_incl_self > 0 &&
           "during the invocation of the destroy callback, stream count should include the number of the stream being destroyed");
    if (num_req_streams_incl_self == 1)
        h2o_conn_set_state(&conn->super, H2O_CONN_STATE_IDLE);
}
810
811
/**
 * Converts vectors owned by the generator to ones owned by the HTTP/3 implementation, as the former becomes inaccessible once we
 * call `do_proceed`. Starting at `min_index_to_addref`, each vector whose payload is not already owned by this layer is copied
 * into a freshly allocated buffer. Returns 1 (the allocation helpers used here do not fail softly).
 */
static int retain_sendvecs(struct st_h2o_http3_server_stream_t *stream)
{
    for (; stream->sendbuf.min_index_to_addref != stream->sendbuf.vecs.size; ++stream->sendbuf.min_index_to_addref) {
        struct st_h2o_http3_server_sendvec_t *vec = stream->sendbuf.vecs.entries + stream->sendbuf.min_index_to_addref;
        /* every vector should already be in raw (flattened) form by the time it is retained */
        assert(vec->vec.callbacks->read_ == h2o_sendvec_read_raw);
        /* copy only payloads not already owned by us (i.e., neither self-allocated nor immutable) */
        if (!(vec->vec.callbacks == &self_allocated_vec_callbacks || vec->vec.callbacks == &immutable_vec_callbacks)) {
            /* the first vector may have been partially shifted; skip the already-sent prefix */
            size_t off_within_vec = stream->sendbuf.min_index_to_addref == 0 ? stream->sendbuf.off_within_first_vec : 0,
                   newlen = vec->vec.len - off_within_vec;
            void *newbuf = sendvec_size_is_for_recycle(newlen) ? h2o_mem_alloc_recycle(&h2o_socket_ssl_buffer_allocator)
                                                               : h2o_mem_alloc(newlen);
            memcpy(newbuf, vec->vec.raw + off_within_vec, newlen);
            vec->vec = (h2o_sendvec_t){&self_allocated_vec_callbacks, newlen, {newbuf}};
            /* the copy starts at the partial offset, so the offset is now zero relative to the new buffer */
            if (stream->sendbuf.min_index_to_addref == 0)
                stream->sendbuf.off_within_first_vec = 0;
        }
    }

    return 1;
}
834
835
/**
 * Emits a `h3s_stream0_ttlb` log event for the request on QUIC stream 0, recording method, content-length, time-to-last-byte, and
 * the full quicly connection statistics. Requests on other streams are ignored.
 */
static void collect_quic_performance_metrics(struct st_h2o_http3_server_stream_t *stream)
{
    struct st_h2o_http3_server_conn_t *conn = get_conn(stream);

    /* If the request is the first request, log method, content-length, and ttlb so that the performance of HTTP/3 can be evaluated.
     * Note, to compare the performance of connections using different CC parameters, only the numbers from connections that served
     * requests capable of fulfilling the CWND regardless of CC behavior (i.e, pre-built local objects) can be used. To extract such
     * connections, properties other than method and content-length might be needed. */
    if (stream->quic->stream_id == 0) {
#define EMIT_STATS_FIELD(fld, lit) PTLS_LOG__DO_ELEMENT_UNSIGNED(lit, stats.fld);
        H2O_LOG_CONN(h3s_stream0_ttlb, &conn->super, {
            PTLS_LOG_ELEMENT_UNSAFESTR(method, stream->req.method.base, stream->req.method.len);
            PTLS_LOG_ELEMENT_UNSIGNED(content_length, stream->req.res.content_length);
            /* ttlb in milliseconds, rounded to nearest; clamped at zero in case of clock skew */
            struct timeval now = h2o_gettimeofday(conn->super.ctx->loop);
            int64_t ttlb = (h2o_timeval_subtract(&stream->req.timestamps.request_begin_at, &now) + 500) / 1000;
            if (ttlb < 0)
                ttlb = 0;
            PTLS_LOG_ELEMENT_SIGNED(ttlb, ttlb);
            quicly_stats_t stats;
            if (quicly_get_stats(conn->h3.super.quic, &stats) == 0) {
                QUICLY_STATS_FOREACH(EMIT_STATS_FIELD); /* if this is too heavyweight, we can hexdump `stats` instead */
            }
        });
#undef EMIT_STATS_FIELD
    }
}
861
862
/**
 * quicly `on_send_shift` callback: discards `delta` bytes of acknowledged data from the head of the send vector list, disposing
 * fully-consumed vectors and adjusting `off_within_first_vec` for a partially-consumed head. When the list drains and the send
 * side has already been shut, the stream transitions to CLOSE_WAIT.
 */
static void on_send_shift(quicly_stream_t *qs, size_t delta)
{
    struct st_h2o_http3_server_stream_t *stream = qs->data;
    size_t i;

    assert(H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BEFORE_BLOCK <= stream->state &&
           stream->state <= H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY);
    assert(delta != 0);
    assert(stream->sendbuf.vecs.size != 0);

    /* fast path: the shift is contained within the (possibly partially-sent) first vector */
    size_t bytes_avail_in_first_vec = stream->sendbuf.vecs.entries[0].vec.len - stream->sendbuf.off_within_first_vec;
    if (delta < bytes_avail_in_first_vec) {
        stream->sendbuf.off_within_first_vec += delta;
        return;
    }
    delta -= bytes_avail_in_first_vec;
    stream->sendbuf.off_within_first_vec = 0;
    dispose_sendvec(&stream->sendbuf.vecs.entries[0]);

    /* dispose subsequent fully-consumed vectors; a partially-consumed one leaves its offset in `off_within_first_vec` */
    for (i = 1; delta != 0; ++i) {
        assert(i < stream->sendbuf.vecs.size);
        if (delta < stream->sendbuf.vecs.entries[i].vec.len) {
            stream->sendbuf.off_within_first_vec = delta;
            break;
        }
        delta -= stream->sendbuf.vecs.entries[i].vec.len;
        dispose_sendvec(&stream->sendbuf.vecs.entries[i]);
    }
    /* compact the vector list, dropping the `i` disposed entries */
    memmove(stream->sendbuf.vecs.entries, stream->sendbuf.vecs.entries + i,
            (stream->sendbuf.vecs.size - i) * sizeof(stream->sendbuf.vecs.entries[0]));
    stream->sendbuf.vecs.size -= i;
    /* keep `min_index_to_addref` pointing at the same logical vector after compaction */
    if (stream->sendbuf.min_index_to_addref <= i) {
        stream->sendbuf.min_index_to_addref = 0;
    } else {
        stream->sendbuf.min_index_to_addref -= i;
    }

    if (stream->sendbuf.vecs.size == 0) {
        if (quicly_sendstate_is_open(&stream->quic->sendstate)) {
            assert((H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BEFORE_BLOCK <= stream->state &&
                    stream->state <= H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS) ||
                   stream->proceed_requested);
        } else {
            /* entire response acknowledged; record metrics, stop any remaining request body, and close out the stream */
            collect_quic_performance_metrics(stream);
            if (quicly_stream_has_receive_side(0, stream->quic->stream_id))
                quicly_request_stop(stream->quic, H2O_HTTP3_ERROR_EARLY_RESPONSE);
            set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT, 0);
        }
    }
}
912
913
/**
 * quicly `on_send_emit` callback: copies up to `*len` bytes starting at stream offset `off` from the send vector list into `_dst`,
 * converting non-raw vectors in place on first use. On success, `*len` is set to the number of bytes written and `*wrote_all`
 * indicates whether the end of the buffered vectors was reached; if so and the generator has more data, the payload is retained
 * and a `h2o_proceed_request` is scheduled. On failure, the response is truncated and the stream is shut down.
 */
static void on_send_emit(quicly_stream_t *qs, size_t off, void *_dst, size_t *len, int *wrote_all)
{
    struct st_h2o_http3_server_stream_t *stream = qs->data;

    assert(H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BEFORE_BLOCK <= stream->state &&
           stream->state <= H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY);

    uint8_t *dst = _dst, *dst_end = dst + *len;
    size_t vec_index = 0;

    /* find the start position identified by vec_index and off */
    off += stream->sendbuf.off_within_first_vec;
    while (off != 0) {
        assert(vec_index < stream->sendbuf.vecs.size);
        if (off < stream->sendbuf.vecs.entries[vec_index].vec.len)
            break;
        off -= stream->sendbuf.vecs.entries[vec_index].vec.len;
        ++vec_index;
    }
    assert(vec_index < stream->sendbuf.vecs.size);

    /* write */
    *wrote_all = 0;
    do {
        struct st_h2o_http3_server_sendvec_t *this_vec = stream->sendbuf.vecs.entries + vec_index;
        size_t sz = this_vec->vec.len - off;
        if (dst_end - dst < sz)
            sz = dst_end - dst;
        /* convert vector into raw form, the first time it's being sent (TODO use ssl_buffer_recyle) */
        if (this_vec->vec.callbacks->read_ != h2o_sendvec_read_raw) {
            size_t newlen = this_vec->vec.len;
            void *newbuf = sendvec_size_is_for_recycle(newlen) ? h2o_mem_alloc_recycle(&h2o_socket_ssl_buffer_allocator)
                                                               : h2o_mem_alloc(newlen);
            if (!this_vec->vec.callbacks->read_(&this_vec->vec, newbuf, newlen)) {
                free(newbuf);
                goto Error;
            }
            this_vec->vec = (h2o_sendvec_t){&self_allocated_vec_callbacks, newlen, {newbuf}};
        }
        /* copy payload */
        memcpy(dst, this_vec->vec.raw + off, sz);
        /* adjust offsets; `bytes_sent` tracks the furthest entity byte emitted (entity_offset == UINT64_MAX marks non-entity
         * vectors such as framing) */
        if (this_vec->entity_offset != UINT64_MAX && stream->req.bytes_sent < this_vec->entity_offset + off + sz)
            stream->req.bytes_sent = this_vec->entity_offset + off + sz;
        dst += sz;
        off += sz;
        /* when reaching the end of the current vector, update vec_index, wrote_all */
        if (off == this_vec->vec.len) {
            off = 0;
            ++vec_index;
            if (vec_index == stream->sendbuf.vecs.size) {
                *wrote_all = 1;
                break;
            }
        }
    } while (dst != dst_end);

    *len = dst - (uint8_t *)_dst;

    /* retain the payload of response body before calling `h2o_proceed_request`, as the generator might discard the buffer */
    if (stream->state == H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY && *wrote_all &&
        quicly_sendstate_is_open(&stream->quic->sendstate) && !stream->proceed_requested) {
        if (!retain_sendvecs(stream))
            goto Error;
        stream->proceed_requested = 1;
        stream->proceed_while_sending = 1;
    }

    return;
Error:
    /* report nothing written and terminate the stream */
    *len = 0;
    *wrote_all = 1;
    shutdown_stream(stream, H2O_HTTP3_ERROR_EARLY_RESPONSE, H2O_HTTP3_ERROR_INTERNAL, 0, 0);
}
987
988
/**
 * quicly `on_send_stop` callback: the peer sent STOP_SENDING, so cancel the request and shut down the stream.
 */
static void on_send_stop(quicly_stream_t *qs, quicly_error_t err)
{
    struct st_h2o_http3_server_stream_t *stream = qs->data;
    shutdown_stream(stream, H2O_HTTP3_ERROR_REQUEST_CANCELLED, err, 0, 0);
}
994
995
/**
 * Drives the per-stream HTTP/3 frame parser over the contiguous bytes buffered in `recvbuf`, then advances the request state
 * machine: dispatching a complete request, erroring an incomplete one, blocking large request bodies, or feeding streamed body
 * data. `in_generator` is propagated to state transitions so they know whether they are being invoked from within a generator
 * callback.
 */
static void handle_buffered_input(struct st_h2o_http3_server_stream_t *stream, int in_generator)
{
    struct st_h2o_http3_server_conn_t *conn = get_conn(stream);

    /* nothing to do once the stream has reached (or passed) CLOSE_WAIT */
    if (stream->state >= H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT)
        return;

    { /* Process contiguous bytes in the receive buffer until one of the following conditions are reached:
       * a) connection- or stream-level error (i.e., state advanced to CLOSE_WAIT) is detected - in which case we exit,
       * b) incomplete frame is detected - wait for more (if the stream is open) or raise a connection error, or
       * c) all bytes are processed or read_blocked flag is set synchronously (due to receiving CONNECT request) - exit the loop. */
        size_t bytes_available = quicly_recvstate_bytes_available(&stream->quic->recvstate);
        assert(bytes_available <= stream->recvbuf.buf->size);
        if (bytes_available != 0) {
            const uint8_t *src = (const uint8_t *)stream->recvbuf.buf->bytes, *src_end = src + bytes_available;
            do {
                quicly_error_t err;
                const char *err_desc = NULL;
                if ((err = stream->recvbuf.handle_input(stream, &src, src_end, in_generator, &err_desc)) != 0) {
                    /* INCOMPLETE on an open stream means "wait for more bytes"; on a completed stream it is a protocol error */
                    if (err == H2O_HTTP3_ERROR_INCOMPLETE) {
                        if (!quicly_recvstate_transfer_complete(&stream->quic->recvstate))
                            break;
                        err = H2O_HTTP3_ERROR_GENERAL_PROTOCOL;
                        err_desc = "incomplete frame";
                    }
                    h2o_quic_close_connection(&conn->h3.super, err, err_desc);
                    return;
                } else if (stream->state >= H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT) {
                    return;
                }
            } while (src != src_end && !stream->read_blocked && !quicly_stop_requested(stream->quic));
            /* Processed zero or more bytes without noticing an error; shift the bytes that have been processed as frames. */
            size_t bytes_consumed = src - (const uint8_t *)stream->recvbuf.buf->bytes;
            h2o_buffer_consume(&stream->recvbuf.buf, bytes_consumed);
            quicly_stream_sync_recvbuf(stream->quic, bytes_consumed);
            if (stream->read_blocked)
                return;
        }
    }

    if (quicly_recvstate_transfer_complete(&stream->quic->recvstate)) {
        if (stream->recvbuf.buf->size == 0 && (stream->recvbuf.handle_input == handle_input_expect_data ||
                                               stream->recvbuf.handle_input == handle_input_post_trailers)) {
            /* have complete request, advance the state and process the request */
            if (stream->req.content_length != SIZE_MAX && stream->req.content_length != stream->req.req_body_bytes_received) {
                /* the request terminated abruptly; reset the stream as we do for HTTP/2 */
                shutdown_stream(stream, H2O_HTTP3_ERROR_NONE /* ignored */,
                                stream->req.req_body_bytes_received < stream->req.content_length
                                    ? H2O_HTTP3_ERROR_REQUEST_INCOMPLETE
                                    : H2O_HTTP3_ERROR_GENERAL_PROTOCOL,
                                in_generator, 0);
            } else {
                if (stream->req.write_req.cb != NULL) {
                    /* streaming mode: schedule the final write_req invocation */
                    if (!h2o_linklist_is_linked(&stream->link))
                        h2o_linklist_insert(&conn->delayed_streams.req_streaming, &stream->link);
                    request_run_delayed(conn);
                } else if (!stream->req.process_called && stream->state < H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS) {
                    /* process the request, if we haven't called h2o_process_request nor send an error response */
                    switch (stream->state) {
                    case H2O_HTTP3_SERVER_STREAM_STATE_RECV_HEADERS:
                    case H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BEFORE_BLOCK:
                    case H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_UNBLOCKED:
                        break;
                    default:
                        assert(!"unexpected state");
                        break;
                    }
                    set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_REQ_PENDING, in_generator);
                    h2o_linklist_insert(&conn->delayed_streams.pending, &stream->link);
                    request_run_delayed(conn);
                }
            }
        } else {
            /* request stream closed with an incomplete request, send error */
            shutdown_stream(stream, H2O_HTTP3_ERROR_NONE /* ignored */, H2O_HTTP3_ERROR_REQUEST_INCOMPLETE, in_generator, 0);
        }
    } else {
        if (stream->state == H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BEFORE_BLOCK && stream->req_body != NULL &&
            stream->req_body->size >= H2O_HTTP3_REQUEST_BODY_MIN_BYTES_TO_BLOCK) {
            /* switch to blocked state if the request body is becoming large (this limits the concurrency to the backend) */
            stream->read_blocked = 1;
            h2o_linklist_insert(&conn->delayed_streams.recv_body_blocked, &stream->link);
            set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BLOCKED, in_generator);
            check_run_blocked(conn);
        } else if (stream->req.write_req.cb != NULL && stream->req_body->size != 0) {
            /* in streaming mode, let the run_delayed invoke write_req */
            if (!h2o_linklist_is_linked(&stream->link))
                h2o_linklist_insert(&conn->delayed_streams.req_streaming, &stream->link);
            request_run_delayed(conn);
        }
    }
}
1087
1088
/**
 * quicly `on_receive` callback: appends the received bytes to the stream's receive buffer and, unless the stream is read-blocked
 * or a STOP_SENDING has been issued, processes what has been buffered.
 */
static void on_receive(quicly_stream_t *qs, size_t off, const void *input, size_t len)
{
    struct st_h2o_http3_server_stream_t *stream = qs->data;

    /* save received data (FIXME avoid copying if possible; see hqclient.c) */
    h2o_http3_update_recvbuf(&stream->recvbuf.buf, off, input, len);

    /* handle input (FIXME propage err_desc) */
    if (!stream->read_blocked && !quicly_stop_requested(stream->quic))
        handle_buffered_input(stream, 0);
}
1101
1102
/**
 * quicly `on_receive_reset` callback: the peer reset the request stream. If the reset arrived before the request headers were
 * parsed, report the request as rejected (safe to retry); otherwise report it as cancelled.
 */
static void on_receive_reset(quicly_stream_t *qs, quicly_error_t err)
{
    struct st_h2o_http3_server_stream_t *stream = qs->data;
    quicly_error_t stream_err;

    if (stream->state == H2O_HTTP3_SERVER_STREAM_STATE_RECV_HEADERS) {
        stream_err = H2O_HTTP3_ERROR_REQUEST_REJECTED;
    } else {
        stream_err = H2O_HTTP3_ERROR_REQUEST_CANCELLED;
    }
    shutdown_stream(stream, H2O_HTTP3_ERROR_NONE /* ignored */, stream_err, 0, 1);
}
1111
1112
/**
 * `proceed_req` callback used when the request body is being streamed to the handler. Invoked after the handler has consumed the
 * previously supplied chunk (or with `errstr != NULL` on error). Tears down streaming state when the transfer is complete or
 * failed, otherwise releases the consumed bytes, unblocks reading, and pulls more input from the receive buffer.
 */
static void proceed_request_streaming(h2o_req_t *_req, const char *errstr)
{
    struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, req, _req);
    struct st_h2o_http3_server_conn_t *conn = get_conn(stream);

    assert(stream->req_body != NULL);
    assert(errstr != NULL || !h2o_linklist_is_linked(&stream->link));
    assert(conn->num_streams_req_streaming != 0 || stream->req.is_tunnel_req);

    /* terminate streaming on error, or when the peer has finished sending and everything received has been forwarded */
    if (errstr != NULL ||
        (quicly_recvstate_transfer_complete(&stream->quic->recvstate) &&
         (stream->quic->recvstate.eos == UINT64_MAX || quicly_recvstate_bytes_available(&stream->quic->recvstate) == 0))) {
        /* tidy up the request streaming */
        stream->req.write_req.cb = NULL;
        stream->req.write_req.ctx = NULL;
        stream->req.forward_datagram.write_ = NULL;
        stream->req.proceed_req = NULL;
        stream->req_streaming = 0;
        if (!stream->req.is_tunnel_req)
            --conn->num_streams_req_streaming;
        check_run_blocked(conn);
        /* close the stream if an error occurred */
        if (errstr != NULL || stream->quic->recvstate.eos == UINT64_MAX) {
            shutdown_stream(stream, H2O_HTTP3_ERROR_INTERNAL, H2O_HTTP3_ERROR_INTERNAL, 1, 1);
            return;
        }
    }

    /* remove the bytes from the request body buffer */
    assert(stream->req.entity.len == stream->req_body->size);
    h2o_buffer_consume(&stream->req_body, stream->req_body->size);
    stream->req.entity = h2o_iovec_init(NULL, 0);

    /* unblock read until the next invocation of write_req, or after the final invocation */
    stream->read_blocked = 0;

    /* handle input in the receive buffer */
    handle_buffered_input(stream, 1);
}
1151
1152
/**
 * Zero-delay timer callback that drains the connection's delayed-stream queues, looping until no further progress is made:
 * 1. promotes one body-blocked stream to streaming or unblocked state (subject to the per-connection concurrency cap),
 * 2. invokes `write_req` for streams that have new request-body data to forward,
 * 3. calls `h2o_process_request` for streams whose requests are pending.
 */
static void run_delayed(h2o_timer_t *timer)
{
    struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, timeout, timer);
    int made_progress;

    do {
        made_progress = 0;

        /* promote blocked stream to unblocked state, if possible */
        if (conn->num_streams.recv_body_unblocked + conn->num_streams_req_streaming <
                conn->super.ctx->globalconf->http3.max_concurrent_streaming_requests_per_connection &&
            !h2o_linklist_is_empty(&conn->delayed_streams.recv_body_blocked)) {
            struct st_h2o_http3_server_stream_t *stream =
                H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, link, conn->delayed_streams.recv_body_blocked.next);
            assert(stream->state == H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BLOCKED);
            assert(stream->read_blocked);
            h2o_linklist_unlink(&stream->link);
            made_progress = 1;
            quicly_stream_set_receive_window(stream->quic, conn->super.ctx->globalconf->http3.active_stream_window_size);
            if (h2o_req_can_stream_request(&stream->req)) {
                /* use streaming mode */
                stream->req_streaming = 1;
                ++conn->num_streams_req_streaming;
                stream->req.proceed_req = proceed_request_streaming;
                set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS, 0);
                h2o_process_request(&stream->req);
            } else {
                /* unblock, read the bytes in receive buffer */
                stream->read_blocked = 0;
                set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_UNBLOCKED, 0);
                handle_buffered_input(stream, 0);
                /* handle_buffered_input may have initiated connection closure; stop if so */
                if (quicly_get_state(conn->h3.super.quic) >= QUICLY_STATE_CLOSING)
                    return;
            }
        }

        /* process streams using request streaming, that have new data to submit */
        while (!h2o_linklist_is_empty(&conn->delayed_streams.req_streaming)) {
            struct st_h2o_http3_server_stream_t *stream =
                H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, link, conn->delayed_streams.req_streaming.next);
            int is_end_stream = quicly_recvstate_transfer_complete(&stream->quic->recvstate);
            assert(stream->req.process_called);
            assert(stream->req.write_req.cb != NULL);
            assert(stream->req_body != NULL);
            assert(stream->req_body->size != 0 || is_end_stream);
            assert(!stream->read_blocked);
            h2o_linklist_unlink(&stream->link);
            /* block reads until proceed_request_streaming clears the flag */
            stream->read_blocked = 1;
            made_progress = 1;
            assert(stream->req.entity.len == stream->req_body->size &&
                   (stream->req.entity.len == 0 || stream->req.entity.base == stream->req_body->bytes));
            if (stream->req.write_req.cb(stream->req.write_req.ctx, is_end_stream) != 0)
                shutdown_stream(stream, H2O_HTTP3_ERROR_INTERNAL, H2O_HTTP3_ERROR_INTERNAL, 0, 1);
        }

        /* process the requests (not in streaming mode); TODO cap concurrency? */
        while (!h2o_linklist_is_empty(&conn->delayed_streams.pending)) {
            struct st_h2o_http3_server_stream_t *stream =
                H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, link, conn->delayed_streams.pending.next);
            assert(stream->state == H2O_HTTP3_SERVER_STREAM_STATE_REQ_PENDING);
            assert(!stream->req.process_called);
            assert(!stream->read_blocked);
            h2o_linklist_unlink(&stream->link);
            made_progress = 1;
            set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS, 0);
            h2o_process_request(&stream->req);
        }

    } while (made_progress);
}
1222
1223
quicly_error_t handle_input_post_trailers(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src, const uint8_t *src_end,
1224
                                          int in_generator, const char **err_desc)
1225
274
{
1226
274
    h2o_http3_read_frame_t frame;
1227
274
    quicly_error_t ret;
1228
1229
    /* read and ignore unknown frames */
1230
274
    if ((ret = h2o_http3_read_frame(&frame, 0, H2O_HTTP3_STREAM_TYPE_REQUEST, get_conn(stream)->h3.max_frame_payload_size, src,
1231
274
                                    src_end, err_desc)) != 0)
1232
14
        return ret;
1233
260
    switch (frame.type) {
1234
2
    case H2O_HTTP3_FRAME_TYPE_HEADERS:
1235
4
    case H2O_HTTP3_FRAME_TYPE_DATA:
1236
4
        return H2O_HTTP3_ERROR_FRAME_UNEXPECTED;
1237
256
    default:
1238
256
        break;
1239
260
    }
1240
1241
256
    return 0;
1242
260
}
1243
1244
static quicly_error_t handle_input_expect_data_payload(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src,
1245
                                                       const uint8_t *src_end, int in_generator, const char **err_desc)
1246
2.07k
{
1247
2.07k
    size_t bytes_avail = src_end - *src;
1248
1249
    /* append data to body buffer */
1250
2.07k
    if (bytes_avail > stream->recvbuf.bytes_left_in_data_frame)
1251
1.88k
        bytes_avail = stream->recvbuf.bytes_left_in_data_frame;
1252
2.07k
    if (stream->req_body == NULL)
1253
232
        h2o_buffer_init(&stream->req_body, &h2o_socket_buffer_prototype);
1254
2.07k
    if (!h2o_buffer_try_append(&stream->req_body, *src, bytes_avail))
1255
0
        return H2O_HTTP3_ERROR_INTERNAL;
1256
2.07k
    stream->req.entity = h2o_iovec_init(stream->req_body->bytes, stream->req_body->size);
1257
2.07k
    stream->req.req_body_bytes_received += bytes_avail;
1258
2.07k
    stream->recvbuf.bytes_left_in_data_frame -= bytes_avail;
1259
2.07k
    *src += bytes_avail;
1260
1261
2.07k
    if (stream->recvbuf.bytes_left_in_data_frame == 0)
1262
1.90k
        stream->recvbuf.handle_input = handle_input_expect_data;
1263
1264
2.07k
    return 0;
1265
2.07k
}
1266
1267
/**
 * `recvbuf.handle_input` callback active between frames of the request body: reads the next frame header and dispatches — a
 * HEADERS frame is treated as trailers (forbidden in tunnel mode), a DATA frame larger than the remaining content-length resets
 * the stream, a non-empty DATA frame switches to the payload handler, and other frame types are ignored.
 */
quicly_error_t handle_input_expect_data(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src, const uint8_t *src_end,
                                        int in_generator, const char **err_desc)
{
    h2o_http3_read_frame_t frame;
    quicly_error_t ret;

    /* read frame */
    if ((ret = h2o_http3_read_frame(&frame, 0, H2O_HTTP3_STREAM_TYPE_REQUEST, get_conn(stream)->h3.max_frame_payload_size, src,
                                    src_end, err_desc)) != 0)
        return ret;
    switch (frame.type) {
    case H2O_HTTP3_FRAME_TYPE_HEADERS:
        /* when in tunnel mode, trailers forbidden */
        if (stream->req.is_tunnel_req) {
            *err_desc = "unexpected frame type";
            return H2O_HTTP3_ERROR_FRAME_UNEXPECTED;
        }
        /* trailers, ignore but disallow succeeding DATA or HEADERS frame */
        stream->recvbuf.handle_input = handle_input_post_trailers;
        return 0;
    case H2O_HTTP3_FRAME_TYPE_DATA:
        if (stream->req.content_length != SIZE_MAX &&
            stream->req.content_length - stream->req.req_body_bytes_received < frame.length) {
            /* The only viable option here is to reset the stream, as we might have already started streaming the request body
             * upstream. This behavior is consistent with what we do in HTTP/2. */
            shutdown_stream(stream, H2O_HTTP3_ERROR_EARLY_RESPONSE, H2O_HTTP3_ERROR_GENERAL_PROTOCOL, in_generator, 0);
            return 0;
        }
        break;
    default:
        /* unknown frame types are skipped (their payload has already been consumed by h2o_http3_read_frame) */
        return 0;
    }

    /* got a DATA frame */
    if (frame.length != 0) {
        /* record the time the first request-body byte arrived */
        if (h2o_timeval_is_null(&stream->req.timestamps.request_body_begin_at))
            stream->req.timestamps.request_body_begin_at = h2o_gettimeofday(get_conn(stream)->super.ctx->loop);
        stream->recvbuf.handle_input = handle_input_expect_data_payload;
        stream->recvbuf.bytes_left_in_data_frame = frame.length;
    }

    return 0;
}
1310
1311
/**
 * Responds to a malformed / unacceptable request with an HTTP-level error generated by `sendfn` (e.g., `h2o_send_error_400`).
 * Any request body still in flight is discarded by issuing STOP_SENDING, the stream transitions to SEND_HEADERS, and
 * `*err_desc` is cleared so that the caller does not treat this as a connection-level error. Always returns 0 (the error is
 * conveyed to the client via the HTTP response, not as a stream/connection error).
 */
static int handle_input_expect_headers_send_http_error(struct st_h2o_http3_server_stream_t *stream,
                                                       void (*sendfn)(h2o_req_t *, const char *, const char *, int),
                                                       const char *reason, const char *body, const char **err_desc)
{
    /* if the client is still sending the request, tell it to stop; we are responding early */
    if (!quicly_recvstate_transfer_complete(&stream->quic->recvstate))
        quicly_request_stop(stream->quic, H2O_HTTP3_ERROR_EARLY_RESPONSE);

    set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS, 0);
    sendfn(&stream->req, reason, body, 0);
    *err_desc = NULL; /* error has been handled at the HTTP layer; suppress propagation */

    return 0;
}
1324
1325
/**
 * Dispatches the request to the handler chain immediately (before the request body has been received), setting up streaming
 * of the request body via `proceed_request_streaming`. Used for CONNECT tunnels and for requests whose Expect header is being
 * forwarded. Always returns 0; `err_desc` is unused but kept for signature parity with the sibling `handle_input_expect_*`
 * helpers.
 */
static int handle_input_expect_headers_process_request_immediately(struct st_h2o_http3_server_stream_t *stream,
                                                                   const char **err_desc)
{
    /* start with an empty body buffer / entity; body bytes will be fed incrementally */
    h2o_buffer_init(&stream->req_body, &h2o_socket_buffer_prototype);
    stream->req.entity = h2o_iovec_init("", 0);
    /* block reads until the handler asks for more via proceed_req */
    stream->read_blocked = 1;
    stream->req.proceed_req = proceed_request_streaming;
    set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS, 0);
    /* widen the receive window for an actively-streamed request body */
    quicly_stream_set_receive_window(stream->quic, get_conn(stream)->super.ctx->globalconf->http3.active_stream_window_size);
    h2o_process_request(&stream->req);

    return 0;
}
1338
1339
static int handle_input_expect_headers_process_connect(struct st_h2o_http3_server_stream_t *stream, uint64_t datagram_flow_id,
1340
                                                       const char **err_desc)
1341
436
{
1342
436
    if (stream->req.content_length != SIZE_MAX)
1343
212
        return handle_input_expect_headers_send_http_error(stream, h2o_send_error_400, "Invalid Request",
1344
212
                                                           "CONNECT request cannot have request body", err_desc);
1345
1346
224
    stream->req.is_tunnel_req = 1;
1347
224
    stream->datagram_flow_id = datagram_flow_id;
1348
224
    ++get_conn(stream)->num_streams_tunnelling;
1349
1350
224
    return handle_input_expect_headers_process_request_immediately(stream, err_desc);
1351
436
}
1352
1353
/**
 * `recvbuf.handle_input` callback in effect while the stream is waiting for the HEADERS frame that opens a request. Consumes at
 * most one frame from [*src, src_end): non-HEADERS frames that may legally precede HEADERS are skipped, the HEADERS frame is
 * parsed via QPACK, pseudo-header and content-length requirements are validated, request priority is applied, and the request is
 * either handed off for processing or answered with an HTTP-level error. Returns 0 on success (including the cases where the
 * stream has been shut down or an error response has been generated), or a non-zero HTTP/3 error code to be propagated by the
 * caller.
 */
static quicly_error_t handle_input_expect_headers(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src,
                                                  const uint8_t *src_end, int in_generator, const char **err_desc)
{
    assert(!in_generator); /* this function is processing headers (before generators get assigned), not trailers */

    struct st_h2o_http3_server_conn_t *conn = get_conn(stream);
    h2o_http3_read_frame_t frame;
    int header_exists_map = 0;
    h2o_iovec_t expect = h2o_iovec_init(NULL, 0);
    h2o_iovec_t datagram_flow_id_field = {};
    uint64_t datagram_flow_id = UINT64_MAX;
    uint8_t header_ack[H2O_HPACK_ENCODE_INT_MAX_LENGTH];
    size_t header_ack_len;
    quicly_error_t ret;

    /* read the HEADERS frame (or a frame that precedes that) */
    if ((ret = h2o_http3_read_frame(&frame, 0, H2O_HTTP3_STREAM_TYPE_REQUEST, get_conn(stream)->h3.max_frame_payload_size, src,
                                    src_end, err_desc)) != 0) {
        /* a HEADERS frame that exceeds the frame payload limit rejects just this request, not the connection */
        if (*err_desc == h2o_http3_err_frame_too_large && frame.type == H2O_HTTP3_FRAME_TYPE_HEADERS) {
            shutdown_stream(stream, H2O_HTTP3_ERROR_REQUEST_REJECTED, H2O_HTTP3_ERROR_REQUEST_REJECTED, 0, 0);
            return 0;
        } else {
            return ret;
        }
    }
    if (frame.type != H2O_HTTP3_FRAME_TYPE_HEADERS) {
        switch (frame.type) {
        case H2O_HTTP3_FRAME_TYPE_DATA:
            /* DATA before HEADERS is a protocol violation */
            return H2O_HTTP3_ERROR_FRAME_UNEXPECTED;
        default:
            break;
        }
        /* other frame types are ignored; wait for the next frame */
        return 0;
    }
    stream->req.timestamps.request_begin_at = h2o_gettimeofday(conn->super.ctx->loop);
    stream->recvbuf.handle_input = handle_input_expect_data;

    /* parse the headers, and ack */
    if ((ret = h2o_qpack_parse_request(&stream->req.pool, get_conn(stream)->h3.qpack.dec, stream->quic->stream_id,
                                       &stream->req.input.method, &stream->req.input.scheme, &stream->req.input.authority,
                                       &stream->req.input.path, &stream->req.upgrade, &stream->req.headers, &header_exists_map,
                                       &stream->req.content_length, &expect, NULL /* TODO cache-digests */, &datagram_flow_id_field,
                                       header_ack, &header_ack_len, frame.payload, frame.length, err_desc)) != 0 &&
        ret != H2O_HTTP2_ERROR_INVALID_HEADER_CHAR)
        return ret;
    /* note: INVALID_HEADER_CHAR falls through here; it is converted to a 400 response further below, after the pseudo-header
     * checks that take precedence */
    if (header_ack_len != 0)
        h2o_http3_send_qpack_header_ack(&conn->h3, header_ack, header_ack_len);

    h2o_probe_log_request(&stream->req, stream->quic->stream_id);

    if (stream->req.input.scheme == NULL)
        stream->req.input.scheme = &H2O_URL_SCHEME_HTTPS;

    /* classify the request (CONNECT / CONNECT-UDP draft-03 / ordinary), determining which pseudo headers MUST / MAY exist */
    int is_connect, must_exist_map, may_exist_map;
    const int can_receive_datagrams =
        quicly_get_context(get_conn(stream)->h3.super.quic)->transport_params.max_datagram_frame_size != 0;
    if (h2o_memis(stream->req.input.method.base, stream->req.input.method.len, H2O_STRLIT("CONNECT"))) {
        is_connect = 1;
        must_exist_map = H2O_HPACK_PARSE_HEADERS_METHOD_EXISTS | H2O_HPACK_PARSE_HEADERS_AUTHORITY_EXISTS;
        may_exist_map = 0;
        /* extended connect looks like an ordinary request plus an upgrade token (:protocol) */
        if ((header_exists_map & H2O_HPACK_PARSE_HEADERS_PROTOCOL_EXISTS) != 0) {
            must_exist_map |= H2O_HPACK_PARSE_HEADERS_SCHEME_EXISTS | H2O_HPACK_PARSE_HEADERS_PATH_EXISTS |
                              H2O_HPACK_PARSE_HEADERS_PROTOCOL_EXISTS;
            if (can_receive_datagrams)
                datagram_flow_id = stream->quic->stream_id / 4;
        }
    } else if (h2o_memis(stream->req.input.method.base, stream->req.input.method.len, H2O_STRLIT("CONNECT-UDP"))) {
        /* Handling of masque draft-03. Method is CONNECT-UDP and :protocol is not used, so we set `:protocol` to "connect-udp" to
         * make it look like an upgrade. The method is preserved and can be used to distinguish between RFC 9298 version which uses
         * "CONNECT". The draft requires "masque" in `:scheme` but we need to support clients that put "https" there instead. */
        if (!((header_exists_map & H2O_HPACK_PARSE_HEADERS_PROTOCOL_EXISTS) == 0 &&
              h2o_memis(stream->req.input.path.base, stream->req.input.path.len, H2O_STRLIT("/")))) {
            shutdown_stream(stream, H2O_HTTP3_ERROR_GENERAL_PROTOCOL, H2O_HTTP3_ERROR_GENERAL_PROTOCOL, 0, 0);
            return 0;
        }
        if (datagram_flow_id_field.base != NULL) {
            if (!can_receive_datagrams) {
                *err_desc = "unexpected h3 datagram";
                return H2O_HTTP3_ERROR_GENERAL_PROTOCOL;
            }
            /* parse the decimal flow id; parsing stops at the first non-digit character */
            datagram_flow_id = 0;
            for (const char *p = datagram_flow_id_field.base; p != datagram_flow_id_field.base + datagram_flow_id_field.len; ++p) {
                if (!('0' <= *p && *p <= '9'))
                    break;
                datagram_flow_id = datagram_flow_id * 10 + *p - '0';
            }
        }
        assert(stream->req.upgrade.base == NULL); /* otherwise PROTOCOL_EXISTS will be set */
        is_connect = 1;
        must_exist_map = H2O_HPACK_PARSE_HEADERS_METHOD_EXISTS | H2O_HPACK_PARSE_HEADERS_AUTHORITY_EXISTS |
                         H2O_HPACK_PARSE_HEADERS_SCHEME_EXISTS | H2O_HPACK_PARSE_HEADERS_PATH_EXISTS;
        may_exist_map = 0;
    } else {
        /* normal request */
        is_connect = 0;
        must_exist_map =
            H2O_HPACK_PARSE_HEADERS_METHOD_EXISTS | H2O_HPACK_PARSE_HEADERS_SCHEME_EXISTS | H2O_HPACK_PARSE_HEADERS_PATH_EXISTS;
        may_exist_map = H2O_HPACK_PARSE_HEADERS_AUTHORITY_EXISTS;
    }

    /* check that all MUST pseudo headers exist, and that there are no other pseudo headers than MUST or MAY */
    if (!((header_exists_map & must_exist_map) == must_exist_map && (header_exists_map & ~(must_exist_map | may_exist_map)) == 0)) {
        shutdown_stream(stream, H2O_HTTP3_ERROR_GENERAL_PROTOCOL, H2O_HTTP3_ERROR_GENERAL_PROTOCOL, 0, 0);
        return 0;
    }

    /* send a 400 error when observing an invalid header character */
    if (ret == H2O_HTTP2_ERROR_INVALID_HEADER_CHAR)
        return handle_input_expect_headers_send_http_error(stream, h2o_send_error_400, "Invalid Request", *err_desc, err_desc);

    /* validate semantic requirement */
    if (!h2o_req_validate_pseudo_headers(&stream->req)) {
        *err_desc = "invalid pseudo headers";
        return H2O_HTTP3_ERROR_GENERAL_PROTOCOL;
    }

    /* check if content-length is within the permitted bounds */
    if (stream->req.content_length != SIZE_MAX && stream->req.content_length > conn->super.ctx->globalconf->max_request_entity_size)
        return handle_input_expect_headers_send_http_error(stream, h2o_send_error_413, "Request Entity Too Large",
                                                           "request entity is too large", err_desc);

    /* set priority; a PRIORITY_UPDATE frame received before the request takes precedence over the priority header (RFC 9218) */
    assert(!h2o_linklist_is_linked(&stream->scheduler.link));
    if (!stream->received_priority_update) {
        ssize_t index;
        if ((index = h2o_find_header(&stream->req.headers, H2O_TOKEN_PRIORITY, -1)) != -1) {
            h2o_iovec_t *value = &stream->req.headers.entries[index].value;
            h2o_absprio_parse_priority(value->base, value->len, &stream->scheduler.priority);
        } else if (is_connect) {
            /* tunnels default to incremental delivery */
            stream->scheduler.priority.incremental = 1;
        }
    }

    /* special handling of CONNECT method */
    if (is_connect)
        return handle_input_expect_headers_process_connect(stream, datagram_flow_id, err_desc);

    /* change state */
    set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BEFORE_BLOCK, 0);

    /* handle expect: 100-continue */
    if (expect.base != NULL) {
        if (!h2o_lcstris(expect.base, expect.len, H2O_STRLIT("100-continue"))) {
            return handle_input_expect_headers_send_http_error(stream, h2o_send_error_417, "Expectation Failed",
                                                               "unknown expectation", err_desc);
        }
        if (h2o_req_should_forward_expect(&stream->req)) {
            /* forward the Expect header and start processing the request before the body arrives */
            h2o_add_header(&stream->req.pool, &stream->req.headers, H2O_TOKEN_EXPECT, NULL, expect.base, expect.len);
            stream->req_streaming = 1;
            ++conn->num_streams_req_streaming;
            return handle_input_expect_headers_process_request_immediately(stream, err_desc);
        } else {
            /* answer the expectation ourselves with a 100 (Continue) informational response */
            stream->req.res.status = 100;
            h2o_send_informational(&stream->req);
        }
    }

    return 0;
}
1513
1514
/**
 * Serializes the response headers of `stream->req` into an HTTP/3 HEADERS frame (QPACK-encoded) and appends it to the stream's
 * send vectors. `datagram_flow_id`, when non-empty, is emitted as the datagram-flow-id response header (masque draft-03).
 */
static void write_response(struct st_h2o_http3_server_stream_t *stream, h2o_iovec_t datagram_flow_id)
{
    struct st_h2o_http3_server_conn_t *conn = get_conn(stream);
    size_t qpack_header_len = 0;

    /* build the HEADERS frame using the connection-wide QPACK encoder */
    h2o_iovec_t headers_frame = h2o_qpack_flatten_response(
        conn->h3.qpack.enc, &stream->req.pool, stream->quic->stream_id, NULL, stream->req.res.status,
        stream->req.res.headers.entries, stream->req.res.headers.size, &conn->super.ctx->globalconf->server_name,
        stream->req.res.content_length, datagram_flow_id, &qpack_header_len);
    stream->req.header_bytes_sent += qpack_header_len;

    /* queue the frame as a single immutable send vector */
    h2o_vector_reserve(&stream->req.pool, &stream->sendbuf.vecs, stream->sendbuf.vecs.size + 1);
    struct st_h2o_http3_server_sendvec_t *slot = stream->sendbuf.vecs.entries + stream->sendbuf.vecs.size++;
    slot->vec = (h2o_sendvec_t){&immutable_vec_callbacks, headers_frame.len, {headers_frame.base}};
    slot->entity_offset = UINT64_MAX; /* header bytes are not part of the response body */
    stream->sendbuf.final_size += headers_frame.len;
}
1529
1530
static size_t flatten_data_frame_header(struct st_h2o_http3_server_stream_t *stream, struct st_h2o_http3_server_sendvec_t *dst,
1531
                                        size_t payload_size)
1532
1.42k
{
1533
1.42k
    size_t header_size = 0;
1534
1535
    /* build header */
1536
1.42k
    stream->sendbuf.data_frame_header_buf[header_size++] = H2O_HTTP3_FRAME_TYPE_DATA;
1537
1.42k
    header_size =
1538
1.42k
        quicly_encodev(stream->sendbuf.data_frame_header_buf + header_size, payload_size) - stream->sendbuf.data_frame_header_buf;
1539
1540
    /* initilaize the vector */
1541
1.42k
    h2o_sendvec_init_raw(&dst->vec, stream->sendbuf.data_frame_header_buf, header_size);
1542
1.42k
    dst->entity_offset = UINT64_MAX;
1543
1544
1.42k
    return header_size;
1545
1.42k
}
1546
1547
/**
 * Invoked when the response generator signals end-of-response (FINAL/ERROR): closes the send side at the final offset. If no
 * send vectors are pending, the stream can move straight to CLOSE_WAIT, stopping any remaining request-body transfer.
 */
static void shutdown_by_generator(struct st_h2o_http3_server_stream_t *stream)
{
    quicly_sendstate_shutdown(&stream->quic->sendstate, stream->sendbuf.final_size);
    if (stream->sendbuf.vecs.size == 0) {
        /* nothing left to send; discard the rest of the request body (if the stream has a receive side) and close */
        if (quicly_stream_has_receive_side(0, stream->quic->stream_id))
            quicly_request_stop(stream->quic, H2O_HTTP3_ERROR_EARLY_RESPONSE);
        set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT, 1);
    }
}
1556
1557
/**
 * Returns a boolean indicating if the response is ready to be sent, building the value of the datagram-flow-id header field.
 * Returns 0 only when the response must wait for the peer's H3 SETTINGS (the stream is then parked on
 * `streams_resp_settings_blocked` and `do_send` is re-invoked once SETTINGS arrive).
 */
static int finalize_do_send_setup_udp_tunnel(struct st_h2o_http3_server_stream_t *stream, h2o_send_state_t send_state,
                                             h2o_iovec_t *datagram_flow_id)
{
    *datagram_flow_id = h2o_iovec_init(NULL, 0);

    /* TODO Convert H3_DATAGRAMs to capsules either here or inside the proxy handler. At the moment, the connect handler provides
     * `h2o_req_t::forward_datagram` callbacks but the proxy handler does not. As support for H3_DATAGRAMs are advertised at the
     * connection level, we need to support forwarding datagrams also when the proxy handler in use.
     * Until then, connect-udp requests on H3 are refused to be tunneled by the proxy handler, see `h2o__proxy_process_request`.
     * Also, as an abundance of caution, we drop the datagrams associated to requests that do not provide the forwarding hooks, by
     * not registering such streams to `datagram_flows`. */
    /* fast path: not a successfully-established, in-progress tunnel with datagram forwarding hooks — no tunnel setup needed */
    if (!((200 <= stream->req.res.status && stream->req.res.status <= 299) && stream->req.forward_datagram.write_ != NULL) ||
        send_state != H2O_SEND_STATE_IN_PROGRESS) {
        stream->datagram_flow_id = UINT64_MAX;
        return 1;
    }

    /* Register the flow id to the connection so that datagram frames being received from the client would be dispatched to
     * `req->forward_datagram.write_`. */
    if (stream->datagram_flow_id != UINT64_MAX) {
        struct st_h2o_http3_server_conn_t *conn = get_conn(stream);
        int r;
        khiter_t iter = kh_put(stream, conn->datagram_flows, stream->datagram_flow_id, &r);
        assert(iter != kh_end(conn->datagram_flows));
        kh_val(conn->datagram_flows, iter) = stream;
    }

    /* If the client sent a `datagram-flow-id` request header field and:
     *  a) if the peer is willing to accept datagrams as well, use the same flow ID for sending datagrams from us,
     *  b) if the peer did not send H3_DATAGRAM Settings, use the stream, or
     *  c) if H3 SETTINGS hasn't been received yet, wait for it, then call `do_send` again. We might drop some packets from origin
     *     that arrive before H3 SETTINGS from the client, in the rare occasion of packet carrying H3 SETTINGS getting lost while
     *     those carrying CONNECT-UDP request and the UDP datagram to be forwarded to the origin arrive. */
    if (stream->datagram_flow_id != UINT64_MAX) {
        struct st_h2o_http3_server_conn_t *conn = get_conn(stream);
        if (!h2o_http3_has_received_settings(&conn->h3)) {
            h2o_linklist_insert(&conn->streams_resp_settings_blocked, &stream->link_resp_settings_blocked);
            return 0;
        }
        if (conn->h3.peer_settings.h3_datagram) {
            /* register the route that would be used by the CONNECT handler for forwarding datagrams */
            stream->req.forward_datagram.read_ = tunnel_on_udp_read;
            /* if the request type is draft-03, build and return the value of datagram-flow-id header field */
            if (stream->req.input.method.len == sizeof("CONNECT-UDP") - 1) {
                datagram_flow_id->base = h2o_mem_alloc_pool(&stream->req.pool, char, sizeof(H2O_UINT64_LONGEST_STR));
                datagram_flow_id->len = sprintf(datagram_flow_id->base, "%" PRIu64, stream->datagram_flow_id);
            }
        }
    }

    return 1;
}
1612
1613
static void finalize_do_send(struct st_h2o_http3_server_stream_t *stream)
1614
1.99k
{
1615
1.99k
    quicly_stream_sync_sendbuf(stream->quic, 1);
1616
1.99k
    if (!stream->proceed_while_sending)
1617
1.56k
        h2o_quic_schedule_timer(&get_conn(stream)->h3.super);
1618
1.99k
}
1619
1620
/**
 * `h2o_ostream_t` callback through which generators emit the response. On first invocation (SEND_HEADERS state) the HEADERS
 * frame is written (possibly deferred until the peer's H3 SETTINGS arrive, for UDP tunnels); on every invocation the supplied
 * body vectors are queued behind a freshly-built DATA frame header, and end-of-response is signalled when `send_state` is
 * FINAL or ERROR.
 */
static void do_send(h2o_ostream_t *_ostr, h2o_req_t *_req, h2o_sendvec_t *bufs, size_t bufcnt, h2o_send_state_t send_state)
{
    struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, ostr_final, _ostr);
    /* an empty payload is legitimate only for the first call (headers-only so far) or the last (closing the response) */
    int empty_payload_allowed =
        stream->state == H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS || send_state != H2O_SEND_STATE_IN_PROGRESS;

    assert(&stream->req == _req);

    stream->proceed_requested = 0;

    switch (stream->state) {
    case H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS: {
        /* first invocation: emit response headers */
        h2o_iovec_t datagram_flow_id;
        ssize_t priority_header_index;
        if (stream->req.send_server_timing != 0)
            h2o_add_server_timing_header(&stream->req, 0 /* TODO add support for trailers; it's going to be a little complex as we
                                                          * need to build trailers the moment they are emitted onto wire */);
        /* returns zero when the response has to wait for the peer's H3 SETTINGS; `do_send` will be re-invoked later */
        if (!finalize_do_send_setup_udp_tunnel(stream, send_state, &datagram_flow_id))
            return;
        stream->req.timestamps.response_start_at = h2o_gettimeofday(get_conn(stream)->super.ctx->loop);
        write_response(stream, datagram_flow_id);
        h2o_probe_log_response(&stream->req, stream->quic->stream_id);
        set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY, 1);
        /* a priority response header lets the handler adjust the scheduling of this stream */
        if ((priority_header_index = h2o_find_header(&stream->req.res.headers, H2O_TOKEN_PRIORITY, -1)) != -1) {
            const h2o_header_t *header = &stream->req.res.headers.entries[priority_header_index];
            handle_priority_change(
                stream, header->value.base, header->value.len,
                stream->scheduler.priority /* omission of a parameter is disinterest to change (RFC 9218 Section 8) */);
        }
        break;
    }
    case H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY:
        assert(quicly_sendstate_is_open(&stream->quic->sendstate));
        break;
    case H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT:
        /* This protocol handler transitions to CLOSE_WAIT when the request side is being reset by the origin. But our client-side
         * implementations are capable of handling uni-directional close, therefore `do_send` might be invoked. The handler swallows
         * the input, and relies on eventual destruction of `h2o_req_t` to discard the generator. */
        return;
    default:
        h2o_fatal("logic flaw");
        break;
    }

    /* If vectors carrying response body are being provided, copy them, incrementing the reference count if possible (for future
     * retransmissions), as well as prepending a DATA frame header */
    h2o_vector_reserve(&stream->req.pool, &stream->sendbuf.vecs, stream->sendbuf.vecs.size + 1 + bufcnt);
    size_t dst_slot = stream->sendbuf.vecs.size + 1 /* reserve slot for DATA frame header */, payload_size = 0;
    for (size_t i = 0; i != bufcnt; ++i) {
        if (bufs[i].len == 0)
            continue;
        /* copy one body vector */
        payload_size += bufs[i].len;
        /* NOTE(review): every vector copied in this call gets the same entity_offset (final_body_size is bumped only after the
         * loop). If entity_offset is meant to be the per-vector byte offset within the body, vectors after the first would need
         * `final_body_size + payload_size - bufs[i].len` — confirm against on_send_shift/on_send_emit usage. */
        stream->sendbuf.vecs.entries[dst_slot++] = (struct st_h2o_http3_server_sendvec_t){
            .vec = bufs[i],
            .entity_offset = stream->sendbuf.final_body_size,
        };
    }
    if (payload_size != 0) {
        /* build DATA frame header */
        size_t header_size =
            flatten_data_frame_header(stream, stream->sendbuf.vecs.entries + stream->sendbuf.vecs.size, payload_size);
        /* update properties */
        stream->sendbuf.vecs.size = dst_slot;
        stream->sendbuf.final_body_size += payload_size;
        stream->sendbuf.final_size += header_size + payload_size;
    } else {
        assert(empty_payload_allowed || !"h2o_data must only be called when there is progress");
    }

    switch (send_state) {
    case H2O_SEND_STATE_IN_PROGRESS:
        break;
    case H2O_SEND_STATE_FINAL:
    case H2O_SEND_STATE_ERROR:
        /* TODO consider how to forward error, pending resolution of https://github.com/quicwg/base-drafts/issues/3300 */
        shutdown_by_generator(stream);
        break;
    }

    finalize_do_send(stream);
}
1702
1703
/**
 * `h2o_ostream_t` callback for emitting informational (1xx) responses; serializes the current response headers as a HEADERS
 * frame (with no datagram-flow-id) and schedules it for transmission. The stream state does not change — the final response
 * follows via `do_send`.
 */
static void do_send_informational(h2o_ostream_t *_ostr, h2o_req_t *_req)
{
    struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, ostr_final, _ostr);
    assert(&stream->req == _req);

    write_response(stream, h2o_iovec_init(NULL, 0));

    finalize_do_send(stream);
}
1712
1713
static quicly_error_t handle_priority_update_frame(struct st_h2o_http3_server_conn_t *conn,
1714
                                                   const h2o_http3_priority_update_frame_t *frame)
1715
0
{
1716
0
    if (frame->element_is_push)
1717
0
        return H2O_HTTP3_ERROR_GENERAL_PROTOCOL;
1718
1719
    /* obtain the stream being referred to (creating one if necessary), or return if the stream has been closed already */
1720
0
    quicly_stream_t *qs;
1721
0
    if (quicly_get_or_open_stream(conn->h3.super.quic, frame->element, &qs) != 0)
1722
0
        return H2O_HTTP3_ERROR_ID;
1723
0
    if (qs == NULL)
1724
0
        return 0;
1725
1726
    /* apply the changes */
1727
0
    struct st_h2o_http3_server_stream_t *stream = qs->data;
1728
0
    assert(stream != NULL);
1729
0
    stream->received_priority_update = 1;
1730
1731
0
    handle_priority_change(stream, frame->value.base, frame->value.len,
1732
0
                           h2o_absprio_default /* the frame communicates a complete set of parameters; RFC 9218 Section 7 */);
1733
1734
0
    return 0;
1735
0
}
1736
1737
/**
 * Handles one frame received on the client's control stream. The first frame must be SETTINGS (which also unblocks any
 * responses that were waiting on the peer's settings); thereafter PRIORITY_UPDATE frames are processed, duplicate SETTINGS is
 * a connection error, and unrecognized frame types are ignored. Any failure closes the whole connection.
 */
static void handle_control_stream_frame(h2o_http3_conn_t *_conn, uint64_t type, const uint8_t *payload, size_t len)
{
    struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, _conn);
    quicly_error_t err;
    const char *err_desc = NULL;

    if (!h2o_http3_has_received_settings(&conn->h3)) {
        /* the first frame on the control stream must be SETTINGS */
        if (type != H2O_HTTP3_FRAME_TYPE_SETTINGS) {
            err = H2O_HTTP3_ERROR_MISSING_SETTINGS;
            goto Fail;
        }
        if ((err = h2o_http3_handle_settings_frame(&conn->h3, payload, len, &err_desc)) != 0)
            goto Fail;
        assert(h2o_http3_has_received_settings(&conn->h3));
        /* now that the peer's settings are known, resume responses that were blocked on them (UDP tunnel setup) */
        while (!h2o_linklist_is_empty(&conn->streams_resp_settings_blocked)) {
            struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(
                struct st_h2o_http3_server_stream_t, link_resp_settings_blocked, conn->streams_resp_settings_blocked.next);
            h2o_linklist_unlink(&stream->link_resp_settings_blocked);
            do_send(&stream->ostr_final, &stream->req, NULL, 0, H2O_SEND_STATE_IN_PROGRESS);
        }
    } else {
        switch (type) {
        case H2O_HTTP3_FRAME_TYPE_SETTINGS:
            /* SETTINGS must not appear more than once */
            err = H2O_HTTP3_ERROR_FRAME_UNEXPECTED;
            err_desc = "unexpected SETTINGS frame";
            goto Fail;
        case H2O_HTTP3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST:
        case H2O_HTTP3_FRAME_TYPE_PRIORITY_UPDATE_PUSH: {
            h2o_http3_priority_update_frame_t frame;
            if ((err = h2o_http3_decode_priority_update_frame(&frame, type == H2O_HTTP3_FRAME_TYPE_PRIORITY_UPDATE_PUSH, payload,
                                                              len, &err_desc)) != 0)
                goto Fail;
            if ((err = handle_priority_update_frame(conn, &frame)) != 0) {
                err_desc = "invalid PRIORITY_UPDATE frame";
                goto Fail;
            }
        } break;
        default:
            /* unknown frame types on the control stream are ignored */
            break;
        }
    }

    return;
Fail:
    h2o_quic_close_connection(&conn->h3.super, err, err_desc);
}
1783
1784
/**
 * quicly `stream_open` callback: unidirectional streams are delegated to the generic HTTP/3 layer, while each client-initiated
 * bidirectional stream gets a freshly-allocated `st_h2o_http3_server_stream_t` wired up to start parsing a request. Always
 * returns 0.
 */
static quicly_error_t stream_open_cb(quicly_stream_open_t *self, quicly_stream_t *qs)
{
    static const quicly_stream_callbacks_t callbacks = {on_stream_destroy, on_send_shift, on_send_emit,
                                                        on_send_stop,      on_receive,    on_receive_reset};

    /* handling of unidirectional streams is not server-specific */
    if (quicly_stream_is_unidirectional(qs->stream_id)) {
        h2o_http3_on_create_unidirectional_stream(qs);
        return 0;
    }

    /* a server never initiates bidirectional streams */
    assert(quicly_stream_is_client_initiated(qs->stream_id));

    struct st_h2o_http3_server_conn_t *conn =
        H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, *quicly_get_data(qs->conn));

    /* create new stream and start handling the request */
    struct st_h2o_http3_server_stream_t *stream = h2o_mem_alloc(sizeof(*stream));
    stream->quic = qs;
    h2o_buffer_init(&stream->recvbuf.buf, &h2o_socket_buffer_prototype);
    /* the first frame on a request stream is expected to be HEADERS */
    stream->recvbuf.handle_input = handle_input_expect_headers;
    memset(&stream->sendbuf, 0, sizeof(stream->sendbuf));
    stream->state = H2O_HTTP3_SERVER_STREAM_STATE_RECV_HEADERS;
    stream->link = (h2o_linklist_t){NULL};
    stream->link_resp_settings_blocked = (h2o_linklist_t){NULL};
    /* 1xx emission is wired up only when the configuration permits sending informational responses */
    stream->ostr_final = (h2o_ostream_t){
        NULL, do_send, NULL,
        conn->super.ctx->globalconf->send_informational_mode == H2O_SEND_INFORMATIONAL_MODE_NONE ? NULL : do_send_informational};
    stream->scheduler.link = (h2o_linklist_t){NULL};
    stream->scheduler.priority = h2o_absprio_default;
    stream->scheduler.call_cnt = 0;

    /* boolean / bookkeeping flags all start cleared */
    stream->read_blocked = 0;
    stream->proceed_requested = 0;
    stream->proceed_while_sending = 0;
    stream->received_priority_update = 0;
    stream->req_disposed = 0;
    stream->req_streaming = 0;
    stream->req_body = NULL;

    h2o_init_request(&stream->req, &conn->super, NULL);
    stream->req.version = 0x0300; /* HTTP/3 */
    stream->req._ostr_top = &stream->ostr_final;

    /* link the h2o stream object into quicly */
    stream->quic->data = stream;
    stream->quic->callbacks = &callbacks;

    ++*get_state_counter(get_conn(stream), stream->state);
    h2o_conn_set_state(&get_conn(stream)->super, H2O_CONN_STATE_ACTIVE);

    return 0;
}
1836
1837
/* quicly hook invoked whenever the peer opens a new stream; dispatches to stream_open_cb above */
static quicly_stream_open_t on_stream_open = {stream_open_cb};
1838
1839
/**
 * Moves every stream that was parked as connection-flow-control-blocked back onto the active lists, for both the
 * unidirectional (bitmask-based) scheduler and the request-stream scheduler.
 */
static void unblock_conn_blocked_streams(struct st_h2o_http3_server_conn_t *conn)
{
    /* note: `conn_blocked` must be merged into `active` before being cleared */
    conn->scheduler.uni.active |= conn->scheduler.uni.conn_blocked;
    conn->scheduler.uni.conn_blocked = 0;
    req_scheduler_unblock_conn_blocked(&conn->scheduler.reqs, req_scheduler_compare_stream_id);
}
1845
1846
/**
 * quicly stream-scheduler callback: reports whether any stream of this connection has data pending. When the connection is not
 * saturated by flow control, previously conn-blocked streams are re-activated first so that they are taken into account.
 */
static int scheduler_can_send(quicly_stream_scheduler_t *sched, quicly_conn_t *qc, int conn_is_saturated)
{
    struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, *quicly_get_data(qc));

    if (!conn_is_saturated) {
        /* not saturated, activate streams marked as being conn-blocked */
        unblock_conn_blocked_streams(conn);
    } else {
        /* TODO lazily move the active request and unidirectional streams to conn_blocked.  Not doing so results in at most one
         * spurious call to quicly_send. */
    }

    /* data is pending if any unidirectional stream is active, or if some urgency level of the request scheduler is populated */
    return conn->scheduler.uni.active != 0 ||
           conn->scheduler.reqs.active.smallest_urgency < H2O_ABSPRIO_NUM_URGENCY_LEVELS;
}
1865
1866
/**
 * stream scheduler callback invoked by quicly to emit STREAM frames; unidirectional (control / QPACK) streams are drained before
 * request streams, and request streams are picked by smallest urgency (incremental ones from the `low` list)
 */
static quicly_error_t scheduler_do_send(quicly_stream_scheduler_t *sched, quicly_conn_t *qc, quicly_send_context_t *s)
{
#define HAS_DATA_TO_SEND()                                                                                                         \
    (conn->scheduler.uni.active != 0 || conn->scheduler.reqs.active.smallest_urgency < H2O_ABSPRIO_NUM_URGENCY_LEVELS)

    struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, *quicly_get_data(qc));
    int had_data_to_send = HAS_DATA_TO_SEND();
    quicly_error_t ret = 0;

    while (quicly_can_send_data(conn->h3.super.quic, s)) {
        /* The strategy is:
         *
         * 1. dequeue the first active stream
         * 2. link the stream to the conn_blocked list, if nothing can be sent for the stream due to the connection being capped
         * 3. otherwise, send
         * 4. enqueue to the appropriate place
         */
        if (conn->scheduler.uni.active != 0) {
            /* unidirectional streams take precedence; `stream_offsets` lists the egress streams that may be flagged in the
             * `uni.active` bitmask */
            static const ptrdiff_t stream_offsets[] = {
                offsetof(struct st_h2o_http3_server_conn_t, h3._control_streams.egress.control),
                offsetof(struct st_h2o_http3_server_conn_t, h3._control_streams.egress.qpack_encoder),
                offsetof(struct st_h2o_http3_server_conn_t, h3._control_streams.egress.qpack_decoder)};
            /* 1. obtain pointer to the offending stream */
            struct st_h2o_http3_egress_unistream_t *stream = NULL;
            size_t i;
            for (i = 0; i != sizeof(stream_offsets) / sizeof(stream_offsets[0]); ++i) {
                stream = *(void **)((char *)conn + stream_offsets[i]);
                if ((conn->scheduler.uni.active & (1 << stream->quic->stream_id)) != 0)
                    break;
            }
            assert(i != sizeof(stream_offsets) / sizeof(stream_offsets[0]) && "we should have found one stream");
            /* 2. move to the conn_blocked list if necessary */
            if (quicly_is_blocked(conn->h3.super.quic) && !quicly_stream_can_send(stream->quic, 0)) {
                conn->scheduler.uni.active &= ~(1 << stream->quic->stream_id);
                conn->scheduler.uni.conn_blocked |= 1 << stream->quic->stream_id;
                continue;
            }
            /* 3. send */
            if ((ret = quicly_send_stream(stream->quic, s)) != 0)
                goto Exit;
            /* 4. update scheduler state: clear the active bit, then re-set it (or the conn_blocked bit) if the stream still has
             *    something to send */
            conn->scheduler.uni.active &= ~(1 << stream->quic->stream_id);
            if (quicly_stream_can_send(stream->quic, 1)) {
                uint16_t *slot = &conn->scheduler.uni.active;
                if (quicly_is_blocked(conn->h3.super.quic) && !quicly_stream_can_send(stream->quic, 0))
                    slot = &conn->scheduler.uni.conn_blocked;
                *slot |= 1 << stream->quic->stream_id;
            }
        } else if (conn->scheduler.reqs.active.smallest_urgency < H2O_ABSPRIO_NUM_URGENCY_LEVELS) {
            /* 1. obtain pointer to the offending stream (non-incremental `high` entries are preferred over `low` ones) */
            h2o_linklist_t *anchor = &conn->scheduler.reqs.active.urgencies[conn->scheduler.reqs.active.smallest_urgency].high;
            if (h2o_linklist_is_empty(anchor)) {
                anchor = &conn->scheduler.reqs.active.urgencies[conn->scheduler.reqs.active.smallest_urgency].low;
                assert(!h2o_linklist_is_empty(anchor));
            }
            struct st_h2o_http3_server_stream_t *stream =
                H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, scheduler.link, anchor->next);
            /* 2. link to the conn_blocked list if necessary */
            if (quicly_is_blocked(conn->h3.super.quic) && !quicly_stream_can_send(stream->quic, 0)) {
                req_scheduler_conn_blocked(&conn->scheduler.reqs, &stream->scheduler);
                continue;
            }
            /* 3. send */
            if ((ret = quicly_send_stream(stream->quic, s)) != 0)
                goto Exit;
            ++stream->scheduler.call_cnt;
            /* record response timestamps once the entire response has been handed to quicly */
            if (stream->quic->sendstate.size_inflight == stream->quic->sendstate.final_size &&
                h2o_timeval_is_null(&stream->req.timestamps.response_end_at)) {
                stream->req.timestamps.response_end_at = h2o_gettimeofday(stream->req.conn->ctx->loop);
                if (h2o_timeval_is_null(&stream->req.timestamps.response_start_at)) {
                    stream->req.timestamps.response_start_at = stream->req.timestamps.response_end_at;
                }
            }
            /* 4. invoke h2o_proceed_request synchronously, so that we could obtain additional data for the current (i.e. highest)
             *    stream. */
            if (stream->proceed_while_sending) {
                assert(stream->proceed_requested);
                h2o_proceed_response(&stream->req);
                stream->proceed_while_sending = 0;
            }
            /* 5. prepare for next */
            if (quicly_stream_can_send(stream->quic, 1)) {
                if (quicly_is_blocked(conn->h3.super.quic) && !quicly_stream_can_send(stream->quic, 0)) {
                    /* capped by connection-level flow control, move the stream to conn-blocked */
                    req_scheduler_conn_blocked(&conn->scheduler.reqs, &stream->scheduler);
                } else {
                    /* schedule for next emission */
                    req_scheduler_setup_for_next(&conn->scheduler.reqs, &stream->scheduler, req_scheduler_compare_stream_id);
                }
            } else {
                /* nothing to send at this moment */
                req_scheduler_deactivate(&conn->scheduler.reqs, &stream->scheduler);
            }
        } else {
            /* nothing is active; stop emitting */
            break;
        }
    }

Exit:
    /* Send a resumption token if we've sent all available data and there is still room to send something, but not too frequently.
     * We send a token every 200KB at most; the threshold has been chosen so that the additional overhead would be ~0.1% assuming a
     * token size of 200 bytes (in reality, one NEW_TOKEN frame will uses 56 bytes). */
    if (ret == 0 && had_data_to_send && !HAS_DATA_TO_SEND()) {
        uint64_t max_data_sent;
        quicly_get_max_data(conn->h3.super.quic, NULL, &max_data_sent, NULL);
        if (max_data_sent >= conn->skip_jumpstart_token_until) {
            quicly_send_resumption_token(conn->h3.super.quic);
            conn->skip_jumpstart_token_until = max_data_sent + 200000;
        }
    }

    return ret;

#undef HAS_DATA_TO_SEND
}
1981
1982
/**
 * stream scheduler callback that synchronizes this module's per-connection scheduler state with the sendability of `qs`
 */
static void scheduler_update_state(struct st_quicly_stream_scheduler_t *sched, quicly_stream_t *qs)
{
    struct st_h2o_http3_server_conn_t *conn =
        H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, *quicly_get_data(qs->conn));

    /* determine the new state of the stream: sendable at all, and if so, whether it is capped by connection-level flow control */
    int can_send = quicly_stream_can_send(qs, 1);
    int conn_blocked = can_send && quicly_is_blocked(conn->h3.super.quic) && !quicly_stream_can_send(qs, 0);

    if (quicly_stream_is_unidirectional(qs->stream_id)) {
        /* unidirectional streams are tracked in 16-bit masks keyed by stream id */
        assert(qs->stream_id < sizeof(uint16_t) * 8);
        uint16_t mask = (uint16_t)1 << qs->stream_id;
        if (!can_send) {
            /* deactivate */
            conn->scheduler.uni.active &= ~mask;
            conn->scheduler.uni.conn_blocked &= ~mask;
        } else if (conn_blocked) {
            conn->scheduler.uni.active &= ~mask;
            conn->scheduler.uni.conn_blocked |= mask;
        } else {
            /* activate */
            conn->scheduler.uni.active |= mask;
            conn->scheduler.uni.conn_blocked &= ~mask;
        }
    } else {
        struct st_h2o_http3_server_stream_t *stream = qs->data;
        /* skip updates while scheduler_do_send is about to call h2o_proceed_response for this stream */
        if (stream->proceed_while_sending)
            return;
        if (!can_send) {
            req_scheduler_deactivate(&conn->scheduler.reqs, &stream->scheduler);
        } else if (conn_blocked) {
            req_scheduler_conn_blocked(&conn->scheduler.reqs, &stream->scheduler);
        } else {
            req_scheduler_activate(&conn->scheduler.reqs, &stream->scheduler, req_scheduler_compare_stream_id);
        }
    }
}
2032
2033
static quicly_stream_scheduler_t scheduler = {scheduler_can_send, scheduler_do_send, scheduler_update_state};
2034
2035
/**
 * quicly callback invoked when a QUIC DATAGRAM frame is received; decodes the HTTP/3 datagram and forwards the payload to the
 * stream registered for the flow id, dropping the datagram if no such stream (or forwarder) exists
 */
static void datagram_frame_receive_cb(quicly_receive_datagram_frame_t *self, quicly_conn_t *quic, ptls_iovec_t datagram)
{
    struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, *quicly_get_data(quic));
    uint64_t flow_id;
    h2o_iovec_t payload;

    /* decode; a malformed frame is a connection error */
    if ((flow_id = h2o_http3_decode_h3_datagram(&payload, datagram.base, datagram.len)) == UINT64_MAX) {
        h2o_quic_close_connection(&conn->h3.super, H2O_HTTP3_ERROR_GENERAL_PROTOCOL, "invalid DATAGRAM frame");
        return;
    }

    /* find stream; silently drop the datagram if the flow id is unknown */
    khiter_t iter = kh_get(stream, conn->datagram_flows, flow_id);
    if (iter == kh_end(conn->datagram_flows))
        return;
    struct st_h2o_http3_server_stream_t *stream = kh_val(conn->datagram_flows, iter);

    /* drop this datagram if req.forward_datagram.write_ has been reset; asserting non-NULL here would contradict this deliberate
     * best-effort drop (the assert would abort debug builds before the guard could run) */
    if (stream->req.forward_datagram.write_ == NULL)
        return;

    /* forward */
    stream->req.forward_datagram.write_(&stream->req, &payload, 1);
}
2061
2062
static quicly_receive_datagram_frame_t on_receive_datagram_frame = {datagram_frame_receive_cb};
2063
2064
/**
 * destroy callback of the HTTP/3 connection; folds the connection's quicly stats into the context-wide counters, disposes all
 * connection-owned resources, and asserts that no stream-related state leaked
 */
static void on_h3_destroy(h2o_quic_conn_t *h3_)
{
    h2o_http3_conn_t *h3 = (h2o_http3_conn_t *)h3_;
    struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, h3);
    quicly_stats_t stats;

    H2O_PROBE_CONN0(H3S_DESTROY, &conn->super);
    H2O_LOG_CONN(h3s_destroy, &conn->super, {});

    /* accumulate stats of this connection into the per-context counters before the quicly connection is disposed */
    if (quicly_get_stats(h3_->quic, &stats) == 0) {
#define ACC(fld, _unused) conn->super.ctx->quic_stats.quicly.fld += stats.fld;
        QUICLY_STATS_FOREACH_COUNTERS(ACC);
#undef ACC
        /* high-watermark counter: retain the larger value rather than accumulating */
        if (conn->super.ctx->quic_stats.num_sentmap_packets_largest < stats.num_sentmap_packets_largest)
            conn->super.ctx->quic_stats.num_sentmap_packets_largest = stats.num_sentmap_packets_largest;
    }

    /* unlink and dispose */
    if (h2o_timer_is_linked(&conn->timeout))
        h2o_timer_unlink(&conn->timeout);
    if (h2o_timer_is_linked(&conn->_graceful_shutdown_timeout))
        h2o_timer_unlink(&conn->_graceful_shutdown_timeout);
    h2o_http3_dispose_conn(&conn->h3);
    kh_destroy(stream, conn->datagram_flows);

    /* check consistency post-disposal; all streams must have been destroyed by now */
    assert(conn->num_streams.recv_headers == 0);
    assert(conn->num_streams.req_pending == 0);
    assert(conn->num_streams.send_headers == 0);
    assert(conn->num_streams.send_body == 0);
    assert(conn->num_streams.close_wait == 0);
    assert(conn->num_streams_req_streaming == 0);
    assert(conn->num_streams_tunnelling == 0);
    assert(h2o_linklist_is_empty(&conn->delayed_streams.recv_body_blocked));
    assert(h2o_linklist_is_empty(&conn->delayed_streams.req_streaming));
    assert(h2o_linklist_is_empty(&conn->delayed_streams.pending));
    assert(h2o_linklist_is_empty(&conn->streams_resp_settings_blocked));
    assert(conn->scheduler.reqs.active.smallest_urgency >= H2O_ABSPRIO_NUM_URGENCY_LEVELS);
    assert(h2o_linklist_is_empty(&conn->scheduler.reqs.conn_blocked));

    /* free memory */
    h2o_destroy_connection(&conn->super);
}
2107
2108
void h2o_http3_server_init_context(h2o_context_t *h2o, h2o_quic_ctx_t *ctx, h2o_loop_t *loop, h2o_socket_t *sock,
2109
                                   quicly_context_t *quic, quicly_cid_plaintext_t *next_cid, h2o_quic_accept_cb acceptor,
2110
                                   h2o_quic_notify_connection_update_cb notify_conn_update, uint8_t use_gso)
2111
0
{
2112
0
    return h2o_quic_init_context(ctx, loop, sock, quic, next_cid, acceptor, notify_conn_update, use_gso, &h2o->quic_stats);
2113
0
}
2114
2115
/**
 * Accepts a new HTTP/3 connection from an Initial packet. Returns the new connection, `&h2o_http3_accept_conn_closed` if the
 * connection was closed while sending the first flight, `&h2o_quic_accept_conn_decryption_failed` on decryption failure, or NULL
 * on other accept errors.
 */
h2o_http3_conn_t *h2o_http3_server_accept(h2o_http3_server_ctx_t *ctx, quicly_address_t *destaddr, quicly_address_t *srcaddr,
                                          quicly_decoded_packet_t *packet, quicly_address_token_plaintext_t *address_token,
                                          const h2o_http3_conn_callbacks_t *h3_callbacks)
{
    /* callbacks of `h2o_conn_t`, shared by all HTTP/3 server connections */
    static const h2o_conn_callbacks_t conn_callbacks = {
        .get_sockname = get_sockname,
        .get_peername = get_peername,
        .get_ptls = get_ptls,
        .get_ssl_server_name = get_ssl_server_name,
        .log_state = log_state,
        .get_req_id = get_req_id,
        .close_idle_connection = close_idle_connection,
        .foreach_request = foreach_request,
        .request_shutdown = initiate_graceful_shutdown,
        .num_reqs_inflight = num_reqs_inflight,
        .get_tracer = get_tracer,
        .log_ = {{
            .extensible_priorities = log_extensible_priorities,
            .transport =
                {
                    .cc_name = log_cc_name,
                    .delivery_rate = log_delivery_rate,
                },
            .ssl =
                {
                    .protocol_version = log_tls_protocol_version,
                    .session_reused = log_session_reused,
                    .cipher = log_cipher,
                    .cipher_bits = log_cipher_bits,
                    .session_id = log_session_id,
                    .negotiated_protocol = log_negotiated_protocol,
                    .ech_config_id = log_ech_config_id,
                    .ech_kem = log_ech_kem,
                    .ech_cipher = log_ech_cipher,
                    .ech_cipher_bits = log_ech_cipher_bits,
                },
            .http3 =
                {
                    .stream_id = log_stream_id,
                    .quic_stats = log_quic_stats,
                    .quic_version = log_quic_version,
                },
        }},
    };

    /* setup the structure; everything past `conn->super` is zero-cleared then initialized field by field */
    struct st_h2o_http3_server_conn_t *conn = (void *)h2o_create_connection(
        sizeof(*conn), ctx->accept_ctx->ctx, ctx->accept_ctx->hosts, h2o_gettimeofday(ctx->accept_ctx->ctx->loop), &conn_callbacks);
    memset((char *)conn + sizeof(conn->super), 0, sizeof(*conn) - sizeof(conn->super));

    h2o_http3_init_conn(&conn->h3, &ctx->super, h3_callbacks, &ctx->qpack, H2O_MAX_REQLEN);
    conn->handshake_properties = (ptls_handshake_properties_t){{{{NULL}}}};
    h2o_linklist_init_anchor(&conn->delayed_streams.recv_body_blocked);
    h2o_linklist_init_anchor(&conn->delayed_streams.req_streaming);
    h2o_linklist_init_anchor(&conn->delayed_streams.pending);
    h2o_linklist_init_anchor(&conn->streams_resp_settings_blocked);
    h2o_timer_init(&conn->timeout, run_delayed);
    memset(&conn->num_streams, 0, sizeof(conn->num_streams));
    conn->num_streams_req_streaming = 0;
    conn->num_streams_tunnelling = 0;
    req_scheduler_init(&conn->scheduler.reqs);
    conn->scheduler.uni.active = 0;
    conn->scheduler.uni.conn_blocked = 0;
    conn->datagram_flows = kh_init(stream);
    conn->skip_jumpstart_token_until =
        quicly_cc_calc_initial_cwnd(ctx->super.quic->initcwnd_packets, ctx->super.quic->transport_params.max_udp_payload_size) *
        4; /* sending jumpstart token is meaningless until CWND has grown 2x of IW, which translates to 4x data being sent */

    /* accept connection */
    assert(ctx->super.next_cid != NULL && "to set next_cid, h2o_quic_set_context_identifier must be called");
    quicly_conn_t *qconn;
    quicly_error_t accept_ret = quicly_accept(
        &qconn, ctx->super.quic, &destaddr->sa, &srcaddr->sa, packet, address_token, ctx->super.next_cid,
        &conn->handshake_properties,
        &conn->h3 /* back pointer is set up here so that callbacks being called while parsing ClientHello can refer to `conn` */);
    if (accept_ret != 0) {
        /* accept failed; unwind everything initialized above */
        h2o_http3_conn_t *ret = NULL;
        if (accept_ret == QUICLY_ERROR_DECRYPTION_FAILED)
            ret = (h2o_http3_conn_t *)&h2o_quic_accept_conn_decryption_failed;
        h2o_http3_dispose_conn(&conn->h3);
        kh_destroy(stream, conn->datagram_flows);
        h2o_destroy_connection(&conn->super);
        return ret;
    }
    if (ctx->super.quic_stats != NULL) {
        ++ctx->super.quic_stats->packet_processed;
    }
    ++ctx->super.next_cid->master_id; /* FIXME check overlap */
    h2o_http3_setup(&conn->h3, qconn);

    H2O_PROBE_CONN(H3S_ACCEPT, &conn->super, &conn->super, conn->h3.super.quic, h2o_conn_get_uuid(&conn->super));
    H2O_LOG_CONN(h3s_accept, &conn->super, {
        PTLS_LOG_ELEMENT_PTR(conn, &conn->super);
        PTLS_LOG_ELEMENT_PTR(quic, conn->h3.super.quic);
        PTLS_LOG_ELEMENT_SAFESTR(conn_uuid, h2o_conn_get_uuid(&conn->super));
    });

    if (!h2o_quic_send(&conn->h3.super)) {
        /* When `h2o_quic_send` fails, it destroys the connection object. */
        return &h2o_http3_accept_conn_closed;
    }

    return &conn->h3;
}
2219
2220
/**
 * applies the server's HTTP/3 configuration onto a quicly context: transport parameters, callbacks, and a sanity check of the
 * configured cipher suites
 */
void h2o_http3_server_amend_quicly_context(h2o_globalconf_t *conf, quicly_context_t *quic)
{
    /* install the server-side callbacks */
    quic->stream_open = &on_stream_open;
    quic->stream_scheduler = &scheduler;
    quic->receive_datagram_frame = &on_receive_datagram_frame;

    /* transport parameters and ack behavior derived from the global configuration */
    quic->transport_params.max_data =
        conf->http3.active_stream_window_size; /* set to a size that does not block the unblocked request stream */
    quic->transport_params.max_streams_uni = 10;
    quic->transport_params.max_stream_data.bidi_remote = h2o_http3_calc_min_flow_control_size(H2O_MAX_REQLEN);
    quic->transport_params.max_stream_data.uni = h2o_http3_calc_min_flow_control_size(H2O_MAX_REQLEN);
    quic->transport_params.max_idle_timeout = conf->http3.idle_timeout;
    quic->transport_params.min_ack_delay_usec = conf->http3.allow_delayed_ack ? 0 : UINT64_MAX;
    quic->transport_params.max_datagram_frame_size = 1500; /* accept DATAGRAM frames; let the sender determine MTU, instead of
                                                            * being potentially too restrictive */
    quic->ack_frequency = conf->http3.ack_frequency;

    /* every configured cipher must provide a CTR cipher, as that is what QUIC header protection uses */
    for (size_t idx = 0; quic->tls->cipher_suites[idx] != NULL; ++idx)
        assert(quic->tls->cipher_suites[idx]->aead->ctr_cipher != NULL &&
               "for header protection, QUIC ciphers MUST provide CTR mode");
}
2240
2241
/**
 * maps a quicly connection back to the `h2o_conn_t` that owns it
 */
h2o_conn_t *h2o_http3_get_connection(quicly_conn_t *quic)
{
    void *h3ptr = *quicly_get_data(quic);
    struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, h3ptr);

    /* this assertion is most likely to fire if the provided QUIC connection does not represent a server-side HTTP connection */
    assert(conn->h3.super.quic == NULL || conn->h3.super.quic == quic);

    return &conn->super;
}
2250
2251
/**
 * final stage of graceful shutdown: timer callback that forcefully closes connections still open after the second GOAWAY and the
 * configured grace period
 */
static void graceful_shutdown_close_straggler(h2o_timer_t *entry)
{
    struct st_h2o_http3_server_conn_t *conn =
        H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, _graceful_shutdown_timeout, entry);

    /* We've sent two GOAWAY frames, close the remaining connections */
    h2o_quic_close_connection(&conn->h3.super, 0, "shutting down");

    /* clearing `cb` marks the graceful-shutdown timer as no longer in use */
    conn->_graceful_shutdown_timeout.cb = NULL;
}
2261
2262
/**
 * second stage of graceful shutdown: resends GOAWAY with the final stream id, then (if configured) arms a last timer that closes
 * stragglers
 */
static void graceful_shutdown_resend_goaway(h2o_timer_t *entry)
{
    struct st_h2o_http3_server_conn_t *conn =
        H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, _graceful_shutdown_timeout, entry);

    /* HTTP/3 draft section 5.2.8 --
     * "After allowing time for any in-flight requests or pushes to arrive, the endpoint can send another GOAWAY frame
     * indicating which requests or pushes it might accept before the end of the connection.
     * This ensures that a connection can be cleanly shut down without losing requests. */

    /* nothing to do once the connection is already half-closed or no longer connected */
    if (conn->h3.state >= H2O_HTTP3_CONN_STATE_HALF_CLOSED || quicly_get_state(conn->h3.super.quic) != QUICLY_STATE_CONNECTED)
        return;

    quicly_stream_id_t next_stream_id = quicly_get_remote_next_stream_id(conn->h3.super.quic, 0 /* == bidi */);
    /* Section 5.2-1: "This identifier MAY be zero if no requests or pushes were processed."" */
    quicly_stream_id_t max_stream_id = next_stream_id >= 4 ? next_stream_id - 4 : 0 /* we haven't received any stream yet */;
    h2o_http3_send_goaway_frame(&conn->h3, max_stream_id);
    conn->h3.state = H2O_HTTP3_CONN_STATE_HALF_CLOSED;

    /* After waiting a second, we still have an active connection. If configured, wait one
     * final timeout before closing the connection */
    if (conn->super.ctx->globalconf->http3.graceful_shutdown_timeout > 0) {
        conn->_graceful_shutdown_timeout.cb = graceful_shutdown_close_straggler;
        h2o_timer_link(conn->super.ctx->loop, conn->super.ctx->globalconf->http3.graceful_shutdown_timeout,
                       &conn->_graceful_shutdown_timeout);
    } else {
        conn->_graceful_shutdown_timeout.cb = NULL;
    }
}
2289
2290
/**
 * `close_idle_connection` callback of `h2o_conn_callbacks_t`; for HTTP/3, closing an idle connection is implemented as a graceful
 * shutdown of that single connection
 */
static void close_idle_connection(h2o_conn_t *_conn)
{
    initiate_graceful_shutdown(_conn);
}
2294
2295
/**
 * starts a graceful shutdown of the connection: sends the first GOAWAY frame immediately, then arms a 1-second timer that resends
 * GOAWAY with the final stream id (see `graceful_shutdown_resend_goaway`)
 */
static void initiate_graceful_shutdown(h2o_conn_t *_conn)
{
    h2o_conn_set_state(_conn, H2O_CONN_STATE_SHUTDOWN);

    struct st_h2o_http3_server_conn_t *conn = (void *)_conn;
    /* `cb` being NULL indicates that a graceful shutdown is not yet in progress; it must not be initiated twice */
    assert(conn->_graceful_shutdown_timeout.cb == NULL);
    conn->_graceful_shutdown_timeout.cb = graceful_shutdown_resend_goaway;

    h2o_http3_send_shutdown_goaway_frame(&conn->h3);

    /* resend GOAWAY after 1 second, giving in-flight requests time to arrive */
    h2o_timer_link(conn->super.ctx->loop, 1000, &conn->_graceful_shutdown_timeout);
}
2307
2308
/**
 * carries the user-supplied callback and its opaque argument through `quicly_foreach_stream` (see `foreach_request`)
 */
struct foreach_request_ctx {
    int (*cb)(h2o_req_t *req, void *cbdata);
    void *cbdata;
};
2312
2313
/**
 * per-stream callback for `quicly_foreach_stream`; invokes the user callback for each live request stream, returning its value
 * (0 is returned for non-request or close-wait streams, letting the iteration continue)
 */
static int64_t foreach_request_per_conn(void *_ctx, quicly_stream_t *qs)
{
    struct foreach_request_ctx *ctx = _ctx;

    /* skip if the stream is not a request stream, i.e., not a client-initiated bidirectional stream (TODO handle push?) */
    if (!quicly_stream_is_client_initiated(qs->stream_id) || quicly_stream_is_unidirectional(qs->stream_id))
        return 0;

    struct st_h2o_http3_server_stream_t *stream = qs->data;
    assert(stream->quic == qs);

    /* skip streams already past the request lifecycle */
    if (stream->state == H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT)
        return 0;

    return ctx->cb(&stream->req, ctx->cbdata);
}
2328
2329
/**
 * `foreach_request` callback of `h2o_conn_callbacks_t`; applies `cb` to every live request of the connection
 */
static int foreach_request(h2o_conn_t *_conn, int (*cb)(h2o_req_t *req, void *cbdata), void *cbdata)
{
    struct st_h2o_http3_server_conn_t *conn = (void *)_conn;
    struct foreach_request_ctx foreach_ctx;

    foreach_ctx.cb = cb;
    foreach_ctx.cbdata = cbdata;
    quicly_foreach_stream(conn->h3.super.quic, &foreach_ctx, foreach_request_per_conn);

    return 0;
}
2337
2338
const h2o_http3_conn_callbacks_t H2O_HTTP3_CONN_CALLBACKS = {{on_h3_destroy}, handle_control_stream_frame};