/src/h2o/lib/http3/server.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2018 Fastly, Kazuho Oku |
3 | | * |
4 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
5 | | * of this software and associated documentation files (the "Software"), to |
6 | | * deal in the Software without restriction, including without limitation the |
7 | | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
8 | | * sell copies of the Software, and to permit persons to whom the Software is |
9 | | * furnished to do so, subject to the following conditions: |
10 | | * |
11 | | * The above copyright notice and this permission notice shall be included in |
12 | | * all copies or substantial portions of the Software. |
13 | | * |
14 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
17 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
18 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
19 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
20 | | * IN THE SOFTWARE. |
21 | | */ |
22 | | #include <sys/socket.h> |
23 | | #include "khash.h" |
24 | | #include "h2o/absprio.h" |
25 | | #include "h2o/http3_common.h" |
26 | | #include "h2o/http3_server.h" |
27 | | #include "h2o/http3_internal.h" |
28 | | #include "./../probes_.h" |
29 | | |
30 | | /** |
31 | | * the scheduler |
32 | | */ |
33 | | struct st_h2o_http3_req_scheduler_t { |
34 | | struct { |
35 | | struct { |
36 | | h2o_linklist_t high; |
37 | | h2o_linklist_t low; |
38 | | } urgencies[H2O_ABSPRIO_NUM_URGENCY_LEVELS]; |
39 | | size_t smallest_urgency; |
40 | | } active; |
41 | | h2o_linklist_t conn_blocked; |
42 | | }; |
43 | | |
44 | | /** |
45 | | * per-stream scheduler state; an entry registered to st_h2o_http3_req_scheduler_t |
46 | | */ |
47 | | struct st_h2o_http3_req_scheduler_node_t { |
48 | | h2o_linklist_t link; |
49 | | h2o_absprio_t priority; |
50 | | uint64_t call_cnt; |
51 | | }; |
52 | | |
53 | | /** |
54 | | * callback used to compare precedence of the entries within the same urgency level (e.g., by comparing stream IDs) |
55 | | */ |
56 | | typedef int (*h2o_http3_req_scheduler_compare_cb)(struct st_h2o_http3_req_scheduler_t *sched, |
57 | | const struct st_h2o_http3_req_scheduler_node_t *x, |
58 | | const struct st_h2o_http3_req_scheduler_node_t *y); |
59 | | |
60 | | /** |
61 | | * Once the size of the request body being received exceeds this limit, streaming mode will be used (if possible), and the |
62 | | * concurrency of such requests will be limited to one per connection. This is set to 1 to avoid blocking requests that send |
63 | | * small payloads without a FIN, as well as to have parity with HTTP/2. |
64 | | */ |
65 | 0 | #define H2O_HTTP3_REQUEST_BODY_MIN_BYTES_TO_BLOCK 1 |
66 | | |
67 | | enum h2o_http3_server_stream_state { |
68 | | /** |
69 | | * receiving headers |
70 | | */ |
71 | | H2O_HTTP3_SERVER_STREAM_STATE_RECV_HEADERS, |
72 | | /** |
73 | | * receiving request body (runs concurrently) |
74 | | */ |
75 | | H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BEFORE_BLOCK, |
76 | | /** |
77 | | * blocked, waiting to be unblocked one by one (either in streaming mode or in non-streaming mode) |
78 | | */ |
79 | | H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BLOCKED, |
80 | | /** |
81 | | * in non-streaming mode, receiving body |
82 | | */ |
83 | | H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_UNBLOCKED, |
84 | | /** |
85 | | * in non-streaming mode, waiting for the request to be processed |
86 | | */ |
87 | | H2O_HTTP3_SERVER_STREAM_STATE_REQ_PENDING, |
88 | | /** |
89 | | * request has been processed, waiting for the response headers |
90 | | */ |
91 | | H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS, |
92 | | /** |
93 | | * sending body (the generator MAY have closed, but the transmission to the client is still ongoing) |
94 | | */ |
95 | | H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY, |
96 | | /** |
97 | | * all data has been sent and ACKed, waiting for the transport stream to close (req might be disposed when entering this state) |
98 | | */ |
99 | | H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT |
100 | | }; |
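The states above are declared in the order a stream progresses through them; the code relies on that ordering both for range comparisons (e.g., `stream->state < H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT`) and for indexing the per-state counters defined below. A minimal sketch, not part of the file, of the kind of check the ordering permits:

    /* illustration only: valid because of the declaration order of the states */
    static inline int stream_is_sending(enum h2o_http3_server_stream_state s)
    {
        return H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS <= s && s <= H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY;
    }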
101 | | |
102 | | struct st_h2o_http3_server_stream_t; |
103 | | KHASH_MAP_INIT_INT64(stream, struct st_h2o_http3_server_stream_t *) |
104 | | |
105 | | struct st_h2o_http3_server_conn_t { |
106 | | h2o_conn_t super; |
107 | | h2o_http3_conn_t h3; |
108 | | ptls_handshake_properties_t handshake_properties; |
109 | | /** |
110 | | * link-list of pending requests using st_h2o_http3_server_stream_t::link |
111 | | */ |
112 | | struct { |
113 | | /** |
114 | | * holds streams in the RECV_BODY_BLOCKED state. They are promoted one by one to the RECV_BODY_UNBLOCKED state. |
115 | | */ |
116 | | h2o_linklist_t recv_body_blocked; |
117 | | /** |
118 | | * holds streams that are in request streaming mode. |
119 | | */ |
120 | | h2o_linklist_t req_streaming; |
121 | | /** |
122 | | * holds streams in the REQ_PENDING state, or in the RECV_BODY_UNBLOCKED state and using streaming (i.e., write_req.cb != NULL). |
123 | | */ |
124 | | h2o_linklist_t pending; |
125 | | } delayed_streams; |
126 | | /** |
127 | | * responses blocked on the peer's SETTINGS frame that is yet to arrive (e.g., CONNECT-UDP requests waiting for SETTINGS to |
128 | | * see if datagram-flow-id can be sent). There is no separate state for streams linked here, because these streams are |
129 | | * technically indistinguishable from those that are currently queued by the filters after `h2o_send` is called. |
130 | | */ |
131 | | h2o_linklist_t streams_resp_settings_blocked; |
132 | | /** |
133 | | * next application-level timeout |
134 | | */ |
135 | | h2o_timer_t timeout; |
136 | | /** |
137 | | * counters (the order MUST match that of h2o_http3_server_stream_state; they are accessed by index via counters[]) |
138 | | */ |
139 | | union { |
140 | | struct { |
141 | | uint32_t recv_headers; |
142 | | uint32_t recv_body_before_block; |
143 | | uint32_t recv_body_blocked; |
144 | | uint32_t recv_body_unblocked; |
145 | | uint32_t req_pending; |
146 | | uint32_t send_headers; |
147 | | uint32_t send_body; |
148 | | uint32_t close_wait; |
149 | | }; |
150 | | uint32_t counters[1]; |
151 | | } num_streams; |
152 | | /** |
153 | | * Number of streams in request streaming mode. Such streams can be in any of the SEND_HEADERS, SEND_BODY, CLOSE_WAIT states. |
154 | | */ |
155 | | uint32_t num_streams_req_streaming; |
156 | | /** |
157 | | * number of streams in tunneling mode |
158 | | */ |
159 | | uint32_t num_streams_tunnelling; |
160 | | /** |
161 | | * scheduler |
162 | | */ |
163 | | struct { |
164 | | /** |
165 | | * States for request streams. |
166 | | */ |
167 | | struct st_h2o_http3_req_scheduler_t reqs; |
168 | | /** |
169 | | * States for unidirectional streams. Each element is a bit vector in which the slot for each stream is 1 << stream_id. |
170 | | */ |
171 | | struct { |
172 | | uint16_t active; |
173 | | uint16_t conn_blocked; |
174 | | } uni; |
175 | | } scheduler; |
176 | | /** |
177 | | * stream map used for datagram flows |
178 | | * TODO: Get rid of this structure once we drop support for masque draft-03; RFC 9297 uses the quarter stream ID instead of |
179 | | * dynamically mapping streams to flow IDs. |
180 | | */ |
181 | | khash_t(stream) * datagram_flows; |
182 | | /** |
183 | | * timeout entry used for graceful shutdown |
184 | | */ |
185 | | h2o_timer_t _graceful_shutdown_timeout; |
186 | | }; |
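The `scheduler.uni` bit vectors above track unidirectional streams by stream ID, one bit per stream. A minimal sketch using hypothetical helper names, assuming the stream ID is small enough to fit the 16-bit vector:

    /* hypothetical helpers illustrating the 1 << stream_id encoding (not part of this file) */
    static inline void uni_mark_active(struct st_h2o_http3_server_conn_t *conn, quicly_stream_id_t stream_id)
    {
        assert(stream_id < 16); /* assumption: only low-numbered uni streams are tracked */
        conn->scheduler.uni.active |= (uint16_t)(1 << stream_id);
    }
    static inline int uni_is_active(struct st_h2o_http3_server_conn_t *conn, quicly_stream_id_t stream_id)
    {
        return (conn->scheduler.uni.active & (uint16_t)(1 << stream_id)) != 0;
    }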
187 | | |
188 | | /** |
189 | | * sendvec, with additional field that contains the starting offset of the content |
190 | | */ |
191 | | struct st_h2o_http3_server_sendvec_t { |
192 | | h2o_sendvec_t vec; |
193 | | /** |
194 | | * Starting offset of the content carried by the vector, or UINT64_MAX if it is not carrying body |
195 | | */ |
196 | | uint64_t entity_offset; |
197 | | }; |
198 | | |
199 | | struct st_h2o_http3_server_stream_t { |
200 | | quicly_stream_t *quic; |
201 | | struct { |
202 | | h2o_buffer_t *buf; |
203 | | int (*handle_input)(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src, const uint8_t *src_end, |
204 | | int in_generator, const char **err_desc); |
205 | | uint64_t bytes_left_in_data_frame; |
206 | | } recvbuf; |
207 | | struct { |
208 | | H2O_VECTOR(struct st_h2o_http3_server_sendvec_t) vecs; |
209 | | size_t off_within_first_vec; |
210 | | size_t min_index_to_addref; |
211 | | uint64_t final_size, final_body_size; |
212 | | uint8_t data_frame_header_buf[9]; |
213 | | } sendbuf; |
214 | | enum h2o_http3_server_stream_state state; |
215 | | h2o_linklist_t link; |
216 | | h2o_linklist_t link_resp_settings_blocked; |
217 | | h2o_ostream_t ostr_final; |
218 | | struct st_h2o_http3_req_scheduler_node_t scheduler; |
219 | | /** |
220 | | * if read is blocked |
221 | | */ |
222 | | uint8_t read_blocked : 1; |
223 | | /** |
224 | | * if h2o_proceed_response has been invoked, or if the invocation has been requested |
225 | | */ |
226 | | uint8_t proceed_requested : 1; |
227 | | /** |
228 | | * this flag is set by on_send_emit and triggers the invocation of h2o_proceed_response in scheduler_do_send; it is used by |
229 | | * do_send to take different actions depending on whether it has been called while scheduler_do_send is running. |
230 | | */ |
231 | | uint8_t proceed_while_sending : 1; |
232 | | /** |
233 | | * if a PRIORITY_UPDATE frame has been received |
234 | | */ |
235 | | uint8_t received_priority_update : 1; |
236 | | /** |
237 | | * used in CLOSE_WAIT state to determine if h2o_dispose_request has been called |
238 | | */ |
239 | | uint8_t req_disposed : 1; |
240 | | /** |
241 | | * indicates if the request is in streaming mode |
242 | | */ |
243 | | uint8_t req_streaming : 1; |
244 | | /** |
245 | | * buffer to hold the request body (or a chunk of it, if in streaming mode), or the CONNECT payload |
246 | | */ |
247 | | h2o_buffer_t *req_body; |
248 | | /** |
249 | | * flow ID used by masque over H3_DATAGRAMS |
250 | | */ |
251 | | uint64_t datagram_flow_id; |
252 | | /** |
253 | | * the request. Placed at the end, as it holds the pool. |
254 | | */ |
255 | | h2o_req_t req; |
256 | | }; |
257 | | |
258 | | static int foreach_request(h2o_conn_t *_conn, int (*cb)(h2o_req_t *req, void *cbdata), void *cbdata); |
259 | | static void initiate_graceful_shutdown(h2o_conn_t *_conn); |
260 | | static void close_idle_connection(h2o_conn_t *_conn); |
261 | | static void on_stream_destroy(quicly_stream_t *qs, int err); |
262 | | static int handle_input_post_trailers(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src, const uint8_t *src_end, |
263 | | int in_generator, const char **err_desc); |
264 | | static int handle_input_expect_data(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src, const uint8_t *src_end, |
265 | | int in_generator, const char **err_desc); |
266 | | |
267 | | static const h2o_sendvec_callbacks_t self_allocated_vec_callbacks = {h2o_sendvec_read_raw, NULL}, |
268 | | immutable_vec_callbacks = {h2o_sendvec_read_raw, NULL}; |
269 | | |
270 | | static int sendvec_size_is_for_recycle(size_t size) |
271 | 2.08k | { |
272 | 2.08k | if (h2o_socket_ssl_buffer_allocator.conf->memsize / 2 <= size && size <= h2o_socket_ssl_buffer_allocator.conf->memsize) |
273 | 0 | return 1; |
274 | 2.08k | return 0; |
275 | 2.08k | } |
276 | | |
277 | | static void dispose_sendvec(struct st_h2o_http3_server_sendvec_t *vec) |
278 | 5.28k | { |
279 | 5.28k | if (vec->vec.callbacks == &self_allocated_vec_callbacks) { |
280 | 1.04k | if (sendvec_size_is_for_recycle(vec->vec.len)) { |
281 | 0 | h2o_mem_free_recycle(&h2o_socket_ssl_buffer_allocator, vec->vec.raw); |
282 | 1.04k | } else { |
283 | 1.04k | free(vec->vec.raw); |
284 | 1.04k | } |
285 | 1.04k | } |
286 | 5.28k | } |
287 | | |
288 | | static void req_scheduler_init(struct st_h2o_http3_req_scheduler_t *sched) |
289 | 6.51k | { |
290 | 6.51k | size_t i; |
291 | | |
292 | 58.6k | for (i = 0; i < H2O_ABSPRIO_NUM_URGENCY_LEVELS; ++i) { |
293 | 52.1k | h2o_linklist_init_anchor(&sched->active.urgencies[i].high); |
294 | 52.1k | h2o_linklist_init_anchor(&sched->active.urgencies[i].low); |
295 | 52.1k | } |
296 | 6.51k | sched->active.smallest_urgency = i; |
297 | 6.51k | h2o_linklist_init_anchor(&sched->conn_blocked); |
298 | 6.51k | } |
299 | | |
300 | | static void req_scheduler_activate(struct st_h2o_http3_req_scheduler_t *sched, struct st_h2o_http3_req_scheduler_node_t *node, |
301 | | h2o_http3_req_scheduler_compare_cb comp) |
302 | 1.86k | { |
303 | | /* unlink if necessary */ |
304 | 1.86k | if (h2o_linklist_is_linked(&node->link)) |
305 | 2 | h2o_linklist_unlink(&node->link); |
306 | | |
307 | 1.86k | if (!node->priority.incremental || node->call_cnt == 0) { |
308 | | /* non-incremental streams and the first emission of incremental streams go in strict order */ |
309 | 1.85k | h2o_linklist_t *anchor = &sched->active.urgencies[node->priority.urgency].high, *pos; |
310 | 1.85k | for (pos = anchor->prev; pos != anchor; pos = pos->prev) { |
311 | 0 | struct st_h2o_http3_req_scheduler_node_t *node_at_pos = |
312 | 0 | H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_req_scheduler_node_t, link, pos); |
313 | 0 | if (comp(sched, node_at_pos, node) < 0) |
314 | 0 | break; |
315 | 0 | } |
316 | 1.85k | h2o_linklist_insert(pos->next, &node->link); |
317 | 1.85k | } else { |
318 | | /* once sent, incremental streams go into a lower list */ |
319 | 2 | h2o_linklist_insert(&sched->active.urgencies[node->priority.urgency].low, &node->link); |
320 | 2 | } |
321 | | |
322 | | /* book keeping */ |
323 | 1.86k | if (node->priority.urgency < sched->active.smallest_urgency) |
324 | 1.85k | sched->active.smallest_urgency = node->priority.urgency; |
325 | 1.86k | } |
326 | | |
327 | | static void req_scheduler_update_smallest_urgency_post_removal(struct st_h2o_http3_req_scheduler_t *sched, size_t changed) |
328 | 8.96k | { |
329 | 8.96k | if (sched->active.smallest_urgency < changed) |
330 | 0 | return; |
331 | | |
332 | | /* search from the location that *might* have changed */ |
333 | 8.96k | sched->active.smallest_urgency = changed; |
334 | 44.7k | while (h2o_linklist_is_empty(&sched->active.urgencies[sched->active.smallest_urgency].high) && |
335 | 44.7k | h2o_linklist_is_empty(&sched->active.urgencies[sched->active.smallest_urgency].low)) { |
336 | 44.7k | ++sched->active.smallest_urgency; |
337 | 44.7k | if (sched->active.smallest_urgency >= H2O_ABSPRIO_NUM_URGENCY_LEVELS) |
338 | 8.96k | break; |
339 | 44.7k | } |
340 | 8.96k | } |
341 | | |
342 | | static void req_scheduler_deactivate(struct st_h2o_http3_req_scheduler_t *sched, struct st_h2o_http3_req_scheduler_node_t *node) |
343 | 8.96k | { |
344 | 8.96k | if (h2o_linklist_is_linked(&node->link)) |
345 | 1.85k | h2o_linklist_unlink(&node->link); |
346 | | |
347 | 8.96k | req_scheduler_update_smallest_urgency_post_removal(sched, node->priority.urgency); |
348 | 8.96k | } |
349 | | |
350 | | static void req_scheduler_setup_for_next(struct st_h2o_http3_req_scheduler_t *sched, struct st_h2o_http3_req_scheduler_node_t *node, |
351 | | h2o_http3_req_scheduler_compare_cb comp) |
352 | 520 | { |
353 | 520 | assert(h2o_linklist_is_linked(&node->link)); |
354 | | |
355 | | /* reschedule to achieve round-robin behavior */ |
356 | 520 | if (node->priority.incremental) |
357 | 2 | req_scheduler_activate(sched, node, comp); |
358 | 520 | } |
359 | | |
360 | | static void req_scheduler_conn_blocked(struct st_h2o_http3_req_scheduler_t *sched, struct st_h2o_http3_req_scheduler_node_t *node) |
361 | 0 | { |
362 | 0 | if (h2o_linklist_is_linked(&node->link)) |
363 | 0 | h2o_linklist_unlink(&node->link); |
364 | |
365 | 0 | h2o_linklist_insert(&sched->conn_blocked, &node->link); |
366 | |
367 | 0 | req_scheduler_update_smallest_urgency_post_removal(sched, node->priority.urgency); |
368 | 0 | } |
369 | | |
370 | | static void req_scheduler_unblock_conn_blocked(struct st_h2o_http3_req_scheduler_t *sched, h2o_http3_req_scheduler_compare_cb comp) |
371 | 0 | { |
372 | 0 | while (!h2o_linklist_is_empty(&sched->conn_blocked)) { |
373 | 0 | struct st_h2o_http3_req_scheduler_node_t *node = |
374 | 0 | H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_req_scheduler_node_t, link, sched->conn_blocked.next); |
375 | 0 | req_scheduler_activate(sched, node, comp); |
376 | 0 | } |
377 | 0 | } |
378 | | |
379 | | static int req_scheduler_compare_stream_id(struct st_h2o_http3_req_scheduler_t *sched, |
380 | | const struct st_h2o_http3_req_scheduler_node_t *x, |
381 | | const struct st_h2o_http3_req_scheduler_node_t *y) |
382 | 0 | { |
383 | 0 | struct st_h2o_http3_server_stream_t *sx = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, scheduler, x), |
384 | 0 | *sy = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, scheduler, y); |
385 | 0 | if (sx->quic->stream_id < sy->quic->stream_id) { |
386 | 0 | return -1; |
387 | 0 | } else if (sx->quic->stream_id > sy->quic->stream_id) { |
388 | 0 | return 1; |
389 | 0 | } else { |
390 | 0 | return 0; |
391 | 0 | } |
392 | 0 | } |
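The routines above maintain the scheduler state; the consumer side that picks the next node to serve lies outside this excerpt. A minimal sketch, under the assumption that the consumer simply serves the front of the most urgent non-empty level, with the `high` list taking precedence over `low`:

    /* sketch (not part of this file): peek at the node the scheduler would serve next */
    static struct st_h2o_http3_req_scheduler_node_t *req_scheduler_peek(struct st_h2o_http3_req_scheduler_t *sched)
    {
        if (sched->active.smallest_urgency >= H2O_ABSPRIO_NUM_URGENCY_LEVELS)
            return NULL; /* nothing is active */
        h2o_linklist_t *anchor = &sched->active.urgencies[sched->active.smallest_urgency].high;
        if (h2o_linklist_is_empty(anchor))
            anchor = &sched->active.urgencies[sched->active.smallest_urgency].low;
        return H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_req_scheduler_node_t, link, anchor->next);
    }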
393 | | |
394 | | static struct st_h2o_http3_server_conn_t *get_conn(struct st_h2o_http3_server_stream_t *stream) |
395 | 82.6k | { |
396 | 82.6k | return (void *)stream->req.conn; |
397 | 82.6k | } |
398 | | |
399 | | static uint32_t *get_state_counter(struct st_h2o_http3_server_conn_t *conn, enum h2o_http3_server_stream_state state) |
400 | 28.2k | { |
401 | 28.2k | return conn->num_streams.counters + (size_t)state; |
402 | 28.2k | } |
403 | | |
404 | | static void handle_priority_change(struct st_h2o_http3_server_stream_t *stream, const char *value, size_t len, h2o_absprio_t base) |
405 | 0 | { |
406 | 0 | int reactivate = 0; |
407 | |
408 | 0 | if (h2o_linklist_is_linked(&stream->scheduler.link)) { |
409 | 0 | req_scheduler_deactivate(&get_conn(stream)->scheduler.reqs, &stream->scheduler); |
410 | 0 | reactivate = 1; |
411 | 0 | } |
412 | | |
413 | | /* update priority, using provided value as the base */ |
414 | 0 | stream->scheduler.priority = base; |
415 | 0 | h2o_absprio_parse_priority(value, len, &stream->scheduler.priority); |
416 | |
417 | 0 | if (reactivate) |
418 | 0 | req_scheduler_activate(&get_conn(stream)->scheduler.reqs, &stream->scheduler, req_scheduler_compare_stream_id); |
419 | 0 | } |
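handle_priority_change re-parses the Priority field value on top of the caller-supplied base. A minimal usage sketch of h2o_absprio_parse_priority; the helper name and the use of the default-priority constant from h2o/absprio.h are assumptions for illustration:

    /* sketch: deriving a priority from a "priority" header value */
    static h2o_absprio_t parse_priority_example(void)
    {
        h2o_absprio_t prio = h2o_absprio_default; /* assumption: default-priority constant from h2o/absprio.h */
        h2o_absprio_parse_priority(H2O_STRLIT("u=1, i"), &prio);
        /* prio.urgency == 1, prio.incremental == 1 */
        return prio;
    }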
420 | | |
421 | | static void tunnel_on_udp_read(h2o_req_t *_req, h2o_iovec_t *datagrams, size_t num_datagrams) |
422 | 0 | { |
423 | 0 | struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, req, _req); |
424 | 0 | h2o_http3_send_h3_datagrams(&get_conn(stream)->h3, stream->datagram_flow_id, datagrams, num_datagrams); |
425 | 0 | } |
426 | | |
427 | | static void request_run_delayed(struct st_h2o_http3_server_conn_t *conn) |
428 | 977 | { |
429 | 977 | if (!h2o_timer_is_linked(&conn->timeout)) |
430 | 977 | h2o_timer_link(conn->super.ctx->loop, 0, &conn->timeout); |
431 | 977 | } |
432 | | |
433 | | static void check_run_blocked(struct st_h2o_http3_server_conn_t *conn) |
434 | 0 | { |
435 | 0 | if (conn->num_streams.recv_body_unblocked + conn->num_streams_req_streaming < |
436 | 0 | conn->super.ctx->globalconf->http3.max_concurrent_streaming_requests_per_connection && |
437 | 0 | !h2o_linklist_is_empty(&conn->delayed_streams.recv_body_blocked)) |
438 | 0 | request_run_delayed(conn); |
439 | 0 | } |
440 | | |
441 | | static void pre_dispose_request(struct st_h2o_http3_server_stream_t *stream) |
442 | 6.51k | { |
443 | 6.51k | struct st_h2o_http3_server_conn_t *conn = get_conn(stream); |
444 | 6.51k | size_t i; |
445 | | |
446 | | /* release vectors */ |
447 | 9.33k | for (i = 0; i != stream->sendbuf.vecs.size; ++i) |
448 | 2.81k | dispose_sendvec(stream->sendbuf.vecs.entries + i); |
449 | | |
450 | | /* dispose request body buffer */ |
451 | 6.51k | if (stream->req_body != NULL) |
452 | 437 | h2o_buffer_dispose(&stream->req_body); |
453 | | |
454 | | /* clean up request streaming */ |
455 | 6.51k | if (stream->req_streaming && !stream->req.is_tunnel_req) { |
456 | 0 | assert(conn->num_streams_req_streaming != 0); |
457 | 0 | stream->req_streaming = 0; |
458 | 0 | --conn->num_streams_req_streaming; |
459 | 0 | check_run_blocked(conn); |
460 | 0 | } |
461 | | |
462 | | /* remove stream from datagram flow list */ |
463 | 6.51k | if (stream->datagram_flow_id != UINT64_MAX) { |
464 | 556 | khiter_t iter = kh_get(stream, conn->datagram_flows, stream->datagram_flow_id); |
465 | | /* it's possible the tunnel wasn't established yet */ |
466 | 556 | if (iter != kh_end(conn->datagram_flows)) |
467 | 556 | kh_del(stream, conn->datagram_flows, iter); |
468 | 556 | } |
469 | | |
470 | 6.51k | if (stream->req.is_tunnel_req) |
471 | 247 | --get_conn(stream)->num_streams_tunnelling; |
472 | 6.51k | } |
473 | | |
474 | | static void set_state(struct st_h2o_http3_server_stream_t *stream, enum h2o_http3_server_stream_state state, int in_generator) |
475 | 7.62k | { |
476 | 7.62k | struct st_h2o_http3_server_conn_t *conn = get_conn(stream); |
477 | 7.62k | enum h2o_http3_server_stream_state old_state = stream->state; |
478 | | |
479 | 7.62k | H2O_PROBE_CONN(H3S_STREAM_SET_STATE, &conn->super, stream->quic->stream_id, (unsigned)state); |
480 | | |
481 | 7.62k | --*get_state_counter(conn, old_state); |
482 | 7.62k | stream->state = state; |
483 | 7.62k | ++*get_state_counter(conn, stream->state); |
484 | | |
485 | 7.62k | switch (state) { |
486 | 0 | case H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BLOCKED: |
487 | 0 | assert(conn->delayed_streams.recv_body_blocked.prev == &stream->link || !"stream is not registered to the recv_body list?"); |
488 | 0 | break; |
489 | 1.92k | case H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT: { |
490 | 1.92k | if (h2o_linklist_is_linked(&stream->link)) |
491 | 0 | h2o_linklist_unlink(&stream->link); |
492 | 1.92k | pre_dispose_request(stream); |
493 | 1.92k | if (!in_generator) { |
494 | 1.92k | h2o_dispose_request(&stream->req); |
495 | 1.92k | stream->req_disposed = 1; |
496 | 1.92k | } |
497 | 1.92k | static const quicly_stream_callbacks_t close_wait_callbacks = {on_stream_destroy, |
498 | 1.92k | quicly_stream_noop_on_send_shift, |
499 | 1.92k | quicly_stream_noop_on_send_emit, |
500 | 1.92k | quicly_stream_noop_on_send_stop, |
501 | 1.92k | quicly_stream_noop_on_receive, |
502 | 1.92k | quicly_stream_noop_on_receive_reset}; |
503 | 1.92k | stream->quic->callbacks = &close_wait_callbacks; |
504 | 1.92k | } break; |
505 | 5.70k | default: |
506 | 5.70k | break; |
507 | 7.62k | } |
508 | 7.62k | } |
509 | | |
510 | | /** |
511 | | * Shuts down a stream. Note that a request stream should not be shut down until receiving some QUIC frame that refers to that |
512 | | * stream, but we might have created stream state due to receiving a PRIORITY_UPDATE frame prior to that (see |
513 | | * handle_priority_update_frame). |
514 | | */ |
515 | | static void shutdown_stream(struct st_h2o_http3_server_stream_t *stream, int stop_sending_code, int reset_code, int in_generator) |
516 | 1.00k | { |
517 | 1.00k | assert(stream->state < H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT); |
518 | 1.00k | if (quicly_stream_has_receive_side(0, stream->quic->stream_id)) { |
519 | 1.00k | quicly_request_stop(stream->quic, stop_sending_code); |
520 | 1.00k | h2o_buffer_consume(&stream->recvbuf.buf, stream->recvbuf.buf->size); |
521 | 1.00k | } |
522 | 1.00k | if (quicly_stream_has_send_side(0, stream->quic->stream_id) && !quicly_sendstate_transfer_complete(&stream->quic->sendstate)) |
523 | 1.00k | quicly_reset_stream(stream->quic, reset_code); |
524 | 1.00k | set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT, in_generator); |
525 | 1.00k | } |
526 | | |
527 | | static socklen_t get_sockname(h2o_conn_t *_conn, struct sockaddr *sa) |
528 | 0 | { |
529 | 0 | struct st_h2o_http3_server_conn_t *conn = (void *)_conn; |
530 | 0 | struct sockaddr *src = quicly_get_sockname(conn->h3.super.quic); |
531 | 0 | socklen_t len = src->sa_family == AF_UNSPEC ? sizeof(struct sockaddr) : quicly_get_socklen(src); |
532 | 0 | memcpy(sa, src, len); |
533 | 0 | return len; |
534 | 0 | } |
535 | | |
536 | | static socklen_t get_peername(h2o_conn_t *_conn, struct sockaddr *sa) |
537 | 612 | { |
538 | 612 | struct st_h2o_http3_server_conn_t *conn = (void *)_conn; |
539 | 612 | struct sockaddr *src = quicly_get_peername(conn->h3.super.quic); |
540 | 612 | socklen_t len = quicly_get_socklen(src); |
541 | 612 | memcpy(sa, src, len); |
542 | 612 | return len; |
543 | 612 | } |
544 | | |
545 | | static ptls_t *get_ptls(h2o_conn_t *_conn) |
546 | 612 | { |
547 | 612 | struct st_h2o_http3_server_conn_t *conn = (void *)_conn; |
548 | 612 | return quicly_get_tls(conn->h3.super.quic); |
549 | 612 | } |
550 | | |
551 | | static int get_skip_tracing(h2o_conn_t *conn) |
552 | 0 | { |
553 | 0 | ptls_t *ptls = get_ptls(conn); |
554 | 0 | return ptls_skip_tracing(ptls); |
555 | 0 | } |
556 | | |
557 | | static uint64_t get_req_id(h2o_req_t *req) |
558 | 0 | { |
559 | 0 | struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, req, req); |
560 | 0 | return stream->quic->stream_id; |
561 | 0 | } |
562 | | |
563 | | static uint32_t num_reqs_inflight(h2o_conn_t *_conn) |
564 | 0 | { |
565 | 0 | struct st_h2o_http3_server_conn_t *conn = (void *)_conn; |
566 | 0 | return quicly_num_streams_by_group(conn->h3.super.quic, 0, 0); |
567 | 0 | } |
568 | | |
569 | | static quicly_tracer_t *get_tracer(h2o_conn_t *_conn) |
570 | 0 | { |
571 | 0 | struct st_h2o_http3_server_conn_t *conn = (void *)_conn; |
572 | 0 | return quicly_get_tracer(conn->h3.super.quic); |
573 | 0 | } |
574 | | |
575 | | static h2o_iovec_t log_extensible_priorities(h2o_req_t *_req) |
576 | 0 | { |
577 | 0 | struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, req, _req); |
578 | 0 | char *buf = h2o_mem_alloc_pool(&stream->req.pool, char, sizeof("u=" H2O_UINT8_LONGEST_STR ",i=?1")); |
579 | 0 | int len = |
580 | 0 | sprintf(buf, "u=%" PRIu8 "%s", stream->scheduler.priority.urgency, stream->scheduler.priority.incremental ? ",i=?1" : ""); |
581 | 0 | return h2o_iovec_init(buf, len); |
582 | 0 | } |
583 | | |
584 | | static h2o_iovec_t log_cc_name(h2o_req_t *req) |
585 | 0 | { |
586 | 0 | struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn; |
587 | 0 | quicly_stats_t stats; |
588 | |
589 | 0 | if (quicly_get_stats(conn->h3.super.quic, &stats) == 0) |
590 | 0 | return h2o_iovec_init(stats.cc.type->name, strlen(stats.cc.type->name)); |
591 | 0 | return h2o_iovec_init(NULL, 0); |
592 | 0 | } |
593 | | |
594 | | static h2o_iovec_t log_delivery_rate(h2o_req_t *req) |
595 | 0 | { |
596 | 0 | struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn; |
597 | 0 | quicly_rate_t rate; |
598 | |
599 | 0 | if (quicly_get_delivery_rate(conn->h3.super.quic, &rate) == 0 && rate.latest != 0) { |
600 | 0 | char *buf = h2o_mem_alloc_pool(&req->pool, char, sizeof(H2O_UINT64_LONGEST_STR)); |
601 | 0 | size_t len = sprintf(buf, "%" PRIu64, rate.latest); |
602 | 0 | return h2o_iovec_init(buf, len); |
603 | 0 | } |
604 | | |
605 | 0 | return h2o_iovec_init(NULL, 0); |
606 | 0 | } |
607 | | |
608 | | static h2o_iovec_t log_tls_protocol_version(h2o_req_t *_req) |
609 | 0 | { |
610 | 0 | return h2o_iovec_init(H2O_STRLIT("TLSv1.3")); |
611 | 0 | } |
612 | | |
613 | | static h2o_iovec_t log_session_reused(h2o_req_t *req) |
614 | 0 | { |
615 | 0 | struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn; |
616 | 0 | ptls_t *tls = quicly_get_tls(conn->h3.super.quic); |
617 | 0 | return ptls_is_psk_handshake(tls) ? h2o_iovec_init(H2O_STRLIT("1")) : h2o_iovec_init(H2O_STRLIT("0")); |
618 | 0 | } |
619 | | |
620 | | static h2o_iovec_t log_cipher(h2o_req_t *req) |
621 | 0 | { |
622 | 0 | struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn; |
623 | 0 | ptls_t *tls = quicly_get_tls(conn->h3.super.quic); |
624 | 0 | ptls_cipher_suite_t *cipher = ptls_get_cipher(tls); |
625 | 0 | return cipher != NULL ? h2o_iovec_init(cipher->name, strlen(cipher->name)) : h2o_iovec_init(NULL, 0); |
626 | 0 | } |
627 | | |
628 | | static h2o_iovec_t log_cipher_bits(h2o_req_t *req) |
629 | 0 | { |
630 | 0 | struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn; |
631 | 0 | ptls_t *tls = quicly_get_tls(conn->h3.super.quic); |
632 | 0 | ptls_cipher_suite_t *cipher = ptls_get_cipher(tls); |
633 | 0 | if (cipher == NULL) |
634 | 0 | return h2o_iovec_init(NULL, 0); |
635 | | |
636 | 0 | char *buf = h2o_mem_alloc_pool(&req->pool, char, sizeof(H2O_UINT16_LONGEST_STR)); |
637 | 0 | return h2o_iovec_init(buf, sprintf(buf, "%" PRIu16, (uint16_t)(cipher->aead->key_size * 8))); |
638 | 0 | } |
639 | | |
640 | | static h2o_iovec_t log_session_id(h2o_req_t *_req) |
641 | 0 | { |
642 | | /* FIXME */ |
643 | 0 | return h2o_iovec_init(NULL, 0); |
644 | 0 | } |
645 | | |
646 | | static h2o_iovec_t log_server_name(h2o_req_t *req) |
647 | 0 | { |
648 | 0 | struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn; |
649 | 0 | ptls_t *tls = quicly_get_tls(conn->h3.super.quic); |
650 | 0 | const char *server_name = ptls_get_server_name(tls); |
651 | 0 | return server_name != NULL ? h2o_iovec_init(server_name, strlen(server_name)) : h2o_iovec_init(NULL, 0); |
652 | 0 | } |
653 | | |
654 | | static h2o_iovec_t log_negotiated_protocol(h2o_req_t *req) |
655 | 0 | { |
656 | 0 | struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn; |
657 | 0 | ptls_t *tls = quicly_get_tls(conn->h3.super.quic); |
658 | 0 | const char *proto = ptls_get_negotiated_protocol(tls); |
659 | 0 | return proto != NULL ? h2o_iovec_init(proto, strlen(proto)) : h2o_iovec_init(NULL, 0); |
660 | 0 | } |
661 | | |
662 | | static h2o_iovec_t log_ech_config_id(h2o_req_t *req) |
663 | 0 | { |
664 | 0 | struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn; |
665 | 0 | ptls_t *tls = quicly_get_tls(conn->h3.super.quic); |
666 | 0 | uint8_t config_id; |
667 | |
668 | 0 | if (ptls_is_ech_handshake(tls, &config_id, NULL, NULL)) { |
669 | 0 | char *s = h2o_mem_alloc_pool(&req->pool, char, sizeof(H2O_UINT8_LONGEST_STR)); |
670 | 0 | size_t len = sprintf(s, "%" PRIu8, config_id); |
671 | 0 | return h2o_iovec_init(s, len); |
672 | 0 | } else { |
673 | 0 | return h2o_iovec_init(NULL, 0); |
674 | 0 | } |
675 | 0 | } |
676 | | |
677 | | static h2o_iovec_t log_ech_kem(h2o_req_t *req) |
678 | 0 | { |
679 | 0 | struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn; |
680 | 0 | ptls_t *tls = quicly_get_tls(conn->h3.super.quic); |
681 | 0 | ptls_hpke_kem_t *kem; |
682 | |
683 | 0 | if (ptls_is_ech_handshake(tls, NULL, &kem, NULL)) { |
684 | 0 | return h2o_iovec_init(kem->keyex->name, strlen(kem->keyex->name)); |
685 | 0 | } else { |
686 | 0 | return h2o_iovec_init(NULL, 0); |
687 | 0 | } |
688 | 0 | } |
689 | | |
690 | | static h2o_iovec_t log_ech_cipher(h2o_req_t *req) |
691 | 0 | { |
692 | 0 | struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn; |
693 | 0 | ptls_t *tls = quicly_get_tls(conn->h3.super.quic); |
694 | 0 | ptls_hpke_cipher_suite_t *cipher; |
695 | |
696 | 0 | if (ptls_is_ech_handshake(tls, NULL, NULL, &cipher)) { |
697 | 0 | return h2o_iovec_init(cipher->name, strlen(cipher->name)); |
698 | 0 | } else { |
699 | 0 | return h2o_iovec_init(NULL, 0); |
700 | 0 | } |
701 | 0 | } |
702 | | |
703 | | static h2o_iovec_t log_ech_cipher_bits(h2o_req_t *req) |
704 | 0 | { |
705 | 0 | struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn; |
706 | 0 | ptls_t *tls = quicly_get_tls(conn->h3.super.quic); |
707 | 0 | ptls_hpke_cipher_suite_t *cipher; |
708 | |
709 | 0 | if (ptls_is_ech_handshake(tls, NULL, NULL, &cipher)) { |
710 | 0 | uint16_t bits = (uint16_t)(cipher->aead->key_size * 8); |
711 | 0 | char *s = h2o_mem_alloc_pool(&req->pool, char, sizeof(H2O_UINT16_LONGEST_STR)); |
712 | 0 | size_t len = sprintf(s, "%" PRIu16, bits); |
713 | 0 | return h2o_iovec_init(s, len); |
714 | 0 | } else { |
715 | 0 | return h2o_iovec_init(NULL, 0); |
716 | 0 | } |
717 | 0 | } |
718 | | |
719 | | static h2o_iovec_t log_stream_id(h2o_req_t *_req) |
720 | 0 | { |
721 | 0 | struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, req, _req); |
722 | 0 | char *buf = h2o_mem_alloc_pool(&stream->req.pool, char, sizeof(H2O_UINT64_LONGEST_STR)); |
723 | 0 | return h2o_iovec_init(buf, sprintf(buf, "%" PRIu64, stream->quic->stream_id)); |
724 | 0 | } |
725 | | |
726 | | static h2o_iovec_t log_quic_stats(h2o_req_t *req) |
727 | 0 | { |
728 | 0 | #define PUSH_FIELD(name, type, field) \ |
729 | 0 | do { \ |
730 | 0 | len += snprintf(buf + len, bufsize - len, name "=" type ",", stats.field); \ |
731 | 0 | if (len + 1 > bufsize) { \ |
732 | 0 | bufsize = bufsize * 3 / 2; \ |
733 | 0 | goto Redo; \ |
734 | 0 | } \ |
735 | 0 | } while (0) |
736 | 0 | #define PUSH_U64(name, field) PUSH_FIELD(name, "%" PRIu64, field) |
737 | 0 | #define PUSH_U32(name, field) PUSH_FIELD(name, "%" PRIu32, field) |
738 | 0 | #define PUSH_SIZE_T(name, field) PUSH_FIELD(name, "%zu", field) |
739 | |
740 | 0 | #define DO_PUSH_NUM_FRAMES(name, dir) PUSH_U64(H2O_TO_STR(name) "-" H2O_TO_STR(dir), num_frames_##dir.name) |
741 | 0 | #define PUSH_NUM_FRAMES(dir) \ |
742 | 0 | do { \ |
743 | 0 | DO_PUSH_NUM_FRAMES(padding, dir); \ |
744 | 0 | DO_PUSH_NUM_FRAMES(ping, dir); \ |
745 | 0 | DO_PUSH_NUM_FRAMES(ack, dir); \ |
746 | 0 | DO_PUSH_NUM_FRAMES(reset_stream, dir); \ |
747 | 0 | DO_PUSH_NUM_FRAMES(stop_sending, dir); \ |
748 | 0 | DO_PUSH_NUM_FRAMES(crypto, dir); \ |
749 | 0 | DO_PUSH_NUM_FRAMES(new_token, dir); \ |
750 | 0 | DO_PUSH_NUM_FRAMES(stream, dir); \ |
751 | 0 | DO_PUSH_NUM_FRAMES(max_data, dir); \ |
752 | 0 | DO_PUSH_NUM_FRAMES(max_stream_data, dir); \ |
753 | 0 | DO_PUSH_NUM_FRAMES(max_streams_bidi, dir); \ |
754 | 0 | DO_PUSH_NUM_FRAMES(max_streams_uni, dir); \ |
755 | 0 | DO_PUSH_NUM_FRAMES(data_blocked, dir); \ |
756 | 0 | DO_PUSH_NUM_FRAMES(stream_data_blocked, dir); \ |
757 | 0 | DO_PUSH_NUM_FRAMES(streams_blocked, dir); \ |
758 | 0 | DO_PUSH_NUM_FRAMES(new_connection_id, dir); \ |
759 | 0 | DO_PUSH_NUM_FRAMES(retire_connection_id, dir); \ |
760 | 0 | DO_PUSH_NUM_FRAMES(path_challenge, dir); \ |
761 | 0 | DO_PUSH_NUM_FRAMES(path_response, dir); \ |
762 | 0 | DO_PUSH_NUM_FRAMES(transport_close, dir); \ |
763 | 0 | DO_PUSH_NUM_FRAMES(application_close, dir); \ |
764 | 0 | DO_PUSH_NUM_FRAMES(handshake_done, dir); \ |
765 | 0 | DO_PUSH_NUM_FRAMES(ack_frequency, dir); \ |
766 | 0 | } while (0) |
767 | |
768 | 0 | struct st_h2o_http3_server_conn_t *conn = (struct st_h2o_http3_server_conn_t *)req->conn; |
769 | 0 | quicly_stats_t stats; |
770 | |
771 | 0 | if (quicly_get_stats(conn->h3.super.quic, &stats) != 0) |
772 | 0 | return h2o_iovec_init(H2O_STRLIT("-")); |
773 | | |
774 | 0 | char *buf; |
775 | 0 | size_t len; |
776 | 0 | static __thread size_t bufsize = 100; /* this value grows by 1.5x until an adequate size is found, and is remembered for |
777 | | * future invocations */ |
778 | 0 | Redo: |
779 | 0 | buf = h2o_mem_alloc_pool(&req->pool, char, bufsize); |
780 | 0 | len = 0; |
781 | |
782 | 0 | PUSH_U64("packets-received", num_packets.received); |
783 | 0 | PUSH_U64("packets-received-ecn-ect0", num_packets.received_ecn_counts[0]); |
784 | 0 | PUSH_U64("packets-received-ecn-ect1", num_packets.received_ecn_counts[1]); |
785 | 0 | PUSH_U64("packets-received-ecn-ce", num_packets.received_ecn_counts[2]); |
786 | 0 | PUSH_U64("packets-decryption-failed", num_packets.decryption_failed); |
787 | 0 | PUSH_U64("packets-sent", num_packets.sent); |
788 | 0 | PUSH_U64("packets-lost", num_packets.lost); |
789 | 0 | PUSH_U64("packets-lost-time-threshold", num_packets.lost_time_threshold); |
790 | 0 | PUSH_U64("packets-ack-received", num_packets.ack_received); |
791 | 0 | PUSH_U64("packets-acked-ecn-ect0", num_packets.acked_ecn_counts[0]); |
792 | 0 | PUSH_U64("packets-acked-ecn-ect1", num_packets.acked_ecn_counts[1]); |
793 | 0 | PUSH_U64("packets-acked-ecn-ce", num_packets.acked_ecn_counts[2]); |
794 | 0 | PUSH_U64("late-acked", num_packets.late_acked); |
795 | 0 | PUSH_U64("bytes-received", num_bytes.received); |
796 | 0 | PUSH_U64("bytes-sent", num_bytes.sent); |
797 | 0 | PUSH_U64("bytes-lost", num_bytes.lost); |
798 | 0 | PUSH_U64("bytes-ack-received", num_bytes.ack_received); |
799 | 0 | PUSH_U64("bytes-stream-data-sent", num_bytes.stream_data_sent); |
800 | 0 | PUSH_U64("bytes-stream-data-resent", num_bytes.stream_data_resent); |
801 | 0 | PUSH_U64("paths-ecn-validated", num_paths.ecn_validated); |
802 | 0 | PUSH_U64("paths-ecn-failed", num_paths.ecn_failed); |
803 | 0 | PUSH_U32("rtt-minimum", rtt.minimum); |
804 | 0 | PUSH_U32("rtt-smoothed", rtt.smoothed); |
805 | 0 | PUSH_U32("rtt-variance", rtt.variance); |
806 | 0 | PUSH_U32("rtt-latest", rtt.latest); |
807 | 0 | PUSH_U32("cwnd", cc.cwnd); |
808 | 0 | PUSH_U32("ssthresh", cc.ssthresh); |
809 | 0 | PUSH_U32("cwnd-initial", cc.cwnd_initial); |
810 | 0 | PUSH_U32("cwnd-exiting-slow-start", cc.cwnd_exiting_slow_start); |
811 | 0 | PUSH_U32("cwnd-minimum", cc.cwnd_minimum); |
812 | 0 | PUSH_U32("cwnd-maximum", cc.cwnd_maximum); |
813 | 0 | PUSH_U32("num-loss-episodes", cc.num_loss_episodes); |
814 | 0 | PUSH_U32("num-ecn-loss-episodes", cc.num_ecn_loss_episodes); |
815 | 0 | PUSH_U64("num-ptos", num_ptos); |
816 | 0 | PUSH_U64("delivery-rate-latest", delivery_rate.latest); |
817 | 0 | PUSH_U64("delivery-rate-smoothed", delivery_rate.smoothed); |
818 | 0 | PUSH_U64("delivery-rate-stdev", delivery_rate.stdev); |
819 | 0 | PUSH_NUM_FRAMES(received); |
820 | 0 | PUSH_NUM_FRAMES(sent); |
821 | 0 | PUSH_SIZE_T("num-sentmap-packets-largest", num_sentmap_packets_largest); |
822 | | |
823 | 0 | return h2o_iovec_init(buf, len - 1); |
824 | |
825 | 0 | #undef PUSH_FIELD |
826 | 0 | #undef PUSH_U64 |
827 | 0 | #undef PUSH_U32 |
828 | 0 | #undef PUSH_SIZE_T |
829 | 0 | #undef DO_PUSH_NUM_FRAMES |
830 | 0 | #undef PUSH_NUM_FRAMES |
831 | 0 | } |
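log_quic_stats formats into a thread-local buffer whose size grows by 1.5x, restarting the whole function via `goto Redo` whenever a PUSH_FIELD invocation would overflow. A standalone sketch of that grow-and-retry idea, with hypothetical names and detached from quicly (assumes <stdio.h> and <inttypes.h>):

    /* hypothetical illustration of the grow-and-retry pattern behind PUSH_FIELD */
    static size_t append_field(char *buf, size_t bufsize, size_t len, const char *name, uint64_t value, int *overflowed)
    {
        int n = snprintf(buf + len, bufsize - len, "%s=%" PRIu64 ",", name, value);
        if (n < 0 || len + (size_t)n + 1 > bufsize)
            *overflowed = 1; /* caller enlarges bufsize (e.g., *= 1.5) and restarts the formatting loop */
        return len + (n > 0 ? (size_t)n : 0);
    }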
832 | | |
833 | | static h2o_iovec_t log_quic_version(h2o_req_t *_req) |
834 | 0 | { |
835 | 0 | struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, req, _req); |
836 | 0 | char *buf = h2o_mem_alloc_pool(&stream->req.pool, char, sizeof(H2O_UINT32_LONGEST_STR)); |
837 | 0 | return h2o_iovec_init(buf, sprintf(buf, "%" PRIu32, quicly_get_protocol_version(stream->quic->conn))); |
838 | 0 | } |
839 | | |
840 | | void on_stream_destroy(quicly_stream_t *qs, int err) |
841 | 6.51k | { |
842 | 6.51k | struct st_h2o_http3_server_stream_t *stream = qs->data; |
843 | 6.51k | struct st_h2o_http3_server_conn_t *conn = get_conn(stream); |
844 | | |
845 | | /* There is no need to call `update_conn_state` upon stream destruction, as all streams transition to CLOSE_WAIT before |
846 | | * being destroyed (and it is hard to call `update_conn_state` here, because the number returned by |
847 | | * `quicly_num_streams_by_group` is decremented only after returning from this function). */ |
848 | 6.51k | --*get_state_counter(conn, stream->state); |
849 | | |
850 | 6.51k | req_scheduler_deactivate(&conn->scheduler.reqs, &stream->scheduler); |
851 | | |
852 | 6.51k | if (h2o_linklist_is_linked(&stream->link)) |
853 | 0 | h2o_linklist_unlink(&stream->link); |
854 | 6.51k | if (h2o_linklist_is_linked(&stream->link_resp_settings_blocked)) |
855 | 0 | h2o_linklist_unlink(&stream->link_resp_settings_blocked); |
856 | 6.51k | if (stream->state != H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT) |
857 | 4.59k | pre_dispose_request(stream); |
858 | 6.51k | if (!stream->req_disposed) |
859 | 4.59k | h2o_dispose_request(&stream->req); |
860 | | /* in case the stream is destroyed before the buffer is fully consumed */ |
861 | 6.51k | h2o_buffer_dispose(&stream->recvbuf.buf); |
862 | | |
863 | 6.51k | free(stream); |
864 | | |
865 | 6.51k | uint32_t num_req_streams_incl_self = quicly_num_streams_by_group(conn->h3.super.quic, 0, 0); |
866 | 6.51k | assert(num_req_streams_incl_self > 0 && |
867 | 6.51k | "during the invocation of the destroy callback, stream count should include the number of the stream being destroyed"); |
868 | 6.51k | if (num_req_streams_incl_self == 1) |
869 | 6.51k | h2o_conn_set_state(&conn->super, H2O_CONN_STATE_IDLE); |
870 | 6.51k | } |
871 | | |
872 | | /** |
873 | | * Converts vectors owned by the generator to ones owned by the HTTP/3 implementation, as the former become inaccessible once we |
874 | | * call `do_proceed`. |
875 | | */ |
876 | | static int retain_sendvecs(struct st_h2o_http3_server_stream_t *stream) |
877 | 520 | { |
878 | 2.08k | for (; stream->sendbuf.min_index_to_addref != stream->sendbuf.vecs.size; ++stream->sendbuf.min_index_to_addref) { |
879 | 1.56k | struct st_h2o_http3_server_sendvec_t *vec = stream->sendbuf.vecs.entries + stream->sendbuf.min_index_to_addref; |
880 | 1.56k | assert(vec->vec.callbacks->read_ == h2o_sendvec_read_raw); |
881 | 1.56k | if (!(vec->vec.callbacks == &self_allocated_vec_callbacks || vec->vec.callbacks == &immutable_vec_callbacks)) { |
882 | 1.04k | size_t off_within_vec = stream->sendbuf.min_index_to_addref == 0 ? stream->sendbuf.off_within_first_vec : 0, |
883 | 1.04k | newlen = vec->vec.len - off_within_vec; |
884 | 1.04k | void *newbuf = sendvec_size_is_for_recycle(newlen) ? h2o_mem_alloc_recycle(&h2o_socket_ssl_buffer_allocator) |
885 | 1.04k | : h2o_mem_alloc(newlen); |
886 | 1.04k | memcpy(newbuf, vec->vec.raw + off_within_vec, newlen); |
887 | 1.04k | vec->vec = (h2o_sendvec_t){&self_allocated_vec_callbacks, newlen, {newbuf}}; |
888 | 1.04k | if (stream->sendbuf.min_index_to_addref == 0) |
889 | 0 | stream->sendbuf.off_within_first_vec = 0; |
890 | 1.04k | } |
891 | 1.56k | } |
892 | | |
893 | 520 | return 1; |
894 | 520 | } |
895 | | |
896 | | static void on_send_shift(quicly_stream_t *qs, size_t delta) |
897 | 917 | { |
898 | 917 | struct st_h2o_http3_server_stream_t *stream = qs->data; |
899 | 917 | size_t i; |
900 | | |
901 | 917 | assert(H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BEFORE_BLOCK <= stream->state && |
902 | 917 | stream->state <= H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY); |
903 | 0 | assert(delta != 0); |
904 | 0 | assert(stream->sendbuf.vecs.size != 0); |
905 | | |
906 | 0 | size_t bytes_avail_in_first_vec = stream->sendbuf.vecs.entries[0].vec.len - stream->sendbuf.off_within_first_vec; |
907 | 917 | if (delta < bytes_avail_in_first_vec) { |
908 | 0 | stream->sendbuf.off_within_first_vec += delta; |
909 | 0 | return; |
910 | 0 | } |
911 | 917 | delta -= bytes_avail_in_first_vec; |
912 | 917 | stream->sendbuf.off_within_first_vec = 0; |
913 | 917 | dispose_sendvec(&stream->sendbuf.vecs.entries[0]); |
914 | | |
915 | 2.47k | for (i = 1; delta != 0; ++i) { |
916 | 1.55k | assert(i < stream->sendbuf.vecs.size); |
917 | 1.55k | if (delta < stream->sendbuf.vecs.entries[i].vec.len) { |
918 | 0 | stream->sendbuf.off_within_first_vec = delta; |
919 | 0 | break; |
920 | 0 | } |
921 | 1.55k | delta -= stream->sendbuf.vecs.entries[i].vec.len; |
922 | 1.55k | dispose_sendvec(&stream->sendbuf.vecs.entries[i]); |
923 | 1.55k | } |
924 | 917 | memmove(stream->sendbuf.vecs.entries, stream->sendbuf.vecs.entries + i, |
925 | 917 | (stream->sendbuf.vecs.size - i) * sizeof(stream->sendbuf.vecs.entries[0])); |
926 | 917 | stream->sendbuf.vecs.size -= i; |
927 | 917 | if (stream->sendbuf.min_index_to_addref <= i) { |
928 | 917 | stream->sendbuf.min_index_to_addref = 0; |
929 | 917 | } else { |
930 | 0 | stream->sendbuf.min_index_to_addref -= i; |
931 | 0 | } |
932 | | |
933 | 917 | if (stream->sendbuf.vecs.size == 0) { |
934 | 917 | if (quicly_sendstate_is_open(&stream->quic->sendstate)) { |
935 | 0 | assert((H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BEFORE_BLOCK <= stream->state && |
936 | 0 | stream->state <= H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS) || |
937 | 0 | stream->proceed_requested); |
938 | 917 | } else { |
939 | 917 | if (quicly_stream_has_receive_side(0, stream->quic->stream_id)) |
940 | 917 | quicly_request_stop(stream->quic, H2O_HTTP3_ERROR_EARLY_RESPONSE); |
941 | 917 | set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT, 0); |
942 | 917 | } |
943 | 917 | } |
944 | 917 | } |
945 | | |
946 | | static void on_send_emit(quicly_stream_t *qs, size_t off, void *_dst, size_t *len, int *wrote_all) |
947 | 1.43k | { |
948 | 1.43k | struct st_h2o_http3_server_stream_t *stream = qs->data; |
949 | | |
950 | 1.43k | assert(H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BEFORE_BLOCK <= stream->state && |
951 | 1.43k | stream->state <= H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY); |
952 | | |
953 | 0 | uint8_t *dst = _dst, *dst_end = dst + *len; |
954 | 1.43k | size_t vec_index = 0; |
955 | | |
956 | | /* find the start position identified by vec_index and off */ |
957 | 1.43k | off += stream->sendbuf.off_within_first_vec; |
958 | 1.43k | while (off != 0) { |
959 | 0 | assert(vec_index < stream->sendbuf.vecs.size); |
960 | 0 | if (off < stream->sendbuf.vecs.entries[vec_index].vec.len) |
961 | 0 | break; |
962 | 0 | off -= stream->sendbuf.vecs.entries[vec_index].vec.len; |
963 | 0 | ++vec_index; |
964 | 0 | } |
965 | 1.43k | assert(vec_index < stream->sendbuf.vecs.size); |
966 | | |
967 | | /* write */ |
968 | 0 | *wrote_all = 0; |
969 | 4.03k | do { |
970 | 4.03k | struct st_h2o_http3_server_sendvec_t *this_vec = stream->sendbuf.vecs.entries + vec_index; |
971 | 4.03k | size_t sz = this_vec->vec.len - off; |
972 | 4.03k | if (dst_end - dst < sz) |
973 | 0 | sz = dst_end - dst; |
974 | | /* convert the vector into raw form the first time it is being sent (TODO use ssl_buffer_recycle) */ |
975 | 4.03k | if (this_vec->vec.callbacks->read_ != h2o_sendvec_read_raw) { |
976 | 0 | size_t newlen = this_vec->vec.len; |
977 | 0 | void *newbuf = sendvec_size_is_for_recycle(newlen) ? h2o_mem_alloc_recycle(&h2o_socket_ssl_buffer_allocator) |
978 | 0 | : h2o_mem_alloc(newlen); |
979 | 0 | if (!this_vec->vec.callbacks->read_(&this_vec->vec, newbuf, newlen)) { |
980 | 0 | free(newbuf); |
981 | 0 | goto Error; |
982 | 0 | } |
983 | 0 | this_vec->vec = (h2o_sendvec_t){&self_allocated_vec_callbacks, newlen, {newbuf}}; |
984 | 0 | } |
985 | | /* copy payload */ |
986 | 4.03k | memcpy(dst, this_vec->vec.raw + off, sz); |
987 | | /* adjust offsets */ |
988 | 4.03k | if (this_vec->entity_offset != UINT64_MAX && stream->req.bytes_sent < this_vec->entity_offset + off + sz) |
989 | 1.29k | stream->req.bytes_sent = this_vec->entity_offset + off + sz; |
990 | 4.03k | dst += sz; |
991 | 4.03k | off += sz; |
992 | | /* when reaching the end of the current vector, update vec_index, wrote_all */ |
993 | 4.03k | if (off == this_vec->vec.len) { |
994 | 4.03k | off = 0; |
995 | 4.03k | ++vec_index; |
996 | 4.03k | if (vec_index == stream->sendbuf.vecs.size) { |
997 | 1.43k | *wrote_all = 1; |
998 | 1.43k | break; |
999 | 1.43k | } |
1000 | 4.03k | } |
1001 | 4.03k | } while (dst != dst_end); |
1002 | | |
1003 | 1.43k | *len = dst - (uint8_t *)_dst; |
1004 | | |
1005 | | /* retain the payload of the response body before calling `h2o_proceed_response`, as the generator might discard the buffer */ |
1006 | 1.43k | if (stream->state == H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY && *wrote_all && |
1007 | 1.43k | quicly_sendstate_is_open(&stream->quic->sendstate) && !stream->proceed_requested) { |
1008 | 520 | if (!retain_sendvecs(stream)) |
1009 | 0 | goto Error; |
1010 | 520 | stream->proceed_requested = 1; |
1011 | 520 | stream->proceed_while_sending = 1; |
1012 | 520 | } |
1013 | | |
1014 | 1.43k | return; |
1015 | 1.43k | Error: |
1016 | 0 | *len = 0; |
1017 | 0 | *wrote_all = 1; |
1018 | 0 | shutdown_stream(stream, H2O_HTTP3_ERROR_EARLY_RESPONSE, H2O_HTTP3_ERROR_INTERNAL, 0); |
1019 | 0 | } |
1020 | | |
1021 | | static void on_send_stop(quicly_stream_t *qs, int err) |
1022 | 0 | { |
1023 | 0 | struct st_h2o_http3_server_stream_t *stream = qs->data; |
1024 | |
1025 | 0 | shutdown_stream(stream, H2O_HTTP3_ERROR_REQUEST_CANCELLED, err, 0); |
1026 | 0 | } |
1027 | | |
1028 | | static void handle_buffered_input(struct st_h2o_http3_server_stream_t *stream, int in_generator) |
1029 | 6.51k | { |
1030 | 6.51k | struct st_h2o_http3_server_conn_t *conn = get_conn(stream); |
1031 | | |
1032 | 6.51k | if (stream->state >= H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT) |
1033 | 0 | return; |
1034 | | |
1035 | 6.51k | { /* Process contiguous bytes in the receive buffer until one of the following conditions is reached: |
1036 | | * a) connection- or stream-level error (i.e., state advanced to CLOSE_WAIT) is detected - in which case we exit, |
1037 | | * b) incomplete frame is detected - wait for more (if the stream is open) or raise a connection error, or |
1038 | | * c) all bytes are processed or read_blocked flag is set synchronously (due to receiving CONNECT request) - exit the loop. */ |
1039 | 6.51k | size_t bytes_available = quicly_recvstate_bytes_available(&stream->quic->recvstate); |
1040 | 6.51k | assert(bytes_available <= stream->recvbuf.buf->size); |
1041 | 6.51k | if (bytes_available != 0) { |
1042 | 6.51k | const uint8_t *src = (const uint8_t *)stream->recvbuf.buf->bytes, *src_end = src + bytes_available; |
1043 | 19.6k | do { |
1044 | 19.6k | int err; |
1045 | 19.6k | const char *err_desc = NULL; |
1046 | 19.6k | if ((err = stream->recvbuf.handle_input(stream, &src, src_end, in_generator, &err_desc)) != 0) { |
1047 | 4.07k | if (err == H2O_HTTP3_ERROR_INCOMPLETE) { |
1048 | 170 | if (!quicly_recvstate_transfer_complete(&stream->quic->recvstate)) |
1049 | 0 | break; |
1050 | 170 | err = H2O_HTTP3_ERROR_GENERAL_PROTOCOL; |
1051 | 170 | err_desc = "incomplete frame"; |
1052 | 170 | } |
1053 | 4.07k | h2o_quic_close_connection(&conn->h3.super, err, err_desc); |
1054 | 4.07k | return; |
1055 | 15.6k | } else if (stream->state >= H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT) { |
1056 | 419 | return; |
1057 | 419 | } |
1058 | 19.6k | } while (src != src_end && !stream->read_blocked); |
1059 | | /* Processed zero or more bytes without noticing an error; shift the bytes that have been processed as frames. */ |
1060 | 2.02k | size_t bytes_consumed = src - (const uint8_t *)stream->recvbuf.buf->bytes; |
1061 | 2.02k | h2o_buffer_consume(&stream->recvbuf.buf, bytes_consumed); |
1062 | 2.02k | quicly_stream_sync_recvbuf(stream->quic, bytes_consumed); |
1063 | 2.02k | if (stream->read_blocked) |
1064 | 247 | return; |
1065 | 2.02k | } |
1066 | 6.51k | } |
1067 | | |
1068 | 1.77k | if (quicly_recvstate_transfer_complete(&stream->quic->recvstate)) { |
1069 | 1.77k | if (stream->recvbuf.buf->size == 0 && (stream->recvbuf.handle_input == handle_input_expect_data || |
1070 | 1.77k | stream->recvbuf.handle_input == handle_input_post_trailers)) { |
1071 | | /* have complete request, advance the state and process the request */ |
1072 | 1.36k | if (stream->req.content_length != SIZE_MAX && stream->req.content_length != stream->req.req_body_bytes_received) { |
1073 | | /* the request terminated abruptly; reset the stream as we do for HTTP/2 */ |
1074 | 177 | shutdown_stream(stream, H2O_HTTP3_ERROR_NONE /* ignored */, |
1075 | 177 | stream->req.req_body_bytes_received < stream->req.content_length |
1076 | 177 | ? H2O_HTTP3_ERROR_REQUEST_INCOMPLETE |
1077 | 177 | : H2O_HTTP3_ERROR_GENERAL_PROTOCOL, |
1078 | 177 | in_generator); |
1079 | 1.19k | } else { |
1080 | 1.19k | if (stream->req.write_req.cb != NULL) { |
1081 | 0 | if (!h2o_linklist_is_linked(&stream->link)) |
1082 | 0 | h2o_linklist_insert(&conn->delayed_streams.req_streaming, &stream->link); |
1083 | 0 | request_run_delayed(conn); |
1084 | 1.19k | } else if (!stream->req.process_called && stream->state < H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS) { |
1085 | | /* process the request, if we have not yet called h2o_process_request nor sent an error response */ |
1086 | 977 | switch (stream->state) { |
1087 | 0 | case H2O_HTTP3_SERVER_STREAM_STATE_RECV_HEADERS: |
1088 | 977 | case H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BEFORE_BLOCK: |
1089 | 977 | case H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_UNBLOCKED: |
1090 | 977 | break; |
1091 | 0 | default: |
1092 | 0 | assert(!"unexpected state"); |
1093 | 0 | break; |
1094 | 977 | } |
1095 | 977 | set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_REQ_PENDING, in_generator); |
1096 | 977 | h2o_linklist_insert(&conn->delayed_streams.pending, &stream->link); |
1097 | 977 | request_run_delayed(conn); |
1098 | 977 | } |
1099 | 1.19k | } |
1100 | 1.36k | } else { |
1101 | 411 | shutdown_stream(stream, H2O_HTTP3_ERROR_NONE /* ignored */, H2O_HTTP3_ERROR_REQUEST_INCOMPLETE, in_generator); |
1102 | 411 | } |
1103 | 1.77k | } else { |
1104 | 0 | if (stream->state == H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BEFORE_BLOCK && stream->req_body != NULL && |
1105 | 0 | stream->req_body->size >= H2O_HTTP3_REQUEST_BODY_MIN_BYTES_TO_BLOCK) { |
1106 | | /* switch to blocked state if the request body is becoming large (this limits the concurrency to the backend) */ |
1107 | 0 | stream->read_blocked = 1; |
1108 | 0 | h2o_linklist_insert(&conn->delayed_streams.recv_body_blocked, &stream->link); |
1109 | 0 | set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BLOCKED, in_generator); |
1110 | 0 | check_run_blocked(conn); |
1111 | 0 | } else if (stream->req.write_req.cb != NULL && stream->req_body->size != 0) { |
1112 | | /* in streaming mode, let the run_delayed invoke write_req */ |
1113 | 0 | if (!h2o_linklist_is_linked(&stream->link)) |
1114 | 0 | h2o_linklist_insert(&conn->delayed_streams.req_streaming, &stream->link); |
1115 | 0 | request_run_delayed(conn); |
1116 | 0 | } |
1117 | 0 | } |
1118 | 1.77k | } |
1119 | | |
1120 | | static void on_receive(quicly_stream_t *qs, size_t off, const void *input, size_t len) |
1121 | 6.51k | { |
1122 | 6.51k | struct st_h2o_http3_server_stream_t *stream = qs->data; |
1123 | | |
1124 | | /* save received data (FIXME avoid copying if possible; see hqclient.c) */ |
1125 | 6.51k | h2o_http3_update_recvbuf(&stream->recvbuf.buf, off, input, len); |
1126 | | |
1127 | 6.51k | if (stream->read_blocked) |
1128 | 0 | return; |
1129 | | |
1130 | | /* handle input (FIXME propagate err_desc) */ |
1131 | 6.51k | handle_buffered_input(stream, 0); |
1132 | 6.51k | } |
1133 | | |
1134 | | static void on_receive_reset(quicly_stream_t *qs, int err) |
1135 | 0 | { |
1136 | 0 | struct st_h2o_http3_server_stream_t *stream = qs->data; |
1137 | |
1138 | 0 | shutdown_stream(stream, H2O_HTTP3_ERROR_NONE /* ignored */, |
1139 | 0 | stream->state == H2O_HTTP3_SERVER_STREAM_STATE_RECV_HEADERS ? H2O_HTTP3_ERROR_REQUEST_REJECTED |
1140 | 0 | : H2O_HTTP3_ERROR_REQUEST_CANCELLED, |
1141 | 0 | 0); |
1142 | 0 | } |
1143 | | |
1144 | | static void proceed_request_streaming(h2o_req_t *_req, const char *errstr) |
1145 | 0 | { |
1146 | 0 | struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, req, _req); |
1147 | 0 | struct st_h2o_http3_server_conn_t *conn = get_conn(stream); |
1148 | |
1149 | 0 | assert(stream->req_body != NULL); |
1150 | 0 | assert(errstr != NULL || !h2o_linklist_is_linked(&stream->link)); |
1151 | 0 | assert(conn->num_streams_req_streaming != 0 || stream->req.is_tunnel_req); |
1152 | | |
1153 | 0 | if (errstr != NULL || (quicly_recvstate_bytes_available(&stream->quic->recvstate) == 0 && |
1154 | 0 | quicly_recvstate_transfer_complete(&stream->quic->recvstate))) { |
1155 | | /* tidy up the request streaming */ |
1156 | 0 | stream->req.write_req.cb = NULL; |
1157 | 0 | stream->req.write_req.ctx = NULL; |
1158 | 0 | stream->req.proceed_req = NULL; |
1159 | 0 | stream->req_streaming = 0; |
1160 | 0 | if (!stream->req.is_tunnel_req) |
1161 | 0 | --conn->num_streams_req_streaming; |
1162 | 0 | check_run_blocked(conn); |
1163 | | /* close the stream if an error occurred */ |
1164 | 0 | if (errstr != NULL) { |
1165 | 0 | shutdown_stream(stream, H2O_HTTP3_ERROR_INTERNAL, H2O_HTTP3_ERROR_INTERNAL, 1); |
1166 | 0 | return; |
1167 | 0 | } |
1168 | 0 | } |
1169 | | |
1170 | | /* remove the bytes from the request body buffer */ |
1171 | 0 | assert(stream->req.entity.len == stream->req_body->size); |
1172 | 0 | h2o_buffer_consume(&stream->req_body, stream->req_body->size); |
1173 | 0 | stream->req.entity = h2o_iovec_init(NULL, 0); |
1174 | | |
1175 | | /* unblock read until the next invocation of write_req, or after the final invocation */ |
1176 | 0 | stream->read_blocked = 0; |
1177 | | |
1178 | | /* handle input in the receive buffer */ |
1179 | 0 | handle_buffered_input(stream, 1); |
1180 | 0 | } |
1181 | | |
1182 | | static void run_delayed(h2o_timer_t *timer) |
1183 | 977 | { |
1184 | 977 | struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, timeout, timer); |
1185 | 977 | int made_progress; |
1186 | | |
1187 | 1.95k | do { |
1188 | 1.95k | made_progress = 0; |
1189 | | |
1190 | | /* promote blocked stream to unblocked state, if possible */ |
1191 | 1.95k | if (conn->num_streams.recv_body_unblocked + conn->num_streams_req_streaming < |
1192 | 1.95k | conn->super.ctx->globalconf->http3.max_concurrent_streaming_requests_per_connection && |
1193 | 1.95k | !h2o_linklist_is_empty(&conn->delayed_streams.recv_body_blocked)) { |
1194 | 0 | struct st_h2o_http3_server_stream_t *stream = |
1195 | 0 | H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, link, conn->delayed_streams.recv_body_blocked.next); |
1196 | 0 | assert(stream->state == H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BLOCKED); |
1197 | 0 | assert(stream->read_blocked); |
1198 | 0 | h2o_linklist_unlink(&stream->link); |
1199 | 0 | made_progress = 1; |
1200 | 0 | quicly_stream_set_receive_window(stream->quic, conn->super.ctx->globalconf->http3.active_stream_window_size); |
1201 | 0 | if (h2o_req_can_stream_request(&stream->req)) { |
1202 | | /* use streaming mode */ |
1203 | 0 | stream->req_streaming = 1; |
1204 | 0 | ++conn->num_streams_req_streaming; |
1205 | 0 | stream->req.proceed_req = proceed_request_streaming; |
1206 | 0 | set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS, 0); |
1207 | 0 | h2o_process_request(&stream->req); |
1208 | 0 | } else { |
1209 | | /* unblock, read the bytes in receive buffer */ |
1210 | 0 | stream->read_blocked = 0; |
1211 | 0 | set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_UNBLOCKED, 0); |
1212 | 0 | handle_buffered_input(stream, 0); |
1213 | 0 | if (quicly_get_state(conn->h3.super.quic) >= QUICLY_STATE_CLOSING) |
1214 | 0 | return; |
1215 | 0 | } |
1216 | 0 | } |
1217 | | |
1218 | | /* process streams that use request streaming and have new data to submit */
1219 | 1.95k | while (!h2o_linklist_is_empty(&conn->delayed_streams.req_streaming)) { |
1220 | 0 | struct st_h2o_http3_server_stream_t *stream = |
1221 | 0 | H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, link, conn->delayed_streams.req_streaming.next); |
1222 | 0 | int is_end_stream = quicly_recvstate_transfer_complete(&stream->quic->recvstate); |
1223 | 0 | assert(stream->req.process_called); |
1224 | 0 | assert(stream->req.write_req.cb != NULL); |
1225 | 0 | assert(stream->req_body != NULL); |
1226 | 0 | assert(stream->req_body->size != 0 || is_end_stream); |
1227 | 0 | assert(!stream->read_blocked); |
1228 | 0 | h2o_linklist_unlink(&stream->link); |
1229 | 0 | stream->read_blocked = 1; |
1230 | 0 | made_progress = 1; |
1231 | 0 | assert(stream->req.entity.len == stream->req_body->size && |
1232 | 0 | (stream->req.entity.len == 0 || stream->req.entity.base == stream->req_body->bytes)); |
1233 | 0 | if (stream->req.write_req.cb(stream->req.write_req.ctx, is_end_stream) != 0) |
1234 | 0 | shutdown_stream(stream, H2O_HTTP3_ERROR_INTERNAL, H2O_HTTP3_ERROR_INTERNAL, 0); |
1235 | 0 | } |
1236 | | |
1237 | | /* process the requests (not in streaming mode); TODO cap concurrency? */ |
1238 | 2.93k | while (!h2o_linklist_is_empty(&conn->delayed_streams.pending)) { |
1239 | 977 | struct st_h2o_http3_server_stream_t *stream = |
1240 | 977 | H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, link, conn->delayed_streams.pending.next); |
1241 | 977 | assert(stream->state == H2O_HTTP3_SERVER_STREAM_STATE_REQ_PENDING); |
1242 | 0 | assert(!stream->req.process_called); |
1243 | 0 | assert(!stream->read_blocked); |
1244 | 0 | h2o_linklist_unlink(&stream->link); |
1245 | 977 | made_progress = 1; |
1246 | 977 | set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS, 0); |
1247 | 977 | h2o_process_request(&stream->req); |
1248 | 977 | } |
1249 | | |
1250 | 1.95k | } while (made_progress); |
1251 | 977 | } |
1252 | | |
1253 | | int handle_input_post_trailers(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src, const uint8_t *src_end, |
1254 | | int in_generator, const char **err_desc) |
1255 | 459 | { |
1256 | 459 | h2o_http3_read_frame_t frame; |
1257 | 459 | int ret; |
1258 | | |
1259 | | /* read and ignore unknown frames */ |
1260 | 459 | if ((ret = h2o_http3_read_frame(&frame, 0, H2O_HTTP3_STREAM_TYPE_REQUEST, get_conn(stream)->h3.max_frame_payload_size, src, |
1261 | 459 | src_end, err_desc)) != 0) |
1262 | 10 | return ret; |
1263 | 449 | switch (frame.type) { |
1264 | 2 | case H2O_HTTP3_FRAME_TYPE_HEADERS: |
1265 | 8 | case H2O_HTTP3_FRAME_TYPE_DATA: |
1266 | 8 | return H2O_HTTP3_ERROR_FRAME_UNEXPECTED; |
1267 | 441 | default: |
1268 | 441 | break; |
1269 | 449 | } |
1270 | | |
1271 | 441 | return 0; |
1272 | 449 | } |
1273 | | |
1274 | | static int handle_input_expect_data_payload(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src, |
1275 | | const uint8_t *src_end, int in_generator, const char **err_desc) |
1276 | 1.53k | { |
1277 | 1.53k | size_t bytes_avail = src_end - *src; |
1278 | | |
1279 | | /* append data to body buffer */ |
1280 | 1.53k | if (bytes_avail > stream->recvbuf.bytes_left_in_data_frame) |
1281 | 1.37k | bytes_avail = stream->recvbuf.bytes_left_in_data_frame; |
1282 | 1.53k | if (stream->req_body == NULL) |
1283 | 190 | h2o_buffer_init(&stream->req_body, &h2o_socket_buffer_prototype); |
1284 | 1.53k | if (!h2o_buffer_try_append(&stream->req_body, *src, bytes_avail)) |
1285 | 0 | return H2O_HTTP3_ERROR_INTERNAL; |
1286 | 1.53k | stream->req.entity = h2o_iovec_init(stream->req_body->bytes, stream->req_body->size); |
1287 | 1.53k | stream->req.req_body_bytes_received += bytes_avail; |
1288 | 1.53k | stream->recvbuf.bytes_left_in_data_frame -= bytes_avail; |
1289 | 1.53k | *src += bytes_avail; |
1290 | | |
1291 | 1.53k | if (stream->recvbuf.bytes_left_in_data_frame == 0) |
1292 | 1.40k | stream->recvbuf.handle_input = handle_input_expect_data; |
1293 | | |
1294 | 1.53k | return 0; |
1295 | 1.53k | } |
1296 | | |
1297 | | int handle_input_expect_data(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src, const uint8_t *src_end, |
1298 | | int in_generator, const char **err_desc) |
1299 | 10.0k | { |
1300 | 10.0k | h2o_http3_read_frame_t frame; |
1301 | 10.0k | int ret; |
1302 | | |
1303 | | /* read frame */ |
1304 | 10.0k | if ((ret = h2o_http3_read_frame(&frame, 0, H2O_HTTP3_STREAM_TYPE_REQUEST, get_conn(stream)->h3.max_frame_payload_size, src, |
1305 | 10.0k | src_end, err_desc)) != 0) |
1306 | 51 | return ret; |
1307 | 9.99k | switch (frame.type) { |
1308 | 84 | case H2O_HTTP3_FRAME_TYPE_HEADERS: |
1309 | | /* when in tunnel mode, trailers are forbidden */
1310 | 84 | if (stream->req.is_tunnel_req) { |
1311 | 0 | *err_desc = "unexpected frame type"; |
1312 | 0 | return H2O_HTTP3_ERROR_FRAME_UNEXPECTED; |
1313 | 0 | } |
1314 | | /* trailers; ignore them, but disallow subsequent DATA or HEADERS frames */
1315 | 84 | stream->recvbuf.handle_input = handle_input_post_trailers; |
1316 | 84 | return 0; |
1317 | 9.65k | case H2O_HTTP3_FRAME_TYPE_DATA: |
1318 | 9.65k | if (stream->req.content_length != SIZE_MAX && |
1319 | 9.65k | stream->req.content_length - stream->req.req_body_bytes_received < frame.length) { |
1320 | | /* The only viable option here is to reset the stream, as we might have already started streaming the request body |
1321 | | * upstream. This behavior is consistent with what we do in HTTP/2. */ |
1322 | 3 | shutdown_stream(stream, H2O_HTTP3_ERROR_EARLY_RESPONSE, H2O_HTTP3_ERROR_GENERAL_PROTOCOL, in_generator); |
1323 | 3 | return 0; |
1324 | 3 | } |
1325 | 9.64k | break; |
1326 | 9.64k | default: |
1327 | 264 | return 0; |
1328 | 9.99k | } |
1329 | | |
1330 | | /* got a DATA frame */ |
1331 | 9.64k | if (frame.length != 0) { |
1332 | 1.60k | if (h2o_timeval_is_null(&stream->req.timestamps.request_body_begin_at)) |
1333 | 262 | stream->req.timestamps.request_body_begin_at = h2o_gettimeofday(get_conn(stream)->super.ctx->loop); |
1334 | 1.60k | stream->recvbuf.handle_input = handle_input_expect_data_payload; |
1335 | 1.60k | stream->recvbuf.bytes_left_in_data_frame = frame.length; |
1336 | 1.60k | } |
1337 | | |
1338 | 9.64k | return 0; |
1339 | 9.99k | } |
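The guard above compares `content_length - req_body_bytes_received < frame.length` instead of summing the received bytes with the frame length: the bytes received so far never exceed the declared content-length, so the subtraction cannot wrap, whereas the addition could overflow when the peer supplies a huge DATA frame length. A standalone sketch of the same check, for illustration only (the helper name is hypothetical, not part of server.c):

#include <stddef.h>
#include <stdint.h>

/* returns non-zero if appending `frame_length` more bytes would exceed the declared content-length */
static int exceeds_declared_length(size_t content_length, size_t received, uint64_t frame_length)
{
    if (content_length == SIZE_MAX)
        return 0; /* no content-length header; nothing to enforce */
    /* `received <= content_length` is an invariant maintained by this very check, so the
     * subtraction cannot wrap; `received + frame_length` could overflow instead */
    return content_length - received < frame_length;
}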
1340 | | |
1341 | | static int handle_input_expect_headers_send_http_error(struct st_h2o_http3_server_stream_t *stream, |
1342 | | void (*sendfn)(h2o_req_t *, const char *, const char *, int), |
1343 | | const char *reason, const char *body, const char **err_desc) |
1344 | 634 | { |
1345 | 634 | if (!quicly_recvstate_transfer_complete(&stream->quic->recvstate)) |
1346 | 0 | quicly_request_stop(stream->quic, H2O_HTTP3_ERROR_EARLY_RESPONSE); |
1347 | | |
1348 | 634 | set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS, 0); |
1349 | 634 | sendfn(&stream->req, reason, body, 0); |
1350 | 634 | *err_desc = NULL; |
1351 | | |
1352 | 634 | return 0; |
1353 | 634 | } |
1354 | | |
1355 | | static int handle_input_expect_headers_process_connect(struct st_h2o_http3_server_stream_t *stream, uint64_t datagram_flow_id, |
1356 | | const char **err_desc) |
1357 | 428 | { |
1358 | 428 | if (stream->req.content_length != SIZE_MAX) |
1359 | 181 | return handle_input_expect_headers_send_http_error(stream, h2o_send_error_400, "Invalid Request", |
1360 | 181 | "CONNECT request cannot have request body", err_desc); |
1361 | | |
1362 | 247 | stream->req.is_tunnel_req = 1; |
1363 | 247 | h2o_buffer_init(&stream->req_body, &h2o_socket_buffer_prototype); |
1364 | 247 | stream->req.entity = h2o_iovec_init("", 0); |
1365 | 247 | stream->read_blocked = 1; |
1366 | 247 | stream->req.proceed_req = proceed_request_streaming; |
1367 | 247 | stream->datagram_flow_id = datagram_flow_id; |
1368 | 247 | ++get_conn(stream)->num_streams_tunnelling; |
1369 | 247 | set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS, 0); |
1370 | 247 | quicly_stream_set_receive_window(stream->quic, get_conn(stream)->super.ctx->globalconf->http3.active_stream_window_size); |
1371 | 247 | h2o_process_request(&stream->req); |
1372 | | |
1373 | 247 | return 0; |
1374 | 428 | } |
1375 | | |
1376 | | static int handle_input_expect_headers(struct st_h2o_http3_server_stream_t *stream, const uint8_t **src, const uint8_t *src_end, |
1377 | | int in_generator, const char **err_desc) |
1378 | 7.64k | { |
1379 | 7.64k | assert(!in_generator); /* this function is processing headers (before generators get assigned), not trailers */ |
1380 | | |
1381 | 0 | struct st_h2o_http3_server_conn_t *conn = get_conn(stream); |
1382 | 7.64k | h2o_http3_read_frame_t frame; |
1383 | 7.64k | int header_exists_map = 0, ret; |
1384 | 7.64k | h2o_iovec_t expect = h2o_iovec_init(NULL, 0); |
1385 | 7.64k | h2o_iovec_t datagram_flow_id_field = {}; |
1386 | 7.64k | uint64_t datagram_flow_id = UINT64_MAX; |
1387 | 7.64k | uint8_t header_ack[H2O_HPACK_ENCODE_INT_MAX_LENGTH]; |
1388 | 7.64k | size_t header_ack_len; |
1389 | | |
1390 | | /* read the HEADERS frame (or a frame that precedes that) */ |
1391 | 7.64k | if ((ret = h2o_http3_read_frame(&frame, 0, H2O_HTTP3_STREAM_TYPE_REQUEST, get_conn(stream)->h3.max_frame_payload_size, src, |
1392 | 7.64k | src_end, err_desc)) != 0) { |
1393 | 306 | if (*err_desc == h2o_http3_err_frame_too_large && frame.type == H2O_HTTP3_FRAME_TYPE_HEADERS) { |
1394 | 55 | shutdown_stream(stream, H2O_HTTP3_ERROR_REQUEST_REJECTED, H2O_HTTP3_ERROR_REQUEST_REJECTED, 0); |
1395 | 55 | return 0; |
1396 | 251 | } else { |
1397 | 251 | return ret; |
1398 | 251 | } |
1399 | 306 | } |
1400 | 7.33k | if (frame.type != H2O_HTTP3_FRAME_TYPE_HEADERS) { |
1401 | 1.34k | switch (frame.type) { |
1402 | 7 | case H2O_HTTP3_FRAME_TYPE_DATA: |
1403 | 7 | return H2O_HTTP3_ERROR_FRAME_UNEXPECTED; |
1404 | 1.33k | default: |
1405 | 1.33k | break; |
1406 | 1.34k | } |
1407 | 1.33k | return 0; |
1408 | 1.34k | } |
1409 | 5.99k | stream->req.timestamps.request_begin_at = h2o_gettimeofday(conn->super.ctx->loop); |
1410 | 5.99k | stream->recvbuf.handle_input = handle_input_expect_data; |
1411 | | |
1412 | | /* parse the headers, and ack */ |
1413 | 5.99k | if ((ret = h2o_qpack_parse_request(&stream->req.pool, get_conn(stream)->h3.qpack.dec, stream->quic->stream_id, |
1414 | 5.99k | &stream->req.input.method, &stream->req.input.scheme, &stream->req.input.authority, |
1415 | 5.99k | &stream->req.input.path, &stream->req.upgrade, &stream->req.headers, &header_exists_map, |
1416 | 5.99k | &stream->req.content_length, &expect, NULL /* TODO cache-digests */, &datagram_flow_id_field, |
1417 | 5.99k | header_ack, &header_ack_len, frame.payload, frame.length, err_desc)) != 0 && |
1418 | 5.99k | ret != H2O_HTTP2_ERROR_INVALID_HEADER_CHAR) |
1419 | 3.74k | return ret; |
1420 | 2.24k | if (header_ack_len != 0) |
1421 | 0 | h2o_http3_send_qpack_header_ack(&conn->h3, header_ack, header_ack_len); |
1422 | | |
1423 | 2.24k | h2o_probe_log_request(&stream->req, stream->quic->stream_id); |
1424 | | |
1425 | 2.24k | if (stream->req.input.scheme == NULL) |
1426 | 1.05k | stream->req.input.scheme = &H2O_URL_SCHEME_HTTPS; |
1427 | | |
1428 | 2.24k | int is_connect, must_exist_map, may_exist_map; |
1429 | 2.24k | const int can_receive_datagrams = |
1430 | 2.24k | quicly_get_context(get_conn(stream)->h3.super.quic)->transport_params.max_datagram_frame_size != 0; |
1431 | 2.24k | if (h2o_memis(stream->req.input.method.base, stream->req.input.method.len, H2O_STRLIT("CONNECT"))) { |
1432 | 744 | is_connect = 1; |
1433 | 744 | must_exist_map = H2O_HPACK_PARSE_HEADERS_METHOD_EXISTS | H2O_HPACK_PARSE_HEADERS_AUTHORITY_EXISTS; |
1434 | 744 | may_exist_map = 0; |
1435 | | /* extended connect looks like an ordinary request plus an upgrade token (:protocol) */ |
1436 | 744 | if ((header_exists_map & H2O_HPACK_PARSE_HEADERS_PROTOCOL_EXISTS) != 0) { |
1437 | 1 | must_exist_map |= H2O_HPACK_PARSE_HEADERS_SCHEME_EXISTS | H2O_HPACK_PARSE_HEADERS_PATH_EXISTS | |
1438 | 1 | H2O_HPACK_PARSE_HEADERS_PROTOCOL_EXISTS; |
1439 | 1 | if (can_receive_datagrams) |
1440 | 1 | datagram_flow_id = stream->quic->stream_id / 4; |
1441 | 1 | } |
1442 | 1.50k | } else if (h2o_memis(stream->req.input.method.base, stream->req.input.method.len, H2O_STRLIT("CONNECT-UDP"))) { |
1443 | | /* Handling of masque draft-03. Method is CONNECT-UDP and :protocol is not used, so we set `:protocol` to "connect-udp" to |
1444 | | * make it look like an upgrade. The method is preserved and can be used to distinguish this from the RFC 9298 version, which
1445 | | * uses "CONNECT". The draft requires "masque" in `:scheme` but we need to support clients that put "https" there instead. */
1446 | 2 | if (!((header_exists_map & H2O_HPACK_PARSE_HEADERS_PROTOCOL_EXISTS) == 0 && |
1447 | 2 | h2o_memis(stream->req.input.path.base, stream->req.input.path.len, H2O_STRLIT("/")))) { |
1448 | 1 | shutdown_stream(stream, H2O_HTTP3_ERROR_GENERAL_PROTOCOL, H2O_HTTP3_ERROR_GENERAL_PROTOCOL, 0); |
1449 | 1 | return 0; |
1450 | 1 | } |
1451 | 1 | if (datagram_flow_id_field.base != NULL) { |
1452 | 0 | if (!can_receive_datagrams) { |
1453 | 0 | *err_desc = "unexpected h3 datagram"; |
1454 | 0 | return H2O_HTTP3_ERROR_GENERAL_PROTOCOL; |
1455 | 0 | } |
1456 | 0 | datagram_flow_id = 0; |
1457 | 0 | for (const char *p = datagram_flow_id_field.base; p != datagram_flow_id_field.base + datagram_flow_id_field.len; ++p) { |
1458 | 0 | if (!('0' <= *p && *p <= '9')) |
1459 | 0 | break; |
1460 | 0 | datagram_flow_id = datagram_flow_id * 10 + *p - '0'; |
1461 | 0 | } |
1462 | 0 | } |
1463 | 1 | assert(stream->req.upgrade.base == NULL); /* otherwise PROTOCOL_EXISTS will be set */ |
1464 | 0 | is_connect = 1; |
1465 | 1 | must_exist_map = H2O_HPACK_PARSE_HEADERS_METHOD_EXISTS | H2O_HPACK_PARSE_HEADERS_AUTHORITY_EXISTS | |
1466 | 1 | H2O_HPACK_PARSE_HEADERS_SCHEME_EXISTS | H2O_HPACK_PARSE_HEADERS_PATH_EXISTS; |
1467 | 1 | may_exist_map = 0; |
1468 | 1.50k | } else { |
1469 | | /* normal request */ |
1470 | 1.50k | is_connect = 0; |
1471 | 1.50k | must_exist_map = |
1472 | 1.50k | H2O_HPACK_PARSE_HEADERS_METHOD_EXISTS | H2O_HPACK_PARSE_HEADERS_SCHEME_EXISTS | H2O_HPACK_PARSE_HEADERS_PATH_EXISTS; |
1473 | 1.50k | may_exist_map = H2O_HPACK_PARSE_HEADERS_AUTHORITY_EXISTS; |
1474 | 1.50k | } |
1475 | | |
1476 | | /* check that all MUST pseudo headers exist, and that there are no other pseudo headers than MUST or MAY */ |
1477 | 2.24k | if (!((header_exists_map & must_exist_map) == must_exist_map && (header_exists_map & ~(must_exist_map | may_exist_map)) == 0)) { |
1478 | 360 | shutdown_stream(stream, H2O_HTTP3_ERROR_GENERAL_PROTOCOL, H2O_HTTP3_ERROR_GENERAL_PROTOCOL, 0); |
1479 | 360 | return 0; |
1480 | 360 | } |
1481 | | |
1482 | | /* send a 400 error when observing an invalid header character */ |
1483 | 1.88k | if (ret == H2O_HTTP2_ERROR_INVALID_HEADER_CHAR) |
1484 | 319 | return handle_input_expect_headers_send_http_error(stream, h2o_send_error_400, "Invalid Request", *err_desc, err_desc); |
1485 | | |
1486 | | /* validate semantic requirement */ |
1487 | 1.56k | if (!h2o_req_validate_pseudo_headers(&stream->req)) { |
1488 | 0 | *err_desc = "invalid pseudo headers"; |
1489 | 0 | return H2O_HTTP3_ERROR_GENERAL_PROTOCOL; |
1490 | 0 | } |
1491 | | |
1492 | | /* check if content-length is within the permitted bounds */ |
1493 | 1.56k | if (stream->req.content_length != SIZE_MAX && stream->req.content_length > conn->super.ctx->globalconf->max_request_entity_size) |
1494 | 132 | return handle_input_expect_headers_send_http_error(stream, h2o_send_error_413, "Request Entity Too Large", |
1495 | 132 | "request entity is too large", err_desc); |
1496 | | |
1497 | | /* set priority */ |
1498 | 1.43k | assert(!h2o_linklist_is_linked(&stream->scheduler.link)); |
1499 | 1.43k | if (!stream->received_priority_update) { |
1500 | 1.43k | ssize_t index; |
1501 | 1.43k | if ((index = h2o_find_header(&stream->req.headers, H2O_TOKEN_PRIORITY, -1)) != -1) { |
1502 | 289 | h2o_iovec_t *value = &stream->req.headers.entries[index].value; |
1503 | 289 | h2o_absprio_parse_priority(value->base, value->len, &stream->scheduler.priority); |
1504 | 1.14k | } else if (is_connect) { |
1505 | 211 | stream->scheduler.priority.incremental = 1; |
1506 | 211 | } |
1507 | 1.43k | } |
1508 | | |
1509 | | /* special handling of CONNECT method */ |
1510 | 1.43k | if (is_connect) |
1511 | 428 | return handle_input_expect_headers_process_connect(stream, datagram_flow_id, err_desc); |
1512 | | |
1513 | | /* change state */ |
1514 | 1.00k | set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_RECV_BODY_BEFORE_BLOCK, 0); |
1515 | | |
1516 | | /* handle expect: 100-continue */ |
1517 | 1.00k | if (expect.base != NULL) { |
1518 | 2 | if (!h2o_lcstris(expect.base, expect.len, H2O_STRLIT("100-continue"))) { |
1519 | 2 | return handle_input_expect_headers_send_http_error(stream, h2o_send_error_417, "Expectation Failed", |
1520 | 2 | "unknown expectation", err_desc); |
1521 | 2 | } |
1522 | 0 | stream->req.res.status = 100; |
1523 | 0 | h2o_send_informational(&stream->req); |
1524 | 0 | } |
1525 | | |
1526 | 1.00k | return 0; |
1527 | 1.00k | } |
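The pseudo-header validation above uses two bitmaps: every bit in `must_exist_map` has to be set in `header_exists_map`, and no bit outside `must_exist_map | may_exist_map` may be set. A minimal sketch of that two-mask test, with hypothetical flag values standing in for the `H2O_HPACK_PARSE_HEADERS_*_EXISTS` constants:

#include <stdio.h>

#define EXISTS_METHOD (1 << 0) /* hypothetical stand-ins for the real flags */
#define EXISTS_SCHEME (1 << 1)
#define EXISTS_AUTHORITY (1 << 2)
#define EXISTS_PATH (1 << 3)

static int pseudo_headers_are_valid(int exists_map, int must_exist_map, int may_exist_map)
{
    /* all MUST bits are present, and nothing is present outside MUST | MAY */
    return (exists_map & must_exist_map) == must_exist_map && (exists_map & ~(must_exist_map | may_exist_map)) == 0;
}

int main(void)
{
    /* an ordinary (non-CONNECT) request: method, scheme, path required; authority optional */
    int must = EXISTS_METHOD | EXISTS_SCHEME | EXISTS_PATH, may = EXISTS_AUTHORITY;
    printf("%d\n", pseudo_headers_are_valid(must | EXISTS_AUTHORITY, must, may));     /* prints 1 */
    printf("%d\n", pseudo_headers_are_valid(EXISTS_METHOD | EXISTS_PATH, must, may)); /* prints 0: missing :scheme */
    return 0;
}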
1528 | | |
1529 | | static void write_response(struct st_h2o_http3_server_stream_t *stream, h2o_iovec_t datagram_flow_id) |
1530 | 1.85k | { |
1531 | 1.85k | size_t serialized_header_len = 0; |
1532 | 1.85k | h2o_iovec_t frame = h2o_qpack_flatten_response( |
1533 | 1.85k | get_conn(stream)->h3.qpack.enc, &stream->req.pool, stream->quic->stream_id, NULL, stream->req.res.status, |
1534 | 1.85k | stream->req.res.headers.entries, stream->req.res.headers.size, &get_conn(stream)->super.ctx->globalconf->server_name, |
1535 | 1.85k | stream->req.res.content_length, datagram_flow_id, &serialized_header_len); |
1536 | 1.85k | stream->req.header_bytes_sent += serialized_header_len; |
1537 | | |
1538 | 1.85k | h2o_vector_reserve(&stream->req.pool, &stream->sendbuf.vecs, stream->sendbuf.vecs.size + 1); |
1539 | 1.85k | struct st_h2o_http3_server_sendvec_t *vec = stream->sendbuf.vecs.entries + stream->sendbuf.vecs.size++; |
1540 | 1.85k | vec->vec = (h2o_sendvec_t){&immutable_vec_callbacks, frame.len, {frame.base}}; |
1541 | 1.85k | vec->entity_offset = UINT64_MAX; |
1542 | 1.85k | stream->sendbuf.final_size += frame.len; |
1543 | 1.85k | } |
1544 | | |
1545 | | static size_t flatten_data_frame_header(struct st_h2o_http3_server_stream_t *stream, struct st_h2o_http3_server_sendvec_t *dst, |
1546 | | size_t payload_size) |
1547 | 1.71k | { |
1548 | 1.71k | size_t header_size = 0; |
1549 | | |
1550 | | /* build header */ |
1551 | 1.71k | stream->sendbuf.data_frame_header_buf[header_size++] = H2O_HTTP3_FRAME_TYPE_DATA; |
1552 | 1.71k | header_size = |
1553 | 1.71k | quicly_encodev(stream->sendbuf.data_frame_header_buf + header_size, payload_size) - stream->sendbuf.data_frame_header_buf; |
1554 | | |
1555 | | /* initialize the vector */
1556 | 1.71k | h2o_sendvec_init_raw(&dst->vec, stream->sendbuf.data_frame_header_buf, header_size); |
1557 | 1.71k | dst->entity_offset = UINT64_MAX; |
1558 | | |
1559 | 1.71k | return header_size; |
1560 | 1.71k | } |
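`flatten_data_frame_header` writes a one-byte frame type followed by the payload length as a QUIC variable-length integer; `quicly_encodev` returns a pointer just past the last byte written, which is why the header size is recovered by pointer subtraction. For reference, a standalone sketch of the varint encoding defined in RFC 9000 Section 16, returning a byte count instead of a pointer (illustrative only, not quicly's implementation):

#include <stddef.h>
#include <stdint.h>

/* returns the number of bytes written (1, 2, 4 or 8), or 0 if `v` does not fit in 62 bits */
static size_t encode_quic_varint(uint8_t *dst, uint64_t v)
{
    if (v < 0x40) {
        dst[0] = (uint8_t)v; /* prefix 00: 6-bit value */
        return 1;
    } else if (v < 0x4000) {
        dst[0] = (uint8_t)(0x40 | (v >> 8)); /* prefix 01: 14-bit value */
        dst[1] = (uint8_t)v;
        return 2;
    } else if (v < 0x40000000) {
        for (int i = 3; i >= 0; --i, v >>= 8) /* big-endian, 4 bytes */
            dst[i] = (uint8_t)v;
        dst[0] |= 0x80; /* prefix 10: 30-bit value */
        return 4;
    } else if (v < ((uint64_t)1 << 62)) {
        for (int i = 7; i >= 0; --i, v >>= 8) /* big-endian, 8 bytes */
            dst[i] = (uint8_t)v;
        dst[0] |= 0xc0; /* prefix 11: 62-bit value */
        return 8;
    }
    return 0;
}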
1561 | | |
1562 | | static void shutdown_by_generator(struct st_h2o_http3_server_stream_t *stream) |
1563 | 1.85k | { |
1564 | 1.85k | quicly_sendstate_shutdown(&stream->quic->sendstate, stream->sendbuf.final_size); |
1565 | 1.85k | if (stream->sendbuf.vecs.size == 0) { |
1566 | 0 | if (quicly_stream_has_receive_side(0, stream->quic->stream_id)) |
1567 | 0 | quicly_request_stop(stream->quic, H2O_HTTP3_ERROR_EARLY_RESPONSE); |
1568 | 0 | set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT, 1); |
1569 | 0 | } |
1570 | 1.85k | } |
1571 | | |
1572 | | /** |
1573 | | * returns a boolean indicating whether the response is ready to be sent, building the value of the datagram-flow-id header field
1574 | | */ |
1575 | | static int finalize_do_send_setup_udp_tunnel(struct st_h2o_http3_server_stream_t *stream, h2o_send_state_t send_state, |
1576 | | h2o_iovec_t *datagram_flow_id) |
1577 | 1.85k | { |
1578 | 1.85k | *datagram_flow_id = h2o_iovec_init(NULL, 0); |
1579 | | |
1580 | | /* TODO Convert H3_DATAGRAMs to capsules either here or inside the proxy handler. At the moment, the connect handler provides |
1581 | |  * `h2o_req_t::forward_datagram` callbacks but the proxy handler does not. As support for H3_DATAGRAMs is advertised at the
1582 | |  * connection level, we need to support forwarding datagrams also when the proxy handler is in use.
1583 | |  * Until then, the proxy handler refuses to tunnel connect-udp requests on H3; see `h2o__proxy_process_request`.
1584 | |  * Also, out of an abundance of caution, we drop the datagrams associated with requests that do not provide the forwarding
1585 | |  * hooks, by not registering such streams in `datagram_flows`. */
1586 | 1.85k | if (!((200 <= stream->req.res.status && stream->req.res.status <= 299) && stream->req.forward_datagram.write_ != NULL) || |
1587 | 1.85k | send_state != H2O_SEND_STATE_IN_PROGRESS) { |
1588 | 1.85k | stream->datagram_flow_id = UINT64_MAX; |
1589 | 1.85k | return 1; |
1590 | 1.85k | } |
1591 | | |
1592 | | /* Register the flow id with the connection so that datagram frames received from the client are dispatched to
1593 | |  * `req->forward_datagram.write_`. */
1594 | 0 | if (stream->datagram_flow_id != UINT64_MAX) { |
1595 | 0 | struct st_h2o_http3_server_conn_t *conn = get_conn(stream); |
1596 | 0 | int r; |
1597 | 0 | khiter_t iter = kh_put(stream, conn->datagram_flows, stream->datagram_flow_id, &r); |
1598 | 0 | assert(iter != kh_end(conn->datagram_flows)); |
1599 | 0 | kh_val(conn->datagram_flows, iter) = stream; |
1600 | 0 | } |
1601 | | |
1602 | | /* If the client sent a `datagram-flow-id` request header field:
1603 | |  * a) if the peer is willing to accept datagrams as well, use the same flow ID for sending datagrams from us,
1604 | |  * b) if the peer did not send the H3_DATAGRAM setting, use the stream, or
1605 | |  * c) if H3 SETTINGS has not been received yet, wait for it, then call `do_send` again. We might drop some packets from the
1606 | |  * origin that arrive before H3 SETTINGS from the client, on the rare occasion that the packet carrying H3 SETTINGS is lost
1607 | |  * while those carrying the CONNECT-UDP request and the UDP datagram to be forwarded to the origin arrive. */
1608 | 0 | if (stream->datagram_flow_id != UINT64_MAX) { |
1609 | 0 | struct st_h2o_http3_server_conn_t *conn = get_conn(stream); |
1610 | 0 | if (!h2o_http3_has_received_settings(&conn->h3)) { |
1611 | 0 | h2o_linklist_insert(&conn->streams_resp_settings_blocked, &stream->link_resp_settings_blocked); |
1612 | 0 | return 0; |
1613 | 0 | } |
1614 | 0 | if (conn->h3.peer_settings.h3_datagram) { |
1615 | | /* register the route that would be used by the CONNECT handler for forwarding datagrams */ |
1616 | 0 | stream->req.forward_datagram.read_ = tunnel_on_udp_read; |
1617 | | /* if the request type is draft-03, build and return the value of datagram-flow-id header field */ |
1618 | 0 | if (stream->req.input.method.len == sizeof("CONNECT-UDP") - 1) { |
1619 | 0 | datagram_flow_id->base = h2o_mem_alloc_pool(&stream->req.pool, char, sizeof(H2O_UINT64_LONGEST_STR)); |
1620 | 0 | datagram_flow_id->len = sprintf(datagram_flow_id->base, "%" PRIu64, stream->datagram_flow_id); |
1621 | 0 | } |
1622 | 0 | } |
1623 | 0 | } |
1624 | | |
1625 | 0 | return 1; |
1626 | 0 | } |
1627 | | |
1628 | | static void finalize_do_send(struct st_h2o_http3_server_stream_t *stream) |
1629 | 2.37k | { |
1630 | 2.37k | quicly_stream_sync_sendbuf(stream->quic, 1); |
1631 | 2.37k | if (!stream->proceed_while_sending) |
1632 | 1.85k | h2o_quic_schedule_timer(&get_conn(stream)->h3.super); |
1633 | 2.37k | } |
1634 | | |
1635 | | static void do_send(h2o_ostream_t *_ostr, h2o_req_t *_req, h2o_sendvec_t *bufs, size_t bufcnt, h2o_send_state_t send_state) |
1636 | 2.37k | { |
1637 | 2.37k | struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, ostr_final, _ostr); |
1638 | | |
1639 | 2.37k | assert(&stream->req == _req); |
1640 | | |
1641 | 0 | stream->proceed_requested = 0; |
1642 | | |
1643 | 2.37k | switch (stream->state) { |
1644 | 1.85k | case H2O_HTTP3_SERVER_STREAM_STATE_SEND_HEADERS: { |
1645 | 1.85k | h2o_iovec_t datagram_flow_id; |
1646 | 1.85k | ssize_t priority_header_index; |
1647 | 1.85k | if (stream->req.send_server_timing != 0) |
1648 | 0 | h2o_add_server_timing_header(&stream->req, 0 /* TODO add support for trailers; it's going to be a little complex as we |
1649 | 0 | * need to build trailers the moment they are emitted onto wire */); |
1650 | 1.85k | if (!finalize_do_send_setup_udp_tunnel(stream, send_state, &datagram_flow_id)) |
1651 | 0 | return; |
1652 | 1.85k | stream->req.timestamps.response_start_at = h2o_gettimeofday(get_conn(stream)->super.ctx->loop); |
1653 | 1.85k | write_response(stream, datagram_flow_id); |
1654 | 1.85k | h2o_probe_log_response(&stream->req, stream->quic->stream_id); |
1655 | 1.85k | set_state(stream, H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY, 1); |
1656 | 1.85k | if ((priority_header_index = h2o_find_header(&stream->req.res.headers, H2O_TOKEN_PRIORITY, -1)) != -1) { |
1657 | 0 | const h2o_header_t *header = &stream->req.res.headers.entries[priority_header_index]; |
1658 | 0 | handle_priority_change( |
1659 | 0 | stream, header->value.base, header->value.len, |
1660 | 0 | stream->scheduler.priority /* omission of a parameter indicates disinterest in changing it (RFC 9218 Section 8) */);
1661 | 0 | } |
1662 | 1.85k | break; |
1663 | 1.85k | } |
1664 | 520 | case H2O_HTTP3_SERVER_STREAM_STATE_SEND_BODY: |
1665 | 520 | assert(quicly_sendstate_is_open(&stream->quic->sendstate)); |
1666 | 0 | break; |
1667 | 0 | case H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT: |
1668 | | /* This protocol handler transitions to CLOSE_WAIT when the request side is being reset by the origin. But our client-side |
1669 | | * implementations are capable of handling uni-directional close, and therefore `do_send` might still be invoked. The handler swallows
1670 | | * the input, and relies on eventual destruction of `h2o_req_t` to discard the generator. */ |
1671 | 0 | return; |
1672 | 0 | default: |
1673 | 0 | h2o_fatal("logic flaw"); |
1674 | 0 | break; |
1675 | 2.37k | } |
1676 | | |
1677 | | /* If vectors carrying response body are being provided, copy them, incrementing the reference count if possible (for future |
1678 | | * retransmissions), as well as prepending a DATA frame header */ |
1679 | 2.37k | if (bufcnt != 0) { |
1680 | 1.71k | h2o_vector_reserve(&stream->req.pool, &stream->sendbuf.vecs, stream->sendbuf.vecs.size + 1 + bufcnt); |
1681 | 1.71k | uint64_t prev_body_size = stream->sendbuf.final_body_size; |
1682 | 3.42k | for (size_t i = 0; i != bufcnt; ++i) { |
1683 | | /* copy one body vector */ |
1684 | 1.71k | struct st_h2o_http3_server_sendvec_t *dst = stream->sendbuf.vecs.entries + stream->sendbuf.vecs.size + i + 1; |
1685 | 1.71k | dst->vec = bufs[i]; |
1686 | 1.71k | dst->entity_offset = stream->sendbuf.final_body_size; |
1687 | 1.71k | stream->sendbuf.final_body_size += bufs[i].len; |
1688 | 1.71k | } |
1689 | 1.71k | uint64_t payload_size = stream->sendbuf.final_body_size - prev_body_size; |
1690 | | /* build DATA frame header */ |
1691 | 1.71k | size_t header_size = |
1692 | 1.71k | flatten_data_frame_header(stream, stream->sendbuf.vecs.entries + stream->sendbuf.vecs.size, payload_size); |
1693 | | /* update properties */ |
1694 | 1.71k | stream->sendbuf.vecs.size += 1 + bufcnt; |
1695 | 1.71k | stream->sendbuf.final_size += header_size + payload_size; |
1696 | 1.71k | } |
1697 | | |
1698 | 2.37k | switch (send_state) { |
1699 | 520 | case H2O_SEND_STATE_IN_PROGRESS: |
1700 | 520 | break; |
1701 | 1.80k | case H2O_SEND_STATE_FINAL: |
1702 | 1.85k | case H2O_SEND_STATE_ERROR: |
1703 | | /* TODO consider how to forward error, pending resolution of https://github.com/quicwg/base-drafts/issues/3300 */ |
1704 | 1.85k | shutdown_by_generator(stream); |
1705 | 1.85k | break; |
1706 | 2.37k | } |
1707 | | |
1708 | 2.37k | finalize_do_send(stream); |
1709 | 2.37k | } |
1710 | | |
1711 | | static void do_send_informational(h2o_ostream_t *_ostr, h2o_req_t *_req) |
1712 | 0 | { |
1713 | 0 | struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, ostr_final, _ostr); |
1714 | 0 | assert(&stream->req == _req); |
1715 | | |
1716 | 0 | write_response(stream, h2o_iovec_init(NULL, 0)); |
1717 | |
1718 | 0 | finalize_do_send(stream); |
1719 | 0 | } |
1720 | | |
1721 | | static int handle_priority_update_frame(struct st_h2o_http3_server_conn_t *conn, const h2o_http3_priority_update_frame_t *frame) |
1722 | 0 | { |
1723 | 0 | if (frame->element_is_push) |
1724 | 0 | return H2O_HTTP3_ERROR_GENERAL_PROTOCOL; |
1725 | | |
1726 | | /* obtain the stream being referred to (creating one if necessary), or return if the stream has been closed already */ |
1727 | 0 | quicly_stream_t *qs; |
1728 | 0 | if (quicly_get_or_open_stream(conn->h3.super.quic, frame->element, &qs) != 0) |
1729 | 0 | return H2O_HTTP3_ERROR_ID; |
1730 | 0 | if (qs == NULL) |
1731 | 0 | return 0; |
1732 | | |
1733 | | /* apply the changes */ |
1734 | 0 | struct st_h2o_http3_server_stream_t *stream = qs->data; |
1735 | 0 | assert(stream != NULL); |
1736 | 0 | stream->received_priority_update = 1; |
1737 | |
1738 | 0 | handle_priority_change(stream, frame->value.base, frame->value.len, |
1739 | 0 | h2o_absprio_default /* the frame communicates a complete set of parameters; RFC 9218 Section 7 */); |
1740 | |
1741 | 0 | return 0; |
1742 | 0 | } |
1743 | | |
1744 | | static void handle_control_stream_frame(h2o_http3_conn_t *_conn, uint64_t type, const uint8_t *payload, size_t len) |
1745 | 0 | { |
1746 | 0 | struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, _conn); |
1747 | 0 | int err; |
1748 | 0 | const char *err_desc = NULL; |
1749 | |
1750 | 0 | if (!h2o_http3_has_received_settings(&conn->h3)) { |
1751 | 0 | if (type != H2O_HTTP3_FRAME_TYPE_SETTINGS) { |
1752 | 0 | err = H2O_HTTP3_ERROR_MISSING_SETTINGS; |
1753 | 0 | goto Fail; |
1754 | 0 | } |
1755 | 0 | if ((err = h2o_http3_handle_settings_frame(&conn->h3, payload, len, &err_desc)) != 0) |
1756 | 0 | goto Fail; |
1757 | 0 | assert(h2o_http3_has_received_settings(&conn->h3)); |
1758 | 0 | while (!h2o_linklist_is_empty(&conn->streams_resp_settings_blocked)) { |
1759 | 0 | struct st_h2o_http3_server_stream_t *stream = H2O_STRUCT_FROM_MEMBER( |
1760 | 0 | struct st_h2o_http3_server_stream_t, link_resp_settings_blocked, conn->streams_resp_settings_blocked.next); |
1761 | 0 | h2o_linklist_unlink(&stream->link_resp_settings_blocked); |
1762 | 0 | do_send(&stream->ostr_final, &stream->req, NULL, 0, H2O_SEND_STATE_IN_PROGRESS); |
1763 | 0 | } |
1764 | 0 | } else { |
1765 | 0 | switch (type) { |
1766 | 0 | case H2O_HTTP3_FRAME_TYPE_SETTINGS: |
1767 | 0 | err = H2O_HTTP3_ERROR_FRAME_UNEXPECTED; |
1768 | 0 | err_desc = "unexpected SETTINGS frame"; |
1769 | 0 | goto Fail; |
1770 | 0 | case H2O_HTTP3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST: |
1771 | 0 | case H2O_HTTP3_FRAME_TYPE_PRIORITY_UPDATE_PUSH: { |
1772 | 0 | h2o_http3_priority_update_frame_t frame; |
1773 | 0 | if ((err = h2o_http3_decode_priority_update_frame(&frame, type == H2O_HTTP3_FRAME_TYPE_PRIORITY_UPDATE_PUSH, payload, |
1774 | 0 | len, &err_desc)) != 0) |
1775 | 0 | goto Fail; |
1776 | 0 | if ((err = handle_priority_update_frame(conn, &frame)) != 0) { |
1777 | 0 | err_desc = "invalid PRIORITY_UPDATE frame"; |
1778 | 0 | goto Fail; |
1779 | 0 | } |
1780 | 0 | } break; |
1781 | 0 | default: |
1782 | 0 | break; |
1783 | 0 | } |
1784 | 0 | } |
1785 | | |
1786 | 0 | return; |
1787 | 0 | Fail: |
1788 | 0 | h2o_quic_close_connection(&conn->h3.super, err, err_desc); |
1789 | 0 | } |
1790 | | |
1791 | | static int stream_open_cb(quicly_stream_open_t *self, quicly_stream_t *qs) |
1792 | 26.0k | { |
1793 | 26.0k | static const quicly_stream_callbacks_t callbacks = {on_stream_destroy, on_send_shift, on_send_emit, |
1794 | 26.0k | on_send_stop, on_receive, on_receive_reset}; |
1795 | | |
1796 | | /* handling of unidirectional streams is not server-specific */ |
1797 | 26.0k | if (quicly_stream_is_unidirectional(qs->stream_id)) { |
1798 | 19.5k | h2o_http3_on_create_unidirectional_stream(qs); |
1799 | 19.5k | return 0; |
1800 | 19.5k | } |
1801 | | |
1802 | 6.51k | assert(quicly_stream_is_client_initiated(qs->stream_id)); |
1803 | | |
1804 | 0 | struct st_h2o_http3_server_conn_t *conn = |
1805 | 6.51k | H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, *quicly_get_data(qs->conn)); |
1806 | | |
1807 | | /* create new stream and start handling the request */ |
1808 | 6.51k | struct st_h2o_http3_server_stream_t *stream = h2o_mem_alloc(sizeof(*stream)); |
1809 | 6.51k | stream->quic = qs; |
1810 | 6.51k | h2o_buffer_init(&stream->recvbuf.buf, &h2o_socket_buffer_prototype); |
1811 | 6.51k | stream->recvbuf.handle_input = handle_input_expect_headers; |
1812 | 6.51k | memset(&stream->sendbuf, 0, sizeof(stream->sendbuf)); |
1813 | 6.51k | stream->state = H2O_HTTP3_SERVER_STREAM_STATE_RECV_HEADERS; |
1814 | 6.51k | stream->link = (h2o_linklist_t){NULL}; |
1815 | 6.51k | stream->link_resp_settings_blocked = (h2o_linklist_t){NULL}; |
1816 | 6.51k | stream->ostr_final = (h2o_ostream_t){ |
1817 | 6.51k | NULL, do_send, NULL, |
1818 | 6.51k | conn->super.ctx->globalconf->send_informational_mode == H2O_SEND_INFORMATIONAL_MODE_NONE ? NULL : do_send_informational}; |
1819 | 6.51k | stream->scheduler.link = (h2o_linklist_t){NULL}; |
1820 | 6.51k | stream->scheduler.priority = h2o_absprio_default; |
1821 | 6.51k | stream->scheduler.call_cnt = 0; |
1822 | | |
1823 | 6.51k | stream->read_blocked = 0; |
1824 | 6.51k | stream->proceed_requested = 0; |
1825 | 6.51k | stream->proceed_while_sending = 0; |
1826 | 6.51k | stream->received_priority_update = 0; |
1827 | 6.51k | stream->req_disposed = 0; |
1828 | 6.51k | stream->req_streaming = 0; |
1829 | 6.51k | stream->req_body = NULL; |
1830 | | |
1831 | 6.51k | h2o_init_request(&stream->req, &conn->super, NULL); |
1832 | 6.51k | stream->req.version = 0x0300; |
1833 | 6.51k | stream->req._ostr_top = &stream->ostr_final; |
1834 | | |
1835 | 6.51k | stream->quic->data = stream; |
1836 | 6.51k | stream->quic->callbacks = &callbacks; |
1837 | | |
1838 | 6.51k | ++*get_state_counter(get_conn(stream), stream->state); |
1839 | 6.51k | h2o_conn_set_state(&get_conn(stream)->super, H2O_CONN_STATE_ACTIVE); |
1840 | | |
1841 | 6.51k | return 0; |
1842 | 26.0k | } |
1843 | | |
1844 | | static quicly_stream_open_t on_stream_open = {stream_open_cb}; |
1845 | | |
1846 | | static void unblock_conn_blocked_streams(struct st_h2o_http3_server_conn_t *conn) |
1847 | 0 | { |
1848 | 0 | conn->scheduler.uni.active |= conn->scheduler.uni.conn_blocked; |
1849 | 0 | conn->scheduler.uni.conn_blocked = 0; |
1850 | 0 | req_scheduler_unblock_conn_blocked(&conn->scheduler.reqs, req_scheduler_compare_stream_id); |
1851 | 0 | } |
1852 | | |
1853 | | static int scheduler_can_send(quicly_stream_scheduler_t *sched, quicly_conn_t *qc, int conn_is_saturated) |
1854 | 0 | { |
1855 | 0 | struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, *quicly_get_data(qc)); |
1856 | |
1857 | 0 | if (!conn_is_saturated) { |
1858 | | /* not saturated, activate streams marked as being conn-blocked */ |
1859 | 0 | unblock_conn_blocked_streams(conn); |
1860 | 0 | } else { |
1861 | | /* TODO lazily move the active request and unidirectional streams to conn_blocked. Not doing so results in at most one |
1862 | | * spurious call to quicly_send. */ |
1863 | 0 | } |
1864 | |
1865 | 0 | if (conn->scheduler.uni.active != 0) |
1866 | 0 | return 1; |
1867 | 0 | if (conn->scheduler.reqs.active.smallest_urgency < H2O_ABSPRIO_NUM_URGENCY_LEVELS) |
1868 | 0 | return 1; |
1869 | | |
1870 | 0 | return 0; |
1871 | 0 | } |
1872 | | |
1873 | | static int scheduler_do_send(quicly_stream_scheduler_t *sched, quicly_conn_t *qc, quicly_send_context_t *s) |
1874 | 18.5k | { |
1875 | 18.5k | struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, *quicly_get_data(qc)); |
1876 | 18.5k | int ret = 0; |
1877 | | |
1878 | 40.0k | while (quicly_can_send_data(conn->h3.super.quic, s)) { |
1879 | | /* The strategy is: |
1880 | | * |
1881 | | * 1. dequeue the first active stream |
1882 | | * 2. link the stream to the conn_blocked list, if nothing can be sent for the stream due to the connection being capped |
1883 | | * 3. otherwise, send |
1884 | | * 4. enqueue to the appropriate place |
1885 | | */ |
1886 | 40.0k | if (conn->scheduler.uni.active != 0) { |
1887 | 19.5k | static const ptrdiff_t stream_offsets[] = { |
1888 | 19.5k | offsetof(struct st_h2o_http3_server_conn_t, h3._control_streams.egress.control), |
1889 | 19.5k | offsetof(struct st_h2o_http3_server_conn_t, h3._control_streams.egress.qpack_encoder), |
1890 | 19.5k | offsetof(struct st_h2o_http3_server_conn_t, h3._control_streams.egress.qpack_decoder)}; |
1891 | | /* 1. obtain pointer to the offending stream */ |
1892 | 19.5k | struct st_h2o_http3_egress_unistream_t *stream = NULL; |
1893 | 19.5k | size_t i; |
1894 | 39.1k | for (i = 0; i != sizeof(stream_offsets) / sizeof(stream_offsets[0]); ++i) { |
1895 | 39.1k | stream = *(void **)((char *)conn + stream_offsets[i]); |
1896 | 39.1k | if ((conn->scheduler.uni.active & (1 << stream->quic->stream_id)) != 0) |
1897 | 19.5k | break; |
1898 | 39.1k | } |
1899 | 19.5k | assert(i != sizeof(stream_offsets) / sizeof(stream_offsets[0]) && "we should have found one stream"); |
1900 | | /* 2. move to the conn_blocked list if necessary */ |
1901 | 19.5k | if (quicly_is_blocked(conn->h3.super.quic) && !quicly_stream_can_send(stream->quic, 0)) { |
1902 | 0 | conn->scheduler.uni.active &= ~(1 << stream->quic->stream_id); |
1903 | 0 | conn->scheduler.uni.conn_blocked |= 1 << stream->quic->stream_id; |
1904 | 0 | continue; |
1905 | 0 | } |
1906 | | /* 3. send */ |
1907 | 19.5k | if ((ret = quicly_send_stream(stream->quic, s)) != 0) |
1908 | 0 | goto Exit; |
1909 | | /* 4. update scheduler state */ |
1910 | 19.5k | conn->scheduler.uni.active &= ~(1 << stream->quic->stream_id); |
1911 | 19.5k | if (quicly_stream_can_send(stream->quic, 1)) { |
1912 | 0 | uint16_t *slot = &conn->scheduler.uni.active; |
1913 | 0 | if (quicly_is_blocked(conn->h3.super.quic) && !quicly_stream_can_send(stream->quic, 0)) |
1914 | 0 | slot = &conn->scheduler.uni.conn_blocked; |
1915 | 0 | *slot |= 1 << stream->quic->stream_id; |
1916 | 0 | } |
1917 | 20.5k | } else if (conn->scheduler.reqs.active.smallest_urgency < H2O_ABSPRIO_NUM_URGENCY_LEVELS) { |
1918 | | /* 1. obtain pointer to the offending stream */ |
1919 | 1.95k | h2o_linklist_t *anchor = &conn->scheduler.reqs.active.urgencies[conn->scheduler.reqs.active.smallest_urgency].high; |
1920 | 1.95k | if (h2o_linklist_is_empty(anchor)) { |
1921 | 2 | anchor = &conn->scheduler.reqs.active.urgencies[conn->scheduler.reqs.active.smallest_urgency].low; |
1922 | 2 | assert(!h2o_linklist_is_empty(anchor)); |
1923 | 2 | } |
1924 | 0 | struct st_h2o_http3_server_stream_t *stream = |
1925 | 1.95k | H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_stream_t, scheduler.link, anchor->next); |
1926 | | /* 2. link to the conn_blocked list if necessary */
1927 | 1.95k | if (quicly_is_blocked(conn->h3.super.quic) && !quicly_stream_can_send(stream->quic, 0)) { |
1928 | 0 | req_scheduler_conn_blocked(&conn->scheduler.reqs, &stream->scheduler); |
1929 | 0 | continue; |
1930 | 0 | } |
1931 | | /* 3. send */ |
1932 | 1.95k | if ((ret = quicly_send_stream(stream->quic, s)) != 0) |
1933 | 0 | goto Exit; |
1934 | 1.95k | ++stream->scheduler.call_cnt; |
1935 | 1.95k | if (stream->quic->sendstate.size_inflight == stream->quic->sendstate.final_size && |
1936 | 1.95k | h2o_timeval_is_null(&stream->req.timestamps.response_end_at)) |
1937 | 1.43k | stream->req.timestamps.response_end_at = h2o_gettimeofday(stream->req.conn->ctx->loop); |
1938 | | /* 4. invoke h2o_proceed_response synchronously, so that we can obtain additional data for the current (i.e. highest priority)
1939 | | * stream. */
1940 | 1.95k | if (stream->proceed_while_sending) { |
1941 | 520 | assert(stream->proceed_requested); |
1942 | 0 | h2o_proceed_response(&stream->req); |
1943 | 520 | stream->proceed_while_sending = 0; |
1944 | 520 | } |
1945 | | /* 5. prepare for next */ |
1946 | 1.95k | if (quicly_stream_can_send(stream->quic, 1)) { |
1947 | 520 | if (quicly_is_blocked(conn->h3.super.quic) && !quicly_stream_can_send(stream->quic, 0)) { |
1948 | | /* capped by connection-level flow control, move the stream to conn-blocked */ |
1949 | 0 | req_scheduler_conn_blocked(&conn->scheduler.reqs, &stream->scheduler); |
1950 | 520 | } else { |
1951 | | /* schedule for next emission */ |
1952 | 520 | req_scheduler_setup_for_next(&conn->scheduler.reqs, &stream->scheduler, req_scheduler_compare_stream_id); |
1953 | 520 | } |
1954 | 1.43k | } else { |
1955 | | /* nothing to send at this moment */ |
1956 | 1.43k | req_scheduler_deactivate(&conn->scheduler.reqs, &stream->scheduler); |
1957 | 1.43k | } |
1958 | 18.5k | } else { |
1959 | 18.5k | break; |
1960 | 18.5k | } |
1961 | 40.0k | } |
1962 | | |
1963 | 18.5k | Exit: |
1964 | 18.5k | return ret; |
1965 | 18.5k | } |
1966 | | |
1967 | | static int scheduler_update_state(struct st_quicly_stream_scheduler_t *sched, quicly_stream_t *qs) |
1968 | 22.9k | { |
1969 | 22.9k | struct st_h2o_http3_server_conn_t *conn = |
1970 | 22.9k | H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, *quicly_get_data(qs->conn)); |
1971 | 22.9k | enum { DEACTIVATE, ACTIVATE, CONN_BLOCKED } new_state; |
1972 | | |
1973 | 22.9k | if (quicly_stream_can_send(qs, 1)) { |
1974 | 21.9k | if (quicly_is_blocked(conn->h3.super.quic) && !quicly_stream_can_send(qs, 0)) { |
1975 | 0 | new_state = CONN_BLOCKED; |
1976 | 21.9k | } else { |
1977 | 21.9k | new_state = ACTIVATE; |
1978 | 21.9k | } |
1979 | 21.9k | } else { |
1980 | 1.00k | new_state = DEACTIVATE; |
1981 | 1.00k | } |
1982 | | |
1983 | 22.9k | if (quicly_stream_is_unidirectional(qs->stream_id)) { |
1984 | 19.5k | assert(qs->stream_id < sizeof(uint16_t) * 8); |
1985 | 0 | uint16_t mask = (uint16_t)1 << qs->stream_id; |
1986 | 19.5k | switch (new_state) { |
1987 | 0 | case DEACTIVATE: |
1988 | 0 | conn->scheduler.uni.active &= ~mask; |
1989 | 0 | conn->scheduler.uni.conn_blocked &= ~mask; |
1990 | 0 | break; |
1991 | 19.5k | case ACTIVATE: |
1992 | 19.5k | conn->scheduler.uni.active |= mask; |
1993 | 19.5k | conn->scheduler.uni.conn_blocked &= ~mask; |
1994 | 19.5k | break; |
1995 | 0 | case CONN_BLOCKED: |
1996 | 0 | conn->scheduler.uni.active &= ~mask; |
1997 | 0 | conn->scheduler.uni.conn_blocked |= mask; |
1998 | 0 | break; |
1999 | 19.5k | } |
2000 | 19.5k | } else { |
2001 | 3.38k | struct st_h2o_http3_server_stream_t *stream = qs->data; |
2002 | 3.38k | if (stream->proceed_while_sending) |
2003 | 520 | return 0; |
2004 | 2.86k | switch (new_state) { |
2005 | 1.00k | case DEACTIVATE: |
2006 | 1.00k | req_scheduler_deactivate(&conn->scheduler.reqs, &stream->scheduler); |
2007 | 1.00k | break; |
2008 | 1.85k | case ACTIVATE: |
2009 | 1.85k | req_scheduler_activate(&conn->scheduler.reqs, &stream->scheduler, req_scheduler_compare_stream_id); |
2010 | 1.85k | break; |
2011 | 0 | case CONN_BLOCKED: |
2012 | 0 | req_scheduler_conn_blocked(&conn->scheduler.reqs, &stream->scheduler); |
2013 | 0 | break; |
2014 | 2.86k | } |
2015 | 2.86k | } |
2016 | | |
2017 | 22.4k | return 0; |
2018 | 22.9k | } |
2019 | | |
2020 | | static quicly_stream_scheduler_t scheduler = {scheduler_can_send, scheduler_do_send, scheduler_update_state}; |
2021 | | |
2022 | | static void datagram_frame_receive_cb(quicly_receive_datagram_frame_t *self, quicly_conn_t *quic, ptls_iovec_t datagram) |
2023 | 0 | { |
2024 | 0 | struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, *quicly_get_data(quic)); |
2025 | 0 | uint64_t flow_id; |
2026 | 0 | h2o_iovec_t payload; |
2027 | | |
2028 | | /* decode */ |
2029 | 0 | if ((flow_id = h2o_http3_decode_h3_datagram(&payload, datagram.base, datagram.len)) == UINT64_MAX) { |
2030 | 0 | h2o_quic_close_connection(&conn->h3.super, H2O_HTTP3_ERROR_GENERAL_PROTOCOL, "invalid DATAGRAM frame"); |
2031 | 0 | return; |
2032 | 0 | } |
2033 | | |
2034 | | /* find stream */ |
2035 | 0 | khiter_t iter = kh_get(stream, conn->datagram_flows, flow_id); |
2036 | 0 | if (iter == kh_end(conn->datagram_flows)) |
2037 | 0 | return; |
2038 | 0 | struct st_h2o_http3_server_stream_t *stream = kh_val(conn->datagram_flows, iter); |
2039 | 0 | assert(stream->req.forward_datagram.write_ != NULL); |
2040 | | |
2041 | | /* forward */ |
2042 | 0 | stream->req.forward_datagram.write_(&stream->req, &payload, 1); |
2043 | 0 | } |
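`h2o_http3_decode_h3_datagram` is expected to split a DATAGRAM payload into a leading flow identifier (the quarter stream ID, matching the `stream_id / 4` computation earlier in this file) encoded as a QUIC varint, followed by the HTTP datagram payload, returning `UINT64_MAX` on malformed input as the caller above assumes. A decoding sketch under those assumptions (illustrative only, not the actual helper):

#include <stddef.h>
#include <stdint.h>

/* returns the flow (quarter stream) ID, or UINT64_MAX if the datagram is malformed */
static uint64_t decode_h3_datagram_sketch(const uint8_t *src, size_t len, const uint8_t **payload, size_t *payload_len)
{
    if (len == 0)
        return UINT64_MAX;
    size_t id_len = (size_t)1 << (src[0] >> 6); /* QUIC varint length: 1, 2, 4 or 8 bytes */
    if (len < id_len)
        return UINT64_MAX;
    uint64_t flow_id = src[0] & 0x3f;
    for (size_t i = 1; i < id_len; ++i)
        flow_id = (flow_id << 8) | src[i];
    *payload = src + id_len;
    *payload_len = len - id_len;
    return flow_id;
}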
2044 | | |
2045 | | static quicly_receive_datagram_frame_t on_receive_datagram_frame = {datagram_frame_receive_cb}; |
2046 | | |
2047 | | static void on_h3_destroy(h2o_quic_conn_t *h3_) |
2048 | 6.51k | { |
2049 | 6.51k | h2o_http3_conn_t *h3 = (h2o_http3_conn_t *)h3_; |
2050 | 6.51k | struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, h3); |
2051 | 6.51k | quicly_stats_t stats; |
2052 | | |
2053 | 6.51k | H2O_PROBE_CONN0(H3S_DESTROY, &conn->super); |
2054 | 6.51k | H2O_LOG_CONN(h3s_destroy, &conn->super, {}); |
2055 | | |
2056 | 6.51k | if (quicly_get_stats(h3_->quic, &stats) == 0) { |
2057 | 410k | #define ACC(fld, _unused) conn->super.ctx->quic_stats.quicly.fld += stats.fld; |
2058 | 410k | H2O_QUIC_AGGREGATED_STATS_APPLY(ACC); |
2059 | 6.51k | #undef ACC |
2060 | 6.51k | if (conn->super.ctx->quic_stats.num_sentmap_packets_largest < stats.num_sentmap_packets_largest) |
2061 | 0 | conn->super.ctx->quic_stats.num_sentmap_packets_largest = stats.num_sentmap_packets_largest; |
2062 | 6.51k | } |
2063 | | |
2064 | | /* unlink and dispose */ |
2065 | 6.51k | if (h2o_timer_is_linked(&conn->timeout)) |
2066 | 0 | h2o_timer_unlink(&conn->timeout); |
2067 | 6.51k | if (h2o_timer_is_linked(&conn->_graceful_shutdown_timeout)) |
2068 | 0 | h2o_timer_unlink(&conn->_graceful_shutdown_timeout); |
2069 | 6.51k | h2o_http3_dispose_conn(&conn->h3); |
2070 | 6.51k | kh_destroy(stream, conn->datagram_flows); |
2071 | | |
2072 | | /* check consistency post-disposal */ |
2073 | 6.51k | assert(conn->num_streams.recv_headers == 0); |
2074 | 0 | assert(conn->num_streams.req_pending == 0); |
2075 | 0 | assert(conn->num_streams.send_headers == 0); |
2076 | 0 | assert(conn->num_streams.send_body == 0); |
2077 | 0 | assert(conn->num_streams.close_wait == 0); |
2078 | 0 | assert(conn->num_streams_req_streaming == 0); |
2079 | 0 | assert(conn->num_streams_tunnelling == 0); |
2080 | 0 | assert(h2o_linklist_is_empty(&conn->delayed_streams.recv_body_blocked)); |
2081 | 0 | assert(h2o_linklist_is_empty(&conn->delayed_streams.req_streaming)); |
2082 | 0 | assert(h2o_linklist_is_empty(&conn->delayed_streams.pending)); |
2083 | 0 | assert(h2o_linklist_is_empty(&conn->streams_resp_settings_blocked)); |
2084 | 0 | assert(conn->scheduler.reqs.active.smallest_urgency >= H2O_ABSPRIO_NUM_URGENCY_LEVELS); |
2085 | 0 | assert(h2o_linklist_is_empty(&conn->scheduler.reqs.conn_blocked)); |
2086 | | |
2087 | | /* free memory */ |
2088 | 0 | h2o_destroy_connection(&conn->super); |
2089 | 6.51k | } |
2090 | | |
2091 | | void h2o_http3_server_init_context(h2o_context_t *h2o, h2o_quic_ctx_t *ctx, h2o_loop_t *loop, h2o_socket_t *sock, |
2092 | | quicly_context_t *quic, h2o_quic_accept_cb acceptor, |
2093 | | h2o_quic_notify_connection_update_cb notify_conn_update, uint8_t use_gso) |
2094 | 0 | { |
2095 | 0 | return h2o_quic_init_context(ctx, loop, sock, quic, acceptor, notify_conn_update, use_gso, &h2o->quic_stats); |
2096 | 0 | } |
2097 | | |
2098 | | h2o_http3_conn_t *h2o_http3_server_accept(h2o_http3_server_ctx_t *ctx, quicly_address_t *destaddr, quicly_address_t *srcaddr, |
2099 | | quicly_decoded_packet_t *packet, quicly_address_token_plaintext_t *address_token, |
2100 | | int skip_tracing, const h2o_http3_conn_callbacks_t *h3_callbacks) |
2101 | 6.51k | { |
2102 | 6.51k | static const h2o_conn_callbacks_t conn_callbacks = { |
2103 | 6.51k | .get_sockname = get_sockname, |
2104 | 6.51k | .get_peername = get_peername, |
2105 | 6.51k | .get_ptls = get_ptls, |
2106 | 6.51k | .skip_tracing = get_skip_tracing, |
2107 | 6.51k | .get_req_id = get_req_id, |
2108 | 6.51k | .close_idle_connection = close_idle_connection, |
2109 | 6.51k | .foreach_request = foreach_request, |
2110 | 6.51k | .request_shutdown = initiate_graceful_shutdown, |
2111 | 6.51k | .num_reqs_inflight = num_reqs_inflight, |
2112 | 6.51k | .get_tracer = get_tracer, |
2113 | 6.51k | .log_ = {{ |
2114 | 6.51k | .extensible_priorities = log_extensible_priorities, |
2115 | 6.51k | .transport = |
2116 | 6.51k | { |
2117 | 6.51k | .cc_name = log_cc_name, |
2118 | 6.51k | .delivery_rate = log_delivery_rate, |
2119 | 6.51k | }, |
2120 | 6.51k | .ssl = |
2121 | 6.51k | { |
2122 | 6.51k | .protocol_version = log_tls_protocol_version, |
2123 | 6.51k | .session_reused = log_session_reused, |
2124 | 6.51k | .cipher = log_cipher, |
2125 | 6.51k | .cipher_bits = log_cipher_bits, |
2126 | 6.51k | .session_id = log_session_id, |
2127 | 6.51k | .server_name = log_server_name, |
2128 | 6.51k | .negotiated_protocol = log_negotiated_protocol, |
2129 | 6.51k | .ech_config_id = log_ech_config_id, |
2130 | 6.51k | .ech_kem = log_ech_kem, |
2131 | 6.51k | .ech_cipher = log_ech_cipher, |
2132 | 6.51k | .ech_cipher_bits = log_ech_cipher_bits, |
2133 | 6.51k | }, |
2134 | 6.51k | .http3 = |
2135 | 6.51k | { |
2136 | 6.51k | .stream_id = log_stream_id, |
2137 | 6.51k | .quic_stats = log_quic_stats, |
2138 | 6.51k | .quic_version = log_quic_version, |
2139 | 6.51k | }, |
2140 | 6.51k | }}, |
2141 | 6.51k | }; |
2142 | | |
2143 | | /* setup the structure */ |
2144 | 6.51k | struct st_h2o_http3_server_conn_t *conn = (void *)h2o_create_connection( |
2145 | 6.51k | sizeof(*conn), ctx->accept_ctx->ctx, ctx->accept_ctx->hosts, h2o_gettimeofday(ctx->accept_ctx->ctx->loop), &conn_callbacks); |
2146 | 6.51k | memset((char *)conn + sizeof(conn->super), 0, sizeof(*conn) - sizeof(conn->super)); |
2147 | | |
2148 | 6.51k | h2o_http3_init_conn(&conn->h3, &ctx->super, h3_callbacks, &ctx->qpack, H2O_MAX_REQLEN); |
2149 | 6.51k | conn->handshake_properties = (ptls_handshake_properties_t){{{{NULL}}}}; |
2150 | 6.51k | h2o_linklist_init_anchor(&conn->delayed_streams.recv_body_blocked); |
2151 | 6.51k | h2o_linklist_init_anchor(&conn->delayed_streams.req_streaming); |
2152 | 6.51k | h2o_linklist_init_anchor(&conn->delayed_streams.pending); |
2153 | 6.51k | h2o_linklist_init_anchor(&conn->streams_resp_settings_blocked); |
2154 | 6.51k | h2o_timer_init(&conn->timeout, run_delayed); |
2155 | 6.51k | memset(&conn->num_streams, 0, sizeof(conn->num_streams)); |
2156 | 6.51k | conn->num_streams_req_streaming = 0; |
2157 | 6.51k | conn->num_streams_tunnelling = 0; |
2158 | 6.51k | req_scheduler_init(&conn->scheduler.reqs); |
2159 | 6.51k | conn->scheduler.uni.active = 0; |
2160 | 6.51k | conn->scheduler.uni.conn_blocked = 0; |
2161 | 6.51k | conn->datagram_flows = kh_init(stream); |
2162 | | |
2163 | | /* accept connection */ |
2164 | | #if PICOTLS_USE_DTRACE |
2165 | | unsigned orig_skip_tracing = ptls_default_skip_tracing; |
2166 | | ptls_default_skip_tracing = skip_tracing; |
2167 | | #endif |
2168 | 6.51k | quicly_conn_t *qconn; |
2169 | 6.51k | int accept_ret = quicly_accept( |
2170 | 6.51k | &qconn, ctx->super.quic, &destaddr->sa, &srcaddr->sa, packet, address_token, &ctx->super.next_cid, |
2171 | 6.51k | &conn->handshake_properties, |
2172 | 6.51k | &conn->h3 /* back pointer is set up here so that callbacks being called while parsing ClientHello can refer to `conn` */); |
2173 | | #if PICOTLS_USE_DTRACE |
2174 | | ptls_default_skip_tracing = orig_skip_tracing; |
2175 | | #endif |
2176 | 6.51k | if (accept_ret != 0) { |
2177 | 0 | h2o_http3_conn_t *ret = NULL; |
2178 | 0 | if (accept_ret == QUICLY_ERROR_DECRYPTION_FAILED) |
2179 | 0 | ret = (h2o_http3_conn_t *)&h2o_quic_accept_conn_decryption_failed; |
2180 | 0 | h2o_http3_dispose_conn(&conn->h3); |
2181 | 0 | kh_destroy(stream, conn->datagram_flows); |
2182 | 0 | h2o_destroy_connection(&conn->super); |
2183 | 0 | return ret; |
2184 | 0 | } |
2185 | 6.51k | if (ctx->super.quic_stats != NULL) { |
2186 | 0 | ++ctx->super.quic_stats->packet_processed; |
2187 | 0 | } |
2188 | 6.51k | ++ctx->super.next_cid.master_id; /* FIXME check overlap */ |
2189 | 6.51k | h2o_http3_setup(&conn->h3, qconn); |
2190 | | |
2191 | 6.51k | H2O_PROBE_CONN(H3S_ACCEPT, &conn->super, &conn->super, conn->h3.super.quic, h2o_conn_get_uuid(&conn->super)); |
2192 | 6.51k | H2O_LOG_CONN(h3s_accept, &conn->super, { |
2193 | 6.51k | PTLS_LOG_ELEMENT_PTR(conn, &conn->super); |
2194 | 6.51k | PTLS_LOG_ELEMENT_PTR(quic, conn->h3.super.quic); |
2195 | 6.51k | PTLS_LOG_ELEMENT_SAFESTR(conn_uuid, h2o_conn_get_uuid(&conn->super)); |
2196 | 6.51k | }); |
2197 | | |
2198 | 6.51k | if (!h2o_quic_send(&conn->h3.super)) { |
2199 | | /* When `h2o_quic_send` fails, it destroys the connection object. */ |
2200 | 0 | return &h2o_http3_accept_conn_closed; |
2201 | 0 | } |
2202 | | |
2203 | 6.51k | return &conn->h3; |
2204 | 6.51k | } |
2205 | | |
2206 | | void h2o_http3_server_amend_quicly_context(h2o_globalconf_t *conf, quicly_context_t *quic) |
2207 | 1 | { |
2208 | 1 | quic->transport_params.max_data = |
2209 | 1 | conf->http3.active_stream_window_size; /* set to a size that does not block the unblocked request stream */ |
2210 | 1 | quic->transport_params.max_streams_uni = 10; |
2211 | 1 | quic->transport_params.max_stream_data.bidi_remote = h2o_http3_calc_min_flow_control_size(H2O_MAX_REQLEN); |
2212 | 1 | quic->transport_params.max_stream_data.uni = h2o_http3_calc_min_flow_control_size(H2O_MAX_REQLEN); |
2213 | 1 | quic->transport_params.max_idle_timeout = conf->http3.idle_timeout; |
2214 | 1 | quic->transport_params.min_ack_delay_usec = conf->http3.allow_delayed_ack ? 0 : UINT64_MAX; |
2215 | 1 | quic->ack_frequency = conf->http3.ack_frequency; |
2216 | 1 | quic->transport_params.max_datagram_frame_size = 1500; /* accept DATAGRAM frames; let the sender determine MTU, instead of being |
2217 | | * potentially too restrictive */ |
2218 | 1 | quic->stream_open = &on_stream_open; |
2219 | 1 | quic->stream_scheduler = &scheduler; |
2220 | 1 | quic->receive_datagram_frame = &on_receive_datagram_frame; |
2221 | | |
2222 | 4 | for (size_t i = 0; quic->tls->cipher_suites[i] != NULL; ++i) |
2223 | 3 | assert(quic->tls->cipher_suites[i]->aead->ctr_cipher != NULL && |
2224 | 1 | "for header protection, QUIC ciphers MUST provide CTR mode"); |
2225 | 1 | } |
2226 | | |
2227 | | h2o_conn_t *h2o_http3_get_connection(quicly_conn_t *quic) |
2228 | 0 | { |
2229 | 0 | struct st_h2o_http3_server_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, h3, *quicly_get_data(quic)); |
2230 | | |
2231 | | /* this assertion is most likely to fire if the provided QUIC connection does not represent a server-side HTTP connection */ |
2232 | 0 | assert(conn->h3.super.quic == NULL || conn->h3.super.quic == quic); |
2233 | | |
2234 | 0 | return &conn->super; |
2235 | 0 | } |
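/* Editor's sketch (hypothetical usage, not part of this file): given a quicly connection known to
 * belong to this server, the helper above recovers the generic h2o connection object, e.g. to reach
 * the per-thread context that owns it. `context_of_quic_conn` is invented for illustration. */
static h2o_context_t *context_of_quic_conn(quicly_conn_t *quic)
{
    h2o_conn_t *conn = h2o_http3_get_connection(quic);
    return conn->ctx; /* the h2o_context_t this connection was accepted on */
}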
2236 | | |
2237 | | static void graceful_shutdown_close_straggler(h2o_timer_t *entry) |
2238 | 0 | { |
2239 | 0 | struct st_h2o_http3_server_conn_t *conn = |
2240 | 0 | H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, _graceful_shutdown_timeout, entry); |
2241 | | |
2242 | | /* We've sent two GOAWAY frames, close the remaining connections */ |
2243 | 0 | h2o_quic_close_connection(&conn->h3.super, 0, "shutting down"); |
2244 | |
2245 | 0 | conn->_graceful_shutdown_timeout.cb = NULL; |
2246 | 0 | } |
2247 | | |
2248 | | static void graceful_shutdown_resend_goaway(h2o_timer_t *entry) |
2249 | 0 | { |
2250 | 0 | struct st_h2o_http3_server_conn_t *conn = |
2251 | 0 | H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3_server_conn_t, _graceful_shutdown_timeout, entry); |
2252 | | |
2253 | | /* HTTP/3 draft section 5.2.8 -- |
2254 | | * "After allowing time for any in-flight requests or pushes to arrive, the endpoint can send another GOAWAY frame |
2255 | | * indicating which requests or pushes it might accept before the end of the connection. |
2256 | | * This ensures that a connection can be cleanly shut down without losing requests." */
2257 | |
2258 | 0 | if (conn->h3.state < H2O_HTTP3_CONN_STATE_HALF_CLOSED && quicly_get_state(conn->h3.super.quic) == QUICLY_STATE_CONNECTED) { |
2259 | 0 | quicly_stream_id_t next_stream_id = quicly_get_remote_next_stream_id(conn->h3.super.quic, 0 /* == bidi */); |
2260 | | /* Section 5.2-1: "This identifier MAY be zero if no requests or pushes were processed." */
2261 | 0 | quicly_stream_id_t max_stream_id = next_stream_id < 4 ? 0 /* we haven't received any stream yet */ : next_stream_id - 4; |
2262 | 0 | h2o_http3_send_goaway_frame(&conn->h3, max_stream_id); |
2263 | 0 | conn->h3.state = H2O_HTTP3_CONN_STATE_HALF_CLOSED; |
2264 | | /* After waiting a second, we still have an active connection. If configured, wait one |
2265 | | * final timeout before closing the connection */ |
2266 | 0 | if (conn->super.ctx->globalconf->http3.graceful_shutdown_timeout > 0) { |
2267 | 0 | conn->_graceful_shutdown_timeout.cb = graceful_shutdown_close_straggler; |
2268 | 0 | h2o_timer_link(conn->super.ctx->loop, conn->super.ctx->globalconf->http3.graceful_shutdown_timeout, |
2269 | 0 | &conn->_graceful_shutdown_timeout); |
2270 | 0 | } else { |
2271 | 0 | conn->_graceful_shutdown_timeout.cb = NULL; |
2272 | 0 | } |
2273 | 0 | } |
2274 | 0 | } |
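/* Editor's sketch: a worked illustration of the max_stream_id computation above. Client-initiated
 * bidirectional stream IDs are 0, 4, 8, ..., so the most recently opened request stream is the
 * peer's next stream ID minus 4, and 0 is advertised when no request stream has been opened yet.
 * `goaway_stream_id` is invented for illustration. */
static quicly_stream_id_t goaway_stream_id(quicly_stream_id_t next_client_bidi_id)
{
    /* e.g. next == 0 -> 0 (nothing received), next == 4 -> 0 (only stream 0 seen), next == 12 -> 8 */
    return next_client_bidi_id < 4 ? 0 : next_client_bidi_id - 4;
}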
2275 | | |
2276 | | static void close_idle_connection(h2o_conn_t *_conn) |
2277 | 0 | { |
2278 | 0 | initiate_graceful_shutdown(_conn); |
2279 | 0 | } |
2280 | | |
2281 | | static void initiate_graceful_shutdown(h2o_conn_t *_conn) |
2282 | 0 | { |
2283 | 0 | h2o_conn_set_state(_conn, H2O_CONN_STATE_SHUTDOWN); |
2284 | |
2285 | 0 | struct st_h2o_http3_server_conn_t *conn = (void *)_conn; |
2286 | 0 | assert(conn->_graceful_shutdown_timeout.cb == NULL); |
2287 | 0 | conn->_graceful_shutdown_timeout.cb = graceful_shutdown_resend_goaway; |
2288 | |
2289 | 0 | h2o_http3_send_shutdown_goaway_frame(&conn->h3); |
2290 | |
2291 | 0 | h2o_timer_link(conn->super.ctx->loop, 1000, &conn->_graceful_shutdown_timeout); |
2292 | 0 | } |
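/* Editor's sketch (hypothetical configuration snippet): the sequence above is GOAWAY now, a second
 * GOAWAY after 1000ms, then an optional forced close once http3.graceful_shutdown_timeout elapses.
 * The field is the one read by graceful_shutdown_resend_goaway above; the value below is only an
 * example, and the unit is assumed to be milliseconds to match h2o_timer_link. */
static void configure_h3_graceful_shutdown(h2o_globalconf_t *conf)
{
    /* 0 leaves stragglers to drain on their own after the second GOAWAY; a non-zero value
     * force-closes them once it elapses */
    conf->http3.graceful_shutdown_timeout = 5000;
}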
2293 | | |
2294 | | struct foreach_request_ctx { |
2295 | | int (*cb)(h2o_req_t *req, void *cbdata); |
2296 | | void *cbdata; |
2297 | | }; |
2298 | | |
2299 | | static int foreach_request_per_conn(void *_ctx, quicly_stream_t *qs) |
2300 | 0 | { |
2301 | 0 | struct foreach_request_ctx *ctx = _ctx; |
2302 | | |
2303 | | /* skip if the stream is not a request stream (TODO handle push?) */ |
2304 | 0 | if (!(quicly_stream_is_client_initiated(qs->stream_id) && !quicly_stream_is_unidirectional(qs->stream_id))) |
2305 | 0 | return 0; |
2306 | | |
2307 | 0 | struct st_h2o_http3_server_stream_t *stream = qs->data; |
2308 | 0 | assert(stream->quic == qs); |
2309 | | |
2310 | 0 | if (stream->state == H2O_HTTP3_SERVER_STREAM_STATE_CLOSE_WAIT) |
2311 | 0 | return 0; |
2312 | 0 | return ctx->cb(&stream->req, ctx->cbdata); |
2313 | 0 | } |
2314 | | |
2315 | | static int foreach_request(h2o_conn_t *_conn, int (*cb)(h2o_req_t *req, void *cbdata), void *cbdata) |
2316 | 0 | { |
2317 | 0 | struct foreach_request_ctx foreach_ctx = {.cb = cb, .cbdata = cbdata}; |
2318 | |
2319 | 0 | struct st_h2o_http3_server_conn_t *conn = (void *)_conn; |
2320 | 0 | quicly_foreach_stream(conn->h3.super.quic, &foreach_ctx, foreach_request_per_conn); |
2321 | 0 | return 0; |
2322 | 0 | } |
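/* Editor's sketch (hypothetical callback, not part of this file): the visitor interface used by
 * foreach_request above. Each live request stream's h2o_req_t is passed to the callback, and a
 * non-zero return value is expected to stop the underlying quicly_foreach_stream iteration. */
struct request_counter {
    size_t count;
};

static int count_request(h2o_req_t *req, void *cbdata)
{
    struct request_counter *counter = cbdata;
    (void)req;        /* the request itself is not inspected in this illustration */
    ++counter->count; /* tally one in-flight request */
    return 0;         /* continue iterating over the remaining request streams */
}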
2323 | | |
2324 | | const h2o_http3_conn_callbacks_t H2O_HTTP3_CONN_CALLBACKS = {{on_h3_destroy}, handle_control_stream_frame}; |