/src/h2o/deps/quicly/lib/defaults.c
/*
 * Copyright (c) 2017-2019 Fastly, Kazuho Oku
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <sys/time.h>
#include "quicly/defaults.h"

#define DEFAULT_INITIAL_EGRESS_MAX_UDP_PAYLOAD_SIZE 1280
#define DEFAULT_MAX_UDP_PAYLOAD_SIZE 1472
#define DEFAULT_MAX_PACKETS_PER_KEY 16777216
#define DEFAULT_MAX_CRYPTO_BYTES 65536
#define DEFAULT_INITCWND_PACKETS 10
#define DEFAULT_PRE_VALIDATION_AMPLIFICATION_LIMIT 3
#define DEFAULT_HANDSHAKE_TIMEOUT_RTT_MULTIPLIER 400
#define DEFAULT_MAX_INITIAL_HANDSHAKE_PACKETS 1000
#define DEFAULT_MAX_PROBE_PACKETS 5
#define DEFAULT_MAX_PATH_VALIDATION_FAILURES 100

/* profile that employs IETF specified values */
const quicly_context_t quicly_spec_context = {NULL, /* tls */
                                              DEFAULT_INITIAL_EGRESS_MAX_UDP_PAYLOAD_SIZE, /* client_initial_size */
                                              QUICLY_LOSS_SPEC_CONF, /* loss */
                                              {{1 * 1024 * 1024, 1 * 1024 * 1024, 1 * 1024 * 1024}, /* max_stream_data */
                                               16 * 1024 * 1024, /* max_data */
                                               30 * 1000, /* idle_timeout (30 seconds) */
                                               100, /* max_concurrent_streams_bidi */
                                               0, /* max_concurrent_streams_uni */
                                               DEFAULT_MAX_UDP_PAYLOAD_SIZE}, /* max_udp_payload_size */
                                              DEFAULT_MAX_PACKETS_PER_KEY,
                                              DEFAULT_MAX_CRYPTO_BYTES,
                                              DEFAULT_INITCWND_PACKETS,
                                              QUICLY_PROTOCOL_VERSION_1,
                                              DEFAULT_PRE_VALIDATION_AMPLIFICATION_LIMIT,
                                              0, /* ack_frequency */
                                              DEFAULT_HANDSHAKE_TIMEOUT_RTT_MULTIPLIER,
                                              DEFAULT_MAX_INITIAL_HANDSHAKE_PACKETS,
                                              DEFAULT_MAX_PROBE_PACKETS,
                                              DEFAULT_MAX_PATH_VALIDATION_FAILURES,
                                              0, /* default_jumpstart_cwnd_bytes */
                                              0, /* max_jumpstart_cwnd_bytes */
                                              0, /* enlarge_client_hello */
                                              1, /* enable_ecn */
                                              0, /* use_pacing */
                                              1, /* cc_recognize_app_limited */
                                              NULL, /* cid_encryptor */
                                              NULL, /* on_stream_open */
                                              &quicly_default_stream_scheduler,
                                              NULL, /* receive_datagram_frame */
                                              NULL, /* on_conn_close */
                                              &quicly_default_now,
                                              NULL, /* save_resumption_token */
                                              NULL, /* generate_resumption_token */
                                              &quicly_default_crypto_engine,
                                              &quicly_default_init_cc};

/* profile with a focus on reducing latency for the HTTP use case */
const quicly_context_t quicly_performant_context = {NULL, /* tls */
                                                    DEFAULT_INITIAL_EGRESS_MAX_UDP_PAYLOAD_SIZE, /* client_initial_size */
                                                    QUICLY_LOSS_PERFORMANT_CONF, /* loss */
                                                    {{1 * 1024 * 1024, 1 * 1024 * 1024, 1 * 1024 * 1024}, /* max_stream_data */
                                                     16 * 1024 * 1024, /* max_data */
                                                     30 * 1000, /* idle_timeout (30 seconds) */
                                                     100, /* max_concurrent_streams_bidi */
                                                     0, /* max_concurrent_streams_uni */
                                                     DEFAULT_MAX_UDP_PAYLOAD_SIZE}, /* max_udp_payload_size */
                                                    DEFAULT_MAX_PACKETS_PER_KEY,
                                                    DEFAULT_MAX_CRYPTO_BYTES,
                                                    DEFAULT_INITCWND_PACKETS,
                                                    QUICLY_PROTOCOL_VERSION_1,
                                                    DEFAULT_PRE_VALIDATION_AMPLIFICATION_LIMIT,
                                                    0, /* ack_frequency */
                                                    DEFAULT_HANDSHAKE_TIMEOUT_RTT_MULTIPLIER,
                                                    DEFAULT_MAX_INITIAL_HANDSHAKE_PACKETS,
                                                    DEFAULT_MAX_PROBE_PACKETS,
                                                    DEFAULT_MAX_PATH_VALIDATION_FAILURES,
                                                    0, /* default_jumpstart_cwnd_bytes */
                                                    0, /* max_jumpstart_cwnd_bytes */
                                                    0, /* enlarge_client_hello */
                                                    1, /* enable_ecn */
                                                    0, /* use_pacing */
                                                    1, /* cc_recognize_app_limited */
                                                    NULL, /* cid_encryptor */
                                                    NULL, /* on_stream_open */
                                                    &quicly_default_stream_scheduler,
                                                    NULL, /* receive_datagram_frame */
                                                    NULL, /* on_conn_close */
                                                    &quicly_default_now,
                                                    NULL, /* save_resumption_token */
                                                    NULL, /* generate_resumption_token */
                                                    &quicly_default_crypto_engine,
                                                    &quicly_default_init_cc};

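/* Usage sketch (illustrative, not part of the library): applications normally start from one of the profiles above and then
 * override individual members. `my_tls_ctx` below is a hypothetical ptls_context_t that the application has set up.
 *
 *     quicly_context_t ctx = quicly_spec_context;
 *     ctx.tls = &my_tls_ctx;
 *     ctx.transport_params.max_data = 32 * 1024 * 1024;
 */
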
/**
 * The context of the default CID encryptor. All the cipher contexts used here are ECB ciphers and therefore stateless; they can
 * be used concurrently from multiple threads.
 */
struct st_quicly_default_encrypt_cid_t {
    quicly_cid_encryptor_t super;
    ptls_cipher_context_t *cid_encrypt_ctx, *cid_decrypt_ctx, *reset_token_ctx;
};

static void generate_reset_token(struct st_quicly_default_encrypt_cid_t *self, void *token, const void *cid)
{
    uint8_t expandbuf[QUICLY_STATELESS_RESET_TOKEN_LEN];

    assert(self->reset_token_ctx->algo->block_size == QUICLY_STATELESS_RESET_TOKEN_LEN);

    /* expand the input to full size, if CID is shorter than the size of the reset token */
    if (self->cid_encrypt_ctx->algo->block_size != QUICLY_STATELESS_RESET_TOKEN_LEN) {
        assert(self->cid_encrypt_ctx->algo->block_size < QUICLY_STATELESS_RESET_TOKEN_LEN);
        memset(expandbuf, 0, sizeof(expandbuf));
        memcpy(expandbuf, cid, self->cid_encrypt_ctx->algo->block_size);
        cid = expandbuf;
    }

    /* transform */
    ptls_cipher_encrypt(self->reset_token_ctx, token, cid, QUICLY_STATELESS_RESET_TOKEN_LEN);
}

static void default_encrypt_cid(quicly_cid_encryptor_t *_self, quicly_cid_t *encrypted, void *reset_token,
                                const quicly_cid_plaintext_t *plaintext)
{
    struct st_quicly_default_encrypt_cid_t *self = (void *)_self;
    uint8_t buf[16], *p;

    /* encode */
    p = buf;
    switch (self->cid_encrypt_ctx->algo->block_size) {
    case 8:
        break;
    case 16:
        p = quicly_encode64(p, plaintext->node_id);
        break;
    default:
        assert(!"unexpected block size");
        break;
    }
    p = quicly_encode32(p, plaintext->master_id);
    p = quicly_encode32(p, (plaintext->thread_id << 8) | plaintext->path_id);
    assert(p - buf == self->cid_encrypt_ctx->algo->block_size);

    /* generate CID */
    ptls_cipher_encrypt(self->cid_encrypt_ctx, encrypted->cid, buf, self->cid_encrypt_ctx->algo->block_size);
    encrypted->len = self->cid_encrypt_ctx->algo->block_size;

    /* generate stateless reset token if requested */
    if (reset_token != NULL)
        generate_reset_token(self, reset_token, encrypted->cid);
}

static size_t default_decrypt_cid(quicly_cid_encryptor_t *_self, quicly_cid_plaintext_t *plaintext, const void *encrypted,
                                  size_t len)
{
    struct st_quicly_default_encrypt_cid_t *self = (void *)_self;
    uint8_t ptbuf[16];
    const uint8_t *p;

    if (len != 0) {
        /* long header packet; decrypt only if the given Connection ID matches the expected size */
        if (len != self->cid_decrypt_ctx->algo->block_size)
            return SIZE_MAX;
    } else {
        /* short header packet; we are the ones that determine the size */
        len = self->cid_decrypt_ctx->algo->block_size;
    }

    /* decrypt */
    ptls_cipher_encrypt(self->cid_decrypt_ctx, ptbuf, encrypted, len);

    /* decode */
    p = ptbuf;
    if (len == 16) {
        plaintext->node_id = quicly_decode64(&p);
    } else {
        plaintext->node_id = 0;
    }
    plaintext->master_id = quicly_decode32(&p);
    plaintext->thread_id = quicly_decode24(&p);
    plaintext->path_id = *p++;
    assert(p - ptbuf == len);

    return len;
}

static int default_generate_reset_token(quicly_cid_encryptor_t *_self, void *token, const void *cid)
{
    struct st_quicly_default_encrypt_cid_t *self = (void *)_self;
    generate_reset_token(self, token, cid);
    return 1;
}

quicly_cid_encryptor_t *quicly_new_default_cid_encryptor(ptls_cipher_algorithm_t *cid_cipher,
                                                         ptls_cipher_algorithm_t *reset_token_cipher, ptls_hash_algorithm_t *hash,
                                                         ptls_iovec_t key)
{
    struct st_quicly_default_encrypt_cid_t *self;
    uint8_t digestbuf[PTLS_MAX_DIGEST_SIZE], keybuf[PTLS_MAX_SECRET_SIZE];

    assert(cid_cipher->block_size == 8 || cid_cipher->block_size == 16);
    assert(reset_token_cipher->block_size == 16);

    /* if the supplied key is longer than the hash block size, shrink it by hashing */
    if (key.len > hash->block_size) {
        ptls_calc_hash(hash, digestbuf, key.base, key.len);
        key = ptls_iovec_init(digestbuf, hash->digest_size);
    }

    if ((self = malloc(sizeof(*self))) == NULL)
        goto Fail;
    *self = (struct st_quicly_default_encrypt_cid_t){{default_encrypt_cid, default_decrypt_cid, default_generate_reset_token}};

    /* instantiate the ECB contexts, deriving separate keys for CID encryption and for reset token generation */
    if (ptls_hkdf_expand_label(hash, keybuf, cid_cipher->key_size, key, "cid", ptls_iovec_init(NULL, 0), "") != 0)
        goto Fail;
    if ((self->cid_encrypt_ctx = ptls_cipher_new(cid_cipher, 1, keybuf)) == NULL)
        goto Fail;
    if ((self->cid_decrypt_ctx = ptls_cipher_new(cid_cipher, 0, keybuf)) == NULL)
        goto Fail;
    if (ptls_hkdf_expand_label(hash, keybuf, reset_token_cipher->key_size, key, "reset", ptls_iovec_init(NULL, 0), "") != 0)
        goto Fail;
    if ((self->reset_token_ctx = ptls_cipher_new(reset_token_cipher, 1, keybuf)) == NULL)
        goto Fail;

    ptls_clear_memory(digestbuf, sizeof(digestbuf));
    ptls_clear_memory(keybuf, sizeof(keybuf));
    return &self->super;

Fail:
    if (self != NULL) {
        if (self->cid_encrypt_ctx != NULL)
            ptls_cipher_free(self->cid_encrypt_ctx);
        if (self->cid_decrypt_ctx != NULL)
            ptls_cipher_free(self->cid_decrypt_ctx);
        if (self->reset_token_ctx != NULL)
            ptls_cipher_free(self->reset_token_ctx);
        free(self);
    }
    ptls_clear_memory(digestbuf, sizeof(digestbuf));
    ptls_clear_memory(keybuf, sizeof(keybuf));
    return NULL;
}

void quicly_free_default_cid_encryptor(quicly_cid_encryptor_t *_self)
{
    struct st_quicly_default_encrypt_cid_t *self = (void *)_self;

    ptls_cipher_free(self->cid_encrypt_ctx);
    ptls_cipher_free(self->cid_decrypt_ctx);
    ptls_cipher_free(self->reset_token_ctx);
    free(self);
}

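/* Usage sketch (illustrative, not part of this file): an application that wants encrypted CIDs and stateless reset tokens would
 * plug the encryptor into its context roughly as follows. The cipher and hash names assume the picotls OpenSSL backend is
 * linked in, and `cid_key` is an application-supplied secret; these names are assumptions for the example, not requirements of
 * this file.
 *
 *     quicly_context_t ctx = quicly_spec_context;
 *     ctx.cid_encryptor = quicly_new_default_cid_encryptor(
 *         &ptls_openssl_bfecb,     // 8-byte-block cipher: yields 8-byte CIDs
 *         &ptls_openssl_aes128ecb, // 16-byte-block cipher, as required for reset tokens
 *         &ptls_openssl_sha256, ptls_iovec_init(cid_key, strlen(cid_key)));
 *     ...
 *     quicly_free_default_cid_encryptor(ctx.cid_encryptor);
 */
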
/**
 * See doc-comment of `st_quicly_default_scheduler_state_t` to understand the logic.
 */
static int default_stream_scheduler_can_send(quicly_stream_scheduler_t *self, quicly_conn_t *conn, int conn_is_saturated)
{
    struct st_quicly_default_scheduler_state_t *sched = &((struct _st_quicly_conn_public_t *)conn)->_default_scheduler;

    if (!conn_is_saturated) {
        /* not saturated */
        quicly_linklist_insert_list(&sched->active, &sched->blocked);
    } else {
        /* The code below is disabled, because H2O's scheduler doesn't allow you to "walk" the priority tree without actually
         * running the round robin, and we want quicly's default to behave like H2O so that we can catch errors. The downside is
         * that there'd be at most one spurious call of `quicly_send` when the connection is saturated, but that should be fine. */
        if (0) {
            /* Saturated. Lazily move such streams to the "blocked" list, at the same time checking if anything can be sent. */
            while (quicly_linklist_is_linked(&sched->active)) {
                quicly_stream_t *stream =
                    (void *)((char *)sched->active.next - offsetof(quicly_stream_t, _send_aux.pending_link.default_scheduler));
                if (quicly_stream_can_send(stream, 0))
                    return 1;
                quicly_linklist_unlink(&stream->_send_aux.pending_link.default_scheduler);
                quicly_linklist_insert(sched->blocked.prev, &stream->_send_aux.pending_link.default_scheduler);
            }
        }
    }

    return quicly_linklist_is_linked(&sched->active);
}

static void link_stream(struct st_quicly_default_scheduler_state_t *sched, quicly_stream_t *stream, int conn_is_blocked)
{
    if (!quicly_linklist_is_linked(&stream->_send_aux.pending_link.default_scheduler)) {
        quicly_linklist_t *slot = &sched->active;
        if (conn_is_blocked && !quicly_stream_can_send(stream, 0))
            slot = &sched->blocked;
        quicly_linklist_insert(slot->prev, &stream->_send_aux.pending_link.default_scheduler);
    }
}

/**
 * See doc-comment of `st_quicly_default_scheduler_state_t` to understand the logic.
 */
static quicly_error_t default_stream_scheduler_do_send(quicly_stream_scheduler_t *self, quicly_conn_t *conn,
                                                       quicly_send_context_t *s)
{
    struct st_quicly_default_scheduler_state_t *sched = &((struct _st_quicly_conn_public_t *)conn)->_default_scheduler;
    int conn_is_blocked = quicly_is_blocked(conn);
    quicly_error_t ret = 0;

    if (!conn_is_blocked)
        quicly_linklist_insert_list(&sched->active, &sched->blocked);

    while (quicly_can_send_data((quicly_conn_t *)conn, s) && quicly_linklist_is_linked(&sched->active)) {
        /* detach the first active stream */
        quicly_stream_t *stream =
            (void *)((char *)sched->active.next - offsetof(quicly_stream_t, _send_aux.pending_link.default_scheduler));
        quicly_linklist_unlink(&stream->_send_aux.pending_link.default_scheduler);
        /* relink the stream to the blocked list if necessary */
        if (conn_is_blocked && !quicly_stream_can_send(stream, 0)) {
            quicly_linklist_insert(sched->blocked.prev, &stream->_send_aux.pending_link.default_scheduler);
            continue;
        }
        /* send! */
        if ((ret = quicly_send_stream(stream, s)) != 0) {
            /* FIXME Stop quicly_send_stream emitting SENDBUF_FULL (happens when the congestion window is exhausted). Otherwise,
             * we need to make adjustments to the scheduler after popping a stream */
            if (ret == QUICLY_ERROR_SENDBUF_FULL) {
                assert(quicly_stream_can_send(stream, 1));
                link_stream(sched, stream, conn_is_blocked);
            }
            break;
        }
        /* reschedule */
        conn_is_blocked = quicly_is_blocked(conn);
        if (quicly_stream_can_send(stream, 1))
            link_stream(sched, stream, conn_is_blocked);
    }

    return ret;
}

/**
 * See doc-comment of `st_quicly_default_scheduler_state_t` to understand the logic.
 */
static void default_stream_scheduler_update_state(quicly_stream_scheduler_t *self, quicly_stream_t *stream)
{
    struct st_quicly_default_scheduler_state_t *sched = &((struct _st_quicly_conn_public_t *)stream->conn)->_default_scheduler;

    if (quicly_stream_can_send(stream, 1)) {
        /* activate if not */
        link_stream(sched, stream, quicly_is_blocked(stream->conn));
    } else {
        /* deactivate if active */
        if (quicly_linklist_is_linked(&stream->_send_aux.pending_link.default_scheduler))
            quicly_linklist_unlink(&stream->_send_aux.pending_link.default_scheduler);
    }
}

quicly_stream_scheduler_t quicly_default_stream_scheduler = {default_stream_scheduler_can_send, default_stream_scheduler_do_send,
                                                             default_stream_scheduler_update_state};

quicly_stream_t *quicly_default_alloc_stream(quicly_context_t *ctx)
{
    return malloc(sizeof(quicly_stream_t));
}

void quicly_default_free_stream(quicly_stream_t *stream)
{
    free(stream);
}

static int64_t default_now(quicly_now_t *self)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    int64_t tv_now = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;

    /* make sure that the returned value never goes backwards, even if the wall clock is rewound */
    static __thread int64_t now;
    if (now < tv_now)
        now = tv_now;
    return now;
}

quicly_now_t quicly_default_now = {default_now};

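/* Illustrative sketch (an assumption, not part of this file): an application that prefers a clock immune to wall-clock
 * adjustments can supply its own `quicly_now_t`, e.g. based on POSIX CLOCK_MONOTONIC, and point `ctx.now` at it.
 *
 *     static int64_t monotonic_now(quicly_now_t *self)
 *     {
 *         struct timespec ts;
 *         clock_gettime(CLOCK_MONOTONIC, &ts);
 *         return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
 *     }
 *     static quicly_now_t my_monotonic_now = {monotonic_now};
 *     // then: ctx.now = &my_monotonic_now;
 */
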
static int default_setup_cipher(quicly_crypto_engine_t *engine, quicly_conn_t *conn, size_t epoch, int is_enc,
                                ptls_cipher_context_t **hp_ctx, ptls_aead_context_t **aead_ctx, ptls_aead_algorithm_t *aead,
                                ptls_hash_algorithm_t *hash, const void *secret)
{
    uint8_t hpkey[PTLS_MAX_SECRET_SIZE];
    int ret;

    if (hp_ctx != NULL)
        *hp_ctx = NULL;
    *aead_ctx = NULL;

    /* generate new header protection key */
    if (hp_ctx != NULL) {
        if ((ret = ptls_hkdf_expand_label(hash, hpkey, aead->ctr_cipher->key_size, ptls_iovec_init(secret, hash->digest_size),
                                          "quic hp", ptls_iovec_init(NULL, 0), NULL)) != 0)
            goto Exit;
        if ((*hp_ctx = ptls_cipher_new(aead->ctr_cipher, is_enc, hpkey)) == NULL) {
            ret = PTLS_ERROR_NO_MEMORY;
            goto Exit;
        }
    }

    /* generate new AEAD context */
    if ((*aead_ctx = ptls_aead_new(aead, hash, is_enc, secret, QUICLY_AEAD_BASE_LABEL)) == NULL) {
        ret = PTLS_ERROR_NO_MEMORY;
        goto Exit;
    }

    ret = 0;
Exit:
    if (ret != 0) {
        if (*aead_ctx != NULL) {
            ptls_aead_free(*aead_ctx);
            *aead_ctx = NULL;
        }
        if (hp_ctx != NULL && *hp_ctx != NULL) {
            ptls_cipher_free(*hp_ctx);
            *hp_ctx = NULL;
        }
    }
    ptls_clear_memory(hpkey, sizeof(hpkey));
    return ret;
}

static void default_finalize_send_packet(quicly_crypto_engine_t *engine, quicly_conn_t *conn,
                                         ptls_cipher_context_t *header_protect_ctx, ptls_aead_context_t *packet_protect_ctx,
                                         ptls_iovec_t datagram, size_t first_byte_at, size_t payload_from, uint64_t packet_number,
                                         int coalesced)
{
    ptls_aead_supplementary_encryption_t supp = {.ctx = header_protect_ctx,
                                                 .input = datagram.base + payload_from - QUICLY_SEND_PN_SIZE + QUICLY_MAX_PN_SIZE};

    /* AEAD-protect the payload in place, also obtaining the header protection mask via `supp` */
    ptls_aead_encrypt_s(packet_protect_ctx, datagram.base + payload_from, datagram.base + payload_from,
                        datagram.len - payload_from - packet_protect_ctx->algo->tag_size, packet_number,
                        datagram.base + first_byte_at, payload_from - first_byte_at, &supp);

    /* apply header protection to the first byte and the packet number bytes */
    datagram.base[first_byte_at] ^= supp.output[0] & (QUICLY_PACKET_IS_LONG_HEADER(datagram.base[first_byte_at]) ? 0xf : 0x1f);
    for (size_t i = 0; i != QUICLY_SEND_PN_SIZE; ++i)
        datagram.base[payload_from + i - QUICLY_SEND_PN_SIZE] ^= supp.output[i + 1];
}

quicly_crypto_engine_t quicly_default_crypto_engine = {default_setup_cipher, default_finalize_send_packet};