/src/h2o/deps/quicly/lib/defaults.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2017-2019 Fastly, Kazuho Oku |
3 | | * |
4 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
5 | | * of this software and associated documentation files (the "Software"), to |
6 | | * deal in the Software without restriction, including without limitation the |
7 | | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
8 | | * sell copies of the Software, and to permit persons to whom the Software is |
9 | | * furnished to do so, subject to the following conditions: |
10 | | * |
11 | | * The above copyright notice and this permission notice shall be included in |
12 | | * all copies or substantial portions of the Software. |
13 | | * |
14 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
17 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
18 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
19 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
20 | | * IN THE SOFTWARE. |
21 | | */ |
22 | | #include <sys/time.h> |
23 | | #include "quicly/defaults.h" |
24 | | |
25 | | #define DEFAULT_INITIAL_EGRESS_MAX_UDP_PAYLOAD_SIZE 1280 |
26 | | #define DEFAULT_MAX_UDP_PAYLOAD_SIZE 1472 |
27 | | #define DEFAULT_MAX_PACKETS_PER_KEY 16777216 |
28 | | #define DEFAULT_MAX_CRYPTO_BYTES 65536 |
29 | | #define DEFAULT_INITCWND_PACKETS 10 |
30 | | #define DEFAULT_PRE_VALIDATION_AMPLIFICATION_LIMIT 3 |
31 | | #define DEFAULT_HANDSHAKE_TIMEOUT_RTT_MULTIPLIER 400 |
32 | | #define DEFAULT_MAX_INITIAL_HANDSHAKE_PACKETS 1000 |
33 | | #define DEFAULT_MAX_PROBE_PACKETS 5 |
34 | | #define DEFAULT_MAX_PATH_VALIDATION_FAILURES 100 |
35 | | |
36 | | /* profile that employs IETF specified values */ |
/* profile that employs IETF specified values */
const quicly_context_t quicly_spec_context = {
    .initial_egress_max_udp_payload_size = DEFAULT_INITIAL_EGRESS_MAX_UDP_PAYLOAD_SIZE,
    .loss = QUICLY_LOSS_SPEC_CONF,
    .transport_params =
        {
            .max_stream_data.bidi_local = 1 * 1024 * 1024,
            /* NOTE(review): 11MB here vs. 1MB for bidi_local/uni is asymmetric — confirm this is intentional */
            .max_stream_data.bidi_remote = 11 * 1024 * 1024,
            .max_stream_data.uni = 1 * 1024 * 1024,
            .max_data = 16 * 1024 * 1024,
            .max_idle_timeout = 30 * 1000, /* milliseconds */
            .max_streams_bidi = 100,
            .max_streams_uni = 0,
            .max_udp_payload_size = DEFAULT_MAX_UDP_PAYLOAD_SIZE,
        },
    .max_packets_per_key = DEFAULT_MAX_PACKETS_PER_KEY,
    .max_crypto_bytes = DEFAULT_MAX_CRYPTO_BYTES,
    .initcwnd_packets = DEFAULT_INITCWND_PACKETS,
    .initial_version = QUICLY_PROTOCOL_VERSION_1,
    .pre_validation_amplification_limit = DEFAULT_PRE_VALIDATION_AMPLIFICATION_LIMIT,
    .handshake_timeout_rtt_multiplier = DEFAULT_HANDSHAKE_TIMEOUT_RTT_MULTIPLIER,
    .max_initial_handshake_packets = DEFAULT_MAX_INITIAL_HANDSHAKE_PACKETS,
    .max_probe_packets = DEFAULT_MAX_PROBE_PACKETS,
    .max_path_validation_failures = DEFAULT_MAX_PATH_VALIDATION_FAILURES,
    /* presumably 255 means "on for (almost) all connections" and 0 means off — verify ratio semantics against quicly.h */
    .enable_ratio =
        {
            .jumpstart.non_resume = 255,
            .jumpstart.resume = 255,
            .rapid_start = 0, /* off by default */
            .ecn = 255,
            .pacing = 0, /* off by default */
            .respect_app_limited = 255,
        },
    .stream_scheduler = &quicly_default_stream_scheduler,
    .now = &quicly_default_now,
    .crypto_engine = &quicly_default_crypto_engine,
    .init_cc = &quicly_default_init_cc,
};
74 | | |
75 | | /* profile with a focus on reducing latency for the HTTP use case */ |
/* profile with a focus on reducing latency for the HTTP use case; identical to `quicly_spec_context` except for `.loss`, which
 * uses the more aggressive QUICLY_LOSS_PERFORMANT_CONF */
const quicly_context_t quicly_performant_context = {
    .initial_egress_max_udp_payload_size = DEFAULT_INITIAL_EGRESS_MAX_UDP_PAYLOAD_SIZE,
    .loss = QUICLY_LOSS_PERFORMANT_CONF,
    .transport_params =
        {
            .max_stream_data.bidi_local = 1 * 1024 * 1024,
            /* NOTE(review): 11MB here vs. 1MB for bidi_local/uni is asymmetric — confirm this is intentional */
            .max_stream_data.bidi_remote = 11 * 1024 * 1024,
            .max_stream_data.uni = 1 * 1024 * 1024,
            .max_data = 16 * 1024 * 1024,
            .max_idle_timeout = 30 * 1000, /* milliseconds */
            .max_streams_bidi = 100,
            .max_streams_uni = 0,
            .max_udp_payload_size = DEFAULT_MAX_UDP_PAYLOAD_SIZE,
        },
    .max_packets_per_key = DEFAULT_MAX_PACKETS_PER_KEY,
    .max_crypto_bytes = DEFAULT_MAX_CRYPTO_BYTES,
    .initcwnd_packets = DEFAULT_INITCWND_PACKETS,
    .initial_version = QUICLY_PROTOCOL_VERSION_1,
    .pre_validation_amplification_limit = DEFAULT_PRE_VALIDATION_AMPLIFICATION_LIMIT,
    .handshake_timeout_rtt_multiplier = DEFAULT_HANDSHAKE_TIMEOUT_RTT_MULTIPLIER,
    .max_initial_handshake_packets = DEFAULT_MAX_INITIAL_HANDSHAKE_PACKETS,
    .max_probe_packets = DEFAULT_MAX_PROBE_PACKETS,
    .max_path_validation_failures = DEFAULT_MAX_PATH_VALIDATION_FAILURES,
    .enable_ratio =
        {
            .jumpstart.non_resume = 255,
            .jumpstart.resume = 255,
            .rapid_start = 0, /* off by default */
            .ecn = 255,
            .pacing = 0, /* off by default */
            .respect_app_limited = 255,
        },
    .stream_scheduler = &quicly_default_stream_scheduler,
    .now = &quicly_default_now,
    .crypto_engine = &quicly_default_crypto_engine,
    .init_cc = &quicly_default_init_cc,
};
113 | | |
/**
 * The context of the default CID encryptor. All the contexts being used here are ECB ciphers and therefore stateless - they can be
 * used concurrently from multiple threads.
 */
struct st_quicly_default_encrypt_cid_t {
    /* callback table exposed to quicly; must be the first member so that the struct can be downcast from `quicly_cid_encryptor_t *` */
    quicly_cid_encryptor_t super;
    /* ECB contexts: an encrypt / decrypt pair for CIDs, plus an encrypt-only context for deriving stateless reset tokens */
    ptls_cipher_context_t *cid_encrypt_ctx, *cid_decrypt_ctx, *reset_token_ctx;
};
122 | | |
123 | | static void generate_reset_token(struct st_quicly_default_encrypt_cid_t *self, void *token, const void *cid) |
124 | 0 | { |
125 | 0 | uint8_t expandbuf[QUICLY_STATELESS_RESET_TOKEN_LEN]; |
126 | |
|
127 | 0 | assert(self->reset_token_ctx->algo->block_size == QUICLY_STATELESS_RESET_TOKEN_LEN); |
128 | | |
129 | | /* expand the input to full size, if CID is shorter than the size of the reset token */ |
130 | 0 | if (self->cid_encrypt_ctx->algo->block_size != QUICLY_STATELESS_RESET_TOKEN_LEN) { |
131 | 0 | assert(self->cid_encrypt_ctx->algo->block_size < QUICLY_STATELESS_RESET_TOKEN_LEN); |
132 | 0 | memset(expandbuf, 0, sizeof(expandbuf)); |
133 | 0 | memcpy(expandbuf, cid, self->cid_encrypt_ctx->algo->block_size); |
134 | 0 | cid = expandbuf; |
135 | 0 | } |
136 | | |
137 | | /* transform */ |
138 | 0 | ptls_cipher_encrypt(self->reset_token_ctx, token, cid, QUICLY_STATELESS_RESET_TOKEN_LEN); |
139 | 0 | } |
140 | | |
141 | | static void default_encrypt_cid(quicly_cid_encryptor_t *_self, quicly_cid_t *encrypted, void *reset_token, |
142 | | const quicly_cid_plaintext_t *plaintext) |
143 | 0 | { |
144 | 0 | struct st_quicly_default_encrypt_cid_t *self = (void *)_self; |
145 | 0 | uint8_t buf[16], *p; |
146 | | |
147 | | /* encode */ |
148 | 0 | p = buf; |
149 | 0 | switch (self->cid_encrypt_ctx->algo->block_size) { |
150 | 0 | case 8: |
151 | 0 | break; |
152 | 0 | case 16: |
153 | 0 | p = quicly_encode64(p, plaintext->node_id); |
154 | 0 | break; |
155 | 0 | default: |
156 | 0 | assert(!"unexpected block size"); |
157 | 0 | break; |
158 | 0 | } |
159 | 0 | p = quicly_encode32(p, plaintext->master_id); |
160 | 0 | p = quicly_encode32(p, (plaintext->thread_id << 8) | plaintext->path_id); |
161 | 0 | assert(p - buf == self->cid_encrypt_ctx->algo->block_size); |
162 | | |
163 | | /* generate CID */ |
164 | 0 | ptls_cipher_encrypt(self->cid_encrypt_ctx, encrypted->cid, buf, self->cid_encrypt_ctx->algo->block_size); |
165 | 0 | encrypted->len = self->cid_encrypt_ctx->algo->block_size; |
166 | | |
167 | | /* generate stateless reset token if requested */ |
168 | 0 | if (reset_token != NULL) |
169 | 0 | generate_reset_token(self, reset_token, encrypted->cid); |
170 | 0 | } |
171 | | |
172 | | static size_t default_decrypt_cid(quicly_cid_encryptor_t *_self, quicly_cid_plaintext_t *plaintext, const void *encrypted, |
173 | | size_t len) |
174 | 0 | { |
175 | 0 | struct st_quicly_default_encrypt_cid_t *self = (void *)_self; |
176 | 0 | uint8_t ptbuf[16]; |
177 | 0 | const uint8_t *p; |
178 | |
|
179 | 0 | if (len != 0) { |
180 | | /* long header packet; decrypt only if given Connection ID matches the expected size */ |
181 | 0 | if (len != self->cid_decrypt_ctx->algo->block_size) |
182 | 0 | return SIZE_MAX; |
183 | 0 | } else { |
184 | | /* short header packet; we are the one to name the size */ |
185 | 0 | len = self->cid_decrypt_ctx->algo->block_size; |
186 | 0 | } |
187 | | |
188 | | /* decrypt */ |
189 | 0 | ptls_cipher_encrypt(self->cid_decrypt_ctx, ptbuf, encrypted, len); |
190 | | |
191 | | /* decode */ |
192 | 0 | p = ptbuf; |
193 | 0 | if (len == 16) { |
194 | 0 | plaintext->node_id = quicly_decode64(&p); |
195 | 0 | } else { |
196 | 0 | plaintext->node_id = 0; |
197 | 0 | } |
198 | 0 | plaintext->master_id = quicly_decode32(&p); |
199 | 0 | plaintext->thread_id = quicly_decode24(&p); |
200 | 0 | plaintext->path_id = *p++; |
201 | 0 | assert(p - ptbuf == len); |
202 | | |
203 | 0 | return len; |
204 | 0 | } |
205 | | |
206 | | static int default_generate_reset_token(quicly_cid_encryptor_t *_self, void *token, const void *cid) |
207 | 0 | { |
208 | 0 | struct st_quicly_default_encrypt_cid_t *self = (void *)_self; |
209 | 0 | generate_reset_token(self, token, cid); |
210 | 0 | return 1; |
211 | 0 | } |
212 | | |
/**
 * Creates the default CID encryptor. CIDs are encrypted with `cid_cipher` (a cipher with a block size of 8 or 16 bytes), and
 * stateless reset tokens with `reset_token_cipher` (16-byte block size). Both keys are derived from `key` via HKDF-Expand-Label
 * using `hash`. Returns a pointer to the embedded callback table, or NULL on failure; release with
 * `quicly_free_default_cid_encryptor`.
 */
quicly_cid_encryptor_t *quicly_new_default_cid_encryptor(ptls_cipher_algorithm_t *cid_cipher,
                                                         ptls_cipher_algorithm_t *reset_token_cipher, ptls_hash_algorithm_t *hash,
                                                         ptls_iovec_t key)
{
    struct st_quicly_default_encrypt_cid_t *self;
    uint8_t digestbuf[PTLS_MAX_DIGEST_SIZE], keybuf[PTLS_MAX_SECRET_SIZE];

    assert(cid_cipher->block_size == 8 || cid_cipher->block_size == 16);
    assert(reset_token_cipher->block_size == 16);

    /* if the key is longer than the hash block size, compress it by hashing first (the HMAC key preprocessing rule) */
    if (key.len > hash->block_size) {
        ptls_calc_hash(hash, digestbuf, key.base, key.len);
        key = ptls_iovec_init(digestbuf, hash->digest_size);
    }

    /* allocate; the designated initializer zeroes the cipher-context pointers so the Fail path can free unconditionally */
    if ((self = malloc(sizeof(*self))) == NULL)
        goto Fail;
    *self = (struct st_quicly_default_encrypt_cid_t){{default_encrypt_cid, default_decrypt_cid, default_generate_reset_token}};

    /* derive the CID key and instantiate the encrypt / decrypt pair sharing that key */
    if (ptls_hkdf_expand_label(hash, keybuf, cid_cipher->key_size, key, "cid", ptls_iovec_init(NULL, 0), "") != 0)
        goto Fail;
    if ((self->cid_encrypt_ctx = ptls_cipher_new(cid_cipher, 1, keybuf)) == NULL)
        goto Fail;
    if ((self->cid_decrypt_ctx = ptls_cipher_new(cid_cipher, 0, keybuf)) == NULL)
        goto Fail;
    /* derive the reset-token key (reusing keybuf) and instantiate the encrypt-only context */
    if (ptls_hkdf_expand_label(hash, keybuf, reset_token_cipher->key_size, key, "reset", ptls_iovec_init(NULL, 0), "") != 0)
        goto Fail;
    if ((self->reset_token_ctx = ptls_cipher_new(reset_token_cipher, 1, keybuf)) == NULL)
        goto Fail;

    /* wipe key material from the stack before returning */
    ptls_clear_memory(digestbuf, sizeof(digestbuf));
    ptls_clear_memory(keybuf, sizeof(keybuf));
    return &self->super;

Fail:
    /* free whichever contexts were created; `self` may be NULL if malloc itself failed */
    if (self != NULL) {
        if (self->cid_encrypt_ctx != NULL)
            ptls_cipher_free(self->cid_encrypt_ctx);
        if (self->cid_decrypt_ctx != NULL)
            ptls_cipher_free(self->cid_decrypt_ctx);
        if (self->reset_token_ctx != NULL)
            ptls_cipher_free(self->reset_token_ctx);
        free(self);
    }
    ptls_clear_memory(digestbuf, sizeof(digestbuf));
    ptls_clear_memory(keybuf, sizeof(keybuf));
    return NULL;
}
261 | | |
262 | | void quicly_free_default_cid_encryptor(quicly_cid_encryptor_t *_self) |
263 | 0 | { |
264 | 0 | struct st_quicly_default_encrypt_cid_t *self = (void *)_self; |
265 | |
|
266 | 0 | ptls_cipher_free(self->cid_encrypt_ctx); |
267 | 0 | ptls_cipher_free(self->cid_decrypt_ctx); |
268 | 0 | ptls_cipher_free(self->reset_token_ctx); |
269 | 0 | free(self); |
270 | 0 | } |
271 | | |
/**
 * See doc-comment of `st_quicly_default_scheduler_state_t` to understand the logic.
 * Reports whether any stream is ready to send. When the connection is not saturated, streams parked on the "blocked" list are
 * moved back to "active" first; the answer is then simply whether the "active" list is non-empty.
 */
static int default_stream_scheduler_can_send(quicly_stream_scheduler_t *self, quicly_conn_t *conn, int conn_is_saturated)
{
    struct st_quicly_default_scheduler_state_t *sched = &((struct _st_quicly_conn_public_t *)conn)->_default_scheduler;

    if (!conn_is_saturated) {
        /* not saturated; everything on the "blocked" list becomes runnable again */
        quicly_linklist_insert_list(&sched->active, &sched->blocked);
    } else {
        /* The code below is disabled, because H2O's scheduler doesn't allow you to "walk" the priority tree without actually
         * running the round robin, and we want quicly's default to behave like H2O so that we can catch errors. The downside is
         * that there'd be at most one spurious call of `quicly_send` when the connection is saturated, but that should be fine.
         */
        if (0) {
            /* Saturated. Lazily move such streams to the "blocked" list, at the same time checking if anything can be sent. */
            while (quicly_linklist_is_linked(&sched->active)) {
                quicly_stream_t *stream =
                    (void *)((char *)sched->active.next - offsetof(quicly_stream_t, _send_aux.pending_link.default_scheduler));
                if (quicly_stream_can_send(stream, 0))
                    return 1;
                quicly_linklist_unlink(&stream->_send_aux.pending_link.default_scheduler);
                quicly_linklist_insert(sched->blocked.prev, &stream->_send_aux.pending_link.default_scheduler);
            }
        }
    }

    /* something is sendable iff the "active" list is non-empty */
    return quicly_linklist_is_linked(&sched->active);
}
302 | | |
303 | | static void link_stream(struct st_quicly_default_scheduler_state_t *sched, quicly_stream_t *stream, int conn_is_blocked) |
304 | 0 | { |
305 | 0 | if (!quicly_linklist_is_linked(&stream->_send_aux.pending_link.default_scheduler)) { |
306 | 0 | quicly_linklist_t *slot = &sched->active; |
307 | 0 | if (conn_is_blocked && !quicly_stream_can_send(stream, 0)) |
308 | 0 | slot = &sched->blocked; |
309 | 0 | quicly_linklist_insert(slot->prev, &stream->_send_aux.pending_link.default_scheduler); |
310 | 0 | } |
311 | 0 | } |
312 | | |
/**
 * See doc-comment of `st_quicly_default_scheduler_state_t` to understand the logic.
 * Round-robins over the "active" list, emitting stream data until the connection can send no more, the list drains, or
 * `quicly_send_stream` fails. Returns 0 on success or the error returned by `quicly_send_stream`.
 */
static quicly_error_t default_stream_scheduler_do_send(quicly_stream_scheduler_t *self, quicly_conn_t *conn,
                                                       quicly_send_context_t *s)
{
    struct st_quicly_default_scheduler_state_t *sched = &((struct _st_quicly_conn_public_t *)conn)->_default_scheduler;
    int conn_is_blocked = quicly_is_blocked(conn);
    quicly_error_t ret = 0;

    /* when connection-level flow control is not blocking us, previously parked streams become runnable again */
    if (!conn_is_blocked)
        quicly_linklist_insert_list(&sched->active, &sched->blocked);

    while (quicly_can_send_data((quicly_conn_t *)conn, s) && quicly_linklist_is_linked(&sched->active)) {
        /* detach the first active stream (recover the stream pointer from its embedded link node) */
        quicly_stream_t *stream =
            (void *)((char *)sched->active.next - offsetof(quicly_stream_t, _send_aux.pending_link.default_scheduler));
        quicly_linklist_unlink(&stream->_send_aux.pending_link.default_scheduler);
        /* relink the stream to the blocked list if the connection is blocked and the stream has nothing immediately sendable */
        if (conn_is_blocked && !quicly_stream_can_send(stream, 0)) {
            quicly_linklist_insert(sched->blocked.prev, &stream->_send_aux.pending_link.default_scheduler);
            continue;
        }
        /* send! */
        if ((ret = quicly_send_stream(stream, s)) != 0) {
            /* FIXME Stop quicly_send_stream emitting SENDBUF_FULL (happens when CWND is congested). Otherwise, we need to make
             * adjustments to the scheduler after popping a stream */
            if (ret == QUICLY_ERROR_SENDBUF_FULL) {
                assert(quicly_stream_can_send(stream, 1));
                link_stream(sched, stream, conn_is_blocked);
            }
            break;
        }
        /* reschedule at the tail (round-robin fairness) if the stream still has something to send */
        conn_is_blocked = quicly_is_blocked(conn);
        if (quicly_stream_can_send(stream, 1))
            link_stream(sched, stream, conn_is_blocked);
    }

    return ret;
}
354 | | |
355 | | /** |
356 | | * See doc-comment of `st_quicly_default_scheduler_state_t` to understand the logic. |
357 | | */ |
358 | | static void default_stream_scheduler_update_state(quicly_stream_scheduler_t *self, quicly_stream_t *stream) |
359 | 0 | { |
360 | 0 | struct st_quicly_default_scheduler_state_t *sched = &((struct _st_quicly_conn_public_t *)stream->conn)->_default_scheduler; |
361 | |
|
362 | 0 | if (quicly_stream_can_send(stream, 1)) { |
363 | | /* activate if not */ |
364 | 0 | link_stream(sched, stream, quicly_is_blocked(stream->conn)); |
365 | 0 | } else { |
366 | | /* deactivate if active */ |
367 | 0 | if (quicly_linklist_is_linked(&stream->_send_aux.pending_link.default_scheduler)) |
368 | 0 | quicly_linklist_unlink(&stream->_send_aux.pending_link.default_scheduler); |
369 | 0 | } |
370 | 0 | } |
371 | | |
/* the default stream scheduler, combining the three callbacks defined above */
quicly_stream_scheduler_t quicly_default_stream_scheduler = {default_stream_scheduler_can_send, default_stream_scheduler_do_send,
                                                             default_stream_scheduler_update_state};
374 | | |
375 | | quicly_stream_t *quicly_default_alloc_stream(quicly_context_t *ctx) |
376 | 0 | { |
377 | 0 | return malloc(sizeof(quicly_stream_t)); |
378 | 0 | } |
379 | | |
/* Default stream deallocator; counterpart of `quicly_default_alloc_stream`. */
void quicly_default_free_stream(quicly_stream_t *stream)
{
    free(stream);
}
384 | | |
385 | | static int64_t default_now(quicly_now_t *self) |
386 | 32.1k | { |
387 | 32.1k | struct timeval tv; |
388 | 32.1k | gettimeofday(&tv, NULL); |
389 | 32.1k | int64_t tv_now = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; |
390 | | |
391 | | /* make sure that the time does not get rewind */ |
392 | 32.1k | static __thread int64_t now; |
393 | 32.1k | if (now < tv_now) |
394 | 18.1k | now = tv_now; |
395 | 32.1k | return now; |
396 | 32.1k | } |
397 | | |
/* the default clock, backed by gettimeofday(2) */
quicly_now_t quicly_default_now = {default_now};
399 | | |
/**
 * The default `setup_cipher` callback: derives from `secret` a header-protection cipher context (skipped when `hp_ctx` is NULL)
 * and an AEAD context for packet protection. Returns 0 on success, a picotls error code otherwise; on failure any context that
 * was created is freed and the corresponding output pointer reset to NULL.
 */
static int default_setup_cipher(quicly_crypto_engine_t *engine, quicly_conn_t *conn, size_t epoch, int is_enc,
                                ptls_cipher_context_t **hp_ctx, ptls_aead_context_t **aead_ctx, ptls_aead_algorithm_t *aead,
                                ptls_hash_algorithm_t *hash, const void *secret)
{
    uint8_t hpkey[PTLS_MAX_SECRET_SIZE];
    int ret;

    /* pre-set the outputs to NULL so the Exit path can free/reset unconditionally */
    if (hp_ctx != NULL)
        *hp_ctx = NULL;
    *aead_ctx = NULL;

    /* generate new header protection key (uses the AEAD's counter-mode cipher) */
    if (hp_ctx != NULL) {
        if ((ret = ptls_hkdf_expand_label(hash, hpkey, aead->ctr_cipher->key_size, ptls_iovec_init(secret, hash->digest_size),
                                          "quic hp", ptls_iovec_init(NULL, 0), NULL)) != 0)
            goto Exit;
        if ((*hp_ctx = ptls_cipher_new(aead->ctr_cipher, is_enc, hpkey)) == NULL) {
            ret = PTLS_ERROR_NO_MEMORY;
            goto Exit;
        }
    }

    /* generate new AEAD context */
    if ((*aead_ctx = ptls_aead_new(aead, hash, is_enc, secret, QUICLY_AEAD_BASE_LABEL)) == NULL) {
        ret = PTLS_ERROR_NO_MEMORY;
        goto Exit;
    }

    ret = 0;
Exit:
    /* on failure, unwind whatever was created */
    if (ret != 0) {
        if (*aead_ctx != NULL) {
            ptls_aead_free(*aead_ctx);
            *aead_ctx = NULL;
        }
        if (hp_ctx != NULL && *hp_ctx != NULL) {
            ptls_cipher_free(*hp_ctx);
            *hp_ctx = NULL;
        }
    }
    /* wipe the derived header-protection key from the stack */
    ptls_clear_memory(hpkey, sizeof(hpkey));
    return ret;
}
443 | | |
/**
 * The default `finalize_send_packet` callback: AEAD-protects the payload of the packet in-place, then applies header protection
 * to the first byte and the packet-number bytes using the mask produced as a side output of the AEAD call.
 */
static void default_finalize_send_packet(quicly_crypto_engine_t *engine, quicly_conn_t *conn,
                                         ptls_cipher_context_t *header_protect_ctx, ptls_aead_context_t *packet_protect_ctx,
                                         ptls_iovec_t datagram, size_t first_byte_at, size_t payload_from, uint64_t packet_number,
                                         int coalesced)
{
    /* `supp.input` is the header-protection sample, taken at a fixed offset from the start of the (truncated) packet number */
    ptls_aead_supplementary_encryption_t supp = {.ctx = header_protect_ctx,
                                                 .input = datagram.base + payload_from - QUICLY_SEND_PN_SIZE + QUICLY_MAX_PN_SIZE};

    /* encrypt the payload in-place; bytes [first_byte_at, payload_from) form the associated data (the packet header) */
    ptls_aead_encrypt_s(packet_protect_ctx, datagram.base + payload_from, datagram.base + payload_from,
                        datagram.len - payload_from - packet_protect_ctx->algo->tag_size, packet_number,
                        datagram.base + first_byte_at, payload_from - first_byte_at, &supp);

    /* header protection: mask the low 4 bits of a long header's first byte (5 bits for short header), then the PN bytes */
    datagram.base[first_byte_at] ^= supp.output[0] & (QUICLY_PACKET_IS_LONG_HEADER(datagram.base[first_byte_at]) ? 0xf : 0x1f);
    for (size_t i = 0; i != QUICLY_SEND_PN_SIZE; ++i)
        datagram.base[payload_from + i - QUICLY_SEND_PN_SIZE] ^= supp.output[i + 1];
}
460 | | |
/* the default crypto engine, combining the two callbacks defined above */
quicly_crypto_engine_t quicly_default_crypto_engine = {default_setup_cipher, default_finalize_send_packet};