/src/boringssl/crypto/cipher/e_tls.cc
Line | Count | Source |
1 | | // Copyright 2014 The BoringSSL Authors |
2 | | // |
3 | | // Licensed under the Apache License, Version 2.0 (the "License"); |
4 | | // you may not use this file except in compliance with the License. |
5 | | // You may obtain a copy of the License at |
6 | | // |
7 | | // https://www.apache.org/licenses/LICENSE-2.0 |
8 | | // |
9 | | // Unless required by applicable law or agreed to in writing, software |
10 | | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | // See the License for the specific language governing permissions and |
13 | | // limitations under the License. |
14 | | |
15 | | #include <assert.h> |
16 | | #include <limits.h> |
17 | | #include <string.h> |
18 | | |
19 | | #include <openssl/aead.h> |
20 | | #include <openssl/cipher.h> |
21 | | #include <openssl/err.h> |
22 | | #include <openssl/hmac.h> |
23 | | #include <openssl/md5.h> |
24 | | #include <openssl/mem.h> |
25 | | #include <openssl/sha.h> |
26 | | |
27 | | #include "../fipsmodule/cipher/internal.h" |
28 | | #include "../internal.h" |
29 | | #include "internal.h" |
30 | | |
31 | | |
32 | | typedef struct { |
33 | | EVP_CIPHER_CTX cipher_ctx; |
34 | | HMAC_CTX *hmac_ctx; |
35 | | // mac_key is the portion of the key used for the MAC. It is retained |
36 | | // separately for the constant-time CBC code. |
37 | | uint8_t mac_key[EVP_MAX_MD_SIZE]; |
38 | | uint8_t mac_key_len; |
39 | | // implicit_iv is one iff this is a pre-TLS-1.1 CBC cipher without an explicit |
40 | | // IV. |
41 | | char implicit_iv; |
42 | | } AEAD_TLS_CTX; |
43 | | |
44 | | static_assert(EVP_MAX_MD_SIZE < 256, "mac_key_len does not fit in uint8_t"); |
45 | | |
46 | | static_assert(sizeof(((EVP_AEAD_CTX *)nullptr)->state) >= sizeof(AEAD_TLS_CTX), |
47 | | "AEAD state is too small"); |
48 | | static_assert(alignof(union evp_aead_ctx_st_state) >= alignof(AEAD_TLS_CTX), |
49 | | "AEAD state has insufficient alignment"); |
50 | | |
51 | 60.6k | static void aead_tls_cleanup(EVP_AEAD_CTX *ctx) { |
52 | 60.6k | AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)&ctx->state; |
53 | 60.6k | EVP_CIPHER_CTX_cleanup(&tls_ctx->cipher_ctx); |
54 | 60.6k | HMAC_CTX_free(tls_ctx->hmac_ctx); |
55 | 60.6k | } |
56 | | |
57 | | static int aead_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, |
58 | | size_t tag_len, enum evp_aead_direction_t dir, |
59 | | const EVP_CIPHER *cipher, const EVP_MD *md, |
60 | 60.6k | char implicit_iv) { |
61 | 60.6k | if (tag_len != EVP_AEAD_DEFAULT_TAG_LENGTH && tag_len != EVP_MD_size(md)) { |
62 | 0 | OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_TAG_SIZE); |
63 | 0 | return 0; |
64 | 0 | } |
65 | | |
66 | 60.6k | if (key_len != EVP_AEAD_key_length(ctx->aead)) { |
67 | 0 | OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); |
68 | 0 | return 0; |
69 | 0 | } |
70 | | |
71 | 60.6k | size_t mac_key_len = EVP_MD_size(md); |
72 | 60.6k | size_t enc_key_len = EVP_CIPHER_key_length(cipher); |
73 | 60.6k | assert(mac_key_len + enc_key_len + |
74 | 60.6k | (implicit_iv ? EVP_CIPHER_iv_length(cipher) : 0) == |
75 | 60.6k | key_len); |
76 | | |
77 | 60.6k | AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)&ctx->state; |
78 | 60.6k | tls_ctx->hmac_ctx = HMAC_CTX_new(); |
79 | 60.6k | if (!tls_ctx->hmac_ctx) { |
80 | 0 | return 0; |
81 | 0 | } |
82 | 60.6k | EVP_CIPHER_CTX_init(&tls_ctx->cipher_ctx); |
83 | 60.6k | assert(mac_key_len <= EVP_MAX_MD_SIZE); |
84 | 60.6k | OPENSSL_memcpy(tls_ctx->mac_key, key, mac_key_len); |
85 | 60.6k | tls_ctx->mac_key_len = (uint8_t)mac_key_len; |
86 | 60.6k | tls_ctx->implicit_iv = implicit_iv; |
87 | | |
88 | 60.6k | if (!EVP_CipherInit_ex( |
89 | 60.6k | &tls_ctx->cipher_ctx, cipher, nullptr, &key[mac_key_len], |
90 | 60.6k | implicit_iv ? &key[mac_key_len + enc_key_len] : nullptr, |
91 | 60.6k | dir == evp_aead_seal) || |
92 | 60.6k | !HMAC_Init_ex(tls_ctx->hmac_ctx, key, mac_key_len, md, nullptr)) { |
93 | 0 | aead_tls_cleanup(ctx); |
94 | 0 | return 0; |
95 | 0 | } |
96 | 60.6k | EVP_CIPHER_CTX_set_padding(&tls_ctx->cipher_ctx, 0); |
97 | | |
98 | 60.6k | return 1; |
99 | 60.6k | } |
100 | | |
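
The |key| handed to aead_tls_init is the TLS key block laid out as MAC key || cipher key, with the implicit IV appended for the pre-TLS-1.1 (implicit-IV) variants, and the resulting context only works in the direction it was initialized for. A minimal caller-side sketch, assuming the AES-128-CBC-SHA1 suite and placeholder key material:

    #include <openssl/aead.h>

    // Sketch only: real key bytes come from the TLS key derivation.
    const EVP_AEAD *aead = EVP_aead_aes_128_cbc_sha1_tls();
    uint8_t key[20 + 16] = {0};  // 20-byte SHA-1 MAC key, then 16-byte AES-128 key
    EVP_AEAD_CTX aead_ctx;
    EVP_AEAD_CTX_zero(&aead_ctx);
    // These TLS AEADs are one-directional, so the direction-aware init is used.
    if (!EVP_AEAD_CTX_init_with_direction(&aead_ctx, aead, key, sizeof(key),
                                          EVP_AEAD_DEFAULT_TAG_LENGTH,
                                          evp_aead_seal)) {
      return 0;  // handle the error
    }
    // ... seal records with |aead_ctx|, then release it.
    EVP_AEAD_CTX_cleanup(&aead_ctx);
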
101 | | static size_t aead_tls_tag_len(const EVP_AEAD_CTX *ctx, const size_t in_len, |
102 | 6.28k | const size_t extra_in_len) { |
103 | 6.28k | assert(extra_in_len == 0); |
104 | 6.28k | const AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)&ctx->state; |
105 | | |
106 | 6.28k | const size_t hmac_len = HMAC_size(tls_ctx->hmac_ctx); |
107 | 6.28k | if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE) { |
108 | | // The NULL cipher. |
109 | 0 | return hmac_len; |
110 | 0 | } |
111 | | |
112 | 6.28k | const size_t block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx); |
113 | | // An overflow of |in_len + hmac_len| doesn't affect the result mod |
114 | | // |block_size|, provided that |block_size| is a smaller power of two. |
115 | 6.28k | assert(block_size != 0 && (block_size & (block_size - 1)) == 0); |
116 | 6.28k | const size_t pad_len = block_size - (in_len + hmac_len) % block_size; |
117 | 6.28k | return hmac_len + pad_len; |
118 | 6.28k | } |
119 | | |
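
For the CBC suites, the value computed above is the MAC plus however much CBC padding the record will carry, so it varies with |in_len|. A quick worked check, assuming AES-CBC with SHA-1 (16-byte blocks, 20-byte MAC):

    // in_len = 100: (100 + 20) % 16 = 8  -> pad_len = 16 - 8 = 8  -> tag_len = 28
    // in_len = 108: (108 + 20) % 16 = 0  -> pad_len = 16 (padding is never empty,
    //               so a full block is added)                     -> tag_len = 36
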
120 | | static int aead_tls_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out, |
121 | | uint8_t *out_tag, size_t *out_tag_len, |
122 | | const size_t max_out_tag_len, |
123 | | const uint8_t *nonce, const size_t nonce_len, |
124 | | const uint8_t *in, const size_t in_len, |
125 | | const uint8_t *extra_in, |
126 | | const size_t extra_in_len, const uint8_t *ad, |
127 | 1.04k | const size_t ad_len) { |
128 | 1.04k | AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)&ctx->state; |
129 | | |
130 | 1.04k | if (!tls_ctx->cipher_ctx.encrypt) { |
131 | | // Unlike a normal AEAD, a TLS AEAD may only be used in one direction. |
132 | 0 | OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION); |
133 | 0 | return 0; |
134 | 0 | } |
135 | | |
136 | 1.04k | if (max_out_tag_len < aead_tls_tag_len(ctx, in_len, extra_in_len)) { |
137 | 0 | OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); |
138 | 0 | return 0; |
139 | 0 | } |
140 | | |
141 | 1.04k | if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) { |
142 | 0 | OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE); |
143 | 0 | return 0; |
144 | 0 | } |
145 | | |
146 | 1.04k | if (ad_len != 13 - 2 /* length bytes */) { |
147 | 0 | OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE); |
148 | 0 | return 0; |
149 | 0 | } |
150 | | |
151 | | // To allow for CBC mode which changes cipher length, |ad| doesn't include the |
152 | | // length for legacy ciphers. |
153 | 1.04k | uint8_t ad_extra[2]; |
154 | 1.04k | ad_extra[0] = (uint8_t)(in_len >> 8); |
155 | 1.04k | ad_extra[1] = (uint8_t)(in_len & 0xff); |
156 | | |
157 | | // Compute the MAC. This must be first in case the operation is being done |
158 | | // in-place. |
159 | 1.04k | uint8_t mac[EVP_MAX_MD_SIZE]; |
160 | 1.04k | unsigned mac_len; |
161 | 1.04k | if (!HMAC_Init_ex(tls_ctx->hmac_ctx, nullptr, 0, nullptr, nullptr) || |
162 | 1.04k | !HMAC_Update(tls_ctx->hmac_ctx, ad, ad_len) || |
163 | 1.04k | !HMAC_Update(tls_ctx->hmac_ctx, ad_extra, sizeof(ad_extra)) || |
164 | 1.04k | !HMAC_Update(tls_ctx->hmac_ctx, in, in_len) || |
165 | 1.04k | !HMAC_Final(tls_ctx->hmac_ctx, mac, &mac_len)) { |
166 | 0 | return 0; |
167 | 0 | } |
168 | | |
169 | | // Configure the explicit IV. |
170 | 1.04k | if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE && |
171 | 1.04k | !tls_ctx->implicit_iv && |
172 | 756 | !EVP_EncryptInit_ex(&tls_ctx->cipher_ctx, nullptr, nullptr, nullptr, |
173 | 756 | nonce)) { |
174 | 0 | return 0; |
175 | 0 | } |
176 | | |
177 | | // Encrypt the input. |
178 | 1.04k | size_t len; |
179 | 1.04k | if (!EVP_EncryptUpdate_ex(&tls_ctx->cipher_ctx, out, &len, in_len, in, |
180 | 1.04k | in_len)) { |
181 | 0 | return 0; |
182 | 0 | } |
183 | | |
184 | 1.04k | unsigned block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx); |
185 | | |
186 | | // Feed the MAC into the cipher in two steps. First complete the final partial |
187 | | // block from encrypting the input and split the result between |out| and |
188 | | // |out_tag|. Then feed the rest. |
189 | | |
190 | 1.04k | const size_t early_mac_len = |
191 | 1.04k | (block_size - (in_len % block_size)) % block_size; |
192 | 1.04k | if (early_mac_len != 0) { |
193 | 375 | assert(len + block_size - early_mac_len == in_len); |
194 | 375 | uint8_t buf[EVP_MAX_BLOCK_LENGTH]; |
195 | 375 | size_t buf_len; |
196 | 375 | if (!EVP_EncryptUpdate_ex(&tls_ctx->cipher_ctx, buf, &buf_len, sizeof(buf), |
197 | 375 | mac, early_mac_len)) { |
198 | 0 | return 0; |
199 | 0 | } |
200 | 375 | assert(buf_len == block_size); |
201 | 375 | OPENSSL_memcpy(out + len, buf, block_size - early_mac_len); |
202 | 375 | OPENSSL_memcpy(out_tag, buf + block_size - early_mac_len, early_mac_len); |
203 | 375 | } |
204 | 1.04k | size_t tag_len = early_mac_len; |
205 | | |
206 | 1.04k | if (!EVP_EncryptUpdate_ex(&tls_ctx->cipher_ctx, out_tag + tag_len, &len, |
207 | 1.04k | max_out_tag_len - tag_len, mac + tag_len, |
208 | 1.04k | mac_len - tag_len)) { |
209 | 0 | return 0; |
210 | 0 | } |
211 | 1.04k | tag_len += len; |
212 | | |
213 | 1.04k | if (block_size > 1) { |
214 | 1.04k | assert(block_size <= 256); |
215 | 1.04k | assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE); |
216 | | |
217 | | // Compute padding and feed that into the cipher. |
218 | 1.04k | uint8_t padding[256]; |
219 | 1.04k | unsigned padding_len = block_size - ((in_len + mac_len) % block_size); |
220 | 1.04k | OPENSSL_memset(padding, padding_len - 1, padding_len); |
221 | 1.04k | if (!EVP_EncryptUpdate_ex(&tls_ctx->cipher_ctx, out_tag + tag_len, &len, |
222 | 1.04k | max_out_tag_len - tag_len, padding, |
223 | 1.04k | padding_len)) { |
224 | 0 | return 0; |
225 | 0 | } |
226 | 1.04k | tag_len += len; |
227 | 1.04k | } |
228 | | |
229 | 1.04k | if (!EVP_EncryptFinal_ex2(&tls_ctx->cipher_ctx, out_tag + tag_len, &len, |
230 | 1.04k | max_out_tag_len - tag_len)) { |
231 | 0 | return 0; |
232 | 0 | } |
233 | 1.04k | assert(len == 0); // Padding is explicit. |
234 | 1.04k | assert(tag_len == aead_tls_tag_len(ctx, in_len, extra_in_len)); |
235 | | |
236 | 1.04k | *out_tag_len = tag_len; |
237 | 1.04k | return 1; |
238 | 1.04k | } |
239 | | |
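
Seen from the public API, aead_tls_seal_scatter is reached through EVP_AEAD_CTX_seal: the caller supplies an 11-byte AD (sequence number, record type, and protocol version; the 2-byte length is appended internally, as the code above shows) and, for the explicit-IV variants, a fresh IV as the nonce. A hedged sketch with placeholder buffers, assuming a context already initialized for sealing with EVP_aead_aes_128_cbc_sha1_tls:

    // Sketch only: |aead_ctx| was initialized with evp_aead_seal as shown earlier.
    uint8_t nonce[16];    // fresh explicit CBC IV, e.g. filled by RAND_bytes
    uint8_t ad[11];       // seq_num(8) || record type(1) || version(2)
    uint8_t plaintext[256];
    uint8_t out[sizeof(plaintext) + 36];  // worst case: 16 bytes padding + 20-byte MAC
    size_t out_len;
    if (!EVP_AEAD_CTX_seal(&aead_ctx, out, &out_len, sizeof(out),
                           nonce, sizeof(nonce),
                           plaintext, sizeof(plaintext),
                           ad, sizeof(ad))) {
      return 0;  // handle the error
    }
    // |out| now holds ciphertext || encrypted MAC || encrypted padding.
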
240 | | static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, |
241 | | size_t max_out_len, const uint8_t *nonce, |
242 | | size_t nonce_len, const uint8_t *in, size_t in_len, |
243 | 4.38k | const uint8_t *ad, size_t ad_len) { |
244 | 4.38k | AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)&ctx->state; |
245 | | |
246 | 4.38k | if (tls_ctx->cipher_ctx.encrypt) { |
247 | | // Unlike a normal AEAD, a TLS AEAD may only be used in one direction. |
248 | 0 | OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION); |
249 | 0 | return 0; |
250 | 0 | } |
251 | | |
252 | 4.38k | if (in_len < HMAC_size(tls_ctx->hmac_ctx)) { |
253 | 14 | OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); |
254 | 14 | return 0; |
255 | 14 | } |
256 | | |
257 | 4.36k | if (max_out_len < in_len) { |
258 | | // This requires that the caller provide space for the MAC, even though it |
259 | | // will always be removed on return. |
260 | 0 | OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); |
261 | 0 | return 0; |
262 | 0 | } |
263 | | |
264 | 4.36k | if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) { |
265 | 0 | OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE); |
266 | 0 | return 0; |
267 | 0 | } |
268 | | |
269 | 4.36k | if (ad_len != 13 - 2 /* length bytes */) { |
270 | 0 | OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE); |
271 | 0 | return 0; |
272 | 0 | } |
273 | | |
274 | | // Configure the explicit IV. |
275 | 4.36k | if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE && |
276 | 4.36k | !tls_ctx->implicit_iv && |
277 | 1.04k | !EVP_DecryptInit_ex(&tls_ctx->cipher_ctx, nullptr, nullptr, nullptr, |
278 | 1.04k | nonce)) { |
279 | 0 | return 0; |
280 | 0 | } |
281 | | |
282 | | // Decrypt to get the plaintext + MAC + padding. |
283 | 4.36k | size_t total = 0; |
284 | 4.36k | size_t len; |
285 | 4.36k | if (!EVP_DecryptUpdate_ex(&tls_ctx->cipher_ctx, out, &len, max_out_len, in, |
286 | 4.36k | in_len)) { |
287 | 0 | return 0; |
288 | 0 | } |
289 | 4.36k | total += len; |
290 | 4.36k | if (!EVP_DecryptFinal_ex2(&tls_ctx->cipher_ctx, out + total, &len, |
291 | 4.36k | max_out_len - total)) { |
292 | 63 | return 0; |
293 | 63 | } |
294 | 4.30k | total += len; |
295 | 4.30k | assert(total == in_len); |
296 | | |
297 | 4.30k | CONSTTIME_SECRET(out, total); |
298 | | |
299 | | // Remove CBC padding. Code from here on is timing-sensitive with respect to |
300 | | // |padding_ok| and |data_plus_mac_len| for CBC ciphers. |
301 | 4.30k | size_t data_plus_mac_len; |
302 | 4.30k | crypto_word_t padding_ok; |
303 | 4.30k | if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) { |
304 | 4.30k | if (!EVP_tls_cbc_remove_padding( |
305 | 4.30k | &padding_ok, &data_plus_mac_len, out, total, |
306 | 4.30k | EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx), |
307 | 4.30k | HMAC_size(tls_ctx->hmac_ctx))) { |
308 | | // Publicly invalid. This can be rejected in non-constant time. |
309 | 0 | OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); |
310 | 0 | return 0; |
311 | 0 | } |
312 | 4.30k | } else { |
313 | 0 | padding_ok = CONSTTIME_TRUE_W; |
314 | 0 | data_plus_mac_len = total; |
315 | | // |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has |
316 | | // already been checked against the MAC size at the top of the function. |
317 | 0 | assert(data_plus_mac_len >= HMAC_size(tls_ctx->hmac_ctx)); |
318 | 0 | } |
319 | 4.30k | size_t data_len = data_plus_mac_len - HMAC_size(tls_ctx->hmac_ctx); |
320 | | |
321 | | // At this point, if the padding is valid, the first |data_plus_mac_len| bytes |
322 | | // after |out| are the plaintext and MAC. Otherwise, |data_plus_mac_len| is |
323 | | // still large enough to extract a MAC, but it will be irrelevant. |
324 | | |
325 | | // To allow for CBC mode which changes cipher length, |ad| doesn't include the |
326 | | // length for legacy ciphers. |
327 | 4.30k | uint8_t ad_fixed[13]; |
328 | 4.30k | OPENSSL_memcpy(ad_fixed, ad, 11); |
329 | 4.30k | ad_fixed[11] = (uint8_t)(data_len >> 8); |
330 | 4.30k | ad_fixed[12] = (uint8_t)(data_len & 0xff); |
331 | 4.30k | ad_len += 2; |
332 | | |
333 | | // Compute the MAC and extract the one in the record. |
334 | 4.30k | uint8_t mac[EVP_MAX_MD_SIZE]; |
335 | 4.30k | size_t mac_len; |
336 | 4.30k | uint8_t record_mac_tmp[EVP_MAX_MD_SIZE]; |
337 | 4.30k | uint8_t *record_mac; |
338 | 4.30k | if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE && |
339 | 4.30k | EVP_tls_cbc_record_digest_supported(tls_ctx->hmac_ctx->md)) { |
340 | 4.30k | if (!EVP_tls_cbc_digest_record(tls_ctx->hmac_ctx->md, mac, &mac_len, |
341 | 4.30k | ad_fixed, out, data_len, total, |
342 | 4.30k | tls_ctx->mac_key, tls_ctx->mac_key_len)) { |
343 | 0 | OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); |
344 | 0 | return 0; |
345 | 0 | } |
346 | 4.30k | assert(mac_len == HMAC_size(tls_ctx->hmac_ctx)); |
347 | | |
348 | 4.30k | record_mac = record_mac_tmp; |
349 | 4.30k | EVP_tls_cbc_copy_mac(record_mac, mac_len, out, data_plus_mac_len, total); |
350 | 4.30k | } else { |
351 | | // We should support the constant-time path for all CBC-mode ciphers |
352 | | // implemented. |
353 | 0 | assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE); |
354 | | |
355 | 0 | unsigned mac_len_u; |
356 | 0 | if (!HMAC_Init_ex(tls_ctx->hmac_ctx, nullptr, 0, nullptr, nullptr) || |
357 | 0 | !HMAC_Update(tls_ctx->hmac_ctx, ad_fixed, ad_len) || |
358 | 0 | !HMAC_Update(tls_ctx->hmac_ctx, out, data_len) || |
359 | 0 | !HMAC_Final(tls_ctx->hmac_ctx, mac, &mac_len_u)) { |
360 | 0 | return 0; |
361 | 0 | } |
362 | 0 | mac_len = mac_len_u; |
363 | |
364 | 0 | assert(mac_len == HMAC_size(tls_ctx->hmac_ctx)); |
365 | 0 | record_mac = &out[data_len]; |
366 | 0 | } |
367 | | |
368 | | // Perform the MAC check and the padding check in constant-time. It should be |
369 | | // safe to simply perform the padding check first, but it would not be under a |
370 | | // different choice of MAC location on padding failure. See |
371 | | // EVP_tls_cbc_remove_padding. |
372 | 4.30k | crypto_word_t good = |
373 | 4.30k | constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len), 0); |
374 | 4.30k | good &= padding_ok; |
375 | 4.30k | CONSTTIME_DECLASSIFY(&good, sizeof(good)); |
376 | 4.30k | if (!good) { |
377 | 232 | OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); |
378 | 232 | return 0; |
379 | 232 | } |
380 | | |
381 | 4.07k | CONSTTIME_DECLASSIFY(&data_len, sizeof(data_len)); |
382 | 4.07k | CONSTTIME_DECLASSIFY(out, data_len); |
383 | | |
384 | | // End of timing-sensitive code. |
385 | | |
386 | 4.07k | *out_len = data_len; |
387 | 4.07k | return 1; |
388 | 4.30k | } |
389 | | |
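
On the receive side, aead_tls_open is reached through EVP_AEAD_CTX_open. It takes the whole encrypted record body (ciphertext, MAC, and padding) and, as the comment above notes, requires |max_out_len| to cover that entire input even though the MAC and padding are stripped before returning. A minimal sketch, assuming a context initialized with evp_aead_open and placeholder sizes:

    // Sketch only: 288 = 256 bytes of plaintext + 20-byte MAC + 12 bytes of padding.
    uint8_t in[288];      // received record body (ciphertext + MAC + padding)
    uint8_t nonce[16];    // explicit IV taken from the record
    uint8_t ad[11];       // seq_num(8) || record type(1) || version(2)
    uint8_t out[sizeof(in)];  // must have room for the MAC and padding too
    size_t out_len;
    if (!EVP_AEAD_CTX_open(&aead_ctx, out, &out_len, sizeof(out),
                           nonce, sizeof(nonce),
                           in, sizeof(in),
                           ad, sizeof(ad))) {
      return 0;  // padding and MAC failures both surface as CIPHER_R_BAD_DECRYPT
    }
    // On success, |out_len| is the plaintext length with MAC and padding removed.
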
390 | | static int aead_aes_128_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key, |
391 | | size_t key_len, size_t tag_len, |
392 | 3.55k | enum evp_aead_direction_t dir) { |
393 | 3.55k | return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(), |
394 | 3.55k | EVP_sha1(), 0); |
395 | 3.55k | } |
396 | | |
397 | | static int aead_aes_128_cbc_sha1_tls_implicit_iv_init( |
398 | | EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len, |
399 | 691 | enum evp_aead_direction_t dir) { |
400 | 691 | return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(), |
401 | 691 | EVP_sha1(), 1); |
402 | 691 | } |
403 | | |
404 | | static int aead_aes_128_cbc_sha256_tls_init(EVP_AEAD_CTX *ctx, |
405 | | const uint8_t *key, size_t key_len, |
406 | | size_t tag_len, |
407 | 0 | enum evp_aead_direction_t dir) { |
408 | 0 | return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(), |
409 | 0 | EVP_sha256(), 0); |
410 | 0 | } |
411 | | |
412 | | static int aead_aes_256_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key, |
413 | | size_t key_len, size_t tag_len, |
414 | 51.1k | enum evp_aead_direction_t dir) { |
415 | 51.1k | return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(), |
416 | 51.1k | EVP_sha1(), 0); |
417 | 51.1k | } |
418 | | |
419 | | static int aead_aes_256_cbc_sha1_tls_implicit_iv_init( |
420 | | EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len, |
421 | 2.26k | enum evp_aead_direction_t dir) { |
422 | 2.26k | return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(), |
423 | 2.26k | EVP_sha1(), 1); |
424 | 2.26k | } |
425 | | |
426 | | static int aead_des_ede3_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, |
427 | | const uint8_t *key, size_t key_len, |
428 | | size_t tag_len, |
429 | 2.86k | enum evp_aead_direction_t dir) { |
430 | 2.86k | return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(), |
431 | 2.86k | EVP_sha1(), 0); |
432 | 2.86k | } |
433 | | |
434 | | static int aead_des_ede3_cbc_sha1_tls_implicit_iv_init( |
435 | | EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len, |
436 | 141 | enum evp_aead_direction_t dir) { |
437 | 141 | return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(), |
438 | 141 | EVP_sha1(), 1); |
439 | 141 | } |
440 | | |
441 | | static int aead_tls_get_iv(const EVP_AEAD_CTX *ctx, const uint8_t **out_iv, |
442 | 0 | size_t *out_iv_len) { |
443 | 0 | const AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)&ctx->state; |
444 | 0 | const size_t iv_len = EVP_CIPHER_CTX_iv_length(&tls_ctx->cipher_ctx); |
445 | 0 | if (iv_len <= 1) { |
446 | 0 | OPENSSL_PUT_ERROR(CIPHER, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); |
447 | 0 | return 0; |
448 | 0 | } |
449 | | |
450 | 0 | *out_iv = tls_ctx->cipher_ctx.iv; |
451 | 0 | *out_iv_len = iv_len; |
452 | 0 | return 1; |
453 | 0 | } |
454 | | |
455 | | static const EVP_AEAD aead_aes_128_cbc_sha1_tls = { |
456 | | SHA_DIGEST_LENGTH + 16, // key len (SHA1 + AES128) |
457 | | 16, // nonce len (IV) |
458 | | 16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) |
459 | | SHA_DIGEST_LENGTH, // max tag length |
460 | | 0, // seal_scatter_supports_extra_in |
461 | | |
462 | | nullptr, // init |
463 | | aead_aes_128_cbc_sha1_tls_init, |
464 | | aead_tls_cleanup, |
465 | | aead_tls_open, |
466 | | aead_tls_seal_scatter, |
467 | | nullptr, // open_gather |
468 | | nullptr, // get_iv |
469 | | aead_tls_tag_len, |
470 | | }; |
471 | | |
472 | | static const EVP_AEAD aead_aes_128_cbc_sha1_tls_implicit_iv = { |
473 | | SHA_DIGEST_LENGTH + 16 + 16, // key len (SHA1 + AES128 + IV) |
474 | | 0, // nonce len |
475 | | 16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) |
476 | | SHA_DIGEST_LENGTH, // max tag length |
477 | | 0, // seal_scatter_supports_extra_in |
478 | | |
479 | | nullptr, // init |
480 | | aead_aes_128_cbc_sha1_tls_implicit_iv_init, |
481 | | aead_tls_cleanup, |
482 | | aead_tls_open, |
483 | | aead_tls_seal_scatter, |
484 | | nullptr, // open_gather |
485 | | aead_tls_get_iv, // get_iv |
486 | | aead_tls_tag_len, |
487 | | }; |
488 | | |
489 | | static const EVP_AEAD aead_aes_128_cbc_sha256_tls = { |
490 | | SHA256_DIGEST_LENGTH + 16, // key len (SHA256 + AES128) |
491 | | 16, // nonce len (IV) |
492 | | 16 + SHA256_DIGEST_LENGTH, // overhead (padding + SHA256) |
493 | | SHA256_DIGEST_LENGTH, // max tag length |
494 | | 0, // seal_scatter_supports_extra_in |
495 | | |
496 | | nullptr, // init |
497 | | aead_aes_128_cbc_sha256_tls_init, |
498 | | aead_tls_cleanup, |
499 | | aead_tls_open, |
500 | | aead_tls_seal_scatter, |
501 | | nullptr, // open_gather |
502 | | nullptr, // get_iv |
503 | | aead_tls_tag_len, |
504 | | }; |
505 | | |
506 | | static const EVP_AEAD aead_aes_256_cbc_sha1_tls = { |
507 | | SHA_DIGEST_LENGTH + 32, // key len (SHA1 + AES256) |
508 | | 16, // nonce len (IV) |
509 | | 16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) |
510 | | SHA_DIGEST_LENGTH, // max tag length |
511 | | 0, // seal_scatter_supports_extra_in |
512 | | |
513 | | nullptr, // init |
514 | | aead_aes_256_cbc_sha1_tls_init, |
515 | | aead_tls_cleanup, |
516 | | aead_tls_open, |
517 | | aead_tls_seal_scatter, |
518 | | nullptr, // open_gather |
519 | | nullptr, // get_iv |
520 | | aead_tls_tag_len, |
521 | | }; |
522 | | |
523 | | static const EVP_AEAD aead_aes_256_cbc_sha1_tls_implicit_iv = { |
524 | | SHA_DIGEST_LENGTH + 32 + 16, // key len (SHA1 + AES256 + IV) |
525 | | 0, // nonce len |
526 | | 16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) |
527 | | SHA_DIGEST_LENGTH, // max tag length |
528 | | 0, // seal_scatter_supports_extra_in |
529 | | |
530 | | nullptr, // init |
531 | | aead_aes_256_cbc_sha1_tls_implicit_iv_init, |
532 | | aead_tls_cleanup, |
533 | | aead_tls_open, |
534 | | aead_tls_seal_scatter, |
535 | | nullptr, // open_gather |
536 | | aead_tls_get_iv, // get_iv |
537 | | aead_tls_tag_len, |
538 | | }; |
539 | | |
540 | | static const EVP_AEAD aead_des_ede3_cbc_sha1_tls = { |
541 | | SHA_DIGEST_LENGTH + 24, // key len (SHA1 + 3DES) |
542 | | 8, // nonce len (IV) |
543 | | 8 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) |
544 | | SHA_DIGEST_LENGTH, // max tag length |
545 | | 0, // seal_scatter_supports_extra_in |
546 | | |
547 | | nullptr, // init |
548 | | aead_des_ede3_cbc_sha1_tls_init, |
549 | | aead_tls_cleanup, |
550 | | aead_tls_open, |
551 | | aead_tls_seal_scatter, |
552 | | nullptr, // open_gather |
553 | | nullptr, // get_iv |
554 | | aead_tls_tag_len, |
555 | | }; |
556 | | |
557 | | static const EVP_AEAD aead_des_ede3_cbc_sha1_tls_implicit_iv = { |
558 | | SHA_DIGEST_LENGTH + 24 + 8, // key len (SHA1 + 3DES + IV) |
559 | | 0, // nonce len |
560 | | 8 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) |
561 | | SHA_DIGEST_LENGTH, // max tag length |
562 | | 0, // seal_scatter_supports_extra_in |
563 | | |
564 | | nullptr, // init |
565 | | aead_des_ede3_cbc_sha1_tls_implicit_iv_init, |
566 | | aead_tls_cleanup, |
567 | | aead_tls_open, |
568 | | aead_tls_seal_scatter, |
569 | | nullptr, // open_gather |
570 | | aead_tls_get_iv, // get_iv |
571 | | aead_tls_tag_len, |
572 | | }; |
573 | | |
574 | 7.11k | const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls(void) { |
575 | 7.11k | return &aead_aes_128_cbc_sha1_tls; |
576 | 7.11k | } |
577 | | |
578 | 1.38k | const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls_implicit_iv(void) { |
579 | 1.38k | return &aead_aes_128_cbc_sha1_tls_implicit_iv; |
580 | 1.38k | } |
581 | | |
582 | 0 | const EVP_AEAD *EVP_aead_aes_128_cbc_sha256_tls(void) { |
583 | 0 | return &aead_aes_128_cbc_sha256_tls; |
584 | 0 | } |
585 | | |
586 | 102k | const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls(void) { |
587 | 102k | return &aead_aes_256_cbc_sha1_tls; |
588 | 102k | } |
589 | | |
590 | 4.53k | const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls_implicit_iv(void) { |
591 | 4.53k | return &aead_aes_256_cbc_sha1_tls_implicit_iv; |
592 | 4.53k | } |
593 | | |
594 | 5.72k | const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls(void) { |
595 | 5.72k | return &aead_des_ede3_cbc_sha1_tls; |
596 | 5.72k | } |
597 | | |
598 | 282 | const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv(void) { |
599 | 282 | return &aead_des_ede3_cbc_sha1_tls_implicit_iv; |
600 | 282 | } |
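
The constants hard-coded in the EVP_AEAD tables above are what the generic accessors report, so callers can size keys and buffers without consulting this file. A small sketch using the 3DES suite as an arbitrary example, with the expected values taken from aead_des_ede3_cbc_sha1_tls:

    const EVP_AEAD *aead = EVP_aead_des_ede3_cbc_sha1_tls();
    size_t key_len = EVP_AEAD_key_length(aead);      // 20 + 24 = 44 (SHA-1 + 3DES)
    size_t nonce_len = EVP_AEAD_nonce_length(aead);  // 8 (DES block size / explicit IV)
    size_t overhead = EVP_AEAD_max_overhead(aead);   // 8 + 20 = 28 (padding + MAC)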