/src/spdm-rs/external/ring/src/aead/aes_gcm.rs
Line | Count | Source |
1 | | // Copyright 2015-2025 Brian Smith. |
2 | | // |
3 | | // Permission to use, copy, modify, and/or distribute this software for any |
4 | | // purpose with or without fee is hereby granted, provided that the above |
5 | | // copyright notice and this permission notice appear in all copies. |
6 | | // |
7 | | // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
8 | | // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
9 | | // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY |
10 | | // SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
11 | | // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION |
12 | | // OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN |
13 | | // CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
14 | | |
15 | | use super::{ |
16 | | aes::{self, Counter, Overlapping, OverlappingPartialBlock, BLOCK_LEN, ZERO_BLOCK}, |
17 | | gcm, |
18 | | overlapping::IndexError, |
19 | | Aad, Nonce, Tag, |
20 | | }; |
21 | | use crate::{ |
22 | | cpu, |
23 | | error::{self, InputTooLongError}, |
24 | | polyfill::{slice, sliceutil::overwrite_at_start, usize_from_u64_saturated}, |
25 | | }; |
26 | | use core::ops::RangeFrom; |
27 | | |
28 | | #[cfg(any( |
29 | | all(target_arch = "aarch64", target_endian = "little"), |
30 | | all(target_arch = "arm", target_endian = "little"), |
31 | | target_arch = "x86", |
32 | | target_arch = "x86_64" |
33 | | ))] |
34 | | use cpu::GetFeature as _; |
35 | | |
36 | | mod aarch64; |
37 | | mod aeshwclmulmovbe; |
38 | | mod vaesclmulavx2; |
39 | | |
40 | | #[derive(Clone)] |
41 | | pub(super) struct Key(DynKey); |
42 | | |
43 | | impl Key { |
44 | 0 | pub(super) fn new( |
45 | 0 | key: aes::KeyBytes, |
46 | 0 | cpu_features: cpu::Features, |
47 | 0 | ) -> Result<Self, error::Unspecified> { |
48 | 0 | Ok(Self(DynKey::new(key, cpu_features)?)) |
49 | 0 | } |
50 | | } |
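This constructor shows zero executions in this report. A minimal way to exercise it is through ring's public AEAD API, which routes key construction down to `aes_gcm::Key::new`. A hedged round-trip sketch (test-style; the fixed key and nonce are for illustration only):

    use ring::aead::{Aad, LessSafeKey, Nonce, UnboundKey, AES_256_GCM};

    fn roundtrip() -> Result<(), ring::error::Unspecified> {
        // UnboundKey::new reaches aes_gcm::Key::new above for the AES-GCM algorithms.
        let key = LessSafeKey::new(UnboundKey::new(&AES_256_GCM, &[0u8; 32])?);
        let mut in_out = b"hello".to_vec();
        key.seal_in_place_append_tag(
            Nonce::assume_unique_for_key([0u8; 12]),
            Aad::empty(),
            &mut in_out,
        )?;
        // Reusing the nonce here is fine only because it is the same message.
        let plaintext =
            key.open_in_place(Nonce::assume_unique_for_key([0u8; 12]), Aad::empty(), &mut in_out)?;
        assert_eq!(plaintext, b"hello");
        Ok(())
    }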
51 | | |
52 | | #[derive(Clone)] |
53 | | enum DynKey { |
54 | | #[cfg(target_arch = "x86_64")] |
55 | | VAesClMulAvx2(Combo<aes::hw::Key, gcm::vclmulavx2::Key>), |
56 | | |
57 | | #[cfg(target_arch = "x86_64")] |
58 | | AesHwClMulAvxMovbe(Combo<aes::hw::Key, gcm::clmulavxmovbe::Key>), |
59 | | |
60 | | #[cfg(any( |
61 | | all(target_arch = "aarch64", target_endian = "little"), |
62 | | target_arch = "x86", |
63 | | target_arch = "x86_64" |
64 | | ))] |
65 | | AesHwClMul(Combo<aes::hw::Key, gcm::clmul::Key>), |
66 | | |
67 | | #[cfg(any( |
68 | | all(target_arch = "aarch64", target_endian = "little"), |
69 | | all(target_arch = "arm", target_endian = "little") |
70 | | ))] |
71 | | Simd(Combo<aes::vp::Key, gcm::neon::Key>), |
72 | | |
73 | | #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] |
74 | | Simd(Combo<aes::vp::Key, gcm::fallback::Key>), |
75 | | |
76 | | Fallback(Combo<aes::fallback::Key, gcm::fallback::Key>), |
77 | | } |
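The variants are ordered fastest-first, and `DynKey::new` below selects the first one whose required CPU features are present at runtime. A standalone sketch of the same dispatch pattern using std's detection macro rather than ring's internal `cpu` module (the returned strings mirror the variant names above, but the function itself is illustrative):

    #[cfg(target_arch = "x86_64")]
    fn pick_impl() -> &'static str {
        // Probe the richest feature set first, mirroring the enum order.
        if std::arch::is_x86_feature_detected!("vaes")
            && std::arch::is_x86_feature_detected!("vpclmulqdq")
            && std::arch::is_x86_feature_detected!("avx2")
        {
            "VAesClMulAvx2"
        } else if std::arch::is_x86_feature_detected!("aes")
            && std::arch::is_x86_feature_detected!("pclmulqdq")
        {
            "AesHwClMul"
        } else if std::arch::is_x86_feature_detected!("ssse3") {
            "Simd"
        } else {
            "Fallback"
        }
    }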
78 | | |
79 | | impl DynKey { |
80 | 0 | fn new(key: aes::KeyBytes, cpu: cpu::Features) -> Result<Self, error::Unspecified> { |
81 | 0 | let cpu = cpu.values(); |
82 | | |
83 | | #[cfg(target_arch = "x86_64")] |
84 | 0 | if let Some((aes, gcm)) = cpu.get_feature() { |
85 | | // 14.3.1 Detection of VEX-Encoded AES and VPCLMULQDQ |
86 | 0 | let aes_key = aes::hw::Key::new(key, aes, cpu.get_feature())?; |
87 | 0 | let gcm_key_value = derive_gcm_key_value(&aes_key); |
88 | 0 | let combo = if let Some(cpu) = cpu.get_feature() { |
89 | 0 | let gcm_key = gcm::vclmulavx2::Key::new(gcm_key_value, cpu); |
90 | 0 | Self::VAesClMulAvx2(Combo { aes_key, gcm_key }) |
91 | 0 | } else if let Some(cpu) = cpu.get_feature() { |
92 | 0 | let gcm_key = gcm::clmulavxmovbe::Key::new(gcm_key_value, cpu); |
93 | 0 | Self::AesHwClMulAvxMovbe(Combo { aes_key, gcm_key }) |
94 | | } else { |
95 | 0 | let gcm_key = gcm::clmul::Key::new(gcm_key_value, gcm); |
96 | 0 | Self::AesHwClMul(Combo { aes_key, gcm_key }) |
97 | | }; |
98 | 0 | return Ok(combo); |
99 | 0 | } |
100 | | |
101 | | // x86_64 is handled above. |
102 | | #[cfg(any( |
103 | | all(target_arch = "aarch64", target_endian = "little"), |
104 | | target_arch = "x86" |
105 | | ))] |
106 | | if let (Some(aes), Some(gcm)) = (cpu.get_feature(), cpu.get_feature()) { |
107 | | let aes_key = aes::hw::Key::new(key, aes, cpu.get_feature())?; |
108 | | let gcm_key_value = derive_gcm_key_value(&aes_key); |
109 | | let gcm_key = gcm::clmul::Key::new(gcm_key_value, gcm); |
110 | | return Ok(Self::AesHwClMul(Combo { aes_key, gcm_key })); |
111 | | } |
112 | | |
113 | | #[cfg(any( |
114 | | all(target_arch = "aarch64", target_endian = "little"), |
115 | | all(target_arch = "arm", target_endian = "little") |
116 | | ))] |
117 | | if let Some(cpu) = cpu.get_feature() { |
118 | | return Self::new_neon(key, cpu); |
119 | | } |
120 | | |
121 | | #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] |
122 | 0 | if let Some(cpu) = cpu.get_feature() { |
123 | 0 | return Self::new_ssse3(key, cpu); |
124 | 0 | } |
125 | 0 |
126 | 0 | let _ = cpu; |
127 | 0 | Self::new_fallback(key) |
128 | 0 | } |
129 | | |
130 | | #[cfg(any( |
131 | | all(target_arch = "aarch64", target_endian = "little"), |
132 | | all(target_arch = "arm", target_endian = "little") |
133 | | ))] |
134 | | #[cfg_attr(target_arch = "aarch64", inline(never))] |
135 | | fn new_neon(key: aes::KeyBytes, cpu: cpu::arm::Neon) -> Result<Self, error::Unspecified> { |
136 | | let aes_key = aes::vp::Key::new(key, cpu)?; |
137 | | let gcm_key_value = derive_gcm_key_value(&aes_key); |
138 | | let gcm_key = gcm::neon::Key::new(gcm_key_value, cpu); |
139 | | Ok(Self::Simd(Combo { aes_key, gcm_key })) |
140 | | } |
141 | | |
142 | | #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] |
143 | | #[inline(never)] |
144 | 0 | fn new_ssse3( |
145 | 0 | key: aes::KeyBytes, |
146 | 0 | cpu: aes::vp::RequiredCpuFeatures, |
147 | 0 | ) -> Result<Self, error::Unspecified> { |
148 | 0 | let aes_key = aes::vp::Key::new(key, cpu)?; |
149 | 0 | let gcm_key_value = derive_gcm_key_value(&aes_key); |
150 | 0 | let gcm_key = gcm::fallback::Key::new(gcm_key_value); |
151 | 0 | Ok(Self::Simd(Combo { aes_key, gcm_key })) |
152 | 0 | } |
153 | | |
154 | | #[cfg_attr( |
155 | | any( |
156 | | all(target_arch = "aarch64", target_endian = "little"), |
157 | | all(target_arch = "arm", target_endian = "little"), |
158 | | target_arch = "x86", |
159 | | target_arch = "x86_64", |
160 | | ), |
161 | | inline(never) |
162 | | )] |
163 | 0 | fn new_fallback(key: aes::KeyBytes) -> Result<Self, error::Unspecified> { |
164 | 0 | let aes_key = aes::fallback::Key::new(key)?; |
165 | 0 | let gcm_key_value = derive_gcm_key_value(&aes_key); |
166 | 0 | let gcm_key = gcm::fallback::Key::new(gcm_key_value); |
167 | 0 | Ok(Self::Fallback(Combo { aes_key, gcm_key })) |
168 | 0 | } |
169 | | } |
170 | | |
171 | 0 | fn derive_gcm_key_value(aes_key: &impl aes::EncryptBlock) -> gcm::KeyValue { |
172 | 0 | gcm::KeyValue::new(aes_key.encrypt_block(ZERO_BLOCK)) |
173 | 0 | }
Unexecuted instantiation: ring::aead::aes_gcm::derive_gcm_key_value::<ring::aead::aes::hw::Key>
Unexecuted instantiation: ring::aead::aes_gcm::derive_gcm_key_value::<ring::aead::aes::vp::Key>
Unexecuted instantiation: ring::aead::aes_gcm::derive_gcm_key_value::<ring::aead::aes::fallback::Key>
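`derive_gcm_key_value` is GCM's standard subkey derivation: the hash key is the block-cipher encryption of the all-zero block, H = E_K(0^128). A hedged sketch of the same computation using the separate RustCrypto `aes` crate (an assumption made for illustration; ring does not use that crate):

    use aes::cipher::{generic_array::GenericArray, BlockEncrypt, KeyInit};

    fn gcm_hash_key(key: &[u8; 16]) -> [u8; 16] {
        let cipher = aes::Aes128::new(GenericArray::from_slice(key));
        let mut h = GenericArray::from([0u8; 16]); // the all-zero block
        cipher.encrypt_block(&mut h); // H = E_K(0^128)
        h.into()
    }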
174 | | |
175 | | const CHUNK_BLOCKS: usize = 3 * 1024 / 16; |
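For reference, `CHUNK_BLOCKS = 3 * 1024 / 16 = 192` sixteen-byte blocks, so each pass of the strided seal/open loops below processes at most 3 KiB of data before handing the blocks to GHASH.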
176 | | |
177 | | #[inline(never)] |
178 | 0 | pub(super) fn seal( |
179 | 0 | Key(key): &Key, |
180 | 0 | nonce: Nonce, |
181 | 0 | aad: Aad<&[u8]>, |
182 | 0 | in_out: &mut [u8], |
183 | 0 | ) -> Result<Tag, error::Unspecified> { |
184 | 0 | let mut ctr = Counter::one(nonce); |
185 | 0 | let tag_iv = ctr.increment(); |
186 | 0 |
187 | 0 | match key { |
188 | | #[cfg(all(target_arch = "aarch64", target_endian = "little"))] |
189 | | DynKey::AesHwClMul(c) => { |
190 | | seal_whole_partial(c, aad, in_out, ctr, tag_iv, aarch64::seal_whole) |
191 | | } |
192 | | |
193 | | #[cfg(target_arch = "x86_64")] |
194 | 0 | DynKey::VAesClMulAvx2(c) => seal_whole_partial( |
195 | 0 | c, |
196 | 0 | aad, |
197 | 0 | in_out, |
198 | 0 | ctr, |
199 | 0 | tag_iv, |
200 | 0 | vaesclmulavx2::seal_whole_vaes_clmul_avx2, |
201 | 0 | ), |
202 | | |
203 | | #[cfg(target_arch = "x86_64")] |
204 | 0 | DynKey::AesHwClMulAvxMovbe(Combo { aes_key, gcm_key }) => { |
205 | 0 | aeshwclmulmovbe::seal(aes_key, gcm_key, ctr, tag_iv, aad, in_out) |
206 | | } |
207 | | |
208 | | #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] |
209 | 0 | DynKey::AesHwClMul(c) => seal_strided(c, aad, in_out, ctr, tag_iv), |
210 | | |
211 | | #[cfg(any( |
212 | | all(target_arch = "aarch64", target_endian = "little"), |
213 | | all(target_arch = "arm", target_endian = "little"), |
214 | | target_arch = "x86_64", |
215 | | target_arch = "x86" |
216 | | ))] |
217 | 0 | DynKey::Simd(c) => seal_strided(c, aad, in_out, ctr, tag_iv), |
218 | | |
219 | 0 | DynKey::Fallback(c) => seal_strided(c, aad, in_out, ctr, tag_iv), |
220 | | } |
221 | 0 | } |
222 | | |
223 | | #[cfg(any( |
224 | | all(target_arch = "aarch64", target_endian = "little"), |
225 | | target_arch = "x86_64" |
226 | | ))] |
227 | 0 | fn seal_whole_partial<A: aes::EncryptBlock, G: gcm::UpdateBlock>( |
228 | 0 | Combo { aes_key, gcm_key }: &Combo<A, G>, |
229 | 0 | aad: Aad<&[u8]>, |
230 | 0 | in_out: &mut [u8], |
231 | 0 | mut ctr: Counter, |
232 | 0 | tag_iv: aes::Iv, |
233 | 0 | seal_whole: impl FnOnce(&A, &mut gcm::Context<G>, &mut Counter, slice::AsChunksMut<u8, BLOCK_LEN>), |
234 | 0 | ) -> Result<Tag, error::Unspecified> { |
235 | 0 | let mut auth = gcm::Context::new(gcm_key, aad, in_out.len())?; |
236 | 0 | let (whole, remainder) = slice::as_chunks_mut(in_out); |
237 | 0 | seal_whole(aes_key, &mut auth, &mut ctr, whole); |
238 | 0 | let remainder = OverlappingPartialBlock::new(remainder.into()) |
239 | 0 | .unwrap_or_else(|InputTooLongError { .. }| unreachable!()); |
240 | 0 | seal_finish(aes_key, auth, remainder, ctr, tag_iv) |
241 | 0 | } |
242 | | |
243 | | #[cfg_attr( |
244 | | any( |
245 | | all(target_arch = "aarch64", target_endian = "little"), |
246 | | all(target_arch = "arm", target_endian = "little"), |
247 | | target_arch = "x86", |
248 | | target_arch = "x86_64" |
249 | | ), |
250 | | inline(never) |
251 | | )] |
252 | | #[cfg_attr( |
253 | | any( |
254 | | all(target_arch = "aarch64", target_endian = "little"), |
255 | | target_arch = "x86_64" |
256 | | ), |
257 | | cold |
258 | | )] |
259 | 0 | fn seal_strided< |
260 | 0 | A: aes::EncryptBlock + aes::EncryptCtr32, |
261 | 0 | G: gcm::UpdateBlock + gcm::UpdateBlocks, |
262 | 0 | >( |
263 | 0 | Combo { aes_key, gcm_key }: &Combo<A, G>, |
264 | 0 | aad: Aad<&[u8]>, |
265 | 0 | in_out: &mut [u8], |
266 | 0 | mut ctr: Counter, |
267 | 0 | tag_iv: aes::Iv, |
268 | 0 | ) -> Result<Tag, error::Unspecified> { |
269 | 0 | let mut auth = gcm::Context::new(gcm_key, aad, in_out.len())?; |
270 | | |
271 | 0 | let (mut whole, remainder) = slice::as_chunks_mut(in_out); |
272 | | |
273 | 0 | for mut chunk in whole.chunks_mut::<CHUNK_BLOCKS>() { |
274 | 0 | aes_key.ctr32_encrypt_within(chunk.as_flattened_mut().into(), &mut ctr); |
275 | 0 | auth.update_blocks(chunk.as_ref()); |
276 | 0 | } |
277 | | |
278 | 0 | let remainder = OverlappingPartialBlock::new(remainder.into()) |
279 | 0 | .unwrap_or_else(|InputTooLongError { .. }| unreachable!()); |
280 | 0 | seal_finish(aes_key, auth, remainder, ctr, tag_iv) |
281 | 0 | } Unexecuted instantiation: ring::aead::aes_gcm::seal_strided::<ring::aead::aes::hw::Key, ring::aead::gcm::clmul::Key>
Unexecuted instantiation: ring::aead::aes_gcm::seal_strided::<ring::aead::aes::vp::Key, ring::aead::gcm::fallback::Key>
Unexecuted instantiation: ring::aead::aes_gcm::seal_strided::<ring::aead::aes::fallback::Key, ring::aead::gcm::fallback::Key>
282 | | |
283 | 0 | fn seal_finish<A: aes::EncryptBlock, G: gcm::UpdateBlock>( |
284 | 0 | aes_key: &A, |
285 | 0 | mut auth: gcm::Context<G>, |
286 | 0 | remainder: OverlappingPartialBlock<'_>, |
287 | 0 | ctr: Counter, |
288 | 0 | tag_iv: aes::Iv, |
289 | 0 | ) -> Result<Tag, error::Unspecified> { |
290 | 0 | let remainder_len = remainder.len(); |
291 | 0 | if remainder_len > 0 { |
292 | 0 | let mut input = ZERO_BLOCK; |
293 | 0 | overwrite_at_start(&mut input, remainder.input()); |
294 | 0 | let mut output = aes_key.encrypt_iv_xor_block(ctr.into(), input); |
295 | 0 | output[remainder_len..].fill(0); |
296 | 0 | auth.update_block(output); |
297 | 0 | remainder.overwrite_at_start(output); |
298 | 0 | } |
299 | | |
300 | 0 | Ok(finish(aes_key, auth, tag_iv)) |
301 | 0 | } Unexecuted instantiation: ring::aead::aes_gcm::seal_finish::<ring::aead::aes::hw::Key, ring::aead::gcm::vclmulavx2::Key>
Unexecuted instantiation: ring::aead::aes_gcm::seal_finish::<ring::aead::aes::hw::Key, ring::aead::gcm::clmulavxmovbe::Key>
Unexecuted instantiation: ring::aead::aes_gcm::seal_finish::<ring::aead::aes::hw::Key, ring::aead::gcm::clmul::Key>
Unexecuted instantiation: ring::aead::aes_gcm::seal_finish::<ring::aead::aes::vp::Key, ring::aead::gcm::fallback::Key>
Unexecuted instantiation: ring::aead::aes_gcm::seal_finish::<ring::aead::aes::fallback::Key, ring::aead::gcm::fallback::Key>
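`seal_finish` handles a trailing partial block per GCM: zero-pad the plaintext to 16 bytes, XOR in one keystream block, then zero the tail again so GHASH absorbs the ciphertext padded with zeros, and emit only the real ciphertext bytes. A self-contained sketch of just that rule, with a made-up `keystream` argument standing in for the E_K(ctr) block that `encrypt_iv_xor_block` produces:

    const BLOCK_LEN: usize = 16;

    fn seal_partial(keystream: [u8; BLOCK_LEN], plaintext: &[u8]) -> ([u8; BLOCK_LEN], usize) {
        assert!(plaintext.len() < BLOCK_LEN);
        let mut block = [0u8; BLOCK_LEN];
        block[..plaintext.len()].copy_from_slice(plaintext); // zero-padded input
        for (b, k) in block.iter_mut().zip(keystream) {
            *b ^= k; // CTR-encrypt the padded block
        }
        block[plaintext.len()..].fill(0); // GHASH must see C || 0^pad, not raw keystream
        (block, plaintext.len()) // hash `block`; only the first `len` bytes are output
    }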
302 | | |
303 | | #[inline(never)] |
304 | 0 | pub(super) fn open( |
305 | 0 | Key(key): &Key, |
306 | 0 | nonce: Nonce, |
307 | 0 | aad: Aad<&[u8]>, |
308 | 0 | in_out_slice: &mut [u8], |
309 | 0 | src: RangeFrom<usize>, |
310 | 0 | ) -> Result<Tag, error::Unspecified> { |
311 | 0 | let mut ctr = Counter::one(nonce); |
312 | 0 | let tag_iv = ctr.increment(); |
313 | 0 |
314 | 0 | match key { |
315 | | #[cfg(all(target_arch = "aarch64", target_endian = "little"))] |
316 | | DynKey::AesHwClMul(c) => { |
317 | | open_whole_partial(c, aad, in_out_slice, src, ctr, tag_iv, aarch64::open_whole) |
318 | | } |
319 | | |
320 | | #[cfg(target_arch = "x86_64")] |
321 | 0 | DynKey::VAesClMulAvx2(c) => open_whole_partial( |
322 | 0 | c, |
323 | 0 | aad, |
324 | 0 | in_out_slice, |
325 | 0 | src, |
326 | 0 | ctr, |
327 | 0 | tag_iv, |
328 | 0 | vaesclmulavx2::open_whole_vaes_clmul_avx2, |
329 | 0 | ), |
330 | | |
331 | | #[cfg(target_arch = "x86_64")] |
332 | 0 | DynKey::AesHwClMulAvxMovbe(Combo { aes_key, gcm_key }) => { |
333 | 0 | aeshwclmulmovbe::open(aes_key, gcm_key, ctr, tag_iv, aad, in_out_slice, src) |
334 | | } |
335 | | |
336 | | #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] |
337 | 0 | DynKey::AesHwClMul(c) => open_strided(c, aad, in_out_slice, src, ctr, tag_iv), |
338 | | |
339 | | #[cfg(any( |
340 | | all(target_arch = "aarch64", target_endian = "little"), |
341 | | all(target_arch = "arm", target_endian = "little"), |
342 | | target_arch = "x86_64", |
343 | | target_arch = "x86" |
344 | | ))] |
345 | 0 | DynKey::Simd(c) => open_strided(c, aad, in_out_slice, src, ctr, tag_iv), |
346 | | |
347 | 0 | DynKey::Fallback(c) => open_strided(c, aad, in_out_slice, src, ctr, tag_iv), |
348 | | } |
349 | 0 | } |
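The `src: RangeFrom<usize>` parameter lets the ciphertext start at an offset within `in_out_slice` while the plaintext is written back to the start. ring exposes this at the public API as `open_within`; a hedged sketch (the 5-byte header offset is illustrative):

    use ring::aead::{Aad, LessSafeKey, Nonce};

    fn open_shifted(key: &LessSafeKey, buf: &mut Vec<u8>) -> Result<(), ring::error::Unspecified> {
        // Suppose bytes 0..5 are an already-parsed header and ciphertext||tag
        // begins at offset 5. `open_within` decrypts in place and shifts the
        // plaintext to buf[0..], overwriting the header.
        let nonce = Nonce::assume_unique_for_key([0u8; 12]);
        let plaintext_len = key.open_within(nonce, Aad::empty(), buf, 5..)?.len();
        buf.truncate(plaintext_len);
        Ok(())
    }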
350 | | |
351 | | #[cfg(any( |
352 | | all(target_arch = "aarch64", target_endian = "little"), |
353 | | target_arch = "x86_64" |
354 | | ))] |
355 | 0 | fn open_whole_partial<A: aes::EncryptBlock, G: gcm::UpdateBlock>( |
356 | 0 | Combo { aes_key, gcm_key }: &Combo<A, G>, |
357 | 0 | aad: Aad<&[u8]>, |
358 | 0 | in_out_slice: &mut [u8], |
359 | 0 | src: RangeFrom<usize>, |
360 | 0 | mut ctr: Counter, |
361 | 0 | tag_iv: aes::Iv, |
362 | 0 | open_whole: impl FnOnce(&A, &mut gcm::Context<G>, Overlapping, &mut Counter), |
363 | 0 | ) -> Result<Tag, error::Unspecified> { |
364 | 0 | let in_out = Overlapping::new(in_out_slice, src.clone()).map_err(error::erase::<IndexError>)?; |
365 | 0 | let mut auth = gcm::Context::new(gcm_key, aad, in_out.len())?; |
366 | | |
367 | 0 | let remainder_len = in_out.len() % BLOCK_LEN; |
368 | 0 |
369 | 0 | let in_out_slice_len = in_out_slice.len(); |
370 | 0 | let whole_in_out_slice = &mut in_out_slice[..(in_out_slice_len - remainder_len)]; |
371 | 0 | let whole = Overlapping::new(whole_in_out_slice, src.clone()) |
372 | 0 | .unwrap_or_else(|IndexError { .. }| unreachable!()); |
373 | 0 | let whole_len = whole.len(); |
374 | 0 | open_whole(aes_key, &mut auth, whole, &mut ctr); |
375 | 0 |
376 | 0 | let remainder = &mut in_out_slice[whole_len..]; |
377 | 0 | let remainder = |
378 | 0 | Overlapping::new(remainder, src).unwrap_or_else(|IndexError { .. }| unreachable!()); |
379 | 0 | let remainder = OverlappingPartialBlock::new(remainder) |
380 | 0 | .unwrap_or_else(|InputTooLongError { .. }| unreachable!()); |
381 | 0 | open_finish(aes_key, auth, remainder, ctr, tag_iv) |
382 | 0 | } |
383 | | |
384 | | #[cfg_attr( |
385 | | any( |
386 | | all( |
387 | | any( |
388 | | all(target_arch = "aarch64", target_endian = "little"), |
389 | | all(target_arch = "arm", target_endian = "little") |
390 | | ), |
391 | | target_feature = "neon" |
392 | | ), |
393 | | all( |
394 | | any(target_arch = "x86", target_arch = "x86_64"), |
395 | | target_feature = "sse" |
396 | | ) |
397 | | ), |
398 | | inline(never) |
399 | | )] |
400 | | #[cfg_attr( |
401 | | any( |
402 | | all(target_arch = "aarch64", target_endian = "little"), |
403 | | target_arch = "x86_64" |
404 | | ), |
405 | | cold |
406 | | )] |
407 | 0 | fn open_strided< |
408 | 0 | A: aes::EncryptBlock + aes::EncryptCtr32, |
409 | 0 | G: gcm::UpdateBlock + gcm::UpdateBlocks, |
410 | 0 | >( |
411 | 0 | Combo { aes_key, gcm_key }: &Combo<A, G>, |
412 | 0 | aad: Aad<&[u8]>, |
413 | 0 | in_out_slice: &mut [u8], |
414 | 0 | src: RangeFrom<usize>, |
415 | 0 | mut ctr: Counter, |
416 | 0 | tag_iv: aes::Iv, |
417 | 0 | ) -> Result<Tag, error::Unspecified> { |
418 | 0 | let in_out = Overlapping::new(in_out_slice, src.clone()).map_err(error::erase::<IndexError>)?; |
419 | 0 | let input = in_out.input(); |
420 | 0 | let input_len = input.len(); |
421 | | |
422 | 0 | let mut auth = gcm::Context::new(gcm_key, aad, input_len)?; |
423 | | |
424 | 0 | let remainder_len = input_len % BLOCK_LEN; |
425 | 0 | let whole_len = input_len - remainder_len; |
426 | 0 | let in_prefix_len = src.start; |
427 | 0 |
428 | 0 | { |
429 | 0 | let mut chunk_len = CHUNK_BLOCKS * BLOCK_LEN; |
430 | 0 | let mut output = 0; |
431 | 0 | let mut input = in_prefix_len; |
432 | | loop { |
433 | 0 | if whole_len - output < chunk_len { |
434 | 0 | chunk_len = whole_len - output; |
435 | 0 | } |
436 | | |
437 | 0 | let ciphertext = &in_out_slice[input..][..chunk_len]; |
438 | 0 | let (ciphertext, leftover) = slice::as_chunks(ciphertext); |
439 | 0 | debug_assert_eq!(leftover.len(), 0); |
440 | 0 | if ciphertext.is_empty() { |
441 | 0 | break; |
442 | 0 | } |
443 | 0 | auth.update_blocks(ciphertext); |
444 | | |
445 | 0 | let chunk = Overlapping::new( |
446 | 0 | &mut in_out_slice[output..][..(chunk_len + in_prefix_len)], |
447 | 0 | in_prefix_len.., |
448 | 0 | ) |
449 | 0 | .map_err(error::erase::<IndexError>)?; |
450 | 0 | aes_key.ctr32_encrypt_within(chunk, &mut ctr); |
451 | 0 | output += chunk_len; |
452 | 0 | input += chunk_len; |
453 | | } |
454 | | } |
455 | | |
456 | 0 | let in_out = Overlapping::new(&mut in_out_slice[whole_len..], src) |
457 | 0 | .unwrap_or_else(|IndexError { .. }| unreachable!()); |
458 | 0 | let in_out = OverlappingPartialBlock::new(in_out) |
459 | 0 | .unwrap_or_else(|InputTooLongError { .. }| unreachable!()); |
460 | 0 |
461 | 0 | open_finish(aes_key, auth, in_out, ctr, tag_iv) |
462 | 0 | } Unexecuted instantiation: ring::aead::aes_gcm::open_strided::<ring::aead::aes::hw::Key, ring::aead::gcm::clmul::Key>
Unexecuted instantiation: ring::aead::aes_gcm::open_strided::<ring::aead::aes::vp::Key, ring::aead::gcm::fallback::Key>
Unexecuted instantiation: ring::aead::aes_gcm::open_strided::<ring::aead::aes::fallback::Key, ring::aead::gcm::fallback::Key>
463 | | |
464 | 0 | fn open_finish<A: aes::EncryptBlock, G: gcm::UpdateBlock>( |
465 | 0 | aes_key: &A, |
466 | 0 | mut auth: gcm::Context<G>, |
467 | 0 | remainder: OverlappingPartialBlock<'_>, |
468 | 0 | ctr: Counter, |
469 | 0 | tag_iv: aes::Iv, |
470 | 0 | ) -> Result<Tag, error::Unspecified> { |
471 | 0 | if remainder.len() > 0 { |
472 | 0 | let mut input = ZERO_BLOCK; |
473 | 0 | overwrite_at_start(&mut input, remainder.input()); |
474 | 0 | auth.update_block(input); |
475 | 0 | remainder.overwrite_at_start(aes_key.encrypt_iv_xor_block(ctr.into(), input)); |
476 | 0 | } |
477 | 0 | Ok(finish(aes_key, auth, tag_iv)) |
478 | 0 | } Unexecuted instantiation: ring::aead::aes_gcm::open_finish::<ring::aead::aes::hw::Key, ring::aead::gcm::vclmulavx2::Key>
Unexecuted instantiation: ring::aead::aes_gcm::open_finish::<ring::aead::aes::hw::Key, ring::aead::gcm::clmulavxmovbe::Key>
Unexecuted instantiation: ring::aead::aes_gcm::open_finish::<ring::aead::aes::hw::Key, ring::aead::gcm::clmul::Key>
Unexecuted instantiation: ring::aead::aes_gcm::open_finish::<ring::aead::aes::vp::Key, ring::aead::gcm::fallback::Key>
Unexecuted instantiation: ring::aead::aes_gcm::open_finish::<ring::aead::aes::fallback::Key, ring::aead::gcm::fallback::Key>
479 | | |
480 | 0 | fn finish<A: aes::EncryptBlock, G: gcm::UpdateBlock>( |
481 | 0 | aes_key: &A, |
482 | 0 | gcm_ctx: gcm::Context<G>, |
483 | 0 | tag_iv: aes::Iv, |
484 | 0 | ) -> Tag { |
485 | 0 | // Finalize the tag and return it. |
486 | 0 | gcm_ctx.pre_finish(|pre_tag| Tag(aes_key.encrypt_iv_xor_block(tag_iv, pre_tag)))
Unexecuted instantiation: ring::aead::aes_gcm::finish::<ring::aead::aes::hw::Key, ring::aead::gcm::vclmulavx2::Key>::{closure#0}
Unexecuted instantiation: ring::aead::aes_gcm::finish::<ring::aead::aes::hw::Key, ring::aead::gcm::clmulavxmovbe::Key>::{closure#0}
Unexecuted instantiation: ring::aead::aes_gcm::finish::<ring::aead::aes::hw::Key, ring::aead::gcm::clmul::Key>::{closure#0}
Unexecuted instantiation: ring::aead::aes_gcm::finish::<ring::aead::aes::vp::Key, ring::aead::gcm::fallback::Key>::{closure#0}
Unexecuted instantiation: ring::aead::aes_gcm::finish::<ring::aead::aes::fallback::Key, ring::aead::gcm::fallback::Key>::{closure#0}
487 | 0 | } Unexecuted instantiation: ring::aead::aes_gcm::finish::<ring::aead::aes::hw::Key, ring::aead::gcm::vclmulavx2::Key>
Unexecuted instantiation: ring::aead::aes_gcm::finish::<ring::aead::aes::hw::Key, ring::aead::gcm::clmulavxmovbe::Key>
Unexecuted instantiation: ring::aead::aes_gcm::finish::<ring::aead::aes::hw::Key, ring::aead::gcm::clmul::Key>
Unexecuted instantiation: ring::aead::aes_gcm::finish::<ring::aead::aes::vp::Key, ring::aead::gcm::fallback::Key>
Unexecuted instantiation: ring::aead::aes_gcm::finish::<ring::aead::aes::fallback::Key, ring::aead::gcm::fallback::Key>
488 | | |
489 | | pub(super) const MAX_IN_OUT_LEN: usize = super::max_input_len(BLOCK_LEN, 2); |
490 | | |
491 | | // [NIST SP800-38D] Section 5.2.1.1. Note that [RFC 5116 Section 5.1] and |
492 | | // [RFC 5116 Section 5.2] have an off-by-one error in `P_MAX`. |
493 | | // |
494 | | // [NIST SP800-38D]: |
495 | | // http://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf |
496 | | // [RFC 5116 Section 5.1]: https://tools.ietf.org/html/rfc5116#section-5.1 |
497 | | // [RFC 5116 Section 5.2]: https://tools.ietf.org/html/rfc5116#section-5.2 |
498 | | const _MAX_INPUT_LEN_BOUNDED_BY_NIST: () = |
499 | | assert!(MAX_IN_OUT_LEN == usize_from_u64_saturated(((1u64 << 39) - 256) / 8)); |
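The assertion checks out arithmetically: NIST SP800-38D caps the plaintext at $2^{39} - 256$ bits, and $(2^{39} - 256)/8 = 2^{36} - 32 = 68{,}719{,}476{,}704$ bytes. Reading `max_input_len(BLOCK_LEN, 2)` as reserving two of the $2^{32}$ counter values (the initial counter and the tag IV taken by `ctr.increment()` in `seal`/`open`) gives the same figure, $(2^{32} - 2) \cdot 16 = 2^{36} - 32$; that reading is an inference from the call sites above, not from `max_input_len` itself.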
500 | | |
501 | | #[derive(Copy, Clone)] |
502 | | pub(super) struct Combo<Aes, Gcm> { |
503 | | pub(super) aes_key: Aes, |
504 | | pub(super) gcm_key: Gcm, |
505 | | } |