/src/libgcrypt/cipher/bulkhelp.h
Line | Count | Source (jump to first uncovered line) |
1 | | /* bulkhelp.h - Some bulk processing helpers |
2 | | * Copyright (C) 2022 Jussi Kivilinna <jussi.kivilinna@iki.fi> |
3 | | * |
4 | | * This file is part of Libgcrypt. |
5 | | * |
6 | | * Libgcrypt is free software; you can redistribute it and/or modify |
7 | | * it under the terms of the GNU Lesser General Public License as |
8 | | * published by the Free Software Foundation; either version 2.1 of |
9 | | * the License, or (at your option) any later version. |
10 | | * |
11 | | * Libgcrypt is distributed in the hope that it will be useful, |
12 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | | * GNU Lesser General Public License for more details. |
15 | | * |
16 | | * You should have received a copy of the GNU Lesser General Public |
17 | | * License along with this program; if not, see <http://www.gnu.org/licenses/>. |
18 | | */ |
19 | | #ifndef GCRYPT_BULKHELP_H |
20 | | #define GCRYPT_BULKHELP_H |
21 | | |
22 | | |
23 | | #include "g10lib.h" |
24 | | #include "cipher-internal.h" |
25 | | |
26 | | |
27 | | #ifdef __x86_64__ |
28 | | /* Use u64 to store pointers for x32 support (assembly function assumes |
29 | | * 64-bit pointers). */ |
30 | | typedef u64 ocb_L_uintptr_t; |
31 | | #else |
32 | | typedef uintptr_t ocb_L_uintptr_t; |
33 | | #endif |
34 | | |
35 | | typedef unsigned int (*bulk_crypt_fn_t) (const void *ctx, byte *out, |
36 | | const byte *in, |
37 | | unsigned int num_blks); |
38 | | |
39 | | |
40 | | static inline ocb_L_uintptr_t * |
41 | | bulk_ocb_prepare_L_pointers_array_blk64 (gcry_cipher_hd_t c, |
42 | | ocb_L_uintptr_t Ls[64], u64 blkn) |
43 | 0 | { |
44 | 0 | unsigned int n = 64 - (blkn % 64); |
45 | 0 | unsigned int i; |
46 | |
|
47 | 0 | for (i = 0; i < 64; i += 8) |
48 | 0 | { |
49 | 0 | Ls[(i + 0 + n) % 64] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; |
50 | 0 | Ls[(i + 1 + n) % 64] = (uintptr_t)(void *)c->u_mode.ocb.L[1]; |
51 | 0 | Ls[(i + 2 + n) % 64] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; |
52 | 0 | Ls[(i + 3 + n) % 64] = (uintptr_t)(void *)c->u_mode.ocb.L[2]; |
53 | 0 | Ls[(i + 4 + n) % 64] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; |
54 | 0 | Ls[(i + 5 + n) % 64] = (uintptr_t)(void *)c->u_mode.ocb.L[1]; |
55 | 0 | Ls[(i + 6 + n) % 64] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; |
56 | 0 | } |
57 | |
|
58 | 0 | Ls[(7 + n) % 64] = (uintptr_t)(void *)c->u_mode.ocb.L[3]; |
59 | 0 | Ls[(15 + n) % 64] = (uintptr_t)(void *)c->u_mode.ocb.L[4]; |
60 | 0 | Ls[(23 + n) % 64] = (uintptr_t)(void *)c->u_mode.ocb.L[3]; |
61 | 0 | Ls[(31 + n) % 64] = (uintptr_t)(void *)c->u_mode.ocb.L[5]; |
62 | 0 | Ls[(39 + n) % 64] = (uintptr_t)(void *)c->u_mode.ocb.L[3]; |
63 | 0 | Ls[(47 + n) % 64] = (uintptr_t)(void *)c->u_mode.ocb.L[4]; |
64 | 0 | Ls[(55 + n) % 64] = (uintptr_t)(void *)c->u_mode.ocb.L[3]; |
65 | 0 | return &Ls[(63 + n) % 64]; |
66 | 0 | } Unexecuted instantiation: serpent.c:bulk_ocb_prepare_L_pointers_array_blk64 Unexecuted instantiation: sm4.c:bulk_ocb_prepare_L_pointers_array_blk64 Unexecuted instantiation: twofish.c:bulk_ocb_prepare_L_pointers_array_blk64 Unexecuted instantiation: camellia-glue.c:bulk_ocb_prepare_L_pointers_array_blk64 |
67 | | |
68 | | |
69 | | static inline ocb_L_uintptr_t * |
70 | | bulk_ocb_prepare_L_pointers_array_blk32 (gcry_cipher_hd_t c, |
71 | | ocb_L_uintptr_t Ls[32], u64 blkn) |
72 | 0 | { |
73 | 0 | unsigned int n = 32 - (blkn % 32); |
74 | 0 | unsigned int i; |
75 | |
|
76 | 0 | for (i = 0; i < 32; i += 8) |
77 | 0 | { |
78 | 0 | Ls[(i + 0 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; |
79 | 0 | Ls[(i + 1 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[1]; |
80 | 0 | Ls[(i + 2 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; |
81 | 0 | Ls[(i + 3 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[2]; |
82 | 0 | Ls[(i + 4 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; |
83 | 0 | Ls[(i + 5 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[1]; |
84 | 0 | Ls[(i + 6 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; |
85 | 0 | } |
86 | |
|
87 | 0 | Ls[(7 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[3]; |
88 | 0 | Ls[(15 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[4]; |
89 | 0 | Ls[(23 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[3]; |
90 | 0 | return &Ls[(31 + n) % 32]; |
91 | 0 | } Unexecuted instantiation: serpent.c:bulk_ocb_prepare_L_pointers_array_blk32 Unexecuted instantiation: sm4.c:bulk_ocb_prepare_L_pointers_array_blk32 Unexecuted instantiation: twofish.c:bulk_ocb_prepare_L_pointers_array_blk32 Unexecuted instantiation: camellia-glue.c:bulk_ocb_prepare_L_pointers_array_blk32 |
92 | | |
93 | | |
94 | | static inline ocb_L_uintptr_t * |
95 | | bulk_ocb_prepare_L_pointers_array_blk16 (gcry_cipher_hd_t c, |
96 | | ocb_L_uintptr_t Ls[16], u64 blkn) |
97 | 0 | { |
98 | 0 | unsigned int n = 16 - (blkn % 16); |
99 | 0 | unsigned int i; |
100 | |
|
101 | 0 | for (i = 0; i < 16; i += 8) |
102 | 0 | { |
103 | 0 | Ls[(i + 0 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; |
104 | 0 | Ls[(i + 1 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[1]; |
105 | 0 | Ls[(i + 2 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; |
106 | 0 | Ls[(i + 3 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[2]; |
107 | 0 | Ls[(i + 4 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; |
108 | 0 | Ls[(i + 5 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[1]; |
109 | 0 | Ls[(i + 6 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; |
110 | 0 | } |
111 | |
|
112 | 0 | Ls[(7 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[3]; |
113 | 0 | return &Ls[(15 + n) % 16]; |
114 | 0 | } Unexecuted instantiation: serpent.c:bulk_ocb_prepare_L_pointers_array_blk16 Unexecuted instantiation: sm4.c:bulk_ocb_prepare_L_pointers_array_blk16 Unexecuted instantiation: twofish.c:bulk_ocb_prepare_L_pointers_array_blk16 Unexecuted instantiation: camellia-glue.c:bulk_ocb_prepare_L_pointers_array_blk16 |
115 | | |
116 | | |
117 | | static inline ocb_L_uintptr_t * |
118 | | bulk_ocb_prepare_L_pointers_array_blk8 (gcry_cipher_hd_t c, |
119 | | ocb_L_uintptr_t Ls[8], u64 blkn) |
120 | 0 | { |
121 | 0 | unsigned int n = 8 - (blkn % 8); |
122 | |
|
123 | 0 | Ls[(0 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; |
124 | 0 | Ls[(1 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[1]; |
125 | 0 | Ls[(2 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; |
126 | 0 | Ls[(3 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[2]; |
127 | 0 | Ls[(4 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; |
128 | 0 | Ls[(5 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[1]; |
129 | 0 | Ls[(6 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; |
130 | 0 | Ls[(7 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[3]; |
131 | |
|
132 | 0 | return &Ls[(7 + n) % 8]; |
133 | 0 | } Unexecuted instantiation: serpent.c:bulk_ocb_prepare_L_pointers_array_blk8 Unexecuted instantiation: sm4.c:bulk_ocb_prepare_L_pointers_array_blk8 Unexecuted instantiation: twofish.c:bulk_ocb_prepare_L_pointers_array_blk8 Unexecuted instantiation: camellia-glue.c:bulk_ocb_prepare_L_pointers_array_blk8 |
134 | | |
135 | | |
136 | | static inline unsigned int |
137 | | bulk_ctr_enc_128 (void *priv, bulk_crypt_fn_t crypt_fn, byte *outbuf, |
138 | | const byte *inbuf, size_t nblocks, byte *ctr, |
139 | | byte *tmpbuf, size_t tmpbuf_nblocks, |
140 | | unsigned int *num_used_tmpblocks) |
141 | 0 | { |
142 | 0 | unsigned int tmp_used = 16; |
143 | 0 | unsigned int burn_depth = 0; |
144 | 0 | unsigned int nburn; |
145 | |
|
146 | 0 | while (nblocks >= 1) |
147 | 0 | { |
148 | 0 | size_t curr_blks = nblocks > tmpbuf_nblocks ? tmpbuf_nblocks : nblocks; |
149 | 0 | size_t i; |
150 | |
|
151 | 0 | if (curr_blks * 16 > tmp_used) |
152 | 0 | tmp_used = curr_blks * 16; |
153 | |
|
154 | 0 | cipher_block_cpy (tmpbuf + 0 * 16, ctr, 16); |
155 | 0 | for (i = 1; i < curr_blks; i++) |
156 | 0 | { |
157 | 0 | cipher_block_cpy (&tmpbuf[i * 16], ctr, 16); |
158 | 0 | cipher_block_add (&tmpbuf[i * 16], i, 16); |
159 | 0 | } |
160 | 0 | cipher_block_add (ctr, curr_blks, 16); |
161 | |
|
162 | 0 | nburn = crypt_fn (priv, tmpbuf, tmpbuf, curr_blks); |
163 | 0 | burn_depth = nburn > burn_depth ? nburn : burn_depth; |
164 | |
|
165 | 0 | for (i = 0; i < curr_blks; i++) |
166 | 0 | { |
167 | 0 | cipher_block_xor (outbuf, &tmpbuf[i * 16], inbuf, 16); |
168 | 0 | outbuf += 16; |
169 | 0 | inbuf += 16; |
170 | 0 | } |
171 | |
|
172 | 0 | nblocks -= curr_blks; |
173 | 0 | } |
174 | |
|
175 | 0 | *num_used_tmpblocks = tmp_used; |
176 | 0 | return burn_depth; |
177 | 0 | } Unexecuted instantiation: serpent.c:bulk_ctr_enc_128 Unexecuted instantiation: sm4.c:bulk_ctr_enc_128 Unexecuted instantiation: twofish.c:bulk_ctr_enc_128 Unexecuted instantiation: camellia-glue.c:bulk_ctr_enc_128 |
178 | | |
179 | | |
/* Encrypt NBLOCKS 16-byte blocks in little-endian 32-bit counter mode (the
 * counter construction used by GCM-SIV): only the low 32 bits of CTR are
 * incremented per block, the upper 96 bits stay fixed for the whole message.
 * Counter blocks are generated into TMPBUF (capacity TMPBUF_NBLOCKS blocks),
 * encrypted in batches through CRYPT_FN, and the keystream is XORed with
 * INBUF into OUTBUF.  CTR is advanced in place.  Returns the largest stack
 * burn depth reported by CRYPT_FN; *NUM_USED_TMPBLOCKS receives the number
 * of TMPBUF bytes written, so the caller can wipe them.  */
static inline unsigned int
bulk_ctr32le_enc_128 (void *priv, bulk_crypt_fn_t crypt_fn, byte *outbuf,
                      const byte *inbuf, size_t nblocks, byte *ctr,
                      byte *tmpbuf, size_t tmpbuf_nblocks,
                      unsigned int *num_used_tmpblocks)
{
  unsigned int tmp_used = 16;
  unsigned int burn_depth = 0;
  unsigned int nburn;

  while (nblocks >= 1)
    {
      size_t curr_blks = nblocks > tmpbuf_nblocks ? tmpbuf_nblocks : nblocks;
      u64 ctr_lo = buf_get_le64(ctr + 0 * 8);
      /* The high half never changes, so it is read and written back in host
       * byte order to avoid needless byte swapping.  */
      u64 ctr_hi = buf_get_he64(ctr + 1 * 8);
      size_t i;

      if (curr_blks * 16 > tmp_used)
        tmp_used = curr_blks * 16;

      cipher_block_cpy (tmpbuf + 0 * 16, ctr, 16);
      for (i = 1; i < curr_blks; i++)
        {
          /* Add i to the low 32 bits only; bits 32..63 of the low half are
           * preserved from CTR (32-bit wrap-around, no carry out).  */
          u32 lo_u32 = (u32)ctr_lo + i;
          u64 lo_u64 = ctr_lo & ~(u64)(u32)-1;
          lo_u64 += lo_u32;
          buf_put_le64(&tmpbuf[0 * 8 + i * 16], lo_u64);
          buf_put_he64(&tmpbuf[1 * 8 + i * 16], ctr_hi);
        }
      /* Only the incremented low 32 bits need storing back.  */
      buf_put_le32(ctr, (u32)ctr_lo + curr_blks);

      /* Encrypt the counter blocks in place to produce the keystream.  */
      nburn = crypt_fn (priv, tmpbuf, tmpbuf, curr_blks);
      burn_depth = nburn > burn_depth ? nburn : burn_depth;

      for (i = 0; i < curr_blks; i++)
        {
          cipher_block_xor (outbuf, &tmpbuf[i * 16], inbuf, 16);
          outbuf += 16;
          inbuf += 16;
        }

      nblocks -= curr_blks;
    }

  *num_used_tmpblocks = tmp_used;
  return burn_depth;
}
227 | | |
228 | | |
229 | | static inline unsigned int |
230 | | bulk_cbc_dec_128 (void *priv, bulk_crypt_fn_t crypt_fn, byte *outbuf, |
231 | | const byte *inbuf, size_t nblocks, byte *iv, |
232 | | byte *tmpbuf, size_t tmpbuf_nblocks, |
233 | | unsigned int *num_used_tmpblocks) |
234 | 0 | { |
235 | 0 | unsigned int tmp_used = 16; |
236 | 0 | unsigned int burn_depth = 0; |
237 | 0 | unsigned int nburn; |
238 | |
|
239 | 0 | while (nblocks >= 1) |
240 | 0 | { |
241 | 0 | size_t curr_blks = nblocks > tmpbuf_nblocks ? tmpbuf_nblocks : nblocks; |
242 | 0 | size_t i; |
243 | |
|
244 | 0 | if (curr_blks * 16 > tmp_used) |
245 | 0 | tmp_used = curr_blks * 16; |
246 | |
|
247 | 0 | nburn = crypt_fn (priv, tmpbuf, inbuf, curr_blks); |
248 | 0 | burn_depth = nburn > burn_depth ? nburn : burn_depth; |
249 | |
|
250 | 0 | for (i = 0; i < curr_blks; i++) |
251 | 0 | { |
252 | 0 | cipher_block_xor_n_copy_2(outbuf, &tmpbuf[i * 16], iv, inbuf, 16); |
253 | 0 | outbuf += 16; |
254 | 0 | inbuf += 16; |
255 | 0 | } |
256 | |
|
257 | 0 | nblocks -= curr_blks; |
258 | 0 | } |
259 | |
|
260 | 0 | *num_used_tmpblocks = tmp_used; |
261 | 0 | return burn_depth; |
262 | 0 | } Unexecuted instantiation: serpent.c:bulk_cbc_dec_128 Unexecuted instantiation: sm4.c:bulk_cbc_dec_128 Unexecuted instantiation: twofish.c:bulk_cbc_dec_128 Unexecuted instantiation: camellia-glue.c:bulk_cbc_dec_128 |
263 | | |
264 | | |
/* Decrypt NBLOCKS 16-byte blocks in CFB mode.  The keystream for block i is
 * E(C_{i-1}) (with IV standing in for C_{-1}), so the previous ciphertext
 * blocks are staged into TMPBUF, encrypted in batches of at most
 * TMPBUF_NBLOCKS via CRYPT_FN, and XORed with the ciphertext into OUTBUF.
 * IV is updated in place to the last ciphertext block.  Returns the largest
 * stack burn depth reported by CRYPT_FN; *NUM_USED_TMPBLOCKS receives the
 * number of TMPBUF bytes written, so the caller can wipe them.  */
static inline unsigned int
bulk_cfb_dec_128 (void *priv, bulk_crypt_fn_t crypt_fn, byte *outbuf,
                  const byte *inbuf, size_t nblocks, byte *iv,
                  byte *tmpbuf, size_t tmpbuf_nblocks,
                  unsigned int *num_used_tmpblocks)
{
  unsigned int tmp_used = 16;
  unsigned int burn_depth = 0;
  unsigned int nburn;

  while (nblocks >= 1)
    {
      size_t curr_blks = nblocks > tmpbuf_nblocks ? tmpbuf_nblocks : nblocks;
      size_t i;

      if (curr_blks * 16 > tmp_used)
        tmp_used = curr_blks * 16;

      /* Keystream inputs: IV followed by ciphertext blocks 0..curr_blks-2.
       * The copies must happen before IV is overwritten below, and before
       * the batched encryption clobbers tmpbuf.  */
      cipher_block_cpy (&tmpbuf[0 * 16], iv, 16);
      if (curr_blks > 1)
        memcpy (&tmpbuf[1 * 16], &inbuf[(1 - 1) * 16], 16 * curr_blks - 16);
      /* New IV is the last ciphertext block of this batch.  */
      cipher_block_cpy (iv, &inbuf[(curr_blks - 1) * 16], 16);

      /* Encrypt the staged blocks in place to produce the keystream.  */
      nburn = crypt_fn (priv, tmpbuf, tmpbuf, curr_blks);
      burn_depth = nburn > burn_depth ? nburn : burn_depth;

      for (i = 0; i < curr_blks; i++)
        {
          cipher_block_xor (outbuf, inbuf, &tmpbuf[i * 16], 16);
          outbuf += 16;
          inbuf += 16;
        }

      nblocks -= curr_blks;
    }

  *num_used_tmpblocks = tmp_used;
  return burn_depth;
}
304 | | |
305 | | |
/* OCB bulk encryption/decryption of NBLOCKS 16-byte blocks.  *BLKN is the
 * running block counter used to select L_{ntz(i)}; it is advanced in place.
 * ENCRYPT selects encryption (checksum accumulated over plaintext before the
 * cipher call) versus decryption (checksum accumulated over the recovered
 * plaintext afterwards).  The per-block offsets are kept in TMPBUF so they
 * can be re-applied after the batched cipher call.  The checksum lives in
 * c->u_ctr.ctr and the running offset in c->u_iv.iv, both updated in place.
 * Returns the largest stack burn depth reported by CRYPT_FN;
 * *NUM_USED_TMPBLOCKS receives the number of TMPBUF bytes written.  */
static inline unsigned int
bulk_ocb_crypt_128 (gcry_cipher_hd_t c, void *priv, bulk_crypt_fn_t crypt_fn,
                    byte *outbuf, const byte *inbuf, size_t nblocks, u64 *blkn,
                    int encrypt, byte *tmpbuf, size_t tmpbuf_nblocks,
                    unsigned int *num_used_tmpblocks)
{
  unsigned int tmp_used = 16;
  unsigned int burn_depth = 0;
  unsigned int nburn;

  while (nblocks >= 1)
    {
      size_t curr_blks = nblocks > tmpbuf_nblocks ? tmpbuf_nblocks : nblocks;
      size_t i;

      if (curr_blks * 16 > tmp_used)
        tmp_used = curr_blks * 16;

      for (i = 0; i < curr_blks; i++)
        {
          const unsigned char *l = ocb_get_l(c, ++*blkn);

          /* Checksum_i = Checksum_{i-1} xor P_i */
          if (encrypt)
            cipher_block_xor_1(c->u_ctr.ctr, &inbuf[i * 16], 16);

          /* Offset_i = Offset_{i-1} xor L_{ntz(i)}; the offset is also
           * saved in tmpbuf for the un-whitening pass below.  */
          cipher_block_xor_2dst (&tmpbuf[i * 16], c->u_iv.iv, l, 16);
          cipher_block_xor (&outbuf[i * 16], &inbuf[i * 16],
                            c->u_iv.iv, 16);
        }

      /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
      nburn = crypt_fn (priv, outbuf, outbuf, curr_blks);
      burn_depth = nburn > burn_depth ? nburn : burn_depth;

      for (i = 0; i < curr_blks; i++)
        {
          /* Re-apply the saved offset to finish the whitening.  */
          cipher_block_xor_1 (&outbuf[i * 16], &tmpbuf[i * 16], 16);

          /* Checksum_i = Checksum_{i-1} xor P_i */
          if (!encrypt)
            cipher_block_xor_1(c->u_ctr.ctr, &outbuf[i * 16], 16);
        }

      outbuf += curr_blks * 16;
      inbuf += curr_blks * 16;
      nblocks -= curr_blks;
    }

  *num_used_tmpblocks = tmp_used;
  return burn_depth;
}
359 | | |
360 | | |
/* OCB bulk AAD authentication over NBLOCKS 16-byte blocks of ABUF.  *BLKN
 * is the running AAD block counter used to select L_{ntz(i)}; advanced in
 * place.  The whitened AAD blocks are built in TMPBUF, encrypted in batches
 * via CRYPT_FN, and folded into the AAD checksum c->u_mode.ocb.aad_sum.
 * The running AAD offset lives in c->u_mode.ocb.aad_offset.  Returns the
 * largest stack burn depth reported by CRYPT_FN; *NUM_USED_TMPBLOCKS
 * receives the number of TMPBUF bytes written.  */
static inline unsigned int
bulk_ocb_auth_128 (gcry_cipher_hd_t c, void *priv, bulk_crypt_fn_t crypt_fn,
                   const byte *abuf, size_t nblocks, u64 *blkn, byte *tmpbuf,
                   size_t tmpbuf_nblocks, unsigned int *num_used_tmpblocks)
{
  unsigned int tmp_used = 16;
  unsigned int burn_depth = 0;
  unsigned int nburn;

  while (nblocks >= 1)
    {
      size_t curr_blks = nblocks > tmpbuf_nblocks ? tmpbuf_nblocks : nblocks;
      size_t i;

      if (curr_blks * 16 > tmp_used)
        tmp_used = curr_blks * 16;

      for (i = 0; i < curr_blks; i++)
        {
          const unsigned char *l = ocb_get_l(c, ++*blkn);

          /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
          cipher_block_xor_2dst (&tmpbuf[i * 16],
                                 c->u_mode.ocb.aad_offset, l, 16);
          /* Whiten AAD block with the offset before encryption.  */
          cipher_block_xor_1 (&tmpbuf[i * 16], &abuf[i * 16], 16);
        }

      /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
      nburn = crypt_fn (priv, tmpbuf, tmpbuf, curr_blks);
      burn_depth = nburn > burn_depth ? nburn : burn_depth;

      /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */
      for (i = 0; i < curr_blks; i++)
        {
          cipher_block_xor_1 (c->u_mode.ocb.aad_sum, &tmpbuf[i * 16], 16);
        }

      abuf += curr_blks * 16;
      nblocks -= curr_blks;
    }

  *num_used_tmpblocks = tmp_used;
  return burn_depth;
}
404 | | |
405 | | |
/* XTS bulk encryption/decryption of NBLOCKS 16-byte blocks.  Implements the
 * xor-encrypt-xor construction: each block is XORed with the current tweak,
 * passed through CRYPT_FN, and XORed with the same tweak again.  Between
 * blocks the tweak is multiplied by x in GF(2^128) with the XTS reduction
 * polynomial 0x87.  Per-block tweaks are stashed in TMPBUF (host byte
 * order, since they are only read back here) across the batched cipher
 * call.  TWEAK is updated in place for the next call.  Returns the largest
 * stack burn depth reported by CRYPT_FN; *NUM_USED_TMPBLOCKS receives the
 * number of TMPBUF bytes written.  */
static inline unsigned int
bulk_xts_crypt_128 (void *priv, bulk_crypt_fn_t crypt_fn, byte *outbuf,
                    const byte *inbuf, size_t nblocks, byte *tweak,
                    byte *tmpbuf, size_t tmpbuf_nblocks,
                    unsigned int *num_used_tmpblocks)
{
  u64 tweak_lo, tweak_hi, tweak_next_lo, tweak_next_hi, tmp_lo, tmp_hi, carry;
  unsigned int tmp_used = 16;
  unsigned int burn_depth = 0;
  unsigned int nburn;

  tweak_next_lo = buf_get_le64 (tweak + 0);
  tweak_next_hi = buf_get_le64 (tweak + 8);

  while (nblocks >= 1)
    {
      size_t curr_blks = nblocks > tmpbuf_nblocks ? tmpbuf_nblocks : nblocks;
      size_t i;

      if (curr_blks * 16 > tmp_used)
        tmp_used = curr_blks * 16;

      for (i = 0; i < curr_blks; i++)
        {
          tweak_lo = tweak_next_lo;
          tweak_hi = tweak_next_hi;

          /* Generate next tweak: multiply by x in GF(2^128); if the top bit
           * shifts out, reduce with the XTS polynomial 0x87.  */
          carry = -(tweak_next_hi >> 63) & 0x87;
          tweak_next_hi = (tweak_next_hi << 1) + (tweak_next_lo >> 63);
          tweak_next_lo = (tweak_next_lo << 1) ^ carry;

          /* Xor-Encrypt/Decrypt-Xor block: first whitening pass; the tweak
           * is saved in tmpbuf (host order) for the second pass below.  */
          tmp_lo = buf_get_le64 (inbuf + i * 16 + 0) ^ tweak_lo;
          tmp_hi = buf_get_le64 (inbuf + i * 16 + 8) ^ tweak_hi;
          buf_put_he64 (&tmpbuf[i * 16 + 0], tweak_lo);
          buf_put_he64 (&tmpbuf[i * 16 + 8], tweak_hi);
          buf_put_le64 (outbuf + i * 16 + 0, tmp_lo);
          buf_put_le64 (outbuf + i * 16 + 8, tmp_hi);
        }

      nburn = crypt_fn (priv, outbuf, outbuf, curr_blks);
      burn_depth = nburn > burn_depth ? nburn : burn_depth;

      for (i = 0; i < curr_blks; i++)
        {
          /* Xor-Encrypt/Decrypt-Xor block: second whitening pass with the
           * tweak saved before the cipher call.  */
          tweak_lo = buf_get_he64 (&tmpbuf[i * 16 + 0]);
          tweak_hi = buf_get_he64 (&tmpbuf[i * 16 + 8]);
          tmp_lo = buf_get_le64 (outbuf + i * 16 + 0) ^ tweak_lo;
          tmp_hi = buf_get_le64 (outbuf + i * 16 + 8) ^ tweak_hi;
          buf_put_le64 (outbuf + i * 16 + 0, tmp_lo);
          buf_put_le64 (outbuf + i * 16 + 8, tmp_hi);
        }

      inbuf += curr_blks * 16;
      outbuf += curr_blks * 16;
      nblocks -= curr_blks;
    }

  /* Store the tweak for the next invocation.  */
  buf_put_le64 (tweak + 0, tweak_next_lo);
  buf_put_le64 (tweak + 8, tweak_next_hi);

  *num_used_tmpblocks = tmp_used;
  return burn_depth;
}
472 | | |
473 | | static inline unsigned int |
474 | | bulk_ecb_crypt_128 (void *priv, bulk_crypt_fn_t crypt_fn, byte *outbuf, |
475 | | const byte *inbuf, size_t nblocks, size_t fn_max_nblocks) |
476 | 0 | { |
477 | 0 | unsigned int burn_depth = 0; |
478 | 0 | unsigned int nburn; |
479 | |
|
480 | 0 | while (nblocks >= 1) |
481 | 0 | { |
482 | 0 | size_t curr_blks = nblocks > fn_max_nblocks ? fn_max_nblocks : nblocks; |
483 | 0 | nburn = crypt_fn (priv, outbuf, inbuf, curr_blks); |
484 | 0 | burn_depth = nburn > burn_depth ? nburn : burn_depth; |
485 | 0 | inbuf += curr_blks * 16; |
486 | 0 | outbuf += curr_blks * 16; |
487 | 0 | nblocks -= curr_blks; |
488 | 0 | } |
489 | |
|
490 | 0 | return burn_depth; |
491 | 0 | } Unexecuted instantiation: serpent.c:bulk_ecb_crypt_128 Unexecuted instantiation: sm4.c:bulk_ecb_crypt_128 Unexecuted instantiation: twofish.c:bulk_ecb_crypt_128 Unexecuted instantiation: camellia-glue.c:bulk_ecb_crypt_128 |
492 | | |
493 | | #endif /*GCRYPT_BULKHELP_H*/ |