/src/libgcrypt/cipher/cipher-ccm.c
Line | Count | Source |
1 | | /* cipher-ccm.c - CTR mode with CBC-MAC mode implementation |
2 | | * Copyright (C) 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> |
3 | | * |
4 | | * This file is part of Libgcrypt. |
5 | | * |
6 | | * Libgcrypt is free software; you can redistribute it and/or modify |
7 | | * it under the terms of the GNU Lesser General Public License as |
8 | | * published by the Free Software Foundation; either version 2.1 of |
9 | | * the License, or (at your option) any later version. |
10 | | * |
11 | | * Libgcrypt is distributed in the hope that it will be useful, |
12 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | | * GNU Lesser General Public License for more details. |
15 | | * |
16 | | * You should have received a copy of the GNU Lesser General Public |
17 | | * License along with this program; if not, see <http://www.gnu.org/licenses/>. |
18 | | */ |
19 | | |
20 | | #include <config.h> |
21 | | #include <stdio.h> |
22 | | #include <stdlib.h> |
23 | | #include <string.h> |
24 | | #include <errno.h> |
25 | | |
26 | | #include "g10lib.h" |
27 | | #include "cipher.h" |
28 | | #include "bufhelp.h" |
29 | | #include "./cipher-internal.h" |
30 | | |
31 | | |
32 | 0 | #define set_burn(burn, nburn) do { \ |
33 | 0 | unsigned int __nburn = (nburn); \ |
34 | 0 | (burn) = (burn) > __nburn ? (burn) : __nburn; } while (0) |
35 | | |
36 | | |
37 | | static unsigned int |
38 | | do_cbc_mac (gcry_cipher_hd_t c, const unsigned char *inbuf, size_t inlen, |
39 | | int do_padding) |
40 | 0 | { |
41 | 0 | gcry_cipher_encrypt_t enc_fn = c->spec->encrypt; |
42 | 0 | unsigned char tmp[16]; |
43 | 0 | const unsigned int blocksize = DIM(tmp); |
44 | 0 | unsigned int burn = 0; |
45 | 0 | unsigned int unused = c->u_mode.ccm.mac_unused; |
46 | 0 | size_t nblocks; |
47 | 0 | size_t n; |
48 | |
49 | 0 | if (inlen == 0 && (unused == 0 || !do_padding)) |
50 | 0 | return 0; |
51 | | |
52 | 0 | do |
53 | 0 | { |
54 | 0 | if (inlen + unused < blocksize || unused > 0) |
55 | 0 | { |
56 | 0 | n = (inlen > blocksize - unused) ? blocksize - unused : inlen; |
57 | |
58 | 0 | buf_cpy (&c->u_mode.ccm.macbuf[unused], inbuf, n); |
59 | 0 | unused += n; |
60 | 0 | inlen -= n; |
61 | 0 | inbuf += n; |
62 | 0 | } |
63 | 0 | if (!inlen) |
64 | 0 | { |
65 | 0 | if (!do_padding) |
66 | 0 | break; |
67 | | |
68 | 0 | n = blocksize - unused; |
69 | 0 | if (n > 0) |
70 | 0 | { |
71 | 0 | memset (&c->u_mode.ccm.macbuf[unused], 0, n); |
72 | 0 | unused = blocksize; |
73 | 0 | } |
74 | 0 | } |
75 | | |
76 | 0 | if (unused > 0) |
77 | 0 | { |
78 | | /* Process one block from macbuf. */ |
79 | 0 | cipher_block_xor(c->u_iv.iv, c->u_iv.iv, c->u_mode.ccm.macbuf, |
80 | 0 | blocksize); |
81 | 0 | set_burn (burn, enc_fn ( &c->context.c, c->u_iv.iv, c->u_iv.iv )); |
82 | |
83 | 0 | unused = 0; |
84 | 0 | } |
85 | |
|
86 | 0 | if (c->bulk.cbc_enc) |
87 | 0 | { |
88 | 0 | nblocks = inlen / blocksize; |
89 | 0 | c->bulk.cbc_enc (&c->context.c, c->u_iv.iv, tmp, inbuf, nblocks, 1); |
90 | 0 | inbuf += nblocks * blocksize; |
91 | 0 | inlen -= nblocks * blocksize; |
92 | |
93 | 0 | wipememory (tmp, sizeof(tmp)); |
94 | 0 | } |
95 | 0 | else |
96 | 0 | { |
97 | 0 | while (inlen >= blocksize) |
98 | 0 | { |
99 | 0 | cipher_block_xor(c->u_iv.iv, c->u_iv.iv, inbuf, blocksize); |
100 | |
101 | 0 | set_burn (burn, enc_fn ( &c->context.c, c->u_iv.iv, c->u_iv.iv )); |
102 | |
103 | 0 | inlen -= blocksize; |
104 | 0 | inbuf += blocksize; |
105 | 0 | } |
106 | 0 | } |
107 | 0 | } |
108 | 0 | while (inlen > 0); |
109 | | |
110 | 0 | c->u_mode.ccm.mac_unused = unused; |
111 | |
112 | 0 | if (burn) |
113 | 0 | burn += 4 * sizeof(void *); |
114 | |
115 | 0 | return burn; |
116 | 0 | } |
117 | | |
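The MAC that do_cbc_mac() maintains incrementally is a plain CBC-MAC: each 16-byte block of input is XORed into the running chaining value kept in c->u_iv.iv and the result is encrypted in place. A minimal sketch over whole blocks, with block_encrypt() standing in as a placeholder for the c->spec->encrypt callback used above:

    static void
    cbc_mac_sketch (void *cipher_ctx, unsigned char iv[16],
                    const unsigned char *in, size_t nblocks)
    {
      size_t i, j;

      for (i = 0; i < nblocks; i++, in += 16)
        {
          /* Chain: iv ^= next input block.  */
          for (j = 0; j < 16; j++)
            iv[j] ^= in[j];
          /* iv = E_k(iv); block_encrypt is a placeholder, not a real API.  */
          block_encrypt (cipher_ctx, iv, iv);
        }
      /* After the final (padded) block, iv holds the CBC-MAC.  */
    }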
118 | | |
119 | | gcry_err_code_t |
120 | | _gcry_cipher_ccm_set_nonce (gcry_cipher_hd_t c, const unsigned char *nonce, |
121 | | size_t noncelen) |
122 | 0 | { |
123 | 0 | unsigned int marks_key; |
124 | 0 | size_t L = 15 - noncelen; |
125 | 0 | size_t L_; |
126 | |
127 | 0 | L_ = L - 1; |
128 | |
129 | 0 | if (!nonce) |
130 | 0 | return GPG_ERR_INV_ARG; |
131 | | /* Length field must be 2, 3, ..., or 8. */ |
132 | 0 | if (L < 2 || L > 8) |
133 | 0 | return GPG_ERR_INV_LENGTH; |
134 | | |
135 | | /* Reset state */ |
136 | 0 | marks_key = c->marks.key; |
137 | 0 | memset (&c->u_mode, 0, sizeof(c->u_mode)); |
138 | 0 | memset (&c->marks, 0, sizeof(c->marks)); |
139 | 0 | memset (&c->u_iv, 0, sizeof(c->u_iv)); |
140 | 0 | memset (&c->u_ctr, 0, sizeof(c->u_ctr)); |
141 | 0 | memset (c->lastiv, 0, sizeof(c->lastiv)); |
142 | 0 | c->unused = 0; |
143 | 0 | c->marks.key = marks_key; |
144 | | |
145 | | /* Setup CTR */ |
146 | 0 | c->u_ctr.ctr[0] = L_; |
147 | 0 | memcpy (&c->u_ctr.ctr[1], nonce, noncelen); |
148 | 0 | memset (&c->u_ctr.ctr[1 + noncelen], 0, L); |
149 | | |
150 | | /* Setup IV */ |
151 | 0 | c->u_iv.iv[0] = L_; |
152 | 0 | memcpy (&c->u_iv.iv[1], nonce, noncelen); |
153 | | /* Add the flags (8 * M_ + 64 * Adata) to iv[0] and set |
154 | | iv[noncelen + 1 ... 15] later, in set_lengths. */ |
155 | 0 | memset (&c->u_iv.iv[1 + noncelen], 0, L); |
156 | |
157 | 0 | c->u_mode.ccm.nonce = 1; |
158 | |
159 | 0 | return GPG_ERR_NO_ERROR; |
160 | 0 | } |
161 | | |
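For a concrete picture of what set_nonce leaves behind, assume for illustration a 12-byte nonce, so L = 3 and L_ = 2 (any nonce of 7 to 13 bytes is accepted):

    counter block:  02 || nonce[0..11] || 00 00 00
    IV block:       02 || nonce[0..11] || 00 00 00

The counter block is A_0 of RFC 3610; set_lengths derives S_0 from it and then steps the last byte so that payload encryption starts at A_1. The IV block still lacks the flags bits and the message length, which set_lengths fills in.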
162 | | |
163 | | gcry_err_code_t |
164 | | _gcry_cipher_ccm_set_lengths (gcry_cipher_hd_t c, u64 encryptlen, u64 aadlen, |
165 | | u64 taglen) |
166 | 0 | { |
167 | 0 | unsigned int burn = 0; |
168 | 0 | unsigned char b0[16]; |
169 | 0 | size_t noncelen = 15 - (c->u_iv.iv[0] + 1); |
170 | 0 | u64 M = taglen; |
171 | 0 | u64 M_; |
172 | 0 | int i; |
173 | |
174 | 0 | M_ = (M - 2) / 2; |
175 | | |
176 | | /* Authentication field must be 4, 6, 8, 10, 12, 14 or 16. */ |
177 | 0 | if ((M_ * 2 + 2) != M || M < 4 || M > 16) |
178 | 0 | return GPG_ERR_INV_LENGTH; |
179 | 0 | if (!c->u_mode.ccm.nonce || c->marks.tag) |
180 | 0 | return GPG_ERR_INV_STATE; |
181 | 0 | if (c->u_mode.ccm.lengths) |
182 | 0 | return GPG_ERR_INV_STATE; |
183 | | |
184 | 0 | c->u_mode.ccm.authlen = taglen; |
185 | 0 | c->u_mode.ccm.encryptlen = encryptlen; |
186 | 0 | c->u_mode.ccm.aadlen = aadlen; |
187 | | |
188 | | /* Complete IV setup. */ |
189 | 0 | c->u_iv.iv[0] += (aadlen > 0) * 64 + M_ * 8; |
190 | 0 | for (i = 16 - 1; i >= 1 + noncelen; i--) |
191 | 0 | { |
192 | 0 | c->u_iv.iv[i] = encryptlen & 0xff; |
193 | 0 | encryptlen >>= 8; |
194 | 0 | } |
195 | |
196 | 0 | memcpy (b0, c->u_iv.iv, 16); |
197 | 0 | memset (c->u_iv.iv, 0, 16); |
198 | |
199 | 0 | set_burn (burn, do_cbc_mac (c, b0, 16, 0)); |
200 | |
201 | 0 | if (aadlen == 0) |
202 | 0 | { |
203 | | /* Do nothing. */ |
204 | 0 | } |
205 | 0 | else if (aadlen > 0 && aadlen <= (unsigned int)0xfeff) |
206 | 0 | { |
207 | 0 | b0[0] = (aadlen >> 8) & 0xff; |
208 | 0 | b0[1] = aadlen & 0xff; |
209 | 0 | set_burn (burn, do_cbc_mac (c, b0, 2, 0)); |
210 | 0 | } |
211 | 0 | else if (aadlen > 0xfeff && aadlen <= (unsigned int)0xffffffff) |
212 | 0 | { |
213 | 0 | b0[0] = 0xff; |
214 | 0 | b0[1] = 0xfe; |
215 | 0 | buf_put_be32(&b0[2], aadlen); |
216 | 0 | set_burn (burn, do_cbc_mac (c, b0, 6, 0)); |
217 | 0 | } |
218 | 0 | else if (aadlen > (unsigned int)0xffffffff) |
219 | 0 | { |
220 | 0 | b0[0] = 0xff; |
221 | 0 | b0[1] = 0xff; |
222 | 0 | buf_put_be64(&b0[2], aadlen); |
223 | 0 | set_burn (burn, do_cbc_mac (c, b0, 10, 0)); |
224 | 0 | } |
225 | | |
226 | | /* Generate S_0 and increase counter. */ |
227 | 0 | set_burn (burn, c->spec->encrypt ( &c->context.c, c->u_mode.ccm.s0, |
228 | 0 | c->u_ctr.ctr )); |
229 | 0 | c->u_ctr.ctr[15]++; |
230 | |
231 | 0 | if (burn) |
232 | 0 | _gcry_burn_stack (burn + sizeof(void *) * 5); |
233 | |
|
234 | 0 | c->u_mode.ccm.lengths = 1; |
235 | |
|
236 | 0 | return GPG_ERR_NO_ERROR; |
237 | 0 | } |
238 | | |
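The block b0 assembled above is the B_0 block of RFC 3610: a flags byte packing Adata (bit 6), M' = (taglen - 2) / 2 (bits 5..3) and L' = L - 1 (bits 2..0), followed by the nonce and the big-endian message length. A worked example of the flags byte, assuming for illustration a 12-byte nonce (L = 3), a 16-byte tag and non-empty AAD:

    /* Illustrative only: the flags byte of B_0 for noncelen = 12, taglen = 16
       and aadlen > 0.  It equals iv[0] = L_ from set_nonce plus the
       (aadlen > 0) * 64 + M_ * 8 added in set_lengths.  */
    unsigned char flags = (1 << 6)                /* Adata present          */
                        | (((16 - 2) / 2) << 3)   /* M' = 7 (16-byte tag)   */
                        | (3 - 1);                /* L' = 2 (3-byte length) */
    /* flags == 0x7a; B_0 = flags || nonce[0..11] || msglen (3 bytes, BE).  */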
239 | | |
240 | | gcry_err_code_t |
241 | | _gcry_cipher_ccm_authenticate (gcry_cipher_hd_t c, const unsigned char *abuf, |
242 | | size_t abuflen) |
243 | 0 | { |
244 | 0 | unsigned int burn; |
245 | |
246 | 0 | if (abuflen > 0 && !abuf) |
247 | 0 | return GPG_ERR_INV_ARG; |
248 | 0 | if (!c->u_mode.ccm.nonce || !c->u_mode.ccm.lengths || c->marks.tag) |
249 | 0 | return GPG_ERR_INV_STATE; |
250 | 0 | if (abuflen > c->u_mode.ccm.aadlen) |
251 | 0 | return GPG_ERR_INV_LENGTH; |
252 | | |
253 | 0 | c->u_mode.ccm.aadlen -= abuflen; |
254 | 0 | burn = do_cbc_mac (c, abuf, abuflen, c->u_mode.ccm.aadlen == 0); |
255 | |
256 | 0 | if (burn) |
257 | 0 | _gcry_burn_stack (burn + sizeof(void *) * 5); |
258 | |
259 | 0 | return GPG_ERR_NO_ERROR; |
260 | 0 | } |
261 | | |
262 | | |
263 | | static gcry_err_code_t |
264 | | _gcry_cipher_ccm_tag (gcry_cipher_hd_t c, unsigned char *outbuf, |
265 | | size_t outbuflen, int check) |
266 | 0 | { |
267 | 0 | unsigned int burn; |
268 | |
269 | 0 | if (!outbuf || outbuflen == 0) |
270 | 0 | return GPG_ERR_INV_ARG; |
271 | | /* Tag length must be same as initial authlen. */ |
272 | 0 | if (c->u_mode.ccm.authlen != outbuflen) |
273 | 0 | return GPG_ERR_INV_LENGTH; |
274 | 0 | if (!c->u_mode.ccm.nonce || !c->u_mode.ccm.lengths || c->u_mode.ccm.aadlen > 0) |
275 | 0 | return GPG_ERR_INV_STATE; |
276 | | /* Initial encrypt length must match the length of the data actually processed. */ |
277 | 0 | if (c->u_mode.ccm.encryptlen > 0) |
278 | 0 | return GPG_ERR_UNFINISHED; |
279 | | |
280 | 0 | if (!c->marks.tag) |
281 | 0 | { |
282 | 0 | burn = do_cbc_mac (c, NULL, 0, 1); /* Perform final padding. */ |
283 | | |
284 | | /* Add S_0 */ |
285 | 0 | cipher_block_xor (c->u_iv.iv, c->u_iv.iv, c->u_mode.ccm.s0, 16); |
286 | |
287 | 0 | wipememory (c->u_ctr.ctr, 16); |
288 | 0 | wipememory (c->u_mode.ccm.s0, 16); |
289 | 0 | wipememory (c->u_mode.ccm.macbuf, 16); |
290 | |
291 | 0 | if (burn) |
292 | 0 | _gcry_burn_stack (burn + sizeof(void *) * 5); |
293 | |
294 | 0 | c->marks.tag = 1; |
295 | 0 | } |
296 | |
297 | 0 | if (!check) |
298 | 0 | { |
299 | 0 | memcpy (outbuf, c->u_iv.iv, outbuflen); |
300 | 0 | return GPG_ERR_NO_ERROR; |
301 | 0 | } |
302 | 0 | else |
303 | 0 | { |
304 | 0 | return buf_eq_const(outbuf, c->u_iv.iv, outbuflen) ? |
305 | 0 | GPG_ERR_NO_ERROR : GPG_ERR_CHECKSUM; |
306 | 0 | } |
307 | 0 | } |
308 | | |
309 | | |
310 | | gcry_err_code_t |
311 | | _gcry_cipher_ccm_get_tag (gcry_cipher_hd_t c, unsigned char *outtag, |
312 | | size_t taglen) |
313 | 0 | { |
314 | 0 | return _gcry_cipher_ccm_tag (c, outtag, taglen, 0); |
315 | 0 | } |
316 | | |
317 | | |
318 | | gcry_err_code_t |
319 | | _gcry_cipher_ccm_check_tag (gcry_cipher_hd_t c, const unsigned char *intag, |
320 | | size_t taglen) |
321 | 0 | { |
322 | 0 | return _gcry_cipher_ccm_tag (c, (unsigned char *)intag, taglen, 1); |
323 | 0 | } |
324 | | |
325 | | |
326 | | gcry_err_code_t |
327 | | _gcry_cipher_ccm_encrypt (gcry_cipher_hd_t c, unsigned char *outbuf, |
328 | | size_t outbuflen, const unsigned char *inbuf, |
329 | | size_t inbuflen) |
330 | 0 | { |
331 | 0 | gcry_err_code_t err = 0; |
332 | 0 | unsigned int burn = 0; |
333 | 0 | unsigned int nburn; |
334 | |
335 | 0 | if (outbuflen < inbuflen) |
336 | 0 | return GPG_ERR_BUFFER_TOO_SHORT; |
337 | 0 | if (!c->u_mode.ccm.nonce || c->marks.tag || !c->u_mode.ccm.lengths || |
338 | 0 | c->u_mode.ccm.aadlen > 0) |
339 | 0 | return GPG_ERR_INV_STATE; |
340 | 0 | if (inbuflen > c->u_mode.ccm.encryptlen) |
341 | 0 | return GPG_ERR_INV_LENGTH; |
342 | | |
343 | 0 | while (inbuflen) |
344 | 0 | { |
345 | 0 | size_t currlen = inbuflen; |
346 | | |
347 | | /* Since checksumming is done before encryption, process input in 24KiB |
348 | | * chunks to keep data loaded in L1 cache for encryption. However, only |
349 | | * split the input if it is large enough that the last chunk does not |
350 | | * end up being short. */ |
351 | 0 | if (currlen > 32 * 1024) |
352 | 0 | currlen = 24 * 1024; |
353 | |
354 | 0 | c->u_mode.ccm.encryptlen -= currlen; |
355 | 0 | nburn = do_cbc_mac (c, inbuf, currlen, 0); |
356 | 0 | burn = nburn > burn ? nburn : burn; |
357 | |
358 | 0 | err = _gcry_cipher_ctr_encrypt (c, outbuf, outbuflen, inbuf, currlen); |
359 | 0 | if (err) |
360 | 0 | break; |
361 | | |
362 | 0 | outbuf += currlen; |
363 | 0 | inbuf += currlen; |
364 | 0 | outbuflen -= currlen; |
365 | 0 | inbuflen -= currlen; |
366 | 0 | } |
367 | |
368 | 0 | if (burn) |
369 | 0 | _gcry_burn_stack (burn + sizeof(void *) * 5); |
370 | 0 | return err; |
371 | 0 | } |
372 | | |
373 | | |
374 | | gcry_err_code_t |
375 | | _gcry_cipher_ccm_decrypt (gcry_cipher_hd_t c, unsigned char *outbuf, |
376 | | size_t outbuflen, const unsigned char *inbuf, |
377 | | size_t inbuflen) |
378 | 0 | { |
379 | 0 | gcry_err_code_t err = 0; |
380 | 0 | unsigned int burn = 0; |
381 | 0 | unsigned int nburn; |
382 | |
383 | 0 | if (outbuflen < inbuflen) |
384 | 0 | return GPG_ERR_BUFFER_TOO_SHORT; |
385 | 0 | if (!c->u_mode.ccm.nonce || c->marks.tag || !c->u_mode.ccm.lengths || |
386 | 0 | c->u_mode.ccm.aadlen > 0) |
387 | 0 | return GPG_ERR_INV_STATE; |
388 | 0 | if (inbuflen > c->u_mode.ccm.encryptlen) |
389 | 0 | return GPG_ERR_INV_LENGTH; |
390 | | |
391 | 0 | while (inbuflen) |
392 | 0 | { |
393 | 0 | size_t currlen = inbuflen; |
394 | | |
395 | | /* Since checksumming is done after decryption, process input in 24KiB |
396 | | * chunks to keep data loaded in L1 cache for checksumming. However, |
397 | | * only split the input if it is large enough that the last chunk |
398 | | * does not end up being short. */ |
399 | 0 | if (currlen > 32 * 1024) |
400 | 0 | currlen = 24 * 1024; |
401 | |
402 | 0 | err = _gcry_cipher_ctr_encrypt (c, outbuf, outbuflen, inbuf, currlen); |
403 | 0 | if (err) |
404 | 0 | break; |
405 | | |
406 | 0 | c->u_mode.ccm.encryptlen -= currlen; |
407 | 0 | nburn = do_cbc_mac (c, outbuf, currlen, 0); |
408 | 0 | burn = nburn > burn ? nburn : burn; |
409 | |
410 | 0 | outbuf += currlen; |
411 | 0 | inbuf += currlen; |
412 | 0 | outbuflen -= currlen; |
413 | 0 | inbuflen -= currlen; |
414 | 0 | } |
415 | |
416 | 0 | if (burn) |
417 | 0 | _gcry_burn_stack (burn + sizeof(void *) * 5); |
418 | 0 | return err; |
419 | 0 | } |
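These internal functions are not called directly by applications; they are reached through the public gcry_cipher_* interface. A rough sketch of the call sequence that drives set_nonce, set_lengths, authenticate, encrypt and get_tag (error handling omitted; key, nonce, aad, msg, out and tag are caller-provided placeholder buffers):

    #include <stddef.h>
    #include <stdint.h>
    #include <gcrypt.h>

    /* Sketch: CCM-encrypt msglen bytes of msg with aadlen bytes of AAD.  */
    static void
    ccm_encrypt_sketch (const unsigned char *key, const unsigned char *nonce,
                        size_t noncelen, const unsigned char *aad,
                        size_t aadlen, const unsigned char *msg,
                        unsigned char *out, size_t msglen,
                        unsigned char *tag, size_t taglen)
    {
      gcry_cipher_hd_t hd;
      uint64_t params[3];

      gcry_cipher_open (&hd, GCRY_CIPHER_AES128, GCRY_CIPHER_MODE_CCM, 0);
      gcry_cipher_setkey (hd, key, 16);
      gcry_cipher_setiv (hd, nonce, noncelen);    /* nonce: 7 to 13 bytes */

      params[0] = msglen;   /* bytes that will be encrypted */
      params[1] = aadlen;   /* bytes of additional data     */
      params[2] = taglen;   /* 4, 6, 8, 10, 12, 14 or 16    */
      gcry_cipher_ctl (hd, GCRYCTL_SET_CCM_LENGTHS, params, sizeof (params));

      gcry_cipher_authenticate (hd, aad, aadlen);
      gcry_cipher_encrypt (hd, out, msglen, msg, msglen);
      gcry_cipher_gettag (hd, tag, taglen);
      gcry_cipher_close (hd);
    }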