/src/libgcrypt/cipher/cipher-cmac.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* cmac.c - CMAC, Cipher-based MAC. |
2 | | * Copyright (C) 2013,2018 Jussi Kivilinna <jussi.kivilinna@iki.fi> |
3 | | * |
4 | | * This file is part of Libgcrypt. |
5 | | * |
6 | | * Libgcrypt is free software; you can redistribute it and/or modify |
7 | | * it under the terms of the GNU Lesser General Public License as |
8 | | * published by the Free Software Foundation; either version 2.1 of |
9 | | * the License, or (at your option) any later version. |
10 | | * |
11 | | * Libgcrypt is distributed in the hope that it will be useful, |
12 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | | * GNU Lesser General Public License for more details. |
15 | | * |
16 | | * You should have received a copy of the GNU Lesser General Public |
17 | | * License along with this program; if not, see <http://www.gnu.org/licenses/>. |
18 | | */ |
19 | | |
20 | | #include <config.h> |
21 | | #include <stdio.h> |
22 | | #include <stdlib.h> |
23 | | #include <string.h> |
24 | | |
25 | | #include "g10lib.h" |
26 | | #include "cipher.h" |
27 | | #include "cipher-internal.h" |
28 | | #include "bufhelp.h" |
29 | | |
30 | | |
/* Record the stack depth NBURN reported by a cipher call, keeping the
 * running maximum in BURN; the accumulated maximum is burned once at
 * the end of the caller.  NBURN is evaluated exactly once.  */
#define set_burn(burn, nburn) do { \
  unsigned int __nburn = (nburn); \
  (burn) = (burn) > __nburn ? (burn) : __nburn; } while (0)
34 | | |
35 | | |
/* Feed INLEN bytes from INBUF into the CMAC state CTX, using cipher
 * handle C for the block encryptions.  Complete blocks are chained
 * CBC-MAC style (XOR into the IV, then encrypt the IV in place).  The
 * message's final block -- full or partial -- is always withheld in
 * ctx->macbuf, because _gcry_cmac_final must XOR it with subkey K1 or
 * K2 before the last encryption.
 *
 * Returns 0 on success, GPG_ERR_INV_STATE if the tag has already been
 * finalized, or GPG_ERR_INV_ARG if INBUF is NULL.  */
gcry_err_code_t
_gcry_cmac_write (gcry_cipher_hd_t c, gcry_cmac_context_t *ctx,
                  const byte * inbuf, size_t inlen)
{
  gcry_cipher_encrypt_t enc_fn = c->spec->encrypt;
  size_t blocksize_shift = _gcry_blocksize_shift(c);
  size_t blocksize = 1 << blocksize_shift;
  byte outbuf[MAX_BLOCKSIZE];
  unsigned int burn = 0;
  unsigned int nblocks;
  size_t n;

  /* No more data may be written once the tag has been computed.  */
  if (ctx->tag)
    return GPG_ERR_INV_STATE;

  if (!inbuf)
    return GPG_ERR_INV_ARG;

  if (inlen == 0)
    return 0;

  /* Last block is needed for cmac_final.  Note '<=': even an exactly
   * block-sized tail stays buffered rather than being processed.  */
  if (ctx->mac_unused + inlen <= blocksize)
    {
      buf_cpy (&ctx->macbuf[ctx->mac_unused], inbuf, inlen);
      ctx->mac_unused += inlen;
      inbuf += inlen;
      inlen -= inlen;

      return 0;
    }

  /* Complete and process a previously buffered partial block first.
   * Reaching here guarantees more input follows, so this block is
   * not the message's last one.  */
  if (ctx->mac_unused)
    {
      n = inlen;
      if (n > blocksize - ctx->mac_unused)
        n = blocksize - ctx->mac_unused;

      buf_cpy (&ctx->macbuf[ctx->mac_unused], inbuf, n);
      ctx->mac_unused += n;
      inbuf += n;
      inlen -= n;

      cipher_block_xor (ctx->u_iv.iv, ctx->u_iv.iv, ctx->macbuf, blocksize);
      set_burn (burn, enc_fn (&c->context.c, ctx->u_iv.iv, ctx->u_iv.iv));

      ctx->mac_unused = 0;
    }

  /* Bulk-process whole blocks when the cipher provides a CBC fast
   * path.  The nblocks adjustment keeps one block back whenever the
   * remaining input is an exact multiple of the block size, so the
   * final block always reaches cmac_final.  */
  if (c->bulk.cbc_enc && inlen > blocksize)
    {
      nblocks = inlen >> blocksize_shift;
      nblocks -= ((nblocks << blocksize_shift) == inlen);

      c->bulk.cbc_enc (&c->context.c, ctx->u_iv.iv, outbuf, inbuf, nblocks, 1);
      inbuf += nblocks << blocksize_shift;
      inlen -= nblocks << blocksize_shift;

      /* outbuf held ciphertext derived from the key; scrub it.  */
      wipememory (outbuf, sizeof (outbuf));
    }
  else
    /* Generic path: strict '>' leaves the last block unprocessed.  */
    while (inlen > blocksize)
      {
        cipher_block_xor (ctx->u_iv.iv, ctx->u_iv.iv, inbuf, blocksize);
        set_burn (burn, enc_fn (&c->context.c, ctx->u_iv.iv, ctx->u_iv.iv));
        inlen -= blocksize;
        inbuf += blocksize;
      }

  /* Make sure that last block is passed to cmac_final.  */
  if (inlen == 0)
    BUG ();

  n = inlen;
  if (n > blocksize - ctx->mac_unused)
    n = blocksize - ctx->mac_unused;

  buf_cpy (&ctx->macbuf[ctx->mac_unused], inbuf, n);
  ctx->mac_unused += n;
  inbuf += n;
  inlen -= n;

  if (burn)
    _gcry_burn_stack (burn + 4 * sizeof (void *));

  return 0;
}
123 | | |
124 | | |
/* Derive the CMAC subkeys K1 and K2 (NIST SP 800-38B) into
 * ctx->subkeys.  L = E_K(0^blocksize) is computed first; K1 is L
 * doubled in GF(2^(8*blocksize)) (left shift by one, conditionally
 * XORing the field constant Rb into the low byte), and K2 is K1
 * doubled the same way.
 *
 * Returns 0 on success or GPG_ERR_INV_CIPHER_MODE for block sizes
 * other than 8 or 16 bytes.  */
gcry_err_code_t
_gcry_cmac_generate_subkeys (gcry_cipher_hd_t c, gcry_cmac_context_t *ctx)
{
  const unsigned int blocksize = c->spec->blocksize;
  byte rb, carry, t, bi;
  unsigned int burn;
  int i, j;
  union
  {
    size_t _aligned;            /* Force natural alignment of buf.  */
    byte buf[MAX_BLOCKSIZE];
  } u;

  /* Tell compiler that we require a cipher with a 64bit or 128 bit block
   * length, to allow better optimization of this function.  */
  if (blocksize > 16 || blocksize < 8 || blocksize & (8 - 1))
    return GPG_ERR_INV_CIPHER_MODE;

  if (MAX_BLOCKSIZE < blocksize)
    BUG ();

  /* encrypt zero block */
  memset (u.buf, 0, blocksize);
  burn = c->spec->encrypt (&c->context.c, u.buf, u.buf);

  /* Currently supported blocksizes are 16 and 8.  Rb is the low byte
   * of the field polynomial: 0x87 for 128-bit, 0x1B for 64-bit.  */
  rb = blocksize == 16 ? 0x87 : 0x1B /* blocksize == 8 */ ;

  for (j = 0; j < 2; j++)
    {
      /* Generate subkeys K1 and K2: shift the whole block left by one
       * bit, propagating the carry from the least significant byte
       * (highest index) upward.  */
      carry = 0;
      for (i = blocksize - 1; i >= 0; i--)
        {
          bi = u.buf[i];
          t = carry | (bi << 1);
          carry = bi >> 7;
          u.buf[i] = t & 0xff;
          ctx->subkeys[j][i] = u.buf[i];
        }
      /* Reduce: if a bit was shifted out, XOR Rb into the low byte.  */
      u.buf[blocksize - 1] ^= carry ? rb : 0;
      ctx->subkeys[j][blocksize - 1] = u.buf[blocksize - 1];
    }

  /* u held key-derived material; scrub it before returning.  */
  wipememory (&u, sizeof (u));
  if (burn)
    _gcry_burn_stack (burn + 4 * sizeof (void *));

  return 0;
}
175 | | |
176 | | |
/* Finalize the CMAC computation: apply subkey K1 (complete final
 * block) or K2 with 10* padding (partial final block) to the data
 * buffered in ctx->macbuf, then run the last CBC step.  The resulting
 * tag is left in ctx->u_iv.iv.
 *
 * _gcry_cmac_write guarantees macbuf is non-empty when a message was
 * written; with no prior writes mac_unused is 0 and the K2/padding
 * branch produces the empty-message tag.
 *
 * Returns 0 on success or GPG_ERR_INV_CIPHER_MODE for block sizes
 * other than 8 or 16 bytes.  */
gcry_err_code_t
_gcry_cmac_final (gcry_cipher_hd_t c, gcry_cmac_context_t *ctx)
{
  const unsigned int blocksize = c->spec->blocksize;
  unsigned int count = ctx->mac_unused;
  unsigned int burn;
  byte *subkey;

  /* Tell compiler that we require a cipher with a 64bit or 128 bit block
   * length, to allow better optimization of this function.  */
  if (blocksize > 16 || blocksize < 8 || blocksize & (8 - 1))
    return GPG_ERR_INV_CIPHER_MODE;

  if (count == blocksize)
    subkey = ctx->subkeys[0];   /* K1: final block was complete.  */
  else
    {
      subkey = ctx->subkeys[1]; /* K2: pad with 0x80 then zeros.  */
      ctx->macbuf[count++] = 0x80;
      while (count < blocksize)
        ctx->macbuf[count++] = 0;
    }

  cipher_block_xor (ctx->macbuf, ctx->macbuf, subkey, blocksize);

  /* Last CBC step: tag = E_K(IV XOR padded block).  */
  cipher_block_xor (ctx->u_iv.iv, ctx->u_iv.iv, ctx->macbuf, blocksize);
  burn = c->spec->encrypt (&c->context.c, ctx->u_iv.iv, ctx->u_iv.iv);
  if (burn)
    _gcry_burn_stack (burn + 4 * sizeof (void *));

  ctx->mac_unused = 0;

  return 0;
}
211 | | |
212 | | |
213 | | static gcry_err_code_t |
214 | | cmac_tag (gcry_cipher_hd_t c, gcry_cmac_context_t *ctx, |
215 | | unsigned char *tag, size_t taglen, int check) |
216 | 0 | { |
217 | 0 | gcry_err_code_t ret; |
218 | |
|
219 | 0 | if (!tag || taglen == 0 || taglen > c->spec->blocksize) |
220 | 0 | return GPG_ERR_INV_ARG; |
221 | | |
222 | 0 | if (!ctx->tag) |
223 | 0 | { |
224 | 0 | ret = _gcry_cmac_final (c, ctx); |
225 | 0 | if (ret != 0) |
226 | 0 | return ret; |
227 | | |
228 | 0 | ctx->tag = 1; |
229 | 0 | } |
230 | | |
231 | 0 | if (!check) |
232 | 0 | { |
233 | 0 | memcpy (tag, ctx->u_iv.iv, taglen); |
234 | 0 | return GPG_ERR_NO_ERROR; |
235 | 0 | } |
236 | 0 | else |
237 | 0 | { |
238 | 0 | return buf_eq_const (tag, ctx->u_iv.iv, taglen) ? |
239 | 0 | GPG_ERR_NO_ERROR : GPG_ERR_CHECKSUM; |
240 | 0 | } |
241 | 0 | } |
242 | | |
243 | | |
/* Reset the CMAC context CTX for a new message under the same key:
 * clear the IV, buffered data and tag flag while preserving the
 * derived subkeys K1/K2 so they need not be regenerated.  */
void
_gcry_cmac_reset (gcry_cmac_context_t *ctx)
{
  char tmp_buf[sizeof(ctx->subkeys)];

  /* Only keep subkeys when resetting context.  The subkeys are staged
   * in a local buffer across the memset and the buffer is wiped
   * afterwards, as it holds key-derived secrets.  */

  buf_cpy (tmp_buf, ctx->subkeys, sizeof(ctx->subkeys));
  memset (ctx, 0, sizeof(*ctx));
  buf_cpy (ctx->subkeys, tmp_buf, sizeof(ctx->subkeys));
  wipememory (tmp_buf, sizeof(tmp_buf));
}
256 | | |
257 | | |
258 | | gcry_err_code_t |
259 | | _gcry_cipher_cmac_authenticate (gcry_cipher_hd_t c, |
260 | | const unsigned char *abuf, size_t abuflen) |
261 | 0 | { |
262 | 0 | if (abuflen > 0 && !abuf) |
263 | 0 | return GPG_ERR_INV_ARG; |
264 | | /* To support new blocksize, update cmac_generate_subkeys() then add new |
265 | | blocksize here. */ |
266 | 0 | if (c->spec->blocksize != 16 && c->spec->blocksize != 8) |
267 | 0 | return GPG_ERR_INV_CIPHER_MODE; |
268 | | |
269 | 0 | return _gcry_cmac_write (c, &c->u_mode.cmac, abuf, abuflen); |
270 | 0 | } |
271 | | |
272 | | |
/* Public entry point: finalize (if needed) and copy TAGLEN bytes of
 * the CMAC tag of handle C into OUTTAG.  Thin wrapper over cmac_tag
 * in extraction mode (check == 0).  */
gcry_err_code_t
_gcry_cipher_cmac_get_tag (gcry_cipher_hd_t c,
                           unsigned char *outtag, size_t taglen)
{
  return cmac_tag (c, &c->u_mode.cmac, outtag, taglen, 0);
}
279 | | |
280 | | |
/* Public entry point: finalize (if needed) and verify INTAG against
 * the computed CMAC tag of handle C.  Thin wrapper over cmac_tag in
 * verification mode (check == 1); the const cast is safe because the
 * check path never writes through the tag pointer.  */
gcry_err_code_t
_gcry_cipher_cmac_check_tag (gcry_cipher_hd_t c,
                             const unsigned char *intag, size_t taglen)
{
  return cmac_tag (c, &c->u_mode.cmac, (unsigned char *) intag, taglen, 1);
}
287 | | |
/* Public entry point: derive the CMAC subkeys K1/K2 for handle C from
 * the currently set cipher key.  Thin wrapper over
 * _gcry_cmac_generate_subkeys.  */
gcry_err_code_t
_gcry_cipher_cmac_set_subkeys (gcry_cipher_hd_t c)
{
  return _gcry_cmac_generate_subkeys (c, &c->u_mode.cmac);
}