/src/libgcrypt/cipher/cipher-cfb.c
/* cipher-cfb.c - Generic CFB mode implementation
 * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003
 *               2005, 2007, 2008, 2009, 2011 Free Software Foundation, Inc.
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include "g10lib.h"
#include "cipher.h"
#include "bufhelp.h"
#include "./cipher-internal.h"


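/* CFB mode turns the underlying block cipher into a self-synchronizing
   stream cipher: the previous ciphertext block is encrypted and the
   result XORed with the plaintext, i.e. C_i = E_K(C_{i-1}) ^ P_i with
   the IV acting as C_0.  Partial blocks are allowed; c->unused counts
   how many keystream bytes are still available in c->u_iv.iv for the
   next call on the same handle.  */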
gcry_err_code_t
_gcry_cipher_cfb_encrypt (gcry_cipher_hd_t c,
                          unsigned char *outbuf, size_t outbuflen,
                          const unsigned char *inbuf, size_t inbuflen)
{
  unsigned char *ivp;
  gcry_cipher_encrypt_t enc_fn = c->spec->encrypt;
  size_t blocksize_shift = _gcry_blocksize_shift(c);
  size_t blocksize = 1 << blocksize_shift;
  size_t blocksize_x_2 = blocksize + blocksize;
  unsigned int burn, nburn;

  if (outbuflen < inbuflen)
    return GPG_ERR_BUFFER_TOO_SHORT;

  if ( inbuflen <= c->unused )
    {
      /* Short enough to be encoded by the remaining XOR mask. */
      /* XOR the input with the IV and store input into IV. */
      ivp = c->u_iv.iv + blocksize - c->unused;
      buf_xor_2dst(outbuf, ivp, inbuf, inbuflen);
      c->unused -= inbuflen;
      return 0;
    }

  burn = 0;

  if ( c->unused )
    {
      /* XOR the input with the IV and store input into IV. */
      inbuflen -= c->unused;
      ivp = c->u_iv.iv + blocksize - c->unused;
      buf_xor_2dst(outbuf, ivp, inbuf, c->unused);
      outbuf += c->unused;
      inbuf += c->unused;
      c->unused = 0;
    }

  /* Now we can process complete blocks.  We use a loop as long as we
     have at least 2 blocks and use conditions for the rest.  This
     also allows us to use a bulk encryption function if available.  */
  if (inbuflen >= blocksize_x_2 && c->bulk.cfb_enc)
    {
      size_t nblocks = inbuflen >> blocksize_shift;
      c->bulk.cfb_enc (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks);
      outbuf += nblocks << blocksize_shift;
      inbuf += nblocks << blocksize_shift;
      inbuflen -= nblocks << blocksize_shift;
    }
  else
    {
      while ( inbuflen >= blocksize_x_2 )
        {
          /* Encrypt the IV. */
          nburn = enc_fn ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
          burn = nburn > burn ? nburn : burn;
          /* XOR the input with the IV and store input into IV. */
          cipher_block_xor_2dst(outbuf, c->u_iv.iv, inbuf, blocksize);
          outbuf += blocksize;
          inbuf += blocksize;
          inbuflen -= blocksize;
        }
    }

  if ( inbuflen >= blocksize )
    {
      /* Save the current IV and then encrypt the IV. */
      cipher_block_cpy( c->lastiv, c->u_iv.iv, blocksize );
      nburn = enc_fn ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
      burn = nburn > burn ? nburn : burn;
      /* XOR the input with the IV and store input into IV. */
      cipher_block_xor_2dst(outbuf, c->u_iv.iv, inbuf, blocksize);
      outbuf += blocksize;
      inbuf += blocksize;
      inbuflen -= blocksize;
    }
  if ( inbuflen )
    {
      /* Save the current IV and then encrypt the IV. */
      cipher_block_cpy( c->lastiv, c->u_iv.iv, blocksize );
      nburn = enc_fn ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
      burn = nburn > burn ? nburn : burn;
      c->unused = blocksize;
      /* Apply the XOR. */
      c->unused -= inbuflen;
      buf_xor_2dst(outbuf, c->u_iv.iv, inbuf, inbuflen);
      outbuf += inbuflen;
      inbuf += inbuflen;
      inbuflen = 0;
    }
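
  /* Note: after a trailing partial block the leading bytes of
     c->u_iv.iv already hold ciphertext while its last c->unused bytes
     still hold unconsumed keystream; the next call consumes that
     keystream first (see the c->unused checks above).  */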

  if (burn > 0)
    _gcry_burn_stack (burn + 4 * sizeof(void *));

  return 0;
}


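/* CFB decryption also runs the block cipher in its forward direction
   (enc_fn): the keystream is E_K(C_{i-1}) for both directions and only
   the final XOR differs, P_i = E_K(C_{i-1}) ^ C_i.  Hence the
   *_xor_n_copy helpers below, which copy the ciphertext input into the
   IV while producing the plaintext, whereas the encrypt path uses the
   *_xor_2dst variants to store its own output into the IV.  */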
gcry_err_code_t
_gcry_cipher_cfb_decrypt (gcry_cipher_hd_t c,
                          unsigned char *outbuf, size_t outbuflen,
                          const unsigned char *inbuf, size_t inbuflen)
{
  unsigned char *ivp;
  gcry_cipher_encrypt_t enc_fn = c->spec->encrypt;
  size_t blocksize_shift = _gcry_blocksize_shift(c);
  size_t blocksize = 1 << blocksize_shift;
  size_t blocksize_x_2 = blocksize + blocksize;
  unsigned int burn, nburn;

  if (outbuflen < inbuflen)
    return GPG_ERR_BUFFER_TOO_SHORT;

  if (inbuflen <= c->unused)
    {
      /* Short enough to be encoded by the remaining XOR mask. */
      /* XOR the input with the IV and store input into IV. */
      ivp = c->u_iv.iv + blocksize - c->unused;
      buf_xor_n_copy(outbuf, ivp, inbuf, inbuflen);
      c->unused -= inbuflen;
      return 0;
    }

  burn = 0;

  if (c->unused)
    {
      /* XOR the input with the IV and store input into IV. */
      inbuflen -= c->unused;
      ivp = c->u_iv.iv + blocksize - c->unused;
      buf_xor_n_copy(outbuf, ivp, inbuf, c->unused);
      outbuf += c->unused;
      inbuf += c->unused;
      c->unused = 0;
    }

  /* Now we can process complete blocks.  We use a loop as long as we
     have at least 2 blocks and use conditions for the rest.  This
     also allows us to use a bulk decryption function if available.  */
  if (inbuflen >= blocksize_x_2 && c->bulk.cfb_dec)
    {
      size_t nblocks = inbuflen >> blocksize_shift;
      c->bulk.cfb_dec (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks);
      outbuf += nblocks << blocksize_shift;
      inbuf += nblocks << blocksize_shift;
      inbuflen -= nblocks << blocksize_shift;
    }
  else
    {
      while (inbuflen >= blocksize_x_2 )
        {
          /* Encrypt the IV. */
          nburn = enc_fn ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
          burn = nburn > burn ? nburn : burn;
          /* XOR the input with the IV and store input into IV. */
          cipher_block_xor_n_copy(outbuf, c->u_iv.iv, inbuf, blocksize);
          outbuf += blocksize;
          inbuf += blocksize;
          inbuflen -= blocksize;
        }
    }

  if (inbuflen >= blocksize )
    {
      /* Save the current IV and then encrypt the IV. */
      cipher_block_cpy ( c->lastiv, c->u_iv.iv, blocksize);
      nburn = enc_fn ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
      burn = nburn > burn ? nburn : burn;
      /* XOR the input with the IV and store input into IV. */
      cipher_block_xor_n_copy(outbuf, c->u_iv.iv, inbuf, blocksize);
      outbuf += blocksize;
      inbuf += blocksize;
      inbuflen -= blocksize;
    }

  if (inbuflen)
    {
      /* Save the current IV and then encrypt the IV. */
      cipher_block_cpy ( c->lastiv, c->u_iv.iv, blocksize );
      nburn = enc_fn ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
      burn = nburn > burn ? nburn : burn;
      c->unused = blocksize;
      /* Apply the XOR. */
      c->unused -= inbuflen;
      buf_xor_n_copy(outbuf, c->u_iv.iv, inbuf, inbuflen);
      outbuf += inbuflen;
      inbuf += inbuflen;
      inbuflen = 0;
    }

  if (burn > 0)
    _gcry_burn_stack (burn + 4 * sizeof(void *));

  return 0;
}


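/* CFB-8 (8-bit CFB) feeds back a single byte per step: every output
   byte costs one full block cipher invocation, after which the IV is
   shifted left by one byte and the new ciphertext byte is appended.
   This trades roughly a factor of `blocksize' in throughput for
   byte-granular self-synchronization.  */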
gcry_err_code_t
_gcry_cipher_cfb8_encrypt (gcry_cipher_hd_t c,
                           unsigned char *outbuf, size_t outbuflen,
                           const unsigned char *inbuf, size_t inbuflen)
{
  gcry_cipher_encrypt_t enc_fn = c->spec->encrypt;
  size_t blocksize = c->spec->blocksize;
  unsigned int burn, nburn;

  if (outbuflen < inbuflen)
    return GPG_ERR_BUFFER_TOO_SHORT;

  burn = 0;

  while ( inbuflen > 0)
    {
      int i;

      /* Encrypt the IV. */
      nburn = enc_fn ( &c->context.c, c->lastiv, c->u_iv.iv );
      burn = nburn > burn ? nburn : burn;

      outbuf[0] = c->lastiv[0] ^ inbuf[0];

      /* Shift the IV to the left by 8 bits (one byte). */
      for (i = 0; i < blocksize-1; i++)
        c->u_iv.iv[i] = c->u_iv.iv[i+1];

      /* Append the ciphertext byte to the IV. */
      c->u_iv.iv[blocksize-1] = outbuf[0];

      outbuf += 1;
      inbuf += 1;
      inbuflen -= 1;
    }

  if (burn > 0)
    _gcry_burn_stack (burn + 4 * sizeof(void *));

  return 0;
}


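/* CFB-8 decryption mirrors the encryption above; the incoming
   ciphertext byte is saved in `appendee' before the XOR so that the
   routine also works when inbuf and outbuf alias each other.  */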
gcry_err_code_t
_gcry_cipher_cfb8_decrypt (gcry_cipher_hd_t c,
                           unsigned char *outbuf, size_t outbuflen,
                           const unsigned char *inbuf, size_t inbuflen)
{
  gcry_cipher_encrypt_t enc_fn = c->spec->encrypt;
  size_t blocksize = c->spec->blocksize;
  unsigned int burn, nburn;
  unsigned char appendee;

  if (outbuflen < inbuflen)
    return GPG_ERR_BUFFER_TOO_SHORT;

  burn = 0;

  while (inbuflen > 0)
    {
      int i;

      /* Encrypt the IV. */
      nburn = enc_fn ( &c->context.c, c->lastiv, c->u_iv.iv );
      burn = nburn > burn ? nburn : burn;

      /* inbuf might equal outbuf, so keep a copy of the ciphertext
         byte so that we can append it to the IV later.  */
      appendee = inbuf[0];

      outbuf[0] = inbuf[0] ^ c->lastiv[0];

      /* Shift the IV to the left by 8 bits (one byte). */
      for (i = 0; i < blocksize-1; i++)
        c->u_iv.iv[i] = c->u_iv.iv[i+1];

      c->u_iv.iv[blocksize-1] = appendee;

      outbuf += 1;
      inbuf += 1;
      inbuflen -= 1;
    }

  if (burn > 0)
    _gcry_burn_stack (burn + 4 * sizeof(void *));

  return 0;
}
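
/* The routines above are not called directly by applications; they are
   reached through the public libgcrypt cipher API once a handle has
   been opened with GCRY_CIPHER_MODE_CFB (or GCRY_CIPHER_MODE_CFB8).
   The stand-alone sketch below (example only, not part of this file;
   error handling reduced to assertions, dummy all-zero key and IV, and
   a hypothetical function name) shows one plausible way to exercise
   this code path from application code.  */

#if 0  /* Example only -- not compiled into the library. */
#include <assert.h>
#include <string.h>
#include <gcrypt.h>

static void
example_cfb_roundtrip (void)
{
  gcry_cipher_hd_t hd;
  unsigned char key[16] = { 0 };   /* Dummy AES-128 key.  */
  unsigned char iv[16]  = { 0 };   /* Dummy IV, one block long.  */
  unsigned char buf[21] = "example plaintext...";  /* Not block-aligned. */
  unsigned char ref[sizeof buf];

  /* Initialize the library before first use.  */
  assert (gcry_check_version (NULL));
  memcpy (ref, buf, sizeof buf);

  /* Encrypt in place; CFB needs no padding, any length is fine.  */
  assert (!gcry_cipher_open (&hd, GCRY_CIPHER_AES128,
                             GCRY_CIPHER_MODE_CFB, 0));
  assert (!gcry_cipher_setkey (hd, key, sizeof key));
  assert (!gcry_cipher_setiv (hd, iv, sizeof iv));
  assert (!gcry_cipher_encrypt (hd, buf, sizeof buf, NULL, 0));
  gcry_cipher_close (hd);

  /* Decrypt with a fresh handle and the same key/IV and compare.  */
  assert (!gcry_cipher_open (&hd, GCRY_CIPHER_AES128,
                             GCRY_CIPHER_MODE_CFB, 0));
  assert (!gcry_cipher_setkey (hd, key, sizeof key));
  assert (!gcry_cipher_setiv (hd, iv, sizeof iv));
  assert (!gcry_cipher_decrypt (hd, buf, sizeof buf, NULL, 0));
  gcry_cipher_close (hd);

  assert (!memcmp (buf, ref, sizeof buf));
}
#endif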