/src/libgcrypt/cipher/bufhelp.h
Line | Count | Source (jump to first uncovered line) |
1 | | /* bufhelp.h - Some buffer manipulation helpers |
2 | | * Copyright (C) 2012-2017 Jussi Kivilinna <jussi.kivilinna@iki.fi> |
3 | | * |
4 | | * This file is part of Libgcrypt. |
5 | | * |
6 | | * Libgcrypt is free software; you can redistribute it and/or modify |
7 | | * it under the terms of the GNU Lesser General Public License as |
8 | | * published by the Free Software Foundation; either version 2.1 of |
9 | | * the License, or (at your option) any later version. |
10 | | * |
11 | | * Libgcrypt is distributed in the hope that it will be useful, |
12 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | | * GNU Lesser General Public License for more details. |
15 | | * |
16 | | * You should have received a copy of the GNU Lesser General Public |
17 | | * License along with this program; if not, see <http://www.gnu.org/licenses/>. |
18 | | */ |
19 | | #ifndef GCRYPT_BUFHELP_H |
20 | | #define GCRYPT_BUFHELP_H |
21 | | |
22 | | |
23 | | #include "g10lib.h" |
24 | | #include "bithelp.h" |
25 | | |
26 | | |
27 | | #undef BUFHELP_UNALIGNED_ACCESS |
28 | | #if defined(HAVE_GCC_ATTRIBUTE_PACKED) && \ |
29 | | defined(HAVE_GCC_ATTRIBUTE_ALIGNED) && \ |
30 | | defined(HAVE_GCC_ATTRIBUTE_MAY_ALIAS) |
31 | | /* Compiler is supports attributes needed for automatically issuing unaligned |
32 | | memory access instructions. |
33 | | */ |
34 | | # define BUFHELP_UNALIGNED_ACCESS 1 |
35 | | #endif |
36 | | |
37 | | |
38 | | #ifndef BUFHELP_UNALIGNED_ACCESS |
39 | | |
40 | | /* Functions for loading and storing unaligned u32 values of different |
41 | | endianness. */ |
42 | | static inline u32 buf_get_be32(const void *_buf) |
43 | | { |
44 | | const byte *in = _buf; |
45 | | return ((u32)in[0] << 24) | ((u32)in[1] << 16) | \ |
46 | | ((u32)in[2] << 8) | (u32)in[3]; |
47 | | } |
48 | | |
49 | | static inline u32 buf_get_le32(const void *_buf) |
50 | | { |
51 | | const byte *in = _buf; |
52 | | return ((u32)in[3] << 24) | ((u32)in[2] << 16) | \ |
53 | | ((u32)in[1] << 8) | (u32)in[0]; |
54 | | } |
55 | | |
56 | | static inline void buf_put_be32(void *_buf, u32 val) |
57 | | { |
58 | | byte *out = _buf; |
59 | | out[0] = val >> 24; |
60 | | out[1] = val >> 16; |
61 | | out[2] = val >> 8; |
62 | | out[3] = val; |
63 | | } |
64 | | |
65 | | static inline void buf_put_le32(void *_buf, u32 val) |
66 | | { |
67 | | byte *out = _buf; |
68 | | out[3] = val >> 24; |
69 | | out[2] = val >> 16; |
70 | | out[1] = val >> 8; |
71 | | out[0] = val; |
72 | | } |
73 | | |
74 | | |
75 | | /* Functions for loading and storing unaligned u64 values of different |
76 | | endianness. */ |
77 | | static inline u64 buf_get_be64(const void *_buf) |
78 | | { |
79 | | const byte *in = _buf; |
80 | | return ((u64)in[0] << 56) | ((u64)in[1] << 48) | \ |
81 | | ((u64)in[2] << 40) | ((u64)in[3] << 32) | \ |
82 | | ((u64)in[4] << 24) | ((u64)in[5] << 16) | \ |
83 | | ((u64)in[6] << 8) | (u64)in[7]; |
84 | | } |
85 | | |
86 | | static inline u64 buf_get_le64(const void *_buf) |
87 | | { |
88 | | const byte *in = _buf; |
89 | | return ((u64)in[7] << 56) | ((u64)in[6] << 48) | \ |
90 | | ((u64)in[5] << 40) | ((u64)in[4] << 32) | \ |
91 | | ((u64)in[3] << 24) | ((u64)in[2] << 16) | \ |
92 | | ((u64)in[1] << 8) | (u64)in[0]; |
93 | | } |
94 | | |
95 | | static inline void buf_put_be64(void *_buf, u64 val) |
96 | | { |
97 | | byte *out = _buf; |
98 | | out[0] = val >> 56; |
99 | | out[1] = val >> 48; |
100 | | out[2] = val >> 40; |
101 | | out[3] = val >> 32; |
102 | | out[4] = val >> 24; |
103 | | out[5] = val >> 16; |
104 | | out[6] = val >> 8; |
105 | | out[7] = val; |
106 | | } |
107 | | |
108 | | static inline void buf_put_le64(void *_buf, u64 val) |
109 | | { |
110 | | byte *out = _buf; |
111 | | out[7] = val >> 56; |
112 | | out[6] = val >> 48; |
113 | | out[5] = val >> 40; |
114 | | out[4] = val >> 32; |
115 | | out[3] = val >> 24; |
116 | | out[2] = val >> 16; |
117 | | out[1] = val >> 8; |
118 | | out[0] = val; |
119 | | } |
120 | | |
121 | | #else /*BUFHELP_UNALIGNED_ACCESS*/ |
122 | | |
123 | | typedef struct bufhelp_u32_s |
124 | | { |
125 | | u32 a; |
126 | | } __attribute__((packed, aligned(1), may_alias)) bufhelp_u32_t; |
127 | | |
128 | | /* Functions for loading and storing unaligned u32 values of different |
129 | | endianness. */ |
130 | | static inline u32 buf_get_be32(const void *_buf) |
131 | 0 | { |
132 | 0 | return be_bswap32(((const bufhelp_u32_t *)_buf)->a); |
133 | 0 | } |
134 | | |
135 | | static inline u32 buf_get_le32(const void *_buf) |
136 | 0 | { |
137 | 0 | return le_bswap32(((const bufhelp_u32_t *)_buf)->a); |
138 | 0 | } |
139 | | |
140 | | static inline void buf_put_be32(void *_buf, u32 val) |
141 | 0 | { |
142 | 0 | bufhelp_u32_t *out = _buf; |
143 | 0 | out->a = be_bswap32(val); |
144 | 0 | } |
145 | | |
146 | | static inline void buf_put_le32(void *_buf, u32 val) |
147 | 0 | { |
148 | 0 | bufhelp_u32_t *out = _buf; |
149 | 0 | out->a = le_bswap32(val); |
150 | 0 | } |
151 | | |
152 | | |
153 | | typedef struct bufhelp_u64_s |
154 | | { |
155 | | u64 a; |
156 | | } __attribute__((packed, aligned(1), may_alias)) bufhelp_u64_t; |
157 | | |
158 | | /* Functions for loading and storing unaligned u64 values of different |
159 | | endianness. */ |
160 | | static inline u64 buf_get_be64(const void *_buf) |
161 | 0 | { |
162 | 0 | return be_bswap64(((const bufhelp_u64_t *)_buf)->a); |
163 | 0 | } |
164 | | |
165 | | static inline u64 buf_get_le64(const void *_buf) |
166 | 0 | { |
167 | 0 | return le_bswap64(((const bufhelp_u64_t *)_buf)->a); |
168 | 0 | } |
169 | | |
170 | | static inline void buf_put_be64(void *_buf, u64 val) |
171 | 0 | { |
172 | 0 | bufhelp_u64_t *out = _buf; |
173 | 0 | out->a = be_bswap64(val); |
174 | 0 | } |
175 | | |
176 | | static inline void buf_put_le64(void *_buf, u64 val) |
177 | 0 | { |
178 | 0 | bufhelp_u64_t *out = _buf; |
179 | 0 | out->a = le_bswap64(val); |
180 | 0 | } |
181 | | |
182 | | #endif /*BUFHELP_UNALIGNED_ACCESS*/ |
183 | | |
184 | | |
185 | | /* Host-endian get/put macros */ |
186 | | #ifdef WORDS_BIGENDIAN |
187 | | # define buf_get_he32 buf_get_be32 |
188 | | # define buf_put_he32 buf_put_be32 |
189 | | # define buf_get_he64 buf_get_be64 |
190 | | # define buf_put_he64 buf_put_be64 |
191 | | #else |
192 | | # define buf_get_he32 buf_get_le32 |
193 | | # define buf_put_he32 buf_put_le32 |
194 | | # define buf_get_he64 buf_get_le64 |
195 | | # define buf_put_he64 buf_put_le64 |
196 | | #endif |
197 | | |
198 | | |
199 | | |
200 | | /* Optimized function for small buffer copying */ |
201 | | static inline void |
202 | | buf_cpy(void *_dst, const void *_src, size_t len) |
203 | 0 | { |
204 | 0 | byte *dst = _dst; |
205 | 0 | const byte *src = _src; |
206 | 0 |
|
207 | 0 | #if __GNUC__ >= 4 |
208 | 0 | if (!__builtin_constant_p (len)) |
209 | 0 | { |
210 | 0 | if (UNLIKELY(len == 0)) |
211 | 0 | return; |
212 | 0 | memcpy(_dst, _src, len); |
213 | 0 | return; |
214 | 0 | } |
215 | 0 | #endif |
216 | 0 |
|
217 | 0 | while (len >= sizeof(u64)) |
218 | 0 | { |
219 | 0 | buf_put_he64(dst, buf_get_he64(src)); |
220 | 0 | dst += sizeof(u64); |
221 | 0 | src += sizeof(u64); |
222 | 0 | len -= sizeof(u64); |
223 | 0 | } |
224 | 0 |
|
225 | 0 | if (len >= sizeof(u32)) |
226 | 0 | { |
227 | 0 | buf_put_he32(dst, buf_get_he32(src)); |
228 | 0 | dst += sizeof(u32); |
229 | 0 | src += sizeof(u32); |
230 | 0 | len -= sizeof(u32); |
231 | 0 | } |
232 | 0 |
|
233 | 0 | /* Handle tail. */ |
234 | 0 | for (; len; len--) |
235 | 0 | *dst++ = *src++; |
236 | 0 | } |
237 | | |
238 | | |
239 | | /* Optimized function for buffer xoring */ |
240 | | static inline void |
241 | | buf_xor(void *_dst, const void *_src1, const void *_src2, size_t len) |
242 | 0 | { |
243 | 0 | byte *dst = _dst; |
244 | 0 | const byte *src1 = _src1; |
245 | 0 | const byte *src2 = _src2; |
246 | 0 |
|
247 | 0 | while (len >= sizeof(u64)) |
248 | 0 | { |
249 | 0 | buf_put_he64(dst, buf_get_he64(src1) ^ buf_get_he64(src2)); |
250 | 0 | dst += sizeof(u64); |
251 | 0 | src1 += sizeof(u64); |
252 | 0 | src2 += sizeof(u64); |
253 | 0 | len -= sizeof(u64); |
254 | 0 | } |
255 | 0 |
|
256 | 0 | if (len > sizeof(u32)) |
257 | 0 | { |
258 | 0 | buf_put_he32(dst, buf_get_he32(src1) ^ buf_get_he32(src2)); |
259 | 0 | dst += sizeof(u32); |
260 | 0 | src1 += sizeof(u32); |
261 | 0 | src2 += sizeof(u32); |
262 | 0 | len -= sizeof(u32); |
263 | 0 | } |
264 | 0 |
|
265 | 0 | /* Handle tail. */ |
266 | 0 | for (; len; len--) |
267 | 0 | *dst++ = *src1++ ^ *src2++; |
268 | 0 | } |
269 | | |
270 | | |
271 | | /* Optimized function for buffer xoring with two destination buffers. Used |
272 | | mainly by CFB mode encryption. */ |
273 | | static inline void |
274 | | buf_xor_2dst(void *_dst1, void *_dst2, const void *_src, size_t len) |
275 | 0 | { |
276 | 0 | byte *dst1 = _dst1; |
277 | 0 | byte *dst2 = _dst2; |
278 | 0 | const byte *src = _src; |
279 | 0 |
|
280 | 0 | while (len >= sizeof(u64)) |
281 | 0 | { |
282 | 0 | u64 temp = buf_get_he64(dst2) ^ buf_get_he64(src); |
283 | 0 | buf_put_he64(dst2, temp); |
284 | 0 | buf_put_he64(dst1, temp); |
285 | 0 | dst2 += sizeof(u64); |
286 | 0 | dst1 += sizeof(u64); |
287 | 0 | src += sizeof(u64); |
288 | 0 | len -= sizeof(u64); |
289 | 0 | } |
290 | 0 |
|
291 | 0 | if (len >= sizeof(u32)) |
292 | 0 | { |
293 | 0 | u32 temp = buf_get_he32(dst2) ^ buf_get_he32(src); |
294 | 0 | buf_put_he32(dst2, temp); |
295 | 0 | buf_put_he32(dst1, temp); |
296 | 0 | dst2 += sizeof(u32); |
297 | 0 | dst1 += sizeof(u32); |
298 | 0 | src += sizeof(u32); |
299 | 0 | len -= sizeof(u32); |
300 | 0 | } |
301 | 0 |
|
302 | 0 | /* Handle tail. */ |
303 | 0 | for (; len; len--) |
304 | 0 | *dst1++ = (*dst2++ ^= *src++); |
305 | 0 | } |
306 | | |
307 | | |
308 | | /* Optimized function for combined buffer xoring and copying. Used by mainly |
309 | | CBC mode decryption. */ |
310 | | static inline void |
311 | | buf_xor_n_copy_2(void *_dst_xor, const void *_src_xor, void *_srcdst_cpy, |
312 | | const void *_src_cpy, size_t len) |
313 | 0 | { |
314 | 0 | byte *dst_xor = _dst_xor; |
315 | 0 | byte *srcdst_cpy = _srcdst_cpy; |
316 | 0 | const byte *src_xor = _src_xor; |
317 | 0 | const byte *src_cpy = _src_cpy; |
318 | 0 |
|
319 | 0 | while (len >= sizeof(u64)) |
320 | 0 | { |
321 | 0 | u64 temp = buf_get_he64(src_cpy); |
322 | 0 | buf_put_he64(dst_xor, buf_get_he64(srcdst_cpy) ^ buf_get_he64(src_xor)); |
323 | 0 | buf_put_he64(srcdst_cpy, temp); |
324 | 0 | dst_xor += sizeof(u64); |
325 | 0 | srcdst_cpy += sizeof(u64); |
326 | 0 | src_xor += sizeof(u64); |
327 | 0 | src_cpy += sizeof(u64); |
328 | 0 | len -= sizeof(u64); |
329 | 0 | } |
330 | 0 |
|
331 | 0 | if (len >= sizeof(u32)) |
332 | 0 | { |
333 | 0 | u32 temp = buf_get_he32(src_cpy); |
334 | 0 | buf_put_he32(dst_xor, buf_get_he32(srcdst_cpy) ^ buf_get_he32(src_xor)); |
335 | 0 | buf_put_he32(srcdst_cpy, temp); |
336 | 0 | dst_xor += sizeof(u32); |
337 | 0 | srcdst_cpy += sizeof(u32); |
338 | 0 | src_xor += sizeof(u32); |
339 | 0 | src_cpy += sizeof(u32); |
340 | 0 | len -= sizeof(u32); |
341 | 0 | } |
342 | 0 |
|
343 | 0 | /* Handle tail. */ |
344 | 0 | for (; len; len--) |
345 | 0 | { |
346 | 0 | byte temp = *src_cpy++; |
347 | 0 | *dst_xor++ = *srcdst_cpy ^ *src_xor++; |
348 | 0 | *srcdst_cpy++ = temp; |
349 | 0 | } |
350 | 0 | } |
351 | | |
352 | | |
353 | | /* Optimized function for combined buffer xoring and copying. Used by mainly |
354 | | CFB mode decryption. */ |
355 | | static inline void |
356 | | buf_xor_n_copy(void *_dst_xor, void *_srcdst_cpy, const void *_src, size_t len) |
357 | 0 | { |
358 | 0 | buf_xor_n_copy_2(_dst_xor, _src, _srcdst_cpy, _src, len); |
359 | 0 | } |
360 | | |
361 | | |
362 | | /* Constant-time compare of two buffers. Returns 1 if buffers are equal, |
363 | | and 0 if buffers differ. */ |
364 | | static inline int |
365 | | buf_eq_const(const void *_a, const void *_b, size_t len) |
366 | 0 | { |
367 | 0 | const byte *a = _a; |
368 | 0 | const byte *b = _b; |
369 | 0 | int ab, ba; |
370 | 0 | size_t i; |
371 | 0 |
|
372 | 0 | /* Constant-time compare. */ |
373 | 0 | for (i = 0, ab = 0, ba = 0; i < len; i++) |
374 | 0 | { |
375 | 0 | /* If a[i] != b[i], either ab or ba will be negative. */ |
376 | 0 | ab |= a[i] - b[i]; |
377 | 0 | ba |= b[i] - a[i]; |
378 | 0 | } |
379 | 0 |
|
380 | 0 | /* 'ab | ba' is negative when buffers are not equal. */ |
381 | 0 | return (ab | ba) >= 0; |
382 | 0 | } |
383 | | |
384 | | |
385 | | #endif /*GCRYPT_BUFHELP_H*/ |