/src/mbedtls/library/constant_time.c
Line | Count | Source |
1 | | /** |
2 | | * Constant-time functions |
3 | | * |
4 | | * Copyright The Mbed TLS Contributors |
5 | | * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later |
6 | | */ |
7 | | |
8 | | /* |
9 | | * The following functions are implemented without using comparison operators, as those |
10 | | * might be translated to branches by some compilers on some platforms. |
11 | | */ |
12 | | |
13 | | #include "common.h" |
14 | | #include "constant_time_internal.h" |
15 | | #include "mbedtls/constant_time.h" |
16 | | #include "mbedtls/error.h" |
17 | | #include "mbedtls/platform_util.h" |
18 | | |
19 | | #include <limits.h> |
20 | | #include <stdint.h> |
21 | | #include <string.h> |
22 | | |
23 | | #if !defined(MBEDTLS_CT_ASM) |
24 | | /* |
25 | | * Define an object with the value zero, such that the compiler cannot prove that it |
26 | | * has the value zero (because it is volatile, it "may be modified in ways unknown to |
27 | | * the implementation"). |
28 | | */ |
29 | | volatile mbedtls_ct_uint_t mbedtls_ct_zero = 0; |
30 | | #endif |
31 | | |
32 | | /* |
33 | | * Define MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS where assembly is present to |
34 | | * perform fast unaligned access to volatile data. |
35 | | * |
36 | | * This is needed because mbedtls_get_unaligned_uintXX etc don't support volatile |
37 | | * memory accesses. |
38 | | * |
39 | | * Some of these definitions could be moved into alignment.h but for now they are |
40 | | * only used here. |
41 | | */ |
42 | | #if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS) && \ |
43 | | ((defined(MBEDTLS_CT_ARM_ASM) && (UINTPTR_MAX == 0xfffffffful)) || \ |
44 | | defined(MBEDTLS_CT_AARCH64_ASM)) |
45 | | /* We check pointer sizes to avoid issues with them not matching register size requirements */ |
46 | | #define MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS |
47 | | |
/* Perform a volatile, possibly-unaligned 32-bit little-endian-order load from p.
 *
 * Used by the constant-time code below so that the compiler cannot elide or
 * reorder the reads (the pointee is volatile-qualified).
 */
static inline uint32_t mbedtls_get_unaligned_volatile_uint32(volatile const unsigned char *p)
{
    /* This is UB, even where it's safe:
     *    return *((volatile uint32_t*)p);
     * so instead the same thing is expressed in assembly below.
     */
    uint32_t r;
#if defined(MBEDTLS_CT_ARM_ASM)
    asm volatile ("ldr %0, [%1]" : "=r" (r) : "r" (p) :);
#elif defined(MBEDTLS_CT_AARCH64_ASM)
    /* %w0 selects the 32-bit view of the 64-bit destination register. */
    asm volatile ("ldr %w0, [%1]" : "=r" (r) : MBEDTLS_ASM_AARCH64_PTR_CONSTRAINT(p) :);
#else
#error "No assembly defined for mbedtls_get_unaligned_volatile_uint32"
#endif
    return r;
}
64 | | #endif /* defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS) && |
65 | | (defined(MBEDTLS_CT_ARM_ASM) || defined(MBEDTLS_CT_AARCH64_ASM)) */ |
66 | | |
/* Constant-time buffer comparison.
 *
 * \param a  First buffer (n bytes, fully read).
 * \param b  Second buffer (n bytes, fully read).
 * \param n  Number of bytes to compare.
 *
 * \return 0 if the buffers are equal, non-zero otherwise. All n bytes are
 *         always read, so execution time does not depend on where (or
 *         whether) the buffers differ.
 */
int mbedtls_ct_memcmp(const void *a,
                      const void *b,
                      size_t n)
{
    size_t i = 0;
    /*
     * `A` and `B` are cast to volatile to ensure that the compiler
     * generates code that always fully reads both buffers.
     * Otherwise it could generate a test to exit early if `diff` has all
     * bits set early in the loop.
     */
    volatile const unsigned char *A = (volatile const unsigned char *) a;
    volatile const unsigned char *B = (volatile const unsigned char *) b;
    uint32_t diff = 0;

#if defined(MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS)
    /* Fast path: fold 4 bytes at a time while at least 4 bytes remain. */
    for (; (i + 4) <= n; i += 4) {
        uint32_t x = mbedtls_get_unaligned_volatile_uint32(A + i);
        uint32_t y = mbedtls_get_unaligned_volatile_uint32(B + i);
        diff |= x ^ y;
    }
#endif

    /* Byte-by-byte tail (or the whole buffer when the fast path is absent). */
    for (; i < n; i++) {
        /* Read volatile data in order before computing diff.
         * This avoids IAR compiler warning:
         * 'the order of volatile accesses is undefined ..' */
        unsigned char x = A[i], y = B[i];
        diff |= x ^ y;
    }

#if (INT_MAX < INT32_MAX)
    /* We don't support int smaller than 32-bits, but if someone tried to build
     * with this configuration, there is a risk that, for differing data, the
     * only bits set in diff are in the top 16-bits, and would be lost by a
     * simple cast from uint32 to int.
     * This would have significant security implications, so protect against it. */
#error "mbedtls_ct_memcmp() requires minimum 32-bit ints"
#else
    /* The bit-twiddling ensures that when we cast uint32_t to int, we are casting
     * a value that is in the range 0..INT_MAX - a value larger than this would
     * result in implementation defined behaviour.
     *
     * This ensures that the value returned by the function is non-zero iff
     * diff is non-zero.
     */
    return (int) ((diff & 0xffff) | (diff >> 16));
#endif
}
117 | | |
118 | | #if defined(MBEDTLS_NIST_KW_C) |
119 | | |
/* Constant-time comparison of a sub-range of two buffers.
 *
 * Compares bytes in the index range [skip_head, n - skip_tail) of a and b,
 * but always reads all n bytes of both buffers so that the memory access
 * pattern does not reveal skip_head or skip_tail.
 *
 * \param a          First buffer (n bytes, fully read).
 * \param b          Second buffer (n bytes, fully read).
 * \param n          Total size of each buffer in bytes.
 * \param skip_head  Number of leading bytes to exclude from the comparison.
 * \param skip_tail  Number of trailing bytes to exclude from the comparison.
 *                   (Assumes skip_head + skip_tail <= n — callers must
 *                   guarantee this; it is not checked here.)
 *
 * \return 0 if the compared ranges are equal, non-zero otherwise.
 */
int mbedtls_ct_memcmp_partial(const void *a,
                              const void *b,
                              size_t n,
                              size_t skip_head,
                              size_t skip_tail)
{
    unsigned int diff = 0;

    /* volatile forces the compiler to actually read every byte of both
     * buffers, mirroring mbedtls_ct_memcmp() above. */
    volatile const unsigned char *A = (volatile const unsigned char *) a;
    volatile const unsigned char *B = (volatile const unsigned char *) b;

    /* First index past the range that participates in the comparison. */
    size_t valid_end = n - skip_tail;

    for (size_t i = 0; i < n; i++) {
        unsigned char x = A[i], y = B[i];
        unsigned int d = x ^ y;
        /* valid is all-ones when skip_head <= i < valid_end, all-zero
         * otherwise; the conditional select keeps the loop branch-free. */
        mbedtls_ct_condition_t valid = mbedtls_ct_bool_and(mbedtls_ct_uint_ge(i, skip_head),
                                                           mbedtls_ct_uint_lt(i, valid_end));
        diff |= mbedtls_ct_uint_if_else_0(valid, d);
    }

    /* Since we go byte-by-byte, the only bits set will be in the bottom 8 bits, so the
     * cast from uint to int is safe. */
    return (int) diff;
}
145 | | |
146 | | #endif |
147 | | |
148 | | #if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT) |
149 | | |
/* Shift the buffer [start, start + total) left by offset bytes in constant
 * time, filling the vacated tail bytes with zero.
 *
 * The memory access pattern (total full passes over the buffer) is
 * independent of offset; only the data written depends on it.
 *
 * \param start   Buffer to shift (total bytes, read and written in place).
 * \param total   Size of the buffer in bytes.
 * \param offset  Number of bytes to shift left by (expected <= total —
 *                not checked here; callers must guarantee it).
 */
void mbedtls_ct_memmove_left(void *start, size_t total, size_t offset)
{
    volatile unsigned char *buf = start;
    for (size_t i = 0; i < total; i++) {
        /* no_op is all-ones for the first `total - offset` outer passes. */
        mbedtls_ct_condition_t no_op = mbedtls_ct_uint_gt(total - offset, i);
        /* The first `total - offset` passes are a no-op. The last
         * `offset` passes shift the data one byte to the left and
         * zero out the last byte. */
        for (size_t n = 0; n < total - 1; n++) {
            unsigned char current = buf[n];
            unsigned char next = buf[n+1];
            buf[n] = mbedtls_ct_uint_if(no_op, current, next);
        }
        buf[total-1] = mbedtls_ct_uint_if_else_0(no_op, buf[total-1]);
    }
}
166 | | |
167 | | #endif /* MBEDTLS_PKCS1_V15 && MBEDTLS_RSA_C && ! MBEDTLS_RSA_ALT */ |
168 | | |
/* Conditional memcpy without branches.
 *
 * Equivalent to `memcpy(dest, condition ? src1 : src2, len)`, implemented
 * with bit masks so that neither the branch taken nor the memory access
 * pattern depends on condition.
 *
 * \param condition  Selector; expected to be MBEDTLS_CT_TRUE (all-ones) or
 *                   MBEDTLS_CT_FALSE (all-zero) so it can be used as a mask.
 * \param dest       Destination buffer (len bytes written).
 * \param src1       Source used when condition is true (len bytes read).
 * \param src2       Source used when condition is false (len bytes read);
 *                   may be NULL, in which case dest is left unchanged when
 *                   condition is false.
 * \param len        Number of bytes to process.
 */
void mbedtls_ct_memcpy_if(mbedtls_ct_condition_t condition,
                          unsigned char *dest,
                          const unsigned char *src1,
                          const unsigned char *src2,
                          size_t len)
{
    /* mask selects src1 bytes, not_mask selects src2 bytes. not_mask is
     * derived through mbedtls_ct_compiler_opaque() so the compiler cannot
     * recombine the two masks into a branch. */
#if defined(MBEDTLS_CT_SIZE_64)
    const uint64_t mask     = (uint64_t) condition;
    const uint64_t not_mask = (uint64_t) ~mbedtls_ct_compiler_opaque(condition);
#else
    const uint32_t mask     = (uint32_t) condition;
    const uint32_t not_mask = (uint32_t) ~mbedtls_ct_compiler_opaque(condition);
#endif

    /* If src2 is NULL, setup src2 so that we read from the destination address.
     *
     * This means that if src2 == NULL && condition is false, the result will be a
     * no-op because we read from dest and write the same data back into dest.
     */
    if (src2 == NULL) {
        src2 = dest;
    }

    /* dest[i] = c1 == c2 ? src[i] : dest[i] */
    size_t i = 0;
#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)
    /* Word-at-a-time fast path: mask-and-merge one register's worth per
     * iteration. */
#if defined(MBEDTLS_CT_SIZE_64)
    for (; (i + 8) <= len; i += 8) {
        uint64_t a = mbedtls_get_unaligned_uint64(src1 + i) & mask;
        uint64_t b = mbedtls_get_unaligned_uint64(src2 + i) & not_mask;
        mbedtls_put_unaligned_uint64(dest + i, a | b);
    }
#else
    for (; (i + 4) <= len; i += 4) {
        uint32_t a = mbedtls_get_unaligned_uint32(src1 + i) & mask;
        uint32_t b = mbedtls_get_unaligned_uint32(src2 + i) & not_mask;
        mbedtls_put_unaligned_uint32(dest + i, a | b);
    }
#endif /* defined(MBEDTLS_CT_SIZE_64) */
#endif /* MBEDTLS_EFFICIENT_UNALIGNED_ACCESS */
    /* Byte-by-byte tail (or whole buffer when the fast path is absent). */
    for (; i < len; i++) {
        dest[i] = (src1[i] & mask) | (src2[i] & not_mask);
    }
}
213 | | |
214 | | void mbedtls_ct_memcpy_offset(unsigned char *dest, |
215 | | const unsigned char *src, |
216 | | size_t offset, |
217 | | size_t offset_min, |
218 | | size_t offset_max, |
219 | | size_t len) |
220 | 216 | { |
221 | 216 | size_t offsetval; |
222 | | |
223 | 34.6k | for (offsetval = offset_min; offsetval <= offset_max; offsetval++) { |
224 | 34.4k | mbedtls_ct_memcpy_if(mbedtls_ct_uint_eq(offsetval, offset), dest, src + offsetval, NULL, |
225 | 34.4k | len); |
226 | 34.4k | } |
227 | 216 | } |
228 | | |
229 | | #if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT) |
230 | | |
/* Set len bytes of buf to zero if condition is true, leave them unchanged
 * otherwise, without branching on condition.
 *
 * \param condition  Selector; expected to be MBEDTLS_CT_TRUE (all-ones) or
 *                   MBEDTLS_CT_FALSE (all-zero). When true, ~condition gives
 *                   an all-zero mask that clears every byte; when false, the
 *                   all-ones mask preserves the buffer.
 * \param buf        Buffer to conditionally zeroize (len bytes read/written).
 * \param len        Number of bytes to process.
 */
void mbedtls_ct_zeroize_if(mbedtls_ct_condition_t condition, void *buf, size_t len)
{
    uint32_t mask = (uint32_t) ~condition;
    uint8_t *p = (uint8_t *) buf;
    size_t i = 0;
#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)
    /* Fast path: AND the mask over 4 bytes at a time. */
    for (; (i + 4) <= len; i += 4) {
        mbedtls_put_unaligned_uint32((void *) (p + i),
                                     mbedtls_get_unaligned_uint32((void *) (p + i)) & mask);
    }
#endif
    /* Byte-by-byte tail. */
    for (; i < len; i++) {
        p[i] = p[i] & mask;
    }
}
246 | | |
247 | | #endif /* defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT) */ |