/src/cryptsetup/lib/crypto_backend/crypto_storage.c
Line | Count | Source |
1 | | // SPDX-License-Identifier: LGPL-2.1-or-later |
2 | | /* |
3 | | * Generic wrapper for storage encryption modes and Initial Vectors |
4 | | * (reimplementation of some functions from Linux dm-crypt kernel) |
5 | | * |
6 | | * Copyright (C) 2014-2025 Milan Broz |
7 | | */ |
8 | | |
9 | | #include <stdio.h> |
10 | | #include <stdlib.h> |
11 | | #include <errno.h> |
12 | | #include <strings.h> |
13 | | #include "bitops.h" |
14 | | #include "crypto_backend.h" |
15 | | |
#define SECTOR_SHIFT 9		/* log2(512): size of the classic 512-byte sector */
#define MAX_CAPI_LEN 64		/* buffer size for names parsed from a "capi:" spec */
#define MAX_CAPI_LEN_STR "63"	/* MAX_CAPI_LEN - 1, as an sscanf field-width string */
19 | | |
20 | | /* |
21 | | * Internal IV helper |
22 | | * IV documentation: https://gitlab.com/cryptsetup/cryptsetup/wikis/DMCrypt |
23 | | */ |
struct crypt_sector_iv {
	enum { IV_NONE, IV_NULL, IV_PLAIN, IV_PLAIN64, IV_ESSIV, IV_BENBI, IV_PLAIN64BE, IV_EBOIV } type;
	int iv_size;			/* IV length in bytes; 0 for IV_NONE */
	char *iv;			/* scratch buffer of iv_size bytes, filled by crypt_sector_iv_generate() */
	struct crypt_cipher *cipher;	/* auxiliary ECB cipher context; used only by IV_ESSIV and IV_EBOIV */
	int shift;			/* sector-number shift; used only by IV_BENBI and IV_EBOIV */
};
31 | | |
/* Block encryption storage context */
struct crypt_storage {
	size_t sector_size;		/* encryption sector size in bytes; power of two in [512, 4096] */
	unsigned iv_shift;		/* log2(sector_size) - SECTOR_SHIFT when large_iv is set, else 0 */
	struct crypt_cipher *cipher;	/* main data cipher context */
	struct crypt_sector_iv cipher_iv; /* per-sector IV generator state */
};
39 | | |
/* Floor of the base-2 logarithm; by convention int_log2(0) == 0. */
static int int_log2(unsigned int x)
{
	int bits = 0;

	x >>= 1;
	while (x) {
		x >>= 1;
		bits++;
	}

	return bits;
}
47 | | |
/*
 * Initialize the per-sector IV generator state in *ctx.
 *
 * cipher_name/mode_name select the block cipher and chaining mode,
 * iv_name names the IV algorithm (must be NULL for ECB and cipher_null),
 * key/key_length is the volume key (consumed by essiv and eboiv),
 * sector_size is the encryption sector size (consumed by eboiv).
 *
 * Returns 0 on success, -ENOENT for an unknown IV/hash/mode combination,
 * -EINVAL for an invalid argument combination, -ENOMEM on allocation
 * failure, or an error propagated from the hash/cipher backend.
 */
static int crypt_sector_iv_init(struct crypt_sector_iv *ctx,
	const char *cipher_name, const char *mode_name,
	const char *iv_name, const void *key, size_t key_length,
	size_t sector_size)
{
	int r;

	memset(ctx, 0, sizeof(*ctx));

	/* Any mode except ECB must report an IV of at least 64 bits. */
	ctx->iv_size = crypt_cipher_ivsize(cipher_name, mode_name);
	if (ctx->iv_size < 0 || (strcmp(mode_name, "ecb") && ctx->iv_size < 8))
		return -ENOENT;

	if (!strcmp(cipher_name, "cipher_null") ||
	    !strcmp(mode_name, "ecb")) {
		/* These take no IV; specifying one is a caller error. */
		if (iv_name)
			return -EINVAL;
		ctx->type = IV_NONE;
		ctx->iv_size = 0;
		return 0;
	} else if (!iv_name) {
		return -EINVAL;
	} else if (!strcasecmp(iv_name, "null")) {
		ctx->type = IV_NULL;
	} else if (!strcasecmp(iv_name, "plain64")) {
		ctx->type = IV_PLAIN64;
	} else if (!strcasecmp(iv_name, "plain64be")) {
		ctx->type = IV_PLAIN64BE;
	} else if (!strcasecmp(iv_name, "plain")) {
		ctx->type = IV_PLAIN;
	} else if (!strncasecmp(iv_name, "essiv:", 6)) {
		/* ESSIV ("essiv:<hash>"): sector numbers are encrypted with a
		 * salt key derived by hashing the volume key. */
		struct crypt_hash *h = NULL;
		char *hash_name = strchr(iv_name, ':');
		int hash_size;
		char tmp[256];

		/* Cannot trigger after the strncasecmp above; kept as a guard. */
		if (!hash_name)
			return -EINVAL;

		hash_size = crypt_hash_size(++hash_name);
		if (hash_size < 0)
			return -ENOENT;

		if ((unsigned)hash_size > sizeof(tmp))
			return -EINVAL;

		if (crypt_hash_init(&h, hash_name))
			return -EINVAL;

		/* Salt key = hash(volume key). */
		r = crypt_hash_write(h, key, key_length);
		if (r) {
			crypt_hash_destroy(h);
			return r;
		}

		r = crypt_hash_final(h, tmp, hash_size);
		crypt_hash_destroy(h);
		if (r) {
			crypt_backend_memzero(tmp, sizeof(tmp));
			return r;
		}

		/* Auxiliary ECB cipher keyed with the salt key; tmp holds key
		 * material, so it is wiped with a non-elidable memzero. */
		r = crypt_cipher_init(&ctx->cipher, cipher_name, "ecb",
				      tmp, hash_size);
		crypt_backend_memzero(tmp, sizeof(tmp));
		if (r)
			return r;

		ctx->type = IV_ESSIV;
	} else if (!strncasecmp(iv_name, "benbi", 5)) {
		/* BENBI: big-endian narrow-block count; shift scales 512-byte
		 * sectors to cipher-block-sized units. */
		int log = int_log2(ctx->iv_size);
		if (log > SECTOR_SHIFT)
			return -EINVAL;

		ctx->type = IV_BENBI;
		ctx->shift = SECTOR_SHIFT - log;
	} else if (!strncasecmp(iv_name, "eboiv", 5)) {
		/* EBOIV: encrypted byte offset, keyed with the volume key
		 * itself (BitLocker-compatible scheme). */
		r = crypt_cipher_init(&ctx->cipher, cipher_name, "ecb",
				      key, key_length);
		if (r)
			return r;

		ctx->type = IV_EBOIV;
		ctx->shift = int_log2(sector_size);
	} else
		return -ENOENT;

	ctx->iv = malloc(ctx->iv_size);
	if (!ctx->iv)
		return -ENOMEM;

	return 0;
}
141 | | |
142 | | static int crypt_sector_iv_generate(struct crypt_sector_iv *ctx, uint64_t sector) |
143 | 0 | { |
144 | 0 | uint64_t val, *u64_iv; |
145 | 0 | uint32_t *u32_iv; |
146 | |
|
147 | 0 | switch (ctx->type) { |
148 | 0 | case IV_NONE: |
149 | 0 | break; |
150 | 0 | case IV_NULL: |
151 | 0 | memset(ctx->iv, 0, ctx->iv_size); |
152 | 0 | break; |
153 | 0 | case IV_PLAIN: |
154 | 0 | memset(ctx->iv, 0, ctx->iv_size); |
155 | 0 | u32_iv = (void *)ctx->iv; |
156 | 0 | *u32_iv = cpu_to_le32(sector & 0xffffffff); |
157 | 0 | break; |
158 | 0 | case IV_PLAIN64: |
159 | 0 | memset(ctx->iv, 0, ctx->iv_size); |
160 | 0 | u64_iv = (void *)ctx->iv; |
161 | 0 | *u64_iv = cpu_to_le64(sector); |
162 | 0 | break; |
163 | 0 | case IV_PLAIN64BE: |
164 | 0 | memset(ctx->iv, 0, ctx->iv_size); |
165 | | /* iv_size is at least of size u64; usually it is 16 bytes */ |
166 | 0 | u64_iv = (void *)&ctx->iv[ctx->iv_size - sizeof(uint64_t)]; |
167 | 0 | *u64_iv = cpu_to_be64(sector); |
168 | 0 | break; |
169 | 0 | case IV_ESSIV: |
170 | 0 | memset(ctx->iv, 0, ctx->iv_size); |
171 | 0 | u64_iv = (void *)ctx->iv; |
172 | 0 | *u64_iv = cpu_to_le64(sector); |
173 | 0 | return crypt_cipher_encrypt(ctx->cipher, |
174 | 0 | ctx->iv, ctx->iv, ctx->iv_size, NULL, 0); |
175 | 0 | break; |
176 | 0 | case IV_BENBI: |
177 | 0 | memset(ctx->iv, 0, ctx->iv_size); |
178 | 0 | val = cpu_to_be64((sector << ctx->shift) + 1); |
179 | 0 | memcpy(ctx->iv + ctx->iv_size - sizeof(val), &val, sizeof(val)); |
180 | 0 | break; |
181 | 0 | case IV_EBOIV: |
182 | 0 | memset(ctx->iv, 0, ctx->iv_size); |
183 | 0 | u64_iv = (void *)ctx->iv; |
184 | 0 | *u64_iv = cpu_to_le64(sector << ctx->shift); |
185 | 0 | return crypt_cipher_encrypt(ctx->cipher, |
186 | 0 | ctx->iv, ctx->iv, ctx->iv_size, NULL, 0); |
187 | 0 | break; |
188 | 0 | default: |
189 | 0 | return -EINVAL; |
190 | 0 | } |
191 | | |
192 | 0 | return 0; |
193 | 0 | } |
194 | | |
195 | | static void crypt_sector_iv_destroy(struct crypt_sector_iv *ctx) |
196 | 0 | { |
197 | 0 | if (ctx->type == IV_ESSIV || ctx->type == IV_EBOIV) |
198 | 0 | crypt_cipher_destroy(ctx->cipher); |
199 | |
|
200 | 0 | if (ctx->iv) { |
201 | 0 | memset(ctx->iv, 0, ctx->iv_size); |
202 | 0 | free(ctx->iv); |
203 | 0 | } |
204 | |
|
205 | 0 | memset(ctx, 0, sizeof(*ctx)); |
206 | 0 | } |
207 | | |
208 | | /* Block encryption storage wrappers */ |
209 | | |
/*
 * Allocate and initialize a block encryption storage context.
 *
 * cipher may be a plain name ("aes") with cipher_mode like "xts-plain64",
 * or a kernel crypto API spec ("capi:xts(aes)"), in which case the mode
 * is combined with cipher_mode — presumably the IV part; TODO confirm
 * against callers. On success *ctx owns the new context; the caller
 * releases it with crypt_storage_destroy().
 *
 * Returns 0 on success, -EINVAL for bad sector size or unparsable capi
 * spec, -ENOMEM on allocation failure, or a backend init error.
 */
int crypt_storage_init(struct crypt_storage **ctx,
		       size_t sector_size,
		       const char *cipher,
		       const char *cipher_mode,
		       const void *key, size_t key_length,
		       bool large_iv)
{
	struct crypt_storage *s;
	char cipher_name[MAX_CAPI_LEN], mode_name[MAX_CAPI_LEN], mode_tmp[MAX_CAPI_LEN];
	char *cipher_iv = NULL;
	int r;

	/* Sector size must be a power of two in [512, 4096]. */
	if (sector_size < (1 << SECTOR_SHIFT) ||
	    sector_size > (1 << (SECTOR_SHIFT + 3)) ||
	    sector_size & (sector_size - 1))
		return -EINVAL;

	/* Convert from capi mode */
	if (!strncmp(cipher, "capi:", 5)) {
		/* "capi:mode(cipher)" -> mode_tmp="mode", cipher_name="cipher". */
		r = sscanf(cipher, "capi:%" MAX_CAPI_LEN_STR "[^(](%" MAX_CAPI_LEN_STR "[^)])", mode_tmp, cipher_name);
		if (r != 2)
			return -EINVAL;
		r = snprintf(mode_name, sizeof(mode_name), "%s-%s", mode_tmp, cipher_mode);
		if (r < 0 || (size_t)r >= sizeof(mode_name))
			return -EINVAL;
	} else {
		/* strncpy does not guarantee termination; terminate manually.
		 * Overlong names are silently truncated and will fail later
		 * in crypt_cipher_init(). */
		strncpy(cipher_name, cipher, sizeof(cipher_name));
		cipher_name[sizeof(cipher_name) - 1] = 0;
		strncpy(mode_name, cipher_mode, sizeof(mode_name));
		mode_name[sizeof(mode_name) - 1] = 0;
	}

	/* Remove IV if present */
	cipher_iv = strchr(mode_name, '-');
	if (cipher_iv) {
		/* Split "mode-iv" in place: mode_name keeps the mode,
		 * cipher_iv points at the IV name. */
		*cipher_iv = '\0';
		cipher_iv++;
	}

	s = malloc(sizeof(*s));
	if (!s)
		return -ENOMEM;
	memset(s, 0, sizeof(*s));

	/* On any failure below, crypt_storage_destroy() cleans up the
	 * partially initialized context. */
	r = crypt_cipher_init(&s->cipher, cipher_name, mode_name, key, key_length);
	if (r) {
		crypt_storage_destroy(s);
		return r;
	}

	r = crypt_sector_iv_init(&s->cipher_iv, cipher_name, mode_name, cipher_iv, key, key_length, sector_size);
	if (r) {
		crypt_storage_destroy(s);
		return r;
	}

	s->sector_size = sector_size;
	/* large_iv: IV advances per encryption sector rather than per
	 * 512-byte unit (iv_offset is always in 512-byte units). */
	s->iv_shift = large_iv ? (unsigned)int_log2(sector_size) - SECTOR_SHIFT : 0;

	*ctx = s;
	return 0;
}
272 | | |
273 | | int crypt_storage_decrypt(struct crypt_storage *ctx, |
274 | | uint64_t iv_offset, |
275 | | uint64_t length, char *buffer) |
276 | 0 | { |
277 | 0 | uint64_t i; |
278 | 0 | int r = 0; |
279 | |
|
280 | 0 | if (length & (ctx->sector_size - 1)) |
281 | 0 | return -EINVAL; |
282 | | |
283 | 0 | if (iv_offset & ((ctx->sector_size >> SECTOR_SHIFT) - 1)) |
284 | 0 | return -EINVAL; |
285 | | |
286 | 0 | for (i = 0; i < length; i += ctx->sector_size) { |
287 | 0 | r = crypt_sector_iv_generate(&ctx->cipher_iv, (iv_offset + (i >> SECTOR_SHIFT)) >> ctx->iv_shift); |
288 | 0 | if (r) |
289 | 0 | break; |
290 | 0 | r = crypt_cipher_decrypt(ctx->cipher, |
291 | 0 | &buffer[i], |
292 | 0 | &buffer[i], |
293 | 0 | ctx->sector_size, |
294 | 0 | ctx->cipher_iv.iv, |
295 | 0 | ctx->cipher_iv.iv_size); |
296 | 0 | if (r) |
297 | 0 | break; |
298 | 0 | } |
299 | |
|
300 | 0 | return r; |
301 | 0 | } |
302 | | |
303 | | int crypt_storage_encrypt(struct crypt_storage *ctx, |
304 | | uint64_t iv_offset, |
305 | | uint64_t length, char *buffer) |
306 | 0 | { |
307 | 0 | uint64_t i; |
308 | 0 | int r = 0; |
309 | |
|
310 | 0 | if (length & (ctx->sector_size - 1)) |
311 | 0 | return -EINVAL; |
312 | | |
313 | 0 | if (iv_offset & ((ctx->sector_size >> SECTOR_SHIFT) - 1)) |
314 | 0 | return -EINVAL; |
315 | | |
316 | 0 | for (i = 0; i < length; i += ctx->sector_size) { |
317 | 0 | r = crypt_sector_iv_generate(&ctx->cipher_iv, (iv_offset + (i >> SECTOR_SHIFT)) >> ctx->iv_shift); |
318 | 0 | if (r) |
319 | 0 | break; |
320 | 0 | r = crypt_cipher_encrypt(ctx->cipher, |
321 | 0 | &buffer[i], |
322 | 0 | &buffer[i], |
323 | 0 | ctx->sector_size, |
324 | 0 | ctx->cipher_iv.iv, |
325 | 0 | ctx->cipher_iv.iv_size); |
326 | 0 | if (r) |
327 | 0 | break; |
328 | 0 | } |
329 | |
|
330 | 0 | return r; |
331 | 0 | } |
332 | | |
333 | | void crypt_storage_destroy(struct crypt_storage *ctx) |
334 | 0 | { |
335 | 0 | if (!ctx) |
336 | 0 | return; |
337 | | |
338 | 0 | crypt_sector_iv_destroy(&ctx->cipher_iv); |
339 | |
|
340 | 0 | if (ctx->cipher) |
341 | 0 | crypt_cipher_destroy(ctx->cipher); |
342 | |
|
343 | 0 | memset(ctx, 0, sizeof(*ctx)); |
344 | 0 | free(ctx); |
345 | 0 | } |
346 | | |
/* Report whether the configured cipher is backed only by the kernel
 * crypto API (no userspace implementation available) — presumably used
 * by callers to pick an I/O path; verify against crypt_cipher_kernel_only().
 * ctx must be a context successfully returned by crypt_storage_init(). */
bool crypt_storage_kernel_only(struct crypt_storage *ctx)
{
	return crypt_cipher_kernel_only(ctx->cipher);
}